Merge

commit 1dc8f7d026

.hgtags
@@ -172,3 +172,5 @@ b820143a6f1ce993c6e6f31db4d64de990f42654 jdk8-b47
 086271e35b0a419b38e8bda9bebd70693811df0a jdk8-b48
 cecd7026f30cbd83b0601925a7a5e059aec98138 jdk8-b49
 38fe5ab028908cf64dd73a43336ba3211577bfc3 jdk8-b50
+382651d28f2502d371eca751962232c0e535e57a jdk8-b51
+b67041a6cb508da18d2f5c7687e6a31e08bea4fc jdk8-b52
@@ -172,3 +172,5 @@ e4f81a817447c3a4f6868f083c81c2fb1b15d44c jdk8-b44
 3f6c72d1c2a6e5c9e7d81c3dc984886678a128ad jdk8-b48
 c97b99424815c43818e3cc3ffcdd1a60f3198b52 jdk8-b49
 2fd67618b9a3c847780ed7b9d228e862b6e2824c jdk8-b50
+57c0aee7309050b9d6cfcbd202dc704e9260b377 jdk8-b51
+8d24def5ceb3b8f2e857f2e18b2804fc59eecf8d jdk8-b52
@@ -172,3 +172,5 @@ cd879aff5d3cc1f58829aab3116880aa19525b78 jdk8-b43
 7e2b179a5b4dbd3f097e28daa00abfcc72ba3e0b jdk8-b48
 fe44e58a6bdbeae350ce96aafb49770a5dca5d8a jdk8-b49
 d20d9eb9f093adbf392918c703960ad24c93a331 jdk8-b50
+9b0f841ca9f7ee9bacf16a5ab41c4f829276bc6b jdk8-b51
+80689ff9cb499837513f18a1136dac7f0686cd55 jdk8-b52
@@ -267,3 +267,6 @@ e3619706a7253540a2d94e9e841acaab8ace7038 jdk8-b49
 72e0362c3f0cfacbbac8af8a5b9d2e182f21c17b hs24-b18
 58f237a9e83af6ded0d2e2c81d252cd47c0f4c45 jdk8-b50
 3b3ad16429701b2eb6712851c2f7c5a726eb2cbe hs24-b19
+663fc23da8d51c4c0552cbcb17ffc85f5869d4fd jdk8-b51
+4c8f2a12e757e7a808aa85827573e09f75d7459f hs24-b20
+6d0436885201db3f581523344a734793bb989549 jdk8-b52
@@ -93,7 +93,6 @@ public class CodeBlob extends VMObject {
   public boolean isUncommonTrapStub()   { return false; }
   public boolean isExceptionStub()      { return false; }
   public boolean isSafepointStub()      { return false; }
-  public boolean isRicochetBlob()       { return false; }
   public boolean isAdapterBlob()        { return false; }

   // Fine grain nmethod support: isNmethod() == isJavaMethod() || isNativeMethod() || isOSRMethod()
@@ -57,7 +57,6 @@ public class CodeCache {
     virtualConstructor.addMapping("BufferBlob", BufferBlob.class);
     virtualConstructor.addMapping("nmethod", NMethod.class);
     virtualConstructor.addMapping("RuntimeStub", RuntimeStub.class);
-    virtualConstructor.addMapping("RicochetBlob", RicochetBlob.class);
     virtualConstructor.addMapping("AdapterBlob", AdapterBlob.class);
     virtualConstructor.addMapping("MethodHandlesAdapterBlob", MethodHandlesAdapterBlob.class);
     virtualConstructor.addMapping("SafepointBlob", SafepointBlob.class);
@@ -127,10 +126,6 @@ public class CodeCache {
       Assert.that(result.blobContains(start) || result.blobContains(start.addOffsetTo(8)),
                   "found wrong CodeBlob");
     }
-    if (result.isRicochetBlob()) {
-      // This should probably be done for other SingletonBlobs
-      return VM.getVM().ricochetBlob();
-    }
     return result;
   }
@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.code;
-
-import java.util.*;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.runtime.*;
-import sun.jvm.hotspot.types.*;
-
-/** RicochetBlob (currently only used by Compiler 2) */
-
-public class RicochetBlob extends SingletonBlob {
-  static {
-    VM.registerVMInitializedObserver(new Observer() {
-      public void update(Observable o, Object data) {
-        initialize(VM.getVM().getTypeDataBase());
-      }
-    });
-  }
-
-  private static void initialize(TypeDataBase db) {
-    Type type = db.lookupType("RicochetBlob");
-
-    bounceOffsetField    = type.getCIntegerField("_bounce_offset");
-    exceptionOffsetField = type.getCIntegerField("_exception_offset");
-  }
-
-  private static CIntegerField bounceOffsetField;
-  private static CIntegerField exceptionOffsetField;
-
-  public RicochetBlob(Address addr) {
-    super(addr);
-  }
-
-  public boolean isRicochetBlob() {
-    return true;
-  }
-
-  public Address bounceAddr() {
-    return codeBegin().addOffsetTo(bounceOffsetField.getValue(addr));
-  }
-
-  public boolean returnsToBounceAddr(Address pc) {
-    Address bouncePc = bounceAddr();
-    return (pc.equals(bouncePc) || pc.addOffsetTo(Frame.pcReturnOffset()).equals(bouncePc));
-  }
-
-}
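
The heart of the deleted class is returnsToBounceAddr: a frame belongs to the ricochet blob when its PC hits the bounce entry either directly or after the platform's PC return offset is added. A minimal standalone sketch of that test, with plain longs standing in for the SA's Address type and an illustrative offset value (not the real per-platform constant):

    public class BounceCheck {
        // hypothetical stand-in for Frame.pcReturnOffset(), illustration only
        static final long PC_RETURN_OFFSET = 8;

        static boolean returnsToBounceAddr(long pc, long bouncePc) {
            // Match either the raw PC or the PC adjusted by the return offset.
            return pc == bouncePc || pc + PC_RETURN_OFFSET == bouncePc;
        }

        public static void main(String[] args) {
            long bouncePc = 0x1000;
            System.out.println(returnsToBounceAddr(0x1000, bouncePc)); // true
            System.out.println(returnsToBounceAddr(0x0ff8, bouncePc)); // true: pc + offset
            System.out.println(returnsToBounceAddr(0x2000, bouncePc)); // false
        }
    }
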
@@ -147,12 +147,6 @@ public abstract class Frame implements Cloneable {
     }
   }

-  public boolean isRicochetFrame() {
-    CodeBlob cb = VM.getVM().getCodeCache().findBlob(getPC());
-    RicochetBlob rcb = VM.getVM().ricochetBlob();
-    return (cb == rcb && rcb != null && rcb.returnsToBounceAddr(getPC()));
-  }
-
   public boolean isCompiledFrame() {
     if (Assert.ASSERTS_ENABLED) {
       Assert.that(!VM.getVM().isCore(), "noncore builds only");
@@ -216,8 +210,7 @@ public abstract class Frame implements Cloneable {
   public Frame realSender(RegisterMap map) {
     if (!VM.getVM().isCore()) {
       Frame result = sender(map);
-      while (result.isRuntimeFrame() ||
-             result.isRicochetFrame()) {
+      while (result.isRuntimeFrame()) {
         result = result.sender(map);
       }
       return result;
@@ -631,9 +624,6 @@ public abstract class Frame implements Cloneable {
     if (Assert.ASSERTS_ENABLED) {
       Assert.that(cb != null, "sanity check");
     }
-    if (cb == VM.getVM().ricochetBlob()) {
-      oopsRicochetDo(oopVisitor, regMap);
-    }
     if (cb.getOopMaps() != null) {
       OopMapSet.oopsDo(this, cb, regMap, oopVisitor, VM.getVM().isDebugging());

@@ -650,10 +640,6 @@ public abstract class Frame implements Cloneable {
   //  }
   }

-  private void oopsRicochetDo(AddressVisitor oopVisitor, RegisterMap regMap) {
-    // XXX Empty for now
-  }
-
   // FIXME: implement the above routines, plus add
   // oops_interpreted_arguments_do and oops_compiled_arguments_do
 }
@@ -87,8 +87,6 @@ public class VM {
   private StubRoutines stubRoutines;
   private Bytes        bytes;

-  private RicochetBlob ricochetBlob;
-
   /** Flags indicating whether we are attached to a core, C1, or C2 build */
   private boolean      usingClientCompiler;
   private boolean      usingServerCompiler;
@@ -628,18 +626,6 @@ public class VM {
     return stubRoutines;
   }

-  public RicochetBlob ricochetBlob() {
-    if (ricochetBlob == null) {
-      Type ricochetType = db.lookupType("SharedRuntime");
-      AddressField ricochetBlobAddress = ricochetType.getAddressField("_ricochet_blob");
-      Address addr = ricochetBlobAddress.getValue();
-      if (addr != null) {
-        ricochetBlob = new RicochetBlob(addr);
-      }
-    }
-    return ricochetBlob;
-  }
-
   public VMRegImpl getVMRegImplInfo() {
     if (vmregImpl == null) {
       vmregImpl = new VMRegImpl();
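
The deleted ricochetBlob() accessor followed the SA's usual lazy wrap-and-cache shape: read a static VM field's address once, wrap it in a Java mirror object, and reuse the wrapper on later calls, tolerating a still-null VM value. A generic sketch of that shape (the Supplier-based design below is illustrative, not SA API):

    import java.util.function.Supplier;

    public class LazyMirror<T> {
        private final Supplier<T> reader;  // stands in for "read field, wrap address"
        private T cached;

        public LazyMirror(Supplier<T> reader) { this.reader = reader; }

        public synchronized T get() {
            if (cached == null) {
                cached = reader.get();  // may stay null while the VM value is unset
            }
            return cached;
        }

        public static void main(String[] args) {
            LazyMirror<String> blob = new LazyMirror<>(() -> "RicochetBlob@0x1000");
            System.out.println(blob.get() == blob.get());  // true: wrapped once, reused
        }
    }
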
@@ -571,8 +571,6 @@ public class SPARCFrame extends Frame {
     //        registers callee-saved, then we will have to copy over
     //        the RegisterMap update logic from the Intel code.

-    if (isRicochetFrame()) return senderForRicochetFrame(map);
-
     // The constructor of the sender must know whether this frame is interpreted so it can set the
     // sender's _interpreter_sp_adjustment field.
     if (VM.getVM().getInterpreter().contains(pc)) {
@@ -945,20 +943,6 @@ public class SPARCFrame extends Frame {
   }


-  private Frame senderForRicochetFrame(SPARCRegisterMap map) {
-    if (DEBUG) {
-      System.out.println("senderForRicochetFrame");
-    }
-    //RicochetFrame* f = RicochetFrame::from_frame(fr);
-    // Cf. is_interpreted_frame path of frame::sender
-    Address youngerSP = getSP();
-    Address sp        = getSenderSP();
-    map.makeIntegerRegsUnsaved();
-    map.shiftWindow(sp, youngerSP);
-    boolean thisFrameAdjustedStack = true;  // I5_savedSP is live in this RF
-    return new SPARCFrame(biasSP(sp), biasSP(youngerSP), thisFrameAdjustedStack);
-  }
-
   private Frame senderForEntryFrame(RegisterMap regMap) {
     SPARCRegisterMap map = (SPARCRegisterMap) regMap;
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.runtime.sparc;
-
-import java.util.*;
-import sun.jvm.hotspot.asm.sparc.SPARCRegister;
-import sun.jvm.hotspot.asm.sparc.SPARCRegisters;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.runtime.*;
-import sun.jvm.hotspot.types.*;
-
-public class SPARCRicochetFrame {
-  static {
-    VM.registerVMInitializedObserver(new Observer() {
-      public void update(Observable o, Object data) {
-        initialize(VM.getVM().getTypeDataBase());
-      }
-    });
-  }
-
-  private SPARCFrame frame;
-
-  private static void initialize(TypeDataBase db) {
-    // Type type = db.lookupType("MethodHandles::RicochetFrame");
-
-  }
-
-  static SPARCRicochetFrame fromFrame(SPARCFrame f) {
-    return new SPARCRicochetFrame(f);
-  }
-
-  private SPARCRicochetFrame(SPARCFrame f) {
-    frame = f;
-  }
-
-  private Address registerValue(SPARCRegister reg) {
-    return frame.getSP().addOffsetTo(reg.spOffsetInSavedWindow()).getAddressAt(0);
-  }
-
-  public Address savedArgsBase() {
-    return registerValue(SPARCRegisters.L4);
-  }
-  public Address exactSenderSP() {
-    return registerValue(SPARCRegisters.I5);
-  }
-  public Address senderLink() {
-    return frame.getSenderSP();
-  }
-  public Address senderPC() {
-    return frame.getSenderPC();
-  }
-  public Address extendedSenderSP() {
-    return savedArgsBase();
-  }
-}
@@ -269,7 +269,6 @@ public class X86Frame extends Frame {

     if (isEntryFrame())       return senderForEntryFrame(map);
     if (isInterpretedFrame()) return senderForInterpreterFrame(map);
-    if (isRicochetFrame())    return senderForRicochetFrame(map);

     if(cb == null) {
       cb = VM.getVM().getCodeCache().findBlob(getPC());
@@ -288,16 +287,6 @@ public class X86Frame extends Frame {
     return new X86Frame(getSenderSP(), getLink(), getSenderPC());
   }

-  private Frame senderForRicochetFrame(X86RegisterMap map) {
-    if (DEBUG) {
-      System.out.println("senderForRicochetFrame");
-    }
-    X86RicochetFrame f = X86RicochetFrame.fromFrame(this);
-    if (map.getUpdateMap())
-      updateMapWithSavedLink(map, f.senderLinkAddress());
-    return new X86Frame(f.extendedSenderSP(), f.exactSenderSP(), f.senderLink(), f.senderPC());
-  }
-
   private Frame senderForEntryFrame(X86RegisterMap map) {
     if (DEBUG) {
       System.out.println("senderForEntryFrame");
@@ -1,81 +0,0 @@
-/*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.runtime.x86;
-
-import java.util.*;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.runtime.*;
-import sun.jvm.hotspot.types.*;
-
-public class X86RicochetFrame extends VMObject {
-  static {
-    VM.registerVMInitializedObserver(new Observer() {
-      public void update(Observable o, Object data) {
-        initialize(VM.getVM().getTypeDataBase());
-      }
-    });
-  }
-
-  private static void initialize(TypeDataBase db) {
-    Type type = db.lookupType("MethodHandles::RicochetFrame");
-
-    senderLinkField    = type.getAddressField("_sender_link");
-    savedArgsBaseField = type.getAddressField("_saved_args_base");
-    exactSenderSPField = type.getAddressField("_exact_sender_sp");
-    senderPCField      = type.getAddressField("_sender_pc");
-  }
-
-  private static AddressField senderLinkField;
-  private static AddressField savedArgsBaseField;
-  private static AddressField exactSenderSPField;
-  private static AddressField senderPCField;
-
-  static X86RicochetFrame fromFrame(X86Frame f) {
-    return new X86RicochetFrame(f.getFP().addOffsetTo(- senderLinkField.getOffset()));
-  }
-
-  private X86RicochetFrame(Address addr) {
-    super(addr);
-  }
-
-  public Address senderLink() {
-    return senderLinkField.getValue(addr);
-  }
-  public Address senderLinkAddress() {
-    return addr.addOffsetTo(senderLinkField.getOffset());
-  }
-  public Address savedArgsBase() {
-    return savedArgsBaseField.getValue(addr);
-  }
-  public Address extendedSenderSP() {
-    return savedArgsBase();
-  }
-  public Address exactSenderSP() {
-    return exactSenderSPField.getValue(addr);
-  }
-  public Address senderPC() {
-    return senderPCField.getValue(addr);
-  }
-}
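
Note the address arithmetic in the deleted fromFrame(): the mirror object is based at fp minus the offset of _sender_link, so reading that field back from the computed base lands exactly on the frame pointer. A worked sketch of the same arithmetic with made-up numbers:

    public class RicochetFrameAddr {
        public static void main(String[] args) {
            long fp = 0x7f00_0000L;      // hypothetical frame pointer
            long senderLinkOffset = 16;  // hypothetical offsetof(_sender_link)
            long base = fp - senderLinkOffset;
            // The field read from the computed base recovers fp:
            System.out.println((base + senderLinkOffset) == fp);  // true
        }
    }
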
@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2012

 HS_MAJOR_VER=24
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=19
+HS_BUILD_NUMBER=20

 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
@@ -44,8 +44,10 @@

 #ifdef PRODUCT
 #define BLOCK_COMMENT(str) /* nothing */
+#define STOP(error) stop(error)
 #else
 #define BLOCK_COMMENT(str) block_comment(str)
+#define STOP(error) block_comment(error); stop(error)
 #endif

 // Convert the raw encoding form into the form expected by the
@@ -992,7 +994,7 @@ void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Ja
     save_frame(0);                // to avoid clobbering O0
     ld_ptr(pc_addr, L0);
     br_null_short(L0, Assembler::pt, PcOk);
-    stop("last_Java_pc not zeroed before leaving Java");
+    STOP("last_Java_pc not zeroed before leaving Java");
     bind(PcOk);

     // Verify that flags was zeroed on return to Java
@@ -1001,7 +1003,7 @@ void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Ja
     tst(L0);
     br(Assembler::zero, false, Assembler::pt, FlagsOk);
     delayed()->restore();
-    stop("flags not zeroed before leaving Java");
+    STOP("flags not zeroed before leaving Java");
     bind(FlagsOk);
 #endif /* ASSERT */
     //
@@ -1021,7 +1023,7 @@ void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Ja
     andcc(last_java_sp, 0x01, G0);
     br(Assembler::notZero, false, Assembler::pt, StackOk);
     delayed()->nop();
-    stop("Stack Not Biased in set_last_Java_frame");
+    STOP("Stack Not Biased in set_last_Java_frame");
     bind(StackOk);
 #endif // ASSERT
     assert( last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame");
@@ -1650,23 +1652,28 @@ void MacroAssembler::safepoint() {


 void RegistersForDebugging::print(outputStream* s) {
+  FlagSetting fs(Debugging, true);
   int j;
-  for ( j = 0;  j < 8;  ++j )
-    if ( j != 6 ) s->print_cr("i%d = 0x%.16lx", j, i[j]);
-    else          s->print_cr( "fp = 0x%.16lx",    i[j]);
+  for (j = 0; j < 8; ++j) {
+    if (j != 6) { s->print("i%d = ", j); os::print_location(s, i[j]); }
+    else        { s->print( "fp = "   ); os::print_location(s, i[j]); }
+  }
   s->cr();

-  for ( j = 0;  j < 8;  ++j )
-    s->print_cr("l%d = 0x%.16lx", j, l[j]);
+  for (j = 0; j < 8; ++j) {
+    s->print("l%d = ", j); os::print_location(s, l[j]);
+  }
   s->cr();

-  for ( j = 0;  j < 8;  ++j )
-    if ( j != 6 ) s->print_cr("o%d = 0x%.16lx", j, o[j]);
-    else          s->print_cr( "sp = 0x%.16lx",    o[j]);
+  for (j = 0; j < 8; ++j) {
+    if (j != 6) { s->print("o%d = ", j); os::print_location(s, o[j]); }
+    else        { s->print( "sp = "   ); os::print_location(s, o[j]); }
+  }
   s->cr();

-  for ( j = 0;  j < 8;  ++j )
-    s->print_cr("g%d = 0x%.16lx", j, g[j]);
+  for (j = 0; j < 8; ++j) {
+    s->print("g%d = ", j); os::print_location(s, g[j]);
+  }
   s->cr();

   // print out floats with compression
@@ -2020,8 +2027,8 @@ void MacroAssembler::untested(const char* what) {
   char* b = new char[1024];
   sprintf(b, "untested: %s", what);

-  if ( ShowMessageBoxOnError )   stop(b);
-  else                           warn(b);
+  if (ShowMessageBoxOnError) { STOP(b); }
+  else                       { warn(b); }
 }
@@ -2998,26 +3005,60 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
 }


+// virtual method calling
+void MacroAssembler::lookup_virtual_method(Register recv_klass,
+                                           RegisterOrConstant vtable_index,
+                                           Register method_result) {
+  assert_different_registers(recv_klass, method_result, vtable_index.register_or_noreg());
+  Register sethi_temp = method_result;
+  const int base = (instanceKlass::vtable_start_offset() * wordSize +
+                    // method pointer offset within the vtable entry:
+                    vtableEntry::method_offset_in_bytes());
+  RegisterOrConstant vtable_offset = vtable_index;
+  // Each of the following three lines potentially generates an instruction.
+  // But the total number of address formation instructions will always be
+  // at most two, and will often be zero.  In any case, it will be optimal.
+  // If vtable_index is a register, we will have (sll_ptr N,x; inc_ptr B,x; ld_ptr k,x).
+  // If vtable_index is a constant, we will have at most (set B+X<<N,t; ld_ptr k,t).
+  vtable_offset = regcon_sll_ptr(vtable_index, exact_log2(vtableEntry::size() * wordSize), vtable_offset);
+  vtable_offset = regcon_inc_ptr(vtable_offset, base, vtable_offset, sethi_temp);
+  Address vtable_entry_addr(recv_klass, ensure_simm13_or_reg(vtable_offset, sethi_temp));
+  ld_ptr(vtable_entry_addr, method_result);
+}
+
+
 void MacroAssembler::check_klass_subtype(Register sub_klass,
                                          Register super_klass,
                                          Register temp_reg,
                                          Register temp2_reg,
                                          Label& L_success) {
-  Label L_failure, L_pop_to_failure;
-  check_klass_subtype_fast_path(sub_klass, super_klass,
-                                temp_reg, temp2_reg,
-                                &L_success, &L_failure, NULL);
   Register sub_2 = sub_klass;
   Register sup_2 = super_klass;
   if (!sub_2->is_global())  sub_2 = L0;
   if (!sup_2->is_global())  sup_2 = L1;
+  bool did_save = false;
+  if (temp_reg == noreg || temp2_reg == noreg) {
+    temp_reg = L2;
+    temp2_reg = L3;
+    save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2);
+    sub_klass = sub_2;
+    super_klass = sup_2;
+    did_save = true;
+  }
+  Label L_failure, L_pop_to_failure, L_pop_to_success;
+  check_klass_subtype_fast_path(sub_klass, super_klass,
+                                temp_reg, temp2_reg,
+                                (did_save ? &L_pop_to_success : &L_success),
+                                (did_save ? &L_pop_to_failure : &L_failure), NULL);

-  save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2);
+  if (!did_save)
+    save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2);
   check_klass_subtype_slow_path(sub_2, sup_2,
                                 L2, L3, L4, L5,
                                 NULL, &L_pop_to_failure);

   // on success:
+  bind(L_pop_to_success);
   restore();
   ba_short(L_success);
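
The address formed by the new lookup_virtual_method is recv_klass + base + vtable_index * sizeof(vtableEntry), where base folds together the vtable start offset inside the klass and the method-pointer offset inside one entry. A worked version of that arithmetic follows; the layout constants are placeholders for illustration, not HotSpot's real values:

    public class VtableOffset {
        static final int WORD_SIZE = 8;                   // hypothetical
        static final int VTABLE_START_WORDS = 27;         // instanceKlass::vtable_start_offset()
        static final int ENTRY_SIZE_WORDS = 1;            // vtableEntry::size()
        static final int METHOD_OFFSET_IN_ENTRY = 0;      // vtableEntry::method_offset_in_bytes()

        static long vtableEntryAddr(long klassBase, int vtableIndex) {
            long base = VTABLE_START_WORDS * WORD_SIZE + METHOD_OFFSET_IN_ENTRY;
            // klass + base + index * entry size: the ld_ptr in the hunk loads from here
            return klassBase + base + (long) vtableIndex * (ENTRY_SIZE_WORDS * WORD_SIZE);
        }

        public static void main(String[] args) {
            System.out.println(Long.toHexString(vtableEntryAddr(0x1000, 5)));  // 1100
        }
    }
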
@@ -3234,54 +3275,6 @@ void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
 }


-void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_reg,
-                                              Register temp_reg,
-                                              Label& wrong_method_type) {
-  assert_different_registers(mtype_reg, mh_reg, temp_reg);
-  // compare method type against that of the receiver
-  RegisterOrConstant mhtype_offset = delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg);
-  load_heap_oop(mh_reg, mhtype_offset, temp_reg);
-  cmp_and_brx_short(temp_reg, mtype_reg, Assembler::notEqual, Assembler::pn, wrong_method_type);
-}
-
-
-// A method handle has a "vmslots" field which gives the size of its
-// argument list in JVM stack slots.  This field is either located directly
-// in every method handle, or else is indirectly accessed through the
-// method handle's MethodType.  This macro hides the distinction.
-void MacroAssembler::load_method_handle_vmslots(Register vmslots_reg, Register mh_reg,
-                                                Register temp_reg) {
-  assert_different_registers(vmslots_reg, mh_reg, temp_reg);
-  // load mh.type.form.vmslots
-  Register temp2_reg = vmslots_reg;
-  load_heap_oop(Address(mh_reg, delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg)), temp2_reg);
-  load_heap_oop(Address(temp2_reg, delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, temp_reg)), temp2_reg);
-  ld(           Address(temp2_reg, delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, temp_reg)), vmslots_reg);
-}
-
-
-void MacroAssembler::jump_to_method_handle_entry(Register mh_reg, Register temp_reg, bool emit_delayed_nop) {
-  assert(mh_reg == G3_method_handle, "caller must put MH object in G3");
-  assert_different_registers(mh_reg, temp_reg);
-
-  // pick out the interpreted side of the handler
-  // NOTE: vmentry is not an oop!
-  ld_ptr(mh_reg, delayed_value(java_lang_invoke_MethodHandle::vmentry_offset_in_bytes, temp_reg), temp_reg);
-
-  // off we go...
-  ld_ptr(temp_reg, MethodHandleEntry::from_interpreted_entry_offset_in_bytes(), temp_reg);
-  jmp(temp_reg, 0);
-
-  // for the various stubs which take control at this point,
-  // see MethodHandles::generate_method_handle_stub
-
-  // Some callers can fill the delay slot.
-  if (emit_delayed_nop) {
-    delayed()->nop();
-  }
-}
-
-
 RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot,
                                                    Register temp_reg,
                                                    int extra_slot_offset) {
@@ -3914,7 +3907,7 @@ void MacroAssembler::verify_tlab() {
     ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2);
     or3(t1, t2, t3);
     cmp_and_br_short(t1, t2, Assembler::greaterEqual, Assembler::pn, next);
-    stop("assert(top >= start)");
+    STOP("assert(top >= start)");
     should_not_reach_here();

     bind(next);
@@ -3922,13 +3915,13 @@ void MacroAssembler::verify_tlab() {
     ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t2);
     or3(t3, t2, t3);
     cmp_and_br_short(t1, t2, Assembler::lessEqual, Assembler::pn, next2);
-    stop("assert(top <= end)");
+    STOP("assert(top <= end)");
     should_not_reach_here();

     bind(next2);
     and3(t3, MinObjAlignmentInBytesMask, t3);
     cmp_and_br_short(t3, 0, Assembler::lessEqual, Assembler::pn, ok);
-    stop("assert(aligned)");
+    STOP("assert(aligned)");
     should_not_reach_here();

     bind(ok);
@@ -3976,7 +3969,7 @@ void MacroAssembler::eden_allocate(
       btst(MinObjAlignmentInBytesMask, obj);
       br(Assembler::zero, false, Assembler::pt, L);
       delayed()->nop();
-      stop("eden top is not properly aligned");
+      STOP("eden top is not properly aligned");
       bind(L);
     }
 #endif // ASSERT
@@ -4013,7 +4006,7 @@ void MacroAssembler::eden_allocate(
       btst(MinObjAlignmentInBytesMask, top_addr);
       br(Assembler::zero, false, Assembler::pt, L);
       delayed()->nop();
-      stop("eden top is not properly aligned");
+      STOP("eden top is not properly aligned");
       bind(L);
     }
 #endif // ASSERT
@@ -4066,7 +4059,7 @@ void MacroAssembler::tlab_allocate(
       btst(MinObjAlignmentInBytesMask, free);
       br(Assembler::zero, false, Assembler::pt, L);
       delayed()->nop();
-      stop("updated TLAB free is not properly aligned");
+      STOP("updated TLAB free is not properly aligned");
       bind(L);
     }
 #endif // ASSERT
@@ -4164,7 +4157,7 @@ void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case
     ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t2);
     sll_ptr(t2, LogHeapWordSize, t2);
     cmp_and_br_short(t1, t2, Assembler::equal, Assembler::pt, ok);
-    stop("assert(t1 == tlab_size)");
+    STOP("assert(t1 == tlab_size)");
     should_not_reach_here();

     bind(ok);
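
Several of these hunks guard the same invariant: a heap or TLAB pointer is object aligned exactly when its low mask bits are zero, which is what the and3 plus compare sequence checks before falling into STOP. The same predicate in a few lines of Java, with 8-byte alignment assumed purely for illustration:

    public class AlignmentCheck {
        // stands in for MinObjAlignmentInBytesMask under an assumed 8-byte alignment
        static final long MIN_OBJ_ALIGNMENT_MASK = 8 - 1;

        static boolean isObjectAligned(long addr) {
            return (addr & MIN_OBJ_ALIGNMENT_MASK) == 0;
        }

        public static void main(String[] args) {
            System.out.println(isObjectAligned(0x1000));  // true
            System.out.println(isObjectAligned(0x1004));  // false
        }
    }
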
@@ -2538,6 +2538,11 @@ public:
                                Register temp_reg, Register temp2_reg,
                                Label& no_such_interface);

+  // virtual method calling
+  void lookup_virtual_method(Register recv_klass,
+                             RegisterOrConstant vtable_index,
+                             Register method_result);
+
   // Test sub_klass against super_klass, with fast and slow paths.

   // The fast path produces a tri-state answer: yes / no / maybe-slow.
@@ -2577,12 +2582,6 @@ public:
                            Label& L_success);

-  // method handles (JSR 292)
-  void check_method_handle_type(Register mtype_reg, Register mh_reg,
-                                Register temp_reg,
-                                Label& wrong_method_type);
-  void load_method_handle_vmslots(Register vmslots_reg, Register mh_reg,
-                                  Register temp_reg);
-  void jump_to_method_handle_entry(Register mh_reg, Register temp_reg, bool emit_delayed_nop = true);
   // offset relative to Gargs of argument at tos[arg_slot].
   // (arg_slot == 0 means the last argument, not the first).
   RegisterOrConstant argument_offset(RegisterOrConstant arg_slot,
@@ -2590,7 +2589,7 @@ public:
                                      int extra_slot_offset = 0);
   // Address of Gargs and argument_offset.
   Address            argument_address(RegisterOrConstant arg_slot,
-                                      Register temp_reg,
+                                      Register temp_reg = noreg,
                                       int extra_slot_offset = 0);

   // Stack overflow checking
@@ -2956,6 +2956,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
   ciMethod* method = op->profiled_method();
   int bci = op->profiled_bci();
+  ciMethod* callee = op->profiled_callee();

   // Update counter for all call types
   ciMethodData* md = method->method_data_or_null();
@@ -2984,9 +2985,11 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {

   Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
   Bytecodes::Code bc = method->java_code_at_bci(bci);
+  const bool callee_is_static = callee->is_loaded() && callee->is_static();
   // Perform additional virtual call profiling for invokevirtual and
   // invokeinterface bytecodes
   if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
+      !callee_is_static &&  // required for optimized MH invokes
       C1ProfileVirtualCalls) {
     assert(op->recv()->is_single_cpu(), "recv must be allocated");
     Register recv = op->recv()->as_register();
@@ -515,9 +515,9 @@ address InterpreterGenerator::generate_accessor_entry(void) {
     // Need to differentiate between igetfield, agetfield, bgetfield etc.
     // because they are different sizes.
     // Get the type from the constant pool cache
-    __ srl(G1_scratch, ConstantPoolCacheEntry::tosBits, G1_scratch);
-    // Make sure we don't need to mask G1_scratch for tosBits after the above shift
-    ConstantPoolCacheEntry::verify_tosBits();
+    __ srl(G1_scratch, ConstantPoolCacheEntry::tos_state_shift, G1_scratch);
+    // Make sure we don't need to mask G1_scratch after the above shift
+    ConstantPoolCacheEntry::verify_tos_state_shift();
     __ cmp(G1_scratch, atos );
     __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
     __ delayed()->ld_ptr(Otos_i, G3_scratch, Otos_i);
@@ -514,7 +514,6 @@ frame frame::sender(RegisterMap* map) const {
   // interpreted but its pc is in the code cache (for c1 -> osr_frame_return_id stub), so it must be
   // explicitly recognized.

-  if (is_ricochet_frame())    return sender_for_ricochet_frame(map);

   bool frame_is_interpreted = is_interpreted_frame();
   if (frame_is_interpreted) {
@@ -821,9 +820,7 @@ void frame::describe_pd(FrameValues& values, int frame_no) {
     values.describe(frame_no, sp() + w, err_msg("register save area word %d", w), 1);
   }

-  if (is_ricochet_frame()) {
-    MethodHandles::RicochetFrame::describe(this, values, frame_no);
-  } else if (is_interpreted_frame()) {
+  if (is_interpreted_frame()) {
     DESCRIBE_FP_OFFSET(interpreter_frame_d_scratch_fp);
     DESCRIBE_FP_OFFSET(interpreter_frame_l_scratch_fp);
     DESCRIBE_FP_OFFSET(interpreter_frame_padding);
@@ -505,7 +505,7 @@ void InterpreterMacroAssembler::store_ptr(int n, Register val) {
 void InterpreterMacroAssembler::load_receiver(Register param_count,
                                               Register recv) {
   sll(param_count, Interpreter::logStackElementSize, param_count);
-  ld_ptr(Lesp, param_count, recv);  // gets receiver Oop
+  ld_ptr(Lesp, param_count, recv);  // gets receiver oop
 }

 void InterpreterMacroAssembler::empty_expression_stack() {
@@ -767,8 +767,12 @@ void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register
   get_cache_and_index_at_bcp(cache, temp, bcp_offset, index_size);
   ld_ptr(cache, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset(), bytecode);
   const int shift_count = (1 + byte_no) * BitsPerByte;
-  srl( bytecode, shift_count, bytecode);
-  and3(bytecode,        0xFF, bytecode);
+  assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) ||
+         (byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift),
+         "correct shift count");
+  srl(bytecode, shift_count, bytecode);
+  assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask");
+  and3(bytecode, ConstantPoolCacheEntry::bytecode_1_mask, bytecode);
 }
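
The replacement code makes the packing explicit: the two cached bytecodes live in adjacent bytes of the _indices word, selected by shift_count = (1 + byte_no) * BitsPerByte and masked down to one byte. A small sketch of that extraction; the packed value and the byte numbering are hypothetical:

    public class BytecodeField {
        static final int BITS_PER_BYTE = 8;

        // byte_no 0 or 1 selects which cached bytecode to pull out,
        // mirroring the srl + and3 sequence in the hunk above.
        static int cachedBytecode(long indices, int byteNo) {
            int shiftCount = (1 + byteNo) * BITS_PER_BYTE;
            return (int) ((indices >>> shiftCount) & 0xFF);
        }

        public static void main(String[] args) {
            long indices = 0x00B6_B200L;  // hypothetical packed word
            System.out.println(Integer.toHexString(cachedBytecode(indices, 0)));  // b2
            System.out.println(Integer.toHexString(cachedBytecode(indices, 1)));  // b6
        }
    }
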
@@ -32,7 +32,6 @@
   address generate_normal_entry(bool synchronized);
   address generate_native_entry(bool synchronized);
   address generate_abstract_entry(void);
-  address generate_method_handle_entry(void);
   address generate_math_entry(AbstractInterpreter::MethodKind kind);
   address generate_empty_entry(void);
   address generate_accessor_entry(void);
@@ -255,17 +255,6 @@ address InterpreterGenerator::generate_abstract_entry(void) {
 }


-// Method handle invoker
-// Dispatch a method of the form java.lang.invoke.MethodHandles::invoke(...)
-address InterpreterGenerator::generate_method_handle_entry(void) {
-  if (!EnableInvokeDynamic) {
-    return generate_abstract_entry();
-  }
-
-  return MethodHandles::generate_method_handle_interpreter_entry(_masm);
-}
-
-
 //----------------------------------------------------------------------------------------------------
 // Entry points & stack frame layout
 //
@@ -395,7 +384,7 @@ address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter:
     case Interpreter::empty                  : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry();         break;
     case Interpreter::accessor               : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry();      break;
     case Interpreter::abstract               : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry();      break;
-    case Interpreter::method_handle          : entry_point = ((InterpreterGenerator*)this)->generate_method_handle_entry(); break;
+
     case Interpreter::java_lang_math_sin     : break;
     case Interpreter::java_lang_math_cos     : break;
     case Interpreter::java_lang_math_tan     : break;
@@ -407,7 +396,9 @@ address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter:
     case Interpreter::java_lang_math_exp     : break;
     case Interpreter::java_lang_ref_reference_get
                                              : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
-    default                                  : ShouldNotReachHere();                                                        break;
+    default:
+      fatal(err_msg("unexpected method kind: %d", kind));
+      break;
   }

   if (entry_point) return entry_point;
(File diff suppressed because it is too large)
@@ -30,186 +30,9 @@ enum /* platform_dependent_constants */ {
   adapter_code_size = NOT_LP64(23000 DEBUG_ONLY(+ 40000)) LP64_ONLY(35000 DEBUG_ONLY(+ 50000))
 };

-public:
-
-class RicochetFrame : public ResourceObj {
-  friend class MethodHandles;
-
- private:
-  /*
-    RF field            x86                 SPARC
-    sender_pc           *(rsp+0)            I7-0x8
-    sender_link         rbp                 I6+BIAS
-    exact_sender_sp     rsi/r13             I5_savedSP
-    conversion          *(rcx+&amh_conv)    L5_conv
-    saved_args_base     rax                 L4_sab (cf. Gargs = G4)
-    saved_args_layout   #NULL               L3_sal
-    saved_target        *(rcx+&mh_vmtgt)    L2_stgt
-    continuation        #STUB_CON           L1_cont
-   */
-  static const Register L1_continuation     ;  // what to do when control gets back here
-  static const Register L2_saved_target     ;  // target method handle to invoke on saved_args
-  static const Register L3_saved_args_layout;  // caching point for MethodTypeForm.vmlayout cookie
-  static const Register L4_saved_args_base  ;  // base of pushed arguments (slot 0, arg N) (-3)
-  static const Register L5_conversion       ;  // misc. information from original AdapterMethodHandle (-2)
-
-  frame _fr;
-
-  RicochetFrame(const frame& fr) : _fr(fr) { }
-
-  intptr_t* register_addr(Register reg) const {
-    assert((_fr.sp() + reg->sp_offset_in_saved_window()) == _fr.register_addr(reg), "must agree");
-    return _fr.register_addr(reg);
-  }
-  intptr_t register_value(Register reg) const { return *register_addr(reg); }
-
- public:
-  intptr_t* continuation() const        { return (intptr_t*) register_value(L1_continuation); }
-  oop saved_target() const              { return (oop) register_value(L2_saved_target); }
-  oop saved_args_layout() const         { return (oop) register_value(L3_saved_args_layout); }
-  intptr_t* saved_args_base() const     { return (intptr_t*) register_value(L4_saved_args_base); }
-  intptr_t conversion() const           { return register_value(L5_conversion); }
-  intptr_t* exact_sender_sp() const     { return (intptr_t*) register_value(I5_savedSP); }
-  intptr_t* sender_link() const         { return _fr.sender_sp(); }  // XXX
-  address sender_pc() const             { return _fr.sender_pc(); }
-
-  // This value is not used for much, but it apparently must be nonzero.
-  static int frame_size_in_bytes()      { return wordSize * 4; }
-
-  intptr_t* extended_sender_sp() const  { return saved_args_base(); }
-
-  intptr_t return_value_slot_number() const {
-    return adapter_conversion_vminfo(conversion());
-  }
-  BasicType return_value_type() const {
-    return adapter_conversion_dest_type(conversion());
-  }
-  bool has_return_value_slot() const {
-    return return_value_type() != T_VOID;
-  }
-  intptr_t* return_value_slot_addr() const {
-    assert(has_return_value_slot(), "");
-    return saved_arg_slot_addr(return_value_slot_number());
-  }
-  intptr_t* saved_target_slot_addr() const {
-    return saved_arg_slot_addr(saved_args_length());
-  }
-  intptr_t* saved_arg_slot_addr(int slot) const {
-    assert(slot >= 0, "");
-    return (intptr_t*)( (address)saved_args_base() + (slot * Interpreter::stackElementSize) );
-  }
-
-  jint saved_args_length() const;
-  jint saved_arg_offset(int arg) const;
-
-  // GC interface
-  oop* saved_target_addr()              { return (oop*)register_addr(L2_saved_target); }
-  oop* saved_args_layout_addr()         { return (oop*)register_addr(L3_saved_args_layout); }
-
-  oop compute_saved_args_layout(bool read_cache, bool write_cache);
-
-#ifdef ASSERT
-  // The magic number is supposed to help find ricochet frames within the bytes of stack dumps.
-  enum { MAGIC_NUMBER_1 = 0xFEED03E, MAGIC_NUMBER_2 = 0xBEEF03E };
-  static const Register L0_magic_number_1  ;  // cookie for debugging, at start of RSA
-  static Address magic_number_2_addr()  { return Address(L4_saved_args_base, -wordSize); }
-  intptr_t magic_number_1() const       { return register_value(L0_magic_number_1); }
-  intptr_t magic_number_2() const       { return saved_args_base()[-1]; }
-#endif //ASSERT
-
- public:
-  enum { RETURN_VALUE_PLACEHOLDER = (NOT_DEBUG(0) DEBUG_ONLY(42)) };
-
-  void verify() const NOT_DEBUG_RETURN; // check for MAGIC_NUMBER, etc.
-
-  static void generate_ricochet_blob(MacroAssembler* _masm,
-                                     // output params:
-                                     int* bounce_offset,
-                                     int* exception_offset,
-                                     int* frame_size_in_words);
-
-  static void enter_ricochet_frame(MacroAssembler* _masm,
-                                   Register recv_reg,
-                                   Register argv_reg,
-                                   address return_handler);
-
-  static void leave_ricochet_frame(MacroAssembler* _masm,
-                                   Register recv_reg,
-                                   Register new_sp_reg,
-                                   Register sender_pc_reg);
-
-  static RicochetFrame* from_frame(const frame& fr) {
-    RicochetFrame* rf = new RicochetFrame(fr);
-    rf->verify();
-    return rf;
-  }
-
-  static void verify_clean(MacroAssembler* _masm) NOT_DEBUG_RETURN;
-
-  static void describe(const frame* fr, FrameValues& values, int frame_no) PRODUCT_RETURN;
-};
-
 // Additional helper methods for MethodHandles code generation:
 public:
   static void load_klass_from_Class(MacroAssembler* _masm, Register klass_reg, Register temp_reg, Register temp2_reg);
-  static void load_conversion_vminfo(MacroAssembler* _masm, Address conversion_field_addr, Register reg);
-  static void extract_conversion_vminfo(MacroAssembler* _masm, Register conversion_field_reg, Register reg);
-  static void extract_conversion_dest_type(MacroAssembler* _masm, Register conversion_field_reg, Register reg);
-
-  static void load_stack_move(MacroAssembler* _masm,
-                              Address G3_amh_conversion,
-                              Register G5_stack_move);
-
-  static void insert_arg_slots(MacroAssembler* _masm,
-                               RegisterOrConstant arg_slots,
-                               Register argslot_reg,
-                               Register temp_reg, Register temp2_reg, Register temp3_reg);
-
-  static void remove_arg_slots(MacroAssembler* _masm,
-                               RegisterOrConstant arg_slots,
-                               Register argslot_reg,
-                               Register temp_reg, Register temp2_reg, Register temp3_reg);
-
-  static void push_arg_slots(MacroAssembler* _masm,
-                             Register argslot_reg,
-                             RegisterOrConstant slot_count,
-                             Register temp_reg, Register temp2_reg);
-
-  static void move_arg_slots_up(MacroAssembler* _masm,
-                                Register bottom_reg,  // invariant
-                                Address  top_addr,    // can use temp_reg
-                                RegisterOrConstant positive_distance_in_slots,
-                                Register temp_reg, Register temp2_reg);
-
-  static void move_arg_slots_down(MacroAssembler* _masm,
-                                  Address  bottom_addr,  // can use temp_reg
-                                  Register top_reg,      // invariant
-                                  RegisterOrConstant negative_distance_in_slots,
-                                  Register temp_reg, Register temp2_reg);
-
-  static void move_typed_arg(MacroAssembler* _masm,
-                             BasicType type, bool is_element,
-                             Address value_src, Address slot_dest,
-                             Register temp_reg);
-
-  static void move_return_value(MacroAssembler* _masm, BasicType type,
-                                Address return_slot);
-
-  static void verify_argslot(MacroAssembler* _masm, Register argslot_reg,
-                             Register temp_reg,
-                             const char* error_message) NOT_DEBUG_RETURN;
-
-  static void verify_argslots(MacroAssembler* _masm,
-                              RegisterOrConstant argslot_count,
-                              Register argslot_reg,
-                              Register temp_reg,
-                              Register temp2_reg,
-                              bool negate_argslot,
-                              const char* error_message) NOT_DEBUG_RETURN;
-
-  static void verify_stack_move(MacroAssembler* _masm,
-                                RegisterOrConstant arg_slots,
-                                int direction) NOT_DEBUG_RETURN;

   static void verify_klass(MacroAssembler* _masm,
                            Register obj_reg, KlassHandle klass,
@@ -223,8 +46,17 @@ public:
                  "reference is a MH");
   }

+  static void verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) NOT_DEBUG_RETURN;
+
   // Similar to InterpreterMacroAssembler::jump_from_interpreted.
   // Takes care of special dispatch from single stepping too.
-  static void jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp, Register temp2);
+  static void jump_from_method_handle(MacroAssembler* _masm, Register method,
+                                      Register temp, Register temp2,
+                                      bool for_compiler_entry);
+
+  static void jump_to_lambda_form(MacroAssembler* _masm,
+                                  Register recv, Register method_temp,
+                                  Register temp2, Register temp3,
+                                  bool for_compiler_entry);

   static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN;
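
Among the deleted accessors, saved_arg_slot_addr defined the ricochet frame's argument area as base plus slot times the interpreter stack element size, with slot 0 at the base. A sketch of that indexing; the 8-byte element size is an assumption for illustration:

    public class RicochetSlotAddr {
        static final int STACK_ELEMENT_SIZE = 8;  // assumed Interpreter::stackElementSize

        static long savedArgSlotAddr(long savedArgsBase, int slot) {
            if (slot < 0) throw new IllegalArgumentException("slot >= 0 required");
            return savedArgsBase + (long) slot * STACK_ELEMENT_SIZE;
        }

        public static void main(String[] args) {
            long base = 0x7fff_0000L;
            System.out.println(Long.toHexString(savedArgSlotAddr(base, 3)));  // base + 24
        }
    }
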
@@ -400,13 +400,13 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
     case T_LONG:                // LP64, longs compete with int args
       assert(sig_bt[i+1] == T_VOID, "");
 #ifdef _LP64
-      if (int_reg_cnt < int_reg_max) int_reg_cnt++;
+      if (int_reg_cnt < int_reg_max)  int_reg_cnt++;
 #endif
       break;
     case T_OBJECT:
     case T_ARRAY:
     case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
-      if (int_reg_cnt < int_reg_max) int_reg_cnt++;
+      if (int_reg_cnt < int_reg_max)  int_reg_cnt++;
 #ifndef _LP64
       else                            stk_reg_pairs++;
 #endif
@@ -416,11 +416,11 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
     case T_CHAR:
     case T_BYTE:
     case T_BOOLEAN:
-      if (int_reg_cnt < int_reg_max) int_reg_cnt++;
+      if (int_reg_cnt < int_reg_max)  int_reg_cnt++;
       else                            stk_reg_pairs++;
       break;
     case T_FLOAT:
-      if (flt_reg_cnt < flt_reg_max) flt_reg_cnt++;
+      if (flt_reg_cnt < flt_reg_max)  flt_reg_cnt++;
       else                            stk_reg_pairs++;
       break;
     case T_DOUBLE:
@@ -436,7 +436,6 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
   // This is where the longs/doubles start on the stack.
   stk_reg_pairs = (stk_reg_pairs+1) & ~1; // Round

-  int int_reg_pairs = (int_reg_cnt+1) & ~1; // 32-bit 2-reg longs only
   int flt_reg_pairs = (flt_reg_cnt+1) & ~1;

   // int stk_reg = frame::register_save_words*(wordSize>>2);
@@ -517,24 +516,15 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
         stk_reg_pairs += 2;
       }
 #else // COMPILER2
-      if (int_reg_pairs + 1 < int_reg_max) {
-        if (is_outgoing) {
-          regs[i].set_pair(as_oRegister(int_reg_pairs + 1)->as_VMReg(), as_oRegister(int_reg_pairs)->as_VMReg());
-        } else {
-          regs[i].set_pair(as_iRegister(int_reg_pairs + 1)->as_VMReg(), as_iRegister(int_reg_pairs)->as_VMReg());
-        }
-        int_reg_pairs += 2;
-      } else {
         regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
         stk_reg_pairs += 2;
-      }
 #endif // COMPILER2
 #endif // _LP64
       break;

     case T_FLOAT:
       if (flt_reg < flt_reg_max) regs[i].set1(as_FloatRegister(flt_reg++)->as_VMReg());
-      else                       regs[i].set1( VMRegImpl::stack2reg(stk_reg++));
+      else                       regs[i].set1(VMRegImpl::stack2reg(stk_reg++));
       break;
     case T_DOUBLE:
       assert(sig_bt[i+1] == T_VOID, "expecting half");
@@ -886,6 +876,20 @@ void AdapterGenerator::gen_c2i_adapter(
   __ delayed()->add(SP, G1, Gargs);
 }

+static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg, Register temp2_reg,
+                        address code_start, address code_end,
+                        Label& L_ok) {
+  Label L_fail;
+  __ set(ExternalAddress(code_start), temp_reg);
+  __ set(pointer_delta(code_end, code_start, 1), temp2_reg);
+  __ cmp(pc_reg, temp_reg);
+  __ brx(Assembler::lessEqualUnsigned, false, Assembler::pn, L_fail);
+  __ delayed()->add(temp_reg, temp2_reg, temp_reg);
+  __ cmp(pc_reg, temp_reg);
+  __ cmp_and_brx_short(pc_reg, temp_reg, Assembler::lessUnsigned, Assembler::pt, L_ok);
+  __ bind(L_fail);
+}
+
 void AdapterGenerator::gen_i2c_adapter(
     int total_args_passed,
     // VMReg max_arg,
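
The added range_check accepts a PC strictly above code_start and strictly below code_start + size, using unsigned comparisons (lessEqualUnsigned falls through to failure, lessUnsigned branches to success). The same predicate in Java, where Long.compareUnsigned stands in for the unsigned branches:

    public class PcRangeCheck {
        static boolean inRange(long pc, long codeStart, long codeEnd) {
            // pc <= start (unsigned) corresponds to the branch to L_fail
            if (Long.compareUnsigned(pc, codeStart) <= 0) return false;
            // pc < end (unsigned) corresponds to the branch to L_ok
            return Long.compareUnsigned(pc, codeEnd) < 0;
        }

        public static void main(String[] args) {
            System.out.println(inRange(0x1010, 0x1000, 0x2000));  // true
            System.out.println(inRange(0x1000, 0x1000, 0x2000));  // false: pc == start fails
            System.out.println(inRange(0x2000, 0x1000, 0x2000));  // false
        }
    }
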
@@ -907,6 +911,51 @@ void AdapterGenerator::gen_i2c_adapter(
   // This removes all sorts of headaches on the x86 side and also eliminates
   // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.

+  // More detail:
+  // Adapters can be frameless because they do not require the caller
+  // to perform additional cleanup work, such as correcting the stack pointer.
+  // An i2c adapter is frameless because the *caller* frame, which is interpreted,
+  // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
+  // even if a callee has modified the stack pointer.
+  // A c2i adapter is frameless because the *callee* frame, which is interpreted,
+  // routinely repairs its caller's stack pointer (from sender_sp, which is set
+  // up via the senderSP register).
+  // In other words, if *either* the caller or callee is interpreted, we can
+  // get the stack pointer repaired after a call.
+  // This is why c2i and i2c adapters cannot be indefinitely composed.
+  // In particular, if a c2i adapter were to somehow call an i2c adapter,
+  // both caller and callee would be compiled methods, and neither would
+  // clean up the stack pointer changes performed by the two adapters.
+  // If this happens, control eventually transfers back to the compiled
+  // caller, but with an uncorrected stack, causing delayed havoc.
+
+  if (VerifyAdapterCalls &&
+      (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
+    // So, let's test for cascading c2i/i2c adapters right now.
+    //  assert(Interpreter::contains($return_addr) ||
+    //         StubRoutines::contains($return_addr),
+    //         "i2c adapter must return to an interpreter frame");
+    __ block_comment("verify_i2c { ");
+    Label L_ok;
+    if (Interpreter::code() != NULL)
+      range_check(masm, O7, O0, O1,
+                  Interpreter::code()->code_start(), Interpreter::code()->code_end(),
+                  L_ok);
+    if (StubRoutines::code1() != NULL)
+      range_check(masm, O7, O0, O1,
+                  StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
+                  L_ok);
+    if (StubRoutines::code2() != NULL)
+      range_check(masm, O7, O0, O1,
+                  StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
+                  L_ok);
+    const char* msg = "i2c adapter must return to an interpreter frame";
+    __ block_comment(msg);
+    __ stop(msg);
+    __ bind(L_ok);
+    __ block_comment("} verify_i2ce ");
+  }
+
   // As you can see from the list of inputs & outputs there are not a lot
   // of temp registers to work with: mostly G1, G3 & G4.
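
The VerifyAdapterCalls block enforces the policy spelled out in the comment above: an i2c adapter must return into the interpreter or a stub region, never into compiled code, or two adapters have been composed. A compact model of that check (Java 16+ records; the region bounds are invented):

    public class VerifyI2C {
        record CodeRegion(long start, long end) {
            boolean contains(long pc) { return start <= pc && pc < end; }
        }

        static boolean returnsToInterpreterOrStub(long returnPc, CodeRegion... regions) {
            for (CodeRegion r : regions) {
                if (r.contains(returnPc)) return true;
            }
            return false;  // corresponds to reaching __ stop(msg) above
        }

        public static void main(String[] args) {
            CodeRegion interp = new CodeRegion(0x1000, 0x2000);  // hypothetical bounds
            CodeRegion stubs  = new CodeRegion(0x3000, 0x4000);
            System.out.println(returnsToInterpreterOrStub(0x1800, interp, stubs));  // true
            System.out.println(returnsToInterpreterOrStub(0x5000, interp, stubs));  // false
        }
    }
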
@@ -1937,20 +1986,156 @@ static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType
   __ bind(done);
 }

+static void verify_oop_args(MacroAssembler* masm,
+                            int total_args_passed,
+                            const BasicType* sig_bt,
+                            const VMRegPair* regs) {
+  Register temp_reg = G5_method;  // not part of any compiled calling seq
+  if (VerifyOops) {
+    for (int i = 0; i < total_args_passed; i++) {
+      if (sig_bt[i] == T_OBJECT ||
+          sig_bt[i] == T_ARRAY) {
+        VMReg r = regs[i].first();
+        assert(r->is_valid(), "bad oop arg");
+        if (r->is_stack()) {
+          RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
+          ld_off = __ ensure_simm13_or_reg(ld_off, temp_reg);
+          __ ld_ptr(SP, ld_off, temp_reg);
+          __ verify_oop(temp_reg);
+        } else {
+          __ verify_oop(r->as_Register());
+        }
+      }
+    }
+  }
+}
+
+static void gen_special_dispatch(MacroAssembler* masm,
+                                 int total_args_passed,
+                                 int comp_args_on_stack,
+                                 vmIntrinsics::ID special_dispatch,
+                                 const BasicType* sig_bt,
+                                 const VMRegPair* regs) {
+  verify_oop_args(masm, total_args_passed, sig_bt, regs);
+
+  // Now write the args into the outgoing interpreter space
+  bool     has_receiver   = false;
+  Register receiver_reg   = noreg;
+  int      member_arg_pos = -1;
+  Register member_reg     = noreg;
+  int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(special_dispatch);
+  if (ref_kind != 0) {
+    member_arg_pos = total_args_passed - 1;  // trailing MemberName argument
+    member_reg = G5_method;  // known to be free at this point
+    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
+  } else if (special_dispatch == vmIntrinsics::_invokeBasic) {
+    has_receiver = true;
+  } else {
+    fatal(err_msg("special_dispatch=%d", special_dispatch));
+  }
+
+  if (member_reg != noreg) {
+    // Load the member_arg into register, if necessary.
+    assert(member_arg_pos >= 0 && member_arg_pos < total_args_passed, "oob");
+    assert(sig_bt[member_arg_pos] == T_OBJECT, "dispatch argument must be an object");
+    VMReg r = regs[member_arg_pos].first();
+    assert(r->is_valid(), "bad member arg");
+    if (r->is_stack()) {
+      RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
+      ld_off = __ ensure_simm13_or_reg(ld_off, member_reg);
+      __ ld_ptr(SP, ld_off, member_reg);
+    } else {
+      // no data motion is needed
+      member_reg = r->as_Register();
+    }
+  }
+
+  if (has_receiver) {
+    // Make sure the receiver is loaded into a register.
+    assert(total_args_passed > 0, "oob");
+    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
+    VMReg r = regs[0].first();
+    assert(r->is_valid(), "bad receiver arg");
+    if (r->is_stack()) {
+      // Porting note:  This assumes that compiled calling conventions always
+      // pass the receiver oop in a register.  If this is not true on some
+      // platform, pick a temp and load the receiver from stack.
+      assert(false, "receiver always in a register");
+      receiver_reg = G3_scratch;  // known to be free at this point
+      RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
+      ld_off = __ ensure_simm13_or_reg(ld_off, member_reg);
+      __ ld_ptr(SP, ld_off, receiver_reg);
+    } else {
+      // no data motion is needed
+      receiver_reg = r->as_Register();
+    }
+  }
+
+  // Figure out which address we are really jumping to:
+  MethodHandles::generate_method_handle_dispatch(masm, special_dispatch,
+                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
+}
+
 // ---------------------------------------------------------------------------
 // Generate a native wrapper for a given method.  The method takes arguments
 // in the Java compiled code convention, marshals them to the native
 // convention (handlizes oops, etc), transitions to native, makes the call,
 // returns to java state (possibly blocking), unhandlizes any result and
 // returns.
 //
 // Critical native functions are a shorthand for the use of
 // GetPrimtiveArrayCritical and disallow the use of any other JNI
 // functions.  The wrapper is expected to unpack the arguments before
 // passing them to the callee and perform checks before and after the
 // native call to ensure that they GC_locker
 // lock_critical/unlock_critical semantics are followed.  Some other
 // parts of JNI setup are skipped like the tear down of the JNI handle
 // block and the check for pending exceptions it's impossible for them
 // to be thrown.
 //
 // They are roughly structured like this:
 //    if (GC_locker::needs_gc())
 //      SharedRuntime::block_for_jni_critical();
 //    tranistion to thread_in_native
 //    unpack arrray arguments and call native entry point
 //    check for safepoint in progress
 //    check if any thread suspend flags are set
 //    call into JVM and possible unlock the JNI critical
 //    if a GC was suppressed while in the critical native.
 //    transition back to thread_in_Java
 //    return to caller
 //
 nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                 methodHandle method,
                                                 int compile_id,
                                                 int total_in_args,
                                                 int comp_args_on_stack, // in VMRegStackSlots
-                                                BasicType *in_sig_bt,
-                                                VMRegPair *in_regs,
+                                                BasicType* in_sig_bt,
+                                                VMRegPair* in_regs,
                                                 BasicType ret_type) {
+  if (method->is_method_handle_intrinsic()) {
+    vmIntrinsics::ID iid = method->intrinsic_id();
+    intptr_t start = (intptr_t)__ pc();
+    int vep_offset = ((intptr_t)__ pc()) - start;
+    gen_special_dispatch(masm,
+                         total_in_args,
+                         comp_args_on_stack,
+                         method->intrinsic_id(),
+                         in_sig_bt,
+                         in_regs);
+    int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
+    __ flush();
+    int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
+    return nmethod::new_native_nmethod(method,
+                                       compile_id,
+                                       masm->code(),
+                                       vep_offset,
+                                       frame_complete,
+                                       stack_slots / VMRegImpl::slots_per_word,
+                                       in_ByteSize(-1),
+                                       in_ByteSize(-1),
+                                       (OopMapSet*)NULL);
+  }
   bool is_critical_native = true;
   address native_func = method->critical_native_function();
   if (native_func == NULL) {
@ -3404,14 +3404,6 @@ class StubGenerator: public StubCodeGenerator {
    StubRoutines::_atomic_add_ptr_entry = StubRoutines::_atomic_add_entry;
#endif  // COMPILER2 !=> _LP64

    // Build this early so it's available for the interpreter.  The
    // stub expects the required and actual type to already be in O1
    // and O2 respectively.
    StubRoutines::_throw_WrongMethodTypeException_entry =
      generate_throw_exception("WrongMethodTypeException throw_exception",
                               CAST_FROM_FN_PTR(address, SharedRuntime::throw_WrongMethodTypeException),
                               G5_method_type, G3_method_handle);

    // Build this early so it's available for the interpreter.
    StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
  }
@ -694,9 +694,9 @@ address InterpreterGenerator::generate_accessor_entry(void) {
    // Need to differentiate between igetfield, agetfield, bgetfield etc.
    // because they are different sizes.
    // Get the type from the constant pool cache
    __ srl(G1_scratch, ConstantPoolCacheEntry::tosBits, G1_scratch);
    // Make sure we don't need to mask G1_scratch for tosBits after the above shift
    ConstantPoolCacheEntry::verify_tosBits();
    __ srl(G1_scratch, ConstantPoolCacheEntry::tos_state_shift, G1_scratch);
    // Make sure we don't need to mask G1_scratch after the above shift
    ConstantPoolCacheEntry::verify_tos_state_shift();
    __ cmp(G1_scratch, atos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ld_ptr(Otos_i, G3_scratch, Otos_i);
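The tosBits -> tos_state_shift renaming above recurs throughout this patch, so a sketch of what the shift computes may help; this is illustrative C++, not HotSpot source. The cp-cache flags word keeps the TosState in its topmost bits, so a plain right shift extracts it with no masking, and verify_tos_state_shift() asserts exactly that invariant. The shift value and the 4-bit width are assumptions for illustration.

#include <cstdint>
#include <cassert>

enum TosState { btos, ctos, stos, itos, ltos, ftos, dtos, atos, vtos };

constexpr int tos_state_shift = 28;  // assumed: TosState in the top 4 bits of a u4

inline TosState tos_state_of(uint32_t flags) {
  // No '& mask' is needed as long as nothing is stored above the TosState bits.
  return static_cast<TosState>(flags >> tos_state_shift);
}

inline void verify_tos_state_shift() {
  // Mirrors the guard in the patch: the shift must leave no higher bits set.
  assert(tos_state_shift + 4 == 32 && "TosState must occupy the top bits");
}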
@ -1662,7 +1662,7 @@ int AbstractInterpreter::layout_activation(methodOop method,
    int computed_sp_adjustment = (delta > 0) ? round_to(delta, WordsPerLong) : 0;
    *interpreter_frame->register_addr(I5_savedSP) = (intptr_t) (fp + computed_sp_adjustment) - STACK_BIAS;
  } else {
    assert(caller->is_compiled_frame() || caller->is_entry_frame() || caller->is_ricochet_frame(), "only possible cases");
    assert(caller->is_compiled_frame() || caller->is_entry_frame(), "only possible cases");
    // Don't have Lesp available; lay out locals block in the caller
    // adjacent to the register window save area.
    //
@ -378,7 +378,7 @@ void TemplateTable::fast_aldc(bool wide) {
  Register Rcache = G3_scratch;
  Register Rscratch = G4_scratch;

  resolve_cache_and_index(f1_oop, Otos_i, Rcache, Rscratch, wide ? sizeof(u2) : sizeof(u1));
  resolve_cache_and_index(f12_oop, Otos_i, Rcache, Rscratch, wide ? sizeof(u2) : sizeof(u1));

  __ verify_oop(Otos_i);
@ -2093,10 +2093,12 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
  // Depends on cpCacheOop layout!
  Label resolved;

  if (byte_no == f1_oop) {
    // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
    // This kind of CP cache entry does not need to match the flags byte, because
  if (byte_no == f12_oop) {
    // We are resolved if the f1 field contains a non-null object (CallSite, MethodType, etc.)
    // This kind of CP cache entry does not need to match bytecode_1 or bytecode_2, because
    // there is a 1-1 relation between bytecode type and CP entry type.
    // The caller will also load a methodOop from f2.
    assert(result != noreg, "");
    assert_different_registers(result, Rcache);
    __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
    __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
@ -2123,10 +2125,13 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
    case Bytecodes::_invokespecial  : // fall through
    case Bytecodes::_invokestatic   : // fall through
    case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
    case Bytecodes::_invokehandle   : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break;
    case Bytecodes::_invokedynamic  : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
    case Bytecodes::_fast_aldc      : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break;
    case Bytecodes::_fast_aldc_w    : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break;
    default                         : ShouldNotReachHere(); break;
    default:
      fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
      break;
  }
  // first time invocation - must resolve first
  __ call_VM(noreg, entry, O1);
@ -2139,48 +2144,54 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
}

void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
                                               Register Rmethod,
                                               Register Ritable_index,
                                               Register Rflags,
                                               Register method,
                                               Register itable_index,
                                               Register flags,
                                               bool is_invokevirtual,
                                               bool is_invokevfinal,
                                               bool is_invokedynamic) {
  // Uses both G3_scratch and G4_scratch
  Register Rcache = G3_scratch;
  Register Rscratch = G4_scratch;
  assert_different_registers(Rcache, Rmethod, Ritable_index);

  ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
  Register cache = G3_scratch;
  Register index = G4_scratch;
  assert_different_registers(cache, method, itable_index);

  // determine constant pool cache field offsets
  assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
  const int method_offset = in_bytes(
      cp_base_offset +
      (is_invokevirtual
      constantPoolCacheOopDesc::base_offset() +
      ((byte_no == f2_byte)
       ? ConstantPoolCacheEntry::f2_offset()
       : ConstantPoolCacheEntry::f1_offset()
      )
    );
  const int flags_offset = in_bytes(cp_base_offset +
  const int flags_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
                                    ConstantPoolCacheEntry::flags_offset());
  // access constant pool cache fields
  const int index_offset = in_bytes(cp_base_offset +
  const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
                                    ConstantPoolCacheEntry::f2_offset());

  if (is_invokevfinal) {
    __ get_cache_and_index_at_bcp(Rcache, Rscratch, 1);
    __ ld_ptr(Rcache, method_offset, Rmethod);
  } else if (byte_no == f1_oop) {
    // Resolved f1_oop goes directly into 'method' register.
    resolve_cache_and_index(byte_no, Rmethod, Rcache, Rscratch, sizeof(u4));
    __ get_cache_and_index_at_bcp(cache, index, 1);
    __ ld_ptr(Address(cache, method_offset), method);
  } else if (byte_no == f12_oop) {
    // Resolved f1_oop (CallSite, MethodType, etc.) goes into 'itable_index'.
    // Resolved f2_oop (methodOop invoker) will go into 'method' (at index_offset).
    // See ConstantPoolCacheEntry::set_dynamic_call and set_method_handle.
    size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
    resolve_cache_and_index(byte_no, itable_index, cache, index, index_size);
    __ ld_ptr(Address(cache, index_offset), method);
    itable_index = noreg;  // hack to disable load below
  } else {
    resolve_cache_and_index(byte_no, noreg, Rcache, Rscratch, sizeof(u2));
    __ ld_ptr(Rcache, method_offset, Rmethod);
    resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
    __ ld_ptr(Address(cache, method_offset), method);
  }

  if (Ritable_index != noreg) {
    __ ld_ptr(Rcache, index_offset, Ritable_index);
  if (itable_index != noreg) {
    // pick up itable index from f2 also:
    assert(byte_no == f1_byte, "already picked up f1");
    __ ld_ptr(Address(cache, index_offset), itable_index);
  }
  __ ld_ptr(Rcache, flags_offset, Rflags);
  __ ld_ptr(Address(cache, flags_offset), flags);
}

// The Rcache register must be set before call
@ -2272,7 +2283,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {

  if (__ membar_has_effect(membar_bits)) {
    // Get volatile flag
    __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
    __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
    __ and3(Rflags, Lscratch, Lscratch);
  }
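The volatileField -> is_volatile_shift renaming above is the same single-bit test everywhere it appears in this patch. An illustrative C++ sketch of the pattern (not HotSpot source; the bit position is an assumption for illustration):

#include <cstdint>

constexpr int is_volatile_shift = 21;  // assumed bit position in the flags word

inline bool is_volatile_field(uint32_t flags) {
  // Equivalent of the generated set + and3/btst sequence.
  return (flags & (uint32_t(1) << is_volatile_shift)) != 0;
}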
@ -2280,9 +2291,9 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {

  // compute field type
  Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj;
  __ srl(Rflags, ConstantPoolCacheEntry::tosBits, Rflags);
  // Make sure we don't need to mask Rflags for tosBits after the above shift
  ConstantPoolCacheEntry::verify_tosBits();
  __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
  // Make sure we don't need to mask Rflags after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();

  // Check atos before itos for getstatic, more likely (in Queens at least)
  __ cmp(Rflags, atos);
@ -2445,7 +2456,7 @@ void TemplateTable::fast_accessfield(TosState state) {
  if (__ membar_has_effect(membar_bits)) {
    // Get volatile flag
    __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Rflags);
    __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
    __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
  }

  switch (bytecode()) {
@ -2569,9 +2580,9 @@ void TemplateTable::jvmti_post_field_mod(Register Rcache, Register index, bool i
    Label two_word, valsizeknown;
    __ ld_ptr(G1_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
    __ mov(Lesp, G4_scratch);
    __ srl(Rflags, ConstantPoolCacheEntry::tosBits, Rflags);
    // Make sure we don't need to mask Rflags for tosBits after the above shift
    ConstantPoolCacheEntry::verify_tosBits();
    __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
    // Make sure we don't need to mask Rflags after the above shift
    ConstantPoolCacheEntry::verify_tos_state_shift();
    __ cmp(Rflags, ltos);
    __ br(Assembler::equal, false, Assembler::pt, two_word);
    __ delayed()->cmp(Rflags, dtos);
@ -2625,7 +2636,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {

  Label notVolatile, checkVolatile, exit;
  if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
    __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
    __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
    __ and3(Rflags, Lscratch, Lscratch);

    if (__ membar_has_effect(read_bits)) {
@ -2635,9 +2646,9 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
    }
  }

  __ srl(Rflags, ConstantPoolCacheEntry::tosBits, Rflags);
  // Make sure we don't need to mask Rflags for tosBits after the above shift
  ConstantPoolCacheEntry::verify_tosBits();
  __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
  // Make sure we don't need to mask Rflags after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();

  // compute field type
  Label notInt, notShort, notChar, notObj, notByte, notLong, notFloat;
@ -2833,7 +2844,7 @@ void TemplateTable::fast_storefield(TosState state) {
  Label notVolatile, checkVolatile, exit;
  if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
    __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
    __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
    __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
    __ and3(Rflags, Lscratch, Lscratch);
    if (__ membar_has_effect(read_bits)) {
      __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile);
@ -2916,7 +2927,7 @@ void TemplateTable::fast_xaccess(TosState state) {

  // Test volatile
  Label notVolatile;
  __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
  __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
  __ btst(Rflags, Lscratch);
  __ br(Assembler::zero, false, Assembler::pt, notVolatile);
  __ delayed()->nop();
@ -2936,27 +2947,82 @@ void TemplateTable::count_calls(Register method, Register temp) {
  ShouldNotReachHere();
}


void TemplateTable::prepare_invoke(int byte_no,
                                   Register method,  // linked method (or i-klass)
                                   Register ra,      // return address
                                   Register index,   // itable index, MethodType, etc.
                                   Register recv,    // if caller wants to see it
                                   Register flags    // if caller wants to test it
                                   ) {
  // determine flags
  const Bytecodes::Code code = bytecode();
  const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
  const bool is_invokedynamic   = code == Bytecodes::_invokedynamic;
  const bool is_invokehandle    = code == Bytecodes::_invokehandle;
  const bool is_invokevirtual   = code == Bytecodes::_invokevirtual;
  const bool is_invokespecial   = code == Bytecodes::_invokespecial;
  const bool load_receiver      = (recv != noreg);
  assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
  assert(recv  == noreg || recv  == O0, "");
  assert(flags == noreg || flags == O1, "");

  // setup registers & access constant pool cache
  if (recv  == noreg)  recv  = O0;
  if (flags == noreg)  flags = O1;
  const Register temp = O2;
  assert_different_registers(method, ra, index, recv, flags, temp);

  load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);

  __ mov(SP, O5_savedSP);  // record SP that we wanted the callee to restore

  // maybe push appendix to arguments
  if (is_invokedynamic || is_invokehandle) {
    Label L_no_push;
    __ verify_oop(index);
    __ set((1 << ConstantPoolCacheEntry::has_appendix_shift), temp);
    __ btst(flags, temp);
    __ br(Assembler::zero, false, Assembler::pt, L_no_push);
    __ delayed()->nop();
    // Push the appendix as a trailing parameter.
    // This must be done before we get the receiver,
    // since the parameter_size includes it.
    __ push_ptr(index);  // push appendix (MethodType, CallSite, etc.)
    __ bind(L_no_push);
  }

  // load receiver if needed (after appendix is pushed so parameter size is correct)
  if (load_receiver) {
    __ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, temp);  // get parameter size
    __ load_receiver(temp, recv);  // __ argument_address uses Gargs but we need Lesp
    __ verify_oop(recv);
  }

  // compute return type
  __ srl(flags, ConstantPoolCacheEntry::tos_state_shift, ra);
  // Make sure we don't need to mask flags after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();
  // load return address
  {
    const address table_addr = (is_invokeinterface || is_invokedynamic) ?
        (address)Interpreter::return_5_addrs_by_index_table() :
        (address)Interpreter::return_3_addrs_by_index_table();
    AddressLiteral table(table_addr);
    __ set(table, temp);
    __ sll(ra, LogBytesPerWord, ra);
    __ ld_ptr(Address(temp, ra), ra);
  }
}
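A sketch of why prepare_invoke pushes the appendix before locating the receiver; this is illustrative C++, not HotSpot source. The low bits of the flags word hold the call's parameter_size, which already counts the appendix, and the receiver is the deepest of parameter_size stack slots. The mask value below is an assumption for illustration.

#include <cstdint>
#include <cstddef>

constexpr uint32_t parameter_size_mask = 0xFF;  // assumed low-byte field

// Slot 0 models the top of the expression stack (Lesp).
inline std::size_t receiver_slot(uint32_t flags) {
  uint32_t param_size = flags & parameter_size_mask;  // includes the appendix
  return param_size - 1;  // receiver is the deepest argument slot
}

If the appendix were pushed after this computation, the receiver would be found one slot too shallow, which is exactly the ordering hazard the in-code comment warns about.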
void TemplateTable::generate_vtable_call(Register Rrecv, Register Rindex, Register Rret) {
  Register Rtemp = G4_scratch;
  Register Rcall = Rindex;
  assert_different_registers(Rcall, G5_method, Gargs, Rret);

  // get target methodOop & entry point
  const int base = instanceKlass::vtable_start_offset() * wordSize;
  if (vtableEntry::size() % 3 == 0) {
    // scale the vtable index by 12:
    int one_third = vtableEntry::size() / 3;
    __ sll(Rindex, exact_log2(one_third * 1 * wordSize), Rtemp);
    __ sll(Rindex, exact_log2(one_third * 2 * wordSize), Rindex);
    __ add(Rindex, Rtemp, Rindex);
  } else {
    // scale the vtable index by 8:
    __ sll(Rindex, exact_log2(vtableEntry::size() * wordSize), Rindex);
  }

  __ add(Rrecv, Rindex, Rrecv);
  __ ld_ptr(Rrecv, base + vtableEntry::method_offset_in_bytes(), G5_method);

  __ lookup_virtual_method(Rrecv, Rindex, G5_method);
  __ call_from_interpreter(Rcall, Gargs, Rret);
}
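The shift-and-add scaling in the deleted branch above deserves a gloss: SPARC has no multiply-by-12 addressing mode, so index*12 is built as index*4 + index*8. A tiny standalone C++ check of that identity, assuming 4-byte words as on 32-bit SPARC (a sketch, not HotSpot code):

#include <cstdint>
#include <cassert>

inline uint64_t scale_by_12(uint64_t index) {
  uint64_t t = index << 2;  // index * 4   (one_third * 1 * wordSize)
  uint64_t i = index << 3;  // index * 8   (one_third * 2 * wordSize)
  return t + i;             // index * 12
}

int main() {
  assert(scale_by_12(7) == 84);
  return 0;
}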
@ -2965,16 +3031,16 @@ void TemplateTable::invokevirtual(int byte_no) {
  assert(byte_no == f2_byte, "use this argument");

  Register Rscratch = G3_scratch;
  Register Rtemp = G4_scratch;
  Register Rret = Lscratch;
  Register Rrecv = G5_method;
  Register Rtemp   = G4_scratch;
  Register Rret    = Lscratch;
  Register O0_recv = O0;
  Label notFinal;

  load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, true, false, false);
  __ mov(SP, O5_savedSP);  // record SP that we wanted the callee to restore

  // Check for vfinal
  __ set((1 << ConstantPoolCacheEntry::vfinalMethod), G4_scratch);
  __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), G4_scratch);
  __ btst(Rret, G4_scratch);
  __ br(Assembler::zero, false, Assembler::pt, notFinal);
  __ delayed()->and3(Rret, 0xFF, G4_scratch);  // gets number of parameters
@ -2986,27 +3052,27 @@ void TemplateTable::invokevirtual(int byte_no) {
  __ bind(notFinal);

  __ mov(G5_method, Rscratch);  // better scratch register
  __ load_receiver(G4_scratch, O0);  // gets receiverOop
  // receiver is in O0
  __ verify_oop(O0);
  __ load_receiver(G4_scratch, O0_recv);  // gets receiverOop
  // receiver is in O0_recv
  __ verify_oop(O0_recv);

  // get return address
  AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
  __ set(table, Rtemp);
  __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret);  // get return type
  // Make sure we don't need to mask Rret for tosBits after the above shift
  ConstantPoolCacheEntry::verify_tosBits();
  __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret);  // get return type
  // Make sure we don't need to mask Rret after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();
  __ sll(Rret, LogBytesPerWord, Rret);
  __ ld_ptr(Rtemp, Rret, Rret);  // get return address

  // get receiver klass
  __ null_check(O0, oopDesc::klass_offset_in_bytes());
  __ load_klass(O0, Rrecv);
  __ verify_oop(Rrecv);
  __ null_check(O0_recv, oopDesc::klass_offset_in_bytes());
  __ load_klass(O0_recv, O0_recv);
  __ verify_oop(O0_recv);

  __ profile_virtual_call(Rrecv, O4);
  __ profile_virtual_call(O0_recv, O4);

  generate_vtable_call(Rrecv, Rscratch, Rret);
  generate_vtable_call(O0_recv, Rscratch, Rret);
}
void TemplateTable::fast_invokevfinal(int byte_no) {
@ -3036,9 +3102,9 @@ void TemplateTable::invokevfinal_helper(Register Rscratch, Register Rret) {
  // get return address
  AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
  __ set(table, Rtemp);
  __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret);  // get return type
  // Make sure we don't need to mask Rret for tosBits after the above shift
  ConstantPoolCacheEntry::verify_tosBits();
  __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret);  // get return type
  // Make sure we don't need to mask Rret after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();
  __ sll(Rret, LogBytesPerWord, Rret);
  __ ld_ptr(Rtemp, Rret, Rret);  // get return address

@ -3047,65 +3113,37 @@ void TemplateTable::invokevfinal_helper(Register Rscratch, Register Rret) {
  __ call_from_interpreter(Rscratch, Gargs, Rret);
}


void TemplateTable::invokespecial(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  Register Rscratch = G3_scratch;
  Register Rtemp = G4_scratch;
  Register Rret = Lscratch;
  const Register Rret     = Lscratch;
  const Register O0_recv  = O0;
  const Register Rscratch = G3_scratch;

  load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, /*virtual*/ false, false, false);
  __ mov(SP, O5_savedSP);  // record SP that we wanted the callee to restore

  __ verify_oop(G5_method);

  __ lduh(G5_method, in_bytes(methodOopDesc::size_of_parameters_offset()), G4_scratch);
  __ load_receiver(G4_scratch, O0);

  // receiver NULL check
  __ null_check(O0);

  __ profile_call(O4);

  // get return address
  AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
  __ set(table, Rtemp);
  __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret);  // get return type
  // Make sure we don't need to mask Rret for tosBits after the above shift
  ConstantPoolCacheEntry::verify_tosBits();
  __ sll(Rret, LogBytesPerWord, Rret);
  __ ld_ptr(Rtemp, Rret, Rret);  // get return address
  prepare_invoke(byte_no, G5_method, Rret, noreg, O0_recv);  // get receiver also for null check
  __ null_check(O0_recv);

  // do the call
  __ verify_oop(G5_method);
  __ profile_call(O4);
  __ call_from_interpreter(Rscratch, Gargs, Rret);
}


void TemplateTable::invokestatic(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  Register Rscratch = G3_scratch;
  Register Rtemp = G4_scratch;
  Register Rret = Lscratch;
  const Register Rret     = Lscratch;
  const Register Rscratch = G3_scratch;

  load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, /*virtual*/ false, false, false);
  __ mov(SP, O5_savedSP);  // record SP that we wanted the callee to restore

  __ verify_oop(G5_method);

  __ profile_call(O4);

  // get return address
  AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
  __ set(table, Rtemp);
  __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret);  // get return type
  // Make sure we don't need to mask Rret for tosBits after the above shift
  ConstantPoolCacheEntry::verify_tosBits();
  __ sll(Rret, LogBytesPerWord, Rret);
  __ ld_ptr(Rtemp, Rret, Rret);  // get return address
  prepare_invoke(byte_no, G5_method, Rret);  // get f1 methodOop

  // do the call
  __ verify_oop(G5_method);
  __ profile_call(O4);
  __ call_from_interpreter(Rscratch, Gargs, Rret);
}
@ -3122,7 +3160,7 @@ void TemplateTable::invokeinterface_object_method(Register RklassOop,
  Label notFinal;

  // Check for vfinal
  __ set((1 << ConstantPoolCacheEntry::vfinalMethod), Rscratch);
  __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), Rscratch);
  __ btst(Rflags, Rscratch);
  __ br(Assembler::zero, false, Assembler::pt, notFinal);
  __ delayed()->nop();
@ -3144,53 +3182,37 @@ void TemplateTable::invokeinterface(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  Register Rscratch = G4_scratch;
  Register Rret = G3_scratch;
  Register Rindex = Lscratch;
  Register Rinterface = G1_scratch;
  Register RklassOop = G5_method;
  Register Rflags = O1;
  const Register Rinterface  = G1_scratch;
  const Register Rret        = G3_scratch;
  const Register Rindex      = Lscratch;
  const Register O0_recv     = O0;
  const Register O1_flags    = O1;
  const Register O2_klassOop = O2;
  const Register Rscratch    = G4_scratch;
  assert_different_registers(Rscratch, G5_method);

  load_invoke_cp_cache_entry(byte_no, Rinterface, Rindex, Rflags, /*virtual*/ false, false, false);
  __ mov(SP, O5_savedSP);  // record SP that we wanted the callee to restore

  // get receiver
  __ and3(Rflags, 0xFF, Rscratch);  // gets number of parameters
  __ load_receiver(Rscratch, O0);
  __ verify_oop(O0);

  __ mov(Rflags, Rret);

  // get return address
  AddressLiteral table(Interpreter::return_5_addrs_by_index_table());
  __ set(table, Rscratch);
  __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret);  // get return type
  // Make sure we don't need to mask Rret for tosBits after the above shift
  ConstantPoolCacheEntry::verify_tosBits();
  __ sll(Rret, LogBytesPerWord, Rret);
  __ ld_ptr(Rscratch, Rret, Rret);  // get return address
  prepare_invoke(byte_no, Rinterface, Rret, Rindex, O0_recv, O1_flags);

  // get receiver klass
  __ null_check(O0, oopDesc::klass_offset_in_bytes());
  __ load_klass(O0, RklassOop);
  __ verify_oop(RklassOop);
  __ null_check(O0_recv, oopDesc::klass_offset_in_bytes());
  __ load_klass(O0_recv, O2_klassOop);
  __ verify_oop(O2_klassOop);

  // Special case of invokeinterface called for virtual method of
  // java.lang.Object.  See cpCacheOop.cpp for details.
  // This code isn't produced by javac, but could be produced by
  // another compliant java compiler.
  Label notMethod;
  __ set((1 << ConstantPoolCacheEntry::methodInterface), Rscratch);
  __ btst(Rflags, Rscratch);
  __ set((1 << ConstantPoolCacheEntry::is_forced_virtual_shift), Rscratch);
  __ btst(O1_flags, Rscratch);
  __ br(Assembler::zero, false, Assembler::pt, notMethod);
  __ delayed()->nop();

  invokeinterface_object_method(RklassOop, Rinterface, Rret, Rflags);
  invokeinterface_object_method(O2_klassOop, Rinterface, Rret, O1_flags);

  __ bind(notMethod);

  __ profile_virtual_call(RklassOop, O4);
  __ profile_virtual_call(O2_klassOop, O4);

  //
  // find entry point to call
@ -3199,9 +3221,9 @@ void TemplateTable::invokeinterface(int byte_no) {
  // compute start of first itableOffsetEntry (which is at end of vtable)
  const int base = instanceKlass::vtable_start_offset() * wordSize;
  Label search;
  Register Rtemp = Rflags;
  Register Rtemp = O1_flags;

  __ ld(RklassOop, instanceKlass::vtable_length_offset() * wordSize, Rtemp);
  __ ld(O2_klassOop, instanceKlass::vtable_length_offset() * wordSize, Rtemp);
  if (align_object_offset(1) > 1) {
    __ round_to(Rtemp, align_object_offset(1));
  }
@ -3212,7 +3234,7 @@ void TemplateTable::invokeinterface(int byte_no) {
    __ set(base, Rscratch);
    __ add(Rscratch, Rtemp, Rtemp);
  }
  __ add(RklassOop, Rtemp, Rscratch);
  __ add(O2_klassOop, Rtemp, Rscratch);

  __ bind(search);

@ -3244,7 +3266,7 @@ void TemplateTable::invokeinterface(int byte_no) {
  assert(itableMethodEntry::method_offset_in_bytes() == 0, "adjust instruction below");
  __ sll(Rindex, exact_log2(itableMethodEntry::size() * wordSize), Rindex);  // Rindex *= 8;
  __ add(Rscratch, Rindex, Rscratch);
  __ ld_ptr(RklassOop, Rscratch, G5_method);
  __ ld_ptr(O2_klassOop, Rscratch, G5_method);

  // Check for abstract method error.
  {
@ -3260,13 +3282,42 @@ void TemplateTable::invokeinterface(int byte_no) {

  __ verify_oop(G5_method);
  __ call_from_interpreter(Rcall, Gargs, Rret);
}
void TemplateTable::invokehandle(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f12_oop, "use this argument");

  if (!EnableInvokeDynamic) {
    // rewriter does not generate this bytecode
    __ should_not_reach_here();
    return;
  }

  const Register Rret     = Lscratch;
  const Register G4_mtype = G4_scratch;  // f1
  const Register O0_recv  = O0;
  const Register Rscratch = G3_scratch;

  prepare_invoke(byte_no, G5_method, Rret, G4_mtype, O0_recv);
  __ null_check(O0_recv);

  // G4: MethodType object (from f1)
  // G5: MH.linkToCallSite method (from f2)

  // Note:  G4_mtype is already pushed (if necessary) by prepare_invoke

  // do the call
  __ verify_oop(G5_method);
  __ profile_final_call(O4);  // FIXME: profile the LambdaForm also
  __ call_from_interpreter(Rscratch, Gargs, Rret);
}
void TemplateTable::invokedynamic(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_oop, "use this argument");
  assert(byte_no == f12_oop, "use this argument");

  if (!EnableInvokeDynamic) {
    // We should not encounter this bytecode if !EnableInvokeDynamic.
@ -3279,42 +3330,24 @@ void TemplateTable::invokedynamic(int byte_no) {
    return;
  }

  // G5: CallSite object (f1)
  // XX: unused (f2)
  // XX: flags (unused)
  const Register Rret        = Lscratch;
  const Register G4_callsite = G4_scratch;
  const Register Rscratch    = G3_scratch;

  Register G5_callsite = G5_method;
  Register Rscratch = G3_scratch;
  Register Rtemp = G1_scratch;
  Register Rret = Lscratch;
  prepare_invoke(byte_no, G5_method, Rret, G4_callsite);

  load_invoke_cp_cache_entry(byte_no, G5_callsite, noreg, Rret,
                             /*virtual*/ false, /*vfinal*/ false, /*indy*/ true);
  __ mov(SP, O5_savedSP);  // record SP that we wanted the callee to restore
  // G4: CallSite object (from f1)
  // G5: MH.linkToCallSite method (from f2)

  // Note:  G4_callsite is already pushed by prepare_invoke

  // %%% should make a type profile for any invokedynamic that takes a ref argument
  // profile this call
  __ profile_call(O4);

  // get return address
  AddressLiteral table(Interpreter::return_5_addrs_by_index_table());
  __ set(table, Rtemp);
  __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret);  // get return type
  // Make sure we don't need to mask Rret for tosBits after the above shift
  ConstantPoolCacheEntry::verify_tosBits();
  __ sll(Rret, LogBytesPerWord, Rret);
  __ ld_ptr(Rtemp, Rret, Rret);  // get return address

  __ verify_oop(G5_callsite);
  __ load_heap_oop(G5_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, Rscratch), G3_method_handle);
  __ null_check(G3_method_handle);
  __ verify_oop(G3_method_handle);

  // Adjust Rret first so Llast_SP can be same as Rret
  __ add(Rret, -frame::pc_return_offset, O7);
  __ add(Lesp, BytesPerWord, Gargs);  // setup parameter pointer
  __ jump_to_method_handle_entry(G3_method_handle, Rtemp, /* emit_delayed_nop */ false);
  // Record SP so we can remove any stack space allocated by adapter transition
  __ delayed()->mov(SP, Llast_SP);
  // do the call
  __ verify_oop(G5_method);
  __ call_from_interpreter(Rscratch, Gargs, Rret);
}
@ -25,6 +25,13 @@
#ifndef CPU_SPARC_VM_TEMPLATETABLE_SPARC_HPP
#define CPU_SPARC_VM_TEMPLATETABLE_SPARC_HPP

  static void prepare_invoke(int byte_no,
                             Register method,         // linked method (or i-klass)
                             Register ra,             // return address
                             Register index = noreg,  // itable index, MethodType, etc.
                             Register recv  = noreg,  // if caller wants to see it
                             Register flags = noreg   // if caller wants to test it
                             );
  // helper function
  static void invokevfinal_helper(Register Rcache, Register Rret);
  static void invokeinterface_object_method(Register RklassOop, Register Rcall,
@ -44,10 +44,11 @@ protected:
    fmaf_instructions   = 10,
    fmau_instructions   = 11,
    vis3_instructions   = 12,
    sparc64_family      = 13,
    T_family            = 14,
    T1_model            = 15,
    cbcond_instructions = 16
    cbcond_instructions = 13,
    sparc64_family      = 14,
    M_family            = 15,
    T_family            = 16,
    T1_model            = 17
  };

  enum Feature_Flag_Set {
@ -67,10 +68,11 @@ protected:
    fmaf_instructions_m   = 1 << fmaf_instructions,
    fmau_instructions_m   = 1 << fmau_instructions,
    vis3_instructions_m   = 1 << vis3_instructions,
    cbcond_instructions_m = 1 << cbcond_instructions,
    sparc64_family_m      = 1 << sparc64_family,
    M_family_m            = 1 << M_family,
    T_family_m            = 1 << T_family,
    T1_model_m            = 1 << T1_model,
    cbcond_instructions_m = 1 << cbcond_instructions,

    generic_v8_m          = v8_instructions_m | hardware_mul32_m | hardware_div32_m | hardware_fsmuld_m,
    generic_v9_m          = generic_v8_m | v9_instructions_m,
@ -89,6 +91,7 @@ protected:
  static int platform_features(int features);

  // Returns true if the platform is in the niagara line (T series)
  static bool is_M_family(int features) { return (features & M_family_m) != 0; }
  static bool is_T_family(int features) { return (features & T_family_m) != 0; }
  static bool is_niagara() { return is_T_family(_features); }
  DEBUG_ONLY( static bool is_niagara(int features) { return (features & sun4v_m) != 0; } )
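The enum pair above is a compact feature-detection idiom: each CPU feature gets a bit position, the *_m constants turn positions into masks, and family tests are single AND operations against a cached feature word. A self-contained C++ sketch of the same pattern, reusing only the M_family/T_family values that appear in the hunk (the rest is illustrative):

#include <cstdio>

enum Feature_Flag     { M_family = 15, T_family = 16 };
enum Feature_Flag_Set {
  M_family_m = 1 << M_family,
  T_family_m = 1 << T_family,
};

static bool is_M_family(int features) { return (features & M_family_m) != 0; }
static bool is_T_family(int features) { return (features & T_family_m) != 0; }

int main() {
  int features = T_family_m;  // pretend the platform probe reported a T-series CPU
  std::printf("M: %d, T: %d\n", is_M_family(features), is_T_family(features));
  return 0;
}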
@ -70,7 +70,6 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  __ load_klass(O0, G3_scratch);

  // set methodOop (in case of interpreted method), and destination address
  int entry_offset = instanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
@ -82,13 +81,8 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
    __ bind(L);
  }
#endif
  int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
  if (Assembler::is_simm13(v_off)) {
    __ ld_ptr(G3, v_off, G5_method);
  } else {
    __ set(v_off, G5);
    __ ld_ptr(G3, G5, G5_method);
  }

  __ lookup_virtual_method(G3_scratch, vtable_index, G5_method);

#ifndef PRODUCT
  if (DebugVtables) {
@ -41,6 +41,15 @@
#include "gc_implementation/g1/heapRegion.hpp"
#endif

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
// Implementation of AddressLiteral

AddressLiteral::AddressLiteral(address target, relocInfo::relocType rtype) {
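The many stop -> STOP conversions later in this patch all route through the macro defined above: in debug builds the error text also becomes a block comment in the generated code, so a stop site can be found by searching the disassembly. A small standalone C++ sketch of the expansion, with stand-in methods (not HotSpot code):

#include <cstdio>

struct FakeMasm {
  void block_comment(const char* s) { std::printf("; %s\n", s); }
  void stop(const char* s)          { std::printf("stop(\"%s\")\n", s); }

#ifdef PRODUCT
#define STOP(error) stop(error)
#else
#define STOP(error) block_comment(error); stop(error)
#endif

  void example() {
    STOP("assert(top >= start)");  // debug: comment + stop; product: stop only
  }
#undef STOP
};

int main() { FakeMasm().example(); return 0; }

Note that the debug expansion is two statements with no wrapping braces, so the macro cannot safely be the body of an unbraced if; the call sites in this patch all use it as a standalone statement.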
@ -5508,23 +5517,7 @@ void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rd
  // To see where a verify_oop failed, get $ebx+40/X for this frame.
  // This is the value of eip which points to where verify_oop will return.
  if (os::message_box(msg, "Execution stopped, print registers?")) {
    ttyLocker ttyl;
    tty->print_cr("eip = 0x%08x", eip);
#ifndef PRODUCT
    if ((WizardMode || Verbose) && PrintMiscellaneous) {
      tty->cr();
      findpc(eip);
      tty->cr();
    }
#endif
    tty->print_cr("rax = 0x%08x", rax);
    tty->print_cr("rbx = 0x%08x", rbx);
    tty->print_cr("rcx = 0x%08x", rcx);
    tty->print_cr("rdx = 0x%08x", rdx);
    tty->print_cr("rdi = 0x%08x", rdi);
    tty->print_cr("rsi = 0x%08x", rsi);
    tty->print_cr("rbp = 0x%08x", rbp);
    tty->print_cr("rsp = 0x%08x", rsp);
    print_state32(rdi, rsi, rbp, rsp, rbx, rdx, rcx, rax, eip);
    BREAKPOINT;
    assert(false, "start up GDB");
  }
@ -5536,12 +5529,53 @@ void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rd
  ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
}

void MacroAssembler::print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip) {
  ttyLocker ttyl;
  FlagSetting fs(Debugging, true);
  tty->print_cr("eip = 0x%08x", eip);
#ifndef PRODUCT
  if ((WizardMode || Verbose) && PrintMiscellaneous) {
    tty->cr();
    findpc(eip);
    tty->cr();
  }
#endif
#define PRINT_REG(rax) \
  { tty->print("%s = ", #rax); os::print_location(tty, rax); }
  PRINT_REG(rax);
  PRINT_REG(rbx);
  PRINT_REG(rcx);
  PRINT_REG(rdx);
  PRINT_REG(rdi);
  PRINT_REG(rsi);
  PRINT_REG(rbp);
  PRINT_REG(rsp);
#undef PRINT_REG
  // Print some words near top of stack.
  int* dump_sp = (int*) rsp;
  for (int col1 = 0; col1 < 8; col1++) {
    tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    os::print_location(tty, *dump_sp++);
  }
  for (int row = 0; row < 16; row++) {
    tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    for (int col = 0; col < 8; col++) {
      tty->print(" 0x%08x", *dump_sp++);
    }
    tty->cr();
  }
  // Print some instructions around pc:
  Disassembler::decode((address)eip-64, (address)eip);
  tty->print_cr("--------");
  Disassembler::decode((address)eip, (address)eip+32);
}

void MacroAssembler::stop(const char* msg) {
  ExternalAddress message((address)msg);
  // push address of message
  pushptr(message.addr());
  { Label L; call(L, relocInfo::none); bind(L); }  // push eip
  pusha();  // push registers
  pusha();  // push registers
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32)));
  hlt();
}
@ -5558,6 +5592,18 @@ void MacroAssembler::warn(const char* msg) {
  pop_CPU_state();
}

void MacroAssembler::print_state() {
  { Label L; call(L, relocInfo::none); bind(L); }  // push eip
  pusha();  // push registers

  push_CPU_state();
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::print_state32)));
  pop_CPU_state();

  popa();
  addl(rsp, wordSize);
}

#else // _LP64

// 64 bit versions
@ -6023,14 +6069,33 @@ void MacroAssembler::stop(const char* msg) {
}

void MacroAssembler::warn(const char* msg) {
  push(rsp);
  push(rbp);
  movq(rbp, rsp);
  andq(rsp, -16);  // align stack as required by push_CPU_state and call

  push_CPU_state();  // keeps alignment at 16 bytes
  lea(c_rarg0, ExternalAddress((address) msg));
  call_VM_leaf(CAST_FROM_FN_PTR(address, warning), c_rarg0);
  pop_CPU_state();
  pop(rsp);
  mov(rsp, rbp);
  pop(rbp);
}

void MacroAssembler::print_state() {
  address rip = pc();
  pusha();  // get regs on stack
  push(rbp);
  movq(rbp, rsp);
  andq(rsp, -16);  // align stack as required by push_CPU_state and call
  push_CPU_state();  // keeps alignment at 16 bytes

  lea(c_rarg0, InternalAddress(rip));
  lea(c_rarg1, Address(rbp, wordSize));  // pass pointer to regs array
  call_VM_leaf(CAST_FROM_FN_PTR(address, MacroAssembler::print_state64), c_rarg0, c_rarg1);

  pop_CPU_state();
  mov(rsp, rbp);
  pop(rbp);
  popa();
}

#ifndef PRODUCT
@ -6039,7 +6104,7 @@ extern "C" void findpc(intptr_t x);

void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
  // In order to get locks to work, we need to fake an in_VM state
  if (ShowMessageBoxOnError ) {
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
@ -6053,30 +6118,9 @@ void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
    // XXX correct this offset for amd64
    // This is the value of eip which points to where verify_oop will return.
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      ttyLocker ttyl;
      tty->print_cr("rip = 0x%016lx", pc);
#ifndef PRODUCT
      tty->cr();
      findpc(pc);
      tty->cr();
#endif
      tty->print_cr("rax = 0x%016lx", regs[15]);
      tty->print_cr("rbx = 0x%016lx", regs[12]);
      tty->print_cr("rcx = 0x%016lx", regs[14]);
      tty->print_cr("rdx = 0x%016lx", regs[13]);
      tty->print_cr("rdi = 0x%016lx", regs[8]);
      tty->print_cr("rsi = 0x%016lx", regs[9]);
      tty->print_cr("rbp = 0x%016lx", regs[10]);
      tty->print_cr("rsp = 0x%016lx", regs[11]);
      tty->print_cr("r8  = 0x%016lx", regs[7]);
      tty->print_cr("r9  = 0x%016lx", regs[6]);
      tty->print_cr("r10 = 0x%016lx", regs[5]);
      tty->print_cr("r11 = 0x%016lx", regs[4]);
      tty->print_cr("r12 = 0x%016lx", regs[3]);
      tty->print_cr("r13 = 0x%016lx", regs[2]);
      tty->print_cr("r14 = 0x%016lx", regs[1]);
      tty->print_cr("r15 = 0x%016lx", regs[0]);
      print_state64(pc, regs);
      BREAKPOINT;
      assert(false, "start up GDB");
    }
    ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
  } else {
@ -6087,6 +6131,54 @@ void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
  }
}

void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) {
  ttyLocker ttyl;
  FlagSetting fs(Debugging, true);
  tty->print_cr("rip = 0x%016lx", pc);
#ifndef PRODUCT
  tty->cr();
  findpc(pc);
  tty->cr();
#endif
#define PRINT_REG(rax, value) \
  { tty->print("%s = ", #rax); os::print_location(tty, value); }
  PRINT_REG(rax, regs[15]);
  PRINT_REG(rbx, regs[12]);
  PRINT_REG(rcx, regs[14]);
  PRINT_REG(rdx, regs[13]);
  PRINT_REG(rdi, regs[8]);
  PRINT_REG(rsi, regs[9]);
  PRINT_REG(rbp, regs[10]);
  PRINT_REG(rsp, regs[11]);
  PRINT_REG(r8 , regs[7]);
  PRINT_REG(r9 , regs[6]);
  PRINT_REG(r10, regs[5]);
  PRINT_REG(r11, regs[4]);
  PRINT_REG(r12, regs[3]);
  PRINT_REG(r13, regs[2]);
  PRINT_REG(r14, regs[1]);
  PRINT_REG(r15, regs[0]);
#undef PRINT_REG
  // Print some words near top of stack.
  int64_t* rsp = (int64_t*) regs[11];
  int64_t* dump_sp = rsp;
  for (int col1 = 0; col1 < 8; col1++) {
    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (int64_t)dump_sp);
    os::print_location(tty, *dump_sp++);
  }
  for (int row = 0; row < 25; row++) {
    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (int64_t)dump_sp);
    for (int col = 0; col < 4; col++) {
      tty->print(" 0x%016lx", *dump_sp++);
    }
    tty->cr();
  }
  // Print some instructions around pc:
  Disassembler::decode((address)pc-64, (address)pc);
  tty->print_cr("--------");
  Disassembler::decode((address)pc, (address)pc+32);
}

#endif // _LP64

// Now versions that are common to 32/64 bit
@ -6456,7 +6548,7 @@ void MacroAssembler::call_VM_base(Register oop_result,
      get_thread(rax);
      cmpptr(java_thread, rax);
      jcc(Assembler::equal, L);
      stop("MacroAssembler::call_VM_base: rdi not callee saved?");
      STOP("MacroAssembler::call_VM_base: rdi not callee saved?");
      bind(L);
    }
    pop(rax);
@ -7196,7 +7288,7 @@ void MacroAssembler::pow_or_exp(bool is_exp, int num_fpu_regs_in_use) {
      jcc(Assembler::notZero, integer);
      cmpl(tmp3, 0x80000000);
      jcc(Assembler::notZero, integer);
      stop("integer indefinite value shouldn't be seen here");
      STOP("integer indefinite value shouldn't be seen here");
      bind(integer);
    }
#else
@ -7206,7 +7298,7 @@ void MacroAssembler::pow_or_exp(bool is_exp, int num_fpu_regs_in_use) {
      shlq(tmp3, 1);
      jcc(Assembler::carryClear, integer);
      jcc(Assembler::notZero, integer);
      stop("integer indefinite value shouldn't be seen here");
      STOP("integer indefinite value shouldn't be seen here");
      bind(integer);
    }
#endif
@ -8388,7 +8480,7 @@ Register MacroAssembler::tlab_refill(Label& retry,
    shlptr(tsize, LogHeapWordSize);
    cmpptr(t1, tsize);
    jcc(Assembler::equal, ok);
    stop("assert(t1 != tlab size)");
    STOP("assert(t1 != tlab size)");
    should_not_reach_here();

    bind(ok);
@ -8727,6 +8819,19 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
}


// virtual method calling
void MacroAssembler::lookup_virtual_method(Register recv_klass,
                                           RegisterOrConstant vtable_index,
                                           Register method_result) {
  const int base = instanceKlass::vtable_start_offset() * wordSize;
  assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below");
  Address vtable_entry_addr(recv_klass,
                            vtable_index, Address::times_ptr,
                            base + vtableEntry::method_offset_in_bytes());
  movptr(method_result, vtable_entry_addr);
}
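A sketch of the address arithmetic that lookup_virtual_method above encodes in a single scaled-index load: the vtable lives at a fixed offset inside the klass, one pointer-sized entry per index. This is illustrative C++, not HotSpot source; the offsets are hypothetical placeholders.

#include <cstdint>

constexpr int vtable_start_offset_bytes = 200;  // assumed klass-layout offset
constexpr int method_offset_in_entry    = 0;    // assumed entry layout

inline void* vtable_method_at(const char* recv_klass, int vtable_index) {
  const char* slot = recv_klass + vtable_start_offset_bytes
                   + vtable_index * sizeof(void*)  // the Address::times_ptr scaling
                   + method_offset_in_entry;
  return *reinterpret_cast<void* const*>(slot);
}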
void MacroAssembler::check_klass_subtype(Register sub_klass,
                                         Register super_klass,
                                         Register temp_reg,
@ -8976,6 +9081,7 @@ void MacroAssembler::verify_oop(Register reg, const char* s) {
  // Pass register number to verify_oop_subroutine
  char* b = new char[strlen(s) + 50];
  sprintf(b, "verify_oop: %s: %s", reg->name(), s);
  BLOCK_COMMENT("verify_oop {");
#ifdef _LP64
  push(rscratch1);  // save r10, trashed by movptr()
#endif
@ -8990,6 +9096,7 @@ void MacroAssembler::verify_oop(Register reg, const char* s) {
  movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
  call(rax);
  // Caller pops the arguments (oop, message) and restores rax, r10
  BLOCK_COMMENT("} verify_oop");
}


@ -9010,7 +9117,7 @@ RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_ad
    jcc(Assembler::notZero, L);
    char* buf = new char[40];
    sprintf(buf, "DelayedValue="INTPTR_FORMAT, delayed_value_addr[1]);
    stop(buf);
    STOP(buf);
  } else {
    jccb(Assembler::notZero, L);
    hlt();
@ -9026,60 +9133,6 @@ RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_ad
}


// registers on entry:
//  - rax ('check' register): required MethodType
//  - rcx: method handle
//  - rdx, rsi, or ?: killable temp
void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_reg,
                                              Register temp_reg,
                                              Label& wrong_method_type) {
  Address type_addr(mh_reg, delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg));
  // compare method type against that of the receiver
  if (UseCompressedOops) {
    load_heap_oop(temp_reg, type_addr);
    cmpptr(mtype_reg, temp_reg);
  } else {
    cmpptr(mtype_reg, type_addr);
  }
  jcc(Assembler::notEqual, wrong_method_type);
}


// A method handle has a "vmslots" field which gives the size of its
// argument list in JVM stack slots.  This field is either located directly
// in every method handle, or else is indirectly accessed through the
// method handle's MethodType.  This macro hides the distinction.
void MacroAssembler::load_method_handle_vmslots(Register vmslots_reg, Register mh_reg,
                                                Register temp_reg) {
  assert_different_registers(vmslots_reg, mh_reg, temp_reg);
  // load mh.type.form.vmslots
  Register temp2_reg = vmslots_reg;
  load_heap_oop(temp2_reg, Address(mh_reg, delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg)));
  load_heap_oop(temp2_reg, Address(temp2_reg, delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, temp_reg)));
  movl(vmslots_reg, Address(temp2_reg, delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, temp_reg)));
}


// registers on entry:
//  - rcx: method handle
//  - rdx: killable temp (interpreted only)
//  - rax: killable temp (compiled only)
void MacroAssembler::jump_to_method_handle_entry(Register mh_reg, Register temp_reg) {
  assert(mh_reg == rcx, "caller must put MH object in rcx");
  assert_different_registers(mh_reg, temp_reg);

  // pick out the interpreted side of the handler
  // NOTE: vmentry is not an oop!
  movptr(temp_reg, Address(mh_reg, delayed_value(java_lang_invoke_MethodHandle::vmentry_offset_in_bytes, temp_reg)));

  // off we go...
  jmp(Address(temp_reg, MethodHandleEntry::from_interpreted_entry_offset_in_bytes()));

  // for the various stubs which take control at this point,
  // see MethodHandles::generate_method_handle_stub
}


Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
                                         int extra_slot_offset) {
  // cf. TemplateTable::prepare_invoke(), if (load_receiver).
@ -9152,14 +9205,14 @@ void MacroAssembler::verify_tlab() {
    movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
    cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())));
    jcc(Assembler::aboveEqual, next);
    stop("assert(top >= start)");
    STOP("assert(top >= start)");
    should_not_reach_here();

    bind(next);
    movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));
    cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
    jcc(Assembler::aboveEqual, ok);
    stop("assert(top <= end)");
    STOP("assert(top <= end)");
    should_not_reach_here();

    bind(ok);
@ -9592,6 +9645,25 @@ void MacroAssembler::store_heap_oop(Address dst, Register src) {
  movptr(dst, src);
}

void MacroAssembler::cmp_heap_oop(Register src1, Address src2, Register tmp) {
  assert_different_registers(src1, tmp);
#ifdef _LP64
  if (UseCompressedOops) {
    bool did_push = false;
    if (tmp == noreg) {
      tmp = rax;
      push(tmp);
      did_push = true;
      assert(!src2.uses(rsp), "can't push");
    }
    load_heap_oop(tmp, src2);
    cmpptr(src1, tmp);
    if (did_push)  pop(tmp);
  } else
#endif
    cmpptr(src1, src2);
}
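The new cmp_heap_oop helper above exists because, under compressed oops, a heap field holds a narrow 32-bit encoding that cannot be compared directly against a full pointer; it must first be decoded into a temporary (rax is borrowed and saved when the caller supplies none). An illustrative C++ model of the problem being solved (not HotSpot source; base and shift values are assumptions):

#include <cstdint>

const uint64_t heap_base = 0x800000000ULL;  // assumed narrow-oop base
const int      oop_shift = 3;               // assumed narrow-oop shift

inline uint64_t decode(uint32_t narrow) {
  return heap_base + (uint64_t(narrow) << oop_shift);
}

inline bool cmp_heap_oop(uint64_t full_oop, uint32_t narrow_field) {
  return full_oop == decode(narrow_field);  // what load_heap_oop + cmpptr emit
}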
// Used for storing NULLs.
void MacroAssembler::store_heap_oop_null(Address dst) {
#ifdef _LP64
@ -9622,7 +9694,7 @@ void MacroAssembler::verify_heapbase(const char* msg) {
    push(rscratch1);  // cmpptr trashes rscratch1
    cmpptr(r12_heapbase, ExternalAddress((address)Universe::narrow_oop_base_addr()));
    jcc(Assembler::equal, ok);
    stop(msg);
    STOP(msg);
    bind(ok);
    pop(rscratch1);
  }
@ -9655,7 +9727,7 @@ void MacroAssembler::encode_heap_oop_not_null(Register r) {
    Label ok;
    testq(r, r);
    jcc(Assembler::notEqual, ok);
    stop("null oop passed to encode_heap_oop_not_null");
    STOP("null oop passed to encode_heap_oop_not_null");
    bind(ok);
  }
#endif
@ -9676,7 +9748,7 @@ void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
    Label ok;
    testq(src, src);
    jcc(Assembler::notEqual, ok);
    stop("null oop passed to encode_heap_oop_not_null2");
    STOP("null oop passed to encode_heap_oop_not_null2");
    bind(ok);
  }
#endif
@ -9867,7 +9939,7 @@ void MacroAssembler::verified_entry(int framesize, bool stack_bang, bool fp_mode
    cmpptr(rax, StackAlignmentInBytes-wordSize);
    pop(rax);
    jcc(Assembler::equal, L);
    stop("Stack is not properly aligned!");
    STOP("Stack is not properly aligned!");
    bind(L);
  }
#endif
@ -10541,13 +10613,6 @@ void MacroAssembler::char_arrays_equals(bool is_array_equ, Register ary1, Regist
  bind(DONE);
}

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
void MacroAssembler::generate_fill(BasicType t, bool aligned,
                                   Register to, Register value, Register count,
                                   Register rtmp, XMMRegister xtmp) {
@ -1940,6 +1940,7 @@ class MacroAssembler: public Assembler {
  void load_heap_oop(Register dst, Address src);
  void load_heap_oop_not_null(Register dst, Address src);
  void store_heap_oop(Address dst, Register src);
  void cmp_heap_oop(Register src1, Address src2, Register tmp = noreg);

  // Used for storing NULL. All other oop constants should be
  // stored using routines that take a jobject.
@ -2117,6 +2118,11 @@ class MacroAssembler: public Assembler {
                               Register scan_temp,
                               Label& no_such_interface);

  // virtual method calling
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
@ -2152,15 +2158,8 @@ class MacroAssembler: public Assembler {
                               Label& L_success);

  // method handles (JSR 292)
  void check_method_handle_type(Register mtype_reg, Register mh_reg,
                                Register temp_reg,
                                Label& wrong_method_type);
  void load_method_handle_vmslots(Register vmslots_reg, Register mh_reg,
                                  Register temp_reg);
  void jump_to_method_handle_entry(Register mh_reg, Register temp_reg);
  Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);


  //----
  void set_word_if_not_zero(Register reg);  // sets reg to 1 if not zero, otherwise 0

@ -2179,8 +2178,13 @@ class MacroAssembler: public Assembler {
  // prints msg and continues
  void warn(const char* msg);

  // dumps registers and other state
  void print_state();

  static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
  static void debug64(char* msg, int64_t pc, int64_t regs[]);
  static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
  static void print_state64(int64_t pc, int64_t regs[]);

  void os_breakpoint();
@ -3508,6 +3508,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
ciMethod* method = op->profiled_method();
int bci = op->profiled_bci();
ciMethod* callee = op->profiled_callee();

// Update counter for all call types
ciMethodData* md = method->method_data_or_null();
@ -3519,9 +3520,11 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
__ movoop(mdo, md->constant_encoding());
Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
Bytecodes::Code bc = method->java_code_at_bci(bci);
const bool callee_is_static = callee->is_loaded() && callee->is_static();
// Perform additional virtual call profiling for invokevirtual and
// invokeinterface bytecodes
if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
!callee_is_static && // required for optimized MH invokes
C1ProfileVirtualCalls) {
assert(op->recv()->is_single_cpu(), "recv must be allocated");
Register recv = op->recv()->as_register();
@ -871,9 +871,9 @@ address InterpreterGenerator::generate_accessor_entry(void) {
// Need to differentiate between igetfield, agetfield, bgetfield etc.
// because they are different sizes.
// Use the type from the constant pool cache
__ shrl(rdx, ConstantPoolCacheEntry::tosBits);
// Make sure we don't need to mask rdx for tosBits after the above shift
ConstantPoolCacheEntry::verify_tosBits();
__ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift);
// Make sure we don't need to mask rdx after the above shift
ConstantPoolCacheEntry::verify_tos_state_shift();
#ifdef _LP64
Label notObj;
__ cmpl(rdx, atos);
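For context, a minimal sketch of what the new tos_state extraction computes, assuming the JDK 8 ConstantPoolCacheEntry layout in which the TosState occupies the top tos_state_bits == 4 bits of the flags word (so tos_state_shift == 32 - 4 == 28; the layout is an assumption, names follow cpCache.hpp):
// Illustrative only, not part of this change:
//   juint flags = ...;  // flags word loaded from the CP cache entry
//   juint tos   = flags >> ConstantPoolCacheEntry::tos_state_shift;
// A logical right shift of the top four bits leaves only the state, which is
// why verify_tos_state_shift() can assert that no additional mask is needed.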
|
@ -439,7 +439,6 @@ frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
|
// frame::sender_for_compiled_frame
frame frame::sender_for_compiled_frame(RegisterMap* map) const {
assert(map != NULL, "map must be set");
assert(!is_ricochet_frame(), "caller must handle this");

// frame owned by optimizing compiler
assert(_cb->frame_size() >= 0, "must have non-zero frame size");
@ -483,7 +482,6 @@ frame frame::sender(RegisterMap* map) const {
if (is_entry_frame()) return sender_for_entry_frame(map);
if (is_interpreted_frame()) return sender_for_interpreter_frame(map);
assert(_cb == CodeCache::find_blob(pc()),"Must be the same");
if (is_ricochet_frame()) return sender_for_ricochet_frame(map);

if (_cb != NULL) {
return sender_for_compiled_frame(map);
@ -658,9 +656,7 @@ intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
values.describe(frame_no, fp() + frame::name##_offset, #name)

void frame::describe_pd(FrameValues& values, int frame_no) {
if (is_ricochet_frame()) {
MethodHandles::RicochetFrame::describe(this, values, frame_no);
} else if (is_interpreted_frame()) {
if (is_interpreted_frame()) {
DESCRIBE_FP_OFFSET(interpreter_frame_sender_sp);
DESCRIBE_FP_OFFSET(interpreter_frame_last_sp);
DESCRIBE_FP_OFFSET(interpreter_frame_method);
@ -682,12 +678,7 @@ intptr_t* frame::real_fp() const {
if (_cb != NULL) {
// use the frame size if valid
int size = _cb->frame_size();
if ((size > 0) &&
(! is_ricochet_frame())) {
// Work-around: ricochet explicitly excluded because frame size is not
// constant for the ricochet blob but its frame_size could not, for
// some reason, be declared as <= 0. This potentially confusing
// size declaration should be fixed as another CR.
if (size > 0) {
return unextended_sp() + size;
}
}
|
@ -253,8 +253,12 @@ void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register
|
get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size);
movptr(bytecode, Address(cache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
const int shift_count = (1 + byte_no) * BitsPerByte;
assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) ||
(byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift),
"correct shift count");
shrptr(bytecode, shift_count);
andptr(bytecode, 0xFF);
assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask");
andptr(bytecode, ConstantPoolCacheEntry::bytecode_1_mask);
}
|
@ -256,8 +256,12 @@ void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register
|
// little-endian machines allow us that.
movl(bytecode, Address(cache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
const int shift_count = (1 + byte_no) * BitsPerByte;
assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) ||
(byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift),
"correct shift count");
shrl(bytecode, shift_count);
andl(bytecode, 0xFF);
assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask");
andl(bytecode, ConstantPoolCacheEntry::bytecode_1_mask);
}
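A quick worked example of the shift count these two hunks assert against, assuming the _indices word packs the constant pool index in its low 16 bits, bytecode 1 in bits 16..23, and bytecode 2 in bits 24..31:
// Illustrative only: with BitsPerByte == 8,
//   byte_no == f1_byte (1)  ->  shift_count == (1 + 1) * 8 == 16 == bytecode_1_shift
//   byte_no == f2_byte (2)  ->  shift_count == (1 + 2) * 8 == 24 == bytecode_2_shift
// so (indices >> shift_count) & bytecode_1_mask extracts the resolved-bytecode
// byte, and the common-mask assert guarantees the same mask works for both.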
@ -35,7 +35,6 @@
address generate_normal_entry(bool synchronized);
address generate_native_entry(bool synchronized);
address generate_abstract_entry(void);
address generate_method_handle_entry(void);
address generate_math_entry(AbstractInterpreter::MethodKind kind);
address generate_empty_entry(void);
address generate_accessor_entry(void);
@ -243,18 +243,6 @@ address InterpreterGenerator::generate_abstract_entry(void) {
}

// Method handle invoker
// Dispatch a method of the form java.lang.invoke.MethodHandles::invoke(...)
address InterpreterGenerator::generate_method_handle_entry(void) {
if (!EnableInvokeDynamic) {
return generate_abstract_entry();
}

address entry_point = MethodHandles::generate_method_handle_interpreter_entry(_masm);

return entry_point;
}

void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {

// This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
@ -325,19 +325,6 @@ address InterpreterGenerator::generate_abstract_entry(void) {
}

// Method handle invoker
// Dispatch a method of the form java.lang.invoke.MethodHandles::invoke(...)
address InterpreterGenerator::generate_method_handle_entry(void) {
if (!EnableInvokeDynamic) {
return generate_abstract_entry();
}

address entry_point = MethodHandles::generate_method_handle_interpreter_entry(_masm);

return entry_point;
}

// Empty method, generate a very fast return.

address InterpreterGenerator::generate_empty_entry(void) {
File diff suppressed because it is too large
@ -27,266 +27,12 @@
// Adapters
enum /* platform_dependent_constants */ {
adapter_code_size = NOT_LP64(16000 DEBUG_ONLY(+ 15000)) LP64_ONLY(32000 DEBUG_ONLY(+ 120000))
};

public:

// The stack just after the recursive call from a ricochet frame
// looks something like this. Offsets are marked in words, not bytes.
// rsi (r13 on LP64) is part of the interpreter calling sequence
// which tells the callee where is my real rsp (for frame walking).
// (...lower memory addresses)
// rsp: [ return pc ] always the global RicochetBlob::bounce_addr
// rsp+1: [ recursive arg N ]
// rsp+2: [ recursive arg N-1 ]
// ...
// rsp+N: [ recursive arg 1 ]
// rsp+N+1: [ recursive method handle ]
// ...
// rbp-6: [ cleanup continuation pc ] <-- (struct RicochetFrame)
// rbp-5: [ saved target MH ] the MH we will call on the saved args
// rbp-4: [ saved args layout oop ] an int[] array which describes argument layout
// rbp-3: [ saved args pointer ] address of transformed adapter arg M (slot 0)
// rbp-2: [ conversion ] information about how the return value is used
// rbp-1: [ exact sender sp ] exact TOS (rsi/r13) of original sender frame
// rbp+0: [ saved sender fp ] (for original sender of AMH)
// rbp+1: [ saved sender pc ] (back to original sender of AMH)
// rbp+2: [ transformed adapter arg M ] <-- (extended TOS of original sender)
// rbp+3: [ transformed adapter arg M-1]
// ...
// rbp+M+1: [ transformed adapter arg 1 ]
// rbp+M+2: [ padding ] <-- (rbp + saved args base offset)
// ... [ optional padding]
// (higher memory addresses...)
//
// The arguments originally passed by the original sender
// are lost, and arbitrary amounts of stack motion might have
// happened due to argument transformation.
// (This is done by C2I/I2C adapters and non-direct method handles.)
// This is why there is an unpredictable amount of memory between
// the extended and exact TOS of the sender.
// The ricochet adapter itself will also (in general) perform
// transformations before the recursive call.
//
// The transformed and saved arguments, immediately above the saved
// return PC, are a well-formed method handle invocation ready to execute.
// When the GC needs to walk the stack, these arguments are described
// via the saved arg types oop, an int[] array with a private format.
// This array is derived from the type of the transformed adapter
// method handle, which also sits at the base of the saved argument
// bundle. Since the GC may not be able to fish out the int[]
// array, it is pushed explicitly on the stack. This may be
// an unnecessary expense.
//
// The following register conventions are significant at this point:
// rsp the thread stack, as always; preserved by caller
// rsi/r13 exact TOS of recursive frame (contents of [rbp-2])
// rcx recursive method handle (contents of [rsp+N+1])
// rbp preserved by caller (not used by caller)
// Unless otherwise specified, all registers can be blown by the call.
//
// If this frame must be walked, the transformed adapter arguments
// will be found with the help of the saved arguments descriptor.
//
// Therefore, the descriptor must match the referenced arguments.
// The arguments must be followed by at least one word of padding,
// which will be necessary to complete the final method handle call.
// That word is not treated as holding an oop.
//
// The word pointed to by the return argument pointer is likewise not
// treated as an oop, even if it points to a saved argument.
// This allows the saved argument list to have a "hole" in it
// to receive an oop from the recursive call.
// (The hole might temporarily contain RETURN_VALUE_PLACEHOLDER.)
//
// When the recursive callee returns, RicochetBlob::bounce_addr will
// immediately jump to the continuation stored in the RF.
// This continuation will merge the recursive return value
// into the saved argument list. At that point, the original
// rsi, rbp, and rsp will be reloaded, the ricochet frame will
// disappear, and the final target of the adapter method handle
// will be invoked on the transformed argument list.

class RicochetFrame {
friend class MethodHandles;
friend class VMStructs;

private:
intptr_t* _continuation; // what to do when control gets back here
oopDesc* _saved_target; // target method handle to invoke on saved_args
oopDesc* _saved_args_layout; // caching point for MethodTypeForm.vmlayout cookie
intptr_t* _saved_args_base; // base of pushed arguments (slot 0, arg N) (-3)
intptr_t _conversion; // misc. information from original AdapterMethodHandle (-2)
intptr_t* _exact_sender_sp; // parallel to interpreter_frame_sender_sp (-1)
intptr_t* _sender_link; // *must* coincide with frame::link_offset (0)
address _sender_pc; // *must* coincide with frame::return_addr_offset (1)

public:
intptr_t* continuation() const { return _continuation; }
oop saved_target() const { return _saved_target; }
oop saved_args_layout() const { return _saved_args_layout; }
intptr_t* saved_args_base() const { return _saved_args_base; }
intptr_t conversion() const { return _conversion; }
intptr_t* exact_sender_sp() const { return _exact_sender_sp; }
intptr_t* sender_link() const { return _sender_link; }
address sender_pc() const { return _sender_pc; }

intptr_t* extended_sender_sp() const {
// The extended sender SP is above the current RicochetFrame.
return (intptr_t*) (((address) this) + sizeof(RicochetFrame));
}

intptr_t return_value_slot_number() const {
return adapter_conversion_vminfo(conversion());
}
BasicType return_value_type() const {
return adapter_conversion_dest_type(conversion());
}
bool has_return_value_slot() const {
return return_value_type() != T_VOID;
}
intptr_t* return_value_slot_addr() const {
assert(has_return_value_slot(), "");
return saved_arg_slot_addr(return_value_slot_number());
}
intptr_t* saved_target_slot_addr() const {
return saved_arg_slot_addr(saved_args_length());
}
intptr_t* saved_arg_slot_addr(int slot) const {
assert(slot >= 0, "");
return (intptr_t*)( (address)saved_args_base() + (slot * Interpreter::stackElementSize) );
}

jint saved_args_length() const;
jint saved_arg_offset(int arg) const;

// GC interface
oop* saved_target_addr() { return (oop*)&_saved_target; }
oop* saved_args_layout_addr() { return (oop*)&_saved_args_layout; }

oop compute_saved_args_layout(bool read_cache, bool write_cache);

// Compiler/assembler interface.
static int continuation_offset_in_bytes() { return offset_of(RicochetFrame, _continuation); }
static int saved_target_offset_in_bytes() { return offset_of(RicochetFrame, _saved_target); }
static int saved_args_layout_offset_in_bytes(){ return offset_of(RicochetFrame, _saved_args_layout); }
static int saved_args_base_offset_in_bytes() { return offset_of(RicochetFrame, _saved_args_base); }
static int conversion_offset_in_bytes() { return offset_of(RicochetFrame, _conversion); }
static int exact_sender_sp_offset_in_bytes() { return offset_of(RicochetFrame, _exact_sender_sp); }
static int sender_link_offset_in_bytes() { return offset_of(RicochetFrame, _sender_link); }
static int sender_pc_offset_in_bytes() { return offset_of(RicochetFrame, _sender_pc); }

// This value is not used for much, but it apparently must be nonzero.
static int frame_size_in_bytes() { return sender_link_offset_in_bytes(); }

#ifdef ASSERT
// The magic number is supposed to help find ricochet frames within the bytes of stack dumps.
enum { MAGIC_NUMBER_1 = 0xFEED03E, MAGIC_NUMBER_2 = 0xBEEF03E };
static int magic_number_1_offset_in_bytes() { return -wordSize; }
static int magic_number_2_offset_in_bytes() { return sizeof(RicochetFrame); }
intptr_t magic_number_1() const { return *(intptr_t*)((address)this + magic_number_1_offset_in_bytes()); };
intptr_t magic_number_2() const { return *(intptr_t*)((address)this + magic_number_2_offset_in_bytes()); };
#endif //ASSERT

enum { RETURN_VALUE_PLACEHOLDER = (NOT_DEBUG(0) DEBUG_ONLY(42)) };

static void verify_offsets() NOT_DEBUG_RETURN;
void verify() const NOT_DEBUG_RETURN; // check for MAGIC_NUMBER, etc.
void zap_arguments() NOT_DEBUG_RETURN;

static void generate_ricochet_blob(MacroAssembler* _masm,
// output params:
int* bounce_offset,
int* exception_offset,
int* frame_size_in_words);

static void enter_ricochet_frame(MacroAssembler* _masm,
Register rcx_recv,
Register rax_argv,
address return_handler,
Register rbx_temp);
static void leave_ricochet_frame(MacroAssembler* _masm,
Register rcx_recv,
Register new_sp_reg,
Register sender_pc_reg);

static Address frame_address(int offset = 0) {
// The RicochetFrame is found by subtracting a constant offset from rbp.
return Address(rbp, - sender_link_offset_in_bytes() + offset);
}

static RicochetFrame* from_frame(const frame& fr) {
address bp = (address) fr.fp();
RicochetFrame* rf = (RicochetFrame*)(bp - sender_link_offset_in_bytes());
rf->verify();
return rf;
}

static void verify_clean(MacroAssembler* _masm) NOT_DEBUG_RETURN;

static void describe(const frame* fr, FrameValues& values, int frame_no) PRODUCT_RETURN;
adapter_code_size = NOT_LP64(16000 DEBUG_ONLY(+ 25000)) LP64_ONLY(32000 DEBUG_ONLY(+ 150000))
};

// Additional helper methods for MethodHandles code generation:
public:
static void load_klass_from_Class(MacroAssembler* _masm, Register klass_reg);
static void load_conversion_vminfo(MacroAssembler* _masm, Register reg, Address conversion_field_addr);
static void load_conversion_dest_type(MacroAssembler* _masm, Register reg, Address conversion_field_addr);

static void load_stack_move(MacroAssembler* _masm,
Register rdi_stack_move,
Register rcx_amh,
bool might_be_negative);

static void insert_arg_slots(MacroAssembler* _masm,
RegisterOrConstant arg_slots,
Register rax_argslot,
Register rbx_temp, Register rdx_temp);

static void remove_arg_slots(MacroAssembler* _masm,
RegisterOrConstant arg_slots,
Register rax_argslot,
Register rbx_temp, Register rdx_temp);

static void push_arg_slots(MacroAssembler* _masm,
Register rax_argslot,
RegisterOrConstant slot_count,
int skip_words_count,
Register rbx_temp, Register rdx_temp);

static void move_arg_slots_up(MacroAssembler* _masm,
Register rbx_bottom, // invariant
Address top_addr, // can use rax_temp
RegisterOrConstant positive_distance_in_slots,
Register rax_temp, Register rdx_temp);

static void move_arg_slots_down(MacroAssembler* _masm,
Address bottom_addr, // can use rax_temp
Register rbx_top, // invariant
RegisterOrConstant negative_distance_in_slots,
Register rax_temp, Register rdx_temp);

static void move_typed_arg(MacroAssembler* _masm,
BasicType type, bool is_element,
Address slot_dest, Address value_src,
Register rbx_temp, Register rdx_temp);

static void move_return_value(MacroAssembler* _masm, BasicType type,
Address return_slot);

static void verify_argslot(MacroAssembler* _masm, Register argslot_reg,
const char* error_message) NOT_DEBUG_RETURN;

static void verify_argslots(MacroAssembler* _masm,
RegisterOrConstant argslot_count,
Register argslot_reg,
bool negate_argslot,
const char* error_message) NOT_DEBUG_RETURN;

static void verify_stack_move(MacroAssembler* _masm,
RegisterOrConstant arg_slots,
int direction) NOT_DEBUG_RETURN;

static void verify_klass(MacroAssembler* _masm,
Register obj, KlassHandle klass,
@ -297,9 +43,17 @@ public:
"reference is a MH");
}

static void verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) NOT_DEBUG_RETURN;

// Similar to InterpreterMacroAssembler::jump_from_interpreted.
// Takes care of special dispatch from single stepping too.
static void jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp);
static void jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp,
bool for_compiler_entry);

static void jump_to_lambda_form(MacroAssembler* _masm,
Register recv, Register method_temp,
Register temp2,
bool for_compiler_entry);

static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN;
@ -643,6 +643,19 @@ static void move_i2c_double(MacroAssembler *masm, XMMRegister r, Register saved_
__ movdbl(r, Address(saved_sp, next_val_off));
}

static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
address code_start, address code_end,
Label& L_ok) {
Label L_fail;
__ lea(temp_reg, ExternalAddress(code_start));
__ cmpptr(pc_reg, temp_reg);
__ jcc(Assembler::belowEqual, L_fail);
__ lea(temp_reg, ExternalAddress(code_end));
__ cmpptr(pc_reg, temp_reg);
__ jcc(Assembler::below, L_ok);
__ bind(L_fail);
}
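Expressed as plain C++, the helper's branch structure amounts to an exclusive range test (a sketch of the control flow above, not HotSpot code; the comparisons are unsigned):
// if (pc > code_start && pc < code_end) goto L_ok;
// otherwise execution falls through at L_fail, after which the caller
// emits its failure path (here: __ stop(...)).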

static void gen_i2c_adapter(MacroAssembler *masm,
int total_args_passed,
int comp_args_on_stack,
@ -653,9 +666,53 @@ static void gen_i2c_adapter(MacroAssembler *masm,
// we may do an i2c -> c2i transition if we lose a race where compiled
// code goes non-entrant while we get args ready.

// Adapters can be frameless because they do not require the caller
// to perform additional cleanup work, such as correcting the stack pointer.
// An i2c adapter is frameless because the *caller* frame, which is interpreted,
// routinely repairs its own stack pointer (from interpreter_frame_last_sp),
// even if a callee has modified the stack pointer.
// A c2i adapter is frameless because the *callee* frame, which is interpreted,
// routinely repairs its caller's stack pointer (from sender_sp, which is set
// up via the senderSP register).
// In other words, if *either* the caller or callee is interpreted, we can
// get the stack pointer repaired after a call.
// This is why c2i and i2c adapters cannot be indefinitely composed.
// In particular, if a c2i adapter were to somehow call an i2c adapter,
// both caller and callee would be compiled methods, and neither would
// clean up the stack pointer changes performed by the two adapters.
// If this happens, control eventually transfers back to the compiled
// caller, but with an uncorrected stack, causing delayed havoc.
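Restating the invariant this comment relies on (a summary for clarity, not part of the change):
// i2c: interpreted caller -> compiled callee; the interpreted caller repairs
//      its own SP afterwards (from interpreter_frame_last_sp).
// c2i: compiled caller -> interpreted callee; the interpreted callee repairs
//      the caller's SP (via sender_sp / the senderSP register).
// c2i -> i2c chained: compiled code on both ends, so no interpreted frame is
//      left to repair SP -- the cascade the VerifyAdapterCalls check below traps.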

// Pick up the return address
__ movptr(rax, Address(rsp, 0));

if (VerifyAdapterCalls &&
(Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
// So, let's test for cascading c2i/i2c adapters right now.
// assert(Interpreter::contains($return_addr) ||
// StubRoutines::contains($return_addr),
// "i2c adapter must return to an interpreter frame");
__ block_comment("verify_i2c { ");
Label L_ok;
if (Interpreter::code() != NULL)
range_check(masm, rax, rdi,
Interpreter::code()->code_start(), Interpreter::code()->code_end(),
L_ok);
if (StubRoutines::code1() != NULL)
range_check(masm, rax, rdi,
StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
L_ok);
if (StubRoutines::code2() != NULL)
range_check(masm, rax, rdi,
StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
L_ok);
const char* msg = "i2c adapter must return to an interpreter frame";
__ block_comment(msg);
__ stop(msg);
__ bind(L_ok);
__ block_comment("} verify_i2ce ");
}

// Must preserve original SP for loading incoming arguments because
// we need to align the outgoing SP for compiled code.
__ movptr(rdi, rsp);
@ -1293,6 +1350,89 @@ static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType
__ bind(done);
}

static void verify_oop_args(MacroAssembler* masm,
int total_args_passed,
const BasicType* sig_bt,
const VMRegPair* regs) {
Register temp_reg = rbx; // not part of any compiled calling seq
if (VerifyOops) {
for (int i = 0; i < total_args_passed; i++) {
if (sig_bt[i] == T_OBJECT ||
sig_bt[i] == T_ARRAY) {
VMReg r = regs[i].first();
assert(r->is_valid(), "bad oop arg");
if (r->is_stack()) {
__ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
__ verify_oop(temp_reg);
} else {
__ verify_oop(r->as_Register());
}
}
}
}
}

static void gen_special_dispatch(MacroAssembler* masm,
int total_args_passed,
int comp_args_on_stack,
vmIntrinsics::ID special_dispatch,
const BasicType* sig_bt,
const VMRegPair* regs) {
verify_oop_args(masm, total_args_passed, sig_bt, regs);

// Now write the args into the outgoing interpreter space
bool has_receiver = false;
Register receiver_reg = noreg;
int member_arg_pos = -1;
Register member_reg = noreg;
int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(special_dispatch);
if (ref_kind != 0) {
member_arg_pos = total_args_passed - 1; // trailing MemberName argument
member_reg = rbx; // known to be free at this point
has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
} else if (special_dispatch == vmIntrinsics::_invokeBasic) {
has_receiver = true;
} else {
guarantee(false, err_msg("special_dispatch=%d", special_dispatch));
}

if (member_reg != noreg) {
// Load the member_arg into register, if necessary.
assert(member_arg_pos >= 0 && member_arg_pos < total_args_passed, "oob");
assert(sig_bt[member_arg_pos] == T_OBJECT, "dispatch argument must be an object");
VMReg r = regs[member_arg_pos].first();
assert(r->is_valid(), "bad member arg");
if (r->is_stack()) {
__ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
} else {
// no data motion is needed
member_reg = r->as_Register();
}
}

if (has_receiver) {
// Make sure the receiver is loaded into a register.
assert(total_args_passed > 0, "oob");
assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
VMReg r = regs[0].first();
assert(r->is_valid(), "bad receiver arg");
if (r->is_stack()) {
// Porting note: This assumes that compiled calling conventions always
// pass the receiver oop in a register. If this is not true on some
// platform, pick a temp and load the receiver from stack.
assert(false, "receiver always in a register");
receiver_reg = rcx; // known to be free at this point
__ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
} else {
// no data motion is needed
receiver_reg = r->as_Register();
}
}

// Figure out which address we are really jumping to:
MethodHandles::generate_method_handle_dispatch(masm, special_dispatch,
receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}

// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method. The method takes arguments
@ -1323,14 +1463,37 @@ static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType
// transition back to thread_in_Java
// return to caller
//
nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
methodHandle method,
int compile_id,
int total_in_args,
int comp_args_on_stack,
BasicType *in_sig_bt,
VMRegPair *in_regs,
BasicType* in_sig_bt,
VMRegPair* in_regs,
BasicType ret_type) {
if (method->is_method_handle_intrinsic()) {
vmIntrinsics::ID iid = method->intrinsic_id();
intptr_t start = (intptr_t)__ pc();
int vep_offset = ((intptr_t)__ pc()) - start;
gen_special_dispatch(masm,
total_in_args,
comp_args_on_stack,
method->intrinsic_id(),
in_sig_bt,
in_regs);
int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period
__ flush();
int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually
return nmethod::new_native_nmethod(method,
compile_id,
masm->code(),
vep_offset,
frame_complete,
stack_slots / VMRegImpl::slots_per_word,
in_ByteSize(-1),
in_ByteSize(-1),
(OopMapSet*)NULL);
}
bool is_critical_native = true;
address native_func = method->critical_native_function();
if (native_func == NULL) {
||||
@ -1436,7 +1599,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
|
if (in_regs[i].first()->is_Register()) {
const Register reg = in_regs[i].first()->as_Register();
switch (in_sig_bt[i]) {
case T_ARRAY:
case T_ARRAY: // critical array (uses 2 slots on LP64)
case T_BOOLEAN:
case T_BYTE:
case T_SHORT:
|
@ -590,6 +590,19 @@ static void gen_c2i_adapter(MacroAssembler *masm,
|
__ jmp(rcx);
}

static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
address code_start, address code_end,
Label& L_ok) {
Label L_fail;
__ lea(temp_reg, ExternalAddress(code_start));
__ cmpptr(pc_reg, temp_reg);
__ jcc(Assembler::belowEqual, L_fail);
__ lea(temp_reg, ExternalAddress(code_end));
__ cmpptr(pc_reg, temp_reg);
__ jcc(Assembler::below, L_ok);
__ bind(L_fail);
}

static void gen_i2c_adapter(MacroAssembler *masm,
int total_args_passed,
int comp_args_on_stack,
||||
@ -605,9 +618,53 @@ static void gen_i2c_adapter(MacroAssembler *masm,
|
// save code can segv when fxsave instructions find improperly
// aligned stack pointer.

// Adapters can be frameless because they do not require the caller
// to perform additional cleanup work, such as correcting the stack pointer.
// An i2c adapter is frameless because the *caller* frame, which is interpreted,
// routinely repairs its own stack pointer (from interpreter_frame_last_sp),
// even if a callee has modified the stack pointer.
// A c2i adapter is frameless because the *callee* frame, which is interpreted,
// routinely repairs its caller's stack pointer (from sender_sp, which is set
// up via the senderSP register).
// In other words, if *either* the caller or callee is interpreted, we can
// get the stack pointer repaired after a call.
// This is why c2i and i2c adapters cannot be indefinitely composed.
// In particular, if a c2i adapter were to somehow call an i2c adapter,
// both caller and callee would be compiled methods, and neither would
// clean up the stack pointer changes performed by the two adapters.
// If this happens, control eventually transfers back to the compiled
// caller, but with an uncorrected stack, causing delayed havoc.

// Pick up the return address
__ movptr(rax, Address(rsp, 0));

if (VerifyAdapterCalls &&
(Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
// So, let's test for cascading c2i/i2c adapters right now.
// assert(Interpreter::contains($return_addr) ||
// StubRoutines::contains($return_addr),
// "i2c adapter must return to an interpreter frame");
__ block_comment("verify_i2c { ");
Label L_ok;
if (Interpreter::code() != NULL)
range_check(masm, rax, r11,
Interpreter::code()->code_start(), Interpreter::code()->code_end(),
L_ok);
if (StubRoutines::code1() != NULL)
range_check(masm, rax, r11,
StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
L_ok);
if (StubRoutines::code2() != NULL)
range_check(masm, rax, r11,
StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
L_ok);
const char* msg = "i2c adapter must return to an interpreter frame";
__ block_comment(msg);
__ stop(msg);
__ bind(L_ok);
__ block_comment("} verify_i2ce ");
}

// Must preserve original SP for loading incoming arguments because
// we need to align the outgoing SP for compiled code.
__ movptr(r11, rsp);
||||
@ -1366,6 +1423,14 @@ static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType
|
}

// Different signatures may require very different orders for the move
// to avoid clobbering other arguments. There's no simple way to
// order them safely. Compute a safe order for issuing stores and
// break any cycles in those stores. This code is fairly general but
// it's not necessary on the other platforms so we keep it in the
// platform dependent code instead of moving it into a shared file.
// (See bugs 7013347 & 7145024.)
// Note that this code is specific to LP64.
class ComputeMoveOrder: public StackObj {
class MoveOperation: public ResourceObj {
friend class ComputeMoveOrder;
@ -1532,6 +1597,89 @@ class ComputeMoveOrder: public StackObj {
}
};
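As a concrete illustration of the cycle-breaking described in the comment introducing ComputeMoveOrder (registers here are illustrative, not taken from this change): the pair of moves rdi -> rsi and rsi -> rdi cannot be issued in either order without clobbering a source, so the computed order parks one value in a temporary:
//   temp <- rdi;  rdi <- rsi;  rsi <- temp
Acyclic chains only need topological ordering: for rax -> rbx and rbx -> rcx, the store rbx -> rcx must be issued first.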
|
static void verify_oop_args(MacroAssembler* masm,
int total_args_passed,
const BasicType* sig_bt,
const VMRegPair* regs) {
Register temp_reg = rbx; // not part of any compiled calling seq
if (VerifyOops) {
for (int i = 0; i < total_args_passed; i++) {
if (sig_bt[i] == T_OBJECT ||
sig_bt[i] == T_ARRAY) {
VMReg r = regs[i].first();
assert(r->is_valid(), "bad oop arg");
if (r->is_stack()) {
__ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
__ verify_oop(temp_reg);
} else {
__ verify_oop(r->as_Register());
}
}
}
}
}

static void gen_special_dispatch(MacroAssembler* masm,
int total_args_passed,
int comp_args_on_stack,
vmIntrinsics::ID special_dispatch,
const BasicType* sig_bt,
const VMRegPair* regs) {
verify_oop_args(masm, total_args_passed, sig_bt, regs);

// Now write the args into the outgoing interpreter space
bool has_receiver = false;
Register receiver_reg = noreg;
int member_arg_pos = -1;
Register member_reg = noreg;
int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(special_dispatch);
if (ref_kind != 0) {
member_arg_pos = total_args_passed - 1; // trailing MemberName argument
member_reg = rbx; // known to be free at this point
has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
} else if (special_dispatch == vmIntrinsics::_invokeBasic) {
has_receiver = true;
} else {
guarantee(false, err_msg("special_dispatch=%d", special_dispatch));
}

if (member_reg != noreg) {
// Load the member_arg into register, if necessary.
assert(member_arg_pos >= 0 && member_arg_pos < total_args_passed, "oob");
assert(sig_bt[member_arg_pos] == T_OBJECT, "dispatch argument must be an object");
VMReg r = regs[member_arg_pos].first();
assert(r->is_valid(), "bad member arg");
if (r->is_stack()) {
__ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
} else {
// no data motion is needed
member_reg = r->as_Register();
}
}

if (has_receiver) {
// Make sure the receiver is loaded into a register.
assert(total_args_passed > 0, "oob");
assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
VMReg r = regs[0].first();
assert(r->is_valid(), "bad receiver arg");
if (r->is_stack()) {
// Porting note: This assumes that compiled calling conventions always
// pass the receiver oop in a register. If this is not true on some
// platform, pick a temp and load the receiver from stack.
assert(false, "receiver always in a register");
receiver_reg = j_rarg0; // known to be free at this point
__ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
} else {
// no data motion is needed
receiver_reg = r->as_Register();
}
}

// Figure out which address we are really jumping to:
MethodHandles::generate_method_handle_dispatch(masm, special_dispatch,
receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}

// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method. The method takes arguments
@ -1539,14 +1687,60 @@ class ComputeMoveOrder: public StackObj {
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to java state (possibly blocking), unhandlizes any result and
// returns.
nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
//
// Critical native functions are a shorthand for the use of
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions. The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
// native call to ensure that the GC_locker
// lock_critical/unlock_critical semantics are followed. Some other
// parts of JNI setup are skipped, like the tear down of the JNI handle
// block and the check for pending exceptions, since it's impossible for them
// to be thrown.
//
// They are roughly structured like this:
// if (GC_locker::needs_gc())
// SharedRuntime::block_for_jni_critical();
// transition to thread_in_native
// unpack array arguments and call native entry point
// check for safepoint in progress
// check if any thread suspend flags are set
// call into JVM and possibly unlock the JNI critical
// if a GC was suppressed while in the critical native.
// transition back to thread_in_Java
// return to caller
||||
//
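For orientation, a sketch of what a critical native looks like at the source level, assuming the CriticalJNINatives scheme this wrapper supports (method and package names are made up): for a Java method declared static native int sum(int[] data);, the critical variant is resolved under the JavaCritical_ prefix, receives no JNIEnv* or jclass, and sees each array argument unpacked as a (length, pointer) pair:
// jint JavaCritical_pkg_Cls_sum(jint length, jint* data);  // illustrative signature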
|
nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
methodHandle method,
int compile_id,
int total_in_args,
int comp_args_on_stack,
BasicType *in_sig_bt,
VMRegPair *in_regs,
BasicType* in_sig_bt,
VMRegPair* in_regs,
BasicType ret_type) {
if (method->is_method_handle_intrinsic()) {
vmIntrinsics::ID iid = method->intrinsic_id();
intptr_t start = (intptr_t)__ pc();
int vep_offset = ((intptr_t)__ pc()) - start;
gen_special_dispatch(masm,
total_in_args,
comp_args_on_stack,
method->intrinsic_id(),
in_sig_bt,
in_regs);
int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period
__ flush();
int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually
return nmethod::new_native_nmethod(method,
compile_id,
masm->code(),
vep_offset,
frame_complete,
stack_slots / VMRegImpl::slots_per_word,
in_ByteSize(-1),
in_ByteSize(-1),
(OopMapSet*)NULL);
}
bool is_critical_native = true;
address native_func = method->critical_native_function();
if (native_func == NULL) {
@ -1658,7 +1852,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
case T_SHORT:
case T_CHAR:
case T_INT: single_slots++; break;
case T_ARRAY:
case T_ARRAY: // specific to LP64 (7145024)
case T_LONG: double_slots++; break;
default: ShouldNotReachHere();
}
@ -2326,12 +2326,6 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_d2l_wrapper = generate_d2i_wrapper(T_LONG,
CAST_FROM_FN_PTR(address, SharedRuntime::d2l));

// Build this early so it's available for the interpreter
StubRoutines::_throw_WrongMethodTypeException_entry =
generate_throw_exception("WrongMethodTypeException throw_exception",
CAST_FROM_FN_PTR(address, SharedRuntime::throw_WrongMethodTypeException),
rax, rcx);

// Build this early so it's available for the interpreter
StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
}
@ -3102,14 +3102,6 @@ class StubGenerator: public StubCodeGenerator {

StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();

// Build this early so it's available for the interpreter. Stub
// expects the required and actual types as register arguments in
// j_rarg0 and j_rarg1 respectively.
StubRoutines::_throw_WrongMethodTypeException_entry =
generate_throw_exception("WrongMethodTypeException throw_exception",
CAST_FROM_FN_PTR(address, SharedRuntime::throw_WrongMethodTypeException),
rax, rcx);

// Build this early so it's available for the interpreter.
StubRoutines::_throw_StackOverflowError_entry =
generate_throw_exception("StackOverflowError throw_exception",
@ -710,9 +710,9 @@ address InterpreterGenerator::generate_accessor_entry(void) {
// Need to differentiate between igetfield, agetfield, bgetfield etc.
// because they are different sizes.
// Use the type from the constant pool cache
__ shrl(rdx, ConstantPoolCacheEntry::tosBits);
// Make sure we don't need to mask rdx for tosBits after the above shift
ConstantPoolCacheEntry::verify_tosBits();
__ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift);
// Make sure we don't need to mask rdx after the above shift
ConstantPoolCacheEntry::verify_tos_state_shift();
__ cmpl(rdx, btos);
__ jcc(Assembler::notEqual, notByte);
__ load_signed_byte(rax, field_address);
@ -1513,7 +1513,6 @@ address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter:
case Interpreter::empty : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry(); break;
case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break;
case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break;
case Interpreter::method_handle : entry_point = ((InterpreterGenerator*)this)->generate_method_handle_entry(); break;

case Interpreter::java_lang_math_sin : // fall thru
case Interpreter::java_lang_math_cos : // fall thru
@ -1526,7 +1525,9 @@ address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter:
case Interpreter::java_lang_math_exp : entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind); break;
case Interpreter::java_lang_ref_reference_get
: entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
default : ShouldNotReachHere(); break;
default:
fatal(err_msg("unexpected method kind: %d", kind));
break;
}

if (entry_point) return entry_point;
@ -683,9 +683,9 @@ address InterpreterGenerator::generate_accessor_entry(void) {
// Need to differentiate between igetfield, agetfield, bgetfield etc.
// because they are different sizes.
// Use the type from the constant pool cache
__ shrl(rdx, ConstantPoolCacheEntry::tosBits);
// Make sure we don't need to mask edx for tosBits after the above shift
ConstantPoolCacheEntry::verify_tosBits();
__ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift);
// Make sure we don't need to mask edx after the above shift
ConstantPoolCacheEntry::verify_tos_state_shift();

__ cmpl(rdx, atos);
__ jcc(Assembler::notEqual, notObj);
@ -1524,12 +1524,11 @@ address AbstractInterpreterGenerator::generate_method_entry(
switch (kind) {
case Interpreter::zerolocals : break;
case Interpreter::zerolocals_synchronized: synchronized = true; break;
case Interpreter::native : entry_point = ((InterpreterGenerator*) this)->generate_native_entry(false); break;
case Interpreter::native_synchronized : entry_point = ((InterpreterGenerator*) this)->generate_native_entry(true); break;
case Interpreter::empty : entry_point = ((InterpreterGenerator*) this)->generate_empty_entry(); break;
case Interpreter::accessor : entry_point = ((InterpreterGenerator*) this)->generate_accessor_entry(); break;
case Interpreter::abstract : entry_point = ((InterpreterGenerator*) this)->generate_abstract_entry(); break;
case Interpreter::method_handle : entry_point = ((InterpreterGenerator*) this)->generate_method_handle_entry();break;
case Interpreter::native : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false); break;
case Interpreter::native_synchronized : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true); break;
case Interpreter::empty : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry(); break;
case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break;
case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break;

case Interpreter::java_lang_math_sin : // fall thru
case Interpreter::java_lang_math_cos : // fall thru
@ -1539,10 +1538,12 @@ address AbstractInterpreterGenerator::generate_method_entry(
case Interpreter::java_lang_math_log10 : // fall thru
case Interpreter::java_lang_math_sqrt : // fall thru
case Interpreter::java_lang_math_pow : // fall thru
case Interpreter::java_lang_math_exp : entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind); break;
case Interpreter::java_lang_math_exp : entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind); break;
case Interpreter::java_lang_ref_reference_get
: entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
default : ShouldNotReachHere(); break;
default:
fatal(err_msg("unexpected method kind: %d", kind));
break;
}

if (entry_point) {
@ -446,13 +446,13 @@ void TemplateTable::fast_aldc(bool wide) {
const Register cache = rcx;
const Register index = rdx;

resolve_cache_and_index(f1_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1));
resolve_cache_and_index(f12_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1));
if (VerifyOops) {
__ verify_oop(rax);
}

Label L_done, L_throw_exception;
const Register con_klass_temp = rcx; // same as Rcache
const Register con_klass_temp = rcx; // same as cache
__ load_klass(con_klass_temp, rax);
__ cmpptr(con_klass_temp, ExternalAddress((address)Universe::systemObjArrayKlassObj_addr()));
__ jcc(Assembler::notEqual, L_done);
@ -2084,15 +2084,15 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
Register Rcache,
Register index,
size_t index_size) {
Register temp = rbx;

const Register temp = rbx;
assert_different_registers(result, Rcache, index, temp);

Label resolved;
if (byte_no == f1_oop) {
// We are resolved if the f1 field contains a non-null object (CallSite, etc.)
// This kind of CP cache entry does not need to match the flags byte, because
if (byte_no == f12_oop) {
// We are resolved if the f1 field contains a non-null object (CallSite, MethodType, etc.)
// This kind of CP cache entry does not need to match bytecode_1 or bytecode_2, because
// there is a 1-1 relation between bytecode type and CP entry type.
// The caller will also load a methodOop from f2.
assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD)
__ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
__ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
@ -2112,15 +2112,18 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
case Bytecodes::_getstatic : // fall through
case Bytecodes::_putstatic : // fall through
case Bytecodes::_getfield : // fall through
case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
case Bytecodes::_invokevirtual : // fall through
case Bytecodes::_invokespecial : // fall through
case Bytecodes::_invokestatic : // fall through
case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
case Bytecodes::_fast_aldc : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break;
case Bytecodes::_fast_aldc_w : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break;
default : ShouldNotReachHere(); break;
case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
case Bytecodes::_invokehandle : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break;
case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
case Bytecodes::_fast_aldc : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break;
case Bytecodes::_fast_aldc_w : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break;
default:
fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
break;
}
__ movl(temp, (int)bytecode());
__ call_VM(noreg, entry, temp);
@ -2149,7 +2152,7 @@ void TemplateTable::load_field_cp_cache_entry(Register obj,
__ movl(flags, Address(cache, index, Address::times_ptr,
in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset())));

// klass overwrite register
// klass overwrite register
if (is_static) {
__ movptr(obj, Address(cache, index, Address::times_ptr,
in_bytes(cp_base_offset + ConstantPoolCacheEntry::f1_offset())));
@ -2161,7 +2164,7 @@ void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
Register itable_index,
Register flags,
bool is_invokevirtual,
bool is_invokevfinal /*unused*/,
bool is_invokevfinal, /*unused*/
bool is_invokedynamic) {
// setup registers
const Register cache = rcx;
@ -2171,28 +2174,33 @@ void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
assert_different_registers(itable_index, flags);
assert_different_registers(itable_index, cache, index);
// determine constant pool cache field offsets
assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
const int method_offset = in_bytes(
constantPoolCacheOopDesc::base_offset() +
(is_invokevirtual
((byte_no == f2_byte)
? ConstantPoolCacheEntry::f2_offset()
: ConstantPoolCacheEntry::f1_offset()
)
);
: ConstantPoolCacheEntry::f1_offset()));
const int flags_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::flags_offset());
// access constant pool cache fields
const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::f2_offset());

if (byte_no == f1_oop) {
// Resolved f1_oop goes directly into 'method' register.
assert(is_invokedynamic, "");
resolve_cache_and_index(byte_no, method, cache, index, sizeof(u4));
if (byte_no == f12_oop) {
// Resolved f1_oop (CallSite, MethodType, etc.) goes into 'itable_index'.
// Resolved f2_oop (methodOop invoker) will go into 'method' (at index_offset).
// See ConstantPoolCacheEntry::set_dynamic_call and set_method_handle.
size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
resolve_cache_and_index(byte_no, itable_index, cache, index, index_size);
__ movptr(method, Address(cache, index, Address::times_ptr, index_offset));
itable_index = noreg; // hack to disable load below
} else {
resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
__ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
}
if (itable_index != noreg) {
// pick up itable index from f2 also:
assert(byte_no == f1_byte, "already picked up f1");
__ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
}
__ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
@ -2260,10 +2268,10 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {

Label Done, notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;

__ shrl(flags, ConstantPoolCacheEntry::tosBits);
__ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
assert(btos == 0, "change code, btos != 0");
// btos
__ andptr(flags, 0x0f);
__ andptr(flags, ConstantPoolCacheEntry::tos_state_mask);
__ jcc(Assembler::notZero, notByte);

__ load_signed_byte(rax, lo );
@ -2415,9 +2423,9 @@ void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is
__ movl(rcx, Address(rax, rdx, Address::times_ptr, in_bytes(cp_base_offset +
ConstantPoolCacheEntry::flags_offset())));
__ mov(rbx, rsp);
__ shrl(rcx, ConstantPoolCacheEntry::tosBits);
// Make sure we don't need to mask rcx for tosBits after the above shift
ConstantPoolCacheEntry::verify_tosBits();
__ shrl(rcx, ConstantPoolCacheEntry::tos_state_shift);
// Make sure we don't need to mask rcx after the above shift
ConstantPoolCacheEntry::verify_tos_state_shift();
__ cmpl(rcx, ltos);
__ jccb(Assembler::equal, two_word);
__ cmpl(rcx, dtos);
@ -2467,7 +2475,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {

Label notVolatile, Done;
__ movl(rdx, flags);
__ shrl(rdx, ConstantPoolCacheEntry::volatileField);
__ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
__ andl(rdx, 0x1);

// field addresses
@ -2476,9 +2484,9 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {

Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;

__ shrl(flags, ConstantPoolCacheEntry::tosBits);
__ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
assert(btos == 0, "change code, btos != 0");
__ andl(flags, 0x0f);
__ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
__ jcc(Assembler::notZero, notByte);

// btos
@ -2719,7 +2727,7 @@ void TemplateTable::fast_storefield(TosState state) {
// volatile_barrier( );

Label notVolatile, Done;
__ shrl(rdx, ConstantPoolCacheEntry::volatileField);
__ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
__ andl(rdx, 0x1);
// Check for volatile store
__ testl(rdx, rdx);
@ -2885,19 +2893,29 @@ void TemplateTable::count_calls(Register method, Register temp) {
}

void TemplateTable::prepare_invoke(Register method, Register index, int byte_no) {
void TemplateTable::prepare_invoke(int byte_no,
Register method, // linked method (or i-klass)
Register index, // itable index, MethodType, etc.
Register recv, // if caller wants to see it
Register flags // if caller wants to test it
) {
// determine flags
Bytecodes::Code code = bytecode();
const Bytecodes::Code code = bytecode();
const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
const bool is_invokehandle = code == Bytecodes::_invokehandle;
const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
const bool is_invokespecial = code == Bytecodes::_invokespecial;
const bool load_receiver = (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic);
const bool receiver_null_check = is_invokespecial;
const bool save_flags = is_invokeinterface || is_invokevirtual;
const bool load_receiver = (recv != noreg);
const bool save_flags = (flags != noreg);
assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
assert(save_flags == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
assert(flags == noreg || flags == rdx, "");
assert(recv == noreg || recv == rcx, "");

// setup registers & access constant pool cache
const Register recv = rcx;
const Register flags = rdx;
if (recv == noreg) recv = rcx;
if (flags == noreg) flags = rdx;
assert_different_registers(method, index, recv, flags);

// save 'interpreter return address'
@ -2905,20 +2923,28 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no)

load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
||||
|
||||
// load receiver if needed (note: no return address pushed yet)
|
||||
if (load_receiver) {
|
||||
assert(!is_invokedynamic, "");
|
||||
__ movl(recv, flags);
|
||||
__ andl(recv, 0xFF);
|
||||
// recv count is 0 based?
|
||||
Address recv_addr(rsp, recv, Interpreter::stackElementScale(), -Interpreter::expr_offset_in_bytes(1));
|
||||
__ movptr(recv, recv_addr);
|
||||
__ verify_oop(recv);
|
||||
// maybe push appendix to arguments (just before return address)
|
||||
if (is_invokedynamic || is_invokehandle) {
|
||||
Label L_no_push;
|
||||
__ verify_oop(index);
|
||||
__ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift));
|
||||
__ jccb(Assembler::zero, L_no_push);
|
||||
// Push the appendix as a trailing parameter.
|
||||
// This must be done before we get the receiver,
|
||||
// since the parameter_size includes it.
|
||||
__ push(index); // push appendix (MethodType, CallSite, etc.)
|
||||
__ bind(L_no_push);
|
||||
}
|
||||
|
||||
// do null check if needed
|
||||
if (receiver_null_check) {
|
||||
__ null_check(recv);
|
||||
// load receiver if needed (note: no return address pushed yet)
|
||||
if (load_receiver) {
|
||||
__ movl(recv, flags);
|
||||
__ andl(recv, ConstantPoolCacheEntry::parameter_size_mask);
|
||||
const int no_return_pc_pushed_yet = -1; // argument slot correction before we push return address
|
||||
const int receiver_is_at_end = -1; // back off one slot to get receiver
|
||||
Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
|
||||
__ movptr(recv, recv_addr);
|
||||
__ verify_oop(recv);
|
||||
}
|
||||
|
||||
if (save_flags) {
|
||||
@ -2926,16 +2952,14 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no)
|
||||
}
|
||||
|
||||
// compute return type
|
||||
__ shrl(flags, ConstantPoolCacheEntry::tosBits);
|
||||
// Make sure we don't need to mask flags for tosBits after the above shift
|
||||
ConstantPoolCacheEntry::verify_tosBits();
|
||||
__ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
|
||||
// Make sure we don't need to mask flags after the above shift
|
||||
ConstantPoolCacheEntry::verify_tos_state_shift();
|
||||
// load return address
|
||||
{
|
||||
address table_addr;
|
||||
if (is_invokeinterface || is_invokedynamic)
|
||||
table_addr = (address)Interpreter::return_5_addrs_by_index_table();
|
||||
else
|
||||
table_addr = (address)Interpreter::return_3_addrs_by_index_table();
|
||||
const address table_addr = (is_invokeinterface || is_invokedynamic) ?
|
||||
(address)Interpreter::return_5_addrs_by_index_table() :
|
||||
(address)Interpreter::return_3_addrs_by_index_table();
|
||||
ExternalAddress table(table_addr);
|
||||
__ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr)));
|
||||
}
|
||||
@ -2943,7 +2967,7 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no)
|
||||
// push return address
|
||||
__ push(flags);
|
||||
|
||||
// Restore flag value from the constant pool cache, and restore rsi
|
||||
// Restore flags value from the constant pool cache, and restore rsi
|
||||
// for later null checks. rsi is the bytecode pointer
|
||||
if (save_flags) {
|
||||
__ mov(flags, rsi);
|
||||
@ -2952,22 +2976,26 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no)
|
||||
}
|
||||
|
||||
|
||||
void TemplateTable::invokevirtual_helper(Register index, Register recv,
|
||||
Register flags) {
|
||||
|
||||
void TemplateTable::invokevirtual_helper(Register index,
|
||||
Register recv,
|
||||
Register flags) {
|
||||
// Uses temporary registers rax, rdx
|
||||
assert_different_registers(index, recv, rax, rdx);
|
||||
assert(index == rbx, "");
|
||||
assert(recv == rcx, "");
|
||||
|
||||
// Test for an invoke of a final method
|
||||
Label notFinal;
|
||||
__ movl(rax, flags);
|
||||
__ andl(rax, (1 << ConstantPoolCacheEntry::vfinalMethod));
|
||||
__ andl(rax, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
|
||||
__ jcc(Assembler::zero, notFinal);
|
||||
|
||||
Register method = index; // method must be rbx,
|
||||
assert(method == rbx, "methodOop must be rbx, for interpreter calling convention");
|
||||
const Register method = index; // method must be rbx
|
||||
assert(method == rbx,
|
||||
"methodOop must be rbx for interpreter calling convention");
|
||||
|
||||
// do the call - the index is actually the method to call
|
||||
// that is, f2 is a vtable index if !is_vfinal, else f2 is a methodOop
|
||||
__ verify_oop(method);
|
||||
|
||||
// It's final, need a null check here!
|
||||
@ -2982,7 +3010,6 @@ void TemplateTable::invokevirtual_helper(Register index, Register recv,
|
||||
|
||||
// get receiver klass
|
||||
__ null_check(recv, oopDesc::klass_offset_in_bytes());
|
||||
// Keep recv in rcx for callee expects it there
|
||||
__ load_klass(rax, recv);
|
||||
__ verify_oop(rax);
|
||||
|
||||
@ -2990,9 +3017,7 @@ void TemplateTable::invokevirtual_helper(Register index, Register recv,
|
||||
__ profile_virtual_call(rax, rdi, rdx);
|
||||
|
||||
// get target methodOop & entry point
|
||||
const int base = instanceKlass::vtable_start_offset() * wordSize;
|
||||
assert(vtableEntry::size() * wordSize == 4, "adjust the scaling in the code below");
|
||||
__ movptr(method, Address(rax, index, Address::times_ptr, base + vtableEntry::method_offset_in_bytes()));
|
||||
__ lookup_virtual_method(rax, index, method);
|
||||
__ jump_from_interpreted(method, rdx);
|
||||
}
|
||||
|
||||
@ -3000,9 +3025,12 @@ void TemplateTable::invokevirtual_helper(Register index, Register recv,
|
||||
void TemplateTable::invokevirtual(int byte_no) {
|
||||
transition(vtos, vtos);
|
||||
assert(byte_no == f2_byte, "use this argument");
|
||||
prepare_invoke(rbx, noreg, byte_no);
|
||||
prepare_invoke(byte_no,
|
||||
rbx, // method or vtable index
|
||||
noreg, // unused itable index
|
||||
rcx, rdx); // recv, flags
|
||||
|
||||
// rbx,: index
|
||||
// rbx: index
|
||||
// rcx: receiver
|
||||
// rdx: flags
|
||||
|
||||
@ -3013,7 +3041,10 @@ void TemplateTable::invokevirtual(int byte_no) {
|
||||
void TemplateTable::invokespecial(int byte_no) {
|
||||
transition(vtos, vtos);
|
||||
assert(byte_no == f1_byte, "use this argument");
|
||||
prepare_invoke(rbx, noreg, byte_no);
|
||||
prepare_invoke(byte_no, rbx, noreg, // get f1 methodOop
|
||||
rcx); // get receiver also for null check
|
||||
__ verify_oop(rcx);
|
||||
__ null_check(rcx);
|
||||
// do the call
|
||||
__ verify_oop(rbx);
|
||||
__ profile_call(rax);
|
||||
@ -3024,7 +3055,7 @@ void TemplateTable::invokespecial(int byte_no) {
|
||||
void TemplateTable::invokestatic(int byte_no) {
|
||||
transition(vtos, vtos);
|
||||
assert(byte_no == f1_byte, "use this argument");
|
||||
prepare_invoke(rbx, noreg, byte_no);
|
||||
prepare_invoke(byte_no, rbx); // get f1 methodOop
|
||||
// do the call
|
||||
__ verify_oop(rbx);
|
||||
__ profile_call(rax);
|
||||
@ -3042,10 +3073,11 @@ void TemplateTable::fast_invokevfinal(int byte_no) {
|
||||
void TemplateTable::invokeinterface(int byte_no) {
|
||||
transition(vtos, vtos);
|
||||
assert(byte_no == f1_byte, "use this argument");
|
||||
prepare_invoke(rax, rbx, byte_no);
|
||||
prepare_invoke(byte_no, rax, rbx, // get f1 klassOop, f2 itable index
|
||||
rcx, rdx); // recv, flags
|
||||
|
||||
// rax,: Interface
|
||||
// rbx,: index
|
||||
// rax: interface klass (from f1)
|
||||
// rbx: itable index (from f2)
|
||||
// rcx: receiver
|
||||
// rdx: flags
|
||||
|
||||
@ -3055,7 +3087,7 @@ void TemplateTable::invokeinterface(int byte_no) {
|
||||
// another compliant java compiler.
|
||||
Label notMethod;
|
||||
__ movl(rdi, rdx);
|
||||
__ andl(rdi, (1 << ConstantPoolCacheEntry::methodInterface));
|
||||
__ andl(rdi, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift));
|
||||
__ jcc(Assembler::zero, notMethod);
|
||||
|
||||
invokevirtual_helper(rbx, rcx, rdx);
|
||||
@ -3063,6 +3095,7 @@ void TemplateTable::invokeinterface(int byte_no) {
|
||||
|
||||
// Get receiver klass into rdx - also a null check
|
||||
__ restore_locals(); // restore rdi
|
||||
__ null_check(rcx, oopDesc::klass_offset_in_bytes());
|
||||
__ load_klass(rdx, rcx);
|
||||
__ verify_oop(rdx);
|
||||
|
||||
@ -3077,7 +3110,7 @@ void TemplateTable::invokeinterface(int byte_no) {
|
||||
rbx, rsi,
|
||||
no_such_interface);
|
||||
|
||||
// rbx,: methodOop to call
|
||||
// rbx: methodOop to call
|
||||
// rcx: receiver
|
||||
// Check for abstract method error
|
||||
// Note: This should be done more efficiently via a throw_abstract_method_error
|
||||
@ -3116,9 +3149,39 @@ void TemplateTable::invokeinterface(int byte_no) {
|
||||
__ should_not_reach_here();
|
||||
}
|
||||
|
||||
void TemplateTable::invokehandle(int byte_no) {
|
||||
transition(vtos, vtos);
|
||||
assert(byte_no == f12_oop, "use this argument");
|
||||
const Register rbx_method = rbx; // (from f2)
|
||||
const Register rax_mtype = rax; // (from f1)
|
||||
const Register rcx_recv = rcx;
|
||||
const Register rdx_flags = rdx;
|
||||
|
||||
if (!EnableInvokeDynamic) {
|
||||
// rewriter does not generate this bytecode
|
||||
__ should_not_reach_here();
|
||||
return;
|
||||
}
|
||||
|
||||
prepare_invoke(byte_no,
|
||||
rbx_method, rax_mtype, // get f2 methodOop, f1 MethodType
|
||||
rcx_recv);
|
||||
__ verify_oop(rbx_method);
|
||||
__ verify_oop(rcx_recv);
|
||||
__ null_check(rcx_recv);
|
||||
|
||||
// Note: rax_mtype is already pushed (if necessary) by prepare_invoke
|
||||
|
||||
// FIXME: profile the LambdaForm also
|
||||
__ profile_final_call(rax);
|
||||
|
||||
__ jump_from_interpreted(rbx_method, rdx);
|
||||
}
|
||||
|
||||
|
||||
void TemplateTable::invokedynamic(int byte_no) {
|
||||
transition(vtos, vtos);
|
||||
assert(byte_no == f1_oop, "use this argument");
|
||||
assert(byte_no == f12_oop, "use this argument");
|
||||
|
||||
if (!EnableInvokeDynamic) {
|
||||
// We should not encounter this bytecode if !EnableInvokeDynamic.
|
||||
@ -3131,26 +3194,23 @@ void TemplateTable::invokedynamic(int byte_no) {
|
||||
return;
|
||||
}
|
||||
|
||||
prepare_invoke(rax, rbx, byte_no);
|
||||
const Register rbx_method = rbx;
|
||||
const Register rax_callsite = rax;
|
||||
|
||||
// rax: CallSite object (f1)
|
||||
// rbx: unused (f2)
|
||||
// rcx: receiver address
|
||||
// rdx: flags (unused)
|
||||
prepare_invoke(byte_no, rbx_method, rax_callsite);
|
||||
|
||||
Register rax_callsite = rax;
|
||||
Register rcx_method_handle = rcx;
|
||||
// rax: CallSite object (from f1)
|
||||
// rbx: MH.linkToCallSite method (from f2)
|
||||
|
||||
// Note: rax_callsite is already pushed by prepare_invoke
|
||||
|
||||
// %%% should make a type profile for any invokedynamic that takes a ref argument
|
||||
// profile this call
|
||||
__ profile_call(rsi);
|
||||
|
||||
__ verify_oop(rax_callsite);
|
||||
__ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rdx)));
|
||||
__ null_check(rcx_method_handle);
|
||||
__ verify_oop(rcx_method_handle);
|
||||
__ prepare_to_jump_from_interpreted();
|
||||
__ jump_to_method_handle_entry(rcx_method_handle, rdx);
|
||||
|
||||
__ jump_from_interpreted(rbx_method, rdx);
|
||||
}
|
||||
|
||||
//----------------------------------------------------------------------------------------------------
|
||||
|
@ -25,10 +25,15 @@
#ifndef CPU_X86_VM_TEMPLATETABLE_X86_32_HPP
#define CPU_X86_VM_TEMPLATETABLE_X86_32_HPP

static void prepare_invoke(Register method, Register index, int byte_no);
static void prepare_invoke(int byte_no,
Register method, // linked method (or i-klass)
Register index = noreg, // itable index, MethodType, etc.
Register recv = noreg, // if caller wants to see it
Register flags = noreg // if caller wants to test it
);
static void invokevirtual_helper(Register index, Register recv,
Register flags);
static void volatile_barrier(Assembler::Membar_mask_bits order_constraint );
static void volatile_barrier(Assembler::Membar_mask_bits order_constraint);

// Helpers
static void index_check(Register array, Register index);
@ -458,7 +458,7 @@ void TemplateTable::fast_aldc(bool wide) {
const Register cache = rcx;
const Register index = rdx;

resolve_cache_and_index(f1_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1));
resolve_cache_and_index(f12_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1));
if (VerifyOops) {
__ verify_oop(rax);
}
@ -2125,10 +2125,11 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
assert_different_registers(result, Rcache, index, temp);

Label resolved;
if (byte_no == f1_oop) {
// We are resolved if the f1 field contains a non-null object (CallSite, etc.)
// This kind of CP cache entry does not need to match the flags byte, because
if (byte_no == f12_oop) {
// We are resolved if the f1 field contains a non-null object (CallSite, MethodType, etc.)
// This kind of CP cache entry does not need to match bytecode_1 or bytecode_2, because
// there is a 1-1 relation between bytecode type and CP entry type.
// The caller will also load a methodOop from f2.
assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD)
__ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
__ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
@ -2157,6 +2158,9 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
case Bytecodes::_invokeinterface:
entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke);
break;
case Bytecodes::_invokehandle:
entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle);
break;
case Bytecodes::_invokedynamic:
entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic);
break;
@ -2167,7 +2171,7 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
break;
default:
ShouldNotReachHere();
fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
break;
}
__ movl(temp, (int) bytecode());
@ -2180,7 +2184,7 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
__ bind(resolved);
}

// The Rcache and index registers must be set before call
// The cache and index registers must be set before call
void TemplateTable::load_field_cp_cache_entry(Register obj,
Register cache,
Register index,
@ -2191,17 +2195,17 @@ void TemplateTable::load_field_cp_cache_entry(Register obj,

ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
// Field offset
__ movptr(off, Address(cache, index, Address::times_8,
__ movptr(off, Address(cache, index, Address::times_ptr,
in_bytes(cp_base_offset +
ConstantPoolCacheEntry::f2_offset())));
// Flags
__ movl(flags, Address(cache, index, Address::times_8,
__ movl(flags, Address(cache, index, Address::times_ptr,
in_bytes(cp_base_offset +
ConstantPoolCacheEntry::flags_offset())));

// klass overwrite register
if (is_static) {
__ movptr(obj, Address(cache, index, Address::times_8,
__ movptr(obj, Address(cache, index, Address::times_ptr,
in_bytes(cp_base_offset +
ConstantPoolCacheEntry::f1_offset())));
}
@ -2222,9 +2226,10 @@ void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
assert_different_registers(itable_index, flags);
assert_different_registers(itable_index, cache, index);
// determine constant pool cache field offsets
assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
const int method_offset = in_bytes(
constantPoolCacheOopDesc::base_offset() +
(is_invokevirtual
((byte_no == f2_byte)
? ConstantPoolCacheEntry::f2_offset()
: ConstantPoolCacheEntry::f1_offset()));
const int flags_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
@ -2233,15 +2238,21 @@ void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::f2_offset());

if (byte_no == f1_oop) {
// Resolved f1_oop goes directly into 'method' register.
assert(is_invokedynamic, "");
resolve_cache_and_index(byte_no, method, cache, index, sizeof(u4));
if (byte_no == f12_oop) {
// Resolved f1_oop (CallSite, MethodType, etc.) goes into 'itable_index'.
// Resolved f2_oop (methodOop invoker) will go into 'method' (at index_offset).
// See ConstantPoolCacheEntry::set_dynamic_call and set_method_handle.
size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
resolve_cache_and_index(byte_no, itable_index, cache, index, index_size);
__ movptr(method, Address(cache, index, Address::times_ptr, index_offset));
itable_index = noreg; // hack to disable load below
} else {
resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
__ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
}
if (itable_index != noreg) {
// pick up itable index from f2 also:
assert(byte_no == f1_byte, "already picked up f1");
__ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
}
__ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
@ -2317,10 +2328,11 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
Label Done, notByte, notInt, notShort, notChar,
notLong, notFloat, notObj, notDouble;

__ shrl(flags, ConstantPoolCacheEntry::tosBits);
__ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
// Make sure we don't need to mask edx after the above shift
assert(btos == 0, "change code, btos != 0");

__ andl(flags, 0x0F);
__ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
__ jcc(Assembler::notZero, notByte);
// btos
__ load_signed_byte(rax, field);
@ -2466,10 +2478,9 @@ void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is
Address::times_8,
in_bytes(cp_base_offset +
ConstantPoolCacheEntry::flags_offset())));
__ shrl(c_rarg3, ConstantPoolCacheEntry::tosBits);
// Make sure we don't need to mask rcx for tosBits after the
// above shift
ConstantPoolCacheEntry::verify_tosBits();
__ shrl(c_rarg3, ConstantPoolCacheEntry::tos_state_shift);
// Make sure we don't need to mask rcx after the above shift
ConstantPoolCacheEntry::verify_tos_state_shift();
__ movptr(c_rarg1, at_tos_p1()); // initially assume a one word jvalue
__ cmpl(c_rarg3, ltos);
__ cmovptr(Assembler::equal,
@ -2516,7 +2527,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {

Label notVolatile, Done;
__ movl(rdx, flags);
__ shrl(rdx, ConstantPoolCacheEntry::volatileField);
__ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
__ andl(rdx, 0x1);

// field address
@ -2525,10 +2536,10 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
Label notByte, notInt, notShort, notChar,
notLong, notFloat, notObj, notDouble;

__ shrl(flags, ConstantPoolCacheEntry::tosBits);
__ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);

assert(btos == 0, "change code, btos != 0");
__ andl(flags, 0x0f);
__ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
__ jcc(Assembler::notZero, notByte);

// btos
@ -2751,7 +2762,7 @@ void TemplateTable::fast_storefield(TosState state) {
// Assembler::StoreStore));

Label notVolatile;
__ shrl(rdx, ConstantPoolCacheEntry::volatileField);
__ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
__ andl(rdx, 0x1);

// Get object from stack
@ -2832,7 +2843,7 @@ void TemplateTable::fast_accessfield(TosState state) {
// __ movl(rdx, Address(rcx, rbx, Address::times_8,
// in_bytes(constantPoolCacheOopDesc::base_offset() +
// ConstantPoolCacheEntry::flags_offset())));
// __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
// __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
// __ andl(rdx, 0x1);
// }
__ movptr(rbx, Address(rcx, rbx, Address::times_8,
@ -2920,7 +2931,7 @@ void TemplateTable::fast_xaccess(TosState state) {
// __ movl(rdx, Address(rcx, rdx, Address::times_8,
// in_bytes(constantPoolCacheOopDesc::base_offset() +
// ConstantPoolCacheEntry::flags_offset())));
// __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
// __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
// __ testl(rdx, 0x1);
// __ jcc(Assembler::zero, notVolatile);
// __ membar(Assembler::LoadLoad);
@ -2940,19 +2951,29 @@ void TemplateTable::count_calls(Register method, Register temp) {
ShouldNotReachHere();
}

void TemplateTable::prepare_invoke(Register method, Register index, int byte_no) {
void TemplateTable::prepare_invoke(int byte_no,
Register method, // linked method (or i-klass)
Register index, // itable index, MethodType, etc.
Register recv, // if caller wants to see it
Register flags // if caller wants to test it
) {
// determine flags
Bytecodes::Code code = bytecode();
const Bytecodes::Code code = bytecode();
const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
const bool is_invokehandle = code == Bytecodes::_invokehandle;
const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
const bool is_invokespecial = code == Bytecodes::_invokespecial;
const bool load_receiver = (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic);
const bool receiver_null_check = is_invokespecial;
const bool save_flags = is_invokeinterface || is_invokevirtual;
const bool load_receiver = (recv != noreg);
const bool save_flags = (flags != noreg);
assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
assert(save_flags == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
assert(flags == noreg || flags == rdx, "");
assert(recv == noreg || recv == rcx, "");

// setup registers & access constant pool cache
const Register recv = rcx;
const Register flags = rdx;
if (recv == noreg) recv = rcx;
if (flags == noreg) flags = rdx;
assert_different_registers(method, index, recv, flags);

// save 'interpreter return address'
@ -2960,19 +2981,29 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no)

load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);

// load receiver if needed (note: no return address pushed yet)
if (load_receiver) {
assert(!is_invokedynamic, "");
__ movl(recv, flags);
__ andl(recv, 0xFF);
Address recv_addr(rsp, recv, Address::times_8, -Interpreter::expr_offset_in_bytes(1));
__ movptr(recv, recv_addr);
__ verify_oop(recv);
// maybe push appendix to arguments (just before return address)
if (is_invokedynamic || is_invokehandle) {
Label L_no_push;
__ verify_oop(index);
__ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift));
__ jccb(Assembler::zero, L_no_push);
// Push the appendix as a trailing parameter.
// This must be done before we get the receiver,
// since the parameter_size includes it.
__ push(index); // push appendix (MethodType, CallSite, etc.)
__ bind(L_no_push);
}

// do null check if needed
if (receiver_null_check) {
__ null_check(recv);
// load receiver if needed (after appendix is pushed so parameter size is correct)
// Note: no return address pushed yet
if (load_receiver) {
__ movl(recv, flags);
__ andl(recv, ConstantPoolCacheEntry::parameter_size_mask);
const int no_return_pc_pushed_yet = -1; // argument slot correction before we push return address
const int receiver_is_at_end = -1; // back off one slot to get receiver
Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
__ movptr(recv, recv_addr);
__ verify_oop(recv);
}

if (save_flags) {
@ -2980,16 +3011,14 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no)
}

// compute return type
__ shrl(flags, ConstantPoolCacheEntry::tosBits);
// Make sure we don't need to mask flags for tosBits after the above shift
ConstantPoolCacheEntry::verify_tosBits();
__ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
// Make sure we don't need to mask flags after the above shift
ConstantPoolCacheEntry::verify_tos_state_shift();
// load return address
{
address table_addr;
if (is_invokeinterface || is_invokedynamic)
table_addr = (address)Interpreter::return_5_addrs_by_index_table();
else
table_addr = (address)Interpreter::return_3_addrs_by_index_table();
const address table_addr = (is_invokeinterface || is_invokedynamic) ?
(address)Interpreter::return_5_addrs_by_index_table() :
(address)Interpreter::return_3_addrs_by_index_table();
ExternalAddress table(table_addr);
__ lea(rscratch1, table);
__ movptr(flags, Address(rscratch1, flags, Address::times_ptr));
@ -2998,7 +3027,7 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no)
// push return address
__ push(flags);

// Restore flag field from the constant pool cache, and restore esi
// Restore flags value from the constant pool cache, and restore rsi
// for later null checks. r13 is the bytecode pointer
if (save_flags) {
__ movl(flags, r13);
@ -3012,11 +3041,13 @@ void TemplateTable::invokevirtual_helper(Register index,
Register flags) {
// Uses temporary registers rax, rdx
assert_different_registers(index, recv, rax, rdx);
assert(index == rbx, "");
assert(recv == rcx, "");

// Test for an invoke of a final method
Label notFinal;
__ movl(rax, flags);
__ andl(rax, (1 << ConstantPoolCacheEntry::vfinalMethod));
__ andl(rax, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
__ jcc(Assembler::zero, notFinal);

const Register method = index; // method must be rbx
@ -3024,6 +3055,7 @@ void TemplateTable::invokevirtual_helper(Register index,
"methodOop must be rbx for interpreter calling convention");

// do the call - the index is actually the method to call
// that is, f2 is a vtable index if !is_vfinal, else f2 is a methodOop
__ verify_oop(method);

// It's final, need a null check here!
@ -3039,20 +3071,13 @@ void TemplateTable::invokevirtual_helper(Register index,
// get receiver klass
__ null_check(recv, oopDesc::klass_offset_in_bytes());
__ load_klass(rax, recv);

__ verify_oop(rax);

// profile this call
__ profile_virtual_call(rax, r14, rdx);

// get target methodOop & entry point
const int base = instanceKlass::vtable_start_offset() * wordSize;
assert(vtableEntry::size() * wordSize == 8,
"adjust the scaling in the code below");
__ movptr(method, Address(rax, index,
Address::times_8,
base + vtableEntry::method_offset_in_bytes()));
__ movptr(rdx, Address(method, methodOopDesc::interpreter_entry_offset()));
__ lookup_virtual_method(rax, index, method);
__ jump_from_interpreted(method, rdx);
}

@ -3060,7 +3085,10 @@ void TemplateTable::invokevirtual_helper(Register index,
void TemplateTable::invokevirtual(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f2_byte, "use this argument");
prepare_invoke(rbx, noreg, byte_no);
prepare_invoke(byte_no,
rbx, // method or vtable index
noreg, // unused itable index
rcx, rdx); // recv, flags

// rbx: index
// rcx: receiver
@ -3073,7 +3101,10 @@ void TemplateTable::invokevirtual(int byte_no) {
void TemplateTable::invokespecial(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
prepare_invoke(rbx, noreg, byte_no);
prepare_invoke(byte_no, rbx, noreg, // get f1 methodOop
rcx); // get receiver also for null check
__ verify_oop(rcx);
__ null_check(rcx);
// do the call
__ verify_oop(rbx);
__ profile_call(rax);
@ -3084,7 +3115,7 @@ void TemplateTable::invokespecial(int byte_no) {
void TemplateTable::invokestatic(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
prepare_invoke(rbx, noreg, byte_no);
prepare_invoke(byte_no, rbx); // get f1 methodOop
// do the call
__ verify_oop(rbx);
__ profile_call(rax);
@ -3100,10 +3131,11 @@ void TemplateTable::fast_invokevfinal(int byte_no) {
void TemplateTable::invokeinterface(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
prepare_invoke(rax, rbx, byte_no);
prepare_invoke(byte_no, rax, rbx, // get f1 klassOop, f2 itable index
rcx, rdx); // recv, flags

// rax: Interface
// rbx: index
// rax: interface klass (from f1)
// rbx: itable index (from f2)
// rcx: receiver
// rdx: flags

@ -3113,14 +3145,15 @@ void TemplateTable::invokeinterface(int byte_no) {
// another compliant java compiler.
Label notMethod;
__ movl(r14, rdx);
__ andl(r14, (1 << ConstantPoolCacheEntry::methodInterface));
__ andl(r14, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift));
__ jcc(Assembler::zero, notMethod);

invokevirtual_helper(rbx, rcx, rdx);
__ bind(notMethod);

// Get receiver klass into rdx - also a null check
__ restore_locals(); // restore r14
__ restore_locals(); // restore r14
__ null_check(rcx, oopDesc::klass_offset_in_bytes());
__ load_klass(rdx, rcx);
__ verify_oop(rdx);

@ -3135,7 +3168,7 @@ void TemplateTable::invokeinterface(int byte_no) {
rbx, r13,
no_such_interface);

// rbx,: methodOop to call
// rbx: methodOop to call
// rcx: receiver
// Check for abstract method error
// Note: This should be done more efficiently via a throw_abstract_method_error
@ -3172,12 +3205,42 @@ void TemplateTable::invokeinterface(int byte_no) {
InterpreterRuntime::throw_IncompatibleClassChangeError));
// the call_VM checks for exception, so we should never return here.
__ should_not_reach_here();
return;
}


void TemplateTable::invokehandle(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f12_oop, "use this argument");
const Register rbx_method = rbx; // f2
const Register rax_mtype = rax; // f1
const Register rcx_recv = rcx;
const Register rdx_flags = rdx;

if (!EnableInvokeDynamic) {
// rewriter does not generate this bytecode
__ should_not_reach_here();
return;
}

prepare_invoke(byte_no,
rbx_method, rax_mtype, // get f2 methodOop, f1 MethodType
rcx_recv);
__ verify_oop(rbx_method);
__ verify_oop(rcx_recv);
__ null_check(rcx_recv);

// Note: rax_mtype is already pushed (if necessary) by prepare_invoke

// FIXME: profile the LambdaForm also
__ profile_final_call(rax);

__ jump_from_interpreted(rbx_method, rdx);
}


void TemplateTable::invokedynamic(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f1_oop, "use this argument");
assert(byte_no == f12_oop, "use this argument");

if (!EnableInvokeDynamic) {
// We should not encounter this bytecode if !EnableInvokeDynamic.
@ -3190,26 +3253,23 @@ void TemplateTable::invokedynamic(int byte_no) {
return;
}

prepare_invoke(rax, rbx, byte_no);
const Register rbx_method = rbx;
const Register rax_callsite = rax;

// rax: CallSite object (f1)
// rbx: unused (f2)
// rcx: receiver address
// rdx: flags (unused)
prepare_invoke(byte_no, rbx_method, rax_callsite);

Register rax_callsite = rax;
Register rcx_method_handle = rcx;
// rax: CallSite object (from f1)
// rbx: MH.linkToCallSite method (from f2)

// Note: rax_callsite is already pushed by prepare_invoke

// %%% should make a type profile for any invokedynamic that takes a ref argument
// profile this call
__ profile_call(r13);

__ verify_oop(rax_callsite);
__ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rdx)));
__ null_check(rcx_method_handle);
__ verify_oop(rcx_method_handle);
__ prepare_to_jump_from_interpreted();
__ jump_to_method_handle_entry(rcx_method_handle, rdx);

__ jump_from_interpreted(rbx_method, rdx);
}

@ -25,7 +25,12 @@
#ifndef CPU_X86_VM_TEMPLATETABLE_X86_64_HPP
#define CPU_X86_VM_TEMPLATETABLE_X86_64_HPP

static void prepare_invoke(Register method, Register index, int byte_no);
static void prepare_invoke(int byte_no,
Register method, // linked method (or i-klass)
Register index = noreg, // itable index, MethodType, etc.
Register recv = noreg, // if caller wants to see it
Register flags = noreg // if caller wants to test it
);
static void invokevirtual_helper(Register index, Register recv,
Register flags);
static void volatile_barrier(Assembler::Membar_mask_bits order_constraint);
@ -76,8 +76,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
// get receiver klass
address npe_addr = __ pc();
__ movptr(rax, Address(rcx, oopDesc::klass_offset_in_bytes()));
// compute entry offset (in words)
int entry_offset = instanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();

#ifndef PRODUCT
if (DebugVtables) {
Label L;
@ -93,7 +92,8 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
const Register method = rbx;

// load methodOop and target address
__ movptr(method, Address(rax, entry_offset*wordSize + vtableEntry::method_offset_in_bytes()));
__ lookup_virtual_method(rax, vtable_index, method);

if (DebugVtables) {
Label L;
__ cmpptr(method, (int32_t)NULL_WORD);
@ -69,10 +69,6 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
address npe_addr = __ pc();
__ load_klass(rax, j_rarg0);

// compute entry offset (in words)
int entry_offset =
instanceKlass::vtable_start_offset() + vtable_index * vtableEntry::size();

#ifndef PRODUCT
if (DebugVtables) {
Label L;
@ -90,9 +86,8 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
// load methodOop and target address
const Register method = rbx;

__ movptr(method, Address(rax,
entry_offset * wordSize +
vtableEntry::method_offset_in_bytes()));
__ lookup_virtual_method(rax, vtable_index, method);

if (DebugVtables) {
Label L;
__ cmpptr(method, (int32_t)NULL_WORD);
@ -646,16 +646,15 @@ int CppInterpreter::method_handle_entry(methodOop method,
oop method_type = (oop) p;

// The MethodHandle is in the slot after the arguments
oop form = java_lang_invoke_MethodType::form(method_type);
int num_vmslots = java_lang_invoke_MethodTypeForm::vmslots(form);
assert(argument_slots == num_vmslots + 1, "should be");
int num_vmslots = argument_slots - 1;
oop method_handle = VMSLOTS_OBJECT(num_vmslots);

// InvokeGeneric requires some extra shuffling
oop mhtype = java_lang_invoke_MethodHandle::type(method_handle);
bool is_exact = mhtype == method_type;
if (!is_exact) {
if (method->intrinsic_id() == vmIntrinsics::_invokeExact) {
if (true || // FIXME
method->intrinsic_id() == vmIntrinsics::_invokeExact) {
CALL_VM_NOCHECK_NOFIX(
SharedRuntime::throw_WrongMethodTypeException(
thread, method_type, mhtype));
@ -670,8 +669,8 @@ int CppInterpreter::method_handle_entry(methodOop method,
// NB the x86 code for this (in methodHandles_x86.cpp, search for
// "genericInvoker") is really really odd. I'm hoping it's trying
// to accomodate odd VM/class library combinations I can ignore.
oop adapter = java_lang_invoke_MethodTypeForm::genericInvoker(form);
if (adapter == NULL) {
oop adapter = NULL; //FIXME: load the adapter from the CP cache
if (adapter == NULL) {
CALL_VM_NOCHECK_NOFIX(
SharedRuntime::throw_WrongMethodTypeException(
thread, method_type, mhtype));
@ -761,7 +760,7 @@ void CppInterpreter::process_method_handle(oop method_handle, TRAPS) {
return;
}
if (entry_kind != MethodHandles::_invokespecial_mh) {
int index = java_lang_invoke_DirectMethodHandle::vmindex(method_handle);
intptr_t index = java_lang_invoke_DirectMethodHandle::vmindex(method_handle);
instanceKlass* rcvrKlass =
(instanceKlass *) receiver->klass()->klass_part();
if (entry_kind == MethodHandles::_invokevirtual_mh) {
@ -1179,8 +1178,7 @@ BasicType CppInterpreter::result_type_of_handle(oop method_handle) {
intptr_t* CppInterpreter::calculate_unwind_sp(ZeroStack* stack,
oop method_handle) {
oop method_type = java_lang_invoke_MethodHandle::type(method_handle);
oop form = java_lang_invoke_MethodType::form(method_type);
int argument_slots = java_lang_invoke_MethodTypeForm::vmslots(form);
int argument_slots = java_lang_invoke_MethodType::ptype_slot_count(method_type);

return stack->sp() + argument_slots;
}
@ -38,6 +38,5 @@
address generate_empty_entry();
address generate_accessor_entry();
address generate_Reference_get_entry();
address generate_method_handle_entry();

#endif // CPU_ZERO_VM_INTERPRETERGENERATOR_ZERO_HPP
@ -70,14 +70,6 @@ address InterpreterGenerator::generate_abstract_entry() {
return generate_entry((address) ShouldNotCallThisEntry());
}

address InterpreterGenerator::generate_method_handle_entry() {
#ifdef CC_INTERP
return generate_entry((address) CppInterpreter::method_handle_entry);
#else
return generate_entry((address) ShouldNotCallThisEntry());
#endif // CC_INTERP
}

bool AbstractInterpreter::can_be_compiled(methodHandle m) {
return true;
}
@ -201,13 +201,23 @@ int VM_Version::platform_features(int features) {
impl[i] = (char)toupper((uint)impl[i]);
if (strstr(impl, "SPARC64") != NULL) {
features |= sparc64_family_m;
} else if (strstr(impl, "SPARC-M") != NULL) {
// M-series SPARC is based on T-series.
features |= (M_family_m | T_family_m);
} else if (strstr(impl, "SPARC-T") != NULL) {
features |= T_family_m;
if (strstr(impl, "SPARC-T1") != NULL) {
features |= T1_model_m;
}
} else {
assert(strstr(impl, "SPARC") != NULL, "should be sparc");
if (strstr(impl, "SPARC") == NULL) {
#ifndef PRODUCT
// kstat on Solaris 8 virtual machines (branded zones)
// returns "(unsupported)" implementation.
warning("kstat cpu_info implementation = '%s', should contain SPARC", impl);
#endif
implementation = "SPARC";
}
}
free((void*)impl);
break;
@ -674,16 +674,19 @@ void gen_inst_format(FILE *fp, FormDict &globals, InstructForm &inst, bool for_c
else if( inst.is_ideal_mem() ) {
// Print out the field name if available to improve readability
fprintf(fp, " if (ra->C->alias_type(adr_type())->field() != NULL) {\n");
fprintf(fp, " st->print(\" ! Field \");\n");
fprintf(fp, " if( ra->C->alias_type(adr_type())->is_volatile() )\n");
fprintf(fp, " st->print(\" Volatile\");\n");
fprintf(fp, " ra->C->alias_type(adr_type())->field()->holder()->name()->print_symbol_on(st);\n");
fprintf(fp, " ciField* f = ra->C->alias_type(adr_type())->field();\n");
fprintf(fp, " st->print(\" ! Field: \");\n");
fprintf(fp, " if (f->is_volatile())\n");
fprintf(fp, " st->print(\"volatile \");\n");
fprintf(fp, " f->holder()->name()->print_symbol_on(st);\n");
fprintf(fp, " st->print(\".\");\n");
fprintf(fp, " ra->C->alias_type(adr_type())->field()->name()->print_symbol_on(st);\n");
fprintf(fp, " f->name()->print_symbol_on(st);\n");
fprintf(fp, " if (f->is_constant())\n");
fprintf(fp, " st->print(\" (constant)\");\n");
fprintf(fp, " } else\n");
// Make sure 'Volatile' gets printed out
fprintf(fp, " if( ra->C->alias_type(adr_type())->is_volatile() )\n");
fprintf(fp, " st->print(\" Volatile!\");\n");
fprintf(fp, " if (ra->C->alias_type(adr_type())->is_volatile())\n");
fprintf(fp, " st->print(\" volatile!\");\n");
}

// Complete the definition of the format function
@ -318,6 +318,16 @@ void DelayedConstant::update_all() {
}
}

RegisterOrConstant AbstractAssembler::delayed_value(int(*value_fn)(), Register tmp, int offset) {
intptr_t val = (intptr_t) (*value_fn)();
if (val != 0) return val + offset;
return delayed_value_impl(delayed_value_addr(value_fn), tmp, offset);
}
RegisterOrConstant AbstractAssembler::delayed_value(address(*value_fn)(), Register tmp, int offset) {
intptr_t val = (intptr_t) (*value_fn)();
if (val != 0) return val + offset;
return delayed_value_impl(delayed_value_addr(value_fn), tmp, offset);
}
intptr_t* AbstractAssembler::delayed_value_addr(int(*value_fn)()) {
DelayedConstant* dcon = DelayedConstant::add(T_INT, (DelayedConstant::value_fn_t) value_fn);
return &dcon->value;
@ -406,12 +406,8 @@ class AbstractAssembler : public ResourceObj {
// offsets in code which must be generated before the object class is loaded.
// Field offsets are never zero, since an object's header (mark word)
// is located at offset zero.
RegisterOrConstant delayed_value(int(*value_fn)(), Register tmp, int offset = 0) {
return delayed_value_impl(delayed_value_addr(value_fn), tmp, offset);
}
RegisterOrConstant delayed_value(address(*value_fn)(), Register tmp, int offset = 0) {
return delayed_value_impl(delayed_value_addr(value_fn), tmp, offset);
}
RegisterOrConstant delayed_value(int(*value_fn)(), Register tmp, int offset = 0);
RegisterOrConstant delayed_value(address(*value_fn)(), Register tmp, int offset = 0);
virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr, Register tmp, int offset) = 0;
// Last overloading is platform-dependent; look in assembler_<arch>.cpp.
static intptr_t* delayed_value_addr(int(*constant_fn)());
@ -103,7 +103,8 @@ inline void assert_different_registers(
) {
assert(
a != b,
"registers must be different"
err_msg("registers must be different: a=%d, b=%d",
a, b)
);
}

@ -116,7 +117,8 @@ inline void assert_different_registers(
assert(
a != b && a != c
&& b != c,
"registers must be different"
err_msg("registers must be different: a=%d, b=%d, c=%d",
a, b, c)
);
}

@ -131,7 +133,8 @@ inline void assert_different_registers(
a != b && a != c && a != d
&& b != c && b != d
&& c != d,
"registers must be different"
err_msg("registers must be different: a=%d, b=%d, c=%d, d=%d",
a, b, c, d)
);
}

@ -148,7 +151,8 @@ inline void assert_different_registers(
&& b != c && b != d && b != e
&& c != d && c != e
&& d != e,
"registers must be different"
err_msg("registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d",
a, b, c, d, e)
);
}

@ -167,7 +171,8 @@ inline void assert_different_registers(
&& c != d && c != e && c != f
&& d != e && d != f
&& e != f,
"registers must be different"
err_msg("registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d, f=%d",
a, b, c, d, e, f)
);
}

@ -188,7 +193,8 @@ inline void assert_different_registers(
&& d != e && d != f && d != g
&& e != f && e != g
&& f != g,
"registers must be different"
err_msg("registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d, f=%d, g=%d",
a, b, c, d, e, f, g)
);
}

@ -211,7 +217,34 @@ inline void assert_different_registers(
&& e != f && e != g && e != h
&& f != g && f != h
&& g != h,
"registers must be different"
err_msg("registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d, f=%d, g=%d, h=%d",
a, b, c, d, e, f, g, h)
);
}


inline void assert_different_registers(
AbstractRegister a,
AbstractRegister b,
AbstractRegister c,
AbstractRegister d,
AbstractRegister e,
AbstractRegister f,
AbstractRegister g,
AbstractRegister h,
AbstractRegister i
) {
assert(
a != b && a != c && a != d && a != e && a != f && a != g && a != h && a != i
&& b != c && b != d && b != e && b != f && b != g && b != h && b != i
&& c != d && c != e && c != f && c != g && c != h && c != i
&& d != e && d != f && d != g && d != h && d != i
&& e != f && e != g && e != h && e != i
&& f != g && f != h && f != i
&& g != h && g != i
&& h != i,
err_msg("registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d, f=%d, g=%d, h=%d, i=%d",
a, b, c, d, e, f, g, h, i)
);
}

@ -567,6 +567,7 @@ void Canonicalizer::do_NullCheck (NullCheck* x) {
}
}

void Canonicalizer::do_TypeCast (TypeCast* x) {}
void Canonicalizer::do_Invoke (Invoke* x) {}
void Canonicalizer::do_NewInstance (NewInstance* x) {}
void Canonicalizer::do_NewTypeArray (NewTypeArray* x) {}

@ -74,6 +74,7 @@ class Canonicalizer: InstructionVisitor {
virtual void do_IfInstanceOf (IfInstanceOf* x);
virtual void do_Convert (Convert* x);
virtual void do_NullCheck (NullCheck* x);
virtual void do_TypeCast (TypeCast* x);
virtual void do_Invoke (Invoke* x);
virtual void do_NewInstance (NewInstance* x);
virtual void do_NewTypeArray (NewTypeArray* x);
@ -523,7 +523,7 @@ void Compilation::bailout(const char* msg) {
assert(msg != NULL, "bailout message must exist");
if (!bailed_out()) {
// keep first bailout message
if (PrintBailouts) tty->print_cr("compilation bailout: %s", msg);
if (PrintCompilation || PrintBailouts) tty->print_cr("compilation bailout: %s", msg);
_bailout_msg = msg;
}
}
@ -92,7 +92,6 @@ CallingConvention* FrameMap::java_calling_convention(const BasicTypeArray* signa
for (i = 0; i < sizeargs;) {
BasicType t = sig_bt[i];
assert(t != T_VOID, "should be skipping these");

LIR_Opr opr = map_to_opr(t, regs + i, outgoing);
args->append(opr);
if (opr->is_address()) {
@ -181,8 +181,8 @@ class FrameMap : public CompilationResourceObj {

// for outgoing calls, these also update the reserved area to
// include space for arguments and any ABI area.
CallingConvention* c_calling_convention (const BasicTypeArray* signature);
CallingConvention* java_calling_convention (const BasicTypeArray* signature, bool outgoing);
CallingConvention* c_calling_convention(const BasicTypeArray* signature);
CallingConvention* java_calling_convention(const BasicTypeArray* signature, bool outgoing);

// deopt support
ByteSize sp_offset_for_orig_pc() { return sp_offset_for_monitor_base(_num_monitors); }
@@ -31,7 +31,7 @@
#include "ci/ciCallSite.hpp"
#include "ci/ciField.hpp"
#include "ci/ciKlass.hpp"
#include "ci/ciMethodHandle.hpp"
#include "ci/ciMemberName.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/bytecode.hpp"
#include "runtime/sharedRuntime.hpp"
@@ -914,11 +914,11 @@ void GraphBuilder::load_local(ValueType* type, int index) {

void GraphBuilder::store_local(ValueType* type, int index) {
Value x = pop(type);
store_local(state(), x, type, index);
store_local(state(), x, index);
}


void GraphBuilder::store_local(ValueStack* state, Value x, ValueType* type, int index) {
void GraphBuilder::store_local(ValueStack* state, Value x, int index) {
if (parsing_jsr()) {
// We need to do additional tracking of the location of the return
// address for jsrs since we don't handle arbitrary jsr/ret
@@ -1535,7 +1535,7 @@ void GraphBuilder::access_field(Bytecodes::Code code) {
case T_ARRAY:
case T_OBJECT:
if (field_val.as_object()->should_be_constant()) {
constant = new Constant(as_ValueType(field_val));
constant = new Constant(as_ValueType(field_val));
}
break;

@@ -1562,12 +1562,53 @@ void GraphBuilder::access_field(Bytecodes::Code code) {
append(new StoreField(append(obj), offset, field, val, true, state_before, needs_patching));
}
break;
case Bytecodes::_getfield :
{
case Bytecodes::_getfield: {
// Check for compile-time constants, i.e., trusted final non-static fields.
Instruction* constant = NULL;
obj = apop();
ObjectType* obj_type = obj->type()->as_ObjectType();
if (obj_type->is_constant() && !PatchALot) {
ciObject* const_oop = obj_type->constant_value();
if (!const_oop->is_null_object()) {
if (field->is_constant()) {
ciConstant field_val = field->constant_value_of(const_oop);
BasicType field_type = field_val.basic_type();
switch (field_type) {
case T_ARRAY:
case T_OBJECT:
if (field_val.as_object()->should_be_constant()) {
constant = new Constant(as_ValueType(field_val));
}
break;
default:
constant = new Constant(as_ValueType(field_val));
}
} else {
// For CallSite objects treat the target field as a compile time constant.
if (const_oop->is_call_site()) {
ciCallSite* call_site = const_oop->as_call_site();
if (field->is_call_site_target()) {
ciMethodHandle* target = call_site->get_target();
if (target != NULL) { // just in case
ciConstant field_val(T_OBJECT, target);
constant = new Constant(as_ValueType(field_val));
// Add a dependence for invalidation of the optimization.
if (!call_site->is_constant_call_site()) {
dependency_recorder()->assert_call_site_target_value(call_site, target);
}
}
}
}
}
}
}
if (constant != NULL) {
push(type, append(constant));
} else {
if (state_before == NULL) {
state_before = copy_state_for_exception();
}
LoadField* load = new LoadField(apop(), offset, field, false, state_before, needs_patching);
LoadField* load = new LoadField(obj, offset, field, false, state_before, needs_patching);
Value replacement = !needs_patching ? _memory->load(load) : load;
if (replacement != load) {
assert(replacement->is_linked() || !replacement->can_be_linked(), "should already by linked");
@@ -1575,22 +1616,23 @@ void GraphBuilder::access_field(Bytecodes::Code code) {
} else {
push(type, append(load));
}
break;
}

case Bytecodes::_putfield :
{ Value val = pop(type);
if (state_before == NULL) {
state_before = copy_state_for_exception();
}
StoreField* store = new StoreField(apop(), offset, field, val, false, state_before, needs_patching);
if (!needs_patching) store = _memory->store(store);
if (store != NULL) {
append(store);
}
}
break;
default :
}
case Bytecodes::_putfield: {
Value val = pop(type);
obj = apop();
if (state_before == NULL) {
state_before = copy_state_for_exception();
}
StoreField* store = new StoreField(obj, offset, field, val, false, state_before, needs_patching);
if (!needs_patching) store = _memory->store(store);
if (store != NULL) {
append(store);
}
break;
}
default:
ShouldNotReachHere();
break;
}
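Side note on the getfield hunk above: it folds trusted final fields to constants, and for a CallSite's target field it also records a dependency so the optimization is invalidated if the target changes. A minimal standalone sketch of just the folding decision, with stand-in types rather than HotSpot's ci* classes:

#include <cstdio>

// Stand-in for the constant-folding decision in the hunk above.
// These types are illustrative only, not HotSpot's.
struct FieldValue {
  bool is_object;           // T_OBJECT / T_ARRAY values
  bool should_be_constant;  // trusted to remain constant?
};

// Returns true when the field load may be replaced by a constant.
static bool can_fold(const FieldValue& v) {
  if (v.is_object) {
    return v.should_be_constant;  // objects fold only when trusted
  }
  return true;                    // primitive constants always fold
}

int main() {
  FieldValue prim = { false, false };
  FieldValue obj  = { true,  true  };
  std::printf("primitive folds: %d, trusted object folds: %d\n",
              (int)can_fold(prim), (int)can_fold(obj));
  return 0;
}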
@@ -1604,38 +1646,73 @@ Dependencies* GraphBuilder::dependency_recorder() const {


void GraphBuilder::invoke(Bytecodes::Code code) {
const bool has_receiver =
code == Bytecodes::_invokespecial ||
code == Bytecodes::_invokevirtual ||
code == Bytecodes::_invokeinterface;
const bool is_invokedynamic = (code == Bytecodes::_invokedynamic);

bool will_link;
ciMethod* target = stream()->get_method(will_link);
ciMethod* target = stream()->get_method(will_link);
ciKlass* holder = stream()->get_declared_method_holder();
const Bytecodes::Code bc_raw = stream()->cur_bc_raw();

// FIXME bail out for now
if ((bc_raw == Bytecodes::_invokehandle || is_invokedynamic) && !will_link) {
BAILOUT("unlinked call site (FIXME needs patching or recompile support)");
}

// we have to make sure the argument size (incl. the receiver)
// is correct for compilation (the call would fail later during
// linkage anyway) - was bug (gri 7/28/99)
if (target->is_loaded() && target->is_static() != (code == Bytecodes::_invokestatic)) BAILOUT("will cause link error");
{
// Use raw to get rewritten bytecode.
const bool is_invokestatic = bc_raw == Bytecodes::_invokestatic;
const bool allow_static =
is_invokestatic ||
bc_raw == Bytecodes::_invokehandle ||
bc_raw == Bytecodes::_invokedynamic;
if (target->is_loaded()) {
if (( target->is_static() && !allow_static) ||
(!target->is_static() && is_invokestatic)) {
BAILOUT("will cause link error");
}
}
}
ciInstanceKlass* klass = target->holder();

// check if CHA possible: if so, change the code to invoke_special
ciInstanceKlass* calling_klass = method()->holder();
ciKlass* holder = stream()->get_declared_method_holder();
ciInstanceKlass* callee_holder = ciEnv::get_instance_klass_for_declared_method_holder(holder);
ciInstanceKlass* actual_recv = callee_holder;

// some methods are obviously bindable without any type checks so
// convert them directly to an invokespecial.
if (target->is_loaded() && !target->is_abstract() &&
target->can_be_statically_bound() && code == Bytecodes::_invokevirtual) {
code = Bytecodes::_invokespecial;
// Some methods are obviously bindable without any type checks so
// convert them directly to an invokespecial or invokestatic.
if (target->is_loaded() && !target->is_abstract() && target->can_be_statically_bound()) {
switch (bc_raw) {
case Bytecodes::_invokevirtual: code = Bytecodes::_invokespecial; break;
case Bytecodes::_invokehandle: code = Bytecodes::_invokestatic; break;
}
}

bool is_invokedynamic = code == Bytecodes::_invokedynamic;
// Push appendix argument (MethodType, CallSite, etc.), if one.
if (stream()->has_appendix()) {
ciObject* appendix = stream()->get_appendix();
Value arg = append(new Constant(new ObjectConstant(appendix)));
apush(arg);
}

// NEEDS_CLEANUP
// I've added the target-is_loaded() test below but I don't really understand
// I've added the target->is_loaded() test below but I don't really understand
// how klass->is_loaded() can be true and yet target->is_loaded() is false.
// this happened while running the JCK invokevirtual tests under doit. TKR
ciMethod* cha_monomorphic_target = NULL;
ciMethod* exact_target = NULL;
Value better_receiver = NULL;
if (UseCHA && DeoptC1 && klass->is_loaded() && target->is_loaded() &&
!target->is_method_handle_invoke()) {
!(// %%% FIXME: Are both of these relevant?
target->is_method_handle_intrinsic() ||
target->is_compiled_lambda_form())) {
Value receiver = NULL;
ciInstanceKlass* receiver_klass = NULL;
bool type_is_exact = false;
@@ -1761,23 +1838,15 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
code == Bytecodes::_invokedynamic) {
ciMethod* inline_target = (cha_monomorphic_target != NULL) ? cha_monomorphic_target : target;
bool success = false;
if (target->is_method_handle_invoke()) {
if (target->is_method_handle_intrinsic()) {
// method handle invokes
success = !is_invokedynamic ? for_method_handle_inline(target) : for_invokedynamic_inline(target);
}
if (!success) {
success = for_method_handle_inline(target);
} else {
// static binding => check if callee is ok
success = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL), better_receiver);
success = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL), code, better_receiver);
}
CHECK_BAILOUT();

#ifndef PRODUCT
// printing
if (PrintInlining && !success) {
// if it was successfully inlined, then it was already printed.
print_inline_result(inline_target, success);
}
#endif
clear_inline_bailout();
if (success) {
// Register dependence if JVMTI has either breakpoint
@@ -1788,8 +1857,13 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
}
return;
}
} else {
print_inlining(target, "no static binding", /*success*/ false);
}
} else {
print_inlining(target, "not inlineable", /*success*/ false);
}

// If we attempted an inline which did not succeed because of a
// bailout during construction of the callee graph, the entire
// compilation has to be aborted. This is fairly rare and currently
@@ -1803,10 +1877,6 @@ void GraphBuilder::invoke(Bytecodes::Code code) {

// inlining not successful => standard invoke
bool is_loaded = target->is_loaded();
bool has_receiver =
code == Bytecodes::_invokespecial ||
code == Bytecodes::_invokevirtual ||
code == Bytecodes::_invokeinterface;
ValueType* result_type = as_ValueType(target->return_type());

// We require the debug info to be the "state before" because
@@ -1855,7 +1925,7 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
} else if (exact_target != NULL) {
target_klass = exact_target->holder();
}
profile_call(recv, target_klass);
profile_call(target, recv, target_klass);
}
}
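The invoke() hunk above widens the old invokevirtual-only devirtualization: any loaded, non-abstract, statically bindable target is rewritten, and invokehandle additionally becomes invokestatic. A compilable sketch of that decision table (illustrative enum, not HotSpot's Bytecodes):

#include <cassert>

// Illustrative bytecode ids; not HotSpot's Bytecodes enum.
enum Bc { bc_invokevirtual, bc_invokespecial, bc_invokehandle, bc_invokestatic, bc_invokeinterface };

// Mirrors the rewrite in the hunk above: only these two shapes change.
static Bc rewrite_statically_bound(Bc bc_raw, bool loaded, bool is_abstract, bool bindable) {
  if (loaded && !is_abstract && bindable) {
    switch (bc_raw) {
      case bc_invokevirtual: return bc_invokespecial;
      case bc_invokehandle:  return bc_invokestatic;
      default:               break;  // everything else is left alone
    }
  }
  return bc_raw;
}

int main() {
  assert(rewrite_statically_bound(bc_invokevirtual, true, false, true) == bc_invokespecial);
  assert(rewrite_statically_bound(bc_invokehandle,  true, false, true) == bc_invokestatic);
  assert(rewrite_statically_bound(bc_invokeinterface, true, false, true) == bc_invokeinterface);
  return 0;
}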
@@ -3097,30 +3167,61 @@ int GraphBuilder::recursive_inline_level(ciMethod* cur_callee) const {
}


bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known, Value receiver) {
// Clear out any existing inline bailout condition
bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known, Bytecodes::Code bc, Value receiver) {
const char* msg = NULL;

// clear out any existing inline bailout condition
clear_inline_bailout();

if (callee->should_exclude()) {
// callee is excluded
INLINE_BAILOUT("excluded by CompilerOracle")
} else if (callee->should_not_inline()) {
// callee is excluded
INLINE_BAILOUT("disallowed by CompilerOracle")
} else if (!callee->can_be_compiled()) {
// callee is not compilable (prob. has breakpoints)
INLINE_BAILOUT("not compilable (disabled)")
} else if (callee->intrinsic_id() != vmIntrinsics::_none && try_inline_intrinsics(callee)) {
// intrinsics can be native or not
return true;
} else if (callee->is_native()) {
// non-intrinsic natives cannot be inlined
INLINE_BAILOUT("non-intrinsic native")
} else if (callee->is_abstract()) {
INLINE_BAILOUT("abstract")
} else {
return try_inline_full(callee, holder_known, NULL, receiver);
// exclude methods we don't want to inline
msg = should_not_inline(callee);
if (msg != NULL) {
print_inlining(callee, msg, /*success*/ false);
return false;
}

// handle intrinsics
if (callee->intrinsic_id() != vmIntrinsics::_none) {
if (try_inline_intrinsics(callee)) {
print_inlining(callee, "intrinsic");
return true;
}
// try normal inlining
}

// certain methods cannot be parsed at all
msg = check_can_parse(callee);
if (msg != NULL) {
print_inlining(callee, msg, /*success*/ false);
return false;
}

// If bytecode not set use the current one.
if (bc == Bytecodes::_illegal) {
bc = code();
}
if (try_inline_full(callee, holder_known, bc, receiver))
return true;
print_inlining(callee, _inline_bailout_msg, /*success*/ false);
return false;
}


const char* GraphBuilder::check_can_parse(ciMethod* callee) const {
// Certain methods cannot be parsed at all:
if ( callee->is_native()) return "native method";
if ( callee->is_abstract()) return "abstract method";
if (!callee->can_be_compiled()) return "not compilable (disabled)";
return NULL;
}


// negative filter: should callee NOT be inlined? returns NULL, ok to inline, or rejection msg
const char* GraphBuilder::should_not_inline(ciMethod* callee) const {
if ( callee->should_exclude()) return "excluded by CompilerOracle";
if ( callee->should_not_inline()) return "disallowed by CompilerOracle";
if ( callee->dont_inline()) return "don't inline by annotation";
return NULL;
}
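The try_inline() rewrite above replaces a chain of INLINE_BAILOUT branches with small const char* predicates: NULL means no objection, anything else is the printable rejection reason. A compilable sketch of the same pattern with stand-in types (not ciMethod):

#include <cstdio>

// Stand-in callee; illustrative only.
struct Callee {
  bool is_native;
  bool is_abstract;
  bool can_be_compiled;
};

// NULL means "ok to proceed"; otherwise a printable rejection reason.
static const char* check_can_parse(const Callee& c) {
  if (c.is_native)         return "native method";
  if (c.is_abstract)       return "abstract method";
  if (!c.can_be_compiled)  return "not compilable (disabled)";
  return 0;
}

int main() {
  Callee ok  = { false, false, true };
  Callee bad = { true,  false, true };
  const char* msg = check_can_parse(ok);
  std::printf("ok callee: %s\n", msg ? msg : "no objection");
  msg = check_can_parse(bad);
  std::printf("bad callee: %s\n", msg ? msg : "no objection");
  return 0;
}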
@@ -3304,7 +3405,7 @@ bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) {
recv = args->at(0);
null_check(recv);
}
profile_call(recv, NULL);
profile_call(callee, recv, NULL);
}
}
}
@@ -3315,13 +3416,6 @@ bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) {
Value value = append_split(result);
if (result_type != voidType) push(result_type, value);

#ifndef PRODUCT
// printing
if (PrintInlining) {
print_inline_result(callee, true);
}
#endif

// done
return true;
}
@@ -3477,7 +3571,7 @@ void GraphBuilder::fill_sync_handler(Value lock, BlockBegin* sync_handler, bool
}


bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, BlockBegin* cont_block, Value receiver) {
bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, Bytecodes::Code bc, Value receiver) {
assert(!callee->is_native(), "callee must not be native");
if (CompilationPolicy::policy()->should_not_inline(compilation()->env(), callee)) {
INLINE_BAILOUT("inlining prohibited by policy");
@@ -3508,10 +3602,10 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, BlockBeg
if (callee->force_inline() || callee->should_inline()) {
// ignore heuristic controls on inlining
if (callee->force_inline())
CompileTask::print_inlining(callee, scope()->level(), bci(), "force inline by annotation");
print_inlining(callee, "force inline by annotation");
} else {
if (inline_level() > MaxInlineLevel ) INLINE_BAILOUT("too-deep inlining");
if (recursive_inline_level(callee) > MaxRecursiveInlineLevel) INLINE_BAILOUT("too-deep recursive inlining");
if (inline_level() > MaxInlineLevel ) INLINE_BAILOUT("inlining too deep");
if (recursive_inline_level(callee) > MaxRecursiveInlineLevel) INLINE_BAILOUT("recursive inlining too deep");
if (callee->code_size_for_inlining() > max_inline_size() ) INLINE_BAILOUT("callee is too large");

// don't inline throwable methods unless the inlining tree is rooted in a throwable class
@@ -3530,28 +3624,25 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, BlockBeg
if (compilation()->env()->num_inlined_bytecodes() > DesiredMethodLimit) {
INLINE_BAILOUT("total inlining greater than DesiredMethodLimit");
}
// printing
print_inlining(callee, "");
}

#ifndef PRODUCT
// printing
if (PrintInlining) {
print_inline_result(callee, true);
}
#endif

// NOTE: Bailouts from this point on, which occur at the
// GraphBuilder level, do not cause bailout just of the inlining but
// in fact of the entire compilation.

BlockBegin* orig_block = block();

const bool is_invokedynamic = bc == Bytecodes::_invokedynamic;
const bool has_receiver = (bc != Bytecodes::_invokestatic && !is_invokedynamic);

const int args_base = state()->stack_size() - callee->arg_size();
assert(args_base >= 0, "stack underflow during inlining");

// Insert null check if necessary
Value recv = NULL;
if (code() != Bytecodes::_invokestatic &&
code() != Bytecodes::_invokedynamic) {
if (has_receiver) {
// note: null check must happen even if first instruction of callee does
// an implicit null check since the callee is in a different scope
// and we must make sure exception handling does the right thing
@@ -3567,7 +3658,7 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, BlockBeg
compilation()->set_would_profile(true);

if (profile_calls()) {
profile_call(recv, holder_known ? callee->holder() : NULL);
profile_call(callee, recv, holder_known ? callee->holder() : NULL);
}
}

@@ -3576,7 +3667,7 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, BlockBeg
// fall-through of control flow, all return instructions of the
// callee will need to be replaced by Goto's pointing to this
// continuation point.
BlockBegin* cont = cont_block != NULL ? cont_block : block_at(next_bci());
BlockBegin* cont = block_at(next_bci());
bool continuation_existed = true;
if (cont == NULL) {
cont = new BlockBegin(next_bci());
@@ -3609,17 +3700,10 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, BlockBeg
// note: this will also ensure that all arguments are computed before being passed
ValueStack* callee_state = state();
ValueStack* caller_state = state()->caller_state();
{ int i = args_base;
while (i < caller_state->stack_size()) {
const int par_no = i - args_base;
Value arg = caller_state->stack_at_inc(i);
// NOTE: take base() of arg->type() to avoid problems storing
// constants
if (receiver != NULL && par_no == 0) {
arg = receiver;
}
store_local(callee_state, arg, arg->type()->base(), par_no);
}
for (int i = args_base; i < caller_state->stack_size(); ) {
const int arg_no = i - args_base;
Value arg = caller_state->stack_at_inc(i);
store_local(callee_state, arg, arg_no);
}

// Remove args from stack.
@@ -3695,29 +3779,27 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, BlockBeg
// block merging. This allows load elimination and CSE to take place
// across multiple callee scopes if they are relatively simple, and
// is currently essential to making inlining profitable.
if (cont_block == NULL) {
if (num_returns() == 1
&& block() == orig_block
&& block() == inline_cleanup_block()) {
_last = inline_cleanup_return_prev();
_state = inline_cleanup_state();
} else if (continuation_preds == cont->number_of_preds()) {
// Inlining caused that the instructions after the invoke in the
// caller are not reachable any more. So skip filling this block
// with instructions!
assert(cont == continuation(), "");
if (num_returns() == 1
&& block() == orig_block
&& block() == inline_cleanup_block()) {
_last = inline_cleanup_return_prev();
_state = inline_cleanup_state();
} else if (continuation_preds == cont->number_of_preds()) {
// Inlining caused that the instructions after the invoke in the
// caller are not reachable any more. So skip filling this block
// with instructions!
assert(cont == continuation(), "");
assert(_last && _last->as_BlockEnd(), "");
_skip_block = true;
} else {
// Resume parsing in continuation block unless it was already parsed.
// Note that if we don't change _last here, iteration in
// iterate_bytecodes_for_block will stop when we return.
if (!continuation()->is_set(BlockBegin::was_visited_flag)) {
// add continuation to work list instead of parsing it immediately
assert(_last && _last->as_BlockEnd(), "");
scope_data()->parent()->add_to_work_list(continuation());
_skip_block = true;
} else {
// Resume parsing in continuation block unless it was already parsed.
// Note that if we don't change _last here, iteration in
// iterate_bytecodes_for_block will stop when we return.
if (!continuation()->is_set(BlockBegin::was_visited_flag)) {
// add continuation to work list instead of parsing it immediately
assert(_last && _last->as_BlockEnd(), "");
scope_data()->parent()->add_to_work_list(continuation());
_skip_block = true;
}
}
}
@@ -3735,114 +3817,88 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, BlockBeg


bool GraphBuilder::for_method_handle_inline(ciMethod* callee) {
assert(!callee->is_static(), "change next line");
int index = state()->stack_size() - (callee->arg_size_no_receiver() + 1);
Value receiver = state()->stack_at(index);

if (receiver->type()->is_constant()) {
ciMethodHandle* method_handle = receiver->type()->as_ObjectType()->constant_value()->as_method_handle();

// Set the callee to have access to the class and signature in
// the MethodHandleCompiler.
method_handle->set_callee(callee);
method_handle->set_caller(method());

// Get an adapter for the MethodHandle.
ciMethod* method_handle_adapter = method_handle->get_method_handle_adapter();
if (method_handle_adapter != NULL) {
return try_inline(method_handle_adapter, /*holder_known=*/ true);
}
} else if (receiver->as_CheckCast()) {
// Match MethodHandle.selectAlternative idiom
Phi* phi = receiver->as_CheckCast()->obj()->as_Phi();

if (phi != NULL && phi->operand_count() == 2) {
// Get the two MethodHandle inputs from the Phi.
Value op1 = phi->operand_at(0);
Value op2 = phi->operand_at(1);
ObjectType* op1type = op1->type()->as_ObjectType();
ObjectType* op2type = op2->type()->as_ObjectType();

if (op1type->is_constant() && op2type->is_constant()) {
ciMethodHandle* mh1 = op1type->constant_value()->as_method_handle();
ciMethodHandle* mh2 = op2type->constant_value()->as_method_handle();

// Set the callee to have access to the class and signature in
// the MethodHandleCompiler.
mh1->set_callee(callee);
mh1->set_caller(method());
mh2->set_callee(callee);
mh2->set_caller(method());

// Get adapters for the MethodHandles.
ciMethod* mh1_adapter = mh1->get_method_handle_adapter();
ciMethod* mh2_adapter = mh2->get_method_handle_adapter();

if (mh1_adapter != NULL && mh2_adapter != NULL) {
set_inline_cleanup_info();

// Build the If guard
BlockBegin* one = new BlockBegin(next_bci());
BlockBegin* two = new BlockBegin(next_bci());
BlockBegin* end = new BlockBegin(next_bci());
Instruction* iff = append(new If(phi, If::eql, false, op1, one, two, NULL, false));
block()->set_end(iff->as_BlockEnd());

// Connect up the states
one->merge(block()->end()->state());
two->merge(block()->end()->state());

// Save the state for the second inlinee
ValueStack* state_before = copy_state_before();

// Parse first adapter
_last = _block = one;
if (!try_inline_full(mh1_adapter, /*holder_known=*/ true, end, NULL)) {
restore_inline_cleanup_info();
block()->clear_end(); // remove appended iff
return false;
}

// Parse second adapter
_last = _block = two;
_state = state_before;
if (!try_inline_full(mh2_adapter, /*holder_known=*/ true, end, NULL)) {
restore_inline_cleanup_info();
block()->clear_end(); // remove appended iff
return false;
}

connect_to_end(end);
ValueStack* state_before = state()->copy_for_parsing();
vmIntrinsics::ID iid = callee->intrinsic_id();
switch (iid) {
case vmIntrinsics::_invokeBasic:
{
// get MethodHandle receiver
const int args_base = state()->stack_size() - callee->arg_size();
ValueType* type = state()->stack_at(args_base)->type();
if (type->is_constant()) {
ciMethod* target = type->as_ObjectType()->constant_value()->as_method_handle()->get_vmtarget();
guarantee(!target->is_method_handle_intrinsic(), "should not happen"); // XXX remove
Bytecodes::Code bc = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokevirtual;
if (try_inline(target, /*holder_known*/ true, bc)) {
return true;
}
} else {
print_inlining(callee, "receiver not constant", /*success*/ false);
}
}
}
return false;
}
break;


bool GraphBuilder::for_invokedynamic_inline(ciMethod* callee) {
// Get the MethodHandle from the CallSite.
ciCallSite* call_site = stream()->get_call_site();
ciMethodHandle* method_handle = call_site->get_target();

// Set the callee to have access to the class and signature in the
// MethodHandleCompiler.
method_handle->set_callee(callee);
method_handle->set_caller(method());

// Get an adapter for the MethodHandle.
ciMethod* method_handle_adapter = method_handle->get_invokedynamic_adapter();
if (method_handle_adapter != NULL) {
if (try_inline(method_handle_adapter, /*holder_known=*/ true)) {
// Add a dependence for invalidation of the optimization.
if (!call_site->is_constant_call_site()) {
dependency_recorder()->assert_call_site_target_value(call_site, method_handle);
case vmIntrinsics::_linkToVirtual:
case vmIntrinsics::_linkToStatic:
case vmIntrinsics::_linkToSpecial:
case vmIntrinsics::_linkToInterface:
{
// pop MemberName argument
const int args_base = state()->stack_size() - callee->arg_size();
ValueType* type = apop()->type();
if (type->is_constant()) {
ciMethod* target = type->as_ObjectType()->constant_value()->as_member_name()->get_vmtarget();
// If the target is another method handle invoke try recursivly to get
// a better target.
if (target->is_method_handle_intrinsic()) {
if (for_method_handle_inline(target)) {
return true;
}
} else {
ciSignature* signature = target->signature();
const int receiver_skip = target->is_static() ? 0 : 1;
// Cast receiver to its type.
if (!target->is_static()) {
ciKlass* tk = signature->accessing_klass();
Value obj = state()->stack_at(args_base);
if (obj->exact_type() == NULL &&
obj->declared_type() != tk && tk != compilation()->env()->Object_klass()) {
TypeCast* c = new TypeCast(tk, obj, state_before);
append(c);
state()->stack_at_put(args_base, c);
}
}
// Cast reference arguments to its type.
for (int i = 0, j = 0; i < signature->count(); i++) {
ciType* t = signature->type_at(i);
if (t->is_klass()) {
ciKlass* tk = t->as_klass();
Value obj = state()->stack_at(args_base + receiver_skip + j);
if (obj->exact_type() == NULL &&
obj->declared_type() != tk && tk != compilation()->env()->Object_klass()) {
TypeCast* c = new TypeCast(t, obj, state_before);
append(c);
state()->stack_at_put(args_base + receiver_skip + j, c);
}
}
j += t->size(); // long and double take two slots
}
Bytecodes::Code bc = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokevirtual;
if (try_inline(target, /*holder_known*/ true, bc)) {
return true;
}
}
} else {
print_inlining(callee, "MemberName not constant", /*success*/ false);
}
return true;
}
break;

default:
fatal(err_msg("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
break;
}
set_state(state_before);
return false;
}
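In the linkTo* case above, the argument-casting loop advances a second index j by each signature type's slot size, so long and double (two slots) keep the stack offsets of the following arguments aligned. A standalone model of that idiom (illustrative types, not ciSignature):

#include <cstdio>

// Illustrative stand-in for one signature entry; not ciSignature.
struct ArgType {
  bool is_reference;
  int  slots;  // 1, or 2 for long/double
};

static void walk_args(const ArgType* sig, int count, int args_base, int receiver_skip) {
  for (int i = 0, j = 0; i < count; i++) {
    if (sig[i].is_reference) {
      // this is the slot a TypeCast would be applied to in the hunk above
      std::printf("reference arg %d at stack slot %d\n", i, args_base + receiver_skip + j);
    }
    j += sig[i].slots;  // long and double take two slots
  }
}

int main() {
  // (Object, long, Object) with a receiver in slot 0
  ArgType sig[] = { { true, 1 }, { false, 2 }, { true, 1 } };
  walk_args(sig, 3, /*args_base*/ 0, /*receiver_skip*/ 1);
  return 0;
}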
@@ -4034,22 +4090,24 @@ void GraphBuilder::append_unsafe_CAS(ciMethod* callee) {
}


#ifndef PRODUCT
void GraphBuilder::print_inline_result(ciMethod* callee, bool res) {
CompileTask::print_inlining(callee, scope()->level(), bci(), _inline_bailout_msg);
if (res && CIPrintMethodCodes) {
void GraphBuilder::print_inlining(ciMethod* callee, const char* msg, bool success) {
if (!PrintInlining) return;
assert(msg != NULL, "must be");
CompileTask::print_inlining(callee, scope()->level(), bci(), msg);
if (success && CIPrintMethodCodes) {
callee->print_codes();
}
}


#ifndef PRODUCT
void GraphBuilder::print_stats() {
vmap()->print();
}
#endif // PRODUCT

void GraphBuilder::profile_call(Value recv, ciKlass* known_holder) {
append(new ProfileCall(method(), bci(), recv, known_holder));
void GraphBuilder::profile_call(ciMethod* callee, Value recv, ciKlass* known_holder) {
append(new ProfileCall(method(), bci(), callee, recv, known_holder));
}

void GraphBuilder::profile_invocation(ciMethod* callee, ValueStack* state) {
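The profiling change above threads the callee through every layer — GraphBuilder::profile_call, the ProfileCall node, and LIR_OpProfileCall — instead of recording only the receiver. A toy model of the widened record (stand-in structs, not the VM's classes):

#include <cstdio>

struct Method { const char* name; };  // stand-in for ciMethod

// Models the extended ProfileCall payload from the hunks above.
struct ProfileCallRecord {
  Method* method;  // the profiled (calling) method
  int     bci;     // bytecode index of the invoke
  Method* callee;  // new field: the method called at that bci
};

int main() {
  Method caller = { "caller" };
  Method callee = { "callee" };
  ProfileCallRecord rec = { &caller, 42, &callee };
  std::printf("profile %s@%d -> %s\n", rec.method->name, rec.bci, rec.callee->name);
  return 0;
}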
@@ -225,7 +225,7 @@ class GraphBuilder VALUE_OBJ_CLASS_SPEC {
void load_constant();
void load_local(ValueType* type, int index);
void store_local(ValueType* type, int index);
void store_local(ValueStack* state, Value value, ValueType* type, int index);
void store_local(ValueStack* state, Value value, int index);
void load_indexed (BasicType type);
void store_indexed(BasicType type);
void stack_op(Bytecodes::Code code);
@@ -337,14 +337,16 @@ class GraphBuilder VALUE_OBJ_CLASS_SPEC {
void fill_sync_handler(Value lock, BlockBegin* sync_handler, bool default_handler = false);

// inliners
bool try_inline( ciMethod* callee, bool holder_known, Value receiver = NULL);
bool try_inline( ciMethod* callee, bool holder_known, Bytecodes::Code bc = Bytecodes::_illegal, Value receiver = NULL);
bool try_inline_intrinsics(ciMethod* callee);
bool try_inline_full( ciMethod* callee, bool holder_known, BlockBegin* cont_block, Value receiver);
bool try_inline_full( ciMethod* callee, bool holder_known, Bytecodes::Code bc = Bytecodes::_illegal, Value receiver = NULL);
bool try_inline_jsr(int jsr_dest_bci);

const char* check_can_parse(ciMethod* callee) const;
const char* should_not_inline(ciMethod* callee) const;

// JSR 292 support
bool for_method_handle_inline(ciMethod* callee);
bool for_invokedynamic_inline(ciMethod* callee);

// helpers
void inline_bailout(const char* msg);
@@ -366,9 +368,9 @@ class GraphBuilder VALUE_OBJ_CLASS_SPEC {
bool append_unsafe_prefetch(ciMethod* callee, bool is_store, bool is_static);
void append_unsafe_CAS(ciMethod* callee);

NOT_PRODUCT(void print_inline_result(ciMethod* callee, bool res);)
void print_inlining(ciMethod* callee, const char* msg, bool success = true);

void profile_call(Value recv, ciKlass* predicted_holder);
void profile_call(ciMethod* callee, Value recv, ciKlass* predicted_holder);
void profile_invocation(ciMethod* inlinee, ValueStack* state);

// Shortcuts to profiling control.
@@ -161,6 +161,12 @@ ciType* Local::exact_type() const {
return NULL;
}

ciType* Constant::exact_type() const {
if (type()->is_object()) {
return type()->as_ObjectType()->exact_type();
}
return NULL;
}

ciType* LoadIndexed::exact_type() const {
ciType* array_type = array()->exact_type();
@@ -66,6 +66,7 @@ class CompareOp;
class IfOp;
class Convert;
class NullCheck;
class TypeCast;
class OsrEntry;
class ExceptionObject;
class StateSplit;
@@ -174,6 +175,7 @@ class InstructionVisitor: public StackObj {
virtual void do_IfOp (IfOp* x) = 0;
virtual void do_Convert (Convert* x) = 0;
virtual void do_NullCheck (NullCheck* x) = 0;
virtual void do_TypeCast (TypeCast* x) = 0;
virtual void do_Invoke (Invoke* x) = 0;
virtual void do_NewInstance (NewInstance* x) = 0;
virtual void do_NewTypeArray (NewTypeArray* x) = 0;
@@ -302,7 +304,8 @@ class Instruction: public CompilationResourceObj {

void update_exception_state(ValueStack* state);

protected:
//protected:
public:
void set_type(ValueType* type) {
assert(type != NULL, "type must exist");
_type = type;
@@ -485,6 +488,7 @@ class Instruction: public CompilationResourceObj {
virtual TypeCheck* as_TypeCheck() { return NULL; }
virtual CheckCast* as_CheckCast() { return NULL; }
virtual InstanceOf* as_InstanceOf() { return NULL; }
virtual TypeCast* as_TypeCast() { return NULL; }
virtual AccessMonitor* as_AccessMonitor() { return NULL; }
virtual MonitorEnter* as_MonitorEnter() { return NULL; }
virtual MonitorExit* as_MonitorExit() { return NULL; }
@@ -638,8 +642,8 @@ LEAF(Local, Instruction)
// accessors
int java_index() const { return _java_index; }

ciType* declared_type() const { return _declared_type; }
ciType* exact_type() const;
virtual ciType* declared_type() const { return _declared_type; }
virtual ciType* exact_type() const;

// generic
virtual void input_values_do(ValueVisitor* f) { /* no values */ }
@@ -650,13 +654,13 @@ LEAF(Constant, Instruction)
public:
// creation
Constant(ValueType* type):
Instruction(type, NULL, true)
Instruction(type, NULL, /*type_is_constant*/ true)
{
assert(type->is_constant(), "must be a constant");
}

Constant(ValueType* type, ValueStack* state_before):
Instruction(type, state_before, true)
Instruction(type, state_before, /*type_is_constant*/ true)
{
assert(state_before != NULL, "only used for constants which need patching");
assert(type->is_constant(), "must be a constant");
@@ -670,6 +674,7 @@ LEAF(Constant, Instruction)
virtual intx hash() const;
virtual bool is_equal(Value v) const;

virtual ciType* exact_type() const;

enum CompareResult { not_comparable = -1, cond_false, cond_true };

@@ -1103,6 +1108,29 @@ LEAF(NullCheck, Instruction)
};


// This node is supposed to cast the type of another node to a more precise
// declared type.
LEAF(TypeCast, Instruction)
private:
ciType* _declared_type;
Value _obj;

public:
// The type of this node is the same type as the object type (and it might be constant).
TypeCast(ciType* type, Value obj, ValueStack* state_before)
: Instruction(obj->type(), state_before, obj->type()->is_constant()),
_declared_type(type),
_obj(obj) {}

// accessors
ciType* declared_type() const { return _declared_type; }
Value obj() const { return _obj; }

// generic
virtual void input_values_do(ValueVisitor* f) { f->visit(&_obj); }
};


BASE(StateSplit, Instruction)
private:
ValueStack* _state;
@@ -1166,6 +1194,7 @@ LEAF(Invoke, StateSplit)

// JSR 292 support
bool is_invokedynamic() const { return code() == Bytecodes::_invokedynamic; }
bool is_method_handle_intrinsic() const { return target()->is_method_handle_intrinsic(); }

virtual bool needs_exception_state() const { return false; }

@@ -2277,14 +2306,16 @@ LEAF(ProfileCall, Instruction)
private:
ciMethod* _method;
int _bci_of_invoke;
ciMethod* _callee; // the method that is called at the given bci
Value _recv;
ciKlass* _known_holder;

public:
ProfileCall(ciMethod* method, int bci, Value recv, ciKlass* known_holder)
ProfileCall(ciMethod* method, int bci, ciMethod* callee, Value recv, ciKlass* known_holder)
: Instruction(voidType)
, _method(method)
, _bci_of_invoke(bci)
, _callee(callee)
, _recv(recv)
, _known_holder(known_holder)
{
@@ -2294,6 +2325,7 @@ LEAF(ProfileCall, Instruction)

ciMethod* method() { return _method; }
int bci_of_invoke() { return _bci_of_invoke; }
ciMethod* callee() { return _callee; }
Value recv() { return _recv; }
ciKlass* known_holder() { return _known_holder; }
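The new TypeCast node above only narrows the declared type of its operand; it has no runtime semantics, which is why do_TypeCast in the LIRGenerator hunk later in this diff simply forwards the operand's result. A standalone model (illustrative types):

#include <cstdio>

struct Value { const char* declared_type; };  // stand-in IR value

// Models the TypeCast node above: type information only, no effect.
struct TypeCast {
  const char* declared_type;  // the more precise type
  Value*      obj;            // the value being narrowed
  Value* result() { return obj; }  // codegen forwards the operand
};

int main() {
  Value v = { "java/lang/Object" };
  TypeCast tc = { "java/lang/invoke/MethodHandle", &v };
  std::printf("narrow %s to %s\n", tc.obj->declared_type, tc.declared_type);
  std::printf("same underlying value: %d\n", (int)(tc.result() == &v));
  return 0;
}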
@@ -137,12 +137,16 @@ void InstructionPrinter::print_object(Value obj) {
ciMethod* m = (ciMethod*)value;
output()->print("<method %s.%s>", m->holder()->name()->as_utf8(), m->name()->as_utf8());
} else {
output()->print("<object " PTR_FORMAT ">", value->constant_encoding());
output()->print("<object " PTR_FORMAT " klass=", value->constant_encoding());
print_klass(value->klass());
output()->print(">");
}
} else if (type->as_InstanceConstant() != NULL) {
ciInstance* value = type->as_InstanceConstant()->value();
if (value->is_loaded()) {
output()->print("<instance " PTR_FORMAT ">", value->constant_encoding());
output()->print("<instance " PTR_FORMAT " klass=", value->constant_encoding());
print_klass(value->klass());
output()->print(">");
} else {
output()->print("<unloaded instance " PTR_FORMAT ">", value);
}
@@ -453,6 +457,14 @@ void InstructionPrinter::do_NullCheck(NullCheck* x) {
}


void InstructionPrinter::do_TypeCast(TypeCast* x) {
output()->print("type_cast(");
print_value(x->obj());
output()->print(") ");
print_klass(x->declared_type()->klass());
}


void InstructionPrinter::do_Invoke(Invoke* x) {
if (x->receiver() != NULL) {
print_value(x->receiver());
@@ -101,6 +101,7 @@ class InstructionPrinter: public InstructionVisitor {
virtual void do_IfOp (IfOp* x);
virtual void do_Convert (Convert* x);
virtual void do_NullCheck (NullCheck* x);
virtual void do_TypeCast (TypeCast* x);
virtual void do_Invoke (Invoke* x);
virtual void do_NewInstance (NewInstance* x);
virtual void do_NewTypeArray (NewTypeArray* x);
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
#define SHARE_VM_C1_C1_LIR_HPP

#include "c1/c1_ValueType.hpp"
#include "oops/methodOop.hpp"

class BlockBegin;
class BlockList;
@@ -1162,8 +1163,9 @@ class LIR_OpJavaCall: public LIR_OpCall {
return
is_invokedynamic() // An invokedynamic is always a MethodHandle call site.
||
(method()->holder()->name() == ciSymbol::java_lang_invoke_MethodHandle() &&
methodOopDesc::is_method_handle_invoke_name(method()->name()->sid()));
method()->is_compiled_lambda_form() // Java-generated adapter
||
method()->is_method_handle_intrinsic(); // JVM-generated MH intrinsic
}

intptr_t vtable_offset() const {
@@ -1823,18 +1825,20 @@ class LIR_OpProfileCall : public LIR_Op {

private:
ciMethod* _profiled_method;
int _profiled_bci;
LIR_Opr _mdo;
LIR_Opr _recv;
LIR_Opr _tmp1;
ciKlass* _known_holder;
int _profiled_bci;
ciMethod* _profiled_callee;
LIR_Opr _mdo;
LIR_Opr _recv;
LIR_Opr _tmp1;
ciKlass* _known_holder;

public:
// Destroys recv
LIR_OpProfileCall(LIR_Code code, ciMethod* profiled_method, int profiled_bci, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* known_holder)
LIR_OpProfileCall(LIR_Code code, ciMethod* profiled_method, int profiled_bci, ciMethod* profiled_callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* known_holder)
: LIR_Op(code, LIR_OprFact::illegalOpr, NULL) // no result, no info
, _profiled_method(profiled_method)
, _profiled_bci(profiled_bci)
, _profiled_callee(profiled_callee)
, _mdo(mdo)
, _recv(recv)
, _tmp1(t1)
@@ -1842,6 +1846,7 @@ class LIR_OpProfileCall : public LIR_Op {

ciMethod* profiled_method() const { return _profiled_method; }
int profiled_bci() const { return _profiled_bci; }
ciMethod* profiled_callee() const { return _profiled_callee; }
LIR_Opr mdo() const { return _mdo; }
LIR_Opr recv() const { return _recv; }
LIR_Opr tmp1() const { return _tmp1; }
@@ -2145,8 +2150,8 @@ class LIR_List: public CompilationResourceObj {
CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub,
ciMethod* profiled_method, int profiled_bci);
// methodDataOop profiling
void profile_call(ciMethod* method, int bci, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) {
append(new LIR_OpProfileCall(lir_profile_call, method, bci, mdo, recv, t1, cha_klass));
void profile_call(ciMethod* method, int bci, ciMethod* callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) {
append(new LIR_OpProfileCall(lir_profile_call, method, bci, callee, mdo, recv, t1, cha_klass));
}
};
@@ -1940,6 +1940,14 @@ void LIRGenerator::do_NullCheck(NullCheck* x) {
}


void LIRGenerator::do_TypeCast(TypeCast* x) {
LIRItem value(x->obj(), this);
value.load_item();
// the result is the same as from the node we are casting
set_result(x, value.result());
}


void LIRGenerator::do_Throw(Throw* x) {
LIRItem exception(x->exception(), this);
exception.load_item();
@@ -2767,7 +2775,10 @@ void LIRGenerator::do_Invoke(Invoke* x) {
// JSR 292
// Preserve the SP over MethodHandle call sites.
ciMethod* target = x->target();
if (target->is_method_handle_invoke()) {
bool is_method_handle_invoke = (// %%% FIXME: Are both of these relevant?
target->is_method_handle_intrinsic() ||
target->is_compiled_lambda_form());
if (is_method_handle_invoke) {
info->set_is_method_handle_invoke(true);
__ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
}
@@ -2843,7 +2854,7 @@ void LIRGenerator::do_Invoke(Invoke* x) {

// JSR 292
// Restore the SP after MethodHandle call sites.
if (target->is_method_handle_invoke()) {
if (is_method_handle_invoke) {
__ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
}

@@ -3027,7 +3038,7 @@ void LIRGenerator::do_ProfileCall(ProfileCall* x) {
recv = new_register(T_OBJECT);
__ move(value.result(), recv);
}
__ profile_call(x->method(), x->bci_of_invoke(), mdo, recv, tmp, x->known_holder());
__ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
}

void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
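do_Invoke above computes is_method_handle_invoke once and keys both the SP save and the SP restore off that single flag, so the two sides cannot disagree. A standalone sketch of the pairing (toy frame, not FrameMap):

#include <cstdio>

struct Frame { long sp; long saved_sp; };  // toy stand-in for FrameMap state

static void call_with_sp_preserved(Frame* f, bool is_mh_invoke) {
  if (is_mh_invoke) f->saved_sp = f->sp;  // save before the call site
  f->sp -= 16;                            // a MethodHandle callee may move SP
  if (is_mh_invoke) f->sp = f->saved_sp;  // restore after the call site
}

int main() {
  Frame f = { 1024, 0 };
  call_with_sp_preserved(&f, true);
  std::printf("sp after MH call: %ld\n", f.sp);  // restored to 1024
  return 0;
}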
@@ -500,6 +500,7 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
virtual void do_IfOp (IfOp* x);
virtual void do_Convert (Convert* x);
virtual void do_NullCheck (NullCheck* x);
virtual void do_TypeCast (TypeCast* x);
virtual void do_Invoke (Invoke* x);
virtual void do_NewInstance (NewInstance* x);
virtual void do_NewTypeArray (NewTypeArray* x);
@@ -478,6 +478,7 @@ public:
void do_IfOp (IfOp* x);
void do_Convert (Convert* x);
void do_NullCheck (NullCheck* x);
void do_TypeCast (TypeCast* x);
void do_Invoke (Invoke* x);
void do_NewInstance (NewInstance* x);
void do_NewTypeArray (NewTypeArray* x);
@@ -648,6 +649,7 @@ void NullCheckVisitor::do_CompareOp (CompareOp* x) {}
void NullCheckVisitor::do_IfOp (IfOp* x) {}
void NullCheckVisitor::do_Convert (Convert* x) {}
void NullCheckVisitor::do_NullCheck (NullCheck* x) { nce()->handle_NullCheck(x); }
void NullCheckVisitor::do_TypeCast (TypeCast* x) {}
void NullCheckVisitor::do_Invoke (Invoke* x) { nce()->handle_Invoke(x); }
void NullCheckVisitor::do_NewInstance (NewInstance* x) { nce()->handle_NewInstance(x); }
void NullCheckVisitor::do_NewTypeArray (NewTypeArray* x) { nce()->handle_NewArray(x); }
@@ -178,6 +178,7 @@ class ValueNumberingVisitor: public InstructionVisitor {
void do_IfOp (IfOp* x) { /* nothing to do */ }
void do_Convert (Convert* x) { /* nothing to do */ }
void do_NullCheck (NullCheck* x) { /* nothing to do */ }
void do_TypeCast (TypeCast* x) { /* nothing to do */ }
void do_NewInstance (NewInstance* x) { /* nothing to do */ }
void do_NewTypeArray (NewTypeArray* x) { /* nothing to do */ }
void do_NewObjectArray (NewObjectArray* x) { /* nothing to do */ }
@@ -195,6 +195,7 @@ void ValueStack::setup_phi_for_local(BlockBegin* b, int index) {

void ValueStack::print() {
scope()->method()->print_name();
tty->cr();
if (stack_is_empty()) {
tty->print_cr("empty stack");
} else {
@@ -142,6 +142,10 @@ class ValueStack: public CompilationResourceObj {
return x;
}

void stack_at_put(int i, Value x) {
_stack.at_put(i, x);
}

// pinning support
void pin_stack_for_linear_scan();
@@ -101,6 +101,23 @@ ciObject* ArrayConstant::constant_value() const { return _val
ciObject* InstanceConstant::constant_value() const { return _value; }
ciObject* ClassConstant::constant_value() const { return _value; }

ciType* ObjectConstant::exact_type() const {
ciObject* c = constant_value();
return (c != NULL && !c->is_null_object()) ? c->klass() : NULL;
}
ciType* ArrayConstant::exact_type() const {
ciObject* c = constant_value();
return (c != NULL && !c->is_null_object()) ? c->klass() : NULL;
}
ciType* InstanceConstant::exact_type() const {
ciObject* c = constant_value();
return (c != NULL && !c->is_null_object()) ? c->klass() : NULL;
}
ciType* ClassConstant::exact_type() const {
ciObject* c = constant_value();
return (c != NULL && !c->is_null_object()) ? c->klass() : NULL;
}


ValueType* as_ValueType(BasicType type) {
switch (type) {
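The four exact_type() bodies added above are deliberately identical: a constant that is present and not the null object knows its exact klass. A compilable reduction of that rule (stand-in types):

#include <cstdio>

struct Klass { const char* name; };              // stand-in for ciType
struct Obj   { bool is_null_object; Klass* k; }; // stand-in for ciObject

// Same rule as each exact_type() above.
static Klass* exact_type(const Obj* c) {
  return (c != 0 && !c->is_null_object) ? c->k : 0;
}

int main() {
  Klass k = { "java/lang/String" };
  Obj o = { false, &k };
  Klass* t = exact_type(&o);
  std::printf("%s\n", t ? t->name : "(no exact type)");
  return 0;
}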
@@ -297,7 +297,8 @@ class ObjectType: public ValueType {
virtual const char tchar() const { return 'a'; }
virtual const char* name() const { return "object"; }
virtual ObjectType* as_ObjectType() { return this; }
virtual ciObject* constant_value() const { ShouldNotReachHere(); return NULL; }
virtual ciObject* constant_value() const { ShouldNotReachHere(); return NULL; }
virtual ciType* exact_type() const { return NULL; }
bool is_loaded() const;
jobject encoding() const;
};
@@ -315,6 +316,7 @@ class ObjectConstant: public ObjectType {
virtual bool is_constant() const { return true; }
virtual ObjectConstant* as_ObjectConstant() { return this; }
virtual ciObject* constant_value() const;
virtual ciType* exact_type() const;
};


@@ -334,9 +336,9 @@ class ArrayConstant: public ArrayType {
ciArray* value() const { return _value; }

virtual bool is_constant() const { return true; }

virtual ArrayConstant* as_ArrayConstant() { return this; }
virtual ciObject* constant_value() const;
virtual ciType* exact_type() const;
};


@@ -356,9 +358,9 @@ class InstanceConstant: public InstanceType {
ciInstance* value() const { return _value; }

virtual bool is_constant() const { return true; }

virtual InstanceConstant* as_InstanceConstant(){ return this; }
virtual ciObject* constant_value() const;
virtual ciType* exact_type() const;
};


@@ -378,9 +380,9 @@ class ClassConstant: public ClassType {
ciInstanceKlass* value() const { return _value; }

virtual bool is_constant() const { return true; }

virtual ClassConstant* as_ClassConstant() { return this; }
virtual ciObject* constant_value() const;
virtual ciType* exact_type() const;
};
@@ -238,9 +238,11 @@ void BCEscapeAnalyzer::invoke(StateInfo &state, Bytecodes::Code code, ciMethod*

// some methods are obviously bindable without any type checks so
// convert them directly to an invokespecial.
if (target->is_loaded() && !target->is_abstract() &&
target->can_be_statically_bound() && code == Bytecodes::_invokevirtual) {
code = Bytecodes::_invokespecial;
if (target->is_loaded() && !target->is_abstract() && target->can_be_statically_bound()) {
switch (code) {
case Bytecodes::_invokevirtual: code = Bytecodes::_invokespecial; break;
case Bytecodes::_invokehandle: code = Bytecodes::_invokestatic; break;
}
}

// compute size of arguments
@@ -866,7 +868,12 @@ void BCEscapeAnalyzer::iterate_one_block(ciBlock *blk, StateInfo &state, Growabl
{ bool will_link;
ciMethod* target = s.get_method(will_link);
ciKlass* holder = s.get_declared_method_holder();
invoke(state, s.cur_bc(), target, holder);
// Push appendix argument, if one.
if (s.has_appendix()) {
state.apush(unknown_obj);
}
// Pass in raw bytecode because we need to see invokehandle instructions.
invoke(state, s.cur_bc_raw(), target, holder);
ciType* return_type = target->return_type();
if (!return_type->is_primitive_type()) {
state.apush(unknown_obj);
@@ -47,6 +47,7 @@ class ciObject;
class ciNullObject;
class ciInstance;
class ciCallSite;
class ciMemberName;
class ciMethodHandle;
class ciMethod;
class ciMethodData;
@@ -100,6 +101,7 @@ friend class ciExceptionHandlerStream; \
friend class ciObject; \
friend class ciNullObject; \
friend class ciInstance; \
friend class ciMemberName; \
friend class ciMethod; \
friend class ciMethodData; \
friend class ciMethodHandle; \
@@ -50,7 +50,6 @@
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/methodHandleWalk.hpp"
#include "runtime/init.hpp"
#include "runtime/reflection.hpp"
#include "runtime/sharedRuntime.hpp"
@@ -582,7 +581,7 @@ ciConstant ciEnv::get_constant_by_index_impl(constantPoolHandle cpool,
assert(index < 0, "only one kind of index at a time");
ConstantPoolCacheEntry* cpc_entry = cpool->cache()->entry_at(cache_index);
index = cpc_entry->constant_pool_index();
oop obj = cpc_entry->f1();
oop obj = cpc_entry->f1_as_instance();
if (obj != NULL) {
assert(obj->is_instance() || obj->is_array(), "must be a Java reference");
ciObject* ciobj = get_object(obj);
@@ -750,7 +749,7 @@ ciMethod* ciEnv::get_method_by_index_impl(constantPoolHandle cpool,

if (cpool->has_preresolution()
|| (holder == ciEnv::MethodHandle_klass() &&
methodOopDesc::is_method_handle_invoke_name(name_sym))) {
MethodHandles::is_signature_polymorphic_name(holder->get_klassOop(), name_sym))) {
// Short-circuit lookups for JSR 292-related call sites.
// That is, do not rely only on name-based lookups, because they may fail
// if the names are not resolvable in the boot class loader (7056328).
@@ -760,11 +759,13 @@ ciMethod* ciEnv::get_method_by_index_impl(constantPoolHandle cpool,
case Bytecodes::_invokespecial:
case Bytecodes::_invokestatic:
{
methodOop m = constantPoolOopDesc::method_at_if_loaded(cpool, index, bc);
oop appendix_oop = NULL;
methodOop m = constantPoolOopDesc::method_at_if_loaded(cpool, index);
if (m != NULL) {
return get_object(m)->as_method();
}
}
break;
}
}

@@ -800,27 +801,28 @@ ciMethod* ciEnv::get_fake_invokedynamic_method_impl(constantPoolHandle cpool,
// Compare the following logic with InterpreterRuntime::resolve_invokedynamic.
assert(bc == Bytecodes::_invokedynamic, "must be invokedynamic");

bool is_resolved = cpool->cache()->main_entry_at(index)->is_resolved(bc);
if (is_resolved && cpool->cache()->secondary_entry_at(index)->is_f1_null())
// FIXME: code generation could allow for null (unlinked) call site
is_resolved = false;
ConstantPoolCacheEntry* secondary_entry = cpool->cache()->secondary_entry_at(index);
bool is_resolved = !secondary_entry->is_f1_null();
// FIXME: code generation could allow for null (unlinked) call site
// The call site could be made patchable as follows:
// Load the appendix argument from the constant pool.
// Test the appendix argument and jump to a known deopt routine if it is null.
// Jump through a patchable call site, which is initially a deopt routine.
// Patch the call site to the nmethod entry point of the static compiled lambda form.
// As with other two-component call sites, both values must be independently verified.

// Call site might not be resolved yet. We could create a real invoker method from the
// compiler, but it is simpler to stop the code path here with an unlinked method.
// Call site might not be resolved yet.
// Stop the code path here with an unlinked method.
if (!is_resolved) {
ciInstanceKlass* holder = get_object(SystemDictionary::MethodHandle_klass())->as_instance_klass();
ciSymbol* name = ciSymbol::invokeExact_name();
ciSymbol* name = ciSymbol::invokeBasic_name();
ciSymbol* signature = get_symbol(cpool->signature_ref_at(index));
return get_unloaded_method(holder, name, signature, accessor);
}

// Get the invoker methodOop from the constant pool.
oop f1_value = cpool->cache()->main_entry_at(index)->f1();
methodOop signature_invoker = (methodOop) f1_value;
assert(signature_invoker != NULL && signature_invoker->is_method() && signature_invoker->is_method_handle_invoke(),
"correct result from LinkResolver::resolve_invokedynamic");

return get_object(signature_invoker)->as_method();
// Get the invoker methodOop and the extra argument from the constant pool.
methodOop adapter = secondary_entry->f2_as_vfinal_method();
return get_object(adapter)->as_method();
}


@@ -1131,7 +1133,7 @@ uint ciEnv::compile_id() {
// ------------------------------------------------------------------
// ciEnv::notice_inlined_method()
void ciEnv::notice_inlined_method(ciMethod* method) {
_num_inlined_bytecodes += method->code_size();
_num_inlined_bytecodes += method->code_size_for_inlining();
}

// ------------------------------------------------------------------
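The rewritten lookup above treats an invokedynamic site as resolved exactly when the secondary cache entry's f1 is non-null, and otherwise stops the path with an unloaded placeholder method. A standalone sketch of that two-way decision (illustrative layout, not ConstantPoolCacheEntry's):

#include <cstdio>

// Illustrative cache entry; the real layout lives in ConstantPoolCacheEntry.
struct CacheEntry {
  void* f1;          // non-null once the call site is resolved
  void* f2_adapter;  // the resolved adapter method
};

static void* lookup_invoker(const CacheEntry& e, void* unloaded_placeholder) {
  bool is_resolved = (e.f1 != 0);
  if (!is_resolved) return unloaded_placeholder;  // stop the code path here
  return e.f2_adapter;
}

int main() {
  int adapter = 0, placeholder = 0;
  CacheEntry resolved   = { &adapter, &adapter };
  CacheEntry unresolved = { 0, 0 };
  std::printf("resolved: %s\n",
              lookup_invoker(resolved, &placeholder) == &adapter ? "adapter" : "placeholder");
  std::printf("unresolved: %s\n",
              lookup_invoker(unresolved, &placeholder) == &placeholder ? "placeholder" : "adapter");
  return 0;
}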
hotspot/src/share/vm/ci/ciMemberName.cpp (new file)
@@ -0,0 +1,39 @@
/*
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#include "precompiled.hpp"
#include "ci/ciClassList.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciUtilities.hpp"
#include "classfile/javaClasses.hpp"

// ------------------------------------------------------------------
// ciMemberName::get_vmtarget
//
// Return: MN.vmtarget
ciMethod* ciMemberName::get_vmtarget() const {
VM_ENTRY_MARK;
oop vmtarget_oop = java_lang_invoke_MemberName::vmtarget(get_oop());
return CURRENT_ENV->get_object(vmtarget_oop)->as_method();
}
44
hotspot/src/share/vm/ci/ciMemberName.hpp
Normal file
@ -0,0 +1,44 @@
/*
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#ifndef SHARE_VM_CI_CIMEMBERNAME_HPP
#define SHARE_VM_CI_CIMEMBERNAME_HPP

#include "ci/ciCallProfile.hpp"
#include "ci/ciInstance.hpp"

// ciMemberName
//
// The class represents a java.lang.invoke.MemberName object.
class ciMemberName : public ciInstance {
public:
ciMemberName(instanceHandle h_i) : ciInstance(h_i) {}

// What kind of ciObject is this?
bool is_member_name() const { return true; }

ciMethod* get_vmtarget() const;
};

#endif // SHARE_VM_CI_CIMEMBERNAME_HPP
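The new ciMemberName mirror gives the compiler interface access to MN.vmtarget, the resolved method that a java.lang.invoke.MemberName points at. On the Java side, the closest public view of a MemberName is java.lang.invoke.MethodHandleInfo, obtained by cracking a direct method handle; a small sketch using the JDK 8+ public API (not part of this changeset):

import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandleInfo;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;

public class RevealTargetDemo {
    public static void main(String[] args) throws Exception {
        MethodHandles.Lookup lookup = MethodHandles.lookup();
        MethodHandle mh = lookup.findVirtual(
                String.class, "length", MethodType.methodType(int.class));
        // MethodHandleInfo is the public reflection of the internal MemberName:
        // it names the resolved target behind a direct method handle.
        MethodHandleInfo info = lookup.revealDirect(mh);
        System.out.println(info.getDeclaringClass().getName() + "."
                + info.getName() + info.getMethodType()); // java.lang.String.length()int
    }
}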
@ -769,39 +769,37 @@ int ciMethod::scale_count(int count, float prof_factor) {
// invokedynamic support

// ------------------------------------------------------------------
// ciMethod::is_method_handle_invoke
// ciMethod::is_method_handle_intrinsic
//
// Return true if the method is an instance of one of the two
// signature-polymorphic MethodHandle methods, invokeExact or invokeGeneric.
bool ciMethod::is_method_handle_invoke() const {
if (!is_loaded()) {
bool flag = (holder()->name() == ciSymbol::java_lang_invoke_MethodHandle() &&
methodOopDesc::is_method_handle_invoke_name(name()->sid()));
return flag;
}
VM_ENTRY_MARK;
return get_methodOop()->is_method_handle_invoke();
// Return true if the method is an instance of the JVM-generated
// signature-polymorphic MethodHandle methods, _invokeBasic, _linkToVirtual, etc.
bool ciMethod::is_method_handle_intrinsic() const {
vmIntrinsics::ID iid = _intrinsic_id; // do not check if loaded
return (MethodHandles::is_signature_polymorphic(iid) &&
MethodHandles::is_signature_polymorphic_intrinsic(iid));
}

// ------------------------------------------------------------------
// ciMethod::is_method_handle_adapter
// ciMethod::is_compiled_lambda_form
//
// Return true if the method is a generated MethodHandle adapter.
// These are built by MethodHandleCompiler.
bool ciMethod::is_method_handle_adapter() const {
if (!is_loaded()) return false;
VM_ENTRY_MARK;
return get_methodOop()->is_method_handle_adapter();
// These are built by Java code.
bool ciMethod::is_compiled_lambda_form() const {
vmIntrinsics::ID iid = _intrinsic_id; // do not check if loaded
return iid == vmIntrinsics::_compiledLambdaForm;
}

ciInstance* ciMethod::method_handle_type() {
check_is_loaded();
VM_ENTRY_MARK;
oop mtype = get_methodOop()->method_handle_type();
return CURRENT_THREAD_ENV->get_object(mtype)->as_instance();
// ------------------------------------------------------------------
// ciMethod::has_member_arg
//
// Return true if the method is a linker intrinsic like _linkToVirtual.
// These are built by the JVM.
bool ciMethod::has_member_arg() const {
vmIntrinsics::ID iid = _intrinsic_id; // do not check if loaded
return (MethodHandles::is_signature_polymorphic(iid) &&
MethodHandles::has_member_arg(iid));
}

// ------------------------------------------------------------------
// ciMethod::ensure_method_data
//
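The rewritten predicates classify methods by intrinsic ID instead of by name matching. The signature-polymorphic methods they recognize are visible from Java as the native varargs members of MethodHandle; a self-contained sketch that lists them on whatever JDK it runs on (the exact set, e.g. whether the internal linkTo* linkers appear, varies by JDK version):

import java.lang.invoke.MethodHandle;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;

public class PolymorphicNamesDemo {
    public static void main(String[] args) {
        // Signature-polymorphic methods are declared native and varargs with
        // the placeholder descriptor (Object...)Object; this filter is expected
        // to surface invoke and invokeExact, plus internal members such as
        // invokeBasic and linkToVirtual on typical JDK builds.
        for (Method m : MethodHandle.class.getDeclaredMethods()) {
            if (Modifier.isNative(m.getModifiers()) && m.isVarArgs()) {
                System.out.println(m.getName());
            }
        }
    }
}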
@ -1024,28 +1022,13 @@ int ciMethod::highest_osr_comp_level() {
// ------------------------------------------------------------------
// ciMethod::code_size_for_inlining
//
// Code size for inlining decisions.
//
// Don't fully count method handle adapters against inlining budgets:
// the metric we use here is the number of call sites in the adapter
// as they are probably the instructions which generate some code.
// Code size for inlining decisions. This method returns a code
// size of 1 for methods which has the ForceInline annotation.
int ciMethod::code_size_for_inlining() {
check_is_loaded();

// Method handle adapters
if (is_method_handle_adapter()) {
// Count call sites
int call_site_count = 0;
ciBytecodeStream iter(this);
while (iter.next() != ciBytecodeStream::EOBC()) {
if (Bytecodes::is_invoke(iter.cur_bc())) {
call_site_count++;
}
}
return call_site_count;
if (get_methodOop()->force_inline()) {
return 1;
}

// Normal method
return code_size();
}
@ -1127,7 +1110,8 @@ bool ciMethod::check_call(int refinfo_index, bool is_static) const {
constantPoolHandle pool (THREAD, get_methodOop()->constants());
methodHandle spec_method;
KlassHandle spec_klass;
LinkResolver::resolve_method(spec_method, spec_klass, pool, refinfo_index, THREAD);
Bytecodes::Code code = (is_static ? Bytecodes::_invokestatic : Bytecodes::_invokevirtual);
LinkResolver::resolve_method_statically(spec_method, spec_klass, code, pool, refinfo_index, THREAD);
if (HAS_PENDING_EXCEPTION) {
CLEAR_PENDING_EXCEPTION;
return false;
@ -1207,8 +1191,16 @@ void ciMethod::print_name(outputStream* st) {
//
// Print the name of this method, without signature.
void ciMethod::print_short_name(outputStream* st) {
check_is_loaded();
GUARDED_VM_ENTRY(get_methodOop()->print_short_name(st);)
if (is_loaded()) {
GUARDED_VM_ENTRY(get_methodOop()->print_short_name(st););
} else {
// Fall back if method is not loaded.
holder()->print_name_on(st);
st->print("::");
name()->print_symbol_on(st);
if (WizardMode)
signature()->as_symbol()->print_symbol_on(st);
}
}

// ------------------------------------------------------------------
@ -1223,6 +1215,7 @@ void ciMethod::print_impl(outputStream* st) {
holder()->print_name_on(st);
st->print(" signature=");
signature()->as_symbol()->print_symbol_on(st);
st->print(" arg_size=%d", arg_size());
if (is_loaded()) {
st->print(" loaded=true flags=");
flags().print_member_flags(st);
@ -133,16 +133,20 @@ class ciMethod : public ciObject {
return _signature->size() + (_flags.is_static() ? 0 : 1);
}
// Report the number of elements on stack when invoking this method.
// This is different than the regular arg_size because invokdynamic
// This is different than the regular arg_size because invokedynamic
// has an implicit receiver.
int invoke_arg_size(Bytecodes::Code code) const {
int arg_size = _signature->size();
// Add a receiver argument, maybe:
if (code != Bytecodes::_invokestatic &&
code != Bytecodes::_invokedynamic) {
arg_size++;
if (is_loaded()) {
return arg_size();
} else {
int arg_size = _signature->size();
// Add a receiver argument, maybe:
if (code != Bytecodes::_invokestatic &&
code != Bytecodes::_invokedynamic) {
arg_size++;
}
return arg_size;
}
return arg_size;
}

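The reworked invoke_arg_size above only computes the size from the signature when the callee is not loaded; the computation counts JVM stack slots, where long and double occupy two slots each and instance calls add one for the receiver. A small Java sketch of the same arithmetic over a MethodType (illustrative only; the helper name is made up):

import java.lang.invoke.MethodType;

public class ArgSlotsDemo {
    // Mirror of the slot counting in invoke_arg_size: two slots for long and
    // double, one for everything else, plus one for an implicit receiver.
    static int invokeArgSlots(MethodType type, boolean hasReceiver) {
        int slots = hasReceiver ? 1 : 0;
        for (Class<?> p : type.parameterList()) {
            slots += (p == long.class || p == double.class) ? 2 : 1;
        }
        return slots;
    }

    public static void main(String[] args) {
        MethodType mt = MethodType.methodType(void.class, int.class, long.class, double.class);
        System.out.println(invokeArgSlots(mt, true)); // 1 + 1 + 2 + 2 = 6
    }
}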
@ -161,6 +165,7 @@ class ciMethod : public ciObject {
int code_size_for_inlining();

bool force_inline() { return get_methodOop()->force_inline(); }
bool dont_inline() { return get_methodOop()->dont_inline(); }

int comp_level();
int highest_osr_comp_level();
@ -258,9 +263,9 @@ class ciMethod : public ciObject {
int scale_count(int count, float prof_factor = 1.); // make MDO count commensurate with IIC

// JSR 292 support
bool is_method_handle_invoke() const;
bool is_method_handle_adapter() const;
ciInstance* method_handle_type();
bool is_method_handle_intrinsic() const;
bool is_compiled_lambda_form() const;
bool has_member_arg() const;

// What kind of ciObject is this?
bool is_method() { return true; }
@ -24,84 +24,18 @@

#include "precompiled.hpp"
#include "ci/ciClassList.hpp"
#include "ci/ciInstance.hpp"
#include "ci/ciMethodData.hpp"
#include "ci/ciMethodHandle.hpp"
#include "ci/ciUtilities.hpp"
#include "prims/methodHandleWalk.hpp"
#include "prims/methodHandles.hpp"

// ciMethodHandle
#include "classfile/javaClasses.hpp"

// ------------------------------------------------------------------
// ciMethodHandle::get_adapter
// ciMethodHandle::get_vmtarget
//
// Return an adapter for this MethodHandle.
ciMethod* ciMethodHandle::get_adapter_impl(bool is_invokedynamic) {
// Return: MH.form -> LF.vmentry -> MN.vmtarget
ciMethod* ciMethodHandle::get_vmtarget() const {
VM_ENTRY_MARK;
Handle h(get_oop());
methodHandle callee(_callee->get_methodOop());
assert(callee->is_method_handle_invoke(), "");
oop mt1 = callee->method_handle_type();
oop mt2 = java_lang_invoke_MethodHandle::type(h());
if (!java_lang_invoke_MethodType::equals(mt1, mt2)) {
if (PrintMiscellaneous && (Verbose || WizardMode)) {
tty->print_cr("ciMethodHandle::get_adapter: types not equal");
mt1->print(); mt2->print();
}
return NULL;
}
// We catch all exceptions here that could happen in the method
// handle compiler and stop the VM.
MethodHandleCompiler mhc(h, callee->name(), callee->signature(), _profile.count(), is_invokedynamic, THREAD);
if (!HAS_PENDING_EXCEPTION) {
methodHandle m = mhc.compile(THREAD);
if (!HAS_PENDING_EXCEPTION) {
return CURRENT_ENV->get_object(m())->as_method();
}
}
if (PrintMiscellaneous && (Verbose || WizardMode)) {
tty->print("*** ciMethodHandle::get_adapter => ");
PENDING_EXCEPTION->print();
tty->print("*** get_adapter (%s): ", is_invokedynamic ? "indy" : "mh"); ((ciObject*)this)->print();
}
CLEAR_PENDING_EXCEPTION;
return NULL;
oop form_oop = java_lang_invoke_MethodHandle::form(get_oop());
oop vmentry_oop = java_lang_invoke_LambdaForm::vmentry(form_oop);
oop vmtarget_oop = java_lang_invoke_MemberName::vmtarget(vmentry_oop);
return CURRENT_ENV->get_object(vmtarget_oop)->as_method();
}

// ------------------------------------------------------------------
// ciMethodHandle::get_adapter
//
// Return an adapter for this MethodHandle.
ciMethod* ciMethodHandle::get_adapter(bool is_invokedynamic) {
ciMethod* result = get_adapter_impl(is_invokedynamic);
if (result) {
// Fake up the MDO maturity.
ciMethodData* mdo = result->method_data();
if (mdo != NULL && _caller->method_data() != NULL && _caller->method_data()->is_mature()) {
mdo->set_mature();
}
}
return result;
}

#ifdef ASSERT
// ------------------------------------------------------------------
// ciMethodHandle::print_chain_impl
//
// Implementation of the print method.
void ciMethodHandle::print_chain_impl() {
ASSERT_IN_VM;
MethodHandleChain::print(get_oop());
}

// ------------------------------------------------------------------
// ciMethodHandle::print_chain
//
// Implementation of the print_chain method.
void ciMethodHandle::print_chain() {
GUARDED_VM_ENTRY(print_chain_impl(););
}
#endif
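get_vmtarget now walks MH.form -> LF.vmentry -> MN.vmtarget: every method handle carries a LambdaForm whose entry point names the real target method. That chain is JVM-internal, but on a JDK 8 era runtime it can be peeked at reflectively. A hedged sketch (the private field name form is an assumption taken from the JDK 8 sources, and later JDKs block this without --add-opens):

import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;
import java.lang.reflect.Field;

public class LambdaFormPeek {
    public static void main(String[] args) throws Throwable {
        MethodHandle mh = MethodHandles.lookup().findStatic(
                Math.class, "abs", MethodType.methodType(int.class, int.class));
        // MH.form is the LambdaForm behind this handle -- the first link of the
        // MH.form -> LF.vmentry -> MN.vmtarget chain the VM walks above.
        Field form = MethodHandle.class.getDeclaredField("form"); // assumed field name (JDK 8)
        form.setAccessible(true); // newer JDKs need --add-opens java.base/java.lang.invoke=ALL-UNNAMED
        System.out.println(form.get(mh));
    }
}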
@ -25,61 +25,20 @@
#ifndef SHARE_VM_CI_CIMETHODHANDLE_HPP
#define SHARE_VM_CI_CIMETHODHANDLE_HPP

#include "ci/ciCallProfile.hpp"
#include "ci/ciClassList.hpp"
#include "ci/ciInstance.hpp"
#include "prims/methodHandles.hpp"

// ciMethodHandle
//
// The class represents a java.lang.invoke.MethodHandle object.
class ciMethodHandle : public ciInstance {
private:
ciMethod* _callee;
ciMethod* _caller;
ciCallProfile _profile;
ciMethod* _method_handle_adapter;
ciMethod* _invokedynamic_adapter;

// Return an adapter for this MethodHandle.
ciMethod* get_adapter_impl(bool is_invokedynamic);
ciMethod* get_adapter( bool is_invokedynamic);

protected:
void print_chain_impl() NOT_DEBUG_RETURN;

public:
ciMethodHandle(instanceHandle h_i) :
ciInstance(h_i),
_callee(NULL),
_caller(NULL),
_method_handle_adapter(NULL),
_invokedynamic_adapter(NULL)
{}
ciMethodHandle(instanceHandle h_i) : ciInstance(h_i) {}

// What kind of ciObject is this?
bool is_method_handle() const { return true; }

void set_callee(ciMethod* m) { _callee = m; }
void set_caller(ciMethod* m) { _caller = m; }
void set_call_profile(ciCallProfile profile) { _profile = profile; }

// Return an adapter for a MethodHandle call.
ciMethod* get_method_handle_adapter() {
if (_method_handle_adapter == NULL) {
_method_handle_adapter = get_adapter(false);
}
return _method_handle_adapter;
}

// Return an adapter for an invokedynamic call.
ciMethod* get_invokedynamic_adapter() {
if (_invokedynamic_adapter == NULL) {
_invokedynamic_adapter = get_adapter(true);
}
return _invokedynamic_adapter;
}

void print_chain() NOT_DEBUG_RETURN;
ciMethod* get_vmtarget() const;
};

#endif // SHARE_VM_CI_CIMETHODHANDLE_HPP
@ -138,13 +138,14 @@ public:
jobject constant_encoding();

// What kind of ciObject is this?
virtual bool is_null_object() const { return false; }
virtual bool is_call_site() const { return false; }
virtual bool is_cpcache() const { return false; }
virtual bool is_null_object() const { return false; }
virtual bool is_call_site() const { return false; }
virtual bool is_cpcache() const { return false; }
virtual bool is_instance() { return false; }
virtual bool is_member_name() const { return false; }
virtual bool is_method() { return false; }
virtual bool is_method_data() { return false; }
virtual bool is_method_handle() const { return false; }
virtual bool is_method_handle() const { return false; }
virtual bool is_array() { return false; }
virtual bool is_obj_array() { return false; }
virtual bool is_type_array() { return false; }
@ -208,6 +209,10 @@ public:
assert(is_instance(), "bad cast");
return (ciInstance*)this;
}
ciMemberName* as_member_name() {
assert(is_member_name(), "bad cast");
return (ciMemberName*)this;
}
ciMethod* as_method() {
assert(is_method(), "bad cast");
return (ciMethod*)this;
@ -290,7 +295,8 @@ public:
}

// Print debugging output about this ciObject.
void print(outputStream* st = tty);
void print(outputStream* st);
void print() { print(tty); } // GDB cannot handle default arguments

// Print debugging output about the oop this ciObject represents.
void print_oop(outputStream* st = tty);
@ -28,6 +28,7 @@
#include "ci/ciInstance.hpp"
#include "ci/ciInstanceKlass.hpp"
#include "ci/ciInstanceKlassKlass.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciMethod.hpp"
#include "ci/ciMethodData.hpp"
#include "ci/ciMethodHandle.hpp"
@ -344,6 +345,8 @@ ciObject* ciObjectFactory::create_new_object(oop o) {
instanceHandle h_i(THREAD, (instanceOop)o);
if (java_lang_invoke_CallSite::is_instance(o))
return new (arena()) ciCallSite(h_i);
else if (java_lang_invoke_MemberName::is_instance(o))
return new (arena()) ciMemberName(h_i);
else if (java_lang_invoke_MethodHandle::is_instance(o))
return new (arena()) ciMethodHandle(h_i);
else
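create_new_object now recognizes MemberName oops and wraps them in the new ciMemberName mirror, alongside the existing CallSite and MethodHandle cases. The dispatch is plain runtime-type testing; a toy Java analogue using the public types (MemberName has no public counterpart, so it is omitted here):

import java.lang.invoke.CallSite;
import java.lang.invoke.ConstantCallSite;
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;

public class MirrorFactoryDemo {
    // Toy analogue of ciObjectFactory::create_new_object: choose a mirror kind
    // from the runtime class of a java.lang.invoke object.
    static String mirrorKindOf(Object o) {
        if (o instanceof CallSite)     return "ciCallSite";
        if (o instanceof MethodHandle) return "ciMethodHandle";
        return "ciInstance";
    }

    public static void main(String[] args) throws Exception {
        MethodHandle mh = MethodHandles.lookup().findVirtual(
                String.class, "isEmpty", MethodType.methodType(boolean.class));
        System.out.println(mirrorKindOf(mh));                       // ciMethodHandle
        System.out.println(mirrorKindOf(new ConstantCallSite(mh))); // ciCallSite
    }
}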
@ -39,10 +39,11 @@ private:
ciKlass* _accessing_klass;

GrowableArray<ciType*>* _types;
int _size;
int _count;
int _size; // number of stack slots required for arguments
int _count; // number of parameter types in the signature

friend class ciMethod;
friend class ciBytecodeStream;
friend class ciObjectFactory;

ciSignature(ciKlass* accessing_klass, constantPoolHandle cpool, ciSymbol* signature);
@ -363,6 +363,29 @@ ciMethod* ciBytecodeStream::get_method(bool& will_link) {
return m;
}

// ------------------------------------------------------------------
// ciBytecodeStream::has_appendix
//
// Returns true if there is an appendix argument stored in the
// constant pool cache at the current bci.
bool ciBytecodeStream::has_appendix() {
VM_ENTRY_MARK;
constantPoolHandle cpool(_method->get_methodOop()->constants());
return constantPoolOopDesc::has_appendix_at_if_loaded(cpool, get_method_index());
}

// ------------------------------------------------------------------
// ciBytecodeStream::get_appendix
//
// Return the appendix argument stored in the constant pool cache at
// the current bci.
ciObject* ciBytecodeStream::get_appendix() {
VM_ENTRY_MARK;
constantPoolHandle cpool(_method->get_methodOop()->constants());
oop appendix_oop = constantPoolOopDesc::appendix_at_if_loaded(cpool, get_method_index());
return CURRENT_ENV->get_object(appendix_oop);
}

// ------------------------------------------------------------------
// ciBytecodeStream::get_declared_method_holder
//
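The appendix argument that has_appendix/get_appendix expose is the extra value the JVM appends to certain invokehandle/invokedynamic calls once linkage stores it in the constant pool cache. The Java-visible half of that machinery is the bootstrap method protocol; a self-contained sketch of a bootstrap of the shape invokedynamic links against (driven directly from main here, since plain Java source cannot emit an invokedynamic instruction):

import java.lang.invoke.CallSite;
import java.lang.invoke.ConstantCallSite;
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;

public class BootstrapDemo {
    // When an invokedynamic instruction links, the JVM calls a bootstrap method
    // like this one and records the result (plus any appendix) in the constant
    // pool cache -- the storage ciBytecodeStream::get_appendix reads.
    public static CallSite bootstrap(MethodHandles.Lookup lookup, String name,
                                     MethodType type) throws Exception {
        MethodHandle target = lookup.findStatic(
                BootstrapDemo.class, "hello", MethodType.methodType(String.class));
        return new ConstantCallSite(target.asType(type));
    }

    static String hello() { return "hello from an indy target"; }

    public static void main(String[] args) throws Throwable {
        CallSite cs = bootstrap(MethodHandles.lookup(), "greet",
                MethodType.methodType(String.class));
        System.out.println((String) cs.getTarget().invokeExact());
    }
}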
@ -378,9 +401,9 @@ ciKlass* ciBytecodeStream::get_declared_method_holder() {
VM_ENTRY_MARK;
constantPoolHandle cpool(_method->get_methodOop()->constants());
bool ignore;
// report as InvokeDynamic for invokedynamic, which is syntactically classless
// report as MethodHandle for invokedynamic, which is syntactically classless
if (cur_bc() == Bytecodes::_invokedynamic)
return CURRENT_ENV->get_klass_by_name(_holder, ciSymbol::java_lang_invoke_InvokeDynamic(), false);
return CURRENT_ENV->get_klass_by_name(_holder, ciSymbol::java_lang_invoke_MethodHandle(), false);
return CURRENT_ENV->get_klass_by_index(cpool, get_method_holder_index(), ignore, _holder);
}
@ -395,6 +418,24 @@ int ciBytecodeStream::get_method_holder_index() {
return cpool->klass_ref_index_at(get_method_index());
}

// ------------------------------------------------------------------
// ciBytecodeStream::get_declared_method_signature
//
// Get the declared signature of the currently referenced method.
//
// This is always the same as the signature of the resolved method
// itself, except for _invokehandle and _invokedynamic calls.
//
ciSignature* ciBytecodeStream::get_declared_method_signature() {
int sig_index = get_method_signature_index();
VM_ENTRY_MARK;
ciEnv* env = CURRENT_ENV;
constantPoolHandle cpool(_method->get_methodOop()->constants());
Symbol* sig_sym = cpool->symbol_at(sig_index);
ciKlass* pool_holder = env->get_object(cpool->pool_holder())->as_klass();
return new (env->arena()) ciSignature(pool_holder, cpool, env->get_symbol(sig_sym));
}

// ------------------------------------------------------------------
// ciBytecodeStream::get_method_signature_index
//
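get_declared_method_signature matters because, for _invokehandle and _invokedynamic, the descriptor written at the call site is the authoritative signature and need not match any declared method. The same descriptor syntax can be parsed on the Java side; a short sketch:

import java.lang.invoke.MethodType;

public class DeclaredSignatureDemo {
    public static void main(String[] args) {
        // An invokedynamic call's declared signature is whatever descriptor the
        // call site carries; MethodType can parse such a descriptor directly.
        MethodType mt = MethodType.fromMethodDescriptorString(
                "(ILjava/lang/String;)Ljava/lang/Object;",
                DeclaredSignatureDemo.class.getClassLoader());
        System.out.println(mt); // (int,String)Object
    }
}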
@ -434,7 +475,7 @@ ciCallSite* ciBytecodeStream::get_call_site() {
// Get the CallSite from the constant pool cache.
int method_index = get_method_index();
ConstantPoolCacheEntry* cpcache_entry = cpcache->secondary_entry_at(method_index);
oop call_site_oop = cpcache_entry->f1();
oop call_site_oop = cpcache_entry->f1_as_instance();

// Create a CallSite object and return it.
return CURRENT_ENV->get_object(call_site_oop)->as_call_site();
@ -259,8 +259,11 @@ public:

// If this is a method invocation bytecode, get the invoked method.
ciMethod* get_method(bool& will_link);
bool has_appendix();
ciObject* get_appendix();
ciKlass* get_declared_method_holder();
int get_method_holder_index();
ciSignature* get_declared_method_signature();
int get_method_signature_index();

ciCPCache* get_cpcache() const;
@ -83,6 +83,10 @@ bool ciSymbol::starts_with(const char* prefix, int len) const {
GUARDED_VM_ENTRY(return get_symbol()->starts_with(prefix, len);)
}

bool ciSymbol::is_signature_polymorphic_name() const {
GUARDED_VM_ENTRY(return MethodHandles::is_signature_polymorphic_name(get_symbol());)
}

// ------------------------------------------------------------------
// ciSymbol::index_of
//
@ -107,6 +107,8 @@ public:

// Are two ciSymbols equal?
bool equals(ciSymbol* obj) { return this->_symbol == obj->get_symbol(); }

bool is_signature_polymorphic_name() const;
};

#endif // SHARE_VM_CI_CISYMBOL_HPP
@ -643,9 +643,9 @@ void ciTypeFlow::StateVector::do_getstatic(ciBytecodeStream* str) {
// ------------------------------------------------------------------
// ciTypeFlow::StateVector::do_invoke
void ciTypeFlow::StateVector::do_invoke(ciBytecodeStream* str,
bool has_receiver) {
bool has_receiver_foo) {
bool will_link;
ciMethod* method = str->get_method(will_link);
ciMethod* callee = str->get_method(will_link);
if (!will_link) {
// We weren't able to find the method.
if (str->cur_bc() == Bytecodes::_invokedynamic) {
@ -654,12 +654,24 @@ void ciTypeFlow::StateVector::do_invoke(ciBytecodeStream* str,
(Deoptimization::Reason_uninitialized,
Deoptimization::Action_reinterpret));
} else {
ciKlass* unloaded_holder = method->holder();
ciKlass* unloaded_holder = callee->holder();
trap(str, unloaded_holder, str->get_method_holder_index());
}
} else {
ciSignature* signature = method->signature();
// TODO Use Bytecode_invoke after metadata changes.
//Bytecode_invoke inv(str->method(), str->cur_bci());
//const bool has_receiver = callee->is_loaded() ? !callee->is_static() : inv.has_receiver();
Bytecode inv(str);
Bytecodes::Code code = inv.invoke_code();
const bool has_receiver = callee->is_loaded() ? !callee->is_static() : code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic;

ciSignature* signature = callee->signature();
ciSignatureStream sigstr(signature);
// Push appendix argument, if one.
if (str->has_appendix()) {
ciObject* appendix = str->get_appendix();
push_object(appendix->klass());
}
int arg_size = signature->size();
int stack_base = stack_size() - arg_size;
int i = 0;
@ -677,6 +689,7 @@ void ciTypeFlow::StateVector::do_invoke(ciBytecodeStream* str,
for (int j = 0; j < arg_size; j++) {
pop();
}
assert(!callee->is_loaded() || has_receiver == !callee->is_static(), "mismatch");
if (has_receiver) {
// Check this?
pop_object();
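The new has_receiver computation in do_invoke falls back on the bytecode when the callee is not loaded: only invokestatic and invokedynamic push no receiver. A trivial Java restatement of that rule (illustrative only):

public class HasReceiverDemo {
    // Mirrors the unloaded-callee fallback in ciTypeFlow::StateVector::do_invoke:
    // every invoke bytecode except invokestatic and invokedynamic has a receiver.
    static boolean hasReceiver(String invokeBytecode) {
        return !invokeBytecode.equals("invokestatic")
            && !invokeBytecode.equals("invokedynamic");
    }

    public static void main(String[] args) {
        String[] bcs = {"invokevirtual", "invokespecial", "invokeinterface",
                        "invokestatic", "invokedynamic"};
        for (String bc : bcs) {
            System.out.println(bc + " -> has receiver: " + hasReceiver(bc));
        }
    }
}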
Some files were not shown because too many files have changed in this diff.