Merge
This commit is contained in:
commit
24b71d013a
4
.hgtags
4
.hgtags
@ -62,3 +62,7 @@ b1e55627a6980b9508854ed0c0f21d4f981b4494 jdk7-b84
|
||||
b6f633a93ae0ec4555ff4bf756f5e2150c9bdede jdk7-b85
|
||||
c94d9cc81f495d97817eba9d71b84fc45f7661a5 jdk7-b86
|
||||
b7456c473862048fa70ed8092313a4ef0a55d403 jdk7-b87
|
||||
7077b95d42f6b3942a8751bba033801ff50e5889 jdk7-b88
|
||||
44158f6d3b94c0fa020e33632532473d92d1ea96 jdk7-b89
|
||||
1d1927f9ec097b62c913921e2dfa5dbaf5dc325b jdk7-b90
|
||||
308ad8f68b8dd68e22d73dd490e110059b732422 jdk7-b91
|
||||
|
@ -62,3 +62,7 @@ e1176f86805fe07fd9fb9da065dc51b47712ce76 jdk7-b82
|
||||
cf26288a114be67c39f2758959ce50b60f5ae330 jdk7-b85
|
||||
433a60a9c0bf1b26ee7e65cebaa89c541f497aed jdk7-b86
|
||||
6b1069f53fbc30663ccef49d78c31bb7d6967bde jdk7-b87
|
||||
82135c848d5fcddb065e98ae77b81077c858f593 jdk7-b88
|
||||
7f1ba4459972bf84b8201dc1cc4f62b1fe1c74f4 jdk7-b89
|
||||
425ba3efabbfe0b188105c10aaf7c3c8fa8d1a38 jdk7-b90
|
||||
97d8b6c659c29c8493a8b2b72c2796a021a8cf79 jdk7-b91
|
||||
|
@ -62,3 +62,7 @@ fde0df7a2384f7fe33204a79678989807d9c2b98 jdk7-b83
|
||||
c67a9df7bc0ca291f08f9a9cc05cb78ea15d25e6 jdk7-b85
|
||||
6253e28826d16cf1aecc39ce04c8de1f6bf2df5f jdk7-b86
|
||||
09a41111a401d327f65e453384d976a10154d9ea jdk7-b87
|
||||
39e14d2da687c7e592142137517aaf689544820f jdk7-b88
|
||||
bb4424c5e778b842c064a8b1aa902b35f4397654 jdk7-b89
|
||||
56ce07b0eb47b93a98a72adef0f21e602c460623 jdk7-b90
|
||||
bcd2fc089227559ac5be927923609fac29f067fa jdk7-b91
|
||||
|
@ -86,3 +86,11 @@ ffc8d176b84bcfb5ac21302b4feb3b0c0d69b97c jdk7-b84
|
||||
bf823ef06b4f211e66988d76a2e2669be5c0820e jdk7-b86
|
||||
07226e9eab8f74b37346b32715f829a2ef2c3188 hs18-b01
|
||||
e7e7e36ccdb5d56edd47e5744351202d38f3b7ad jdk7-b87
|
||||
4b60f23c42231f7ecd62ad1fcb6a9ca26fa57d1b jdk7-b88
|
||||
15836273ac2494f36ef62088bc1cb6f3f011f565 jdk7-b89
|
||||
4b60f23c42231f7ecd62ad1fcb6a9ca26fa57d1b hs18-b02
|
||||
605c9707a766ff518cd841fc04f9bb4b36a3a30b jdk7-b90
|
||||
e0a1a502e402dbe7bf2d9102b4084a7e79a99a9b jdk7-b91
|
||||
25f53b53aaa3eb8b2d5391a1e8de9a76ae1dd8a2 hs18-b03
|
||||
3221d1887d30341bedfdac1dbf365ea41beff20f jdk7-b92
|
||||
310cdbc355355a13aa53c002b6bde4a8c5ba67ff hs18-b04
|
||||
|
@ -884,9 +884,12 @@ static bool read_shared_lib_info(struct ps_prochandle* ph) {
|
||||
}
|
||||
|
||||
// read name of the shared object
|
||||
if (read_string(ph, (uintptr_t) lib_name_addr, lib_name, sizeof(lib_name)) != true) {
|
||||
lib_name[0] = '\0';
|
||||
if (lib_name_addr != 0 &&
|
||||
read_string(ph, (uintptr_t) lib_name_addr, lib_name, sizeof(lib_name)) != true) {
|
||||
print_debug("can't read shared object name\n");
|
||||
return false;
|
||||
// don't let failure to read the name stop opening the file. If something is really wrong
|
||||
// it will fail later.
|
||||
}
|
||||
|
||||
if (lib_name[0] != '\0') {
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 2005-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -494,6 +494,68 @@ public class CommandProcessor {
|
||||
}
|
||||
}
|
||||
},
|
||||
new Command("revptrs", "revptrs address", false) {
|
||||
public void doit(Tokens t) {
|
||||
int tokens = t.countTokens();
|
||||
if (tokens != 1 && (tokens != 2 || !t.nextToken().equals("-c"))) {
|
||||
usage();
|
||||
return;
|
||||
}
|
||||
boolean chase = tokens == 2;
|
||||
ReversePtrs revptrs = VM.getVM().getRevPtrs();
|
||||
if (revptrs == null) {
|
||||
out.println("Computing reverse pointers...");
|
||||
ReversePtrsAnalysis analysis = new ReversePtrsAnalysis();
|
||||
final boolean[] complete = new boolean[1];
|
||||
HeapProgressThunk thunk = new HeapProgressThunk() {
|
||||
public void heapIterationFractionUpdate(double d) {}
|
||||
public synchronized void heapIterationComplete() {
|
||||
complete[0] = true;
|
||||
notify();
|
||||
}
|
||||
};
|
||||
analysis.setHeapProgressThunk(thunk);
|
||||
analysis.run();
|
||||
while (!complete[0]) {
|
||||
synchronized (thunk) {
|
||||
try {
|
||||
thunk.wait();
|
||||
} catch (Exception e) {
|
||||
}
|
||||
}
|
||||
}
|
||||
revptrs = VM.getVM().getRevPtrs();
|
||||
out.println("Done.");
|
||||
}
|
||||
Address a = VM.getVM().getDebugger().parseAddress(t.nextToken());
|
||||
if (VM.getVM().getUniverse().heap().isInReserved(a)) {
|
||||
OopHandle handle = a.addOffsetToAsOopHandle(0);
|
||||
Oop oop = VM.getVM().getObjectHeap().newOop(handle);
|
||||
ArrayList ptrs = revptrs.get(oop);
|
||||
if (ptrs == null) {
|
||||
out.println("no live references to " + a);
|
||||
} else {
|
||||
if (chase) {
|
||||
while (ptrs.size() == 1) {
|
||||
LivenessPathElement e = (LivenessPathElement)ptrs.get(0);
|
||||
ByteArrayOutputStream bos = new ByteArrayOutputStream();
|
||||
Oop.printOopValueOn(e.getObj(), new PrintStream(bos));
|
||||
out.println(bos.toString());
|
||||
ptrs = revptrs.get(e.getObj());
|
||||
}
|
||||
} else {
|
||||
for (int i = 0; i < ptrs.size(); i++) {
|
||||
LivenessPathElement e = (LivenessPathElement)ptrs.get(i);
|
||||
ByteArrayOutputStream bos = new ByteArrayOutputStream();
|
||||
Oop.printOopValueOn(e.getObj(), new PrintStream(bos));
|
||||
out.println(bos.toString());
|
||||
oop = e.getObj();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
new Command("inspect", "inspect expression", false) {
|
||||
public void doit(Tokens t) {
|
||||
if (t.countTokens() != 1) {
|
||||
@ -816,8 +878,24 @@ public class CommandProcessor {
|
||||
dumpType(type);
|
||||
} else {
|
||||
Iterator i = agent.getTypeDataBase().getTypes();
|
||||
// Make sure the types are emitted in an order than can be read back in
|
||||
HashSet emitted = new HashSet();
|
||||
Stack pending = new Stack();
|
||||
while (i.hasNext()) {
|
||||
dumpType((Type)i.next());
|
||||
Type n = (Type)i.next();
|
||||
if (emitted.contains(n.getName())) {
|
||||
continue;
|
||||
}
|
||||
|
||||
while (n != null && !emitted.contains(n.getName())) {
|
||||
pending.push(n);
|
||||
n = n.getSuperclass();
|
||||
}
|
||||
while (!pending.empty()) {
|
||||
n = (Type)pending.pop();
|
||||
dumpType(n);
|
||||
emitted.add(n.getName());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -846,83 +924,105 @@ public class CommandProcessor {
|
||||
|
||||
}
|
||||
},
|
||||
new Command("search", "search [ heap | codecache | threads ] value", false) {
|
||||
new Command("search", "search [ heap | perm | rawheap | codecache | threads ] value", false) {
|
||||
public void doit(Tokens t) {
|
||||
if (t.countTokens() != 2) {
|
||||
usage();
|
||||
} else {
|
||||
String type = t.nextToken();
|
||||
final Address value = VM.getVM().getDebugger().parseAddress(t.nextToken());
|
||||
final long stride = VM.getVM().getAddressSize();
|
||||
if (type.equals("threads")) {
|
||||
Threads threads = VM.getVM().getThreads();
|
||||
for (JavaThread thread = threads.first(); thread != null; thread = thread.next()) {
|
||||
Address base = thread.getBaseOfStackPointer();
|
||||
Address end = thread.getLastJavaSP();
|
||||
if (end == null) continue;
|
||||
if (end.lessThan(base)) {
|
||||
Address tmp = base;
|
||||
base = end;
|
||||
end = tmp;
|
||||
}
|
||||
out.println("Searching " + base + " " + end);
|
||||
while (base != null && base.lessThan(end)) {
|
||||
Address val = base.getAddressAt(0);
|
||||
if (AddressOps.equal(val, value)) {
|
||||
out.println(base);
|
||||
}
|
||||
base = base.addOffsetTo(stride);
|
||||
}
|
||||
return;
|
||||
}
|
||||
String type = t.nextToken();
|
||||
final Address value = VM.getVM().getDebugger().parseAddress(t.nextToken());
|
||||
final long stride = VM.getVM().getAddressSize();
|
||||
if (type.equals("threads")) {
|
||||
Threads threads = VM.getVM().getThreads();
|
||||
for (JavaThread thread = threads.first(); thread != null; thread = thread.next()) {
|
||||
Address base = thread.getBaseOfStackPointer();
|
||||
Address end = thread.getLastJavaSP();
|
||||
if (end == null) continue;
|
||||
if (end.lessThan(base)) {
|
||||
Address tmp = base;
|
||||
base = end;
|
||||
end = tmp;
|
||||
}
|
||||
out.println("Searching " + base + " " + end);
|
||||
while (base != null && base.lessThan(end)) {
|
||||
Address val = base.getAddressAt(0);
|
||||
if (AddressOps.equal(val, value)) {
|
||||
out.println(base);
|
||||
}
|
||||
base = base.addOffsetTo(stride);
|
||||
}
|
||||
} else if (type.equals("heap")) {
|
||||
RawHeapVisitor iterator = new RawHeapVisitor() {
|
||||
public void prologue(long used) {
|
||||
}
|
||||
|
||||
public void visitAddress(Address addr) {
|
||||
Address val = addr.getAddressAt(0);
|
||||
if (AddressOps.equal(val, value)) {
|
||||
out.println("found at " + addr);
|
||||
}
|
||||
}
|
||||
public void visitCompOopAddress(Address addr) {
|
||||
Address val = addr.getCompOopAddressAt(0);
|
||||
if (AddressOps.equal(val, value)) {
|
||||
out.println("found at " + addr);
|
||||
}
|
||||
}
|
||||
public void epilogue() {
|
||||
}
|
||||
};
|
||||
VM.getVM().getObjectHeap().iterateRaw(iterator);
|
||||
} else if (type.equals("codecache")) {
|
||||
CodeCacheVisitor v = new CodeCacheVisitor() {
|
||||
public void prologue(Address start, Address end) {
|
||||
}
|
||||
public void visit(CodeBlob blob) {
|
||||
boolean printed = false;
|
||||
Address base = blob.getAddress();
|
||||
Address end = base.addOffsetTo(blob.getSize());
|
||||
while (base != null && base.lessThan(end)) {
|
||||
Address val = base.getAddressAt(0);
|
||||
if (AddressOps.equal(val, value)) {
|
||||
if (!printed) {
|
||||
printed = true;
|
||||
blob.printOn(out);
|
||||
}
|
||||
out.println("found at " + base + "\n");
|
||||
}
|
||||
base = base.addOffsetTo(stride);
|
||||
}
|
||||
}
|
||||
public void epilogue() {
|
||||
}
|
||||
|
||||
|
||||
};
|
||||
VM.getVM().getCodeCache().iterate(v);
|
||||
|
||||
}
|
||||
} else if (type.equals("rawheap")) {
|
||||
RawHeapVisitor iterator = new RawHeapVisitor() {
|
||||
public void prologue(long used) {
|
||||
}
|
||||
|
||||
public void visitAddress(Address addr) {
|
||||
Address val = addr.getAddressAt(0);
|
||||
if (AddressOps.equal(val, value)) {
|
||||
out.println("found at " + addr);
|
||||
}
|
||||
}
|
||||
public void visitCompOopAddress(Address addr) {
|
||||
Address val = addr.getCompOopAddressAt(0);
|
||||
if (AddressOps.equal(val, value)) {
|
||||
out.println("found at " + addr);
|
||||
}
|
||||
}
|
||||
public void epilogue() {
|
||||
}
|
||||
};
|
||||
VM.getVM().getObjectHeap().iterateRaw(iterator);
|
||||
} else if (type.equals("heap") || type.equals("perm")) {
|
||||
HeapVisitor iterator = new DefaultHeapVisitor() {
|
||||
public boolean doObj(Oop obj) {
|
||||
int index = 0;
|
||||
Address start = obj.getHandle();
|
||||
long end = obj.getObjectSize();
|
||||
while (index < end) {
|
||||
Address val = start.getAddressAt(index);
|
||||
if (AddressOps.equal(val, value)) {
|
||||
out.println("found in " + obj.getHandle());
|
||||
break;
|
||||
}
|
||||
index += 4;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
};
|
||||
if (type.equals("heap")) {
|
||||
VM.getVM().getObjectHeap().iterate(iterator);
|
||||
} else {
|
||||
VM.getVM().getObjectHeap().iteratePerm(iterator);
|
||||
}
|
||||
} else if (type.equals("codecache")) {
|
||||
CodeCacheVisitor v = new CodeCacheVisitor() {
|
||||
public void prologue(Address start, Address end) {
|
||||
}
|
||||
public void visit(CodeBlob blob) {
|
||||
boolean printed = false;
|
||||
Address base = blob.getAddress();
|
||||
Address end = base.addOffsetTo(blob.getSize());
|
||||
while (base != null && base.lessThan(end)) {
|
||||
Address val = base.getAddressAt(0);
|
||||
if (AddressOps.equal(val, value)) {
|
||||
if (!printed) {
|
||||
printed = true;
|
||||
blob.printOn(out);
|
||||
}
|
||||
out.println("found at " + base + "\n");
|
||||
}
|
||||
base = base.addOffsetTo(stride);
|
||||
}
|
||||
}
|
||||
public void epilogue() {
|
||||
}
|
||||
|
||||
|
||||
};
|
||||
VM.getVM().getCodeCache().iterate(v);
|
||||
|
||||
}
|
||||
}
|
||||
},
|
||||
@ -957,12 +1057,19 @@ public class CommandProcessor {
|
||||
Threads threads = VM.getVM().getThreads();
|
||||
boolean all = name.equals("-a");
|
||||
for (JavaThread thread = threads.first(); thread != null; thread = thread.next()) {
|
||||
StringWriter sw = new StringWriter();
|
||||
ByteArrayOutputStream bos = new ByteArrayOutputStream();
|
||||
thread.printThreadIDOn(new PrintStream(bos));
|
||||
if (all || bos.toString().equals(name)) {
|
||||
out.println(bos.toString() + " = " + thread.getAddress());
|
||||
HTMLGenerator gen = new HTMLGenerator(false);
|
||||
out.println(gen.genHTMLForJavaStackTrace(thread));
|
||||
try {
|
||||
out.println(gen.genHTMLForJavaStackTrace(thread));
|
||||
} catch (Exception e) {
|
||||
err.println("Error: " + e);
|
||||
if (verboseExceptions) {
|
||||
e.printStackTrace(err);
|
||||
}
|
||||
}
|
||||
if (!all) return;
|
||||
}
|
||||
}
|
||||
@ -970,6 +1077,26 @@ public class CommandProcessor {
|
||||
}
|
||||
}
|
||||
},
|
||||
new Command("thread", "thread { -a | id }", false) {
|
||||
public void doit(Tokens t) {
|
||||
if (t.countTokens() != 1) {
|
||||
usage();
|
||||
} else {
|
||||
String name = t.nextToken();
|
||||
Threads threads = VM.getVM().getThreads();
|
||||
boolean all = name.equals("-a");
|
||||
for (JavaThread thread = threads.first(); thread != null; thread = thread.next()) {
|
||||
ByteArrayOutputStream bos = new ByteArrayOutputStream();
|
||||
thread.printThreadIDOn(new PrintStream(bos));
|
||||
if (all || bos.toString().equals(name)) {
|
||||
out.println(bos.toString() + " = " + thread.getAddress());
|
||||
if (!all) return;
|
||||
}
|
||||
}
|
||||
out.println("Couldn't find thread " + name);
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
new Command("threads", false) {
|
||||
public void doit(Tokens t) {
|
||||
@ -1161,7 +1288,7 @@ public class CommandProcessor {
|
||||
}
|
||||
}
|
||||
|
||||
static Pattern historyPattern = Pattern.compile("((!\\*)|(!\\$)|(!!-?)|(!-?[0-9][0-9]*))");
|
||||
static Pattern historyPattern = Pattern.compile("((!\\*)|(!\\$)|(!!-?)|(!-?[0-9][0-9]*)|(![a-zA-Z][^ ]*))");
|
||||
|
||||
public void executeCommand(String ln) {
|
||||
if (ln.indexOf('!') != -1) {
|
||||
@ -1195,14 +1322,37 @@ public class CommandProcessor {
|
||||
result.append(item.at(item.countTokens() - 1));
|
||||
} else {
|
||||
String tail = cmd.substring(1);
|
||||
int index = Integer.parseInt(tail);
|
||||
if (index < 0) {
|
||||
index = history.size() + index;
|
||||
switch (tail.charAt(0)) {
|
||||
case '0':
|
||||
case '1':
|
||||
case '2':
|
||||
case '3':
|
||||
case '4':
|
||||
case '5':
|
||||
case '6':
|
||||
case '7':
|
||||
case '8':
|
||||
case '9':
|
||||
case '-': {
|
||||
int index = Integer.parseInt(tail);
|
||||
if (index < 0) {
|
||||
index = history.size() + index;
|
||||
}
|
||||
if (index > size) {
|
||||
err.println("No such history item");
|
||||
} else {
|
||||
result.append((String)history.get(index));
|
||||
}
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
for (int i = history.size() - 1; i >= 0; i--) {
|
||||
String s = (String)history.get(i);
|
||||
if (s.startsWith(tail)) {
|
||||
result.append(s);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (index > size) {
|
||||
err.println("No such history item");
|
||||
} else {
|
||||
result.append((String)history.get(index));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -985,6 +985,12 @@ public class HSDB implements ObjectHistogramPanel.Listener, SAListener {
|
||||
annoPanel.addAnnotation(new Annotation(curFrame.addressOfInterpreterFrameExpressionStack(),
|
||||
curFrame.addressOfInterpreterFrameTOS(),
|
||||
"Interpreter expression stack"));
|
||||
Address monBegin = curFrame.interpreterFrameMonitorBegin().address();
|
||||
Address monEnd = curFrame.interpreterFrameMonitorEnd().address();
|
||||
if (!monBegin.equals(monEnd)) {
|
||||
annoPanel.addAnnotation(new Annotation(monBegin, monEnd,
|
||||
"BasicObjectLocks"));
|
||||
}
|
||||
if (interpreterFrameMethod != null) {
|
||||
// The offset is just to get the right stack slots highlighted in the output
|
||||
int offset = 1;
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2001-2003 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -294,7 +294,7 @@ public class BugSpot extends JPanel {
|
||||
attachDialog.setSize(400, 300);
|
||||
GraphicsUtilities.centerInContainer(attachDialog.getComponent(),
|
||||
getParentDimension(attachDialog.getComponent()));
|
||||
attachDialog.show();
|
||||
attachDialog.setVisible(true);
|
||||
}
|
||||
|
||||
public void showThreadsDialog() {
|
||||
@ -321,7 +321,7 @@ public class BugSpot extends JPanel {
|
||||
getParentDimension(threadsDialog.getComponent()));
|
||||
GraphicsUtilities.centerInContainer(threadsDialog.getComponent(),
|
||||
getParentDimension(threadsDialog.getComponent()));
|
||||
threadsDialog.show();
|
||||
threadsDialog.setVisible(true);
|
||||
}
|
||||
|
||||
public void showMemoryDialog() {
|
||||
@ -341,7 +341,7 @@ public class BugSpot extends JPanel {
|
||||
getParentDimension(memoryDialog.getComponent()));
|
||||
GraphicsUtilities.centerInContainer(memoryDialog.getComponent(),
|
||||
getParentDimension(memoryDialog.getComponent()));
|
||||
memoryDialog.show();
|
||||
memoryDialog.setVisible(true);
|
||||
}
|
||||
|
||||
/** Changes the editor factory this debugger uses to display source
|
||||
@ -530,7 +530,7 @@ public class BugSpot extends JPanel {
|
||||
addFrame(stackFrame);
|
||||
stackFrame.setSize(400, 200);
|
||||
GraphicsUtilities.moveToInContainer(stackFrame.getComponent(), 0.0f, 1.0f, 0, 20);
|
||||
stackFrame.show();
|
||||
stackFrame.setVisible(true);
|
||||
|
||||
// Create register panel
|
||||
registerPanel = new RegisterPanel();
|
||||
@ -544,7 +544,7 @@ public class BugSpot extends JPanel {
|
||||
registerFrame.setSize(225, 200);
|
||||
GraphicsUtilities.moveToInContainer(registerFrame.getComponent(),
|
||||
1.0f, 0.0f, 0, 0);
|
||||
registerFrame.show();
|
||||
registerFrame.setVisible(true);
|
||||
|
||||
resetCurrentThread();
|
||||
} catch (DebuggerException e) {
|
||||
@ -979,7 +979,7 @@ public class BugSpot extends JPanel {
|
||||
1.0f,
|
||||
0.85f,
|
||||
getParentDimension(editorFrame.getComponent()));
|
||||
editorFrame.show();
|
||||
editorFrame.setVisible(true);
|
||||
shown = true;
|
||||
}
|
||||
code.showLineNumber(lineNo);
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2002 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 2002-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -96,10 +96,6 @@ public class BytecodeDisassembler {
|
||||
addBytecodeClass(Bytecodes._dstore, BytecodeStore.class);
|
||||
addBytecodeClass(Bytecodes._astore, BytecodeStore.class);
|
||||
addBytecodeClass(Bytecodes._tableswitch, BytecodeTableswitch.class);
|
||||
|
||||
// only special fast_xxx cases. others are handled differently.
|
||||
addBytecodeClass(Bytecodes._fast_iaccess_0, BytecodeFastAAccess0.class);
|
||||
addBytecodeClass(Bytecodes._fast_aaccess_0, BytecodeFastIAccess0.class);
|
||||
}
|
||||
|
||||
public BytecodeDisassembler(Method method) {
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -263,11 +263,12 @@ public class ConstantPool extends Oop implements ClassConstants {
|
||||
case JVM_CONSTANT_NameAndType: return "JVM_CONSTANT_NameAndType";
|
||||
case JVM_CONSTANT_Invalid: return "JVM_CONSTANT_Invalid";
|
||||
case JVM_CONSTANT_UnresolvedClass: return "JVM_CONSTANT_UnresolvedClass";
|
||||
case JVM_CONSTANT_UnresolvedClassInError: return "JVM_CONSTANT_UnresolvedClassInError";
|
||||
case JVM_CONSTANT_ClassIndex: return "JVM_CONSTANT_ClassIndex";
|
||||
case JVM_CONSTANT_UnresolvedString: return "JVM_CONSTANT_UnresolvedString";
|
||||
case JVM_CONSTANT_StringIndex: return "JVM_CONSTANT_StringIndex";
|
||||
}
|
||||
throw new InternalError("unknown tag");
|
||||
throw new InternalError("Unknown tag: " + tag);
|
||||
}
|
||||
|
||||
public void iterateFields(OopVisitor visitor, boolean doVMFields) {
|
||||
@ -304,6 +305,7 @@ public class ConstantPool extends Oop implements ClassConstants {
|
||||
index++;
|
||||
break;
|
||||
|
||||
case JVM_CONSTANT_UnresolvedClassInError:
|
||||
case JVM_CONSTANT_UnresolvedClass:
|
||||
case JVM_CONSTANT_Class:
|
||||
case JVM_CONSTANT_UnresolvedString:
|
||||
@ -409,6 +411,7 @@ public class ConstantPool extends Oop implements ClassConstants {
|
||||
}
|
||||
|
||||
// case JVM_CONSTANT_ClassIndex:
|
||||
case JVM_CONSTANT_UnresolvedClassInError:
|
||||
case JVM_CONSTANT_UnresolvedClass: {
|
||||
dos.writeByte(JVM_CONSTANT_Class);
|
||||
String klassName = getSymbolAt(ci).asString();
|
||||
@ -464,6 +467,8 @@ public class ConstantPool extends Oop implements ClassConstants {
|
||||
+ ", type = " + signatureIndex);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
throw new InternalError("unknown tag: " + cpConstType);
|
||||
} // switch
|
||||
}
|
||||
dos.flush();
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2002-2009 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 2002-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -58,6 +58,9 @@ public interface ClassConstants
|
||||
// Temporary tag while constructing constant pool
|
||||
public static final int JVM_CONSTANT_StringIndex = 103;
|
||||
|
||||
// Temporary tag while constructing constant pool
|
||||
public static final int JVM_CONSTANT_UnresolvedClassInError = 104;
|
||||
|
||||
// 1.5 major/minor version numbers from JVM spec. 3rd edition
|
||||
public static final short MAJOR_VERSION = 49;
|
||||
public static final short MINOR_VERSION = 0;
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2001 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -108,7 +108,7 @@ public abstract class SignatureIterator {
|
||||
return BasicTypeSize.getTArraySize();
|
||||
}
|
||||
}
|
||||
throw new RuntimeException("Should not reach here");
|
||||
throw new RuntimeException("Should not reach here: char " + (char)_signature.getByteAt(_index) + " @ " + _index + " in " + _signature.asString());
|
||||
}
|
||||
protected void checkSignatureEnd() {
|
||||
if (_index < _signature.getLength()) {
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2002-2009 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 2002-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -238,6 +238,7 @@ public class ClassWriter implements /* imports */ ClassConstants
|
||||
}
|
||||
|
||||
// case JVM_CONSTANT_ClassIndex:
|
||||
case JVM_CONSTANT_UnresolvedClassInError:
|
||||
case JVM_CONSTANT_UnresolvedClass: {
|
||||
dos.writeByte(JVM_CONSTANT_Class);
|
||||
String klassName = cpool.getSymbolAt(ci).asString();
|
||||
@ -296,6 +297,8 @@ public class ClassWriter implements /* imports */ ClassConstants
|
||||
+ ", type = " + signatureIndex);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
throw new InternalError("Unknown tag: " + cpConstType);
|
||||
} // switch
|
||||
}
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2001 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -39,7 +39,6 @@ public interface FrameWrapper {
|
||||
public void setVisible(boolean visible);
|
||||
public void setSize(int x, int y);
|
||||
public void pack();
|
||||
public void show();
|
||||
public void dispose();
|
||||
public void setBackground(Color color);
|
||||
public void setResizable(boolean resizable);
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2002-2008 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 2002-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -95,8 +95,10 @@ public class HTMLGenerator implements /* imports */ ClassConstants {
|
||||
|
||||
// list tags
|
||||
void beginList() { beginTag("ul"); nl(); }
|
||||
void li(String s) { wrap("li", s); nl(); }
|
||||
void endList() { endTag("ul"); nl(); }
|
||||
void beginListItem() { beginTag("li"); }
|
||||
void endListItem() { endTag("li"); nl(); }
|
||||
void li(String s) { wrap("li", s); nl(); }
|
||||
|
||||
// table tags
|
||||
void beginTable(int border) {
|
||||
@ -505,6 +507,11 @@ public class HTMLGenerator implements /* imports */ ClassConstants {
|
||||
buf.cell(cpool.getSymbolAt(index).asString());
|
||||
break;
|
||||
|
||||
case JVM_CONSTANT_UnresolvedClassInError:
|
||||
buf.cell("JVM_CONSTANT_UnresolvedClassInError");
|
||||
buf.cell(cpool.getSymbolAt(index).asString());
|
||||
break;
|
||||
|
||||
case JVM_CONSTANT_Class:
|
||||
buf.cell("JVM_CONSTANT_Class");
|
||||
Klass klass = (Klass) cpool.getObjAt(index);
|
||||
@ -564,6 +571,9 @@ public class HTMLGenerator implements /* imports */ ClassConstants {
|
||||
buf.cell("JVM_CONSTANT_StringIndex");
|
||||
buf.cell(Integer.toString(cpool.getIntAt(index)));
|
||||
break;
|
||||
|
||||
default:
|
||||
throw new InternalError("unknown tag: " + ctag);
|
||||
}
|
||||
|
||||
buf.endTag("tr");
|
||||
@ -671,7 +681,16 @@ public class HTMLGenerator implements /* imports */ ClassConstants {
|
||||
buf.cell(Integer.toString(curBci) + spaces);
|
||||
|
||||
buf.beginTag("td");
|
||||
String instrStr = escapeHTMLSpecialChars(instr.toString());
|
||||
String instrStr = null;
|
||||
try {
|
||||
instrStr = escapeHTMLSpecialChars(instr.toString());
|
||||
} catch (RuntimeException re) {
|
||||
buf.append("exception during bytecode processing");
|
||||
buf.endTag("td");
|
||||
buf.endTag("tr");
|
||||
re.printStackTrace();
|
||||
return;
|
||||
}
|
||||
|
||||
if (instr instanceof BytecodeNew) {
|
||||
BytecodeNew newBytecode = (BytecodeNew) instr;
|
||||
@ -1396,9 +1415,7 @@ public class HTMLGenerator implements /* imports */ ClassConstants {
|
||||
final SymbolFinder symFinder = createSymbolFinder();
|
||||
final Disassembler disasm = createDisassembler(startPc, code);
|
||||
class NMethodVisitor implements InstructionVisitor {
|
||||
boolean prevWasCall;
|
||||
public void prologue() {
|
||||
prevWasCall = false;
|
||||
}
|
||||
|
||||
public void visit(long currentPc, Instruction instr) {
|
||||
@ -1418,8 +1435,7 @@ public class HTMLGenerator implements /* imports */ ClassConstants {
|
||||
|
||||
PCDesc pcDesc = (PCDesc) safepoints.get(longToAddress(currentPc));
|
||||
|
||||
boolean isSafepoint = (pcDesc != null);
|
||||
if (isSafepoint && prevWasCall) {
|
||||
if (pcDesc != null) {
|
||||
buf.append(genSafepointInfo(nmethod, pcDesc));
|
||||
}
|
||||
|
||||
@ -1435,11 +1451,6 @@ public class HTMLGenerator implements /* imports */ ClassConstants {
|
||||
}
|
||||
|
||||
buf.br();
|
||||
if (isSafepoint && !prevWasCall) {
|
||||
buf.append(genSafepointInfo(nmethod, pcDesc));
|
||||
}
|
||||
|
||||
prevWasCall = instr.isCall();
|
||||
}
|
||||
|
||||
public void epilogue() {
|
||||
@ -1783,22 +1794,20 @@ public class HTMLGenerator implements /* imports */ ClassConstants {
|
||||
buf.h3("Fields");
|
||||
buf.beginList();
|
||||
for (int f = 0; f < numFields; f += InstanceKlass.NEXT_OFFSET) {
|
||||
int nameIndex = fields.getShortAt(f + InstanceKlass.NAME_INDEX_OFFSET);
|
||||
int sigIndex = fields.getShortAt(f + InstanceKlass.SIGNATURE_INDEX_OFFSET);
|
||||
int genSigIndex = fields.getShortAt(f + InstanceKlass.GENERIC_SIGNATURE_INDEX_OFFSET);
|
||||
Symbol f_name = cp.getSymbolAt(nameIndex);
|
||||
Symbol f_sig = cp.getSymbolAt(sigIndex);
|
||||
Symbol f_genSig = (genSigIndex != 0)? cp.getSymbolAt(genSigIndex) : null;
|
||||
AccessFlags acc = new AccessFlags(fields.getShortAt(f + InstanceKlass.ACCESS_FLAGS_OFFSET));
|
||||
sun.jvm.hotspot.oops.Field field = klass.getFieldByIndex(f);
|
||||
String f_name = ((NamedFieldIdentifier)field.getID()).getName();
|
||||
Symbol f_sig = field.getSignature();
|
||||
Symbol f_genSig = field.getGenericSignature();
|
||||
AccessFlags acc = field.getAccessFlagsObj();
|
||||
|
||||
buf.beginTag("li");
|
||||
buf.beginListItem();
|
||||
buf.append(genFieldModifierString(acc));
|
||||
buf.append(' ');
|
||||
Formatter sigBuf = new Formatter(genHTML);
|
||||
new SignatureConverter(f_sig, sigBuf.getBuffer()).dispatchField();
|
||||
buf.append(sigBuf.toString().replace('/', '.'));
|
||||
buf.append(' ');
|
||||
buf.append(f_name.asString());
|
||||
buf.append(f_name);
|
||||
buf.append(';');
|
||||
// is it generic?
|
||||
if (f_genSig != null) {
|
||||
@ -1806,7 +1815,8 @@ public class HTMLGenerator implements /* imports */ ClassConstants {
|
||||
buf.append(escapeHTMLSpecialChars(f_genSig.asString()));
|
||||
buf.append("] ");
|
||||
}
|
||||
buf.endTag("li");
|
||||
buf.append(" (offset = " + field.getOffset() + ")");
|
||||
buf.endListItem();
|
||||
}
|
||||
buf.endList();
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2000-2005 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -28,7 +28,7 @@ public class Assert {
|
||||
public static boolean ASSERTS_ENABLED = true;
|
||||
|
||||
public static void that(boolean test, String message) {
|
||||
if (!test) {
|
||||
if (ASSERTS_ENABLED && !test) {
|
||||
throw new AssertionFailure(message);
|
||||
}
|
||||
}
|
||||
|
@ -28,5 +28,5 @@
|
||||
|
||||
# Don't put quotes (fail windows build).
|
||||
HOTSPOT_VM_DISTRO=Java HotSpot(TM)
|
||||
COMPANY_NAME=Sun Microsystems, Inc.
|
||||
COMPANY_NAME=Oracle Corporation
|
||||
PRODUCT_NAME=Java(TM) Platform SE
|
||||
|
@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2010
|
||||
|
||||
HS_MAJOR_VER=18
|
||||
HS_MINOR_VER=0
|
||||
HS_BUILD_NUMBER=02
|
||||
HS_BUILD_NUMBER=04
|
||||
|
||||
JDK_MAJOR_VER=1
|
||||
JDK_MINOR_VER=7
|
||||
|
@ -51,6 +51,8 @@ jprt.my.solaris.sparc.jdk6=solaris_sparc_5.8
|
||||
jprt.my.solaris.sparc.jdk6perf=solaris_sparc_5.8
|
||||
jprt.my.solaris.sparc.jdk6u10=solaris_sparc_5.8
|
||||
jprt.my.solaris.sparc.jdk6u14=solaris_sparc_5.8
|
||||
jprt.my.solaris.sparc.jdk6u18=solaris_sparc_5.8
|
||||
jprt.my.solaris.sparc.jdk6u20=solaris_sparc_5.8
|
||||
jprt.my.solaris.sparc=${jprt.my.solaris.sparc.${jprt.tools.default.release}}
|
||||
|
||||
jprt.my.solaris.sparcv9.jdk7=solaris_sparcv9_5.10
|
||||
@ -58,6 +60,8 @@ jprt.my.solaris.sparcv9.jdk6=solaris_sparcv9_5.8
|
||||
jprt.my.solaris.sparcv9.jdk6perf=solaris_sparcv9_5.8
|
||||
jprt.my.solaris.sparcv9.jdk6u10=solaris_sparcv9_5.8
|
||||
jprt.my.solaris.sparcv9.jdk6u14=solaris_sparcv9_5.8
|
||||
jprt.my.solaris.sparcv9.jdk6u18=solaris_sparcv9_5.8
|
||||
jprt.my.solaris.sparcv9.jdk6u20=solaris_sparcv9_5.8
|
||||
jprt.my.solaris.sparcv9=${jprt.my.solaris.sparcv9.${jprt.tools.default.release}}
|
||||
|
||||
jprt.my.solaris.i586.jdk7=solaris_i586_5.10
|
||||
@ -65,6 +69,8 @@ jprt.my.solaris.i586.jdk6=solaris_i586_5.8
|
||||
jprt.my.solaris.i586.jdk6perf=solaris_i586_5.8
|
||||
jprt.my.solaris.i586.jdk6u10=solaris_i586_5.8
|
||||
jprt.my.solaris.i586.jdk6u14=solaris_i586_5.8
|
||||
jprt.my.solaris.i586.jdk6u18=solaris_i586_5.8
|
||||
jprt.my.solaris.i586.jdk6u20=solaris_i586_5.8
|
||||
jprt.my.solaris.i586=${jprt.my.solaris.i586.${jprt.tools.default.release}}
|
||||
|
||||
jprt.my.solaris.x64.jdk7=solaris_x64_5.10
|
||||
@ -72,6 +78,8 @@ jprt.my.solaris.x64.jdk6=solaris_x64_5.10
|
||||
jprt.my.solaris.x64.jdk6perf=solaris_x64_5.10
|
||||
jprt.my.solaris.x64.jdk6u10=solaris_x64_5.10
|
||||
jprt.my.solaris.x64.jdk6u14=solaris_x64_5.10
|
||||
jprt.my.solaris.x64.jdk6u18=solaris_x64_5.10
|
||||
jprt.my.solaris.x64.jdk6u20=solaris_x64_5.10
|
||||
jprt.my.solaris.x64=${jprt.my.solaris.x64.${jprt.tools.default.release}}
|
||||
|
||||
jprt.my.linux.i586.jdk7=linux_i586_2.6
|
||||
@ -79,6 +87,8 @@ jprt.my.linux.i586.jdk6=linux_i586_2.4
|
||||
jprt.my.linux.i586.jdk6perf=linux_i586_2.4
|
||||
jprt.my.linux.i586.jdk6u10=linux_i586_2.4
|
||||
jprt.my.linux.i586.jdk6u14=linux_i586_2.4
|
||||
jprt.my.linux.i586.jdk6u18=linux_i586_2.4
|
||||
jprt.my.linux.i586.jdk6u20=linux_i586_2.4
|
||||
jprt.my.linux.i586=${jprt.my.linux.i586.${jprt.tools.default.release}}
|
||||
|
||||
jprt.my.linux.x64.jdk7=linux_x64_2.6
|
||||
@ -86,6 +96,8 @@ jprt.my.linux.x64.jdk6=linux_x64_2.4
|
||||
jprt.my.linux.x64.jdk6perf=linux_x64_2.4
|
||||
jprt.my.linux.x64.jdk6u10=linux_x64_2.4
|
||||
jprt.my.linux.x64.jdk6u14=linux_x64_2.4
|
||||
jprt.my.linux.x64.jdk6u18=linux_x64_2.4
|
||||
jprt.my.linux.x64.jdk6u20=linux_x64_2.4
|
||||
jprt.my.linux.x64=${jprt.my.linux.x64.${jprt.tools.default.release}}
|
||||
|
||||
jprt.my.windows.i586.jdk7=windows_i586_5.0
|
||||
@ -93,6 +105,8 @@ jprt.my.windows.i586.jdk6=windows_i586_5.0
|
||||
jprt.my.windows.i586.jdk6perf=windows_i586_5.0
|
||||
jprt.my.windows.i586.jdk6u10=windows_i586_5.0
|
||||
jprt.my.windows.i586.jdk6u14=windows_i586_5.0
|
||||
jprt.my.windows.i586.jdk6u18=windows_i586_5.0
|
||||
jprt.my.windows.i586.jdk6u20=windows_i586_5.0
|
||||
jprt.my.windows.i586=${jprt.my.windows.i586.${jprt.tools.default.release}}
|
||||
|
||||
jprt.my.windows.x64.jdk7=windows_x64_5.2
|
||||
@ -100,6 +114,8 @@ jprt.my.windows.x64.jdk6=windows_x64_5.2
|
||||
jprt.my.windows.x64.jdk6perf=windows_x64_5.2
|
||||
jprt.my.windows.x64.jdk6u10=windows_x64_5.2
|
||||
jprt.my.windows.x64.jdk6u14=windows_x64_5.2
|
||||
jprt.my.windows.x64.jdk6u18=windows_x64_5.2
|
||||
jprt.my.windows.x64.jdk6u20=windows_x64_5.2
|
||||
jprt.my.windows.x64=${jprt.my.windows.x64.${jprt.tools.default.release}}
|
||||
|
||||
# Standard list of jprt build targets for this source tree
|
||||
|
@ -127,6 +127,9 @@ $(GENERATEDFILES): refresh_adfiles
|
||||
# Note that product files are updated via "mv", which is atomic.
|
||||
TEMPDIR := $(OUTDIR)/mktmp$(shell echo $$$$)
|
||||
|
||||
# Debuggable by default
|
||||
CFLAGS += -g
|
||||
|
||||
# Pass -D flags into ADLC.
|
||||
ADLCFLAGS += $(SYSDEFS)
|
||||
|
||||
@ -135,7 +138,7 @@ ADLCFLAGS += -q -T
|
||||
|
||||
# Normally, debugging is done directly on the ad_<arch>*.cpp files.
|
||||
# But -g will put #line directives in those files pointing back to <arch>.ad.
|
||||
#ADLCFLAGS += -g
|
||||
ADLCFLAGS += -g
|
||||
|
||||
ifdef LP64
|
||||
ADLCFLAGS += -D_LP64
|
||||
|
@ -147,6 +147,9 @@ $(GENERATEDFILES): refresh_adfiles
|
||||
# Note that product files are updated via "mv", which is atomic.
|
||||
TEMPDIR := $(OUTDIR)/mktmp$(shell echo $$$$)
|
||||
|
||||
# Debuggable by default
|
||||
CFLAGS += -g
|
||||
|
||||
# Pass -D flags into ADLC.
|
||||
ADLCFLAGS += $(SYSDEFS)
|
||||
|
||||
@ -155,7 +158,7 @@ ADLCFLAGS += -q -T
|
||||
|
||||
# Normally, debugging is done directly on the ad_<arch>*.cpp files.
|
||||
# But -g will put #line directives in those files pointing back to <arch>.ad.
|
||||
#ADLCFLAGS += -g
|
||||
ADLCFLAGS += -g
|
||||
|
||||
ifdef LP64
|
||||
ADLCFLAGS += -D_LP64
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -2333,6 +2333,18 @@ void MacroAssembler::lcmp( Register Ra, Register Rb, Register Rresult) {
|
||||
#endif
|
||||
|
||||
|
||||
void MacroAssembler::load_sized_value(Address src, Register dst,
|
||||
size_t size_in_bytes, bool is_signed) {
|
||||
switch (size_in_bytes) {
|
||||
case 8: ldx(src, dst); break;
|
||||
case 4: ld( src, dst); break;
|
||||
case 2: is_signed ? ldsh(src, dst) : lduh(src, dst); break;
|
||||
case 1: is_signed ? ldsb(src, dst) : ldub(src, dst); break;
|
||||
default: ShouldNotReachHere();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::float_cmp( bool is_float, int unordered_result,
|
||||
FloatRegister Fa, FloatRegister Fb,
|
||||
Register Rresult) {
|
||||
@ -2625,40 +2637,103 @@ RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_ad
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::regcon_inc_ptr( RegisterOrConstant& dest, RegisterOrConstant src, Register temp ) {
|
||||
assert(dest.register_or_noreg() != G0, "lost side effect");
|
||||
if ((src.is_constant() && src.as_constant() == 0) ||
|
||||
(src.is_register() && src.as_register() == G0)) {
|
||||
// do nothing
|
||||
} else if (dest.is_register()) {
|
||||
add(dest.as_register(), ensure_simm13_or_reg(src, temp), dest.as_register());
|
||||
} else if (src.is_constant()) {
|
||||
intptr_t res = dest.as_constant() + src.as_constant();
|
||||
dest = RegisterOrConstant(res); // side effect seen by caller
|
||||
RegisterOrConstant MacroAssembler::regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
|
||||
assert(d.register_or_noreg() != G0, "lost side effect");
|
||||
if ((s2.is_constant() && s2.as_constant() == 0) ||
|
||||
(s2.is_register() && s2.as_register() == G0)) {
|
||||
// Do nothing, just move value.
|
||||
if (s1.is_register()) {
|
||||
if (d.is_constant()) d = temp;
|
||||
mov(s1.as_register(), d.as_register());
|
||||
return d;
|
||||
} else {
|
||||
return s1;
|
||||
}
|
||||
}
|
||||
|
||||
if (s1.is_register()) {
|
||||
assert_different_registers(s1.as_register(), temp);
|
||||
if (d.is_constant()) d = temp;
|
||||
andn(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
|
||||
return d;
|
||||
} else {
|
||||
assert(temp != noreg, "cannot handle constant += register");
|
||||
add(src.as_register(), ensure_simm13_or_reg(dest, temp), temp);
|
||||
dest = RegisterOrConstant(temp); // side effect seen by caller
|
||||
if (s2.is_register()) {
|
||||
assert_different_registers(s2.as_register(), temp);
|
||||
if (d.is_constant()) d = temp;
|
||||
set(s1.as_constant(), temp);
|
||||
andn(temp, s2.as_register(), d.as_register());
|
||||
return d;
|
||||
} else {
|
||||
intptr_t res = s1.as_constant() & ~s2.as_constant();
|
||||
return res;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::regcon_sll_ptr( RegisterOrConstant& dest, RegisterOrConstant src, Register temp ) {
|
||||
assert(dest.register_or_noreg() != G0, "lost side effect");
|
||||
if (!is_simm13(src.constant_or_zero()))
|
||||
src = (src.as_constant() & 0xFF);
|
||||
if ((src.is_constant() && src.as_constant() == 0) ||
|
||||
(src.is_register() && src.as_register() == G0)) {
|
||||
// do nothing
|
||||
} else if (dest.is_register()) {
|
||||
sll_ptr(dest.as_register(), src, dest.as_register());
|
||||
} else if (src.is_constant()) {
|
||||
intptr_t res = dest.as_constant() << src.as_constant();
|
||||
dest = RegisterOrConstant(res); // side effect seen by caller
|
||||
RegisterOrConstant MacroAssembler::regcon_inc_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
|
||||
assert(d.register_or_noreg() != G0, "lost side effect");
|
||||
if ((s2.is_constant() && s2.as_constant() == 0) ||
|
||||
(s2.is_register() && s2.as_register() == G0)) {
|
||||
// Do nothing, just move value.
|
||||
if (s1.is_register()) {
|
||||
if (d.is_constant()) d = temp;
|
||||
mov(s1.as_register(), d.as_register());
|
||||
return d;
|
||||
} else {
|
||||
return s1;
|
||||
}
|
||||
}
|
||||
|
||||
if (s1.is_register()) {
|
||||
assert_different_registers(s1.as_register(), temp);
|
||||
if (d.is_constant()) d = temp;
|
||||
add(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
|
||||
return d;
|
||||
} else {
|
||||
assert(temp != noreg, "cannot handle constant <<= register");
|
||||
set(dest.as_constant(), temp);
|
||||
sll_ptr(temp, src, temp);
|
||||
dest = RegisterOrConstant(temp); // side effect seen by caller
|
||||
if (s2.is_register()) {
|
||||
assert_different_registers(s2.as_register(), temp);
|
||||
if (d.is_constant()) d = temp;
|
||||
add(s2.as_register(), ensure_simm13_or_reg(s1, temp), d.as_register());
|
||||
return d;
|
||||
} else {
|
||||
intptr_t res = s1.as_constant() + s2.as_constant();
|
||||
return res;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
RegisterOrConstant MacroAssembler::regcon_sll_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
|
||||
assert(d.register_or_noreg() != G0, "lost side effect");
|
||||
if (!is_simm13(s2.constant_or_zero()))
|
||||
s2 = (s2.as_constant() & 0xFF);
|
||||
if ((s2.is_constant() && s2.as_constant() == 0) ||
|
||||
(s2.is_register() && s2.as_register() == G0)) {
|
||||
// Do nothing, just move value.
|
||||
if (s1.is_register()) {
|
||||
if (d.is_constant()) d = temp;
|
||||
mov(s1.as_register(), d.as_register());
|
||||
return d;
|
||||
} else {
|
||||
return s1;
|
||||
}
|
||||
}
|
||||
|
||||
if (s1.is_register()) {
|
||||
assert_different_registers(s1.as_register(), temp);
|
||||
if (d.is_constant()) d = temp;
|
||||
sll_ptr(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
|
||||
return d;
|
||||
} else {
|
||||
if (s2.is_register()) {
|
||||
assert_different_registers(s2.as_register(), temp);
|
||||
if (d.is_constant()) d = temp;
|
||||
set(s1.as_constant(), temp);
|
||||
sll_ptr(temp, s2.as_register(), d.as_register());
|
||||
return d;
|
||||
} else {
|
||||
intptr_t res = s1.as_constant() << s2.as_constant();
|
||||
return res;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -2708,8 +2783,8 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
|
||||
|
||||
// Adjust recv_klass by scaled itable_index, so we can free itable_index.
|
||||
RegisterOrConstant itable_offset = itable_index;
|
||||
regcon_sll_ptr(itable_offset, exact_log2(itableMethodEntry::size() * wordSize));
|
||||
regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes());
|
||||
itable_offset = regcon_sll_ptr(itable_index, exact_log2(itableMethodEntry::size() * wordSize), itable_offset);
|
||||
itable_offset = regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes(), itable_offset);
|
||||
add(recv_klass, ensure_simm13_or_reg(itable_offset, sethi_temp), recv_klass);
|
||||
|
||||
// for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
|
||||
@ -2805,7 +2880,7 @@ void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
|
||||
|
||||
assert_different_registers(sub_klass, super_klass, temp_reg);
|
||||
if (super_check_offset.is_register()) {
|
||||
assert_different_registers(sub_klass, super_klass,
|
||||
assert_different_registers(sub_klass, super_klass, temp_reg,
|
||||
super_check_offset.as_register());
|
||||
} else if (must_load_sco) {
|
||||
assert(temp2_reg != noreg, "supply either a temp or a register offset");
|
||||
@ -2855,6 +2930,8 @@ void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
|
||||
// The super check offset is always positive...
|
||||
lduw(super_klass, sco_offset, temp2_reg);
|
||||
super_check_offset = RegisterOrConstant(temp2_reg);
|
||||
// super_check_offset is register.
|
||||
assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset.as_register());
|
||||
}
|
||||
ld_ptr(sub_klass, super_check_offset, temp_reg);
|
||||
cmp(super_klass, temp_reg);
|
||||
@ -3014,11 +3091,10 @@ void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_reg,
|
||||
Register temp_reg,
|
||||
Label& wrong_method_type) {
|
||||
if (UseCompressedOops) unimplemented("coop"); // field accesses must decode
|
||||
assert_different_registers(mtype_reg, mh_reg, temp_reg);
|
||||
// compare method type against that of the receiver
|
||||
RegisterOrConstant mhtype_offset = delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg);
|
||||
@ -3029,10 +3105,33 @@ void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_re
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::jump_to_method_handle_entry(Register mh_reg, Register temp_reg) {
|
||||
// A method handle has a "vmslots" field which gives the size of its
|
||||
// argument list in JVM stack slots. This field is either located directly
|
||||
// in every method handle, or else is indirectly accessed through the
|
||||
// method handle's MethodType. This macro hides the distinction.
|
||||
void MacroAssembler::load_method_handle_vmslots(Register vmslots_reg, Register mh_reg,
|
||||
Register temp_reg) {
|
||||
assert_different_registers(vmslots_reg, mh_reg, temp_reg);
|
||||
if (UseCompressedOops) unimplemented("coop"); // field accesses must decode
|
||||
// load mh.type.form.vmslots
|
||||
if (java_dyn_MethodHandle::vmslots_offset_in_bytes() != 0) {
|
||||
// hoist vmslots into every mh to avoid dependent load chain
|
||||
ld( Address(mh_reg, delayed_value(java_dyn_MethodHandle::vmslots_offset_in_bytes, temp_reg)), vmslots_reg);
|
||||
} else {
|
||||
Register temp2_reg = vmslots_reg;
|
||||
ld_ptr(Address(mh_reg, delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg)), temp2_reg);
|
||||
ld_ptr(Address(temp2_reg, delayed_value(java_dyn_MethodType::form_offset_in_bytes, temp_reg)), temp2_reg);
|
||||
ld( Address(temp2_reg, delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, temp_reg)), vmslots_reg);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::jump_to_method_handle_entry(Register mh_reg, Register temp_reg, bool emit_delayed_nop) {
|
||||
assert(mh_reg == G3_method_handle, "caller must put MH object in G3");
|
||||
assert_different_registers(mh_reg, temp_reg);
|
||||
|
||||
if (UseCompressedOops) unimplemented("coop"); // field accesses must decode
|
||||
|
||||
// pick out the interpreted side of the handler
|
||||
ld_ptr(mh_reg, delayed_value(java_dyn_MethodHandle::vmentry_offset_in_bytes, temp_reg), temp_reg);
|
||||
|
||||
@ -3043,17 +3142,18 @@ void MacroAssembler::jump_to_method_handle_entry(Register mh_reg, Register temp_
|
||||
// for the various stubs which take control at this point,
|
||||
// see MethodHandles::generate_method_handle_stub
|
||||
|
||||
// (Can any caller use this delay slot? If so, add an option for supression.)
|
||||
delayed()->nop();
|
||||
// Some callers can fill the delay slot.
|
||||
if (emit_delayed_nop) {
|
||||
delayed()->nop();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot,
|
||||
int extra_slot_offset) {
|
||||
// cf. TemplateTable::prepare_invoke(), if (load_receiver).
|
||||
int stackElementSize = Interpreter::stackElementWords() * wordSize;
|
||||
int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
|
||||
int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
|
||||
assert(offset1 - offset == stackElementSize, "correct arithmetic");
|
||||
int stackElementSize = Interpreter::stackElementSize;
|
||||
int offset = extra_slot_offset * stackElementSize;
|
||||
if (arg_slot.is_constant()) {
|
||||
offset += arg_slot.as_constant() * stackElementSize;
|
||||
return offset;
|
||||
@ -3067,6 +3167,11 @@ RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot,
|
||||
}
|
||||
|
||||
|
||||
Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
|
||||
int extra_slot_offset) {
|
||||
return Address(Gargs, argument_offset(arg_slot, extra_slot_offset));
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
|
||||
Register temp_reg,
|
||||
@ -4082,7 +4187,7 @@ static int EnqueueCodeSize = 128 DEBUG_ONLY( + 256); // Instructions?
|
||||
// make it work.
|
||||
static void check_index(int ind) {
|
||||
assert(0 <= ind && ind <= 64*K && ((ind % oopSize) == 0),
|
||||
"Invariants.")
|
||||
"Invariants.");
|
||||
}
|
||||
|
||||
static void generate_satb_log_enqueue(bool with_frame) {
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -661,9 +661,6 @@ class Assembler : public AbstractAssembler {
|
||||
stx_op3 = 0x0e,
|
||||
swap_op3 = 0x0f,
|
||||
|
||||
lduwa_op3 = 0x10,
|
||||
ldxa_op3 = 0x1b,
|
||||
|
||||
stwa_op3 = 0x14,
|
||||
stxa_op3 = 0x1e,
|
||||
|
||||
@ -1065,7 +1062,7 @@ class Assembler : public AbstractAssembler {
|
||||
}
|
||||
void assert_not_delayed(const char* msg) {
|
||||
#ifdef CHECK_DELAY
|
||||
assert_msg ( delay_state == no_delay, msg);
|
||||
assert(delay_state == no_delay, msg);
|
||||
#endif
|
||||
}
|
||||
|
||||
@ -1383,24 +1380,25 @@ public:
|
||||
|
||||
// pp 181
|
||||
|
||||
void and3( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3 ) | rs1(s1) | rs2(s2) ); }
|
||||
void and3( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
|
||||
void and3( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3 ) | rs1(s1) | rs2(s2) ); }
|
||||
void and3( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
|
||||
void andcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
|
||||
void andcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
|
||||
void andn( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3 ) | rs1(s1) | rs2(s2) ); }
|
||||
void andn( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
|
||||
void andn( Register s1, RegisterOrConstant s2, Register d);
|
||||
void andncc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
|
||||
void andncc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
|
||||
void or3( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3 ) | rs1(s1) | rs2(s2) ); }
|
||||
void or3( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
|
||||
void or3( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3 ) | rs1(s1) | rs2(s2) ); }
|
||||
void or3( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
|
||||
void orcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
|
||||
void orcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
|
||||
void orn( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3) | rs1(s1) | rs2(s2) ); }
|
||||
void orn( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
|
||||
void orncc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
|
||||
void orncc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
|
||||
void xor3( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3 ) | rs1(s1) | rs2(s2) ); }
|
||||
void xor3( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
|
||||
void xor3( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3 ) | rs1(s1) | rs2(s2) ); }
|
||||
void xor3( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
|
||||
void xorcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
|
||||
void xorcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
|
||||
void xnor( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xnor_op3 ) | rs1(s1) | rs2(s2) ); }
|
||||
@ -2029,8 +2027,8 @@ public:
|
||||
inline void st_ptr(Register d, Register s1, ByteSize simm13a);
|
||||
#endif
|
||||
|
||||
// ld_long will perform ld for 32 bit VM's and ldx for 64 bit VM's
|
||||
// st_long will perform st for 32 bit VM's and stx for 64 bit VM's
|
||||
// ld_long will perform ldd for 32 bit VM's and ldx for 64 bit VM's
|
||||
// st_long will perform std for 32 bit VM's and stx for 64 bit VM's
|
||||
inline void ld_long(Register s1, Register s2, Register d);
|
||||
inline void ld_long(Register s1, int simm13a, Register d);
|
||||
inline void ld_long(Register s1, RegisterOrConstant s2, Register d);
|
||||
@ -2041,23 +2039,19 @@ public:
|
||||
inline void st_long(Register d, const Address& a, int offset = 0);
|
||||
|
||||
// Helpers for address formation.
|
||||
// They update the dest in place, whether it is a register or constant.
|
||||
// They emit no code at all if src is a constant zero.
|
||||
// If dest is a constant and src is a register, the temp argument
|
||||
// is required, and becomes the result.
|
||||
// If dest is a register and src is a non-simm13 constant,
|
||||
// the temp argument is required, and is used to materialize the constant.
|
||||
void regcon_inc_ptr( RegisterOrConstant& dest, RegisterOrConstant src,
|
||||
Register temp = noreg );
|
||||
void regcon_sll_ptr( RegisterOrConstant& dest, RegisterOrConstant src,
|
||||
Register temp = noreg );
|
||||
// - They emit only a move if s2 is a constant zero.
|
||||
// - If dest is a constant and either s1 or s2 is a register, the temp argument is required and becomes the result.
|
||||
// - If dest is a register and either s1 or s2 is a non-simm13 constant, the temp argument is required and used to materialize the constant.
|
||||
RegisterOrConstant regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg);
|
||||
RegisterOrConstant regcon_inc_ptr( RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg);
|
||||
RegisterOrConstant regcon_sll_ptr( RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg);
|
||||
|
||||
RegisterOrConstant ensure_simm13_or_reg(RegisterOrConstant roc, Register Rtemp) {
guarantee(Rtemp != noreg, "constant offset overflow");
if (is_simm13(roc.constant_or_zero()))
return roc; // register or short constant
set(roc.as_constant(), Rtemp);
return RegisterOrConstant(Rtemp);
RegisterOrConstant ensure_simm13_or_reg(RegisterOrConstant src, Register temp) {
if (is_simm13(src.constant_or_zero()))
return src; // register or short constant
guarantee(temp != noreg, "constant offset overflow");
set(src.as_constant(), temp);
return temp;
}
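
// Editor's note -- an illustrative aside, not part of this changeset: the
// is_simm13() test above exists because SPARC arithmetic instructions encode a
// signed 13-bit immediate, so ensure_simm13_or_reg() only spills a constant
// into the scratch register when it falls outside that range. A minimal
// standalone sketch of the range check (fits_simm13 is a made-up name):
#include <cassert>
#include <cstdint>

static bool fits_simm13(int64_t value) {
  // Signed 13-bit range on SPARC: [-4096, 4095].
  return value >= -4096 && value <= 4095;
}

int main() {
  assert(fits_simm13(0));
  assert(fits_simm13(-4096) && fits_simm13(4095));
  assert(!fits_simm13(4096) && !fits_simm13(-4097));  // these would need the temp register
  return 0;
}
// End of editor's note.
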
// --------------------------------------------------
|
||||
@ -2306,6 +2300,9 @@ public:
|
||||
void lcmp( Register Ra, Register Rb, Register Rresult);
|
||||
#endif
|
||||
|
||||
// Loading values by size and signed-ness
|
||||
void load_sized_value(Address src, Register dst, size_t size_in_bytes, bool is_signed);
|
||||
|
||||
void float_cmp( bool is_float, int unordered_result,
|
||||
FloatRegister Fa, FloatRegister Fb,
|
||||
Register Rresult);
|
||||
@ -2424,12 +2421,16 @@ public:
|
||||
void check_method_handle_type(Register mtype_reg, Register mh_reg,
|
||||
Register temp_reg,
|
||||
Label& wrong_method_type);
|
||||
void jump_to_method_handle_entry(Register mh_reg, Register temp_reg);
|
||||
void load_method_handle_vmslots(Register vmslots_reg, Register mh_reg,
|
||||
Register temp_reg);
|
||||
void jump_to_method_handle_entry(Register mh_reg, Register temp_reg, bool emit_delayed_nop = true);
|
||||
// offset relative to Gargs of argument at tos[arg_slot].
|
||||
// (arg_slot == 0 means the last argument, not the first).
|
||||
RegisterOrConstant argument_offset(RegisterOrConstant arg_slot,
|
||||
int extra_slot_offset = 0);
|
||||
|
||||
// Address of Gargs and argument_offset.
|
||||
Address argument_address(RegisterOrConstant arg_slot,
|
||||
int extra_slot_offset = 0);
|
||||
|
||||
// Stack overflow checking
|
||||
|
||||
|
@ -206,12 +206,17 @@ inline void Assembler::ld( Register s1, RegisterOrConstant s2, Register d) { ld
inline void Assembler::ldd( Register s1, RegisterOrConstant s2, Register d) { ldd( Address(s1, s2), d); }

// form effective addresses this way:
inline void Assembler::add( Register s1, RegisterOrConstant s2, Register d, int offset) {
if (s2.is_register()) add(s1, s2.as_register(), d);
inline void Assembler::add(Register s1, RegisterOrConstant s2, Register d, int offset) {
if (s2.is_register()) add(s1, s2.as_register(), d);
else { add(s1, s2.as_constant() + offset, d); offset = 0; }
if (offset != 0) add(d, offset, d);
}
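
// Editor's note -- an illustrative aside, not part of this changeset: the new
// add() folds the extra offset into the immediate whenever the second operand
// is a constant, and only falls back to a second add when it is a register. A
// hedged standalone sketch of that decision (emit() and add_with_offset() are
// made-up stand-ins, each emit() representing one SPARC add):
#include <cstdio>

static void emit(const char* what) { std::printf("add %s\n", what); }

static void add_with_offset(bool s2_is_register, int offset) {
  if (s2_is_register) {
    emit("s1 + s2 -> d");             // register form; offset still pending
  } else {
    emit("s1 + (s2 + offset) -> d");  // constant form folds the offset in
    offset = 0;
  }
  if (offset != 0) emit("d + offset -> d");  // extra add only in the register case
}

int main() {
  add_with_offset(true, 8);   // two instructions
  add_with_offset(false, 8);  // one instruction
  return 0;
}
// End of editor's note.
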
inline void Assembler::andn(Register s1, RegisterOrConstant s2, Register d) {
if (s2.is_register()) andn(s1, s2.as_register(), d);
else andn(s1, s2.as_constant(), d);
}

inline void Assembler::ldstub( Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldstub( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
|
||||
|
||||
|
@ -388,6 +388,60 @@ int LIR_Assembler::emit_exception_handler() {
|
||||
}
|
||||
|
||||
|
||||
// Emit the code to remove the frame from the stack in the exception
|
||||
// unwind path.
|
||||
int LIR_Assembler::emit_unwind_handler() {
|
||||
#ifndef PRODUCT
|
||||
if (CommentedAssembly) {
|
||||
_masm->block_comment("Unwind handler");
|
||||
}
|
||||
#endif
|
||||
|
||||
int offset = code_offset();
|
||||
|
||||
// Fetch the exception from TLS and clear out exception related thread state
|
||||
__ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), O0);
|
||||
__ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
|
||||
__ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));
|
||||
|
||||
__ bind(_unwind_handler_entry);
|
||||
__ verify_not_null_oop(O0);
|
||||
if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
|
||||
__ mov(O0, I0); // Preserve the exception
|
||||
}
|
||||
|
||||
// Perform needed unlocking
|
||||
MonitorExitStub* stub = NULL;
|
||||
if (method()->is_synchronized()) {
|
||||
monitor_address(0, FrameMap::I1_opr);
|
||||
stub = new MonitorExitStub(FrameMap::I1_opr, true, 0);
|
||||
__ unlock_object(I3, I2, I1, *stub->entry());
|
||||
__ bind(*stub->continuation());
|
||||
}
|
||||
|
||||
if (compilation()->env()->dtrace_method_probes()) {
|
||||
jobject2reg(method()->constant_encoding(), O0);
|
||||
__ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), relocInfo::runtime_call_type);
|
||||
__ delayed()->nop();
|
||||
}
|
||||
|
||||
if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
__ mov(I0, O0); // Restore the exception
}

// dispatch to the unwind logic
__ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type);
__ delayed()->nop();

// Emit the slow path assembly
if (stub != NULL) {
stub->emit_code(this);
}

return offset;
}
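
// Editor's note -- an illustrative aside, not part of this changeset: the new
// unwind handler above fetches the pending exception out of thread-local state,
// clears that state, keeps the exception oop parked in a preserved register
// across the optional unlock and dtrace calls, and only then dispatches to the
// shared unwind stub. A rough standalone model of that ordering (Thread,
// unlock_receiver, notify_dtrace_exit and continue_unwind are made-up names):
#include <cassert>
#include <cstddef>

struct Thread { void* pending_exception; };

static void unlock_receiver()    {}  // stands in for unlock_object + MonitorExitStub
static void notify_dtrace_exit() {}  // stands in for SharedRuntime::dtrace_method_exit
static void continue_unwind(void* exception) { assert(exception != NULL); }

static void unwind_handler(Thread* t, bool is_synchronized, bool dtrace_probes) {
  void* exception = t->pending_exception;  // load from "TLS"
  t->pending_exception = NULL;             // and clear it, as the handler does
  if (is_synchronized) unlock_receiver();  // exception stays parked meanwhile
  if (dtrace_probes)   notify_dtrace_exit();
  continue_unwind(exception);              // dispatch to the unwind logic
}

int main() {
  int dummy = 0;
  Thread t = { &dummy };
  unwind_handler(&t, true, false);
  return 0;
}
// End of editor's note.
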
int LIR_Assembler::emit_deopt_handler() {
|
||||
// if the last instruction is a call (typically to do a throw which
|
||||
// is coming at the end after block reordering) the return address
|
||||
@ -1728,9 +1782,13 @@ void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Op
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
} else if (code == lir_cmp_l2i) {
|
||||
#ifdef _LP64
|
||||
__ lcmp(left->as_register_lo(), right->as_register_lo(), dst->as_register());
|
||||
#else
|
||||
__ lcmp(left->as_register_hi(), left->as_register_lo(),
|
||||
right->as_register_hi(), right->as_register_lo(),
|
||||
dst->as_register());
|
||||
#endif
|
||||
} else {
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
@ -2046,26 +2104,29 @@ int LIR_Assembler::shift_amount(BasicType t) {
|
||||
}
|
||||
|
||||
|
||||
void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info, bool unwind) {
|
||||
void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
|
||||
assert(exceptionOop->as_register() == Oexception, "should match");
|
||||
assert(unwind || exceptionPC->as_register() == Oissuing_pc, "should match");
|
||||
assert(exceptionPC->as_register() == Oissuing_pc, "should match");
|
||||
|
||||
info->add_register_oop(exceptionOop);
|
||||
|
||||
if (unwind) {
|
||||
__ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type);
|
||||
__ delayed()->nop();
|
||||
} else {
|
||||
// reuse the debug info from the safepoint poll for the throw op itself
|
||||
address pc_for_athrow = __ pc();
|
||||
int pc_for_athrow_offset = __ offset();
|
||||
RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow);
|
||||
__ set(pc_for_athrow, Oissuing_pc, rspec);
|
||||
add_call_info(pc_for_athrow_offset, info); // for exception handler
|
||||
// reuse the debug info from the safepoint poll for the throw op itself
|
||||
address pc_for_athrow = __ pc();
|
||||
int pc_for_athrow_offset = __ offset();
|
||||
RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow);
|
||||
__ set(pc_for_athrow, Oissuing_pc, rspec);
|
||||
add_call_info(pc_for_athrow_offset, info); // for exception handler
|
||||
|
||||
__ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
|
||||
__ delayed()->nop();
|
||||
}
|
||||
__ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
|
||||
__ delayed()->nop();
|
||||
}
|
||||
|
||||
|
||||
void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
|
||||
assert(exceptionOop->as_register() == Oexception, "should match");
|
||||
|
||||
__ br(Assembler::always, false, Assembler::pt, _unwind_handler_entry);
|
||||
__ delayed()->nop();
|
||||
}
|
||||
|
||||
|
||||
@ -2354,7 +2415,7 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
|
||||
if (UseSlowPath ||
|
||||
(!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
|
||||
(!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
|
||||
__ br(Assembler::always, false, Assembler::pn, *op->stub()->entry());
|
||||
__ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
|
||||
__ delayed()->nop();
|
||||
} else {
|
||||
__ allocate_array(op->obj()->as_register(),
|
||||
@ -2849,7 +2910,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
|
||||
|
||||
|
||||
void LIR_Assembler::align_backward_branch_target() {
|
||||
__ align(16);
|
||||
__ align(OptoLoopAlignment);
|
||||
}
|
||||
|
||||
|
||||
|
@ -60,9 +60,6 @@ define_pd_global(intx, FreqInlineSize, 175);
|
||||
define_pd_global(intx, INTPRESSURE, 48); // large register set
|
||||
define_pd_global(intx, InteriorEntryAlignment, 16); // = CodeEntryAlignment
|
||||
define_pd_global(intx, NewSizeThreadIncrease, ScaleForWordSize(4*K));
|
||||
// The default setting 16/16 seems to work best.
|
||||
// (For _228_jack 16/16 is 2% better than 4/4, 16/4, 32/32, 32/16, or 16/32.)
|
||||
define_pd_global(intx, OptoLoopAlignment, 16); // = 4*wordSize
|
||||
define_pd_global(intx, RegisterCostAreaRatio, 12000);
|
||||
define_pd_global(bool, UseTLAB, true);
|
||||
define_pd_global(bool, ResizeTLAB, true);
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2002 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 2002-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -26,7 +26,7 @@
|
||||
// fail with a guarantee ("not enough space for interpreter generation");
|
||||
// if too small.
|
||||
// Run with +PrintInterpreter to get the VM to print out the size.
|
||||
// Max size with JVMTI and TaggedStackInterpreter
|
||||
// Max size with JVMTI
|
||||
|
||||
// QQQ this is probably way too large for c++ interpreter
|
||||
|
||||
|
@ -620,7 +620,7 @@ bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
|
||||
|
||||
// stack frames shouldn't be much larger than max_stack elements
|
||||
|
||||
if (fp() - sp() > 1024 + m->max_stack()*Interpreter::stackElementSize()) {
|
||||
if (fp() - sp() > 1024 + m->max_stack()*Interpreter::stackElementSize) {
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -40,6 +40,9 @@ define_pd_global(bool, ImplicitNullChecks, true); // Generate code for
|
||||
define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs past to check cast
|
||||
|
||||
define_pd_global(intx, CodeEntryAlignment, 32);
|
||||
// The default setting 16/16 seems to work best.
|
||||
// (For _228_jack 16/16 is 2% better than 4/4, 16/4, 32/32, 32/16, or 16/32.)
|
||||
define_pd_global(intx, OptoLoopAlignment, 16); // = 4*wordSize
|
||||
define_pd_global(intx, InlineFrequencyCount, 50); // we can use more inlining on the SPARC
|
||||
define_pd_global(intx, InlineSmallCode, 1500);
|
||||
#ifdef _LP64
|
||||
|
@ -50,7 +50,6 @@ void InterpreterMacroAssembler::compute_extra_locals_size_in_bytes(Register args
|
||||
// Any changes should also be applied to CodeEmitter::emit_osr_entry().
|
||||
assert_different_registers(args_size, locals_size);
|
||||
// max_locals*2 for TAGS. Assumes that args_size has already been adjusted.
|
||||
if (TaggedStackInterpreter) sll(locals_size, 1, locals_size);
|
||||
subcc(locals_size, args_size, delta);// extra space for non-arguments locals in words
|
||||
// Use br/mov combination because it works on both V8 and V9 and is
|
||||
// faster.
|
||||
@ -319,7 +318,7 @@ void InterpreterMacroAssembler::load_unaligned_double(Register r1, int offset, F
|
||||
ldf(FloatRegisterImpl::D, r1, offset, d);
|
||||
#else
|
||||
ldf(FloatRegisterImpl::S, r1, offset, d);
|
||||
ldf(FloatRegisterImpl::S, r1, offset + Interpreter::stackElementSize(), d->successor());
|
||||
ldf(FloatRegisterImpl::S, r1, offset + Interpreter::stackElementSize, d->successor());
|
||||
#endif
|
||||
}
|
||||
|
||||
@ -330,10 +329,10 @@ void InterpreterMacroAssembler::store_unaligned_double(FloatRegister d, Register
|
||||
#ifdef _LP64
|
||||
stf(FloatRegisterImpl::D, d, r1, offset);
|
||||
// store something more useful here
|
||||
debug_only(stx(G0, r1, offset+Interpreter::stackElementSize());)
|
||||
debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);)
|
||||
#else
|
||||
stf(FloatRegisterImpl::S, d, r1, offset);
|
||||
stf(FloatRegisterImpl::S, d->successor(), r1, offset + Interpreter::stackElementSize());
|
||||
stf(FloatRegisterImpl::S, d->successor(), r1, offset + Interpreter::stackElementSize);
|
||||
#endif
|
||||
}
|
||||
|
||||
@ -345,7 +344,7 @@ void InterpreterMacroAssembler::load_unaligned_long(Register r1, int offset, Reg
|
||||
ldx(r1, offset, rd);
|
||||
#else
|
||||
ld(r1, offset, rd);
|
||||
ld(r1, offset + Interpreter::stackElementSize(), rd->successor());
|
||||
ld(r1, offset + Interpreter::stackElementSize, rd->successor());
|
||||
#endif
|
||||
}
|
||||
|
||||
@ -356,138 +355,62 @@ void InterpreterMacroAssembler::store_unaligned_long(Register l, Register r1, in
|
||||
#ifdef _LP64
|
||||
stx(l, r1, offset);
|
||||
// store something more useful here
|
||||
debug_only(stx(G0, r1, offset+Interpreter::stackElementSize());)
|
||||
debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);)
|
||||
#else
|
||||
st(l, r1, offset);
|
||||
st(l->successor(), r1, offset + Interpreter::stackElementSize());
|
||||
st(l->successor(), r1, offset + Interpreter::stackElementSize);
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
void InterpreterMacroAssembler::verify_stack_tag(frame::Tag t,
|
||||
Register r,
|
||||
Register scratch) {
|
||||
if (TaggedStackInterpreter) {
|
||||
Label ok, long_ok;
|
||||
ld_ptr(Lesp, Interpreter::expr_tag_offset_in_bytes(0), r);
|
||||
if (t == frame::TagCategory2) {
|
||||
cmp(r, G0);
|
||||
brx(Assembler::equal, false, Assembler::pt, long_ok);
|
||||
delayed()->ld_ptr(Lesp, Interpreter::expr_tag_offset_in_bytes(1), r);
|
||||
stop("stack long/double tag value bad");
|
||||
bind(long_ok);
|
||||
cmp(r, G0);
|
||||
} else if (t == frame::TagValue) {
|
||||
cmp(r, G0);
|
||||
} else {
|
||||
assert_different_registers(r, scratch);
|
||||
mov(t, scratch);
|
||||
cmp(r, scratch);
|
||||
}
|
||||
brx(Assembler::equal, false, Assembler::pt, ok);
|
||||
delayed()->nop();
|
||||
// Also compare if the stack value is zero, then the tag might
|
||||
// not have been set coming from deopt.
|
||||
ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), r);
|
||||
cmp(r, G0);
|
||||
brx(Assembler::equal, false, Assembler::pt, ok);
|
||||
delayed()->nop();
|
||||
stop("Stack tag value is bad");
|
||||
bind(ok);
|
||||
}
|
||||
}
|
||||
#endif // ASSERT
|
||||
|
||||
void InterpreterMacroAssembler::pop_i(Register r) {
|
||||
assert_not_delayed();
|
||||
// Uses destination register r for scratch
|
||||
debug_only(verify_stack_tag(frame::TagValue, r));
|
||||
ld(Lesp, Interpreter::expr_offset_in_bytes(0), r);
|
||||
inc(Lesp, Interpreter::stackElementSize());
|
||||
inc(Lesp, Interpreter::stackElementSize);
|
||||
debug_only(verify_esp(Lesp));
|
||||
}
|
||||
|
||||
void InterpreterMacroAssembler::pop_ptr(Register r, Register scratch) {
|
||||
assert_not_delayed();
|
||||
// Uses destination register r for scratch
|
||||
debug_only(verify_stack_tag(frame::TagReference, r, scratch));
|
||||
ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), r);
|
||||
inc(Lesp, Interpreter::stackElementSize());
|
||||
inc(Lesp, Interpreter::stackElementSize);
|
||||
debug_only(verify_esp(Lesp));
|
||||
}
|
||||
|
||||
void InterpreterMacroAssembler::pop_l(Register r) {
|
||||
assert_not_delayed();
|
||||
// Uses destination register r for scratch
|
||||
debug_only(verify_stack_tag(frame::TagCategory2, r));
|
||||
load_unaligned_long(Lesp, Interpreter::expr_offset_in_bytes(0), r);
|
||||
inc(Lesp, 2*Interpreter::stackElementSize());
|
||||
inc(Lesp, 2*Interpreter::stackElementSize);
|
||||
debug_only(verify_esp(Lesp));
|
||||
}
|
||||
|
||||
|
||||
void InterpreterMacroAssembler::pop_f(FloatRegister f, Register scratch) {
|
||||
assert_not_delayed();
|
||||
debug_only(verify_stack_tag(frame::TagValue, scratch));
|
||||
ldf(FloatRegisterImpl::S, Lesp, Interpreter::expr_offset_in_bytes(0), f);
|
||||
inc(Lesp, Interpreter::stackElementSize());
|
||||
inc(Lesp, Interpreter::stackElementSize);
|
||||
debug_only(verify_esp(Lesp));
|
||||
}
|
||||
|
||||
|
||||
void InterpreterMacroAssembler::pop_d(FloatRegister f, Register scratch) {
|
||||
assert_not_delayed();
|
||||
debug_only(verify_stack_tag(frame::TagCategory2, scratch));
|
||||
load_unaligned_double(Lesp, Interpreter::expr_offset_in_bytes(0), f);
|
||||
inc(Lesp, 2*Interpreter::stackElementSize());
|
||||
inc(Lesp, 2*Interpreter::stackElementSize);
|
||||
debug_only(verify_esp(Lesp));
|
||||
}
|
||||
|
||||
|
||||
// (Note use register first, then decrement so dec can be done during store stall)
|
||||
void InterpreterMacroAssembler::tag_stack(Register r) {
|
||||
if (TaggedStackInterpreter) {
|
||||
st_ptr(r, Lesp, Interpreter::tag_offset_in_bytes());
|
||||
}
|
||||
}
|
||||
|
||||
void InterpreterMacroAssembler::tag_stack(frame::Tag t, Register r) {
|
||||
if (TaggedStackInterpreter) {
|
||||
assert (frame::TagValue == 0, "TagValue must be zero");
|
||||
if (t == frame::TagValue) {
|
||||
st_ptr(G0, Lesp, Interpreter::tag_offset_in_bytes());
|
||||
} else if (t == frame::TagCategory2) {
|
||||
st_ptr(G0, Lesp, Interpreter::tag_offset_in_bytes());
|
||||
// Tag next slot down too
|
||||
st_ptr(G0, Lesp, -Interpreter::stackElementSize() + Interpreter::tag_offset_in_bytes());
|
||||
} else {
|
||||
assert_different_registers(r, O3);
|
||||
mov(t, O3);
|
||||
st_ptr(O3, Lesp, Interpreter::tag_offset_in_bytes());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void InterpreterMacroAssembler::push_i(Register r) {
assert_not_delayed();
debug_only(verify_esp(Lesp));
tag_stack(frame::TagValue, r);
st( r, Lesp, Interpreter::value_offset_in_bytes());
dec( Lesp, Interpreter::stackElementSize());
st(r, Lesp, 0);
dec(Lesp, Interpreter::stackElementSize);
}
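
// Editor's note -- an illustrative aside, not part of this changeset: with the
// tag slots gone, a push is just a store at the slot Lesp points to followed by
// a one-slot decrement (the expression stack grows toward lower addresses), and
// a pop is the mirror image. A tiny pointer model of that discipline, assuming
// one machine word per stack element (push_word/pop_word are made-up names):
#include <cassert>
#include <cstdint>

static void push_word(intptr_t*& esp, intptr_t value) {
  *esp = value;  // st(r, Lesp, 0)
  esp -= 1;      // dec(Lesp, Interpreter::stackElementSize)
}

static intptr_t pop_word(intptr_t*& esp) {
  esp += 1;      // inc(Lesp, Interpreter::stackElementSize)
  return *esp;   // the value now sits in the slot esp points to
}

int main() {
  intptr_t stack[8];
  intptr_t* esp = &stack[7];  // start at the deep end; grows downward
  push_word(esp, 42);
  assert(pop_word(esp) == 42);
  return 0;
}
// End of editor's note.
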
void InterpreterMacroAssembler::push_ptr(Register r) {
|
||||
assert_not_delayed();
|
||||
tag_stack(frame::TagReference, r);
|
||||
st_ptr( r, Lesp, Interpreter::value_offset_in_bytes());
|
||||
dec( Lesp, Interpreter::stackElementSize());
|
||||
}
|
||||
|
||||
void InterpreterMacroAssembler::push_ptr(Register r, Register tag) {
|
||||
assert_not_delayed();
|
||||
tag_stack(tag);
|
||||
st_ptr(r, Lesp, Interpreter::value_offset_in_bytes());
|
||||
dec( Lesp, Interpreter::stackElementSize());
|
||||
st_ptr(r, Lesp, 0);
|
||||
dec(Lesp, Interpreter::stackElementSize);
|
||||
}
|
||||
|
||||
// remember: our convention for longs in SPARC is:
|
||||
@ -497,33 +420,28 @@ void InterpreterMacroAssembler::push_ptr(Register r, Register tag) {
|
||||
void InterpreterMacroAssembler::push_l(Register r) {
|
||||
assert_not_delayed();
|
||||
debug_only(verify_esp(Lesp));
|
||||
tag_stack(frame::TagCategory2, r);
|
||||
// Longs are in stored in memory-correct order, even if unaligned.
|
||||
// and may be separated by stack tags.
|
||||
int offset = -Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
|
||||
// Longs are stored in memory-correct order, even if unaligned.
|
||||
int offset = -Interpreter::stackElementSize;
|
||||
store_unaligned_long(r, Lesp, offset);
|
||||
dec(Lesp, 2 * Interpreter::stackElementSize());
|
||||
dec(Lesp, 2 * Interpreter::stackElementSize);
|
||||
}
|
||||
|
||||
|
||||
void InterpreterMacroAssembler::push_f(FloatRegister f) {
|
||||
assert_not_delayed();
|
||||
debug_only(verify_esp(Lesp));
|
||||
tag_stack(frame::TagValue, Otos_i);
|
||||
stf(FloatRegisterImpl::S, f, Lesp, Interpreter::value_offset_in_bytes());
|
||||
dec(Lesp, Interpreter::stackElementSize());
|
||||
stf(FloatRegisterImpl::S, f, Lesp, 0);
|
||||
dec(Lesp, Interpreter::stackElementSize);
|
||||
}
|
||||
|
||||
|
||||
void InterpreterMacroAssembler::push_d(FloatRegister d) {
|
||||
assert_not_delayed();
|
||||
debug_only(verify_esp(Lesp));
|
||||
tag_stack(frame::TagCategory2, Otos_i);
|
||||
// Longs are in stored in memory-correct order, even if unaligned.
|
||||
// and may be separated by stack tags.
|
||||
int offset = -Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
|
||||
// Longs are stored in memory-correct order, even if unaligned.
|
||||
int offset = -Interpreter::stackElementSize;
|
||||
store_unaligned_double(d, Lesp, offset);
|
||||
dec(Lesp, 2 * Interpreter::stackElementSize());
|
||||
dec(Lesp, 2 * Interpreter::stackElementSize);
|
||||
}
|
||||
|
||||
|
||||
@ -561,30 +479,18 @@ void InterpreterMacroAssembler::pop(TosState state) {
|
||||
}
|
||||
|
||||
|
||||
// Tagged stack helpers for swap and dup
|
||||
void InterpreterMacroAssembler::load_ptr_and_tag(int n, Register val,
|
||||
Register tag) {
|
||||
// Helpers for swap and dup
|
||||
void InterpreterMacroAssembler::load_ptr(int n, Register val) {
|
||||
ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(n), val);
|
||||
if (TaggedStackInterpreter) {
|
||||
ld_ptr(Lesp, Interpreter::expr_tag_offset_in_bytes(n), tag);
|
||||
}
|
||||
}
|
||||
void InterpreterMacroAssembler::store_ptr_and_tag(int n, Register val,
|
||||
Register tag) {
|
||||
void InterpreterMacroAssembler::store_ptr(int n, Register val) {
|
||||
st_ptr(val, Lesp, Interpreter::expr_offset_in_bytes(n));
|
||||
if (TaggedStackInterpreter) {
|
||||
st_ptr(tag, Lesp, Interpreter::expr_tag_offset_in_bytes(n));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void InterpreterMacroAssembler::load_receiver(Register param_count,
|
||||
Register recv) {
|
||||
|
||||
sll(param_count, Interpreter::logStackElementSize(), param_count);
|
||||
if (TaggedStackInterpreter) {
|
||||
add(param_count, Interpreter::value_offset_in_bytes(), param_count); // get obj address
|
||||
}
|
||||
sll(param_count, Interpreter::logStackElementSize, param_count);
|
||||
ld_ptr(Lesp, param_count, recv); // gets receiver Oop
|
||||
}
|
||||
|
||||
@ -605,7 +511,6 @@ void InterpreterMacroAssembler::empty_expression_stack() {
|
||||
|
||||
// Compute max expression stack+register save area
|
||||
lduh(Lmethod, in_bytes(methodOopDesc::max_stack_offset()), Gframe_size); // Load max stack.
|
||||
if (TaggedStackInterpreter) sll ( Gframe_size, 1, Gframe_size); // max_stack * 2 for TAGS
|
||||
add( Gframe_size, frame::memory_parameter_word_sp_offset, Gframe_size );
|
||||
|
||||
//
|
||||
@ -814,22 +719,39 @@ void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(
|
||||
}
|
||||
|
||||
|
||||
void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register tmp, int bcp_offset) {
void InterpreterMacroAssembler::get_cache_index_at_bcp(Register cache, Register tmp,
int bcp_offset, bool giant_index) {
assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
if (!giant_index) {
get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
} else {
assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic");
get_4_byte_integer_at_bcp(bcp_offset, cache, tmp);
assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
xor3(tmp, -1, tmp); // convert to plain index
}
}
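
// Editor's note -- an illustrative aside, not part of this changeset: the
// giant-index path works because invokedynamic call sites store their cache
// index as the bitwise complement of the plain index, so xor3(tmp, -1, tmp)
// (a bitwise NOT) recovers it; the in-line assert about ~123 documents the same
// invariant. A standalone sketch of the encode/decode round-trip (the two
// helper names below are made up; only the ~/xor identity matters):
#include <cassert>
#include <cstdint>

static int32_t encode_secondary_index(int32_t index) { return ~index; }
static int32_t decode_secondary_index(int32_t raw)   { return ~raw;   }

int main() {
  assert(decode_secondary_index(encode_secondary_index(123)) == 123);
  assert(decode_secondary_index(~123) == 123);  // mirrors the assert above
  assert((123 ^ -1) == ~123);                   // xor with -1 is bitwise NOT
  return 0;
}
// End of editor's note.
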
void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register tmp,
|
||||
int bcp_offset, bool giant_index) {
|
||||
assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
|
||||
assert_different_registers(cache, tmp);
|
||||
assert_not_delayed();
|
||||
get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
|
||||
// convert from field index to ConstantPoolCacheEntry index
|
||||
// and from word index to byte offset
|
||||
get_cache_index_at_bcp(cache, tmp, bcp_offset, giant_index);
|
||||
// convert from field index to ConstantPoolCacheEntry index and from
|
||||
// word index to byte offset
|
||||
sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
|
||||
add(LcpoolCache, tmp, cache);
|
||||
}
|
||||
|
||||
|
||||
void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset) {
|
||||
void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
|
||||
int bcp_offset, bool giant_index) {
|
||||
assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
|
||||
assert_different_registers(cache, tmp);
|
||||
assert_not_delayed();
|
||||
assert(!giant_index,"NYI");
|
||||
get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
|
||||
// convert from field index to ConstantPoolCacheEntry index
|
||||
// and from word index to byte offset
|
||||
@ -1675,15 +1597,31 @@ void InterpreterMacroAssembler::profile_final_call(Register scratch) {
|
||||
// Count a virtual call in the bytecodes.
|
||||
|
||||
void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
|
||||
Register scratch) {
|
||||
Register scratch,
|
||||
bool receiver_can_be_null) {
|
||||
if (ProfileInterpreter) {
|
||||
Label profile_continue;
|
||||
|
||||
// If no method data exists, go to profile_continue.
|
||||
test_method_data_pointer(profile_continue);
|
||||
|
||||
|
||||
Label skip_receiver_profile;
|
||||
if (receiver_can_be_null) {
|
||||
Label not_null;
|
||||
tst(receiver);
|
||||
brx(Assembler::notZero, false, Assembler::pt, not_null);
|
||||
delayed()->nop();
|
||||
// We are making a call. Increment the count for null receiver.
|
||||
increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
|
||||
ba(false, skip_receiver_profile);
|
||||
delayed()->nop();
|
||||
bind(not_null);
|
||||
}
|
||||
|
||||
// Record the receiver type.
|
||||
record_klass_in_profile(receiver, scratch, true);
|
||||
bind(skip_receiver_profile);
|
||||
|
||||
// The method data pointer needs to be updated to reflect the new target.
|
||||
update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
|
||||
@ -1985,51 +1923,11 @@ void InterpreterMacroAssembler::add_monitor_to_stack( bool stack_is_empty,
|
||||
}
|
||||
|
||||
// Locals
|
||||
#ifdef ASSERT
|
||||
void InterpreterMacroAssembler::verify_local_tag(frame::Tag t,
|
||||
Register base,
|
||||
Register scratch,
|
||||
int n) {
|
||||
if (TaggedStackInterpreter) {
|
||||
Label ok, long_ok;
|
||||
// Use dst for scratch
|
||||
assert_different_registers(base, scratch);
|
||||
ld_ptr(base, Interpreter::local_tag_offset_in_bytes(n), scratch);
|
||||
if (t == frame::TagCategory2) {
|
||||
cmp(scratch, G0);
|
||||
brx(Assembler::equal, false, Assembler::pt, long_ok);
|
||||
delayed()->ld_ptr(base, Interpreter::local_tag_offset_in_bytes(n+1), scratch);
|
||||
stop("local long/double tag value bad");
|
||||
bind(long_ok);
|
||||
// compare second half tag
|
||||
cmp(scratch, G0);
|
||||
} else if (t == frame::TagValue) {
|
||||
cmp(scratch, G0);
|
||||
} else {
|
||||
assert_different_registers(O3, base, scratch);
|
||||
mov(t, O3);
|
||||
cmp(scratch, O3);
|
||||
}
|
||||
brx(Assembler::equal, false, Assembler::pt, ok);
|
||||
delayed()->nop();
|
||||
// Also compare if the local value is zero, then the tag might
|
||||
// not have been set coming from deopt.
|
||||
ld_ptr(base, Interpreter::local_offset_in_bytes(n), scratch);
|
||||
cmp(scratch, G0);
|
||||
brx(Assembler::equal, false, Assembler::pt, ok);
|
||||
delayed()->nop();
|
||||
stop("Local tag value is bad");
|
||||
bind(ok);
|
||||
}
|
||||
}
|
||||
#endif // ASSERT
|
||||
|
||||
void InterpreterMacroAssembler::access_local_ptr( Register index, Register dst ) {
|
||||
assert_not_delayed();
|
||||
sll(index, Interpreter::logStackElementSize(), index);
|
||||
sll(index, Interpreter::logStackElementSize, index);
|
||||
sub(Llocals, index, index);
|
||||
debug_only(verify_local_tag(frame::TagReference, index, dst));
|
||||
ld_ptr(index, Interpreter::value_offset_in_bytes(), dst);
|
||||
ld_ptr(index, 0, dst);
|
||||
// Note: index must hold the effective address--the iinc template uses it
|
||||
}
|
||||
|
||||
@ -2037,27 +1935,24 @@ void InterpreterMacroAssembler::access_local_ptr( Register index, Register dst )
|
||||
void InterpreterMacroAssembler::access_local_returnAddress(Register index,
|
||||
Register dst ) {
|
||||
assert_not_delayed();
|
||||
sll(index, Interpreter::logStackElementSize(), index);
|
||||
sll(index, Interpreter::logStackElementSize, index);
|
||||
sub(Llocals, index, index);
|
||||
debug_only(verify_local_tag(frame::TagValue, index, dst));
|
||||
ld_ptr(index, Interpreter::value_offset_in_bytes(), dst);
|
||||
ld_ptr(index, 0, dst);
|
||||
}
|
||||
|
||||
void InterpreterMacroAssembler::access_local_int( Register index, Register dst ) {
|
||||
assert_not_delayed();
|
||||
sll(index, Interpreter::logStackElementSize(), index);
|
||||
sll(index, Interpreter::logStackElementSize, index);
|
||||
sub(Llocals, index, index);
|
||||
debug_only(verify_local_tag(frame::TagValue, index, dst));
|
||||
ld(index, Interpreter::value_offset_in_bytes(), dst);
|
||||
ld(index, 0, dst);
|
||||
// Note: index must hold the effective address--the iinc template uses it
|
||||
}
|
||||
|
||||
|
||||
void InterpreterMacroAssembler::access_local_long( Register index, Register dst ) {
|
||||
assert_not_delayed();
|
||||
sll(index, Interpreter::logStackElementSize(), index);
|
||||
sll(index, Interpreter::logStackElementSize, index);
|
||||
sub(Llocals, index, index);
|
||||
debug_only(verify_local_tag(frame::TagCategory2, index, dst));
|
||||
// First half stored at index n+1 (which grows down from Llocals[n])
|
||||
load_unaligned_long(index, Interpreter::local_offset_in_bytes(1), dst);
|
||||
}
|
||||
@ -2065,18 +1960,16 @@ void InterpreterMacroAssembler::access_local_long( Register index, Register dst
|
||||
|
||||
void InterpreterMacroAssembler::access_local_float( Register index, FloatRegister dst ) {
|
||||
assert_not_delayed();
|
||||
sll(index, Interpreter::logStackElementSize(), index);
|
||||
sll(index, Interpreter::logStackElementSize, index);
|
||||
sub(Llocals, index, index);
|
||||
debug_only(verify_local_tag(frame::TagValue, index, G1_scratch));
|
||||
ldf(FloatRegisterImpl::S, index, Interpreter::value_offset_in_bytes(), dst);
|
||||
ldf(FloatRegisterImpl::S, index, 0, dst);
|
||||
}
|
||||
|
||||
|
||||
void InterpreterMacroAssembler::access_local_double( Register index, FloatRegister dst ) {
|
||||
assert_not_delayed();
|
||||
sll(index, Interpreter::logStackElementSize(), index);
|
||||
sll(index, Interpreter::logStackElementSize, index);
|
||||
sub(Llocals, index, index);
|
||||
debug_only(verify_local_tag(frame::TagCategory2, index, G1_scratch));
|
||||
load_unaligned_double(index, Interpreter::local_offset_in_bytes(1), dst);
|
||||
}
|
||||
|
||||
@ -2102,94 +1995,60 @@ void InterpreterMacroAssembler::check_for_regarea_stomp(Register Rindex, int off
|
||||
}
|
||||
#endif // ASSERT
|
||||
|
||||
void InterpreterMacroAssembler::tag_local(frame::Tag t,
|
||||
Register base,
|
||||
Register src,
|
||||
int n) {
|
||||
if (TaggedStackInterpreter) {
|
||||
// have to store zero because local slots can be reused (rats!)
|
||||
if (t == frame::TagValue) {
|
||||
st_ptr(G0, base, Interpreter::local_tag_offset_in_bytes(n));
|
||||
} else if (t == frame::TagCategory2) {
|
||||
st_ptr(G0, base, Interpreter::local_tag_offset_in_bytes(n));
|
||||
st_ptr(G0, base, Interpreter::local_tag_offset_in_bytes(n+1));
|
||||
} else {
|
||||
// assert that we don't stomp the value in 'src'
|
||||
// O3 is arbitrary because it's not used.
|
||||
assert_different_registers(src, base, O3);
|
||||
mov( t, O3);
|
||||
st_ptr(O3, base, Interpreter::local_tag_offset_in_bytes(n));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void InterpreterMacroAssembler::store_local_int( Register index, Register src ) {
|
||||
assert_not_delayed();
|
||||
sll(index, Interpreter::logStackElementSize(), index);
|
||||
sll(index, Interpreter::logStackElementSize, index);
|
||||
sub(Llocals, index, index);
|
||||
debug_only(check_for_regarea_stomp(index, Interpreter::value_offset_in_bytes(), FP, G1_scratch, G4_scratch);)
|
||||
tag_local(frame::TagValue, index, src);
|
||||
st(src, index, Interpreter::value_offset_in_bytes());
|
||||
debug_only(check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);)
|
||||
st(src, index, 0);
|
||||
}
|
||||
|
||||
void InterpreterMacroAssembler::store_local_ptr( Register index, Register src,
|
||||
Register tag ) {
|
||||
void InterpreterMacroAssembler::store_local_ptr( Register index, Register src ) {
|
||||
assert_not_delayed();
|
||||
sll(index, Interpreter::logStackElementSize(), index);
|
||||
sll(index, Interpreter::logStackElementSize, index);
|
||||
sub(Llocals, index, index);
|
||||
#ifdef ASSERT
|
||||
check_for_regarea_stomp(index, Interpreter::value_offset_in_bytes(), FP, G1_scratch, G4_scratch);
|
||||
#endif
|
||||
st_ptr(src, index, Interpreter::value_offset_in_bytes());
|
||||
// Store tag register directly
|
||||
if (TaggedStackInterpreter) {
|
||||
st_ptr(tag, index, Interpreter::tag_offset_in_bytes());
|
||||
}
|
||||
#ifdef ASSERT
|
||||
check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);
|
||||
#endif
|
||||
st_ptr(src, index, 0);
|
||||
}
|
||||
|
||||
|
||||
|
||||
void InterpreterMacroAssembler::store_local_ptr( int n, Register src,
|
||||
Register tag ) {
|
||||
st_ptr(src, Llocals, Interpreter::local_offset_in_bytes(n));
|
||||
if (TaggedStackInterpreter) {
|
||||
st_ptr(tag, Llocals, Interpreter::local_tag_offset_in_bytes(n));
|
||||
}
|
||||
void InterpreterMacroAssembler::store_local_ptr( int n, Register src ) {
|
||||
st_ptr(src, Llocals, Interpreter::local_offset_in_bytes(n));
|
||||
}
|
||||
|
||||
void InterpreterMacroAssembler::store_local_long( Register index, Register src ) {
|
||||
assert_not_delayed();
|
||||
sll(index, Interpreter::logStackElementSize(), index);
|
||||
sll(index, Interpreter::logStackElementSize, index);
|
||||
sub(Llocals, index, index);
|
||||
#ifdef ASSERT
|
||||
#ifdef ASSERT
|
||||
check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch);
|
||||
#endif
|
||||
tag_local(frame::TagCategory2, index, src);
|
||||
#endif
|
||||
store_unaligned_long(src, index, Interpreter::local_offset_in_bytes(1)); // which is n+1
|
||||
}
|
||||
|
||||
|
||||
void InterpreterMacroAssembler::store_local_float( Register index, FloatRegister src ) {
|
||||
assert_not_delayed();
|
||||
sll(index, Interpreter::logStackElementSize(), index);
|
||||
sll(index, Interpreter::logStackElementSize, index);
|
||||
sub(Llocals, index, index);
|
||||
#ifdef ASSERT
|
||||
check_for_regarea_stomp(index, Interpreter::value_offset_in_bytes(), FP, G1_scratch, G4_scratch);
|
||||
#endif
|
||||
tag_local(frame::TagValue, index, G1_scratch);
|
||||
stf(FloatRegisterImpl::S, src, index, Interpreter::value_offset_in_bytes());
|
||||
#ifdef ASSERT
|
||||
check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);
|
||||
#endif
|
||||
stf(FloatRegisterImpl::S, src, index, 0);
|
||||
}
|
||||
|
||||
|
||||
void InterpreterMacroAssembler::store_local_double( Register index, FloatRegister src ) {
|
||||
assert_not_delayed();
|
||||
sll(index, Interpreter::logStackElementSize(), index);
|
||||
sll(index, Interpreter::logStackElementSize, index);
|
||||
sub(Llocals, index, index);
|
||||
#ifdef ASSERT
|
||||
#ifdef ASSERT
|
||||
check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch);
|
||||
#endif
|
||||
tag_local(frame::TagCategory2, index, G1_scratch);
|
||||
#endif
|
||||
store_unaligned_double(src, index, Interpreter::local_offset_in_bytes(1));
|
||||
}
|
||||
|
||||
|
@ -149,7 +149,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
|
||||
|
||||
void push_i( Register r = Otos_i);
|
||||
void push_ptr( Register r = Otos_i);
|
||||
void push_ptr( Register r, Register tag);
|
||||
void push_l( Register r = Otos_l1);
|
||||
void push_f(FloatRegister f = Ftos_f);
|
||||
void push_d(FloatRegister f = Ftos_d1);
|
||||
@ -159,17 +158,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
|
||||
void push(TosState state); // transition state -> vtos
|
||||
void empty_expression_stack(); // resets both Lesp and SP
|
||||
|
||||
// Support for Tagged Stacks
|
||||
void tag_stack(frame::Tag t, Register r);
|
||||
void tag_stack(Register tag);
|
||||
void tag_local(frame::Tag t, Register src, Register base, int n = 0);
|
||||
|
||||
#ifdef ASSERT
|
||||
void verify_sp(Register Rsp, Register Rtemp);
|
||||
void verify_esp(Register Resp); // verify that Lesp points to a word in the temp stack
|
||||
|
||||
void verify_stack_tag(frame::Tag t, Register r, Register scratch = G0);
|
||||
void verify_local_tag(frame::Tag t, Register base, Register scr, int n = 0);
|
||||
#endif // ASSERT
|
||||
|
||||
public:
|
||||
@ -191,8 +182,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
|
||||
Register Rdst,
|
||||
setCCOrNot should_set_CC = dont_set_CC );
|
||||
|
||||
void get_cache_and_index_at_bcp(Register cache, Register tmp, int bcp_offset);
|
||||
void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset);
|
||||
void get_cache_and_index_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false);
|
||||
void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false);
|
||||
void get_cache_index_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false);
|
||||
|
||||
|
||||
// common code
|
||||
@ -241,17 +233,17 @@ class InterpreterMacroAssembler: public MacroAssembler {
|
||||
void check_for_regarea_stomp( Register Rindex, int offset, Register Rlimit, Register Rscratch, Register Rscratch1);
|
||||
#endif // ASSERT
|
||||
void store_local_int( Register index, Register src );
|
||||
void store_local_ptr( Register index, Register src, Register tag = Otos_l2 );
|
||||
void store_local_ptr( int n, Register src, Register tag = Otos_l2 );
|
||||
void store_local_ptr( Register index, Register src );
|
||||
void store_local_ptr( int n, Register src );
|
||||
void store_local_long( Register index, Register src );
|
||||
void store_local_float( Register index, FloatRegister src );
|
||||
void store_local_double( Register index, FloatRegister src );
|
||||
|
||||
// Tagged stack helpers for swap and dup
|
||||
void load_ptr_and_tag(int n, Register val, Register tag);
|
||||
void store_ptr_and_tag(int n, Register val, Register tag);
|
||||
// Helpers for swap and dup
|
||||
void load_ptr(int n, Register val);
|
||||
void store_ptr(int n, Register val);
|
||||
|
||||
// Tagged stack helper for getting receiver in register.
|
||||
// Helper for getting receiver in register.
|
||||
void load_receiver(Register param_count, Register recv);
|
||||
|
||||
static int top_most_monitor_byte_offset(); // offset in bytes to top of monitor block
|
||||
@ -304,7 +296,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
|
||||
void profile_not_taken_branch(Register scratch);
|
||||
void profile_call(Register scratch);
|
||||
void profile_final_call(Register scratch);
|
||||
void profile_virtual_call(Register receiver, Register scratch);
|
||||
void profile_virtual_call(Register receiver, Register scratch, bool receiver_can_be_null = false);
|
||||
void profile_ret(TosState state, Register return_bci, Register scratch);
|
||||
void profile_null_seen(Register scratch);
|
||||
void profile_typecheck(Register klass, Register scratch);
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 1998-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -43,19 +43,6 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_long() {
|
||||
Argument jni_arg(jni_offset(), false);
|
||||
Register Rtmp = O0;
|
||||
|
||||
#ifdef ASSERT
|
||||
if (TaggedStackInterpreter) {
|
||||
// check at least one tag is okay
|
||||
Label ok;
|
||||
__ ld_ptr(Llocals, Interpreter::local_tag_offset_in_bytes(offset() + 1), Rtmp);
|
||||
__ cmp(Rtmp, G0);
|
||||
__ brx(Assembler::equal, false, Assembler::pt, ok);
|
||||
__ delayed()->nop();
|
||||
__ stop("Native object has bad tag value");
|
||||
__ bind(ok);
|
||||
}
|
||||
#endif // ASSERT
|
||||
|
||||
#ifdef _LP64
|
||||
__ ldx(Llocals, Interpreter::local_offset_in_bytes(offset() + 1), Rtmp);
|
||||
__ store_long_argument(Rtmp, jni_arg);
|
||||
@ -107,18 +94,6 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_object() {
|
||||
|
||||
Address h_arg = Address(Llocals, Interpreter::local_offset_in_bytes(offset()));
|
||||
__ ld_ptr(h_arg, Rtmp1);
|
||||
#ifdef ASSERT
|
||||
if (TaggedStackInterpreter) {
|
||||
// check we have the obj and not the tag
|
||||
Label ok;
|
||||
__ mov(frame::TagReference, Rtmp3);
|
||||
__ cmp(Rtmp1, Rtmp3);
|
||||
__ brx(Assembler::notEqual, true, Assembler::pt, ok);
|
||||
__ delayed()->nop();
|
||||
__ stop("Native object passed tag by mistake");
|
||||
__ bind(ok);
|
||||
}
|
||||
#endif // ASSERT
|
||||
if (!do_NULL_check) {
|
||||
__ add(h_arg.base(), h_arg.disp(), Rtmp2);
|
||||
} else {
|
||||
@ -168,17 +143,9 @@ class SlowSignatureHandler: public NativeSignatureIterator {
|
||||
long_sig = 3
|
||||
};
|
||||
|
||||
#ifdef ASSERT
|
||||
void verify_tag(frame::Tag t) {
|
||||
assert(!TaggedStackInterpreter ||
|
||||
*(intptr_t*)(_from+Interpreter::local_tag_offset_in_bytes(0)) == t, "wrong tag");
|
||||
}
|
||||
#endif // ASSERT
|
||||
|
||||
virtual void pass_int() {
|
||||
*_to++ = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
|
||||
debug_only(verify_tag(frame::TagValue));
|
||||
_from -= Interpreter::stackElementSize();
|
||||
_from -= Interpreter::stackElementSize;
|
||||
add_signature( non_float );
|
||||
}
|
||||
|
||||
@ -186,31 +153,27 @@ class SlowSignatureHandler: public NativeSignatureIterator {
|
||||
// pass address of from
|
||||
intptr_t *from_addr = (intptr_t*)(_from + Interpreter::local_offset_in_bytes(0));
|
||||
*_to++ = (*from_addr == 0) ? NULL : (intptr_t) from_addr;
|
||||
debug_only(verify_tag(frame::TagReference));
|
||||
_from -= Interpreter::stackElementSize();
|
||||
_from -= Interpreter::stackElementSize;
|
||||
add_signature( non_float );
|
||||
}
|
||||
|
||||
#ifdef _LP64
|
||||
virtual void pass_float() {
|
||||
*_to++ = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
|
||||
debug_only(verify_tag(frame::TagValue));
|
||||
_from -= Interpreter::stackElementSize();
|
||||
_from -= Interpreter::stackElementSize;
|
||||
add_signature( float_sig );
|
||||
}
|
||||
|
||||
virtual void pass_double() {
|
||||
*_to++ = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
|
||||
debug_only(verify_tag(frame::TagValue));
|
||||
_from -= 2*Interpreter::stackElementSize();
|
||||
_from -= 2*Interpreter::stackElementSize;
|
||||
add_signature( double_sig );
|
||||
}
|
||||
|
||||
virtual void pass_long() {
|
||||
_to[0] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
|
||||
debug_only(verify_tag(frame::TagValue));
|
||||
_to += 1;
|
||||
_from -= 2*Interpreter::stackElementSize();
|
||||
_from -= 2*Interpreter::stackElementSize;
|
||||
add_signature( long_sig );
|
||||
}
|
||||
#else
|
||||
@ -218,9 +181,8 @@ class SlowSignatureHandler: public NativeSignatureIterator {
|
||||
virtual void pass_long() {
|
||||
_to[0] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
|
||||
_to[1] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(0));
|
||||
debug_only(verify_tag(frame::TagValue));
|
||||
_to += 2;
|
||||
_from -= 2*Interpreter::stackElementSize();
|
||||
_from -= 2*Interpreter::stackElementSize;
|
||||
add_signature( non_float );
|
||||
}
|
||||
#endif // _LP64
|
||||
|
@ -235,19 +235,17 @@ address InterpreterGenerator::generate_abstract_entry(void) {
|
||||
}
|
||||
|
||||
|
||||
|
||||
// Method handle invoker
|
||||
// Dispatch a method of the form java.dyn.MethodHandles::invoke(...)
|
||||
address InterpreterGenerator::generate_method_handle_entry(void) {
|
||||
if (!EnableMethodHandles) {
|
||||
return generate_abstract_entry();
|
||||
}
|
||||
return generate_abstract_entry(); //6815692//
|
||||
|
||||
return MethodHandles::generate_method_handle_interpreter_entry(_masm);
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
//----------------------------------------------------------------------------------------------------
|
||||
// Entry points & stack frame layout
|
||||
//
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -24,33 +24,13 @@
|
||||
|
||||
public:
|
||||
|
||||
// Support for Tagged Stacks
|
||||
static int expr_offset_in_bytes(int i) { return stackElementSize * i + wordSize; }
|
||||
|
||||
// Stack index relative to tos (which points at value)
|
||||
static int expr_index_at(int i) {
|
||||
return stackElementWords() * i;
|
||||
}
|
||||
|
||||
static int expr_tag_index_at(int i) {
|
||||
assert(TaggedStackInterpreter, "should not call this");
|
||||
// tag is one word above java stack element
|
||||
return stackElementWords() * i + 1;
|
||||
}
|
||||
|
||||
static int expr_offset_in_bytes(int i) { return stackElementSize()*i + wordSize; }
|
||||
static int expr_tag_offset_in_bytes (int i) {
|
||||
assert(TaggedStackInterpreter, "should not call this");
|
||||
return expr_offset_in_bytes(i) + wordSize;
|
||||
}
|
||||
static int expr_index_at(int i) { return stackElementWords * i; }
|
||||
|
||||
// Already negated by c++ interpreter
static int local_index_at(int i) {
assert(i<=0, "local direction already negated");
return stackElementWords() * i + (value_offset_in_bytes()/wordSize);
}

static int local_tag_index_at(int i) {
assert(i<=0, "local direction already negated");
assert(TaggedStackInterpreter, "should not call this");
return stackElementWords() * i + (tag_offset_in_bytes()/wordSize);
static int local_index_at(int i) {
assert(i <= 0, "local direction already negated");
return stackElementWords * i;
}
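
// Editor's note -- an illustrative aside, not part of this changeset: once the
// tag words disappear each expression-stack or local slot is a single word, so
// the helpers above collapse to plain multiples of stackElementSize /
// stackElementWords. A small self-checking sketch of that arithmetic, under the
// assumption of an 8-byte word (the constants below are assumed values, not the
// VM's definitions):
#include <cassert>

static const int wordSize          = 8;
static const int stackElementSize  = wordSize;  // one word per slot, no tag word
static const int stackElementWords = 1;

static int expr_offset_in_bytes(int i) { return stackElementSize * i + wordSize; }
static int expr_index_at(int i)        { return stackElementWords * i; }
static int local_index_at(int i)       { assert(i <= 0); return stackElementWords * i; }

int main() {
  assert(expr_offset_in_bytes(0) == wordSize);  // top-of-stack value sits one word above Lesp
  assert(expr_offset_in_bytes(2) == 3 * wordSize);
  assert(expr_index_at(3) == 3);
  assert(local_index_at(-2) == -2);
  return 0;
}
// End of editor's note.
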
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 2008-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -29,6 +29,9 @@
|
||||
|
||||
address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm,
|
||||
address interpreted_entry) {
|
||||
// Just before the actual machine code entry point, allocate space
|
||||
// for a MethodHandleEntry::Data record, so that we can manage everything
|
||||
// from one base pointer.
|
||||
__ align(wordSize);
|
||||
address target = __ pc() + sizeof(Data);
|
||||
while (__ pc() < target) {
|
||||
@ -59,12 +62,891 @@ MethodHandleEntry* MethodHandleEntry::finish_compiled_entry(MacroAssembler* _mas
|
||||
|
||||
// Code generation
|
||||
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) {
|
||||
ShouldNotReachHere(); //NYI, 6815692
|
||||
return NULL;
|
||||
// I5_savedSP: sender SP (must preserve)
|
||||
// G4 (Gargs): incoming argument list (must preserve)
|
||||
// G5_method: invoke methodOop; becomes method type.
|
||||
// G3_method_handle: receiver method handle (must load from sp[MethodTypeForm.vmslots])
|
||||
// O0, O1: garbage temps, blown away
|
||||
Register O0_argslot = O0;
|
||||
Register O1_scratch = O1;
|
||||
|
||||
// emit WrongMethodType path first, to enable back-branch from main path
|
||||
Label wrong_method_type;
|
||||
__ bind(wrong_method_type);
|
||||
__ jump_to(AddressLiteral(Interpreter::throw_WrongMethodType_entry()), O1_scratch);
|
||||
__ delayed()->nop();
|
||||
|
||||
// here's where control starts out:
|
||||
__ align(CodeEntryAlignment);
|
||||
address entry_point = __ pc();
|
||||
|
||||
// fetch the MethodType from the method handle into G5_method_type
|
||||
{
|
||||
Register tem = G5_method;
|
||||
assert(tem == G5_method_type, "yes, it's the same register");
|
||||
for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) {
|
||||
__ ld_ptr(Address(tem, *pchase), G5_method_type);
|
||||
}
|
||||
}
|
||||
|
||||
// given the MethodType, find out where the MH argument is buried
|
||||
__ ld_ptr(Address(G5_method_type, __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, O1_scratch)), O0_argslot);
|
||||
__ ldsw( Address(O0_argslot, __ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, O1_scratch)), O0_argslot);
|
||||
__ ld_ptr(__ argument_address(O0_argslot), G3_method_handle);
|
||||
|
||||
__ check_method_handle_type(G5_method_type, G3_method_handle, O1_scratch, wrong_method_type);
|
||||
__ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
|
||||
|
||||
return entry_point;
|
||||
}
|
||||
|
||||
|
||||
#ifdef ASSERT
|
||||
static void verify_argslot(MacroAssembler* _masm, Register argslot_reg, Register temp_reg, const char* error_message) {
|
||||
// Verify that argslot lies within (Gargs, FP].
|
||||
Label L_ok, L_bad;
|
||||
#ifdef _LP64
|
||||
__ add(FP, STACK_BIAS, temp_reg);
|
||||
__ cmp(argslot_reg, temp_reg);
|
||||
#else
|
||||
__ cmp(argslot_reg, FP);
|
||||
#endif
|
||||
__ brx(Assembler::greaterUnsigned, false, Assembler::pn, L_bad);
|
||||
__ delayed()->nop();
|
||||
__ cmp(Gargs, argslot_reg);
|
||||
__ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
|
||||
__ delayed()->nop();
|
||||
__ bind(L_bad);
|
||||
__ stop(error_message);
|
||||
__ bind(L_ok);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
// Helper to insert argument slots into the stack.
|
||||
// arg_slots must be a multiple of stack_move_unit() and <= 0
|
||||
void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
|
||||
RegisterOrConstant arg_slots,
|
||||
int arg_mask,
|
||||
Register argslot_reg,
|
||||
Register temp_reg, Register temp2_reg, Register temp3_reg) {
|
||||
assert(temp3_reg != noreg, "temp3 required");
|
||||
assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
|
||||
(!arg_slots.is_register() ? Gargs : arg_slots.as_register()));
|
||||
|
||||
#ifdef ASSERT
|
||||
verify_argslot(_masm, argslot_reg, temp_reg, "insertion point must fall within current frame");
|
||||
if (arg_slots.is_register()) {
|
||||
Label L_ok, L_bad;
|
||||
__ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
|
||||
__ br(Assembler::greater, false, Assembler::pn, L_bad);
|
||||
__ delayed()->nop();
|
||||
__ btst(-stack_move_unit() - 1, arg_slots.as_register());
|
||||
__ br(Assembler::zero, false, Assembler::pt, L_ok);
|
||||
__ delayed()->nop();
|
||||
__ bind(L_bad);
|
||||
__ stop("assert arg_slots <= 0 and clear low bits");
|
||||
__ bind(L_ok);
|
||||
} else {
|
||||
assert(arg_slots.as_constant() <= 0, "");
|
||||
assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
|
||||
}
|
||||
#endif // ASSERT
|
||||
|
||||
#ifdef _LP64
|
||||
if (arg_slots.is_register()) {
|
||||
// Was arg_slots register loaded as signed int?
|
||||
Label L_ok;
|
||||
__ sll(arg_slots.as_register(), BitsPerInt, temp_reg);
|
||||
__ sra(temp_reg, BitsPerInt, temp_reg);
|
||||
__ cmp(arg_slots.as_register(), temp_reg);
|
||||
__ br(Assembler::equal, false, Assembler::pt, L_ok);
|
||||
__ delayed()->nop();
|
||||
__ stop("arg_slots register not loaded as signed int");
|
||||
__ bind(L_ok);
|
||||
}
|
||||
#endif
|
||||
|
||||
// Make space on the stack for the inserted argument(s).
|
||||
// Then pull down everything shallower than argslot_reg.
|
||||
// The stacked return address gets pulled down with everything else.
|
||||
// That is, copy [sp, argslot) downward by -size words. In pseudo-code:
|
||||
// sp -= size;
|
||||
// for (temp = sp + size; temp < argslot; temp++)
|
||||
// temp[-size] = temp[0]
|
||||
// argslot -= size;
|
||||
RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);
|
||||
|
||||
// Keep the stack pointer 2*wordSize aligned.
|
||||
const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
|
||||
RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg);
|
||||
__ add(SP, masked_offset, SP);
|
||||
|
||||
__ mov(Gargs, temp_reg); // source pointer for copy
__ add(Gargs, offset, Gargs);

{
Label loop;
__ bind(loop);
// pull one word down each time through the loop
__ ld_ptr(Address(temp_reg, 0), temp2_reg);
__ st_ptr(temp2_reg, Address(temp_reg, offset));
__ add(temp_reg, wordSize, temp_reg);
__ cmp(temp_reg, argslot_reg);
__ brx(Assembler::less, false, Assembler::pt, loop);
__ delayed()->nop(); // FILLME
}

// Now move the argslot down, to point to the opened-up space.
__ add(argslot_reg, offset, argslot_reg);
}
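
// Editor's note -- an illustrative aside, not part of this changeset: the
// pseudo-code comment earlier in insert_arg_slots is the whole story -- the
// stack is extended downward by 'size' words and every word shallower than
// argslot slides down by the same amount, leaving a gap just below argslot.
// A plain-memory sketch of that copy (insert_slots is a made-up helper):
#include <cassert>
#include <cstdint>

// Slide the words in [sp, argslot) down by 'size' slots and return the new,
// lower stack pointer (downward-growing stack, one word per slot).
static intptr_t* insert_slots(intptr_t* sp, intptr_t* argslot, int size) {
  intptr_t* new_sp = sp - size;
  for (intptr_t* p = sp; p < argslot; p++) {
    p[-size] = p[0];  // pull one word down each time through the loop
  }
  return new_sp;      // the opened-up slots now sit just below argslot
}

int main() {
  intptr_t frame[8] = { 0, 0, 11, 22, 33, 44, 55, 66 };
  intptr_t* sp      = &frame[2];  // shallow end of the live words
  intptr_t* argslot = &frame[5];  // insertion point; deeper words stay put
  intptr_t* new_sp  = insert_slots(sp, argslot, 2);
  assert(new_sp == &frame[0]);
  assert(frame[0] == 11 && frame[1] == 22 && frame[2] == 33);  // shallower words moved down
  assert(frame[5] == 44);                                      // deeper words untouched
  return 0;
}
// End of editor's note.
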
// Helper to remove argument slots from the stack.
|
||||
// arg_slots must be a multiple of stack_move_unit() and >= 0
|
||||
void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
|
||||
RegisterOrConstant arg_slots,
|
||||
Register argslot_reg,
|
||||
Register temp_reg, Register temp2_reg, Register temp3_reg) {
|
||||
assert(temp3_reg != noreg, "temp3 required");
|
||||
assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
|
||||
(!arg_slots.is_register() ? Gargs : arg_slots.as_register()));
|
||||
|
||||
RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);
|
||||
|
||||
#ifdef ASSERT
|
||||
// Verify that [argslot..argslot+size) lies within (Gargs, FP).
|
||||
__ add(argslot_reg, offset, temp2_reg);
|
||||
verify_argslot(_masm, temp2_reg, temp_reg, "deleted argument(s) must fall within current frame");
|
||||
if (arg_slots.is_register()) {
|
||||
Label L_ok, L_bad;
|
||||
__ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
|
||||
__ br(Assembler::less, false, Assembler::pn, L_bad);
|
||||
__ delayed()->nop();
|
||||
__ btst(-stack_move_unit() - 1, arg_slots.as_register());
|
||||
__ br(Assembler::zero, false, Assembler::pt, L_ok);
|
||||
__ delayed()->nop();
|
||||
__ bind(L_bad);
|
||||
__ stop("assert arg_slots >= 0 and clear low bits");
|
||||
__ bind(L_ok);
|
||||
} else {
|
||||
assert(arg_slots.as_constant() >= 0, "");
|
||||
assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
|
||||
}
|
||||
#endif // ASSERT
|
||||
|
||||
// Pull up everything shallower than argslot.
|
||||
// Then remove the excess space on the stack.
|
||||
// The stacked return address gets pulled up with everything else.
|
||||
// That is, copy [sp, argslot) upward by size words. In pseudo-code:
|
||||
// for (temp = argslot-1; temp >= sp; --temp)
|
||||
// temp[size] = temp[0]
|
||||
// argslot += size;
|
||||
// sp += size;
|
||||
__ sub(argslot_reg, wordSize, temp_reg); // source pointer for copy
|
||||
{
|
||||
Label loop;
|
||||
__ bind(loop);
|
||||
// pull one word up each time through the loop
|
||||
__ ld_ptr(Address(temp_reg, 0), temp2_reg);
|
||||
__ st_ptr(temp2_reg, Address(temp_reg, offset));
|
||||
__ sub(temp_reg, wordSize, temp_reg);
|
||||
__ cmp(temp_reg, Gargs);
|
||||
__ brx(Assembler::greaterEqual, false, Assembler::pt, loop);
|
||||
__ delayed()->nop(); // FILLME
|
||||
}
|
||||
|
||||
// Now move the argslot up, to point to the just-copied block.
|
||||
__ add(Gargs, offset, Gargs);
|
||||
// And adjust the argslot address to point at the deletion point.
|
||||
__ add(argslot_reg, offset, argslot_reg);
|
||||
|
||||
// Keep the stack pointer 2*wordSize aligned.
|
||||
const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
|
||||
RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg);
|
||||
__ add(SP, masked_offset, SP);
|
||||
}
|
||||
|
||||
|
||||
#ifndef PRODUCT
|
||||
extern "C" void print_method_handle(oop mh);
|
||||
void trace_method_handle_stub(const char* adaptername,
|
||||
oop mh) {
|
||||
#if 0
|
||||
intptr_t* entry_sp,
|
||||
intptr_t* saved_sp,
|
||||
intptr_t* saved_bp) {
|
||||
// called as a leaf from native code: do not block the JVM!
|
||||
intptr_t* last_sp = (intptr_t*) saved_bp[frame::interpreter_frame_last_sp_offset];
|
||||
intptr_t* base_sp = (intptr_t*) saved_bp[frame::interpreter_frame_monitor_block_top_offset];
|
||||
printf("MH %s mh="INTPTR_FORMAT" sp=("INTPTR_FORMAT"+"INTX_FORMAT") stack_size="INTX_FORMAT" bp="INTPTR_FORMAT"\n",
|
||||
adaptername, (intptr_t)mh, (intptr_t)entry_sp, (intptr_t)(saved_sp - entry_sp), (intptr_t)(base_sp - last_sp), (intptr_t)saved_bp);
|
||||
if (last_sp != saved_sp)
|
||||
printf("*** last_sp="INTPTR_FORMAT"\n", (intptr_t)last_sp);
|
||||
#endif
|
||||
|
||||
printf("MH %s mh="INTPTR_FORMAT"\n", adaptername, (intptr_t) mh);
|
||||
print_method_handle(mh);
|
||||
}
|
||||
#endif // PRODUCT
|
||||
|
||||
// which conversion op types are implemented here?
|
||||
int MethodHandles::adapter_conversion_ops_supported_mask() {
|
||||
return ((1<<sun_dyn_AdapterMethodHandle::OP_RETYPE_ONLY)
|
||||
|(1<<sun_dyn_AdapterMethodHandle::OP_RETYPE_RAW)
|
||||
|(1<<sun_dyn_AdapterMethodHandle::OP_CHECK_CAST)
|
||||
|(1<<sun_dyn_AdapterMethodHandle::OP_PRIM_TO_PRIM)
|
||||
|(1<<sun_dyn_AdapterMethodHandle::OP_REF_TO_PRIM)
|
||||
|(1<<sun_dyn_AdapterMethodHandle::OP_SWAP_ARGS)
|
||||
|(1<<sun_dyn_AdapterMethodHandle::OP_ROT_ARGS)
|
||||
|(1<<sun_dyn_AdapterMethodHandle::OP_DUP_ARGS)
|
||||
|(1<<sun_dyn_AdapterMethodHandle::OP_DROP_ARGS)
|
||||
//|(1<<sun_dyn_AdapterMethodHandle::OP_SPREAD_ARGS) //BUG!
|
||||
);
|
||||
// FIXME: MethodHandlesTest gets a crash if we enable OP_SPREAD_ARGS.
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
// MethodHandles::generate_method_handle_stub
|
||||
//
|
||||
// Generate an "entry" field for a method handle.
|
||||
// This determines how the method handle will respond to calls.
|
||||
void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
|
||||
ShouldNotReachHere(); //NYI, 6815692
|
||||
// Here is the register state during an interpreted call,
|
||||
// as set up by generate_method_handle_interpreter_entry():
|
||||
// - G5: garbage temp (was MethodHandle.invoke methodOop, unused)
|
||||
// - G3: receiver method handle
|
||||
// - O5_savedSP: sender SP (must preserve)
|
||||
|
||||
Register O0_argslot = O0;
|
||||
Register O1_scratch = O1;
|
||||
Register O2_scratch = O2;
|
||||
Register O3_scratch = O3;
|
||||
Register G5_index = G5;
|
||||
|
||||
guarantee(java_dyn_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");
|
||||
|
||||
// Some handy addresses:
|
||||
Address G5_method_fie( G5_method, in_bytes(methodOopDesc::from_interpreted_offset()));
|
||||
|
||||
Address G3_mh_vmtarget( G3_method_handle, java_dyn_MethodHandle::vmtarget_offset_in_bytes());
|
||||
|
||||
Address G3_dmh_vmindex( G3_method_handle, sun_dyn_DirectMethodHandle::vmindex_offset_in_bytes());
|
||||
|
||||
Address G3_bmh_vmargslot( G3_method_handle, sun_dyn_BoundMethodHandle::vmargslot_offset_in_bytes());
|
||||
Address G3_bmh_argument( G3_method_handle, sun_dyn_BoundMethodHandle::argument_offset_in_bytes());
|
||||
|
||||
Address G3_amh_vmargslot( G3_method_handle, sun_dyn_AdapterMethodHandle::vmargslot_offset_in_bytes());
|
||||
Address G3_amh_argument ( G3_method_handle, sun_dyn_AdapterMethodHandle::argument_offset_in_bytes());
|
||||
Address G3_amh_conversion(G3_method_handle, sun_dyn_AdapterMethodHandle::conversion_offset_in_bytes());
|
||||
|
||||
const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
|
||||
|
||||
if (have_entry(ek)) {
|
||||
__ nop(); // empty stubs make SG sick
|
||||
return;
|
||||
}
|
||||
|
||||
address interp_entry = __ pc();
|
||||
if (UseCompressedOops) __ unimplemented("UseCompressedOops");
|
||||
|
||||
#ifndef PRODUCT
|
||||
if (TraceMethodHandles) {
|
||||
// save: Gargs, O5_savedSP
|
||||
__ save(SP, -16*wordSize, SP);
|
||||
__ set((intptr_t) entry_name(ek), O0);
|
||||
__ mov(G3_method_handle, O1);
|
||||
__ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, trace_method_handle_stub));
|
||||
__ restore(SP, 16*wordSize, SP);
|
||||
}
|
||||
#endif // PRODUCT
|
||||
|
||||
switch ((int) ek) {
|
||||
case _raise_exception:
|
||||
{
|
||||
// Not a real MH entry, but rather shared code for raising an
|
||||
// exception. Extra local arguments are passed in scratch
|
||||
// registers, as required type in O3, failing object (or NULL)
|
||||
// in O2, failing bytecode type in O1.
|
||||
|
||||
__ mov(O5_savedSP, SP); // Cut the stack back to where the caller started.
|
||||
|
||||
// Push arguments as if coming from the interpreter.
|
||||
Register O0_scratch = O0_argslot;
|
||||
int stackElementSize = Interpreter::stackElementSize;
|
||||
|
||||
// Make space on the stack for the arguments.
|
||||
__ sub(SP, 4*stackElementSize, SP);
|
||||
__ sub(Gargs, 3*stackElementSize, Gargs);
|
||||
//__ sub(Lesp, 3*stackElementSize, Lesp);
|
||||
|
||||
// void raiseException(int code, Object actual, Object required)
|
||||
__ st( O1_scratch, Address(Gargs, 2*stackElementSize)); // code
|
||||
__ st_ptr(O2_scratch, Address(Gargs, 1*stackElementSize)); // actual
|
||||
__ st_ptr(O3_scratch, Address(Gargs, 0*stackElementSize)); // required
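// The three stores above lay out the call as the interpreter would: the last declared
// parameter ('required') lands at the lowest Gargs offset and the int 'code' at the
// highest of the three slots.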
|
||||
|
||||
Label no_method;
|
||||
// FIXME: fill in _raise_exception_method with a suitable sun.dyn method
|
||||
__ set(AddressLiteral((address) &_raise_exception_method), G5_method);
|
||||
__ ld_ptr(Address(G5_method, 0), G5_method);
|
||||
__ tst(G5_method);
|
||||
__ brx(Assembler::zero, false, Assembler::pn, no_method);
|
||||
__ delayed()->nop();
|
||||
|
||||
int jobject_oop_offset = 0;
|
||||
__ ld_ptr(Address(G5_method, jobject_oop_offset), G5_method);
|
||||
__ tst(G5_method);
|
||||
__ brx(Assembler::zero, false, Assembler::pn, no_method);
|
||||
__ delayed()->nop();
|
||||
|
||||
__ verify_oop(G5_method);
|
||||
__ jump_indirect_to(G5_method_fie, O1_scratch);
|
||||
__ delayed()->nop();
|
||||
|
||||
// If we get here, the Java runtime did not do its job of creating the exception.
// Do something that at least causes a valid throw from the interpreter.
__ bind(no_method);
|
||||
__ unimplemented("_raise_exception no method");
|
||||
}
|
||||
break;
|
||||
|
||||
case _invokestatic_mh:
|
||||
case _invokespecial_mh:
|
||||
{
|
||||
__ ld_ptr(G3_mh_vmtarget, G5_method); // target is a methodOop
|
||||
__ verify_oop(G5_method);
|
||||
// Same as TemplateTable::invokestatic or invokespecial,
|
||||
// minus the CP setup and profiling:
|
||||
if (ek == _invokespecial_mh) {
|
||||
// Must load & check the first argument before entering the target method.
|
||||
__ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
|
||||
__ ld_ptr(__ argument_address(O0_argslot), G3_method_handle);
|
||||
__ null_check(G3_method_handle);
|
||||
__ verify_oop(G3_method_handle);
|
||||
}
|
||||
__ jump_indirect_to(G5_method_fie, O1_scratch);
|
||||
__ delayed()->nop();
|
||||
}
|
||||
break;
|
||||
|
||||
case _invokevirtual_mh:
|
||||
{
|
||||
// Same as TemplateTable::invokevirtual,
|
||||
// minus the CP setup and profiling:
|
||||
|
||||
// Pick out the vtable index and receiver offset from the MH,
|
||||
// and then we can discard it:
|
||||
__ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
|
||||
__ ldsw(G3_dmh_vmindex, G5_index);
|
||||
// Note: The verifier allows us to ignore G3_mh_vmtarget.
|
||||
__ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
|
||||
__ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());
|
||||
|
||||
// Get receiver klass:
|
||||
Register O0_klass = O0_argslot;
|
||||
__ load_klass(G3_method_handle, O0_klass);
|
||||
__ verify_oop(O0_klass);
|
||||
|
||||
// Get target methodOop & entry point:
|
||||
const int base = instanceKlass::vtable_start_offset() * wordSize;
|
||||
assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
|
||||
|
||||
__ sll_ptr(G5_index, LogBytesPerWord, G5_index);
|
||||
__ add(O0_klass, G5_index, O0_klass);
|
||||
Address vtable_entry_addr(O0_klass, base + vtableEntry::method_offset_in_bytes());
|
||||
__ ld_ptr(vtable_entry_addr, G5_method);
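// Net effect of the instructions above: G5_method = vtable entry 'G5_index' of the
// receiver's klass, i.e. the target methodOop selected by the vtable index taken from
// the DirectMethodHandle.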
|
||||
|
||||
__ verify_oop(G5_method);
|
||||
__ jump_indirect_to(G5_method_fie, O1_scratch);
|
||||
__ delayed()->nop();
|
||||
}
|
||||
break;
|
||||
|
||||
case _invokeinterface_mh:
|
||||
{
|
||||
// Same as TemplateTable::invokeinterface,
|
||||
// minus the CP setup and profiling:
|
||||
__ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
|
||||
Register O1_intf = O1_scratch;
|
||||
__ ld_ptr(G3_mh_vmtarget, O1_intf);
|
||||
__ ldsw(G3_dmh_vmindex, G5_index);
|
||||
__ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
|
||||
__ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());
|
||||
|
||||
// Get receiver klass:
|
||||
Register O0_klass = O0_argslot;
|
||||
__ load_klass(G3_method_handle, O0_klass);
|
||||
__ verify_oop(O0_klass);
|
||||
|
||||
// Get interface:
|
||||
Label no_such_interface;
|
||||
__ verify_oop(O1_intf);
|
||||
__ lookup_interface_method(O0_klass, O1_intf,
|
||||
// Note: next two args must be the same:
|
||||
G5_index, G5_method,
|
||||
O2_scratch,
|
||||
O3_scratch,
|
||||
no_such_interface);
|
||||
|
||||
__ verify_oop(G5_method);
|
||||
__ jump_indirect_to(G5_method_fie, O1_scratch);
|
||||
__ delayed()->nop();
|
||||
|
||||
__ bind(no_such_interface);
|
||||
// Throw an exception.
|
||||
// For historical reasons, it will be IncompatibleClassChangeError.
|
||||
__ unimplemented("not tested yet");
|
||||
__ ld_ptr(Address(O1_intf, java_mirror_offset), O3_scratch); // required interface
|
||||
__ mov(O0_klass, O2_scratch); // bad receiver
|
||||
__ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O0_argslot);
|
||||
__ delayed()->mov(Bytecodes::_invokeinterface, O1_scratch); // who is complaining?
|
||||
}
|
||||
break;
|
||||
|
||||
case _bound_ref_mh:
|
||||
case _bound_int_mh:
|
||||
case _bound_long_mh:
|
||||
case _bound_ref_direct_mh:
|
||||
case _bound_int_direct_mh:
|
||||
case _bound_long_direct_mh:
|
||||
{
|
||||
const bool direct_to_method = (ek >= _bound_ref_direct_mh);
|
||||
BasicType arg_type = T_ILLEGAL;
|
||||
int arg_mask = _INSERT_NO_MASK;
|
||||
int arg_slots = -1;
|
||||
get_ek_bound_mh_info(ek, arg_type, arg_mask, arg_slots);
|
||||
|
||||
// Make room for the new argument:
|
||||
__ ldsw(G3_bmh_vmargslot, O0_argslot);
|
||||
__ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
|
||||
|
||||
insert_arg_slots(_masm, arg_slots * stack_move_unit(), arg_mask, O0_argslot, O1_scratch, O2_scratch, G5_index);
|
||||
|
||||
// Store bound argument into the new stack slot:
|
||||
__ ld_ptr(G3_bmh_argument, O1_scratch);
|
||||
if (arg_type == T_OBJECT) {
|
||||
__ st_ptr(O1_scratch, Address(O0_argslot, 0));
|
||||
} else {
|
||||
Address prim_value_addr(O1_scratch, java_lang_boxing_object::value_offset_in_bytes(arg_type));
|
||||
__ load_sized_value(prim_value_addr, O2_scratch, type2aelembytes(arg_type), is_signed_subword_type(arg_type));
|
||||
if (arg_slots == 2) {
|
||||
__ unimplemented("not yet tested");
|
||||
#ifndef _LP64
|
||||
__ signx(O2_scratch, O3_scratch); // Sign extend
|
||||
#endif
|
||||
__ st_long(O2_scratch, Address(O0_argslot, 0)); // Uses O2/O3 on !_LP64
|
||||
} else {
|
||||
__ st_ptr( O2_scratch, Address(O0_argslot, 0));
|
||||
}
|
||||
}
|
||||
|
||||
if (direct_to_method) {
|
||||
__ ld_ptr(G3_mh_vmtarget, G5_method); // target is a methodOop
|
||||
__ verify_oop(G5_method);
|
||||
__ jump_indirect_to(G5_method_fie, O1_scratch);
|
||||
__ delayed()->nop();
|
||||
} else {
|
||||
__ ld_ptr(G3_mh_vmtarget, G3_method_handle); // target is a methodOop
|
||||
__ verify_oop(G3_method_handle);
|
||||
__ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
||||
case _adapter_retype_only:
|
||||
case _adapter_retype_raw:
|
||||
// Immediately jump to the next MH layer:
|
||||
__ ld_ptr(G3_mh_vmtarget, G3_method_handle);
|
||||
__ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
|
||||
// This is OK when all parameter types widen.
|
||||
// It is also OK when a return type narrows.
|
||||
break;
|
||||
|
||||
case _adapter_check_cast:
|
||||
{
|
||||
// Temps:
|
||||
Register G5_klass = G5_index; // Interesting AMH data.
|
||||
|
||||
// Check a reference argument before jumping to the next layer of MH:
|
||||
__ ldsw(G3_amh_vmargslot, O0_argslot);
|
||||
Address vmarg = __ argument_address(O0_argslot);
|
||||
|
||||
// What class are we casting to?
|
||||
__ ld_ptr(G3_amh_argument, G5_klass); // This is a Class object!
|
||||
__ ld_ptr(Address(G5_klass, java_lang_Class::klass_offset_in_bytes()), G5_klass);
|
||||
|
||||
Label done;
|
||||
__ ld_ptr(vmarg, O1_scratch);
|
||||
__ tst(O1_scratch);
|
||||
__ brx(Assembler::zero, false, Assembler::pn, done); // No cast if null.
|
||||
__ delayed()->nop();
|
||||
__ load_klass(O1_scratch, O1_scratch);
|
||||
|
||||
// Live at this point:
|
||||
// - G5_klass : klass required by the target method
|
||||
// - O1_scratch : argument klass to test
|
||||
// - G3_method_handle: adapter method handle
|
||||
__ check_klass_subtype(O1_scratch, G5_klass, O0_argslot, O2_scratch, done);
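// check_klass_subtype branches to 'done' on success, so execution only falls through to
// the code below when the argument is not an instance of G5_klass.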
|
||||
|
||||
// If we get here, the type check failed!
|
||||
__ ldsw(G3_amh_vmargslot, O0_argslot); // reload argslot field
|
||||
__ ld_ptr(G3_amh_argument, O3_scratch); // required class
|
||||
__ ld_ptr(vmarg, O2_scratch); // bad object
|
||||
__ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O0_argslot);
|
||||
__ delayed()->mov(Bytecodes::_checkcast, O1_scratch); // who is complaining?
|
||||
|
||||
__ bind(done);
|
||||
// Get the new MH:
|
||||
__ ld_ptr(G3_mh_vmtarget, G3_method_handle);
|
||||
__ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
|
||||
}
|
||||
break;
|
||||
|
||||
case _adapter_prim_to_prim:
|
||||
case _adapter_ref_to_prim:
|
||||
// Handled completely by optimized cases.
|
||||
__ stop("init_AdapterMethodHandle should not issue this");
|
||||
break;
|
||||
|
||||
case _adapter_opt_i2i: // optimized subcase of adapt_prim_to_prim
|
||||
//case _adapter_opt_f2i: // optimized subcase of adapt_prim_to_prim
|
||||
case _adapter_opt_l2i: // optimized subcase of adapt_prim_to_prim
|
||||
case _adapter_opt_unboxi: // optimized subcase of adapt_ref_to_prim
|
||||
{
|
||||
// Perform an in-place conversion to int or an int subword.
|
||||
__ ldsw(G3_amh_vmargslot, O0_argslot);
|
||||
Address vmarg = __ argument_address(O0_argslot);
|
||||
Address value;
|
||||
bool value_left_justified = false;
|
||||
|
||||
switch (ek) {
|
||||
case _adapter_opt_i2i:
|
||||
case _adapter_opt_l2i:
|
||||
__ unimplemented(entry_name(ek));
|
||||
value = vmarg;
|
||||
break;
|
||||
case _adapter_opt_unboxi:
|
||||
{
|
||||
// Load the value up from the heap.
|
||||
__ ld_ptr(vmarg, O1_scratch);
|
||||
int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT);
|
||||
#ifdef ASSERT
|
||||
for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
|
||||
if (is_subword_type(BasicType(bt)))
|
||||
assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(BasicType(bt)), "");
|
||||
}
|
||||
#endif
|
||||
__ null_check(O1_scratch, value_offset);
|
||||
value = Address(O1_scratch, value_offset);
|
||||
#ifdef _BIG_ENDIAN
|
||||
// Values stored in objects are packed.
|
||||
value_left_justified = true;
|
||||
#endif
|
||||
}
|
||||
break;
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
|
||||
// This check is required on _BIG_ENDIAN
|
||||
Register G5_vminfo = G5_index;
|
||||
__ ldsw(G3_amh_conversion, G5_vminfo);
|
||||
assert(CONV_VMINFO_SHIFT == 0, "preshifted");
|
||||
|
||||
// Original 32-bit vmdata word must be of this form:
|
||||
// | MBZ:6 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 |
|
||||
__ lduw(value, O1_scratch);
|
||||
if (!value_left_justified)
|
||||
__ sll(O1_scratch, G5_vminfo, O1_scratch);
|
||||
Label zero_extend, done;
|
||||
__ btst(CONV_VMINFO_SIGN_FLAG, G5_vminfo);
|
||||
__ br(Assembler::zero, false, Assembler::pn, zero_extend);
|
||||
__ delayed()->nop();
|
||||
|
||||
// this path is taken for int->byte, int->short
|
||||
__ sra(O1_scratch, G5_vminfo, O1_scratch);
|
||||
__ ba(false, done);
|
||||
__ delayed()->nop();
|
||||
|
||||
__ bind(zero_extend);
|
||||
// this is taken for int->char
|
||||
__ srl(O1_scratch, G5_vminfo, O1_scratch);
|
||||
|
||||
__ bind(done);
|
||||
__ st(O1_scratch, vmarg);
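// Summary of the conversion above: the value is left-justified (via sll, unless the
// big-endian box already left-justified it), then sign-extended with sra for int->byte
// and int->short, or zero-extended with srl for int->char, and stored back into the
// argument slot.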
|
||||
|
||||
// Get the new MH:
|
||||
__ ld_ptr(G3_mh_vmtarget, G3_method_handle);
|
||||
__ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
|
||||
}
|
||||
break;
|
||||
|
||||
case _adapter_opt_i2l: // optimized subcase of adapt_prim_to_prim
|
||||
case _adapter_opt_unboxl: // optimized subcase of adapt_ref_to_prim
|
||||
{
|
||||
// Perform an in-place int-to-long or ref-to-long conversion.
|
||||
__ ldsw(G3_amh_vmargslot, O0_argslot);
|
||||
|
||||
// On big-endian machine we duplicate the slot and store the MSW
|
||||
// in the first slot.
|
||||
__ add(Gargs, __ argument_offset(O0_argslot, 1), O0_argslot);
|
||||
|
||||
insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK, O0_argslot, O1_scratch, O2_scratch, G5_index);
|
||||
|
||||
Address arg_lsw(O0_argslot, 0);
|
||||
Address arg_msw(O0_argslot, -Interpreter::stackElementSize);
|
||||
|
||||
switch (ek) {
|
||||
case _adapter_opt_i2l:
|
||||
{
|
||||
__ ldsw(arg_lsw, O2_scratch); // Load LSW
|
||||
#ifndef _LP64
|
||||
__ signx(O2_scratch, O3_scratch); // Sign extend
|
||||
#endif
|
||||
__ st_long(O2_scratch, arg_msw); // Uses O2/O3 on !_LP64
|
||||
}
|
||||
break;
|
||||
case _adapter_opt_unboxl:
|
||||
{
|
||||
// Load the value up from the heap.
|
||||
__ ld_ptr(arg_lsw, O1_scratch);
|
||||
int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_LONG);
|
||||
assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE), "");
|
||||
__ null_check(O1_scratch, value_offset);
|
||||
__ ld_long(Address(O1_scratch, value_offset), O2_scratch); // Uses O2/O3 on !_LP64
|
||||
__ st_long(O2_scratch, arg_msw);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
|
||||
__ ld_ptr(G3_mh_vmtarget, G3_method_handle);
|
||||
__ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
|
||||
}
|
||||
break;
|
||||
|
||||
case _adapter_opt_f2d: // optimized subcase of adapt_prim_to_prim
|
||||
case _adapter_opt_d2f: // optimized subcase of adapt_prim_to_prim
|
||||
{
|
||||
// perform an in-place floating primitive conversion
|
||||
__ unimplemented(entry_name(ek));
|
||||
}
|
||||
break;
|
||||
|
||||
case _adapter_prim_to_ref:
|
||||
__ unimplemented(entry_name(ek)); // %%% FIXME: NYI
|
||||
break;
|
||||
|
||||
case _adapter_swap_args:
|
||||
case _adapter_rot_args:
|
||||
// handled completely by optimized cases
|
||||
__ stop("init_AdapterMethodHandle should not issue this");
|
||||
break;
|
||||
|
||||
case _adapter_opt_swap_1:
|
||||
case _adapter_opt_swap_2:
|
||||
case _adapter_opt_rot_1_up:
|
||||
case _adapter_opt_rot_1_down:
|
||||
case _adapter_opt_rot_2_up:
|
||||
case _adapter_opt_rot_2_down:
|
||||
{
|
||||
int swap_bytes = 0, rotate = 0;
|
||||
get_ek_adapter_opt_swap_rot_info(ek, swap_bytes, rotate);
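// get_ek_adapter_opt_swap_rot_info returns the size in bytes of the block being moved
// (swap_bytes) and a rotation direction (rotate == 0 for a plain swap, positive for
// rotate-up, negative for rotate-down), which is what the branches below key off.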
|
||||
|
||||
// 'argslot' is the position of the first argument to swap.
|
||||
__ ldsw(G3_amh_vmargslot, O0_argslot);
|
||||
__ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
|
||||
|
||||
// 'vminfo' is the second.
|
||||
Register O1_destslot = O1_scratch;
|
||||
__ ldsw(G3_amh_conversion, O1_destslot);
|
||||
assert(CONV_VMINFO_SHIFT == 0, "preshifted");
|
||||
__ and3(O1_destslot, CONV_VMINFO_MASK, O1_destslot);
|
||||
__ add(Gargs, __ argument_offset(O1_destslot), O1_destslot);
|
||||
|
||||
if (!rotate) {
|
||||
for (int i = 0; i < swap_bytes; i += wordSize) {
|
||||
__ ld_ptr(Address(O0_argslot, i), O2_scratch);
|
||||
__ ld_ptr(Address(O1_destslot, i), O3_scratch);
|
||||
__ st_ptr(O3_scratch, Address(O0_argslot, i));
|
||||
__ st_ptr(O2_scratch, Address(O1_destslot, i));
|
||||
}
|
||||
} else {
|
||||
// Save the first chunk, which is going to get overwritten.
|
||||
switch (swap_bytes) {
|
||||
case 4 : __ lduw(Address(O0_argslot, 0), O2_scratch); break;
|
||||
case 16: __ ldx( Address(O0_argslot, 8), O3_scratch); //fall-thru
|
||||
case 8 : __ ldx( Address(O0_argslot, 0), O2_scratch); break;
|
||||
default: ShouldNotReachHere();
|
||||
}
|
||||
|
||||
if (rotate > 0) {
// Rotate upward.
__ sub(O0_argslot, swap_bytes, O0_argslot);
|
||||
#ifdef ASSERT
|
||||
{
|
||||
// Verify that argslot > destslot, by at least swap_bytes.
|
||||
Label L_ok;
|
||||
__ cmp(O0_argslot, O1_destslot);
|
||||
__ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, L_ok);
|
||||
__ delayed()->nop();
|
||||
__ stop("source must be above destination (upward rotation)");
|
||||
__ bind(L_ok);
|
||||
}
|
||||
#endif
|
||||
// Work argslot down to destslot, copying contiguous data upwards.
// Pseudo-code:
// argslot = src_addr - swap_bytes
// destslot = dest_addr
// while (argslot >= destslot) {
// *(argslot + swap_bytes) = *(argslot + 0);
// argslot--;
// }
Label loop;
|
||||
__ bind(loop);
|
||||
__ ld_ptr(Address(O0_argslot, 0), G5_index);
|
||||
__ st_ptr(G5_index, Address(O0_argslot, swap_bytes));
|
||||
__ sub(O0_argslot, wordSize, O0_argslot);
|
||||
__ cmp(O0_argslot, O1_destslot);
|
||||
__ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, loop);
|
||||
__ delayed()->nop(); // FILLME
|
||||
} else {
|
||||
__ add(O0_argslot, swap_bytes, O0_argslot);
|
||||
#ifdef ASSERT
|
||||
{
|
||||
// Verify that argslot < destslot, by at least swap_bytes.
|
||||
Label L_ok;
|
||||
__ cmp(O0_argslot, O1_destslot);
|
||||
__ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
|
||||
__ delayed()->nop();
__ stop("source must be below destination (downward rotation)");
__ bind(L_ok);
|
||||
}
|
||||
#endif
|
||||
// Work argslot up to destslot, copying contiguous data downwards.
// Pseudo-code:
// argslot = src_addr + swap_bytes
// destslot = dest_addr
// while (argslot <= destslot) {
// *(argslot - swap_bytes) = *(argslot + 0);
// argslot++;
// }
Label loop;
|
||||
__ bind(loop);
|
||||
__ ld_ptr(Address(O0_argslot, 0), G5_index);
|
||||
__ st_ptr(G5_index, Address(O0_argslot, -swap_bytes));
|
||||
__ add(O0_argslot, wordSize, O0_argslot);
|
||||
__ cmp(O0_argslot, O1_destslot);
|
||||
__ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, loop);
|
||||
__ delayed()->nop(); // FILLME
|
||||
}
|
||||
|
||||
// Store the original first chunk into the destination slot, now free.
|
||||
switch (swap_bytes) {
|
||||
case 4 : __ stw(O2_scratch, Address(O1_destslot, 0)); break;
|
||||
case 16: __ stx(O3_scratch, Address(O1_destslot, 8)); // fall-thru
|
||||
case 8 : __ stx(O2_scratch, Address(O1_destslot, 0)); break;
|
||||
default: ShouldNotReachHere();
|
||||
}
|
||||
}
|
||||
|
||||
__ ld_ptr(G3_mh_vmtarget, G3_method_handle);
|
||||
__ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
|
||||
}
|
||||
break;
|
||||
|
||||
case _adapter_dup_args:
|
||||
{
|
||||
// 'argslot' is the position of the first argument to duplicate.
|
||||
__ ldsw(G3_amh_vmargslot, O0_argslot);
|
||||
__ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
|
||||
|
||||
// 'stack_move' is negative number of words to duplicate.
|
||||
Register G5_stack_move = G5_index;
|
||||
__ ldsw(G3_amh_conversion, G5_stack_move);
|
||||
__ sra(G5_stack_move, CONV_STACK_MOVE_SHIFT, G5_stack_move);
|
||||
|
||||
// Remember the old Gargs (argslot[0]).
|
||||
Register O1_oldarg = O1_scratch;
|
||||
__ mov(Gargs, O1_oldarg);
|
||||
|
||||
// Move Gargs down to make room for dups.
|
||||
__ sll_ptr(G5_stack_move, LogBytesPerWord, G5_stack_move);
|
||||
__ add(Gargs, G5_stack_move, Gargs);
|
||||
|
||||
// Compute the new Gargs (argslot[0]).
|
||||
Register O2_newarg = O2_scratch;
|
||||
__ mov(Gargs, O2_newarg);
|
||||
|
||||
// Copy from oldarg[0...] down to newarg[0...]
// Pseudo-code:
// O1_oldarg = old-Gargs
|
||||
// O2_newarg = new-Gargs
|
||||
// O0_argslot = argslot
|
||||
// while (O2_newarg < O1_oldarg) *O2_newarg = *O0_argslot++
|
||||
Label loop;
|
||||
__ bind(loop);
|
||||
__ ld_ptr(Address(O0_argslot, 0), O3_scratch);
|
||||
__ st_ptr(O3_scratch, Address(O2_newarg, 0));
|
||||
__ add(O0_argslot, wordSize, O0_argslot);
|
||||
__ add(O2_newarg, wordSize, O2_newarg);
|
||||
__ cmp(O2_newarg, O1_oldarg);
|
||||
__ brx(Assembler::less, false, Assembler::pt, loop);
|
||||
__ delayed()->nop(); // FILLME
|
||||
|
||||
__ ld_ptr(G3_mh_vmtarget, G3_method_handle);
|
||||
__ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
|
||||
}
|
||||
break;
|
||||
|
||||
case _adapter_drop_args:
|
||||
{
|
||||
// 'argslot' is the position of the first argument to nuke.
|
||||
__ ldsw(G3_amh_vmargslot, O0_argslot);
|
||||
__ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
|
||||
|
||||
// 'stack_move' is number of words to drop.
|
||||
Register G5_stack_move = G5_index;
|
||||
__ ldsw(G3_amh_conversion, G5_stack_move);
|
||||
__ sra(G5_stack_move, CONV_STACK_MOVE_SHIFT, G5_stack_move);
|
||||
|
||||
remove_arg_slots(_masm, G5_stack_move, O0_argslot, O1_scratch, O2_scratch, O3_scratch);
|
||||
|
||||
__ ld_ptr(G3_mh_vmtarget, G3_method_handle);
|
||||
__ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
|
||||
}
|
||||
break;
|
||||
|
||||
case _adapter_collect_args:
|
||||
__ unimplemented(entry_name(ek)); // %%% FIXME: NYI
|
||||
break;
|
||||
|
||||
case _adapter_spread_args:
|
||||
// Handled completely by optimized cases.
|
||||
__ stop("init_AdapterMethodHandle should not issue this");
|
||||
break;
|
||||
|
||||
case _adapter_opt_spread_0:
|
||||
case _adapter_opt_spread_1:
|
||||
case _adapter_opt_spread_more:
|
||||
{
|
||||
// spread an array out into a group of arguments
|
||||
__ unimplemented(entry_name(ek));
|
||||
}
|
||||
break;
|
||||
|
||||
case _adapter_flyby:
|
||||
case _adapter_ricochet:
|
||||
__ unimplemented(entry_name(ek)); // %%% FIXME: NYI
|
||||
break;
|
||||
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
|
||||
address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);
|
||||
__ unimplemented(entry_name(ek)); // %%% FIXME: NYI
|
||||
|
||||
init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, me_cookie));
|
||||
}
|
||||
|
@ -547,17 +547,11 @@ class AdapterGenerator {
|
||||
void set_Rdisp(Register r) { Rdisp = r; }
|
||||
|
||||
void patch_callers_callsite();
|
||||
void tag_c2i_arg(frame::Tag t, Register base, int st_off, Register scratch);
|
||||
|
||||
// base+st_off points to top of argument
|
||||
int arg_offset(const int st_off) { return st_off + Interpreter::value_offset_in_bytes(); }
|
||||
int arg_offset(const int st_off) { return st_off; }
|
||||
int next_arg_offset(const int st_off) {
|
||||
return st_off - Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
|
||||
}
|
||||
|
||||
int tag_offset(const int st_off) { return st_off + Interpreter::tag_offset_in_bytes(); }
|
||||
int next_tag_offset(const int st_off) {
|
||||
return st_off - Interpreter::stackElementSize() + Interpreter::tag_offset_in_bytes();
|
||||
return st_off - Interpreter::stackElementSize;
|
||||
}
|
||||
|
||||
// Argument slot values may be loaded first into a register because
|
||||
@ -565,9 +559,6 @@ class AdapterGenerator {
|
||||
RegisterOrConstant arg_slot(const int st_off);
|
||||
RegisterOrConstant next_arg_slot(const int st_off);
|
||||
|
||||
RegisterOrConstant tag_slot(const int st_off);
|
||||
RegisterOrConstant next_tag_slot(const int st_off);
|
||||
|
||||
// Stores long into offset pointed to by base
|
||||
void store_c2i_long(Register r, Register base,
|
||||
const int st_off, bool is_stack);
|
||||
@ -653,23 +644,6 @@ void AdapterGenerator::patch_callers_callsite() {
|
||||
__ bind(L);
|
||||
}
|
||||
|
||||
void AdapterGenerator::tag_c2i_arg(frame::Tag t, Register base, int st_off,
|
||||
Register scratch) {
|
||||
if (TaggedStackInterpreter) {
|
||||
RegisterOrConstant slot = tag_slot(st_off);
|
||||
// have to store zero because local slots can be reused (rats!)
|
||||
if (t == frame::TagValue) {
|
||||
__ st_ptr(G0, base, slot);
|
||||
} else if (t == frame::TagCategory2) {
|
||||
__ st_ptr(G0, base, slot);
|
||||
__ st_ptr(G0, base, next_tag_slot(st_off));
|
||||
} else {
|
||||
__ mov(t, scratch);
|
||||
__ st_ptr(scratch, base, slot);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
RegisterOrConstant AdapterGenerator::arg_slot(const int st_off) {
|
||||
RegisterOrConstant roc(arg_offset(st_off));
|
||||
@ -682,17 +656,6 @@ RegisterOrConstant AdapterGenerator::next_arg_slot(const int st_off) {
|
||||
}
|
||||
|
||||
|
||||
RegisterOrConstant AdapterGenerator::tag_slot(const int st_off) {
|
||||
RegisterOrConstant roc(tag_offset(st_off));
|
||||
return __ ensure_simm13_or_reg(roc, Rdisp);
|
||||
}
|
||||
|
||||
RegisterOrConstant AdapterGenerator::next_tag_slot(const int st_off) {
|
||||
RegisterOrConstant roc(next_tag_offset(st_off));
|
||||
return __ ensure_simm13_or_reg(roc, Rdisp);
|
||||
}
|
||||
|
||||
|
||||
// Stores long into offset pointed to by base
|
||||
void AdapterGenerator::store_c2i_long(Register r, Register base,
|
||||
const int st_off, bool is_stack) {
|
||||
@ -718,19 +681,16 @@ void AdapterGenerator::store_c2i_long(Register r, Register base,
|
||||
}
|
||||
#endif // COMPILER2
|
||||
#endif // _LP64
|
||||
tag_c2i_arg(frame::TagCategory2, base, st_off, r);
|
||||
}
|
||||
|
||||
void AdapterGenerator::store_c2i_object(Register r, Register base,
|
||||
const int st_off) {
|
||||
__ st_ptr (r, base, arg_slot(st_off));
|
||||
tag_c2i_arg(frame::TagReference, base, st_off, r);
|
||||
}
|
||||
|
||||
void AdapterGenerator::store_c2i_int(Register r, Register base,
|
||||
const int st_off) {
|
||||
__ st (r, base, arg_slot(st_off));
|
||||
tag_c2i_arg(frame::TagValue, base, st_off, r);
|
||||
}
|
||||
|
||||
// Stores into offset pointed to by base
|
||||
@ -745,13 +705,11 @@ void AdapterGenerator::store_c2i_double(VMReg r_2,
|
||||
__ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
|
||||
__ stf(FloatRegisterImpl::S, r_2->as_FloatRegister(), base, arg_slot(st_off) );
|
||||
#endif
|
||||
tag_c2i_arg(frame::TagCategory2, base, st_off, G1_scratch);
|
||||
}
|
||||
|
||||
void AdapterGenerator::store_c2i_float(FloatRegister f, Register base,
|
||||
const int st_off) {
|
||||
__ stf(FloatRegisterImpl::S, f, base, arg_slot(st_off));
|
||||
tag_c2i_arg(frame::TagValue, base, st_off, G1_scratch);
|
||||
}
|
||||
|
||||
void AdapterGenerator::gen_c2i_adapter(
|
||||
@ -786,14 +744,14 @@ void AdapterGenerator::gen_c2i_adapter(
|
||||
// Since all args are passed on the stack, total_args_passed*wordSize is the
|
||||
// space we need. Add in varargs area needed by the interpreter. Round up
|
||||
// to stack alignment.
|
||||
const int arg_size = total_args_passed * Interpreter::stackElementSize();
|
||||
const int arg_size = total_args_passed * Interpreter::stackElementSize;
|
||||
const int varargs_area =
|
||||
(frame::varargs_offset - frame::register_save_words)*wordSize;
|
||||
const int extraspace = round_to(arg_size + varargs_area, 2*wordSize);
|
||||
|
||||
int bias = STACK_BIAS;
|
||||
const int interp_arg_offset = frame::varargs_offset*wordSize +
|
||||
(total_args_passed-1)*Interpreter::stackElementSize();
|
||||
(total_args_passed-1)*Interpreter::stackElementSize;
|
||||
|
||||
Register base = SP;
|
||||
|
||||
@ -814,7 +772,7 @@ void AdapterGenerator::gen_c2i_adapter(
|
||||
|
||||
// First write G1 (if used) to where ever it must go
|
||||
for (int i=0; i<total_args_passed; i++) {
|
||||
const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize()) + bias;
|
||||
const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize) + bias;
|
||||
VMReg r_1 = regs[i].first();
|
||||
VMReg r_2 = regs[i].second();
|
||||
if (r_1 == G1_scratch->as_VMReg()) {
|
||||
@ -831,7 +789,7 @@ void AdapterGenerator::gen_c2i_adapter(
|
||||
|
||||
// Now write the args into the outgoing interpreter space
|
||||
for (int i=0; i<total_args_passed; i++) {
|
||||
const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize()) + bias;
|
||||
const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize) + bias;
|
||||
VMReg r_1 = regs[i].first();
|
||||
VMReg r_2 = regs[i].second();
|
||||
if (!r_1->is_valid()) {
|
||||
@ -900,7 +858,7 @@ void AdapterGenerator::gen_c2i_adapter(
|
||||
#endif // _LP64
|
||||
|
||||
__ mov((frame::varargs_offset)*wordSize -
|
||||
1*Interpreter::stackElementSize()+bias+BytesPerWord, G1);
|
||||
1*Interpreter::stackElementSize+bias+BytesPerWord, G1);
|
||||
// Jump to the interpreter just as if interpreter was doing it.
|
||||
__ jmpl(G3_scratch, 0, G0);
|
||||
// Setup Lesp for the call. Cannot actually set Lesp as the current Lesp
|
||||
@ -1051,7 +1009,7 @@ void AdapterGenerator::gen_i2c_adapter(
|
||||
// ldx/lddf optimizations.
|
||||
|
||||
// Load in argument order going down.
|
||||
const int ld_off = (total_args_passed-i)*Interpreter::stackElementSize();
|
||||
const int ld_off = (total_args_passed-i)*Interpreter::stackElementSize;
|
||||
set_Rdisp(G1_scratch);
|
||||
|
||||
VMReg r_1 = regs[i].first();
|
||||
@ -1120,7 +1078,7 @@ void AdapterGenerator::gen_i2c_adapter(
|
||||
for (int i=0; i<total_args_passed; i++) {
|
||||
if (regs[i].first()->is_Register() && regs[i].second()->is_valid()) {
|
||||
// Load in argument order going down
|
||||
int ld_off = (total_args_passed-i)*Interpreter::stackElementSize();
|
||||
int ld_off = (total_args_passed-i)*Interpreter::stackElementSize;
|
||||
// Need to marshal 64-bit value from misaligned Lesp loads
|
||||
Register r = regs[i].first()->as_Register()->after_restore();
|
||||
if (r == G1 || r == G4) {
|
||||
@ -3062,7 +3020,7 @@ int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals)
|
||||
"test and remove; got more parms than locals");
|
||||
if (callee_locals < callee_parameters)
|
||||
return 0; // No adjustment for negative locals
|
||||
int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords();
|
||||
int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
|
||||
return round_to(diff, WordsPerLong);
|
||||
}
|
||||
|
||||
|
@ -471,6 +471,9 @@ extern bool can_branch_register( Node *bol, Node *cmp );
|
||||
source %{
|
||||
#define __ _masm.
|
||||
|
||||
// Block initializing store
|
||||
#define ASI_BLK_INIT_QUAD_LDD_P 0xE2
|
||||
|
||||
// tertiary op of a LoadP or StoreP encoding
|
||||
#define REGP_OP true
|
||||
|
||||
@ -920,38 +923,6 @@ void emit_form3_mem_reg(CodeBuffer &cbuf, const MachNode* n, int primary, int te
|
||||
#endif
|
||||
}
|
||||
|
||||
void emit_form3_mem_reg_asi(CodeBuffer &cbuf, const MachNode* n, int primary, int tertiary,
|
||||
int src1_enc, int disp32, int src2_enc, int dst_enc, int asi) {
|
||||
|
||||
uint instr;
|
||||
instr = (Assembler::ldst_op << 30)
|
||||
| (dst_enc << 25)
|
||||
| (primary << 19)
|
||||
| (src1_enc << 14);
|
||||
|
||||
int disp = disp32;
|
||||
int index = src2_enc;
|
||||
|
||||
if (src1_enc == R_SP_enc || src1_enc == R_FP_enc)
|
||||
disp += STACK_BIAS;
|
||||
|
||||
// We should have a compiler bailout here rather than a guarantee.
|
||||
// Better yet would be some mechanism to handle variable-size matches correctly.
|
||||
guarantee(Assembler::is_simm13(disp), "Do not match large constant offsets" );
|
||||
|
||||
if( disp != 0 ) {
|
||||
// use reg-reg form
|
||||
// set src2=R_O7 contains offset
|
||||
index = R_O7_enc;
|
||||
emit3_simm13( cbuf, Assembler::arith_op, index, Assembler::or_op3, 0, disp);
|
||||
}
|
||||
instr |= (asi << 5);
|
||||
instr |= index;
|
||||
uint *code = (uint*)cbuf.code_end();
|
||||
*code = instr;
|
||||
cbuf.set_code_end(cbuf.code_end() + BytesPerInstWord);
|
||||
}
|
||||
|
||||
void emit_call_reloc(CodeBuffer &cbuf, intptr_t entry_point, relocInfo::relocType rtype, bool preserve_g2 = false, bool force_far_call = false) {
|
||||
// The method which records debug information at every safepoint
|
||||
// expects the call to be the first instruction in the snippet as
|
||||
@ -1951,11 +1922,6 @@ encode %{
|
||||
$mem$$base, $mem$$disp, $mem$$index, $dst$$reg);
|
||||
%}
|
||||
|
||||
enc_class form3_mem_reg_little( memory mem, iRegI dst) %{
|
||||
emit_form3_mem_reg_asi(cbuf, this, $primary, -1,
|
||||
$mem$$base, $mem$$disp, $mem$$index, $dst$$reg, Assembler::ASI_PRIMARY_LITTLE);
|
||||
%}
|
||||
|
||||
enc_class form3_mem_prefetch_read( memory mem ) %{
|
||||
emit_form3_mem_reg(cbuf, this, $primary, -1,
|
||||
$mem$$base, $mem$$disp, $mem$$index, 0/*prefetch function many-reads*/);
|
||||
@ -4308,8 +4274,8 @@ operand cmpOp_commute() %{
|
||||
// instructions for every form of operand when the instruction accepts
|
||||
// multiple operand types with the same basic encoding and format. The classic
|
||||
// case of this is memory operands.
|
||||
// Indirect is not included since its use is limited to Compare & Swap
|
||||
opclass memory( indirect, indOffset13, indIndex );
|
||||
opclass indIndexMemory( indIndex );
|
||||
|
||||
//----------PIPELINE-----------------------------------------------------------
|
||||
pipeline %{
|
||||
@ -6147,6 +6113,7 @@ instruct prefetchr( memory mem ) %{
|
||||
%}
|
||||
|
||||
instruct prefetchw( memory mem ) %{
|
||||
predicate(AllocatePrefetchStyle != 3 );
|
||||
match( PrefetchWrite mem );
|
||||
ins_cost(MEMORY_REF_COST);
|
||||
|
||||
@ -6156,6 +6123,23 @@ instruct prefetchw( memory mem ) %{
|
||||
ins_pipe(iload_mem);
|
||||
%}
|
||||
|
||||
// Use BIS instruction to prefetch.
|
||||
instruct prefetchw_bis( memory mem ) %{
|
||||
predicate(AllocatePrefetchStyle == 3);
|
||||
match( PrefetchWrite mem );
|
||||
ins_cost(MEMORY_REF_COST);
|
||||
|
||||
format %{ "STXA G0,$mem\t! // Block initializing store" %}
|
||||
ins_encode %{
|
||||
Register base = as_Register($mem$$base);
|
||||
int disp = $mem$$disp;
|
||||
if (disp != 0) {
|
||||
__ add(base, AllocatePrefetchStepSize, base);
|
||||
}
|
||||
__ stxa(G0, base, G0, ASI_BLK_INIT_QUAD_LDD_P);
|
||||
%}
|
||||
ins_pipe(istore_mem_reg);
|
||||
%}
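// Note: STXA with ASI_BLK_INIT_QUAD_LDD_P is a block-initializing store; it establishes
// the target cache line without first reading it from memory, which is why a single
// store of G0 can serve as the write prefetch when AllocatePrefetchStyle == 3.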
|
||||
|
||||
//----------Store Instructions-------------------------------------------------
|
||||
// Store Byte
|
||||
@ -9645,84 +9629,179 @@ instruct popCountL(iRegI dst, iRegL src) %{
|
||||
|
||||
instruct bytes_reverse_int(iRegI dst, stackSlotI src) %{
|
||||
match(Set dst (ReverseBytesI src));
|
||||
effect(DEF dst, USE src);
|
||||
|
||||
// Op cost is artificially doubled to make sure that load or store
|
||||
// instructions are preferred over this one which requires a spill
|
||||
// onto a stack slot.
|
||||
ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
|
||||
size(8);
|
||||
format %{ "LDUWA $src, $dst\t!asi=primary_little" %}
|
||||
opcode(Assembler::lduwa_op3);
|
||||
ins_encode( form3_mem_reg_little(src, dst) );
|
||||
|
||||
ins_encode %{
|
||||
__ set($src$$disp + STACK_BIAS, O7);
|
||||
__ lduwa($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
|
||||
%}
|
||||
ins_pipe( iload_mem );
|
||||
%}
|
||||
|
||||
instruct bytes_reverse_long(iRegL dst, stackSlotL src) %{
|
||||
match(Set dst (ReverseBytesL src));
|
||||
effect(DEF dst, USE src);
|
||||
|
||||
// Op cost is artificially doubled to make sure that load or store
|
||||
// instructions are preferred over this one which requires a spill
|
||||
// onto a stack slot.
|
||||
ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
|
||||
size(8);
|
||||
format %{ "LDXA $src, $dst\t!asi=primary_little" %}
|
||||
|
||||
opcode(Assembler::ldxa_op3);
|
||||
ins_encode( form3_mem_reg_little(src, dst) );
|
||||
ins_encode %{
|
||||
__ set($src$$disp + STACK_BIAS, O7);
|
||||
__ ldxa($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
|
||||
%}
|
||||
ins_pipe( iload_mem );
|
||||
%}
|
||||
|
||||
instruct bytes_reverse_unsigned_short(iRegI dst, stackSlotI src) %{
|
||||
match(Set dst (ReverseBytesUS src));
|
||||
|
||||
// Op cost is artificially doubled to make sure that load or store
|
||||
// instructions are preferred over this one which requires a spill
|
||||
// onto a stack slot.
|
||||
ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
|
||||
format %{ "LDUHA $src, $dst\t!asi=primary_little\n\t" %}
|
||||
|
||||
ins_encode %{
|
||||
// the value was spilled as an int so bias the load
|
||||
__ set($src$$disp + STACK_BIAS + 2, O7);
|
||||
__ lduha($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
|
||||
%}
|
||||
ins_pipe( iload_mem );
|
||||
%}
|
||||
|
||||
instruct bytes_reverse_short(iRegI dst, stackSlotI src) %{
|
||||
match(Set dst (ReverseBytesS src));
|
||||
|
||||
// Op cost is artificially doubled to make sure that load or store
|
||||
// instructions are preferred over this one which requires a spill
|
||||
// onto a stack slot.
|
||||
ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
|
||||
format %{ "LDSHA $src, $dst\t!asi=primary_little\n\t" %}
|
||||
|
||||
ins_encode %{
|
||||
// the value was spilled as an int so bias the load
|
||||
__ set($src$$disp + STACK_BIAS + 2, O7);
|
||||
__ ldsha($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
|
||||
%}
|
||||
ins_pipe( iload_mem );
|
||||
%}
|
||||
|
||||
// Load Integer reversed byte order
|
||||
instruct loadI_reversed(iRegI dst, memory src) %{
|
||||
instruct loadI_reversed(iRegI dst, indIndexMemory src) %{
|
||||
match(Set dst (ReverseBytesI (LoadI src)));
|
||||
|
||||
ins_cost(DEFAULT_COST + MEMORY_REF_COST);
|
||||
size(8);
|
||||
size(4);
|
||||
format %{ "LDUWA $src, $dst\t!asi=primary_little" %}
|
||||
|
||||
opcode(Assembler::lduwa_op3);
|
||||
ins_encode( form3_mem_reg_little( src, dst) );
|
||||
ins_encode %{
|
||||
__ lduwa($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
|
||||
%}
|
||||
ins_pipe(iload_mem);
|
||||
%}
|
||||
|
||||
// Load Long - aligned and reversed
|
||||
instruct loadL_reversed(iRegL dst, memory src) %{
|
||||
instruct loadL_reversed(iRegL dst, indIndexMemory src) %{
|
||||
match(Set dst (ReverseBytesL (LoadL src)));
|
||||
|
||||
ins_cost(DEFAULT_COST + MEMORY_REF_COST);
|
||||
size(8);
|
||||
ins_cost(MEMORY_REF_COST);
|
||||
size(4);
|
||||
format %{ "LDXA $src, $dst\t!asi=primary_little" %}
|
||||
|
||||
opcode(Assembler::ldxa_op3);
|
||||
ins_encode( form3_mem_reg_little( src, dst ) );
|
||||
ins_encode %{
|
||||
__ ldxa($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
|
||||
%}
|
||||
ins_pipe(iload_mem);
|
||||
%}
|
||||
|
||||
// Load unsigned short / char reversed byte order
|
||||
instruct loadUS_reversed(iRegI dst, indIndexMemory src) %{
|
||||
match(Set dst (ReverseBytesUS (LoadUS src)));
|
||||
|
||||
ins_cost(MEMORY_REF_COST);
|
||||
size(4);
|
||||
format %{ "LDUHA $src, $dst\t!asi=primary_little" %}
|
||||
|
||||
ins_encode %{
|
||||
__ lduha($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
|
||||
%}
|
||||
ins_pipe(iload_mem);
|
||||
%}
|
||||
|
||||
// Load short reversed byte order
|
||||
instruct loadS_reversed(iRegI dst, indIndexMemory src) %{
|
||||
match(Set dst (ReverseBytesS (LoadS src)));
|
||||
|
||||
ins_cost(MEMORY_REF_COST);
|
||||
size(4);
|
||||
format %{ "LDSHA $src, $dst\t!asi=primary_little" %}
|
||||
|
||||
ins_encode %{
|
||||
__ ldsha($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
|
||||
%}
|
||||
ins_pipe(iload_mem);
|
||||
%}
|
||||
|
||||
// Store Integer reversed byte order
|
||||
instruct storeI_reversed(memory dst, iRegI src) %{
|
||||
instruct storeI_reversed(indIndexMemory dst, iRegI src) %{
|
||||
match(Set dst (StoreI dst (ReverseBytesI src)));
|
||||
|
||||
ins_cost(MEMORY_REF_COST);
|
||||
size(8);
|
||||
size(4);
|
||||
format %{ "STWA $src, $dst\t!asi=primary_little" %}
|
||||
|
||||
opcode(Assembler::stwa_op3);
|
||||
ins_encode( form3_mem_reg_little( dst, src) );
|
||||
ins_encode %{
|
||||
__ stwa($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
|
||||
%}
|
||||
ins_pipe(istore_mem_reg);
|
||||
%}
|
||||
|
||||
// Store Long reversed byte order
|
||||
instruct storeL_reversed(memory dst, iRegL src) %{
|
||||
instruct storeL_reversed(indIndexMemory dst, iRegL src) %{
|
||||
match(Set dst (StoreL dst (ReverseBytesL src)));
|
||||
|
||||
ins_cost(MEMORY_REF_COST);
|
||||
size(8);
|
||||
size(4);
|
||||
format %{ "STXA $src, $dst\t!asi=primary_little" %}
|
||||
|
||||
opcode(Assembler::stxa_op3);
|
||||
ins_encode( form3_mem_reg_little( dst, src) );
|
||||
ins_encode %{
|
||||
__ stxa($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
|
||||
%}
|
||||
ins_pipe(istore_mem_reg);
|
||||
%}
|
||||
|
||||
// Store unsigned short/char reversed byte order
|
||||
instruct storeUS_reversed(indIndexMemory dst, iRegI src) %{
|
||||
match(Set dst (StoreC dst (ReverseBytesUS src)));
|
||||
|
||||
ins_cost(MEMORY_REF_COST);
|
||||
size(4);
|
||||
format %{ "STHA $src, $dst\t!asi=primary_little" %}
|
||||
|
||||
ins_encode %{
|
||||
__ stha($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
|
||||
%}
|
||||
ins_pipe(istore_mem_reg);
|
||||
%}
|
||||
|
||||
// Store short reversed byte order
|
||||
instruct storeS_reversed(indIndexMemory dst, iRegI src) %{
|
||||
match(Set dst (StoreC dst (ReverseBytesS src)));
|
||||
|
||||
ins_cost(MEMORY_REF_COST);
|
||||
size(4);
|
||||
format %{ "STHA $src, $dst\t!asi=primary_little" %}
|
||||
|
||||
ins_encode %{
|
||||
__ stha($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
|
||||
%}
|
||||
ins_pipe(istore_mem_reg);
|
||||
%}
|
||||
|
||||
|
@ -139,7 +139,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
__ ld_ptr(parameter_size.as_address(), t); // get parameter size (in words)
|
||||
__ add(t, frame::memory_parameter_word_sp_offset, t); // add space for save area (in words)
|
||||
__ round_to(t, WordsPerLong); // make sure it is multiple of 2 (in words)
|
||||
__ sll(t, Interpreter::logStackElementSize(), t); // compute number of bytes
|
||||
__ sll(t, Interpreter::logStackElementSize, t); // compute number of bytes
|
||||
__ neg(t); // negate so it can be used with save
|
||||
__ save(SP, t, SP); // setup new frame
|
||||
}
|
||||
@ -191,19 +191,13 @@ class StubGenerator: public StubCodeGenerator {
|
||||
// copy parameters if any
|
||||
Label loop;
|
||||
__ BIND(loop);
|
||||
// Store tag first.
|
||||
if (TaggedStackInterpreter) {
|
||||
__ ld_ptr(src, 0, tmp);
|
||||
__ add(src, BytesPerWord, src); // get next
|
||||
__ st_ptr(tmp, dst, Interpreter::tag_offset_in_bytes());
|
||||
}
|
||||
// Store parameter value
|
||||
__ ld_ptr(src, 0, tmp);
|
||||
__ add(src, BytesPerWord, src);
|
||||
__ st_ptr(tmp, dst, Interpreter::value_offset_in_bytes());
|
||||
__ st_ptr(tmp, dst, 0);
|
||||
__ deccc(cnt);
|
||||
__ br(Assembler::greater, false, Assembler::pt, loop);
|
||||
__ delayed()->sub(dst, Interpreter::stackElementSize(), dst);
|
||||
__ delayed()->sub(dst, Interpreter::stackElementSize, dst);
|
||||
|
||||
// done
|
||||
__ BIND(exit);
|
||||
@ -220,7 +214,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
// setup parameters
|
||||
const Register t = G3_scratch;
|
||||
__ ld_ptr(parameter_size.as_in().as_address(), t); // get parameter size (in words)
|
||||
__ sll(t, Interpreter::logStackElementSize(), t); // compute number of bytes
|
||||
__ sll(t, Interpreter::logStackElementSize, t); // compute number of bytes
|
||||
__ sub(FP, t, Gargs); // setup parameter pointer
|
||||
#ifdef _LP64
|
||||
__ add( Gargs, STACK_BIAS, Gargs ); // Account for LP64 stack bias
|
||||
@ -1148,7 +1142,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
__ andn(from, 7, from); // Align address
|
||||
__ ldx(from, 0, O3);
|
||||
__ inc(from, 8);
|
||||
__ align(16);
|
||||
__ align(OptoLoopAlignment);
|
||||
__ BIND(L_loop);
|
||||
__ ldx(from, 0, O4);
|
||||
__ deccc(count, count_dec); // Can we do next iteration after this one?
|
||||
@ -1220,7 +1214,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
//
|
||||
__ andn(end_from, 7, end_from); // Align address
|
||||
__ ldx(end_from, 0, O3);
|
||||
__ align(16);
|
||||
__ align(OptoLoopAlignment);
|
||||
__ BIND(L_loop);
|
||||
__ ldx(end_from, -8, O4);
|
||||
__ deccc(count, count_dec); // Can we do next iteration after this one?
|
||||
@ -1349,7 +1343,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
__ BIND(L_copy_byte);
|
||||
__ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
|
||||
__ delayed()->nop();
|
||||
__ align(16);
|
||||
__ align(OptoLoopAlignment);
|
||||
__ BIND(L_copy_byte_loop);
|
||||
__ ldub(from, offset, O3);
|
||||
__ deccc(count);
|
||||
@ -1445,7 +1439,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
L_aligned_copy, L_copy_byte);
|
||||
}
|
||||
// copy 4 elements (16 bytes) at a time
|
||||
__ align(16);
|
||||
__ align(OptoLoopAlignment);
|
||||
__ BIND(L_aligned_copy);
|
||||
__ dec(end_from, 16);
|
||||
__ ldx(end_from, 8, O3);
|
||||
@ -1461,7 +1455,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
__ BIND(L_copy_byte);
|
||||
__ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
|
||||
__ delayed()->nop();
|
||||
__ align(16);
|
||||
__ align(OptoLoopAlignment);
|
||||
__ BIND(L_copy_byte_loop);
|
||||
__ dec(end_from);
|
||||
__ dec(end_to);
|
||||
@ -1577,7 +1571,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
__ BIND(L_copy_2_bytes);
|
||||
__ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
|
||||
__ delayed()->nop();
|
||||
__ align(16);
|
||||
__ align(OptoLoopAlignment);
|
||||
__ BIND(L_copy_2_bytes_loop);
|
||||
__ lduh(from, offset, O3);
|
||||
__ deccc(count);
|
||||
@ -1684,7 +1678,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
L_aligned_copy, L_copy_2_bytes);
|
||||
}
|
||||
// copy 4 elements (16 bytes) at a time
|
||||
__ align(16);
|
||||
__ align(OptoLoopAlignment);
|
||||
__ BIND(L_aligned_copy);
|
||||
__ dec(end_from, 16);
|
||||
__ ldx(end_from, 8, O3);
|
||||
@ -1781,7 +1775,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
// copy with shift 4 elements (16 bytes) at a time
|
||||
__ dec(count, 4); // The cmp at the beginning guaranty count >= 4
|
||||
|
||||
__ align(16);
|
||||
__ align(OptoLoopAlignment);
|
||||
__ BIND(L_copy_16_bytes);
|
||||
__ ldx(from, 4, O4);
|
||||
__ deccc(count, 4); // Can we do next iteration after this one?
|
||||
@ -1907,7 +1901,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
// to form 2 aligned 8-bytes chunks to store.
|
||||
//
|
||||
__ ldx(end_from, -4, O3);
|
||||
__ align(16);
|
||||
__ align(OptoLoopAlignment);
|
||||
__ BIND(L_copy_16_bytes);
|
||||
__ ldx(end_from, -12, O4);
|
||||
__ deccc(count, 4);
|
||||
@ -1929,7 +1923,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
__ delayed()->inc(count, 4);
|
||||
|
||||
// copy 4 elements (16 bytes) at a time
|
||||
__ align(16);
|
||||
__ align(OptoLoopAlignment);
|
||||
__ BIND(L_aligned_copy);
|
||||
__ dec(end_from, 16);
|
||||
__ ldx(end_from, 8, O3);
|
||||
@ -2000,6 +1994,27 @@ class StubGenerator: public StubCodeGenerator {
|
||||
// to: O1
|
||||
// count: O2 treated as signed
|
||||
//
|
||||
// count -= 2;
|
||||
// if ( count >= 0 ) { // >= 2 elements
|
||||
// if ( count > 6) { // >= 8 elements
|
||||
// count -= 6; // original count - 8
|
||||
// do {
|
||||
// copy_8_elements;
|
||||
// count -= 8;
|
||||
// } while ( count >= 0 );
|
||||
// count += 6;
|
||||
// }
|
||||
// if ( count >= 0 ) { // >= 2 elements
|
||||
// do {
|
||||
// copy_2_elements;
|
||||
// } while ( (count=count-2) >= 0 );
|
||||
// }
|
||||
// }
|
||||
// count += 2;
|
||||
// if ( count != 0 ) { // 1 element left
|
||||
// copy_1_element;
|
||||
// }
|
||||
//
|
||||
void generate_disjoint_long_copy_core(bool aligned) {
|
||||
Label L_copy_8_bytes, L_copy_16_bytes, L_exit;
|
||||
const Register from = O0; // source array address
|
||||
@ -2012,7 +2027,39 @@ class StubGenerator: public StubCodeGenerator {
|
||||
__ mov(G0, offset0); // offset from start of arrays (0)
|
||||
__ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes );
|
||||
__ delayed()->add(offset0, 8, offset8);
|
||||
__ align(16);
|
||||
|
||||
// Copy by 64 bytes chunks
|
||||
Label L_copy_64_bytes;
|
||||
const Register from64 = O3; // source address
|
||||
const Register to64 = G3; // destination address
|
||||
__ subcc(count, 6, O3);
|
||||
__ brx(Assembler::negative, false, Assembler::pt, L_copy_16_bytes );
|
||||
__ delayed()->mov(to, to64);
|
||||
// Now we can use O4(offset0), O5(offset8) as temps
|
||||
__ mov(O3, count);
|
||||
__ mov(from, from64);
|
||||
|
||||
__ align(OptoLoopAlignment);
|
||||
__ BIND(L_copy_64_bytes);
|
||||
for( int off = 0; off < 64; off += 16 ) {
|
||||
__ ldx(from64, off+0, O4);
|
||||
__ ldx(from64, off+8, O5);
|
||||
__ stx(O4, to64, off+0);
|
||||
__ stx(O5, to64, off+8);
|
||||
}
|
||||
__ deccc(count, 8);
|
||||
__ inc(from64, 64);
|
||||
__ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_64_bytes);
|
||||
__ delayed()->inc(to64, 64);
|
||||
|
||||
// Restore O4(offset0), O5(offset8)
|
||||
__ sub(from64, from, offset0);
|
||||
__ inccc(count, 6);
|
||||
__ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes );
|
||||
__ delayed()->add(offset0, 8, offset8);
|
||||
|
||||
// Copy by 16 bytes chunks
|
||||
__ align(OptoLoopAlignment);
|
||||
__ BIND(L_copy_16_bytes);
|
||||
__ ldx(from, offset0, O3);
|
||||
__ ldx(from, offset8, G3);
|
||||
@ -2023,6 +2070,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
__ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
|
||||
__ delayed()->inc(offset8, 16);
|
||||
|
||||
// Copy last 8 bytes
|
||||
__ BIND(L_copy_8_bytes);
|
||||
__ inccc(count, 2);
|
||||
__ brx(Assembler::zero, true, Assembler::pn, L_exit );
|
||||
@ -2085,7 +2133,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
__ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_8_bytes );
|
||||
__ delayed()->sllx(count, LogBytesPerLong, offset8);
|
||||
__ sub(offset8, 8, offset0);
|
||||
__ align(16);
|
||||
__ align(OptoLoopAlignment);
|
||||
__ BIND(L_copy_16_bytes);
|
||||
__ ldx(from, offset8, O2);
|
||||
__ ldx(from, offset0, O3);
|
||||
@ -2351,7 +2399,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
// (O5 = 0; ; O5 += wordSize) --- offset from src, dest arrays
|
||||
// (O2 = len; O2 != 0; O2--) --- number of oops *remaining*
|
||||
// G3, G4, G5 --- current oop, oop.klass, oop.klass.super
|
||||
__ align(16);
|
||||
__ align(OptoLoopAlignment);
|
||||
|
||||
__ BIND(store_element);
|
||||
__ deccc(G1_remain); // decrement the count
|
||||
@ -2863,6 +2911,16 @@ class StubGenerator: public StubCodeGenerator {
|
||||
// arraycopy stubs used by compilers
|
||||
generate_arraycopy_stubs();
|
||||
|
||||
// generic method handle stubs
|
||||
if (EnableMethodHandles && SystemDictionary::MethodHandle_klass() != NULL) {
|
||||
for (MethodHandles::EntryKind ek = MethodHandles::_EK_FIRST;
|
||||
ek < MethodHandles::_EK_LIMIT;
|
||||
ek = MethodHandles::EntryKind(1 + (int)ek)) {
|
||||
StubCodeMark mark(this, "MethodHandle", MethodHandles::entry_name(ek));
|
||||
MethodHandles::generate_method_handle_stub(_masm, ek);
|
||||
}
|
||||
}
|
||||
|
||||
// Don't initialize the platform math functions since sparc
|
||||
// doesn't have intrinsics for these operations.
|
||||
}
|
||||
|
@ -151,8 +151,10 @@ address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
|
||||
|
||||
|
||||
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
|
||||
address compiled_entry = __ pc();
|
||||
TosState incoming_state = state;
|
||||
|
||||
Label cont;
|
||||
address compiled_entry = __ pc();
|
||||
|
||||
address entry = __ pc();
|
||||
#if !defined(_LP64) && defined(COMPILER2)
|
||||
@ -165,12 +167,11 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
|
||||
// do this here. Unfortunately if we did a rethrow we'd see an machepilog node
|
||||
// first which would move g1 -> O0/O1 and destroy the exception we were throwing.
|
||||
|
||||
if( state == ltos ) {
|
||||
__ srl (G1, 0,O1);
|
||||
__ srlx(G1,32,O0);
|
||||
if (incoming_state == ltos) {
|
||||
__ srl (G1, 0, O1);
|
||||
__ srlx(G1, 32, O0);
|
||||
}
|
||||
#endif /* !_LP64 && COMPILER2 */
|
||||
|
||||
#endif // !_LP64 && COMPILER2
|
||||
|
||||
__ bind(cont);
|
||||
|
||||
@ -182,17 +183,32 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
|
||||
|
||||
__ mov(Llast_SP, SP); // Remove any adapter added stack space.
|
||||
|
||||
|
||||
Label L_got_cache, L_giant_index;
|
||||
const Register cache = G3_scratch;
|
||||
const Register size = G1_scratch;
|
||||
if (EnableInvokeDynamic) {
|
||||
__ ldub(Address(Lbcp, 0), G1_scratch); // Load current bytecode.
|
||||
__ cmp(G1_scratch, Bytecodes::_invokedynamic);
|
||||
__ br(Assembler::equal, false, Assembler::pn, L_giant_index);
|
||||
__ delayed()->nop();
|
||||
}
|
||||
__ get_cache_and_index_at_bcp(cache, G1_scratch, 1);
|
||||
__ bind(L_got_cache);
|
||||
__ ld_ptr(cache, constantPoolCacheOopDesc::base_offset() +
|
||||
ConstantPoolCacheEntry::flags_offset(), size);
|
||||
__ and3(size, 0xFF, size); // argument size in words
|
||||
__ sll(size, Interpreter::logStackElementSize(), size); // each argument size in bytes
|
||||
__ sll(size, Interpreter::logStackElementSize, size); // each argument size in bytes
|
||||
__ add(Lesp, size, Lesp); // pop arguments
|
||||
__ dispatch_next(state, step);
|
||||
|
||||
// out of the main line of code...
|
||||
if (EnableInvokeDynamic) {
|
||||
__ bind(L_giant_index);
|
||||
__ get_cache_and_index_at_bcp(cache, G1_scratch, 1, true);
|
||||
__ ba(false, L_got_cache);
|
||||
__ delayed()->nop();
|
||||
}
|
||||
|
||||
return entry;
|
||||
}
|
||||
|
||||
@ -479,7 +495,7 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
|
||||
// Set the saved SP after the register window save
|
||||
//
|
||||
assert_different_registers(Gargs, Glocals_size, Gframe_size, O5_savedSP);
|
||||
__ sll(Glocals_size, Interpreter::logStackElementSize(), Otmp1);
|
||||
__ sll(Glocals_size, Interpreter::logStackElementSize, Otmp1);
|
||||
__ add(Gargs, Otmp1, Gargs);
|
||||
|
||||
if (native_call) {
|
||||
@ -495,7 +511,7 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
|
||||
__ lduh( size_of_locals, Otmp1 );
|
||||
__ sub( Otmp1, Glocals_size, Glocals_size );
|
||||
__ round_to( Glocals_size, WordsPerLong );
|
||||
__ sll( Glocals_size, Interpreter::logStackElementSize(), Glocals_size );
|
||||
__ sll( Glocals_size, Interpreter::logStackElementSize, Glocals_size );
|
||||
|
||||
// see if the frame is greater than one page in size. If so,
|
||||
// then we need to verify there is enough stack space remaining
|
||||
@ -503,7 +519,7 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
|
||||
__ lduh( max_stack, Gframe_size );
|
||||
__ add( Gframe_size, extra_space, Gframe_size );
|
||||
__ round_to( Gframe_size, WordsPerLong );
|
||||
__ sll( Gframe_size, Interpreter::logStackElementSize(), Gframe_size);
|
||||
__ sll( Gframe_size, Interpreter::logStackElementSize, Gframe_size);
|
||||
|
||||
// Add in java locals size for stack overflow check only
|
||||
__ add( Gframe_size, Glocals_size, Gframe_size );
|
||||
@ -1218,8 +1234,8 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
|
||||
// be updated!
|
||||
__ lduh( size_of_locals, O2 );
|
||||
__ lduh( size_of_parameters, O1 );
|
||||
__ sll( O2, Interpreter::logStackElementSize(), O2);
|
||||
__ sll( O1, Interpreter::logStackElementSize(), O1 );
|
||||
__ sll( O2, Interpreter::logStackElementSize, O2);
|
||||
__ sll( O1, Interpreter::logStackElementSize, O1 );
|
||||
__ sub( Llocals, O2, O2 );
|
||||
__ sub( Llocals, O1, O1 );
|
||||
|
||||
@ -1454,8 +1470,8 @@ static int size_activation_helper(int callee_extra_locals, int max_stack, int mo
|
||||
round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
|
||||
// callee_locals and max_stack are counts, not the size in frame.
|
||||
const int locals_size =
|
||||
round_to(callee_extra_locals * Interpreter::stackElementWords(), WordsPerLong);
|
||||
const int max_stack_words = max_stack * Interpreter::stackElementWords();
|
||||
round_to(callee_extra_locals * Interpreter::stackElementWords, WordsPerLong);
|
||||
const int max_stack_words = max_stack * Interpreter::stackElementWords;
|
||||
return (round_to((max_stack_words
|
||||
//6815692//+ methodOopDesc::extra_stack_words()
|
||||
+ rounded_vm_local_words
|
||||
@ -1554,11 +1570,11 @@ int AbstractInterpreter::layout_activation(methodOop method,
|
||||
|
||||
// preallocate stack space
|
||||
intptr_t* esp = monitors - 1 -
|
||||
(tempcount * Interpreter::stackElementWords()) -
|
||||
(tempcount * Interpreter::stackElementWords) -
|
||||
popframe_extra_args;
|
||||
|
||||
int local_words = method->max_locals() * Interpreter::stackElementWords();
|
||||
int parm_words = method->size_of_parameters() * Interpreter::stackElementWords();
|
||||
int local_words = method->max_locals() * Interpreter::stackElementWords;
|
||||
int parm_words = method->size_of_parameters() * Interpreter::stackElementWords;
|
||||
NEEDS_CLEANUP;
|
||||
intptr_t* locals;
|
||||
if (caller->is_interpreted_frame()) {
|
||||
@ -1646,7 +1662,7 @@ int AbstractInterpreter::layout_activation(methodOop method,
|
||||
BasicObjectLock* mp = (BasicObjectLock*)monitors;
|
||||
|
||||
assert(interpreter_frame->interpreter_frame_method() == method, "method matches");
|
||||
assert(interpreter_frame->interpreter_frame_local_at(9) == (intptr_t *)((intptr_t)locals - (9 * Interpreter::stackElementSize())+Interpreter::value_offset_in_bytes()), "locals match");
|
||||
assert(interpreter_frame->interpreter_frame_local_at(9) == (intptr_t *)((intptr_t)locals - (9 * Interpreter::stackElementSize)), "locals match");
|
||||
assert(interpreter_frame->interpreter_frame_monitor_end() == mp, "monitor_end matches");
|
||||
assert(((intptr_t *)interpreter_frame->interpreter_frame_monitor_begin()) == ((intptr_t *)mp)+monitor_size, "monitor_begin matches");
|
||||
assert(interpreter_frame->interpreter_frame_tos_address()-1 == esp, "esp matches");
|
||||
@ -1742,7 +1758,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
|
||||
|
||||
// Compute size of arguments for saving when returning to deoptimized caller
|
||||
__ lduh(Lmethod, in_bytes(methodOopDesc::size_of_parameters_offset()), Gtmp1);
|
||||
__ sll(Gtmp1, Interpreter::logStackElementSize(), Gtmp1);
|
||||
__ sll(Gtmp1, Interpreter::logStackElementSize, Gtmp1);
|
||||
__ sub(Llocals, Gtmp1, Gtmp2);
|
||||
__ add(Gtmp2, wordSize, Gtmp2);
|
||||
// Save these arguments
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -29,7 +29,8 @@
|
||||
// fail with a guarantee ("not enough space for interpreter generation");
|
||||
// if too small.
|
||||
// Run with +PrintInterpreter to get the VM to print out the size.
|
||||
// Max size with JVMTI and TaggedStackInterpreter
|
||||
// Max size with JVMTI
|
||||
|
||||
#ifdef _LP64
|
||||
// The sethi() instruction generates lots more instructions when shell
|
||||
// stack limit is unlimited, so that's why this is much bigger.
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -580,7 +580,6 @@ void TemplateTable::saload() {
|
||||
|
||||
void TemplateTable::iload(int n) {
|
||||
transition(vtos, itos);
|
||||
debug_only(__ verify_local_tag(frame::TagValue, Llocals, Otos_i, n));
|
||||
__ ld( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
|
||||
}
|
||||
|
||||
@ -588,7 +587,6 @@ void TemplateTable::iload(int n) {
|
||||
void TemplateTable::lload(int n) {
|
||||
transition(vtos, ltos);
|
||||
assert(n+1 < Argument::n_register_parameters, "would need more code");
|
||||
debug_only(__ verify_local_tag(frame::TagCategory2, Llocals, Otos_l, n));
|
||||
__ load_unaligned_long(Llocals, Interpreter::local_offset_in_bytes(n+1), Otos_l);
|
||||
}
|
||||
|
||||
@ -596,7 +594,6 @@ void TemplateTable::lload(int n) {
|
||||
void TemplateTable::fload(int n) {
|
||||
transition(vtos, ftos);
|
||||
assert(n < Argument::n_register_parameters, "would need more code");
|
||||
debug_only(__ verify_local_tag(frame::TagValue, Llocals, G3_scratch, n));
|
||||
__ ldf( FloatRegisterImpl::S, Llocals, Interpreter::local_offset_in_bytes(n), Ftos_f );
|
||||
}
|
||||
|
||||
@ -604,14 +601,12 @@ void TemplateTable::fload(int n) {
|
||||
void TemplateTable::dload(int n) {
|
||||
transition(vtos, dtos);
|
||||
FloatRegister dst = Ftos_d;
|
||||
debug_only(__ verify_local_tag(frame::TagCategory2, Llocals, G3_scratch, n));
|
||||
__ load_unaligned_double(Llocals, Interpreter::local_offset_in_bytes(n+1), dst);
|
||||
}
|
||||
|
||||
|
||||
void TemplateTable::aload(int n) {
|
||||
transition(vtos, atos);
|
||||
debug_only(__ verify_local_tag(frame::TagReference, Llocals, Otos_i, n));
|
||||
__ ld_ptr( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
|
||||
}
|
||||
|
||||
@ -707,12 +702,11 @@ void TemplateTable::dstore() {
|
||||
|
||||
void TemplateTable::astore() {
|
||||
transition(vtos, vtos);
|
||||
// astore tos can also be a returnAddress, so load and store the tag too
|
||||
__ load_ptr_and_tag(0, Otos_i, Otos_l2);
|
||||
__ inc(Lesp, Interpreter::stackElementSize());
|
||||
__ load_ptr(0, Otos_i);
|
||||
__ inc(Lesp, Interpreter::stackElementSize);
|
||||
__ verify_oop_or_return_address(Otos_i, G3_scratch);
|
||||
locals_index(G3_scratch);
|
||||
__ store_local_ptr( G3_scratch, Otos_i, Otos_l2 );
|
||||
__ store_local_ptr(G3_scratch, Otos_i);
|
||||
}
|
||||
|
||||
|
||||
@ -750,12 +744,11 @@ void TemplateTable::wide_dstore() {
|
||||
|
||||
void TemplateTable::wide_astore() {
|
||||
transition(vtos, vtos);
|
||||
// astore tos can also be a returnAddress, so load and store the tag too
|
||||
__ load_ptr_and_tag(0, Otos_i, Otos_l2);
|
||||
__ inc(Lesp, Interpreter::stackElementSize());
|
||||
__ load_ptr(0, Otos_i);
|
||||
__ inc(Lesp, Interpreter::stackElementSize);
|
||||
__ verify_oop_or_return_address(Otos_i, G3_scratch);
|
||||
locals_index_wide(G3_scratch);
|
||||
__ store_local_ptr( G3_scratch, Otos_i, Otos_l2 );
|
||||
__ store_local_ptr(G3_scratch, Otos_i);
|
||||
}
|
||||
|
||||
|
||||
@ -845,13 +838,13 @@ void TemplateTable::aastore() {
|
||||
do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i, G3_scratch, _bs->kind(), true);
|
||||
|
||||
__ ba(false,done);
|
||||
__ delayed()->inc(Lesp, 3* Interpreter::stackElementSize()); // adj sp (pops array, index and value)
|
||||
__ delayed()->inc(Lesp, 3* Interpreter::stackElementSize); // adj sp (pops array, index and value)
|
||||
|
||||
__ bind(is_null);
|
||||
do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), G0, G4_scratch, _bs->kind(), true);
|
||||
|
||||
__ profile_null_seen(G3_scratch);
|
||||
__ inc(Lesp, 3* Interpreter::stackElementSize()); // adj sp (pops array, index and value)
|
||||
__ inc(Lesp, 3* Interpreter::stackElementSize); // adj sp (pops array, index and value)
|
||||
__ bind(done);
|
||||
}
|
||||
|
||||
@ -884,7 +877,6 @@ void TemplateTable::sastore() {
|
||||
|
||||
void TemplateTable::istore(int n) {
|
||||
transition(itos, vtos);
|
||||
__ tag_local(frame::TagValue, Llocals, Otos_i, n);
|
||||
__ st(Otos_i, Llocals, Interpreter::local_offset_in_bytes(n));
|
||||
}
|
||||
|
||||
@ -892,7 +884,6 @@ void TemplateTable::istore(int n) {
|
||||
void TemplateTable::lstore(int n) {
|
||||
transition(ltos, vtos);
|
||||
assert(n+1 < Argument::n_register_parameters, "only handle register cases");
|
||||
__ tag_local(frame::TagCategory2, Llocals, Otos_l, n);
|
||||
__ store_unaligned_long(Otos_l, Llocals, Interpreter::local_offset_in_bytes(n+1));
|
||||
|
||||
}
|
||||
@ -901,7 +892,6 @@ void TemplateTable::lstore(int n) {
|
||||
void TemplateTable::fstore(int n) {
|
||||
transition(ftos, vtos);
|
||||
assert(n < Argument::n_register_parameters, "only handle register cases");
|
||||
__ tag_local(frame::TagValue, Llocals, Otos_l, n);
|
||||
__ stf(FloatRegisterImpl::S, Ftos_f, Llocals, Interpreter::local_offset_in_bytes(n));
|
||||
}
|
||||
|
||||
@ -909,30 +899,28 @@ void TemplateTable::fstore(int n) {
|
||||
void TemplateTable::dstore(int n) {
|
||||
transition(dtos, vtos);
|
||||
FloatRegister src = Ftos_d;
|
||||
__ tag_local(frame::TagCategory2, Llocals, Otos_l, n);
|
||||
__ store_unaligned_double(src, Llocals, Interpreter::local_offset_in_bytes(n+1));
|
||||
}
|
||||
|
||||
|
||||
void TemplateTable::astore(int n) {
|
||||
transition(vtos, vtos);
|
||||
// astore tos can also be a returnAddress, so load and store the tag too
|
||||
__ load_ptr_and_tag(0, Otos_i, Otos_l2);
|
||||
__ inc(Lesp, Interpreter::stackElementSize());
|
||||
__ load_ptr(0, Otos_i);
|
||||
__ inc(Lesp, Interpreter::stackElementSize);
|
||||
__ verify_oop_or_return_address(Otos_i, G3_scratch);
|
||||
__ store_local_ptr( n, Otos_i, Otos_l2 );
|
||||
__ store_local_ptr(n, Otos_i);
|
||||
}
|
||||
|
||||
|
||||
void TemplateTable::pop() {
|
||||
transition(vtos, vtos);
|
||||
__ inc(Lesp, Interpreter::stackElementSize());
|
||||
__ inc(Lesp, Interpreter::stackElementSize);
|
||||
}
|
||||
|
||||
|
||||
void TemplateTable::pop2() {
|
||||
transition(vtos, vtos);
|
||||
__ inc(Lesp, 2 * Interpreter::stackElementSize());
|
||||
__ inc(Lesp, 2 * Interpreter::stackElementSize);
|
||||
}
|
||||
|
||||
|
||||
@ -940,8 +928,8 @@ void TemplateTable::dup() {
|
||||
transition(vtos, vtos);
|
||||
// stack: ..., a
|
||||
// load a and tag
|
||||
__ load_ptr_and_tag(0, Otos_i, Otos_l2);
|
||||
__ push_ptr(Otos_i, Otos_l2);
|
||||
__ load_ptr(0, Otos_i);
|
||||
__ push_ptr(Otos_i);
|
||||
// stack: ..., a, a
|
||||
}
|
||||
|
||||
@ -949,11 +937,11 @@ void TemplateTable::dup() {
|
||||
void TemplateTable::dup_x1() {
|
||||
transition(vtos, vtos);
|
||||
// stack: ..., a, b
|
||||
__ load_ptr_and_tag(1, G3_scratch, G4_scratch); // get a
|
||||
__ load_ptr_and_tag(0, Otos_l1, Otos_l2); // get b
|
||||
__ store_ptr_and_tag(1, Otos_l1, Otos_l2); // put b
|
||||
__ store_ptr_and_tag(0, G3_scratch, G4_scratch); // put a - like swap
|
||||
__ push_ptr(Otos_l1, Otos_l2); // push b
|
||||
__ load_ptr( 1, G3_scratch); // get a
|
||||
__ load_ptr( 0, Otos_l1); // get b
|
||||
__ store_ptr(1, Otos_l1); // put b
|
||||
__ store_ptr(0, G3_scratch); // put a - like swap
|
||||
__ push_ptr(Otos_l1); // push b
|
||||
// stack: ..., b, a, b
|
||||
}
|
||||
|
||||
@ -962,27 +950,27 @@ void TemplateTable::dup_x2() {
|
||||
transition(vtos, vtos);
|
||||
// stack: ..., a, b, c
|
||||
// get c and push on stack, reuse registers
|
||||
__ load_ptr_and_tag(0, G3_scratch, G4_scratch); // get c
|
||||
__ push_ptr(G3_scratch, G4_scratch); // push c with tag
|
||||
__ load_ptr( 0, G3_scratch); // get c
|
||||
__ push_ptr(G3_scratch); // push c with tag
|
||||
// stack: ..., a, b, c, c (c in reg) (Lesp - 4)
|
||||
// (stack offsets n+1 now)
|
||||
__ load_ptr_and_tag(3, Otos_l1, Otos_l2); // get a
|
||||
__ store_ptr_and_tag(3, G3_scratch, G4_scratch); // put c at 3
|
||||
__ load_ptr( 3, Otos_l1); // get a
|
||||
__ store_ptr(3, G3_scratch); // put c at 3
|
||||
// stack: ..., c, b, c, c (a in reg)
|
||||
__ load_ptr_and_tag(2, G3_scratch, G4_scratch); // get b
|
||||
__ store_ptr_and_tag(2, Otos_l1, Otos_l2); // put a at 2
|
||||
__ load_ptr( 2, G3_scratch); // get b
|
||||
__ store_ptr(2, Otos_l1); // put a at 2
|
||||
// stack: ..., c, a, c, c (b in reg)
|
||||
__ store_ptr_and_tag(1, G3_scratch, G4_scratch); // put b at 1
|
||||
__ store_ptr(1, G3_scratch); // put b at 1
|
||||
// stack: ..., c, a, b, c
|
||||
}
|
||||
|
||||
|
||||
void TemplateTable::dup2() {
|
||||
transition(vtos, vtos);
|
||||
__ load_ptr_and_tag(1, G3_scratch, G4_scratch); // get a
|
||||
__ load_ptr_and_tag(0, Otos_l1, Otos_l2); // get b
|
||||
__ push_ptr(G3_scratch, G4_scratch); // push a
|
||||
__ push_ptr(Otos_l1, Otos_l2); // push b
|
||||
__ load_ptr(1, G3_scratch); // get a
|
||||
__ load_ptr(0, Otos_l1); // get b
|
||||
__ push_ptr(G3_scratch); // push a
|
||||
__ push_ptr(Otos_l1); // push b
|
||||
// stack: ..., a, b, a, b
|
||||
}
|
||||
|
||||
@ -990,17 +978,17 @@ void TemplateTable::dup2() {
|
||||
void TemplateTable::dup2_x1() {
|
||||
transition(vtos, vtos);
|
||||
// stack: ..., a, b, c
|
||||
__ load_ptr_and_tag(1, Lscratch, G1_scratch); // get b
|
||||
__ load_ptr_and_tag(2, Otos_l1, Otos_l2); // get a
|
||||
__ store_ptr_and_tag(2, Lscratch, G1_scratch); // put b at a
|
||||
__ load_ptr( 1, Lscratch); // get b
|
||||
__ load_ptr( 2, Otos_l1); // get a
|
||||
__ store_ptr(2, Lscratch); // put b at a
|
||||
// stack: ..., b, b, c
|
||||
__ load_ptr_and_tag(0, G3_scratch, G4_scratch); // get c
|
||||
__ store_ptr_and_tag(1, G3_scratch, G4_scratch); // put c at b
|
||||
__ load_ptr( 0, G3_scratch); // get c
|
||||
__ store_ptr(1, G3_scratch); // put c at b
|
||||
// stack: ..., b, c, c
|
||||
__ store_ptr_and_tag(0, Otos_l1, Otos_l2); // put a at c
|
||||
__ store_ptr(0, Otos_l1); // put a at c
|
||||
// stack: ..., b, c, a
|
||||
__ push_ptr(Lscratch, G1_scratch); // push b
|
||||
__ push_ptr(G3_scratch, G4_scratch); // push c
|
||||
__ push_ptr(Lscratch); // push b
|
||||
__ push_ptr(G3_scratch); // push c
|
||||
// stack: ..., b, c, a, b, c
|
||||
}
|
||||
|
||||
@ -1010,18 +998,18 @@ void TemplateTable::dup2_x1() {
|
||||
void TemplateTable::dup2_x2() {
|
||||
transition(vtos, vtos);
|
||||
// stack: ..., a, b, c, d
|
||||
__ load_ptr_and_tag(1, Lscratch, G1_scratch); // get c
|
||||
__ load_ptr_and_tag(3, Otos_l1, Otos_l2); // get a
|
||||
__ store_ptr_and_tag(3, Lscratch, G1_scratch); // put c at 3
|
||||
__ store_ptr_and_tag(1, Otos_l1, Otos_l2); // put a at 1
|
||||
__ load_ptr( 1, Lscratch); // get c
|
||||
__ load_ptr( 3, Otos_l1); // get a
|
||||
__ store_ptr(3, Lscratch); // put c at 3
|
||||
__ store_ptr(1, Otos_l1); // put a at 1
|
||||
// stack: ..., c, b, a, d
|
||||
__ load_ptr_and_tag(2, G3_scratch, G4_scratch); // get b
|
||||
__ load_ptr_and_tag(0, Otos_l1, Otos_l2); // get d
|
||||
__ store_ptr_and_tag(0, G3_scratch, G4_scratch); // put b at 0
|
||||
__ store_ptr_and_tag(2, Otos_l1, Otos_l2); // put d at 2
|
||||
__ load_ptr( 2, G3_scratch); // get b
|
||||
__ load_ptr( 0, Otos_l1); // get d
|
||||
__ store_ptr(0, G3_scratch); // put b at 0
|
||||
__ store_ptr(2, Otos_l1); // put d at 2
|
||||
// stack: ..., c, d, a, b
|
||||
__ push_ptr(Lscratch, G1_scratch); // push c
|
||||
__ push_ptr(Otos_l1, Otos_l2); // push d
|
||||
__ push_ptr(Lscratch); // push c
|
||||
__ push_ptr(Otos_l1); // push d
|
||||
// stack: ..., c, d, a, b, c, d
|
||||
}
|
||||
|
||||
@ -1029,10 +1017,10 @@ void TemplateTable::dup2_x2() {
|
||||
void TemplateTable::swap() {
|
||||
transition(vtos, vtos);
|
||||
// stack: ..., a, b
|
||||
__ load_ptr_and_tag(1, G3_scratch, G4_scratch); // get a
|
||||
__ load_ptr_and_tag(0, Otos_l1, Otos_l2); // get b
|
||||
__ store_ptr_and_tag(0, G3_scratch, G4_scratch); // put b
|
||||
__ store_ptr_and_tag(1, Otos_l1, Otos_l2); // put a
|
||||
__ load_ptr( 1, G3_scratch); // get a
|
||||
__ load_ptr( 0, Otos_l1); // get b
|
||||
__ store_ptr(0, G3_scratch); // put b
|
||||
__ store_ptr(1, Otos_l1); // put a
|
||||
// stack: ..., b, a
|
||||
}
|
||||
|
||||
@ -1045,9 +1033,9 @@ void TemplateTable::iop2(Operation op) {
|
||||
case sub: __ sub(O1, Otos_i, Otos_i); break;
|
||||
// %%%%% Mul may not exist: better to call .mul?
|
||||
case mul: __ smul(O1, Otos_i, Otos_i); break;
|
||||
case _and: __ and3(O1, Otos_i, Otos_i); break;
|
||||
case _or: __ or3(O1, Otos_i, Otos_i); break;
|
||||
case _xor: __ xor3(O1, Otos_i, Otos_i); break;
|
||||
case _and: __ and3(O1, Otos_i, Otos_i); break;
|
||||
case _or: __ or3(O1, Otos_i, Otos_i); break;
|
||||
case _xor: __ xor3(O1, Otos_i, Otos_i); break;
|
||||
case shl: __ sll(O1, Otos_i, Otos_i); break;
|
||||
case shr: __ sra(O1, Otos_i, Otos_i); break;
|
||||
case ushr: __ srl(O1, Otos_i, Otos_i); break;
|
||||
@ -1061,17 +1049,17 @@ void TemplateTable::lop2(Operation op) {
|
||||
__ pop_l(O2);
|
||||
switch (op) {
|
||||
#ifdef _LP64
|
||||
case add: __ add(O2, Otos_l, Otos_l); break;
|
||||
case sub: __ sub(O2, Otos_l, Otos_l); break;
|
||||
case _and: __ and3( O2, Otos_l, Otos_l); break;
|
||||
case _or: __ or3( O2, Otos_l, Otos_l); break;
|
||||
case _xor: __ xor3( O2, Otos_l, Otos_l); break;
|
||||
case add: __ add(O2, Otos_l, Otos_l); break;
|
||||
case sub: __ sub(O2, Otos_l, Otos_l); break;
|
||||
case _and: __ and3(O2, Otos_l, Otos_l); break;
|
||||
case _or: __ or3(O2, Otos_l, Otos_l); break;
|
||||
case _xor: __ xor3(O2, Otos_l, Otos_l); break;
|
||||
#else
|
||||
case add: __ addcc(O3, Otos_l2, Otos_l2); __ addc(O2, Otos_l1, Otos_l1); break;
|
||||
case sub: __ subcc(O3, Otos_l2, Otos_l2); __ subc(O2, Otos_l1, Otos_l1); break;
|
||||
case _and: __ and3( O3, Otos_l2, Otos_l2); __ and3( O2, Otos_l1, Otos_l1); break;
|
||||
case _or: __ or3( O3, Otos_l2, Otos_l2); __ or3( O2, Otos_l1, Otos_l1); break;
|
||||
case _xor: __ xor3( O3, Otos_l2, Otos_l2); __ xor3( O2, Otos_l1, Otos_l1); break;
|
||||
case _and: __ and3(O3, Otos_l2, Otos_l2); __ and3(O2, Otos_l1, Otos_l1); break;
|
||||
case _or: __ or3(O3, Otos_l2, Otos_l2); __ or3(O2, Otos_l1, Otos_l1); break;
|
||||
case _xor: __ xor3(O3, Otos_l2, Otos_l2); __ xor3(O2, Otos_l1, Otos_l1); break;
|
||||
#endif
|
||||
default: ShouldNotReachHere();
|
||||
}
|
||||
@ -1307,7 +1295,7 @@ void TemplateTable::iinc() {
|
||||
__ ldsb(Lbcp, 2, O2); // load constant
|
||||
__ access_local_int(G3_scratch, Otos_i);
|
||||
__ add(Otos_i, O2, Otos_i);
|
||||
__ st(Otos_i, G3_scratch, Interpreter::value_offset_in_bytes()); // access_local_int puts E.A. in G3_scratch
|
||||
__ st(Otos_i, G3_scratch, 0); // access_local_int puts E.A. in G3_scratch
|
||||
}
|
||||
|
||||
|
||||
@ -1317,7 +1305,7 @@ void TemplateTable::wide_iinc() {
|
||||
__ get_2_byte_integer_at_bcp( 4, O2, O3, InterpreterMacroAssembler::Signed);
|
||||
__ access_local_int(G3_scratch, Otos_i);
|
||||
__ add(Otos_i, O3, Otos_i);
|
||||
__ st(Otos_i, G3_scratch, Interpreter::value_offset_in_bytes()); // access_local_int puts E.A. in G3_scratch
|
||||
__ st(Otos_i, G3_scratch, 0); // access_local_int puts E.A. in G3_scratch
|
||||
}
|
||||
|
||||
|
||||
@ -1555,7 +1543,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
|
||||
// Bump Lbcp to target of JSR
|
||||
__ add(Lbcp, O1_disp, Lbcp);
|
||||
// Push returnAddress for "ret" on stack
|
||||
__ push_ptr(Otos_i, G0); // push ptr sized thing plus 0 for tag.
|
||||
__ push_ptr(Otos_i);
|
||||
// And away we go!
|
||||
__ dispatch_next(vtos);
|
||||
return;
|
||||
@ -1963,19 +1951,30 @@ void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constrain
// ----------------------------------------------------------------------------
void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register index) {
  assert(byte_no == 1 || byte_no == 2, "byte_no out of range");
  bool is_invokedynamic = (bytecode() == Bytecodes::_invokedynamic);

  // Depends on cpCacheOop layout!
  const int shift_count = (1 + byte_no)*BitsPerByte;
  Label resolved;

  __ get_cache_and_index_at_bcp(Rcache, index, 1);
  __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
            ConstantPoolCacheEntry::indices_offset(), Lbyte_code);
  __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
  if (is_invokedynamic) {
    // We are resolved if the f1 field contains a non-null CallSite object.
    __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
              ConstantPoolCacheEntry::f1_offset(), Lbyte_code);
    __ tst(Lbyte_code);
    __ br(Assembler::notEqual, false, Assembler::pt, resolved);
    __ delayed()->set((int)bytecode(), O1);
  } else {
    __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
              ConstantPoolCacheEntry::indices_offset(), Lbyte_code);

  __ srl( Lbyte_code, shift_count, Lbyte_code );
  __ and3( Lbyte_code, 0xFF, Lbyte_code );
  __ cmp( Lbyte_code, (int)bytecode());
  __ br( Assembler::equal, false, Assembler::pt, resolved);
  __ delayed()->set((int)bytecode(), O1);
    __ srl( Lbyte_code, shift_count, Lbyte_code );
    __ and3( Lbyte_code, 0xFF, Lbyte_code );
    __ cmp( Lbyte_code, (int)bytecode());
    __ br( Assembler::equal, false, Assembler::pt, resolved);
    __ delayed()->set((int)bytecode(), O1);
  }

  address entry;
  switch (bytecode()) {
@ -1987,12 +1986,13 @@ void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Regist
    case Bytecodes::_invokespecial : // fall through
    case Bytecodes::_invokestatic : // fall through
    case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
    case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
    default : ShouldNotReachHere(); break;
  }
  // first time invocation - must resolve first
  __ call_VM(noreg, entry, O1);
  // Update registers with resolved info
  __ get_cache_and_index_at_bcp(Rcache, index, 1);
  __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
  __ bind(resolved);
}

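The resolution check above now depends on the call type: an invokedynamic entry counts as resolved once its f1 word holds a non-null CallSite, while the other invoke bytecodes compare the bytecode cached in the indices word against the current one. A minimal C++ sketch of that decision, using a hypothetical, simplified cache-entry struct rather than the real cpCacheOop layout:

#include <cstdint>

// Hypothetical, simplified constant-pool-cache entry; the real layout lives in cpCacheOop.
struct CpCacheEntry {
  uint32_t indices;   // packed: original constant pool index plus two resolved-bytecode slots
  void*    f1;        // for invokedynamic: the resolved CallSite, or null while unresolved
};

// byte_no selects which of the two cached bytecode slots to inspect (1 or 2).
static bool is_resolved(const CpCacheEntry& e, int current_bytecode,
                        bool is_invokedynamic, int byte_no) {
  if (is_invokedynamic) {
    return e.f1 != nullptr;                     // non-null CallSite means resolved
  }
  const int shift  = (1 + byte_no) * 8;         // mirrors (1 + byte_no) * BitsPerByte
  const int cached = (e.indices >> shift) & 0xFF;
  return cached == current_bytecode;            // resolved iff this bytecode is already recorded
}
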
@ -2742,7 +2742,7 @@ void TemplateTable::fast_xaccess(TosState state) {
  Register Rflags = G4_scratch;
  Register Rreceiver = Lscratch;

  __ ld_ptr(Llocals, Interpreter::value_offset_in_bytes(), Rreceiver);
  __ ld_ptr(Llocals, 0, Rreceiver);

  // access constant pool cache (is resolved)
  __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 2);
@ -3130,7 +3130,42 @@ void TemplateTable::invokedynamic(int byte_no) {
    return;
  }

  __ stop("invokedynamic NYI");//6815692//
  // G5: CallSite object (f1)
  // XX: unused (f2)
  // G3: receiver address
  // XX: flags (unused)

  Register G5_callsite = G5_method;
  Register Rscratch = G3_scratch;
  Register Rtemp = G1_scratch;
  Register Rret = Lscratch;

  load_invoke_cp_cache_entry(byte_no, G5_callsite, noreg, Rret, false);
  __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore

  __ verify_oop(G5_callsite);

  // profile this call
  __ profile_call(O4);

  // get return address
  AddressLiteral table(Interpreter::return_5_addrs_by_index_table());
  __ set(table, Rtemp);
  __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type
  // Make sure we don't need to mask Rret for tosBits after the above shift
  ConstantPoolCacheEntry::verify_tosBits();
  __ sll(Rret, LogBytesPerWord, Rret);
  __ ld_ptr(Rtemp, Rret, Rret); // get return address

  __ ld_ptr(G5_callsite, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, Rscratch), G3_method_handle);
  __ null_check(G3_method_handle);

  // Adjust Rret first so Llast_SP can be same as Rret
  __ add(Rret, -frame::pc_return_offset, O7);
  __ add(Lesp, BytesPerWord, Gargs); // setup parameter pointer
  __ jump_to_method_handle_entry(G3_method_handle, Rtemp, /* emit_delayed_nop */ false);
  // Record SP so we can remove any stack space allocated by adapter transition
  __ delayed()->mov(SP, Llast_SP);
}

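At run time the stub generated above loads the resolved CallSite from f1, reads its current target MethodHandle (java_dyn_CallSite.target), null-checks it, and jumps to the method handle entry with Gargs pointing at the interpreter arguments. A hedged C++ model of that dispatch, with hypothetical stand-in types instead of real oops:

#include <stdexcept>

// Hypothetical stand-ins for the objects involved in an invokedynamic dispatch.
struct MethodHandle { void (*entry)(void* args); };   // vmentry of the method handle
struct CallSite     { MethodHandle* target; };        // java_dyn_CallSite.target

// f1 of the resolved cp-cache entry holds the CallSite; args models Gargs.
static void invokedynamic_dispatch(CallSite* callsite, void* args) {
  MethodHandle* mh = callsite->target;   // __ ld_ptr(G5_callsite, target_offset, G3_method_handle)
  if (mh == nullptr) {                   // __ null_check(G3_method_handle)
    throw std::runtime_error("null MethodHandle target");
  }
  mh->entry(args);                       // __ jump_to_method_handle_entry(...)
}
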
@ -3649,7 +3684,7 @@ void TemplateTable::multianewarray() {
  transition(vtos, atos);
  // put ndims * wordSize into Lscratch
  __ ldub( Lbcp, 3, Lscratch);
  __ sll( Lscratch, Interpreter::logStackElementSize(), Lscratch);
  __ sll( Lscratch, Interpreter::logStackElementSize, Lscratch);
  // Lesp points past last_dim, so set to O1 to first_dim address
  __ add( Lesp, Lscratch, O1);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), O1);

@ -86,14 +86,24 @@ void VM_Version::initialize() {
  if (FLAG_IS_DEFAULT(InteriorEntryAlignment)) {
    FLAG_SET_DEFAULT(InteriorEntryAlignment, 4);
  }
  if (is_niagara1_plus()) {
    if (AllocatePrefetchStyle > 0 && FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
      // Use BIS instruction for allocation prefetch.
      FLAG_SET_DEFAULT(AllocatePrefetchStyle, 3);
      if (FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
        // Use smaller prefetch distance on N2 with BIS
        FLAG_SET_DEFAULT(AllocatePrefetchDistance, 64);
      }
    }
    if (AllocatePrefetchStyle != 3 && FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
      // Use different prefetch distance without BIS
      FLAG_SET_DEFAULT(AllocatePrefetchDistance, 256);
    }
  }
#endif
  if (FLAG_IS_DEFAULT(OptoLoopAlignment)) {
    FLAG_SET_DEFAULT(OptoLoopAlignment, 4);
  }
  if (is_niagara1_plus() && FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
    // Use smaller prefetch distance on N2
    FLAG_SET_DEFAULT(AllocatePrefetchDistance, 256);
  }
#endif
}

// Use hardware population count instruction if available.

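The logic above only overrides flags the user left at their defaults: Niagara-2-class chips prefer BIS-based allocation prefetch (style 3) with a shorter 64-byte distance, and fall back to a 256-byte distance when BIS is not used. A small illustrative C++ sketch of that selection, with FLAG_IS_DEFAULT/FLAG_SET_DEFAULT reduced to plain fields (not the real VM_Version or flag APIs):

struct PrefetchFlags {
  int  style            = 1;     // >0 means allocation prefetch is enabled; 3 selects BIS
  int  distance         = -1;    // bytes ahead of the allocation pointer
  bool style_is_default = true;  // stands in for FLAG_IS_DEFAULT(AllocatePrefetchStyle)
  bool dist_is_default  = true;  // stands in for FLAG_IS_DEFAULT(AllocatePrefetchDistance)
};

static void pick_allocate_prefetch(PrefetchFlags& f, bool is_niagara1_plus) {
  if (is_niagara1_plus) {
    if (f.style > 0 && f.style_is_default) {
      f.style = 3;                              // use BIS for allocation prefetch
      if (f.dist_is_default) f.distance = 64;   // shorter distance with BIS on N2
    }
    if (f.style != 3 && f.dist_is_default) {
      f.distance = 256;                         // longer distance without BIS
    }
  }
}
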
@ -1,5 +1,5 @@
/*
 * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
 * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -3365,6 +3365,13 @@ void Assembler::shrdl(Register dst, Register src) {

#else // LP64

void Assembler::set_byte_if_not_zero(Register dst) {
  int enc = prefix_and_encode(dst->encoding(), true);
  emit_byte(0x0F);
  emit_byte(0x95);
  emit_byte(0xE0 | enc);
}

// 64bit only pieces of the assembler
// This should only be used by 64bit instructions that can use rip-relative
// it cannot be used by instructions that want an immediate value.
@ -6485,24 +6492,19 @@ int MacroAssembler::load_unsigned_short(Register dst, Address src) {
}

void MacroAssembler::load_sized_value(Register dst, Address src,
                                      int size_in_bytes, bool is_signed) {
  switch (size_in_bytes ^ (is_signed ? -1 : 0)) {
                                      size_t size_in_bytes, bool is_signed) {
  switch (size_in_bytes) {
#ifndef _LP64
  // For case 8, caller is responsible for manually loading
  // the second word into another register.
  case ~8: // fall through:
  case 8: movl( dst, src ); break;
  case 8: movl(dst, src); break;
#else
  case ~8: // fall through:
  case 8: movq( dst, src ); break;
  case 8: movq(dst, src); break;
#endif
  case ~4: // fall through:
  case 4: movl( dst, src ); break;
  case ~2: load_signed_short( dst, src ); break;
  case 2: load_unsigned_short( dst, src ); break;
  case ~1: load_signed_byte( dst, src ); break;
  case 1: load_unsigned_byte( dst, src ); break;
  default: ShouldNotReachHere();
  case 4: movl(dst, src); break;
  case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
  case 1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
  default: ShouldNotReachHere();
  }
}

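The new load_sized_value drops the old XOR-with-signedness encoding (the ~N case labels) and instead switches on the byte count, letting is_signed choose between the signed and unsigned load helpers. A standalone C++ illustration of the same dispatch shape, using memcpy-based loads since this is only a sketch and not the MacroAssembler API:

#include <cstdint>
#include <cstring>
#include <stdexcept>

// Load size_in_bytes bytes from src and widen to 64 bits, honoring signedness,
// mirroring the switch-on-size plus is_signed ? signed : unsigned dispatch above.
static int64_t load_sized_value(const void* src, size_t size_in_bytes, bool is_signed) {
  switch (size_in_bytes) {
  case 8: { int64_t v; std::memcpy(&v, src, 8); return v; }
  case 4: { if (is_signed) { int32_t v; std::memcpy(&v, src, 4); return v; }
            uint32_t v; std::memcpy(&v, src, 4); return v; }
  case 2: { if (is_signed) { int16_t v; std::memcpy(&v, src, 2); return v; }
            uint16_t v; std::memcpy(&v, src, 2); return v; }
  case 1: { if (is_signed) { int8_t v; std::memcpy(&v, src, 1); return v; }
            uint8_t v; std::memcpy(&v, src, 1); return v; }
  default: throw std::invalid_argument("unsupported size");  // ShouldNotReachHere()
  }
}
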
@ -7699,6 +7701,7 @@ void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_re
|
||||
// method handle's MethodType. This macro hides the distinction.
|
||||
void MacroAssembler::load_method_handle_vmslots(Register vmslots_reg, Register mh_reg,
|
||||
Register temp_reg) {
|
||||
assert_different_registers(vmslots_reg, mh_reg, temp_reg);
|
||||
if (UseCompressedOops) unimplemented(); // field accesses must decode
|
||||
// load mh.type.form.vmslots
|
||||
if (java_dyn_MethodHandle::vmslots_offset_in_bytes() != 0) {
|
||||
@ -7737,7 +7740,7 @@ void MacroAssembler::jump_to_method_handle_entry(Register mh_reg, Register temp_
|
||||
Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
|
||||
int extra_slot_offset) {
|
||||
// cf. TemplateTable::prepare_invoke(), if (load_receiver).
|
||||
int stackElementSize = Interpreter::stackElementSize();
|
||||
int stackElementSize = Interpreter::stackElementSize;
|
||||
int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
|
||||
#ifdef ASSERT
|
||||
int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
|
||||
@ -7968,7 +7971,7 @@ class FPU_State {
|
||||
case 2: return "special";
|
||||
case 3: return "empty";
|
||||
}
|
||||
ShouldNotReachHere()
|
||||
ShouldNotReachHere();
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -1511,7 +1511,7 @@ class MacroAssembler: public Assembler {
|
||||
void extend_sign(Register hi, Register lo);
|
||||
|
||||
// Loading values by size and signed-ness
|
||||
void load_sized_value(Register dst, Address src, int size_in_bytes, bool is_signed);
|
||||
void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed);
|
||||
|
||||
// Support for inc/dec with optimal instruction selection depending on value
|
||||
|
||||
|
@ -455,6 +455,60 @@ int LIR_Assembler::emit_exception_handler() {
}


// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  __ get_thread(rsi);
  __ movptr(rax, Address(rsi, JavaThread::exception_oop_offset()));
  __ movptr(Address(rsi, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
  __ movptr(Address(rsi, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(rax);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(rsi, rax); // Preserve the exception
  }

  // Preform needed unlocking
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::rax_opr);
    stub = new MonitorExitStub(FrameMap::rax_opr, true, 0);
    __ unlock_object(rdi, rbx, rax, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    __ movoop(Address(rsp, 0), method()->constant_encoding());
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(rax, rsi); // Restore the exception
  }

  // remove the activation and dispatch to the unwind handler
  __ remove_frame(initial_frame_size_in_bytes());
  __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));

  // Emit the slow path assembly
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
|
||||
// if the last instruction is a call (typically to do a throw which
|
||||
// is coming at the end after block reordering) the return address
|
||||
@ -1190,8 +1244,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
|
||||
break;
|
||||
#endif // _L64
|
||||
case T_INT:
|
||||
// %%% could this be a movl? this is safer but longer instruction
|
||||
__ movl2ptr(dest->as_register(), from_addr);
|
||||
__ movl(dest->as_register(), from_addr);
|
||||
break;
|
||||
|
||||
case T_LONG: {
|
||||
@ -1249,7 +1302,6 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
|
||||
__ shll(dest_reg, 24);
|
||||
__ sarl(dest_reg, 24);
|
||||
}
|
||||
// These are unsigned so the zero extension on 64bit is just what we need
|
||||
break;
|
||||
}
|
||||
|
||||
@ -1261,8 +1313,6 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
|
||||
} else {
|
||||
__ movw(dest_reg, from_addr);
|
||||
}
|
||||
// This is unsigned so the zero extension on 64bit is just what we need
|
||||
// __ movl2ptr(dest_reg, dest_reg);
|
||||
break;
|
||||
}
|
||||
|
||||
@ -1275,8 +1325,6 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
|
||||
__ shll(dest_reg, 16);
|
||||
__ sarl(dest_reg, 16);
|
||||
}
|
||||
// Might not be needed in 64bit but certainly doesn't hurt (except for code size)
|
||||
__ movl2ptr(dest_reg, dest_reg);
|
||||
break;
|
||||
}
|
||||
|
||||
@ -2690,19 +2738,14 @@ void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Op
|
||||
} else {
|
||||
assert(code == lir_cmp_l2i, "check");
|
||||
#ifdef _LP64
|
||||
Register dest = dst->as_register();
|
||||
__ xorptr(dest, dest);
|
||||
Label high, done;
|
||||
__ cmpptr(left->as_register_lo(), right->as_register_lo());
|
||||
__ jcc(Assembler::equal, done);
|
||||
__ jcc(Assembler::greater, high);
|
||||
__ decrement(dest);
|
||||
__ jmp(done);
|
||||
__ bind(high);
|
||||
__ increment(dest);
|
||||
|
||||
__ bind(done);
|
||||
|
||||
Label done;
|
||||
Register dest = dst->as_register();
|
||||
__ cmpptr(left->as_register_lo(), right->as_register_lo());
|
||||
__ movl(dest, -1);
|
||||
__ jccb(Assembler::less, done);
|
||||
__ set_byte_if_not_zero(dest);
|
||||
__ movzbl(dest, dest);
|
||||
__ bind(done);
|
||||
#else
|
||||
__ lcmp2int(left->as_register_hi(),
|
||||
left->as_register_lo(),
|
||||
@ -2795,47 +2838,48 @@ void LIR_Assembler::emit_static_call_stub() {
|
||||
// On 64bit this will die since it will take a movq & jmp, must be only a jmp
|
||||
__ jump(RuntimeAddress(__ pc()));
|
||||
|
||||
assert(__ offset() - start <= call_stub_size, "stub too big")
|
||||
assert(__ offset() - start <= call_stub_size, "stub too big");
|
||||
__ end_a_stub();
|
||||
}
|
||||
|
||||
|
||||
void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info, bool unwind) {
|
||||
void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
|
||||
assert(exceptionOop->as_register() == rax, "must match");
|
||||
assert(unwind || exceptionPC->as_register() == rdx, "must match");
|
||||
assert(exceptionPC->as_register() == rdx, "must match");
|
||||
|
||||
// exception object is not added to oop map by LinearScan
|
||||
// (LinearScan assumes that no oops are in fixed registers)
|
||||
info->add_register_oop(exceptionOop);
|
||||
Runtime1::StubID unwind_id;
|
||||
|
||||
if (!unwind) {
|
||||
// get current pc information
|
||||
// pc is only needed if the method has an exception handler, the unwind code does not need it.
|
||||
int pc_for_athrow_offset = __ offset();
|
||||
InternalAddress pc_for_athrow(__ pc());
|
||||
__ lea(exceptionPC->as_register(), pc_for_athrow);
|
||||
add_call_info(pc_for_athrow_offset, info); // for exception handler
|
||||
// get current pc information
|
||||
// pc is only needed if the method has an exception handler, the unwind code does not need it.
|
||||
int pc_for_athrow_offset = __ offset();
|
||||
InternalAddress pc_for_athrow(__ pc());
|
||||
__ lea(exceptionPC->as_register(), pc_for_athrow);
|
||||
add_call_info(pc_for_athrow_offset, info); // for exception handler
|
||||
|
||||
__ verify_not_null_oop(rax);
|
||||
// search an exception handler (rax: exception oop, rdx: throwing pc)
|
||||
if (compilation()->has_fpu_code()) {
|
||||
unwind_id = Runtime1::handle_exception_id;
|
||||
} else {
|
||||
unwind_id = Runtime1::handle_exception_nofpu_id;
|
||||
}
|
||||
__ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
|
||||
__ verify_not_null_oop(rax);
|
||||
// search an exception handler (rax: exception oop, rdx: throwing pc)
|
||||
if (compilation()->has_fpu_code()) {
|
||||
unwind_id = Runtime1::handle_exception_id;
|
||||
} else {
|
||||
// remove the activation
|
||||
__ remove_frame(initial_frame_size_in_bytes());
|
||||
__ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
|
||||
unwind_id = Runtime1::handle_exception_nofpu_id;
|
||||
}
|
||||
__ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
|
||||
|
||||
// enough room for two byte trap
|
||||
__ nop();
|
||||
}
|
||||
|
||||
|
||||
void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
|
||||
assert(exceptionOop->as_register() == rax, "must match");
|
||||
|
||||
__ jmp(_unwind_handler_entry);
|
||||
}
|
||||
|
||||
|
||||
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
|
||||
|
||||
// optimized version for linear scan:
|
||||
|
@ -781,7 +781,7 @@ void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
|
||||
|
||||
// Restore SP from BP if the exception PC is a MethodHandle call site.
|
||||
NOT_LP64(__ get_thread(thread);)
|
||||
__ cmpl(Address(thread, JavaThread::is_method_handle_exception_offset()), 0);
|
||||
__ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
|
||||
__ cmovptr(Assembler::notEqual, rsp, rbp);
|
||||
|
||||
// continue at exception handler (return address removed)
|
||||
|
@ -80,7 +80,6 @@ define_pd_global(intx, CodeCacheExpansionSize, 32*K);
|
||||
// Ergonomics related flags
|
||||
define_pd_global(uint64_t,MaxRAM, 4ULL*G);
|
||||
#endif // AMD64
|
||||
define_pd_global(intx, OptoLoopAlignment, 16);
|
||||
define_pd_global(intx, RegisterCostAreaRatio, 16000);
|
||||
|
||||
// Peephole and CISC spilling both break the graph, and so makes the
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -28,6 +28,6 @@
|
||||
// Size of interpreter code. Increase if too small. Interpreter will
|
||||
// fail with a guarantee ("not enough space for interpreter generation");
|
||||
// if too small.
|
||||
// Run with +PrintInterpreterSize to get the VM to print out the size.
|
||||
// Max size with JVMTI and TaggedStackInterpreter
|
||||
// Run with +PrintInterpreter to get the VM to print out the size.
|
||||
// Max size with JVMTI
|
||||
const static int InterpreterCodeSize = 168 * 1024;
|
||||
|
@ -291,8 +291,8 @@ BasicObjectLock* frame::interpreter_frame_monitor_begin() const {
|
||||
BasicObjectLock* frame::interpreter_frame_monitor_end() const {
|
||||
BasicObjectLock* result = (BasicObjectLock*) *addr_at(interpreter_frame_monitor_block_top_offset);
|
||||
// make sure the pointer points inside the frame
|
||||
assert((intptr_t) fp() > (intptr_t) result, "result must < than frame pointer");
|
||||
assert((intptr_t) sp() <= (intptr_t) result, "result must >= than stack pointer");
|
||||
assert(sp() <= (intptr_t*) result, "monitor end should be above the stack pointer");
|
||||
assert((intptr_t*) result < fp(), "monitor end should be strictly below the frame pointer");
|
||||
return result;
|
||||
}
|
||||
|
||||
@ -502,7 +502,7 @@ bool frame::interpreter_frame_equals_unpacked_fp(intptr_t* fp) {
|
||||
// When unpacking an optimized frame the frame pointer is
|
||||
// adjusted with:
|
||||
int diff = (method->max_locals() - method->size_of_parameters()) *
|
||||
Interpreter::stackElementWords();
|
||||
Interpreter::stackElementWords;
|
||||
return _fp == (fp - diff);
|
||||
}
|
||||
|
||||
@ -542,7 +542,7 @@ bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
|
||||
|
||||
// stack frames shouldn't be much larger than max_stack elements
|
||||
|
||||
if (fp() - sp() > 1024 + m->max_stack()*Interpreter::stackElementSize()) {
|
||||
if (fp() - sp() > 1024 + m->max_stack()*Interpreter::stackElementSize) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -594,7 +594,7 @@ BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result)
|
||||
#ifdef AMD64
|
||||
// This is times two because we do a push(ltos) after pushing XMM0
|
||||
// and that takes two interpreter stack slots.
|
||||
tos_addr += 2 * Interpreter::stackElementWords();
|
||||
tos_addr += 2 * Interpreter::stackElementWords;
|
||||
#else
|
||||
tos_addr += 2;
|
||||
#endif // AMD64
|
||||
|
@ -45,6 +45,7 @@ define_pd_global(intx, CodeEntryAlignment, 32);
|
||||
#else
|
||||
define_pd_global(intx, CodeEntryAlignment, 16);
|
||||
#endif // COMPILER2
|
||||
define_pd_global(intx, OptoLoopAlignment, 16);
|
||||
define_pd_global(intx, InlineFrequencyCount, 100);
|
||||
define_pd_global(intx, InlineSmallCode, 1000);
|
||||
|
||||
|
@ -265,89 +265,30 @@ void InterpreterMacroAssembler::d2ieee() {
|
||||
|
||||
// Java Expression Stack
|
||||
|
||||
#ifdef ASSERT
|
||||
void InterpreterMacroAssembler::verify_stack_tag(frame::Tag t) {
|
||||
if (TaggedStackInterpreter) {
|
||||
Label okay;
|
||||
cmpptr(Address(rsp, wordSize), (int32_t)t);
|
||||
jcc(Assembler::equal, okay);
|
||||
// Also compare if the stack value is zero, then the tag might
|
||||
// not have been set coming from deopt.
|
||||
cmpptr(Address(rsp, 0), 0);
|
||||
jcc(Assembler::equal, okay);
|
||||
stop("Java Expression stack tag value is bad");
|
||||
bind(okay);
|
||||
}
|
||||
}
|
||||
#endif // ASSERT
|
||||
|
||||
void InterpreterMacroAssembler::pop_ptr(Register r) {
|
||||
debug_only(verify_stack_tag(frame::TagReference));
|
||||
pop(r);
|
||||
if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
|
||||
}
|
||||
|
||||
void InterpreterMacroAssembler::pop_ptr(Register r, Register tag) {
|
||||
pop(r);
|
||||
// Tag may not be reference for jsr, can be returnAddress
|
||||
if (TaggedStackInterpreter) pop(tag);
|
||||
}
|
||||
|
||||
void InterpreterMacroAssembler::pop_i(Register r) {
|
||||
debug_only(verify_stack_tag(frame::TagValue));
|
||||
pop(r);
|
||||
if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
|
||||
}
|
||||
|
||||
void InterpreterMacroAssembler::pop_l(Register lo, Register hi) {
|
||||
debug_only(verify_stack_tag(frame::TagValue));
|
||||
pop(lo);
|
||||
if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
|
||||
debug_only(verify_stack_tag(frame::TagValue));
|
||||
pop(hi);
|
||||
if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
|
||||
}
|
||||
|
||||
void InterpreterMacroAssembler::pop_f() {
|
||||
debug_only(verify_stack_tag(frame::TagValue));
|
||||
fld_s(Address(rsp, 0));
|
||||
addptr(rsp, 1 * wordSize);
|
||||
if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
|
||||
}
|
||||
|
||||
void InterpreterMacroAssembler::pop_d() {
|
||||
// Write double to stack contiguously and load into ST0
|
||||
pop_dtos_to_rsp();
|
||||
fld_d(Address(rsp, 0));
|
||||
addptr(rsp, 2 * wordSize);
|
||||
}
|
||||
|
||||
|
||||
// Pop the top of the java expression stack to execution stack (which
|
||||
// happens to be the same place).
|
||||
void InterpreterMacroAssembler::pop_dtos_to_rsp() {
|
||||
if (TaggedStackInterpreter) {
|
||||
// Pop double value into scratch registers
|
||||
debug_only(verify_stack_tag(frame::TagValue));
|
||||
pop(rax);
|
||||
addptr(rsp, 1* wordSize);
|
||||
debug_only(verify_stack_tag(frame::TagValue));
|
||||
pop(rdx);
|
||||
addptr(rsp, 1* wordSize);
|
||||
push(rdx);
|
||||
push(rax);
|
||||
}
|
||||
}
|
||||
|
||||
void InterpreterMacroAssembler::pop_ftos_to_rsp() {
|
||||
if (TaggedStackInterpreter) {
|
||||
debug_only(verify_stack_tag(frame::TagValue));
|
||||
pop(rax);
|
||||
addptr(rsp, 1 * wordSize);
|
||||
push(rax); // ftos is at rsp
|
||||
}
|
||||
}
|
||||
|
||||
void InterpreterMacroAssembler::pop(TosState state) {
|
||||
switch (state) {
|
||||
case atos: pop_ptr(rax); break;
|
||||
@ -365,54 +306,28 @@ void InterpreterMacroAssembler::pop(TosState state) {
|
||||
}
|
||||
|
||||
void InterpreterMacroAssembler::push_ptr(Register r) {
|
||||
if (TaggedStackInterpreter) push(frame::TagReference);
|
||||
push(r);
|
||||
}
|
||||
|
||||
void InterpreterMacroAssembler::push_ptr(Register r, Register tag) {
|
||||
if (TaggedStackInterpreter) push(tag); // tag first
|
||||
push(r);
|
||||
}
|
||||
|
||||
void InterpreterMacroAssembler::push_i(Register r) {
|
||||
if (TaggedStackInterpreter) push(frame::TagValue);
|
||||
push(r);
|
||||
}
|
||||
|
||||
void InterpreterMacroAssembler::push_l(Register lo, Register hi) {
|
||||
if (TaggedStackInterpreter) push(frame::TagValue);
|
||||
push(hi);
|
||||
if (TaggedStackInterpreter) push(frame::TagValue);
|
||||
push(lo);
|
||||
}
|
||||
|
||||
void InterpreterMacroAssembler::push_f() {
|
||||
if (TaggedStackInterpreter) push(frame::TagValue);
|
||||
// Do not schedule for no AGI! Never write beyond rsp!
|
||||
subptr(rsp, 1 * wordSize);
|
||||
fstp_s(Address(rsp, 0));
|
||||
}
|
||||
|
||||
void InterpreterMacroAssembler::push_d(Register r) {
|
||||
if (TaggedStackInterpreter) {
|
||||
// Double values are stored as:
|
||||
// tag
|
||||
// high
|
||||
// tag
|
||||
// low
|
||||
push(frame::TagValue);
|
||||
subptr(rsp, 3 * wordSize);
|
||||
fstp_d(Address(rsp, 0));
|
||||
// move high word up to slot n-1
|
||||
movl(r, Address(rsp, 1*wordSize));
|
||||
movl(Address(rsp, 2*wordSize), r);
|
||||
// move tag
|
||||
movl(Address(rsp, 1*wordSize), frame::TagValue);
|
||||
} else {
|
||||
// Do not schedule for no AGI! Never write beyond rsp!
|
||||
subptr(rsp, 2 * wordSize);
|
||||
fstp_d(Address(rsp, 0));
|
||||
}
|
||||
// Do not schedule for no AGI! Never write beyond rsp!
|
||||
subptr(rsp, 2 * wordSize);
|
||||
fstp_d(Address(rsp, 0));
|
||||
}
|
||||
|
||||
|
||||
@ -433,118 +348,15 @@ void InterpreterMacroAssembler::push(TosState state) {
|
||||
}
|
||||
|
||||
|
||||
// Tagged stack helpers for swap and dup
|
||||
void InterpreterMacroAssembler::load_ptr_and_tag(int n, Register val,
|
||||
Register tag) {
|
||||
// Helpers for swap and dup
|
||||
void InterpreterMacroAssembler::load_ptr(int n, Register val) {
|
||||
movptr(val, Address(rsp, Interpreter::expr_offset_in_bytes(n)));
|
||||
if (TaggedStackInterpreter) {
|
||||
movptr(tag, Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)));
|
||||
}
|
||||
}
|
||||
|
||||
void InterpreterMacroAssembler::store_ptr_and_tag(int n, Register val,
|
||||
Register tag) {
|
||||
void InterpreterMacroAssembler::store_ptr(int n, Register val) {
|
||||
movptr(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val);
|
||||
if (TaggedStackInterpreter) {
|
||||
movptr(Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)), tag);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Tagged local support
|
||||
void InterpreterMacroAssembler::tag_local(frame::Tag tag, int n) {
|
||||
if (TaggedStackInterpreter) {
|
||||
if (tag == frame::TagCategory2) {
|
||||
movptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n+1)), (int32_t)frame::TagValue);
|
||||
movptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)frame::TagValue);
|
||||
} else {
|
||||
movptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)tag);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void InterpreterMacroAssembler::tag_local(frame::Tag tag, Register idx) {
|
||||
if (TaggedStackInterpreter) {
|
||||
if (tag == frame::TagCategory2) {
|
||||
movptr(Address(rdi, idx, Interpreter::stackElementScale(),
|
||||
Interpreter::local_tag_offset_in_bytes(1)), (int32_t)frame::TagValue);
|
||||
movptr(Address(rdi, idx, Interpreter::stackElementScale(),
|
||||
Interpreter::local_tag_offset_in_bytes(0)), (int32_t)frame::TagValue);
|
||||
} else {
|
||||
movptr(Address(rdi, idx, Interpreter::stackElementScale(),
|
||||
Interpreter::local_tag_offset_in_bytes(0)), (int32_t)tag);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void InterpreterMacroAssembler::tag_local(Register tag, Register idx) {
|
||||
if (TaggedStackInterpreter) {
|
||||
// can only be TagValue or TagReference
|
||||
movptr(Address(rdi, idx, Interpreter::stackElementScale(),
|
||||
Interpreter::local_tag_offset_in_bytes(0)), tag);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void InterpreterMacroAssembler::tag_local(Register tag, int n) {
|
||||
if (TaggedStackInterpreter) {
|
||||
// can only be TagValue or TagReference
|
||||
movptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), tag);
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
void InterpreterMacroAssembler::verify_local_tag(frame::Tag tag, int n) {
|
||||
if (TaggedStackInterpreter) {
|
||||
frame::Tag t = tag;
|
||||
if (tag == frame::TagCategory2) {
|
||||
Label nbl;
|
||||
t = frame::TagValue; // change to what is stored in locals
|
||||
cmpptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n+1)), (int32_t)t);
|
||||
jcc(Assembler::equal, nbl);
|
||||
stop("Local tag is bad for long/double");
|
||||
bind(nbl);
|
||||
}
|
||||
Label notBad;
|
||||
cmpptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)t);
|
||||
jcc(Assembler::equal, notBad);
|
||||
// Also compare if the local value is zero, then the tag might
|
||||
// not have been set coming from deopt.
|
||||
cmpptr(Address(rdi, Interpreter::local_offset_in_bytes(n)), 0);
|
||||
jcc(Assembler::equal, notBad);
|
||||
stop("Local tag is bad");
|
||||
bind(notBad);
|
||||
}
|
||||
}
|
||||
|
||||
void InterpreterMacroAssembler::verify_local_tag(frame::Tag tag, Register idx) {
|
||||
if (TaggedStackInterpreter) {
|
||||
frame::Tag t = tag;
|
||||
if (tag == frame::TagCategory2) {
|
||||
Label nbl;
|
||||
t = frame::TagValue; // change to what is stored in locals
|
||||
cmpptr(Address(rdi, idx, Interpreter::stackElementScale(),
|
||||
Interpreter::local_tag_offset_in_bytes(1)), (int32_t)t);
|
||||
jcc(Assembler::equal, nbl);
|
||||
stop("Local tag is bad for long/double");
|
||||
bind(nbl);
|
||||
}
|
||||
Label notBad;
|
||||
cmpl(Address(rdi, idx, Interpreter::stackElementScale(),
|
||||
Interpreter::local_tag_offset_in_bytes(0)), (int32_t)t);
|
||||
jcc(Assembler::equal, notBad);
|
||||
// Also compare if the local value is zero, then the tag might
|
||||
// not have been set coming from deopt.
|
||||
cmpptr(Address(rdi, idx, Interpreter::stackElementScale(),
|
||||
Interpreter::local_offset_in_bytes(0)), 0);
|
||||
jcc(Assembler::equal, notBad);
|
||||
stop("Local tag is bad");
|
||||
bind(notBad);
|
||||
|
||||
}
|
||||
}
|
||||
#endif // ASSERT
|
||||
|
||||
void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point) {
|
||||
MacroAssembler::call_VM_leaf_base(entry_point, 0);
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -85,16 +85,12 @@ class InterpreterMacroAssembler: public MacroAssembler {
void d2ieee(); // truncate dtos to 64bits

void pop_ptr(Register r = rax);
void pop_ptr(Register r, Register tag);
void pop_i(Register r = rax);
void pop_l(Register lo = rax, Register hi = rdx);
void pop_f();
void pop_d();
void pop_ftos_to_rsp();
void pop_dtos_to_rsp();

void push_ptr(Register r = rax);
void push_ptr(Register r, Register tag);
void push_i(Register r = rax);
void push_l(Register lo = rax, Register hi = rdx);
void push_d(Register r = rax);
@ -112,33 +108,15 @@ class InterpreterMacroAssembler: public MacroAssembler {
void pop(void* v ); // Add unimplemented ambiguous method
void push(void* v ); // Add unimplemented ambiguous method

DEBUG_ONLY(void verify_stack_tag(frame::Tag t);)

#endif // CC_INTERP

#ifndef CC_INTERP

void empty_expression_stack() {
movptr(rsp, Address(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize));
// NULL last_sp until next java call
movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
void empty_expression_stack() {
movptr(rsp, Address(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize));
// NULL last_sp until next java call
movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
}

// Tagged stack helpers for swap and dup
void load_ptr_and_tag(int n, Register val, Register tag);
void store_ptr_and_tag(int n, Register val, Register tag);

// Tagged Local support

void tag_local(frame::Tag tag, int n);
void tag_local(Register tag, int n);
void tag_local(frame::Tag tag, Register idx);
void tag_local(Register tag, Register idx);

#ifdef ASSERT
void verify_local_tag(frame::Tag tag, int n);
void verify_local_tag(frame::Tag tag, Register idx);
#endif // ASSERT
// Helpers for swap and dup
void load_ptr(int n, Register val);
void store_ptr(int n, Register val);

// Super call_VM calls - correspond to MacroAssembler::call_VM(_leaf) calls
void super_call_VM_leaf(address entry_point);
@ -264,113 +264,51 @@ void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,

// Java Expression Stack

#ifdef ASSERT
// Verifies that the stack tag matches. Must be called before the stack
// value is popped off the stack.
void InterpreterMacroAssembler::verify_stack_tag(frame::Tag t) {
if (TaggedStackInterpreter) {
frame::Tag tag = t;
if (t == frame::TagCategory2) {
tag = frame::TagValue;
Label hokay;
cmpptr(Address(rsp, 3*wordSize), (int32_t)tag);
jcc(Assembler::equal, hokay);
stop("Java Expression stack tag high value is bad");
bind(hokay);
}
Label okay;
cmpptr(Address(rsp, wordSize), (int32_t)tag);
jcc(Assembler::equal, okay);
// Also compare if the stack value is zero, then the tag might
// not have been set coming from deopt.
cmpptr(Address(rsp, 0), 0);
jcc(Assembler::equal, okay);
stop("Java Expression stack tag value is bad");
bind(okay);
}
}
#endif // ASSERT

void InterpreterMacroAssembler::pop_ptr(Register r) {
debug_only(verify_stack_tag(frame::TagReference));
pop(r);
if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
}

void InterpreterMacroAssembler::pop_ptr(Register r, Register tag) {
pop(r);
if (TaggedStackInterpreter) pop(tag);
}

void InterpreterMacroAssembler::pop_i(Register r) {
// XXX can't use pop currently, upper half non clean
debug_only(verify_stack_tag(frame::TagValue));
movl(r, Address(rsp, 0));
addptr(rsp, wordSize);
if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
}

void InterpreterMacroAssembler::pop_l(Register r) {
debug_only(verify_stack_tag(frame::TagCategory2));
movq(r, Address(rsp, 0));
addptr(rsp, 2 * Interpreter::stackElementSize());
addptr(rsp, 2 * Interpreter::stackElementSize);
}

void InterpreterMacroAssembler::pop_f(XMMRegister r) {
debug_only(verify_stack_tag(frame::TagValue));
movflt(r, Address(rsp, 0));
addptr(rsp, wordSize);
if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
}

void InterpreterMacroAssembler::pop_d(XMMRegister r) {
debug_only(verify_stack_tag(frame::TagCategory2));
movdbl(r, Address(rsp, 0));
addptr(rsp, 2 * Interpreter::stackElementSize());
addptr(rsp, 2 * Interpreter::stackElementSize);
}

void InterpreterMacroAssembler::push_ptr(Register r) {
if (TaggedStackInterpreter) push(frame::TagReference);
push(r);
}

void InterpreterMacroAssembler::push_ptr(Register r, Register tag) {
if (TaggedStackInterpreter) push(tag);
push(r);
}

void InterpreterMacroAssembler::push_i(Register r) {
if (TaggedStackInterpreter) push(frame::TagValue);
push(r);
}

void InterpreterMacroAssembler::push_l(Register r) {
if (TaggedStackInterpreter) {
push(frame::TagValue);
subptr(rsp, 1 * wordSize);
push(frame::TagValue);
subptr(rsp, 1 * wordSize);
} else {
subptr(rsp, 2 * wordSize);
}
subptr(rsp, 2 * wordSize);
movq(Address(rsp, 0), r);
}

void InterpreterMacroAssembler::push_f(XMMRegister r) {
if (TaggedStackInterpreter) push(frame::TagValue);
subptr(rsp, wordSize);
movflt(Address(rsp, 0), r);
}

void InterpreterMacroAssembler::push_d(XMMRegister r) {
if (TaggedStackInterpreter) {
push(frame::TagValue);
subptr(rsp, 1 * wordSize);
push(frame::TagValue);
subptr(rsp, 1 * wordSize);
} else {
subptr(rsp, 2 * wordSize);
}
subptr(rsp, 2 * wordSize);
movdbl(Address(rsp, 0), r);
}
@ -407,118 +345,16 @@ void InterpreterMacroAssembler::push(TosState state) {
}

// Tagged stack helpers for swap and dup
void InterpreterMacroAssembler::load_ptr_and_tag(int n, Register val,
Register tag) {
// Helpers for swap and dup
void InterpreterMacroAssembler::load_ptr(int n, Register val) {
movptr(val, Address(rsp, Interpreter::expr_offset_in_bytes(n)));
if (TaggedStackInterpreter) {
movptr(tag, Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)));
}
}

void InterpreterMacroAssembler::store_ptr_and_tag(int n, Register val,
Register tag) {
void InterpreterMacroAssembler::store_ptr(int n, Register val) {
movptr(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val);
if (TaggedStackInterpreter) {
movptr(Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)), tag);
}
}

// Tagged local support
void InterpreterMacroAssembler::tag_local(frame::Tag tag, int n) {
if (TaggedStackInterpreter) {
if (tag == frame::TagCategory2) {
movptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n+1)),
(int32_t)frame::TagValue);
movptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n)),
(int32_t)frame::TagValue);
} else {
movptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)tag);
}
}
}

void InterpreterMacroAssembler::tag_local(frame::Tag tag, Register idx) {
if (TaggedStackInterpreter) {
if (tag == frame::TagCategory2) {
movptr(Address(r14, idx, Address::times_8,
Interpreter::local_tag_offset_in_bytes(1)), (int32_t)frame::TagValue);
movptr(Address(r14, idx, Address::times_8,
Interpreter::local_tag_offset_in_bytes(0)), (int32_t)frame::TagValue);
} else {
movptr(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)),
(int32_t)tag);
}
}
}

void InterpreterMacroAssembler::tag_local(Register tag, Register idx) {
if (TaggedStackInterpreter) {
// can only be TagValue or TagReference
movptr(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)), tag);
}
}

void InterpreterMacroAssembler::tag_local(Register tag, int n) {
if (TaggedStackInterpreter) {
// can only be TagValue or TagReference
movptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), tag);
}
}

#ifdef ASSERT
void InterpreterMacroAssembler::verify_local_tag(frame::Tag tag, int n) {
if (TaggedStackInterpreter) {
frame::Tag t = tag;
if (tag == frame::TagCategory2) {
Label nbl;
t = frame::TagValue; // change to what is stored in locals
cmpptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n+1)), (int32_t)t);
jcc(Assembler::equal, nbl);
stop("Local tag is bad for long/double");
bind(nbl);
}
Label notBad;
cmpq(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)t);
jcc(Assembler::equal, notBad);
// Also compare if the local value is zero, then the tag might
// not have been set coming from deopt.
cmpptr(Address(r14, Interpreter::local_offset_in_bytes(n)), 0);
jcc(Assembler::equal, notBad);
stop("Local tag is bad");
bind(notBad);
}
}

void InterpreterMacroAssembler::verify_local_tag(frame::Tag tag, Register idx) {
if (TaggedStackInterpreter) {
frame::Tag t = tag;
if (tag == frame::TagCategory2) {
Label nbl;
t = frame::TagValue; // change to what is stored in locals
cmpptr(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(1)), (int32_t)t);
jcc(Assembler::equal, nbl);
stop("Local tag is bad for long/double");
bind(nbl);
}
Label notBad;
cmpptr(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)), (int32_t)t);
jcc(Assembler::equal, notBad);
// Also compare if the local value is zero, then the tag might
// not have been set coming from deopt.
cmpptr(Address(r14, idx, Address::times_8, Interpreter::local_offset_in_bytes(0)), 0);
jcc(Assembler::equal, notBad);
stop("Local tag is bad");
bind(notBad);
}
}
#endif // ASSERT

void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point) {
MacroAssembler::call_VM_leaf_base(entry_point, 0);
}
@ -1,5 +1,5 @@
/*
* Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -120,38 +120,16 @@ class InterpreterMacroAssembler: public MacroAssembler {
void pop(TosState state); // transition vtos -> state
void push(TosState state); // transition state -> vtos

// Tagged stack support, pop and push both tag and value.
void pop_ptr(Register r, Register tag);
void push_ptr(Register r, Register tag);
#endif // CC_INTERP

DEBUG_ONLY(void verify_stack_tag(frame::Tag t);)

#ifndef CC_INTERP

// Tagged stack helpers for swap and dup
void load_ptr_and_tag(int n, Register val, Register tag);
void store_ptr_and_tag(int n, Register val, Register tag);

// Tagged Local support
void tag_local(frame::Tag tag, int n);
void tag_local(Register tag, int n);
void tag_local(frame::Tag tag, Register idx);
void tag_local(Register tag, Register idx);

#ifdef ASSERT
void verify_local_tag(frame::Tag tag, int n);
void verify_local_tag(frame::Tag tag, Register idx);
#endif // ASSERT

void empty_expression_stack()
{
void empty_expression_stack() {
movptr(rsp, Address(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize));
// NULL last_sp until next java call
movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
}

// Helpers for swap and dup
void load_ptr(int n, Register val);
void store_ptr(int n, Register val);

// Super call_VM calls - correspond to MacroAssembler::call_VM(_leaf) calls
void super_call_VM_leaf(address entry_point);
void super_call_VM_leaf(address entry_point, Register arg_1);
@ -1,5 +1,5 @@
/*
* Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1998-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -86,33 +86,23 @@ class SlowSignatureHandler: public NativeSignatureIterator {
address _from;
intptr_t* _to;

#ifdef ASSERT
void verify_tag(frame::Tag t) {
assert(!TaggedStackInterpreter ||
*(intptr_t*)(_from+Interpreter::local_tag_offset_in_bytes(0)) == t, "wrong tag");
}
#endif // ASSERT

virtual void pass_int() {
*_to++ = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
debug_only(verify_tag(frame::TagValue));
_from -= Interpreter::stackElementSize();
_from -= Interpreter::stackElementSize;
}

virtual void pass_long() {
_to[0] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
_to[1] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(0));
debug_only(verify_tag(frame::TagValue));
_to += 2;
_from -= 2*Interpreter::stackElementSize();
_from -= 2*Interpreter::stackElementSize;
}

virtual void pass_object() {
// pass address of from
intptr_t from_addr = (intptr_t)(_from + Interpreter::local_offset_in_bytes(0));
*_to++ = (*(intptr_t*)from_addr == 0) ? NULL_WORD : from_addr;
debug_only(verify_tag(frame::TagReference));
_from -= Interpreter::stackElementSize();
_from -= Interpreter::stackElementSize;
}

public:
@ -1,5 +1,5 @@
/*
* Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -293,18 +293,10 @@ class SlowSignatureHandler
intptr_t* _fp_identifiers;
unsigned int _num_args;

#ifdef ASSERT
void verify_tag(frame::Tag t) {
assert(!TaggedStackInterpreter ||
*(intptr_t*)(_from+Interpreter::local_tag_offset_in_bytes(0)) == t, "wrong tag");
}
#endif // ASSERT

virtual void pass_int()
{
jint from_obj = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
debug_only(verify_tag(frame::TagValue));
_from -= Interpreter::stackElementSize();
_from -= Interpreter::stackElementSize;

if (_num_args < Argument::n_int_register_parameters_c-1) {
*_reg_args++ = from_obj;
@ -317,8 +309,7 @@ class SlowSignatureHandler
virtual void pass_long()
{
intptr_t from_obj = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
debug_only(verify_tag(frame::TagValue));
_from -= 2*Interpreter::stackElementSize();
_from -= 2*Interpreter::stackElementSize;

if (_num_args < Argument::n_int_register_parameters_c-1) {
*_reg_args++ = from_obj;
@ -331,8 +322,7 @@ class SlowSignatureHandler
virtual void pass_object()
{
intptr_t *from_addr = (intptr_t*)(_from + Interpreter::local_offset_in_bytes(0));
debug_only(verify_tag(frame::TagReference));
_from -= Interpreter::stackElementSize();
_from -= Interpreter::stackElementSize;
if (_num_args < Argument::n_int_register_parameters_c-1) {
*_reg_args++ = (*from_addr == 0) ? NULL : (intptr_t) from_addr;
_num_args++;
@ -344,8 +334,7 @@ class SlowSignatureHandler
virtual void pass_float()
{
jint from_obj = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
debug_only(verify_tag(frame::TagValue));
_from -= Interpreter::stackElementSize();
_from -= Interpreter::stackElementSize;

if (_num_args < Argument::n_float_register_parameters_c-1) {
*_reg_args++ = from_obj;
@ -359,8 +348,7 @@ class SlowSignatureHandler
virtual void pass_double()
{
intptr_t from_obj = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
debug_only(verify_tag(frame::TagValue));
_from -= 2*Interpreter::stackElementSize();
_from -= 2*Interpreter::stackElementSize;

if (_num_args < Argument::n_float_register_parameters_c-1) {
*_reg_args++ = from_obj;
@ -397,18 +385,10 @@ class SlowSignatureHandler
unsigned int _num_int_args;
unsigned int _num_fp_args;

#ifdef ASSERT
void verify_tag(frame::Tag t) {
assert(!TaggedStackInterpreter ||
*(intptr_t*)(_from+Interpreter::local_tag_offset_in_bytes(0)) == t, "wrong tag");
}
#endif // ASSERT

virtual void pass_int()
{
jint from_obj = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
debug_only(verify_tag(frame::TagValue));
_from -= Interpreter::stackElementSize();
_from -= Interpreter::stackElementSize;

if (_num_int_args < Argument::n_int_register_parameters_c-1) {
*_int_args++ = from_obj;
@ -421,8 +401,7 @@ class SlowSignatureHandler
virtual void pass_long()
{
intptr_t from_obj = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
debug_only(verify_tag(frame::TagValue));
_from -= 2*Interpreter::stackElementSize();
_from -= 2*Interpreter::stackElementSize;

if (_num_int_args < Argument::n_int_register_parameters_c-1) {
*_int_args++ = from_obj;
@ -435,8 +414,7 @@ class SlowSignatureHandler
virtual void pass_object()
{
intptr_t *from_addr = (intptr_t*)(_from + Interpreter::local_offset_in_bytes(0));
debug_only(verify_tag(frame::TagReference));
_from -= Interpreter::stackElementSize();
_from -= Interpreter::stackElementSize;

if (_num_int_args < Argument::n_int_register_parameters_c-1) {
*_int_args++ = (*from_addr == 0) ? NULL : (intptr_t)from_addr;
@ -449,8 +427,7 @@ class SlowSignatureHandler
virtual void pass_float()
{
jint from_obj = *(jint*)(_from+Interpreter::local_offset_in_bytes(0));
debug_only(verify_tag(frame::TagValue));
_from -= Interpreter::stackElementSize();
_from -= Interpreter::stackElementSize;

if (_num_fp_args < Argument::n_float_register_parameters_c) {
*_fp_args++ = from_obj;
@ -463,7 +440,7 @@ class SlowSignatureHandler
virtual void pass_double()
{
intptr_t from_obj = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
_from -= 2*Interpreter::stackElementSize();
_from -= 2*Interpreter::stackElementSize;

if (_num_fp_args < Argument::n_float_register_parameters_c) {
*_fp_args++ = from_obj;
@ -1,5 +1,5 @@
/*
* Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,39 +31,16 @@
// the fpu stack.
static const int return_sentinel;

static Address::ScaleFactor stackElementScale() {
return TaggedStackInterpreter? Address::times_8 : Address::times_4;
}
static Address::ScaleFactor stackElementScale() { return Address::times_4; }

// Offset from rsp (which points to the last stack element)
static int expr_offset_in_bytes(int i) { return stackElementSize()*i ; }
static int expr_tag_offset_in_bytes(int i) {
assert(TaggedStackInterpreter, "should not call this");
return expr_offset_in_bytes(i) + wordSize;
}

// Support for Tagged Stacks
static int expr_offset_in_bytes(int i) { return stackElementSize * i; }

// Stack index relative to tos (which points at value)
static int expr_index_at(int i) {
return stackElementWords() * i;
}

static int expr_tag_index_at(int i) {
assert(TaggedStackInterpreter, "should not call this");
// tag is one word above java stack element
return stackElementWords() * i + 1;
}
static int expr_index_at(int i) { return stackElementWords * i; }

// Already negated by c++ interpreter
static int local_index_at(int i) {
assert(i<=0, "local direction already negated");
return stackElementWords() * i + (value_offset_in_bytes()/wordSize);
}

static int local_tag_index_at(int i) {
assert(i<=0, "local direction already negated");
assert(TaggedStackInterpreter, "should not call this");
return stackElementWords() * i + (tag_offset_in_bytes()/wordSize);
static int local_index_at(int i) {
assert(i <= 0, "local direction already negated");
return stackElementWords * i;
}
@ -1,5 +1,5 @@
/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -131,14 +131,7 @@ address InterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKin
// java methods. Interpreter::method_kind(...) will select
// this entry point for the corresponding methods in JDK 1.3.
// get argument
if (TaggedStackInterpreter) {
__ pushl(Address(rsp, 3*wordSize)); // push hi (and note rsp -= wordSize)
__ pushl(Address(rsp, 2*wordSize)); // push lo
__ fld_d(Address(rsp, 0)); // get double in ST0
__ addptr(rsp, 2*wordSize);
} else {
__ fld_d(Address(rsp, 1*wordSize));
}
__ fld_d(Address(rsp, 1*wordSize));
switch (kind) {
case Interpreter::java_lang_math_sin :
__ trigfunc('s');
@ -127,7 +127,8 @@ void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
RegisterOrConstant arg_slots,
int arg_mask,
Register rax_argslot,
Register rbx_temp, Register rdx_temp) {
Register rbx_temp, Register rdx_temp, Register temp3_reg) {
assert(temp3_reg == noreg, "temp3 not required");
assert_different_registers(rax_argslot, rbx_temp, rdx_temp,
(!arg_slots.is_register() ? rsp : arg_slots.as_register()));

@ -185,7 +186,8 @@ void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
RegisterOrConstant arg_slots,
Register rax_argslot,
Register rbx_temp, Register rdx_temp) {
Register rbx_temp, Register rdx_temp, Register temp3_reg) {
assert(temp3_reg == noreg, "temp3 not required");
assert_different_registers(rax_argslot, rbx_temp, rdx_temp,
(!arg_slots.is_register() ? rsp : arg_slots.as_register()));

@ -260,6 +262,22 @@ void trace_method_handle_stub(const char* adaptername,
}
#endif //PRODUCT

// which conversion op types are implemented here?
int MethodHandles::adapter_conversion_ops_supported_mask() {
return ((1<<sun_dyn_AdapterMethodHandle::OP_RETYPE_ONLY)
|(1<<sun_dyn_AdapterMethodHandle::OP_RETYPE_RAW)
|(1<<sun_dyn_AdapterMethodHandle::OP_CHECK_CAST)
|(1<<sun_dyn_AdapterMethodHandle::OP_PRIM_TO_PRIM)
|(1<<sun_dyn_AdapterMethodHandle::OP_REF_TO_PRIM)
|(1<<sun_dyn_AdapterMethodHandle::OP_SWAP_ARGS)
|(1<<sun_dyn_AdapterMethodHandle::OP_ROT_ARGS)
|(1<<sun_dyn_AdapterMethodHandle::OP_DUP_ARGS)
|(1<<sun_dyn_AdapterMethodHandle::OP_DROP_ARGS)
//|(1<<sun_dyn_AdapterMethodHandle::OP_SPREAD_ARGS) //BUG!
);
// FIXME: MethodHandlesTest gets a crash if we enable OP_SPREAD_ARGS.
}

// Generate an "entry" field for a method handle.
// This determines how the method handle will respond to calls.
void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
@ -498,7 +516,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
#ifndef _LP64
if (arg_slots == 2) {
__ movl(rdx_temp, prim_value_addr.plus_disp(wordSize));
__ movl(Address(rax_argslot, Interpreter::stackElementSize()), rdx_temp);
__ movl(Address(rax_argslot, Interpreter::stackElementSize), rdx_temp);
}
#endif //_LP64
}
@ -594,7 +612,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ lea(rax_argslot, __ argument_address(rax_argslot, 1));
remove_arg_slots(_masm, -stack_move_unit(),
rax_argslot, rbx_temp, rdx_temp);
vmarg = Address(rax_argslot, -Interpreter::stackElementSize());
vmarg = Address(rax_argslot, -Interpreter::stackElementSize);
__ movl(rdx_temp, vmarg);
}
break;
@ -663,8 +681,8 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ lea(rax_argslot, __ argument_address(rax_argslot, 1));
insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK,
rax_argslot, rbx_temp, rdx_temp);
Address vmarg1(rax_argslot, -Interpreter::stackElementSize());
Address vmarg2 = vmarg1.plus_disp(Interpreter::stackElementSize());
Address vmarg1(rax_argslot, -Interpreter::stackElementSize);
Address vmarg2 = vmarg1.plus_disp(Interpreter::stackElementSize);

switch (ek) {
case _adapter_opt_i2l:
@ -716,7 +734,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK,
rax_argslot, rbx_temp, rdx_temp);
}
Address vmarg(rax_argslot, -Interpreter::stackElementSize());
Address vmarg(rax_argslot, -Interpreter::stackElementSize);

#ifdef _LP64
if (ek == _adapter_opt_f2d) {
@ -1014,7 +1032,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
// Array length checks out. Now insert any required stack slots.
if (length_constant == -1) {
// Form a pointer to the end of the affected region.
__ lea(rdx_argslot_limit, Address(rax_argslot, Interpreter::stackElementSize()));
__ lea(rdx_argslot_limit, Address(rax_argslot, Interpreter::stackElementSize));
// 'stack_move' is negative number of words to insert
Register rdi_stack_move = rdi;
__ movl2ptr(rdi_stack_move, rcx_amh_conversion);
@ -1052,7 +1070,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ movptr(rbx_temp, Address(rsi_source, 0));
__ movptr(Address(rax_argslot, 0), rbx_temp);
__ addptr(rsi_source, type2aelembytes(elem_type));
__ addptr(rax_argslot, Interpreter::stackElementSize());
__ addptr(rax_argslot, Interpreter::stackElementSize);
__ cmpptr(rax_argslot, rdx_argslot_limit);
__ jccb(Assembler::less, loop);
} else if (length_constant == 0) {
@ -1065,7 +1083,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ movptr(rbx_temp, Address(rsi_array, elem_offset));
__ movptr(Address(rax_argslot, slot_offset), rbx_temp);
elem_offset += type2aelembytes(elem_type);
slot_offset += Interpreter::stackElementSize();
slot_offset += Interpreter::stackElementSize;
}
}

@ -115,8 +115,8 @@ void OptoRuntime::generate_exception_blob() {

// rax: exception handler for given <exception oop/exception pc>

// Restore SP from BP if the exception PC is a MethodHandle call.
__ cmpl(Address(rcx, JavaThread::is_method_handle_exception_offset()), 0);
// Restore SP from BP if the exception PC is a MethodHandle call site.
__ cmpl(Address(rcx, JavaThread::is_method_handle_return_offset()), 0);
__ cmovptr(Assembler::notEqual, rsp, rbp);

// We have a handler in rax, (could be deopt blob)

@ -503,34 +503,9 @@ static void patch_callers_callsite(MacroAssembler *masm) {
}

// Helper function to put tags in interpreter stack.
static void tag_stack(MacroAssembler *masm, const BasicType sig, int st_off) {
if (TaggedStackInterpreter) {
int tag_offset = st_off + Interpreter::expr_tag_offset_in_bytes(0);
if (sig == T_OBJECT || sig == T_ARRAY) {
__ movptr(Address(rsp, tag_offset), frame::TagReference);
} else if (sig == T_LONG || sig == T_DOUBLE) {
int next_tag_offset = st_off + Interpreter::expr_tag_offset_in_bytes(1);
__ movptr(Address(rsp, next_tag_offset), frame::TagValue);
__ movptr(Address(rsp, tag_offset), frame::TagValue);
} else {
__ movptr(Address(rsp, tag_offset), frame::TagValue);
}
}
}

// Double and long values with Tagged stacks are not contiguous.
static void move_c2i_double(MacroAssembler *masm, XMMRegister r, int st_off) {
int next_off = st_off - Interpreter::stackElementSize();
if (TaggedStackInterpreter) {
__ movdbl(Address(rsp, next_off), r);
// Move top half up and put tag in the middle.
__ movl(rdi, Address(rsp, next_off+wordSize));
__ movl(Address(rsp, st_off), rdi);
tag_stack(masm, T_DOUBLE, next_off);
} else {
__ movdbl(Address(rsp, next_off), r);
}
int next_off = st_off - Interpreter::stackElementSize;
__ movdbl(Address(rsp, next_off), r);
}

static void gen_c2i_adapter(MacroAssembler *masm,
@ -560,7 +535,7 @@ static void gen_c2i_adapter(MacroAssembler *masm,
// Since all args are passed on the stack, total_args_passed * interpreter_
// stack_element_size is the
// space we need.
int extraspace = total_args_passed * Interpreter::stackElementSize();
int extraspace = total_args_passed * Interpreter::stackElementSize;

// Get return address
__ pop(rax);
@ -578,8 +553,8 @@ static void gen_c2i_adapter(MacroAssembler *masm,
}

// st_off points to lowest address on stack.
int st_off = ((total_args_passed - 1) - i) * Interpreter::stackElementSize();
int next_off = st_off - Interpreter::stackElementSize();
int st_off = ((total_args_passed - 1) - i) * Interpreter::stackElementSize;
int next_off = st_off - Interpreter::stackElementSize;

// Say 4 args:
// i st_off
@ -601,7 +576,6 @@ static void gen_c2i_adapter(MacroAssembler *masm,
if (!r_2->is_valid()) {
__ movl(rdi, Address(rsp, ld_off));
__ movptr(Address(rsp, st_off), rdi);
tag_stack(masm, sig_bt[i], st_off);
} else {

// ld_off == LSW, ld_off+VMRegImpl::stack_slot_size == MSW
@ -619,13 +593,11 @@ static void gen_c2i_adapter(MacroAssembler *masm,
__ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
#endif // _LP64
tag_stack(masm, sig_bt[i], next_off);
}
} else if (r_1->is_Register()) {
Register r = r_1->as_Register();
if (!r_2->is_valid()) {
__ movl(Address(rsp, st_off), r);
tag_stack(masm, sig_bt[i], st_off);
} else {
// long/double in gpr
NOT_LP64(ShouldNotReachHere());
@ -639,17 +611,14 @@ static void gen_c2i_adapter(MacroAssembler *masm,
__ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
__ movptr(Address(rsp, next_off), r);
tag_stack(masm, sig_bt[i], next_off);
} else {
__ movptr(Address(rsp, st_off), r);
tag_stack(masm, sig_bt[i], st_off);
}
}
} else {
assert(r_1->is_XMMRegister(), "");
if (!r_2->is_valid()) {
__ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
tag_stack(masm, sig_bt[i], st_off);
} else {
assert(sig_bt[i] == T_DOUBLE || sig_bt[i] == T_LONG, "wrong type");
move_c2i_double(masm, r_1->as_XMMRegister(), st_off);
@ -665,20 +634,9 @@ static void gen_c2i_adapter(MacroAssembler *masm,
}

// For tagged stacks, double or long value aren't contiguous on the stack
// so get them contiguous for the xmm load
static void move_i2c_double(MacroAssembler *masm, XMMRegister r, Register saved_sp, int ld_off) {
int next_val_off = ld_off - Interpreter::stackElementSize();
if (TaggedStackInterpreter) {
// use tag slot temporarily for MSW
__ movptr(rsi, Address(saved_sp, ld_off));
__ movptr(Address(saved_sp, next_val_off+wordSize), rsi);
__ movdbl(r, Address(saved_sp, next_val_off));
// restore tag
__ movptr(Address(saved_sp, next_val_off+wordSize), frame::TagValue);
} else {
__ movdbl(r, Address(saved_sp, next_val_off));
}
int next_val_off = ld_off - Interpreter::stackElementSize;
__ movdbl(r, Address(saved_sp, next_val_off));
}

static void gen_i2c_adapter(MacroAssembler *masm,
@ -797,9 +755,9 @@ static void gen_i2c_adapter(MacroAssembler *masm,
assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
"scrambled load targets?");
// Load in argument order going down.
int ld_off = (total_args_passed - i)*Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
int ld_off = (total_args_passed - i) * Interpreter::stackElementSize;
// Point to interpreter value (vs. tag)
int next_off = ld_off - Interpreter::stackElementSize();
int next_off = ld_off - Interpreter::stackElementSize;
//
//
//
@ -2322,7 +2280,7 @@ nmethod *SharedRuntime::generate_dtrace_nmethod(
// this function returns the adjust size (in number of words) to a c2i adapter
// activation for use during deoptimization
int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
return (callee_locals - callee_parameters) * Interpreter::stackElementWords();
return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
}

@ -452,22 +452,6 @@ static void patch_callers_callsite(MacroAssembler *masm) {
__ bind(L);
}

// Helper function to put tags in interpreter stack.
static void tag_stack(MacroAssembler *masm, const BasicType sig, int st_off) {
if (TaggedStackInterpreter) {
int tag_offset = st_off + Interpreter::expr_tag_offset_in_bytes(0);
if (sig == T_OBJECT || sig == T_ARRAY) {
__ movptr(Address(rsp, tag_offset), (int32_t) frame::TagReference);
} else if (sig == T_LONG || sig == T_DOUBLE) {
int next_tag_offset = st_off + Interpreter::expr_tag_offset_in_bytes(1);
__ movptr(Address(rsp, next_tag_offset), (int32_t) frame::TagValue);
__ movptr(Address(rsp, tag_offset), (int32_t) frame::TagValue);
} else {
__ movptr(Address(rsp, tag_offset), (int32_t) frame::TagValue);
}
}
}

static void gen_c2i_adapter(MacroAssembler *masm,
int total_args_passed,
@ -489,7 +473,7 @@ static void gen_c2i_adapter(MacroAssembler *masm,
// we also account for the return address location since
// we store it first rather than hold it in rax across all the shuffling

int extraspace = (total_args_passed * Interpreter::stackElementSize()) + wordSize;
int extraspace = (total_args_passed * Interpreter::stackElementSize) + wordSize;

// stack is aligned, keep it that way
extraspace = round_to(extraspace, 2*wordSize);
@ -513,9 +497,8 @@ static void gen_c2i_adapter(MacroAssembler *masm,
}

// offset to start parameters
int st_off = (total_args_passed - i) * Interpreter::stackElementSize() +
Interpreter::value_offset_in_bytes();
int next_off = st_off - Interpreter::stackElementSize();
int st_off = (total_args_passed - i) * Interpreter::stackElementSize;
int next_off = st_off - Interpreter::stackElementSize;

// Say 4 args:
// i st_off
@ -543,7 +526,6 @@ static void gen_c2i_adapter(MacroAssembler *masm,
// sign extend??
__ movl(rax, Address(rsp, ld_off));
__ movptr(Address(rsp, st_off), rax);
tag_stack(masm, sig_bt[i], st_off);

} else {

@ -560,10 +542,8 @@ static void gen_c2i_adapter(MacroAssembler *masm,
__ mov64(rax, CONST64(0xdeadffffdeadaaaa));
__ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
tag_stack(masm, sig_bt[i], next_off);
} else {
__ movq(Address(rsp, st_off), rax);
tag_stack(masm, sig_bt[i], st_off);
}
}
} else if (r_1->is_Register()) {
@ -572,7 +552,6 @@ static void gen_c2i_adapter(MacroAssembler *masm,
// must be only an int (or less ) so move only 32bits to slot
// why not sign extend??
__ movl(Address(rsp, st_off), r);
tag_stack(masm, sig_bt[i], st_off);
} else {
// Two VMREgs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
// T_DOUBLE and T_LONG use two slots in the interpreter
@ -584,10 +563,8 @@ static void gen_c2i_adapter(MacroAssembler *masm,
__ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
__ movq(Address(rsp, next_off), r);
tag_stack(masm, sig_bt[i], next_off);
} else {
__ movptr(Address(rsp, st_off), r);
tag_stack(masm, sig_bt[i], st_off);
}
}
} else {
@ -595,7 +572,6 @@ static void gen_c2i_adapter(MacroAssembler *masm,
if (!r_2->is_valid()) {
// only a float use just part of the slot
__ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
tag_stack(masm, sig_bt[i], st_off);
} else {
#ifdef ASSERT
// Overwrite the unused slot with known junk
@ -603,7 +579,6 @@ static void gen_c2i_adapter(MacroAssembler *masm,
__ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
__ movdbl(Address(rsp, next_off), r_1->as_XMMRegister());
tag_stack(masm, sig_bt[i], next_off);
}
}
}
@ -688,9 +663,9 @@ static void gen_i2c_adapter(MacroAssembler *masm,
assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
"scrambled load targets?");
// Load in argument order going down.
int ld_off = (total_args_passed - i)*Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
int ld_off = (total_args_passed - i)*Interpreter::stackElementSize;
// Point to interpreter value (vs. tag)
int next_off = ld_off - Interpreter::stackElementSize();
int next_off = ld_off - Interpreter::stackElementSize;
//
//
//
@ -2535,7 +2510,7 @@ nmethod *SharedRuntime::generate_dtrace_nmethod(MacroAssembler *masm,
// this function returns the adjust size (in number of words) to a c2i adapter
// activation for use during deoptimization
int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
return (callee_locals - callee_parameters) * Interpreter::stackElementWords();
return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
}

@ -3328,8 +3303,8 @@ void OptoRuntime::generate_exception_blob() {

// rax: exception handler

// Restore SP from BP if the exception PC is a MethodHandle call.
__ cmpl(Address(r15_thread, JavaThread::is_method_handle_exception_offset()), 0);
// Restore SP from BP if the exception PC is a MethodHandle call site.
__ cmpl(Address(r15_thread, JavaThread::is_method_handle_return_offset()), 0);
__ cmovptr(Assembler::notEqual, rsp, rbp);

// We have a handler in rax (could be deopt blob).

@ -139,7 +139,7 @@ class StubGenerator: public StubCodeGenerator {
// stub code
__ enter();
__ movptr(rcx, parameter_size); // parameter counter
__ shlptr(rcx, Interpreter::logStackElementSize()); // convert parameter count to bytes
__ shlptr(rcx, Interpreter::logStackElementSize); // convert parameter count to bytes
__ addptr(rcx, locals_count_in_bytes); // reserve space for register saves
__ subptr(rsp, rcx);
__ andptr(rsp, -(StackAlignmentInBytes)); // Align stack
@ -194,12 +194,6 @@ class StubGenerator: public StubCodeGenerator {
__ xorptr(rbx, rbx);

__ BIND(loop);
if (TaggedStackInterpreter) {
__ movptr(rax, Address(rdx, rcx, Interpreter::stackElementScale(),
-2*wordSize)); // get tag
__ movptr(Address(rsp, rbx, Interpreter::stackElementScale(),
Interpreter::expr_tag_offset_in_bytes(0)), rax); // store tag
}

// get parameter
__ movptr(rax, Address(rdx, rcx, Interpreter::stackElementScale(), -wordSize));
@ -430,7 +424,7 @@ class StubGenerator: public StubCodeGenerator {
__ verify_oop(exception_oop);

// Restore SP from BP if the exception PC is a MethodHandle call site.
__ cmpl(Address(thread, JavaThread::is_method_handle_exception_offset()), 0);
__ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
__ cmovptr(Assembler::notEqual, rsp, rbp);

// continue at exception handler (return address removed)
@ -812,7 +806,7 @@ class StubGenerator: public StubCodeGenerator {
Label L_copy_64_bytes_loop, L_copy_64_bytes, L_copy_8_bytes, L_exit;
// Copy 64-byte chunks
__ jmpb(L_copy_64_bytes);
__ align(16);
__ align(OptoLoopAlignment);
__ BIND(L_copy_64_bytes_loop);

if(UseUnalignedLoadStores) {
@ -874,7 +868,7 @@ class StubGenerator: public StubCodeGenerator {
Label L_copy_64_bytes_loop, L_copy_64_bytes, L_copy_8_bytes, L_exit;
// Copy 64-byte chunks
__ jmpb(L_copy_64_bytes);
__ align(16);
__ align(OptoLoopAlignment);
__ BIND(L_copy_64_bytes_loop);
__ movq(mmx0, Address(from, 0));
__ movq(mmx1, Address(from, 8));
@ -1144,7 +1138,7 @@ class StubGenerator: public StubCodeGenerator {
__ movl(Address(to, count, sf, 0), rdx);
__ jmpb(L_copy_8_bytes);

__ align(16);
__ align(OptoLoopAlignment);
// Move 8 bytes
__ BIND(L_copy_8_bytes_loop);
if (UseXMMForArrayCopy) {
@ -1235,7 +1229,7 @@ class StubGenerator: public StubCodeGenerator {
}
} else {
__ jmpb(L_copy_8_bytes);
__ align(16);
__ align(OptoLoopAlignment);
__ BIND(L_copy_8_bytes_loop);
__ fild_d(Address(from, 0));
__ fistp_d(Address(from, to_from, Address::times_1));
@ -1282,7 +1276,7 @@ class StubGenerator: public StubCodeGenerator {

__ jmpb(L_copy_8_bytes);

__ align(16);
__ align(OptoLoopAlignment);
__ BIND(L_copy_8_bytes_loop);
if (VM_Version::supports_mmx()) {
if (UseXMMForArrayCopy) {
@ -1454,7 +1448,7 @@ class StubGenerator: public StubCodeGenerator {
// Loop control:
// for (count = -count; count != 0; count++)
// Base pointers src, dst are biased by 8*count,to last element.
__ align(16);
__ align(OptoLoopAlignment);

__ BIND(L_store_element);
__ movptr(to_element_addr, elem); // store the oop

@ -278,11 +278,6 @@ class StubGenerator: public StubCodeGenerator {
__ movptr(c_rarg2, parameters); // parameter pointer
__ movl(c_rarg1, c_rarg3); // parameter counter is in c_rarg1
__ BIND(loop);
if (TaggedStackInterpreter) {
__ movl(rax, Address(c_rarg2, 0)); // get tag
__ addptr(c_rarg2, wordSize); // advance to next tag
__ push(rax); // pass tag
}
__ movptr(rax, Address(c_rarg2, 0));// get parameter
__ addptr(c_rarg2, wordSize); // advance to next parameter
__ decrementl(c_rarg1); // decrement counter
@ -871,9 +866,8 @@ class StubGenerator: public StubCodeGenerator {
}

address generate_fp_mask(const char *stub_name, int64_t mask) {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", stub_name);

__ align(16);
address start = __ pc();

__ emit_data64( mask, relocInfo::none );
@ -1268,7 +1262,7 @@ class StubGenerator: public StubCodeGenerator {
Label& L_copy_32_bytes, Label& L_copy_8_bytes) {
DEBUG_ONLY(__ stop("enter at entry label, not here"));
Label L_loop;
__ align(16);
__ align(OptoLoopAlignment);
__ BIND(L_loop);
if(UseUnalignedLoadStores) {
__ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
@ -1309,7 +1303,7 @@ class StubGenerator: public StubCodeGenerator {
Label& L_copy_32_bytes, Label& L_copy_8_bytes) {
DEBUG_ONLY(__ stop("enter at entry label, not here"));
Label L_loop;
__ align(16);
__ align(OptoLoopAlignment);
__ BIND(L_loop);
if(UseUnalignedLoadStores) {
__ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16));
@ -2229,7 +2223,7 @@ class StubGenerator: public StubCodeGenerator {
// Loop control:
// for (count = -count; count != 0; count++)
// Base pointers src, dst are biased by 8*(count-1),to last element.
__ align(16);
__ align(OptoLoopAlignment);

__ BIND(L_store_element);
__ store_heap_oop(to_element_addr, rax_oop); // store the oop

@ -1,5 +1,5 @@
/*
* Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,8 +28,8 @@
// Size of interpreter code. Increase if too small. Interpreter will
// fail with a guarantee ("not enough space for interpreter generation");
// if too small.
// Run with +PrintInterpreterSize to get the VM to print out the size.
// Max size with JVMTI and TaggedStackInterpreter
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
#ifdef AMD64
const static int InterpreterCodeSize = 200 * 1024;
#else

@ -305,7 +305,6 @@ address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type
case T_FLOAT :
{ const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
__ pop(t); // remove return address first
__ pop_dtos_to_rsp();
// Must return a result for interpreter or compiler. In SSE
// mode, results are returned in xmm0 and the FPU stack must
// be empty.
@ -468,7 +467,7 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
// see if the frame is greater than one page in size. If so,
// then we need to verify there is enough stack space remaining
// for the additional locals.
__ cmpl(rdx, (page_size - overhead_size)/Interpreter::stackElementSize());
__ cmpl(rdx, (page_size - overhead_size)/Interpreter::stackElementSize);
__ jcc(Assembler::belowEqual, after_frame_check);

// compute rsp as if this were going to be the last frame on
@ -882,7 +881,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ get_method(method);
__ verify_oop(method);
__ load_unsigned_short(t, Address(method, methodOopDesc::size_of_parameters_offset()));
__ shlptr(t, Interpreter::logStackElementSize());
__ shlptr(t, Interpreter::logStackElementSize);
__ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror
__ subptr(rsp, t);
__ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
@ -1225,9 +1224,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
__ testl(rdx, rdx);
__ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
__ bind(loop);
if (TaggedStackInterpreter) {
__ push((int32_t)NULL_WORD); // push tag
}
__ push((int32_t)NULL_WORD); // initialize local variables
__ decrement(rdx); // until everything initialized
__ jcc(Assembler::greater, loop);
@ -1463,7 +1459,7 @@ int AbstractInterpreter::size_top_interpreter_activation(methodOop method) {

const int extra_stack = methodOopDesc::extra_stack_entries();
const int method_stack = (method->max_locals() + method->max_stack() + extra_stack) *
Interpreter::stackElementWords();
Interpreter::stackElementWords;
return overhead_size + method_stack + stub_code;
}

@ -1487,9 +1483,9 @@ int AbstractInterpreter::layout_activation(methodOop method,
// NOTE: return size is in words not bytes

// fixed size of an interpreter frame:
int max_locals = method->max_locals() * Interpreter::stackElementWords();
int max_locals = method->max_locals() * Interpreter::stackElementWords;
int extra_locals = (method->max_locals() - method->size_of_parameters()) *
Interpreter::stackElementWords();
Interpreter::stackElementWords;

int overhead = frame::sender_sp_offset - frame::interpreter_frame_initial_sp_offset;

@ -1499,9 +1495,9 @@ int AbstractInterpreter::layout_activation(methodOop method,

int size = overhead +
((callee_locals - callee_param_count)*Interpreter::stackElementWords()) +
((callee_locals - callee_param_count)*Interpreter::stackElementWords) +
(moncount*frame::interpreter_frame_monitor_size()) +
tempcount*Interpreter::stackElementWords() + popframe_extra_args;
tempcount*Interpreter::stackElementWords + popframe_extra_args;

if (interpreter_frame != NULL) {
#ifdef ASSERT
@ -1525,7 +1521,7 @@ int AbstractInterpreter::layout_activation(methodOop method,

// Set last_sp
intptr_t* rsp = (intptr_t*) monbot -
tempcount*Interpreter::stackElementWords() -
tempcount*Interpreter::stackElementWords -
popframe_extra_args;
interpreter_frame->interpreter_frame_set_last_sp(rsp);

@ -1625,7 +1621,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
__ get_method(rax);
__ verify_oop(rax);
__ load_unsigned_short(rax, Address(rax, in_bytes(methodOopDesc::size_of_parameters_offset())));
__ shlptr(rax, Interpreter::logStackElementSize());
__ shlptr(rax, Interpreter::logStackElementSize);
__ restore_locals();
__ subptr(rdi, rax);
__ addptr(rdi, wordSize);

@ -199,7 +199,6 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
in_bytes(constantPoolCacheOopDesc::base_offset()) +
3 * wordSize));
__ andl(rbx, 0xFF);
if (TaggedStackInterpreter) __ shll(rbx, 1); // 2 slots per parameter.
__ lea(rsp, Address(rsp, rbx, Address::times_8));
__ dispatch_next(state, step);

@ -417,7 +416,7 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
// see if the frame is greater than one page in size. If so,
// then we need to verify there is enough stack space remaining
// for the additional locals.
__ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize());
__ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize);
__ jcc(Assembler::belowEqual, after_frame_check);

// compute rsp as if this were going to be the last frame on
@ -428,7 +427,7 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {

// locals + overhead, in bytes
__ mov(rax, rdx);
__ shlptr(rax, Interpreter::logStackElementSize()); // 2 slots per parameter.
__ shlptr(rax, Interpreter::logStackElementSize); // 2 slots per parameter.
__ addptr(rax, overhead_size);

#ifdef ASSERT
@ -759,7 +758,6 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// for natives the size of locals is zero

// compute beginning of parameters (r14)
if (TaggedStackInterpreter) __ shll(rcx, 1); // 2 slots per parameter.
__ lea(r14, Address(rsp, rcx, Address::times_8, -wordSize));

// add 2 zero-initialized slots for native calls
@ -865,7 +863,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ load_unsigned_short(t,
Address(method,
methodOopDesc::size_of_parameters_offset()));
__ shll(t, Interpreter::logStackElementSize());
__ shll(t, Interpreter::logStackElementSize);

__ subptr(rsp, t);
__ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
@ -1228,7 +1226,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
__ pop(rax);

// compute beginning of parameters (r14)
if (TaggedStackInterpreter) __ shll(rcx, 1); // 2 slots per parameter.
__ lea(r14, Address(rsp, rcx, Address::times_8, -wordSize));

// rdx - # of additional locals
@ -1239,7 +1236,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
__ testl(rdx, rdx);
__ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
__ bind(loop);
if (TaggedStackInterpreter) __ push((int) NULL_WORD); // push tag
__ push((int) NULL_WORD); // initialize local variables
__ decrementl(rdx); // until everything initialized
__ jcc(Assembler::greater, loop);
@ -1486,7 +1482,7 @@ int AbstractInterpreter::size_top_interpreter_activation(methodOop method) {
const int stub_code = frame::entry_frame_after_call_words;
const int extra_stack = methodOopDesc::extra_stack_entries();
const int method_stack = (method->max_locals() + method->max_stack() + extra_stack) *
Interpreter::stackElementWords();
Interpreter::stackElementWords;
return (overhead_size + method_stack + stub_code);
}

@ -1507,9 +1503,9 @@ int AbstractInterpreter::layout_activation(methodOop method,
// It is also guaranteed to be walkable even though it is in a skeletal state

// fixed size of an interpreter frame:
int max_locals = method->max_locals() * Interpreter::stackElementWords();
int max_locals = method->max_locals() * Interpreter::stackElementWords;
int extra_locals = (method->max_locals() - method->size_of_parameters()) *
Interpreter::stackElementWords();
Interpreter::stackElementWords;

int overhead = frame::sender_sp_offset -
frame::interpreter_frame_initial_sp_offset;
@ -1518,9 +1514,9 @@ int AbstractInterpreter::layout_activation(methodOop method,
// for the callee's params we only need to account for the extra
// locals.
int size = overhead +
(callee_locals - callee_param_count)*Interpreter::stackElementWords() +
(callee_locals - callee_param_count)*Interpreter::stackElementWords +
moncount * frame::interpreter_frame_monitor_size() +
tempcount* Interpreter::stackElementWords() + popframe_extra_args;
tempcount* Interpreter::stackElementWords + popframe_extra_args;
if (interpreter_frame != NULL) {
#ifdef ASSERT
if (!EnableMethodHandles)
@ -1544,7 +1540,7 @@ int AbstractInterpreter::layout_activation(methodOop method,

// Set last_sp
intptr_t* esp = (intptr_t*) monbot -
tempcount*Interpreter::stackElementWords() -
tempcount*Interpreter::stackElementWords -
popframe_extra_args;
interpreter_frame->interpreter_frame_set_last_sp(esp);

@ -1650,7 +1646,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
__ get_method(rax);
__ load_unsigned_short(rax, Address(rax, in_bytes(methodOopDesc::
size_of_parameters_offset())));
__ shll(rax, Interpreter::logStackElementSize());
__ shll(rax, Interpreter::logStackElementSize);
__ restore_locals(); // XXX do we need this?
__ subptr(r14, rax);
__ addptr(r14, wordSize);

@ -50,7 +50,7 @@ static inline Address daddress(int n) { return laddress(n); }
static inline Address aaddress(int n) { return iaddress(n); }

static inline Address iaddress(Register r) {
return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::value_offset_in_bytes());
return Address(rdi, r, Interpreter::stackElementScale());
}
static inline Address laddress(Register r) {
return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(1));
@ -59,12 +59,9 @@ static inline Address haddress(Register r) {
return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(0));
}

static inline Address faddress(Register r) { return iaddress(r); };
static inline Address daddress(Register r) {
assert(!TaggedStackInterpreter, "This doesn't work");
return laddress(r);
};
static inline Address aaddress(Register r) { return iaddress(r); };
static inline Address faddress(Register r) { return iaddress(r); }
static inline Address daddress(Register r) { return laddress(r); }
static inline Address aaddress(Register r) { return iaddress(r); }

// expression stack
// (Note: Must not use symmetric equivalents at_rsp_m1/2 since they store
@ -448,7 +445,6 @@ void TemplateTable::iload() {
// Get the local value into tos
locals_index(rbx);
__ movl(rax, iaddress(rbx));
debug_only(__ verify_local_tag(frame::TagValue, rbx));
}

@ -456,18 +452,15 @@ void TemplateTable::fast_iload2() {
transition(vtos, itos);
locals_index(rbx);
__ movl(rax, iaddress(rbx));
debug_only(__ verify_local_tag(frame::TagValue, rbx));
__ push(itos);
locals_index(rbx, 3);
__ movl(rax, iaddress(rbx));
debug_only(__ verify_local_tag(frame::TagValue, rbx));
}

void TemplateTable::fast_iload() {
transition(vtos, itos);
locals_index(rbx);
__ movl(rax, iaddress(rbx));
debug_only(__ verify_local_tag(frame::TagValue, rbx));
}

@ -476,7 +469,6 @@ void TemplateTable::lload() {
locals_index(rbx);
__ movptr(rax, laddress(rbx));
NOT_LP64(__ movl(rdx, haddress(rbx)));
debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
}

@ -484,26 +476,13 @@ void TemplateTable::fload() {
transition(vtos, ftos);
locals_index(rbx);
__ fld_s(faddress(rbx));
debug_only(__ verify_local_tag(frame::TagValue, rbx));
}

void TemplateTable::dload() {
transition(vtos, dtos);
locals_index(rbx);
if (TaggedStackInterpreter) {
// Get double out of locals array, onto temp stack and load with
|
||||
// float instruction into ST0
|
||||
__ movl(rax, laddress(rbx));
|
||||
__ movl(rdx, haddress(rbx));
|
||||
__ push(rdx); // push hi first
|
||||
__ push(rax);
|
||||
__ fld_d(Address(rsp, 0));
|
||||
__ addptr(rsp, 2*wordSize);
|
||||
debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
|
||||
} else {
|
||||
__ fld_d(daddress(rbx));
|
||||
}
|
||||
__ fld_d(daddress(rbx));
|
||||
}
|
||||
|
||||
|
||||
@ -511,7 +490,6 @@ void TemplateTable::aload() {
|
||||
transition(vtos, atos);
|
||||
locals_index(rbx);
|
||||
__ movptr(rax, aaddress(rbx));
|
||||
debug_only(__ verify_local_tag(frame::TagReference, rbx));
|
||||
}
|
||||
|
||||
|
||||
@ -527,7 +505,6 @@ void TemplateTable::wide_iload() {
|
||||
transition(vtos, itos);
|
||||
locals_index_wide(rbx);
|
||||
__ movl(rax, iaddress(rbx));
|
||||
debug_only(__ verify_local_tag(frame::TagValue, rbx));
|
||||
}
|
||||
|
||||
|
||||
@ -536,7 +513,6 @@ void TemplateTable::wide_lload() {
|
||||
locals_index_wide(rbx);
|
||||
__ movptr(rax, laddress(rbx));
|
||||
NOT_LP64(__ movl(rdx, haddress(rbx)));
|
||||
debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
|
||||
}
|
||||
|
||||
|
||||
@ -544,26 +520,13 @@ void TemplateTable::wide_fload() {
|
||||
transition(vtos, ftos);
|
||||
locals_index_wide(rbx);
|
||||
__ fld_s(faddress(rbx));
|
||||
debug_only(__ verify_local_tag(frame::TagValue, rbx));
|
||||
}
|
||||
|
||||
|
||||
void TemplateTable::wide_dload() {
|
||||
transition(vtos, dtos);
|
||||
locals_index_wide(rbx);
|
||||
if (TaggedStackInterpreter) {
|
||||
// Get double out of locals array, onto temp stack and load with
|
||||
// float instruction into ST0
|
||||
__ movl(rax, laddress(rbx));
|
||||
__ movl(rdx, haddress(rbx));
|
||||
__ push(rdx); // push hi first
|
||||
__ push(rax);
|
||||
__ fld_d(Address(rsp, 0));
|
||||
__ addl(rsp, 2*wordSize);
|
||||
debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
|
||||
} else {
|
||||
__ fld_d(daddress(rbx));
|
||||
}
|
||||
__ fld_d(daddress(rbx));
|
||||
}
|
||||
|
||||
|
||||
@ -571,7 +534,6 @@ void TemplateTable::wide_aload() {
|
||||
transition(vtos, atos);
|
||||
locals_index_wide(rbx);
|
||||
__ movptr(rax, aaddress(rbx));
|
||||
debug_only(__ verify_local_tag(frame::TagReference, rbx));
|
||||
}
|
||||
|
||||
void TemplateTable::index_check(Register array, Register index) {
|
||||
@ -672,7 +634,6 @@ void TemplateTable::fast_icaload() {
|
||||
// load index out of locals
|
||||
locals_index(rbx);
|
||||
__ movl(rax, iaddress(rbx));
|
||||
debug_only(__ verify_local_tag(frame::TagValue, rbx));
|
||||
|
||||
// rdx: array
|
||||
index_check(rdx, rax);
|
||||
@ -695,7 +656,6 @@ void TemplateTable::saload() {
|
||||
void TemplateTable::iload(int n) {
|
||||
transition(vtos, itos);
|
||||
__ movl(rax, iaddress(n));
|
||||
debug_only(__ verify_local_tag(frame::TagValue, n));
|
||||
}
|
||||
|
||||
|
||||
@ -703,39 +663,24 @@ void TemplateTable::lload(int n) {
|
||||
transition(vtos, ltos);
|
||||
__ movptr(rax, laddress(n));
|
||||
NOT_LP64(__ movptr(rdx, haddress(n)));
|
||||
debug_only(__ verify_local_tag(frame::TagCategory2, n));
|
||||
}
|
||||
|
||||
|
||||
void TemplateTable::fload(int n) {
|
||||
transition(vtos, ftos);
|
||||
__ fld_s(faddress(n));
|
||||
debug_only(__ verify_local_tag(frame::TagValue, n));
|
||||
}
|
||||
|
||||
|
||||
void TemplateTable::dload(int n) {
|
||||
transition(vtos, dtos);
|
||||
if (TaggedStackInterpreter) {
|
||||
// Get double out of locals array, onto temp stack and load with
|
||||
// float instruction into ST0
|
||||
__ movl(rax, laddress(n));
|
||||
__ movl(rdx, haddress(n));
|
||||
__ push(rdx); // push hi first
|
||||
__ push(rax);
|
||||
__ fld_d(Address(rsp, 0));
|
||||
__ addptr(rsp, 2*wordSize); // reset rsp
|
||||
debug_only(__ verify_local_tag(frame::TagCategory2, n));
|
||||
} else {
|
||||
__ fld_d(daddress(n));
|
||||
}
|
||||
__ fld_d(daddress(n));
|
||||
}
|
||||
|
||||
|
||||
void TemplateTable::aload(int n) {
|
||||
transition(vtos, atos);
|
||||
__ movptr(rax, aaddress(n));
|
||||
debug_only(__ verify_local_tag(frame::TagReference, n));
|
||||
}
|
||||
|
||||
|
||||
@ -809,7 +754,6 @@ void TemplateTable::istore() {
|
||||
transition(itos, vtos);
|
||||
locals_index(rbx);
|
||||
__ movl(iaddress(rbx), rax);
|
||||
__ tag_local(frame::TagValue, rbx);
|
||||
}
|
||||
|
||||
|
||||
@ -818,7 +762,6 @@ void TemplateTable::lstore() {
|
||||
locals_index(rbx);
|
||||
__ movptr(laddress(rbx), rax);
|
||||
NOT_LP64(__ movptr(haddress(rbx), rdx));
|
||||
__ tag_local(frame::TagCategory2, rbx);
|
||||
}
|
||||
|
||||
|
||||
@ -826,34 +769,21 @@ void TemplateTable::fstore() {
|
||||
transition(ftos, vtos);
|
||||
locals_index(rbx);
|
||||
__ fstp_s(faddress(rbx));
|
||||
__ tag_local(frame::TagValue, rbx);
|
||||
}
|
||||
|
||||
|
||||
void TemplateTable::dstore() {
|
||||
transition(dtos, vtos);
|
||||
locals_index(rbx);
|
||||
if (TaggedStackInterpreter) {
|
||||
// Store double on stack and reload into locals nonadjacently
|
||||
__ subptr(rsp, 2 * wordSize);
|
||||
__ fstp_d(Address(rsp, 0));
|
||||
__ pop(rax);
|
||||
__ pop(rdx);
|
||||
__ movptr(laddress(rbx), rax);
|
||||
__ movptr(haddress(rbx), rdx);
|
||||
__ tag_local(frame::TagCategory2, rbx);
|
||||
} else {
|
||||
__ fstp_d(daddress(rbx));
|
||||
}
|
||||
__ fstp_d(daddress(rbx));
|
||||
}
|
||||
|
||||
|
||||
void TemplateTable::astore() {
|
||||
transition(vtos, vtos);
|
||||
__ pop_ptr(rax, rdx); // will need to pop tag too
|
||||
__ pop_ptr(rax);
|
||||
locals_index(rbx);
|
||||
__ movptr(aaddress(rbx), rax);
|
||||
__ tag_local(rdx, rbx); // need to store same tag in local may be returnAddr
|
||||
}
|
||||
|
||||
|
||||
@ -862,7 +792,6 @@ void TemplateTable::wide_istore() {
|
||||
__ pop_i(rax);
|
||||
locals_index_wide(rbx);
|
||||
__ movl(iaddress(rbx), rax);
|
||||
__ tag_local(frame::TagValue, rbx);
|
||||
}
|
||||
|
||||
|
||||
@ -872,7 +801,6 @@ void TemplateTable::wide_lstore() {
|
||||
locals_index_wide(rbx);
|
||||
__ movptr(laddress(rbx), rax);
|
||||
NOT_LP64(__ movl(haddress(rbx), rdx));
|
||||
__ tag_local(frame::TagCategory2, rbx);
|
||||
}
|
||||
|
||||
|
||||
@ -888,10 +816,9 @@ void TemplateTable::wide_dstore() {
|
||||
|
||||
void TemplateTable::wide_astore() {
|
||||
transition(vtos, vtos);
|
||||
__ pop_ptr(rax, rdx);
|
||||
__ pop_ptr(rax);
|
||||
locals_index_wide(rbx);
|
||||
__ movptr(aaddress(rbx), rax);
|
||||
__ tag_local(rdx, rbx);
|
||||
}
|
||||
|
||||
|
||||
@ -990,7 +917,7 @@ void TemplateTable::aastore() {
|
||||
|
||||
// Pop stack arguments
|
||||
__ bind(done);
|
||||
__ addptr(rsp, 3 * Interpreter::stackElementSize());
|
||||
__ addptr(rsp, 3 * Interpreter::stackElementSize);
|
||||
}
|
||||
|
||||
|
||||
@ -1024,7 +951,6 @@ void TemplateTable::sastore() {
|
||||
void TemplateTable::istore(int n) {
|
||||
transition(itos, vtos);
|
||||
__ movl(iaddress(n), rax);
|
||||
__ tag_local(frame::TagValue, n);
|
||||
}
|
||||
|
||||
|
||||
@ -1032,58 +958,45 @@ void TemplateTable::lstore(int n) {
|
||||
transition(ltos, vtos);
|
||||
__ movptr(laddress(n), rax);
|
||||
NOT_LP64(__ movptr(haddress(n), rdx));
|
||||
__ tag_local(frame::TagCategory2, n);
|
||||
}
|
||||
|
||||
|
||||
void TemplateTable::fstore(int n) {
|
||||
transition(ftos, vtos);
|
||||
__ fstp_s(faddress(n));
|
||||
__ tag_local(frame::TagValue, n);
|
||||
}
|
||||
|
||||
|
||||
void TemplateTable::dstore(int n) {
|
||||
transition(dtos, vtos);
|
||||
if (TaggedStackInterpreter) {
|
||||
__ subptr(rsp, 2 * wordSize);
|
||||
__ fstp_d(Address(rsp, 0));
|
||||
__ pop(rax);
|
||||
__ pop(rdx);
|
||||
__ movl(laddress(n), rax);
|
||||
__ movl(haddress(n), rdx);
|
||||
__ tag_local(frame::TagCategory2, n);
|
||||
} else {
|
||||
__ fstp_d(daddress(n));
|
||||
}
|
||||
__ fstp_d(daddress(n));
|
||||
}
|
||||
|
||||
|
||||
void TemplateTable::astore(int n) {
|
||||
transition(vtos, vtos);
|
||||
__ pop_ptr(rax, rdx);
|
||||
__ pop_ptr(rax);
|
||||
__ movptr(aaddress(n), rax);
|
||||
__ tag_local(rdx, n);
|
||||
}
|
||||
|
||||
|
||||
void TemplateTable::pop() {
|
||||
transition(vtos, vtos);
|
||||
__ addptr(rsp, Interpreter::stackElementSize());
|
||||
__ addptr(rsp, Interpreter::stackElementSize);
|
||||
}
|
||||
|
||||
|
||||
void TemplateTable::pop2() {
|
||||
transition(vtos, vtos);
|
||||
__ addptr(rsp, 2*Interpreter::stackElementSize());
|
||||
__ addptr(rsp, 2*Interpreter::stackElementSize);
|
||||
}
|
||||
|
||||
|
||||
void TemplateTable::dup() {
|
||||
transition(vtos, vtos);
|
||||
// stack: ..., a
|
||||
__ load_ptr_and_tag(0, rax, rdx);
|
||||
__ push_ptr(rax, rdx);
|
||||
__ load_ptr(0, rax);
|
||||
__ push_ptr(rax);
|
||||
// stack: ..., a, a
|
||||
}
|
||||
|
||||
@ -1091,11 +1004,11 @@ void TemplateTable::dup() {
|
||||
void TemplateTable::dup_x1() {
|
||||
transition(vtos, vtos);
|
||||
// stack: ..., a, b
|
||||
__ load_ptr_and_tag(0, rax, rdx); // load b
|
||||
__ load_ptr_and_tag(1, rcx, rbx); // load a
|
||||
__ store_ptr_and_tag(1, rax, rdx); // store b
|
||||
__ store_ptr_and_tag(0, rcx, rbx); // store a
|
||||
__ push_ptr(rax, rdx); // push b
|
||||
__ load_ptr( 0, rax); // load b
|
||||
__ load_ptr( 1, rcx); // load a
|
||||
__ store_ptr(1, rax); // store b
|
||||
__ store_ptr(0, rcx); // store a
|
||||
__ push_ptr(rax); // push b
|
||||
// stack: ..., b, a, b
|
||||
}
|
||||
|
||||
@ -1103,15 +1016,15 @@ void TemplateTable::dup_x1() {
|
||||
void TemplateTable::dup_x2() {
|
||||
transition(vtos, vtos);
|
||||
// stack: ..., a, b, c
|
||||
__ load_ptr_and_tag(0, rax, rdx); // load c
|
||||
__ load_ptr_and_tag(2, rcx, rbx); // load a
|
||||
__ store_ptr_and_tag(2, rax, rdx); // store c in a
|
||||
__ push_ptr(rax, rdx); // push c
|
||||
__ load_ptr( 0, rax); // load c
|
||||
__ load_ptr( 2, rcx); // load a
|
||||
__ store_ptr(2, rax); // store c in a
|
||||
__ push_ptr(rax); // push c
|
||||
// stack: ..., c, b, c, c
|
||||
__ load_ptr_and_tag(2, rax, rdx); // load b
|
||||
__ store_ptr_and_tag(2, rcx, rbx); // store a in b
|
||||
__ load_ptr( 2, rax); // load b
|
||||
__ store_ptr(2, rcx); // store a in b
|
||||
// stack: ..., c, a, c, c
|
||||
__ store_ptr_and_tag(1, rax, rdx); // store b in c
|
||||
__ store_ptr(1, rax); // store b in c
|
||||
// stack: ..., c, a, b, c
|
||||
}
|
||||
|
||||
@ -1119,10 +1032,10 @@ void TemplateTable::dup_x2() {
|
||||
void TemplateTable::dup2() {
|
||||
transition(vtos, vtos);
|
||||
// stack: ..., a, b
|
||||
__ load_ptr_and_tag(1, rax, rdx); // load a
|
||||
__ push_ptr(rax, rdx); // push a
|
||||
__ load_ptr_and_tag(1, rax, rdx); // load b
|
||||
__ push_ptr(rax, rdx); // push b
|
||||
__ load_ptr(1, rax); // load a
|
||||
__ push_ptr(rax); // push a
|
||||
__ load_ptr(1, rax); // load b
|
||||
__ push_ptr(rax); // push b
|
||||
// stack: ..., a, b, a, b
|
||||
}
|
||||
|
||||
@ -1130,17 +1043,17 @@ void TemplateTable::dup2() {
|
||||
void TemplateTable::dup2_x1() {
|
||||
transition(vtos, vtos);
|
||||
// stack: ..., a, b, c
|
||||
__ load_ptr_and_tag(0, rcx, rbx); // load c
|
||||
__ load_ptr_and_tag(1, rax, rdx); // load b
|
||||
__ push_ptr(rax, rdx); // push b
|
||||
__ push_ptr(rcx, rbx); // push c
|
||||
__ load_ptr( 0, rcx); // load c
|
||||
__ load_ptr( 1, rax); // load b
|
||||
__ push_ptr(rax); // push b
|
||||
__ push_ptr(rcx); // push c
|
||||
// stack: ..., a, b, c, b, c
|
||||
__ store_ptr_and_tag(3, rcx, rbx); // store c in b
|
||||
__ store_ptr(3, rcx); // store c in b
|
||||
// stack: ..., a, c, c, b, c
|
||||
__ load_ptr_and_tag(4, rcx, rbx); // load a
|
||||
__ store_ptr_and_tag(2, rcx, rbx); // store a in 2nd c
|
||||
__ load_ptr( 4, rcx); // load a
|
||||
__ store_ptr(2, rcx); // store a in 2nd c
|
||||
// stack: ..., a, c, a, b, c
|
||||
__ store_ptr_and_tag(4, rax, rdx); // store b in a
|
||||
__ store_ptr(4, rax); // store b in a
|
||||
// stack: ..., b, c, a, b, c
|
||||
// stack: ..., b, c, a, b, c
|
||||
}
|
||||
@ -1149,19 +1062,19 @@ void TemplateTable::dup2_x1() {
|
||||
void TemplateTable::dup2_x2() {
|
||||
transition(vtos, vtos);
|
||||
// stack: ..., a, b, c, d
|
||||
__ load_ptr_and_tag(0, rcx, rbx); // load d
|
||||
__ load_ptr_and_tag(1, rax, rdx); // load c
|
||||
__ push_ptr(rax, rdx); // push c
|
||||
__ push_ptr(rcx, rbx); // push d
|
||||
__ load_ptr( 0, rcx); // load d
|
||||
__ load_ptr( 1, rax); // load c
|
||||
__ push_ptr(rax); // push c
|
||||
__ push_ptr(rcx); // push d
|
||||
// stack: ..., a, b, c, d, c, d
|
||||
__ load_ptr_and_tag(4, rax, rdx); // load b
|
||||
__ store_ptr_and_tag(2, rax, rdx); // store b in d
|
||||
__ store_ptr_and_tag(4, rcx, rbx); // store d in b
|
||||
__ load_ptr( 4, rax); // load b
|
||||
__ store_ptr(2, rax); // store b in d
|
||||
__ store_ptr(4, rcx); // store d in b
|
||||
// stack: ..., a, d, c, b, c, d
|
||||
__ load_ptr_and_tag(5, rcx, rbx); // load a
|
||||
__ load_ptr_and_tag(3, rax, rdx); // load c
|
||||
__ store_ptr_and_tag(3, rcx, rbx); // store a in c
|
||||
__ store_ptr_and_tag(5, rax, rdx); // store c in a
|
||||
__ load_ptr( 5, rcx); // load a
|
||||
__ load_ptr( 3, rax); // load c
|
||||
__ store_ptr(3, rcx); // store a in c
|
||||
__ store_ptr(5, rax); // store c in a
|
||||
// stack: ..., c, d, a, b, c, d
|
||||
// stack: ..., c, d, a, b, c, d
|
||||
}
|
||||
@ -1170,10 +1083,10 @@ void TemplateTable::dup2_x2() {
|
||||
void TemplateTable::swap() {
|
||||
transition(vtos, vtos);
|
||||
// stack: ..., a, b
|
||||
__ load_ptr_and_tag(1, rcx, rbx); // load a
|
||||
__ load_ptr_and_tag(0, rax, rdx); // load b
|
||||
__ store_ptr_and_tag(0, rcx, rbx); // store a in b
|
||||
__ store_ptr_and_tag(1, rax, rdx); // store b in a
|
||||
__ load_ptr( 1, rcx); // load a
|
||||
__ load_ptr( 0, rax); // load b
|
||||
__ store_ptr(0, rcx); // store a in b
|
||||
__ store_ptr(1, rax); // store b in a
|
||||
// stack: ..., b, a
|
||||
}
|
||||
|
||||
@ -1181,12 +1094,12 @@ void TemplateTable::swap() {
|
||||
void TemplateTable::iop2(Operation op) {
|
||||
transition(itos, itos);
|
||||
switch (op) {
|
||||
case add : __ pop_i(rdx); __ addl (rax, rdx); break;
|
||||
case add : __ pop_i(rdx); __ addl (rax, rdx); break;
|
||||
case sub : __ mov(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
|
||||
case mul : __ pop_i(rdx); __ imull(rax, rdx); break;
|
||||
case _and : __ pop_i(rdx); __ andl (rax, rdx); break;
|
||||
case _or : __ pop_i(rdx); __ orl (rax, rdx); break;
|
||||
case _xor : __ pop_i(rdx); __ xorl (rax, rdx); break;
|
||||
case mul : __ pop_i(rdx); __ imull(rax, rdx); break;
|
||||
case _and : __ pop_i(rdx); __ andl (rax, rdx); break;
|
||||
case _or : __ pop_i(rdx); __ orl (rax, rdx); break;
|
||||
case _xor : __ pop_i(rdx); __ xorl (rax, rdx); break;
|
||||
case shl : __ mov(rcx, rax); __ pop_i(rax); __ shll (rax); break; // implicit masking of lower 5 bits by Intel shift instr.
|
||||
case shr : __ mov(rcx, rax); __ pop_i(rax); __ sarl (rax); break; // implicit masking of lower 5 bits by Intel shift instr.
|
||||
case ushr : __ mov(rcx, rax); __ pop_i(rax); __ shrl (rax); break; // implicit masking of lower 5 bits by Intel shift instr.
|
||||
@ -1199,13 +1112,13 @@ void TemplateTable::lop2(Operation op) {
|
||||
transition(ltos, ltos);
|
||||
__ pop_l(rbx, rcx);
|
||||
switch (op) {
|
||||
case add : __ addl(rax, rbx); __ adcl(rdx, rcx); break;
|
||||
case sub : __ subl(rbx, rax); __ sbbl(rcx, rdx);
|
||||
__ mov(rax, rbx); __ mov(rdx, rcx); break;
|
||||
case _and: __ andl(rax, rbx); __ andl(rdx, rcx); break;
|
||||
case _or : __ orl (rax, rbx); __ orl (rdx, rcx); break;
|
||||
case _xor: __ xorl(rax, rbx); __ xorl(rdx, rcx); break;
|
||||
default : ShouldNotReachHere();
|
||||
case add : __ addl(rax, rbx); __ adcl(rdx, rcx); break;
|
||||
case sub : __ subl(rbx, rax); __ sbbl(rcx, rdx);
|
||||
__ mov (rax, rbx); __ mov (rdx, rcx); break;
|
||||
case _and : __ andl(rax, rbx); __ andl(rdx, rcx); break;
|
||||
case _or : __ orl (rax, rbx); __ orl (rdx, rcx); break;
|
||||
case _xor : __ xorl(rax, rbx); __ xorl(rdx, rcx); break;
|
||||
default : ShouldNotReachHere();
|
||||
}
|
||||
}
|
||||
|
||||
@ -1299,7 +1212,6 @@ void TemplateTable::lushr() {
|
||||
|
||||
void TemplateTable::fop2(Operation op) {
|
||||
transition(ftos, ftos);
|
||||
__ pop_ftos_to_rsp(); // pop ftos into rsp
|
||||
switch (op) {
|
||||
case add: __ fadd_s (at_rsp()); break;
|
||||
case sub: __ fsubr_s(at_rsp()); break;
|
||||
@ -1315,7 +1227,6 @@ void TemplateTable::fop2(Operation op) {
|
||||
|
||||
void TemplateTable::dop2(Operation op) {
|
||||
transition(dtos, dtos);
|
||||
__ pop_dtos_to_rsp(); // pop dtos into rsp
|
||||
|
||||
switch (op) {
|
||||
case add: __ fadd_d (at_rsp()); break;
|
||||
@ -1557,10 +1468,8 @@ void TemplateTable::lcmp() {
|
||||
|
||||
void TemplateTable::float_cmp(bool is_float, int unordered_result) {
|
||||
if (is_float) {
|
||||
__ pop_ftos_to_rsp();
|
||||
__ fld_s(at_rsp());
|
||||
} else {
|
||||
__ pop_dtos_to_rsp();
|
||||
__ fld_d(at_rsp());
|
||||
__ pop(rdx);
|
||||
}
|
||||
@ -2854,7 +2763,6 @@ void TemplateTable::fast_xaccess(TosState state) {
|
||||
transition(vtos, state);
|
||||
// get receiver
|
||||
__ movptr(rax, aaddress(0));
|
||||
debug_only(__ verify_local_tag(frame::TagReference, 0));
|
||||
// access constant pool cache
|
||||
__ get_cache_and_index_at_bcp(rcx, rdx, 2);
|
||||
__ movptr(rbx, Address(rcx,
|
||||
|
@ -58,7 +58,7 @@ static inline Address aaddress(int n) {
|
||||
}
|
||||
|
||||
static inline Address iaddress(Register r) {
|
||||
return Address(r14, r, Address::times_8, Interpreter::value_offset_in_bytes());
|
||||
return Address(r14, r, Address::times_8);
|
||||
}
|
||||
|
||||
static inline Address laddress(Register r) {
|
||||
@ -418,7 +418,6 @@ void TemplateTable::ldc2_w() {
|
||||
void TemplateTable::locals_index(Register reg, int offset) {
|
||||
__ load_unsigned_byte(reg, at_bcp(offset));
|
||||
__ negptr(reg);
|
||||
if (TaggedStackInterpreter) __ shlptr(reg, 1); // index = index*2
|
||||
}
|
||||
|
||||
void TemplateTable::iload() {
|
||||
@ -460,53 +459,45 @@ void TemplateTable::iload() {
|
||||
// Get the local value into tos
|
||||
locals_index(rbx);
|
||||
__ movl(rax, iaddress(rbx));
|
||||
debug_only(__ verify_local_tag(frame::TagValue, rbx));
|
||||
}
|
||||
|
||||
void TemplateTable::fast_iload2() {
|
||||
transition(vtos, itos);
|
||||
locals_index(rbx);
|
||||
__ movl(rax, iaddress(rbx));
|
||||
debug_only(__ verify_local_tag(frame::TagValue, rbx));
|
||||
__ push(itos);
|
||||
locals_index(rbx, 3);
|
||||
__ movl(rax, iaddress(rbx));
|
||||
debug_only(__ verify_local_tag(frame::TagValue, rbx));
|
||||
}
|
||||
|
||||
void TemplateTable::fast_iload() {
|
||||
transition(vtos, itos);
|
||||
locals_index(rbx);
|
||||
__ movl(rax, iaddress(rbx));
|
||||
debug_only(__ verify_local_tag(frame::TagValue, rbx));
|
||||
}
|
||||
|
||||
void TemplateTable::lload() {
|
||||
transition(vtos, ltos);
|
||||
locals_index(rbx);
|
||||
__ movq(rax, laddress(rbx));
|
||||
debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
|
||||
}
|
||||
|
||||
void TemplateTable::fload() {
|
||||
transition(vtos, ftos);
|
||||
locals_index(rbx);
|
||||
__ movflt(xmm0, faddress(rbx));
|
||||
debug_only(__ verify_local_tag(frame::TagValue, rbx));
|
||||
}
|
||||
|
||||
void TemplateTable::dload() {
|
||||
transition(vtos, dtos);
|
||||
locals_index(rbx);
|
||||
__ movdbl(xmm0, daddress(rbx));
|
||||
debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
|
||||
}
|
||||
|
||||
void TemplateTable::aload() {
|
||||
transition(vtos, atos);
|
||||
locals_index(rbx);
|
||||
__ movptr(rax, aaddress(rbx));
|
||||
debug_only(__ verify_local_tag(frame::TagReference, rbx));
|
||||
}
|
||||
|
||||
void TemplateTable::locals_index_wide(Register reg) {
|
||||
@ -514,42 +505,36 @@ void TemplateTable::locals_index_wide(Register reg) {
|
||||
__ bswapl(reg);
|
||||
__ shrl(reg, 16);
|
||||
__ negptr(reg);
|
||||
if (TaggedStackInterpreter) __ shlptr(reg, 1); // index = index*2
|
||||
}
|
||||
|
||||
void TemplateTable::wide_iload() {
|
||||
transition(vtos, itos);
|
||||
locals_index_wide(rbx);
|
||||
__ movl(rax, iaddress(rbx));
|
||||
debug_only(__ verify_local_tag(frame::TagValue, rbx));
|
||||
}
|
||||
|
||||
void TemplateTable::wide_lload() {
|
||||
transition(vtos, ltos);
|
||||
locals_index_wide(rbx);
|
||||
__ movq(rax, laddress(rbx));
|
||||
debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
|
||||
}
|
||||
|
||||
void TemplateTable::wide_fload() {
|
||||
transition(vtos, ftos);
|
||||
locals_index_wide(rbx);
|
||||
__ movflt(xmm0, faddress(rbx));
|
||||
debug_only(__ verify_local_tag(frame::TagValue, rbx));
|
||||
}
|
||||
|
||||
void TemplateTable::wide_dload() {
|
||||
transition(vtos, dtos);
|
||||
locals_index_wide(rbx);
|
||||
__ movdbl(xmm0, daddress(rbx));
|
||||
debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
|
||||
}
|
||||
|
||||
void TemplateTable::wide_aload() {
|
||||
transition(vtos, atos);
|
||||
locals_index_wide(rbx);
|
||||
__ movptr(rax, aaddress(rbx));
|
||||
debug_only(__ verify_local_tag(frame::TagReference, rbx));
|
||||
}
|
||||
|
||||
void TemplateTable::index_check(Register array, Register index) {
|
||||
@ -654,7 +639,6 @@ void TemplateTable::fast_icaload() {
|
||||
// load index out of locals
|
||||
locals_index(rbx);
|
||||
__ movl(rax, iaddress(rbx));
|
||||
debug_only(__ verify_local_tag(frame::TagValue, rbx));
|
||||
|
||||
// eax: index
|
||||
// rdx: array
|
||||
@ -681,31 +665,26 @@ void TemplateTable::saload() {
|
||||
void TemplateTable::iload(int n) {
|
||||
transition(vtos, itos);
|
||||
__ movl(rax, iaddress(n));
|
||||
debug_only(__ verify_local_tag(frame::TagValue, n));
|
||||
}
|
||||
|
||||
void TemplateTable::lload(int n) {
|
||||
transition(vtos, ltos);
|
||||
__ movq(rax, laddress(n));
|
||||
debug_only(__ verify_local_tag(frame::TagCategory2, n));
|
||||
}
|
||||
|
||||
void TemplateTable::fload(int n) {
|
||||
transition(vtos, ftos);
|
||||
__ movflt(xmm0, faddress(n));
|
||||
debug_only(__ verify_local_tag(frame::TagValue, n));
|
||||
}
|
||||
|
||||
void TemplateTable::dload(int n) {
|
||||
transition(vtos, dtos);
|
||||
__ movdbl(xmm0, daddress(n));
|
||||
debug_only(__ verify_local_tag(frame::TagCategory2, n));
|
||||
}
|
||||
|
||||
void TemplateTable::aload(int n) {
|
||||
transition(vtos, atos);
|
||||
__ movptr(rax, aaddress(n));
|
||||
debug_only(__ verify_local_tag(frame::TagReference, n));
|
||||
}
|
||||
|
||||
void TemplateTable::aload_0() {
|
||||
@ -791,36 +770,31 @@ void TemplateTable::istore() {
|
||||
transition(itos, vtos);
|
||||
locals_index(rbx);
|
||||
__ movl(iaddress(rbx), rax);
|
||||
__ tag_local(frame::TagValue, rbx);
|
||||
}
|
||||
|
||||
void TemplateTable::lstore() {
|
||||
transition(ltos, vtos);
|
||||
locals_index(rbx);
|
||||
__ movq(laddress(rbx), rax);
|
||||
__ tag_local(frame::TagCategory2, rbx);
|
||||
}
|
||||
|
||||
void TemplateTable::fstore() {
|
||||
transition(ftos, vtos);
|
||||
locals_index(rbx);
|
||||
__ movflt(faddress(rbx), xmm0);
|
||||
__ tag_local(frame::TagValue, rbx);
|
||||
}
|
||||
|
||||
void TemplateTable::dstore() {
|
||||
transition(dtos, vtos);
|
||||
locals_index(rbx);
|
||||
__ movdbl(daddress(rbx), xmm0);
|
||||
__ tag_local(frame::TagCategory2, rbx);
|
||||
}
|
||||
|
||||
void TemplateTable::astore() {
|
||||
transition(vtos, vtos);
|
||||
__ pop_ptr(rax, rdx); // will need to pop tag too
|
||||
__ pop_ptr(rax);
|
||||
locals_index(rbx);
|
||||
__ movptr(aaddress(rbx), rax);
|
||||
__ tag_local(rdx, rbx); // store tag from stack, might be returnAddr
|
||||
}
|
||||
|
||||
void TemplateTable::wide_istore() {
|
||||
@ -828,7 +802,6 @@ void TemplateTable::wide_istore() {
|
||||
__ pop_i();
|
||||
locals_index_wide(rbx);
|
||||
__ movl(iaddress(rbx), rax);
|
||||
__ tag_local(frame::TagValue, rbx);
|
||||
}
|
||||
|
||||
void TemplateTable::wide_lstore() {
|
||||
@ -836,7 +809,6 @@ void TemplateTable::wide_lstore() {
|
||||
__ pop_l();
|
||||
locals_index_wide(rbx);
|
||||
__ movq(laddress(rbx), rax);
|
||||
__ tag_local(frame::TagCategory2, rbx);
|
||||
}
|
||||
|
||||
void TemplateTable::wide_fstore() {
|
||||
@ -844,7 +816,6 @@ void TemplateTable::wide_fstore() {
|
||||
__ pop_f();
|
||||
locals_index_wide(rbx);
|
||||
__ movflt(faddress(rbx), xmm0);
|
||||
__ tag_local(frame::TagValue, rbx);
|
||||
}
|
||||
|
||||
void TemplateTable::wide_dstore() {
|
||||
@ -852,15 +823,13 @@ void TemplateTable::wide_dstore() {
|
||||
__ pop_d();
|
||||
locals_index_wide(rbx);
|
||||
__ movdbl(daddress(rbx), xmm0);
|
||||
__ tag_local(frame::TagCategory2, rbx);
|
||||
}
|
||||
|
||||
void TemplateTable::wide_astore() {
|
||||
transition(vtos, vtos);
|
||||
__ pop_ptr(rax, rdx); // will need to pop tag too
|
||||
__ pop_ptr(rax);
|
||||
locals_index_wide(rbx);
|
||||
__ movptr(aaddress(rbx), rax);
|
||||
__ tag_local(rdx, rbx); // store tag from stack, might be returnAddr
|
||||
}
|
||||
|
||||
void TemplateTable::iastore() {
|
||||
@ -972,7 +941,7 @@ void TemplateTable::aastore() {
|
||||
|
||||
// Pop stack arguments
|
||||
__ bind(done);
|
||||
__ addptr(rsp, 3 * Interpreter::stackElementSize());
|
||||
__ addptr(rsp, 3 * Interpreter::stackElementSize);
|
||||
}
|
||||
|
||||
void TemplateTable::bastore() {
|
||||
@ -1010,130 +979,125 @@ void TemplateTable::sastore() {
|
||||
void TemplateTable::istore(int n) {
|
||||
transition(itos, vtos);
|
||||
__ movl(iaddress(n), rax);
|
||||
__ tag_local(frame::TagValue, n);
|
||||
}
|
||||
|
||||
void TemplateTable::lstore(int n) {
|
||||
transition(ltos, vtos);
|
||||
__ movq(laddress(n), rax);
|
||||
__ tag_local(frame::TagCategory2, n);
|
||||
}
|
||||
|
||||
void TemplateTable::fstore(int n) {
|
||||
transition(ftos, vtos);
|
||||
__ movflt(faddress(n), xmm0);
|
||||
__ tag_local(frame::TagValue, n);
|
||||
}
|
||||
|
||||
void TemplateTable::dstore(int n) {
|
||||
transition(dtos, vtos);
|
||||
__ movdbl(daddress(n), xmm0);
|
||||
__ tag_local(frame::TagCategory2, n);
|
||||
}
|
||||
|
||||
void TemplateTable::astore(int n) {
|
||||
transition(vtos, vtos);
|
||||
__ pop_ptr(rax, rdx);
|
||||
__ pop_ptr(rax);
|
||||
__ movptr(aaddress(n), rax);
|
||||
__ tag_local(rdx, n);
|
||||
}
|
||||
|
||||
void TemplateTable::pop() {
|
||||
transition(vtos, vtos);
|
||||
__ addptr(rsp, Interpreter::stackElementSize());
|
||||
__ addptr(rsp, Interpreter::stackElementSize);
|
||||
}
|
||||
|
||||
void TemplateTable::pop2() {
|
||||
transition(vtos, vtos);
|
||||
__ addptr(rsp, 2 * Interpreter::stackElementSize());
|
||||
__ addptr(rsp, 2 * Interpreter::stackElementSize);
|
||||
}
|
||||
|
||||
void TemplateTable::dup() {
|
||||
transition(vtos, vtos);
|
||||
__ load_ptr_and_tag(0, rax, rdx);
|
||||
__ push_ptr(rax, rdx);
|
||||
__ load_ptr(0, rax);
|
||||
__ push_ptr(rax);
|
||||
// stack: ..., a, a
|
||||
}
|
||||
|
||||
void TemplateTable::dup_x1() {
|
||||
transition(vtos, vtos);
|
||||
// stack: ..., a, b
|
||||
__ load_ptr_and_tag(0, rax, rdx); // load b
|
||||
__ load_ptr_and_tag(1, rcx, rbx); // load a
|
||||
__ store_ptr_and_tag(1, rax, rdx); // store b
|
||||
__ store_ptr_and_tag(0, rcx, rbx); // store a
|
||||
__ push_ptr(rax, rdx); // push b
|
||||
__ load_ptr( 0, rax); // load b
|
||||
__ load_ptr( 1, rcx); // load a
|
||||
__ store_ptr(1, rax); // store b
|
||||
__ store_ptr(0, rcx); // store a
|
||||
__ push_ptr(rax); // push b
|
||||
// stack: ..., b, a, b
|
||||
}
|
||||
|
||||
void TemplateTable::dup_x2() {
|
||||
transition(vtos, vtos);
|
||||
// stack: ..., a, b, c
|
||||
__ load_ptr_and_tag(0, rax, rdx); // load c
|
||||
__ load_ptr_and_tag(2, rcx, rbx); // load a
|
||||
__ store_ptr_and_tag(2, rax, rdx); // store c in a
|
||||
__ push_ptr(rax, rdx); // push c
|
||||
__ load_ptr( 0, rax); // load c
|
||||
__ load_ptr( 2, rcx); // load a
|
||||
__ store_ptr(2, rax); // store c in a
|
||||
__ push_ptr(rax); // push c
|
||||
// stack: ..., c, b, c, c
|
||||
__ load_ptr_and_tag(2, rax, rdx); // load b
|
||||
__ store_ptr_and_tag(2, rcx, rbx); // store a in b
|
||||
__ load_ptr( 2, rax); // load b
|
||||
__ store_ptr(2, rcx); // store a in b
|
||||
// stack: ..., c, a, c, c
|
||||
__ store_ptr_and_tag(1, rax, rdx); // store b in c
|
||||
__ store_ptr(1, rax); // store b in c
|
||||
// stack: ..., c, a, b, c
|
||||
}
|
||||
|
||||
void TemplateTable::dup2() {
|
||||
transition(vtos, vtos);
|
||||
// stack: ..., a, b
|
||||
__ load_ptr_and_tag(1, rax, rdx); // load a
|
||||
__ push_ptr(rax, rdx); // push a
|
||||
__ load_ptr_and_tag(1, rax, rdx); // load b
|
||||
__ push_ptr(rax, rdx); // push b
|
||||
__ load_ptr(1, rax); // load a
|
||||
__ push_ptr(rax); // push a
|
||||
__ load_ptr(1, rax); // load b
|
||||
__ push_ptr(rax); // push b
|
||||
// stack: ..., a, b, a, b
|
||||
}
|
||||
|
||||
void TemplateTable::dup2_x1() {
|
||||
transition(vtos, vtos);
|
||||
// stack: ..., a, b, c
|
||||
__ load_ptr_and_tag(0, rcx, rbx); // load c
|
||||
__ load_ptr_and_tag(1, rax, rdx); // load b
|
||||
__ push_ptr(rax, rdx); // push b
|
||||
__ push_ptr(rcx, rbx); // push c
|
||||
__ load_ptr( 0, rcx); // load c
|
||||
__ load_ptr( 1, rax); // load b
|
||||
__ push_ptr(rax); // push b
|
||||
__ push_ptr(rcx); // push c
|
||||
// stack: ..., a, b, c, b, c
|
||||
__ store_ptr_and_tag(3, rcx, rbx); // store c in b
|
||||
__ store_ptr(3, rcx); // store c in b
|
||||
// stack: ..., a, c, c, b, c
|
||||
__ load_ptr_and_tag(4, rcx, rbx); // load a
|
||||
__ store_ptr_and_tag(2, rcx, rbx); // store a in 2nd c
|
||||
__ load_ptr( 4, rcx); // load a
|
||||
__ store_ptr(2, rcx); // store a in 2nd c
|
||||
// stack: ..., a, c, a, b, c
|
||||
__ store_ptr_and_tag(4, rax, rdx); // store b in a
|
||||
__ store_ptr(4, rax); // store b in a
|
||||
// stack: ..., b, c, a, b, c
|
||||
}
|
||||
|
||||
void TemplateTable::dup2_x2() {
|
||||
transition(vtos, vtos);
|
||||
// stack: ..., a, b, c, d
|
||||
__ load_ptr_and_tag(0, rcx, rbx); // load d
|
||||
__ load_ptr_and_tag(1, rax, rdx); // load c
|
||||
__ push_ptr(rax, rdx); // push c
|
||||
__ push_ptr(rcx, rbx); // push d
|
||||
__ load_ptr( 0, rcx); // load d
|
||||
__ load_ptr( 1, rax); // load c
|
||||
__ push_ptr(rax); // push c
|
||||
__ push_ptr(rcx); // push d
|
||||
// stack: ..., a, b, c, d, c, d
|
||||
__ load_ptr_and_tag(4, rax, rdx); // load b
|
||||
__ store_ptr_and_tag(2, rax, rdx); // store b in d
|
||||
__ store_ptr_and_tag(4, rcx, rbx); // store d in b
|
||||
__ load_ptr( 4, rax); // load b
|
||||
__ store_ptr(2, rax); // store b in d
|
||||
__ store_ptr(4, rcx); // store d in b
|
||||
// stack: ..., a, d, c, b, c, d
|
||||
__ load_ptr_and_tag(5, rcx, rbx); // load a
|
||||
__ load_ptr_and_tag(3, rax, rdx); // load c
|
||||
__ store_ptr_and_tag(3, rcx, rbx); // store a in c
|
||||
__ store_ptr_and_tag(5, rax, rdx); // store c in a
|
||||
__ load_ptr( 5, rcx); // load a
|
||||
__ load_ptr( 3, rax); // load c
|
||||
__ store_ptr(3, rcx); // store a in c
|
||||
__ store_ptr(5, rax); // store c in a
|
||||
// stack: ..., c, d, a, b, c, d
|
||||
}
|
||||
|
||||
void TemplateTable::swap() {
|
||||
transition(vtos, vtos);
|
||||
// stack: ..., a, b
|
||||
__ load_ptr_and_tag(1, rcx, rbx); // load a
|
||||
__ load_ptr_and_tag(0, rax, rdx); // load b
|
||||
__ store_ptr_and_tag(0, rcx, rbx); // store a in b
|
||||
__ store_ptr_and_tag(1, rax, rdx); // store b in a
|
||||
__ load_ptr( 1, rcx); // load a
|
||||
__ load_ptr( 0, rax); // load b
|
||||
__ store_ptr(0, rcx); // store a in b
|
||||
__ store_ptr(1, rax); // store b in a
|
||||
// stack: ..., b, a
|
||||
}
|
||||
|
||||
@ -1156,12 +1120,12 @@ void TemplateTable::iop2(Operation op) {
|
||||
void TemplateTable::lop2(Operation op) {
|
||||
transition(ltos, ltos);
|
||||
switch (op) {
|
||||
case add : __ pop_l(rdx); __ addptr (rax, rdx); break;
|
||||
case sub : __ mov(rdx, rax); __ pop_l(rax); __ subptr (rax, rdx); break;
|
||||
case _and : __ pop_l(rdx); __ andptr (rax, rdx); break;
|
||||
case _or : __ pop_l(rdx); __ orptr (rax, rdx); break;
|
||||
case _xor : __ pop_l(rdx); __ xorptr (rax, rdx); break;
|
||||
default : ShouldNotReachHere();
|
||||
case add : __ pop_l(rdx); __ addptr(rax, rdx); break;
|
||||
case sub : __ mov(rdx, rax); __ pop_l(rax); __ subptr(rax, rdx); break;
|
||||
case _and : __ pop_l(rdx); __ andptr(rax, rdx); break;
|
||||
case _or : __ pop_l(rdx); __ orptr (rax, rdx); break;
|
||||
case _xor : __ pop_l(rdx); __ xorptr(rax, rdx); break;
|
||||
default : ShouldNotReachHere();
|
||||
}
|
||||
}
|
||||
|
||||
@ -1250,7 +1214,7 @@ void TemplateTable::fop2(Operation op) {
|
||||
switch (op) {
|
||||
case add:
|
||||
__ addss(xmm0, at_rsp());
|
||||
__ addptr(rsp, Interpreter::stackElementSize());
|
||||
__ addptr(rsp, Interpreter::stackElementSize);
|
||||
break;
|
||||
case sub:
|
||||
__ movflt(xmm1, xmm0);
|
||||
@ -1259,7 +1223,7 @@ void TemplateTable::fop2(Operation op) {
|
||||
break;
|
||||
case mul:
|
||||
__ mulss(xmm0, at_rsp());
|
||||
__ addptr(rsp, Interpreter::stackElementSize());
|
||||
__ addptr(rsp, Interpreter::stackElementSize);
|
||||
break;
|
||||
case div:
|
||||
__ movflt(xmm1, xmm0);
|
||||
@ -1282,7 +1246,7 @@ void TemplateTable::dop2(Operation op) {
|
||||
switch (op) {
|
||||
case add:
|
||||
__ addsd(xmm0, at_rsp());
|
||||
__ addptr(rsp, 2 * Interpreter::stackElementSize());
|
||||
__ addptr(rsp, 2 * Interpreter::stackElementSize);
|
||||
break;
|
||||
case sub:
|
||||
__ movdbl(xmm1, xmm0);
|
||||
@ -1291,7 +1255,7 @@ void TemplateTable::dop2(Operation op) {
|
||||
break;
|
||||
case mul:
|
||||
__ mulsd(xmm0, at_rsp());
|
||||
__ addptr(rsp, 2 * Interpreter::stackElementSize());
|
||||
__ addptr(rsp, 2 * Interpreter::stackElementSize);
|
||||
break;
|
||||
case div:
|
||||
__ movdbl(xmm1, xmm0);
|
||||
@ -2782,7 +2746,6 @@ void TemplateTable::fast_xaccess(TosState state) {
|
||||
|
||||
// get receiver
|
||||
__ movptr(rax, aaddress(0));
|
||||
debug_only(__ verify_local_tag(frame::TagReference, 0));
|
||||
// access constant pool cache
|
||||
__ get_cache_and_index_at_bcp(rcx, rdx, 2);
|
||||
__ movptr(rbx,
|
||||
@ -2858,7 +2821,6 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no)
|
||||
if (load_receiver) {
|
||||
__ movl(recv, flags);
|
||||
__ andl(recv, 0xFF);
|
||||
if (TaggedStackInterpreter) __ shll(recv, 1); // index*2
|
||||
Address recv_addr(rsp, recv, Address::times_8, -Interpreter::expr_offset_in_bytes(1));
|
||||
__ movptr(recv, recv_addr);
|
||||
__ verify_oop(recv);
|
||||
@ -3610,13 +3572,11 @@ void TemplateTable::multianewarray() {
|
||||
__ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions
|
||||
// last dim is on top of stack; we want address of first one:
|
||||
// first_addr = last_addr + (ndims - 1) * wordSize
|
||||
if (TaggedStackInterpreter) __ shll(rax, 1); // index*2
|
||||
__ lea(c_rarg1, Address(rsp, rax, Address::times_8, -wordSize));
|
||||
call_VM(rax,
|
||||
CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray),
|
||||
c_rarg1);
|
||||
__ load_unsigned_byte(rbx, at_bcp(3));
|
||||
if (TaggedStackInterpreter) __ shll(rbx, 1); // index*2
|
||||
__ lea(rsp, Address(rsp, rbx, Address::times_8));
|
||||
}
|
||||
#endif // !CC_INTERP
|
||||
|
@ -1,5 +1,5 @@
//
// Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
// Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@ -6272,6 +6272,30 @@ instruct bytes_reverse_long(eRegL dst) %{
ins_pipe( ialu_reg_reg);
%}

instruct bytes_reverse_unsigned_short(eRegI dst) %{
match(Set dst (ReverseBytesUS dst));

format %{ "BSWAP $dst\n\t"
"SHR $dst,16\n\t" %}
ins_encode %{
__ bswapl($dst$$Register);
__ shrl($dst$$Register, 16);
%}
ins_pipe( ialu_reg );
%}

instruct bytes_reverse_short(eRegI dst) %{
match(Set dst (ReverseBytesS dst));

format %{ "BSWAP $dst\n\t"
"SAR $dst,16\n\t" %}
ins_encode %{
__ bswapl($dst$$Register);
__ sarl($dst$$Register, 16);
%}
ins_pipe( ialu_reg );
%}


//---------- Zeros Count Instructions ------------------------------------------

@ -1,5 +1,5 @@
//
// Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved.
// Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@ -7371,6 +7371,30 @@ instruct bytes_reverse_long(rRegL dst) %{
ins_pipe( ialu_reg);
%}

instruct bytes_reverse_unsigned_short(rRegI dst) %{
match(Set dst (ReverseBytesUS dst));

format %{ "bswapl $dst\n\t"
"shrl $dst,16\n\t" %}
ins_encode %{
__ bswapl($dst$$Register);
__ shrl($dst$$Register, 16);
%}
ins_pipe( ialu_reg );
%}

instruct bytes_reverse_short(rRegI dst) %{
match(Set dst (ReverseBytesS dst));

format %{ "bswapl $dst\n\t"
"sar $dst,16\n\t" %}
ins_encode %{
__ bswapl($dst$$Register);
__ sarl($dst$$Register, 16);
%}
ins_pipe( ialu_reg );
%}

instruct loadI_reversed(rRegI dst, memory src) %{
match(Set dst (ReverseBytesI (LoadI src)));

@ -37,27 +37,18 @@
thread->reset_last_Java_frame(); \
fixup_after_potential_safepoint()

void CppInterpreter::normal_entry(methodOop method, intptr_t UNUSED, TRAPS) {
int CppInterpreter::normal_entry(methodOop method, intptr_t UNUSED, TRAPS) {
JavaThread *thread = (JavaThread *) THREAD;
ZeroStack *stack = thread->zero_stack();

// Adjust the caller's stack frame to accomodate any additional
// local variables we have contiguously with our parameters.
int extra_locals = method->max_locals() - method->size_of_parameters();
if (extra_locals > 0) {
if (extra_locals > stack->available_words()) {
Unimplemented();
}
for (int i = 0; i < extra_locals; i++)
stack->push(0);
}

// Allocate and initialize our frame.
InterpreterFrame *frame = InterpreterFrame::build(stack, method, thread);
InterpreterFrame *frame = InterpreterFrame::build(method, CHECK_0);
thread->push_zero_frame(frame);

// Execute those bytecodes!
main_loop(0, THREAD);

// No deoptimized frames on the stack
return 0;
}

void CppInterpreter::main_loop(int recurse, TRAPS) {
@ -76,12 +67,6 @@ void CppInterpreter::main_loop(int recurse, TRAPS) {
intptr_t *result = NULL;
int result_slots = 0;

// Check we're not about to run out of stack
if (stack_overflow_imminent(thread)) {
CALL_VM_NOCHECK(InterpreterRuntime::throw_StackOverflowError(thread));
goto unwind_and_return;
}

while (true) {
// We can set up the frame anchor with everything we want at
// this point as we are thread_in_Java and no safepoints can
@ -123,9 +108,9 @@ void CppInterpreter::main_loop(int recurse, TRAPS) {
int monitor_words = frame::interpreter_frame_monitor_size();

// Allocate the space
if (monitor_words > stack->available_words()) {
Unimplemented();
}
stack->overflow_check(monitor_words, THREAD);
if (HAS_PENDING_EXCEPTION)
break;
stack->alloc(monitor_words * wordSize);

// Move the expression stack contents
@ -172,8 +157,6 @@ void CppInterpreter::main_loop(int recurse, TRAPS) {
}
}

unwind_and_return:

// Unwind the current frame
thread->pop_zero_frame();

@ -185,7 +168,7 @@ void CppInterpreter::main_loop(int recurse, TRAPS) {
stack->push(result[-i]);
}

void CppInterpreter::native_entry(methodOop method, intptr_t UNUSED, TRAPS) {
int CppInterpreter::native_entry(methodOop method, intptr_t UNUSED, TRAPS) {
// Make sure method is native and not abstract
assert(method->is_native() && !method->is_abstract(), "should be");

@ -193,17 +176,11 @@ void CppInterpreter::native_entry(methodOop method, intptr_t UNUSED, TRAPS) {
ZeroStack *stack = thread->zero_stack();

// Allocate and initialize our frame
InterpreterFrame *frame = InterpreterFrame::build(stack, method, thread);
InterpreterFrame *frame = InterpreterFrame::build(method, CHECK_0);
thread->push_zero_frame(frame);
interpreterState istate = frame->interpreter_state();
intptr_t *locals = istate->locals();

// Check we're not about to run out of stack
if (stack_overflow_imminent(thread)) {
CALL_VM_NOCHECK(InterpreterRuntime::throw_StackOverflowError(thread));
goto unwind_and_return;
}

// Update the invocation counter
if ((UseCompiler || CountCompiledCalls) && !method->is_synchronized()) {
InvocationCounter *counter = method->invocation_counter();
@ -264,9 +241,10 @@ void CppInterpreter::native_entry(methodOop method, intptr_t UNUSED, TRAPS) {
assert(function != NULL, "should be set if signature handler is");

// Build the argument list
if (handler->argument_count() * 2 > stack->available_words()) {
Unimplemented();
}
stack->overflow_check(handler->argument_count() * 2, THREAD);
if (HAS_PENDING_EXCEPTION)
goto unlock_unwind_and_return;

void **arguments;
void *mirror; {
arguments =
@ -455,25 +433,26 @@ void CppInterpreter::native_entry(methodOop method, intptr_t UNUSED, TRAPS) {
ShouldNotReachHere();
}
}

// No deoptimized frames on the stack
return 0;
}

void CppInterpreter::accessor_entry(methodOop method, intptr_t UNUSED, TRAPS) {
int CppInterpreter::accessor_entry(methodOop method, intptr_t UNUSED, TRAPS) {
JavaThread *thread = (JavaThread *) THREAD;
ZeroStack *stack = thread->zero_stack();
intptr_t *locals = stack->sp();

// Drop into the slow path if we need a safepoint check
if (SafepointSynchronize::do_call_back()) {
normal_entry(method, 0, THREAD);
return;
return normal_entry(method, 0, THREAD);
}

// Load the object pointer and drop into the slow path
// if we have a NullPointerException
oop object = LOCALS_OBJECT(0);
if (object == NULL) {
normal_entry(method, 0, THREAD);
return;
return normal_entry(method, 0, THREAD);
}

// Read the field index from the bytecode, which looks like this:
@ -495,17 +474,14 @@ void CppInterpreter::accessor_entry(methodOop method, intptr_t UNUSED, TRAPS) {
constantPoolCacheOop cache = method->constants()->cache();
ConstantPoolCacheEntry* entry = cache->entry_at(index);
if (!entry->is_resolved(Bytecodes::_getfield)) {
normal_entry(method, 0, THREAD);
return;
return normal_entry(method, 0, THREAD);
}

// Get the result and push it onto the stack
switch (entry->flag_state()) {
case ltos:
case dtos:
if (stack->available_words() < 1) {
Unimplemented();
}
stack->overflow_check(1, CHECK_0);
stack->alloc(wordSize);
break;
}
@ -585,55 +561,51 @@ void CppInterpreter::accessor_entry(methodOop method, intptr_t UNUSED, TRAPS) {
ShouldNotReachHere();
}
}

// No deoptimized frames on the stack
return 0;
}

void CppInterpreter::empty_entry(methodOop method, intptr_t UNUSED, TRAPS) {
int CppInterpreter::empty_entry(methodOop method, intptr_t UNUSED, TRAPS) {
JavaThread *thread = (JavaThread *) THREAD;
ZeroStack *stack = thread->zero_stack();

// Drop into the slow path if we need a safepoint check
if (SafepointSynchronize::do_call_back()) {
normal_entry(method, 0, THREAD);
return;
return normal_entry(method, 0, THREAD);
}

// Pop our parameters
stack->set_sp(stack->sp() + method->size_of_parameters());

// No deoptimized frames on the stack
return 0;
}

bool CppInterpreter::stack_overflow_imminent(JavaThread *thread) {
// How is the ABI stack?
address stack_top = thread->stack_base() - thread->stack_size();
int free_stack = os::current_stack_pointer() - stack_top;
if (free_stack < StackShadowPages * os::vm_page_size()) {
return true;
InterpreterFrame *InterpreterFrame::build(const methodOop method, TRAPS) {
JavaThread *thread = (JavaThread *) THREAD;
ZeroStack *stack = thread->zero_stack();

// Calculate the size of the frame we'll build, including
// any adjustments to the caller's frame that we'll make.
int extra_locals = 0;
int monitor_words = 0;
int stack_words = 0;

if (!method->is_native()) {
extra_locals = method->max_locals() - method->size_of_parameters();
stack_words = method->max_stack();
}

// How is the Zero stack?
// Throwing a StackOverflowError involves a VM call, which means
// we need a frame on the stack. We should be checking here to
// ensure that methods we call have enough room to install the
// largest possible frame, but that's more than twice the size
// of the entire Zero stack we get by default, so we just check
// we have *some* space instead...
free_stack = thread->zero_stack()->available_words() * wordSize;
if (free_stack < StackShadowPages * os::vm_page_size()) {
return true;
if (method->is_synchronized()) {
monitor_words = frame::interpreter_frame_monitor_size();
}
stack->overflow_check(
extra_locals + header_words + monitor_words + stack_words, CHECK_NULL);

return false;
}

InterpreterFrame *InterpreterFrame::build(ZeroStack* stack,
const methodOop method,
JavaThread* thread) {
int monitor_words =
method->is_synchronized() ? frame::interpreter_frame_monitor_size() : 0;
int stack_words = method->is_native() ? 0 : method->max_stack();

if (header_words + monitor_words + stack_words > stack->available_words()) {
Unimplemented();
}
// Adjust the caller's stack frame to accomodate any additional
// local variables we have contiguously with our parameters.
for (int i = 0; i < extra_locals; i++)
stack->push(0);

intptr_t *locals;
if (method->is_native())
@ -812,14 +784,13 @@ InterpreterGenerator::InterpreterGenerator(StubQueue* code)

// Deoptimization helpers

InterpreterFrame *InterpreterFrame::build(ZeroStack* stack, int size) {
InterpreterFrame *InterpreterFrame::build(int size, TRAPS) {
ZeroStack *stack = ((JavaThread *) THREAD)->zero_stack();

int size_in_words = size >> LogBytesPerWord;
assert(size_in_words * wordSize == size, "unaligned");
assert(size_in_words >= header_words, "too small");

if (size_in_words > stack->available_words()) {
Unimplemented();
}
stack->overflow_check(size_in_words, CHECK_NULL);

stack->push(0); // next_frame, filled in later
intptr_t *fp = stack->sp();
@ -870,7 +841,7 @@ int AbstractInterpreter::layout_activation(methodOop method,
int callee_extra_locals = callee_locals - callee_param_count;

if (interpreter_frame) {
intptr_t *locals = interpreter_frame->sp() + method->max_locals();
intptr_t *locals = interpreter_frame->fp() + method->max_locals();
interpreterState istate = interpreter_frame->get_interpreterState();
intptr_t *monitor_base = (intptr_t*) istate;
intptr_t *stack_base = monitor_base - monitor_words;

@ -29,19 +29,15 @@
|
||||
|
||||
public:
|
||||
// Method entries
|
||||
static void normal_entry(methodOop method, intptr_t UNUSED, TRAPS);
|
||||
static void native_entry(methodOop method, intptr_t UNUSED, TRAPS);
|
||||
static void accessor_entry(methodOop method, intptr_t UNUSED, TRAPS);
|
||||
static void empty_entry(methodOop method, intptr_t UNUSED, TRAPS);
|
||||
static int normal_entry(methodOop method, intptr_t UNUSED, TRAPS);
|
||||
static int native_entry(methodOop method, intptr_t UNUSED, TRAPS);
|
||||
static int accessor_entry(methodOop method, intptr_t UNUSED, TRAPS);
|
||||
static int empty_entry(methodOop method, intptr_t UNUSED, TRAPS);
|
||||
|
||||
public:
|
||||
// Main loop of normal_entry
|
||||
static void main_loop(int recurse, TRAPS);
|
||||
|
||||
private:
|
||||
// Stack overflow checks
|
||||
static bool stack_overflow_imminent(JavaThread *thread);
|
||||
|
||||
private:
|
||||
// Fast result type determination
|
||||
static BasicType result_type_of(methodOop method);
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 2008 Red Hat, Inc.
|
||||
* Copyright 2008, 2010 Red Hat, Inc.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -47,10 +47,10 @@ class EntryFrame : public ZeroFrame {
|
||||
};
|
||||
|
||||
public:
|
||||
static EntryFrame *build(ZeroStack* stack,
|
||||
const intptr_t* parameters,
|
||||
static EntryFrame *build(const intptr_t* parameters,
|
||||
int parameter_words,
|
||||
JavaCallWrapper* call_wrapper);
|
||||
JavaCallWrapper* call_wrapper,
|
||||
TRAPS);
|
||||
public:
|
||||
JavaCallWrapper *call_wrapper() const {
|
||||
return (JavaCallWrapper *) value_of_word(call_wrapper_off);
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 2008, 2009 Red Hat, Inc.
|
||||
* Copyright 2008, 2009, 2010 Red Hat, Inc.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -41,20 +41,30 @@ class ZeroEntry {
|
||||
}
|
||||
|
||||
private:
|
||||
typedef void (*NormalEntryFunc)(methodOop method,
|
||||
intptr_t base_pc,
|
||||
TRAPS);
|
||||
typedef void (*OSREntryFunc)(methodOop method,
|
||||
address osr_buf,
|
||||
intptr_t base_pc,
|
||||
TRAPS);
|
||||
typedef int (*NormalEntryFunc)(methodOop method,
|
||||
intptr_t base_pc,
|
||||
TRAPS);
|
||||
typedef int (*OSREntryFunc)(methodOop method,
|
||||
address osr_buf,
|
||||
intptr_t base_pc,
|
||||
TRAPS);
|
||||
|
||||
public:
|
||||
void invoke(methodOop method, TRAPS) const {
|
||||
((NormalEntryFunc) entry_point())(method, (intptr_t) this, THREAD);
|
||||
maybe_deoptimize(
|
||||
((NormalEntryFunc) entry_point())(method, (intptr_t) this, THREAD),
|
||||
THREAD);
|
||||
}
|
||||
void invoke_osr(methodOop method, address osr_buf, TRAPS) const {
|
||||
((OSREntryFunc) entry_point())(method, osr_buf, (intptr_t) this, THREAD);
|
||||
maybe_deoptimize(
|
||||
((OSREntryFunc) entry_point())(method, osr_buf, (intptr_t) this, THREAD),
|
||||
THREAD);
|
||||
}
|
||||
|
||||
private:
|
||||
static void maybe_deoptimize(int deoptimized_frames, TRAPS) {
|
||||
if (deoptimized_frames)
|
||||
CppInterpreter::main_loop(deoptimized_frames - 1, THREAD);
|
||||
}
|
||||
|
||||
public:
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 2008 Red Hat, Inc.
|
||||
* Copyright 2008, 2010 Red Hat, Inc.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -42,7 +42,7 @@ class FakeStubFrame : public ZeroFrame {
|
||||
};
|
||||
|
||||
public:
|
||||
static FakeStubFrame *build(ZeroStack* stack);
|
||||
static FakeStubFrame *build(TRAPS);
|
||||
|
||||
public:
|
||||
void identify_word(int frame_index,
|
||||
|
@ -1,6 +1,6 @@
/*
* Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2007, 2008, 2009 Red Hat, Inc.
* Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -36,6 +36,10 @@ bool frame::is_interpreted_frame() const {
return zeroframe()->is_interpreter_frame();
}

bool frame::is_fake_stub_frame() const {
return zeroframe()->is_fake_stub_frame();
}

frame frame::sender_for_entry_frame(RegisterMap *map) const {
assert(zeroframe()->is_entry_frame(), "wrong type of frame");
assert(map != NULL, "map must be set");

@ -44,14 +48,14 @@ frame frame::sender_for_entry_frame(RegisterMap *map) const {
"sender should be next Java frame");
map->clear();
assert(map->include_argument_oops(), "should be set by clear");
return frame(sender_sp(), sp() + 1);
return frame(zeroframe()->next(), sender_sp());
}

frame frame::sender_for_nonentry_frame(RegisterMap *map) const {
assert(zeroframe()->is_interpreter_frame() ||
zeroframe()->is_shark_frame() ||
zeroframe()->is_fake_stub_frame(), "wrong type of frame");
return frame(sender_sp(), sp() + 1);
return frame(zeroframe()->next(), sender_sp());
}

frame frame::sender(RegisterMap* map) const {

@ -172,8 +176,8 @@ void frame::zero_print_on_error(int frame_index,
char *valuebuf = buf + buflen;

// Print each word of the frame
for (intptr_t *addr = fp(); addr <= sp(); addr++) {
int offset = sp() - addr;
for (intptr_t *addr = sp(); addr <= fp(); addr++) {
int offset = fp() - addr;

// Fill in default values, then try and improve them
snprintf(fieldbuf, buflen, "word[%d]", offset);
@ -1,6 +1,6 @@
/*
* Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2007, 2008, 2009 Red Hat, Inc.
* Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -32,17 +32,18 @@
// Constructor
public:
frame(intptr_t* sp, intptr_t* fp);
frame(ZeroFrame* zeroframe, intptr_t* sp);

// The sp of a Zero frame is the address of the highest word in
// that frame. We keep track of the lowest address too, so the
// boundaries of the frame are available for debug printing.
private:
intptr_t* _fp;
ZeroFrame* _zeroframe;

public:
const ZeroFrame *zeroframe() const {
return _zeroframe;
}

intptr_t* fp() const {
return _fp;
return (intptr_t *) zeroframe();
}

#ifdef CC_INTERP

@ -50,10 +51,6 @@
#endif // CC_INTERP

public:
const ZeroFrame *zeroframe() const {
return (ZeroFrame *) sp();
}

const EntryFrame *zero_entryframe() const {
return zeroframe()->as_entry_frame();
}

@ -64,6 +61,9 @@
return zeroframe()->as_shark_frame();
}

public:
bool is_fake_stub_frame() const;

public:
frame sender_for_nonentry_frame(RegisterMap* map) const;
@ -1,6 +1,6 @@
/*
* Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2007, 2008, 2009 Red Hat, Inc.
* Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -26,16 +26,16 @@
// Constructors

inline frame::frame() {
_zeroframe = NULL;
_sp = NULL;
_fp = NULL;
_pc = NULL;
_cb = NULL;
_deopt_state = unknown;
}

inline frame::frame(intptr_t* sp, intptr_t* fp) {
inline frame::frame(ZeroFrame* zf, intptr_t* sp) {
_zeroframe = zf;
_sp = sp;
_fp = fp;
switch (zeroframe()->type()) {
case ZeroFrame::ENTRY_FRAME:
_pc = StubRoutines::call_stub_return_pc();

@ -66,7 +66,7 @@ inline frame::frame(intptr_t* sp, intptr_t* fp) {
// Accessors

inline intptr_t* frame::sender_sp() const {
return (intptr_t *) zeroframe()->next();
return fp() + 1;
}

inline intptr_t* frame::link() const {

@ -120,7 +120,7 @@ inline jint frame::interpreter_frame_expression_stack_direction() {
// we can distinguish identity and younger/older relationship. NULL
// represents an invalid (incomparable) frame.
inline intptr_t* frame::id() const {
return sp();
return fp();
}

inline JavaCallWrapper* frame::entry_frame_call_wrapper() const {
@ -35,6 +35,7 @@ define_pd_global(bool, ImplicitNullChecks, true);
define_pd_global(bool, UncommonNullCast, true);

define_pd_global(intx, CodeEntryAlignment, 32);
define_pd_global(intx, OptoLoopAlignment, 16);
define_pd_global(intx, InlineFrequencyCount, 100);
define_pd_global(intx, PreInflateSpin, 10);
@ -1,6 +1,6 @@
/*
* Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2008 Red Hat, Inc.
* Copyright 2008, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -55,10 +55,8 @@ class InterpreterFrame : public ZeroFrame {
};

public:
static InterpreterFrame *build(ZeroStack* stack,
const methodOop method,
JavaThread* thread);
static InterpreterFrame *build(ZeroStack* stack, int size);
static InterpreterFrame *build(const methodOop method, TRAPS);
static InterpreterFrame *build(int size, TRAPS);

public:
interpreterState interpreter_state() const {
@ -1,6 +1,6 @@
/*
* Copyright 2003-2005 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2007, 2008 Red Hat, Inc.
* Copyright 2007, 2008, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -140,9 +140,8 @@ IRT_ENTRY(address,
int required_words =
(align_size_up(sizeof(ffi_cif), wordSize) >> LogBytesPerWord) +
(method->is_static() ? 2 : 1) + method->size_of_parameters() + 1;
if (required_words > stack->available_words()) {
Unimplemented();
}

stack->overflow_check(required_words, CHECK_NULL);

intptr_t *buf = (intptr_t *) stack->alloc(required_words * wordSize);
SlowSignatureHandlerGenerator sshg(methodHandle(thread, method), buf);
@ -1,5 +1,5 @@
/*
* Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2007, 2008 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*

@ -36,26 +36,14 @@

public:
static int expr_index_at(int i) {
return stackElementWords() * i;
}
static int expr_tag_index_at(int i) {
assert(TaggedStackInterpreter, "should not call this");
Unimplemented();
return stackElementWords * i;
}

static int expr_offset_in_bytes(int i) {
return stackElementSize() * i;
}
static int expr_tag_offset_in_bytes(int i) {
assert(TaggedStackInterpreter, "should not call this");
Unimplemented();
return stackElementSize * i;
}

static int local_index_at(int i) {
assert(i <= 0, "local direction already negated");
return stackElementWords() * i + (value_offset_in_bytes() / wordSize);
}
static int local_tag_index_at(int i) {
assert(TaggedStackInterpreter, "should not call this");
Unimplemented();
return stackElementWords * i;
}
@ -1,6 +1,6 @@
/*
* Copyright 2003-2005 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2007, 2008 Red Hat, Inc.
* Copyright 2007, 2008, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -23,21 +23,31 @@
*
*/

private:
ZeroFrame* volatile _last_Java_fp;

public:
// Each arch must define reset, save, restore
// These are used by objects that only care about:
// 1 - initializing a new state (thread creation, javaCalls)
// 2 - saving a current state (javaCalls)
// 3 - restoring an old state (javaCalls)
// Note that whenever _last_Java_sp != NULL other anchor fields
// must be valid. The profiler apparently depends on this.

void clear() {
// clearing _last_Java_sp must be first
_last_Java_sp = NULL;
// fence?
_last_Java_fp = NULL;
_last_Java_pc = NULL;
}

void copy(JavaFrameAnchor* src) {
set(src->_last_Java_sp, src->_last_Java_pc, src->_last_Java_fp);
}

void set(intptr_t* sp, address pc, ZeroFrame* fp) {
// In order to make sure the transition state is valid for "this"
// We must clear _last_Java_sp before copying the rest of the new
// data

@ -46,13 +56,14 @@
// previous version (pd_cache_state) don't NULL _last_Java_sp
// unless the value is changing
//
if (_last_Java_sp != src->_last_Java_sp)
if (_last_Java_sp != sp)
_last_Java_sp = NULL;

_last_Java_pc = src->_last_Java_pc;
_last_Java_fp = fp;
_last_Java_pc = pc;
// Must be last so profiler will always see valid frame if
// has_last_frame() is true
_last_Java_sp = src->_last_Java_sp;
_last_Java_sp = sp;
}

bool walkable() {

@ -67,6 +78,10 @@
return _last_Java_sp;
}

void set_last_Java_sp(intptr_t* sp) {
_last_Java_sp = sp;
ZeroFrame* last_Java_fp() const {
return _last_Java_fp;
}

static ByteSize last_Java_fp_offset() {
return byte_offset_of(JavaFrameAnchor, _last_Java_fp);
}
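The new set(sp, pc, fp) keeps the anchor's ordering discipline: _last_Java_sp is cleared before the other fields change and written last once they are consistent, so a profiler sampling the thread never pairs a non-NULL sp with stale pc or fp. A standalone sketch of that publish pattern, using C++11 atomics purely for illustration (the HotSpot code relies on its own volatile and fencing conventions, hence the "fence?" comment above):

    #include <atomic>
    #include <cstdint>

    // All stores are sequentially consistent, so a sampling thread that checks
    // 'sp' first will never pair a valid sp with stale pc/fp values.
    struct Anchor {
      std::atomic<intptr_t*> sp{nullptr};   // non-NULL means "anchor is valid"
      std::atomic<intptr_t*> pc{nullptr};
      std::atomic<intptr_t*> fp{nullptr};

      void set(intptr_t* new_sp, intptr_t* new_pc, intptr_t* new_fp) {
        sp = nullptr;      // invalidate first
        pc = new_pc;
        fp = new_fp;
        sp = new_sp;       // publish last
      }

      void clear() {
        sp = nullptr;      // clearing sp must come first
        pc = nullptr;
        fp = nullptr;
      }
    };

    int main() {
      static intptr_t word;
      Anchor a;
      a.set(&word, nullptr, nullptr);
      a.clear();
      return 0;
    }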
@ -26,6 +26,10 @@
#include "incls/_precompiled.incl"
#include "incls/_methodHandles_zero.cpp.incl"

int MethodHandles::adapter_conversion_ops_supported_mask() {
ShouldNotCallThis();
}

void MethodHandles::generate_method_handle_stub(MacroAssembler* masm,
MethodHandles::EntryKind ek) {
ShouldNotCallThis();
hotspot/src/cpu/zero/vm/stack_zero.cpp (new file, 86 lines)
@ -0,0 +1,86 @@
/*
* Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/

#include "incls/_precompiled.incl"
#include "incls/_stack_zero.cpp.incl"

int ZeroStack::suggest_size(Thread *thread) const {
assert(needs_setup(), "already set up");
return align_size_down(abi_stack_available(thread) / 2, wordSize);
}

void ZeroStack::handle_overflow(TRAPS) {
JavaThread *thread = (JavaThread *) THREAD;

// Set up the frame anchor if it isn't already
bool has_last_Java_frame = thread->has_last_Java_frame();
if (!has_last_Java_frame) {
intptr_t *sp = thread->zero_stack()->sp();
ZeroFrame *frame = thread->top_zero_frame();
while (frame) {
if (frame->is_shark_frame())
break;

if (frame->is_interpreter_frame()) {
interpreterState istate =
frame->as_interpreter_frame()->interpreter_state();
if (istate->self_link() == istate)
break;
}

sp = ((intptr_t *) frame) + 1;
frame = frame->next();
}

if (frame == NULL)
fatal("unrecoverable stack overflow");

thread->set_last_Java_frame(frame, sp);
}

// Throw the exception
switch (thread->thread_state()) {
case _thread_in_Java:
InterpreterRuntime::throw_StackOverflowError(thread);
break;

case _thread_in_vm:
Exceptions::throw_stack_overflow_exception(thread, __FILE__, __LINE__);
break;

default:
ShouldNotReachHere();
}

// Reset the frame anchor if necessary
if (!has_last_Java_frame)
thread->reset_last_Java_frame();
}

#ifndef PRODUCT
void ZeroStack::zap(int c) {
memset(_base, c, available_words() * wordSize);
}
#endif // PRODUCT
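handle_overflow() above walks the Zero frame list looking for the innermost frame that is complete enough to expose as the last Java frame (a Shark frame, or an interpreter frame whose state has been self-linked), and gives up with fatal() if none exists. A standalone sketch of that walk over a hypothetical frame list (illustrative types only, not the HotSpot classes):

    #include <cstdio>
    #include <cstdlib>

    // Hypothetical frame record: 'complete' stands in for the checks the real
    // code performs (shark frame, or interpreter frame whose istate self-link is set).
    struct Frame {
      Frame *next;
      bool   complete;
    };

    // Find the innermost frame that is safe to publish as the last Java frame.
    static Frame *find_safe_frame(Frame *top) {
      for (Frame *f = top; f != nullptr; f = f->next) {
        if (f->complete)
          return f;
      }
      std::fprintf(stderr, "unrecoverable stack overflow\n");
      std::abort();
    }

    int main() {
      Frame outer = { nullptr, true };
      Frame inner = { &outer, false };   // still being built when the overflow hit
      std::printf("safe frame is %s\n",
                  find_safe_frame(&inner) == &outer ? "outer" : "inner");
      return 0;
    }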
@ -1,6 +1,6 @@
/*
* Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2008, 2009 Red Hat, Inc.
* Copyright 2008, 2009, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -29,14 +29,21 @@ class ZeroStack {
intptr_t *_top; // the word past the end of the stack
intptr_t *_sp; // the top word on the stack

private:
int _shadow_pages_size; // how much ABI stack must we keep free?

public:
ZeroStack()
: _base(NULL), _top(NULL), _sp(NULL) {}
: _base(NULL), _top(NULL), _sp(NULL) {
_shadow_pages_size = StackShadowPages * os::vm_page_size();
}

bool needs_setup() const {
return _base == NULL;
}

int suggest_size(Thread *thread) const;

void setup(void *mem, size_t size) {
assert(needs_setup(), "already set up");
assert(!(size & WordAlignmentMask), "unaligned");

@ -62,6 +69,9 @@ class ZeroStack {
_sp = new_sp;
}

int total_words() const {
return _top - _base;
}
int available_words() const {
return _sp - _base;
}

@ -81,6 +91,18 @@ class ZeroStack {
return _sp -= count;
}

int shadow_pages_size() const {
return _shadow_pages_size;
}
int abi_stack_available(Thread *thread) const;

public:
void overflow_check(int required_words, TRAPS);
static void handle_overflow(TRAPS);

public:
void zap(int c) PRODUCT_RETURN;

public:
static ByteSize base_offset() {
return byte_offset_of(ZeroStack, _base);
hotspot/src/cpu/zero/vm/stack_zero.inline.hpp (new file, 48 lines)
@ -0,0 +1,48 @@
/*
* Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/

// This function should match SharkStack::CreateStackOverflowCheck
inline void ZeroStack::overflow_check(int required_words, TRAPS) {
// Check the Zero stack
if (available_words() < required_words) {
handle_overflow(THREAD);
return;
}

// Check the ABI stack
if (abi_stack_available(THREAD) < 0) {
handle_overflow(THREAD);
return;
}
}

// This method returns the amount of ABI stack available for us
// to use under normal circumstances. Note that the returned
// value can be negative.
inline int ZeroStack::abi_stack_available(Thread *thread) const {
int stack_used = thread->stack_base() - (address) &stack_used;
int stack_free = thread->stack_size() - stack_used;
return stack_free - shadow_pages_size();
}
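abi_stack_available() above measures free C stack by subtracting the address of a local variable from the thread's known stack base, then reserving the shadow area. A rough standalone Linux sketch of the same measurement using pthread_getattr_np, a glibc extension (HotSpot itself takes the base and size from its Thread object, so this is only an illustration of the technique):

    #include <pthread.h>
    #include <cstdio>
    #include <cstddef>

    // Estimate the native stack bytes still usable by this thread, minus a
    // caller-chosen "shadow" reserve (analogous to ZeroStack::shadow_pages_size).
    static long native_stack_available(size_t shadow_bytes) {
      pthread_attr_t attr;
      void *stack_addr;     // lowest address of the stack mapping
      size_t stack_size;
      if (pthread_getattr_np(pthread_self(), &attr) != 0)
        return -1;
      pthread_attr_getstack(&attr, &stack_addr, &stack_size);
      pthread_attr_destroy(&attr);

      char probe;           // its address approximates the current stack pointer
      size_t used = ((char *) stack_addr + stack_size) - &probe;
      return (long) (stack_size - used) - (long) shadow_bytes;
    }

    int main() {
      std::printf("available: %ld bytes\n", native_stack_available(64 * 1024));
      return 0;
    }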
@ -1,6 +1,6 @@
/*
* Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2007, 2008 Red Hat, Inc.
* Copyright 2007, 2008, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -51,46 +51,48 @@ class StubGenerator: public StubCodeGenerator {
// Set up the stack if necessary
bool stack_needs_teardown = false;
if (stack->needs_setup()) {
size_t stack_used = thread->stack_base() - (address) &stack_used;
size_t stack_free = thread->stack_size() - stack_used;
size_t zero_stack_size = align_size_down(stack_free / 2, wordSize);

size_t zero_stack_size = stack->suggest_size(thread);
stack->setup(alloca(zero_stack_size), zero_stack_size);
stack_needs_teardown = true;
}

// Allocate and initialize our frame
thread->push_zero_frame(
EntryFrame::build(stack, parameters, parameter_words, call_wrapper));
EntryFrame *frame =
EntryFrame::build(parameters, parameter_words, call_wrapper, THREAD);

// Make the call
Interpreter::invoke_method(method, entry_point, THREAD);

// Store result depending on type
if (!HAS_PENDING_EXCEPTION) {
switch (result_type) {
case T_INT:
*(jint *) result = *(jint *) stack->sp();
break;
case T_LONG:
*(jlong *) result = *(jlong *) stack->sp();
break;
case T_FLOAT:
*(jfloat *) result = *(jfloat *) stack->sp();
break;
case T_DOUBLE:
*(jdouble *) result = *(jdouble *) stack->sp();
break;
case T_OBJECT:
*(oop *) result = *(oop *) stack->sp();
break;
default:
ShouldNotReachHere();
}
}
// Push the frame
thread->push_zero_frame(frame);

// Unwind our frame
thread->pop_zero_frame();
// Make the call
Interpreter::invoke_method(method, entry_point, THREAD);

// Store the result
if (!HAS_PENDING_EXCEPTION) {
switch (result_type) {
case T_INT:
*(jint *) result = *(jint *) stack->sp();
break;
case T_LONG:
*(jlong *) result = *(jlong *) stack->sp();
break;
case T_FLOAT:
*(jfloat *) result = *(jfloat *) stack->sp();
break;
case T_DOUBLE:
*(jdouble *) result = *(jdouble *) stack->sp();
break;
case T_OBJECT:
*(oop *) result = *(oop *) stack->sp();
break;
default:
ShouldNotReachHere();
}
}

// Unwind the frame
thread->pop_zero_frame();
}

// Tear down the stack if necessary
if (stack_needs_teardown)

@ -226,13 +228,13 @@ void StubGenerator_generate(CodeBuffer* code, bool all) {
StubGenerator g(code, all);
}

EntryFrame *EntryFrame::build(ZeroStack* stack,
const intptr_t* parameters,
EntryFrame *EntryFrame::build(const intptr_t* parameters,
int parameter_words,
JavaCallWrapper* call_wrapper) {
if (header_words + parameter_words > stack->available_words()) {
Unimplemented();
}
JavaCallWrapper* call_wrapper,
TRAPS) {

ZeroStack *stack = ((JavaThread *) THREAD)->zero_stack();
stack->overflow_check(header_words + parameter_words, CHECK_NULL);

stack->push(0); // next_frame, filled in later
intptr_t *fp = stack->sp();
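In the rewritten call stub, the first Java call on a thread sizes the Zero stack with suggest_size() and carves it out of the native stack with alloca(), so the memory disappears when the stub returns. A simplified standalone sketch of allocating and handing off such a scratch region (hypothetical names; the real code registers the region with the ZeroStack and tears it down explicitly):

    #include <alloca.h>
    #include <cstdio>
    #include <cstring>

    struct ScratchStack {
      char  *base;
      size_t size;
    };

    static void run_with_scratch(ScratchStack *ss) {
      // Pretend this is the interpreter using the carved-out region.
      std::memset(ss->base, 0, ss->size);
      std::printf("using %zu bytes of scratch stack\n", ss->size);
    }

    int main() {
      size_t scratch_size = 64 * 1024;              // stand-in for suggest_size()
      ScratchStack ss;
      ss.base = (char *) alloca(scratch_size);      // freed automatically when main returns
      ss.size = scratch_size;
      run_with_scratch(&ss);
      return 0;
    }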
@ -192,7 +192,8 @@ int LinuxAttachListener::init() {
res = ::bind(listener, (struct sockaddr*)&addr, sizeof(addr));
}
if (res == -1) {
sprintf(path, "%s/.java_pid%d", os::get_temp_directory(), os::current_process_id());
snprintf(path, PATH_MAX+1, "%s/.java_pid%d",
os::get_temp_directory(), os::current_process_id());
strcpy(addr.sun_path, path);
::unlink(path);
res = ::bind(listener, (struct sockaddr*)&addr, sizeof(addr));

@ -460,13 +461,14 @@ bool AttachListener::is_init_trigger() {
if (init_at_startup() || is_initialized()) {
return false; // initialized at startup or already initialized
}
char fn[32];
char fn[PATH_MAX+1];
sprintf(fn, ".attach_pid%d", os::current_process_id());
int ret;
struct stat64 st;
RESTARTABLE(::stat64(fn, &st), ret);
if (ret == -1) {
sprintf(fn, "/tmp/.attach_pid%d", os::current_process_id());
snprintf(fn, sizeof(fn), "%s/.attach_pid%d",
os::get_temp_directory(), os::current_process_id());
RESTARTABLE(::stat64(fn, &st), ret);
}
if (ret == 0) {
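The pattern in this hunk repeats across the platform files below: sprintf() into a small fixed buffer becomes snprintf() bounded by the buffer size, and the hard-coded /tmp prefix becomes os::get_temp_directory(). A small standalone illustration of the bounded formatting and truncation check (generic C++, not the HotSpot helpers; the TMPDIR fallback and the literal pid are stand-ins):

    #include <cstdio>
    #include <cstdlib>
    #include <climits>

    int main() {
      const char *tmpdir = std::getenv("TMPDIR");   // stand-in for a configurable temp dir
      if (tmpdir == nullptr) tmpdir = "/tmp";

      char path[PATH_MAX + 1];
      // snprintf never writes past sizeof(path) and always NUL-terminates;
      // the return value tells us whether the result was truncated.
      int n = std::snprintf(path, sizeof(path), "%s/.java_pid%d", tmpdir, 12345);
      if (n < 0 || (size_t) n >= sizeof(path))
        std::fprintf(stderr, "path truncated\n");
      else
        std::printf("%s\n", path);
      return 0;
    }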
@ -1522,7 +1522,10 @@ int os::current_process_id() {

const char* os::dll_file_extension() { return ".so"; }

const char* os::get_temp_directory() { return "/tmp/"; }
const char* os::get_temp_directory() {
const char *prop = Arguments::get_property("java.io.tmpdir");
return prop == NULL ? "/tmp" : prop;
}

static bool file_exists(const char* filename) {
struct stat statbuf;

@ -2302,10 +2305,11 @@ void linux_wrap_code(char* base, size_t size) {
return;
}

char buf[40];
char buf[PATH_MAX+1];
int num = Atomic::add(1, &cnt);

sprintf(buf, "/tmp/hs-vm-%d-%d", os::current_process_id(), num);
snprintf(buf, sizeof(buf), "%s/hs-vm-%d-%d",
os::get_temp_directory(), os::current_process_id(), num);
unlink(buf);

int fd = open(buf, O_CREAT | O_RDWR, S_IRWXU);

@ -2784,7 +2788,7 @@ char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
}

// attach to the region
addr = (char*)shmat(shmid, NULL, 0);
addr = (char*)shmat(shmid, req_addr, 0);
int err = errno;

// Remove shmid. If shmat() is successful, the actual shared memory segment

@ -3491,7 +3495,8 @@ void os::Linux::set_signal_handler(int sig, bool set_installed) {
// libjsig also interposes the sigaction() call below and saves the
// old sigaction on it own.
} else {
fatal2("Encountered unexpected pre-existing sigaction handler %#lx for signal %d.", (long)oldhand, sig);
fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
"%#lx for signal %d.", (long)oldhand, sig));
}
}

@ -3813,7 +3818,8 @@ void os::init(void) {

Linux::set_page_size(sysconf(_SC_PAGESIZE));
if (Linux::page_size() == -1) {
fatal1("os_linux.cpp: os::init: sysconf failed (%s)", strerror(errno));
fatal(err_msg("os_linux.cpp: os::init: sysconf failed (%s)",
strerror(errno)));
}
init_page_sizes((size_t) Linux::page_size());
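The other recurring change in these hunks is retiring the fixed-arity fatal1()/fatal2() calls in favour of fatal(err_msg(...)), where the message is fully formatted before the single-string fatal() runs. A standalone sketch of that shape built on vsnprintf (this shows only the formatting idea; HotSpot's err_msg returns a resource-area string inside the VM and is not this function):

    #include <cstdarg>
    #include <cstdio>
    #include <cstdlib>
    #include <string>

    // Format a message up front so the reporting function only ever
    // deals with a finished string.
    static std::string err_msg(const char *fmt, ...) {
      char buf[512];
      va_list ap;
      va_start(ap, fmt);
      std::vsnprintf(buf, sizeof(buf), fmt, ap);
      va_end(ap);
      return std::string(buf);
    }

    static void fatal(const std::string &msg) {
      std::fprintf(stderr, "fatal: %s\n", msg.c_str());
      std::abort();
    }

    int main() {
      int err = 42;
      fatal(err_msg("heap walk aborted with error %d", err));  // mirrors the new call shape
      return 0;
    }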
@ -145,11 +145,11 @@ static char* get_user_tmp_dir(const char* user) {

const char* tmpdir = os::get_temp_directory();
const char* perfdir = PERFDATA_NAME;
size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 2;
size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 3;
char* dirname = NEW_C_HEAP_ARRAY(char, nbytes);

// construct the path name to user specific tmp directory
snprintf(dirname, nbytes, "%s%s_%s", tmpdir, perfdir, user);
snprintf(dirname, nbytes, "%s/%s_%s", tmpdir, perfdir, user);

return dirname;
}

@ -331,8 +331,9 @@ static char* get_user_name_slow(int vmid, TRAPS) {
}

char* usrdir_name = NEW_C_HEAP_ARRAY(char,
strlen(tmpdirname) + strlen(dentry->d_name) + 1);
strlen(tmpdirname) + strlen(dentry->d_name) + 2);
strcpy(usrdir_name, tmpdirname);
strcat(usrdir_name, "/");
strcat(usrdir_name, dentry->d_name);

DIR* subdirp = os::opendir(usrdir_name);
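The allocation grows from +2 to +3 bytes because the format string now inserts a '/' between the temp directory and the perfdata name: the buffer must hold the directory, the separator, the name, the '_', the user name, and the trailing NUL. A quick standalone check of that arithmetic (the directory and names here are examples, not the VM's values):

    #include <cstdio>
    #include <cstring>

    int main() {
      const char *tmpdir  = "/tmp";
      const char *perfdir = "hsperfdata";   // stand-in for PERFDATA_NAME
      const char *user    = "alice";

      // '/' + '_' + terminating NUL account for the extra 3 bytes.
      size_t nbytes = std::strlen(tmpdir) + std::strlen(perfdir) + std::strlen(user) + 3;
      char *dirname = new char[nbytes];
      int n = std::snprintf(dirname, nbytes, "%s/%s_%s", tmpdir, perfdir, user);
      std::printf("needed %d chars, allocated %zu bytes: %s\n", n, nbytes, dirname);
      delete[] dirname;
      return 0;
    }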
@ -375,7 +375,8 @@ int SolarisAttachListener::create_door() {
return -1;
}

sprintf(door_path, "%s/.java_pid%d", os::get_temp_directory(), os::current_process_id());
snprintf(door_path, sizeof(door_path), "%s/.java_pid%d",
os::get_temp_directory(), os::current_process_id());
RESTARTABLE(::creat(door_path, S_IRUSR | S_IWUSR), fd);

if (fd == -1) {

@ -591,13 +592,14 @@ bool AttachListener::is_init_trigger() {
if (init_at_startup() || is_initialized()) {
return false; // initialized at startup or already initialized
}
char fn[32];
char fn[PATH_MAX+1];
sprintf(fn, ".attach_pid%d", os::current_process_id());
int ret;
struct stat64 st;
RESTARTABLE(::stat64(fn, &st), ret);
if (ret == -1) {
sprintf(fn, "/tmp/.attach_pid%d", os::current_process_id());
snprintf(fn, sizeof(fn), "%s/.attach_pid%d",
os::get_temp_directory(), os::current_process_id());
RESTARTABLE(::stat64(fn, &st), ret);
}
if (ret == 0) {
@ -676,15 +676,6 @@ bool os::have_special_privileges() {
}

static char* get_property(char* name, char* buffer, int buffer_size) {
if (os::getenv(name, buffer, buffer_size)) {
return buffer;
}
static char empty[] = "";
return empty;
}

void os::init_system_properties_values() {
char arch[12];
sysinfo(SI_ARCHITECTURE, arch, sizeof(arch));

@ -1576,7 +1567,8 @@ int os::allocate_thread_local_storage() {
// treat %g2 as a caller-save register, preserving it in a %lN.
thread_key_t tk;
if (thr_keycreate( &tk, NULL ) )
fatal1("os::allocate_thread_local_storage: thr_keycreate failed (%s)", strerror(errno));
fatal(err_msg("os::allocate_thread_local_storage: thr_keycreate failed "
"(%s)", strerror(errno)));
return int(tk);
}

@ -1594,7 +1586,8 @@ void os::thread_local_storage_at_put(int index, void* value) {
if (errno == ENOMEM) {
vm_exit_out_of_memory(SMALLINT, "thr_setspecific: out of swap space");
} else {
fatal1("os::thread_local_storage_at_put: thr_setspecific failed (%s)", strerror(errno));
fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed "
"(%s)", strerror(errno)));
}
} else {
ThreadLocalStorage::set_thread_in_slot ((Thread *) value) ;

@ -1747,7 +1740,7 @@ jlong getTimeMillis() {
jlong os::javaTimeMillis() {
timeval t;
if (gettimeofday( &t, NULL) == -1)
fatal1("os::javaTimeMillis: gettimeofday (%s)", strerror(errno));
fatal(err_msg("os::javaTimeMillis: gettimeofday (%s)", strerror(errno)));
return jlong(t.tv_sec) * 1000 + jlong(t.tv_usec) / 1000;
}

@ -1826,7 +1819,10 @@ void os::set_error_file(const char *logfile) {}

const char* os::dll_file_extension() { return ".so"; }

const char* os::get_temp_directory() { return "/tmp/"; }
const char* os::get_temp_directory() {
const char *prop = Arguments::get_property("java.io.tmpdir");
return prop == NULL ? "/tmp" : prop;
}

static bool file_exists(const char* filename) {
struct stat statbuf;

@ -4239,7 +4235,8 @@ void os::Solaris::set_signal_handler(int sig, bool set_installed, bool oktochain
// libjsig also interposes the sigaction() call below and saves the
// old sigaction on it own.
} else {
fatal2("Encountered unexpected pre-existing sigaction handler %#lx for signal %d.", (long)oldhand, sig);
fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
"%#lx for signal %d.", (long)oldhand, sig));
}
}

@ -4770,7 +4767,8 @@ void os::init(void) {

page_size = sysconf(_SC_PAGESIZE);
if (page_size == -1)
fatal1("os_solaris.cpp: os::init: sysconf failed (%s)", strerror(errno));
fatal(err_msg("os_solaris.cpp: os::init: sysconf failed (%s)",
strerror(errno)));
init_page_sizes((size_t) page_size);

Solaris::initialize_system_info();

@ -4781,7 +4779,7 @@ void os::init(void) {

int fd = open("/dev/zero", O_RDWR);
if (fd < 0) {
fatal1("os::init: cannot open /dev/zero (%s)", strerror(errno));
fatal(err_msg("os::init: cannot open /dev/zero (%s)", strerror(errno)));
} else {
Solaris::set_dev_zero_fd(fd);
@ -147,11 +147,11 @@ static char* get_user_tmp_dir(const char* user) {

const char* tmpdir = os::get_temp_directory();
const char* perfdir = PERFDATA_NAME;
size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 2;
size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 3;
char* dirname = NEW_C_HEAP_ARRAY(char, nbytes);

// construct the path name to user specific tmp directory
snprintf(dirname, nbytes, "%s%s_%s", tmpdir, perfdir, user);
snprintf(dirname, nbytes, "%s/%s_%s", tmpdir, perfdir, user);

return dirname;
}

@ -322,8 +322,9 @@ static char* get_user_name_slow(int vmid, TRAPS) {
}

char* usrdir_name = NEW_C_HEAP_ARRAY(char,
strlen(tmpdirname) + strlen(dentry->d_name) + 1);
strlen(tmpdirname) + strlen(dentry->d_name) + 2);
strcpy(usrdir_name, tmpdirname);
strcat(usrdir_name, "/");
strcat(usrdir_name, dentry->d_name);

DIR* subdirp = os::opendir(usrdir_name);
@ -47,7 +47,8 @@ ThreadCritical::ThreadCritical() {
thread_t owner = thr_self();
if (global_mut_owner != owner) {
if (os::Solaris::mutex_lock(&global_mut))
fatal1("ThreadCritical::ThreadCritical: mutex_lock failed (%s)", strerror(errno));
fatal(err_msg("ThreadCritical::ThreadCritical: mutex_lock failed (%s)",
strerror(errno)));
assert(global_mut_count == 0, "must have clean count");
assert(global_mut_owner == -1, "must have clean owner");
}

@ -66,7 +67,8 @@ ThreadCritical::~ThreadCritical() {
if (global_mut_count == 0) {
global_mut_owner = -1;
if (os::Solaris::mutex_unlock(&global_mut))
fatal1("ThreadCritical::~ThreadCritical: mutex_unlock failed (%s)", strerror(errno));
fatal(err_msg("ThreadCritical::~ThreadCritical: mutex_unlock failed "
"(%s)", strerror(errno)));
}
} else {
assert (Threads::number_of_threads() == 0, "valid only during initialization");
@ -724,7 +724,7 @@ jlong offset() {
java_origin.wMilliseconds = 0;
FILETIME jot;
if (!SystemTimeToFileTime(&java_origin, &jot)) {
fatal1("Error = %d\nWindows error", GetLastError());
fatal(err_msg("Error = %d\nWindows error", GetLastError()));
}
_calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
_has_calculated_offset = 1;

@ -998,15 +998,16 @@ os::closedir(DIR *dirp)

const char* os::dll_file_extension() { return ".dll"; }

const char * os::get_temp_directory()
{
static char path_buf[MAX_PATH];
if (GetTempPath(MAX_PATH, path_buf)>0)
return path_buf;
else{
path_buf[0]='\0';
return path_buf;
}
const char* os::get_temp_directory() {
const char *prop = Arguments::get_property("java.io.tmpdir");
if (prop != 0) return prop;
static char path_buf[MAX_PATH];
if (GetTempPath(MAX_PATH, path_buf)>0)
return path_buf;
else{
path_buf[0]='\0';
return path_buf;
}
}

static bool file_exists(const char* filename) {

@ -4094,7 +4095,7 @@ bool os::check_heap(bool force) {
}
int err = GetLastError();
if (err != ERROR_NO_MORE_ITEMS && err != ERROR_CALL_NOT_IMPLEMENTED) {
fatal1("heap walk aborted with error %d", err);
fatal(err_msg("heap walk aborted with error %d", err));
}
HeapUnlock(heap);
}
|
@ -149,11 +149,11 @@ static char* get_user_tmp_dir(const char* user) {
|
||||
|
||||
const char* tmpdir = os::get_temp_directory();
|
||||
const char* perfdir = PERFDATA_NAME;
|
||||
size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 2;
|
||||
size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 3;
|
||||
char* dirname = NEW_C_HEAP_ARRAY(char, nbytes);
|
||||
|
||||
// construct the path name to user specific tmp directory
|
||||
_snprintf(dirname, nbytes, "%s%s_%s", tmpdir, perfdir, user);
|
||||
_snprintf(dirname, nbytes, "%s\\%s_%s", tmpdir, perfdir, user);
|
||||
|
||||
return dirname;
|
||||
}
|
||||
@ -318,8 +318,9 @@ static char* get_user_name_slow(int vmid) {
|
||||
}
|
||||
|
||||
char* usrdir_name = NEW_C_HEAP_ARRAY(char,
|
||||
strlen(tmpdirname) + strlen(dentry->d_name) + 1);
|
||||
strlen(tmpdirname) + strlen(dentry->d_name) + 2);
|
||||
strcpy(usrdir_name, tmpdirname);
|
||||
strcat(usrdir_name, "\\");
|
||||
strcat(usrdir_name, dentry->d_name);
|
||||
|
||||
DIR* subdirp = os::opendir(usrdir_name);
|
||||
|
@ -153,7 +153,7 @@ static void current_stack_region(address* bottom, size_t* size) {
if (rslt == ENOMEM) {
vm_exit_out_of_memory(0, "pthread_getattr_np");
} else {
fatal1("pthread_getattr_np failed with errno = %d", rslt);
fatal(err_msg("pthread_getattr_np failed with errno = %d", rslt));
}
}
@ -1,5 +1,5 @@
/*
* Copyright 2000-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -30,9 +30,9 @@ define_pd_global(bool, DontYieldALot, false);
define_pd_global(intx, ThreadStackSize, 1024); // 0 => use system default
define_pd_global(intx, VMThreadStackSize, 1024);
#else
// ThreadStackSize 320 allows TaggedStackInterpreter and a couple of test cases
// to run while keeping the number of threads that can be created high.
// System default ThreadStackSize appears to be 512 which is too big.
// ThreadStackSize 320 allows a couple of test cases to run while
// keeping the number of threads that can be created high. System
// default ThreadStackSize appears to be 512 which is too big.
define_pd_global(intx, ThreadStackSize, 320);
define_pd_global(intx, VMThreadStackSize, 512);
#endif // AMD64
Some files were not shown because too many files have changed in this diff.