commit 5d6c7f37ba
Merge
@@ -63,3 +63,6 @@ b6f633a93ae0ec4555ff4bf756f5e2150c9bdede jdk7-b85
c94d9cc81f495d97817eba9d71b84fc45f7661a5 jdk7-b86
b7456c473862048fa70ed8092313a4ef0a55d403 jdk7-b87
7077b95d42f6b3942a8751bba033801ff50e5889 jdk7-b88
44158f6d3b94c0fa020e33632532473d92d1ea96 jdk7-b89
1d1927f9ec097b62c913921e2dfa5dbaf5dc325b jdk7-b90
308ad8f68b8dd68e22d73dd490e110059b732422 jdk7-b91

@@ -63,3 +63,6 @@ cf26288a114be67c39f2758959ce50b60f5ae330 jdk7-b85
433a60a9c0bf1b26ee7e65cebaa89c541f497aed jdk7-b86
6b1069f53fbc30663ccef49d78c31bb7d6967bde jdk7-b87
82135c848d5fcddb065e98ae77b81077c858f593 jdk7-b88
7f1ba4459972bf84b8201dc1cc4f62b1fe1c74f4 jdk7-b89
425ba3efabbfe0b188105c10aaf7c3c8fa8d1a38 jdk7-b90
97d8b6c659c29c8493a8b2b72c2796a021a8cf79 jdk7-b91

@@ -63,3 +63,6 @@ c67a9df7bc0ca291f08f9a9cc05cb78ea15d25e6 jdk7-b85
6253e28826d16cf1aecc39ce04c8de1f6bf2df5f jdk7-b86
09a41111a401d327f65e453384d976a10154d9ea jdk7-b87
39e14d2da687c7e592142137517aaf689544820f jdk7-b88
bb4424c5e778b842c064a8b1aa902b35f4397654 jdk7-b89
56ce07b0eb47b93a98a72adef0f21e602c460623 jdk7-b90
bcd2fc089227559ac5be927923609fac29f067fa jdk7-b91

@@ -87,3 +87,8 @@ bf823ef06b4f211e66988d76a2e2669be5c0820e jdk7-b86
07226e9eab8f74b37346b32715f829a2ef2c3188 hs18-b01
e7e7e36ccdb5d56edd47e5744351202d38f3b7ad jdk7-b87
4b60f23c42231f7ecd62ad1fcb6a9ca26fa57d1b jdk7-b88
15836273ac2494f36ef62088bc1cb6f3f011f565 jdk7-b89
4b60f23c42231f7ecd62ad1fcb6a9ca26fa57d1b hs18-b02
605c9707a766ff518cd841fc04f9bb4b36a3a30b jdk7-b90
e0a1a502e402dbe7bf2d9102b4084a7e79a99a9b jdk7-b91
25f53b53aaa3eb8b2d5391a1e8de9a76ae1dd8a2 hs18-b03
@@ -884,9 +884,12 @@ static bool read_shared_lib_info(struct ps_prochandle* ph) {
}

// read name of the shared object
if (read_string(ph, (uintptr_t) lib_name_addr, lib_name, sizeof(lib_name)) != true) {
lib_name[0] = '\0';
if (lib_name_addr != 0 &&
read_string(ph, (uintptr_t) lib_name_addr, lib_name, sizeof(lib_name)) != true) {
print_debug("can't read shared object name\n");
return false;
// don't let failure to read the name stop opening the file. If something is really wrong
// it will fail later.
}

if (lib_name[0] != '\0') {
@@ -1,5 +1,5 @@
/*
* Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2005-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -494,6 +494,68 @@ public class CommandProcessor {
}
}
},
new Command("revptrs", "revptrs address", false) {
public void doit(Tokens t) {
int tokens = t.countTokens();
if (tokens != 1 && (tokens != 2 || !t.nextToken().equals("-c"))) {
usage();
return;
}
boolean chase = tokens == 2;
ReversePtrs revptrs = VM.getVM().getRevPtrs();
if (revptrs == null) {
out.println("Computing reverse pointers...");
ReversePtrsAnalysis analysis = new ReversePtrsAnalysis();
final boolean[] complete = new boolean[1];
HeapProgressThunk thunk = new HeapProgressThunk() {
public void heapIterationFractionUpdate(double d) {}
public synchronized void heapIterationComplete() {
complete[0] = true;
notify();
}
};
analysis.setHeapProgressThunk(thunk);
analysis.run();
while (!complete[0]) {
synchronized (thunk) {
try {
thunk.wait();
} catch (Exception e) {
}
}
}
revptrs = VM.getVM().getRevPtrs();
out.println("Done.");
}
Address a = VM.getVM().getDebugger().parseAddress(t.nextToken());
if (VM.getVM().getUniverse().heap().isInReserved(a)) {
OopHandle handle = a.addOffsetToAsOopHandle(0);
Oop oop = VM.getVM().getObjectHeap().newOop(handle);
ArrayList ptrs = revptrs.get(oop);
if (ptrs == null) {
out.println("no live references to " + a);
} else {
if (chase) {
while (ptrs.size() == 1) {
LivenessPathElement e = (LivenessPathElement)ptrs.get(0);
ByteArrayOutputStream bos = new ByteArrayOutputStream();
Oop.printOopValueOn(e.getObj(), new PrintStream(bos));
out.println(bos.toString());
ptrs = revptrs.get(e.getObj());
}
} else {
for (int i = 0; i < ptrs.size(); i++) {
LivenessPathElement e = (LivenessPathElement)ptrs.get(i);
ByteArrayOutputStream bos = new ByteArrayOutputStream();
Oop.printOopValueOn(e.getObj(), new PrintStream(bos));
out.println(bos.toString());
oop = e.getObj();
}
}
}
}
}
},
new Command("inspect", "inspect expression", false) {
public void doit(Tokens t) {
if (t.countTokens() != 1) {
@@ -816,8 +878,24 @@ public class CommandProcessor {
dumpType(type);
} else {
Iterator i = agent.getTypeDataBase().getTypes();
// Make sure the types are emitted in an order than can be read back in
HashSet emitted = new HashSet();
Stack pending = new Stack();
while (i.hasNext()) {
dumpType((Type)i.next());
Type n = (Type)i.next();
if (emitted.contains(n.getName())) {
continue;
}

while (n != null && !emitted.contains(n.getName())) {
pending.push(n);
n = n.getSuperclass();
}
while (!pending.empty()) {
n = (Type)pending.pop();
dumpType(n);
emitted.add(n.getName());
}
}
}
}
@@ -846,83 +924,105 @@ public class CommandProcessor {

}
},
new Command("search", "search [ heap | codecache | threads ] value", false) {
new Command("search", "search [ heap | perm | rawheap | codecache | threads ] value", false) {
public void doit(Tokens t) {
if (t.countTokens() != 2) {
usage();
} else {
String type = t.nextToken();
final Address value = VM.getVM().getDebugger().parseAddress(t.nextToken());
final long stride = VM.getVM().getAddressSize();
if (type.equals("threads")) {
Threads threads = VM.getVM().getThreads();
for (JavaThread thread = threads.first(); thread != null; thread = thread.next()) {
Address base = thread.getBaseOfStackPointer();
Address end = thread.getLastJavaSP();
if (end == null) continue;
if (end.lessThan(base)) {
Address tmp = base;
base = end;
end = tmp;
}
out.println("Searching " + base + " " + end);
while (base != null && base.lessThan(end)) {
Address val = base.getAddressAt(0);
if (AddressOps.equal(val, value)) {
out.println(base);
}
base = base.addOffsetTo(stride);
}
return;
}
String type = t.nextToken();
final Address value = VM.getVM().getDebugger().parseAddress(t.nextToken());
final long stride = VM.getVM().getAddressSize();
if (type.equals("threads")) {
Threads threads = VM.getVM().getThreads();
for (JavaThread thread = threads.first(); thread != null; thread = thread.next()) {
Address base = thread.getBaseOfStackPointer();
Address end = thread.getLastJavaSP();
if (end == null) continue;
if (end.lessThan(base)) {
Address tmp = base;
base = end;
end = tmp;
}
out.println("Searching " + base + " " + end);
while (base != null && base.lessThan(end)) {
Address val = base.getAddressAt(0);
if (AddressOps.equal(val, value)) {
out.println(base);
}
base = base.addOffsetTo(stride);
}
} else if (type.equals("heap")) {
RawHeapVisitor iterator = new RawHeapVisitor() {
public void prologue(long used) {
}

public void visitAddress(Address addr) {
Address val = addr.getAddressAt(0);
if (AddressOps.equal(val, value)) {
out.println("found at " + addr);
}
}
public void visitCompOopAddress(Address addr) {
Address val = addr.getCompOopAddressAt(0);
if (AddressOps.equal(val, value)) {
out.println("found at " + addr);
}
}
public void epilogue() {
}
};
VM.getVM().getObjectHeap().iterateRaw(iterator);
} else if (type.equals("codecache")) {
CodeCacheVisitor v = new CodeCacheVisitor() {
public void prologue(Address start, Address end) {
}
public void visit(CodeBlob blob) {
boolean printed = false;
Address base = blob.getAddress();
Address end = base.addOffsetTo(blob.getSize());
while (base != null && base.lessThan(end)) {
Address val = base.getAddressAt(0);
if (AddressOps.equal(val, value)) {
if (!printed) {
printed = true;
blob.printOn(out);
}
out.println("found at " + base + "\n");
}
base = base.addOffsetTo(stride);
}
}
public void epilogue() {
}


};
VM.getVM().getCodeCache().iterate(v);

}
} else if (type.equals("rawheap")) {
RawHeapVisitor iterator = new RawHeapVisitor() {
public void prologue(long used) {
}

public void visitAddress(Address addr) {
Address val = addr.getAddressAt(0);
if (AddressOps.equal(val, value)) {
out.println("found at " + addr);
}
}
public void visitCompOopAddress(Address addr) {
Address val = addr.getCompOopAddressAt(0);
if (AddressOps.equal(val, value)) {
out.println("found at " + addr);
}
}
public void epilogue() {
}
};
VM.getVM().getObjectHeap().iterateRaw(iterator);
} else if (type.equals("heap") || type.equals("perm")) {
HeapVisitor iterator = new DefaultHeapVisitor() {
public boolean doObj(Oop obj) {
int index = 0;
Address start = obj.getHandle();
long end = obj.getObjectSize();
while (index < end) {
Address val = start.getAddressAt(index);
if (AddressOps.equal(val, value)) {
out.println("found in " + obj.getHandle());
break;
}
index += 4;
}
return false;
}
};
if (type.equals("heap")) {
VM.getVM().getObjectHeap().iterate(iterator);
} else {
VM.getVM().getObjectHeap().iteratePerm(iterator);
}
} else if (type.equals("codecache")) {
CodeCacheVisitor v = new CodeCacheVisitor() {
public void prologue(Address start, Address end) {
}
public void visit(CodeBlob blob) {
boolean printed = false;
Address base = blob.getAddress();
Address end = base.addOffsetTo(blob.getSize());
while (base != null && base.lessThan(end)) {
Address val = base.getAddressAt(0);
if (AddressOps.equal(val, value)) {
if (!printed) {
printed = true;
blob.printOn(out);
}
out.println("found at " + base + "\n");
}
base = base.addOffsetTo(stride);
}
}
public void epilogue() {
}


};
VM.getVM().getCodeCache().iterate(v);

}
}
},
@@ -957,12 +1057,19 @@ public class CommandProcessor {
Threads threads = VM.getVM().getThreads();
boolean all = name.equals("-a");
for (JavaThread thread = threads.first(); thread != null; thread = thread.next()) {
StringWriter sw = new StringWriter();
ByteArrayOutputStream bos = new ByteArrayOutputStream();
thread.printThreadIDOn(new PrintStream(bos));
if (all || bos.toString().equals(name)) {
out.println(bos.toString() + " = " + thread.getAddress());
HTMLGenerator gen = new HTMLGenerator(false);
out.println(gen.genHTMLForJavaStackTrace(thread));
try {
out.println(gen.genHTMLForJavaStackTrace(thread));
} catch (Exception e) {
err.println("Error: " + e);
if (verboseExceptions) {
e.printStackTrace(err);
}
}
if (!all) return;
}
}
@@ -970,6 +1077,26 @@ public class CommandProcessor {
}
}
},
new Command("thread", "thread { -a | id }", false) {
public void doit(Tokens t) {
if (t.countTokens() != 1) {
usage();
} else {
String name = t.nextToken();
Threads threads = VM.getVM().getThreads();
boolean all = name.equals("-a");
for (JavaThread thread = threads.first(); thread != null; thread = thread.next()) {
ByteArrayOutputStream bos = new ByteArrayOutputStream();
thread.printThreadIDOn(new PrintStream(bos));
if (all || bos.toString().equals(name)) {
out.println(bos.toString() + " = " + thread.getAddress());
if (!all) return;
}
}
out.println("Couldn't find thread " + name);
}
}
},

new Command("threads", false) {
public void doit(Tokens t) {

@@ -1161,7 +1288,7 @@ public class CommandProcessor {
}
}

static Pattern historyPattern = Pattern.compile("((!\\*)|(!\\$)|(!!-?)|(!-?[0-9][0-9]*))");
static Pattern historyPattern = Pattern.compile("((!\\*)|(!\\$)|(!!-?)|(!-?[0-9][0-9]*)|(![a-zA-Z][^ ]*))");

public void executeCommand(String ln) {
if (ln.indexOf('!') != -1) {
@@ -1195,14 +1322,37 @@ public class CommandProcessor {
result.append(item.at(item.countTokens() - 1));
} else {
String tail = cmd.substring(1);
int index = Integer.parseInt(tail);
if (index < 0) {
index = history.size() + index;
switch (tail.charAt(0)) {
case '0':
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
case '9':
case '-': {
int index = Integer.parseInt(tail);
if (index < 0) {
index = history.size() + index;
}
if (index > size) {
err.println("No such history item");
} else {
result.append((String)history.get(index));
}
break;
}
default: {
for (int i = history.size() - 1; i >= 0; i--) {
String s = (String)history.get(i);
if (s.startsWith(tail)) {
result.append(s);
}
}
}
if (index > size) {
err.println("No such history item");
} else {
result.append((String)history.get(index));
}
}
}
@@ -1,5 +1,5 @@
/*
* Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -985,6 +985,12 @@ public class HSDB implements ObjectHistogramPanel.Listener, SAListener {
annoPanel.addAnnotation(new Annotation(curFrame.addressOfInterpreterFrameExpressionStack(),
curFrame.addressOfInterpreterFrameTOS(),
"Interpreter expression stack"));
Address monBegin = curFrame.interpreterFrameMonitorBegin().address();
Address monEnd = curFrame.interpreterFrameMonitorEnd().address();
if (!monBegin.equals(monEnd)) {
annoPanel.addAnnotation(new Annotation(monBegin, monEnd,
"BasicObjectLocks"));
}
if (interpreterFrameMethod != null) {
// The offset is just to get the right stack slots highlighted in the output
int offset = 1;
@@ -1,5 +1,5 @@
/*
* Copyright 2001-2003 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -294,7 +294,7 @@ public class BugSpot extends JPanel {
attachDialog.setSize(400, 300);
GraphicsUtilities.centerInContainer(attachDialog.getComponent(),
getParentDimension(attachDialog.getComponent()));
attachDialog.show();
attachDialog.setVisible(true);
}

public void showThreadsDialog() {

@@ -321,7 +321,7 @@ public class BugSpot extends JPanel {
getParentDimension(threadsDialog.getComponent()));
GraphicsUtilities.centerInContainer(threadsDialog.getComponent(),
getParentDimension(threadsDialog.getComponent()));
threadsDialog.show();
threadsDialog.setVisible(true);
}

public void showMemoryDialog() {

@@ -341,7 +341,7 @@ public class BugSpot extends JPanel {
getParentDimension(memoryDialog.getComponent()));
GraphicsUtilities.centerInContainer(memoryDialog.getComponent(),
getParentDimension(memoryDialog.getComponent()));
memoryDialog.show();
memoryDialog.setVisible(true);
}

/** Changes the editor factory this debugger uses to display source

@@ -530,7 +530,7 @@ public class BugSpot extends JPanel {
addFrame(stackFrame);
stackFrame.setSize(400, 200);
GraphicsUtilities.moveToInContainer(stackFrame.getComponent(), 0.0f, 1.0f, 0, 20);
stackFrame.show();
stackFrame.setVisible(true);

// Create register panel
registerPanel = new RegisterPanel();

@@ -544,7 +544,7 @@ public class BugSpot extends JPanel {
registerFrame.setSize(225, 200);
GraphicsUtilities.moveToInContainer(registerFrame.getComponent(),
1.0f, 0.0f, 0, 0);
registerFrame.show();
registerFrame.setVisible(true);

resetCurrentThread();
} catch (DebuggerException e) {

@@ -979,7 +979,7 @@ public class BugSpot extends JPanel {
1.0f,
0.85f,
getParentDimension(editorFrame.getComponent()));
editorFrame.show();
editorFrame.setVisible(true);
shown = true;
}
code.showLineNumber(lineNo);
@@ -1,5 +1,5 @@
/*
* Copyright 2002 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2002-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -96,10 +96,6 @@ public class BytecodeDisassembler {
addBytecodeClass(Bytecodes._dstore, BytecodeStore.class);
addBytecodeClass(Bytecodes._astore, BytecodeStore.class);
addBytecodeClass(Bytecodes._tableswitch, BytecodeTableswitch.class);

// only special fast_xxx cases. others are handled differently.
addBytecodeClass(Bytecodes._fast_iaccess_0, BytecodeFastAAccess0.class);
addBytecodeClass(Bytecodes._fast_aaccess_0, BytecodeFastIAccess0.class);
}

public BytecodeDisassembler(Method method) {
@@ -1,5 +1,5 @@
/*
* Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -263,11 +263,12 @@ public class ConstantPool extends Oop implements ClassConstants {
case JVM_CONSTANT_NameAndType: return "JVM_CONSTANT_NameAndType";
case JVM_CONSTANT_Invalid: return "JVM_CONSTANT_Invalid";
case JVM_CONSTANT_UnresolvedClass: return "JVM_CONSTANT_UnresolvedClass";
case JVM_CONSTANT_UnresolvedClassInError: return "JVM_CONSTANT_UnresolvedClassInError";
case JVM_CONSTANT_ClassIndex: return "JVM_CONSTANT_ClassIndex";
case JVM_CONSTANT_UnresolvedString: return "JVM_CONSTANT_UnresolvedString";
case JVM_CONSTANT_StringIndex: return "JVM_CONSTANT_StringIndex";
}
throw new InternalError("unknown tag");
throw new InternalError("Unknown tag: " + tag);
}

public void iterateFields(OopVisitor visitor, boolean doVMFields) {

@@ -304,6 +305,7 @@ public class ConstantPool extends Oop implements ClassConstants {
index++;
break;

case JVM_CONSTANT_UnresolvedClassInError:
case JVM_CONSTANT_UnresolvedClass:
case JVM_CONSTANT_Class:
case JVM_CONSTANT_UnresolvedString:

@@ -409,6 +411,7 @@ public class ConstantPool extends Oop implements ClassConstants {
}

// case JVM_CONSTANT_ClassIndex:
case JVM_CONSTANT_UnresolvedClassInError:
case JVM_CONSTANT_UnresolvedClass: {
dos.writeByte(JVM_CONSTANT_Class);
String klassName = getSymbolAt(ci).asString();

@@ -464,6 +467,8 @@ public class ConstantPool extends Oop implements ClassConstants {
+ ", type = " + signatureIndex);
break;
}
default:
throw new InternalError("unknown tag: " + cpConstType);
} // switch
}
dos.flush();
@@ -1,5 +1,5 @@
/*
* Copyright 2002-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2002-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -58,6 +58,9 @@ public interface ClassConstants
// Temporary tag while constructing constant pool
public static final int JVM_CONSTANT_StringIndex = 103;

// Temporary tag while constructing constant pool
public static final int JVM_CONSTANT_UnresolvedClassInError = 104;

// 1.5 major/minor version numbers from JVM spec. 3rd edition
public static final short MAJOR_VERSION = 49;
public static final short MINOR_VERSION = 0;
@@ -1,5 +1,5 @@
/*
* Copyright 2001 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -108,7 +108,7 @@ public abstract class SignatureIterator {
return BasicTypeSize.getTArraySize();
}
}
throw new RuntimeException("Should not reach here");
throw new RuntimeException("Should not reach here: char " + (char)_signature.getByteAt(_index) + " @ " + _index + " in " + _signature.asString());
}
protected void checkSignatureEnd() {
if (_index < _signature.getLength()) {
@@ -1,5 +1,5 @@
/*
* Copyright 2002-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2002-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -238,6 +238,7 @@ public class ClassWriter implements /* imports */ ClassConstants
}

// case JVM_CONSTANT_ClassIndex:
case JVM_CONSTANT_UnresolvedClassInError:
case JVM_CONSTANT_UnresolvedClass: {
dos.writeByte(JVM_CONSTANT_Class);
String klassName = cpool.getSymbolAt(ci).asString();

@@ -296,6 +297,8 @@ public class ClassWriter implements /* imports */ ClassConstants
+ ", type = " + signatureIndex);
break;
}
default:
throw new InternalError("Unknown tag: " + cpConstType);
} // switch
}
}
@@ -1,5 +1,5 @@
/*
* Copyright 2001 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -39,7 +39,6 @@ public interface FrameWrapper {
public void setVisible(boolean visible);
public void setSize(int x, int y);
public void pack();
public void show();
public void dispose();
public void setBackground(Color color);
public void setResizable(boolean resizable);
@@ -1,5 +1,5 @@
/*
* Copyright 2002-2008 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2002-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -95,8 +95,10 @@ public class HTMLGenerator implements /* imports */ ClassConstants {

// list tags
void beginList() { beginTag("ul"); nl(); }
void li(String s) { wrap("li", s); nl(); }
void endList() { endTag("ul"); nl(); }
void beginListItem() { beginTag("li"); }
void endListItem() { endTag("li"); nl(); }
void li(String s) { wrap("li", s); nl(); }

// table tags
void beginTable(int border) {

@@ -505,6 +507,11 @@ public class HTMLGenerator implements /* imports */ ClassConstants {
buf.cell(cpool.getSymbolAt(index).asString());
break;

case JVM_CONSTANT_UnresolvedClassInError:
buf.cell("JVM_CONSTANT_UnresolvedClassInError");
buf.cell(cpool.getSymbolAt(index).asString());
break;

case JVM_CONSTANT_Class:
buf.cell("JVM_CONSTANT_Class");
Klass klass = (Klass) cpool.getObjAt(index);

@@ -564,6 +571,9 @@ public class HTMLGenerator implements /* imports */ ClassConstants {
buf.cell("JVM_CONSTANT_StringIndex");
buf.cell(Integer.toString(cpool.getIntAt(index)));
break;

default:
throw new InternalError("unknown tag: " + ctag);
}

buf.endTag("tr");

@@ -671,7 +681,16 @@ public class HTMLGenerator implements /* imports */ ClassConstants {
buf.cell(Integer.toString(curBci) + spaces);

buf.beginTag("td");
String instrStr = escapeHTMLSpecialChars(instr.toString());
String instrStr = null;
try {
instrStr = escapeHTMLSpecialChars(instr.toString());
} catch (RuntimeException re) {
buf.append("exception during bytecode processing");
buf.endTag("td");
buf.endTag("tr");
re.printStackTrace();
return;
}

if (instr instanceof BytecodeNew) {
BytecodeNew newBytecode = (BytecodeNew) instr;

@@ -1396,9 +1415,7 @@ public class HTMLGenerator implements /* imports */ ClassConstants {
final SymbolFinder symFinder = createSymbolFinder();
final Disassembler disasm = createDisassembler(startPc, code);
class NMethodVisitor implements InstructionVisitor {
boolean prevWasCall;
public void prologue() {
prevWasCall = false;
}

public void visit(long currentPc, Instruction instr) {

@@ -1418,8 +1435,7 @@ public class HTMLGenerator implements /* imports */ ClassConstants {

PCDesc pcDesc = (PCDesc) safepoints.get(longToAddress(currentPc));

boolean isSafepoint = (pcDesc != null);
if (isSafepoint && prevWasCall) {
if (pcDesc != null) {
buf.append(genSafepointInfo(nmethod, pcDesc));
}

@@ -1435,11 +1451,6 @@ public class HTMLGenerator implements /* imports */ ClassConstants {
}

buf.br();
if (isSafepoint && !prevWasCall) {
buf.append(genSafepointInfo(nmethod, pcDesc));
}

prevWasCall = instr.isCall();
}

public void epilogue() {

@@ -1783,22 +1794,20 @@ public class HTMLGenerator implements /* imports */ ClassConstants {
buf.h3("Fields");
buf.beginList();
for (int f = 0; f < numFields; f += InstanceKlass.NEXT_OFFSET) {
int nameIndex = fields.getShortAt(f + InstanceKlass.NAME_INDEX_OFFSET);
int sigIndex = fields.getShortAt(f + InstanceKlass.SIGNATURE_INDEX_OFFSET);
int genSigIndex = fields.getShortAt(f + InstanceKlass.GENERIC_SIGNATURE_INDEX_OFFSET);
Symbol f_name = cp.getSymbolAt(nameIndex);
Symbol f_sig = cp.getSymbolAt(sigIndex);
Symbol f_genSig = (genSigIndex != 0)? cp.getSymbolAt(genSigIndex) : null;
AccessFlags acc = new AccessFlags(fields.getShortAt(f + InstanceKlass.ACCESS_FLAGS_OFFSET));
sun.jvm.hotspot.oops.Field field = klass.getFieldByIndex(f);
String f_name = ((NamedFieldIdentifier)field.getID()).getName();
Symbol f_sig = field.getSignature();
Symbol f_genSig = field.getGenericSignature();
AccessFlags acc = field.getAccessFlagsObj();

buf.beginTag("li");
buf.beginListItem();
buf.append(genFieldModifierString(acc));
buf.append(' ');
Formatter sigBuf = new Formatter(genHTML);
new SignatureConverter(f_sig, sigBuf.getBuffer()).dispatchField();
buf.append(sigBuf.toString().replace('/', '.'));
buf.append(' ');
buf.append(f_name.asString());
buf.append(f_name);
buf.append(';');
// is it generic?
if (f_genSig != null) {

@@ -1806,7 +1815,8 @@ public class HTMLGenerator implements /* imports */ ClassConstants {
buf.append(escapeHTMLSpecialChars(f_genSig.asString()));
buf.append("] ");
}
buf.endTag("li");
buf.append(" (offset = " + field.getOffset() + ")");
buf.endListItem();
}
buf.endList();
}
@@ -1,5 +1,5 @@
/*
* Copyright 2000-2005 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -28,7 +28,7 @@ public class Assert {
public static boolean ASSERTS_ENABLED = true;

public static void that(boolean test, String message) {
if (!test) {
if (ASSERTS_ENABLED && !test) {
throw new AssertionFailure(message);
}
}
@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2010

HS_MAJOR_VER=18
HS_MINOR_VER=0
HS_BUILD_NUMBER=02
HS_BUILD_NUMBER=04

JDK_MAJOR_VER=1
JDK_MINOR_VER=7
@@ -127,6 +127,9 @@ $(GENERATEDFILES): refresh_adfiles
# Note that product files are updated via "mv", which is atomic.
TEMPDIR := $(OUTDIR)/mktmp$(shell echo $$$$)

# Debuggable by default
CFLAGS += -g

# Pass -D flags into ADLC.
ADLCFLAGS += $(SYSDEFS)

@@ -135,7 +138,7 @@ ADLCFLAGS += -q -T

# Normally, debugging is done directly on the ad_<arch>*.cpp files.
# But -g will put #line directives in those files pointing back to <arch>.ad.
#ADLCFLAGS += -g
ADLCFLAGS += -g

ifdef LP64
ADLCFLAGS += -D_LP64

@@ -147,6 +147,9 @@ $(GENERATEDFILES): refresh_adfiles
# Note that product files are updated via "mv", which is atomic.
TEMPDIR := $(OUTDIR)/mktmp$(shell echo $$$$)

# Debuggable by default
CFLAGS += -g

# Pass -D flags into ADLC.
ADLCFLAGS += $(SYSDEFS)

@@ -155,7 +158,7 @@ ADLCFLAGS += -q -T

# Normally, debugging is done directly on the ad_<arch>*.cpp files.
# But -g will put #line directives in those files pointing back to <arch>.ad.
#ADLCFLAGS += -g
ADLCFLAGS += -g

ifdef LP64
ADLCFLAGS += -D_LP64
@@ -661,9 +661,6 @@ class Assembler : public AbstractAssembler {
stx_op3 = 0x0e,
swap_op3 = 0x0f,

lduwa_op3 = 0x10,
ldxa_op3 = 0x1b,

stwa_op3 = 0x14,
stxa_op3 = 0x1e,
@@ -388,6 +388,60 @@ int LIR_Assembler::emit_exception_handler() {
}


// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
if (CommentedAssembly) {
_masm->block_comment("Unwind handler");
}
#endif

int offset = code_offset();

// Fetch the exception from TLS and clear out exception related thread state
__ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), O0);
__ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
__ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));

__ bind(_unwind_handler_entry);
__ verify_not_null_oop(O0);
if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
__ mov(O0, I0); // Preserve the exception
}

// Preform needed unlocking
MonitorExitStub* stub = NULL;
if (method()->is_synchronized()) {
monitor_address(0, FrameMap::I1_opr);
stub = new MonitorExitStub(FrameMap::I1_opr, true, 0);
__ unlock_object(I3, I2, I1, *stub->entry());
__ bind(*stub->continuation());
}

if (compilation()->env()->dtrace_method_probes()) {
jobject2reg(method()->constant_encoding(), O0);
__ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), relocInfo::runtime_call_type);
__ delayed()->nop();
}

if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
__ mov(I0, O0); // Restore the exception
}

// dispatch to the unwind logic
__ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type);
__ delayed()->nop();

// Emit the slow path assembly
if (stub != NULL) {
stub->emit_code(this);
}

return offset;
}


int LIR_Assembler::emit_deopt_handler() {
// if the last instruction is a call (typically to do a throw which
// is coming at the end after block reordering) the return address

@@ -1728,9 +1782,13 @@ void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Op
ShouldNotReachHere();
}
} else if (code == lir_cmp_l2i) {
#ifdef _LP64
__ lcmp(left->as_register_lo(), right->as_register_lo(), dst->as_register());
#else
__ lcmp(left->as_register_hi(), left->as_register_lo(),
right->as_register_hi(), right->as_register_lo(),
dst->as_register());
#endif
} else {
ShouldNotReachHere();
}

@@ -2046,26 +2104,29 @@ int LIR_Assembler::shift_amount(BasicType t) {
}


void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info, bool unwind) {
void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
assert(exceptionOop->as_register() == Oexception, "should match");
assert(unwind || exceptionPC->as_register() == Oissuing_pc, "should match");
assert(exceptionPC->as_register() == Oissuing_pc, "should match");

info->add_register_oop(exceptionOop);

if (unwind) {
__ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type);
__ delayed()->nop();
} else {
// reuse the debug info from the safepoint poll for the throw op itself
address pc_for_athrow = __ pc();
int pc_for_athrow_offset = __ offset();
RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow);
__ set(pc_for_athrow, Oissuing_pc, rspec);
add_call_info(pc_for_athrow_offset, info); // for exception handler
// reuse the debug info from the safepoint poll for the throw op itself
address pc_for_athrow = __ pc();
int pc_for_athrow_offset = __ offset();
RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow);
__ set(pc_for_athrow, Oissuing_pc, rspec);
add_call_info(pc_for_athrow_offset, info); // for exception handler

__ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
__ delayed()->nop();
}
__ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
__ delayed()->nop();
}


void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
assert(exceptionOop->as_register() == Oexception, "should match");

__ br(Assembler::always, false, Assembler::pt, _unwind_handler_entry);
__ delayed()->nop();
}


@@ -2354,7 +2415,7 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
if (UseSlowPath ||
(!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
(!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
__ br(Assembler::always, false, Assembler::pn, *op->stub()->entry());
__ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
__ delayed()->nop();
} else {
__ allocate_array(op->obj()->as_register(),

@@ -2849,7 +2910,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {


void LIR_Assembler::align_backward_branch_target() {
__ align(16);
__ align(OptoLoopAlignment);
}
@@ -60,9 +60,6 @@ define_pd_global(intx, FreqInlineSize, 175);
define_pd_global(intx, INTPRESSURE, 48); // large register set
define_pd_global(intx, InteriorEntryAlignment, 16); // = CodeEntryAlignment
define_pd_global(intx, NewSizeThreadIncrease, ScaleForWordSize(4*K));
// The default setting 16/16 seems to work best.
// (For _228_jack 16/16 is 2% better than 4/4, 16/4, 32/32, 32/16, or 16/32.)
define_pd_global(intx, OptoLoopAlignment, 16); // = 4*wordSize
define_pd_global(intx, RegisterCostAreaRatio, 12000);
define_pd_global(bool, UseTLAB, true);
define_pd_global(bool, ResizeTLAB, true);

@@ -40,6 +40,9 @@ define_pd_global(bool, ImplicitNullChecks, true); // Generate code for
define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs past to check cast

define_pd_global(intx, CodeEntryAlignment, 32);
// The default setting 16/16 seems to work best.
// (For _228_jack 16/16 is 2% better than 4/4, 16/4, 32/32, 32/16, or 16/32.)
define_pd_global(intx, OptoLoopAlignment, 16); // = 4*wordSize
define_pd_global(intx, InlineFrequencyCount, 50); // we can use more inlining on the SPARC
define_pd_global(intx, InlineSmallCode, 1500);
#ifdef _LP64
@@ -471,6 +471,9 @@ extern bool can_branch_register( Node *bol, Node *cmp );
source %{
#define __ _masm.

// Block initializing store
#define ASI_BLK_INIT_QUAD_LDD_P 0xE2

// tertiary op of a LoadP or StoreP encoding
#define REGP_OP true

@@ -920,38 +923,6 @@ void emit_form3_mem_reg(CodeBuffer &cbuf, const MachNode* n, int primary, int te
#endif
}

void emit_form3_mem_reg_asi(CodeBuffer &cbuf, const MachNode* n, int primary, int tertiary,
int src1_enc, int disp32, int src2_enc, int dst_enc, int asi) {

uint instr;
instr = (Assembler::ldst_op << 30)
| (dst_enc << 25)
| (primary << 19)
| (src1_enc << 14);

int disp = disp32;
int index = src2_enc;

if (src1_enc == R_SP_enc || src1_enc == R_FP_enc)
disp += STACK_BIAS;

// We should have a compiler bailout here rather than a guarantee.
// Better yet would be some mechanism to handle variable-size matches correctly.
guarantee(Assembler::is_simm13(disp), "Do not match large constant offsets" );

if( disp != 0 ) {
// use reg-reg form
// set src2=R_O7 contains offset
index = R_O7_enc;
emit3_simm13( cbuf, Assembler::arith_op, index, Assembler::or_op3, 0, disp);
}
instr |= (asi << 5);
instr |= index;
uint *code = (uint*)cbuf.code_end();
*code = instr;
cbuf.set_code_end(cbuf.code_end() + BytesPerInstWord);
}

void emit_call_reloc(CodeBuffer &cbuf, intptr_t entry_point, relocInfo::relocType rtype, bool preserve_g2 = false, bool force_far_call = false) {
// The method which records debug information at every safepoint
// expects the call to be the first instruction in the snippet as

@@ -1951,11 +1922,6 @@ encode %{
$mem$$base, $mem$$disp, $mem$$index, $dst$$reg);
%}

enc_class form3_mem_reg_little( memory mem, iRegI dst) %{
emit_form3_mem_reg_asi(cbuf, this, $primary, -1,
$mem$$base, $mem$$disp, $mem$$index, $dst$$reg, Assembler::ASI_PRIMARY_LITTLE);
%}

enc_class form3_mem_prefetch_read( memory mem ) %{
emit_form3_mem_reg(cbuf, this, $primary, -1,
$mem$$base, $mem$$disp, $mem$$index, 0/*prefetch function many-reads*/);

@@ -4308,8 +4274,8 @@ operand cmpOp_commute() %{
// instructions for every form of operand when the instruction accepts
// multiple operand types with the same basic encoding and format. The classic
// case of this is memory operands.
// Indirect is not included since its use is limited to Compare & Swap
opclass memory( indirect, indOffset13, indIndex );
opclass indIndexMemory( indIndex );

//----------PIPELINE-----------------------------------------------------------
pipeline %{

@@ -6147,6 +6113,7 @@ instruct prefetchr( memory mem ) %{
%}

instruct prefetchw( memory mem ) %{
predicate(AllocatePrefetchStyle != 3 );
match( PrefetchWrite mem );
ins_cost(MEMORY_REF_COST);

@@ -6156,6 +6123,23 @@ instruct prefetchw( memory mem ) %{
ins_pipe(iload_mem);
%}

// Use BIS instruction to prefetch.
instruct prefetchw_bis( memory mem ) %{
predicate(AllocatePrefetchStyle == 3);
match( PrefetchWrite mem );
ins_cost(MEMORY_REF_COST);

format %{ "STXA G0,$mem\t! // Block initializing store" %}
ins_encode %{
Register base = as_Register($mem$$base);
int disp = $mem$$disp;
if (disp != 0) {
__ add(base, AllocatePrefetchStepSize, base);
}
__ stxa(G0, base, G0, ASI_BLK_INIT_QUAD_LDD_P);
%}
ins_pipe(istore_mem_reg);
%}

//----------Store Instructions-------------------------------------------------
// Store Byte

@@ -9645,84 +9629,179 @@ instruct popCountL(iRegI dst, iRegL src) %{

instruct bytes_reverse_int(iRegI dst, stackSlotI src) %{
match(Set dst (ReverseBytesI src));
effect(DEF dst, USE src);

// Op cost is artificially doubled to make sure that load or store
// instructions are preferred over this one which requires a spill
// onto a stack slot.
ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
size(8);
format %{ "LDUWA $src, $dst\t!asi=primary_little" %}
opcode(Assembler::lduwa_op3);
ins_encode( form3_mem_reg_little(src, dst) );

ins_encode %{
__ set($src$$disp + STACK_BIAS, O7);
__ lduwa($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
%}
ins_pipe( iload_mem );
%}

instruct bytes_reverse_long(iRegL dst, stackSlotL src) %{
match(Set dst (ReverseBytesL src));
effect(DEF dst, USE src);

// Op cost is artificially doubled to make sure that load or store
// instructions are preferred over this one which requires a spill
// onto a stack slot.
ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
size(8);
format %{ "LDXA $src, $dst\t!asi=primary_little" %}

opcode(Assembler::ldxa_op3);
ins_encode( form3_mem_reg_little(src, dst) );
ins_encode %{
__ set($src$$disp + STACK_BIAS, O7);
__ ldxa($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
%}
ins_pipe( iload_mem );
%}

instruct bytes_reverse_unsigned_short(iRegI dst, stackSlotI src) %{
match(Set dst (ReverseBytesUS src));

// Op cost is artificially doubled to make sure that load or store
// instructions are preferred over this one which requires a spill
// onto a stack slot.
ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
format %{ "LDUHA $src, $dst\t!asi=primary_little\n\t" %}

ins_encode %{
// the value was spilled as an int so bias the load
__ set($src$$disp + STACK_BIAS + 2, O7);
__ lduha($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
%}
ins_pipe( iload_mem );
%}

instruct bytes_reverse_short(iRegI dst, stackSlotI src) %{
match(Set dst (ReverseBytesS src));

// Op cost is artificially doubled to make sure that load or store
// instructions are preferred over this one which requires a spill
// onto a stack slot.
ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
format %{ "LDSHA $src, $dst\t!asi=primary_little\n\t" %}

ins_encode %{
// the value was spilled as an int so bias the load
__ set($src$$disp + STACK_BIAS + 2, O7);
__ ldsha($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
%}
ins_pipe( iload_mem );
%}

// Load Integer reversed byte order
instruct loadI_reversed(iRegI dst, memory src) %{
instruct loadI_reversed(iRegI dst, indIndexMemory src) %{
match(Set dst (ReverseBytesI (LoadI src)));

ins_cost(DEFAULT_COST + MEMORY_REF_COST);
size(8);
size(4);
format %{ "LDUWA $src, $dst\t!asi=primary_little" %}

opcode(Assembler::lduwa_op3);
ins_encode( form3_mem_reg_little( src, dst) );
ins_encode %{
__ lduwa($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
%}
ins_pipe(iload_mem);
%}

// Load Long - aligned and reversed
instruct loadL_reversed(iRegL dst, memory src) %{
instruct loadL_reversed(iRegL dst, indIndexMemory src) %{
match(Set dst (ReverseBytesL (LoadL src)));

ins_cost(DEFAULT_COST + MEMORY_REF_COST);
size(8);
ins_cost(MEMORY_REF_COST);
size(4);
format %{ "LDXA $src, $dst\t!asi=primary_little" %}

opcode(Assembler::ldxa_op3);
ins_encode( form3_mem_reg_little( src, dst ) );
ins_encode %{
__ ldxa($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
%}
ins_pipe(iload_mem);
%}

// Load unsigned short / char reversed byte order
instruct loadUS_reversed(iRegI dst, indIndexMemory src) %{
match(Set dst (ReverseBytesUS (LoadUS src)));

ins_cost(MEMORY_REF_COST);
size(4);
format %{ "LDUHA $src, $dst\t!asi=primary_little" %}

ins_encode %{
__ lduha($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
%}
ins_pipe(iload_mem);
%}

// Load short reversed byte order
instruct loadS_reversed(iRegI dst, indIndexMemory src) %{
match(Set dst (ReverseBytesS (LoadS src)));

ins_cost(MEMORY_REF_COST);
size(4);
format %{ "LDSHA $src, $dst\t!asi=primary_little" %}

ins_encode %{
__ ldsha($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
%}
ins_pipe(iload_mem);
%}

// Store Integer reversed byte order
instruct storeI_reversed(memory dst, iRegI src) %{
instruct storeI_reversed(indIndexMemory dst, iRegI src) %{
match(Set dst (StoreI dst (ReverseBytesI src)));

ins_cost(MEMORY_REF_COST);
size(8);
size(4);
format %{ "STWA $src, $dst\t!asi=primary_little" %}

opcode(Assembler::stwa_op3);
ins_encode( form3_mem_reg_little( dst, src) );
ins_encode %{
__ stwa($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
%}
ins_pipe(istore_mem_reg);
%}

// Store Long reversed byte order
instruct storeL_reversed(memory dst, iRegL src) %{
instruct storeL_reversed(indIndexMemory dst, iRegL src) %{
match(Set dst (StoreL dst (ReverseBytesL src)));

ins_cost(MEMORY_REF_COST);
size(8);
size(4);
format %{ "STXA $src, $dst\t!asi=primary_little" %}

opcode(Assembler::stxa_op3);
ins_encode( form3_mem_reg_little( dst, src) );
ins_encode %{
__ stxa($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
%}
ins_pipe(istore_mem_reg);
%}

// Store unsighed short/char reversed byte order
instruct storeUS_reversed(indIndexMemory dst, iRegI src) %{
match(Set dst (StoreC dst (ReverseBytesUS src)));

ins_cost(MEMORY_REF_COST);
size(4);
format %{ "STHA $src, $dst\t!asi=primary_little" %}

ins_encode %{
__ stha($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
%}
ins_pipe(istore_mem_reg);
%}

// Store short reversed byte order
instruct storeS_reversed(indIndexMemory dst, iRegI src) %{
match(Set dst (StoreC dst (ReverseBytesS src)));

ins_cost(MEMORY_REF_COST);
size(4);
format %{ "STHA $src, $dst\t!asi=primary_little" %}

ins_encode %{
__ stha($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
%}
ins_pipe(istore_mem_reg);
%}
@@ -1148,7 +1148,7 @@ class StubGenerator: public StubCodeGenerator {
__ andn(from, 7, from); // Align address
__ ldx(from, 0, O3);
__ inc(from, 8);
__ align(16);
__ align(OptoLoopAlignment);
__ BIND(L_loop);
__ ldx(from, 0, O4);
__ deccc(count, count_dec); // Can we do next iteration after this one?

@@ -1220,7 +1220,7 @@ class StubGenerator: public StubCodeGenerator {
//
__ andn(end_from, 7, end_from); // Align address
__ ldx(end_from, 0, O3);
__ align(16);
__ align(OptoLoopAlignment);
__ BIND(L_loop);
__ ldx(end_from, -8, O4);
__ deccc(count, count_dec); // Can we do next iteration after this one?

@@ -1349,7 +1349,7 @@ class StubGenerator: public StubCodeGenerator {
__ BIND(L_copy_byte);
__ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
__ delayed()->nop();
__ align(16);
__ align(OptoLoopAlignment);
__ BIND(L_copy_byte_loop);
__ ldub(from, offset, O3);
__ deccc(count);

@@ -1445,7 +1445,7 @@ class StubGenerator: public StubCodeGenerator {
L_aligned_copy, L_copy_byte);
}
// copy 4 elements (16 bytes) at a time
__ align(16);
__ align(OptoLoopAlignment);
__ BIND(L_aligned_copy);
__ dec(end_from, 16);
__ ldx(end_from, 8, O3);

@@ -1461,7 +1461,7 @@ class StubGenerator: public StubCodeGenerator {
__ BIND(L_copy_byte);
__ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
__ delayed()->nop();
__ align(16);
__ align(OptoLoopAlignment);
__ BIND(L_copy_byte_loop);
__ dec(end_from);
__ dec(end_to);

@@ -1577,7 +1577,7 @@ class StubGenerator: public StubCodeGenerator {
__ BIND(L_copy_2_bytes);
__ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
__ delayed()->nop();
__ align(16);
__ align(OptoLoopAlignment);
__ BIND(L_copy_2_bytes_loop);
__ lduh(from, offset, O3);
__ deccc(count);

@@ -1684,7 +1684,7 @@ class StubGenerator: public StubCodeGenerator {
L_aligned_copy, L_copy_2_bytes);
}
// copy 4 elements (16 bytes) at a time
__ align(16);
__ align(OptoLoopAlignment);
__ BIND(L_aligned_copy);
__ dec(end_from, 16);
__ ldx(end_from, 8, O3);

@@ -1781,7 +1781,7 @@ class StubGenerator: public StubCodeGenerator {
// copy with shift 4 elements (16 bytes) at a time
__ dec(count, 4); // The cmp at the beginning guaranty count >= 4

__ align(16);
__ align(OptoLoopAlignment);
__ BIND(L_copy_16_bytes);
__ ldx(from, 4, O4);
__ deccc(count, 4); // Can we do next iteration after this one?

@@ -1907,7 +1907,7 @@ class StubGenerator: public StubCodeGenerator {
// to form 2 aligned 8-bytes chunks to store.
//
__ ldx(end_from, -4, O3);
__ align(16);
__ align(OptoLoopAlignment);
__ BIND(L_copy_16_bytes);
__ ldx(end_from, -12, O4);
__ deccc(count, 4);

@@ -1929,7 +1929,7 @@ class StubGenerator: public StubCodeGenerator {
__ delayed()->inc(count, 4);

// copy 4 elements (16 bytes) at a time
__ align(16);
__ align(OptoLoopAlignment);
__ BIND(L_aligned_copy);
__ dec(end_from, 16);
__ ldx(end_from, 8, O3);

@@ -2000,6 +2000,27 @@ class StubGenerator: public StubCodeGenerator {
// to: O1
// count: O2 treated as signed
//
// count -= 2;
// if ( count >= 0 ) { // >= 2 elements
// if ( count > 6) { // >= 8 elements
// count -= 6; // original count - 8
// do {
// copy_8_elements;
// count -= 8;
// } while ( count >= 0 );
// count += 6;
// }
// if ( count >= 0 ) { // >= 2 elements
// do {
// copy_2_elements;
// } while ( (count=count-2) >= 0 );
// }
// }
// count += 2;
// if ( count != 0 ) { // 1 element left
// copy_1_element;
// }
//
void generate_disjoint_long_copy_core(bool aligned) {
Label L_copy_8_bytes, L_copy_16_bytes, L_exit;
const Register from = O0; // source array address

@@ -2012,7 +2033,39 @@ class StubGenerator: public StubCodeGenerator {
__ mov(G0, offset0); // offset from start of arrays (0)
__ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes );
__ delayed()->add(offset0, 8, offset8);
__ align(16);

// Copy by 64 bytes chunks
Label L_copy_64_bytes;
const Register from64 = O3; // source address
const Register to64 = G3; // destination address
__ subcc(count, 6, O3);
__ brx(Assembler::negative, false, Assembler::pt, L_copy_16_bytes );
__ delayed()->mov(to, to64);
// Now we can use O4(offset0), O5(offset8) as temps
__ mov(O3, count);
__ mov(from, from64);

__ align(OptoLoopAlignment);
__ BIND(L_copy_64_bytes);
for( int off = 0; off < 64; off += 16 ) {
__ ldx(from64, off+0, O4);
__ ldx(from64, off+8, O5);
__ stx(O4, to64, off+0);
__ stx(O5, to64, off+8);
}
__ deccc(count, 8);
__ inc(from64, 64);
__ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_64_bytes);
__ delayed()->inc(to64, 64);

// Restore O4(offset0), O5(offset8)
__ sub(from64, from, offset0);
__ inccc(count, 6);
__ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes );
__ delayed()->add(offset0, 8, offset8);

// Copy by 16 bytes chunks
__ align(OptoLoopAlignment);
__ BIND(L_copy_16_bytes);
__ ldx(from, offset0, O3);
__ ldx(from, offset8, G3);

@@ -2023,6 +2076,7 @@ class StubGenerator: public StubCodeGenerator {
__ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
__ delayed()->inc(offset8, 16);

// Copy last 8 bytes
__ BIND(L_copy_8_bytes);
__ inccc(count, 2);
__ brx(Assembler::zero, true, Assembler::pn, L_exit );

@@ -2085,7 +2139,7 @@ class StubGenerator: public StubCodeGenerator {
__ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_8_bytes );
__ delayed()->sllx(count, LogBytesPerLong, offset8);
__ sub(offset8, 8, offset0);
__ align(16);
__ align(OptoLoopAlignment);
__ BIND(L_copy_16_bytes);
__ ldx(from, offset8, O2);
__ ldx(from, offset0, O3);

@@ -2351,7 +2405,7 @@ class StubGenerator: public StubCodeGenerator {
// (O5 = 0; ; O5 += wordSize) --- offset from src, dest arrays
// (O2 = len; O2 != 0; O2--) --- number of oops *remaining*
// G3, G4, G5 --- current oop, oop.klass, oop.klass.super
__ align(16);
__ align(OptoLoopAlignment);

__ BIND(store_element);
__ deccc(G1_remain); // decrement the count
@ -86,14 +86,24 @@ void VM_Version::initialize() {
|
||||
if (FLAG_IS_DEFAULT(InteriorEntryAlignment)) {
|
||||
FLAG_SET_DEFAULT(InteriorEntryAlignment, 4);
|
||||
}
|
||||
if (is_niagara1_plus()) {
|
||||
if (AllocatePrefetchStyle > 0 && FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
|
||||
// Use BIS instruction for allocation prefetch.
|
||||
FLAG_SET_DEFAULT(AllocatePrefetchStyle, 3);
|
||||
if (FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
|
||||
// Use smaller prefetch distance on N2 with BIS
|
||||
FLAG_SET_DEFAULT(AllocatePrefetchDistance, 64);
|
||||
}
|
||||
}
|
||||
if (AllocatePrefetchStyle != 3 && FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
|
||||
// Use different prefetch distance without BIS
|
||||
FLAG_SET_DEFAULT(AllocatePrefetchDistance, 256);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
if (FLAG_IS_DEFAULT(OptoLoopAlignment)) {
|
||||
FLAG_SET_DEFAULT(OptoLoopAlignment, 4);
|
||||
}
|
||||
if (is_niagara1_plus() && FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
|
||||
// Use smaller prefetch distance on N2
|
||||
FLAG_SET_DEFAULT(AllocatePrefetchDistance, 256);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
// Use hardware population count instruction if available.
|
||||
|
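The hunk above extends the SPARC flag-defaulting logic: an explicit command-line setting always wins, and a platform default is applied only while FLAG_IS_DEFAULT still holds. A hedged, self-contained C++ sketch of that pattern; the Flag struct and variable names are illustrative stand-ins for HotSpot's globals machinery, not its real API:

  // A flag keeps its default until the user sets it explicitly.
  struct Flag {
    long value;
    bool is_default;
  };

  static void set_default(Flag& f, long v) {
    if (f.is_default) f.value = v;   // never override an explicit setting
  }

  int main() {
    Flag allocate_prefetch_style{1, true};
    Flag allocate_prefetch_distance{512, true};
    bool is_niagara1_plus = true;    // assumption for the example

    if (is_niagara1_plus) {
      if (allocate_prefetch_style.value > 0 && allocate_prefetch_style.is_default) {
        set_default(allocate_prefetch_style, 3);       // BIS-style prefetch
        set_default(allocate_prefetch_distance, 64);   // shorter distance with BIS
      }
      if (allocate_prefetch_style.value != 3) {
        set_default(allocate_prefetch_distance, 256);  // distance without BIS
      }
    }
  }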
@ -3365,6 +3365,13 @@ void Assembler::shrdl(Register dst, Register src) {

#else // LP64

void Assembler::set_byte_if_not_zero(Register dst) {
int enc = prefix_and_encode(dst->encoding(), true);
emit_byte(0x0F);
emit_byte(0x95);
emit_byte(0xE0 | enc);
}

// 64bit only pieces of the assembler
// This should only be used by 64bit instructions that can use rip-relative
// it cannot be used by instructions that want an immediate value.

@ -455,6 +455,60 @@ int LIR_Assembler::emit_exception_handler() {
}


// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
if (CommentedAssembly) {
_masm->block_comment("Unwind handler");
}
#endif

int offset = code_offset();

// Fetch the exception from TLS and clear out exception related thread state
__ get_thread(rsi);
__ movptr(rax, Address(rsi, JavaThread::exception_oop_offset()));
__ movptr(Address(rsi, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
__ movptr(Address(rsi, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);

__ bind(_unwind_handler_entry);
__ verify_not_null_oop(rax);
if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
__ mov(rsi, rax); // Preserve the exception
}

// Perform needed unlocking
MonitorExitStub* stub = NULL;
if (method()->is_synchronized()) {
monitor_address(0, FrameMap::rax_opr);
stub = new MonitorExitStub(FrameMap::rax_opr, true, 0);
__ unlock_object(rdi, rbx, rax, *stub->entry());
__ bind(*stub->continuation());
}

if (compilation()->env()->dtrace_method_probes()) {
__ movoop(Address(rsp, 0), method()->constant_encoding());
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
}

if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
__ mov(rax, rsi); // Restore the exception
}

// remove the activation and dispatch to the unwind handler
__ remove_frame(initial_frame_size_in_bytes());
__ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));

// Emit the slow path assembly
if (stub != NULL) {
stub->emit_code(this);
}

return offset;
}


int LIR_Assembler::emit_deopt_handler() {
// if the last instruction is a call (typically to do a throw which
// is coming at the end after block reordering) the return address
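The new emit_unwind_handler() above is compile-time code that emits this sequence into the method. A hedged C++ outline of what the emitted code then does at run time when an exception unwinds through the frame; Thread and the helper functions are simplified stand-ins, not the real VM interfaces:

  #include <cstdio>

  struct Oop {};
  struct Thread {                 // stand-in for JavaThread's exception TLS fields
    Oop*  exception_oop = nullptr;
    void* exception_pc  = nullptr;
  };

  static void unlock_receiver()    { std::puts("unlock monitor"); }
  static void dtrace_method_exit() { std::puts("dtrace method-exit probe"); }
  static void dispatch_to_unwind_stub(Oop*) { std::puts("remove frame, jump to unwind stub"); }

  void unwind_handler_sketch(Thread* t, bool is_synchronized, bool dtrace_probes) {
    Oop* exception = t->exception_oop;   // fetch the exception from TLS ...
    t->exception_oop = nullptr;          // ... and clear the thread state
    t->exception_pc  = nullptr;

    if (is_synchronized) unlock_receiver();   // MonitorExitStub slow path in the VM
    if (dtrace_probes)   dtrace_method_exit();

    dispatch_to_unwind_stub(exception);       // Runtime1::unwind_exception_id
  }

  int main() { Thread t; unwind_handler_sketch(&t, true, false); }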
@ -1190,8 +1244,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
break;
#endif // _LP64
case T_INT:
// %%% could this be a movl? this is safer but longer instruction
__ movl2ptr(dest->as_register(), from_addr);
__ movl(dest->as_register(), from_addr);
break;

case T_LONG: {
@ -1249,7 +1302,6 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
__ shll(dest_reg, 24);
__ sarl(dest_reg, 24);
}
// These are unsigned so the zero extension on 64bit is just what we need
break;
}

@ -1261,8 +1313,6 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
} else {
__ movw(dest_reg, from_addr);
}
// This is unsigned so the zero extension on 64bit is just what we need
// __ movl2ptr(dest_reg, dest_reg);
break;
}

@ -1275,8 +1325,6 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
__ shll(dest_reg, 16);
__ sarl(dest_reg, 16);
}
// Might not be needed in 64bit but certainly doesn't hurt (except for code size)
__ movl2ptr(dest_reg, dest_reg);
break;
}

@ -2690,19 +2738,14 @@ void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Op
} else {
assert(code == lir_cmp_l2i, "check");
#ifdef _LP64
Register dest = dst->as_register();
__ xorptr(dest, dest);
Label high, done;
__ cmpptr(left->as_register_lo(), right->as_register_lo());
__ jcc(Assembler::equal, done);
__ jcc(Assembler::greater, high);
__ decrement(dest);
__ jmp(done);
__ bind(high);
__ increment(dest);

__ bind(done);

Label done;
Register dest = dst->as_register();
__ cmpptr(left->as_register_lo(), right->as_register_lo());
__ movl(dest, -1);
__ jccb(Assembler::less, done);
__ set_byte_if_not_zero(dest);
__ movzbl(dest, dest);
__ bind(done);
#else
__ lcmp2int(left->as_register_hi(),
left->as_register_lo(),
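The rewritten lir_cmp_l2i sequence above trades two conditional jumps and an extra label for a single short branch plus setcc: start from -1, and if the operands are not "less", derive 0 or 1 from the zero flag. A hedged C++ equivalent of the three-way result it computes:

  #include <cassert>
  #include <cstdint>

  int cmp_l2i_sketch(int64_t left, int64_t right) {
    int dest = -1;                 // movl(dest, -1)
    if (!(left < right)) {         // jccb(less, done)
      dest = (left != right);      // set_byte_if_not_zero + movzbl: 1 if !=, else 0
    }
    return dest;
  }

  int main() {
    assert(cmp_l2i_sketch(1, 2) == -1);
    assert(cmp_l2i_sketch(2, 2) ==  0);
    assert(cmp_l2i_sketch(3, 2) ==  1);
  }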
@ -2800,42 +2843,43 @@ void LIR_Assembler::emit_static_call_stub() {
}


void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info, bool unwind) {
void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
assert(exceptionOop->as_register() == rax, "must match");
assert(unwind || exceptionPC->as_register() == rdx, "must match");
assert(exceptionPC->as_register() == rdx, "must match");

// exception object is not added to oop map by LinearScan
// (LinearScan assumes that no oops are in fixed registers)
info->add_register_oop(exceptionOop);
Runtime1::StubID unwind_id;

if (!unwind) {
// get current pc information
// pc is only needed if the method has an exception handler, the unwind code does not need it.
int pc_for_athrow_offset = __ offset();
InternalAddress pc_for_athrow(__ pc());
__ lea(exceptionPC->as_register(), pc_for_athrow);
add_call_info(pc_for_athrow_offset, info); // for exception handler
// get current pc information
// pc is only needed if the method has an exception handler, the unwind code does not need it.
int pc_for_athrow_offset = __ offset();
InternalAddress pc_for_athrow(__ pc());
__ lea(exceptionPC->as_register(), pc_for_athrow);
add_call_info(pc_for_athrow_offset, info); // for exception handler

__ verify_not_null_oop(rax);
// search an exception handler (rax: exception oop, rdx: throwing pc)
if (compilation()->has_fpu_code()) {
unwind_id = Runtime1::handle_exception_id;
} else {
unwind_id = Runtime1::handle_exception_nofpu_id;
}
__ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
__ verify_not_null_oop(rax);
// search an exception handler (rax: exception oop, rdx: throwing pc)
if (compilation()->has_fpu_code()) {
unwind_id = Runtime1::handle_exception_id;
} else {
// remove the activation
__ remove_frame(initial_frame_size_in_bytes());
__ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
unwind_id = Runtime1::handle_exception_nofpu_id;
}
__ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));

// enough room for two byte trap
__ nop();
}


void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
assert(exceptionOop->as_register() == rax, "must match");

__ jmp(_unwind_handler_entry);
}


void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {

// optimized version for linear scan:
@ -781,7 +781,7 @@ void Runtime1::generate_unwind_exception(StubAssembler *sasm) {

// Restore SP from BP if the exception PC is a MethodHandle call site.
NOT_LP64(__ get_thread(thread);)
__ cmpl(Address(thread, JavaThread::is_method_handle_exception_offset()), 0);
__ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
__ cmovptr(Assembler::notEqual, rsp, rbp);

// continue at exception handler (return address removed)

@ -80,7 +80,6 @@ define_pd_global(intx, CodeCacheExpansionSize, 32*K);
// Ergonomics related flags
define_pd_global(uint64_t,MaxRAM, 4ULL*G);
#endif // AMD64
define_pd_global(intx, OptoLoopAlignment, 16);
define_pd_global(intx, RegisterCostAreaRatio, 16000);

// Peephole and CISC spilling both break the graph, and so makes the

@ -45,6 +45,7 @@ define_pd_global(intx, CodeEntryAlignment, 32);
#else
define_pd_global(intx, CodeEntryAlignment, 16);
#endif // COMPILER2
define_pd_global(intx, OptoLoopAlignment, 16);
define_pd_global(intx, InlineFrequencyCount, 100);
define_pd_global(intx, InlineSmallCode, 1000);


@ -115,8 +115,8 @@ void OptoRuntime::generate_exception_blob() {

// rax: exception handler for given <exception oop/exception pc>

// Restore SP from BP if the exception PC is a MethodHandle call.
__ cmpl(Address(rcx, JavaThread::is_method_handle_exception_offset()), 0);
// Restore SP from BP if the exception PC is a MethodHandle call site.
__ cmpl(Address(rcx, JavaThread::is_method_handle_return_offset()), 0);
__ cmovptr(Assembler::notEqual, rsp, rbp);

// We have a handler in rax, (could be deopt blob)

@ -3328,8 +3328,8 @@ void OptoRuntime::generate_exception_blob() {

// rax: exception handler

// Restore SP from BP if the exception PC is a MethodHandle call.
__ cmpl(Address(r15_thread, JavaThread::is_method_handle_exception_offset()), 0);
// Restore SP from BP if the exception PC is a MethodHandle call site.
__ cmpl(Address(r15_thread, JavaThread::is_method_handle_return_offset()), 0);
__ cmovptr(Assembler::notEqual, rsp, rbp);

// We have a handler in rax (could be deopt blob).
@ -430,7 +430,7 @@ class StubGenerator: public StubCodeGenerator {
__ verify_oop(exception_oop);

// Restore SP from BP if the exception PC is a MethodHandle call site.
__ cmpl(Address(thread, JavaThread::is_method_handle_exception_offset()), 0);
__ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
__ cmovptr(Assembler::notEqual, rsp, rbp);

// continue at exception handler (return address removed)
@ -812,7 +812,7 @@ class StubGenerator: public StubCodeGenerator {
Label L_copy_64_bytes_loop, L_copy_64_bytes, L_copy_8_bytes, L_exit;
// Copy 64-byte chunks
__ jmpb(L_copy_64_bytes);
__ align(16);
__ align(OptoLoopAlignment);
__ BIND(L_copy_64_bytes_loop);

if(UseUnalignedLoadStores) {
@ -874,7 +874,7 @@ class StubGenerator: public StubCodeGenerator {
Label L_copy_64_bytes_loop, L_copy_64_bytes, L_copy_8_bytes, L_exit;
// Copy 64-byte chunks
__ jmpb(L_copy_64_bytes);
__ align(16);
__ align(OptoLoopAlignment);
__ BIND(L_copy_64_bytes_loop);
__ movq(mmx0, Address(from, 0));
__ movq(mmx1, Address(from, 8));
@ -1144,7 +1144,7 @@ class StubGenerator: public StubCodeGenerator {
__ movl(Address(to, count, sf, 0), rdx);
__ jmpb(L_copy_8_bytes);

__ align(16);
__ align(OptoLoopAlignment);
// Move 8 bytes
__ BIND(L_copy_8_bytes_loop);
if (UseXMMForArrayCopy) {
@ -1235,7 +1235,7 @@ class StubGenerator: public StubCodeGenerator {
}
} else {
__ jmpb(L_copy_8_bytes);
__ align(16);
__ align(OptoLoopAlignment);
__ BIND(L_copy_8_bytes_loop);
__ fild_d(Address(from, 0));
__ fistp_d(Address(from, to_from, Address::times_1));
@ -1282,7 +1282,7 @@ class StubGenerator: public StubCodeGenerator {

__ jmpb(L_copy_8_bytes);

__ align(16);
__ align(OptoLoopAlignment);
__ BIND(L_copy_8_bytes_loop);
if (VM_Version::supports_mmx()) {
if (UseXMMForArrayCopy) {
@ -1454,7 +1454,7 @@ class StubGenerator: public StubCodeGenerator {
// Loop control:
// for (count = -count; count != 0; count++)
// Base pointers src, dst are biased by 8*count,to last element.
__ align(16);
__ align(OptoLoopAlignment);

__ BIND(L_store_element);
__ movptr(to_element_addr, elem); // store the oop

@ -871,9 +871,8 @@ class StubGenerator: public StubCodeGenerator {
}

address generate_fp_mask(const char *stub_name, int64_t mask) {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", stub_name);

__ align(16);
address start = __ pc();

__ emit_data64( mask, relocInfo::none );
@ -1268,7 +1267,7 @@ class StubGenerator: public StubCodeGenerator {
Label& L_copy_32_bytes, Label& L_copy_8_bytes) {
DEBUG_ONLY(__ stop("enter at entry label, not here"));
Label L_loop;
__ align(16);
__ align(OptoLoopAlignment);
__ BIND(L_loop);
if(UseUnalignedLoadStores) {
__ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
@ -1309,7 +1308,7 @@ class StubGenerator: public StubCodeGenerator {
Label& L_copy_32_bytes, Label& L_copy_8_bytes) {
DEBUG_ONLY(__ stop("enter at entry label, not here"));
Label L_loop;
__ align(16);
__ align(OptoLoopAlignment);
__ BIND(L_loop);
if(UseUnalignedLoadStores) {
__ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16));
@ -2229,7 +2228,7 @@ class StubGenerator: public StubCodeGenerator {
// Loop control:
// for (count = -count; count != 0; count++)
// Base pointers src, dst are biased by 8*(count-1),to last element.
__ align(16);
__ align(OptoLoopAlignment);

__ BIND(L_store_element);
__ store_heap_oop(to_element_addr, rax_oop); // store the oop
@ -1,5 +1,5 @@
//
// Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
// Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@ -6272,6 +6272,30 @@ instruct bytes_reverse_long(eRegL dst) %{
ins_pipe( ialu_reg_reg);
%}

instruct bytes_reverse_unsigned_short(eRegI dst) %{
match(Set dst (ReverseBytesUS dst));

format %{ "BSWAP $dst\n\t"
"SHR $dst,16\n\t" %}
ins_encode %{
__ bswapl($dst$$Register);
__ shrl($dst$$Register, 16);
%}
ins_pipe( ialu_reg );
%}

instruct bytes_reverse_short(eRegI dst) %{
match(Set dst (ReverseBytesS dst));

format %{ "BSWAP $dst\n\t"
"SAR $dst,16\n\t" %}
ins_encode %{
__ bswapl($dst$$Register);
__ sarl($dst$$Register, 16);
%}
ins_pipe( ialu_reg );
%}


//---------- Zeros Count Instructions ------------------------------------------

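The two instructs above implement ReverseBytesUS and ReverseBytesS with a 32-bit BSWAP followed by a 16-bit shift: logical (SHR) for the zero-extended result, arithmetic (SAR) for the sign-extended one. A hedged C++ model of the same trick, using a GCC/Clang intrinsic in place of the emitted BSWAP:

  #include <cassert>
  #include <cstdint>

  uint32_t reverse_bytes_us(uint32_t x) {
    x = __builtin_bswap32(x);   // GCC/Clang intrinsic; BSWAP on x86
    return x >> 16;             // logical shift: zero-extended
  }

  int32_t reverse_bytes_s(uint32_t x) {
    int32_t y = (int32_t)__builtin_bswap32(x);
    return y >> 16;             // arithmetic shift: sign-extended
  }

  int main() {
    assert(reverse_bytes_us(0x00001234u) == 0x3412u);
    assert(reverse_bytes_s(0x000012F0u) == (int32_t)0xFFFFF012);
  }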
@ -1,5 +1,5 @@
//
// Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved.
// Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@ -7371,6 +7371,30 @@ instruct bytes_reverse_long(rRegL dst) %{
ins_pipe( ialu_reg);
%}

instruct bytes_reverse_unsigned_short(rRegI dst) %{
match(Set dst (ReverseBytesUS dst));

format %{ "bswapl $dst\n\t"
"shrl $dst,16\n\t" %}
ins_encode %{
__ bswapl($dst$$Register);
__ shrl($dst$$Register, 16);
%}
ins_pipe( ialu_reg );
%}

instruct bytes_reverse_short(rRegI dst) %{
match(Set dst (ReverseBytesS dst));

format %{ "bswapl $dst\n\t"
"sar $dst,16\n\t" %}
ins_encode %{
__ bswapl($dst$$Register);
__ sarl($dst$$Register, 16);
%}
ins_pipe( ialu_reg );
%}

instruct loadI_reversed(rRegI dst, memory src) %{
match(Set dst (ReverseBytesI (LoadI src)));

@ -39,21 +39,9 @@

void CppInterpreter::normal_entry(methodOop method, intptr_t UNUSED, TRAPS) {
JavaThread *thread = (JavaThread *) THREAD;
ZeroStack *stack = thread->zero_stack();

// Adjust the caller's stack frame to accommodate any additional
// local variables we have contiguously with our parameters.
int extra_locals = method->max_locals() - method->size_of_parameters();
if (extra_locals > 0) {
if (extra_locals > stack->available_words()) {
Unimplemented();
}
for (int i = 0; i < extra_locals; i++)
stack->push(0);
}

// Allocate and initialize our frame.
InterpreterFrame *frame = InterpreterFrame::build(stack, method, thread);
InterpreterFrame *frame = InterpreterFrame::build(method, CHECK);
thread->push_zero_frame(frame);

// Execute those bytecodes!
@ -76,12 +64,6 @@ void CppInterpreter::main_loop(int recurse, TRAPS) {
intptr_t *result = NULL;
int result_slots = 0;

// Check we're not about to run out of stack
if (stack_overflow_imminent(thread)) {
CALL_VM_NOCHECK(InterpreterRuntime::throw_StackOverflowError(thread));
goto unwind_and_return;
}

while (true) {
// We can set up the frame anchor with everything we want at
// this point as we are thread_in_Java and no safepoints can
@ -123,9 +105,9 @@ void CppInterpreter::main_loop(int recurse, TRAPS) {
int monitor_words = frame::interpreter_frame_monitor_size();

// Allocate the space
if (monitor_words > stack->available_words()) {
Unimplemented();
}
stack->overflow_check(monitor_words, THREAD);
if (HAS_PENDING_EXCEPTION)
break;
stack->alloc(monitor_words * wordSize);

// Move the expression stack contents
@ -172,8 +154,6 @@ void CppInterpreter::main_loop(int recurse, TRAPS) {
}
}

unwind_and_return:

// Unwind the current frame
thread->pop_zero_frame();

@ -193,20 +173,13 @@ void CppInterpreter::native_entry(methodOop method, intptr_t UNUSED, TRAPS) {
ZeroStack *stack = thread->zero_stack();

// Allocate and initialize our frame
InterpreterFrame *frame = InterpreterFrame::build(stack, method, thread);
InterpreterFrame *frame = InterpreterFrame::build(method, CHECK);
thread->push_zero_frame(frame);
interpreterState istate = frame->interpreter_state();
intptr_t *locals = istate->locals();

// Check we're not about to run out of stack
if (stack_overflow_imminent(thread)) {
CALL_VM_NOCHECK(InterpreterRuntime::throw_StackOverflowError(thread));
goto unwind_and_return;
}

// Update the invocation counter
if ((UseCompiler || CountCompiledCalls) && !method->is_synchronized()) {
thread->set_do_not_unlock();
InvocationCounter *counter = method->invocation_counter();
counter->increment();
if (counter->reached_InvocationLimit()) {
@ -215,7 +188,6 @@ void CppInterpreter::native_entry(methodOop method, intptr_t UNUSED, TRAPS) {
if (HAS_PENDING_EXCEPTION)
goto unwind_and_return;
}
thread->clr_do_not_unlock();
}

// Lock if necessary
@ -266,9 +238,10 @@ void CppInterpreter::native_entry(methodOop method, intptr_t UNUSED, TRAPS) {
assert(function != NULL, "should be set if signature handler is");

// Build the argument list
if (handler->argument_count() * 2 > stack->available_words()) {
Unimplemented();
}
stack->overflow_check(handler->argument_count() * 2, THREAD);
if (HAS_PENDING_EXCEPTION)
goto unlock_unwind_and_return;

void **arguments;
void *mirror; {
arguments =
@ -505,9 +478,7 @@ void CppInterpreter::accessor_entry(methodOop method, intptr_t UNUSED, TRAPS) {
switch (entry->flag_state()) {
case ltos:
case dtos:
if (stack->available_words() < 1) {
Unimplemented();
}
stack->overflow_check(1, CHECK);
stack->alloc(wordSize);
break;
}
@ -603,39 +574,30 @@ void CppInterpreter::empty_entry(methodOop method, intptr_t UNUSED, TRAPS) {
stack->set_sp(stack->sp() + method->size_of_parameters());
}

bool CppInterpreter::stack_overflow_imminent(JavaThread *thread) {
// How is the ABI stack?
address stack_top = thread->stack_base() - thread->stack_size();
int free_stack = os::current_stack_pointer() - stack_top;
if (free_stack < StackShadowPages * os::vm_page_size()) {
return true;
InterpreterFrame *InterpreterFrame::build(const methodOop method, TRAPS) {
JavaThread *thread = (JavaThread *) THREAD;
ZeroStack *stack = thread->zero_stack();

// Calculate the size of the frame we'll build, including
// any adjustments to the caller's frame that we'll make.
int extra_locals = 0;
int monitor_words = 0;
int stack_words = 0;

if (!method->is_native()) {
extra_locals = method->max_locals() - method->size_of_parameters();
stack_words = method->max_stack();
}

// How is the Zero stack?
// Throwing a StackOverflowError involves a VM call, which means
// we need a frame on the stack. We should be checking here to
// ensure that methods we call have enough room to install the
// largest possible frame, but that's more than twice the size
// of the entire Zero stack we get by default, so we just check
// we have *some* space instead...
free_stack = thread->zero_stack()->available_words() * wordSize;
if (free_stack < StackShadowPages * os::vm_page_size()) {
return true;
if (method->is_synchronized()) {
monitor_words = frame::interpreter_frame_monitor_size();
}
stack->overflow_check(
extra_locals + header_words + monitor_words + stack_words, CHECK_NULL);

return false;
}

InterpreterFrame *InterpreterFrame::build(ZeroStack* stack,
const methodOop method,
JavaThread* thread) {
int monitor_words =
method->is_synchronized() ? frame::interpreter_frame_monitor_size() : 0;
int stack_words = method->is_native() ? 0 : method->max_stack();

if (header_words + monitor_words + stack_words > stack->available_words()) {
Unimplemented();
}
// Adjust the caller's stack frame to accommodate any additional
// local variables we have contiguously with our parameters.
for (int i = 0; i < extra_locals; i++)
stack->push(0);

intptr_t *locals;
if (method->is_native())
@ -814,14 +776,13 @@ InterpreterGenerator::InterpreterGenerator(StubQueue* code)

// Deoptimization helpers

InterpreterFrame *InterpreterFrame::build(ZeroStack* stack, int size) {
InterpreterFrame *InterpreterFrame::build(int size, TRAPS) {
ZeroStack *stack = ((JavaThread *) THREAD)->zero_stack();

int size_in_words = size >> LogBytesPerWord;
assert(size_in_words * wordSize == size, "unaligned");
assert(size_in_words >= header_words, "too small");

if (size_in_words > stack->available_words()) {
Unimplemented();
}
stack->overflow_check(size_in_words, CHECK_NULL);

stack->push(0); // next_frame, filled in later
intptr_t *fp = stack->sp();
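The refactoring above replaces scattered "if too big, Unimplemented()" checks with one overflow_check() that every frame builder funnels through and that can raise a real StackOverflowError. A hedged, self-contained C++ sketch of the pattern; the types and exception are illustrative stand-ins for the VM's ZeroStack and TRAPS machinery:

  #include <cstddef>
  #include <stdexcept>
  #include <vector>

  struct ZeroStackSketch {
    std::vector<long> words;
    std::size_t sp = 0;

    std::size_t available_words() const { return words.size() - sp; }

    // One central check; callers no longer hit a dead end on overflow.
    void overflow_check(std::size_t required_words) {
      if (required_words > available_words())
        throw std::runtime_error("StackOverflowError");  // stands in for the VM throw
    }

    void alloc(std::size_t n) { overflow_check(n); sp += n; }
  };

  int main() {
    ZeroStackSketch s;
    s.words.resize(16);
    s.alloc(8);                                            // fits
    try { s.alloc(16); } catch (const std::runtime_error&) { /* expected */ }
  }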
@ -38,10 +38,6 @@
// Main loop of normal_entry
static void main_loop(int recurse, TRAPS);

private:
// Stack overflow checks
static bool stack_overflow_imminent(JavaThread *thread);

private:
// Fast result type determination
static BasicType result_type_of(methodOop method);

@ -1,6 +1,6 @@
/*
* Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2008 Red Hat, Inc.
* Copyright 2008, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -47,10 +47,10 @@ class EntryFrame : public ZeroFrame {
};

public:
static EntryFrame *build(ZeroStack* stack,
const intptr_t* parameters,
static EntryFrame *build(const intptr_t* parameters,
int parameter_words,
JavaCallWrapper* call_wrapper);
JavaCallWrapper* call_wrapper,
TRAPS);
public:
JavaCallWrapper *call_wrapper() const {
return (JavaCallWrapper *) value_of_word(call_wrapper_off);

@ -1,6 +1,6 @@
/*
* Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2008 Red Hat, Inc.
* Copyright 2008, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -42,7 +42,7 @@ class FakeStubFrame : public ZeroFrame {
};

public:
static FakeStubFrame *build(ZeroStack* stack);
static FakeStubFrame *build(TRAPS);

public:
void identify_word(int frame_index,

@ -35,6 +35,7 @@ define_pd_global(bool, ImplicitNullChecks, true);
define_pd_global(bool, UncommonNullCast, true);

define_pd_global(intx, CodeEntryAlignment, 32);
define_pd_global(intx, OptoLoopAlignment, 16);
define_pd_global(intx, InlineFrequencyCount, 100);
define_pd_global(intx, PreInflateSpin, 10);


@ -1,6 +1,6 @@
/*
* Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2008 Red Hat, Inc.
* Copyright 2008, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -55,10 +55,8 @@ class InterpreterFrame : public ZeroFrame {
};

public:
static InterpreterFrame *build(ZeroStack* stack,
const methodOop method,
JavaThread* thread);
static InterpreterFrame *build(ZeroStack* stack, int size);
static InterpreterFrame *build(const methodOop method, TRAPS);
static InterpreterFrame *build(int size, TRAPS);

public:
interpreterState interpreter_state() const {

@ -1,6 +1,6 @@
/*
* Copyright 2003-2005 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2007, 2008 Red Hat, Inc.
* Copyright 2007, 2008, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -140,9 +140,8 @@ IRT_ENTRY(address,
int required_words =
(align_size_up(sizeof(ffi_cif), wordSize) >> LogBytesPerWord) +
(method->is_static() ? 2 : 1) + method->size_of_parameters() + 1;
if (required_words > stack->available_words()) {
Unimplemented();
}

stack->overflow_check(required_words, CHECK_NULL);

intptr_t *buf = (intptr_t *) stack->alloc(required_words * wordSize);
SlowSignatureHandlerGenerator sshg(methodHandle(thread, method), buf);

@ -1,6 +1,6 @@
/*
* Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2009 Red Hat, Inc.
* Copyright 2009, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,4 +23,10 @@
*
*/

// This file is intentionally empty
#include "incls/_precompiled.incl"
#include "incls/_methodHandles_zero.cpp.incl"

void MethodHandles::generate_method_handle_stub(MacroAssembler* masm,
MethodHandles::EntryKind ek) {
ShouldNotCallThis();
}
73
hotspot/src/cpu/zero/vm/stack_zero.cpp
Normal file
@ -0,0 +1,73 @@
/*
* Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/

#include "incls/_precompiled.incl"
#include "incls/_stack_zero.cpp.incl"

void ZeroStack::handle_overflow(TRAPS) {
JavaThread *thread = (JavaThread *) THREAD;

// Set up the frame anchor if it isn't already
bool has_last_Java_frame = thread->has_last_Java_frame();
if (!has_last_Java_frame) {
ZeroFrame *frame = thread->top_zero_frame();
while (frame) {
if (frame->is_shark_frame())
break;

if (frame->is_interpreter_frame()) {
interpreterState istate =
frame->as_interpreter_frame()->interpreter_state();
if (istate->self_link() == istate)
break;
}

frame = frame->next();
}

if (frame == NULL)
fatal("unrecoverable stack overflow");

thread->set_last_Java_frame(frame);
}

// Throw the exception
switch (thread->thread_state()) {
case _thread_in_Java:
InterpreterRuntime::throw_StackOverflowError(thread);
break;

case _thread_in_vm:
Exceptions::throw_stack_overflow_exception(thread, __FILE__, __LINE__);
break;

default:
ShouldNotReachHere();
}

// Reset the frame anchor if necessary
if (!has_last_Java_frame)
thread->reset_last_Java_frame();
}

@ -1,6 +1,6 @@
/*
* Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2008, 2009 Red Hat, Inc.
* Copyright 2008, 2009, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,9 +29,14 @@ class ZeroStack {
intptr_t *_top; // the word past the end of the stack
intptr_t *_sp; // the top word on the stack

private:
int _shadow_pages_size; // how much ABI stack must we keep free?

public:
ZeroStack()
: _base(NULL), _top(NULL), _sp(NULL) {}
: _base(NULL), _top(NULL), _sp(NULL) {
_shadow_pages_size = StackShadowPages * os::vm_page_size();
}

bool needs_setup() const {
return _base == NULL;
@ -81,6 +86,14 @@ class ZeroStack {
return _sp -= count;
}

int shadow_pages_size() const {
return _shadow_pages_size;
}

public:
void overflow_check(int required_words, TRAPS);
static void handle_overflow(TRAPS);

public:
static ByteSize base_offset() {
return byte_offset_of(ZeroStack, _base);

43
hotspot/src/cpu/zero/vm/stack_zero.inline.hpp
Normal file
@ -0,0 +1,43 @@
/*
* Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/

// This function should match SharkStack::CreateStackOverflowCheck
inline void ZeroStack::overflow_check(int required_words, TRAPS) {
JavaThread *thread = (JavaThread *) THREAD;

// Check the Zero stack
if (required_words > available_words()) {
handle_overflow(THREAD);
return;
}

// Check the ABI stack
address stack_top = thread->stack_base() - thread->stack_size();
int free_stack = ((address) &stack_top) - stack_top;
if (free_stack < shadow_pages_size()) {
handle_overflow(THREAD);
return;
}
}
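The inline check above estimates free ABI stack by taking the address of a local variable (which lives near the current stack pointer) and subtracting the stack limit. A hedged user-space illustration of the same idea, assuming a downward-growing stack as on the platforms this code targets; the limit here is a pretend value, not the VM's stack_base()/stack_size():

  #include <cstdint>
  #include <cstdio>

  static std::uintptr_t stack_limit;  // the VM derives this from the thread's stack bounds

  bool abi_stack_ok(std::uintptr_t shadow_bytes) {
    char probe;                                          // lives on the current stack
    std::uintptr_t sp = reinterpret_cast<std::uintptr_t>(&probe);
    return sp - stack_limit >= shadow_bytes;             // distance down to the limit
  }

  int main() {
    char base;
    stack_limit = reinterpret_cast<std::uintptr_t>(&base) - 512 * 1024;  // pretend 512 KiB stack
    std::printf("ok: %d\n", abi_stack_ok(64 * 1024));
  }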
@ -1,6 +1,6 @@
/*
* Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2007, 2008 Red Hat, Inc.
* Copyright 2007, 2008, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -60,37 +60,42 @@ class StubGenerator: public StubCodeGenerator {
}

// Allocate and initialize our frame
thread->push_zero_frame(
EntryFrame::build(stack, parameters, parameter_words, call_wrapper));
EntryFrame *frame =
EntryFrame::build(parameters, parameter_words, call_wrapper, THREAD);

// Make the call
Interpreter::invoke_method(method, entry_point, THREAD);

// Store result depending on type
if (!HAS_PENDING_EXCEPTION) {
switch (result_type) {
case T_INT:
*(jint *) result = *(jint *) stack->sp();
break;
case T_LONG:
*(jlong *) result = *(jlong *) stack->sp();
break;
case T_FLOAT:
*(jfloat *) result = *(jfloat *) stack->sp();
break;
case T_DOUBLE:
*(jdouble *) result = *(jdouble *) stack->sp();
break;
case T_OBJECT:
*(oop *) result = *(oop *) stack->sp();
break;
default:
ShouldNotReachHere();
}
}
// Push the frame
thread->push_zero_frame(frame);

// Unwind our frame
thread->pop_zero_frame();
// Make the call
Interpreter::invoke_method(method, entry_point, THREAD);

// Store the result
if (!HAS_PENDING_EXCEPTION) {
switch (result_type) {
case T_INT:
*(jint *) result = *(jint *) stack->sp();
break;
case T_LONG:
*(jlong *) result = *(jlong *) stack->sp();
break;
case T_FLOAT:
*(jfloat *) result = *(jfloat *) stack->sp();
break;
case T_DOUBLE:
*(jdouble *) result = *(jdouble *) stack->sp();
break;
case T_OBJECT:
*(oop *) result = *(oop *) stack->sp();
break;
default:
ShouldNotReachHere();
}
}

// Unwind the frame
thread->pop_zero_frame();
}

// Tear down the stack if necessary
if (stack_needs_teardown)
@ -226,13 +231,13 @@ void StubGenerator_generate(CodeBuffer* code, bool all) {
StubGenerator g(code, all);
}

EntryFrame *EntryFrame::build(ZeroStack* stack,
const intptr_t* parameters,
EntryFrame *EntryFrame::build(const intptr_t* parameters,
int parameter_words,
JavaCallWrapper* call_wrapper) {
if (header_words + parameter_words > stack->available_words()) {
Unimplemented();
}
JavaCallWrapper* call_wrapper,
TRAPS) {

ZeroStack *stack = ((JavaThread *) THREAD)->zero_stack();
stack->overflow_check(header_words + parameter_words, CHECK_NULL);

stack->push(0); // next_frame, filled in later
intptr_t *fp = stack->sp();
@ -1,6 +1,6 @@
/*
* Copyright 2003-2005 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2007, 2008, 2009 Red Hat, Inc.
* Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -41,6 +41,10 @@
code_size2 = 0 // if these are too small. Simply increase
}; // them if that happens.

enum method_handles_platform_dependent_constants {
method_handles_adapters_code_size = 0
};

#ifdef IA32
class x86 {
friend class VMStructs;

@ -192,7 +192,8 @@ int LinuxAttachListener::init() {
res = ::bind(listener, (struct sockaddr*)&addr, sizeof(addr));
}
if (res == -1) {
sprintf(path, "%s/.java_pid%d", os::get_temp_directory(), os::current_process_id());
snprintf(path, PATH_MAX+1, "%s/.java_pid%d",
os::get_temp_directory(), os::current_process_id());
strcpy(addr.sun_path, path);
::unlink(path);
res = ::bind(listener, (struct sockaddr*)&addr, sizeof(addr));
@ -460,13 +461,14 @@ bool AttachListener::is_init_trigger() {
if (init_at_startup() || is_initialized()) {
return false; // initialized at startup or already initialized
}
char fn[32];
char fn[128];
sprintf(fn, ".attach_pid%d", os::current_process_id());
int ret;
struct stat64 st;
RESTARTABLE(::stat64(fn, &st), ret);
if (ret == -1) {
sprintf(fn, "/tmp/.attach_pid%d", os::current_process_id());
snprintf(fn, sizeof(fn), "%s/.attach_pid%d",
os::get_temp_directory(), os::current_process_id());
RESTARTABLE(::stat64(fn, &st), ret);
}
if (ret == 0) {
@ -1522,7 +1522,10 @@ int os::current_process_id() {

const char* os::dll_file_extension() { return ".so"; }

const char* os::get_temp_directory() { return "/tmp/"; }
const char* os::get_temp_directory() {
const char *prop = Arguments::get_property("java.io.tmpdir");
return prop == NULL ? "/tmp" : prop;
}

static bool file_exists(const char* filename) {
struct stat statbuf;
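The change above makes os::get_temp_directory() honor the java.io.tmpdir property instead of hard-coding "/tmp/"; note the new value has no trailing slash, which is why the call sites throughout this commit add "/" to their format strings. A hedged C++ sketch of the same lookup-with-fallback, using an environment variable as an illustrative stand-in for the VM's Arguments::get_property():

  #include <cstdio>
  #include <cstdlib>

  const char* get_temp_directory_sketch() {
    const char* prop = std::getenv("JAVA_IO_TMPDIR");  // stand-in for the VM property
    return prop == nullptr ? "/tmp" : prop;            // fallback, no trailing slash
  }

  int main() {
    std::printf("%s\n", get_temp_directory_sketch());
  }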
@ -2305,7 +2308,8 @@ void linux_wrap_code(char* base, size_t size) {
char buf[40];
int num = Atomic::add(1, &cnt);

sprintf(buf, "/tmp/hs-vm-%d-%d", os::current_process_id(), num);
snprintf(buf, sizeof(buf), "%s/hs-vm-%d-%d",
os::get_temp_directory(), os::current_process_id(), num);
unlink(buf);

int fd = open(buf, O_CREAT | O_RDWR, S_IRWXU);

@ -145,11 +145,11 @@ static char* get_user_tmp_dir(const char* user) {

const char* tmpdir = os::get_temp_directory();
const char* perfdir = PERFDATA_NAME;
size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 2;
size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 3;
char* dirname = NEW_C_HEAP_ARRAY(char, nbytes);

// construct the path name to user specific tmp directory
snprintf(dirname, nbytes, "%s%s_%s", tmpdir, perfdir, user);
snprintf(dirname, nbytes, "%s/%s_%s", tmpdir, perfdir, user);

return dirname;
}
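The hunk above grows the buffer by one byte and inserts "/" in the format because get_temp_directory() no longer ends with a slash. A hedged sketch of the sizing rule: strlen of every component, plus one byte each for the separator, the underscore, and the terminating NUL. Plain malloc stands in for NEW_C_HEAP_ARRAY:

  #include <cstdio>
  #include <cstdlib>
  #include <cstring>

  char* user_tmp_dir_sketch(const char* tmpdir, const char* perfdir, const char* user) {
    // "/" + "_" + NUL account for the +3.
    std::size_t nbytes = std::strlen(tmpdir) + std::strlen(perfdir) + std::strlen(user) + 3;
    char* dirname = static_cast<char*>(std::malloc(nbytes));
    std::snprintf(dirname, nbytes, "%s/%s_%s", tmpdir, perfdir, user);
    return dirname;  // caller frees
  }

  int main() {
    char* d = user_tmp_dir_sketch("/tmp", "hsperfdata", "alice");
    std::puts(d);    // /tmp/hsperfdata_alice
    std::free(d);
  }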
@ -331,8 +331,9 @@ static char* get_user_name_slow(int vmid, TRAPS) {
}

char* usrdir_name = NEW_C_HEAP_ARRAY(char,
strlen(tmpdirname) + strlen(dentry->d_name) + 1);
strlen(tmpdirname) + strlen(dentry->d_name) + 2);
strcpy(usrdir_name, tmpdirname);
strcat(usrdir_name, "/");
strcat(usrdir_name, dentry->d_name);

DIR* subdirp = os::opendir(usrdir_name);

@ -375,7 +375,8 @@ int SolarisAttachListener::create_door() {
return -1;
}

sprintf(door_path, "%s/.java_pid%d", os::get_temp_directory(), os::current_process_id());
snprintf(door_path, sizeof(door_path), "%s/.java_pid%d",
os::get_temp_directory(), os::current_process_id());
RESTARTABLE(::creat(door_path, S_IRUSR | S_IWUSR), fd);

if (fd == -1) {
@ -591,13 +592,14 @@ bool AttachListener::is_init_trigger() {
if (init_at_startup() || is_initialized()) {
return false; // initialized at startup or already initialized
}
char fn[32];
char fn[128];
sprintf(fn, ".attach_pid%d", os::current_process_id());
int ret;
struct stat64 st;
RESTARTABLE(::stat64(fn, &st), ret);
if (ret == -1) {
sprintf(fn, "/tmp/.attach_pid%d", os::current_process_id());
snprintf(fn, sizeof(fn), "%s/.attach_pid%d",
os::get_temp_directory(), os::current_process_id());
RESTARTABLE(::stat64(fn, &st), ret);
}
if (ret == 0) {

@ -676,15 +676,6 @@ bool os::have_special_privileges() {
}


static char* get_property(char* name, char* buffer, int buffer_size) {
if (os::getenv(name, buffer, buffer_size)) {
return buffer;
}
static char empty[] = "";
return empty;
}


void os::init_system_properties_values() {
char arch[12];
sysinfo(SI_ARCHITECTURE, arch, sizeof(arch));
@ -1826,7 +1817,10 @@ void os::set_error_file(const char *logfile) {}

const char* os::dll_file_extension() { return ".so"; }

const char* os::get_temp_directory() { return "/tmp/"; }
const char* os::get_temp_directory() {
const char *prop = Arguments::get_property("java.io.tmpdir");
return prop == NULL ? "/tmp" : prop;
}

static bool file_exists(const char* filename) {
struct stat statbuf;

@ -147,11 +147,11 @@ static char* get_user_tmp_dir(const char* user) {

const char* tmpdir = os::get_temp_directory();
const char* perfdir = PERFDATA_NAME;
size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 2;
size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 3;
char* dirname = NEW_C_HEAP_ARRAY(char, nbytes);

// construct the path name to user specific tmp directory
snprintf(dirname, nbytes, "%s%s_%s", tmpdir, perfdir, user);
snprintf(dirname, nbytes, "%s/%s_%s", tmpdir, perfdir, user);

return dirname;
}
@ -322,8 +322,9 @@ static char* get_user_name_slow(int vmid, TRAPS) {
}

char* usrdir_name = NEW_C_HEAP_ARRAY(char,
strlen(tmpdirname) + strlen(dentry->d_name) + 1);
strlen(tmpdirname) + strlen(dentry->d_name) + 2);
strcpy(usrdir_name, tmpdirname);
strcat(usrdir_name, "/");
strcat(usrdir_name, dentry->d_name);

DIR* subdirp = os::opendir(usrdir_name);

@ -998,15 +998,16 @@ os::closedir(DIR *dirp)

const char* os::dll_file_extension() { return ".dll"; }

const char * os::get_temp_directory()
{
static char path_buf[MAX_PATH];
if (GetTempPath(MAX_PATH, path_buf)>0)
return path_buf;
else{
path_buf[0]='\0';
return path_buf;
}
const char* os::get_temp_directory() {
const char *prop = Arguments::get_property("java.io.tmpdir");
if (prop != 0) return prop;
static char path_buf[MAX_PATH];
if (GetTempPath(MAX_PATH, path_buf)>0)
return path_buf;
else{
path_buf[0]='\0';
return path_buf;
}
}

static bool file_exists(const char* filename) {

@ -149,11 +149,11 @@ static char* get_user_tmp_dir(const char* user) {

const char* tmpdir = os::get_temp_directory();
const char* perfdir = PERFDATA_NAME;
size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 2;
size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 3;
char* dirname = NEW_C_HEAP_ARRAY(char, nbytes);

// construct the path name to user specific tmp directory
_snprintf(dirname, nbytes, "%s%s_%s", tmpdir, perfdir, user);
_snprintf(dirname, nbytes, "%s\\%s_%s", tmpdir, perfdir, user);

return dirname;
}
@ -318,8 +318,9 @@ static char* get_user_name_slow(int vmid) {
}

char* usrdir_name = NEW_C_HEAP_ARRAY(char,
strlen(tmpdirname) + strlen(dentry->d_name) + 1);
strlen(tmpdirname) + strlen(dentry->d_name) + 2);
strcpy(usrdir_name, tmpdirname);
strcat(usrdir_name, "\\");
strcat(usrdir_name, dentry->d_name);

DIR* subdirp = os::opendir(usrdir_name);
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright 2000-2007 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 2007, 2008, 2009 Red Hat, Inc.
|
||||
* Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -68,12 +68,13 @@
|
||||
|
||||
public:
|
||||
void set_last_Java_frame() {
|
||||
JavaFrameAnchor *jfa = frame_anchor();
|
||||
jfa->set_last_Java_sp((intptr_t *) top_zero_frame());
|
||||
set_last_Java_frame(top_zero_frame());
|
||||
}
|
||||
void reset_last_Java_frame() {
|
||||
JavaFrameAnchor *jfa = frame_anchor();
|
||||
jfa->set_last_Java_sp(NULL);
|
||||
set_last_Java_frame(NULL);
|
||||
}
|
||||
void set_last_Java_frame(ZeroFrame* frame) {
|
||||
frame_anchor()->set_last_Java_sp((intptr_t *) frame);
|
||||
}
|
||||
|
||||
private:
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 1998-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -3861,6 +3861,8 @@ int MatchRule::is_expensive() const {
|
||||
strcmp(opType,"RoundFloat")==0 ||
|
||||
strcmp(opType,"ReverseBytesI")==0 ||
|
||||
strcmp(opType,"ReverseBytesL")==0 ||
|
||||
strcmp(opType,"ReverseBytesUS")==0 ||
|
||||
strcmp(opType,"ReverseBytesS")==0 ||
|
||||
strcmp(opType,"Replicate16B")==0 ||
|
||||
strcmp(opType,"Replicate8B")==0 ||
|
||||
strcmp(opType,"Replicate4B")==0 ||
|
||||
|
@ -40,6 +40,7 @@ public:
|
||||
Exceptions, // Offset where exception handler lives
|
||||
Deopt, // Offset where deopt handler lives
|
||||
DeoptMH, // Offset where MethodHandle deopt handler lives
|
||||
UnwindHandler, // Offset to default unwind handler
|
||||
max_Entries };
|
||||
|
||||
// special value to note codeBlobs where profile (forte) stack walking is
|
||||
@ -59,6 +60,7 @@ public:
|
||||
_values[Exceptions ] = -1;
|
||||
_values[Deopt ] = -1;
|
||||
_values[DeoptMH ] = -1;
|
||||
_values[UnwindHandler ] = -1;
|
||||
}
|
||||
|
||||
int value(Entries e) { return _values[e]; }
|
||||
|
@ -229,6 +229,10 @@ void Compilation::emit_code_epilog(LIR_Assembler* assembler) {
|
||||
code_offsets->set_value(CodeOffsets::DeoptMH, assembler->emit_deopt_handler());
|
||||
CHECK_BAILOUT();
|
||||
|
||||
// Emit the handler to remove the activation from the stack and
|
||||
// dispatch to the caller.
|
||||
offsets()->set_value(CodeOffsets::UnwindHandler, assembler->emit_unwind_handler());
|
||||
|
||||
// done
|
||||
masm()->flush();
|
||||
}
|
||||
@ -312,7 +316,7 @@ void Compilation::install_code(int frame_size) {
|
||||
implicit_exception_table(),
|
||||
compiler(),
|
||||
_env->comp_level(),
|
||||
needs_debug_information(),
|
||||
true,
|
||||
has_unsafe_access()
|
||||
);
|
||||
}
|
||||
@ -445,8 +449,6 @@ Compilation::Compilation(AbstractCompiler* compiler, ciEnv* env, ciMethod* metho
|
||||
assert(_arena == NULL, "shouldn't only one instance of Compilation in existence at a time");
|
||||
_arena = Thread::current()->resource_area();
|
||||
_compilation = this;
|
||||
_needs_debug_information = _env->jvmti_can_examine_or_deopt_anywhere() ||
|
||||
JavaMonitorsInStackTrace || AlwaysEmitDebugInfo || DeoptimizeALot;
|
||||
_exception_info_list = new ExceptionInfoList();
|
||||
_implicit_exception_table.set_size(0);
|
||||
compile_method();
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -70,7 +70,6 @@ class Compilation: public StackObj {
|
||||
int _max_spills;
|
||||
FrameMap* _frame_map;
C1_MacroAssembler* _masm;
bool _needs_debug_information;
bool _has_exception_handlers;
bool _has_fpu_code;
bool _has_unsafe_access;
@ -117,7 +116,6 @@ class Compilation: public StackObj {
// accessors
ciEnv* env() const { return _env; }
AbstractCompiler* compiler() const { return _compiler; }
bool needs_debug_information() const { return _needs_debug_information; }
bool has_exception_handlers() const { return _has_exception_handlers; }
bool has_fpu_code() const { return _has_fpu_code; }
bool has_unsafe_access() const { return _has_unsafe_access; }
@ -132,7 +130,6 @@ class Compilation: public StackObj {
CodeOffsets* offsets() { return &_offsets; }

// setters
void set_needs_debug_information(bool f) { _needs_debug_information = f; }
void set_has_exception_handlers(bool f) { _has_exception_handlers = f; }
void set_has_fpu_code(bool f) { _has_fpu_code = f; }
void set_has_unsafe_access(bool f) { _has_unsafe_access = f; }
@ -829,12 +829,8 @@ void GraphBuilder::ScopeData::setup_jsr_xhandlers() {
// should be left alone since there can be only one and all code
// should dispatch to the same one.
XHandler* h = handlers->handler_at(i);
if (h->handler_bci() != SynchronizationEntryBCI) {
h->set_entry_block(block_at(h->handler_bci()));
} else {
assert(h->entry_block()->is_set(BlockBegin::default_exception_handler_flag),
"should be the synthetic unlock block");
}
assert(h->handler_bci() != SynchronizationEntryBCI, "must be real");
h->set_entry_block(block_at(h->handler_bci()));
}
_jsr_xhandlers = handlers;
}
@ -1497,7 +1493,6 @@ void GraphBuilder::access_field(Bytecodes::Code code) {

Dependencies* GraphBuilder::dependency_recorder() const {
assert(DeoptC1, "need debug information");
compilation()->set_needs_debug_information(true);
return compilation()->dependency_recorder();
}

@ -2867,19 +2862,6 @@ GraphBuilder::GraphBuilder(Compilation* compilation, IRScope* scope)
_initial_state = state_at_entry();
start_block->merge(_initial_state);

// setup an exception handler to do the unlocking and/or
// notification and unwind the frame.
BlockBegin* sync_handler = new BlockBegin(-1);
sync_handler->set(BlockBegin::exception_entry_flag);
sync_handler->set(BlockBegin::is_on_work_list_flag);
sync_handler->set(BlockBegin::default_exception_handler_flag);

ciExceptionHandler* desc = new ciExceptionHandler(method()->holder(), 0, method()->code_size(), -1, 0);
XHandler* h = new XHandler(desc);
h->set_entry_block(sync_handler);
scope_data()->xhandlers()->append(h);
scope_data()->set_has_handler();

// complete graph
_vmap = new ValueMap();
scope->compute_lock_stack_size();
@ -2930,19 +2912,6 @@ GraphBuilder::GraphBuilder(Compilation* compilation, IRScope* scope)
}
CHECK_BAILOUT();

if (sync_handler && sync_handler->state() != NULL) {
Value lock = NULL;
if (method()->is_synchronized()) {
lock = method()->is_static() ? new Constant(new InstanceConstant(method()->holder()->java_mirror())) :
_initial_state->local_at(0);

sync_handler->state()->unlock();
sync_handler->state()->lock(scope, lock);

}
fill_sync_handler(lock, sync_handler, true);
}

_start = setup_start_block(osr_bci, start_block, _osr_entry, _initial_state);

eliminate_redundant_phis(_start);
@ -1628,11 +1628,10 @@ LEAF(BlockBegin, StateSplit)
backward_branch_target_flag = 1 << 4,
is_on_work_list_flag = 1 << 5,
was_visited_flag = 1 << 6,
default_exception_handler_flag = 1 << 8, // identify block which represents the default exception handler
parser_loop_header_flag = 1 << 9, // set by parser to identify blocks where phi functions can not be created on demand
critical_edge_split_flag = 1 << 10, // set for all blocks that are introduced when critical edges are split
linear_scan_loop_header_flag = 1 << 11, // set during loop-detection for LinearScan
linear_scan_loop_end_flag = 1 << 12 // set during loop-detection for LinearScan
parser_loop_header_flag = 1 << 7, // set by parser to identify blocks where phi functions can not be created on demand
critical_edge_split_flag = 1 << 8, // set for all blocks that are introduced when critical edges are split
linear_scan_loop_header_flag = 1 << 9, // set during loop-detection for LinearScan
linear_scan_loop_end_flag = 1 << 10 // set during loop-detection for LinearScan
};

void set(Flag f) { _flags |= f; }
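An aside on the renumbering in the BlockBegin::Flag hunk above: with default_exception_handler_flag gone, every later flag shifts down one bit. The flags are single bits OR-ed into an int mask, so setting and testing stay one instruction each. A minimal sketch of the pattern (hypothetical class and flag names, not HotSpot's actual declarations):

#include <cstdio>

class FlaggedBlock {
 public:
  enum Flag {
    exception_entry_flag    = 1 << 0,
    is_on_work_list_flag    = 1 << 1,
    parser_loop_header_flag = 1 << 2  // renumbering only changes these constants
  };
  void set(Flag f)          { _flags |= f; }
  void clear(Flag f)        { _flags &= ~f; }
  bool is_set(Flag f) const { return (_flags & f) != 0; }
 private:
  int _flags = 0;
};

int main() {
  FlaggedBlock b;
  b.set(FlaggedBlock::is_on_work_list_flag);
  std::printf("%d\n", b.is_set(FlaggedBlock::is_on_work_list_flag)); // prints 1
  return 0;
}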
@ -626,8 +626,7 @@ void LIR_OpVisitState::visit(LIR_Op* op) {
break;
}

case lir_throw:
case lir_unwind: {
case lir_throw: {
assert(op->as_Op2() != NULL, "must be");
LIR_Op2* op2 = (LIR_Op2*)op;

@ -639,6 +638,17 @@ void LIR_OpVisitState::visit(LIR_Op* op) {
break;
}

case lir_unwind: {
assert(op->as_Op1() != NULL, "must be");
LIR_Op1* op1 = (LIR_Op1*)op;

assert(op1->_info == NULL, "no info");
assert(op1->_opr->is_valid(), "exception oop"); do_input(op1->_opr);
assert(op1->_result->is_illegal(), "no result");

break;
}


case lir_tan:
case lir_sin:
@ -801,6 +801,7 @@ enum LIR_Code {
, lir_monaddr
, lir_roundfp
, lir_safepoint
, lir_unwind
, end_op1
, begin_op2
, lir_cmp
@ -830,7 +831,6 @@ enum LIR_Code {
, lir_ushr
, lir_alloc_array
, lir_throw
, lir_unwind
, lir_compare_to
, end_op2
, begin_op3
@ -1827,8 +1827,12 @@ class LIR_List: public CompilationResourceObj {
void logical_xor (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_xor, left, right, dst)); }

void null_check(LIR_Opr opr, CodeEmitInfo* info) { append(new LIR_Op1(lir_null_check, opr, info)); }
void throw_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { append(new LIR_Op2(lir_throw, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info)); }
void unwind_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { append(new LIR_Op2(lir_unwind, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info)); }
void throw_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
append(new LIR_Op2(lir_throw, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info));
}
void unwind_exception(LIR_Opr exceptionOop) {
append(new LIR_Op1(lir_unwind, exceptionOop));
}

void compare_to (LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
append(new LIR_Op2(lir_compare_to, left, right, dst));
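The hunks above split lir_unwind out of lir_throw: a throw needs two inputs (exception PC and exception oop) plus CodeEmitInfo, while an unwind only needs the exception oop, so it becomes a one-operand LIR_Op1 with its own unwind_op entry point. A simplified sketch of the arity-based visitor dispatch, with hypothetical types standing in for the LIR classes:

#include <cassert>

enum Code { code_throw, code_unwind };

struct Op { Code code; int n_inputs; };

// Dispatch on op code and check the expected arity, as the visitor above does
// with as_Op2()/as_Op1().
void visit(const Op* op) {
  switch (op->code) {
    case code_throw:   // still two inputs: exception PC and exception oop
      assert(op->n_inputs == 2 && "must be");
      break;
    case code_unwind:  // now one input: only the exception oop, and no info
      assert(op->n_inputs == 1 && "must be");
      break;
  }
}

int main() {
  Op t = { code_throw, 2 };
  Op u = { code_unwind, 1 };
  visit(&t);
  visit(&u);
  return 0;
}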
@ -552,6 +552,10 @@ void LIR_Assembler::emit_op1(LIR_Op1* op) {
monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
break;

case lir_unwind:
unwind_op(op->in_opr());
break;

default:
Unimplemented();
break;
@ -707,8 +711,7 @@ void LIR_Assembler::emit_op2(LIR_Op2* op) {
break;

case lir_throw:
case lir_unwind:
throw_op(op->in_opr1(), op->in_opr2(), op->info(), op->code() == lir_unwind);
throw_op(op->in_opr1(), op->in_opr2(), op->info());
break;

default:
@ -39,6 +39,8 @@ class LIR_Assembler: public CompilationResourceObj {
Instruction* _pending_non_safepoint;
int _pending_non_safepoint_offset;

Label _unwind_handler_entry;

#ifdef ASSERT
BlockList _branch_target_blocks;
void check_no_unbound_labels();
@ -134,6 +136,7 @@ class LIR_Assembler: public CompilationResourceObj {

// code patterns
int emit_exception_handler();
int emit_unwind_handler();
void emit_exception_entries(ExceptionInfoList* info_list);
int emit_deopt_handler();

@ -217,7 +220,8 @@ class LIR_Assembler: public CompilationResourceObj {

void build_frame();

void throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info, bool unwind);
void throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info);
void unwind_op(LIR_Opr exceptionOop);
void monitor_address(int monitor_ix, LIR_Opr dst);

void align_backward_branch_target();
@ -1765,35 +1765,17 @@ void LIRGenerator::do_Throw(Throw* x) {
__ null_check(exception_opr, new CodeEmitInfo(info, true));
}

if (compilation()->env()->jvmti_can_post_on_exceptions() &&
!block()->is_set(BlockBegin::default_exception_handler_flag)) {
if (compilation()->env()->jvmti_can_post_on_exceptions()) {
// we need to go through the exception lookup path to get JVMTI
// notification done
unwind = false;
}

assert(!block()->is_set(BlockBegin::default_exception_handler_flag) || unwind,
"should be no more handlers to dispatch to");

if (compilation()->env()->dtrace_method_probes() &&
block()->is_set(BlockBegin::default_exception_handler_flag)) {
// notify that this frame is unwinding
BasicTypeList signature;
signature.append(T_INT); // thread
signature.append(T_OBJECT); // methodOop
LIR_OprList* args = new LIR_OprList();
args->append(getThreadPointer());
LIR_Opr meth = new_register(T_OBJECT);
__ oop2reg(method()->constant_encoding(), meth);
args->append(meth);
call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
}

// move exception oop into fixed register
__ move(exception_opr, exceptionOopOpr());

if (unwind) {
__ unwind_exception(LIR_OprFact::illegalOpr, exceptionOopOpr(), info);
__ unwind_exception(exceptionOopOpr());
} else {
__ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info);
}
@ -1,5 +1,5 @@
/*
* Copyright 2005-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2005-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -2608,12 +2608,17 @@ int LinearScan::append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeV
} else if (opr->is_double_xmm()) {
assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation");
VMReg rname_first = opr->as_xmm_double_reg()->as_VMReg();
# ifdef _LP64
first = new LocationValue(Location::new_reg_loc(Location::dbl, rname_first));
second = &_int_0_scope_value;
# else
first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
// %%% This is probably a waste but we'll keep things as they were for now
if (true) {
VMReg rname_second = rname_first->next();
second = new LocationValue(Location::new_reg_loc(Location::normal, rname_second));
}
# endif
#endif

} else if (opr->is_double_fpu()) {
@ -2639,13 +2644,17 @@ int LinearScan::append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeV
#endif

VMReg rname_first = frame_map()->fpu_regname(opr->fpu_regnrHi());

#ifdef _LP64
first = new LocationValue(Location::new_reg_loc(Location::dbl, rname_first));
second = &_int_0_scope_value;
#else
first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
// %%% This is probably a waste but we'll keep things as they were for now
if (true) {
VMReg rname_second = rname_first->next();
second = new LocationValue(Location::new_reg_loc(Location::normal, rname_second));
}
#endif

} else {
ShouldNotReachHere();
@ -2805,9 +2814,6 @@ IRScopeDebugInfo* LinearScan::compute_debug_info_for_scope(int op_id, IRScope* c


void LinearScan::compute_debug_info(CodeEmitInfo* info, int op_id) {
if (!compilation()->needs_debug_information()) {
return;
}
TRACE_LINEAR_SCAN(3, tty->print_cr("creating debug information at op_id %d", op_id));

IRScope* innermost_scope = info->scope();
@ -1,5 +1,5 @@
/*
* Copyright 2000-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -252,9 +252,6 @@
develop(bool, BailoutOnExceptionHandlers, false, \
"bailout of compilation for methods with exception handlers") \
\
develop(bool, AlwaysEmitDebugInfo, false, \
"always emit debug info") \
\
develop(bool, InstallMethods, true, \
"Install methods at the end of successful compilations") \
\
@ -1408,8 +1408,11 @@ BCEscapeAnalyzer::BCEscapeAnalyzer(ciMethod* method, BCEscapeAnalyzer* parent)
}

void BCEscapeAnalyzer::copy_dependencies(Dependencies *deps) {
if(!has_dependencies())
return;
if (ciEnv::current()->jvmti_can_hotswap_or_post_breakpoint()) {
// Also record evol dependencies so redefinition of the
// callee will trigger recompilation.
deps->assert_evol_method(method());
}
for (int i = 0; i < _dependencies.length(); i+=2) {
ciKlass *k = _dependencies[i]->as_klass();
ciMethod *m = _dependencies[i+1]->as_method();
@ -36,7 +36,7 @@ void ciConstant::print() {
basictype_to_str(basic_type()));
switch (basic_type()) {
case T_BOOLEAN:
tty->print("%s", bool_to_str(_value._int == 0));
tty->print("%s", bool_to_str(_value._int != 0));
break;
case T_CHAR:
case T_BYTE:
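The one-line ciConstant fix above corrects an inverted truth test: a JVM boolean is stored as an int, and nonzero means true, so the printable value must come from _value._int != 0. A tiny sketch of the corrected mapping:

#include <cstdio>

static const char* bool_to_str(bool b) { return b ? "true" : "false"; }

int main() {
  int raw = 1;                                 // JVM booleans are stored as 0 or 1
  std::printf("%s\n", bool_to_str(raw != 0));  // prints "true"; the old test printed "false"
  return 0;
}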
@ -176,7 +176,6 @@ void ciEnv::cache_jvmti_state() {
// Get Jvmti capabilities under lock to get consistent values.
MutexLocker mu(JvmtiThreadState_lock);
_jvmti_can_hotswap_or_post_breakpoint = JvmtiExport::can_hotswap_or_post_breakpoint();
_jvmti_can_examine_or_deopt_anywhere = JvmtiExport::can_examine_or_deopt_anywhere();
_jvmti_can_access_local_variables = JvmtiExport::can_access_local_variables();
_jvmti_can_post_on_exceptions = JvmtiExport::can_post_on_exceptions();
}
@ -887,8 +886,6 @@ void ciEnv::register_method(ciMethod* target,
if (!failing() &&
( (!jvmti_can_hotswap_or_post_breakpoint() &&
JvmtiExport::can_hotswap_or_post_breakpoint()) ||
(!jvmti_can_examine_or_deopt_anywhere() &&
JvmtiExport::can_examine_or_deopt_anywhere()) ||
(!jvmti_can_access_local_variables() &&
JvmtiExport::can_access_local_variables()) ||
(!jvmti_can_post_on_exceptions() &&
@ -55,7 +55,6 @@ private:

// Cache Jvmti state
bool _jvmti_can_hotswap_or_post_breakpoint;
bool _jvmti_can_examine_or_deopt_anywhere;
bool _jvmti_can_access_local_variables;
bool _jvmti_can_post_on_exceptions;

@ -257,7 +256,6 @@ public:
// Cache Jvmti state
void cache_jvmti_state();
bool jvmti_can_hotswap_or_post_breakpoint() const { return _jvmti_can_hotswap_or_post_breakpoint; }
bool jvmti_can_examine_or_deopt_anywhere() const { return _jvmti_can_examine_or_deopt_anywhere; }
bool jvmti_can_access_local_variables() const { return _jvmti_can_access_local_variables; }
bool jvmti_can_post_on_exceptions() const { return _jvmti_can_post_on_exceptions; }

@ -2956,8 +2956,8 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
#endif
bool compact_fields = CompactFields;
int allocation_style = FieldsAllocationStyle;
if( allocation_style < 0 || allocation_style > 1 ) { // Out of range?
assert(false, "0 <= FieldsAllocationStyle <= 1");
if( allocation_style < 0 || allocation_style > 2 ) { // Out of range?
assert(false, "0 <= FieldsAllocationStyle <= 2");
allocation_style = 1; // Optimistic
}

@ -2993,6 +2993,25 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
} else if( allocation_style == 1 ) {
// Fields order: longs/doubles, ints, shorts/chars, bytes, oops
next_nonstatic_double_offset = next_nonstatic_field_offset;
} else if( allocation_style == 2 ) {
// Fields allocation: oops fields in super and sub classes are together.
if( nonstatic_field_size > 0 && super_klass() != NULL &&
super_klass->nonstatic_oop_map_size() > 0 ) {
int map_size = super_klass->nonstatic_oop_map_size();
OopMapBlock* first_map = super_klass->start_of_nonstatic_oop_maps();
OopMapBlock* last_map = first_map + map_size - 1;
int next_offset = last_map->offset() + (last_map->count() * heapOopSize);
if (next_offset == next_nonstatic_field_offset) {
allocation_style = 0; // allocate oops first
next_nonstatic_oop_offset = next_nonstatic_field_offset;
next_nonstatic_double_offset = next_nonstatic_oop_offset +
(nonstatic_oop_count * heapOopSize);
}
}
if( allocation_style == 2 ) {
allocation_style = 1; // allocate oops last
next_nonstatic_double_offset = next_nonstatic_field_offset;
}
} else {
ShouldNotReachHere();
}
@ -1,5 +1,5 @@
/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -357,6 +357,8 @@
template(void_double_signature, "()D") \
template(int_void_signature, "(I)V") \
template(int_int_signature, "(I)I") \
template(char_char_signature, "(C)C") \
template(short_short_signature, "(S)S") \
template(int_bool_signature, "(I)Z") \
template(float_int_signature, "(F)I") \
template(double_long_signature, "(D)J") \
@ -584,6 +586,10 @@
do_intrinsic(_reverseBytes_i, java_lang_Integer, reverseBytes_name, int_int_signature, F_S) \
do_name( reverseBytes_name, "reverseBytes") \
do_intrinsic(_reverseBytes_l, java_lang_Long, reverseBytes_name, long_long_signature, F_S) \
/* (symbol reverseBytes_name defined above) */ \
do_intrinsic(_reverseBytes_c, java_lang_Character, reverseBytes_name, char_char_signature, F_S) \
/* (symbol reverseBytes_name defined above) */ \
do_intrinsic(_reverseBytes_s, java_lang_Short, reverseBytes_name, short_short_signature, F_S) \
/* (symbol reverseBytes_name defined above) */ \
\
do_intrinsic(_identityHashCode, java_lang_System, identityHashCode_name, object_int_signature, F_S) \
@ -1,5 +1,5 @@
/*
* Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -284,9 +284,11 @@ void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
}
#endif //PRODUCT
if (is_live)
if (is_live) {
// Perform cur->oops_do(f), maybe just once per nmethod.
f->do_code_blob(cur);
cur->fix_oop_relocations();
}
}

// Check for stray marks.
@ -685,6 +685,7 @@ nmethod::nmethod(
_exception_offset = 0;
_deoptimize_offset = 0;
_deoptimize_mh_offset = 0;
_unwind_handler_offset = -1;
_trap_offset = offsets->value(CodeOffsets::Dtrace_trap);
_orig_pc_offset = 0;
_stub_offset = data_offset();
@ -798,6 +799,11 @@ nmethod::nmethod(
_exception_offset = _stub_offset + offsets->value(CodeOffsets::Exceptions);
_deoptimize_offset = _stub_offset + offsets->value(CodeOffsets::Deopt);
_deoptimize_mh_offset = _stub_offset + offsets->value(CodeOffsets::DeoptMH);
if (offsets->value(CodeOffsets::UnwindHandler) != -1) {
_unwind_handler_offset = instructions_offset() + offsets->value(CodeOffsets::UnwindHandler);
} else {
_unwind_handler_offset = -1;
}
_consts_offset = instructions_offset() + code_buffer->total_offset_of(code_buffer->consts()->start());
_scopes_data_offset = data_offset();
_scopes_pcs_offset = _scopes_data_offset + round_to(debug_info->data_size (), oopSize);
@ -154,6 +154,9 @@ class nmethod : public CodeBlob {
// All deoptee's at a MethodHandle call site will resume execution
// at this location described by this offset.
int _deoptimize_mh_offset;
// Offset of the unwind handler if it exists
int _unwind_handler_offset;

#ifdef HAVE_DTRACE_H
int _trap_offset;
#endif // def HAVE_DTRACE_H
@ -341,6 +344,7 @@ class nmethod : public CodeBlob {
address exception_begin () const { return header_begin() + _exception_offset ; }
address deopt_handler_begin () const { return header_begin() + _deoptimize_offset ; }
address deopt_mh_handler_begin() const { return header_begin() + _deoptimize_mh_offset ; }
address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; }
address stub_begin () const { return header_begin() + _stub_offset ; }
address stub_end () const { return header_begin() + _consts_offset ; }
address consts_begin () const { return header_begin() + _consts_offset ; }
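The nmethod hunks above record the unwind handler as a byte offset from the nmethod header, using -1 as the "absent" sentinel that the accessor maps to NULL. A sketch of that accessor pattern on a simplified, hypothetical class:

typedef unsigned char* address;

class Blob {  // hypothetical stand-in for the nmethod fields shown above
 public:
  Blob(address header, int unwind_offset)
    : _header(header), _unwind_handler_offset(unwind_offset) {}

  address header_begin() const { return _header; }

  // -1 means "no unwind handler was emitted"; callers receive NULL and fall
  // back to the regular exception path.
  address unwind_handler_begin() const {
    return _unwind_handler_offset != -1 ? _header + _unwind_handler_offset : 0;
  }

 private:
  address _header;
  int     _unwind_handler_offset;
};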
@ -1414,9 +1414,14 @@ void CompileBroker::init_compiler_thread_log() {
intx thread_id = os::current_thread_id();
for (int try_temp_dir = 1; try_temp_dir >= 0; try_temp_dir--) {
const char* dir = (try_temp_dir ? os::get_temp_directory() : NULL);
if (dir == NULL) dir = "";
sprintf(fileBuf, "%shs_c" UINTX_FORMAT "_pid%u.log",
dir, thread_id, os::current_process_id());
if (dir == NULL) {
jio_snprintf(fileBuf, sizeof(fileBuf), "hs_c" UINTX_FORMAT "_pid%u.log",
thread_id, os::current_process_id());
} else {
jio_snprintf(fileBuf, sizeof(fileBuf),
"%s%shs_c" UINTX_FORMAT "_pid%u.log", dir,
os::file_separator(), thread_id, os::current_process_id());
}
fp = fopen(fileBuf, "at");
if (fp != NULL) {
file = NEW_C_HEAP_ARRAY(char, strlen(fileBuf)+1);
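The CompileBroker hunk above replaces an unbounded sprintf into a stack buffer with jio_snprintf, HotSpot's bounded formatter, and handles the missing-temp-directory case explicitly instead of substituting an empty prefix. A sketch of the same pattern, assuming standard snprintf and a fixed '/' separator in place of jio_snprintf and os::file_separator():

#include <cstdio>

// Bounded formatting of the log-file name; std::snprintf stands in for
// jio_snprintf and '/' for os::file_separator() (both assumptions).
void format_log_name(char* buf, unsigned long len, const char* dir,
                     long thread_id, int pid) {
  if (dir == 0) {
    std::snprintf(buf, len, "hs_c%ld_pid%d.log", thread_id, pid);
  } else {
    std::snprintf(buf, len, "%s/hs_c%ld_pid%d.log", dir, thread_id, pid);
  }
}

int main() {
  char buf[256];
  format_log_name(buf, sizeof(buf), "/tmp", 42L, 1234);
  std::puts(buf);  // /tmp/hs_c42_pid1234.log
  return 0;
}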
@ -1,5 +1,5 @@
/*
* Copyright (c) 2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2007-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,11 +32,10 @@ class ConcurrentMarkSweepPolicy : public TwoGenerationCollectorPolicy {
ConcurrentMarkSweepPolicy* as_concurrent_mark_sweep_policy() { return this; }

void initialize_gc_policy_counters();
#if 1

virtual void initialize_size_policy(size_t init_eden_size,
size_t init_promo_size,
size_t init_survivor_size);
#endif

// Returns true if the incremental mode is enabled.
virtual bool has_soft_ended_eden();
@ -1,5 +1,5 @@
/*
* Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1815,8 +1815,19 @@ NOT_PRODUCT(
do_compaction_work(clear_all_soft_refs);

// Has the GC time limit been exceeded?
check_gc_time_limit();

DefNewGeneration* young_gen = _young_gen->as_DefNewGeneration();
size_t max_eden_size = young_gen->max_capacity() -
young_gen->to()->capacity() -
young_gen->from()->capacity();
GenCollectedHeap* gch = GenCollectedHeap::heap();
GCCause::Cause gc_cause = gch->gc_cause();
size_policy()->check_gc_overhead_limit(_young_gen->used(),
young_gen->eden()->used(),
_cmsGen->max_capacity(),
max_eden_size,
full,
gc_cause,
gch->collector_policy());
} else {
do_mark_sweep_work(clear_all_soft_refs, first_state,
should_start_over);
@ -1828,55 +1839,6 @@ NOT_PRODUCT(
return;
}

void CMSCollector::check_gc_time_limit() {

// Ignore explicit GC's. Exiting here does not set the flag and
// does not reset the count. Updating of the averages for system
// GC's is still controlled by UseAdaptiveSizePolicyWithSystemGC.
GCCause::Cause gc_cause = GenCollectedHeap::heap()->gc_cause();
if (GCCause::is_user_requested_gc(gc_cause) ||
GCCause::is_serviceability_requested_gc(gc_cause)) {
return;
}

// Calculate the fraction of the CMS generation was freed during
// the last collection.
// Only consider the STW compacting cost for now.
//
// Note that the gc time limit test only works for the collections
// of the young gen + tenured gen and not for collections of the
// permanent gen. That is because the calculation of the space
// freed by the collection is the free space in the young gen +
// tenured gen.

double fraction_free =
((double)_cmsGen->free())/((double)_cmsGen->max_capacity());
if ((100.0 * size_policy()->compacting_gc_cost()) >
((double) GCTimeLimit) &&
((fraction_free * 100) < GCHeapFreeLimit)) {
size_policy()->inc_gc_time_limit_count();
if (UseGCOverheadLimit &&
(size_policy()->gc_time_limit_count() >
AdaptiveSizePolicyGCTimeLimitThreshold)) {
size_policy()->set_gc_time_limit_exceeded(true);
// Avoid consecutive OOM due to the gc time limit by resetting
// the counter.
size_policy()->reset_gc_time_limit_count();
if (PrintGCDetails) {
gclog_or_tty->print_cr(" GC is exceeding overhead limit "
"of %d%%", GCTimeLimit);
}
} else {
if (PrintGCDetails) {
gclog_or_tty->print_cr(" GC would exceed overhead limit "
"of %d%%", GCTimeLimit);
}
}
} else {
size_policy()->reset_gc_time_limit_count();
}
}

// Resize the perm generation and the tenured generation
// after obtaining the free list locks for the
// two generations.
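For reference, the overhead-limit trigger inside the deleted check_gc_time_limit() reduces to a two-part predicate: the compacting GC cost exceeds GCTimeLimit percent while the free fraction of the CMS generation is below GCHeapFreeLimit percent. A free-standing restatement, assuming both inputs are plain fractions in [0, 1]; the surviving logic lives in check_gc_overhead_limit:

// Free-standing restatement of the deleted trigger condition; inputs are
// assumed to be plain fractions in [0, 1].
bool gc_overhead_limit_near(double compacting_gc_cost,
                            double fraction_free,
                            double gc_time_limit_pct,     // cf. GCTimeLimit
                            double heap_free_limit_pct) { // cf. GCHeapFreeLimit
  return (100.0 * compacting_gc_cost) > gc_time_limit_pct &&
         (fraction_free * 100.0) < heap_free_limit_pct;
}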
@ -6182,6 +6144,11 @@ void CMSCollector::reset(bool asynch) {
}
curAddr = chunk.end();
}
// A successful mostly concurrent collection has been done.
// Because only the full (i.e., concurrent mode failure) collections
// are being measured for gc overhead limits, clean the "near" flag
// and count.
sp->reset_gc_overhead_limit_count();
_collectorState = Idling;
} else {
// already have the lock
@ -1,5 +1,5 @@
/*
* Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -570,10 +570,6 @@ class CMSCollector: public CHeapObj {
ConcurrentMarkSweepPolicy* _collector_policy;
ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }

// Check whether the gc time limit has been
// exceeded and set the size policy flag
// appropriately.
void check_gc_time_limit();
// XXX Move these to CMSStats ??? FIX ME !!!
elapsedTimer _inter_sweep_timer; // time between sweeps
elapsedTimer _intra_sweep_timer; // time _in_ sweeps
@ -1,5 +1,5 @@
/*
* Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -69,9 +69,9 @@ void ConcurrentG1RefineThread::sample_young_list_rs_lengths() {
G1CollectorPolicy* g1p = g1h->g1_policy();
if (g1p->adaptive_young_list_length()) {
int regions_visited = 0;
g1h->young_list_rs_length_sampling_init();
while (g1h->young_list_rs_length_sampling_more()) {
g1h->young_list_rs_length_sampling_next();
g1h->young_list()->rs_length_sampling_init();
while (g1h->young_list()->rs_length_sampling_more()) {
g1h->young_list()->rs_length_sampling_next();
++regions_visited;

// we try to yield every time we visit 10 regions
@ -162,6 +162,7 @@ void ConcurrentG1RefineThread::run() {
if (_worker_id >= cg1r()->worker_thread_num()) {
run_young_rs_sampling();
terminate();
return;
}

_vtime_start = os::elapsedVTime();
@ -297,6 +297,11 @@ void CMRegionStack::push(MemRegion mr) {
}
}

// Currently we do not call this at all. Normally we would call it
// during the concurrent marking / remark phases but we now call
// the lock-based version instead. But we might want to resurrect this
// code in the future. So, we'll leave it here commented out.
#if 0
MemRegion CMRegionStack::pop() {
while (true) {
// Otherwise...
@ -321,6 +326,41 @@ MemRegion CMRegionStack::pop() {
// Otherwise, we need to try again.
}
}
#endif // 0

void CMRegionStack::push_with_lock(MemRegion mr) {
assert(mr.word_size() > 0, "Precondition");
MutexLockerEx x(CMRegionStack_lock, Mutex::_no_safepoint_check_flag);

if (isFull()) {
_overflow = true;
return;
}

_base[_index] = mr;
_index += 1;
}

MemRegion CMRegionStack::pop_with_lock() {
MutexLockerEx x(CMRegionStack_lock, Mutex::_no_safepoint_check_flag);

while (true) {
if (_index == 0) {
return MemRegion();
}
_index -= 1;

MemRegion mr = _base[_index];
if (mr.start() != NULL) {
assert(mr.end() != NULL, "invariant");
assert(mr.word_size() > 0, "invariant");
return mr;
} else {
// that entry was invalidated... let's skip it
assert(mr.end() == NULL, "invariant");
}
}
}

bool CMRegionStack::invalidate_entries_into_cset() {
bool result = false;
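The lock-based push_with_lock/pop_with_lock above guard a fixed-capacity array stack with a mutex, and the pop loop skips entries whose start address was NULLed out by invalidation. A sketch of that shape using std::mutex in place of HotSpot's MutexLockerEx, with element and overflow handling simplified:

#include <cstddef>
#include <mutex>
#include <vector>

// Sketch of the lock-based bounded stack above; std::mutex replaces
// MutexLockerEx and a void* element replaces MemRegion (both assumptions).
class LockedRegionStack {
 public:
  explicit LockedRegionStack(std::size_t capacity) : _base(capacity), _index(0) {}

  bool push(void* mr) {  // the real code sets an _overflow flag instead
    std::lock_guard<std::mutex> x(_lock);
    if (_index == _base.size()) return false;
    _base[_index++] = mr;
    return true;
  }

  void* pop() {  // returns null when empty, skipping invalidated entries
    std::lock_guard<std::mutex> x(_lock);
    while (_index > 0) {
      void* mr = _base[--_index];
      if (mr != nullptr) return mr;  // invalidated entries have a null start
    }
    return nullptr;
  }

 private:
  std::mutex _lock;
  std::vector<void*> _base;
  std::size_t _index;
};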
@ -668,24 +708,46 @@ ConcurrentMark::~ConcurrentMark() {
//

void ConcurrentMark::clearNextBitmap() {
guarantee(!G1CollectedHeap::heap()->mark_in_progress(), "Precondition.");
G1CollectedHeap* g1h = G1CollectedHeap::heap();
G1CollectorPolicy* g1p = g1h->g1_policy();

// clear the mark bitmap (no grey objects to start with).
// We need to do this in chunks and offer to yield in between
// each chunk.
HeapWord* start = _nextMarkBitMap->startWord();
HeapWord* end = _nextMarkBitMap->endWord();
HeapWord* cur = start;
size_t chunkSize = M;
while (cur < end) {
HeapWord* next = cur + chunkSize;
if (next > end)
next = end;
MemRegion mr(cur,next);
_nextMarkBitMap->clearRange(mr);
cur = next;
do_yield_check();
}
// Make sure that the concurrent mark thread looks to still be in
// the current cycle.
guarantee(cmThread()->during_cycle(), "invariant");

// We are finishing up the current cycle by clearing the next
// marking bitmap and getting it ready for the next cycle. During
// this time no other cycle can start. So, let's make sure that this
// is the case.
guarantee(!g1h->mark_in_progress(), "invariant");

// clear the mark bitmap (no grey objects to start with).
// We need to do this in chunks and offer to yield in between
// each chunk.
HeapWord* start = _nextMarkBitMap->startWord();
HeapWord* end = _nextMarkBitMap->endWord();
HeapWord* cur = start;
size_t chunkSize = M;
while (cur < end) {
HeapWord* next = cur + chunkSize;
if (next > end)
next = end;
MemRegion mr(cur,next);
_nextMarkBitMap->clearRange(mr);
cur = next;
do_yield_check();

// Repeat the asserts from above. We'll do them as asserts here to
// minimize their overhead on the product. However, we'll have
// them as guarantees at the beginning / end of the bitmap
// clearing to get some checking in the product.
assert(cmThread()->during_cycle(), "invariant");
assert(!g1h->mark_in_progress(), "invariant");
}

// Repeat the asserts from above.
guarantee(cmThread()->during_cycle(), "invariant");
guarantee(!g1h->mark_in_progress(), "invariant");
}

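The rewritten clearNextBitmap above keeps the same chunked clearing loop: walk the bitmap range one chunk at a time, clear it, and offer to yield between chunks so a safepoint is never held off for the whole bitmap. A generic sketch of that loop, with a hypothetical bitmap API modeled on the clearRange call above:

// Generic restatement of the chunked clear-with-yield loop; BitMap is any
// type with clearRange(lo, hi) and YieldFn any callable (both hypothetical).
template <typename BitMap, typename YieldFn>
void clear_in_chunks(BitMap& bm, char* start, char* end,
                     long chunk, YieldFn do_yield_check) {
  for (char* cur = start; cur < end; ) {
    char* next = (end - cur > chunk) ? cur + chunk : end;
    bm.clearRange(cur, next);  // clear one chunk of the bitmap
    cur = next;
    do_yield_check();          // let a pending safepoint proceed between chunks
  }
}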
class NoteStartOfMarkHRClosure: public HeapRegionClosure {
@ -705,7 +767,8 @@ void ConcurrentMark::checkpointRootsInitialPre() {
_has_aborted = false;

if (G1PrintReachableAtInitialMark) {
print_reachable(true, "before");
print_reachable("at-cycle-start",
true /* use_prev_marking */, true /* all */);
}

// Initialise marking structures. This has to be done in a STW phase.
@ -1917,19 +1980,21 @@ void ConcurrentMark::checkpointRootsFinalWork() {

#ifndef PRODUCT

class ReachablePrinterOopClosure: public OopClosure {
class PrintReachableOopClosure: public OopClosure {
private:
G1CollectedHeap* _g1h;
CMBitMapRO* _bitmap;
outputStream* _out;
bool _use_prev_marking;
bool _all;

public:
ReachablePrinterOopClosure(CMBitMapRO* bitmap,
outputStream* out,
bool use_prev_marking) :
PrintReachableOopClosure(CMBitMapRO* bitmap,
outputStream* out,
bool use_prev_marking,
bool all) :
_g1h(G1CollectedHeap::heap()),
_bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking) { }
_bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking), _all(all) { }

void do_oop(narrowOop* p) { do_oop_work(p); }
void do_oop( oop* p) { do_oop_work(p); }
@ -1939,9 +2004,11 @@ public:
const char* str = NULL;
const char* str2 = "";

if (!_g1h->is_in_g1_reserved(obj))
str = "outside G1 reserved";
else {
if (obj == NULL) {
str = "";
} else if (!_g1h->is_in_g1_reserved(obj)) {
str = " O";
} else {
HeapRegion* hr = _g1h->heap_region_containing(obj);
guarantee(hr != NULL, "invariant");
bool over_tams = false;
@ -1950,74 +2017,67 @@ public:
} else {
over_tams = hr->obj_allocated_since_next_marking(obj);
}
bool marked = _bitmap->isMarked((HeapWord*) obj);

if (over_tams) {
str = "over TAMS";
if (_bitmap->isMarked((HeapWord*) obj)) {
str = " >";
if (marked) {
str2 = " AND MARKED";
}
} else if (_bitmap->isMarked((HeapWord*) obj)) {
str = "marked";
} else if (marked) {
str = " M";
} else {
str = "#### NOT MARKED ####";
str = " NOT";
}
}

_out->print_cr(" "PTR_FORMAT" contains "PTR_FORMAT" %s%s",
_out->print_cr(" "PTR_FORMAT": "PTR_FORMAT"%s%s",
p, (void*) obj, str, str2);
}
};

class ReachablePrinterClosure: public BitMapClosure {
class PrintReachableObjectClosure : public ObjectClosure {
private:
CMBitMapRO* _bitmap;
outputStream* _out;
bool _use_prev_marking;
bool _all;
HeapRegion* _hr;

public:
ReachablePrinterClosure(CMBitMapRO* bitmap,
outputStream* out,
bool use_prev_marking) :
_bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking) { }

bool do_bit(size_t offset) {
HeapWord* addr = _bitmap->offsetToHeapWord(offset);
ReachablePrinterOopClosure oopCl(_bitmap, _out, _use_prev_marking);

_out->print_cr(" obj "PTR_FORMAT", offset %10d (marked)", addr, offset);
oop(addr)->oop_iterate(&oopCl);
_out->print_cr("");

return true;
}
};

class ObjInRegionReachablePrinterClosure : public ObjectClosure {
private:
CMBitMapRO* _bitmap;
outputStream* _out;
bool _use_prev_marking;

public:
ObjInRegionReachablePrinterClosure(CMBitMapRO* bitmap,
outputStream* out,
bool use_prev_marking) :
_bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking) { }
PrintReachableObjectClosure(CMBitMapRO* bitmap,
outputStream* out,
bool use_prev_marking,
bool all,
HeapRegion* hr) :
_bitmap(bitmap), _out(out),
_use_prev_marking(use_prev_marking), _all(all), _hr(hr) { }

void do_object(oop o) {
ReachablePrinterOopClosure oopCl(_bitmap, _out, _use_prev_marking);
bool over_tams;
if (_use_prev_marking) {
over_tams = _hr->obj_allocated_since_prev_marking(o);
} else {
over_tams = _hr->obj_allocated_since_next_marking(o);
}
bool marked = _bitmap->isMarked((HeapWord*) o);
bool print_it = _all || over_tams || marked;

_out->print_cr(" obj "PTR_FORMAT" (over TAMS)", (void*) o);
o->oop_iterate(&oopCl);
_out->print_cr("");
if (print_it) {
_out->print_cr(" "PTR_FORMAT"%s",
o, (over_tams) ? " >" : (marked) ? " M" : "");
PrintReachableOopClosure oopCl(_bitmap, _out, _use_prev_marking, _all);
o->oop_iterate(&oopCl);
}
}
};

class RegionReachablePrinterClosure : public HeapRegionClosure {
class PrintReachableRegionClosure : public HeapRegionClosure {
private:
CMBitMapRO* _bitmap;
outputStream* _out;
bool _use_prev_marking;
bool _all;

public:
bool doHeapRegion(HeapRegion* hr) {
@ -2032,22 +2092,35 @@ public:
}
_out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" "
"TAMS: "PTR_FORMAT, b, e, t, p);
_out->print_cr("");
_out->cr();

ObjInRegionReachablePrinterClosure ocl(_bitmap, _out, _use_prev_marking);
hr->object_iterate_mem_careful(MemRegion(p, t), &ocl);
HeapWord* from = b;
HeapWord* to = t;

if (to > from) {
_out->print_cr("Objects in ["PTR_FORMAT", "PTR_FORMAT"]", from, to);
_out->cr();
PrintReachableObjectClosure ocl(_bitmap, _out,
_use_prev_marking, _all, hr);
hr->object_iterate_mem_careful(MemRegion(from, to), &ocl);
_out->cr();
}

return false;
}

RegionReachablePrinterClosure(CMBitMapRO* bitmap,
outputStream* out,
bool use_prev_marking) :
_bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking) { }
PrintReachableRegionClosure(CMBitMapRO* bitmap,
outputStream* out,
bool use_prev_marking,
bool all) :
_bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking), _all(all) { }
};

void ConcurrentMark::print_reachable(bool use_prev_marking, const char* str) {
gclog_or_tty->print_cr("== Doing reachable object dump... ");
void ConcurrentMark::print_reachable(const char* str,
bool use_prev_marking,
bool all) {
gclog_or_tty->cr();
gclog_or_tty->print_cr("== Doing heap dump... ");

if (G1PrintReachableBaseFile == NULL) {
gclog_or_tty->print_cr(" #### error: no base file defined");
@ -2082,19 +2155,14 @@ void ConcurrentMark::print_reachable(bool use_prev_marking, const char* str) {
out->print_cr("-- USING %s", (use_prev_marking) ? "PTAMS" : "NTAMS");
out->cr();

RegionReachablePrinterClosure rcl(bitmap, out, use_prev_marking);
out->print_cr("--- ITERATING OVER REGIONS WITH TAMS < TOP");
out->print_cr("--- ITERATING OVER REGIONS");
out->cr();
PrintReachableRegionClosure rcl(bitmap, out, use_prev_marking, all);
_g1h->heap_region_iterate(&rcl);
out->cr();

ReachablePrinterClosure cl(bitmap, out, use_prev_marking);
out->print_cr("--- ITERATING OVER MARKED OBJECTS ON THE BITMAP");
out->cr();
bitmap->iterate(&cl);
out->cr();

gclog_or_tty->print_cr(" done");
gclog_or_tty->flush();
}

#endif // PRODUCT
@ -3363,7 +3431,7 @@ void CMTask::drain_region_stack(BitMapClosure* bc) {
gclog_or_tty->print_cr("[%d] draining region stack, size = %d",
_task_id, _cm->region_stack_size());

MemRegion mr = _cm->region_stack_pop();
MemRegion mr = _cm->region_stack_pop_with_lock();
// it returns MemRegion() if the pop fails
statsOnly(if (mr.start() != NULL) ++_region_stack_pops );

@ -3384,7 +3452,7 @@ void CMTask::drain_region_stack(BitMapClosure* bc) {
if (has_aborted())
mr = MemRegion();
else {
mr = _cm->region_stack_pop();
mr = _cm->region_stack_pop_with_lock();
// it returns MemRegion() if the pop fails
statsOnly(if (mr.start() != NULL) ++_region_stack_pops );
}
@ -3417,7 +3485,7 @@ void CMTask::drain_region_stack(BitMapClosure* bc) {
}
// Now push the part of the region we didn't scan on the
// region stack to make sure a task scans it later.
_cm->region_stack_push(newRegion);
_cm->region_stack_push_with_lock(newRegion);
}
// break from while
mr = MemRegion();
@ -1,5 +1,5 @@
/*
* Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -252,9 +252,19 @@ public:
// with other "push" operations (no pops).
void push(MemRegion mr);

#if 0
// This is currently not used. See the comment in the .cpp file.

// Lock-free; assumes that it will only be called in parallel
// with other "pop" operations (no pushes).
MemRegion pop();
#endif // 0

// These two are the implementations that use a lock. They can be
// called concurrently with each other but they should not be called
// concurrently with the lock-free versions (push() / pop()).
void push_with_lock(MemRegion mr);
MemRegion pop_with_lock();

bool isEmpty() { return _index == 0; }
bool isFull() { return _index == _capacity; }
@ -540,6 +550,10 @@ public:

// Manipulation of the region stack
bool region_stack_push(MemRegion mr) {
// Currently we only call the lock-free version during evacuation
// pauses.
assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");

_regionStack.push(mr);
if (_regionStack.overflow()) {
set_has_overflown();
@ -547,7 +561,33 @@ public:
}
return true;
}
MemRegion region_stack_pop() { return _regionStack.pop(); }
#if 0
// Currently this is not used. See the comment in the .cpp file.
MemRegion region_stack_pop() { return _regionStack.pop(); }
#endif // 0

bool region_stack_push_with_lock(MemRegion mr) {
// Currently we only call the lock-based version during either
// concurrent marking or remark.
assert(!SafepointSynchronize::is_at_safepoint() || !concurrent(),
"if we are at a safepoint it should be the remark safepoint");

_regionStack.push_with_lock(mr);
if (_regionStack.overflow()) {
set_has_overflown();
return false;
}
return true;
}
MemRegion region_stack_pop_with_lock() {
// Currently we only call the lock-based version during either
// concurrent marking or remark.
assert(!SafepointSynchronize::is_at_safepoint() || !concurrent(),
"if we are at a safepoint it should be the remark safepoint");

return _regionStack.pop_with_lock();
}

int region_stack_size() { return _regionStack.size(); }
bool region_stack_overflow() { return _regionStack.overflow(); }
bool region_stack_empty() { return _regionStack.isEmpty(); }
@ -612,11 +652,24 @@ public:
// we do nothing.
void markAndGrayObjectIfNecessary(oop p);

// This iterates over the marking bitmap (either prev or next) and
// prints out all objects that are marked on the bitmap and indicates
// whether what they point to is also marked or not. It also iterates
// the objects over TAMS (either prev or next).
void print_reachable(bool use_prev_marking, const char* str);
// It iterates over the heap and for each object it comes across it
// will dump the contents of its reference fields, as well as
// liveness information for the object and its referents. The dump
// will be written to a file with the following name:
// G1PrintReachableBaseFile + "." + str. use_prev_marking decides
// whether the prev (use_prev_marking == true) or next
// (use_prev_marking == false) marking information will be used to
// determine the liveness of each object / referent. If all is true,
// all objects in the heap will be dumped, otherwise only the live
// ones. In the dump the following symbols / abbreviations are used:
// M : an explicitly live object (its bitmap bit is set)
// > : an implicitly live object (over tams)
// O : an object outside the G1 heap (typically: in the perm gen)
// NOT : a reference field whose referent is not live
// AND MARKED : indicates that an object is both explicitly and
// implicitly live (it should be one or the other, not both)
void print_reachable(const char* str,
bool use_prev_marking, bool all) PRODUCT_RETURN;

// Clear the next marking bitmap (will be called concurrently).
void clearNextBitmap();
@ -680,6 +733,19 @@ public:
// to determine whether any heap regions are located above the finger.
void registerCSetRegion(HeapRegion* hr);

// Registers the maximum region-end associated with a set of
// regions with CM. Again this is used to determine whether any
// heap regions are located above the finger.
void register_collection_set_finger(HeapWord* max_finger) {
// max_finger is the highest heap region end of the regions currently
// contained in the collection set. If this value is larger than
// _min_finger then we need to gray objects.
// This routine is like registerCSetRegion but for an entire
// collection of regions.
if (max_finger > _min_finger)
_should_gray_objects = true;
}

// Returns "true" if at least one mark has been completed.
bool at_least_one_mark_complete() { return _at_least_one_mark_complete; }

@ -42,8 +42,8 @@ class ConcurrentMarkThread: public ConcurrentGCThread {

private:
ConcurrentMark* _cm;
bool _started;
bool _in_progress;
volatile bool _started;
volatile bool _in_progress;

void sleepBeforeNextCycle();

@ -67,15 +67,25 @@ class ConcurrentMarkThread: public ConcurrentGCThread {
// Counting virtual time so far.
double vtime_count_accum() { return _vtime_count_accum; }

ConcurrentMark* cm() { return _cm; }
ConcurrentMark* cm() { return _cm; }

void set_started() { _started = true; }
void clear_started() { _started = false; }
bool started() { return _started; }
void set_started() { _started = true; }
void clear_started() { _started = false; }
bool started() { return _started; }

void set_in_progress() { _in_progress = true; }
void clear_in_progress() { _in_progress = false; }
bool in_progress() { return _in_progress; }
void set_in_progress() { _in_progress = true; }
void clear_in_progress() { _in_progress = false; }
bool in_progress() { return _in_progress; }

// This flag returns true from the moment a marking cycle is
// initiated (during the initial-mark pause when started() is set)
// to the moment when the cycle completes (just after the next
// marking bitmap has been cleared and in_progress() is
// cleared). While this flag is true we will not start another cycle
// so that cycles do not overlap. We cannot use just in_progress()
// as the CM thread might take some time to wake up before noticing
// that started() is set and set in_progress().
bool during_cycle() { return started() || in_progress(); }

// Yield for GC
void yield();
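Making _started and _in_progress volatile in the hunk above is about cross-thread visibility: the VM thread sets them and the concurrent mark thread polls them, and during_cycle() must report a cycle from the initial-mark pause until the next bitmap is cleared. A sketch of the same flag protocol using std::atomic<bool> as the portable stand-in for volatile:

#include <atomic>

class CycleFlags {
 public:
  void set_started()       { _started.store(true); }
  void clear_started()     { _started.store(false); }
  bool started() const     { return _started.load(); }

  void set_in_progress()   { _in_progress.store(true); }
  void clear_in_progress() { _in_progress.store(false); }
  bool in_progress() const { return _in_progress.load(); }

  // True from cycle initiation until the next marking bitmap is cleared, so
  // two cycles can never overlap even if the CM thread is slow to wake up.
  bool during_cycle() const { return started() || in_progress(); }

 private:
  std::atomic<bool> _started{false};
  std::atomic<bool> _in_progress{false};
};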
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -30,7 +30,7 @@ size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
|
||||
// turn it on so that the contents of the young list (scan-only /
|
||||
// to-be-collected) are printed at "strategic" points before / during
|
||||
// / after the collection --- this is useful for debugging
|
||||
#define SCAN_ONLY_VERBOSE 0
|
||||
#define YOUNG_LIST_VERBOSE 0
|
||||
// CURRENT STATUS
|
||||
// This file is under construction. Search for "FIXME".
|
||||
|
||||
@ -133,8 +133,7 @@ public:
|
||||
|
||||
YoungList::YoungList(G1CollectedHeap* g1h)
|
||||
: _g1h(g1h), _head(NULL),
|
||||
_scan_only_head(NULL), _scan_only_tail(NULL), _curr_scan_only(NULL),
|
||||
_length(0), _scan_only_length(0),
|
||||
_length(0),
|
||||
_last_sampled_rs_lengths(0),
|
||||
_survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0)
|
||||
{
|
||||
@ -166,48 +165,6 @@ void YoungList::add_survivor_region(HeapRegion* hr) {
|
||||
++_survivor_length;
|
||||
}
|
||||
|
||||
HeapRegion* YoungList::pop_region() {
|
||||
while (_head != NULL) {
|
||||
assert( length() > 0, "list should not be empty" );
|
||||
HeapRegion* ret = _head;
|
||||
_head = ret->get_next_young_region();
|
||||
ret->set_next_young_region(NULL);
|
||||
--_length;
|
||||
assert(ret->is_young(), "region should be very young");
|
||||
|
||||
// Replace 'Survivor' region type with 'Young'. So the region will
|
||||
// be treated as a young region and will not be 'confused' with
|
||||
// newly created survivor regions.
|
||||
if (ret->is_survivor()) {
|
||||
ret->set_young();
|
||||
}
|
||||
|
||||
if (!ret->is_scan_only()) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
// scan-only, we'll add it to the scan-only list
|
||||
if (_scan_only_tail == NULL) {
|
||||
guarantee( _scan_only_head == NULL, "invariant" );
|
||||
|
||||
_scan_only_head = ret;
|
||||
_curr_scan_only = ret;
|
||||
} else {
|
||||
guarantee( _scan_only_head != NULL, "invariant" );
|
||||
_scan_only_tail->set_next_young_region(ret);
|
||||
}
|
||||
guarantee( ret->get_next_young_region() == NULL, "invariant" );
|
||||
_scan_only_tail = ret;
|
||||
|
||||
// no need to be tagged as scan-only any more
|
||||
ret->set_young();
|
||||
|
||||
++_scan_only_length;
|
||||
}
|
||||
assert( length() == 0, "list should be empty" );
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void YoungList::empty_list(HeapRegion* list) {
|
||||
while (list != NULL) {
|
||||
HeapRegion* next = list->get_next_young_region();
|
||||
@ -225,12 +182,6 @@ void YoungList::empty_list() {
|
||||
_head = NULL;
|
||||
_length = 0;
|
||||
|
||||
empty_list(_scan_only_head);
|
||||
_scan_only_head = NULL;
|
||||
_scan_only_tail = NULL;
|
||||
_scan_only_length = 0;
|
||||
_curr_scan_only = NULL;
|
||||
|
||||
empty_list(_survivor_head);
|
||||
_survivor_head = NULL;
|
||||
_survivor_tail = NULL;
|
||||
@ -248,11 +199,11 @@ bool YoungList::check_list_well_formed() {
|
||||
HeapRegion* curr = _head;
|
||||
HeapRegion* last = NULL;
|
||||
while (curr != NULL) {
|
||||
if (!curr->is_young() || curr->is_scan_only()) {
|
||||
if (!curr->is_young()) {
|
||||
gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" "
|
||||
"incorrectly tagged (%d, %d)",
|
||||
"incorrectly tagged (y: %d, surv: %d)",
|
||||
curr->bottom(), curr->end(),
|
||||
curr->is_young(), curr->is_scan_only());
|
||||
curr->is_young(), curr->is_survivor());
|
||||
ret = false;
|
||||
}
|
||||
++length;
|
||||
@ -267,47 +218,10 @@ bool YoungList::check_list_well_formed() {
|
||||
length, _length);
|
||||
}
|
||||
|
||||
bool scan_only_ret = true;
|
||||
length = 0;
|
||||
curr = _scan_only_head;
|
||||
last = NULL;
|
||||
while (curr != NULL) {
|
||||
if (!curr->is_young() || curr->is_scan_only()) {
|
||||
gclog_or_tty->print_cr("### SCAN-ONLY REGION "PTR_FORMAT"-"PTR_FORMAT" "
|
||||
"incorrectly tagged (%d, %d)",
|
||||
curr->bottom(), curr->end(),
|
||||
curr->is_young(), curr->is_scan_only());
|
||||
scan_only_ret = false;
|
||||
}
|
||||
++length;
|
||||
last = curr;
|
||||
curr = curr->get_next_young_region();
|
||||
}
|
||||
scan_only_ret = scan_only_ret && (length == _scan_only_length);
|
||||
|
||||
if ( (last != _scan_only_tail) ||
|
||||
(_scan_only_head == NULL && _scan_only_tail != NULL) ||
|
||||
(_scan_only_head != NULL && _scan_only_tail == NULL) ) {
|
||||
gclog_or_tty->print_cr("## _scan_only_tail is set incorrectly");
|
||||
scan_only_ret = false;
|
||||
}
|
||||
|
||||
if (_curr_scan_only != NULL && _curr_scan_only != _scan_only_head) {
|
||||
gclog_or_tty->print_cr("### _curr_scan_only is set incorrectly");
|
||||
scan_only_ret = false;
|
||||
}
|
||||
|
||||
if (!scan_only_ret) {
|
||||
gclog_or_tty->print_cr("### SCAN-ONLY LIST seems not well formed!");
|
||||
gclog_or_tty->print_cr("### list has %d entries, _scan_only_length is %d",
|
||||
length, _scan_only_length);
|
||||
}
|
||||
|
||||
return ret && scan_only_ret;
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool YoungList::check_list_empty(bool ignore_scan_only_list,
|
||||
bool check_sample) {
|
||||
bool YoungList::check_list_empty(bool check_sample) {
|
||||
bool ret = true;
|
||||
|
||||
if (_length != 0) {
|
||||
@ -327,28 +241,7 @@ bool YoungList::check_list_empty(bool ignore_scan_only_list,
|
||||
gclog_or_tty->print_cr("### YOUNG LIST does not seem empty");
|
||||
}
|
||||
|
||||
if (ignore_scan_only_list)
|
||||
return ret;
|
||||
|
||||
bool scan_only_ret = true;
|
||||
if (_scan_only_length != 0) {
|
||||
gclog_or_tty->print_cr("### SCAN-ONLY LIST should have 0 length, not %d",
|
||||
_scan_only_length);
|
||||
scan_only_ret = false;
|
||||
}
|
||||
if (_scan_only_head != NULL) {
|
||||
gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL head");
|
||||
scan_only_ret = false;
|
||||
}
|
||||
if (_scan_only_tail != NULL) {
|
||||
gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL tail");
|
||||
scan_only_ret = false;
|
||||
}
|
||||
if (!scan_only_ret) {
|
||||
gclog_or_tty->print_cr("### SCAN-ONLY LIST does not seem empty");
|
||||
}
|
||||
|
||||
return ret && scan_only_ret;
|
||||
return ret;
|
||||
}
|
||||
|
||||
void
|
||||
@ -365,7 +258,18 @@ YoungList::rs_length_sampling_more() {
|
||||
void
|
||||
YoungList::rs_length_sampling_next() {
|
||||
assert( _curr != NULL, "invariant" );
|
||||
_sampled_rs_lengths += _curr->rem_set()->occupied();
|
||||
size_t rs_length = _curr->rem_set()->occupied();
|
||||
|
||||
_sampled_rs_lengths += rs_length;
|
||||
|
||||
// The current region may not yet have been added to the
|
||||
// incremental collection set (it gets added when it is
|
||||
// retired as the current allocation region).
|
||||
if (_curr->in_collection_set()) {
|
||||
// Update the collection set policy information for this region
|
||||
_g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length);
|
||||
}
|
||||
|
||||
_curr = _curr->get_next_young_region();
|
||||
if (_curr == NULL) {
|
||||
_last_sampled_rs_lengths = _sampled_rs_lengths;
|
||||
@@ -375,54 +279,46 @@ YoungList::rs_length_sampling_next() {

void
YoungList::reset_auxilary_lists() {
// We could have just "moved" the scan-only list to the young list.
// However, the scan-only list is ordered according to the region
// age in descending order, so, by moving one entry at a time, we
// ensure that it is recreated in ascending order.

guarantee( is_empty(), "young list should be empty" );
assert(check_list_well_formed(), "young list should be well formed");

// Add survivor regions to SurvRateGroup.
_g1h->g1_policy()->note_start_adding_survivor_regions();
_g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);

for (HeapRegion* curr = _survivor_head;
curr != NULL;
curr = curr->get_next_young_region()) {
_g1h->g1_policy()->set_region_survivors(curr);

// The region is a non-empty survivor so let's add it to
// the incremental collection set for the next evacuation
// pause.
_g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
}
_g1h->g1_policy()->note_stop_adding_survivor_regions();

_head = _survivor_head;
_length = _survivor_length;
if (_survivor_head != NULL) {
_head = _survivor_head;
_length = _survivor_length + _scan_only_length;
_survivor_tail->set_next_young_region(_scan_only_head);
} else {
_head = _scan_only_head;
_length = _scan_only_length;
assert(_survivor_tail != NULL, "cause it shouldn't be");
assert(_survivor_length > 0, "invariant");
_survivor_tail->set_next_young_region(NULL);
}

for (HeapRegion* curr = _scan_only_head;
curr != NULL;
curr = curr->get_next_young_region()) {
curr->recalculate_age_in_surv_rate_group();
}
_scan_only_head = NULL;
_scan_only_tail = NULL;
_scan_only_length = 0;
_curr_scan_only = NULL;
// Don't clear the survivor list handles until the start of
// the next evacuation pause - we need it in order to re-tag
// the survivor regions from this evacuation pause as 'young'
// at the start of the next.

_survivor_head = NULL;
_survivor_tail = NULL;
_survivor_length = 0;
_g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);

assert(check_list_well_formed(), "young list should be well formed");
}

void YoungList::print() {
HeapRegion* lists[] = {_head, _scan_only_head, _survivor_head};
const char* names[] = {"YOUNG", "SCAN-ONLY", "SURVIVOR"};
HeapRegion* lists[] = {_head, _survivor_head};
const char* names[] = {"YOUNG", "SURVIVOR"};

for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
@@ -431,7 +327,7 @@ void YoungList::print() {
gclog_or_tty->print_cr(" empty");
while (curr != NULL) {
gclog_or_tty->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
"age: %4d, y: %d, s-o: %d, surv: %d",
"age: %4d, y: %d, surv: %d",
curr->bottom(), curr->end(),
curr->top(),
curr->prev_top_at_mark_start(),
@@ -439,7 +335,6 @@ void YoungList::print() {
curr->top_at_conc_mark_count(),
curr->age_in_surv_rate_group_cond(),
curr->is_young(),
curr->is_scan_only(),
curr->is_survivor());
curr = curr->get_next_young_region();
}
@@ -707,6 +602,12 @@ G1CollectedHeap::attempt_allocation_slow(size_t word_size,
// region below.
if (_cur_alloc_region != NULL) {
// We're finished with the _cur_alloc_region.
// As we're building (at least the young portion of) the collection
// set incrementally we'll add the current allocation region to
// the collection set here.
if (_cur_alloc_region->is_young()) {
g1_policy()->add_region_to_incremental_cset_lhs(_cur_alloc_region);
}
_summary_bytes_used += _cur_alloc_region->used();
_cur_alloc_region = NULL;
}
@@ -820,6 +721,12 @@ void G1CollectedHeap::abandon_cur_alloc_region() {
_free_regions++;
free_region(_cur_alloc_region);
} else {
// As we're building (at least the young portion of) the collection
// set incrementally we'll add the current allocation region to
// the collection set here.
if (_cur_alloc_region->is_young()) {
g1_policy()->add_region_to_incremental_cset_lhs(_cur_alloc_region);
}
_summary_bytes_used += _cur_alloc_region->used();
}
_cur_alloc_region = NULL;
@@ -902,6 +809,10 @@ public:

void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
size_t word_size) {
if (GC_locker::check_active_before_gc()) {
return; // GC is disabled (e.g. JNI GetXXXCritical operation)
}

ResourceMark rm;

if (PrintHeapAtGC) {
@@ -909,16 +820,16 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
}

if (full && DisableExplicitGC) {
gclog_or_tty->print("\n\n\nDisabling Explicit GC\n\n\n");
return;
}

assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");

if (GC_locker::is_active()) {
return; // GC is disabled (e.g. JNI GetXXXCritical operation)
}
const bool do_clear_all_soft_refs = clear_all_soft_refs ||
collector_policy()->should_clear_all_soft_refs();

ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());

{
IsGCActiveMark x;
@@ -926,7 +837,8 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
// Timing
gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
TraceTime t(full ? "Full GC (System.gc())" : "Full GC", PrintGC, true, gclog_or_tty);
TraceTime t(full ? "Full GC (System.gc())" : "Full GC",
PrintGC, true, gclog_or_tty);

TraceMemoryManagerStats tms(true /* fullGC */);

@@ -970,6 +882,15 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
g1_rem_set()->as_HRInto_G1RemSet()->cleanupHRRS();
tear_down_region_lists();
set_used_regions_to_need_zero_fill();

// We may have added regions to the current incremental collection
// set between the last GC or pause and now. We need to clear the
// incremental collection set and then start rebuilding it afresh
// after this full GC.
abandon_collection_set(g1_policy()->inc_cset_head());
g1_policy()->clear_incremental_cset();
g1_policy()->stop_incremental_cset_building();

if (g1_policy()->in_young_gc_mode()) {
empty_young_list();
g1_policy()->set_full_young_gcs(true);
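The sequence just above (abandon, clear, stop) is the incremental-cset reset protocol around a full GC; building resumes after the collection. A hypothetical sketch of that state machine, with names mirroring the diff but bodies that are guesses:

    // Sketch only: the enum and method names follow the diff, the
    // bodies are assumptions about what the protocol maintains.
    enum CSetBuildState { Active, Inactive };

    struct IncCSet {
        CSetBuildState state = Inactive;
        void abandon_and_clear() { /* unlink regions, drop head/tail */ }
        void stop()  { state = Inactive; } // no additions during full GC
        void start() { state = Active;  }  // rebuild afresh afterwards
    };

    void full_gc(IncCSet& cset) {
        cset.abandon_and_clear();  // abandon + clear_incremental_cset
        cset.stop();               // stop_incremental_cset_building
        // ... do the full collection ...
        cset.start();              // start_incremental_cset_building
    }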
@@ -985,12 +906,12 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL);

ref_processor()->enable_discovery();
ref_processor()->setup_policy(clear_all_soft_refs);
ref_processor()->setup_policy(do_clear_all_soft_refs);

// Do collection work
{
HandleMark hm; // Discard invalid handles created during gc
G1MarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
G1MarkSweep::invoke_at_safepoint(ref_processor(), do_clear_all_soft_refs);
}
// Because freeing humongous regions may have added some unclean
// regions, it is necessary to tear down again before rebuilding.
@@ -1053,6 +974,15 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
perm()->compute_new_size();
}

// Start a new incremental collection set for the next pause
assert(g1_policy()->collection_set() == NULL, "must be");
g1_policy()->start_incremental_cset_building();

// Clear the _cset_fast_test bitmap in anticipation of adding
// regions to the incremental collection set for the next
// evacuation pause.
clear_cset_fast_test();

double end = os::elapsedTime();
g1_policy()->record_full_collection_end();

@@ -1071,7 +1001,9 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,

if (g1_policy()->in_young_gc_mode()) {
_young_list->reset_sampled_info();
assert( check_young_list_empty(false, false),
// At this point there should be no regions in the
// entire heap tagged as young.
assert( check_young_list_empty(true /* check_heap */),
"young list should be empty at this point");
}

@@ -1208,6 +1140,9 @@ G1CollectedHeap::satisfy_failed_allocation(size_t word_size) {
return result;
}

assert(!collector_policy()->should_clear_all_soft_refs(),
"Flag should have been handled and cleared prior to this point");

// What else? We might try synchronous finalization later. If the total
// space available is large enough for the allocation, then a more
// complete compaction phase than we've tried so far might be
@@ -1565,6 +1500,20 @@ jint G1CollectedHeap::initialize() {

_g1h = this;

_in_cset_fast_test_length = max_regions();
_in_cset_fast_test_base = NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length);

// We're biasing _in_cset_fast_test to avoid subtracting the
// beginning of the heap every time we want to index; basically
// it's the same as what we do with the card table.
_in_cset_fast_test = _in_cset_fast_test_base -
((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);

// Clear the _cset_fast_test bitmap in anticipation of adding
// regions to the incremental collection set for the first
// evacuation pause.
clear_cset_fast_test();

// Create the ConcurrentMark data structure and thread.
// (Must do this late, so that "max_regions" is defined.)
_cm = new ConcurrentMark(heap_rs, (int) max_regions());
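The biasing comment above describes a standard trick: subtract the heap base (scaled to region granularity) from the array pointer once, so the table can later be indexed directly with a shifted heap address, exactly as card tables are. A small standalone sketch, with a made-up region size:

    #include <cstdint>

    // Sketch of the biasing trick; the region size is an assumption.
    const size_t kLogRegionBytes = 20; // 1 MB regions (illustrative)

    bool* make_biased_table(bool* base, uintptr_t heap_start) {
        // After biasing, table[addr >> kLogRegionBytes] works directly
        // on a heap address, with no per-query subtraction of heap_start.
        return base - (heap_start >> kLogRegionBytes);
    }

    bool in_cset(const bool* biased, uintptr_t addr) {
        return biased[addr >> kLogRegionBytes];
    }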
@@ -2185,8 +2134,10 @@ public:
assert(o != NULL, "Huh?");
if (!_g1h->is_obj_dead_cond(o, _use_prev_marking)) {
o->oop_iterate(&isLive);
if (!_hr->obj_allocated_since_prev_marking(o))
_live_bytes += (o->size() * HeapWordSize);
if (!_hr->obj_allocated_since_prev_marking(o)) {
size_t obj_size = o->size(); // Make sure we don't overflow
_live_bytes += (obj_size * HeapWordSize);
}
}
}
size_t live_bytes() { return _live_bytes; }
@@ -2388,8 +2339,8 @@ void G1CollectedHeap::verify(bool allow_dirty,
print_on(gclog_or_tty, true /* extended */);
gclog_or_tty->print_cr("");
if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) {
concurrent_mark()->print_reachable(use_prev_marking,
"failed-verification");
concurrent_mark()->print_reachable("at-verification-failure",
use_prev_marking, false /* all */);
}
gclog_or_tty->flush();
}
@@ -2658,6 +2609,10 @@ struct PrepareForRSScanningClosure : public HeapRegionClosure {

void
G1CollectedHeap::do_collection_pause_at_safepoint() {
if (GC_locker::check_active_before_gc()) {
return; // GC is disabled (e.g. JNI GetXXXCritical operation)
}

if (PrintHeapAtGC) {
Universe::print_heap_before_gc();
}
@@ -2665,6 +2620,11 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
{
ResourceMark rm;

// This call will decide whether this pause is an initial-mark
// pause. If it is, during_initial_mark_pause() will return true
// for the duration of this pause.
g1_policy()->decide_on_conc_mark_initiation();

char verbose_str[128];
sprintf(verbose_str, "GC pause ");
if (g1_policy()->in_young_gc_mode()) {
@@ -2673,7 +2633,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
else
strcat(verbose_str, "(partial)");
}
if (g1_policy()->should_initiate_conc_mark())
if (g1_policy()->during_initial_mark_pause())
strcat(verbose_str, " (initial-mark)");

// if PrintGCDetails is on, we'll print long statistics information
@@ -2697,10 +2657,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
"young list should be well formed");
}

if (GC_locker::is_active()) {
return; // GC is disabled (e.g. JNI GetXXXCritical operation)
}

bool abandoned = false;
{ // Call to jvmpi::post_class_unload_events must occur outside of active GC
IsGCActiveMark x;
@@ -2736,27 +2692,21 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
double start_time_sec = os::elapsedTime();
size_t start_used_bytes = used();

#if YOUNG_LIST_VERBOSE
gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
_young_list->print();
g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
#endif // YOUNG_LIST_VERBOSE

g1_policy()->record_collection_pause_start(start_time_sec,
start_used_bytes);

guarantee(_in_cset_fast_test == NULL, "invariant");
guarantee(_in_cset_fast_test_base == NULL, "invariant");
_in_cset_fast_test_length = max_regions();
_in_cset_fast_test_base =
NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length);
memset(_in_cset_fast_test_base, false,
_in_cset_fast_test_length * sizeof(bool));
// We're biasing _in_cset_fast_test to avoid subtracting the
// beginning of the heap every time we want to index; basically
// it's the same as what we do with the card table.
_in_cset_fast_test = _in_cset_fast_test_base -
((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);

#if SCAN_ONLY_VERBOSE
#if YOUNG_LIST_VERBOSE
gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
_young_list->print();
#endif // SCAN_ONLY_VERBOSE
#endif // YOUNG_LIST_VERBOSE

if (g1_policy()->should_initiate_conc_mark()) {
if (g1_policy()->during_initial_mark_pause()) {
concurrent_mark()->checkpointRootsInitialPre();
}
save_marks();
@@ -2781,12 +2731,15 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
if (mark_in_progress())
concurrent_mark()->newCSet();

// Now choose the CS.
g1_policy()->choose_collection_set();
#if YOUNG_LIST_VERBOSE
gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
_young_list->print();
g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
#endif // YOUNG_LIST_VERBOSE

// We may abandon a pause if we find no region that will fit in the MMU
// pause.
bool abandoned = (g1_policy()->collection_set() == NULL);
// Now choose the CS. We may abandon a pause if we find no
// region that will fit in the MMU pause.
bool abandoned = g1_policy()->choose_collection_set();

// Nothing to do if we were unable to choose a collection set.
if (!abandoned) {
@@ -2804,40 +2757,64 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {

// Actually do the work...
evacuate_collection_set();

free_collection_set(g1_policy()->collection_set());
g1_policy()->clear_collection_set();

FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base);
// this is more for peace of mind; we're nulling them here and
// we're expecting them to be null at the beginning of the next GC
_in_cset_fast_test = NULL;
_in_cset_fast_test_base = NULL;

cleanup_surviving_young_words();

// Start a new incremental collection set for the next pause.
g1_policy()->start_incremental_cset_building();

// Clear the _cset_fast_test bitmap in anticipation of adding
// regions to the incremental collection set for the next
// evacuation pause.
clear_cset_fast_test();

if (g1_policy()->in_young_gc_mode()) {
_young_list->reset_sampled_info();
assert(check_young_list_empty(true),
"young list should be empty");

#if SCAN_ONLY_VERBOSE
// Don't check the whole heap at this point as the
// GC alloc regions from this pause have been tagged
// as survivors and moved on to the survivor list.
// Survivor regions will fail the !is_young() check.
assert(check_young_list_empty(false /* check_heap */),
"young list should be empty");

#if YOUNG_LIST_VERBOSE
gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
_young_list->print();
#endif // SCAN_ONLY_VERBOSE
#endif // YOUNG_LIST_VERBOSE

g1_policy()->record_survivor_regions(_young_list->survivor_length(),
_young_list->first_survivor_region(),
_young_list->last_survivor_region());

_young_list->reset_auxilary_lists();
}
} else {
if (_in_cset_fast_test != NULL) {
assert(_in_cset_fast_test_base != NULL, "Since _in_cset_fast_test isn't");
FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base);
// this is more for peace of mind; we're nulling them here and
// we're expecting them to be null at the beginning of the next GC
_in_cset_fast_test = NULL;
_in_cset_fast_test_base = NULL;
}
// We have abandoned the current collection. This can only happen
// if we're not doing young or partially young collections, and
// we didn't find an old region that we're able to collect within
// the allowed time.

assert(g1_policy()->collection_set() == NULL, "should be");
assert(_young_list->length() == 0, "because it should be");

// This should be a no-op.
abandon_collection_set(g1_policy()->inc_cset_head());

g1_policy()->clear_incremental_cset();
g1_policy()->stop_incremental_cset_building();

// Start a new incremental collection set for the next pause.
g1_policy()->start_incremental_cset_building();

// Clear the _cset_fast_test bitmap in anticipation of adding
// regions to the incremental collection set for the next
// evacuation pause.
clear_cset_fast_test();

// This looks confusing, because the DPT should really be empty
// at this point -- since we have not done any collection work,
// there should not be any derived pointers in the table to update;
@@ -2858,7 +2835,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
}

if (g1_policy()->in_young_gc_mode() &&
g1_policy()->should_initiate_conc_mark()) {
g1_policy()->during_initial_mark_pause()) {
concurrent_mark()->checkpointRootsInitialPost();
set_marking_started();
// CAUTION: after the doConcurrentMark() call below,
@@ -2871,9 +2848,11 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
doConcurrentMark();
}

#if SCAN_ONLY_VERBOSE
#if YOUNG_LIST_VERBOSE
gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
_young_list->print();
#endif // SCAN_ONLY_VERBOSE
g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
#endif // YOUNG_LIST_VERBOSE

double end_time_sec = os::elapsedTime();
double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
@@ -2931,12 +2910,34 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
}
}

size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
{
size_t gclab_word_size;
switch (purpose) {
case GCAllocForSurvived:
gclab_word_size = YoungPLABSize;
break;
case GCAllocForTenured:
gclab_word_size = OldPLABSize;
break;
default:
assert(false, "unknown GCAllocPurpose");
gclab_word_size = OldPLABSize;
break;
}
return gclab_word_size;
}
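The new desired_plab_sz() above keys the promotion-buffer size on the allocation purpose (survivor copies vs. tenured copies) instead of one global buffer size. A self-contained sketch of the same dispatch; the two size constants are assumptions standing in for the YoungPLABSize/OldPLABSize flags:

    #include <cstddef>

    enum AllocPurpose { ForSurvived, ForTenured, PurposeCount };

    const size_t kYoungPlabWords = 4096; // assumption, not the real default
    const size_t kOldPlabWords   = 1024; // assumption, not the real default

    size_t desired_plab_words(AllocPurpose p) {
        switch (p) {
        case ForSurvived: return kYoungPlabWords;
        case ForTenured:  return kOldPlabWords;
        default:          return kOldPlabWords; // conservative fallback
        }
    }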


void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) {
assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose");
// make sure we don't call set_gc_alloc_region() multiple times on
// the same region
assert(r == NULL || !r->is_gc_alloc_region(),
"shouldn't already be a GC alloc region");
assert(r == NULL || !r->isHumongous(),
"humongous regions shouldn't be used as GC alloc regions");

HeapWord* original_top = NULL;
if (r != NULL)
original_top = r->top();
@@ -3079,12 +3080,17 @@ void G1CollectedHeap::get_gc_alloc_regions() {

if (alloc_region->in_collection_set() ||
alloc_region->top() == alloc_region->end() ||
alloc_region->top() == alloc_region->bottom()) {
// we will discard the current GC alloc region if it's in the
// collection set (it can happen!), if it's already full (no
// point in using it), or if it's empty (this means that it
// was emptied during a cleanup and it should be on the free
// list now).
alloc_region->top() == alloc_region->bottom() ||
alloc_region->isHumongous()) {
// we will discard the current GC alloc region if
// * it's in the collection set (it can happen!),
// * it's already full (no point in using it),
// * it's empty (this means that it was emptied during
// a cleanup and it should be on the free list now), or
// * it's humongous (this means that it was emptied
// during a cleanup and was added to the free list, but
// has been subsequently used to allocate a humongous
// object that may be less than the region size).

alloc_region = NULL;
}
@@ -3096,6 +3102,11 @@ void G1CollectedHeap::get_gc_alloc_regions() {
} else {
// the region was retained from the last collection
++_gc_alloc_region_counts[ap];
if (G1PrintHeapRegions) {
gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
"top "PTR_FORMAT,
alloc_region->hrs_index(), alloc_region->bottom(), alloc_region->end(), alloc_region->top());
}
}

if (alloc_region != NULL) {
@@ -3652,6 +3663,8 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num)
_g1_rem(g1h->g1_rem_set()),
_hash_seed(17), _queue_num(queue_num),
_term_attempts(0),
_surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
_tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
_age_table(false),
#if G1_DETAILED_STATS
_pushes(0), _pops(0), _steals(0),
@@ -3678,6 +3691,9 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num)

_overflowed_refs = new OverflowQueue(10);

_alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
_alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer;

_start = os::elapsedTime();
}

@@ -3975,16 +3991,13 @@ public:

OopsInHeapRegionClosure *scan_root_cl;
OopsInHeapRegionClosure *scan_perm_cl;
OopsInHeapRegionClosure *scan_so_cl;

if (_g1h->g1_policy()->should_initiate_conc_mark()) {
if (_g1h->g1_policy()->during_initial_mark_pause()) {
scan_root_cl = &scan_mark_root_cl;
scan_perm_cl = &scan_mark_perm_cl;
scan_so_cl = &scan_mark_heap_rs_cl;
} else {
scan_root_cl = &only_scan_root_cl;
scan_perm_cl = &only_scan_perm_cl;
scan_so_cl = &only_scan_heap_rs_cl;
}

pss.start_strong_roots();
@@ -3992,7 +4005,6 @@ public:
SharedHeap::SO_AllClasses,
scan_root_cl,
&push_heap_rs_cl,
scan_so_cl,
scan_perm_cl,
i);
pss.end_strong_roots();
@@ -4054,7 +4066,6 @@ g1_process_strong_roots(bool collecting_perm_gen,
SharedHeap::ScanningOption so,
OopClosure* scan_non_heap_roots,
OopsInHeapRegionClosure* scan_rs,
OopsInHeapRegionClosure* scan_so,
OopsInGenClosure* scan_perm,
int worker_i) {
// First scan the strong roots, including the perm gen.
@@ -4074,6 +4085,7 @@ g1_process_strong_roots(bool collecting_perm_gen,
&buf_scan_non_heap_roots,
&eager_scan_code_roots,
&buf_scan_perm);

// Finish up any enqueued closure apps.
buf_scan_non_heap_roots.done();
buf_scan_perm.done();
@@ -4096,9 +4108,6 @@ g1_process_strong_roots(bool collecting_perm_gen,

// XXX What should this be doing in the parallel case?
g1_policy()->record_collection_pause_end_CH_strong_roots();
if (scan_so != NULL) {
scan_scan_only_set(scan_so, worker_i);
}
// Now scan the complement of the collection set.
if (scan_rs != NULL) {
g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i);
@@ -4111,54 +4120,6 @@ g1_process_strong_roots(bool collecting_perm_gen,
_process_strong_tasks->all_tasks_completed();
}

void
G1CollectedHeap::scan_scan_only_region(HeapRegion* r,
OopsInHeapRegionClosure* oc,
int worker_i) {
HeapWord* startAddr = r->bottom();
HeapWord* endAddr = r->used_region().end();

oc->set_region(r);

HeapWord* p = r->bottom();
HeapWord* t = r->top();
guarantee( p == r->next_top_at_mark_start(), "invariant" );
while (p < t) {
oop obj = oop(p);
p += obj->oop_iterate(oc);
}
}

void
G1CollectedHeap::scan_scan_only_set(OopsInHeapRegionClosure* oc,
int worker_i) {
double start = os::elapsedTime();

BufferingOopsInHeapRegionClosure boc(oc);

FilterInHeapRegionAndIntoCSClosure scan_only(this, &boc);
FilterAndMarkInHeapRegionAndIntoCSClosure scan_and_mark(this, &boc, concurrent_mark());

OopsInHeapRegionClosure *foc;
if (g1_policy()->should_initiate_conc_mark())
foc = &scan_and_mark;
else
foc = &scan_only;

HeapRegion* hr;
int n = 0;
while ((hr = _young_list->par_get_next_scan_only_region()) != NULL) {
scan_scan_only_region(hr, foc, worker_i);
++n;
}
boc.done();

double closure_app_s = boc.closure_app_seconds();
g1_policy()->record_obj_copy_time(worker_i, closure_app_s * 1000.0);
double ms = (os::elapsedTime() - start - closure_app_s)*1000.0;
g1_policy()->record_scan_only_time(worker_i, ms, n);
}

void
G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure,
OopClosure* non_root_closure) {
@@ -4357,17 +4318,14 @@ void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRe
class G1ParCleanupCTTask : public AbstractGangTask {
CardTableModRefBS* _ct_bs;
G1CollectedHeap* _g1h;
HeapRegion* volatile _so_head;
HeapRegion* volatile _su_head;
public:
G1ParCleanupCTTask(CardTableModRefBS* ct_bs,
G1CollectedHeap* g1h,
HeapRegion* scan_only_list,
HeapRegion* survivor_list) :
AbstractGangTask("G1 Par Cleanup CT Task"),
_ct_bs(ct_bs),
_g1h(g1h),
_so_head(scan_only_list),
_su_head(survivor_list)
{ }

@@ -4376,14 +4334,13 @@ public:
while (r = _g1h->pop_dirty_cards_region()) {
clear_cards(r);
}
// Redirty the cards of the scan-only and survivor regions.
dirty_list(&this->_so_head);
// Redirty the cards of the survivor regions.
dirty_list(&this->_su_head);
}

void clear_cards(HeapRegion* r) {
// Cards for Survivor and Scan-Only regions will be dirtied later.
if (!r->is_scan_only() && !r->is_survivor()) {
// Cards for Survivor regions will be dirtied later.
if (!r->is_survivor()) {
_ct_bs->clear(MemRegion(r->bottom(), r->end()));
}
}
@@ -4416,7 +4373,7 @@ public:
virtual bool doHeapRegion(HeapRegion* r)
{
MemRegion mr(r->bottom(), r->end());
if (r->is_scan_only() || r->is_survivor()) {
if (r->is_survivor()) {
_ct_bs->verify_dirty_region(mr);
} else {
_ct_bs->verify_clean_region(mr);
@@ -4432,8 +4389,8 @@ void G1CollectedHeap::cleanUpCardTable() {

// Iterate over the dirty cards region list.
G1ParCleanupCTTask cleanup_task(ct_bs, this,
_young_list->first_scan_only_region(),
_young_list->first_survivor_region());

if (ParallelGCThreads > 0) {
set_par_threads(workers()->total_workers());
workers()->run_task(&cleanup_task);
@@ -4449,12 +4406,12 @@ void G1CollectedHeap::cleanUpCardTable() {
}
r->set_next_dirty_cards_region(NULL);
}
// now, redirty the cards of the scan-only and survivor regions
// now, redirty the cards of the survivor regions
// (it seemed faster to do it this way, instead of iterating over
// all regions and then clearing / dirtying as appropriate)
dirtyCardsForYoungRegions(ct_bs, _young_list->first_scan_only_region());
dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region());
}

double elapsed = os::elapsedTime() - start;
g1_policy()->record_clear_ct_time( elapsed * 1000.0);
#ifndef PRODUCT
@@ -4475,6 +4432,11 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
double young_time_ms = 0.0;
double non_young_time_ms = 0.0;

// Since the collection set is a superset of the young list,
// all we need to do to clear the young list is clear its
// head and length, and unlink any young regions in the code below
_young_list->clear();

G1CollectorPolicy* policy = g1_policy();

double start_sec = os::elapsedTime();
@@ -4518,6 +4480,12 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
guarantee( (size_t)index < policy->young_cset_length(), "invariant" );
size_t words_survived = _surviving_young_words[index];
cur->record_surv_words_in_group(words_survived);

// At this point we have 'popped' cur from the collection set
// (linked via next_in_collection_set()) but it is still in the
// young list (linked via next_young_region()). Clear the
// _next_young_region field.
cur->set_next_young_region(NULL);
} else {
int index = cur->young_index_in_cset();
guarantee( index == -1, "invariant" );
@@ -4533,7 +4501,6 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
"Should not have empty regions in a CS.");
free_region(cur);
} else {
guarantee( !cur->is_scan_only(), "should not be scan only" );
cur->uninstall_surv_rate_group();
if (cur->is_young())
cur->set_young_index_in_cset(-1);
@@ -4557,6 +4524,27 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
policy->record_non_young_free_cset_time_ms(non_young_time_ms);
}

// This routine is similar to the above but does not record
// any policy statistics or update free lists; we are abandoning
// the current incremental collection set in preparation for a
// full collection. After the full GC we will start to build up
// the incremental collection set again.
// This is only called when we're doing a full collection
// and is immediately followed by the tearing down of the young list.

void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
HeapRegion* cur = cs_head;

while (cur != NULL) {
HeapRegion* next = cur->next_in_collection_set();
assert(cur->in_collection_set(), "bad CS");
cur->set_next_in_collection_set(NULL);
cur->set_in_collection_set(false);
cur->set_young_index_in_cset(-1);
cur = next;
}
}
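abandon_collection_set() above is a plain walk of the intrusive singly linked list threaded through the regions, unlinking each node and clearing its membership flags. A minimal standalone sketch of that pattern, with an illustrative node type in place of HeapRegion:

    #include <cstddef>

    struct Node {
        Node* next_in_cset;
        bool  in_cset;
        int   young_index;
    };

    void abandon(Node* head) {
        while (head != NULL) {
            Node* next = head->next_in_cset;
            head->next_in_cset = NULL; // unlink from the cset list
            head->in_cset = false;     // clear membership flag
            head->young_index = -1;    // forget the cset position
            head = next;
        }
    }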

HeapRegion*
G1CollectedHeap::alloc_region_from_unclean_list_locked(bool zero_filled) {
assert(ZF_mon->owned_by_self(), "Precondition");
@@ -4923,12 +4911,10 @@ public:
bool success() { return _success; }
};

bool G1CollectedHeap::check_young_list_empty(bool ignore_scan_only_list,
bool check_sample) {
bool ret = true;
bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
bool ret = _young_list->check_list_empty(check_sample);

ret = _young_list->check_list_empty(ignore_scan_only_list, check_sample);
if (!ignore_scan_only_list) {
if (check_heap) {
NoYoungRegionsClosure closure;
heap_region_iterate(&closure);
ret = ret && closure.success();

@@ -1,5 +1,5 @@
/*
* Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -81,33 +81,29 @@ private:

HeapRegion* _head;

HeapRegion* _scan_only_head;
HeapRegion* _scan_only_tail;
HeapRegion* _survivor_head;
HeapRegion* _survivor_tail;

HeapRegion* _curr;

size_t _length;
size_t _scan_only_length;
size_t _survivor_length;

size_t _last_sampled_rs_lengths;
size_t _sampled_rs_lengths;
HeapRegion* _curr;
HeapRegion* _curr_scan_only;

HeapRegion* _survivor_head;
HeapRegion* _survivor_tail;
size_t _survivor_length;

void empty_list(HeapRegion* list);
void empty_list(HeapRegion* list);

public:
YoungList(G1CollectedHeap* g1h);

void push_region(HeapRegion* hr);
void add_survivor_region(HeapRegion* hr);
HeapRegion* pop_region();
void empty_list();
bool is_empty() { return _length == 0; }
size_t length() { return _length; }
size_t scan_only_length() { return _scan_only_length; }
size_t survivor_length() { return _survivor_length; }
void push_region(HeapRegion* hr);
void add_survivor_region(HeapRegion* hr);

void empty_list();
bool is_empty() { return _length == 0; }
size_t length() { return _length; }
size_t survivor_length() { return _survivor_length; }

void rs_length_sampling_init();
bool rs_length_sampling_more();
@@ -120,22 +116,21 @@ public:

// for development purposes
void reset_auxilary_lists();
void clear() { _head = NULL; _length = 0; }

void clear_survivors() {
_survivor_head = NULL;
_survivor_tail = NULL;
_survivor_length = 0;
}

HeapRegion* first_region() { return _head; }
HeapRegion* first_scan_only_region() { return _scan_only_head; }
HeapRegion* first_survivor_region() { return _survivor_head; }
HeapRegion* last_survivor_region() { return _survivor_tail; }
HeapRegion* par_get_next_scan_only_region() {
MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
HeapRegion* ret = _curr_scan_only;
if (ret != NULL)
_curr_scan_only = ret->get_next_young_region();
return ret;
}

// debugging
bool check_list_well_formed();
bool check_list_empty(bool ignore_scan_only_list,
bool check_sample = true);
bool check_list_empty(bool check_sample = true);
void print();
};

@@ -232,6 +227,9 @@ private:
// current collection.
HeapRegion* _gc_alloc_region_list;

// Determines PLAB size for a particular allocation purpose.
static size_t desired_plab_sz(GCAllocPurpose purpose);

// When called by par thread, require par_alloc_during_gc_lock() to be held.
void push_gc_alloc_region(HeapRegion* hr);

@@ -402,8 +400,7 @@ public:
assert(_in_cset_fast_test_base != NULL, "sanity");
assert(r->in_collection_set(), "invariant");
int index = r->hrs_index();
assert(0 <= (size_t) index && (size_t) index < _in_cset_fast_test_length,
"invariant");
assert(0 <= index && (size_t) index < _in_cset_fast_test_length, "invariant");
assert(!_in_cset_fast_test_base[index], "invariant");
_in_cset_fast_test_base[index] = true;
}
@@ -428,6 +425,12 @@ public:
}
}

void clear_cset_fast_test() {
assert(_in_cset_fast_test_base != NULL, "sanity");
memset(_in_cset_fast_test_base, false,
_in_cset_fast_test_length * sizeof(bool));
}

protected:

// Shrink the garbage-first heap by at most the given size (in bytes!).
@@ -473,6 +476,10 @@ protected:
// regions.
void free_collection_set(HeapRegion* cs_head);

// Abandon the current collection set without recording policy
// statistics or updating free lists.
void abandon_collection_set(HeapRegion* cs_head);

// Applies "scan_non_heap_roots" to roots outside the heap,
// "scan_rs" to roots inside the heap (having done "set_region" to
// indicate the region in which the root resides), and does "scan_perm"
@@ -485,16 +492,9 @@ protected:
SharedHeap::ScanningOption so,
OopClosure* scan_non_heap_roots,
OopsInHeapRegionClosure* scan_rs,
OopsInHeapRegionClosure* scan_so,
OopsInGenClosure* scan_perm,
int worker_i);

void scan_scan_only_set(OopsInHeapRegionClosure* oc,
int worker_i);
void scan_scan_only_region(HeapRegion* hr,
OopsInHeapRegionClosure* oc,
int worker_i);

// Apply "blk" to all the weak roots of the system. These include
// JNI weak roots, the code cache, system dictionary, symbol table,
// string table, and referents of reachable weak refs.
@@ -1133,36 +1133,14 @@ public:
void set_region_short_lived_locked(HeapRegion* hr);
// add appropriate methods for any other surv rate groups

void young_list_rs_length_sampling_init() {
_young_list->rs_length_sampling_init();
}
bool young_list_rs_length_sampling_more() {
return _young_list->rs_length_sampling_more();
}
void young_list_rs_length_sampling_next() {
_young_list->rs_length_sampling_next();
}
size_t young_list_sampled_rs_lengths() {
return _young_list->sampled_rs_lengths();
}

size_t young_list_length() { return _young_list->length(); }
size_t young_list_scan_only_length() {
return _young_list->scan_only_length(); }

HeapRegion* pop_region_from_young_list() {
return _young_list->pop_region();
}

HeapRegion* young_list_first_region() {
return _young_list->first_region();
}
YoungList* young_list() { return _young_list; }

// debugging
bool check_young_list_well_formed() {
return _young_list->check_list_well_formed();
}
bool check_young_list_empty(bool ignore_scan_only_list,

bool check_young_list_empty(bool check_heap,
bool check_sample = true);

// *** Stuff related to concurrent marking. It's not clear to me that so
@@ -1367,12 +1345,18 @@ private:
return BitsPerWord << shifter();
}

static size_t gclab_word_size() {
return G1ParallelGCAllocBufferSize / HeapWordSize;
size_t gclab_word_size() const {
return _gclab_word_size;
}

static size_t bitmap_size_in_bits() {
size_t bits_in_bitmap = gclab_word_size() >> shifter();
// Calculates actual GCLab size in words
size_t gclab_real_word_size() const {
return bitmap_size_in_bits(pointer_delta(_real_end_word, _start_word))
/ BitsPerWord;
}

static size_t bitmap_size_in_bits(size_t gclab_word_size) {
size_t bits_in_bitmap = gclab_word_size >> shifter();
// We are going to ensure that the beginning of a word in this
// bitmap also corresponds to the beginning of a word in the
// global marking bitmap. To handle the case where a GCLab
@@ -1382,13 +1366,13 @@ private:
return bits_in_bitmap + BitsPerWord - 1;
}
public:
GCLabBitMap(HeapWord* heap_start)
: BitMap(bitmap_size_in_bits()),
GCLabBitMap(HeapWord* heap_start, size_t gclab_word_size)
: BitMap(bitmap_size_in_bits(gclab_word_size)),
_cm(G1CollectedHeap::heap()->concurrent_mark()),
_shifter(shifter()),
_bitmap_word_covers_words(bitmap_word_covers_words()),
_heap_start(heap_start),
_gclab_word_size(gclab_word_size()),
_gclab_word_size(gclab_word_size),
_real_start_word(NULL),
_real_end_word(NULL),
_start_word(NULL)
@@ -1483,7 +1467,7 @@ public:
mark_bitmap->mostly_disjoint_range_union(this,
0, // always start from the start of the bitmap
_start_word,
size_in_words());
gclab_real_word_size());
_cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word));

#ifndef PRODUCT
@@ -1495,9 +1479,10 @@ public:
}
}

static size_t bitmap_size_in_words() {
return (bitmap_size_in_bits() + BitsPerWord - 1) / BitsPerWord;
size_t bitmap_size_in_words() const {
return (bitmap_size_in_bits(gclab_word_size()) + BitsPerWord - 1) / BitsPerWord;
}

};

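The GCLabBitMap sizing above now derives from the per-instance lab size: one bit per 2^shifter heap words, padded by BitsPerWord - 1 so a word boundary in the lab bitmap can be made to line up with a word boundary in the global marking bitmap. A standalone sketch of that arithmetic, with the word size and shifter as stated assumptions:

    #include <cstddef>

    // Assumptions for illustration: 64-bit bitmap words, one bit per
    // heap word (shifter of 0).
    const size_t kBitsPerWord = 64;
    const size_t kShifter     = 0;

    size_t bitmap_size_in_bits(size_t gclab_words) {
        size_t bits = gclab_words >> kShifter;
        // Pad so a bitmap-word boundary can align with the global
        // marking bitmap regardless of the lab's starting offset.
        return bits + kBitsPerWord - 1;
    }

    size_t bitmap_size_in_words(size_t gclab_words) {
        return (bitmap_size_in_bits(gclab_words) + kBitsPerWord - 1)
               / kBitsPerWord;
    }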
class G1ParGCAllocBuffer: public ParGCAllocBuffer {
@@ -1507,10 +1492,10 @@ private:
GCLabBitMap _bitmap;

public:
G1ParGCAllocBuffer() :
ParGCAllocBuffer(G1ParallelGCAllocBufferSize / HeapWordSize),
G1ParGCAllocBuffer(size_t gclab_word_size) :
ParGCAllocBuffer(gclab_word_size),
_during_marking(G1CollectedHeap::heap()->mark_in_progress()),
_bitmap(G1CollectedHeap::heap()->reserved_region().start()),
_bitmap(G1CollectedHeap::heap()->reserved_region().start(), gclab_word_size),
_retired(false)
{ }

@@ -1549,8 +1534,10 @@ protected:
typedef GrowableArray<StarTask> OverflowQueue;
OverflowQueue* _overflowed_refs;

G1ParGCAllocBuffer _alloc_buffers[GCAllocPurposeCount];
ageTable _age_table;
G1ParGCAllocBuffer _surviving_alloc_buffer;
G1ParGCAllocBuffer _tenured_alloc_buffer;
G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
ageTable _age_table;

size_t _alloc_buffer_waste;
size_t _undo_waste;
@@ -1619,7 +1606,7 @@ public:
ageTable* age_table() { return &_age_table; }

G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
return &_alloc_buffers[purpose];
return _alloc_buffers[purpose];
}

size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
@@ -1684,15 +1671,15 @@ public:
HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {

HeapWord* obj = NULL;
if (word_sz * 100 <
(size_t)(G1ParallelGCAllocBufferSize / HeapWordSize) *
ParallelGCBufferWastePct) {
size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
assert(gclab_word_size == alloc_buf->word_sz(),
"dynamic resizing is not supported");
add_to_alloc_buffer_waste(alloc_buf->words_remaining());
alloc_buf->retire(false, false);

HeapWord* buf =
_g1h->par_allocate_during_gc(purpose, G1ParallelGCAllocBufferSize / HeapWordSize);
HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
if (buf == NULL) return NULL; // Let caller handle allocation failure.
// Otherwise.
alloc_buf->set_buf(buf);
@@ -1786,9 +1773,9 @@ public:

void retire_alloc_buffers() {
for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
size_t waste = _alloc_buffers[ap].words_remaining();
size_t waste = _alloc_buffers[ap]->words_remaining();
add_to_alloc_buffer_waste(waste);
_alloc_buffers[ap].retire(true, false);
_alloc_buffers[ap]->retire(true, false);
}
}
File diff suppressed because it is too large
@@ -1,5 +1,5 @@

/*
* Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -61,7 +61,6 @@ class MainBodySummary: public CHeapObj {
define_num_seq(parallel) // parallel only
define_num_seq(ext_root_scan)
define_num_seq(mark_stack_scan)
define_num_seq(scan_only)
define_num_seq(update_rs)
define_num_seq(scan_rs)
define_num_seq(scan_new_refs) // Only for temp use; added to
@@ -174,8 +173,6 @@ protected:

double* _par_last_ext_root_scan_times_ms;
double* _par_last_mark_stack_scan_times_ms;
double* _par_last_scan_only_times_ms;
double* _par_last_scan_only_regions_scanned;
double* _par_last_update_rs_start_times_ms;
double* _par_last_update_rs_times_ms;
double* _par_last_update_rs_processed_buffers;
@@ -196,7 +193,6 @@ protected:
bool _adaptive_young_list_length;
size_t _young_list_min_length;
size_t _young_list_target_length;
size_t _young_list_so_prefix_length;
size_t _young_list_fixed_length;

size_t _young_cset_length;
@@ -215,6 +211,8 @@ protected:
SurvRateGroup* _survivor_surv_rate_group;
// add here any more surv rate groups

double _gc_overhead_perc;

bool during_marking() {
return _during_marking;
}
@@ -232,7 +230,6 @@ private:
TruncatedSeq* _pending_card_diff_seq;
TruncatedSeq* _rs_length_diff_seq;
TruncatedSeq* _cost_per_card_ms_seq;
TruncatedSeq* _cost_per_scan_only_region_ms_seq;
TruncatedSeq* _fully_young_cards_per_entry_ratio_seq;
TruncatedSeq* _partially_young_cards_per_entry_ratio_seq;
TruncatedSeq* _cost_per_entry_ms_seq;
@@ -247,19 +244,16 @@ private:
TruncatedSeq* _rs_lengths_seq;

TruncatedSeq* _cost_per_byte_ms_during_cm_seq;
TruncatedSeq* _cost_per_scan_only_region_ms_during_cm_seq;

TruncatedSeq* _young_gc_eff_seq;

TruncatedSeq* _max_conc_overhead_seq;

size_t _recorded_young_regions;
size_t _recorded_scan_only_regions;
size_t _recorded_non_young_regions;
size_t _recorded_region_num;

size_t _free_regions_at_end_of_collection;
size_t _scan_only_regions_at_end_of_collection;

size_t _recorded_rs_lengths;
size_t _max_rs_lengths;
@@ -275,7 +269,6 @@ private:
double _predicted_survival_ratio;
double _predicted_rs_update_time_ms;
double _predicted_rs_scan_time_ms;
double _predicted_scan_only_scan_time_ms;
double _predicted_object_copy_time_ms;
double _predicted_constant_other_time_ms;
double _predicted_young_other_time_ms;
@@ -342,8 +335,6 @@ public:
bool verify_young_ages();
#endif // PRODUCT

void tag_scan_only(size_t short_lived_scan_only_length);

double get_new_prediction(TruncatedSeq* seq) {
return MAX2(seq->davg() + sigma() * seq->dsd(),
seq->davg() * confidence_factor(seq->num()));
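get_new_prediction() above is a conservative estimator: it pads the decaying average by sigma standard deviations, pads it again by a confidence factor when few samples exist, and takes whichever is larger. A sketch of the same shape; the sigma value and the confidence schedule are assumptions, not the policy's tuned values:

    #include <algorithm>
    #include <cstddef>

    double predict(double davg, double dsd, size_t num_samples) {
        const double sigma = 0.5;               // pad by half a std-dev
        double confidence = num_samples < 5
            ? 1.0 + (5 - num_samples) * 0.2     // inflate when data is thin
            : 1.0;
        return std::max(davg + sigma * dsd, davg * confidence);
    }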
@@ -429,23 +420,6 @@ public:
get_new_prediction(_partially_young_cost_per_entry_ms_seq);
}

double predict_scan_only_time_ms_during_cm(size_t scan_only_region_num) {
if (_cost_per_scan_only_region_ms_during_cm_seq->num() < 3)
return 1.5 * (double) scan_only_region_num *
get_new_prediction(_cost_per_scan_only_region_ms_seq);
else
return (double) scan_only_region_num *
get_new_prediction(_cost_per_scan_only_region_ms_during_cm_seq);
}

double predict_scan_only_time_ms(size_t scan_only_region_num) {
if (_in_marking_window_im)
return predict_scan_only_time_ms_during_cm(scan_only_region_num);
else
return (double) scan_only_region_num *
get_new_prediction(_cost_per_scan_only_region_ms_seq);
}

double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) {
if (_cost_per_byte_ms_during_cm_seq->num() < 3)
return 1.1 * (double) bytes_to_copy *
@@ -488,24 +462,21 @@ public:
size_t predict_bytes_to_copy(HeapRegion* hr);
double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);

// for use by: calculate_optimal_so_length(length)
void predict_gc_eff(size_t young_region_num,
size_t so_length,
double base_time_ms,
double *gc_eff,
double *pause_time_ms);

// for use by: calculate_young_list_target_config(rs_length)
bool predict_gc_eff(size_t young_region_num,
size_t so_length,
double base_time_with_so_ms,
size_t init_free_regions,
double target_pause_time_ms,
double* gc_eff);
// for use by: calculate_young_list_target_length(rs_length)
bool predict_will_fit(size_t young_region_num,
double base_time_ms,
size_t init_free_regions,
double target_pause_time_ms);

void start_recording_regions();
void record_cset_region(HeapRegion* hr, bool young);
void record_scan_only_regions(size_t scan_only_length);
void record_cset_region_info(HeapRegion* hr, bool young);
void record_non_young_cset_region(HeapRegion* hr);

void set_recorded_young_regions(size_t n_regions);
void set_recorded_young_bytes(size_t bytes);
void set_recorded_rs_lengths(size_t rs_lengths);
void set_predicted_bytes_to_copy(size_t bytes);

void end_recording_regions();

void record_vtime_diff_ms(double vtime_diff_ms) {
@@ -636,11 +607,74 @@ protected:
void update_recent_gc_times(double end_time_sec, double elapsed_ms);

// The head of the list (via "next_in_collection_set()") representing the
// current collection set.
// current collection set. Set from the incrementally built collection
// set at the start of the pause.
HeapRegion* _collection_set;

// The number of regions in the collection set. Set from the incrementally
// built collection set at the start of an evacuation pause.
size_t _collection_set_size;

// The number of bytes in the collection set before the pause. Set from
// the incrementally built collection set at the start of an evacuation
// pause.
size_t _collection_set_bytes_used_before;

// The associated information that is maintained while the incremental
// collection set is being built with young regions. Used to populate
// the recorded info for the evacuation pause.

enum CSetBuildType {
Active, // We are actively building the collection set
Inactive // We are not actively building the collection set
};

CSetBuildType _inc_cset_build_state;

// The head of the incrementally built collection set.
HeapRegion* _inc_cset_head;

// The tail of the incrementally built collection set.
HeapRegion* _inc_cset_tail;

// The number of regions in the incrementally built collection set.
// Used to set _collection_set_size at the start of an evacuation
// pause.
size_t _inc_cset_size;

// Used as the index in the surviving young words structure
// which tracks the amount of space, for each young region,
// that survives the pause.
size_t _inc_cset_young_index;

// The number of bytes in the incrementally built collection set.
// Used to set _collection_set_bytes_used_before at the start of
// an evacuation pause.
size_t _inc_cset_bytes_used_before;

// Used to record the highest end of any heap region in the collection set
HeapWord* _inc_cset_max_finger;

// The number of recorded used bytes in the young regions
// of the collection set. This is the sum of the used() bytes
// of retired young regions in the collection set.
size_t _inc_cset_recorded_young_bytes;

// The RSet lengths recorded for regions in the collection set
// (updated by the periodic sampling of the regions in the
// young list/collection set).
size_t _inc_cset_recorded_rs_lengths;

// The predicted elapsed time it will take to collect the regions
// in the collection set (updated by the periodic sampling of the
// regions in the young list/collection set).
double _inc_cset_predicted_elapsed_time_ms;

// The predicted bytes to copy for the regions in the collection
// set (updated by the periodic sampling of the regions in the
// young list/collection set).
size_t _inc_cset_predicted_bytes_to_copy;

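The fields above are running aggregates kept consistent as regions are added to, removed from, or re-sampled in the incremental collection set, so pause start can copy the totals wholesale. A sketch of that bookkeeping; the field names follow the diff, while the update logic is an assumption about how the totals stay consistent:

    #include <cstddef>

    struct IncCSetStats {
        size_t size = 0;                 // _inc_cset_size
        size_t recorded_rs_lengths = 0;  // _inc_cset_recorded_rs_lengths
        double predicted_time_ms = 0.0;  // _inc_cset_predicted_elapsed_time_ms

        void add_region(size_t rs_len, double time_ms) {
            size += 1;
            recorded_rs_lengths += rs_len;
            predicted_time_ms += time_ms;
        }

        // An RS-length re-sample replaces a region's old contribution.
        void update_region(size_t old_len, size_t new_len,
                           double old_ms, double new_ms) {
            recorded_rs_lengths = recorded_rs_lengths - old_len + new_len;
            predicted_time_ms  = predicted_time_ms  - old_ms  + new_ms;
        }
    };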
// Info about marking.
|
||||
int _n_marks; // Sticky at 2, so we know when we've done at least 2.
|
||||
|
||||
@ -722,11 +756,31 @@ protected:
|
||||
|
||||
size_t _n_marks_since_last_pause;
|
||||
|
||||
// True iff CM has been initiated.
|
||||
bool _conc_mark_initiated;
|
||||
// At the end of a pause we check the heap occupancy and we decide
|
||||
// whether we will start a marking cycle during the next pause. If
|
||||
// we decide that we want to do that, we will set this parameter to
|
||||
// true. So, this parameter will stay true between the end of a
|
||||
// pause and the beginning of a subsequent pause (not necessarily
|
||||
// the next one, see the comments on the next field) when we decide
|
||||
// that we will indeed start a marking cycle and do the initial-mark
|
||||
// work.
|
||||
volatile bool _initiate_conc_mark_if_possible;
|
||||
|
||||
// If initiate_conc_mark_if_possible() is set at the beginning of a
|
||||
// pause, it is a suggestion that the pause should start a marking
|
||||
// cycle by doing the initial-mark work. However, it is possible
|
||||
// that the concurrent marking thread is still finishing up the
|
||||
// previous marking cycle (e.g., clearing the next marking
|
||||
// bitmap). If that is the case we cannot start a new cycle and
|
||||
// we'll have to wait for the concurrent marking thread to finish
|
||||
// what it is doing. In this case we will postpone the marking cycle
|
||||
// initiation decision for the next pause. When we eventually decide
|
||||
// to start a cycle, we will set _during_initial_mark_pause which
|
||||
// will stay true until the end of the initial-mark pause and it's
|
||||
// the condition that indicates that a pause is doing the
|
||||
// initial-mark work.
|
||||
volatile bool _during_initial_mark_pause;
|
||||
|
||||
// True iff CM should be initiated
|
||||
bool _should_initiate_conc_mark;
|
||||
bool _should_revert_to_full_young_gcs;
|
||||
bool _last_full_young_gc;
|
||||
|
||||
@ -739,9 +793,8 @@ protected:
|
||||
double _mark_closure_time_ms;
|
||||
|
||||
void calculate_young_list_min_length();
|
||||
void calculate_young_list_target_config();
|
||||
void calculate_young_list_target_config(size_t rs_lengths);
|
||||
size_t calculate_optimal_so_length(size_t young_list_length);
|
||||
void calculate_young_list_target_length();
|
||||
void calculate_young_list_target_length(size_t rs_lengths);
|
||||
|
||||
public:
|
||||
|
||||
@ -846,11 +899,6 @@ public:
|
||||
_par_last_mark_stack_scan_times_ms[worker_i] = ms;
|
||||
}
|
||||
|
||||
void record_scan_only_time(int worker_i, double ms, int n) {
|
||||
_par_last_scan_only_times_ms[worker_i] = ms;
|
||||
_par_last_scan_only_regions_scanned[worker_i] = (double) n;
|
||||
}
|
||||
|
||||
void record_satb_drain_time(double ms) {
|
||||
_cur_satb_drain_time_ms = ms;
|
||||
_satb_drain_time_set = true;
|
||||
@ -965,23 +1013,82 @@ public:
|
||||
// Choose a new collection set. Marks the chosen regions as being
|
||||
// "in_collection_set", and links them together. The head and number of
|
||||
// the collection set are available via access methods.
|
||||
virtual void choose_collection_set() = 0;
|
||||
|
||||
void clear_collection_set() { _collection_set = NULL; }
|
||||
virtual bool choose_collection_set() = 0;
|
||||
|
||||
// The head of the list (via "next_in_collection_set()") representing the
|
||||
// current collection set.
|
||||
HeapRegion* collection_set() { return _collection_set; }
|
||||
|
||||
void clear_collection_set() { _collection_set = NULL; }
|
||||
|
||||
// The number of elements in the current collection set.
|
||||
size_t collection_set_size() { return _collection_set_size; }
|
||||
|
||||
// Add "hr" to the CS.
|
||||
void add_to_collection_set(HeapRegion* hr);
|
||||
|
||||
bool should_initiate_conc_mark() { return _should_initiate_conc_mark; }
|
||||
void set_should_initiate_conc_mark() { _should_initiate_conc_mark = true; }
|
||||
void unset_should_initiate_conc_mark(){ _should_initiate_conc_mark = false; }
|
||||
  // Incremental CSet Support

  // The head of the incrementally built collection set.
  HeapRegion* inc_cset_head() { return _inc_cset_head; }

  // The tail of the incrementally built collection set.
  HeapRegion* inc_set_tail() { return _inc_cset_tail; }

  // The number of elements in the incrementally built collection set.
  size_t inc_cset_size() { return _inc_cset_size; }

  // Initialize incremental collection set info.
  void start_incremental_cset_building();

  void clear_incremental_cset() {
    _inc_cset_head = NULL;
    _inc_cset_tail = NULL;
  }

  // Stop adding regions to the incremental collection set
  void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }

  // Add/remove information about hr to the aggregated information
  // for the incrementally built collection set.
  void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);
  void remove_from_incremental_cset_info(HeapRegion* hr);

  // Update information about hr in the aggregated information for
  // the incrementally built collection set.
  void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);

private:
  // Update the incremental cset information when adding a region
  // (should not be called directly).
  void add_region_to_incremental_cset_common(HeapRegion* hr);

public:
  // Add hr to the LHS of the incremental collection set.
  void add_region_to_incremental_cset_lhs(HeapRegion* hr);

  // Add hr to the RHS of the incremental collection set.
  void add_region_to_incremental_cset_rhs(HeapRegion* hr);

#ifndef PRODUCT
  void print_collection_set(HeapRegion* list_head, outputStream* st);
#endif // !PRODUCT

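The declarations above keep cached head/tail pointers for the incrementally built set plus aggregated totals that must be adjusted as regions are added, updated, or removed. The stand-alone sketch below models that bookkeeping; it is an illustration under assumed names (Region, IncCSet), not the HotSpot implementation, whose bodies live elsewhere and are not shown in this hunk.

```cpp
#include <cstddef>
#include <cstdio>

struct Region {
  Region* next = nullptr;
  std::size_t recorded_rs_length = 0;  // RSet length charged to the totals
};

struct IncCSet {
  Region* head = nullptr;
  Region* tail = nullptr;
  std::size_t size = 0;
  std::size_t total_rs_length = 0;  // aggregate the policy predicts from

  void add_rhs(Region* r, std::size_t rs_len) {  // append at the tail
    r->recorded_rs_length = rs_len;
    total_rs_length += rs_len;
    if (tail != nullptr) tail->next = r; else head = r;
    tail = r;
    ++size;
  }

  void update(Region* r, std::size_t new_rs_len) {  // RSet grew later
    total_rs_length -= r->recorded_rs_length;  // back out the stale value
    r->recorded_rs_length = new_rs_len;
    total_rs_length += new_rs_len;             // re-add the fresh one
  }
};

int main() {
  Region a, b;
  IncCSet cs;
  cs.add_rhs(&a, 10);
  cs.add_rhs(&b, 20);
  cs.update(&a, 15);  // a's remembered set grew while the set was open
  std::printf("regions=%zu total_rs=%zu\n", cs.size, cs.total_rs_length);  // 2 35
  return 0;
}
```
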
  bool initiate_conc_mark_if_possible() { return _initiate_conc_mark_if_possible; }
  void set_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = true; }
  void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }

  bool during_initial_mark_pause() { return _during_initial_mark_pause; }
  void set_during_initial_mark_pause() { _during_initial_mark_pause = true; }
  void clear_during_initial_mark_pause(){ _during_initial_mark_pause = false; }

  // This is called at the very beginning of an evacuation pause (it
  // has to be the first thing that the pause does). If
  // initiate_conc_mark_if_possible() is true, and the concurrent
  // marking thread has completed its work during the previous cycle,
  // it will set during_initial_mark_pause() to true so that the pause does
  // the initial-mark work and start a marking cycle.
  void decide_on_conc_mark_initiation();

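As a reading aid, here is a self-contained model of the two-flag hand-off described above: the request flag can stay set across several pauses until the concurrent mark thread is idle, at which point the pause claims the initial-mark work. This is an assumption-laden sketch (the conc_mark_thread_idle parameter stands in for "previous cycle finished"), not HotSpot code.

```cpp
#include <cstdio>

struct PolicyModel {
  bool initiate_conc_mark_if_possible = false;  // request; may span pauses
  bool during_initial_mark_pause      = false;  // true only for the pause itself

  // Mirrors the documented contract: called first thing in a pause.
  void decide_on_conc_mark_initiation(bool conc_mark_thread_idle) {
    if (!initiate_conc_mark_if_possible) return;  // no cycle requested
    if (conc_mark_thread_idle) {                  // previous cycle finished
      during_initial_mark_pause      = true;      // do initial-mark work now
      initiate_conc_mark_if_possible = false;     // request honored
    }
    // otherwise: leave the request set and retry at the next pause
  }
};

int main() {
  PolicyModel p;
  p.initiate_conc_mark_if_possible = true;
  p.decide_on_conc_mark_initiation(false);  // marking thread still busy
  std::printf("initial-mark pause? %d\n", p.during_initial_mark_pause);  // 0
  p.decide_on_conc_mark_initiation(true);   // marking thread now idle
  std::printf("initial-mark pause? %d\n", p.during_initial_mark_pause);  // 1
  return 0;
}
```
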
  // If an expansion would be appropriate, because recent GC overhead had
  // exceeded the desired limit, return an amount to expand by.
@ -1157,7 +1264,7 @@ class G1CollectorPolicy_BestRegionsFirst: public G1CollectorPolicy {
  // If the estimated is less than desirable, resize if possible.
  void expand_if_possible(size_t numRegions);

  virtual void choose_collection_set();
  virtual bool choose_collection_set();
  virtual void record_collection_pause_start(double start_time_sec,
                                             size_t start_used);
  virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,

@ -1,5 +1,5 @@
/*
 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
 * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -31,6 +31,12 @@ void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
                                      bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  SharedHeap* sh = SharedHeap::heap();
#ifdef ASSERT
  if (sh->collector_policy()->should_clear_all_soft_refs()) {
    assert(clear_all_softrefs, "Policy should have been checked earler");
  }
#endif
  // hook up weak ref data so it can be used during Mark-Sweep
  assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
  assert(rp != NULL, "should be non-NULL");
@ -44,7 +50,6 @@ void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,

  // Increment the invocation count for the permanent generation, since it is
  // implicitly collected whenever we do a full mark sweep collection.
  SharedHeap* sh = SharedHeap::heap();
  sh->perm_gen()->stat_record()->invocations++;

  bool marked_for_unloading = false;

@ -1,5 +1,5 @@
/*
 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
 * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -28,9 +28,6 @@

#define G1_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct, manageable, product_rw) \
                                                                            \
  product(intx, G1ParallelGCAllocBufferSize, 8*K,                           \
          "Size of parallel G1 allocation buffers in to-space.")            \
                                                                            \
  product(intx, G1ConfidencePercent, 50,                                    \
          "Confidence level for MMU/pause predictions")                     \
                                                                            \
@ -40,9 +37,6 @@
  develop(bool, G1Gen, true,                                                \
          "If true, it will enable the generational G1")                    \
                                                                            \
  develop(intx, G1GCPercent, 10,                                            \
          "The desired percent time spent on GC")                           \
                                                                            \
  develop(intx, G1PolicyVerbose, 0,                                         \
          "The verbosity level on G1 policy decisions")                     \
                                                                            \
@ -232,10 +226,6 @@
          "the number of regions for which we'll print a surv rate "        \
          "summary.")                                                       \
                                                                            \
  develop(bool, G1UseScanOnlyPrefix, false,                                 \
          "It determines whether the system will calculate an optimum "     \
          "scan-only set.")                                                 \
                                                                            \
  product(intx, G1ReservePercent, 10,                                       \
          "It determines the minimum reserve we should have in the heap "   \
          "to minimize the probability of promotion failure.")              \
@ -270,11 +260,11 @@
  product(uintx, G1HeapRegionSize, 0,                                       \
          "Size of the G1 regions.")                                        \
                                                                            \
  experimental(bool, G1UseParallelRSetUpdating, false,                      \
  experimental(bool, G1UseParallelRSetUpdating, true,                       \
          "Enables the parallelization of remembered set updating "         \
          "during evacuation pauses")                                       \
                                                                            \
  experimental(bool, G1UseParallelRSetScanning, false,                      \
  experimental(bool, G1UseParallelRSetScanning, true,                       \
          "Enables the parallelization of remembered set scanning "         \
          "during evacuation pauses")                                       \
                                                                            \

@ -1,5 +1,5 @@
/*
 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
 * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -75,6 +75,16 @@ public:
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  void print_object(outputStream* out, oop obj) {
#ifdef PRODUCT
    klassOop k = obj->klass();
    const char* class_name = instanceKlass::cast(k)->external_name();
    out->print_cr("class name %s", class_name);
#else // PRODUCT
    obj->print_on(out);
#endif // PRODUCT
  }

  template <class T> void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _use_prev_marking),
@ -90,21 +100,29 @@ public:
        gclog_or_tty->print_cr("----------");
      }
      if (!_g1h->is_in_closed_subset(obj)) {
        HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
        gclog_or_tty->print_cr("Field "PTR_FORMAT
                               " of live obj "PTR_FORMAT
                               " points to obj "PTR_FORMAT
                               " not in the heap.",
                               p, (void*) _containing_obj, (void*) obj);
                               " of live obj "PTR_FORMAT" in region "
                               "["PTR_FORMAT", "PTR_FORMAT")",
                               p, (void*) _containing_obj,
                               from->bottom(), from->end());
        print_object(gclog_or_tty, _containing_obj);
        gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap",
                               (void*) obj);
      } else {
        HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
        HeapRegion* to   = _g1h->heap_region_containing((HeapWord*)obj);
        gclog_or_tty->print_cr("Field "PTR_FORMAT
                               " of live obj "PTR_FORMAT
                               " points to dead obj "PTR_FORMAT".",
                               p, (void*) _containing_obj, (void*) obj);
                               " of live obj "PTR_FORMAT" in region "
                               "["PTR_FORMAT", "PTR_FORMAT")",
                               p, (void*) _containing_obj,
                               from->bottom(), from->end());
        print_object(gclog_or_tty, _containing_obj);
        gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region "
                               "["PTR_FORMAT", "PTR_FORMAT")",
                               (void*) obj, to->bottom(), to->end());
        print_object(gclog_or_tty, obj);
      }
      gclog_or_tty->print_cr("Live obj:");
      _containing_obj->print_on(gclog_or_tty);
      gclog_or_tty->print_cr("Bad referent:");
      obj->print_on(gclog_or_tty);
      gclog_or_tty->print_cr("----------");
      _failures = true;
      failed = true;
@ -432,7 +450,9 @@ HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
    _young_type(NotYoung), _next_young_region(NULL),
    _next_dirty_cards_region(NULL),
    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
    _rem_set(NULL), _zfs(NotZeroFilled)
    _rem_set(NULL), _zfs(NotZeroFilled),
    _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
    _predicted_bytes_to_copy(0)
{
  _orig_end = mr.end();
  // Note that initialize() will set the start of the unmarked area of the
@ -715,7 +735,7 @@ void HeapRegion::print_on(outputStream* st) const {
  else
    st->print("   ");
  if (is_young())
    st->print(is_scan_only() ? " SO" : (is_survivor() ? " SU" : " Y "));
    st->print(is_survivor() ? " SU" : " Y ");
  else
    st->print("   ");
  if (is_empty())
@ -723,6 +743,8 @@ void HeapRegion::print_on(outputStream* st) const {
  else
    st->print(" ");
  st->print(" %5d", _gc_time_stamp);
  st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT,
            prev_top_at_mark_start(), next_top_at_mark_start());
  G1OffsetTableContigSpace::print_on(st);
}

@ -1,5 +1,5 @@
/*
 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
 * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -247,7 +247,6 @@ class HeapRegion: public G1OffsetTableContigSpace {

  enum YoungType {
    NotYoung,  // a region is not young
    ScanOnly,  // a region is young and scan-only
    Young,     // a region is young
    Survivor   // a region is young and it contains
               // survivor
@ -292,6 +291,20 @@ class HeapRegion: public G1OffsetTableContigSpace {
    _young_type = new_type;
  }

  // Cached attributes used in the collection set policy information

  // The RSet length that was added to the total value
  // for the collection set.
  size_t _recorded_rs_length;

  // The predicted elapsed time that was added to total value
  // for the collection set.
  double _predicted_elapsed_time_ms;

  // The predicted number of bytes to copy that was added to
  // the total value for the collection set.
  size_t _predicted_bytes_to_copy;

public:
  // If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros.
  HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
@ -614,7 +627,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
  // </PREDICTION>

  bool is_young() const     { return _young_type != NotYoung; }
  bool is_scan_only() const { return _young_type == ScanOnly; }
  bool is_survivor() const  { return _young_type == Survivor; }

  int  young_index_in_cset() const { return _young_index_in_cset; }
@ -629,12 +641,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
    return _surv_rate_group->age_in_group(_age_index);
  }

  void recalculate_age_in_surv_rate_group() {
    assert( _surv_rate_group != NULL, "pre-condition" );
    assert( _age_index > -1, "pre-condition" );
    _age_index = _surv_rate_group->recalculate_age_index(_age_index);
  }

  void record_surv_words_in_group(size_t words_survived) {
    assert( _surv_rate_group != NULL, "pre-condition" );
    assert( _age_index > -1, "pre-condition" );
@ -676,8 +682,6 @@ class HeapRegion: public G1OffsetTableContigSpace {

  void set_young() { set_young_type(Young); }

  void set_scan_only() { set_young_type(ScanOnly); }

  void set_survivor() { set_young_type(Survivor); }

  void set_not_young() { set_young_type(NotYoung); }
@ -775,6 +779,22 @@ class HeapRegion: public G1OffsetTableContigSpace {
    _zero_filler = NULL;
  }

  size_t recorded_rs_length() const        { return _recorded_rs_length; }
  double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
  size_t predicted_bytes_to_copy() const   { return _predicted_bytes_to_copy; }

  void set_recorded_rs_length(size_t rs_length) {
    _recorded_rs_length = rs_length;
  }

  void set_predicted_elapsed_time_ms(double ms) {
    _predicted_elapsed_time_ms = ms;
  }

  void set_predicted_bytes_to_copy(size_t bytes) {
    _predicted_bytes_to_copy = bytes;
  }

#define HeapRegion_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DECL)

@ -662,8 +662,6 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
      prt = PosParPRT::alloc(from_hr);
    }
    prt->init(from_hr);
    // Record the outgoing pointer in the from_region's outgoing bitmap.
    from_hr->rem_set()->add_outgoing_reference(hr());

    PosParPRT* first_prt = _fine_grain_regions[ind];
    prt->set_next(first_prt);  // XXX Maybe move to init?
@ -1073,11 +1071,7 @@ int HeapRegionRemSet::num_par_rem_sets() {

HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
                                   HeapRegion* hr)
  : _bosa(bosa), _other_regions(hr),
    _outgoing_region_map(G1CollectedHeap::heap()->max_regions(),
                         false /* in-resource-area */),
    _iter_state(Unclaimed)
{}
  : _bosa(bosa), _other_regions(hr), _iter_state(Unclaimed) { }


void HeapRegionRemSet::setup_remset_size() {
@ -1148,30 +1142,11 @@ void HeapRegionRemSet::par_cleanup() {
  PosParPRT::par_contract_all();
}

void HeapRegionRemSet::add_outgoing_reference(HeapRegion* to_hr) {
  _outgoing_region_map.par_at_put(to_hr->hrs_index(), 1);
}

void HeapRegionRemSet::clear() {
  clear_outgoing_entries();
  _outgoing_region_map.clear();
  _other_regions.clear();
  assert(occupied() == 0, "Should be clear.");
}

void HeapRegionRemSet::clear_outgoing_entries() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  size_t i = _outgoing_region_map.get_next_one_offset(0);
  while (i < _outgoing_region_map.size()) {
    HeapRegion* to_region = g1h->region_at(i);
    if (!to_region->in_collection_set()) {
      to_region->rem_set()->clear_incoming_entry(hr());
    }
    i = _outgoing_region_map.get_next_one_offset(i+1);
  }
}


void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,
                             BitMap* region_bm, BitMap* card_bm) {
  _other_regions.scrub(ctbs, region_bm, card_bm);

@ -179,13 +179,6 @@ private:

  OtherRegionsTable _other_regions;

  // One set bit for every region that has an entry for this one.
  BitMap _outgoing_region_map;

  // Clear entries for the current region in any rem sets named in
  // the _outgoing_region_map.
  void clear_outgoing_entries();

  enum ParIterState { Unclaimed, Claimed, Complete };
  volatile ParIterState _iter_state;
  volatile jlong _iter_claimed;
@ -243,10 +236,6 @@ public:
    _other_regions.add_reference(from, tid);
  }

  // Records the fact that the current region contains an outgoing
  // reference into "to_hr".
  void add_outgoing_reference(HeapRegion* to_hr);

  // Removes any entries shown by the given bitmaps to contain only dead
  // objects.
  void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm);

@ -1,5 +1,5 @@
/*
 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
 * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -55,7 +55,6 @@ SurvRateGroup::SurvRateGroup(G1CollectorPolicy* g1p,
void SurvRateGroup::reset()
{
  _all_regions_allocated = 0;
  _scan_only_prefix      = 0;
  _setup_seq_num         = 0;
  _stats_arrays_length   = 0;
  _accum_surv_rate       = 0.0;
@ -74,7 +73,7 @@ void SurvRateGroup::reset()
void
SurvRateGroup::start_adding_regions() {
  _setup_seq_num   = _stats_arrays_length;
  _region_num      = _scan_only_prefix;
  _region_num      = 0;
  _accum_surv_rate = 0.0;

#if 0
@ -163,12 +162,6 @@ SurvRateGroup::next_age_index() {
  return (int) ++_all_regions_allocated;
}

void
SurvRateGroup::record_scan_only_prefix(size_t scan_only_prefix) {
  guarantee( scan_only_prefix <= _region_num, "pre-condition" );
  _scan_only_prefix = scan_only_prefix;
}

void
SurvRateGroup::record_surviving_words(int age_in_group, size_t surv_words) {
  guarantee( 0 <= age_in_group && (size_t) age_in_group < _region_num,
@ -218,13 +211,12 @@ SurvRateGroup::all_surviving_words_recorded(bool propagate) {
#ifndef PRODUCT
void
SurvRateGroup::print() {
  gclog_or_tty->print_cr("Surv Rate Group: %s (%d entries, %d scan-only)",
                         _name, _region_num, _scan_only_prefix);
  gclog_or_tty->print_cr("Surv Rate Group: %s (%d entries)",
                         _name, _region_num);
  for (size_t i = 0; i < _region_num; ++i) {
    gclog_or_tty->print_cr("    age %4d   surv rate %6.2lf %%   pred %6.2lf %%%s",
    gclog_or_tty->print_cr("    age %4d   surv rate %6.2lf %%   pred %6.2lf %%",
                           i, _surv_rate[i] * 100.0,
                           _g1p->get_new_prediction(_surv_rate_pred[i]) * 100.0,
                           (i < _scan_only_prefix) ? " S-O" : "    ");
                           _g1p->get_new_prediction(_surv_rate_pred[i]) * 100.0);
  }
}

Some files were not shown because too many files have changed in this diff.