commit 2f14620595
Author: J. Duke
Date: 2017-07-05 17:11:04 +02:00
114 changed files with 2996 additions and 2094 deletions

View File

@@ -65,3 +65,4 @@ cf26288a114be67c39f2758959ce50b60f5ae330 jdk7-b85
82135c848d5fcddb065e98ae77b81077c858f593 jdk7-b88
7f1ba4459972bf84b8201dc1cc4f62b1fe1c74f4 jdk7-b89
425ba3efabbfe0b188105c10aaf7c3c8fa8d1a38 jdk7-b90
97d8b6c659c29c8493a8b2b72c2796a021a8cf79 jdk7-b91

View File

@@ -65,3 +65,4 @@ c67a9df7bc0ca291f08f9a9cc05cb78ea15d25e6 jdk7-b85
39e14d2da687c7e592142137517aaf689544820f jdk7-b88
bb4424c5e778b842c064a8b1aa902b35f4397654 jdk7-b89
56ce07b0eb47b93a98a72adef0f21e602c460623 jdk7-b90
bcd2fc089227559ac5be927923609fac29f067fa jdk7-b91

View File

@@ -90,3 +90,5 @@ e7e7e36ccdb5d56edd47e5744351202d38f3b7ad jdk7-b87
15836273ac2494f36ef62088bc1cb6f3f011f565 jdk7-b89
4b60f23c42231f7ecd62ad1fcb6a9ca26fa57d1b hs18-b02
605c9707a766ff518cd841fc04f9bb4b36a3a30b jdk7-b90
e0a1a502e402dbe7bf2d9102b4084a7e79a99a9b jdk7-b91
25f53b53aaa3eb8b2d5391a1e8de9a76ae1dd8a2 hs18-b03

View File

@@ -884,9 +884,12 @@ static bool read_shared_lib_info(struct ps_prochandle* ph) {
      }

      // read name of the shared object
-     if (read_string(ph, (uintptr_t) lib_name_addr, lib_name, sizeof(lib_name)) != true) {
+     lib_name[0] = '\0';
+     if (lib_name_addr != 0 &&
+         read_string(ph, (uintptr_t) lib_name_addr, lib_name, sizeof(lib_name)) != true) {
        print_debug("can't read shared object name\n");
-       return false;
+       // don't let failure to read the name stop opening the file. If something is really wrong
+       // it will fail later.
      }

      if (lib_name[0] != '\0') {

View File

@@ -1,5 +1,5 @@
/*
- * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2005-2010 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -494,6 +494,68 @@ public class CommandProcessor {
                }
            }
        },
new Command("revptrs", "revptrs address", false) {
public void doit(Tokens t) {
int tokens = t.countTokens();
if (tokens != 1 && (tokens != 2 || !t.nextToken().equals("-c"))) {
usage();
return;
}
boolean chase = tokens == 2;
ReversePtrs revptrs = VM.getVM().getRevPtrs();
if (revptrs == null) {
out.println("Computing reverse pointers...");
ReversePtrsAnalysis analysis = new ReversePtrsAnalysis();
final boolean[] complete = new boolean[1];
HeapProgressThunk thunk = new HeapProgressThunk() {
public void heapIterationFractionUpdate(double d) {}
public synchronized void heapIterationComplete() {
complete[0] = true;
notify();
}
};
analysis.setHeapProgressThunk(thunk);
analysis.run();
while (!complete[0]) {
synchronized (thunk) {
try {
thunk.wait();
} catch (Exception e) {
}
}
}
revptrs = VM.getVM().getRevPtrs();
out.println("Done.");
}
Address a = VM.getVM().getDebugger().parseAddress(t.nextToken());
if (VM.getVM().getUniverse().heap().isInReserved(a)) {
OopHandle handle = a.addOffsetToAsOopHandle(0);
Oop oop = VM.getVM().getObjectHeap().newOop(handle);
ArrayList ptrs = revptrs.get(oop);
if (ptrs == null) {
out.println("no live references to " + a);
} else {
if (chase) {
while (ptrs.size() == 1) {
LivenessPathElement e = (LivenessPathElement)ptrs.get(0);
ByteArrayOutputStream bos = new ByteArrayOutputStream();
Oop.printOopValueOn(e.getObj(), new PrintStream(bos));
out.println(bos.toString());
ptrs = revptrs.get(e.getObj());
}
} else {
for (int i = 0; i < ptrs.size(); i++) {
LivenessPathElement e = (LivenessPathElement)ptrs.get(i);
ByteArrayOutputStream bos = new ByteArrayOutputStream();
Oop.printOopValueOn(e.getObj(), new PrintStream(bos));
out.println(bos.toString());
oop = e.getObj();
}
}
}
}
}
},
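The revptrs command above blocks until the heap walk finishes by pairing a one-element boolean array with wait/notify on the progress-thunk object. A minimal, self-contained Java sketch of the same completion idiom (the Completion class and the worker thread are illustrative, not SA classes):

// Minimal sketch of the wait/notify completion pattern used by revptrs.
// Completion is an illustrative stand-in for the HeapProgressThunk object.
public class Completion {
    private final boolean[] done = new boolean[1];

    public synchronized void signal() {
        done[0] = true;
        notify();                        // wake the waiter in await()
    }

    public void await() throws InterruptedException {
        synchronized (this) {
            while (!done[0]) {           // loop guards against spurious wakeups
                wait();
            }
        }
    }

    public static void main(String[] args) throws Exception {
        Completion c = new Completion();
        new Thread(() -> {
            // ... a long-running analysis would run here ...
            c.signal();
        }).start();
        c.await();
        System.out.println("Done.");
    }
}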
new Command("inspect", "inspect expression", false) { new Command("inspect", "inspect expression", false) {
public void doit(Tokens t) { public void doit(Tokens t) {
if (t.countTokens() != 1) { if (t.countTokens() != 1) {
@@ -816,8 +878,24 @@ public class CommandProcessor {
                        dumpType(type);
                    } else {
                        Iterator i = agent.getTypeDataBase().getTypes();
+                       // Make sure the types are emitted in an order that can be read back in
+                       HashSet emitted = new HashSet();
+                       Stack pending = new Stack();
                        while (i.hasNext()) {
-                           dumpType((Type)i.next());
+                           Type n = (Type)i.next();
+                           if (emitted.contains(n.getName())) {
+                               continue;
+                           }
+                           while (n != null && !emitted.contains(n.getName())) {
+                               pending.push(n);
+                               n = n.getSuperclass();
+                           }
+                           while (!pending.empty()) {
+                               n = (Type)pending.pop();
+                               dumpType(n);
+                               emitted.add(n.getName());
+                           }
                        }
                    }
                }
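The rewritten loop above guarantees that a type's superclass is always emitted before the type itself: each unemitted ancestor is pushed onto a stack and the chain is then popped top-down. A standalone Java sketch of the same ordering trick (Node and its fields are hypothetical stand-ins for Type):

import java.util.*;

// Illustrative sketch of the "emit supertypes first" ordering used by the
// type dump above. Node/name/parent stand in for Type and its superclass.
class TopoEmit {
    static class Node {
        final String name;
        final Node parent;               // superclass, or null for a root
        Node(String name, Node parent) { this.name = name; this.parent = parent; }
    }

    static void emitAll(List<Node> nodes) {
        Set<String> emitted = new HashSet<>();
        Deque<Node> pending = new ArrayDeque<>();
        for (Node n : nodes) {
            // Walk up the chain, collecting ancestors that are not yet emitted...
            while (n != null && !emitted.contains(n.name)) {
                pending.push(n);
                n = n.parent;
            }
            // ...then emit them top-down so parents always precede children.
            while (!pending.isEmpty()) {
                Node m = pending.pop();
                System.out.println(m.name);
                emitted.add(m.name);
            }
        }
    }

    public static void main(String[] args) {
        Node a = new Node("A", null);
        Node b = new Node("B", a);
        Node c = new Node("C", b);
        emitAll(Arrays.asList(c, a, b)); // prints A, B, C
    }
}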
@@ -846,83 +924,105 @@ public class CommandProcessor {
                }
            },
-           new Command("search", "search [ heap | codecache | threads ] value", false) {
+           new Command("search", "search [ heap | perm | rawheap | codecache | threads ] value", false) {
                public void doit(Tokens t) {
                    if (t.countTokens() != 2) {
                        usage();
-                   } else {
-                       String type = t.nextToken();
-                       final Address value = VM.getVM().getDebugger().parseAddress(t.nextToken());
-                       final long stride = VM.getVM().getAddressSize();
-                       if (type.equals("threads")) {
-                           Threads threads = VM.getVM().getThreads();
-                           for (JavaThread thread = threads.first(); thread != null; thread = thread.next()) {
-                               Address base = thread.getBaseOfStackPointer();
-                               Address end = thread.getLastJavaSP();
-                               if (end == null) continue;
-                               if (end.lessThan(base)) {
-                                   Address tmp = base;
-                                   base = end;
-                                   end = tmp;
-                               }
-                               out.println("Searching " + base + " " + end);
-                               while (base != null && base.lessThan(end)) {
-                                   Address val = base.getAddressAt(0);
-                                   if (AddressOps.equal(val, value)) {
-                                       out.println(base);
-                                   }
-                                   base = base.addOffsetTo(stride);
-                               }
-                           }
-                       } else if (type.equals("heap")) {
-                           RawHeapVisitor iterator = new RawHeapVisitor() {
-                                   public void prologue(long used) {
-                                   }
-                                   public void visitAddress(Address addr) {
-                                       Address val = addr.getAddressAt(0);
-                                       if (AddressOps.equal(val, value)) {
-                                           out.println("found at " + addr);
-                                       }
-                                   }
-                                   public void visitCompOopAddress(Address addr) {
-                                       Address val = addr.getCompOopAddressAt(0);
-                                       if (AddressOps.equal(val, value)) {
-                                           out.println("found at " + addr);
-                                       }
-                                   }
-                                   public void epilogue() {
-                                   }
-                               };
-                           VM.getVM().getObjectHeap().iterateRaw(iterator);
-                       } else if (type.equals("codecache")) {
-                           CodeCacheVisitor v = new CodeCacheVisitor() {
-                                   public void prologue(Address start, Address end) {
-                                   }
-                                   public void visit(CodeBlob blob) {
-                                       boolean printed = false;
-                                       Address base = blob.getAddress();
-                                       Address end  = base.addOffsetTo(blob.getSize());
-                                       while (base != null && base.lessThan(end)) {
-                                           Address val = base.getAddressAt(0);
-                                           if (AddressOps.equal(val, value)) {
-                                               if (!printed) {
-                                                   printed = true;
-                                                   blob.printOn(out);
-                                               }
-                                               out.println("found at " + base + "\n");
-                                           }
-                                           base = base.addOffsetTo(stride);
-                                       }
-                                   }
-                                   public void epilogue() {
-                                   }
-                               };
-                           VM.getVM().getCodeCache().iterate(v);
-                       }
-                   }
+                       return;
+                   }
+                   String type = t.nextToken();
+                   final Address value = VM.getVM().getDebugger().parseAddress(t.nextToken());
+                   final long stride = VM.getVM().getAddressSize();
+                   if (type.equals("threads")) {
+                       Threads threads = VM.getVM().getThreads();
+                       for (JavaThread thread = threads.first(); thread != null; thread = thread.next()) {
+                           Address base = thread.getBaseOfStackPointer();
+                           Address end = thread.getLastJavaSP();
+                           if (end == null) continue;
+                           if (end.lessThan(base)) {
+                               Address tmp = base;
+                               base = end;
+                               end = tmp;
+                           }
+                           out.println("Searching " + base + " " + end);
+                           while (base != null && base.lessThan(end)) {
+                               Address val = base.getAddressAt(0);
+                               if (AddressOps.equal(val, value)) {
+                                   out.println(base);
+                               }
+                               base = base.addOffsetTo(stride);
+                           }
+                       }
+                   } else if (type.equals("rawheap")) {
+                       RawHeapVisitor iterator = new RawHeapVisitor() {
+                               public void prologue(long used) {
+                               }
+                               public void visitAddress(Address addr) {
+                                   Address val = addr.getAddressAt(0);
+                                   if (AddressOps.equal(val, value)) {
+                                       out.println("found at " + addr);
+                                   }
+                               }
+                               public void visitCompOopAddress(Address addr) {
+                                   Address val = addr.getCompOopAddressAt(0);
+                                   if (AddressOps.equal(val, value)) {
+                                       out.println("found at " + addr);
+                                   }
+                               }
+                               public void epilogue() {
+                               }
+                           };
+                       VM.getVM().getObjectHeap().iterateRaw(iterator);
+                   } else if (type.equals("heap") || type.equals("perm")) {
+                       HeapVisitor iterator = new DefaultHeapVisitor() {
+                               public boolean doObj(Oop obj) {
+                                   int index = 0;
+                                   Address start = obj.getHandle();
+                                   long end = obj.getObjectSize();
+                                   while (index < end) {
+                                       Address val = start.getAddressAt(index);
+                                       if (AddressOps.equal(val, value)) {
+                                           out.println("found in " + obj.getHandle());
+                                           break;
+                                       }
+                                       index += 4;
+                                   }
+                                   return false;
+                               }
+                           };
+                       if (type.equals("heap")) {
+                           VM.getVM().getObjectHeap().iterate(iterator);
+                       } else {
+                           VM.getVM().getObjectHeap().iteratePerm(iterator);
+                       }
+                   } else if (type.equals("codecache")) {
+                       CodeCacheVisitor v = new CodeCacheVisitor() {
+                               public void prologue(Address start, Address end) {
+                               }
+                               public void visit(CodeBlob blob) {
+                                   boolean printed = false;
+                                   Address base = blob.getAddress();
+                                   Address end  = base.addOffsetTo(blob.getSize());
+                                   while (base != null && base.lessThan(end)) {
+                                       Address val = base.getAddressAt(0);
+                                       if (AddressOps.equal(val, value)) {
+                                           if (!printed) {
+                                               printed = true;
+                                               blob.printOn(out);
+                                           }
+                                           out.println("found at " + base + "\n");
+                                       }
+                                       base = base.addOffsetTo(stride);
+                                   }
+                               }
+                               public void epilogue() {
+                               }
+                           };
+                       VM.getVM().getCodeCache().iterate(v);
+                   }
                }
            },
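The reworked search command distinguishes a raw scan from an object walk: rawheap visits every word of the heap, while heap and perm walk objects and scan only the words each object occupies, reporting a matching object once. A small illustrative Java sketch of the difference (the Obj class and the addresses are made up for the example):

import java.util.*;

// Contrast of the two search modes added above: rawSearch reports every
// matching word address; objectSearch reports each containing object once.
public class SearchModes {
    static class Obj {
        final long address;
        final long[] words;
        Obj(long address, long[] words) { this.address = address; this.words = words; }
    }

    static void rawSearch(long[] heapWords, long heapBase, long value) {
        for (int i = 0; i < heapWords.length; i++) {
            if (heapWords[i] == value) {
                System.out.println("found at 0x" + Long.toHexString(heapBase + 8L * i));
            }
        }
    }

    static void objectSearch(List<Obj> objects, long value) {
        for (Obj obj : objects) {
            for (long w : obj.words) {
                if (w == value) {
                    System.out.println("found in 0x" + Long.toHexString(obj.address));
                    break;               // one "found in" line per object
                }
            }
        }
    }

    public static void main(String[] args) {
        List<Obj> objs = Arrays.asList(new Obj(0x1000, new long[] {42, 7}),
                                       new Obj(0x1010, new long[] {7, 7}));
        objectSearch(objs, 7);           // two lines, one per object
        rawSearch(new long[] {42, 7, 7, 7}, 0x1000, 7); // three lines, one per word
    }
}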
@@ -957,12 +1057,19 @@ public class CommandProcessor {
                    Threads threads = VM.getVM().getThreads();
                    boolean all = name.equals("-a");
                    for (JavaThread thread = threads.first(); thread != null; thread = thread.next()) {
+                       StringWriter sw = new StringWriter();
                        ByteArrayOutputStream bos = new ByteArrayOutputStream();
                        thread.printThreadIDOn(new PrintStream(bos));
                        if (all || bos.toString().equals(name)) {
+                           out.println(bos.toString() + " = " + thread.getAddress());
                            HTMLGenerator gen = new HTMLGenerator(false);
-                           out.println(gen.genHTMLForJavaStackTrace(thread));
+                           try {
+                               out.println(gen.genHTMLForJavaStackTrace(thread));
+                           } catch (Exception e) {
+                               err.println("Error: " + e);
+                               if (verboseExceptions) {
+                                   e.printStackTrace(err);
+                               }
+                           }
                            if (!all) return;
                        }
                    }
@@ -970,6 +1077,26 @@ public class CommandProcessor {
                }
            }
        },
new Command("thread", "thread { -a | id }", false) {
public void doit(Tokens t) {
if (t.countTokens() != 1) {
usage();
} else {
String name = t.nextToken();
Threads threads = VM.getVM().getThreads();
boolean all = name.equals("-a");
for (JavaThread thread = threads.first(); thread != null; thread = thread.next()) {
ByteArrayOutputStream bos = new ByteArrayOutputStream();
thread.printThreadIDOn(new PrintStream(bos));
if (all || bos.toString().equals(name)) {
out.println(bos.toString() + " = " + thread.getAddress());
if (!all) return;
}
}
out.println("Couldn't find thread " + name);
}
}
},
new Command("threads", false) { new Command("threads", false) {
public void doit(Tokens t) { public void doit(Tokens t) {
@@ -1161,7 +1288,7 @@ public class CommandProcessor {
            }
        }

-       static Pattern historyPattern = Pattern.compile("((!\\*)|(!\\$)|(!!-?)|(!-?[0-9][0-9]*))");
+       static Pattern historyPattern = Pattern.compile("((!\\*)|(!\\$)|(!!-?)|(!-?[0-9][0-9]*)|(![a-zA-Z][^ ]*))");

        public void executeCommand(String ln) {
            if (ln.indexOf('!') != -1) {
@@ -1195,14 +1322,37 @@ public class CommandProcessor {
                    result.append(item.at(item.countTokens() - 1));
                } else {
                    String tail = cmd.substring(1);
-                   int index = Integer.parseInt(tail);
-                   if (index < 0) {
-                       index = history.size() + index;
-                   }
-                   if (index > size) {
-                       err.println("No such history item");
-                   } else {
-                       result.append((String)history.get(index));
+                   switch (tail.charAt(0)) {
+                   case '0':
+                   case '1':
+                   case '2':
+                   case '3':
+                   case '4':
+                   case '5':
+                   case '6':
+                   case '7':
+                   case '8':
+                   case '9':
+                   case '-': {
+                       int index = Integer.parseInt(tail);
+                       if (index < 0) {
+                           index = history.size() + index;
+                       }
+                       if (index > size) {
+                           err.println("No such history item");
+                       } else {
+                           result.append((String)history.get(index));
+                       }
+                       break;
+                   }
+                   default: {
+                       for (int i = history.size() - 1; i >= 0; i--) {
+                           String s = (String)history.get(i);
+                           if (s.startsWith(tail)) {
+                               result.append(s);
+                           }
+                       }
+                   }
                    }
                }
            }
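Beyond the numeric !n and !-n forms, the switch above adds !prefix, which expands to history entries whose text starts with the given prefix. A simplified, runnable Java sketch of that lookup (HistoryExpander is an illustrative class; unlike the SA code, it returns only the most recent match):

import java.util.*;

// Simplified sketch of "!" history expansion: numeric forms index into the
// history list, anything else matches by prefix. Not the SA code itself.
public class HistoryExpander {
    private final List<String> history = new ArrayList<>();

    void record(String cmd) { history.add(cmd); }

    String expand(String tail) {
        char c = tail.charAt(0);
        if (c == '-' || Character.isDigit(c)) {
            int index = Integer.parseInt(tail);
            if (index < 0) {
                index = history.size() + index;  // !-1 means "one back"
            }
            return history.get(index);
        }
        // Prefix form: walk backwards so the most recent match wins.
        for (int i = history.size() - 1; i >= 0; i--) {
            String s = history.get(i);
            if (s.startsWith(tail)) {
                return s;
            }
        }
        return null;                             // no such history item
    }

    public static void main(String[] args) {
        HistoryExpander h = new HistoryExpander();
        h.record("threads");
        h.record("universe");
        h.record("thread -a");
        System.out.println(h.expand("th"));      // prints "thread -a"
        System.out.println(h.expand("-1"));      // prints "thread -a"
    }
}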

View File

@@ -1,5 +1,5 @@
/*
- * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -985,6 +985,12 @@ public class HSDB implements ObjectHistogramPanel.Listener, SAListener {
                      annoPanel.addAnnotation(new Annotation(curFrame.addressOfInterpreterFrameExpressionStack(),
                                                             curFrame.addressOfInterpreterFrameTOS(),
                                                             "Interpreter expression stack"));
+                     Address monBegin = curFrame.interpreterFrameMonitorBegin().address();
+                     Address monEnd = curFrame.interpreterFrameMonitorEnd().address();
+                     if (!monBegin.equals(monEnd)) {
+                         annoPanel.addAnnotation(new Annotation(monBegin, monEnd,
+                                                                "BasicObjectLocks"));
+                     }
                      if (interpreterFrameMethod != null) {
                        // The offset is just to get the right stack slots highlighted in the output
                        int offset = 1;

View File

@@ -1,5 +1,5 @@
/*
- * Copyright 2001-2003 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -294,7 +294,7 @@ public class BugSpot extends JPanel {
    attachDialog.setSize(400, 300);
    GraphicsUtilities.centerInContainer(attachDialog.getComponent(),
                                        getParentDimension(attachDialog.getComponent()));
-   attachDialog.show();
+   attachDialog.setVisible(true);
  }

  public void showThreadsDialog() {
@@ -321,7 +321,7 @@ public class BugSpot extends JPanel {
                                        getParentDimension(threadsDialog.getComponent()));
    GraphicsUtilities.centerInContainer(threadsDialog.getComponent(),
                                        getParentDimension(threadsDialog.getComponent()));
-   threadsDialog.show();
+   threadsDialog.setVisible(true);
  }

  public void showMemoryDialog() {
@@ -341,7 +341,7 @@ public class BugSpot extends JPanel {
                                        getParentDimension(memoryDialog.getComponent()));
    GraphicsUtilities.centerInContainer(memoryDialog.getComponent(),
                                        getParentDimension(memoryDialog.getComponent()));
-   memoryDialog.show();
+   memoryDialog.setVisible(true);
  }

  /** Changes the editor factory this debugger uses to display source
@@ -530,7 +530,7 @@ public class BugSpot extends JPanel {
        addFrame(stackFrame);
        stackFrame.setSize(400, 200);
        GraphicsUtilities.moveToInContainer(stackFrame.getComponent(), 0.0f, 1.0f, 0, 20);
-       stackFrame.show();
+       stackFrame.setVisible(true);

        // Create register panel
        registerPanel = new RegisterPanel();
@@ -544,7 +544,7 @@ public class BugSpot extends JPanel {
        registerFrame.setSize(225, 200);
        GraphicsUtilities.moveToInContainer(registerFrame.getComponent(),
                                            1.0f, 0.0f, 0, 0);
-       registerFrame.show();
+       registerFrame.setVisible(true);

        resetCurrentThread();
      } catch (DebuggerException e) {
@@ -979,7 +979,7 @@ public class BugSpot extends JPanel {
                                                   1.0f,
                                                   0.85f,
                                                   getParentDimension(editorFrame.getComponent()));
-       editorFrame.show();
+       editorFrame.setVisible(true);
        shown = true;
      }
      code.showLineNumber(lineNo);

View File

@@ -1,5 +1,5 @@
/*
- * Copyright 2002 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2002-2010 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -96,10 +96,6 @@ public class BytecodeDisassembler {
      addBytecodeClass(Bytecodes._dstore, BytecodeStore.class);
      addBytecodeClass(Bytecodes._astore, BytecodeStore.class);
      addBytecodeClass(Bytecodes._tableswitch, BytecodeTableswitch.class);
-     // only special fast_xxx cases. others are handled differently.
-     addBytecodeClass(Bytecodes._fast_iaccess_0, BytecodeFastAAccess0.class);
-     addBytecodeClass(Bytecodes._fast_aaccess_0, BytecodeFastIAccess0.class);
   }

   public BytecodeDisassembler(Method method) {

View File

@ -1,5 +1,5 @@
/* /*
* Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -263,11 +263,12 @@ public class ConstantPool extends Oop implements ClassConstants {
case JVM_CONSTANT_NameAndType: return "JVM_CONSTANT_NameAndType"; case JVM_CONSTANT_NameAndType: return "JVM_CONSTANT_NameAndType";
case JVM_CONSTANT_Invalid: return "JVM_CONSTANT_Invalid"; case JVM_CONSTANT_Invalid: return "JVM_CONSTANT_Invalid";
case JVM_CONSTANT_UnresolvedClass: return "JVM_CONSTANT_UnresolvedClass"; case JVM_CONSTANT_UnresolvedClass: return "JVM_CONSTANT_UnresolvedClass";
case JVM_CONSTANT_UnresolvedClassInError: return "JVM_CONSTANT_UnresolvedClassInError";
case JVM_CONSTANT_ClassIndex: return "JVM_CONSTANT_ClassIndex"; case JVM_CONSTANT_ClassIndex: return "JVM_CONSTANT_ClassIndex";
case JVM_CONSTANT_UnresolvedString: return "JVM_CONSTANT_UnresolvedString"; case JVM_CONSTANT_UnresolvedString: return "JVM_CONSTANT_UnresolvedString";
case JVM_CONSTANT_StringIndex: return "JVM_CONSTANT_StringIndex"; case JVM_CONSTANT_StringIndex: return "JVM_CONSTANT_StringIndex";
} }
throw new InternalError("unknown tag"); throw new InternalError("Unknown tag: " + tag);
} }
public void iterateFields(OopVisitor visitor, boolean doVMFields) { public void iterateFields(OopVisitor visitor, boolean doVMFields) {
@ -304,6 +305,7 @@ public class ConstantPool extends Oop implements ClassConstants {
index++; index++;
break; break;
case JVM_CONSTANT_UnresolvedClassInError:
case JVM_CONSTANT_UnresolvedClass: case JVM_CONSTANT_UnresolvedClass:
case JVM_CONSTANT_Class: case JVM_CONSTANT_Class:
case JVM_CONSTANT_UnresolvedString: case JVM_CONSTANT_UnresolvedString:
@ -409,6 +411,7 @@ public class ConstantPool extends Oop implements ClassConstants {
} }
// case JVM_CONSTANT_ClassIndex: // case JVM_CONSTANT_ClassIndex:
case JVM_CONSTANT_UnresolvedClassInError:
case JVM_CONSTANT_UnresolvedClass: { case JVM_CONSTANT_UnresolvedClass: {
dos.writeByte(JVM_CONSTANT_Class); dos.writeByte(JVM_CONSTANT_Class);
String klassName = getSymbolAt(ci).asString(); String klassName = getSymbolAt(ci).asString();
@ -464,6 +467,8 @@ public class ConstantPool extends Oop implements ClassConstants {
+ ", type = " + signatureIndex); + ", type = " + signatureIndex);
break; break;
} }
default:
throw new InternalError("unknown tag: " + cpConstType);
} // switch } // switch
} }
dos.flush(); dos.flush();

View File

@@ -1,5 +1,5 @@
/*
- * Copyright 2002-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2002-2010 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -58,6 +58,9 @@ public interface ClassConstants
    // Temporary tag while constructing constant pool
    public static final int JVM_CONSTANT_StringIndex        = 103;

+   // Temporary tag while constructing constant pool
+   public static final int JVM_CONSTANT_UnresolvedClassInError = 104;
+
    // 1.5 major/minor version numbers from JVM spec. 3rd edition
    public static final short MAJOR_VERSION = 49;
    public static final short MINOR_VERSION = 0;

View File

@@ -1,5 +1,5 @@
/*
- * Copyright 2001 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -108,7 +108,7 @@ public abstract class SignatureIterator {
        return BasicTypeSize.getTArraySize();
      }
    }
-   throw new RuntimeException("Should not reach here");
+   throw new RuntimeException("Should not reach here: char " + (char)_signature.getByteAt(_index) + " @ " + _index + " in " + _signature.asString());
  }
  protected void checkSignatureEnd() {
    if (_index < _signature.getLength()) {

View File

@@ -1,5 +1,5 @@
/*
- * Copyright 2002-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2002-2010 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -238,6 +238,7 @@ public class ClassWriter implements /* imports */ ClassConstants
              }

              // case JVM_CONSTANT_ClassIndex:
+             case JVM_CONSTANT_UnresolvedClassInError:
              case JVM_CONSTANT_UnresolvedClass: {
                  dos.writeByte(JVM_CONSTANT_Class);
                  String klassName = cpool.getSymbolAt(ci).asString();
@@ -296,6 +297,8 @@ public class ClassWriter implements /* imports */ ClassConstants
                              + ", type = " + signatureIndex);
                  break;
              }
+             default:
+                 throw new InternalError("Unknown tag: " + cpConstType);
          } // switch
      }
  }

View File

@@ -1,5 +1,5 @@
/*
- * Copyright 2001 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -39,7 +39,6 @@ public interface FrameWrapper {
  public void setVisible(boolean visible);
  public void setSize(int x, int y);
  public void pack();
- public void show();
  public void dispose();
  public void setBackground(Color color);
  public void setResizable(boolean resizable);

View File

@@ -1,5 +1,5 @@
/*
- * Copyright 2002-2008 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2002-2010 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -95,8 +95,10 @@ public class HTMLGenerator implements /* imports */ ClassConstants {
   // list tags
   void beginList()    { beginTag("ul"); nl(); }
-  void li(String s)   { wrap("li", s); nl();  }
   void endList()      { endTag("ul"); nl();   }
+  void beginListItem() { beginTag("li"); }
+  void endListItem()   { endTag("li"); nl();  }
+  void li(String s)    { wrap("li", s); nl(); }

   // table tags
   void beginTable(int border) {
@@ -505,6 +507,11 @@ public class HTMLGenerator implements /* imports */ ClassConstants {
            buf.cell(cpool.getSymbolAt(index).asString());
            break;

+        case JVM_CONSTANT_UnresolvedClassInError:
+           buf.cell("JVM_CONSTANT_UnresolvedClassInError");
+           buf.cell(cpool.getSymbolAt(index).asString());
+           break;
+
         case JVM_CONSTANT_Class:
            buf.cell("JVM_CONSTANT_Class");
            Klass klass = (Klass) cpool.getObjAt(index);
@@ -564,6 +571,9 @@ public class HTMLGenerator implements /* imports */ ClassConstants {
            buf.cell("JVM_CONSTANT_StringIndex");
            buf.cell(Integer.toString(cpool.getIntAt(index)));
            break;
+
+        default:
+           throw new InternalError("unknown tag: " + ctag);
         }

         buf.endTag("tr");
@@ -671,7 +681,16 @@ public class HTMLGenerator implements /* imports */ ClassConstants {
         buf.cell(Integer.toString(curBci) + spaces);

         buf.beginTag("td");
-        String instrStr = escapeHTMLSpecialChars(instr.toString());
+        String instrStr = null;
+        try {
+           instrStr = escapeHTMLSpecialChars(instr.toString());
+        } catch (RuntimeException re) {
+           buf.append("exception during bytecode processing");
+           buf.endTag("td");
+           buf.endTag("tr");
+           re.printStackTrace();
+           return;
+        }

         if (instr instanceof BytecodeNew) {
            BytecodeNew newBytecode = (BytecodeNew) instr;
@@ -1396,9 +1415,7 @@ public class HTMLGenerator implements /* imports */ ClassConstants {
      final SymbolFinder symFinder = createSymbolFinder();
      final Disassembler disasm = createDisassembler(startPc, code);
      class NMethodVisitor implements InstructionVisitor {
-        boolean prevWasCall;
         public void prologue() {
-           prevWasCall = false;
         }

         public void visit(long currentPc, Instruction instr) {
@@ -1418,8 +1435,7 @@ public class HTMLGenerator implements /* imports */ ClassConstants {
            PCDesc pcDesc = (PCDesc) safepoints.get(longToAddress(currentPc));
-           boolean isSafepoint = (pcDesc != null);
-           if (isSafepoint && prevWasCall) {
+           if (pcDesc != null) {
              buf.append(genSafepointInfo(nmethod, pcDesc));
            }
@@ -1435,11 +1451,6 @@ public class HTMLGenerator implements /* imports */ ClassConstants {
            }

            buf.br();
-
-           if (isSafepoint && !prevWasCall) {
-              buf.append(genSafepointInfo(nmethod, pcDesc));
-           }
-
-           prevWasCall = instr.isCall();
         }

         public void epilogue() {
@@ -1783,22 +1794,20 @@ public class HTMLGenerator implements /* imports */ ClassConstants {
         buf.h3("Fields");
         buf.beginList();
         for (int f = 0; f < numFields; f += InstanceKlass.NEXT_OFFSET) {
-           int nameIndex = fields.getShortAt(f + InstanceKlass.NAME_INDEX_OFFSET);
-           int sigIndex  = fields.getShortAt(f + InstanceKlass.SIGNATURE_INDEX_OFFSET);
-           int genSigIndex = fields.getShortAt(f + InstanceKlass.GENERIC_SIGNATURE_INDEX_OFFSET);
-           Symbol f_name = cp.getSymbolAt(nameIndex);
-           Symbol f_sig  = cp.getSymbolAt(sigIndex);
-           Symbol f_genSig = (genSigIndex != 0)? cp.getSymbolAt(genSigIndex) : null;
-           AccessFlags acc = new AccessFlags(fields.getShortAt(f + InstanceKlass.ACCESS_FLAGS_OFFSET));
+           sun.jvm.hotspot.oops.Field field = klass.getFieldByIndex(f);
+           String f_name = ((NamedFieldIdentifier)field.getID()).getName();
+           Symbol f_sig  = field.getSignature();
+           Symbol f_genSig = field.getGenericSignature();
+           AccessFlags acc = field.getAccessFlagsObj();

-           buf.beginTag("li");
+           buf.beginListItem();
            buf.append(genFieldModifierString(acc));
            buf.append(' ');
            Formatter sigBuf = new Formatter(genHTML);
            new SignatureConverter(f_sig, sigBuf.getBuffer()).dispatchField();
            buf.append(sigBuf.toString().replace('/', '.'));
            buf.append(' ');
-           buf.append(f_name.asString());
+           buf.append(f_name);
            buf.append(';');
            // is it generic?
            if (f_genSig != null) {
@@ -1806,7 +1815,8 @@ public class HTMLGenerator implements /* imports */ ClassConstants {
               buf.append(escapeHTMLSpecialChars(f_genSig.asString()));
               buf.append("] ");
            }
-           buf.endTag("li");
+           buf.append(" (offset = " + field.getOffset() + ")");
+           buf.endListItem();
         }
         buf.endList();
      }

View File

@@ -1,5 +1,5 @@
/*
- * Copyright 2000-2005 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,7 @@ public class Assert {
  public static boolean ASSERTS_ENABLED = true;

  public static void that(boolean test, String message) {
-   if (!test) {
+   if (ASSERTS_ENABLED && !test) {
      throw new AssertionFailure(message);
    }
  }
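With this change the ASSERTS_ENABLED flag actually gates the check, so SA assertions can be switched off at run time. A tiny runnable sketch of the same guard (an illustrative class; a plain RuntimeException stands in for AssertionFailure):

// Sketch of a runtime-switchable assertion guard, mirroring the Assert change.
public class GuardedAssert {
    public static boolean ASSERTS_ENABLED = true;

    public static void that(boolean test, String message) {
        // The flag short-circuits the check; note the argument expression is
        // still evaluated at the call site either way.
        if (ASSERTS_ENABLED && !test) {
            throw new RuntimeException(message);
        }
    }

    public static void main(String[] args) {
        ASSERTS_ENABLED = false;
        that(1 + 1 == 3, "never thrown while asserts are off");
        ASSERTS_ENABLED = true;
        try {
            that(1 + 1 == 3, "now it throws");
        } catch (RuntimeException e) {
            System.out.println("caught: " + e.getMessage());
        }
    }
}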

View File

@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2010
HS_MAJOR_VER=18
HS_MINOR_VER=0
-HS_BUILD_NUMBER=03
+HS_BUILD_NUMBER=04

JDK_MAJOR_VER=1
JDK_MINOR_VER=7

View File

@@ -127,6 +127,9 @@ $(GENERATEDFILES): refresh_adfiles
# Note that product files are updated via "mv", which is atomic.
TEMPDIR := $(OUTDIR)/mktmp$(shell echo $$$$)

+# Debuggable by default
+CFLAGS += -g
+
# Pass -D flags into ADLC.
ADLCFLAGS += $(SYSDEFS)
@@ -135,7 +138,7 @@ ADLCFLAGS += -q -T
# Normally, debugging is done directly on the ad_<arch>*.cpp files.
# But -g will put #line directives in those files pointing back to <arch>.ad.
-#ADLCFLAGS += -g
+ADLCFLAGS += -g

ifdef LP64
ADLCFLAGS += -D_LP64

View File

@@ -147,6 +147,9 @@ $(GENERATEDFILES): refresh_adfiles
# Note that product files are updated via "mv", which is atomic.
TEMPDIR := $(OUTDIR)/mktmp$(shell echo $$$$)

+# Debuggable by default
+CFLAGS += -g
+
# Pass -D flags into ADLC.
ADLCFLAGS += $(SYSDEFS)
@@ -155,7 +158,7 @@ ADLCFLAGS += -q -T
# Normally, debugging is done directly on the ad_<arch>*.cpp files.
# But -g will put #line directives in those files pointing back to <arch>.ad.
-#ADLCFLAGS += -g
+ADLCFLAGS += -g

ifdef LP64
ADLCFLAGS += -D_LP64

View File

@@ -661,9 +661,6 @@ class Assembler : public AbstractAssembler {
    stx_op3      = 0x0e,
    swap_op3     = 0x0f,

-   lduwa_op3    = 0x10,
-   ldxa_op3     = 0x1b,
-
    stwa_op3     = 0x14,
    stxa_op3     = 0x1e,

View File

@@ -388,6 +388,60 @@ int LIR_Assembler::emit_exception_handler() {
}
// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
if (CommentedAssembly) {
_masm->block_comment("Unwind handler");
}
#endif
int offset = code_offset();
// Fetch the exception from TLS and clear out exception related thread state
__ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), O0);
__ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
__ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));
__ bind(_unwind_handler_entry);
__ verify_not_null_oop(O0);
if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
__ mov(O0, I0); // Preserve the exception
}
// Perform needed unlocking
MonitorExitStub* stub = NULL;
if (method()->is_synchronized()) {
monitor_address(0, FrameMap::I1_opr);
stub = new MonitorExitStub(FrameMap::I1_opr, true, 0);
__ unlock_object(I3, I2, I1, *stub->entry());
__ bind(*stub->continuation());
}
if (compilation()->env()->dtrace_method_probes()) {
jobject2reg(method()->constant_encoding(), O0);
__ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), relocInfo::runtime_call_type);
__ delayed()->nop();
}
if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
__ mov(I0, O0); // Restore the exception
}
// dispatch to the unwind logic
__ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type);
__ delayed()->nop();
// Emit the slow path assembly
if (stub != NULL) {
stub->emit_code(this);
}
return offset;
}
int LIR_Assembler::emit_deopt_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
@@ -2050,26 +2104,29 @@ int LIR_Assembler::shift_amount(BasicType t) {
}

-void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info, bool unwind) {
+void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register() == Oexception, "should match");
- assert(unwind || exceptionPC->as_register() == Oissuing_pc, "should match");
+ assert(exceptionPC->as_register() == Oissuing_pc, "should match");

  info->add_register_oop(exceptionOop);

- if (unwind) {
-   __ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type);
-   __ delayed()->nop();
- } else {
-   // reuse the debug info from the safepoint poll for the throw op itself
-   address pc_for_athrow  = __ pc();
-   int pc_for_athrow_offset = __ offset();
-   RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow);
-   __ set(pc_for_athrow, Oissuing_pc, rspec);
-   add_call_info(pc_for_athrow_offset, info); // for exception handler
+ // reuse the debug info from the safepoint poll for the throw op itself
+ address pc_for_athrow  = __ pc();
+ int pc_for_athrow_offset = __ offset();
+ RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow);
+ __ set(pc_for_athrow, Oissuing_pc, rspec);
+ add_call_info(pc_for_athrow_offset, info); // for exception handler

  __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
- }
}

+void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
+ assert(exceptionOop->as_register() == Oexception, "should match");
+
+ __ br(Assembler::always, false, Assembler::pt, _unwind_handler_entry);
+ __ delayed()->nop();
+}

@@ -2358,7 +2415,7 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  if (UseSlowPath ||
      (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
      (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
-   __ br(Assembler::always, false, Assembler::pn, *op->stub()->entry());
+   __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
    __ delayed()->nop();
  } else {
    __ allocate_array(op->obj()->as_register(),

View File

@@ -923,38 +923,6 @@ void emit_form3_mem_reg(CodeBuffer &cbuf, const MachNode* n, int primary, int tertiary,
#endif
}

-void emit_form3_mem_reg_asi(CodeBuffer &cbuf, const MachNode* n, int primary, int tertiary,
-                            int src1_enc, int disp32, int src2_enc, int dst_enc, int asi) {
-
-  uint instr;
-  instr = (Assembler::ldst_op << 30)
-        | (dst_enc  << 25)
-        | (primary  << 19)
-        | (src1_enc << 14);
-
-  int disp = disp32;
-  int index = src2_enc;
-
-  if (src1_enc == R_SP_enc || src1_enc == R_FP_enc)
-    disp += STACK_BIAS;
-
-  // We should have a compiler bailout here rather than a guarantee.
-  // Better yet would be some mechanism to handle variable-size matches correctly.
-  guarantee(Assembler::is_simm13(disp), "Do not match large constant offsets" );
-
-  if( disp != 0 ) {
-    // use reg-reg form
-    // set src2=R_O7 contains offset
-    index = R_O7_enc;
-    emit3_simm13( cbuf, Assembler::arith_op, index, Assembler::or_op3, 0, disp);
-  }
-  instr |= (asi << 5);
-  instr |= index;
-  uint *code = (uint*)cbuf.code_end();
-  *code = instr;
-  cbuf.set_code_end(cbuf.code_end() + BytesPerInstWord);
-}
void emit_call_reloc(CodeBuffer &cbuf, intptr_t entry_point, relocInfo::relocType rtype, bool preserve_g2 = false, bool force_far_call = false) {
  // The method which records debug information at every safepoint
  // expects the call to be the first instruction in the snippet as
// expects the call to be the first instruction in the snippet as // expects the call to be the first instruction in the snippet as
@@ -1954,11 +1922,6 @@ encode %{
                       $mem$$base, $mem$$disp, $mem$$index, $dst$$reg);
  %}

- enc_class form3_mem_reg_little( memory mem, iRegI dst) %{
-   emit_form3_mem_reg_asi(cbuf, this, $primary, -1,
-                $mem$$base, $mem$$disp, $mem$$index, $dst$$reg, Assembler::ASI_PRIMARY_LITTLE);
- %}

  enc_class form3_mem_prefetch_read( memory mem ) %{
    emit_form3_mem_reg(cbuf, this, $primary, -1,
                       $mem$$base, $mem$$disp, $mem$$index, 0/*prefetch function many-reads*/);
@@ -4311,8 +4274,8 @@ operand cmpOp_commute() %{
// instructions for every form of operand when the instruction accepts
// multiple operand types with the same basic encoding and format. The classic
// case of this is memory operands.
+// Indirect is not included since its use is limited to Compare & Swap
opclass memory( indirect, indOffset13, indIndex );
+opclass indIndexMemory( indIndex );

//----------PIPELINE-----------------------------------------------------------
pipeline %{
@@ -9666,84 +9629,179 @@ instruct popCountL(iRegI dst, iRegL src) %{
instruct bytes_reverse_int(iRegI dst, stackSlotI src) %{
  match(Set dst (ReverseBytesI src));
- effect(DEF dst, USE src);

  // Op cost is artificially doubled to make sure that load or store
  // instructions are preferred over this one which requires a spill
  // onto a stack slot.
  ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
- size(8);
  format %{ "LDUWA  $src, $dst\t!asi=primary_little" %}

- opcode(Assembler::lduwa_op3);
- ins_encode( form3_mem_reg_little(src, dst) );
+ ins_encode %{
+   __ set($src$$disp + STACK_BIAS, O7);
+   __ lduwa($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
+ %}
  ins_pipe( iload_mem );
%}

instruct bytes_reverse_long(iRegL dst, stackSlotL src) %{
  match(Set dst (ReverseBytesL src));
- effect(DEF dst, USE src);

  // Op cost is artificially doubled to make sure that load or store
  // instructions are preferred over this one which requires a spill
  // onto a stack slot.
  ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
- size(8);
  format %{ "LDXA   $src, $dst\t!asi=primary_little" %}

- opcode(Assembler::ldxa_op3);
- ins_encode( form3_mem_reg_little( src, dst) );
+ ins_encode %{
+   __ set($src$$disp + STACK_BIAS, O7);
+   __ ldxa($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
+ %}
  ins_pipe( iload_mem );
%}

+instruct bytes_reverse_unsigned_short(iRegI dst, stackSlotI src) %{
+ match(Set dst (ReverseBytesUS src));
+
+ // Op cost is artificially doubled to make sure that load or store
+ // instructions are preferred over this one which requires a spill
+ // onto a stack slot.
+ ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
+ format %{ "LDUHA  $src, $dst\t!asi=primary_little\n\t" %}
+
+ ins_encode %{
+   // the value was spilled as an int so bias the load
+   __ set($src$$disp + STACK_BIAS + 2, O7);
+   __ lduha($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
+ %}
+ ins_pipe( iload_mem );
+%}
+
+instruct bytes_reverse_short(iRegI dst, stackSlotI src) %{
+ match(Set dst (ReverseBytesS src));
+
+ // Op cost is artificially doubled to make sure that load or store
+ // instructions are preferred over this one which requires a spill
+ // onto a stack slot.
+ ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
+ format %{ "LDSHA  $src, $dst\t!asi=primary_little\n\t" %}
+
+ ins_encode %{
+   // the value was spilled as an int so bias the load
+   __ set($src$$disp + STACK_BIAS + 2, O7);
+   __ ldsha($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
+ %}
+ ins_pipe( iload_mem );
+%}

// Load Integer reversed byte order
-instruct loadI_reversed(iRegI dst, memory src) %{
+instruct loadI_reversed(iRegI dst, indIndexMemory src) %{
  match(Set dst (ReverseBytesI (LoadI src)));

  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
- size(8);
+ size(4);
  format %{ "LDUWA  $src, $dst\t!asi=primary_little" %}

- opcode(Assembler::lduwa_op3);
- ins_encode( form3_mem_reg_little( src, dst) );
+ ins_encode %{
+   __ lduwa($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
+ %}
  ins_pipe(iload_mem);
%}

// Load Long - aligned and reversed
-instruct loadL_reversed(iRegL dst, memory src) %{
+instruct loadL_reversed(iRegL dst, indIndexMemory src) %{
  match(Set dst (ReverseBytesL (LoadL src)));

- ins_cost(DEFAULT_COST + MEMORY_REF_COST);
- size(8);
+ ins_cost(MEMORY_REF_COST);
+ size(4);
  format %{ "LDXA   $src, $dst\t!asi=primary_little" %}

- opcode(Assembler::ldxa_op3);
- ins_encode( form3_mem_reg_little( src, dst ) );
+ ins_encode %{
+   __ ldxa($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
+ %}
  ins_pipe(iload_mem);
%}

+// Load unsigned short / char reversed byte order
+instruct loadUS_reversed(iRegI dst, indIndexMemory src) %{
+ match(Set dst (ReverseBytesUS (LoadUS src)));
+
+ ins_cost(MEMORY_REF_COST);
+ size(4);
+ format %{ "LDUHA  $src, $dst\t!asi=primary_little" %}
+
+ ins_encode %{
+   __ lduha($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
+ %}
+ ins_pipe(iload_mem);
+%}
+
+// Load short reversed byte order
+instruct loadS_reversed(iRegI dst, indIndexMemory src) %{
+ match(Set dst (ReverseBytesS (LoadS src)));
+
+ ins_cost(MEMORY_REF_COST);
+ size(4);
+ format %{ "LDSHA  $src, $dst\t!asi=primary_little" %}
+
+ ins_encode %{
+   __ ldsha($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
+ %}
+ ins_pipe(iload_mem);
+%}

// Store Integer reversed byte order
-instruct storeI_reversed(memory dst, iRegI src) %{
+instruct storeI_reversed(indIndexMemory dst, iRegI src) %{
  match(Set dst (StoreI dst (ReverseBytesI src)));

  ins_cost(MEMORY_REF_COST);
- size(8);
+ size(4);
  format %{ "STWA   $src, $dst\t!asi=primary_little" %}

- opcode(Assembler::stwa_op3);
- ins_encode( form3_mem_reg_little( dst, src) );
+ ins_encode %{
+   __ stwa($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
+ %}
  ins_pipe(istore_mem_reg);
%}

// Store Long reversed byte order
-instruct storeL_reversed(memory dst, iRegL src) %{
+instruct storeL_reversed(indIndexMemory dst, iRegL src) %{
  match(Set dst (StoreL dst (ReverseBytesL src)));

  ins_cost(MEMORY_REF_COST);
- size(8);
+ size(4);
  format %{ "STXA   $src, $dst\t!asi=primary_little" %}

- opcode(Assembler::stxa_op3);
- ins_encode( form3_mem_reg_little( dst, src) );
+ ins_encode %{
+   __ stxa($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
+ %}
  ins_pipe(istore_mem_reg);
%}

+// Store unsigned short/char reversed byte order
+instruct storeUS_reversed(indIndexMemory dst, iRegI src) %{
+ match(Set dst (StoreC dst (ReverseBytesUS src)));
+
+ ins_cost(MEMORY_REF_COST);
+ size(4);
+ format %{ "STHA   $src, $dst\t!asi=primary_little" %}
+
+ ins_encode %{
+   __ stha($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
+ %}
+ ins_pipe(istore_mem_reg);
+%}
+
+// Store short reversed byte order
+instruct storeS_reversed(indIndexMemory dst, iRegI src) %{
+ match(Set dst (StoreC dst (ReverseBytesS src)));
+
+ ins_cost(MEMORY_REF_COST);
+ size(4);
+ format %{ "STHA   $src, $dst\t!asi=primary_little" %}
+
+ ins_encode %{
+   __ stha($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
+ %}
+ ins_pipe(istore_mem_reg);
+%}
View File

@@ -455,6 +455,60 @@ int LIR_Assembler::emit_exception_handler() {
}
// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
if (CommentedAssembly) {
_masm->block_comment("Unwind handler");
}
#endif
int offset = code_offset();
// Fetch the exception from TLS and clear out exception related thread state
__ get_thread(rsi);
__ movptr(rax, Address(rsi, JavaThread::exception_oop_offset()));
__ movptr(Address(rsi, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
__ movptr(Address(rsi, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
__ bind(_unwind_handler_entry);
__ verify_not_null_oop(rax);
if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
__ mov(rsi, rax); // Preserve the exception
}
// Perform needed unlocking
MonitorExitStub* stub = NULL;
if (method()->is_synchronized()) {
monitor_address(0, FrameMap::rax_opr);
stub = new MonitorExitStub(FrameMap::rax_opr, true, 0);
__ unlock_object(rdi, rbx, rax, *stub->entry());
__ bind(*stub->continuation());
}
if (compilation()->env()->dtrace_method_probes()) {
__ movoop(Address(rsp, 0), method()->constant_encoding());
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
}
if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
__ mov(rax, rsi); // Restore the exception
}
// remove the activation and dispatch to the unwind handler
__ remove_frame(initial_frame_size_in_bytes());
__ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
// Emit the slow path assembly
if (stub != NULL) {
stub->emit_code(this);
}
return offset;
}
int LIR_Assembler::emit_deopt_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
@@ -1190,8 +1244,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
      break;
#endif // _LP64

    case T_INT:
-     // %%% could this be a movl? this is safer but longer instruction
-     __ movl2ptr(dest->as_register(), from_addr);
+     __ movl(dest->as_register(), from_addr);
      break;

    case T_LONG: {
@@ -1249,7 +1302,6 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
        __ shll(dest_reg, 24);
        __ sarl(dest_reg, 24);
      }
-     // These are unsigned so the zero extension on 64bit is just what we need
      break;
    }
@@ -1261,8 +1313,6 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
      } else {
        __ movw(dest_reg, from_addr);
      }
-     // This is unsigned so the zero extension on 64bit is just what we need
-     // __ movl2ptr(dest_reg, dest_reg);
      break;
    }
@@ -1275,8 +1325,6 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
        __ shll(dest_reg, 16);
        __ sarl(dest_reg, 16);
      }
-     // Might not be needed in 64bit but certainly doesn't hurt (except for code size)
-     __ movl2ptr(dest_reg, dest_reg);
      break;
    }
@@ -2795,42 +2843,43 @@ void LIR_Assembler::emit_static_call_stub() {
}

-void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info, bool unwind) {
+void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register() == rax, "must match");
- assert(unwind || exceptionPC->as_register() == rdx, "must match");
+ assert(exceptionPC->as_register() == rdx, "must match");

  // exception object is not added to oop map by LinearScan
  // (LinearScan assumes that no oops are in fixed registers)
  info->add_register_oop(exceptionOop);
  Runtime1::StubID unwind_id;

- if (!unwind) {
-   // get current pc information
-   // pc is only needed if the method has an exception handler, the unwind code does not need it.
-   int pc_for_athrow_offset = __ offset();
-   InternalAddress pc_for_athrow(__ pc());
-   __ lea(exceptionPC->as_register(), pc_for_athrow);
-   add_call_info(pc_for_athrow_offset, info); // for exception handler
-
-   __ verify_not_null_oop(rax);
-   // search an exception handler (rax: exception oop, rdx: throwing pc)
-   if (compilation()->has_fpu_code()) {
-     unwind_id = Runtime1::handle_exception_id;
-   } else {
-     unwind_id = Runtime1::handle_exception_nofpu_id;
-   }
-   __ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
- } else {
-   // remove the activation
-   __ remove_frame(initial_frame_size_in_bytes());
-   __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
- }
+ // get current pc information
+ // pc is only needed if the method has an exception handler, the unwind code does not need it.
+ int pc_for_athrow_offset = __ offset();
+ InternalAddress pc_for_athrow(__ pc());
+ __ lea(exceptionPC->as_register(), pc_for_athrow);
+ add_call_info(pc_for_athrow_offset, info); // for exception handler
+
+ __ verify_not_null_oop(rax);
+ // search an exception handler (rax: exception oop, rdx: throwing pc)
+ if (compilation()->has_fpu_code()) {
+   unwind_id = Runtime1::handle_exception_id;
+ } else {
+   unwind_id = Runtime1::handle_exception_nofpu_id;
+ }
+ __ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));

  // enough room for two byte trap
  __ nop();
}

+void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
+ assert(exceptionOop->as_register() == rax, "must match");
+
+ __ jmp(_unwind_handler_entry);
+}

void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
  // optimized version for linear scan:

View File

@@ -1,5 +1,5 @@
//
-// Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
+// Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@@ -6272,6 +6272,30 @@ instruct bytes_reverse_long(eRegL dst) %{
  ins_pipe( ialu_reg_reg);
%}
instruct bytes_reverse_unsigned_short(eRegI dst) %{
match(Set dst (ReverseBytesUS dst));
format %{ "BSWAP $dst\n\t"
"SHR $dst,16\n\t" %}
ins_encode %{
__ bswapl($dst$$Register);
__ shrl($dst$$Register, 16);
%}
ins_pipe( ialu_reg );
%}
instruct bytes_reverse_short(eRegI dst) %{
match(Set dst (ReverseBytesS dst));
format %{ "BSWAP $dst\n\t"
"SAR $dst,16\n\t" %}
ins_encode %{
__ bswapl($dst$$Register);
__ sarl($dst$$Register, 16);
%}
ins_pipe( ialu_reg );
%}
//---------- Zeros Count Instructions ------------------------------------------ //---------- Zeros Count Instructions ------------------------------------------

View File

@@ -1,5 +1,5 @@
 //
-// Copyright 2003-2009 Sun Microsystems, Inc.  All Rights Reserved.
+// Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -7371,6 +7371,30 @@ instruct bytes_reverse_long(rRegL dst) %{
   ins_pipe( ialu_reg);
 %}
 
+instruct bytes_reverse_unsigned_short(rRegI dst) %{
+  match(Set dst (ReverseBytesUS dst));
+
+  format %{ "bswapl  $dst\n\t"
+            "shrl    $dst,16\n\t" %}
+  ins_encode %{
+    __ bswapl($dst$$Register);
+    __ shrl($dst$$Register, 16);
+  %}
+  ins_pipe( ialu_reg );
+%}
+
+instruct bytes_reverse_short(rRegI dst) %{
+  match(Set dst (ReverseBytesS dst));
+
+  format %{ "bswapl  $dst\n\t"
+            "sar     $dst,16\n\t" %}
+  ins_encode %{
+    __ bswapl($dst$$Register);
+    __ sarl($dst$$Register, 16);
+  %}
+  ins_pipe( ialu_reg );
+%}
+
 instruct loadI_reversed(rRegI dst, memory src) %{
   match(Set dst (ReverseBytesI (LoadI src)));

View File

@@ -39,21 +39,9 @@
 void CppInterpreter::normal_entry(methodOop method, intptr_t UNUSED, TRAPS) {
   JavaThread *thread = (JavaThread *) THREAD;
-  ZeroStack *stack = thread->zero_stack();
-
-  // Adjust the caller's stack frame to accomodate any additional
-  // local variables we have contiguously with our parameters.
-  int extra_locals = method->max_locals() - method->size_of_parameters();
-  if (extra_locals > 0) {
-    if (extra_locals > stack->available_words()) {
-      Unimplemented();
-    }
-    for (int i = 0; i < extra_locals; i++)
-      stack->push(0);
-  }
 
   // Allocate and initialize our frame.
-  InterpreterFrame *frame = InterpreterFrame::build(stack, method, thread);
+  InterpreterFrame *frame = InterpreterFrame::build(method, CHECK);
   thread->push_zero_frame(frame);
 
   // Execute those bytecodes!
@@ -76,12 +64,6 @@ void CppInterpreter::main_loop(int recurse, TRAPS) {
   intptr_t *result = NULL;
   int result_slots = 0;
 
-  // Check we're not about to run out of stack
-  if (stack_overflow_imminent(thread)) {
-    CALL_VM_NOCHECK(InterpreterRuntime::throw_StackOverflowError(thread));
-    goto unwind_and_return;
-  }
-
   while (true) {
     // We can set up the frame anchor with everything we want at
     // this point as we are thread_in_Java and no safepoints can
@@ -123,9 +105,9 @@ void CppInterpreter::main_loop(int recurse, TRAPS) {
       int monitor_words = frame::interpreter_frame_monitor_size();
 
       // Allocate the space
-      if (monitor_words > stack->available_words()) {
-        Unimplemented();
-      }
+      stack->overflow_check(monitor_words, THREAD);
+      if (HAS_PENDING_EXCEPTION)
+        break;
       stack->alloc(monitor_words * wordSize);
 
       // Move the expression stack contents
@@ -172,8 +154,6 @@ void CppInterpreter::main_loop(int recurse, TRAPS) {
     }
   }
 
- unwind_and_return:
-
   // Unwind the current frame
   thread->pop_zero_frame();
@@ -193,17 +173,11 @@ void CppInterpreter::native_entry(methodOop method, intptr_t UNUSED, TRAPS) {
   ZeroStack *stack = thread->zero_stack();
 
   // Allocate and initialize our frame
-  InterpreterFrame *frame = InterpreterFrame::build(stack, method, thread);
+  InterpreterFrame *frame = InterpreterFrame::build(method, CHECK);
   thread->push_zero_frame(frame);
   interpreterState istate = frame->interpreter_state();
   intptr_t *locals = istate->locals();
 
-  // Check we're not about to run out of stack
-  if (stack_overflow_imminent(thread)) {
-    CALL_VM_NOCHECK(InterpreterRuntime::throw_StackOverflowError(thread));
-    goto unwind_and_return;
-  }
-
   // Update the invocation counter
   if ((UseCompiler || CountCompiledCalls) && !method->is_synchronized()) {
     InvocationCounter *counter = method->invocation_counter();
@@ -264,9 +238,10 @@ void CppInterpreter::native_entry(methodOop method, intptr_t UNUSED, TRAPS) {
   assert(function != NULL, "should be set if signature handler is");
 
   // Build the argument list
-  if (handler->argument_count() * 2 > stack->available_words()) {
-    Unimplemented();
-  }
+  stack->overflow_check(handler->argument_count() * 2, THREAD);
+  if (HAS_PENDING_EXCEPTION)
+    goto unlock_unwind_and_return;
+
   void **arguments;
   void *mirror; {
     arguments =
@@ -503,9 +478,7 @@ void CppInterpreter::accessor_entry(methodOop method, intptr_t UNUSED, TRAPS) {
   switch (entry->flag_state()) {
   case ltos:
   case dtos:
-    if (stack->available_words() < 1) {
-      Unimplemented();
-    }
+    stack->overflow_check(1, CHECK);
     stack->alloc(wordSize);
     break;
   }
@@ -601,39 +574,30 @@ void CppInterpreter::empty_entry(methodOop method, intptr_t UNUSED, TRAPS) {
   stack->set_sp(stack->sp() + method->size_of_parameters());
 }
 
-bool CppInterpreter::stack_overflow_imminent(JavaThread *thread) {
-  // How is the ABI stack?
-  address stack_top = thread->stack_base() - thread->stack_size();
-  int free_stack = os::current_stack_pointer() - stack_top;
-  if (free_stack < StackShadowPages * os::vm_page_size()) {
-    return true;
-  }
-
-  // How is the Zero stack?
-  // Throwing a StackOverflowError involves a VM call, which means
-  // we need a frame on the stack.  We should be checking here to
-  // ensure that methods we call have enough room to install the
-  // largest possible frame, but that's more than twice the size
-  // of the entire Zero stack we get by default, so we just check
-  // we have *some* space instead...
-  free_stack = thread->zero_stack()->available_words() * wordSize;
-  if (free_stack < StackShadowPages * os::vm_page_size()) {
-    return true;
-  }
-
-  return false;
-}
-
-InterpreterFrame *InterpreterFrame::build(ZeroStack*      stack,
-                                          const methodOop method,
-                                          JavaThread*     thread) {
-  int monitor_words =
-    method->is_synchronized() ? frame::interpreter_frame_monitor_size() : 0;
-  int stack_words = method->is_native() ? 0 : method->max_stack();
-
-  if (header_words + monitor_words + stack_words > stack->available_words()) {
-    Unimplemented();
-  }
+InterpreterFrame *InterpreterFrame::build(const methodOop method, TRAPS) {
+  JavaThread *thread = (JavaThread *) THREAD;
+  ZeroStack *stack = thread->zero_stack();
+
+  // Calculate the size of the frame we'll build, including
+  // any adjustments to the caller's frame that we'll make.
+  int extra_locals  = 0;
+  int monitor_words = 0;
+  int stack_words   = 0;
+
+  if (!method->is_native()) {
+    extra_locals = method->max_locals() - method->size_of_parameters();
+    stack_words  = method->max_stack();
+  }
+  if (method->is_synchronized()) {
+    monitor_words = frame::interpreter_frame_monitor_size();
+  }
+  stack->overflow_check(
+    extra_locals + header_words + monitor_words + stack_words, CHECK_NULL);
+
+  // Adjust the caller's stack frame to accomodate any additional
+  // local variables we have contiguously with our parameters.
+  for (int i = 0; i < extra_locals; i++)
+    stack->push(0);
 
   intptr_t *locals;
   if (method->is_native())
@@ -812,14 +776,13 @@ InterpreterGenerator::InterpreterGenerator(StubQueue* code)
 // Deoptimization helpers
 
-InterpreterFrame *InterpreterFrame::build(ZeroStack* stack, int size) {
+InterpreterFrame *InterpreterFrame::build(int size, TRAPS) {
+  ZeroStack *stack = ((JavaThread *) THREAD)->zero_stack();
+
   int size_in_words = size >> LogBytesPerWord;
   assert(size_in_words * wordSize == size, "unaligned");
   assert(size_in_words >= header_words, "too small");
-
-  if (size_in_words > stack->available_words()) {
-    Unimplemented();
-  }
+  stack->overflow_check(size_in_words, CHECK_NULL);
 
   stack->push(0); // next_frame, filled in later
   intptr_t *fp = stack->sp();

View File

@@ -38,10 +38,6 @@
   // Main loop of normal_entry
   static void main_loop(int recurse, TRAPS);
 
- private:
-  // Stack overflow checks
-  static bool stack_overflow_imminent(JavaThread *thread);
-
  private:
   // Fast result type determination
   static BasicType result_type_of(methodOop method);

View File

@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2008 Red Hat, Inc.
+ * Copyright 2008, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,10 +47,10 @@ class EntryFrame : public ZeroFrame {
   };
 
  public:
-  static EntryFrame *build(ZeroStack*       stack,
-                           const intptr_t*  parameters,
+  static EntryFrame *build(const intptr_t*  parameters,
                            int              parameter_words,
-                           JavaCallWrapper* call_wrapper);
+                           JavaCallWrapper* call_wrapper,
+                           TRAPS);
 
  public:
   JavaCallWrapper *call_wrapper() const {
     return (JavaCallWrapper *) value_of_word(call_wrapper_off);

View File

@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2008 Red Hat, Inc.
+ * Copyright 2008, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,7 +42,7 @@ class FakeStubFrame : public ZeroFrame {
   };
 
  public:
-  static FakeStubFrame *build(ZeroStack* stack);
+  static FakeStubFrame *build(TRAPS);
 
  public:
   void identify_word(int frame_index,

View File

@@ -35,6 +35,7 @@ define_pd_global(bool, ImplicitNullChecks, true);
 define_pd_global(bool,  UncommonNullCast,     true);
 
 define_pd_global(intx,  CodeEntryAlignment,   32);
+define_pd_global(intx,  OptoLoopAlignment,    16);
 define_pd_global(intx,  InlineFrequencyCount, 100);
 define_pd_global(intx,  PreInflateSpin,       10);

View File

@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2008 Red Hat, Inc.
+ * Copyright 2008, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -55,10 +55,8 @@ class InterpreterFrame : public ZeroFrame {
   };
 
  public:
-  static InterpreterFrame *build(ZeroStack*      stack,
-                                 const methodOop method,
-                                 JavaThread*     thread);
-  static InterpreterFrame *build(ZeroStack* stack, int size);
+  static InterpreterFrame *build(const methodOop method, TRAPS);
+  static InterpreterFrame *build(int size, TRAPS);
 
  public:
   interpreterState interpreter_state() const {

View File

@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2005 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2007, 2008 Red Hat, Inc.
+ * Copyright 2007, 2008, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -140,9 +140,8 @@ IRT_ENTRY(address,
   int required_words =
     (align_size_up(sizeof(ffi_cif), wordSize) >> LogBytesPerWord) +
     (method->is_static() ? 2 : 1) + method->size_of_parameters() + 1;
-  if (required_words > stack->available_words()) {
-    Unimplemented();
-  }
+
+  stack->overflow_check(required_words, CHECK_NULL);
 
   intptr_t *buf = (intptr_t *) stack->alloc(required_words * wordSize);
   SlowSignatureHandlerGenerator sshg(methodHandle(thread, method), buf);

View File

@@ -0,0 +1,73 @@
/*
* Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
#include "incls/_precompiled.incl"
#include "incls/_stack_zero.cpp.incl"
void ZeroStack::handle_overflow(TRAPS) {
JavaThread *thread = (JavaThread *) THREAD;
// Set up the frame anchor if it isn't already
bool has_last_Java_frame = thread->has_last_Java_frame();
if (!has_last_Java_frame) {
ZeroFrame *frame = thread->top_zero_frame();
while (frame) {
if (frame->is_shark_frame())
break;
if (frame->is_interpreter_frame()) {
interpreterState istate =
frame->as_interpreter_frame()->interpreter_state();
if (istate->self_link() == istate)
break;
}
frame = frame->next();
}
if (frame == NULL)
fatal("unrecoverable stack overflow");
thread->set_last_Java_frame(frame);
}
// Throw the exception
switch (thread->thread_state()) {
case _thread_in_Java:
InterpreterRuntime::throw_StackOverflowError(thread);
break;
case _thread_in_vm:
Exceptions::throw_stack_overflow_exception(thread, __FILE__, __LINE__);
break;
default:
ShouldNotReachHere();
}
// Reset the frame anchor if necessary
if (!has_last_Java_frame)
thread->reset_last_Java_frame();
}

View File

@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2008, 2009 Red Hat, Inc.
+ * Copyright 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,9 +29,14 @@ class ZeroStack {
   intptr_t *_top;  // the word past the end of the stack
   intptr_t *_sp;   // the top word on the stack
 
+ private:
+  int _shadow_pages_size; // how much ABI stack must we keep free?
+
  public:
   ZeroStack()
-    : _base(NULL), _top(NULL), _sp(NULL) {}
+    : _base(NULL), _top(NULL), _sp(NULL) {
+    _shadow_pages_size = StackShadowPages * os::vm_page_size();
+  }
 
   bool needs_setup() const {
     return _base == NULL;
@@ -81,6 +86,14 @@ class ZeroStack {
     return _sp -= count;
   }
 
+  int shadow_pages_size() const {
+    return _shadow_pages_size;
+  }
+
+ public:
+  void overflow_check(int required_words, TRAPS);
+  static void handle_overflow(TRAPS);
+
  public:
   static ByteSize base_offset() {
     return byte_offset_of(ZeroStack, _base);

View File

@@ -0,0 +1,43 @@
/*
* Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
// This function should match SharkStack::CreateStackOverflowCheck
inline void ZeroStack::overflow_check(int required_words, TRAPS) {
JavaThread *thread = (JavaThread *) THREAD;
// Check the Zero stack
if (required_words > available_words()) {
handle_overflow(THREAD);
return;
}
// Check the ABI stack
address stack_top = thread->stack_base() - thread->stack_size();
int free_stack = ((address) &stack_top) - stack_top;
if (free_stack < shadow_pages_size()) {
handle_overflow(THREAD);
return;
}
}

View File

@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2007, 2008 Red Hat, Inc.
+ * Copyright 2007, 2008, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -60,37 +60,42 @@ class StubGenerator: public StubCodeGenerator {
     }
 
     // Allocate and initialize our frame
-    thread->push_zero_frame(
-      EntryFrame::build(stack, parameters, parameter_words, call_wrapper));
+    EntryFrame *frame =
+      EntryFrame::build(parameters, parameter_words, call_wrapper, THREAD);
 
-    // Make the call
-    Interpreter::invoke_method(method, entry_point, THREAD);
-
-    // Store result depending on type
     if (!HAS_PENDING_EXCEPTION) {
-      switch (result_type) {
-      case T_INT:
-        *(jint *) result = *(jint *) stack->sp();
-        break;
-      case T_LONG:
-        *(jlong *) result = *(jlong *) stack->sp();
-        break;
-      case T_FLOAT:
-        *(jfloat *) result = *(jfloat *) stack->sp();
-        break;
-      case T_DOUBLE:
-        *(jdouble *) result = *(jdouble *) stack->sp();
-        break;
-      case T_OBJECT:
-        *(oop *) result = *(oop *) stack->sp();
-        break;
-      default:
-        ShouldNotReachHere();
-      }
-    }
+      // Push the frame
+      thread->push_zero_frame(frame);
 
-    // Unwind our frame
-    thread->pop_zero_frame();
+      // Make the call
+      Interpreter::invoke_method(method, entry_point, THREAD);
+
+      // Store the result
+      if (!HAS_PENDING_EXCEPTION) {
+        switch (result_type) {
+        case T_INT:
+          *(jint *) result = *(jint *) stack->sp();
+          break;
+        case T_LONG:
+          *(jlong *) result = *(jlong *) stack->sp();
+          break;
+        case T_FLOAT:
+          *(jfloat *) result = *(jfloat *) stack->sp();
+          break;
+        case T_DOUBLE:
+          *(jdouble *) result = *(jdouble *) stack->sp();
+          break;
+        case T_OBJECT:
+          *(oop *) result = *(oop *) stack->sp();
+          break;
+        default:
+          ShouldNotReachHere();
+        }
+      }
+
+      // Unwind the frame
+      thread->pop_zero_frame();
+    }
 
     // Tear down the stack if necessary
     if (stack_needs_teardown)
@@ -226,13 +231,13 @@ void StubGenerator_generate(CodeBuffer* code, bool all) {
   StubGenerator g(code, all);
 }
 
-EntryFrame *EntryFrame::build(ZeroStack*       stack,
-                              const intptr_t*  parameters,
+EntryFrame *EntryFrame::build(const intptr_t*  parameters,
                               int              parameter_words,
-                              JavaCallWrapper* call_wrapper) {
-  if (header_words + parameter_words > stack->available_words()) {
-    Unimplemented();
-  }
+                              JavaCallWrapper* call_wrapper,
+                              TRAPS) {
+  ZeroStack *stack = ((JavaThread *) THREAD)->zero_stack();
+  stack->overflow_check(header_words + parameter_words, CHECK_NULL);
 
   stack->push(0); // next_frame, filled in later
   intptr_t *fp = stack->sp();

View File

@@ -1,6 +1,6 @@
 /*
  * Copyright 2000-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2007, 2008, 2009 Red Hat, Inc.
+ * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -68,12 +68,13 @@
  public:
   void set_last_Java_frame() {
-    JavaFrameAnchor *jfa = frame_anchor();
-    jfa->set_last_Java_sp((intptr_t *) top_zero_frame());
+    set_last_Java_frame(top_zero_frame());
   }
   void reset_last_Java_frame() {
-    JavaFrameAnchor *jfa = frame_anchor();
-    jfa->set_last_Java_sp(NULL);
+    set_last_Java_frame(NULL);
+  }
+  void set_last_Java_frame(ZeroFrame* frame) {
+    frame_anchor()->set_last_Java_sp((intptr_t *) frame);
   }
 
  private:

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 1998-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1998-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -3861,6 +3861,8 @@ int MatchRule::is_expensive() const {
         strcmp(opType,"RoundFloat")==0 ||
         strcmp(opType,"ReverseBytesI")==0 ||
         strcmp(opType,"ReverseBytesL")==0 ||
+        strcmp(opType,"ReverseBytesUS")==0 ||
+        strcmp(opType,"ReverseBytesS")==0 ||
        strcmp(opType,"Replicate16B")==0 ||
         strcmp(opType,"Replicate8B")==0 ||
         strcmp(opType,"Replicate4B")==0 ||

View File

@@ -40,6 +40,7 @@ public:
     Exceptions,             // Offset where exception handler lives
     Deopt,                  // Offset where deopt handler lives
     DeoptMH,                // Offset where MethodHandle deopt handler lives
+    UnwindHandler,          // Offset to default unwind handler
     max_Entries };
 
   // special value to note codeBlobs where profile (forte) stack walking is
@@ -59,6 +60,7 @@ public:
     _values[Exceptions ] = -1;
     _values[Deopt      ] = -1;
     _values[DeoptMH    ] = -1;
+    _values[UnwindHandler] = -1;
   }
 
   int value(Entries e) { return _values[e]; }

View File

@@ -229,6 +229,10 @@ void Compilation::emit_code_epilog(LIR_Assembler* assembler) {
   code_offsets->set_value(CodeOffsets::DeoptMH, assembler->emit_deopt_handler());
   CHECK_BAILOUT();
 
+  // Emit the handler to remove the activation from the stack and
+  // dispatch to the caller.
+  offsets()->set_value(CodeOffsets::UnwindHandler, assembler->emit_unwind_handler());
+
   // done
   masm()->flush();
 }
@@ -312,7 +316,7 @@ void Compilation::install_code(int frame_size) {
     implicit_exception_table(),
     compiler(),
     _env->comp_level(),
-    needs_debug_information(),
+    true,
     has_unsafe_access()
   );
 }
@@ -445,8 +449,6 @@ Compilation::Compilation(AbstractCompiler* compiler, ciEnv* env, ciMethod* method)
   assert(_arena == NULL, "shouldn't only one instance of Compilation in existence at a time");
   _arena = Thread::current()->resource_area();
   _compilation = this;
-  _needs_debug_information = _env->jvmti_can_examine_or_deopt_anywhere() ||
-    JavaMonitorsInStackTrace || AlwaysEmitDebugInfo || DeoptimizeALot;
   _exception_info_list = new ExceptionInfoList();
   _implicit_exception_table.set_size(0);
   compile_method();

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -70,7 +70,6 @@ class Compilation: public StackObj {
   int                _max_spills;
   FrameMap*          _frame_map;
   C1_MacroAssembler* _masm;
-  bool               _needs_debug_information;
   bool               _has_exception_handlers;
   bool               _has_fpu_code;
   bool               _has_unsafe_access;
@@ -117,7 +116,6 @@ class Compilation: public StackObj {
   // accessors
   ciEnv* env() const                   { return _env; }
   AbstractCompiler* compiler() const   { return _compiler; }
-  bool needs_debug_information() const { return _needs_debug_information; }
   bool has_exception_handlers() const  { return _has_exception_handlers; }
   bool has_fpu_code() const            { return _has_fpu_code; }
   bool has_unsafe_access() const       { return _has_unsafe_access; }
@@ -132,7 +130,6 @@ class Compilation: public StackObj {
   CodeOffsets* offsets()               { return &_offsets; }
 
   // setters
-  void set_needs_debug_information(bool f) { _needs_debug_information = f; }
   void set_has_exception_handlers(bool f)  { _has_exception_handlers = f; }
   void set_has_fpu_code(bool f)            { _has_fpu_code = f; }
   void set_has_unsafe_access(bool f)       { _has_unsafe_access = f; }

View File

@@ -829,12 +829,8 @@ void GraphBuilder::ScopeData::setup_jsr_xhandlers() {
     // should be left alone since there can be only one and all code
     // should dispatch to the same one.
     XHandler* h = handlers->handler_at(i);
-    if (h->handler_bci() != SynchronizationEntryBCI) {
-      h->set_entry_block(block_at(h->handler_bci()));
-    } else {
-      assert(h->entry_block()->is_set(BlockBegin::default_exception_handler_flag),
-             "should be the synthetic unlock block");
-    }
+    assert(h->handler_bci() != SynchronizationEntryBCI, "must be real");
+    h->set_entry_block(block_at(h->handler_bci()));
   }
   _jsr_xhandlers = handlers;
 }
@@ -1497,7 +1493,6 @@ void GraphBuilder::access_field(Bytecodes::Code code) {
 
 Dependencies* GraphBuilder::dependency_recorder() const {
   assert(DeoptC1, "need debug information");
-  compilation()->set_needs_debug_information(true);
   return compilation()->dependency_recorder();
 }
 
@@ -2867,19 +2862,6 @@ GraphBuilder::GraphBuilder(Compilation* compilation, IRScope* scope)
   _initial_state = state_at_entry();
   start_block->merge(_initial_state);
 
-  // setup an exception handler to do the unlocking and/or
-  // notification and unwind the frame.
-  BlockBegin* sync_handler = new BlockBegin(-1);
-  sync_handler->set(BlockBegin::exception_entry_flag);
-  sync_handler->set(BlockBegin::is_on_work_list_flag);
-  sync_handler->set(BlockBegin::default_exception_handler_flag);
-
-  ciExceptionHandler* desc = new ciExceptionHandler(method()->holder(), 0, method()->code_size(), -1, 0);
-  XHandler* h = new XHandler(desc);
-  h->set_entry_block(sync_handler);
-  scope_data()->xhandlers()->append(h);
-  scope_data()->set_has_handler();
-
   // complete graph
   _vmap = new ValueMap();
   scope->compute_lock_stack_size();
@@ -2930,19 +2912,6 @@ GraphBuilder::GraphBuilder(Compilation* compilation, IRScope* scope)
   }
   CHECK_BAILOUT();
 
-  if (sync_handler && sync_handler->state() != NULL) {
-    Value lock = NULL;
-    if (method()->is_synchronized()) {
-      lock = method()->is_static() ? new Constant(new InstanceConstant(method()->holder()->java_mirror())) :
-                                     _initial_state->local_at(0);
-
-      sync_handler->state()->unlock();
-      sync_handler->state()->lock(scope, lock);
-    }
-    fill_sync_handler(lock, sync_handler, true);
-  }
-
   _start = setup_start_block(osr_bci, start_block, _osr_entry, _initial_state);
   eliminate_redundant_phis(_start);

View File

@@ -1628,11 +1628,10 @@ LEAF(BlockBegin, StateSplit)
     backward_branch_target_flag    = 1 << 4,
     is_on_work_list_flag           = 1 << 5,
     was_visited_flag               = 1 << 6,
-    default_exception_handler_flag = 1 << 8,  // identify block which represents the default exception handler
-    parser_loop_header_flag        = 1 << 9,  // set by parser to identify blocks where phi functions can not be created on demand
-    critical_edge_split_flag       = 1 << 10, // set for all blocks that are introduced when critical edges are split
-    linear_scan_loop_header_flag   = 1 << 11, // set during loop-detection for LinearScan
-    linear_scan_loop_end_flag      = 1 << 12  // set during loop-detection for LinearScan
+    parser_loop_header_flag        = 1 << 7,  // set by parser to identify blocks where phi functions can not be created on demand
+    critical_edge_split_flag       = 1 << 8,  // set for all blocks that are introduced when critical edges are split
+    linear_scan_loop_header_flag   = 1 << 9,  // set during loop-detection for LinearScan
+    linear_scan_loop_end_flag      = 1 << 10  // set during loop-detection for LinearScan
   };
 
   void set(Flag f) { _flags |= f; }

View File

@@ -626,8 +626,7 @@ void LIR_OpVisitState::visit(LIR_Op* op) {
       break;
     }
 
-    case lir_throw:
-    case lir_unwind: {
+    case lir_throw: {
       assert(op->as_Op2() != NULL, "must be");
       LIR_Op2* op2 = (LIR_Op2*)op;
 
@@ -639,6 +638,17 @@ void LIR_OpVisitState::visit(LIR_Op* op) {
       break;
     }
 
+    case lir_unwind: {
+      assert(op->as_Op1() != NULL, "must be");
+      LIR_Op1* op1 = (LIR_Op1*)op;
+
+      assert(op1->_info == NULL, "no info");
+      assert(op1->_opr->is_valid(), "exception oop");         do_input(op1->_opr);
+      assert(op1->_result->is_illegal(), "no result");
+
+      break;
+    }
+
     case lir_tan:
     case lir_sin:

View File

@@ -801,6 +801,7 @@ enum LIR_Code {
       , lir_monaddr
       , lir_roundfp
       , lir_safepoint
+      , lir_unwind
   , end_op1
   , begin_op2
       , lir_cmp
@@ -830,7 +831,6 @@ enum LIR_Code {
       , lir_ushr
       , lir_alloc_array
       , lir_throw
-      , lir_unwind
       , lir_compare_to
   , end_op2
   , begin_op3
@@ -1827,8 +1827,12 @@ class LIR_List: public CompilationResourceObj {
   void logical_xor (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_xor,  left, right, dst)); }
 
   void null_check(LIR_Opr opr, CodeEmitInfo* info) { append(new LIR_Op1(lir_null_check, opr, info)); }
-  void throw_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { append(new LIR_Op2(lir_throw, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info)); }
-  void unwind_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { append(new LIR_Op2(lir_unwind, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info)); }
+  void throw_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
+    append(new LIR_Op2(lir_throw, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info));
+  }
+  void unwind_exception(LIR_Opr exceptionOop) {
+    append(new LIR_Op1(lir_unwind, exceptionOop));
+  }
 
   void compare_to (LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
     append(new LIR_Op2(lir_compare_to, left, right, dst));

View File

@@ -552,6 +552,10 @@ void LIR_Assembler::emit_op1(LIR_Op1* op) {
       monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
       break;
 
+    case lir_unwind:
+      unwind_op(op->in_opr());
+      break;
+
     default:
       Unimplemented();
       break;
@@ -707,8 +711,7 @@ void LIR_Assembler::emit_op2(LIR_Op2* op) {
      break;
 
     case lir_throw:
-    case lir_unwind:
-      throw_op(op->in_opr1(), op->in_opr2(), op->info(), op->code() == lir_unwind);
+      throw_op(op->in_opr1(), op->in_opr2(), op->info());
      break;
 
     default:

View File

@@ -39,6 +39,8 @@ class LIR_Assembler: public CompilationResourceObj {
   Instruction*       _pending_non_safepoint;
   int                _pending_non_safepoint_offset;
 
+  Label              _unwind_handler_entry;
+
 #ifdef ASSERT
   BlockList          _branch_target_blocks;
   void check_no_unbound_labels();
@@ -134,6 +136,7 @@ class LIR_Assembler: public CompilationResourceObj {
 
   // code patterns
   int  emit_exception_handler();
+  int  emit_unwind_handler();
   void emit_exception_entries(ExceptionInfoList* info_list);
   int  emit_deopt_handler();
 
@@ -217,7 +220,8 @@ class LIR_Assembler: public CompilationResourceObj {
 
   void build_frame();
 
-  void throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info, bool unwind);
+  void throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info);
+  void unwind_op(LIR_Opr exceptionOop);
   void monitor_address(int monitor_ix, LIR_Opr dst);
 
   void align_backward_branch_target();

View File

@@ -1765,35 +1765,17 @@ void LIRGenerator::do_Throw(Throw* x) {
     __ null_check(exception_opr, new CodeEmitInfo(info, true));
   }
 
-  if (compilation()->env()->jvmti_can_post_on_exceptions() &&
-      !block()->is_set(BlockBegin::default_exception_handler_flag)) {
+  if (compilation()->env()->jvmti_can_post_on_exceptions()) {
     // we need to go through the exception lookup path to get JVMTI
     // notification done
    unwind = false;
   }
 
-  assert(!block()->is_set(BlockBegin::default_exception_handler_flag) || unwind,
-         "should be no more handlers to dispatch to");
-
-  if (compilation()->env()->dtrace_method_probes() &&
-      block()->is_set(BlockBegin::default_exception_handler_flag)) {
-    // notify that this frame is unwinding
-    BasicTypeList signature;
-    signature.append(T_INT);    // thread
-    signature.append(T_OBJECT); // methodOop
-    LIR_OprList* args = new LIR_OprList();
-    args->append(getThreadPointer());
-    LIR_Opr meth = new_register(T_OBJECT);
-    __ oop2reg(method()->constant_encoding(), meth);
-    args->append(meth);
-    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
-  }
-
   // move exception oop into fixed register
   __ move(exception_opr, exceptionOopOpr());
 
   if (unwind) {
-    __ unwind_exception(LIR_OprFact::illegalOpr, exceptionOopOpr(), info);
+    __ unwind_exception(exceptionOopOpr());
   } else {
     __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info);
   }

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 2005-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2005-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
 * This code is free software; you can redistribute it and/or modify it
@@ -2814,9 +2814,6 @@ IRScopeDebugInfo* LinearScan::compute_debug_info_for_scope(int op_id, IRScope* c
 
 void LinearScan::compute_debug_info(CodeEmitInfo* info, int op_id) {
-  if (!compilation()->needs_debug_information()) {
-    return;
-  }
   TRACE_LINEAR_SCAN(3, tty->print_cr("creating debug information at op_id %d", op_id));
 
   IRScope* innermost_scope = info->scope();

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -252,9 +252,6 @@
   develop(bool, BailoutOnExceptionHandlers, false,                          \
           "bailout of compilation for methods with exception handlers")    \
                                                                             \
-  develop(bool, AlwaysEmitDebugInfo, false,                                 \
-          "always emit debug info")                                        \
-                                                                            \
   develop(bool, InstallMethods, true,                                       \
           "Install methods at the end of successful compilations")         \
                                                                             \

View File

@@ -1408,8 +1408,11 @@ BCEscapeAnalyzer::BCEscapeAnalyzer(ciMethod* method, BCEscapeAnalyzer* parent)
 }
 
 void BCEscapeAnalyzer::copy_dependencies(Dependencies *deps) {
-  if(!has_dependencies())
-    return;
+  if (ciEnv::current()->jvmti_can_hotswap_or_post_breakpoint()) {
+    // Also record evol dependencies so redefinition of the
+    // callee will trigger recompilation.
+    deps->assert_evol_method(method());
+  }
   for (int i = 0; i < _dependencies.length(); i+=2) {
     ciKlass *k = _dependencies[i]->as_klass();
     ciMethod *m = _dependencies[i+1]->as_method();

View File

@@ -176,7 +176,6 @@ void ciEnv::cache_jvmti_state() {
   // Get Jvmti capabilities under lock to get consistant values.
   MutexLocker mu(JvmtiThreadState_lock);
   _jvmti_can_hotswap_or_post_breakpoint = JvmtiExport::can_hotswap_or_post_breakpoint();
-  _jvmti_can_examine_or_deopt_anywhere  = JvmtiExport::can_examine_or_deopt_anywhere();
   _jvmti_can_access_local_variables     = JvmtiExport::can_access_local_variables();
   _jvmti_can_post_on_exceptions         = JvmtiExport::can_post_on_exceptions();
 }
@@ -887,8 +886,6 @@ void ciEnv::register_method(ciMethod* target,
     if (!failing() &&
         ( (!jvmti_can_hotswap_or_post_breakpoint() &&
           JvmtiExport::can_hotswap_or_post_breakpoint()) ||
-          (!jvmti_can_examine_or_deopt_anywhere() &&
-           JvmtiExport::can_examine_or_deopt_anywhere()) ||
          (!jvmti_can_access_local_variables() &&
           JvmtiExport::can_access_local_variables()) ||
          (!jvmti_can_post_on_exceptions() &&

View File

@@ -55,7 +55,6 @@ private:
 
   // Cache Jvmti state
   bool  _jvmti_can_hotswap_or_post_breakpoint;
-  bool  _jvmti_can_examine_or_deopt_anywhere;
   bool  _jvmti_can_access_local_variables;
   bool  _jvmti_can_post_on_exceptions;
 
@@ -257,7 +256,6 @@ public:
   // Cache Jvmti state
   void  cache_jvmti_state();
   bool  jvmti_can_hotswap_or_post_breakpoint() const { return _jvmti_can_hotswap_or_post_breakpoint; }
-  bool  jvmti_can_examine_or_deopt_anywhere()  const { return _jvmti_can_examine_or_deopt_anywhere; }
   bool  jvmti_can_access_local_variables()     const { return _jvmti_can_access_local_variables; }
   bool  jvmti_can_post_on_exceptions()         const { return _jvmti_can_post_on_exceptions; }

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -357,6 +357,8 @@
   template(void_double_signature,     "()D")   \
   template(int_void_signature,        "(I)V")  \
   template(int_int_signature,         "(I)I")  \
+  template(char_char_signature,       "(C)C")  \
+  template(short_short_signature,     "(S)S")  \
   template(int_bool_signature,        "(I)Z")  \
   template(float_int_signature,       "(F)I")  \
   template(double_long_signature,     "(D)J")  \
@@ -584,6 +586,10 @@
   do_intrinsic(_reverseBytes_i, java_lang_Integer,   reverseBytes_name, int_int_signature,     F_S)  \
    do_name(     reverseBytes_name, "reverseBytes")                                                   \
   do_intrinsic(_reverseBytes_l, java_lang_Long,      reverseBytes_name, long_long_signature,   F_S)  \
+   /*  (symbol reverseBytes_name defined above) */                                                   \
+  do_intrinsic(_reverseBytes_c, java_lang_Character, reverseBytes_name, char_char_signature,   F_S)  \
+   /*  (symbol reverseBytes_name defined above) */                                                   \
+  do_intrinsic(_reverseBytes_s, java_lang_Short,     reverseBytes_name, short_short_signature, F_S)  \
    /*  (symbol reverseBytes_name defined above) */                                                   \
                                                                                                      \
   do_intrinsic(_identityHashCode, java_lang_System,  identityHashCode_name, object_int_signature, F_S) \

View File

@@ -685,6 +685,7 @@ nmethod::nmethod(
     _exception_offset        = 0;
     _deoptimize_offset       = 0;
     _deoptimize_mh_offset    = 0;
+    _unwind_handler_offset   = -1;
     _trap_offset             = offsets->value(CodeOffsets::Dtrace_trap);
     _orig_pc_offset          = 0;
     _stub_offset             = data_offset();
@@ -798,6 +799,11 @@ nmethod::nmethod(
     _exception_offset        = _stub_offset + offsets->value(CodeOffsets::Exceptions);
     _deoptimize_offset       = _stub_offset + offsets->value(CodeOffsets::Deopt);
     _deoptimize_mh_offset    = _stub_offset + offsets->value(CodeOffsets::DeoptMH);
+    if (offsets->value(CodeOffsets::UnwindHandler) != -1) {
+      _unwind_handler_offset = instructions_offset() + offsets->value(CodeOffsets::UnwindHandler);
+    } else {
+      _unwind_handler_offset = -1;
+    }
     _consts_offset           = instructions_offset() + code_buffer->total_offset_of(code_buffer->consts()->start());
     _scopes_data_offset      = data_offset();
     _scopes_pcs_offset       = _scopes_data_offset + round_to(debug_info->data_size(), oopSize);

View File

@@ -154,6 +154,9 @@ class nmethod : public CodeBlob {
   // All deoptee's at a MethodHandle call site will resume execution
   // at this location described by this offset.
   int _deoptimize_mh_offset;
+  // Offset of the unwind handler if it exists
+  int _unwind_handler_offset;
+
 #ifdef HAVE_DTRACE_H
   int _trap_offset;
 #endif // def HAVE_DTRACE_H
@@ -341,6 +344,7 @@ class nmethod : public CodeBlob {
   address exception_begin       () const { return header_begin() + _exception_offset     ; }
   address deopt_handler_begin   () const { return header_begin() + _deoptimize_offset    ; }
   address deopt_mh_handler_begin() const { return header_begin() + _deoptimize_mh_offset ; }
+  address unwind_handler_begin  () const { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; }
   address stub_begin            () const { return header_begin() + _stub_offset          ; }
   address stub_end              () const { return header_begin() + _consts_offset        ; }
   address consts_begin          () const { return header_begin() + _consts_offset        ; }

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2007-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,11 +32,10 @@ class ConcurrentMarkSweepPolicy : public TwoGenerationCollectorPolicy {
   ConcurrentMarkSweepPolicy* as_concurrent_mark_sweep_policy() { return this; }
 
   void initialize_gc_policy_counters();
-#if 1
+
   virtual void initialize_size_policy(size_t init_eden_size,
                                       size_t init_promo_size,
                                       size_t init_survivor_size);
-#endif
 
   // Returns true if the incremental mode is enabled.
   virtual bool has_soft_ended_eden();

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1815,8 +1815,19 @@ NOT_PRODUCT(
     do_compaction_work(clear_all_soft_refs);
 
     // Has the GC time limit been exceeded?
-    check_gc_time_limit();
-
+    DefNewGeneration* young_gen = _young_gen->as_DefNewGeneration();
+    size_t max_eden_size = young_gen->max_capacity() -
+                           young_gen->to()->capacity() -
+                           young_gen->from()->capacity();
+    GenCollectedHeap* gch = GenCollectedHeap::heap();
+    GCCause::Cause gc_cause = gch->gc_cause();
+    size_policy()->check_gc_overhead_limit(_young_gen->used(),
+                                           young_gen->eden()->used(),
+                                           _cmsGen->max_capacity(),
+                                           max_eden_size,
+                                           full,
+                                           gc_cause,
+                                           gch->collector_policy());
   } else {
     do_mark_sweep_work(clear_all_soft_refs, first_state,
       should_start_over);
@@ -1828,55 +1839,6 @@ NOT_PRODUCT(
   return;
 }
 
-void CMSCollector::check_gc_time_limit() {
-  // Ignore explicit GC's.  Exiting here does not set the flag and
-  // does not reset the count.  Updating of the averages for system
-  // GC's is still controlled by UseAdaptiveSizePolicyWithSystemGC.
-  GCCause::Cause gc_cause = GenCollectedHeap::heap()->gc_cause();
-  if (GCCause::is_user_requested_gc(gc_cause) ||
-      GCCause::is_serviceability_requested_gc(gc_cause)) {
-    return;
-  }
-
-  // Calculate the fraction of the CMS generation was freed during
-  // the last collection.
-  // Only consider the STW compacting cost for now.
-  //
-  // Note that the gc time limit test only works for the collections
-  // of the young gen + tenured gen and not for collections of the
-  // permanent gen.  That is because the calculation of the space
-  // freed by the collection is the free space in the young gen +
-  // tenured gen.
-
-  double fraction_free =
-    ((double)_cmsGen->free())/((double)_cmsGen->max_capacity());
-  if ((100.0 * size_policy()->compacting_gc_cost()) >
-         ((double) GCTimeLimit) &&
-        ((fraction_free * 100) < GCHeapFreeLimit)) {
-    size_policy()->inc_gc_time_limit_count();
-    if (UseGCOverheadLimit &&
-        (size_policy()->gc_time_limit_count() >
-         AdaptiveSizePolicyGCTimeLimitThreshold)) {
-      size_policy()->set_gc_time_limit_exceeded(true);
-      // Avoid consecutive OOM due to the gc time limit by resetting
-      // the counter.
-      size_policy()->reset_gc_time_limit_count();
-      if (PrintGCDetails) {
-        gclog_or_tty->print_cr("      GC is exceeding overhead limit "
-          "of %d%%", GCTimeLimit);
-      }
-    } else {
-      if (PrintGCDetails) {
-        gclog_or_tty->print_cr("      GC would exceed overhead limit "
-          "of %d%%", GCTimeLimit);
-      }
-    }
-  } else {
-    size_policy()->reset_gc_time_limit_count();
-  }
-}
-
 // Resize the perm generation and the tenured generation
 // after obtaining the free list locks for the
 // two generations.
@@ -6182,6 +6144,11 @@ void CMSCollector::reset(bool asynch) {
       }
       curAddr = chunk.end();
     }
+    // A successful mostly concurrent collection has been done.
+    // Because only the full (i.e., concurrent mode failure) collections
+    // are being measured for gc overhead limits, clean the "near" flag
+    // and count.
+    sp->reset_gc_overhead_limit_count();
     _collectorState = Idling;
   } else {
     // already have the lock

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -570,10 +570,6 @@ class CMSCollector: public CHeapObj {
   ConcurrentMarkSweepPolicy* _collector_policy;
   ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }
 
-  // Check whether the gc time limit has been
-  // exceeded and set the size policy flag
-  // appropriately.
-  void check_gc_time_limit();
   // XXX Move these to CMSStats ??? FIX ME !!!
   elapsedTimer _inter_sweep_timer;   // time between sweeps
   elapsedTimer _intra_sweep_timer;   // time _in_ sweeps

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -69,9 +69,9 @@ void ConcurrentG1RefineThread::sample_young_list_rs_lengths() {
   G1CollectorPolicy* g1p = g1h->g1_policy();
   if (g1p->adaptive_young_list_length()) {
     int regions_visited = 0;
-    g1h->young_list_rs_length_sampling_init();
-    while (g1h->young_list_rs_length_sampling_more()) {
-      g1h->young_list_rs_length_sampling_next();
+    g1h->young_list()->rs_length_sampling_init();
+    while (g1h->young_list()->rs_length_sampling_more()) {
+      g1h->young_list()->rs_length_sampling_next();
       ++regions_visited;
       // we try to yield every time we visit 10 regions
@@ -162,6 +162,7 @@ void ConcurrentG1RefineThread::run() {
   if (_worker_id >= cg1r()->worker_thread_num()) {
     run_young_rs_sampling();
     terminate();
+    return;
   }
   _vtime_start = os::elapsedVTime();
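This refactoring replaces per-operation forwarding methods on the heap (deleted in the g1CollectedHeap.hpp hunk near the end of this commit) with a single young_list() accessor that callers use directly. A minimal model of the before/after shapes (names shortened, not HotSpot code):

#include <cstddef>

class YoungList {
  size_t _length;
public:
  YoungList() : _length(0) {}
  void   sampling_init() { /* reset sampling state */ }
  size_t length() const  { return _length; }
};

class Heap {
  YoungList _young_list;
public:
  // Old style: one forwarding method per YoungList operation.
  void young_list_sampling_init() { _young_list.sampling_init(); }

  // New style: hand out the collaborator once; new YoungList
  // operations no longer need a matching heap wrapper.
  YoungList* young_list() { return &_young_list; }
};

int main() {
  Heap heap;
  heap.young_list_sampling_init();     // before the change
  heap.young_list()->sampling_init();  // after the change
  return heap.young_list()->length() == 0 ? 0 : 1;
}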
View File
@@ -767,7 +767,8 @@ void ConcurrentMark::checkpointRootsInitialPre() {
   _has_aborted = false;
   if (G1PrintReachableAtInitialMark) {
-    print_reachable(true, "before");
+    print_reachable("at-cycle-start",
+                    true /* use_prev_marking */, true /* all */);
   }
   // Initialise marking structures. This has to be done in a STW phase.
@@ -1979,19 +1980,21 @@ void ConcurrentMark::checkpointRootsFinalWork() {
 #ifndef PRODUCT
-class ReachablePrinterOopClosure: public OopClosure {
+class PrintReachableOopClosure: public OopClosure {
 private:
   G1CollectedHeap* _g1h;
   CMBitMapRO*      _bitmap;
   outputStream*    _out;
   bool             _use_prev_marking;
+  bool             _all;
 public:
-  ReachablePrinterOopClosure(CMBitMapRO* bitmap,
-                             outputStream* out,
-                             bool use_prev_marking) :
+  PrintReachableOopClosure(CMBitMapRO* bitmap,
+                           outputStream* out,
+                           bool use_prev_marking,
+                           bool all) :
     _g1h(G1CollectedHeap::heap()),
-    _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking) { }
+    _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking), _all(all) { }
   void do_oop(narrowOop* p) { do_oop_work(p); }
   void do_oop(      oop* p) { do_oop_work(p); }
@@ -2001,9 +2004,11 @@ public:
     const char* str = NULL;
     const char* str2 = "";
-    if (!_g1h->is_in_g1_reserved(obj))
-      str = "outside G1 reserved";
-    else {
+    if (obj == NULL) {
+      str = "";
+    } else if (!_g1h->is_in_g1_reserved(obj)) {
+      str = " O";
+    } else {
       HeapRegion* hr = _g1h->heap_region_containing(obj);
       guarantee(hr != NULL, "invariant");
       bool over_tams = false;
@@ -2012,74 +2017,67 @@ public:
       } else {
         over_tams = hr->obj_allocated_since_next_marking(obj);
       }
+      bool marked = _bitmap->isMarked((HeapWord*) obj);
       if (over_tams) {
-        str = "over TAMS";
-        if (_bitmap->isMarked((HeapWord*) obj)) {
+        str = " >";
+        if (marked) {
           str2 = " AND MARKED";
         }
-      } else if (_bitmap->isMarked((HeapWord*) obj)) {
-        str = "marked";
+      } else if (marked) {
+        str = " M";
       } else {
-        str = "#### NOT MARKED ####";
+        str = " NOT";
       }
     }
-    _out->print_cr("         "PTR_FORMAT" contains "PTR_FORMAT" %s%s",
+    _out->print_cr("  "PTR_FORMAT": "PTR_FORMAT"%s%s",
                    p, (void*) obj, str, str2);
   }
 };
-class ReachablePrinterClosure: public BitMapClosure {
+class PrintReachableObjectClosure : public ObjectClosure {
 private:
   CMBitMapRO*   _bitmap;
   outputStream* _out;
   bool          _use_prev_marking;
+  bool          _all;
+  HeapRegion*   _hr;
 public:
-  ReachablePrinterClosure(CMBitMapRO* bitmap,
-                          outputStream* out,
-                          bool use_prev_marking) :
-    _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking) { }
-
-  bool do_bit(size_t offset) {
-    HeapWord* addr = _bitmap->offsetToHeapWord(offset);
-    ReachablePrinterOopClosure oopCl(_bitmap, _out, _use_prev_marking);
-    _out->print_cr("  obj "PTR_FORMAT", offset %10d (marked)", addr, offset);
-    oop(addr)->oop_iterate(&oopCl);
-    _out->print_cr("");
-    return true;
-  }
-};
-
-class ObjInRegionReachablePrinterClosure : public ObjectClosure {
-private:
-  CMBitMapRO*   _bitmap;
-  outputStream* _out;
-  bool          _use_prev_marking;
-public:
-  ObjInRegionReachablePrinterClosure(CMBitMapRO* bitmap,
-                                     outputStream* out,
-                                     bool use_prev_marking) :
-    _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking) { }
+  PrintReachableObjectClosure(CMBitMapRO* bitmap,
+                              outputStream* out,
+                              bool use_prev_marking,
+                              bool all,
+                              HeapRegion* hr) :
+    _bitmap(bitmap), _out(out),
+    _use_prev_marking(use_prev_marking), _all(all), _hr(hr) { }
   void do_object(oop o) {
-    ReachablePrinterOopClosure oopCl(_bitmap, _out, _use_prev_marking);
-    _out->print_cr("  obj "PTR_FORMAT" (over TAMS)", (void*) o);
-    o->oop_iterate(&oopCl);
-    _out->print_cr("");
+    bool over_tams;
+    if (_use_prev_marking) {
+      over_tams = _hr->obj_allocated_since_prev_marking(o);
+    } else {
+      over_tams = _hr->obj_allocated_since_next_marking(o);
+    }
+    bool marked = _bitmap->isMarked((HeapWord*) o);
+    bool print_it = _all || over_tams || marked;
+
+    if (print_it) {
+      _out->print_cr(" "PTR_FORMAT"%s",
+                     o, (over_tams) ? " >" : (marked) ? " M" : "");
+      PrintReachableOopClosure oopCl(_bitmap, _out, _use_prev_marking, _all);
+      o->oop_iterate(&oopCl);
+    }
   }
 };
-class RegionReachablePrinterClosure : public HeapRegionClosure {
+class PrintReachableRegionClosure : public HeapRegionClosure {
 private:
   CMBitMapRO*   _bitmap;
   outputStream* _out;
   bool          _use_prev_marking;
+  bool          _all;
 public:
   bool doHeapRegion(HeapRegion* hr) {
@@ -2094,22 +2092,35 @@ public:
     }
     _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" "
                    "TAMS: "PTR_FORMAT, b, e, t, p);
-    _out->print_cr("");
-
-    ObjInRegionReachablePrinterClosure ocl(_bitmap, _out, _use_prev_marking);
-    hr->object_iterate_mem_careful(MemRegion(p, t), &ocl);
+    _out->cr();
+
+    HeapWord* from = b;
+    HeapWord* to   = t;
+
+    if (to > from) {
+      _out->print_cr("Objects in ["PTR_FORMAT", "PTR_FORMAT"]", from, to);
+      _out->cr();
+      PrintReachableObjectClosure ocl(_bitmap, _out,
+                                      _use_prev_marking, _all, hr);
+      hr->object_iterate_mem_careful(MemRegion(from, to), &ocl);
+      _out->cr();
+    }
     return false;
   }
-  RegionReachablePrinterClosure(CMBitMapRO* bitmap,
-                                outputStream* out,
-                                bool use_prev_marking) :
-    _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking) { }
+  PrintReachableRegionClosure(CMBitMapRO* bitmap,
+                              outputStream* out,
+                              bool use_prev_marking,
+                              bool all) :
+    _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking), _all(all) { }
 };
-void ConcurrentMark::print_reachable(bool use_prev_marking, const char* str) {
-  gclog_or_tty->print_cr("== Doing reachable object dump... ");
+void ConcurrentMark::print_reachable(const char* str,
+                                     bool use_prev_marking,
+                                     bool all) {
+  gclog_or_tty->cr();
+  gclog_or_tty->print_cr("== Doing heap dump... ");
   if (G1PrintReachableBaseFile == NULL) {
     gclog_or_tty->print_cr("  #### error: no base file defined");
@@ -2144,19 +2155,14 @@ void ConcurrentMark::print_reachable(bool use_prev_marking, const char* str) {
   out->print_cr("-- USING %s", (use_prev_marking) ? "PTAMS" : "NTAMS");
   out->cr();
-  RegionReachablePrinterClosure rcl(bitmap, out, use_prev_marking);
-  out->print_cr("--- ITERATING OVER REGIONS WITH TAMS < TOP");
+  out->print_cr("--- ITERATING OVER REGIONS");
   out->cr();
+  PrintReachableRegionClosure rcl(bitmap, out, use_prev_marking, all);
   _g1h->heap_region_iterate(&rcl);
   out->cr();
-  ReachablePrinterClosure cl(bitmap, out, use_prev_marking);
-  out->print_cr("--- ITERATING OVER MARKED OBJECTS ON THE BITMAP");
-  out->cr();
-  bitmap->iterate(&cl);
-  out->cr();
   gclog_or_tty->print_cr("  done");
+  gclog_or_tty->flush();
 }
 #endif // PRODUCT
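The renamed printers keep G1's usual three-level closure nesting: a region closure walks the heap, builds a per-region object closure, and that in turn applies an oop closure to every reference field. A compact, self-contained sketch of the traversal pattern (simplified stand-in types, not the HotSpot classes):

#include <cstdio>
#include <vector>

// Simplified stand-ins for oop / HeapRegion; not the HotSpot types.
struct Obj    { std::vector<Obj*> fields; };
struct Region { std::vector<Obj> objects; };

struct OopClosure {        // applied to each reference field
  virtual void do_oop(Obj** p) = 0;
  virtual ~OopClosure() {}
};
struct ObjectClosure {     // applied to each object in a region
  virtual void do_object(Obj* o) = 0;
  virtual ~ObjectClosure() {}
};

struct PrintFieldClosure : OopClosure {
  void do_oop(Obj** p) override {
    std::printf("    field %p -> %p\n", (void*)p, (void*)*p);
  }
};

struct PrintObjectClosure : ObjectClosure {
  void do_object(Obj* o) override {
    std::printf("  obj %p\n", (void*)o);
    PrintFieldClosure fcl;                    // per-object field visitor
    for (Obj*& f : o->fields) fcl.do_oop(&f);
  }
};

// The region-level walk drives the two inner closures, mirroring
// PrintReachableRegionClosure -> PrintReachableObjectClosure ->
// PrintReachableOopClosure in the hunk above.
void heap_region_iterate(std::vector<Region>& heap, ObjectClosure* ocl) {
  for (Region& r : heap)
    for (Obj& o : r.objects) ocl->do_object(&o);
}

int main() {
  std::vector<Region> heap(2);
  heap[0].objects.resize(2);
  heap[1].objects.resize(1);
  heap[0].objects[0].fields.push_back(&heap[1].objects[0]);
  PrintObjectClosure ocl;
  heap_region_iterate(heap, &ocl);
  return 0;
}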
View File
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -652,11 +652,24 @@ public:
   // we do nothing.
   void markAndGrayObjectIfNecessary(oop p);
-  // This iterates over the marking bitmap (either prev or next) and
-  // prints out all objects that are marked on the bitmap and indicates
-  // whether what they point to is also marked or not. It also iterates
-  // the objects over TAMS (either prev or next).
-  void print_reachable(bool use_prev_marking, const char* str);
+  // It iterates over the heap and for each object it comes across it
+  // will dump the contents of its reference fields, as well as
+  // liveness information for the object and its referents. The dump
+  // will be written to a file with the following name:
+  // G1PrintReachableBaseFile + "." + str. use_prev_marking decides
+  // whether the prev (use_prev_marking == true) or next
+  // (use_prev_marking == false) marking information will be used to
+  // determine the liveness of each object / referent. If all is true,
+  // all objects in the heap will be dumped, otherwise only the live
+  // ones. In the dump the following symbols / abbreviations are used:
+  //   M : an explicitly live object (its bitmap bit is set)
+  //   > : an implicitly live object (over tams)
+  //   O : an object outside the G1 heap (typically: in the perm gen)
+  //   NOT : a reference field whose referent is not live
+  //   AND MARKED : indicates that an object is both explicitly and
+  //   implicitly live (it should be one or the other, not both)
+  void print_reachable(const char* str,
+                       bool use_prev_marking, bool all) PRODUCT_RETURN;
   // Clear the next marking bitmap (will be called concurrently).
   void clearNextBitmap();
@@ -720,6 +733,19 @@ public:
   // to determine whether any heap regions are located above the finger.
   void registerCSetRegion(HeapRegion* hr);
+  // Registers the maximum region-end associated with a set of
+  // regions with CM. Again this is used to determine whether any
+  // heap regions are located above the finger.
+  void register_collection_set_finger(HeapWord* max_finger) {
+    // max_finger is the highest heap region end of the regions currently
+    // contained in the collection set. If this value is larger than
+    // _min_finger then we need to gray objects.
+    // This routine is like registerCSetRegion but for an entire
+    // collection of regions.
+    if (max_finger > _min_finger)
+      _should_gray_objects = true;
+  }
   // Returns "true" if at least one mark has been completed.
   bool at_least_one_mark_complete() { return _at_least_one_mark_complete; }
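The new comment block defines the dump's liveness vocabulary: explicitly live means the mark bitmap bit is set ("M"), implicitly live means the object was allocated above the region's top-at-mark-start (">"). A toy classifier under those two rules (hypothetical types; the real TAMS checks are per-region and per-marking-cycle):

#include <cstdio>
#include <set>

typedef unsigned long Addr;

// Hypothetical region with a top-at-mark-start boundary.
struct Region { Addr bottom, tams, top; };

const char* classify(Addr obj, const Region& r,
                     const std::set<Addr>& mark_bitmap) {
  bool marked    = mark_bitmap.count(obj) != 0;  // "M": explicitly live
  bool over_tams = obj >= r.tams;                // ">": implicitly live
  if (over_tams && marked) return "> AND MARKED";  // should not happen
  if (over_tams)           return ">";
  if (marked)              return "M";
  return "NOT";                                  // referent not live
}

int main() {
  Region r = { 0x1000, 0x1800, 0x2000 };
  std::set<Addr> bitmap;
  bitmap.insert(0x1100);
  std::printf("%s\n", classify(0x1100, r, bitmap)); // M
  std::printf("%s\n", classify(0x1900, r, bitmap)); // >
  std::printf("%s\n", classify(0x1200, r, bitmap)); // NOT
  return 0;
}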
View File
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,7 @@ size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
 // turn it on so that the contents of the young list (scan-only /
 // to-be-collected) are printed at "strategic" points before / during
 // / after the collection --- this is useful for debugging
-#define SCAN_ONLY_VERBOSE 0
+#define YOUNG_LIST_VERBOSE 0
 // CURRENT STATUS
 // This file is under construction. Search for "FIXME".
@@ -133,8 +133,7 @@ public:
 YoungList::YoungList(G1CollectedHeap* g1h)
   : _g1h(g1h), _head(NULL),
-    _scan_only_head(NULL), _scan_only_tail(NULL), _curr_scan_only(NULL),
-    _length(0), _scan_only_length(0),
+    _length(0),
     _last_sampled_rs_lengths(0),
     _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0)
 {
@@ -166,48 +165,6 @@ void YoungList::add_survivor_region(HeapRegion* hr) {
   ++_survivor_length;
 }
-HeapRegion* YoungList::pop_region() {
-  while (_head != NULL) {
-    assert( length() > 0, "list should not be empty" );
-    HeapRegion* ret = _head;
-    _head = ret->get_next_young_region();
-    ret->set_next_young_region(NULL);
-    --_length;
-    assert(ret->is_young(), "region should be very young");
-
-    // Replace 'Survivor' region type with 'Young'. So the region will
-    // be treated as a young region and will not be 'confused' with
-    // newly created survivor regions.
-    if (ret->is_survivor()) {
-      ret->set_young();
-    }
-
-    if (!ret->is_scan_only()) {
-      return ret;
-    }
-
-    // scan-only, we'll add it to the scan-only list
-    if (_scan_only_tail == NULL) {
-      guarantee( _scan_only_head == NULL, "invariant" );
-      _scan_only_head = ret;
-      _curr_scan_only = ret;
-    } else {
-      guarantee( _scan_only_head != NULL, "invariant" );
-      _scan_only_tail->set_next_young_region(ret);
-    }
-    guarantee( ret->get_next_young_region() == NULL, "invariant" );
-    _scan_only_tail = ret;
-
-    // no need to be tagged as scan-only any more
-    ret->set_young();
-
-    ++_scan_only_length;
-  }
-  assert( length() == 0, "list should be empty" );
-  return NULL;
-}
 void YoungList::empty_list(HeapRegion* list) {
   while (list != NULL) {
     HeapRegion* next = list->get_next_young_region();
@@ -225,12 +182,6 @@ void YoungList::empty_list() {
   _head = NULL;
   _length = 0;
-  empty_list(_scan_only_head);
-  _scan_only_head = NULL;
-  _scan_only_tail = NULL;
-  _scan_only_length = 0;
-  _curr_scan_only = NULL;
   empty_list(_survivor_head);
   _survivor_head = NULL;
   _survivor_tail = NULL;
@@ -248,11 +199,11 @@ bool YoungList::check_list_well_formed() {
   HeapRegion* curr = _head;
   HeapRegion* last = NULL;
   while (curr != NULL) {
-    if (!curr->is_young() || curr->is_scan_only()) {
+    if (!curr->is_young()) {
       gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" "
-                             "incorrectly tagged (%d, %d)",
+                             "incorrectly tagged (y: %d, surv: %d)",
                              curr->bottom(), curr->end(),
-                             curr->is_young(), curr->is_scan_only());
+                             curr->is_young(), curr->is_survivor());
       ret = false;
     }
     ++length;
@@ -267,47 +218,10 @@ bool YoungList::check_list_well_formed() {
                            length, _length);
   }
-  bool scan_only_ret = true;
-  length = 0;
-  curr = _scan_only_head;
-  last = NULL;
-  while (curr != NULL) {
-    if (!curr->is_young() || curr->is_scan_only()) {
-      gclog_or_tty->print_cr("### SCAN-ONLY REGION "PTR_FORMAT"-"PTR_FORMAT" "
-                             "incorrectly tagged (%d, %d)",
-                             curr->bottom(), curr->end(),
-                             curr->is_young(), curr->is_scan_only());
-      scan_only_ret = false;
-    }
-    ++length;
-    last = curr;
-    curr = curr->get_next_young_region();
-  }
-  scan_only_ret = scan_only_ret && (length == _scan_only_length);
-
-  if ( (last != _scan_only_tail) ||
-       (_scan_only_head == NULL && _scan_only_tail != NULL) ||
-       (_scan_only_head != NULL && _scan_only_tail == NULL) ) {
-    gclog_or_tty->print_cr("## _scan_only_tail is set incorrectly");
-    scan_only_ret = false;
-  }
-
-  if (_curr_scan_only != NULL && _curr_scan_only != _scan_only_head) {
-    gclog_or_tty->print_cr("### _curr_scan_only is set incorrectly");
-    scan_only_ret = false;
-  }
-
-  if (!scan_only_ret) {
-    gclog_or_tty->print_cr("### SCAN-ONLY LIST seems not well formed!");
-    gclog_or_tty->print_cr("### list has %d entries, _scan_only_length is %d",
-                           length, _scan_only_length);
-  }
-
-  return ret && scan_only_ret;
+  return ret;
 }
-bool YoungList::check_list_empty(bool ignore_scan_only_list,
-                                 bool check_sample) {
+bool YoungList::check_list_empty(bool check_sample) {
   bool ret = true;
   if (_length != 0) {
@@ -327,28 +241,7 @@ bool YoungList::check_list_empty(bool ignore_scan_only_list,
     gclog_or_tty->print_cr("### YOUNG LIST does not seem empty");
   }
-  if (ignore_scan_only_list) return ret;
-
-  bool scan_only_ret = true;
-  if (_scan_only_length != 0) {
-    gclog_or_tty->print_cr("### SCAN-ONLY LIST should have 0 length, not %d",
-                           _scan_only_length);
-    scan_only_ret = false;
-  }
-  if (_scan_only_head != NULL) {
-    gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL head");
-    scan_only_ret = false;
-  }
-  if (_scan_only_tail != NULL) {
-    gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL tail");
-    scan_only_ret = false;
-  }
-  if (!scan_only_ret) {
-    gclog_or_tty->print_cr("### SCAN-ONLY LIST does not seem empty");
-  }
-
-  return ret && scan_only_ret;
+  return ret;
 }
 void
@@ -365,7 +258,18 @@ YoungList::rs_length_sampling_more() {
 void
 YoungList::rs_length_sampling_next() {
   assert( _curr != NULL, "invariant" );
-  _sampled_rs_lengths += _curr->rem_set()->occupied();
+  size_t rs_length = _curr->rem_set()->occupied();
+
+  _sampled_rs_lengths += rs_length;
+
+  // The current region may not yet have been added to the
+  // incremental collection set (it gets added when it is
+  // retired as the current allocation region).
+  if (_curr->in_collection_set()) {
+    // Update the collection set policy information for this region
+    _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length);
+  }
+
   _curr = _curr->get_next_young_region();
   if (_curr == NULL) {
     _last_sampled_rs_lengths = _sampled_rs_lengths;
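In rs_length_sampling_next() above, each sampled region now also refreshes the policy's view of its remembered-set length when the region already sits in the incremental collection set, so the pause-time prediction stays current between pauses. A rough, self-contained model of that feedback loop (all names hypothetical; update_incremental_cset_info is simplified here to take the old and new samples rather than the region):

#include <cstdio>
#include <vector>

// Hypothetical policy tracking the predicted total remembered-set
// length of the regions already chosen for the next pause.
struct Policy {
  size_t predicted_rs_lengths;
  Policy() : predicted_rs_lengths(0) {}
  void update_incremental_cset_info(size_t old_rs, size_t new_rs) {
    predicted_rs_lengths += new_rs - old_rs;  // refresh the stale sample
  }
};

struct Region {
  bool   in_collection_set;
  size_t sampled_rs_length;  // value the policy last saw
  size_t current_rs_length;  // value measured this sampling round
};

int main() {
  Policy policy;
  std::vector<Region> young;
  Region a = { true, 10, 25 };   // already in the incremental set
  Region b = { false, 0, 5 };    // not yet retired into the set
  young.push_back(a);
  young.push_back(b);

  size_t sampled_total = 0;
  for (size_t i = 0; i < young.size(); i++) {
    Region& r = young[i];
    sampled_total += r.current_rs_length;
    // Only regions already in the incremental set update the policy,
    // mirroring the in_collection_set() check in the hunk above.
    if (r.in_collection_set) {
      policy.update_incremental_cset_info(r.sampled_rs_length,
                                          r.current_rs_length);
      r.sampled_rs_length = r.current_rs_length;
    }
  }
  std::printf("sampled=%zu predicted=%zu\n",
              sampled_total, policy.predicted_rs_lengths);
  return 0;
}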
@@ -375,54 +279,46 @@ YoungList::rs_length_sampling_next() {
 void
 YoungList::reset_auxilary_lists() {
-  // We could have just "moved" the scan-only list to the young list.
-  // However, the scan-only list is ordered according to the region
-  // age in descending order, so, by moving one entry at a time, we
-  // ensure that it is recreated in ascending order.
-
   guarantee( is_empty(), "young list should be empty" );
   assert(check_list_well_formed(), "young list should be well formed");
   // Add survivor regions to SurvRateGroup.
   _g1h->g1_policy()->note_start_adding_survivor_regions();
   _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);
   for (HeapRegion* curr = _survivor_head;
        curr != NULL;
        curr = curr->get_next_young_region()) {
     _g1h->g1_policy()->set_region_survivors(curr);
+
+    // The region is a non-empty survivor so let's add it to
+    // the incremental collection set for the next evacuation
+    // pause.
+    _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
   }
   _g1h->g1_policy()->note_stop_adding_survivor_regions();
+  _head   = _survivor_head;
+  _length = _survivor_length;
   if (_survivor_head != NULL) {
-    _head = _survivor_head;
-    _length = _survivor_length + _scan_only_length;
-    _survivor_tail->set_next_young_region(_scan_only_head);
-  } else {
-    _head = _scan_only_head;
-    _length = _scan_only_length;
+    assert(_survivor_tail != NULL, "cause it shouldn't be");
+    assert(_survivor_length > 0, "invariant");
+    _survivor_tail->set_next_young_region(NULL);
   }
-  for (HeapRegion* curr = _scan_only_head;
-       curr != NULL;
-       curr = curr->get_next_young_region()) {
-    curr->recalculate_age_in_surv_rate_group();
-  }
-  _scan_only_head   = NULL;
-  _scan_only_tail   = NULL;
-  _scan_only_length = 0;
-  _curr_scan_only   = NULL;
-
-  _survivor_head    = NULL;
-  _survivor_tail    = NULL;
-  _survivor_length  = 0;
+  // Don't clear the survivor list handles until the start of
+  // the next evacuation pause - we need it in order to re-tag
+  // the survivor regions from this evacuation pause as 'young'
+  // at the start of the next.
+
   _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);
   assert(check_list_well_formed(), "young list should be well formed");
 }
 void YoungList::print() {
-  HeapRegion* lists[] = {_head,   _scan_only_head, _survivor_head};
-  const char* names[] = {"YOUNG", "SCAN-ONLY",     "SURVIVOR"};
+  HeapRegion* lists[] = {_head,   _survivor_head};
+  const char* names[] = {"YOUNG", "SURVIVOR"};
   for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
     gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
@@ -431,7 +327,7 @@ void YoungList::print() {
       gclog_or_tty->print_cr("  empty");
     while (curr != NULL) {
       gclog_or_tty->print_cr("  [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
-                             "age: %4d, y: %d, s-o: %d, surv: %d",
+                             "age: %4d, y: %d, surv: %d",
                              curr->bottom(), curr->end(),
                              curr->top(),
                              curr->prev_top_at_mark_start(),
@@ -439,7 +335,6 @@ void YoungList::print() {
                              curr->top_at_conc_mark_count(),
                              curr->age_in_surv_rate_group_cond(),
                              curr->is_young(),
-                             curr->is_scan_only(),
                              curr->is_survivor());
       curr = curr->get_next_young_region();
     }
@@ -707,6 +602,12 @@ G1CollectedHeap::attempt_allocation_slow(size_t word_size,
     // region below.
     if (_cur_alloc_region != NULL) {
       // We're finished with the _cur_alloc_region.
+      // As we're building (at least the young portion) of the collection
+      // set incrementally we'll add the current allocation region to
+      // the collection set here.
+      if (_cur_alloc_region->is_young()) {
+        g1_policy()->add_region_to_incremental_cset_lhs(_cur_alloc_region);
+      }
       _summary_bytes_used += _cur_alloc_region->used();
       _cur_alloc_region = NULL;
     }
@@ -820,6 +721,12 @@ void G1CollectedHeap::abandon_cur_alloc_region() {
       _free_regions++;
      free_region(_cur_alloc_region);
     } else {
+      // As we're building (at least the young portion) of the collection
+      // set incrementally we'll add the current allocation region to
+      // the collection set here.
+      if (_cur_alloc_region->is_young()) {
+        g1_policy()->add_region_to_incremental_cset_lhs(_cur_alloc_region);
+      }
       _summary_bytes_used += _cur_alloc_region->used();
     }
     _cur_alloc_region = NULL;
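Both hunks above retire _cur_alloc_region the same way: a young region joins the incremental collection set the moment it stops being the mutator allocation region, instead of the whole set being assembled at pause start. A minimal sketch of that retire-and-link pattern (hypothetical names, not the HotSpot classes):

#include <cstdio>

// Hypothetical region/policy pair: young regions are linked into the
// incremental collection set as soon as they are retired, so choosing
// the collection set at pause time is mostly already done.
struct Region {
  int     id;
  bool    young;
  Region* next_in_cset;
};

struct Policy {
  Region* inc_cset_head;
  Policy() : inc_cset_head(0) {}
  void add_region_to_incremental_cset(Region* r) {
    r->next_in_cset = inc_cset_head;   // push onto the incremental set
    inc_cset_head = r;
  }
};

void retire_alloc_region(Policy& policy, Region* r) {
  if (r->young) {                      // only young regions, as above
    policy.add_region_to_incremental_cset(r);
  }
}

int main() {
  Policy policy;
  Region a = { 1, true, 0 }, b = { 2, false, 0 };
  retire_alloc_region(policy, &a);
  retire_alloc_region(policy, &b);     // old region: not added
  for (Region* r = policy.inc_cset_head; r; r = r->next_in_cset)
    std::printf("in cset: region %d\n", r->id);
  return 0;
}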
@@ -913,20 +820,25 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
   }
   if (full && DisableExplicitGC) {
-    gclog_or_tty->print("\n\n\nDisabling Explicit GC\n\n\n");
     return;
   }
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");
+  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
+                          collector_policy()->should_clear_all_soft_refs();
+
+  ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
+
   {
     IsGCActiveMark x;
     // Timing
     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    TraceTime t(full ? "Full GC (System.gc())" : "Full GC", PrintGC, true, gclog_or_tty);
+    TraceTime t(full ? "Full GC (System.gc())" : "Full GC",
+                PrintGC, true, gclog_or_tty);
     TraceMemoryManagerStats tms(true /* fullGC */);
@@ -970,6 +882,15 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
     g1_rem_set()->as_HRInto_G1RemSet()->cleanupHRRS();
     tear_down_region_lists();
     set_used_regions_to_need_zero_fill();
+
+    // We may have added regions to the current incremental collection
+    // set between the last GC or pause and now. We need to clear the
+    // incremental collection set and then start rebuilding it afresh
+    // after this full GC.
+    abandon_collection_set(g1_policy()->inc_cset_head());
+    g1_policy()->clear_incremental_cset();
+    g1_policy()->stop_incremental_cset_building();
+
     if (g1_policy()->in_young_gc_mode()) {
       empty_young_list();
       g1_policy()->set_full_young_gcs(true);
@@ -985,12 +906,12 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
     ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL);
     ref_processor()->enable_discovery();
-    ref_processor()->setup_policy(clear_all_soft_refs);
+    ref_processor()->setup_policy(do_clear_all_soft_refs);
     // Do collection work
     {
       HandleMark hm;  // Discard invalid handles created during gc
-      G1MarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
+      G1MarkSweep::invoke_at_safepoint(ref_processor(), do_clear_all_soft_refs);
     }
     // Because freeing humongous regions may have added some unclean
     // regions, it is necessary to tear down again before rebuilding.
@@ -1053,6 +974,15 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
       perm()->compute_new_size();
     }
+    // Start a new incremental collection set for the next pause
+    assert(g1_policy()->collection_set() == NULL, "must be");
+    g1_policy()->start_incremental_cset_building();
+
+    // Clear the _cset_fast_test bitmap in anticipation of adding
+    // regions to the incremental collection set for the next
+    // evacuation pause.
+    clear_cset_fast_test();
+
     double end = os::elapsedTime();
     g1_policy()->record_full_collection_end();
@@ -1071,7 +1001,9 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
     if (g1_policy()->in_young_gc_mode()) {
       _young_list->reset_sampled_info();
-      assert( check_young_list_empty(false, false),
+      // At this point there should be no regions in the
+      // entire heap tagged as young.
+      assert( check_young_list_empty(true /* check_heap */),
               "young list should be empty at this point");
     }
@@ -1208,6 +1140,9 @@ G1CollectedHeap::satisfy_failed_allocation(size_t word_size) {
     return result;
   }
+  assert(!collector_policy()->should_clear_all_soft_refs(),
+         "Flag should have been handled and cleared prior to this point");
+
   // What else? We might try synchronous finalization later. If the total
   // space available is large enough for the allocation, then a more
   // complete compaction phase than we've tried so far might be
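do_collection() now folds the policy's pending soft-reference request into do_clear_all_soft_refs and constructs a ClearedAllSoftRefs guard. Assuming the guard works like a typical RAII scope (the HotSpot class itself is not shown in this diff), the idea can be sketched as:

#include <cassert>

// Sketch of the ClearedAllSoftRefs idea: an RAII guard that, on scope
// exit, clears the policy's "must clear all soft refs" request once a
// collection that honored it has run. Hypothetical stand-in types.
struct CollectorPolicy {
  bool all_soft_refs_clear;
  CollectorPolicy() : all_soft_refs_clear(false) {}
  bool should_clear_all_soft_refs() const { return all_soft_refs_clear; }
  void set_should_clear_all_soft_refs(bool v) { all_soft_refs_clear = v; }
};

class ClearedAllSoftRefs {
  bool _clear_all_soft_refs;
  CollectorPolicy* _policy;
public:
  ClearedAllSoftRefs(bool clear, CollectorPolicy* policy)
    : _clear_all_soft_refs(clear), _policy(policy) {}
  ~ClearedAllSoftRefs() {
    if (_clear_all_soft_refs) {
      _policy->set_should_clear_all_soft_refs(false);  // request consumed
    }
  }
};

int main() {
  CollectorPolicy policy;
  policy.set_should_clear_all_soft_refs(true);
  {
    bool do_clear = policy.should_clear_all_soft_refs();
    ClearedAllSoftRefs casr(do_clear, &policy);
    // ... the full collection honoring soft-ref clearing runs here ...
  }
  // Matches the new assert in satisfy_failed_allocation(): the flag has
  // been handled and cleared by the time the full GC is over.
  assert(!policy.should_clear_all_soft_refs());
  return 0;
}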
@@ -1565,6 +1500,20 @@ jint G1CollectedHeap::initialize() {
   _g1h = this;
+  _in_cset_fast_test_length = max_regions();
+  _in_cset_fast_test_base = NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length);
+
+  // We're biasing _in_cset_fast_test to avoid subtracting the
+  // beginning of the heap every time we want to index; basically
+  // it's the same with what we do with the card table.
+  _in_cset_fast_test = _in_cset_fast_test_base -
+               ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
+
+  // Clear the _cset_fast_test bitmap in anticipation of adding
+  // regions to the incremental collection set for the first
+  // evacuation pause.
+  clear_cset_fast_test();
+
   // Create the ConcurrentMark data structure and thread.
   // (Must do this late, so that "max_regions" is defined.)
   _cm       = new ConcurrentMark(heap_rs, (int) max_regions());
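The biasing comment above is the same trick the card table uses: shift the base pointer down by the heap's first region index once, so every lookup becomes a plain shift-and-index with no per-access subtraction. A self-contained illustration (the out-of-bounds pointer arithmetic is formally undefined in ISO C++, but it is exactly what such runtimes rely on in practice):

#include <cassert>
#include <cstring>

int main() {
  const size_t kLogRegionBytes = 20;          // 1 MB regions (example)
  const size_t kHeapStart      = 0x40000000;  // pretend reserved base
  const size_t kNumRegions     = 16;

  bool storage[kNumRegions];
  std::memset(storage, false, sizeof(storage));

  // Bias the pointer so that (addr >> kLogRegionBytes) indexes directly,
  // with no subtraction of the heap's first region index on each access.
  bool* biased = storage - (kHeapStart >> kLogRegionBytes);

  size_t some_addr = kHeapStart + 5 * ((size_t)1 << kLogRegionBytes) + 123;
  biased[some_addr >> kLogRegionBytes] = true;  // mark region 5

  assert(storage[5]);
  return 0;
}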
@@ -2185,8 +2134,10 @@ public:
     assert(o != NULL, "Huh?");
     if (!_g1h->is_obj_dead_cond(o, _use_prev_marking)) {
       o->oop_iterate(&isLive);
-      if (!_hr->obj_allocated_since_prev_marking(o))
-        _live_bytes += (o->size() * HeapWordSize);
+      if (!_hr->obj_allocated_since_prev_marking(o)) {
+        size_t obj_size = o->size();    // Make sure we don't overflow
+        _live_bytes += (obj_size * HeapWordSize);
+      }
     }
   }
   size_t live_bytes() { return _live_bytes; }
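The new braces and the obj_size temporary carry the overflow warning in the comment: the byte count is computed at full width before being accumulated. As a general illustration of why multiply-then-widen differs from widen-then-multiply (illustrative values; whether the original expression could actually wrap depends on the platform's type widths):

#include <cassert>
#include <cstdio>

int main() {
  // On a typical target where unsigned int is 32 bits, words * 8 wraps
  // before it is widened; doing the multiply at full width avoids the
  // wrap. This is the shape of bug the hoisted temporary guards against.
  unsigned int       words = 600u * 1000u * 1000u;   // ~600M words
  unsigned long long bad   = words * 8u;             // wraps at 32 bits
  unsigned long long good  = (unsigned long long)words * 8u;
  std::printf("bad=%llu good=%llu\n", bad, good);
  assert(bad != good);
  return 0;
}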
@@ -2388,8 +2339,8 @@ void G1CollectedHeap::verify(bool allow_dirty,
       print_on(gclog_or_tty, true /* extended */);
       gclog_or_tty->print_cr("");
       if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) {
-        concurrent_mark()->print_reachable(use_prev_marking,
-                                           "failed-verification");
+        concurrent_mark()->print_reachable("at-verification-failure",
+                                           use_prev_marking, false /* all */);
       }
       gclog_or_tty->flush();
   }
@@ -2741,25 +2692,19 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
     double start_time_sec = os::elapsedTime();
     size_t start_used_bytes = used();
+#if YOUNG_LIST_VERBOSE
+    gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
+    _young_list->print();
+    g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
+#endif // YOUNG_LIST_VERBOSE
+
     g1_policy()->record_collection_pause_start(start_time_sec,
                                                start_used_bytes);
-    guarantee(_in_cset_fast_test == NULL, "invariant");
-    guarantee(_in_cset_fast_test_base == NULL, "invariant");
-    _in_cset_fast_test_length = max_regions();
-    _in_cset_fast_test_base =
-                             NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length);
-    memset(_in_cset_fast_test_base, false,
-                                     _in_cset_fast_test_length * sizeof(bool));
-    // We're biasing _in_cset_fast_test to avoid subtracting the
-    // beginning of the heap every time we want to index; basically
-    // it's the same with what we do with the card table.
-    _in_cset_fast_test = _in_cset_fast_test_base -
-              ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
-
-#if SCAN_ONLY_VERBOSE
+#if YOUNG_LIST_VERBOSE
+    gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
     _young_list->print();
-#endif // SCAN_ONLY_VERBOSE
+#endif // YOUNG_LIST_VERBOSE
     if (g1_policy()->during_initial_mark_pause()) {
       concurrent_mark()->checkpointRootsInitialPre();
@@ -2786,12 +2731,15 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
     if (mark_in_progress())
       concurrent_mark()->newCSet();
-    // Now choose the CS.
-    g1_policy()->choose_collection_set();
-
-    // We may abandon a pause if we find no region that will fit in the MMU
-    // pause.
-    bool abandoned = (g1_policy()->collection_set() == NULL);
+#if YOUNG_LIST_VERBOSE
+    gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
+    _young_list->print();
+    g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
+#endif // YOUNG_LIST_VERBOSE
+
+    // Now choose the CS. We may abandon a pause if we find no
+    // region that will fit in the MMU pause.
+    bool abandoned = g1_policy()->choose_collection_set();
     // Nothing to do if we were unable to choose a collection set.
     if (!abandoned) {
@@ -2809,40 +2757,64 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
       // Actually do the work...
       evacuate_collection_set();
       free_collection_set(g1_policy()->collection_set());
       g1_policy()->clear_collection_set();
-      FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base);
-      // this is more for peace of mind; we're nulling them here and
-      // we're expecting them to be null at the beginning of the next GC
-      _in_cset_fast_test = NULL;
-      _in_cset_fast_test_base = NULL;
       cleanup_surviving_young_words();
+      // Start a new incremental collection set for the next pause.
+      g1_policy()->start_incremental_cset_building();
+
+      // Clear the _cset_fast_test bitmap in anticipation of adding
+      // regions to the incremental collection set for the next
+      // evacuation pause.
+      clear_cset_fast_test();
+
       if (g1_policy()->in_young_gc_mode()) {
         _young_list->reset_sampled_info();
-        assert(check_young_list_empty(true),
-               "young list should be empty");
-
-#if SCAN_ONLY_VERBOSE
+
+        // Don't check the whole heap at this point as the
+        // GC alloc regions from this pause have been tagged
+        // as survivors and moved on to the survivor list.
+        // Survivor regions will fail the !is_young() check.
+        assert(check_young_list_empty(false /* check_heap */),
+               "young list should be empty");
+
+#if YOUNG_LIST_VERBOSE
+        gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
         _young_list->print();
-#endif // SCAN_ONLY_VERBOSE
+#endif // YOUNG_LIST_VERBOSE
         g1_policy()->record_survivor_regions(_young_list->survivor_length(),
                                              _young_list->first_survivor_region(),
                                              _young_list->last_survivor_region());
+
         _young_list->reset_auxilary_lists();
       }
     } else {
-      if (_in_cset_fast_test != NULL) {
-        assert(_in_cset_fast_test_base != NULL, "Since _in_cset_fast_test isn't");
-        FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base);
-        // this is more for peace of mind; we're nulling them here and
-        // we're expecting them to be null at the beginning of the next GC
-        _in_cset_fast_test = NULL;
-        _in_cset_fast_test_base = NULL;
-      }
+      // We have abandoned the current collection. This can only happen
+      // if we're not doing young or partially young collections, and
+      // we didn't find an old region that we're able to collect within
+      // the allowed time.
+
+      assert(g1_policy()->collection_set() == NULL, "should be");
+      assert(_young_list->length() == 0, "because it should be");
+
+      // This should be a no-op.
+      abandon_collection_set(g1_policy()->inc_cset_head());
+
+      g1_policy()->clear_incremental_cset();
+      g1_policy()->stop_incremental_cset_building();
+
+      // Start a new incremental collection set for the next pause.
+      g1_policy()->start_incremental_cset_building();
+
+      // Clear the _cset_fast_test bitmap in anticipation of adding
+      // regions to the incremental collection set for the next
+      // evacuation pause.
+      clear_cset_fast_test();
+
       // This looks confusing, because the DPT should really be empty
       // at this point -- since we have not done any collection work,
       // there should not be any derived pointers in the table to update;
@@ -2876,9 +2848,11 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
       doConcurrentMark();
     }
-#if SCAN_ONLY_VERBOSE
+#if YOUNG_LIST_VERBOSE
+    gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
     _young_list->print();
-#endif // SCAN_ONLY_VERBOSE
+    g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
+#endif // YOUNG_LIST_VERBOSE
     double end_time_sec = os::elapsedTime();
     double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
@@ -2936,6 +2910,25 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
   }
 }
+size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
+{
+  size_t gclab_word_size;
+  switch (purpose) {
+    case GCAllocForSurvived:
+      gclab_word_size = YoungPLABSize;
+      break;
+    case GCAllocForTenured:
+      gclab_word_size = OldPLABSize;
+      break;
+    default:
+      assert(false, "unknown GCAllocPurpose");
+      gclab_word_size = OldPLABSize;
+      break;
+  }
+  return gclab_word_size;
+}
+
 void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) {
   assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose");
   // make sure we don't call set_gc_alloc_region() multiple times on
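The added desired_plab_sz() keys the PLAB size off the allocation purpose (YoungPLABSize for survivors, OldPLABSize for tenured) instead of one shared buffer size, and the G1ParScanThreadState hunks below wire one buffer per purpose into _alloc_buffers. A compact model of that wiring (toy types; the flag values are invented):

#include <cassert>
#include <cstddef>

// Hypothetical flag values standing in for YoungPLABSize / OldPLABSize.
static const size_t kYoungPlabWords = 4096;
static const size_t kOldPlabWords   = 1024;

enum GCAllocPurpose { GCAllocForSurvived, GCAllocForTenured,
                      GCAllocPurposeCount };

// Mirrors the shape of the added desired_plab_sz(): a per-purpose
// size lookup instead of one global buffer size for both destinations.
size_t desired_plab_sz(GCAllocPurpose purpose) {
  switch (purpose) {
    case GCAllocForSurvived: return kYoungPlabWords;
    case GCAllocForTenured:  return kOldPlabWords;
    default:                 assert(false); return kOldPlabWords;
  }
}

struct ParGCAllocBuffer {                // toy PLAB: size only
  size_t word_size;
  explicit ParGCAllocBuffer(size_t sz) : word_size(sz) {}
};

struct ScanThreadState {                 // like G1ParScanThreadState
  ParGCAllocBuffer surviving;
  ParGCAllocBuffer tenured;
  ParGCAllocBuffer* alloc_buffers[GCAllocPurposeCount];
  ScanThreadState()
    : surviving(desired_plab_sz(GCAllocForSurvived)),
      tenured(desired_plab_sz(GCAllocForTenured)) {
    alloc_buffers[GCAllocForSurvived] = &surviving;
    alloc_buffers[GCAllocForTenured]  = &tenured;
  }
};

int main() {
  ScanThreadState pss;
  assert(pss.alloc_buffers[GCAllocForSurvived]->word_size == 4096);
  assert(pss.alloc_buffers[GCAllocForTenured]->word_size  == 1024);
  return 0;
}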
@@ -3109,6 +3102,11 @@ void G1CollectedHeap::get_gc_alloc_regions() {
     } else {
       // the region was retained from the last collection
       ++_gc_alloc_region_counts[ap];
+      if (G1PrintHeapRegions) {
+        gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
+                               "top "PTR_FORMAT,
+                               alloc_region->hrs_index(), alloc_region->bottom(), alloc_region->end(), alloc_region->top());
+      }
     }
     if (alloc_region != NULL) {
@@ -3665,6 +3663,8 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num)
     _g1_rem(g1h->g1_rem_set()),
     _hash_seed(17), _queue_num(queue_num),
     _term_attempts(0),
+    _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
+    _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
     _age_table(false),
 #if G1_DETAILED_STATS
     _pushes(0), _pops(0), _steals(0),
@@ -3691,6 +3691,9 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num)
   _overflowed_refs = new OverflowQueue(10);
+  _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
+  _alloc_buffers[GCAllocForTenured]  = &_tenured_alloc_buffer;
+
   _start = os::elapsedTime();
 }
@@ -3988,16 +3991,13 @@ public:
     OopsInHeapRegionClosure        *scan_root_cl;
     OopsInHeapRegionClosure        *scan_perm_cl;
-    OopsInHeapRegionClosure        *scan_so_cl;
     if (_g1h->g1_policy()->during_initial_mark_pause()) {
       scan_root_cl = &scan_mark_root_cl;
       scan_perm_cl = &scan_mark_perm_cl;
-      scan_so_cl   = &scan_mark_heap_rs_cl;
     } else {
       scan_root_cl = &only_scan_root_cl;
       scan_perm_cl = &only_scan_perm_cl;
-      scan_so_cl   = &only_scan_heap_rs_cl;
     }
     pss.start_strong_roots();
@@ -4005,7 +4005,6 @@ public:
                                   SharedHeap::SO_AllClasses,
                                   scan_root_cl,
                                   &push_heap_rs_cl,
-                                  scan_so_cl,
                                   scan_perm_cl,
                                   i);
     pss.end_strong_roots();
@@ -4067,7 +4066,6 @@ g1_process_strong_roots(bool collecting_perm_gen,
                         SharedHeap::ScanningOption so,
                         OopClosure* scan_non_heap_roots,
                         OopsInHeapRegionClosure* scan_rs,
-                        OopsInHeapRegionClosure* scan_so,
                         OopsInGenClosure* scan_perm,
                         int worker_i) {
   // First scan the strong roots, including the perm gen.
@@ -4087,6 +4085,7 @@ g1_process_strong_roots(bool collecting_perm_gen,
                        &buf_scan_non_heap_roots,
                        &eager_scan_code_roots,
                        &buf_scan_perm);
+
   // Finish up any enqueued closure apps.
   buf_scan_non_heap_roots.done();
   buf_scan_perm.done();
@@ -4109,9 +4108,6 @@ g1_process_strong_roots(bool collecting_perm_gen,
   // XXX What should this be doing in the parallel case?
   g1_policy()->record_collection_pause_end_CH_strong_roots();
-  if (scan_so != NULL) {
-    scan_scan_only_set(scan_so, worker_i);
-  }
   // Now scan the complement of the collection set.
   if (scan_rs != NULL) {
     g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i);
@@ -4124,54 +4120,6 @@ g1_process_strong_roots(bool collecting_perm_gen,
   _process_strong_tasks->all_tasks_completed();
 }
-void
-G1CollectedHeap::scan_scan_only_region(HeapRegion* r,
-                                       OopsInHeapRegionClosure* oc,
-                                       int worker_i) {
-  HeapWord* startAddr = r->bottom();
-  HeapWord* endAddr = r->used_region().end();
-
-  oc->set_region(r);
-
-  HeapWord* p = r->bottom();
-  HeapWord* t = r->top();
-  guarantee( p == r->next_top_at_mark_start(), "invariant" );
-  while (p < t) {
-    oop obj = oop(p);
-    p += obj->oop_iterate(oc);
-  }
-}
-
-void
-G1CollectedHeap::scan_scan_only_set(OopsInHeapRegionClosure* oc,
-                                    int worker_i) {
-  double start = os::elapsedTime();
-
-  BufferingOopsInHeapRegionClosure boc(oc);
-
-  FilterInHeapRegionAndIntoCSClosure scan_only(this, &boc);
-  FilterAndMarkInHeapRegionAndIntoCSClosure scan_and_mark(this, &boc, concurrent_mark());
-
-  OopsInHeapRegionClosure *foc;
-  if (g1_policy()->during_initial_mark_pause())
-    foc = &scan_and_mark;
-  else
-    foc = &scan_only;
-
-  HeapRegion* hr;
-  int n = 0;
-  while ((hr = _young_list->par_get_next_scan_only_region()) != NULL) {
-    scan_scan_only_region(hr, foc, worker_i);
-    ++n;
-  }
-  boc.done();
-
-  double closure_app_s = boc.closure_app_seconds();
-  g1_policy()->record_obj_copy_time(worker_i, closure_app_s * 1000.0);
-  double ms = (os::elapsedTime() - start - closure_app_s)*1000.0;
-  g1_policy()->record_scan_only_time(worker_i, ms, n);
-}
-
 void
 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure,
                                        OopClosure* non_root_closure) {
@@ -4370,17 +4318,14 @@ void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRe
 class G1ParCleanupCTTask : public AbstractGangTask {
   CardTableModRefBS* _ct_bs;
   G1CollectedHeap* _g1h;
-  HeapRegion* volatile _so_head;
   HeapRegion* volatile _su_head;
 public:
   G1ParCleanupCTTask(CardTableModRefBS* ct_bs,
                      G1CollectedHeap* g1h,
-                     HeapRegion* scan_only_list,
                      HeapRegion* survivor_list) :
     AbstractGangTask("G1 Par Cleanup CT Task"),
     _ct_bs(ct_bs),
     _g1h(g1h),
-    _so_head(scan_only_list),
     _su_head(survivor_list)
   { }
@@ -4389,14 +4334,13 @@ public:
     while (r = _g1h->pop_dirty_cards_region()) {
       clear_cards(r);
     }
-    // Redirty the cards of the scan-only and survivor regions.
-    dirty_list(&this->_so_head);
+    // Redirty the cards of the survivor regions.
     dirty_list(&this->_su_head);
   }
   void clear_cards(HeapRegion* r) {
-    // Cards for Survivor and Scan-Only regions will be dirtied later.
-    if (!r->is_scan_only() && !r->is_survivor()) {
+    // Cards for Survivor regions will be dirtied later.
+    if (!r->is_survivor()) {
      _ct_bs->clear(MemRegion(r->bottom(), r->end()));
     }
   }
@@ -4429,7 +4373,7 @@ public:
   virtual bool doHeapRegion(HeapRegion* r)
   {
     MemRegion mr(r->bottom(), r->end());
-    if (r->is_scan_only() || r->is_survivor()) {
+    if (r->is_survivor()) {
      _ct_bs->verify_dirty_region(mr);
     } else {
      _ct_bs->verify_clean_region(mr);
@@ -4445,8 +4389,8 @@ void G1CollectedHeap::cleanUpCardTable() {
   // Iterate over the dirty cards region list.
   G1ParCleanupCTTask cleanup_task(ct_bs, this,
-                                  _young_list->first_scan_only_region(),
                                   _young_list->first_survivor_region());
   if (ParallelGCThreads > 0) {
     set_par_threads(workers()->total_workers());
     workers()->run_task(&cleanup_task);
@@ -4462,12 +4406,12 @@ void G1CollectedHeap::cleanUpCardTable() {
       }
       r->set_next_dirty_cards_region(NULL);
     }
-    // now, redirty the cards of the scan-only and survivor regions
+    // now, redirty the cards of the survivor regions
     // (it seemed faster to do it this way, instead of iterating over
    // all regions and then clearing / dirtying as appropriate)
-    dirtyCardsForYoungRegions(ct_bs, _young_list->first_scan_only_region());
     dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region());
   }
   double elapsed = os::elapsedTime() - start;
   g1_policy()->record_clear_ct_time( elapsed * 1000.0);
 #ifndef PRODUCT
@@ -4488,6 +4432,11 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
   double young_time_ms     = 0.0;
   double non_young_time_ms = 0.0;
+  // Since the collection set is a superset of the young list,
+  // all we need to do to clear the young list is clear its
+  // head and length, and unlink any young regions in the code below
+  _young_list->clear();
+
   G1CollectorPolicy* policy = g1_policy();
   double start_sec = os::elapsedTime();
@@ -4531,6 +4480,12 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
       guarantee( (size_t)index < policy->young_cset_length(), "invariant" );
       size_t words_survived = _surviving_young_words[index];
       cur->record_surv_words_in_group(words_survived);
+
+      // At this point we have 'popped' cur from the collection set
+      // (linked via next_in_collection_set()) but it is still in the
+      // young list (linked via next_young_region()). Clear the
+      // _next_young_region field.
+      cur->set_next_young_region(NULL);
     } else {
       int index = cur->young_index_in_cset();
       guarantee( index == -1, "invariant" );
@@ -4546,7 +4501,6 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
                "Should not have empty regions in a CS.");
       free_region(cur);
     } else {
-      guarantee( !cur->is_scan_only(), "should not be scan only" );
       cur->uninstall_surv_rate_group();
       if (cur->is_young())
         cur->set_young_index_in_cset(-1);
@@ -4570,6 +4524,27 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
   policy->record_non_young_free_cset_time_ms(non_young_time_ms);
 }
+// This routine is similar to the above but does not record
+// any policy statistics or update free lists; we are abandoning
+// the current incremental collection set in preparation of a
+// full collection. After the full GC we will start to build up
+// the incremental collection set again.
+// This is only called when we're doing a full collection
+// and is immediately followed by the tearing down of the young list.
+void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
+  HeapRegion* cur = cs_head;
+
+  while (cur != NULL) {
+    HeapRegion* next = cur->next_in_collection_set();
+    assert(cur->in_collection_set(), "bad CS");
+    cur->set_next_in_collection_set(NULL);
+    cur->set_in_collection_set(false);
+    cur->set_young_index_in_cset(-1);
+    cur = next;
+  }
+}
+
 HeapRegion*
 G1CollectedHeap::alloc_region_from_unclean_list_locked(bool zero_filled) {
   assert(ZF_mon->owned_by_self(), "Precondition");
@@ -4936,12 +4911,10 @@ public:
   bool success() { return _success; }
 };
-bool G1CollectedHeap::check_young_list_empty(bool ignore_scan_only_list,
-                                             bool check_sample) {
-  bool ret = true;
-  ret = _young_list->check_list_empty(ignore_scan_only_list, check_sample);
-  if (!ignore_scan_only_list) {
+bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
+  bool ret = _young_list->check_list_empty(check_sample);
+
+  if (check_heap) {
     NoYoungRegionsClosure closure;
     heap_region_iterate(&closure);
     ret = ret && closure.success();
View File
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -81,33 +81,29 @@ private:
   HeapRegion* _head;
-  HeapRegion* _scan_only_head;
-  HeapRegion* _scan_only_tail;
+  HeapRegion* _survivor_head;
+  HeapRegion* _survivor_tail;
+
+  HeapRegion* _curr;
+
   size_t      _length;
-  size_t      _scan_only_length;
+  size_t      _survivor_length;
   size_t      _last_sampled_rs_lengths;
   size_t      _sampled_rs_lengths;
-  HeapRegion* _curr;
-  HeapRegion* _curr_scan_only;
-
-  HeapRegion* _survivor_head;
-  HeapRegion* _survivor_tail;
-  size_t      _survivor_length;
-
-  void          empty_list(HeapRegion* list);
+
+  void         empty_list(HeapRegion* list);
 public:
   YoungList(G1CollectedHeap* g1h);
   void         push_region(HeapRegion* hr);
   void         add_survivor_region(HeapRegion* hr);
-  HeapRegion* pop_region();
   void         empty_list();
   bool         is_empty() { return _length == 0; }
   size_t       length() { return _length; }
-  size_t       scan_only_length() { return _scan_only_length; }
   size_t       survivor_length() { return _survivor_length; }
   void rs_length_sampling_init();
   bool rs_length_sampling_more();
@@ -120,22 +116,21 @@ public:
   // for development purposes
   void reset_auxilary_lists();
+  void         clear() { _head = NULL; _length = 0; }
+
+  void         clear_survivors() {
+    _survivor_head   = NULL;
+    _survivor_tail   = NULL;
+    _survivor_length = 0;
+  }
+
   HeapRegion* first_region() { return _head; }
-  HeapRegion* first_scan_only_region() { return _scan_only_head; }
   HeapRegion* first_survivor_region() { return _survivor_head; }
   HeapRegion* last_survivor_region() { return _survivor_tail; }
-  HeapRegion* par_get_next_scan_only_region() {
-    MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
-    HeapRegion* ret = _curr_scan_only;
-    if (ret != NULL)
-      _curr_scan_only = ret->get_next_young_region();
-    return ret;
-  }
   // debugging
   bool          check_list_well_formed();
-  bool          check_list_empty(bool ignore_scan_only_list,
-                                 bool check_sample = true);
+  bool          check_list_empty(bool check_sample = true);
   void          print();
 };
@@ -232,6 +227,9 @@ private:
   // current collection.
   HeapRegion* _gc_alloc_region_list;
+  // Determines PLAB size for a particular allocation purpose.
+  static size_t desired_plab_sz(GCAllocPurpose purpose);
+
   // When called by par thread, require par_alloc_during_gc_lock() to be held.
   void push_gc_alloc_region(HeapRegion* hr);
@@ -402,8 +400,7 @@ public:
     assert(_in_cset_fast_test_base != NULL, "sanity");
     assert(r->in_collection_set(), "invariant");
     int index = r->hrs_index();
-    assert(0 <= (size_t) index && (size_t) index < _in_cset_fast_test_length,
-           "invariant");
+    assert(0 <= index && (size_t) index < _in_cset_fast_test_length, "invariant");
     assert(!_in_cset_fast_test_base[index], "invariant");
     _in_cset_fast_test_base[index] = true;
   }
@@ -428,6 +425,12 @@ public:
     }
   }
+  void clear_cset_fast_test() {
+    assert(_in_cset_fast_test_base != NULL, "sanity");
+    memset(_in_cset_fast_test_base, false,
+           _in_cset_fast_test_length * sizeof(bool));
+  }
+
 protected:
   // Shrink the garbage-first heap by at most the given size (in bytes!).
@@ -473,6 +476,10 @@ protected:
   // regions.
   void free_collection_set(HeapRegion* cs_head);
+  // Abandon the current collection set without recording policy
+  // statistics or updating free lists.
+  void abandon_collection_set(HeapRegion* cs_head);
+
   // Applies "scan_non_heap_roots" to roots outside the heap,
   // "scan_rs" to roots inside the heap (having done "set_region" to
   // indicate the region in which the root resides), and does "scan_perm"
@@ -485,16 +492,9 @@ protected:
                                SharedHeap::ScanningOption so,
                                OopClosure* scan_non_heap_roots,
                                OopsInHeapRegionClosure* scan_rs,
-                               OopsInHeapRegionClosure* scan_so,
                                OopsInGenClosure* scan_perm,
                                int worker_i);
-  void scan_scan_only_set(OopsInHeapRegionClosure* oc,
-                          int worker_i);
-  void scan_scan_only_region(HeapRegion* hr,
-                             OopsInHeapRegionClosure* oc,
-                             int worker_i);
   // Apply "blk" to all the weak roots of the system. These include
   // JNI weak roots, the code cache, system dictionary, symbol table,
   // string table, and referents of reachable weak refs.
@@ -1133,36 +1133,14 @@ public:
   void set_region_short_lived_locked(HeapRegion* hr);
   // add appropriate methods for any other surv rate groups
-  void young_list_rs_length_sampling_init() {
-    _young_list->rs_length_sampling_init();
-  }
-  bool young_list_rs_length_sampling_more() {
-    return _young_list->rs_length_sampling_more();
-  }
-  void young_list_rs_length_sampling_next() {
-    _young_list->rs_length_sampling_next();
-  }
-  size_t young_list_sampled_rs_lengths() {
-    return _young_list->sampled_rs_lengths();
-  }
-
-  size_t young_list_length() { return _young_list->length(); }
-  size_t young_list_scan_only_length() {
-    return _young_list->scan_only_length(); }
-
-  HeapRegion* pop_region_from_young_list() {
-    return _young_list->pop_region();
-  }
-
-  HeapRegion* young_list_first_region() {
-    return _young_list->first_region();
-  }
+  YoungList* young_list() { return _young_list; }
   // debugging
   bool check_young_list_well_formed() {
     return _young_list->check_list_well_formed();
   }
-  bool check_young_list_empty(bool ignore_scan_only_list,
+
+  bool check_young_list_empty(bool check_heap,
                               bool check_sample = true);
   // *** Stuff related to concurrent marking. It's not clear to me that so
@@ -1367,12 +1345,18 @@ private:
     return BitsPerWord << shifter();
   }
-  static size_t gclab_word_size() {
-    return G1ParallelGCAllocBufferSize / HeapWordSize;
+  size_t gclab_word_size() const {
+    return _gclab_word_size;
   }
-  static size_t bitmap_size_in_bits() {
-    size_t bits_in_bitmap = gclab_word_size() >> shifter();
+  // Calculates actual GCLab size in words
+  size_t gclab_real_word_size() const {
+    return bitmap_size_in_bits(pointer_delta(_real_end_word, _start_word))
+           / BitsPerWord;
+  }
+
+  static size_t bitmap_size_in_bits(size_t gclab_word_size) {
+    size_t bits_in_bitmap = gclab_word_size >> shifter();
     // We are going to ensure that the beginning of a word in this
     // bitmap also corresponds to the beginning of a word in the
     // global marking bitmap. To handle the case where a GCLab
@@ -1382,13 +1366,13 @@ private:
     return bits_in_bitmap + BitsPerWord - 1;
   }
 public:
-  GCLabBitMap(HeapWord* heap_start)
-    : BitMap(bitmap_size_in_bits()),
+  GCLabBitMap(HeapWord* heap_start, size_t gclab_word_size)
+    : BitMap(bitmap_size_in_bits(gclab_word_size)),
      _cm(G1CollectedHeap::heap()->concurrent_mark()),
_shifter(shifter()), _shifter(shifter()),
_bitmap_word_covers_words(bitmap_word_covers_words()), _bitmap_word_covers_words(bitmap_word_covers_words()),
_heap_start(heap_start), _heap_start(heap_start),
_gclab_word_size(gclab_word_size()), _gclab_word_size(gclab_word_size),
_real_start_word(NULL), _real_start_word(NULL),
_real_end_word(NULL), _real_end_word(NULL),
_start_word(NULL) _start_word(NULL)
@ -1483,7 +1467,7 @@ public:
mark_bitmap->mostly_disjoint_range_union(this, mark_bitmap->mostly_disjoint_range_union(this,
0, // always start from the start of the bitmap 0, // always start from the start of the bitmap
_start_word, _start_word,
size_in_words()); gclab_real_word_size());
_cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word)); _cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word));
#ifndef PRODUCT #ifndef PRODUCT
@ -1495,9 +1479,10 @@ public:
} }
} }
static size_t bitmap_size_in_words() { size_t bitmap_size_in_words() const {
return (bitmap_size_in_bits() + BitsPerWord - 1) / BitsPerWord; return (bitmap_size_in_bits(gclab_word_size()) + BitsPerWord - 1) / BitsPerWord;
} }
}; };
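The GCLabBitMap rework threads the buffer size through the constructor instead of reading a global flag, but the sizing arithmetic is unchanged: one mark bit covers 2^shifter heap words, and the bit count is padded so the bitmap can be aligned with a word boundary of the global marking bitmap. A standalone check of that arithmetic, assuming 64-bit words and a shifter of 0 (both assumptions, chosen for the example):

    #include <cstddef>
    #include <cassert>

    const size_t kBitsPerWord = 64;  // assumed word size
    const int    kShifter     = 0;   // assumed: one bit per covered word

    size_t bitmap_size_in_bits(size_t gclab_word_size) {
      size_t bits_in_bitmap = gclab_word_size >> kShifter;
      // Pad so a word boundary of this bitmap can coincide with a
      // word boundary of the global marking bitmap.
      return bits_in_bitmap + kBitsPerWord - 1;
    }

    size_t bitmap_size_in_words(size_t gclab_word_size) {
      return (bitmap_size_in_bits(gclab_word_size) + kBitsPerWord - 1)
             / kBitsPerWord;
    }

    int main() {
      // 1024-word GCLab: 1087 padded bits -> 17 bitmap words.
      assert(bitmap_size_in_words(1024) == 17);
      return 0;
    }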
class G1ParGCAllocBuffer: public ParGCAllocBuffer { class G1ParGCAllocBuffer: public ParGCAllocBuffer {
@ -1507,10 +1492,10 @@ private:
GCLabBitMap _bitmap; GCLabBitMap _bitmap;
public: public:
G1ParGCAllocBuffer() : G1ParGCAllocBuffer(size_t gclab_word_size) :
ParGCAllocBuffer(G1ParallelGCAllocBufferSize / HeapWordSize), ParGCAllocBuffer(gclab_word_size),
_during_marking(G1CollectedHeap::heap()->mark_in_progress()), _during_marking(G1CollectedHeap::heap()->mark_in_progress()),
_bitmap(G1CollectedHeap::heap()->reserved_region().start()), _bitmap(G1CollectedHeap::heap()->reserved_region().start(), gclab_word_size),
_retired(false) _retired(false)
{ } { }
@ -1549,8 +1534,10 @@ protected:
typedef GrowableArray<StarTask> OverflowQueue; typedef GrowableArray<StarTask> OverflowQueue;
OverflowQueue* _overflowed_refs; OverflowQueue* _overflowed_refs;
G1ParGCAllocBuffer _alloc_buffers[GCAllocPurposeCount]; G1ParGCAllocBuffer _surviving_alloc_buffer;
ageTable _age_table; G1ParGCAllocBuffer _tenured_alloc_buffer;
G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
ageTable _age_table;
size_t _alloc_buffer_waste; size_t _alloc_buffer_waste;
size_t _undo_waste; size_t _undo_waste;
@ -1619,7 +1606,7 @@ public:
ageTable* age_table() { return &_age_table; } ageTable* age_table() { return &_age_table; }
G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) { G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
return &_alloc_buffers[purpose]; return _alloc_buffers[purpose];
} }
size_t alloc_buffer_waste() { return _alloc_buffer_waste; } size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
@ -1684,15 +1671,15 @@ public:
HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) { HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
HeapWord* obj = NULL; HeapWord* obj = NULL;
if (word_sz * 100 < size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
(size_t)(G1ParallelGCAllocBufferSize / HeapWordSize) * if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
ParallelGCBufferWastePct) {
G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose); G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
assert(gclab_word_size == alloc_buf->word_sz(),
"dynamic resizing is not supported");
add_to_alloc_buffer_waste(alloc_buf->words_remaining()); add_to_alloc_buffer_waste(alloc_buf->words_remaining());
alloc_buf->retire(false, false); alloc_buf->retire(false, false);
HeapWord* buf = HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
_g1h->par_allocate_during_gc(purpose, G1ParallelGCAllocBufferSize / HeapWordSize);
if (buf == NULL) return NULL; // Let caller handle allocation failure. if (buf == NULL) return NULL; // Let caller handle allocation failure.
// Otherwise. // Otherwise.
alloc_buf->set_buf(buf); alloc_buf->set_buf(buf);
@ -1786,9 +1773,9 @@ public:
void retire_alloc_buffers() { void retire_alloc_buffers() {
for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
size_t waste = _alloc_buffers[ap].words_remaining(); size_t waste = _alloc_buffers[ap]->words_remaining();
add_to_alloc_buffer_waste(waste); add_to_alloc_buffer_waste(waste);
_alloc_buffers[ap].retire(true, false); _alloc_buffers[ap]->retire(true, false);
} }
} }
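With the switch from an embedded array of buffers to an array of pointers (so the survivor and tenured buffers can be sized independently), retire_alloc_buffers() now indirects through the pointer. A simplified sketch of the retire-and-count-waste loop, with a stand-in buffer type:

    #include <cstddef>

    struct PlabSketch {
      size_t word_size;
      size_t used;
      size_t words_remaining() const { return word_size - used; }
      void   retire()                { used = word_size; }  // stand-in for retire(true, false)
    };

    enum { SurvivorPurpose = 0, TenuredPurpose = 1, PurposeCount = 2 };

    void retire_alloc_buffers(PlabSketch* buffers[PurposeCount],
                              size_t&    alloc_buffer_waste) {
      for (int ap = 0; ap < PurposeCount; ++ap) {
        // Whatever is left unfilled counts as waste before retiring.
        alloc_buffer_waste += buffers[ap]->words_remaining();
        buffers[ap]->retire();
      }
    }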

File diff suppressed because it is too large

View File

@ -1,5 +1,5 @@
/* /*
* Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -61,7 +61,6 @@ class MainBodySummary: public CHeapObj {
define_num_seq(parallel) // parallel only define_num_seq(parallel) // parallel only
define_num_seq(ext_root_scan) define_num_seq(ext_root_scan)
define_num_seq(mark_stack_scan) define_num_seq(mark_stack_scan)
define_num_seq(scan_only)
define_num_seq(update_rs) define_num_seq(update_rs)
define_num_seq(scan_rs) define_num_seq(scan_rs)
define_num_seq(scan_new_refs) // Only for temp use; added to define_num_seq(scan_new_refs) // Only for temp use; added to
@ -174,8 +173,6 @@ protected:
double* _par_last_ext_root_scan_times_ms; double* _par_last_ext_root_scan_times_ms;
double* _par_last_mark_stack_scan_times_ms; double* _par_last_mark_stack_scan_times_ms;
double* _par_last_scan_only_times_ms;
double* _par_last_scan_only_regions_scanned;
double* _par_last_update_rs_start_times_ms; double* _par_last_update_rs_start_times_ms;
double* _par_last_update_rs_times_ms; double* _par_last_update_rs_times_ms;
double* _par_last_update_rs_processed_buffers; double* _par_last_update_rs_processed_buffers;
@ -196,7 +193,6 @@ protected:
bool _adaptive_young_list_length; bool _adaptive_young_list_length;
size_t _young_list_min_length; size_t _young_list_min_length;
size_t _young_list_target_length; size_t _young_list_target_length;
size_t _young_list_so_prefix_length;
size_t _young_list_fixed_length; size_t _young_list_fixed_length;
size_t _young_cset_length; size_t _young_cset_length;
@ -234,7 +230,6 @@ private:
TruncatedSeq* _pending_card_diff_seq; TruncatedSeq* _pending_card_diff_seq;
TruncatedSeq* _rs_length_diff_seq; TruncatedSeq* _rs_length_diff_seq;
TruncatedSeq* _cost_per_card_ms_seq; TruncatedSeq* _cost_per_card_ms_seq;
TruncatedSeq* _cost_per_scan_only_region_ms_seq;
TruncatedSeq* _fully_young_cards_per_entry_ratio_seq; TruncatedSeq* _fully_young_cards_per_entry_ratio_seq;
TruncatedSeq* _partially_young_cards_per_entry_ratio_seq; TruncatedSeq* _partially_young_cards_per_entry_ratio_seq;
TruncatedSeq* _cost_per_entry_ms_seq; TruncatedSeq* _cost_per_entry_ms_seq;
@ -249,19 +244,16 @@ private:
TruncatedSeq* _rs_lengths_seq; TruncatedSeq* _rs_lengths_seq;
TruncatedSeq* _cost_per_byte_ms_during_cm_seq; TruncatedSeq* _cost_per_byte_ms_during_cm_seq;
TruncatedSeq* _cost_per_scan_only_region_ms_during_cm_seq;
TruncatedSeq* _young_gc_eff_seq; TruncatedSeq* _young_gc_eff_seq;
TruncatedSeq* _max_conc_overhead_seq; TruncatedSeq* _max_conc_overhead_seq;
size_t _recorded_young_regions; size_t _recorded_young_regions;
size_t _recorded_scan_only_regions;
size_t _recorded_non_young_regions; size_t _recorded_non_young_regions;
size_t _recorded_region_num; size_t _recorded_region_num;
size_t _free_regions_at_end_of_collection; size_t _free_regions_at_end_of_collection;
size_t _scan_only_regions_at_end_of_collection;
size_t _recorded_rs_lengths; size_t _recorded_rs_lengths;
size_t _max_rs_lengths; size_t _max_rs_lengths;
@ -277,7 +269,6 @@ private:
double _predicted_survival_ratio; double _predicted_survival_ratio;
double _predicted_rs_update_time_ms; double _predicted_rs_update_time_ms;
double _predicted_rs_scan_time_ms; double _predicted_rs_scan_time_ms;
double _predicted_scan_only_scan_time_ms;
double _predicted_object_copy_time_ms; double _predicted_object_copy_time_ms;
double _predicted_constant_other_time_ms; double _predicted_constant_other_time_ms;
double _predicted_young_other_time_ms; double _predicted_young_other_time_ms;
@ -344,8 +335,6 @@ public:
bool verify_young_ages(); bool verify_young_ages();
#endif // PRODUCT #endif // PRODUCT
void tag_scan_only(size_t short_lived_scan_only_length);
double get_new_prediction(TruncatedSeq* seq) { double get_new_prediction(TruncatedSeq* seq) {
return MAX2(seq->davg() + sigma() * seq->dsd(), return MAX2(seq->davg() + sigma() * seq->dsd(),
seq->davg() * confidence_factor(seq->num())); seq->davg() * confidence_factor(seq->num()));
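In scalar terms, get_new_prediction() computes max(avg + sigma * sd, avg * confidence(n)): the prediction is padded by sigma standard deviations, and by a confidence factor while the sequence has few samples. A tiny numeric sketch; the sigma default and the confidence schedule here are illustrative values, not G1's:

    #include <algorithm>

    double confidence_factor(int num_samples) {
      // Illustrative schedule: demand a larger pad with few samples.
      return num_samples < 5 ? 2.0 : 1.0;
    }

    double get_new_prediction(double avg, double sd, int n, double sigma = 1.0) {
      return std::max(avg + sigma * sd, avg * confidence_factor(n));
    }

    // e.g. get_new_prediction(10.0, 2.0, 3) == std::max(12.0, 20.0) == 20.0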
@ -431,23 +420,6 @@ public:
get_new_prediction(_partially_young_cost_per_entry_ms_seq); get_new_prediction(_partially_young_cost_per_entry_ms_seq);
} }
double predict_scan_only_time_ms_during_cm(size_t scan_only_region_num) {
if (_cost_per_scan_only_region_ms_during_cm_seq->num() < 3)
return 1.5 * (double) scan_only_region_num *
get_new_prediction(_cost_per_scan_only_region_ms_seq);
else
return (double) scan_only_region_num *
get_new_prediction(_cost_per_scan_only_region_ms_during_cm_seq);
}
double predict_scan_only_time_ms(size_t scan_only_region_num) {
if (_in_marking_window_im)
return predict_scan_only_time_ms_during_cm(scan_only_region_num);
else
return (double) scan_only_region_num *
get_new_prediction(_cost_per_scan_only_region_ms_seq);
}
double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) { double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) {
if (_cost_per_byte_ms_during_cm_seq->num() < 3) if (_cost_per_byte_ms_during_cm_seq->num() < 3)
return 1.1 * (double) bytes_to_copy * return 1.1 * (double) bytes_to_copy *
@ -490,24 +462,21 @@ public:
size_t predict_bytes_to_copy(HeapRegion* hr); size_t predict_bytes_to_copy(HeapRegion* hr);
double predict_region_elapsed_time_ms(HeapRegion* hr, bool young); double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);
// for use by: calculate_optimal_so_length(length) // for use by: calculate_young_list_target_length(rs_length)
void predict_gc_eff(size_t young_region_num, bool predict_will_fit(size_t young_region_num,
size_t so_length, double base_time_ms,
double base_time_ms, size_t init_free_regions,
double *gc_eff, double target_pause_time_ms);
double *pause_time_ms);
// for use by: calculate_young_list_target_config(rs_length)
bool predict_gc_eff(size_t young_region_num,
size_t so_length,
double base_time_with_so_ms,
size_t init_free_regions,
double target_pause_time_ms,
double* gc_eff);
void start_recording_regions(); void start_recording_regions();
void record_cset_region(HeapRegion* hr, bool young); void record_cset_region_info(HeapRegion* hr, bool young);
void record_scan_only_regions(size_t scan_only_length); void record_non_young_cset_region(HeapRegion* hr);
void set_recorded_young_regions(size_t n_regions);
void set_recorded_young_bytes(size_t bytes);
void set_recorded_rs_lengths(size_t rs_lengths);
void set_predicted_bytes_to_copy(size_t bytes);
void end_recording_regions(); void end_recording_regions();
void record_vtime_diff_ms(double vtime_diff_ms) { void record_vtime_diff_ms(double vtime_diff_ms) {
@ -638,11 +607,74 @@ protected:
void update_recent_gc_times(double end_time_sec, double elapsed_ms); void update_recent_gc_times(double end_time_sec, double elapsed_ms);
// The head of the list (via "next_in_collection_set()") representing the // The head of the list (via "next_in_collection_set()") representing the
// current collection set. // current collection set. Set from the incrementally built collection
// set at the start of the pause.
HeapRegion* _collection_set; HeapRegion* _collection_set;
// The number of regions in the collection set. Set from the incrementally
// built collection set at the start of an evacuation pause.
size_t _collection_set_size; size_t _collection_set_size;
// The number of bytes in the collection set before the pause. Set from
// the incrementally built collection set at the start of an evacuation
// pause.
size_t _collection_set_bytes_used_before; size_t _collection_set_bytes_used_before;
// The associated information that is maintained while the incremental
// collection set is being built with young regions. Used to populate
// the recorded info for the evacuation pause.
enum CSetBuildType {
Active, // We are actively building the collection set
Inactive // We are not actively building the collection set
};
CSetBuildType _inc_cset_build_state;
// The head of the incrementally built collection set.
HeapRegion* _inc_cset_head;
// The tail of the incrementally built collection set.
HeapRegion* _inc_cset_tail;
// The number of regions in the incrementally built collection set.
// Used to set _collection_set_size at the start of an evacuation
// pause.
size_t _inc_cset_size;
// Used as the index in the surviving young words structure
// which tracks the amount of space, for each young region,
// that survives the pause.
size_t _inc_cset_young_index;
// The number of bytes in the incrementally built collection set.
// Used to set _collection_set_bytes_used_before at the start of
// an evacuation pause.
size_t _inc_cset_bytes_used_before;
// Used to record the highest end of any heap region in the collection set
HeapWord* _inc_cset_max_finger;
// The number of recorded used bytes in the young regions
// of the collection set. This is the sum of the used() bytes
// of retired young regions in the collection set.
size_t _inc_cset_recorded_young_bytes;
// The RSet lengths recorded for regions in the collection set
// (updated by the periodic sampling of the regions in the
// young list/collection set).
size_t _inc_cset_recorded_rs_lengths;
// The predicted elapsed time it will take to collect the regions
// in the collection set (updated by the periodic sampling of the
// regions in the young list/collection set).
double _inc_cset_predicted_elapsed_time_ms;
// The predicted bytes to copy for the regions in the collection
// set (updated by the periodic sampling of the regions in the
// young list/collection set).
size_t _inc_cset_predicted_bytes_to_copy;
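The block of fields above is the core of the new incremental collection set: young regions are linked into a list as they are retired, while running totals (region count, used bytes, sampled RSet lengths, predicted times) are maintained so an evacuation pause can start from precomputed numbers. A compact sketch of the add-region paths under those assumptions; all names are illustrative:

    #include <cstddef>

    struct RegionSketch {
      RegionSketch* next_in_cset;
      size_t used_bytes;
      size_t rs_length;
      double predicted_time_ms;
    };

    struct IncCSet {
      RegionSketch* head; RegionSketch* tail;
      size_t size, bytes_used_before, recorded_rs_lengths;
      double predicted_elapsed_time_ms;

      IncCSet() : head(NULL), tail(NULL), size(0), bytes_used_before(0),
                  recorded_rs_lengths(0), predicted_elapsed_time_ms(0.0) {}

      // Aggregate bookkeeping shared by both insertion paths.
      void add_common(RegionSketch* r) {
        ++size;
        bytes_used_before         += r->used_bytes;
        recorded_rs_lengths       += r->rs_length;
        predicted_elapsed_time_ms += r->predicted_time_ms;
      }
      // Add at the left-hand side (front) of the list.
      void add_lhs(RegionSketch* r) {
        add_common(r);
        r->next_in_cset = head; head = r;
        if (tail == NULL) tail = r;
      }
      // Add at the right-hand side (back) of the list.
      void add_rhs(RegionSketch* r) {
        add_common(r);
        r->next_in_cset = NULL;
        if (tail != NULL) tail->next_in_cset = r; else head = r;
        tail = r;
      }
    };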
// Info about marking. // Info about marking.
int _n_marks; // Sticky at 2, so we know when we've done at least 2. int _n_marks; // Sticky at 2, so we know when we've done at least 2.
@ -761,9 +793,8 @@ protected:
double _mark_closure_time_ms; double _mark_closure_time_ms;
void calculate_young_list_min_length(); void calculate_young_list_min_length();
void calculate_young_list_target_config(); void calculate_young_list_target_length();
void calculate_young_list_target_config(size_t rs_lengths); void calculate_young_list_target_length(size_t rs_lengths);
size_t calculate_optimal_so_length(size_t young_list_length);
public: public:
@ -868,11 +899,6 @@ public:
_par_last_mark_stack_scan_times_ms[worker_i] = ms; _par_last_mark_stack_scan_times_ms[worker_i] = ms;
} }
void record_scan_only_time(int worker_i, double ms, int n) {
_par_last_scan_only_times_ms[worker_i] = ms;
_par_last_scan_only_regions_scanned[worker_i] = (double) n;
}
void record_satb_drain_time(double ms) { void record_satb_drain_time(double ms) {
_cur_satb_drain_time_ms = ms; _cur_satb_drain_time_ms = ms;
_satb_drain_time_set = true; _satb_drain_time_set = true;
@ -987,20 +1013,67 @@ public:
// Choose a new collection set. Marks the chosen regions as being // Choose a new collection set. Marks the chosen regions as being
// "in_collection_set", and links them together. The head and number of // "in_collection_set", and links them together. The head and number of
// the collection set are available via access methods. // the collection set are available via access methods.
virtual void choose_collection_set() = 0; virtual bool choose_collection_set() = 0;
void clear_collection_set() { _collection_set = NULL; }
// The head of the list (via "next_in_collection_set()") representing the // The head of the list (via "next_in_collection_set()") representing the
// current collection set. // current collection set.
HeapRegion* collection_set() { return _collection_set; } HeapRegion* collection_set() { return _collection_set; }
void clear_collection_set() { _collection_set = NULL; }
// The number of elements in the current collection set. // The number of elements in the current collection set.
size_t collection_set_size() { return _collection_set_size; } size_t collection_set_size() { return _collection_set_size; }
// Add "hr" to the CS. // Add "hr" to the CS.
void add_to_collection_set(HeapRegion* hr); void add_to_collection_set(HeapRegion* hr);
// Incremental CSet Support
// The head of the incrementally built collection set.
HeapRegion* inc_cset_head() { return _inc_cset_head; }
// The tail of the incrementally built collection set.
HeapRegion* inc_set_tail() { return _inc_cset_tail; }
// The number of elements in the incrementally built collection set.
size_t inc_cset_size() { return _inc_cset_size; }
// Initialize incremental collection set info.
void start_incremental_cset_building();
void clear_incremental_cset() {
_inc_cset_head = NULL;
_inc_cset_tail = NULL;
}
// Stop adding regions to the incremental collection set
void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }
// Add/remove information about hr to the aggregated information
// for the incrementally built collection set.
void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);
void remove_from_incremental_cset_info(HeapRegion* hr);
// Update information about hr in the aggregated information for
// the incrementally built collection set.
void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);
private:
// Update the incremental cset information when adding a region
// (should not be called directly).
void add_region_to_incremental_cset_common(HeapRegion* hr);
public:
// Add hr to the LHS of the incremental collection set.
void add_region_to_incremental_cset_lhs(HeapRegion* hr);
// Add hr to the RHS of the incremental collection set.
void add_region_to_incremental_cset_rhs(HeapRegion* hr);
#ifndef PRODUCT
void print_collection_set(HeapRegion* list_head, outputStream* st);
#endif // !PRODUCT
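update_incremental_cset_info() exists because the periodic young-list sampling can revise a region's RSet length after the region has already been added: the aggregate totals are adjusted by backing out the old values and adding the new ones, rather than rebuilt from scratch. A sketch of that pattern, reusing the IncCSet/RegionSketch types from the sketch above:

    void update_incremental_cset_info_sketch(IncCSet* cset,
                                             RegionSketch* r,
                                             size_t new_rs_length,
                                             double new_predicted_ms) {
      // Back out the values recorded when the region was added...
      cset->recorded_rs_lengths       -= r->rs_length;
      cset->predicted_elapsed_time_ms -= r->predicted_time_ms;
      // ...then record and accumulate the freshly sampled ones.
      r->rs_length         = new_rs_length;
      r->predicted_time_ms = new_predicted_ms;
      cset->recorded_rs_lengths       += new_rs_length;
      cset->predicted_elapsed_time_ms += new_predicted_ms;
    }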
bool initiate_conc_mark_if_possible() { return _initiate_conc_mark_if_possible; } bool initiate_conc_mark_if_possible() { return _initiate_conc_mark_if_possible; }
void set_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = true; } void set_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = true; }
void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; } void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }
@ -1191,7 +1264,7 @@ class G1CollectorPolicy_BestRegionsFirst: public G1CollectorPolicy {
// If the estimate is less than desirable, resize if possible. // If the estimate is less than desirable, resize if possible.
void expand_if_possible(size_t numRegions); void expand_if_possible(size_t numRegions);
virtual void choose_collection_set(); virtual bool choose_collection_set();
virtual void record_collection_pause_start(double start_time_sec, virtual void record_collection_pause_start(double start_time_sec,
size_t start_used); size_t start_used);
virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes, virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,

View File

@ -1,5 +1,5 @@
/* /*
* Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -31,6 +31,12 @@ void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
bool clear_all_softrefs) { bool clear_all_softrefs) {
assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint"); assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
SharedHeap* sh = SharedHeap::heap();
#ifdef ASSERT
if (sh->collector_policy()->should_clear_all_soft_refs()) {
assert(clear_all_softrefs, "Policy should have been checked earlier");
}
#endif
// hook up weak ref data so it can be used during Mark-Sweep // hook up weak ref data so it can be used during Mark-Sweep
assert(GenMarkSweep::ref_processor() == NULL, "no stomping"); assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
assert(rp != NULL, "should be non-NULL"); assert(rp != NULL, "should be non-NULL");
@ -44,7 +50,6 @@ void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
// Increment the invocation count for the permanent generation, since it is // Increment the invocation count for the permanent generation, since it is
// implicitly collected whenever we do a full mark sweep collection. // implicitly collected whenever we do a full mark sweep collection.
SharedHeap* sh = SharedHeap::heap();
sh->perm_gen()->stat_record()->invocations++; sh->perm_gen()->stat_record()->invocations++;
bool marked_for_unloading = false; bool marked_for_unloading = false;

View File

@ -1,5 +1,5 @@
/* /*
* Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -28,9 +28,6 @@
#define G1_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct, manageable, product_rw) \ #define G1_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct, manageable, product_rw) \
\ \
product(intx, G1ParallelGCAllocBufferSize, 8*K, \
"Size of parallel G1 allocation buffers in to-space.") \
\
product(intx, G1ConfidencePercent, 50, \ product(intx, G1ConfidencePercent, 50, \
"Confidence level for MMU/pause predictions") \ "Confidence level for MMU/pause predictions") \
\ \
@ -229,10 +226,6 @@
"the number of regions for which we'll print a surv rate " \ "the number of regions for which we'll print a surv rate " \
"summary.") \ "summary.") \
\ \
develop(bool, G1UseScanOnlyPrefix, false, \
"It determines whether the system will calculate an optimum " \
"scan-only set.") \
\
product(intx, G1ReservePercent, 10, \ product(intx, G1ReservePercent, 10, \
"It determines the minimum reserve we should have in the heap " \ "It determines the minimum reserve we should have in the heap " \
"to minimize the probability of promotion failure.") \ "to minimize the probability of promotion failure.") \

View File

@ -1,5 +1,5 @@
/* /*
* Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -75,6 +75,16 @@ public:
virtual void do_oop(narrowOop* p) { do_oop_work(p); } virtual void do_oop(narrowOop* p) { do_oop_work(p); }
virtual void do_oop( oop* p) { do_oop_work(p); } virtual void do_oop( oop* p) { do_oop_work(p); }
void print_object(outputStream* out, oop obj) {
#ifdef PRODUCT
klassOop k = obj->klass();
const char* class_name = instanceKlass::cast(k)->external_name();
out->print_cr("class name %s", class_name);
#else // PRODUCT
obj->print_on(out);
#endif // PRODUCT
}
template <class T> void do_oop_work(T* p) { template <class T> void do_oop_work(T* p) {
assert(_containing_obj != NULL, "Precondition"); assert(_containing_obj != NULL, "Precondition");
assert(!_g1h->is_obj_dead_cond(_containing_obj, _use_prev_marking), assert(!_g1h->is_obj_dead_cond(_containing_obj, _use_prev_marking),
@ -90,21 +100,29 @@ public:
gclog_or_tty->print_cr("----------"); gclog_or_tty->print_cr("----------");
} }
if (!_g1h->is_in_closed_subset(obj)) { if (!_g1h->is_in_closed_subset(obj)) {
HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
gclog_or_tty->print_cr("Field "PTR_FORMAT gclog_or_tty->print_cr("Field "PTR_FORMAT
" of live obj "PTR_FORMAT " of live obj "PTR_FORMAT" in region "
" points to obj "PTR_FORMAT "["PTR_FORMAT", "PTR_FORMAT")",
" not in the heap.", p, (void*) _containing_obj,
p, (void*) _containing_obj, (void*) obj); from->bottom(), from->end());
print_object(gclog_or_tty, _containing_obj);
gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap",
(void*) obj);
} else { } else {
HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
HeapRegion* to = _g1h->heap_region_containing((HeapWord*)obj);
gclog_or_tty->print_cr("Field "PTR_FORMAT gclog_or_tty->print_cr("Field "PTR_FORMAT
" of live obj "PTR_FORMAT " of live obj "PTR_FORMAT" in region "
" points to dead obj "PTR_FORMAT".", "["PTR_FORMAT", "PTR_FORMAT")",
p, (void*) _containing_obj, (void*) obj); p, (void*) _containing_obj,
from->bottom(), from->end());
print_object(gclog_or_tty, _containing_obj);
gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region "
"["PTR_FORMAT", "PTR_FORMAT")",
(void*) obj, to->bottom(), to->end());
print_object(gclog_or_tty, obj);
} }
gclog_or_tty->print_cr("Live obj:");
_containing_obj->print_on(gclog_or_tty);
gclog_or_tty->print_cr("Bad referent:");
obj->print_on(gclog_or_tty);
gclog_or_tty->print_cr("----------"); gclog_or_tty->print_cr("----------");
_failures = true; _failures = true;
failed = true; failed = true;
@ -432,7 +450,9 @@ HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
_young_type(NotYoung), _next_young_region(NULL), _young_type(NotYoung), _next_young_region(NULL),
_next_dirty_cards_region(NULL), _next_dirty_cards_region(NULL),
_young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1), _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
_rem_set(NULL), _zfs(NotZeroFilled) _rem_set(NULL), _zfs(NotZeroFilled),
_recorded_rs_length(0), _predicted_elapsed_time_ms(0),
_predicted_bytes_to_copy(0)
{ {
_orig_end = mr.end(); _orig_end = mr.end();
// Note that initialize() will set the start of the unmarked area of the // Note that initialize() will set the start of the unmarked area of the
@ -715,7 +735,7 @@ void HeapRegion::print_on(outputStream* st) const {
else else
st->print(" "); st->print(" ");
if (is_young()) if (is_young())
st->print(is_scan_only() ? " SO" : (is_survivor() ? " SU" : " Y ")); st->print(is_survivor() ? " SU" : " Y ");
else else
st->print(" "); st->print(" ");
if (is_empty()) if (is_empty())
@ -723,6 +743,8 @@ void HeapRegion::print_on(outputStream* st) const {
else else
st->print(" "); st->print(" ");
st->print(" %5d", _gc_time_stamp); st->print(" %5d", _gc_time_stamp);
st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT,
prev_top_at_mark_start(), next_top_at_mark_start());
G1OffsetTableContigSpace::print_on(st); G1OffsetTableContigSpace::print_on(st);
} }

View File

@ -1,5 +1,5 @@
/* /*
* Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -247,7 +247,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
enum YoungType { enum YoungType {
NotYoung, // a region is not young NotYoung, // a region is not young
ScanOnly, // a region is young and scan-only
Young, // a region is young Young, // a region is young
Survivor // a region is young and it contains Survivor // a region is young and it contains
// survivor // survivor
@ -292,6 +291,20 @@ class HeapRegion: public G1OffsetTableContigSpace {
_young_type = new_type; _young_type = new_type;
} }
// Cached attributes used in the collection set policy information
// The RSet length that was added to the total value
// for the collection set.
size_t _recorded_rs_length;
// The predicted elapsed time that was added to total value
// for the collection set.
double _predicted_elapsed_time_ms;
// The predicted number of bytes to copy that was added to
// the total value for the collection set.
size_t _predicted_bytes_to_copy;
public: public:
// If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros. // If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros.
HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray, HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
@ -614,7 +627,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
// </PREDICTION> // </PREDICTION>
bool is_young() const { return _young_type != NotYoung; } bool is_young() const { return _young_type != NotYoung; }
bool is_scan_only() const { return _young_type == ScanOnly; }
bool is_survivor() const { return _young_type == Survivor; } bool is_survivor() const { return _young_type == Survivor; }
int young_index_in_cset() const { return _young_index_in_cset; } int young_index_in_cset() const { return _young_index_in_cset; }
@ -629,12 +641,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
return _surv_rate_group->age_in_group(_age_index); return _surv_rate_group->age_in_group(_age_index);
} }
void recalculate_age_in_surv_rate_group() {
assert( _surv_rate_group != NULL, "pre-condition" );
assert( _age_index > -1, "pre-condition" );
_age_index = _surv_rate_group->recalculate_age_index(_age_index);
}
void record_surv_words_in_group(size_t words_survived) { void record_surv_words_in_group(size_t words_survived) {
assert( _surv_rate_group != NULL, "pre-condition" ); assert( _surv_rate_group != NULL, "pre-condition" );
assert( _age_index > -1, "pre-condition" ); assert( _age_index > -1, "pre-condition" );
@ -676,8 +682,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
void set_young() { set_young_type(Young); } void set_young() { set_young_type(Young); }
void set_scan_only() { set_young_type(ScanOnly); }
void set_survivor() { set_young_type(Survivor); } void set_survivor() { set_young_type(Survivor); }
void set_not_young() { set_young_type(NotYoung); } void set_not_young() { set_young_type(NotYoung); }
@ -775,6 +779,22 @@ class HeapRegion: public G1OffsetTableContigSpace {
_zero_filler = NULL; _zero_filler = NULL;
} }
size_t recorded_rs_length() const { return _recorded_rs_length; }
double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
size_t predicted_bytes_to_copy() const { return _predicted_bytes_to_copy; }
void set_recorded_rs_length(size_t rs_length) {
_recorded_rs_length = rs_length;
}
void set_predicted_elapsed_time_ms(double ms) {
_predicted_elapsed_time_ms = ms;
}
void set_predicted_bytes_to_copy(size_t bytes) {
_predicted_bytes_to_copy = bytes;
}
#define HeapRegion_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \ #define HeapRegion_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl); virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DECL) SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DECL)

View File

@ -662,8 +662,6 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
prt = PosParPRT::alloc(from_hr); prt = PosParPRT::alloc(from_hr);
} }
prt->init(from_hr); prt->init(from_hr);
// Record the outgoing pointer in the from_region's outgoing bitmap.
from_hr->rem_set()->add_outgoing_reference(hr());
PosParPRT* first_prt = _fine_grain_regions[ind]; PosParPRT* first_prt = _fine_grain_regions[ind];
prt->set_next(first_prt); // XXX Maybe move to init? prt->set_next(first_prt); // XXX Maybe move to init?
@ -1073,11 +1071,7 @@ int HeapRegionRemSet::num_par_rem_sets() {
HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa, HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
HeapRegion* hr) HeapRegion* hr)
: _bosa(bosa), _other_regions(hr), : _bosa(bosa), _other_regions(hr), _iter_state(Unclaimed) { }
_outgoing_region_map(G1CollectedHeap::heap()->max_regions(),
false /* in-resource-area */),
_iter_state(Unclaimed)
{}
void HeapRegionRemSet::setup_remset_size() { void HeapRegionRemSet::setup_remset_size() {
@ -1148,30 +1142,11 @@ void HeapRegionRemSet::par_cleanup() {
PosParPRT::par_contract_all(); PosParPRT::par_contract_all();
} }
void HeapRegionRemSet::add_outgoing_reference(HeapRegion* to_hr) {
_outgoing_region_map.par_at_put(to_hr->hrs_index(), 1);
}
void HeapRegionRemSet::clear() { void HeapRegionRemSet::clear() {
clear_outgoing_entries();
_outgoing_region_map.clear();
_other_regions.clear(); _other_regions.clear();
assert(occupied() == 0, "Should be clear."); assert(occupied() == 0, "Should be clear.");
} }
void HeapRegionRemSet::clear_outgoing_entries() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
size_t i = _outgoing_region_map.get_next_one_offset(0);
while (i < _outgoing_region_map.size()) {
HeapRegion* to_region = g1h->region_at(i);
if (!to_region->in_collection_set()) {
to_region->rem_set()->clear_incoming_entry(hr());
}
i = _outgoing_region_map.get_next_one_offset(i+1);
}
}
void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs, void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,
BitMap* region_bm, BitMap* card_bm) { BitMap* region_bm, BitMap* card_bm) {
_other_regions.scrub(ctbs, region_bm, card_bm); _other_regions.scrub(ctbs, region_bm, card_bm);

View File

@ -179,13 +179,6 @@ private:
OtherRegionsTable _other_regions; OtherRegionsTable _other_regions;
// One set bit for every region that has an entry for this one.
BitMap _outgoing_region_map;
// Clear entries for the current region in any rem sets named in
// the _outgoing_region_map.
void clear_outgoing_entries();
enum ParIterState { Unclaimed, Claimed, Complete }; enum ParIterState { Unclaimed, Claimed, Complete };
volatile ParIterState _iter_state; volatile ParIterState _iter_state;
volatile jlong _iter_claimed; volatile jlong _iter_claimed;
@ -243,10 +236,6 @@ public:
_other_regions.add_reference(from, tid); _other_regions.add_reference(from, tid);
} }
// Records the fact that the current region contains an outgoing
// reference into "to_hr".
void add_outgoing_reference(HeapRegion* to_hr);
// Removes any entries shown by the given bitmaps to contain only dead // Removes any entries shown by the given bitmaps to contain only dead
// objects. // objects.
void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm); void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm);

View File

@ -1,5 +1,5 @@
/* /*
* Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -55,7 +55,6 @@ SurvRateGroup::SurvRateGroup(G1CollectorPolicy* g1p,
void SurvRateGroup::reset() void SurvRateGroup::reset()
{ {
_all_regions_allocated = 0; _all_regions_allocated = 0;
_scan_only_prefix = 0;
_setup_seq_num = 0; _setup_seq_num = 0;
_stats_arrays_length = 0; _stats_arrays_length = 0;
_accum_surv_rate = 0.0; _accum_surv_rate = 0.0;
@ -74,7 +73,7 @@ void SurvRateGroup::reset()
void void
SurvRateGroup::start_adding_regions() { SurvRateGroup::start_adding_regions() {
_setup_seq_num = _stats_arrays_length; _setup_seq_num = _stats_arrays_length;
_region_num = _scan_only_prefix; _region_num = 0;
_accum_surv_rate = 0.0; _accum_surv_rate = 0.0;
#if 0 #if 0
@ -163,12 +162,6 @@ SurvRateGroup::next_age_index() {
return (int) ++_all_regions_allocated; return (int) ++_all_regions_allocated;
} }
void
SurvRateGroup::record_scan_only_prefix(size_t scan_only_prefix) {
guarantee( scan_only_prefix <= _region_num, "pre-condition" );
_scan_only_prefix = scan_only_prefix;
}
void void
SurvRateGroup::record_surviving_words(int age_in_group, size_t surv_words) { SurvRateGroup::record_surviving_words(int age_in_group, size_t surv_words) {
guarantee( 0 <= age_in_group && (size_t) age_in_group < _region_num, guarantee( 0 <= age_in_group && (size_t) age_in_group < _region_num,
@ -218,13 +211,12 @@ SurvRateGroup::all_surviving_words_recorded(bool propagate) {
#ifndef PRODUCT #ifndef PRODUCT
void void
SurvRateGroup::print() { SurvRateGroup::print() {
gclog_or_tty->print_cr("Surv Rate Group: %s (%d entries, %d scan-only)", gclog_or_tty->print_cr("Surv Rate Group: %s (%d entries)",
_name, _region_num, _scan_only_prefix); _name, _region_num);
for (size_t i = 0; i < _region_num; ++i) { for (size_t i = 0; i < _region_num; ++i) {
gclog_or_tty->print_cr(" age %4d surv rate %6.2lf %% pred %6.2lf %%%s", gclog_or_tty->print_cr(" age %4d surv rate %6.2lf %% pred %6.2lf %%",
i, _surv_rate[i] * 100.0, i, _surv_rate[i] * 100.0,
_g1p->get_new_prediction(_surv_rate_pred[i]) * 100.0, _g1p->get_new_prediction(_surv_rate_pred[i]) * 100.0);
(i < _scan_only_prefix) ? " S-O" : " ");
} }
} }

View File

@ -1,5 +1,5 @@
/* /*
* Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -41,7 +41,6 @@ private:
int _all_regions_allocated; int _all_regions_allocated;
size_t _region_num; size_t _region_num;
size_t _scan_only_prefix;
size_t _setup_seq_num; size_t _setup_seq_num;
public: public:
@ -51,13 +50,11 @@ public:
void reset(); void reset();
void start_adding_regions(); void start_adding_regions();
void stop_adding_regions(); void stop_adding_regions();
void record_scan_only_prefix(size_t scan_only_prefix);
void record_surviving_words(int age_in_group, size_t surv_words); void record_surviving_words(int age_in_group, size_t surv_words);
void all_surviving_words_recorded(bool propagate); void all_surviving_words_recorded(bool propagate);
const char* name() { return _name; } const char* name() { return _name; }
size_t region_num() { return _region_num; } size_t region_num() { return _region_num; }
size_t scan_only_length() { return _scan_only_prefix; }
double accum_surv_rate_pred(int age) { double accum_surv_rate_pred(int age) {
assert(age >= 0, "must be"); assert(age >= 0, "must be");
if ((size_t)age < _stats_arrays_length) if ((size_t)age < _stats_arrays_length)
@ -82,17 +79,12 @@ public:
int next_age_index(); int next_age_index();
int age_in_group(int age_index) { int age_in_group(int age_index) {
int ret = (int) (_all_regions_allocated - age_index); int ret = (int) (_all_regions_allocated - age_index);
assert( ret >= 0, "invariant" ); assert( ret >= 0, "invariant" );
return ret; return ret;
} }
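With the scan-only prefix gone, the SurvRateGroup age bookkeeping reduces to a single counter: a region's age index is the allocation count it was handed by next_age_index(), and its age in the group is simply how many regions were allocated after it. A small worked example:

    #include <cassert>

    struct SurvRateGroupSketch {
      int all_regions_allocated;
      int next_age_index() { return ++all_regions_allocated; }
      int age_in_group(int age_index) const {
        int ret = all_regions_allocated - age_index;
        assert(ret >= 0);
        return ret;
      }
    };

    int main() {
      SurvRateGroupSketch g = { 0 };
      int first = g.next_age_index();   // 1
      g.next_age_index();               // 2
      int third = g.next_age_index();   // 3
      assert(g.age_in_group(first) == 2);  // two regions allocated after it
      assert(g.age_in_group(third) == 0);  // the most recent region
      return 0;
    }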
int recalculate_age_index(int age_index) {
int new_age_index = (int) _scan_only_prefix - age_in_group(age_index);
guarantee( new_age_index >= 0, "invariant" );
return new_age_index;
}
void finished_recalculating_age_indexes() { void finished_recalculating_age_indexes() {
_all_regions_allocated = (int) _scan_only_prefix; _all_regions_allocated = 0;
} }
#ifndef PRODUCT #ifndef PRODUCT

View File

@ -1,5 +1,5 @@
// //
// Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. // Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
// //
// This code is free software; you can redistribute it and/or modify it // This code is free software; you can redistribute it and/or modify it
@ -161,8 +161,10 @@ parMarkBitMap.cpp psParallelCompact.hpp
parMarkBitMap.hpp bitMap.inline.hpp parMarkBitMap.hpp bitMap.inline.hpp
parMarkBitMap.hpp psVirtualspace.hpp parMarkBitMap.hpp psVirtualspace.hpp
psAdaptiveSizePolicy.cpp collectorPolicy.hpp
psAdaptiveSizePolicy.cpp gcPolicyCounters.hpp psAdaptiveSizePolicy.cpp gcPolicyCounters.hpp
psAdaptiveSizePolicy.cpp gcCause.hpp psAdaptiveSizePolicy.cpp gcCause.hpp
psAdaptiveSizePolicy.cpp generationSizer.hpp
psAdaptiveSizePolicy.cpp psAdaptiveSizePolicy.hpp psAdaptiveSizePolicy.cpp psAdaptiveSizePolicy.hpp
psAdaptiveSizePolicy.cpp psGCAdaptivePolicyCounters.hpp psAdaptiveSizePolicy.cpp psGCAdaptivePolicyCounters.hpp
psAdaptiveSizePolicy.cpp psScavenge.hpp psAdaptiveSizePolicy.cpp psScavenge.hpp
@ -215,6 +217,7 @@ psMarkSweep.cpp events.hpp
psMarkSweep.cpp fprofiler.hpp psMarkSweep.cpp fprofiler.hpp
psMarkSweep.cpp gcCause.hpp psMarkSweep.cpp gcCause.hpp
psMarkSweep.cpp gcLocker.inline.hpp psMarkSweep.cpp gcLocker.inline.hpp
psMarkSweep.cpp generationSizer.hpp
psMarkSweep.cpp isGCActiveMark.hpp psMarkSweep.cpp isGCActiveMark.hpp
psMarkSweep.cpp oop.inline.hpp psMarkSweep.cpp oop.inline.hpp
psMarkSweep.cpp memoryService.hpp psMarkSweep.cpp memoryService.hpp
@ -256,6 +259,7 @@ psParallelCompact.cpp fprofiler.hpp
psParallelCompact.cpp gcCause.hpp psParallelCompact.cpp gcCause.hpp
psParallelCompact.cpp gcLocker.inline.hpp psParallelCompact.cpp gcLocker.inline.hpp
psParallelCompact.cpp gcTaskManager.hpp psParallelCompact.cpp gcTaskManager.hpp
psParallelCompact.cpp generationSizer.hpp
psParallelCompact.cpp isGCActiveMark.hpp psParallelCompact.cpp isGCActiveMark.hpp
psParallelCompact.cpp management.hpp psParallelCompact.cpp management.hpp
psParallelCompact.cpp memoryService.hpp psParallelCompact.cpp memoryService.hpp
@ -344,10 +348,12 @@ psPromotionLAB.hpp objectStartArray.hpp
psScavenge.cpp psAdaptiveSizePolicy.hpp psScavenge.cpp psAdaptiveSizePolicy.hpp
psScavenge.cpp biasedLocking.hpp psScavenge.cpp biasedLocking.hpp
psScavenge.cpp cardTableExtension.hpp psScavenge.cpp cardTableExtension.hpp
psScavenge.cpp collectorPolicy.hpp
psScavenge.cpp fprofiler.hpp psScavenge.cpp fprofiler.hpp
psScavenge.cpp gcCause.hpp psScavenge.cpp gcCause.hpp
psScavenge.cpp gcLocker.inline.hpp psScavenge.cpp gcLocker.inline.hpp
psScavenge.cpp gcTaskManager.hpp psScavenge.cpp gcTaskManager.hpp
psScavenge.cpp generationSizer.hpp
psScavenge.cpp handles.inline.hpp psScavenge.cpp handles.inline.hpp
psScavenge.cpp isGCActiveMark.hpp psScavenge.cpp isGCActiveMark.hpp
psScavenge.cpp oop.inline.hpp psScavenge.cpp oop.inline.hpp

View File

@ -1,5 +1,5 @@
// //
// Copyright (c) 2007 Sun Microsystems, Inc. All Rights Reserved. // Copyright 2007-2010 Sun Microsystems, Inc. All Rights Reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
// //
// This code is free software; you can redistribute it and/or modify it // This code is free software; you can redistribute it and/or modify it
@ -29,6 +29,7 @@ adaptiveSizePolicy.hpp allocation.hpp
adaptiveSizePolicy.hpp universe.hpp adaptiveSizePolicy.hpp universe.hpp
adaptiveSizePolicy.cpp adaptiveSizePolicy.hpp adaptiveSizePolicy.cpp adaptiveSizePolicy.hpp
adaptiveSizePolicy.cpp collectorPolicy.hpp
adaptiveSizePolicy.cpp gcCause.hpp adaptiveSizePolicy.cpp gcCause.hpp
adaptiveSizePolicy.cpp ostream.hpp adaptiveSizePolicy.cpp ostream.hpp
adaptiveSizePolicy.cpp timer.hpp adaptiveSizePolicy.cpp timer.hpp

View File

@ -1,5 +1,5 @@
/* /*
* Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -892,6 +892,10 @@ void ParNewGeneration::collect(bool full,
} }
swap_spaces(); swap_spaces();
// A successful scavenge should restart the GC time limit count which is
// for full GC's.
size_policy->reset_gc_overhead_limit_count();
assert(to()->is_empty(), "to space should be empty now"); assert(to()->is_empty(), "to space should be empty now");
} else { } else {
assert(HandlePromotionFailure, assert(HandlePromotionFailure,

View File

@ -1,5 +1,5 @@
/* /*
* Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -54,15 +54,16 @@ jint ParallelScavengeHeap::initialize() {
CollectedHeap::pre_initialize(); CollectedHeap::pre_initialize();
// Cannot be initialized until after the flags are parsed // Cannot be initialized until after the flags are parsed
GenerationSizer flag_parser; // GenerationSizer flag_parser;
_collector_policy = new GenerationSizer();
size_t yg_min_size = flag_parser.min_young_gen_size(); size_t yg_min_size = _collector_policy->min_young_gen_size();
size_t yg_max_size = flag_parser.max_young_gen_size(); size_t yg_max_size = _collector_policy->max_young_gen_size();
size_t og_min_size = flag_parser.min_old_gen_size(); size_t og_min_size = _collector_policy->min_old_gen_size();
size_t og_max_size = flag_parser.max_old_gen_size(); size_t og_max_size = _collector_policy->max_old_gen_size();
// Why isn't there a min_perm_gen_size()? // Why isn't there a min_perm_gen_size()?
size_t pg_min_size = flag_parser.perm_gen_size(); size_t pg_min_size = _collector_policy->perm_gen_size();
size_t pg_max_size = flag_parser.max_perm_gen_size(); size_t pg_max_size = _collector_policy->max_perm_gen_size();
trace_gen_sizes("ps heap raw", trace_gen_sizes("ps heap raw",
pg_min_size, pg_max_size, pg_min_size, pg_max_size,
@ -89,12 +90,14 @@ jint ParallelScavengeHeap::initialize() {
// move to the common code. // move to the common code.
yg_min_size = align_size_up(yg_min_size, yg_align); yg_min_size = align_size_up(yg_min_size, yg_align);
yg_max_size = align_size_up(yg_max_size, yg_align); yg_max_size = align_size_up(yg_max_size, yg_align);
size_t yg_cur_size = align_size_up(flag_parser.young_gen_size(), yg_align); size_t yg_cur_size =
align_size_up(_collector_policy->young_gen_size(), yg_align);
yg_cur_size = MAX2(yg_cur_size, yg_min_size); yg_cur_size = MAX2(yg_cur_size, yg_min_size);
og_min_size = align_size_up(og_min_size, og_align); og_min_size = align_size_up(og_min_size, og_align);
og_max_size = align_size_up(og_max_size, og_align); og_max_size = align_size_up(og_max_size, og_align);
size_t og_cur_size = align_size_up(flag_parser.old_gen_size(), og_align); size_t og_cur_size =
align_size_up(_collector_policy->old_gen_size(), og_align);
og_cur_size = MAX2(og_cur_size, og_min_size); og_cur_size = MAX2(og_cur_size, og_min_size);
pg_min_size = align_size_up(pg_min_size, pg_align); pg_min_size = align_size_up(pg_min_size, pg_align);
@ -355,6 +358,11 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread"); assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock"); assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
// In general gc_overhead_limit_was_exceeded should be false so
// set it so here and reset it to true only if the gc time
// limit is being exceeded as checked below.
*gc_overhead_limit_was_exceeded = false;
HeapWord* result = young_gen()->allocate(size, is_tlab); HeapWord* result = young_gen()->allocate(size, is_tlab);
uint loop_count = 0; uint loop_count = 0;
@ -428,24 +436,6 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
if (result == NULL) { if (result == NULL) {
// Exit the loop if the gc time limit has been exceeded.
// The allocation must have failed above (result must be NULL),
// and the most recent collection must have exceeded the
// gc time limit. Exit the loop so that an out-of-memory
// will be thrown (returning a NULL will do that), but
// clear gc_time_limit_exceeded so that the next collection
// will succeeded if the applications decides to handle the
// out-of-memory and tries to go on.
*gc_overhead_limit_was_exceeded = size_policy()->gc_time_limit_exceeded();
if (size_policy()->gc_time_limit_exceeded()) {
size_policy()->set_gc_time_limit_exceeded(false);
if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
"return NULL because gc_time_limit_exceeded is set");
}
return NULL;
}
// Generate a VM operation // Generate a VM operation
VM_ParallelGCFailedAllocation op(size, is_tlab, gc_count); VM_ParallelGCFailedAllocation op(size, is_tlab, gc_count);
VMThread::execute(&op); VMThread::execute(&op);
@ -463,16 +453,34 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
assert(op.result() == NULL, "must be NULL if gc_locked() is true"); assert(op.result() == NULL, "must be NULL if gc_locked() is true");
continue; // retry and/or stall as necessary continue; // retry and/or stall as necessary
} }
// If a NULL result is being returned, an out-of-memory
// will be thrown now. Clear the gc_time_limit_exceeded // Exit the loop if the gc time limit has been exceeded.
// flag to avoid the following situation. // The allocation must have failed above ("result" guarding
// gc_time_limit_exceeded is set during a collection // this path is NULL) and the most recent collection has exceeded the
// the collection fails to return enough space and an OOM is thrown // gc overhead limit (although enough may have been collected to
// the next GC is skipped because the gc_time_limit_exceeded // satisfy the allocation). Exit the loop so that an out-of-memory
// flag is set and another OOM is thrown // will be thrown (return a NULL ignoring the contents of
if (op.result() == NULL) { // op.result()),
size_policy()->set_gc_time_limit_exceeded(false); // but clear gc_overhead_limit_exceeded so that the next collection
// starts with a clean slate (i.e., forgets about previous overhead
// excesses). Fill op.result() with a filler object so that the
// heap remains parsable.
const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
const bool softrefs_clear = collector_policy()->all_soft_refs_clear();
assert(!limit_exceeded || softrefs_clear, "Should have been cleared");
if (limit_exceeded && softrefs_clear) {
*gc_overhead_limit_was_exceeded = true;
size_policy()->set_gc_overhead_limit_exceeded(false);
if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
"return NULL because gc_overhead_limit_exceeded is set");
}
if (op.result() != NULL) {
CollectedHeap::fill_with_object(op.result(), size);
}
return NULL;
} }
return op.result(); return op.result();
} }
} }
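The reworked exit path above couples the overhead-limit test to soft-reference policy: the loop only gives up (letting an out-of-memory surface) when the limit was exceeded and soft references have already been cleared, and any block the failed VM operation handed back is turned into a filler object so the heap stays parsable. The control flow, reduced to a sketch with stand-in types and calls:

    #include <cstddef>

    typedef char HeapWordSketch;   // stand-in for HeapWord

    // No-op stub: the real call overwrites the block with a dummy object.
    static void fill_with_object(HeapWordSketch* /*start*/, size_t /*words*/) {}

    HeapWordSketch* exit_check_sketch(bool limit_exceeded, bool softrefs_clear,
                                      HeapWordSketch* op_result, size_t size,
                                      bool* gc_overhead_limit_was_exceeded) {
      if (limit_exceeded && softrefs_clear) {
        *gc_overhead_limit_was_exceeded = true;
        // (the real code also clears the policy's own flag here so the
        //  next collection starts with a clean slate)
        if (op_result != NULL) {
          fill_with_object(op_result, size);   // keep the heap parsable
        }
        return NULL;                           // caller throws out-of-memory
      }
      return op_result;
    }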
@ -613,14 +621,15 @@ HeapWord* ParallelScavengeHeap::permanent_mem_allocate(size_t size) {
// and the most recent collection must have exceeded the // and the most recent collection must have exceeded the
// gc time limit. Exit the loop so that an out-of-memory // gc time limit. Exit the loop so that an out-of-memory
// will be thrown (returning a NULL will do that), but // will be thrown (returning a NULL will do that), but
// clear gc_time_limit_exceeded so that the next collection // clear gc_overhead_limit_exceeded so that the next collection
// will succeeded if the applications decides to handle the // will succeeded if the applications decides to handle the
// out-of-memory and tries to go on. // out-of-memory and tries to go on.
if (size_policy()->gc_time_limit_exceeded()) { const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
size_policy()->set_gc_time_limit_exceeded(false); if (limit_exceeded) {
size_policy()->set_gc_overhead_limit_exceeded(false);
if (PrintGCDetails && Verbose) { if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr("ParallelScavengeHeap::permanent_mem_allocate: " gclog_or_tty->print_cr("ParallelScavengeHeap::permanent_mem_allocate:"
"return NULL because gc_time_limit_exceeded is set"); " return NULL because gc_overhead_limit_exceeded is set");
} }
assert(result == NULL, "Allocation did not fail"); assert(result == NULL, "Allocation did not fail");
return NULL; return NULL;
@ -643,14 +652,15 @@ HeapWord* ParallelScavengeHeap::permanent_mem_allocate(size_t size) {
continue; // retry and/or stall as necessary continue; // retry and/or stall as necessary
} }
// If a NULL result is being returned, an out-of-memory // If a NULL result is being returned, an out-of-memory
// will be thrown now. Clear the gc_time_limit_exceeded // will be thrown now. Clear the gc_overhead_limit_exceeded
// flag to avoid the following situation. // flag to avoid the following situation.
// gc_time_limit_exceeded is set during a collection // gc_overhead_limit_exceeded is set during a collection
// the collection fails to return enough space and an OOM is thrown // the collection fails to return enough space and an OOM is thrown
// the next GC is skipped because the gc_time_limit_exceeded // a subsequent GC prematurely throws an out-of-memory because
// flag is set and another OOM is thrown // the gc_overhead_limit_exceeded counts did not start
// again from 0.
if (op.result() == NULL) { if (op.result() == NULL) {
size_policy()->set_gc_time_limit_exceeded(false); size_policy()->reset_gc_overhead_limit_count();
} }
return op.result(); return op.result();
} }
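For readers following the control flow: the three hunks above reshape the same allocate-and-retry loop. Below is a self-contained C++ sketch of that loop under the new scheme. PolicyStub, try_allocate and collect_and_allocate are invented stand-ins, not HotSpot API; only the decision logic mirrors the patch.

#include <cassert>
#include <cstddef>
#include <cstdio>

struct PolicyStub {
  bool gc_overhead_limit_exceeded = false;  // set by the size policy
  bool all_soft_refs_clear = false;         // set by the collector
};

static void* try_allocate(size_t) { return nullptr; }        // stub: fast path fails
static void* collect_and_allocate(PolicyStub* p, size_t) {   // stub: GC also fails
  p->all_soft_refs_clear = true;            // soft refs were cleared first...
  p->gc_overhead_limit_exceeded = true;     // ...then the limit tripped
  return nullptr;
}

void* mem_allocate_sketch(PolicyStub* policy, size_t size,
                          bool* gc_overhead_limit_was_exceeded) {
  *gc_overhead_limit_was_exceeded = false;
  for (;;) {
    if (void* result = try_allocate(size)) return result;
    void* gc_result = collect_and_allocate(policy, size);
    // The limit flag is only set after a collection that cleared soft
    // references, hence the invariant asserted in the real code too.
    assert(!policy->gc_overhead_limit_exceeded || policy->all_soft_refs_clear);
    if (policy->gc_overhead_limit_exceeded && policy->all_soft_refs_clear) {
      *gc_overhead_limit_was_exceeded = true;   // caller will throw OOM
      policy->gc_overhead_limit_exceeded = false;  // clean slate next time
      return nullptr;                           // ignore gc_result
    }
    if (gc_result != nullptr) return gc_result;
    // otherwise retry and/or stall, as in the real loop
  }
}

int main() {
  PolicyStub policy;
  bool exceeded = false;
  void* p = mem_allocate_sketch(&policy, 64, &exceeded);
  std::printf("result=%p exceeded=%d\n", p, (int)exceeded);  // null result, exceeded=1
}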

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,8 @@
 class AdjoiningGenerations;
 class GCTaskManager;
 class PSAdaptiveSizePolicy;
+class GenerationSizer;
+class CollectorPolicy;

 class ParallelScavengeHeap : public CollectedHeap {
   friend class VMStructs;
@@ -43,6 +45,8 @@ class ParallelScavengeHeap : public CollectedHeap {
   size_t _young_gen_alignment;
   size_t _old_gen_alignment;

+  GenerationSizer* _collector_policy;
+
   inline size_t set_alignment(size_t& var, size_t val);

   // Collection of generations that are adjacent in the
@@ -72,6 +76,9 @@ class ParallelScavengeHeap : public CollectedHeap {
     return CollectedHeap::ParallelScavengeHeap;
   }

+  CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector_policy; }
+  // GenerationSizer* collector_policy() const { return _collector_policy; }
+
   static PSYoungGen* young_gen()  { return _young_gen; }
   static PSOldGen* old_gen()      { return _old_gen; }
   static PSPermGen* perm_gen()    { return _perm_gen; }

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 2002-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2002-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -184,18 +184,19 @@ void PSAdaptiveSizePolicy::clear_generation_free_space_flags() {
   set_change_young_gen_for_maj_pauses(0);
 }

 // If this is not a full GC, only test and modify the young generation.

-void PSAdaptiveSizePolicy::compute_generation_free_space(size_t young_live,
-                                                         size_t eden_live,
-                                                         size_t old_live,
-                                                         size_t perm_live,
-                                                         size_t cur_eden,
-                                                         size_t max_old_gen_size,
-                                                         size_t max_eden_size,
-                                                         bool   is_full_gc,
-                                                         GCCause::Cause gc_cause) {
+void PSAdaptiveSizePolicy::compute_generation_free_space(
+                                           size_t young_live,
+                                           size_t eden_live,
+                                           size_t old_live,
+                                           size_t perm_live,
+                                           size_t cur_eden,
+                                           size_t max_old_gen_size,
+                                           size_t max_eden_size,
+                                           bool   is_full_gc,
+                                           GCCause::Cause gc_cause,
+                                           CollectorPolicy* collector_policy) {

   // Update statistics
   // Time statistics are updated as we go, update footprint stats here
@@ -380,91 +381,16 @@ void PSAdaptiveSizePolicy::compute_generation_free_space(size_t young_live,
   // Is too much time being spent in GC?
   // Is the heap trying to grow beyond its limits?

-  const size_t free_in_old_gen = (size_t)(max_old_gen_size - avg_old_live()->average());
+  const size_t free_in_old_gen =
+    (size_t)(max_old_gen_size - avg_old_live()->average());
   if (desired_promo_size > free_in_old_gen && desired_eden_size > eden_limit) {
-
-    // eden_limit is the upper limit on the size of eden based on
-    // the maximum size of the young generation and the sizes
-    // of the survivor space.
-    // The question being asked is whether the gc costs are high
-    // and the space being recovered by a collection is low.
-    // free_in_young_gen is the free space in the young generation
-    // after a collection and promo_live is the free space in the old
-    // generation after a collection.
-    //
-    // Use the minimum of the current value of the live in the
-    // young gen or the average of the live in the young gen.
-    // If the current value drops quickly, that should be taken
-    // into account (i.e., don't trigger if the amount of free
-    // space has suddenly jumped up).  If the current is much
-    // higher than the average, use the average since it represents
-    // the longer term behavior.
-    const size_t live_in_eden = MIN2(eden_live, (size_t) avg_eden_live()->average());
-    const size_t free_in_eden = eden_limit > live_in_eden ?
-      eden_limit - live_in_eden : 0;
-    const size_t total_free_limit = free_in_old_gen + free_in_eden;
-    const size_t total_mem = max_old_gen_size + max_eden_size;
-    const double mem_free_limit = total_mem * (GCHeapFreeLimit/100.0);
-
-    if (PrintAdaptiveSizePolicy && (Verbose ||
-        (total_free_limit < (size_t) mem_free_limit))) {
-      gclog_or_tty->print_cr(
-            "PSAdaptiveSizePolicy::compute_generation_free_space limits:"
-            " promo_limit: " SIZE_FORMAT
-            " eden_limit: " SIZE_FORMAT
-            " total_free_limit: " SIZE_FORMAT
-            " max_old_gen_size: " SIZE_FORMAT
-            " max_eden_size: " SIZE_FORMAT
-            " mem_free_limit: " SIZE_FORMAT,
-            promo_limit, eden_limit, total_free_limit,
-            max_old_gen_size, max_eden_size,
-            (size_t) mem_free_limit);
-    }
-
-    if (is_full_gc) {
-      if (gc_cost() > gc_cost_limit &&
-        total_free_limit < (size_t) mem_free_limit) {
-        // Collections, on average, are taking too much time, and
-        //      gc_cost() > gc_cost_limit
-        // we have too little space available after a full gc.
-        //      total_free_limit < mem_free_limit
-        // where
-        //   total_free_limit is the free space available in
-        //     both generations
-        //   total_mem is the total space available for allocation
-        //     in both generations (survivor spaces are not included
-        //     just as they are not included in eden_limit).
-        //   mem_free_limit is a fraction of total_mem judged to be an
-        //     acceptable amount that is still unused.
-        // The heap can ask for the value of this variable when deciding
-        // whether to throw an OutOfMemory error.
-        // Note that the gc time limit test only works for the collections
-        // of the young gen + tenured gen and not for collections of the
-        // permanent gen.  That is because the calculation of the space
-        // freed by the collection is the free space in the young gen +
-        // tenured gen.
-        // Ignore explicit GC's.  Ignoring explicit GC's at this level
-        // is the equivalent of the GC did not happen as far as the
-        // overhead calculation is concerned (i.e., the flag is not set
-        // and the count is not affected).  Also the average will not
-        // have been updated unless UseAdaptiveSizePolicyWithSystemGC is on.
-        if (!GCCause::is_user_requested_gc(gc_cause) &&
-            !GCCause::is_serviceability_requested_gc(gc_cause)) {
-          inc_gc_time_limit_count();
-          if (UseGCOverheadLimit &&
-              (gc_time_limit_count() > AdaptiveSizePolicyGCTimeLimitThreshold)){
-            // All conditions have been met for throwing an out-of-memory
-            _gc_time_limit_exceeded = true;
-            // Avoid consecutive OOM due to the gc time limit by resetting
-            // the counter.
-            reset_gc_time_limit_count();
-          }
-          _print_gc_time_limit_would_be_exceeded = true;
-        }
-      } else {
-        // Did not exceed overhead limits
-        reset_gc_time_limit_count();
-      }
-    }
+    check_gc_overhead_limit(young_live,
+                            eden_live,
+                            max_old_gen_size,
+                            max_eden_size,
+                            is_full_gc,
+                            gc_cause,
+                            collector_policy);
   }
 }

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 2002-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2002-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,6 +45,7 @@
 // Forward decls
 class elapsedTimer;
+class GenerationSizer;

 class PSAdaptiveSizePolicy : public AdaptiveSizePolicy {
   friend class PSGCAdaptivePolicyCounters;
@@ -340,7 +341,8 @@ class PSAdaptiveSizePolicy : public AdaptiveSizePolicy {
                                           size_t max_old_gen_size,
                                           size_t max_eden_size,
                                           bool   is_full_gc,
-                                          GCCause::Cause gc_cause);
+                                          GCCause::Cause gc_cause,
+                                          CollectorPolicy* collector_policy);

   // Calculates new survivor space size; returns a new tenuring threshold
   // value.  Stores new survivor size in _survivor_size.

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -117,11 +117,13 @@ PSGCAdaptivePolicyCounters::PSGCAdaptivePolicyCounters(const char* name_arg,
     PerfData::U_Bytes, (jlong) ps_size_policy()->avg_base_footprint()->average(), CHECK);

   cname = PerfDataManager::counter_name(name_space(), "gcTimeLimitExceeded");
-  _gc_time_limit_exceeded = PerfDataManager::create_variable(SUN_GC, cname,
-    PerfData::U_Events, ps_size_policy()->gc_time_limit_exceeded(), CHECK);
+  _gc_overhead_limit_exceeded_counter =
+    PerfDataManager::create_variable(SUN_GC, cname,
+      PerfData::U_Events, ps_size_policy()->gc_overhead_limit_exceeded(), CHECK);

   cname = PerfDataManager::counter_name(name_space(), "liveAtLastFullGc");
-  _live_at_last_full_gc = PerfDataManager::create_variable(SUN_GC, cname,
+  _live_at_last_full_gc_counter =
+    PerfDataManager::create_variable(SUN_GC, cname,
     PerfData::U_Bytes, ps_size_policy()->live_at_last_full_gc(), CHECK);

   cname = PerfDataManager::counter_name(name_space(), "majorPauseOldSlope");
@@ -189,6 +191,8 @@ void PSGCAdaptivePolicyCounters::update_counters_from_policy() {
     update_minor_pause_old_slope();
     update_major_pause_young_slope();
     update_minor_collection_slope_counter();
+    update_gc_overhead_limit_exceeded_counter();
+    update_live_at_last_full_gc_counter();
   }
 }

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2005 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,8 +44,8 @@ class PSGCAdaptivePolicyCounters : public GCAdaptivePolicyCounters {
   PerfVariable* _live_space;
   PerfVariable* _free_space;
   PerfVariable* _avg_base_footprint;
-  PerfVariable* _gc_time_limit_exceeded;
-  PerfVariable* _live_at_last_full_gc;
+  PerfVariable* _gc_overhead_limit_exceeded_counter;
+  PerfVariable* _live_at_last_full_gc_counter;
   PerfVariable* _old_capacity;
   PerfVariable* _boundary_moved;
@@ -169,6 +169,14 @@ class PSGCAdaptivePolicyCounters : public GCAdaptivePolicyCounters {
       (jlong)(ps_size_policy()->major_pause_young_slope() * 1000)
     );
   }
+  inline void update_gc_overhead_limit_exceeded_counter() {
+    _gc_overhead_limit_exceeded_counter->set_value(
+      (jlong) ps_size_policy()->gc_overhead_limit_exceeded());
+  }
+  inline void update_live_at_last_full_gc_counter() {
+    _live_at_last_full_gc_counter->set_value(
+      (jlong)(ps_size_policy()->live_at_last_full_gc()));
+  }

   inline void update_scavenge_skipped(int cause) {
     _scavenge_skipped->set_value(cause);

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -46,6 +46,12 @@ void PSMarkSweep::initialize() {
 //
 // Note that this method should only be called from the vm_thread while
 // at a safepoint!
+//
+// Note that the all_soft_refs_clear flag in the collector policy
+// may be true because this method can be called without intervening
+// activity.  For example when the heap space is tight and full measures
+// are being taken to free space.

 void PSMarkSweep::invoke(bool maximum_heap_compaction) {
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
@@ -54,24 +60,18 @@ void PSMarkSweep::invoke(bool maximum_heap_compaction) {
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   GCCause::Cause gc_cause = heap->gc_cause();
   PSAdaptiveSizePolicy* policy = heap->size_policy();
+  IsGCActiveMark mark;

-  // Before each allocation/collection attempt, find out from the
-  // policy object if GCs are, on the whole, taking too long. If so,
-  // bail out without attempting a collection.  The exceptions are
-  // for explicitly requested GC's.
-  if (!policy->gc_time_limit_exceeded() ||
-      GCCause::is_user_requested_gc(gc_cause) ||
-      GCCause::is_serviceability_requested_gc(gc_cause)) {
-    IsGCActiveMark mark;
-
-    if (ScavengeBeforeFullGC) {
-      PSScavenge::invoke_no_policy();
-    }
-
-    int count = (maximum_heap_compaction)?1:MarkSweepAlwaysCompactCount;
-    IntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
-    PSMarkSweep::invoke_no_policy(maximum_heap_compaction);
+  if (ScavengeBeforeFullGC) {
+    PSScavenge::invoke_no_policy();
   }
+
+  const bool clear_all_soft_refs =
+    heap->collector_policy()->should_clear_all_soft_refs();
+
+  int count = (maximum_heap_compaction)?1:MarkSweepAlwaysCompactCount;
+  IntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
+  PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
 }

 // This method contains no policy. You should probably
@@ -89,6 +89,10 @@ void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

   PSAdaptiveSizePolicy* size_policy = heap->size_policy();
+
+  // The scope of casr should end after code that can change
+  // CollectorPolicy::_should_clear_all_soft_refs.
+  ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());

   PSYoungGen* young_gen = heap->young_gen();
   PSOldGen* old_gen = heap->old_gen();
   PSPermGen* perm_gen = heap->perm_gen();
@@ -275,7 +279,8 @@ void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
                                  old_gen->max_gen_size(),
                                  max_eden_size,
                                  true /* full gc*/,
-                                 gc_cause);
+                                 gc_cause,
+                                 heap->collector_policy());

       heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());
@@ -326,19 +331,6 @@ void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
     // Track memory usage and detect low memory
     MemoryService::track_memory_usage();
     heap->update_counters();
-
-    if (PrintGCDetails) {
-      if (size_policy->print_gc_time_limit_would_be_exceeded()) {
-        if (size_policy->gc_time_limit_exceeded()) {
-          gclog_or_tty->print_cr("      GC time is exceeding GCTimeLimit "
-            "of %d%%", GCTimeLimit);
-        } else {
-          gclog_or_tty->print_cr("      GC time would exceed GCTimeLimit "
-            "of %d%%", GCTimeLimit);
-        }
-      }
-      size_policy->set_print_gc_time_limit_would_be_exceeded(false);
-    }
   }

   if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
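The essential change in invoke() above (and in the matching psParallelCompact.cpp hunk that follows) is that the policy's pending soft-reference request is now OR-ed into the argument of invoke_no_policy(), instead of gating the whole collection on the old time-limit flag. A stub sketch of just that decision (placeholder types, not HotSpot API):

#include <cstdio>

struct HeapStub {
  bool should_clear_all_soft_refs = false;  // set by the size policy
};

static void scavenge() { std::puts("young GC before full GC"); }
static void mark_sweep_no_policy(bool clear_all_softrefs) {
  std::printf("full GC, clear_all_softrefs=%d\n", (int)clear_all_softrefs);
}

void invoke(HeapStub* heap, bool maximum_heap_compaction,
            bool scavenge_before_full_gc) {
  if (scavenge_before_full_gc) scavenge();
  // Honor the policy's pending request even when the caller did not ask
  // for a maximally compacting (soft-ref-clearing) collection.
  const bool clear_all_soft_refs = heap->should_clear_all_soft_refs;
  mark_sweep_no_policy(clear_all_soft_refs || maximum_heap_compaction);
}

int main() {
  HeapStub heap;
  heap.should_clear_all_soft_refs = true;  // e.g. nearing the overhead limit
  invoke(&heap, /*maximum_heap_compaction=*/false, /*scavenge_before_full_gc=*/true);
}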

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 2005-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2005-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1923,31 +1923,32 @@ void PSParallelCompact::summary_phase(ParCompactionManager* cm,
 //
 // Note that this method should only be called from the vm_thread while at a
 // safepoint.
+//
+// Note that the all_soft_refs_clear flag in the collector policy
+// may be true because this method can be called without intervening
+// activity.  For example when the heap space is tight and full measures
+// are being taken to free space.
 void PSParallelCompact::invoke(bool maximum_heap_compaction) {
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == (Thread*)VMThread::vm_thread(),
          "should be in vm thread");
+
   ParallelScavengeHeap* heap = gc_heap();
   GCCause::Cause gc_cause = heap->gc_cause();
   assert(!heap->is_gc_active(), "not reentrant");

   PSAdaptiveSizePolicy* policy = heap->size_policy();
+  IsGCActiveMark mark;

-  // Before each allocation/collection attempt, find out from the
-  // policy object if GCs are, on the whole, taking too long. If so,
-  // bail out without attempting a collection.  The exceptions are
-  // for explicitly requested GC's.
-  if (!policy->gc_time_limit_exceeded() ||
-      GCCause::is_user_requested_gc(gc_cause) ||
-      GCCause::is_serviceability_requested_gc(gc_cause)) {
-    IsGCActiveMark mark;
-
-    if (ScavengeBeforeFullGC) {
-      PSScavenge::invoke_no_policy();
-    }
-
-    PSParallelCompact::invoke_no_policy(maximum_heap_compaction);
+  if (ScavengeBeforeFullGC) {
+    PSScavenge::invoke_no_policy();
   }
+
+  const bool clear_all_soft_refs =
+    heap->collector_policy()->should_clear_all_soft_refs();
+
+  PSParallelCompact::invoke_no_policy(clear_all_soft_refs ||
+                                      maximum_heap_compaction);
 }

 bool ParallelCompactData::region_contains(size_t region_index, HeapWord* addr) {
@@ -1976,6 +1977,11 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
   PSPermGen* perm_gen = heap->perm_gen();
   PSAdaptiveSizePolicy* size_policy = heap->size_policy();

+  // The scope of casr should end after code that can change
+  // CollectorPolicy::_should_clear_all_soft_refs.
+  ClearedAllSoftRefs casr(maximum_heap_compaction,
+                          heap->collector_policy());
+
   if (ZapUnusedHeapArea) {
     // Save information needed to minimize mangling
     heap->record_gen_tops_before_GC();
@@ -2109,7 +2115,8 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
                                  old_gen->max_gen_size(),
                                  max_eden_size,
                                  true /* full gc*/,
-                                 gc_cause);
+                                 gc_cause,
+                                 heap->collector_policy());

         heap->resize_old_gen(
           size_policy->calculated_old_free_size_in_bytes());
@@ -2157,19 +2164,6 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
     // Track memory usage and detect low memory
     MemoryService::track_memory_usage();
     heap->update_counters();
-
-    if (PrintGCDetails) {
-      if (size_policy->print_gc_time_limit_would_be_exceeded()) {
-        if (size_policy->gc_time_limit_exceeded()) {
-          gclog_or_tty->print_cr("      GC time is exceeding GCTimeLimit "
-            "of %d%%", GCTimeLimit);
-        } else {
-          gclog_or_tty->print_cr("      GC time would exceed GCTimeLimit "
-            "of %d%%", GCTimeLimit);
-        }
-      }
-      size_policy->set_print_gc_time_limit_would_be_exceeded(false);
-    }
   }

   if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 2002-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2002-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -187,8 +187,7 @@ void PSRefProcTaskExecutor::execute(EnqueueTask& task)
 //
 // Note that this method should only be called from the vm_thread while
 // at a safepoint!
-void PSScavenge::invoke()
-{
+void PSScavenge::invoke() {
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
   assert(!Universe::heap()->is_gc_active(), "not reentrant");
@@ -197,29 +196,25 @@ void PSScavenge::invoke()
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

   PSAdaptiveSizePolicy* policy = heap->size_policy();
+  IsGCActiveMark mark;

-  // Before each allocation/collection attempt, find out from the
-  // policy object if GCs are, on the whole, taking too long. If so,
-  // bail out without attempting a collection.
-  if (!policy->gc_time_limit_exceeded()) {
-    IsGCActiveMark mark;
-
-    bool scavenge_was_done = PSScavenge::invoke_no_policy();
-
-    PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
-    if (UsePerfData)
-      counters->update_full_follows_scavenge(0);
-    if (!scavenge_was_done ||
-        policy->should_full_GC(heap->old_gen()->free_in_bytes())) {
-      if (UsePerfData)
-        counters->update_full_follows_scavenge(full_follows_scavenge);
-      GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
-      if (UseParallelOldGC) {
-        PSParallelCompact::invoke_no_policy(false);
-      } else {
-        PSMarkSweep::invoke_no_policy(false);
-      }
+  bool scavenge_was_done = PSScavenge::invoke_no_policy();
+
+  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
+  if (UsePerfData)
+    counters->update_full_follows_scavenge(0);
+  if (!scavenge_was_done ||
+      policy->should_full_GC(heap->old_gen()->free_in_bytes())) {
+    if (UsePerfData)
+      counters->update_full_follows_scavenge(full_follows_scavenge);
+    GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
+    CollectorPolicy* cp = heap->collector_policy();
+    const bool clear_all_softrefs = cp->should_clear_all_soft_refs();
+
+    if (UseParallelOldGC) {
+      PSParallelCompact::invoke_no_policy(clear_all_softrefs);
+    } else {
+      PSMarkSweep::invoke_no_policy(clear_all_softrefs);
     }
   }
 }
@@ -447,6 +442,9 @@ bool PSScavenge::invoke_no_policy() {
     size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;
     size_policy->update_averages(_survivor_overflow, survived, promoted);

+    // A successful scavenge should restart the GC time limit count which is
+    // for full GC's.
+    size_policy->reset_gc_overhead_limit_count();
+
     if (UseAdaptiveSizePolicy) {
       // Calculate the new survivor size and tenuring threshold
@@ -523,7 +521,8 @@ bool PSScavenge::invoke_no_policy() {
                                  old_gen->max_gen_size(),
                                  max_eden_size,
                                  false /* full gc*/,
-                                 gc_cause);
+                                 gc_cause,
+                                 heap->collector_policy());
     }

     // Resize the young generation at every collection

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 2004-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2004-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,13 +44,15 @@ AdaptiveSizePolicy::AdaptiveSizePolicy(size_t init_eden_size,
     _survivor_size(init_survivor_size),
     _gc_pause_goal_sec(gc_pause_goal_sec),
     _throughput_goal(1.0 - double(1.0 / (1.0 + (double) gc_cost_ratio))),
-    _gc_time_limit_exceeded(false),
-    _print_gc_time_limit_would_be_exceeded(false),
-    _gc_time_limit_count(0),
+    _gc_overhead_limit_exceeded(false),
+    _print_gc_overhead_limit_would_be_exceeded(false),
+    _gc_overhead_limit_count(0),
     _latest_minor_mutator_interval_seconds(0),
     _threshold_tolerance_percent(1.0 + ThresholdTolerance/100.0),
     _young_gen_change_for_minor_throughput(0),
     _old_gen_change_for_major_throughput(0) {
+  assert(AdaptiveSizePolicyGCTimeLimitThreshold > 0,
+    "No opportunity to clear SoftReferences before GC overhead limit");
   _avg_minor_pause =
     new AdaptivePaddedAverage(AdaptiveTimeWeight, PausePadding);
   _avg_minor_interval = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
@@ -278,6 +280,147 @@ void AdaptiveSizePolicy::clear_generation_free_space_flags() {
   set_decide_at_full_gc(0);
 }

+void AdaptiveSizePolicy::check_gc_overhead_limit(
+    size_t young_live,
+    size_t eden_live,
+    size_t max_old_gen_size,
+    size_t max_eden_size,
+    bool   is_full_gc,
+    GCCause::Cause gc_cause,
+    CollectorPolicy* collector_policy) {
+
+  // Ignore explicit GC's.  Exiting here does not set the flag and
+  // does not reset the count.  Updating of the averages for system
+  // GC's is still controlled by UseAdaptiveSizePolicyWithSystemGC.
+  if (GCCause::is_user_requested_gc(gc_cause) ||
+      GCCause::is_serviceability_requested_gc(gc_cause)) {
+    return;
+  }
+  // eden_limit is the upper limit on the size of eden based on
+  // the maximum size of the young generation and the sizes
+  // of the survivor space.
+  // The question being asked is whether the gc costs are high
+  // and the space being recovered by a collection is low.
+  // free_in_young_gen is the free space in the young generation
+  // after a collection and promo_live is the free space in the old
+  // generation after a collection.
+  //
+  // Use the minimum of the current value of the live in the
+  // young gen or the average of the live in the young gen.
+  // If the current value drops quickly, that should be taken
+  // into account (i.e., don't trigger if the amount of free
+  // space has suddenly jumped up).  If the current is much
+  // higher than the average, use the average since it represents
+  // the longer term behavior.
+  const size_t live_in_eden =
+    MIN2(eden_live, (size_t) avg_eden_live()->average());
+  const size_t free_in_eden = max_eden_size > live_in_eden ?
+    max_eden_size - live_in_eden : 0;
+  const size_t free_in_old_gen = (size_t)(max_old_gen_size - avg_old_live()->average());
+  const size_t total_free_limit = free_in_old_gen + free_in_eden;
+  const size_t total_mem = max_old_gen_size + max_eden_size;
+  const double mem_free_limit = total_mem * (GCHeapFreeLimit/100.0);
+  const double mem_free_old_limit = max_old_gen_size * (GCHeapFreeLimit/100.0);
+  const double mem_free_eden_limit = max_eden_size * (GCHeapFreeLimit/100.0);
+  const double gc_cost_limit = GCTimeLimit/100.0;
+  size_t promo_limit = (size_t)(max_old_gen_size - avg_old_live()->average());
+  // But don't force a promo size below the current promo size.  Otherwise,
+  // the promo size will shrink for no good reason.
+  promo_limit = MAX2(promo_limit, _promo_size);
+
+  if (PrintAdaptiveSizePolicy && (Verbose ||
+      (free_in_old_gen < (size_t) mem_free_old_limit &&
+       free_in_eden < (size_t) mem_free_eden_limit))) {
+    gclog_or_tty->print_cr(
+          "PSAdaptiveSizePolicy::compute_generation_free_space limits:"
+          " promo_limit: " SIZE_FORMAT
+          " max_eden_size: " SIZE_FORMAT
+          " total_free_limit: " SIZE_FORMAT
+          " max_old_gen_size: " SIZE_FORMAT
+          " max_eden_size: " SIZE_FORMAT
+          " mem_free_limit: " SIZE_FORMAT,
+          promo_limit, max_eden_size, total_free_limit,
+          max_old_gen_size, max_eden_size,
+          (size_t) mem_free_limit);
+  }
+
+  bool print_gc_overhead_limit_would_be_exceeded = false;
+  if (is_full_gc) {
+    if (gc_cost() > gc_cost_limit &&
+        free_in_old_gen < (size_t) mem_free_old_limit &&
+        free_in_eden < (size_t) mem_free_eden_limit) {
+      // Collections, on average, are taking too much time, and
+      //      gc_cost() > gc_cost_limit
+      // we have too little space available after a full gc.
+      //      total_free_limit < mem_free_limit
+      // where
+      //   total_free_limit is the free space available in
+      //     both generations
+      //   total_mem is the total space available for allocation
+      //     in both generations (survivor spaces are not included
+      //     just as they are not included in eden_limit).
+      //   mem_free_limit is a fraction of total_mem judged to be an
+      //     acceptable amount that is still unused.
+      // The heap can ask for the value of this variable when deciding
+      // whether to throw an OutOfMemory error.
+      // Note that the gc time limit test only works for the collections
+      // of the young gen + tenured gen and not for collections of the
+      // permanent gen.  That is because the calculation of the space
+      // freed by the collection is the free space in the young gen +
+      // tenured gen.
+      // At this point the GC overhead limit is being exceeded.
+      inc_gc_overhead_limit_count();
+      if (UseGCOverheadLimit) {
+        if (gc_overhead_limit_count() >=
+            AdaptiveSizePolicyGCTimeLimitThreshold){
+          // All conditions have been met for throwing an out-of-memory
+          set_gc_overhead_limit_exceeded(true);
+          // Avoid consecutive OOM due to the gc time limit by resetting
+          // the counter.
+          reset_gc_overhead_limit_count();
+        } else {
+          // The required consecutive collections which exceed the
+          // GC time limit may or may not have been reached.  We
+          // are approaching that condition and so as not to
+          // throw an out-of-memory before all SoftRefs have been
+          // cleared, set _should_clear_all_soft_refs in CollectorPolicy.
+          // The clearing will be done on the next GC.
+          bool near_limit = gc_overhead_limit_near();
+          if (near_limit) {
+            collector_policy->set_should_clear_all_soft_refs(true);
+            if (PrintGCDetails && Verbose) {
+              gclog_or_tty->print_cr("  Nearing GC overhead limit, "
+                "will be clearing all SoftReference");
+            }
+          }
+        }
+      }
+      // Set this even when the overhead limit will not
+      // cause an out-of-memory.  Diagnostic message indicating
+      // that the overhead limit is being exceeded is sometimes
+      // printed.
+      print_gc_overhead_limit_would_be_exceeded = true;
+
+    } else {
+      // Did not exceed overhead limits
+      reset_gc_overhead_limit_count();
+    }
+  }
+
+  if (UseGCOverheadLimit && PrintGCDetails && Verbose) {
+    if (gc_overhead_limit_exceeded()) {
+      gclog_or_tty->print_cr("      GC is exceeding overhead limit "
+        "of %d%%", GCTimeLimit);
+      reset_gc_overhead_limit_count();
+    } else if (print_gc_overhead_limit_would_be_exceeded) {
+      assert(gc_overhead_limit_count() > 0, "Should not be printing");
+      gclog_or_tty->print_cr("      GC would exceed overhead limit "
+        "of %d%% %d consecutive time(s)",
+        GCTimeLimit, gc_overhead_limit_count());
+    }
+  }
+}
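As a concreteness check, here is the trigger predicate from check_gc_overhead_limit() reduced to a standalone program. The flag values 98, 2 and 5 are the usual HotSpot defaults for GCTimeLimit, GCHeapFreeLimit and AdaptiveSizePolicyGCTimeLimitThreshold in this era; treat them as assumptions, not part of the patch.

#include <cstdio>

int main() {
  const double GCTimeLimit     = 98.0;  // % of time allowed in GC (assumed default)
  const double GCHeapFreeLimit = 2.0;   // % of each space that must stay free
  const unsigned Threshold     = 5;     // consecutive bad full GCs before OOM

  // Sample measurements after one full GC.
  const double gc_cost          = 0.99;               // 99% of time in GC
  const double max_old_gen_size = 512.0 * 1024 * 1024;
  const double free_in_old_gen  = 4.0 * 1024 * 1024;  // below 2% of 512M (10.24M)
  const double max_eden_size    = 128.0 * 1024 * 1024;
  const double free_in_eden     = 1.0 * 1024 * 1024;  // below 2% of 128M (2.56M)

  // The predicate checked per full GC (old and eden limits tested separately,
  // as in the new code, rather than as one combined free-space total).
  const bool exceeding =
      gc_cost > GCTimeLimit / 100.0 &&
      free_in_old_gen < max_old_gen_size * (GCHeapFreeLimit / 100.0) &&
      free_in_eden    < max_eden_size    * (GCHeapFreeLimit / 100.0);

  // One such GC only bumps a counter; the OOM flag is raised on the
  // Threshold-th consecutive occurrence, and SoftReference clearing is
  // requested one collection earlier (Threshold - 1, gc_overhead_limit_near).
  std::printf("exceeding=%d, OOM at %u consecutive, softrefs requested at %u\n",
              (int)exceeding, Threshold, Threshold - 1);
}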
 // Printing

 bool AdaptiveSizePolicy::print_adaptive_size_policy_on(outputStream* st) const {

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 2004-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2004-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@

 // Forward decls
 class elapsedTimer;
+class CollectorPolicy;

 class AdaptiveSizePolicy : public CHeapObj {
  friend class GCAdaptivePolicyCounters;
@@ -75,13 +76,16 @@ class AdaptiveSizePolicy : public CHeapObj {

   // This is a hint for the heap:  we've detected that gc times
   // are taking longer than GCTimeLimit allows.
-  bool _gc_time_limit_exceeded;
-  // Use for diagnostics only.  If UseGCTimeLimit is false,
+  bool _gc_overhead_limit_exceeded;
+  // Use for diagnostics only.  If UseGCOverheadLimit is false,
   // this variable is still set.
-  bool _print_gc_time_limit_would_be_exceeded;
-  // Count of consecutive GC that have exceeded the
+  bool _print_gc_overhead_limit_would_be_exceeded;
+  // Count of consecutive GCs that have exceeded the
   // GC time limit criterion.
-  uint _gc_time_limit_count;
+  uint _gc_overhead_limit_count;
+  // This flag signals that GCTimeLimit is being exceeded
+  // but may not have done so for the required number of consecutive
+  // collections.

   // Minor collection timers used to determine both
   // pause and interval times for collections.
@@ -406,22 +410,21 @@ class AdaptiveSizePolicy : public CHeapObj {
   // Most heaps will choose to throw an OutOfMemoryError when
   // this occurs but it is up to the heap to request this information
   // of the policy
-  bool gc_time_limit_exceeded() {
-    return _gc_time_limit_exceeded;
+  bool gc_overhead_limit_exceeded() {
+    return _gc_overhead_limit_exceeded;
   }
-  void set_gc_time_limit_exceeded(bool v) {
-    _gc_time_limit_exceeded = v;
-  }
-  bool print_gc_time_limit_would_be_exceeded() {
-    return _print_gc_time_limit_would_be_exceeded;
-  }
-  void set_print_gc_time_limit_would_be_exceeded(bool v) {
-    _print_gc_time_limit_would_be_exceeded = v;
+  void set_gc_overhead_limit_exceeded(bool v) {
+    _gc_overhead_limit_exceeded = v;
   }

-  uint gc_time_limit_count() { return _gc_time_limit_count; }
-  void reset_gc_time_limit_count() { _gc_time_limit_count = 0; }
-  void inc_gc_time_limit_count() { _gc_time_limit_count++; }
+  // Tests whether conditions indicate the GC overhead limit
+  // is being approached.
+  bool gc_overhead_limit_near() {
+    return gc_overhead_limit_count() >=
+        (AdaptiveSizePolicyGCTimeLimitThreshold - 1);
+  }
+  uint gc_overhead_limit_count() { return _gc_overhead_limit_count; }
+  void reset_gc_overhead_limit_count() { _gc_overhead_limit_count = 0; }
+  void inc_gc_overhead_limit_count() { _gc_overhead_limit_count++; }

   // accessors for flags recording the decisions to resize the
   // generations to meet the pause goal.
@@ -436,6 +439,16 @@ class AdaptiveSizePolicy : public CHeapObj {
   int decide_at_full_gc() { return _decide_at_full_gc; }
   void set_decide_at_full_gc(int v) { _decide_at_full_gc = v; }

+  // Check the conditions for an out-of-memory due to excessive GC time.
+  // Set _gc_overhead_limit_exceeded if all the conditions have been met.
+  void check_gc_overhead_limit(size_t young_live,
+                               size_t eden_live,
+                               size_t max_old_gen_size,
+                               size_t max_eden_size,
+                               bool   is_full_gc,
+                               GCCause::Cause gc_cause,
+                               CollectorPolicy* collector_policy);
+
   // Printing support
   virtual bool print_adaptive_size_policy_on(outputStream* st) const;
   bool print_adaptive_size_policy_on(outputStream* st, int

View File

@@ -115,11 +115,25 @@ bool VM_GC_HeapInspection::skip_operation() const {
 void VM_GC_HeapInspection::doit() {
   HandleMark hm;
   CollectedHeap* ch = Universe::heap();
+  ch->ensure_parsability(false); // must happen, even if collection does
+                                 // not happen (e.g. due to GC_locker)
   if (_full_gc) {
-    ch->collect_as_vm_thread(GCCause::_heap_inspection);
-  } else {
-    // make the heap parsable (no need to retire TLABs)
-    ch->ensure_parsability(false);
+    // The collection attempt below would be skipped anyway if
+    // the gc locker is held. The following dump may then be a tad
+    // misleading to someone expecting only live objects to show
+    // up in the dump (see CR 6944195). Just issue a suitable warning
+    // in that case and do not attempt to do a collection.
+    // The latter is a subtle point, because even a failed attempt
+    // to GC will, in fact, induce one in the future, which we
+    // probably want to avoid in this case because the GC that we may
+    // be about to attempt holds value for us only
+    // if it happens now and not if it happens in the eventual
+    // future.
+    if (GC_locker::is_active()) {
+      warning("GC locker is held; pre-dump GC was skipped");
+    } else {
+      ch->collect_as_vm_thread(GCCause::_heap_inspection);
+    }
   }
   HeapInspection::heap_inspection(_out, _need_prologue /* need_prologue */);
 }
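The guard pattern above, downgrading a collection request to a warning while the GC locker is held, can be illustrated with a minimal stand-alone sketch. The atomic counter and CriticalSection type are invented stand-ins for GC_locker and JNI critical sections, not HotSpot API.

#include <atomic>
#include <cstdio>

static std::atomic<int> g_critical_sections{0};  // stand-in for GC_locker state

struct CriticalSection {            // scoped stand-in for a JNI critical section
  CriticalSection()  { ++g_critical_sections; }
  ~CriticalSection() { --g_critical_sections; }
};

static void collect() { std::puts("collected"); }

void heap_inspection(bool full_gc) {
  if (full_gc) {
    if (g_critical_sections.load() > 0) {
      std::puts("warning: GC locker is held; pre-dump GC was skipped");
    } else {
      collect();  // safe: nothing is pinned
    }
  }
  std::puts("dumping heap (may include dead objects if GC was skipped)");
}

int main() {
  { CriticalSection cs; heap_inspection(true); }  // warns, skips the GC
  heap_inspection(true);                          // collects, then dumps
}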

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2005-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -89,8 +89,19 @@ class VM_GC_Operation: public VM_Operation {
     if (full) {
       _full_gc_count_before = full_gc_count_before;
     }
+    // In ParallelScavengeHeap::mem_allocate() collections can be
+    // executed within a loop and _all_soft_refs_clear can be set
+    // true after they have been cleared by a collection and another
+    // collection started so that _all_soft_refs_clear can be true
+    // when this collection is started.  Don't assert that
+    // _all_soft_refs_clear has to be false here even though
+    // mutators have run.  Soft refs will be cleared again in this
+    // collection.
   }
-  ~VM_GC_Operation() {}
+  ~VM_GC_Operation() {
+    CollectedHeap* ch = Universe::heap();
+    ch->collector_policy()->set_all_soft_refs_clear(false);
+  }

   // Acquire the reference synchronization lock
   virtual bool doit_prologue();

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,7 @@ class BarrierSet;
 class ThreadClosure;
 class AdaptiveSizePolicy;
 class Thread;
+class CollectorPolicy;

 //
 // CollectedHeap
@@ -506,6 +507,9 @@ class CollectedHeap : public CHeapObj {
   // Return the AdaptiveSizePolicy for the heap.
   virtual AdaptiveSizePolicy* size_policy() = 0;

+  // Return the CollectorPolicy for the heap
+  virtual CollectorPolicy* collector_policy() const = 0;
+
   // Iterate over all the ref-containing fields of all objects, calling
   // "cl.do_oop" on each. This includes objects in permanent memory.
   virtual void oop_iterate(OopClosure* cl) = 0;

View File

@@ -1,6 +1,6 @@
 //
 // Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
-// Copyright 2009 Red Hat, Inc.
+// Copyright 2009, 2010 Red Hat, Inc.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,8 @@
 // NOTE: DO NOT CHANGE THIS COPYRIGHT TO NEW STYLE - IT WILL BREAK makeDeps!

+cppInterpreter_<arch>.cpp               stack_<arch>.inline.hpp
+
 entryFrame_<arch>.hpp                   javaCalls.hpp
 entryFrame_<arch>.hpp                   stack_<arch>.hpp
@@ -47,9 +49,19 @@ interpreterFrame_<arch>.hpp             methodOop.hpp
 interpreterFrame_<arch>.hpp             stack_<arch>.hpp
 interpreterFrame_<arch>.hpp             thread.hpp

+interpreterRT_<arch>.cpp                stack_<arch>.inline.hpp
+
 sharkFrame_<arch>.hpp                   methodOop.hpp
 sharkFrame_<arch>.hpp                   stack_<arch>.hpp

 stack_<arch>.hpp                        sizes.hpp

+stack_<arch>.inline.hpp                 stack_<arch>.hpp
+stack_<arch>.inline.hpp                 thread.hpp
+
+stack_<arch>.cpp                        interpreterRuntime.hpp
+stack_<arch>.cpp                        stack_<arch>.hpp
+
+stubGenerator_<arch>.cpp                stack_<arch>.inline.hpp
+
 thread.hpp                              stack_<arch>.hpp

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -112,6 +112,11 @@ void CollectorPolicy::initialize_perm_generation(PermGen::Name pgnm) {
   }
 }

+bool CollectorPolicy::use_should_clear_all_soft_refs(bool v) {
+  bool result = _should_clear_all_soft_refs;
+  set_should_clear_all_soft_refs(false);
+  return result;
+}
+
 GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap,
                                            int max_covered_regions) {
@@ -126,6 +131,17 @@ GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap,
   }
 }

+void CollectorPolicy::cleared_all_soft_refs() {
+  // If near gc overhead limit, continue to clear SoftRefs.  SoftRefs may
+  // have been cleared in the last collection but if the gc overhead
+  // limit continues to be near, SoftRefs should still be cleared.
+  if (size_policy() != NULL) {
+    _should_clear_all_soft_refs = size_policy()->gc_overhead_limit_near();
+  }
+  _all_soft_refs_clear = true;
+}
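Taken together, use_should_clear_all_soft_refs() and cleared_all_soft_refs() form a request/fulfill handshake between the size policy and the collectors. A toy simulation of that handshake, using stub types (the threshold of 4, i.e. threshold minus one, and all names are illustrative, not the real HotSpot classes):

#include <cstdio>

struct PolicyStub {
  unsigned overhead_count = 0;             // consecutive over-limit full GCs
  bool should_clear_all_soft_refs = false; // request to the next GC
  bool all_soft_refs_clear = false;        // report back to the heap

  bool gc_overhead_limit_near() const { return overhead_count >= 4; }

  // Called by the GC once it has actually cleared soft references.
  void cleared_all_soft_refs() {
    // If still near the limit, keep clearing on the next GC too.
    should_clear_all_soft_refs = gc_overhead_limit_near();
    all_soft_refs_clear = true;
  }
};

void full_gc(PolicyStub* p, bool over_limit) {
  const bool clear = p->should_clear_all_soft_refs;
  p->should_clear_all_soft_refs = false;   // consume the request
  if (clear) p->cleared_all_soft_refs();   // fulfill and report
  p->overhead_count = over_limit ? p->overhead_count + 1 : 0;
  if (p->gc_overhead_limit_near()) p->should_clear_all_soft_refs = true;
  std::printf("cleared=%d next_request=%d\n",
              (int)clear, (int)p->should_clear_all_soft_refs);
}

int main() {
  PolicyStub p;
  for (int i = 0; i < 6; i++) full_gc(&p, /*over_limit=*/true);
}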
 // GenCollectorPolicy methods.

 size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) {
@@ -489,6 +505,12 @@ HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
   debug_only(gch->check_for_valid_allocation_state());
   assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");

+  // In general gc_overhead_limit_was_exceeded should be false so
+  // set it so here and reset it to true only if the gc time
+  // limit is being exceeded as checked below.
+  *gc_overhead_limit_was_exceeded = false;
+
   HeapWord* result = NULL;

   // Loop until the allocation is satisfied,
@@ -524,12 +546,6 @@ HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
         return result;
       }

-      // There are NULL's returned for different circumstances below.
-      // In general gc_overhead_limit_was_exceeded should be false so
-      // set it so here and reset it to true only if the gc time
-      // limit is being exceeded as checked below.
-      *gc_overhead_limit_was_exceeded = false;
-
       if (GC_locker::is_active_and_needs_gc()) {
         if (is_tlab) {
           return NULL;  // Caller will retry allocating individual object
@@ -568,18 +584,6 @@ HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
         gc_count_before = Universe::heap()->total_collections();
       }

-      // Allocation has failed and a collection is about
-      // to be done.  If the gc time limit was exceeded the
-      // last time a collection was done, return NULL so
-      // that an out-of-memory will be thrown.  Clear
-      // gc_time_limit_exceeded so that subsequent attempts
-      // at a collection will be made.
-      if (size_policy()->gc_time_limit_exceeded()) {
-        *gc_overhead_limit_was_exceeded = true;
-        size_policy()->set_gc_time_limit_exceeded(false);
-        return NULL;
-      }
-
       VM_GenCollectForAllocation op(size,
                                     is_tlab,
                                     gc_count_before);
@@ -590,6 +594,24 @@ HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
         assert(result == NULL, "must be NULL if gc_locked() is true");
         continue;  // retry and/or stall as necessary
       }
+
+      // Allocation has failed and a collection
+      // has been done.  If the gc time limit was exceeded
+      // this time, return NULL so that an out-of-memory
+      // will be thrown.  Clear gc_overhead_limit_exceeded
+      // so that the overhead-exceeded state does not persist.
+
+      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
+      const bool softrefs_clear = all_soft_refs_clear();
+      assert(!limit_exceeded || softrefs_clear, "Should have been cleared");
+      if (limit_exceeded && softrefs_clear) {
+        *gc_overhead_limit_was_exceeded = true;
+        size_policy()->set_gc_overhead_limit_exceeded(false);
+        if (op.result() != NULL) {
+          CollectedHeap::fill_with_object(op.result(), size);
+        }
+        return NULL;
+      }
       assert(result == NULL || gch->is_in_reserved(result),
              "result not in heap");
       return result;
@@ -688,6 +710,9 @@ HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
     return result;
   }

+  assert(!should_clear_all_soft_refs(),
+         "Flag should have been handled and cleared prior to this point");
+
   // What else?  We might try synchronous finalization later.  If the total
   // space available is large enough for the allocation, then a more
   // complete compaction phase than we've tried so far might be

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -69,12 +69,28 @@ class CollectorPolicy : public CHeapObj {
   size_t _min_alignment;
   size_t _max_alignment;

+  // The sizing of the heap is controlled by a sizing policy.
+  AdaptiveSizePolicy* _size_policy;
+
+  // Set to true when policy wants soft refs cleared.
+  // Reset to false by gc after it clears all soft refs.
+  bool _should_clear_all_soft_refs;
+  // Set to true by the GC if the just-completed gc cleared all
+  // softrefs.  This is set to true whenever a gc clears all softrefs, and
+  // set to false each time gc returns to the mutator.  For example, in the
+  // ParallelScavengeHeap case the latter would be done toward the end of
+  // mem_allocate() where it returns op.result()
+  bool _all_soft_refs_clear;
+
   CollectorPolicy() :
     _min_alignment(1),
     _max_alignment(1),
     _initial_heap_byte_size(0),
     _max_heap_byte_size(0),
-    _min_heap_byte_size(0)
+    _min_heap_byte_size(0),
+    _size_policy(NULL),
+    _should_clear_all_soft_refs(false),
+    _all_soft_refs_clear(false)
   {}

 public:
@@ -98,6 +114,19 @@ class CollectorPolicy : public CHeapObj {
     G1CollectorPolicyKind
   };

+  AdaptiveSizePolicy* size_policy() { return _size_policy; }
+  bool should_clear_all_soft_refs() { return _should_clear_all_soft_refs; }
+  void set_should_clear_all_soft_refs(bool v) { _should_clear_all_soft_refs = v; }
+  // Returns the current value of _should_clear_all_soft_refs.
+  // _should_clear_all_soft_refs is set to false as a side effect.
+  bool use_should_clear_all_soft_refs(bool v);
+  bool all_soft_refs_clear() { return _all_soft_refs_clear; }
+  void set_all_soft_refs_clear(bool v) { _all_soft_refs_clear = v; }
+
+  // Called by the GC after Soft Refs have been cleared to indicate
+  // that the request in _should_clear_all_soft_refs has been fulfilled.
+  void cleared_all_soft_refs();
+
   // Identification methods.
   virtual GenCollectorPolicy* as_generation_policy() { return NULL; }
   virtual TwoGenerationCollectorPolicy* as_two_generation_policy() { return NULL; }
@@ -165,6 +194,22 @@ class CollectorPolicy : public CHeapObj {
 };

+class ClearedAllSoftRefs : public StackObj {
+  bool _clear_all_soft_refs;
+  CollectorPolicy* _collector_policy;
+ public:
+  ClearedAllSoftRefs(bool clear_all_soft_refs,
+                     CollectorPolicy* collector_policy) :
+    _clear_all_soft_refs(clear_all_soft_refs),
+    _collector_policy(collector_policy) {}
+
+  ~ClearedAllSoftRefs() {
+    if (_clear_all_soft_refs) {
+      _collector_policy->cleared_all_soft_refs();
+    }
+  }
+};
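ClearedAllSoftRefs is a stack-allocated RAII helper: the notification fires in the destructor, i.e. only after the collection code it brackets has run to completion, and only if soft references were in fact cleared. A compilable miniature of the intended usage (CollectorPolicyStub stands in for the real CollectorPolicy):

#include <cstdio>

struct CollectorPolicyStub {
  void cleared_all_soft_refs() { std::puts("policy notified: soft refs cleared"); }
};

struct ClearedAllSoftRefs {
  bool _clear_all_soft_refs;
  CollectorPolicyStub* _collector_policy;
  ClearedAllSoftRefs(bool clear, CollectorPolicyStub* cp)
    : _clear_all_soft_refs(clear), _collector_policy(cp) {}
  ~ClearedAllSoftRefs() {
    if (_clear_all_soft_refs) _collector_policy->cleared_all_soft_refs();
  }
};

void collect(CollectorPolicyStub* cp, bool clear_all_soft_refs) {
  // The scope of casr covers everything that might consult or change
  // the should-clear request during the collection.
  ClearedAllSoftRefs casr(clear_all_soft_refs, cp);
  std::puts("collecting...");
}  // notification happens here, only if soft refs were cleared

int main() {
  CollectorPolicyStub cp;
  collect(&cp, true);   // prints both lines
  collect(&cp, false);  // prints only "collecting..."
}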
 class GenCollectorPolicy : public CollectorPolicy {
  protected:
   size_t _min_gen0_size;
@@ -173,10 +218,6 @@ class GenCollectorPolicy : public CollectorPolicy {

   GenerationSpec **_generations;

-  // The sizing of the different generations in the heap are controlled
-  // by a sizing policy.
-  AdaptiveSizePolicy* _size_policy;
-
   // Return true if an allocation should be attempted in the older
   // generation if it fails in the younger generation.  Return
   // false, otherwise.
@@ -236,14 +277,11 @@ class GenCollectorPolicy : public CollectorPolicy {
   virtual size_t large_typearray_limit();

   // Adaptive size policy
-  AdaptiveSizePolicy* size_policy() { return _size_policy; }
   virtual void initialize_size_policy(size_t init_eden_size,
                                       size_t init_promo_size,
                                       size_t init_survivor_size);
 };

 // All of hotspot's current collectors are subtypes of this
 // class. Currently, these collectors all use the same gen[0],
 // but have different gen[1] types.  If we add another subtype

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -594,6 +594,10 @@ void DefNewGeneration::collect(bool full,
     _tenuring_threshold =
       age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);

+    // A successful scavenge should restart the GC time limit count which is
+    // for full GC's.
+    AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
+    size_policy->reset_gc_overhead_limit_count();
     if (PrintGC && !PrintGCDetails) {
       gch->print_heap_change(gch_prev_used);
     }
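Why this reset matters: only consecutive over-limit full GCs should accumulate toward the threshold, so a scavenge that recovers space restarts the count. A toy timeline (threshold 5 is the assumed default of AdaptiveSizePolicyGCTimeLimitThreshold; everything else is a stub):

#include <cstdio>

int main() {
  const unsigned threshold = 5;        // assumed AdaptiveSizePolicyGCTimeLimitThreshold
  unsigned count = 0;
  const char* timeline = "fffsfffff";  // f = over-limit full GC, s = good scavenge
  for (const char* p = timeline; *p != '\0'; ++p) {
    if (*p == 'f') {
      if (++count >= threshold) {
        std::printf("OOM signalled after %u consecutive over-limit full GCs\n", count);
        count = 0;   // reset to avoid back-to-back OOMs
      }
    } else {
      count = 0;     // the scavenge recovered space: restart the count
    }
  }
  // Without the scavenge reset, the three leading 'f's would count toward
  // the threshold and the out-of-memory would fire earlier.
}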

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -428,7 +428,8 @@ void GenCollectedHeap::do_collection(bool full,
   assert(my_thread->is_VM_thread() ||
          my_thread->is_ConcurrentGC_thread(),
          "incorrect thread type capability");
-  assert(Heap_lock->is_locked(), "the requesting thread should have the Heap_lock");
+  assert(Heap_lock->is_locked(),
+         "the requesting thread should have the Heap_lock");
   guarantee(!is_gc_active(), "collection is not reentrant");
   assert(max_level < n_gens(), "sanity check");
@@ -436,6 +437,11 @@ void GenCollectedHeap::do_collection(bool full,
     return; // GC is disabled (e.g. JNI GetXXXCritical operation)
   }
+  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
+                          collector_policy()->should_clear_all_soft_refs();
+  ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
   const size_t perm_prev_used = perm_gen()->used();
   if (PrintHeapAtGC) {
@@ -560,11 +566,11 @@ void GenCollectedHeap::do_collection(bool full,
       if (rp->discovery_is_atomic()) {
         rp->verify_no_references_recorded();
         rp->enable_discovery();
-        rp->setup_policy(clear_all_soft_refs);
+        rp->setup_policy(do_clear_all_soft_refs);
       } else {
         // collect() below will enable discovery as appropriate
       }
-      _gens[i]->collect(full, clear_all_soft_refs, size, is_tlab);
+      _gens[i]->collect(full, do_clear_all_soft_refs, size, is_tlab);
       if (!rp->enqueuing_is_done()) {
         rp->enqueue_discovered_references();
       } else {
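do_collection now folds the policy's own demand to clear soft references into the caller's request, and the stack-allocated ClearedAllSoftRefs (only its constructor call appears here) looks like an RAII guard that reports back to the policy when the collection scope exits. A self-contained sketch of that idiom; everything beyond the constructor signature is an assumption:

// Sketch only, not HotSpot source.
class CollectorPolicy {
  bool _all_soft_refs_clear;   // assumed flag the policy keeps
 public:
  CollectorPolicy() : _all_soft_refs_clear(false) {}
  void set_all_soft_refs_clear(bool v) { _all_soft_refs_clear = v; }
  bool should_clear_all_soft_refs() const { return false; /* real heuristic lives elsewhere */ }
};

class ClearedAllSoftRefs {
  bool             _clear_all_soft_refs;
  CollectorPolicy* _policy;
 public:
  ClearedAllSoftRefs(bool clear_all_soft_refs, CollectorPolicy* policy)
    : _clear_all_soft_refs(clear_all_soft_refs), _policy(policy) {}
  ~ClearedAllSoftRefs() {
    if (_clear_all_soft_refs) {
      // record that the request was honored so the policy need not demand it again
      _policy->set_all_soft_refs_clear(true);
    }
  }
};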

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,13 @@ void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp,
                                        bool clear_all_softrefs) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+#ifdef ASSERT
+  if (gch->collector_policy()->should_clear_all_soft_refs()) {
+    assert(clear_all_softrefs, "Policy should have been checked earlier");
+  }
+#endif
   // hook up weak ref data so it can be used during Mark-Sweep
   assert(ref_processor() == NULL, "no stomping");
   assert(rp != NULL, "should be non-NULL");
@@ -44,7 +51,6 @@ void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp,
   // Increment the invocation count for the permanent generation, since it is
   // implicitly collected whenever we do a full mark sweep collection.
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
   gch->perm_gen()->stat_record()->invocations++;
   // Capture heap size before collection for printing.
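The new block is a debug-only cross-check: by the time GenMarkSweep runs, the caller (GenCollectedHeap::do_collection above) must already have folded the policy's should_clear_all_soft_refs() demand into clear_all_softrefs. A standalone sketch of the pattern, using the standard assert macro and hypothetical parameters standing in for the real calls:

#include <cassert>

// clear_all_softrefs: the caller's decision; policy_demands_clear: what the
// policy requires (both hypothetical stand-ins for the real accessors).
void invoke_at_safepoint_sketch(bool clear_all_softrefs, bool policy_demands_clear) {
#ifdef ASSERT
  if (policy_demands_clear) {
    assert(clear_all_softrefs && "Policy should have been checked earlier");
  }
#endif
  // ... mark-sweep proceeds, trusting clear_all_softrefs ...
}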

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -105,8 +105,7 @@ void C2Compiler::compile_method(ciEnv* env,
   }
   bool subsume_loads = true;
   bool do_escape_analysis = DoEscapeAnalysis &&
-    !(env->jvmti_can_hotswap_or_post_breakpoint() ||
-      env->jvmti_can_examine_or_deopt_anywhere());
+    !env->jvmti_can_access_local_variables();
   while (!env->failing()) {
     // Attempt to compile while subsuming loads into machine instructions.
     Compile C(env, this, target, entry_bci, subsume_loads, do_escape_analysis);
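Escape analysis is now disabled only when JVMTI can read local variables; the hotswap/breakpoint and examine-or-deopt-anywhere capabilities no longer force it off. The predicate reduces to the following illustrative helper (not HotSpot code):

// Scalar-replaced objects cannot be materialized for a debugger that reads
// locals, so that capability alone now turns escape analysis off.
bool should_do_escape_analysis(bool do_escape_analysis_flag,
                               bool jvmti_can_access_local_variables) {
  return do_escape_analysis_flag && !jvmti_can_access_local_variables;
}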

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,6 +44,8 @@ macro(Bool)
 macro(BoxLock)
 macro(ReverseBytesI)
 macro(ReverseBytesL)
+macro(ReverseBytesUS)
+macro(ReverseBytesS)
 macro(CProj)
 macro(CallDynamicJava)
 macro(CallJava)
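The two new ideal-node macros plausibly back 16-bit reverse-bytes intrinsics (Character.reverseBytes and Short.reverseBytes): both swap the two bytes of a 16-bit value, but US re-extends with zeros (Java char) while S sign-extends (Java short). A runnable sketch of the value semantics the node names suggest:

// Illustrative only; the node-to-intrinsic mapping is an assumption.
#include <cstdint>
#include <cstdio>

uint16_t reverse_bytes_us(uint16_t x) {            // zero-extended result
  return (uint16_t)((x << 8) | (x >> 8));
}

int16_t reverse_bytes_s(int16_t x) {               // sign-extended result
  uint16_t ux = (uint16_t)x;
  return (int16_t)((ux << 8) | (ux >> 8));
}

int main() {
  printf("%04x\n", (unsigned)reverse_bytes_us(0x1234));   // prints 3412
  printf("%d\n", reverse_bytes_s((int16_t)0x0080));       // 0x8000 -> -32768
  return 0;
}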

View File

@@ -871,7 +871,6 @@ void Compile::Init(int aliaslevel) {
   set_has_split_ifs(false);
   set_has_loops(has_method() && method()->has_loops()); // first approximation
   set_has_stringbuilder(false);
-  _deopt_happens = true; // start out assuming the worst
   _trap_can_recompile = false; // no traps emitted yet
   _major_progress = true; // start out assuming good things will happen
   set_has_unsafe_access(false);

View File

@@ -146,7 +146,6 @@ class Compile : public Phase {
   int _orig_pc_slot_offset_in_bytes;
   int _major_progress;           // Count of something big happening
-  bool _deopt_happens;           // TRUE if de-optimization CAN happen
   bool _has_loops;               // True if the method _may_ have some loops
   bool _has_split_ifs;           // True if the method _may_ have some split-if
   bool _has_unsafe_access;       // True if the method _may_ produce faults in unsafe loads or stores.
@@ -300,7 +299,6 @@ class Compile : public Phase {
   void set_freq_inline_size(int n) { _freq_inline_size = n; }
   int freq_inline_size() const   { return _freq_inline_size; }
   void set_max_inline_size(int n) { _max_inline_size = n; }
-  bool deopt_happens() const     { return _deopt_happens; }
   bool has_loops() const         { return _has_loops; }
   void set_has_loops(bool z)     { _has_loops = z; }
   bool has_split_ifs() const     { return _has_split_ifs; }

Some files were not shown because too many files have changed in this diff.