Merge

commit 9b4e46d215
@@ -38,3 +38,4 @@ ffd09e767dfa6d21466183a400f72cf62d53297f jdk7-b57
472c21584cfd7e9c0229ad6a100366a5c03d2976 jdk7-b61
c7ed15ab92ce36a09d264a5e34025884b2d7607f jdk7-b62
57f7e028c7ad1806500ae89eb3f4cd9a51b10e18 jdk7-b63
269c1ec4435dfb7b452ae6e3bdde005d55c5c830 jdk7-b64
@@ -120,6 +120,7 @@
    <th>Base OS and Architecture</th>
    <th>OS</th>
    <th>C/C++ Compiler</th>
    <th>BOOT JDK</th>
  </tr>
</thead>
<tbody>
@@ -127,57 +128,65 @@
  <td>Linux X86 (32-bit)</td>
  <td>Fedora 9</td>
  <td>gcc 4 </td>
  <td>JDK 6u14 FCS </td>
</tr>
<tr>
  <td>Linux X64 (64-bit)</td>
  <td>Fedora 9</td>
  <td>gcc 4 </td>
  <td>JDK 6u14 FCS </td>
</tr>
<tr>
  <td>Solaris SPARC (32-bit)</td>
  <td>Solaris 10 + patches
  <td>Solaris 10u2 + patches
    <br>
    See <a href="http://sunsolve.sun.com/pub-cgi/show.pl?target=patches/JavaSE" target="_blank">
    SunSolve</a> for patch downloads.
  </td>
  <td>Sun Studio 12</td>
  <td>JDK 6u14 FCS </td>
</tr>
<tr>
  <td>Solaris SPARCV9 (64-bit)</td>
  <td>Solaris 10 + patches
  <td>Solaris 10u2 + patches
    <br>
    See <a href="http://sunsolve.sun.com/pub-cgi/show.pl?target=patches/JavaSE" target="_blank">
    SunSolve</a> for patch downloads.
  </td>
  <td>Sun Studio 12</td>
  <td>JDK 6u14 FCS </td>
</tr>
<tr>
  <td>Solaris X86 (32-bit)</td>
  <td>Solaris 10 + patches
  <td>Solaris 10u2 + patches
    <br>
    See <a href="http://sunsolve.sun.com/pub-cgi/show.pl?target=patches/JavaSE" target="_blank">
    SunSolve</a> for patch downloads.
  </td>
  <td>Sun Studio 12</td>
  <td>JDK 6u14 FCS </td>
</tr>
<tr>
  <td>Solaris X64 (64-bit)</td>
  <td>Solaris 10 + patches
  <td>Solaris 10u2 + patches
    <br>
    See <a href="http://sunsolve.sun.com/pub-cgi/show.pl?target=patches/JavaSE" target="_blank">
    SunSolve</a> for patch downloads.
  </td>
  <td>Sun Studio 12</td>
  <td>JDK 6u14 FCS </td>
</tr>
<tr>
  <td>Windows X86 (32-bit)</td>
  <td>Windows XP</td>
  <td>Microsoft Visual Studio C++ 2008 Standard Edition</td>
  <td>JDK 6u14 FCS </td>
</tr>
<tr>
  <td>Windows X64 (64-bit)</td>
  <td>Windows Server 2003 - Enterprise x64 Edition</td>
  <td>Microsoft Platform SDK - April 2005</td>
  <td>JDK 6u14 FCS </td>
</tr>
</tbody>
</table>
@@ -38,3 +38,4 @@ f1e1cccbd13aa96d2d8bd872782ff764010bc22c jdk7-b60
e906b16a12a9a63b615898afa5d9673cbd1c5ab8 jdk7-b61
65b66117dbd70a493e9644aeb4033cf95a4e3c99 jdk7-b62
d20e45cd539f20405ff843652069cfd7550c5ab3 jdk7-b63
047dd27fddb607f8135296b3754131f6e13cb8c7 jdk7-b64
@@ -38,3 +38,4 @@ a77eddcd510c3972717c025cfcef9a60bfa4ecac jdk7-b60
27b728fd1281ab62e9d7e4424f8bbb6ca438d803 jdk7-b61
a88386380bdaaa5ab4ffbedf22c57bac5dbec034 jdk7-b62
32c83fb84370a35344676991a48440378e6b6c8a jdk7-b63
ba36394eb84b949b31212bdb32a518a8f92bab5b jdk7-b64
@@ -24,23 +24,64 @@

package sun.jvm.hotspot.code;

import java.util.*;

import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.runtime.VM;
import sun.jvm.hotspot.utilities.*;

public class DebugInfoReadStream extends CompressedReadStream {
  private NMethod code;
  private int InvocationEntryBCI;
  private List objectPool; // ArrayList<ObjectValue>

  public DebugInfoReadStream(NMethod code, int offset) {
    super(code.scopesDataBegin(), offset);
    InvocationEntryBCI = VM.getVM().getInvocationEntryBCI();
    this.code = code;
    this.objectPool = null;
  }

  public DebugInfoReadStream(NMethod code, int offset, List objectPool) {
    super(code.scopesDataBegin(), offset);
    InvocationEntryBCI = VM.getVM().getInvocationEntryBCI();
    this.code = code;
    this.objectPool = objectPool;
  }

  public OopHandle readOopHandle() {
    return code.getOopAt(readInt());
  }

  ScopeValue readObjectValue() {
    int id = readInt();
    if (Assert.ASSERTS_ENABLED) {
      Assert.that(objectPool != null, "object pool does not exist");
      for (Iterator itr = objectPool.iterator(); itr.hasNext();) {
        ObjectValue ov = (ObjectValue) itr.next();
        Assert.that(ov.id() != id, "should not be read twice");
      }
    }
    ObjectValue result = new ObjectValue(id);
    // Cache the object since an object field could reference it.
    objectPool.add(result);
    result.readObject(this);
    return result;
  }

  ScopeValue getCachedObject() {
    int id = readInt();
    Assert.that(objectPool != null, "object pool does not exist");
    for (Iterator itr = objectPool.iterator(); itr.hasNext();) {
      ObjectValue ov = (ObjectValue) itr.next();
      if (ov.id() == id) {
        return ov;
      }
    }
    Assert.that(false, "should not reach here");
    return null;
  }

  public int readBCI() {
    return readInt() + InvocationEntryBCI;
  }
@@ -29,6 +29,7 @@ import java.io.*;
public class MonitorValue {
  private ScopeValue owner;
  private Location basicLock;
  private boolean eliminated;

  // FIXME: not useful yet
  // MonitorValue(ScopeValue* owner, Location basic_lock);
@@ -36,10 +37,12 @@ public class MonitorValue {
  public MonitorValue(DebugInfoReadStream stream) {
    basicLock = new Location(stream);
    owner = ScopeValue.readFrom(stream);
    eliminated = stream.readBoolean();
  }

  public ScopeValue owner() { return owner; }
  public Location basicLock() { return basicLock; }
  public boolean eliminated() { return eliminated; }

  // FIXME: not yet implementable
  // void write_on(DebugInfoWriteStream* stream);
@@ -50,5 +53,8 @@ public class MonitorValue {
    tty.print(",");
    basicLock().printOn(tty);
    tty.print("}");
    if (eliminated) {
      tty.print(" (eliminated)");
    }
  }
}
@@ -0,0 +1,93 @@
/*
 * Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

package sun.jvm.hotspot.code;

import java.io.*;
import java.util.*;

import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.utilities.*;

/** An ObjectValue describes an object eliminated by escape analysis. */

public class ObjectValue extends ScopeValue {
  private int id;
  private ScopeValue klass;
  private List fieldsValue; // ArrayList<ScopeValue>

  // Field "boolean visited" is not implemented here since
  // it is used only during debug info creation.

  public ObjectValue(int id) {
    this.id = id;
    klass = null;
    fieldsValue = new ArrayList();
  }

  public boolean isObject() { return true; }
  public int id() { return id; }
  public ScopeValue getKlass() { return klass; }
  public List getFieldsValue() { return fieldsValue; }
  public ScopeValue getFieldAt(int i) { return (ScopeValue)fieldsValue.get(i); }
  public int fieldsSize() { return fieldsValue.size(); }

  // Field "value" is always NULL here since it is used
  // only during deoptimization of a compiled frame
  // pointing to reallocated object.
  public OopHandle getValue() { return null; }

  /** Serialization of debugging information */

  void readObject(DebugInfoReadStream stream) {
    klass = readFrom(stream);
    Assert.that(klass.isConstantOop(), "should be constant klass oop");
    int length = stream.readInt();
    for (int i = 0; i < length; i++) {
      ScopeValue val = readFrom(stream);
      fieldsValue.add(val);
    }
  }

  // Printing

  public void print() {
    printOn(System.out);
  }

  public void printOn(PrintStream tty) {
    tty.print("scalarObj[" + id + "]");
  }

  void printFieldsOn(PrintStream tty) {
    if (fieldsValue.size() > 0) {
      ((ScopeValue)fieldsValue.get(0)).printOn(tty);
    }
    for (int i = 1; i < fieldsValue.size(); i++) {
      tty.print(", ");
      ((ScopeValue)fieldsValue.get(i)).printOn(tty);
    }
  }

}
@@ -27,8 +27,10 @@ package sun.jvm.hotspot.code;
import java.io.*;
import java.util.*;

import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.oops.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.utilities.*;

/** ScopeDescs contain the information that makes source-level
    debugging of nmethods possible; each scopeDesc describes a method
@@ -45,10 +47,31 @@ public class ScopeDesc {
  private int localsDecodeOffset;
  private int expressionsDecodeOffset;
  private int monitorsDecodeOffset;
  /** Scalar replaced objects pool */
  private List objects; // ArrayList<ScopeValue>


  public ScopeDesc(NMethod code, int decodeOffset) {
    this.code = code;
    this.decodeOffset = decodeOffset;
    this.objects = decodeObjectValues(DebugInformationRecorder.SERIALIZED_NULL);

    // Decode header
    DebugInfoReadStream stream = streamAt(decodeOffset);

    senderDecodeOffset = stream.readInt();
    method = (Method) VM.getVM().getObjectHeap().newOop(stream.readOopHandle());
    bci = stream.readBCI();
    // Decode offsets for body and sender
    localsDecodeOffset = stream.readInt();
    expressionsDecodeOffset = stream.readInt();
    monitorsDecodeOffset = stream.readInt();
  }

  public ScopeDesc(NMethod code, int decodeOffset, int objectDecodeOffset) {
    this.code = code;
    this.decodeOffset = decodeOffset;
    this.objects = decodeObjectValues(objectDecodeOffset);

    // Decode header
    DebugInfoReadStream stream = streamAt(decodeOffset);
@@ -81,6 +104,11 @@ public class ScopeDesc {
    return decodeMonitorValues(monitorsDecodeOffset);
  }

  /** Returns a List<ObjectValue> */
  public List getObjects() {
    return objects;
  }

  /** Stack walking. Returns null if this is the outermost scope. */
  public ScopeDesc sender() {
    if (isTop()) {
@@ -131,7 +159,7 @@ public class ScopeDesc {
  //

  private DebugInfoReadStream streamAt(int decodeOffset) {
    return new DebugInfoReadStream(code, decodeOffset);
    return new DebugInfoReadStream(code, decodeOffset, objects);
  }

  /** Returns a List<ScopeValue> or null if no values were present */
@@ -161,4 +189,22 @@ public class ScopeDesc {
    }
    return res;
  }

  /** Returns a List<ObjectValue> or null if no values were present */
  private List decodeObjectValues(int decodeOffset) {
    if (decodeOffset == DebugInformationRecorder.SERIALIZED_NULL) {
      return null;
    }
    List res = new ArrayList();
    DebugInfoReadStream stream = new DebugInfoReadStream(code, decodeOffset, res);
    int length = stream.readInt();
    for (int i = 0; i < length; i++) {
      // Object values are pushed onto 'res' during the read so that
      // an object's fields can reference it (OBJECT_ID_CODE).
      ScopeValue.readFrom(stream);
      // res.add(ScopeValue.readFrom(stream));
    }
    Assert.that(res.size() == length, "inconsistent debug information");
    return res;
  }
}
@@ -49,12 +49,15 @@ public abstract class ScopeValue {
  static final int CONSTANT_OOP_CODE = 2;
  static final int CONSTANT_LONG_CODE = 3;
  static final int CONSTANT_DOUBLE_CODE = 4;
  static final int CONSTANT_OBJECT_CODE = 5;
  static final int CONSTANT_OBJECT_ID_CODE = 6;

  public boolean isLocation() { return false; }
  public boolean isConstantInt() { return false; }
  public boolean isConstantDouble() { return false; }
  public boolean isConstantLong() { return false; }
  public boolean isConstantOop() { return false; }
  public boolean isObject() { return false; }

  public static ScopeValue readFrom(DebugInfoReadStream stream) {
    switch (stream.readInt()) {
@@ -68,6 +71,10 @@ public abstract class ScopeValue {
      return new ConstantLongValue(stream);
    case CONSTANT_DOUBLE_CODE:
      return new ConstantDoubleValue(stream);
    case CONSTANT_OBJECT_CODE:
      return stream.readObjectValue();
    case CONSTANT_OBJECT_ID_CODE:
      return stream.getCachedObject();
    default:
      Assert.that(false, "should not reach here");
      return null;
@@ -249,6 +249,7 @@ public class ObjectReferenceImpl extends ValueImpl implements ObjectReference {
    OopHandle givenHandle = obj.getHandle();
    for (Iterator itr = monitors.iterator(); itr.hasNext();) {
      MonitorInfo mi = (MonitorInfo) itr.next();
      if (mi.eliminated() && frame.isCompiledFrame()) continue; // skip eliminated monitor
      if (givenHandle.equals(mi.owner())) {
        res++;
      }
@@ -301,6 +301,9 @@ public class ThreadReferenceImpl extends ObjectReferenceImpl
      List frameMonitors = frame.getMonitors(); // List<MonitorInfo>
      for (Iterator miItr = frameMonitors.iterator(); miItr.hasNext(); ) {
        sun.jvm.hotspot.runtime.MonitorInfo mi = (sun.jvm.hotspot.runtime.MonitorInfo) miItr.next();
        if (mi.eliminated() && frame.isCompiledFrame()) {
          continue; // skip eliminated monitor
        }
        OopHandle obj = mi.owner();
        if (obj == null) {
          // this monitor doesn't have an owning object so skip it
@@ -131,8 +131,18 @@ public class CompiledVFrame extends JavaVFrame {
    List result = new ArrayList(monitors.size());
    for (int i = 0; i < monitors.size(); i++) {
      MonitorValue mv = (MonitorValue) monitors.get(i);
      StackValue ownerSV = createStackValue(mv.owner()); // it is an oop
      result.add(new MonitorInfo(ownerSV.getObject(), resolveMonitorLock(mv.basicLock())));
      ScopeValue ov = mv.owner();
      StackValue ownerSV = createStackValue(ov); // it is an oop
      if (ov.isObject()) { // The owner object was scalar replaced
        Assert.that(mv.eliminated() && ownerSV.objIsScalarReplaced(), "monitor should be eliminated for scalar replaced object");
        // Put klass for scalar replaced object.
        ScopeValue kv = ((ObjectValue)ov).getKlass();
        Assert.that(kv.isConstantOop(), "klass should be oop constant for scalar replaced object");
        OopHandle k = ((ConstantOopReadValue)kv).getValue();
        result.add(new MonitorInfo(k, resolveMonitorLock(mv.basicLock()), mv.eliminated(), true));
      } else {
        result.add(new MonitorInfo(ownerSV.getObject(), resolveMonitorLock(mv.basicLock()), mv.eliminated(), false));
      }
    }
    return result;
  }
@@ -212,12 +222,12 @@ public class CompiledVFrame extends JavaVFrame {
        // long or is unused. He always saves a long. Here we know
        // a long was saved, but we only want a narrow oop back. Narrow the
        // saved long to the narrow oop that the JVM wants.
        return new StackValue(valueAddr.getCompOopHandleAt(VM.getVM().getIntSize()));
        return new StackValue(valueAddr.getCompOopHandleAt(VM.getVM().getIntSize()), 0);
      } else {
        return new StackValue(valueAddr.getCompOopHandleAt(0));
        return new StackValue(valueAddr.getCompOopHandleAt(0), 0);
      }
    } else if( loc.holdsOop() ) { // Holds an oop?
      return new StackValue(valueAddr.getOopHandleAt(0));
      return new StackValue(valueAddr.getOopHandleAt(0), 0);
    } else if( loc.holdsDouble() ) {
      // Double value in a single stack slot
      return new StackValue(valueAddr.getJIntAt(0) & 0xFFFFFFFF);
@@ -277,7 +287,7 @@ public class CompiledVFrame extends JavaVFrame {
      return new StackValue(((ConstantIntValue) sv).getValue() & 0xFFFFFFFF);
    } else if (sv.isConstantOop()) {
      // constant oop
      return new StackValue(((ConstantOopReadValue) sv).getValue());
      return new StackValue(((ConstantOopReadValue) sv).getValue(), 0);
    } else if (sv.isConstantDouble()) {
      // Constant double in a single stack slot
      double d = ((ConstantDoubleValue) sv).getValue();
@@ -285,6 +295,9 @@ public class CompiledVFrame extends JavaVFrame {
    } else if (VM.getVM().isLP64() && sv.isConstantLong()) {
      // Constant long in a single stack slot
      return new StackValue(((ConstantLongValue) sv).getValue() & 0xFFFFFFFF);
    } else if (sv.isObject()) {
      // Scalar replaced object in compiled frame
      return new StackValue(((ObjectValue)sv).getValue(), 1);
    }

    // Unknown ScopeValue type
@@ -61,7 +61,7 @@ public class InterpretedVFrame extends JavaVFrame {
      StackValue sv;
      if (oopMask.isOop(i)) {
        // oop value
        sv = new StackValue(addr.getOopHandleAt(0));
        sv = new StackValue(addr.getOopHandleAt(0), 0);
      } else {
        // integer
        // Fetch a signed integer the size of a stack slot
@@ -95,7 +95,7 @@ public class InterpretedVFrame extends JavaVFrame {
      StackValue sv;
      if (oopMask.isOop(i + nofLocals)) {
        // oop value
        sv = new StackValue(addr.getOopHandleAt(0));
        sv = new StackValue(addr.getOopHandleAt(0), 0);
      } else {
        // integer
        // Fetch a signed integer the size of a stack slot
@@ -113,7 +113,7 @@ public class InterpretedVFrame extends JavaVFrame {
    for (BasicObjectLock current = getFrame().interpreterFrameMonitorEnd();
         current.address().lessThan(getFrame().interpreterFrameMonitorBegin().address());
         current = getFrame().nextMonitorInInterpreterFrame(current)) {
      result.add(new MonitorInfo(current.obj(), current.lock()));
      result.add(new MonitorInfo(current.obj(), current.lock(), false, false));
    }
    return result;
  }
@@ -25,16 +25,39 @@
package sun.jvm.hotspot.runtime;

import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.utilities.*;

public class MonitorInfo {
  private OopHandle owner;
  private BasicLock lock;
  private OopHandle ownerKlass;
  private boolean eliminated;
  private boolean ownerIsScalarReplaced;

  public MonitorInfo(OopHandle owner, BasicLock lock) {
    this.owner = owner;
    this.lock = lock;
  public MonitorInfo(OopHandle owner, BasicLock lock, boolean eliminated, boolean ownerIsScalarReplaced) {
    if (!ownerIsScalarReplaced) {
      this.owner = owner;
      this.ownerKlass = null;
    } else {
      Assert.that(eliminated, "monitor should be eliminated for scalar replaced object");
      this.owner = null;
      this.ownerKlass = owner;
    }
    this.eliminated = eliminated;
    this.ownerIsScalarReplaced = ownerIsScalarReplaced;
  }

  public OopHandle owner() {
    Assert.that(!ownerIsScalarReplaced, "should not be called for scalar replaced object");
    return owner;
  }

  public OopHandle ownerKlass() {
    Assert.that(ownerIsScalarReplaced, "should not be called for not scalar replaced object");
    return ownerKlass;
  }

  public OopHandle owner() { return owner; }
  public BasicLock lock() { return lock; }
  public boolean eliminated() { return eliminated; }
  public boolean ownerIsScalarReplaced() { return ownerIsScalarReplaced; }
}
@@ -37,9 +37,11 @@ public class StackValue {
    type = BasicType.getTConflict();
  }

  public StackValue(OopHandle h) {
  public StackValue(OopHandle h, long scalar_replaced) {
    handleValue = h;
    type = BasicType.getTObject();
    integerValue = scalar_replaced;
    Assert.that(integerValue == 0 || handleValue == null, "not null object should not be marked as scalar replaced");
  }

  public StackValue(long i) {
@@ -59,6 +61,13 @@ public class StackValue {
    return handleValue;
  }

  boolean objIsScalarReplaced() {
    if (Assert.ASSERTS_ENABLED) {
      Assert.that(type == BasicType.getTObject(), "type check");
    }
    return integerValue != 0;
  }

  public long getInteger() {
    if (Assert.ASSERTS_ENABLED) {
      Assert.that(type == BasicType.getTInt(), "type check");
@@ -161,6 +161,8 @@ public class OopTreeNodeAdapter extends FieldTreeNodeAdapter {
          child = new OopTreeNodeAdapter(field.getValue(getObj()), field.getID(), getTreeTableMode());
        } catch (AddressException e) {
          child = new BadOopTreeNodeAdapter(field.getValueAsOopHandle(getObj()), field.getID(), getTreeTableMode());
        } catch (UnknownOopException e) {
          child = new BadOopTreeNodeAdapter(field.getValueAsOopHandle(getObj()), field.getID(), getTreeTableMode());
        }
      }
      ++curField;
@@ -135,6 +135,10 @@ public class JSJavaThread extends JSJavaInstance {
      List frameMonitors = frame.getMonitors(); // List<MonitorInfo>
      for (Iterator miItr = frameMonitors.iterator(); miItr.hasNext(); ) {
        MonitorInfo mi = (MonitorInfo) miItr.next();

        if (mi.eliminated() && frame.isCompiledFrame()) {
          continue; // skip eliminated monitor
        }
        OopHandle obj = mi.owner();
        if (obj == null) {
          // this monitor doesn't have an owning object so skip it
@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2009

HS_MAJOR_VER=16
HS_MINOR_VER=0
HS_BUILD_NUMBER=05
HS_BUILD_NUMBER=06

JDK_MAJOR_VER=1
JDK_MINOR_VER=7
@@ -68,7 +68,9 @@ endif

# CFLAGS_WARN holds compiler options to suppress/enable warnings.
# Compiler warnings are treated as errors
CFLAGS_WARN = +w -errwarn
ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 509), 1)
CFLAGS_WARN = +w -errwarn
endif
CFLAGS += $(CFLAGS_WARN)

ifeq ("${Platform_compiler}", "sparcWorks")
@@ -41,7 +41,7 @@ ifeq ($(COMPILER_REV_NUMERIC),509)
endif

# Workaround SS11 bug 6345274 (all platforms) (Fixed in SS11 patch and SS12)
ifeq ($(COMPILER_REV_NUMERIC),508))
ifeq ($(COMPILER_REV_NUMERIC),508)
OPT_CFLAGS/ciTypeFlow.o = $(OPT_CFLAGS/O2)
endif # COMPILER_REV_NUMERIC == 508
@@ -371,7 +371,7 @@ void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
  }
  __ move(value.result(), array_addr, null_check_info);
  if (obj_store) {
    // Is this precise?
    // Precise card mark
    post_barrier(LIR_OprFact::address(array_addr), value.result());
  }
}
@@ -685,11 +685,8 @@ void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
  LIR_Opr result = rlock_result(x);
  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), result);
  if (type == objectType) { // Write-barrier needed for Object fields.
#ifdef PRECISE_CARDMARK
    // Precise card mark since could either be object or array
    post_barrier(addr, val.result());
#else
    post_barrier(obj.result(), val.result());
#endif // PRECISE_CARDMARK
  }
}
@@ -1891,17 +1891,17 @@ RegMask Matcher::modL_proj_mask() {
// The intptr_t operand types, defined by textual substitution.
// (Cf. opto/type.hpp. This lets us avoid many, many other ifdefs.)
#ifdef _LP64
#define immX immL
#define immX13 immL13
#define immX13m7 immL13m7
#define iRegX iRegL
#define g1RegX g1RegL
#define immX immL
#define immX13 immL13
#define immX13m7 immL13m7
#define iRegX iRegL
#define g1RegX g1RegL
#else
#define immX immI
#define immX13 immI13
#define immX13m7 immI13m7
#define iRegX iRegI
#define g1RegX g1RegI
#define immX immI
#define immX13 immI13
#define immX13m7 immI13m7
#define iRegX iRegI
#define g1RegX g1RegI
#endif

//----------ENCODING BLOCK-----------------------------------------------------
@@ -3446,6 +3446,15 @@ operand immI() %{
  interface(CONST_INTER);
%}

// Integer Immediate: 8-bit
operand immI8() %{
  predicate(Assembler::is_simm(n->get_int(), 8));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 13-bit
operand immI13() %{
  predicate(Assembler::is_simm13(n->get_int()));
@@ -3466,6 +3475,15 @@ operand immI13m7() %{
  interface(CONST_INTER);
%}

// Integer Immediate: 16-bit
operand immI16() %{
  predicate(Assembler::is_simm(n->get_int(), 16));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Unsigned (positive) Integer Immediate: 13-bit
operand immU13() %{
  predicate((0 <= n->get_int()) && Assembler::is_simm13(n->get_int()));
@@ -5544,7 +5562,7 @@ instruct loadUB(iRegI dst, memory mem) %{
  ins_encode %{
    __ ldub($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mask_mem);
  ins_pipe(iload_mem);
%}

// Load Unsigned Byte (8bit UNsigned) into a Long Register
@@ -5557,7 +5575,22 @@ instruct loadUB2L(iRegL dst, memory mem) %{
  ins_encode %{
    __ ldub($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mask_mem);
  ins_pipe(iload_mem);
%}

// Load Unsigned Byte (8 bit UNsigned) with 8-bit mask into Long Register
instruct loadUB2L_immI8(iRegL dst, memory mem, immI8 mask) %{
  match(Set dst (ConvI2L (AndI (LoadUB mem) mask)));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST);

  size(2*4);
  format %{ "LDUB $mem,$dst\t# ubyte & 8-bit mask -> long\n\t"
            "AND $dst,$mask,$dst" %}
  ins_encode %{
    __ ldub($mem$$Address, $dst$$Register);
    __ and3($dst$$Register, $mask$$constant, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Short (16bit signed)
@@ -5610,7 +5643,7 @@ instruct loadUS(iRegI dst, memory mem) %{
  ins_encode %{
    __ lduh($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mask_mem);
  ins_pipe(iload_mem);
%}

// Load Unsigned Short/Char (16 bit UNsigned) to Byte (8 bit signed)
@@ -5636,7 +5669,56 @@ instruct loadUS2L(iRegL dst, memory mem) %{
  ins_encode %{
    __ lduh($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mask_mem);
  ins_pipe(iload_mem);
%}

// Load Unsigned Short/Char (16bit UNsigned) with mask 0xFF into a Long Register
instruct loadUS2L_immI_255(iRegL dst, indOffset13m7 mem, immI_255 mask) %{
  match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUB $mem+1,$dst\t! ushort/char & 0xFF -> long" %}
  ins_encode %{
    __ ldub($mem$$Address, $dst$$Register, 1); // LSB is index+1 on BE
  %}
  ins_pipe(iload_mem);
%}

// Load Unsigned Short/Char (16bit UNsigned) with a 13-bit mask into a Long Register
instruct loadUS2L_immI13(iRegL dst, memory mem, immI13 mask) %{
  match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST);

  size(2*4);
  format %{ "LDUH $mem,$dst\t! ushort/char & 13-bit mask -> long\n\t"
            "AND $dst,$mask,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    __ lduh($mem$$Address, Rdst);
    __ and3(Rdst, $mask$$constant, Rdst);
  %}
  ins_pipe(iload_mem);
%}

// Load Unsigned Short/Char (16bit UNsigned) with a 16-bit mask into a Long Register
instruct loadUS2L_immI16(iRegL dst, memory mem, immI16 mask, iRegL tmp) %{
  match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
  effect(TEMP dst, TEMP tmp);
  ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST);

  size(3*4);
  format %{ "LDUH $mem,$dst\t! ushort/char & 16-bit mask -> long\n\t"
            "SET $mask,$tmp\n\t"
            "AND $dst,$tmp,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rtmp = $tmp$$Register;
    __ lduh($mem$$Address, Rdst);
    __ set($mask$$constant, Rtmp);
    __ and3(Rdst, Rtmp, Rdst);
  %}
  ins_pipe(iload_mem);
%}

// Load Integer
@@ -5718,6 +5800,68 @@ instruct loadI2L(iRegL dst, memory mem) %{
  ins_encode %{
    __ ldsw($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Integer with mask 0xFF into a Long Register
instruct loadI2L_immI_255(iRegL dst, indOffset13m7 mem, immI_255 mask) %{
  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUB $mem+3,$dst\t! int & 0xFF -> long" %}
  ins_encode %{
    __ ldub($mem$$Address, $dst$$Register, 3); // LSB is index+3 on BE
  %}
  ins_pipe(iload_mem);
%}

// Load Integer with mask 0xFFFF into a Long Register
instruct loadI2L_immI_65535(iRegL dst, indOffset13m7 mem, immI_65535 mask) %{
  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUH $mem+2,$dst\t! int & 0xFFFF -> long" %}
  ins_encode %{
    __ lduh($mem$$Address, $dst$$Register, 2); // LSW is index+2 on BE
  %}
  ins_pipe(iload_mem);
%}

// Load Integer with a 13-bit mask into a Long Register
instruct loadI2L_immI13(iRegL dst, memory mem, immI13 mask) %{
  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST);

  size(2*4);
  format %{ "LDUW $mem,$dst\t! int & 13-bit mask -> long\n\t"
            "AND $dst,$mask,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    __ lduw($mem$$Address, Rdst);
    __ and3(Rdst, $mask$$constant, Rdst);
  %}
  ins_pipe(iload_mem);
%}

// Load Integer with a 32-bit mask into a Long Register
instruct loadI2L_immI(iRegL dst, memory mem, immI mask, iRegL tmp) %{
  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
  effect(TEMP dst, TEMP tmp);
  ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST);

  size(3*4);
  format %{ "LDUW $mem,$dst\t! int & 32-bit mask -> long\n\t"
            "SET $mask,$tmp\n\t"
            "AND $dst,$tmp,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rtmp = $tmp$$Register;
    __ lduw($mem$$Address, Rdst);
    __ set($mask$$constant, Rtmp);
    __ and3(Rdst, Rtmp, Rdst);
  %}
  ins_pipe(iload_mem);
%}
@@ -1372,6 +1372,8 @@ void InterpreterMacroAssembler::profile_null_seen(Register mdp) {
  // If no method data exists, go to profile_continue.
  test_method_data_pointer(mdp, profile_continue);

  set_mdp_flag_at(mdp, BitData::null_seen_byte_constant());

  // The method data pointer needs to be updated.
  int mdp_delta = in_bytes(BitData::bit_data_size());
  if (TypeProfileCasts) {
@@ -1409,6 +1409,8 @@ void InterpreterMacroAssembler::profile_null_seen(Register mdp) {
  // If no method data exists, go to profile_continue.
  test_method_data_pointer(mdp, profile_continue);

  set_mdp_flag_at(mdp, BitData::null_seen_byte_constant());

  // The method data pointer needs to be updated.
  int mdp_delta = in_bytes(BitData::bit_data_size());
  if (TypeProfileCasts) {
@@ -6885,8 +6885,9 @@ instruct loadB(xRegI dst, memory mem) %{
%}

// Load Byte (8bit signed) into Long Register
instruct loadB2L(eRegL dst, memory mem) %{
instruct loadB2L(eRegL dst, memory mem, eFlagsReg cr) %{
  match(Set dst (ConvI2L (LoadB mem)));
  effect(KILL cr);

  ins_cost(375);
  format %{ "MOVSX8 $dst.lo,$mem\t# byte -> long\n\t"
@@ -6917,22 +6918,40 @@ instruct loadUB(xRegI dst, memory mem) %{
%}

// Load Unsigned Byte (8 bit UNsigned) into Long Register
instruct loadUB2L(eRegL dst, memory mem)
%{
instruct loadUB2L(eRegL dst, memory mem, eFlagsReg cr) %{
  match(Set dst (ConvI2L (LoadUB mem)));
  effect(KILL cr);

  ins_cost(250);
  format %{ "MOVZX8 $dst.lo,$mem\t# ubyte -> long\n\t"
            "XOR $dst.hi,$dst.hi" %}

  ins_encode %{
    __ movzbl($dst$$Register, $mem$$Address);
    __ xorl(HIGH_FROM_LOW($dst$$Register), HIGH_FROM_LOW($dst$$Register));
    Register Rdst = $dst$$Register;
    __ movzbl(Rdst, $mem$$Address);
    __ xorl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rdst));
  %}

  ins_pipe(ialu_reg_mem);
%}

// Load Unsigned Byte (8 bit UNsigned) with mask into Long Register
instruct loadUB2L_immI8(eRegL dst, memory mem, immI8 mask, eFlagsReg cr) %{
  match(Set dst (ConvI2L (AndI (LoadUB mem) mask)));
  effect(KILL cr);

  format %{ "MOVZX8 $dst.lo,$mem\t# ubyte & 8-bit mask -> long\n\t"
            "XOR $dst.hi,$dst.hi\n\t"
            "AND $dst.lo,$mask" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    __ movzbl(Rdst, $mem$$Address);
    __ xorl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rdst));
    __ andl(Rdst, $mask$$constant);
  %}
  ins_pipe(ialu_reg_mem);
%}

// Load Short (16bit signed)
instruct loadS(eRegI dst, memory mem) %{
  match(Set dst (LoadS mem));
@@ -6960,8 +6979,9 @@ instruct loadS2B(eRegI dst, memory mem, immI_24 twentyfour) %{
%}

// Load Short (16bit signed) into Long Register
instruct loadS2L(eRegL dst, memory mem) %{
instruct loadS2L(eRegL dst, memory mem, eFlagsReg cr) %{
  match(Set dst (ConvI2L (LoadS mem)));
  effect(KILL cr);

  ins_cost(375);
  format %{ "MOVSX $dst.lo,$mem\t# short -> long\n\t"
@@ -7004,8 +7024,9 @@ instruct loadUS2B(eRegI dst, memory mem, immI_24 twentyfour) %{
%}

// Load Unsigned Short/Char (16 bit UNsigned) into Long Register
instruct loadUS2L(eRegL dst, memory mem) %{
instruct loadUS2L(eRegL dst, memory mem, eFlagsReg cr) %{
  match(Set dst (ConvI2L (LoadUS mem)));
  effect(KILL cr);

  ins_cost(250);
  format %{ "MOVZX $dst.lo,$mem\t# ushort/char -> long\n\t"
@@ -7019,6 +7040,38 @@ instruct loadUS2L(eRegL dst, memory mem) %{
  ins_pipe(ialu_reg_mem);
%}

// Load Unsigned Short/Char (16 bit UNsigned) with mask 0xFF into Long Register
instruct loadUS2L_immI_255(eRegL dst, memory mem, immI_255 mask, eFlagsReg cr) %{
  match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
  effect(KILL cr);

  format %{ "MOVZX8 $dst.lo,$mem\t# ushort/char & 0xFF -> long\n\t"
            "XOR $dst.hi,$dst.hi" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    __ movzbl(Rdst, $mem$$Address);
    __ xorl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rdst));
  %}
  ins_pipe(ialu_reg_mem);
%}

// Load Unsigned Short/Char (16 bit UNsigned) with a 16-bit mask into Long Register
instruct loadUS2L_immI16(eRegL dst, memory mem, immI16 mask, eFlagsReg cr) %{
  match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
  effect(KILL cr);

  format %{ "MOVZX $dst.lo, $mem\t# ushort/char & 16-bit mask -> long\n\t"
            "XOR $dst.hi,$dst.hi\n\t"
            "AND $dst.lo,$mask" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    __ movzwl(Rdst, $mem$$Address);
    __ xorl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rdst));
    __ andl(Rdst, $mask$$constant);
  %}
  ins_pipe(ialu_reg_mem);
%}

// Load Integer
instruct loadI(eRegI dst, memory mem) %{
  match(Set dst (LoadI mem));
@@ -7082,8 +7135,9 @@ instruct loadI2US(eRegI dst, memory mem, immI_65535 mask) %{
%}

// Load Integer into Long Register
instruct loadI2L(eRegL dst, memory mem) %{
instruct loadI2L(eRegL dst, memory mem, eFlagsReg cr) %{
  match(Set dst (ConvI2L (LoadI mem)));
  effect(KILL cr);

  ins_cost(375);
  format %{ "MOV $dst.lo,$mem\t# int -> long\n\t"
@@ -7099,9 +7153,57 @@ instruct loadI2L(eRegL dst, memory mem) %{
  ins_pipe(ialu_reg_mem);
%}

// Load Integer with mask 0xFF into Long Register
instruct loadI2L_immI_255(eRegL dst, memory mem, immI_255 mask, eFlagsReg cr) %{
  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
  effect(KILL cr);

  format %{ "MOVZX8 $dst.lo,$mem\t# int & 0xFF -> long\n\t"
            "XOR $dst.hi,$dst.hi" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    __ movzbl(Rdst, $mem$$Address);
    __ xorl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rdst));
  %}
  ins_pipe(ialu_reg_mem);
%}

// Load Integer with mask 0xFFFF into Long Register
instruct loadI2L_immI_65535(eRegL dst, memory mem, immI_65535 mask, eFlagsReg cr) %{
  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
  effect(KILL cr);

  format %{ "MOVZX $dst.lo,$mem\t# int & 0xFFFF -> long\n\t"
            "XOR $dst.hi,$dst.hi" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    __ movzwl(Rdst, $mem$$Address);
    __ xorl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rdst));
  %}
  ins_pipe(ialu_reg_mem);
%}

// Load Integer with 32-bit mask into Long Register
instruct loadI2L_immI(eRegL dst, memory mem, immI mask, eFlagsReg cr) %{
  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
  effect(KILL cr);

  format %{ "MOV $dst.lo,$mem\t# int & 32-bit mask -> long\n\t"
            "XOR $dst.hi,$dst.hi\n\t"
            "AND $dst.lo,$mask" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    __ movl(Rdst, $mem$$Address);
    __ xorl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rdst));
    __ andl(Rdst, $mask$$constant);
  %}
  ins_pipe(ialu_reg_mem);
%}

// Load Unsigned Integer into Long Register
instruct loadUI2L(eRegL dst, memory mem) %{
instruct loadUI2L(eRegL dst, memory mem, eFlagsReg cr) %{
  match(Set dst (LoadUI2L mem));
  effect(KILL cr);

  ins_cost(250);
  format %{ "MOV $dst.lo,$mem\t# uint -> long\n\t"
@@ -7695,6 +7797,17 @@ instruct storeL(long_memory mem, eRegL src) %{
  ins_pipe( ialu_mem_long_reg );
%}

// Store Long to Integer
instruct storeL2I(memory mem, eRegL src) %{
  match(Set mem (StoreI mem (ConvL2I src)));

  format %{ "MOV $mem,$src.lo\t# long -> int" %}
  ins_encode %{
    __ movl($mem$$Address, $src$$Register);
  %}
  ins_pipe(ialu_mem_reg);
%}

// Volatile Store Long. Must be atomic, so move it into
// the FP TOS and then do a 64-bit FIST. Has to probe the
// target address before the store (for null-ptr checks)
@@ -6444,6 +6444,21 @@ instruct loadUB2L(rRegL dst, memory mem)
  ins_pipe(ialu_reg_mem);
%}

// Load Unsigned Byte (8 bit UNsigned) with an 8-bit mask into Long Register
instruct loadUB2L_immI8(rRegL dst, memory mem, immI8 mask, rFlagsReg cr) %{
  match(Set dst (ConvI2L (AndI (LoadUB mem) mask)));
  effect(KILL cr);

  format %{ "movzbq $dst, $mem\t# ubyte & 8-bit mask -> long\n\t"
            "andl $dst, $mask" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    __ movzbq(Rdst, $mem$$Address);
    __ andl(Rdst, $mask$$constant);
  %}
  ins_pipe(ialu_reg_mem);
%}

// Load Short (16 bit signed)
instruct loadS(rRegI dst, memory mem)
%{
@@ -6528,6 +6543,32 @@ instruct loadUS2L(rRegL dst, memory mem)
  ins_pipe(ialu_reg_mem);
%}

// Load Unsigned Short/Char (16 bit UNsigned) with mask 0xFF into Long Register
instruct loadUS2L_immI_255(rRegL dst, memory mem, immI_255 mask) %{
  match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));

  format %{ "movzbq $dst, $mem\t# ushort/char & 0xFF -> long" %}
  ins_encode %{
    __ movzbq($dst$$Register, $mem$$Address);
  %}
  ins_pipe(ialu_reg_mem);
%}

// Load Unsigned Short/Char (16 bit UNsigned) with mask into Long Register
instruct loadUS2L_immI16(rRegL dst, memory mem, immI16 mask, rFlagsReg cr) %{
  match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
  effect(KILL cr);

  format %{ "movzwq $dst, $mem\t# ushort/char & 16-bit mask -> long\n\t"
            "andl $dst, $mask" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    __ movzwq(Rdst, $mem$$Address);
    __ andl(Rdst, $mask$$constant);
  %}
  ins_pipe(ialu_reg_mem);
%}

// Load Integer
instruct loadI(rRegI dst, memory mem)
%{
@@ -6606,6 +6647,43 @@ instruct loadI2L(rRegL dst, memory mem)
  ins_pipe(ialu_reg_mem);
%}

// Load Integer with mask 0xFF into Long Register
instruct loadI2L_immI_255(rRegL dst, memory mem, immI_255 mask) %{
  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));

  format %{ "movzbq $dst, $mem\t# int & 0xFF -> long" %}
  ins_encode %{
    __ movzbq($dst$$Register, $mem$$Address);
  %}
  ins_pipe(ialu_reg_mem);
%}

// Load Integer with mask 0xFFFF into Long Register
instruct loadI2L_immI_65535(rRegL dst, memory mem, immI_65535 mask) %{
  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));

  format %{ "movzwq $dst, $mem\t# int & 0xFFFF -> long" %}
  ins_encode %{
    __ movzwq($dst$$Register, $mem$$Address);
  %}
  ins_pipe(ialu_reg_mem);
%}

// Load Integer with a 32-bit mask into Long Register
instruct loadI2L_immI(rRegL dst, memory mem, immI mask, rFlagsReg cr) %{
  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
  effect(KILL cr);

  format %{ "movl $dst, $mem\t# int & 32-bit mask -> long\n\t"
            "andl $dst, $mask" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    __ movl(Rdst, $mem$$Address);
    __ andl(Rdst, $mask$$constant);
  %}
  ins_pipe(ialu_reg_mem);
%}

// Load Unsigned Integer into Long Register
instruct loadUI2L(rRegL dst, memory mem)
%{
@@ -11673,8 +11751,9 @@ instruct convI2L_reg_reg(rRegL dst, rRegI src)

  ins_cost(125);
  format %{ "movslq $dst, $src\t# i2l" %}
  opcode(0x63); // needs REX.W
  ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst,src));
  ins_encode %{
    __ movslq($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
@@ -1,5 +1,5 @@
/*
 * Copyright 1999-2005 Sun Microsystems, Inc. All Rights Reserved.
 * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -39,7 +39,6 @@ public class Database {
  private HashMap<String,String> platformDepFiles;
  private long threshold;
  private int nOuterFiles;
  private int nPrecompiledFiles;
  private boolean missingOk;
  private Platform plat;
  /** These allow you to specify files not in the include database
@@ -62,7 +61,6 @@ public class Database {

    threshold = t;
    nOuterFiles = 0;
    nPrecompiledFiles = 0;
    missingOk = false;
    firstFile = null;
    lastFile = null;
@@ -343,7 +341,6 @@ public class Database {
                   plat.getGIFileTemplate().getInvDir() +
                   list.getName() +
                   "\"");
      nPrecompiledFiles += 1;
    }
  }
  inclFile.println();
@@ -408,22 +405,22 @@ public class Database {
      gd.println();
    }

    if (nPrecompiledFiles > 0) {
      // write Precompiled_Files = ...
      gd.println("Precompiled_Files = \\");
      for (Iterator iter = grandInclude.iterator(); iter.hasNext(); ) {
        FileList list = (FileList) iter.next();
    // write Precompiled_Files = ...
    gd.println("Precompiled_Files = \\");
    for (Iterator iter = grandInclude.iterator(); iter.hasNext(); ) {
      FileList list = (FileList) iter.next();
      if (list.getCount() >= threshold) {
        gd.println(list.getName() + " \\");
        String platformDep = platformDepFiles.get(list.getName());
        if (platformDep != null) {
          // make sure changes to the platform dependent file will
          // cause regeneration of the pch file.
          gd.println(platformDep + " \\");
        // make sure changes to the platform dependent file will
        // cause regeneration of the pch file.
        gd.println(platformDep + " \\");
        }
      }
      gd.println();
      gd.println();
    }
    gd.println();
    gd.println();

    gd.println("DTraced_Files = \\");
    for (Iterator iter = outerFiles.iterator(); iter.hasNext(); ) {
@@ -483,7 +480,6 @@ public class Database {
  }

  if (plat.includeGIDependencies()
      && nPrecompiledFiles > 0
      && anII.getUseGrandInclude()) {
    gd.println(" $(Precompiled_Files) \\");
  }
@@ -1367,11 +1367,11 @@ void ArchDesc::declareClasses(FILE *fp) {
  else if (!strcmp(oper->ideal_type(_globalNames), "ConN")) {
    // Access the locally stored constant
    fprintf(fp," virtual intptr_t constant() const {");
    fprintf(fp, " return _c0->make_oopptr()->get_con();");
    fprintf(fp, " return _c0->get_ptrtype()->get_con();");
    fprintf(fp, " }\n");
    // Generate query to determine if this pointer is an oop
    fprintf(fp," virtual bool constant_is_oop() const {");
    fprintf(fp, " return _c0->make_oopptr()->isa_oop_ptr();");
    fprintf(fp, " return _c0->get_ptrtype()->isa_oop_ptr();");
    fprintf(fp, " }\n");
  }
  else if (!strcmp(oper->ideal_type(_globalNames), "ConL")) {
@@ -1534,12 +1534,8 @@ void LIRGenerator::do_StoreField(StoreField* x) {
  }

  if (is_oop) {
#ifdef PRECISE_CARDMARK
    // Precise cardmarks don't work
    post_barrier(LIR_OprFact::address(address), value.result());
#else
    // Store to object so mark the card of the header
    post_barrier(object.result(), value.result());
#endif // PRECISE_CARDMARK
  }

  if (is_volatile && os::is_MP()) {
@@ -1233,6 +1233,41 @@ public:
                               CardTableModRefBS::card_shift);
  }

  // It takes a region that's not empty (i.e., it has at least one
  // live object in it) and sets its corresponding bit on the region
  // bitmap to 1. If the region is "starts humongous" it will also set
  // to 1 the bits on the region bitmap that correspond to its
  // associated "continues humongous" regions.
  void set_bit_for_region(HeapRegion* hr) {
    assert(!hr->continuesHumongous(), "should have filtered those out");

    size_t index = hr->hrs_index();
    if (!hr->startsHumongous()) {
      // Normal (non-humongous) case: just set the bit.
      _region_bm->par_at_put((BitMap::idx_t) index, true);
    } else {
      // Starts humongous case: calculate how many regions are part of
      // this humongous region and then set the bit range. It might
      // have been a bit more efficient to look at the object that
      // spans these humongous regions to calculate their number from
      // the object's size. However, it's a good idea to calculate
      // this based on the metadata itself, and not the region
      // contents, so that this code is not aware of what goes into
      // the humongous regions (in case this changes in the future).
      G1CollectedHeap* g1h = G1CollectedHeap::heap();
      size_t end_index = index + 1;
      while (end_index < g1h->n_regions()) {
        HeapRegion* chr = g1h->region_at(end_index);
        if (!chr->continuesHumongous()) {
          break;
        }
        end_index += 1;
      }
      _region_bm->par_at_put_range((BitMap::idx_t) index,
                                   (BitMap::idx_t) end_index, true);
    }
  }

  bool doHeapRegion(HeapRegion* hr) {
    if (_co_tracker != NULL)
      _co_tracker->update();
@@ -1241,13 +1276,13 @@ public:
      _start_vtime_sec = os::elapsedVTime();

    if (hr->continuesHumongous()) {
      HeapRegion* hum_start = hr->humongous_start_region();
      // If the head region of the humongous region has been determined
      // to be alive, then all the tail regions should be marked
      // such as well.
      if (_region_bm->at(hum_start->hrs_index())) {
        _region_bm->par_at_put(hr->hrs_index(), 1);
      }
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

@@ -1343,14 +1378,14 @@ public:
          intptr_t(uintptr_t(tp) >> CardTableModRefBS::card_shift);
        mark_card_num_range(start_card_num, last_card_num);
        // This definitely means the region has live objects.
        _region_bm->par_at_put(hr->hrs_index(), 1);
        set_bit_for_region(hr);
      }
    }

    hr->add_to_marked_bytes(marked_bytes);
    // Update the live region bitmap.
    if (marked_bytes > 0) {
      _region_bm->par_at_put(hr->hrs_index(), 1);
      set_bit_for_region(hr);
    }
    hr->set_top_at_conc_mark_count(nextTop);
    _tot_live += hr->next_live_bytes();
@ -902,6 +902,10 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
|
||||
size_t word_size) {
|
||||
ResourceMark rm;
|
||||
|
||||
if (PrintHeapAtGC) {
|
||||
Universe::print_heap_before_gc();
|
||||
}
|
||||
|
||||
if (full && DisableExplicitGC) {
|
||||
gclog_or_tty->print("\n\n\nDisabling Explicit GC\n\n\n");
|
||||
return;
|
||||
@ -927,7 +931,7 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
|
||||
g1_policy()->record_full_collection_start();
|
||||
|
||||
gc_prologue(true);
|
||||
increment_total_collections();
|
||||
increment_total_collections(true /* full gc */);
|
||||
|
||||
size_t g1h_prev_used = used();
|
||||
assert(used() == recalculate_used(), "Should be equal");
|
||||
@ -1066,6 +1070,10 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
|
||||
assert( check_young_list_empty(false, false),
|
||||
"young list should be empty at this point");
|
||||
}
|
||||
|
||||
if (PrintHeapAtGC) {
|
||||
Universe::print_heap_after_gc();
|
||||
}
|
||||
}
|
||||
|
||||
void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
|
||||
@ -2325,9 +2333,37 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
void G1CollectedHeap::print() const { print_on(gclog_or_tty); }
|
||||
void G1CollectedHeap::print() const { print_on(tty); }
|
||||
|
||||
void G1CollectedHeap::print_on(outputStream* st) const {
|
||||
print_on(st, PrintHeapAtGCExtended);
|
||||
}
|
||||
|
||||
void G1CollectedHeap::print_on(outputStream* st, bool extended) const {
|
||||
st->print(" %-20s", "garbage-first heap");
|
||||
st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
|
||||
capacity()/K, used()/K);
|
||||
st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
|
||||
_g1_storage.low_boundary(),
|
||||
_g1_storage.high(),
|
||||
_g1_storage.high_boundary());
|
||||
st->cr();
|
||||
st->print(" region size " SIZE_FORMAT "K, ",
|
||||
HeapRegion::GrainBytes/K);
|
||||
size_t young_regions = _young_list->length();
|
||||
st->print(SIZE_FORMAT " young (" SIZE_FORMAT "K), ",
|
||||
young_regions, young_regions * HeapRegion::GrainBytes / K);
|
||||
size_t survivor_regions = g1_policy()->recorded_survivor_regions();
|
||||
st->print(SIZE_FORMAT " survivors (" SIZE_FORMAT "K)",
|
||||
survivor_regions, survivor_regions * HeapRegion::GrainBytes / K);
|
||||
st->cr();
|
||||
perm()->as_gen()->print_on(st);
|
||||
if (extended) {
|
||||
print_on_extended(st);
|
||||
}
|
||||
}
|
||||
|
||||
void G1CollectedHeap::print_on_extended(outputStream* st) const {
|
||||
PrintRegionClosure blk(st);
|
||||
_hrs->iterate(&blk);
|
||||
}
@@ -2408,10 +2444,6 @@ G1CollectedHeap* G1CollectedHeap::heap() {
}

void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
if (PrintHeapAtGC) {
gclog_or_tty->print_cr(" {Heap before GC collections=%d:", total_collections());
Universe::print();
}
assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
// Call allocation profiler
AllocationProfiler::iterate_since_last_gc();
@@ -2425,12 +2457,6 @@ void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
// is set.
COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
"derived pointer present"));

if (PrintHeapAtGC) {
gclog_or_tty->print_cr(" Heap after GC collections=%d:", total_collections());
Universe::print();
gclog_or_tty->print("} ");
}
}

void G1CollectedHeap::do_collection_pause() {
@@ -2559,241 +2585,250 @@ G1CollectedHeap::cleanup_surviving_young_words() {

void
G1CollectedHeap::do_collection_pause_at_safepoint() {
char verbose_str[128];
sprintf(verbose_str, "GC pause ");
if (g1_policy()->in_young_gc_mode()) {
if (g1_policy()->full_young_gcs())
strcat(verbose_str, "(young)");
else
strcat(verbose_str, "(partial)");
}
if (g1_policy()->should_initiate_conc_mark())
strcat(verbose_str, " (initial-mark)");

GCCauseSetter x(this, GCCause::_g1_inc_collection_pause);

// if PrintGCDetails is on, we'll print long statistics information
// in the collector policy code, so let's not print this as the output
// is messy if we do.
gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty);

ResourceMark rm;
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");
guarantee(!is_gc_active(), "collection is not reentrant");
assert(regions_accounted_for(), "Region leakage!");

increment_gc_time_stamp();

if (g1_policy()->in_young_gc_mode()) {
assert(check_young_list_well_formed(),
"young list should be well formed");
if (PrintHeapAtGC) {
Universe::print_heap_before_gc();
}

if (GC_locker::is_active()) {
return; // GC is disabled (e.g. JNI GetXXXCritical operation)
}
{
char verbose_str[128];
sprintf(verbose_str, "GC pause ");
if (g1_policy()->in_young_gc_mode()) {
if (g1_policy()->full_young_gcs())
strcat(verbose_str, "(young)");
else
strcat(verbose_str, "(partial)");
}
if (g1_policy()->should_initiate_conc_mark())
strcat(verbose_str, " (initial-mark)");
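// The sprintf/strcat sequence above cannot overflow the 128-byte buffer
// with the current strings, but a bounds-checked sketch of the same logic
// (assuming HotSpot's jio_snprintf is usable at this point) would be:
//
//   char verbose_str[128];
//   jio_snprintf(verbose_str, sizeof(verbose_str), "GC pause %s%s",
//                g1_policy()->in_young_gc_mode()
//                  ? (g1_policy()->full_young_gcs() ? "(young)" : "(partial)")
//                  : "",
//                g1_policy()->should_initiate_conc_mark() ? " (initial-mark)"
//                                                         : "");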

bool abandoned = false;
{ // Call to jvmpi::post_class_unload_events must occur outside of active GC
IsGCActiveMark x;
GCCauseSetter x(this, GCCause::_g1_inc_collection_pause);

gc_prologue(false);
increment_total_collections();
// if PrintGCDetails is on, we'll print long statistics information
// in the collector policy code, so let's not print this as the output
// is messy if we do.
gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty);

ResourceMark rm;
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");
guarantee(!is_gc_active(), "collection is not reentrant");
assert(regions_accounted_for(), "Region leakage!");

increment_gc_time_stamp();

if (g1_policy()->in_young_gc_mode()) {
assert(check_young_list_well_formed(),
"young list should be well formed");
}

if (GC_locker::is_active()) {
return; // GC is disabled (e.g. JNI GetXXXCritical operation)
}

bool abandoned = false;
{ // Call to jvmpi::post_class_unload_events must occur outside of active GC
IsGCActiveMark x;

gc_prologue(false);
increment_total_collections(false /* full gc */);

#if G1_REM_SET_LOGGING
gclog_or_tty->print_cr("\nJust chose CS, heap:");
print();
#endif

if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
HandleMark hm; // Discard invalid handles created during verification
prepare_for_verify();
gclog_or_tty->print(" VerifyBeforeGC:");
Universe::verify(false);
}

COMPILER2_PRESENT(DerivedPointerTable::clear());

// We want to turn off ref discovery, if necessary, and turn it back on
// again later if we do.
bool was_enabled = ref_processor()->discovery_enabled();
if (was_enabled) ref_processor()->disable_discovery();

// Forget the current alloc region (we might even choose it to be part
// of the collection set!).
abandon_cur_alloc_region();

// The elapsed time induced by the start time below deliberately elides
// the possible verification above.
double start_time_sec = os::elapsedTime();
GCOverheadReporter::recordSTWStart(start_time_sec);
size_t start_used_bytes = used();
if (!G1ConcMark) {
do_sync_mark();
}

g1_policy()->record_collection_pause_start(start_time_sec,
start_used_bytes);

guarantee(_in_cset_fast_test == NULL, "invariant");
guarantee(_in_cset_fast_test_base == NULL, "invariant");
_in_cset_fast_test_length = max_regions();
_in_cset_fast_test_base =
NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length);
memset(_in_cset_fast_test_base, false,
_in_cset_fast_test_length * sizeof(bool));
// We're biasing _in_cset_fast_test to avoid subtracting the
// beginning of the heap every time we want to index; basically
// it's the same as what we do with the card table.
_in_cset_fast_test = _in_cset_fast_test_base -
((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
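// Worked example of the biasing above (standalone sketch; the region size
// and heap base are assumed values, not taken from this change): with 1MB
// regions, LogOfHRGrainBytes would be 20, so the region index of an address
// is (addr >> 20). Biasing the table base by -(heap_start >> 20) lets the
// lookup use that index directly, the same trick the card table uses (the
// biased pointer itself is never dereferenced out of range).
//
//   const int    kLogGrain  = 20;            // hypothetical 1MB regions
//   const size_t kHeapStart = 0x40000000;    // hypothetical reserved base
//   bool  table[256] = { false };            // stands in for ..._base
//   bool* biased = table - (kHeapStart >> kLogGrain);
//   size_t addr  = kHeapStart + 5 * (1 << kLogGrain);  // inside region 5
//   // biased[addr >> kLogGrain] is exactly table[5]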

#if SCAN_ONLY_VERBOSE
_young_list->print();
#endif // SCAN_ONLY_VERBOSE

if (g1_policy()->should_initiate_conc_mark()) {
concurrent_mark()->checkpointRootsInitialPre();
}
save_marks();

// We must do this before any possible evacuation that should propagate
// marks.
if (mark_in_progress()) {
double start_time_sec = os::elapsedTime();

_cm->drainAllSATBBuffers();
double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0;
g1_policy()->record_satb_drain_time(finish_mark_ms);

}
// Record the number of elements currently on the mark stack, so we
// only iterate over these. (Since evacuation may add to the mark
// stack, doing more exposes race conditions.) If no mark is in
// progress, this will be zero.
_cm->set_oops_do_bound();

assert(regions_accounted_for(), "Region leakage.");

if (mark_in_progress())
concurrent_mark()->newCSet();

// Now choose the CS.
g1_policy()->choose_collection_set();

// We may abandon a pause if we find no region that will fit in the MMU
// pause.
bool abandoned = (g1_policy()->collection_set() == NULL);

// Nothing to do if we were unable to choose a collection set.
if (!abandoned) {
#if G1_REM_SET_LOGGING
gclog_or_tty->print_cr("\nAfter pause, heap:");
gclog_or_tty->print_cr("\nJust chose CS, heap:");
print();
#endif

setup_surviving_young_words();
if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
HandleMark hm; // Discard invalid handles created during verification
prepare_for_verify();
gclog_or_tty->print(" VerifyBeforeGC:");
Universe::verify(false);
}

// Set up the gc allocation regions.
get_gc_alloc_regions();
COMPILER2_PRESENT(DerivedPointerTable::clear());

// Actually do the work...
evacuate_collection_set();
free_collection_set(g1_policy()->collection_set());
g1_policy()->clear_collection_set();
// We want to turn off ref discovery, if necessary, and turn it back on
// again later if we do.
bool was_enabled = ref_processor()->discovery_enabled();
if (was_enabled) ref_processor()->disable_discovery();

FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base);
// this is more for peace of mind; we're nulling them here and
// we're expecting them to be null at the beginning of the next GC
_in_cset_fast_test = NULL;
_in_cset_fast_test_base = NULL;
// Forget the current alloc region (we might even choose it to be part
// of the collection set!).
abandon_cur_alloc_region();

release_gc_alloc_regions(false /* totally */);
// The elapsed time induced by the start time below deliberately elides
// the possible verification above.
double start_time_sec = os::elapsedTime();
GCOverheadReporter::recordSTWStart(start_time_sec);
size_t start_used_bytes = used();
if (!G1ConcMark) {
do_sync_mark();
}

cleanup_surviving_young_words();
g1_policy()->record_collection_pause_start(start_time_sec,
start_used_bytes);

if (g1_policy()->in_young_gc_mode()) {
_young_list->reset_sampled_info();
assert(check_young_list_empty(true),
"young list should be empty");
guarantee(_in_cset_fast_test == NULL, "invariant");
guarantee(_in_cset_fast_test_base == NULL, "invariant");
_in_cset_fast_test_length = max_regions();
_in_cset_fast_test_base =
NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length);
memset(_in_cset_fast_test_base, false,
_in_cset_fast_test_length * sizeof(bool));
// We're biasing _in_cset_fast_test to avoid subtracting the
// beginning of the heap every time we want to index; basically
// it's the same as what we do with the card table.
_in_cset_fast_test = _in_cset_fast_test_base -
((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);

#if SCAN_ONLY_VERBOSE
_young_list->print();
#endif // SCAN_ONLY_VERBOSE

g1_policy()->record_survivor_regions(_young_list->survivor_length(),
_young_list->first_survivor_region(),
_young_list->last_survivor_region());
_young_list->reset_auxilary_lists();
if (g1_policy()->should_initiate_conc_mark()) {
concurrent_mark()->checkpointRootsInitialPre();
}
} else {
COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
}
save_marks();

if (evacuation_failed()) {
_summary_bytes_used = recalculate_used();
} else {
// The "used" of the collection set have already been subtracted
// when they were freed. Add in the bytes evacuated.
_summary_bytes_used += g1_policy()->bytes_in_to_space();
}
// We must do this before any possible evacuation that should propagate
// marks.
if (mark_in_progress()) {
double start_time_sec = os::elapsedTime();

if (g1_policy()->in_young_gc_mode() &&
g1_policy()->should_initiate_conc_mark()) {
concurrent_mark()->checkpointRootsInitialPost();
set_marking_started();
doConcurrentMark();
}

#if SCAN_ONLY_VERBOSE
_young_list->print();
#endif // SCAN_ONLY_VERBOSE

double end_time_sec = os::elapsedTime();
double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
g1_policy()->record_pause_time_ms(pause_time_ms);
GCOverheadReporter::recordSTWEnd(end_time_sec);
g1_policy()->record_collection_pause_end(abandoned);

assert(regions_accounted_for(), "Region leakage.");

if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
HandleMark hm; // Discard invalid handles created during verification
gclog_or_tty->print(" VerifyAfterGC:");
prepare_for_verify();
Universe::verify(false);
}

if (was_enabled) ref_processor()->enable_discovery();

{
size_t expand_bytes = g1_policy()->expansion_amount();
if (expand_bytes > 0) {
size_t bytes_before = capacity();
expand(expand_bytes);
_cm->drainAllSATBBuffers();
double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0;
g1_policy()->record_satb_drain_time(finish_mark_ms);
}
}
// Record the number of elements currently on the mark stack, so we
// only iterate over these. (Since evacuation may add to the mark
// stack, doing more exposes race conditions.) If no mark is in
// progress, this will be zero.
_cm->set_oops_do_bound();

if (mark_in_progress()) {
concurrent_mark()->update_g1_committed();
}
assert(regions_accounted_for(), "Region leakage.");

#ifdef TRACESPINNING
ParallelTaskTerminator::print_termination_counts();
if (mark_in_progress())
concurrent_mark()->newCSet();

// Now choose the CS.
g1_policy()->choose_collection_set();

// We may abandon a pause if we find no region that will fit in the MMU
// pause.
bool abandoned = (g1_policy()->collection_set() == NULL);

// Nothing to do if we were unable to choose a collection set.
if (!abandoned) {
#if G1_REM_SET_LOGGING
gclog_or_tty->print_cr("\nAfter pause, heap:");
print();
#endif

gc_epilogue(false);
setup_surviving_young_words();

// Set up the gc allocation regions.
get_gc_alloc_regions();

// Actually do the work...
evacuate_collection_set();
free_collection_set(g1_policy()->collection_set());
g1_policy()->clear_collection_set();

FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base);
// this is more for peace of mind; we're nulling them here and
// we're expecting them to be null at the beginning of the next GC
_in_cset_fast_test = NULL;
_in_cset_fast_test_base = NULL;

release_gc_alloc_regions(false /* totally */);

cleanup_surviving_young_words();

if (g1_policy()->in_young_gc_mode()) {
_young_list->reset_sampled_info();
assert(check_young_list_empty(true),
"young list should be empty");

#if SCAN_ONLY_VERBOSE
_young_list->print();
#endif // SCAN_ONLY_VERBOSE

g1_policy()->record_survivor_regions(_young_list->survivor_length(),
_young_list->first_survivor_region(),
_young_list->last_survivor_region());
_young_list->reset_auxilary_lists();
}
} else {
COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
}

if (evacuation_failed()) {
_summary_bytes_used = recalculate_used();
} else {
// The "used" of the collection set have already been subtracted
// when they were freed. Add in the bytes evacuated.
_summary_bytes_used += g1_policy()->bytes_in_to_space();
}

if (g1_policy()->in_young_gc_mode() &&
g1_policy()->should_initiate_conc_mark()) {
concurrent_mark()->checkpointRootsInitialPost();
set_marking_started();
doConcurrentMark();
}

#if SCAN_ONLY_VERBOSE
_young_list->print();
#endif // SCAN_ONLY_VERBOSE

double end_time_sec = os::elapsedTime();
double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
g1_policy()->record_pause_time_ms(pause_time_ms);
GCOverheadReporter::recordSTWEnd(end_time_sec);
g1_policy()->record_collection_pause_end(abandoned);

assert(regions_accounted_for(), "Region leakage.");

if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
HandleMark hm; // Discard invalid handles created during verification
gclog_or_tty->print(" VerifyAfterGC:");
prepare_for_verify();
Universe::verify(false);
}

if (was_enabled) ref_processor()->enable_discovery();

{
size_t expand_bytes = g1_policy()->expansion_amount();
if (expand_bytes > 0) {
size_t bytes_before = capacity();
expand(expand_bytes);
}
}

if (mark_in_progress()) {
concurrent_mark()->update_g1_committed();
}

#ifdef TRACESPINNING
ParallelTaskTerminator::print_termination_counts();
#endif

gc_epilogue(false);
}

assert(verify_region_lists(), "Bad region lists.");

if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) {
gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum);
print_tracing_info();
vm_exit(-1);
}
}

assert(verify_region_lists(), "Bad region lists.");

if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) {
gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum);
print_tracing_info();
vm_exit(-1);
if (PrintHeapAtGC) {
Universe::print_heap_after_gc();
}
}

@@ -5357,7 +5392,7 @@ void G1CollectedHeap::tear_down_region_lists() {
assert(_free_region_list == NULL, "Postcondition of loop.");
if (_free_region_list_size != 0) {
gclog_or_tty->print_cr("Size is %d.", _free_region_list_size);
print();
print_on(gclog_or_tty, true /* extended */);
}
assert(_free_region_list_size == 0, "Postconditions of loop.");
}

@@ -1061,8 +1061,14 @@ public:

// Override; it uses the "prev" marking information
virtual void verify(bool allow_dirty, bool silent);
// Default behavior: calls print_on(tty).
virtual void print() const;
// This calls print_on(st, PrintHeapAtGCExtended).
virtual void print_on(outputStream* st) const;
// If extended is true, it will print out information for all
// regions in the heap by calling print_on_extended(st).
virtual void print_on(outputStream* st, bool extended) const;
virtual void print_on_extended(outputStream* st) const;

virtual void print_gc_threads_on(outputStream* st) const;
virtual void gc_threads_do(ThreadClosure* tc) const;

@@ -1097,6 +1097,10 @@ public:
_recorded_survivor_tail = tail;
}

size_t recorded_survivor_regions() {
return _recorded_survivor_regions;
}

void record_thread_age_table(ageTable* age_table)
{
_survivors_age_table.merge_par(age_table);

@@ -703,7 +703,7 @@ void HeapRegion::verify(bool allow_dirty, bool use_prev_marking) const {
}
if (vl_cl.failures()) {
gclog_or_tty->print_cr("Heap:");
G1CollectedHeap::heap()->print();
G1CollectedHeap::heap()->print_on(gclog_or_tty, true /* extended */);
gclog_or_tty->print_cr("");
}
if (VerifyDuringGC &&

@@ -357,6 +357,9 @@ PhaseCFG::PhaseCFG( Arena *a, RootNode *r, Matcher &m ) :
#ifndef PRODUCT
, _trace_opto_pipelining(TraceOptoPipelining || C->method_has_option("TraceOptoPipelining"))
#endif
#ifdef ASSERT
, _raw_oops(a)
#endif
{
ResourceMark rm;
// I'll need a few machine-specific GotoNodes. Make an Ideal GotoNode,

@@ -380,6 +380,10 @@ class PhaseCFG : public Phase {
bool _trace_opto_pipelining; // tracing flag
#endif

#ifdef ASSERT
Unique_Node_List _raw_oops;
#endif

// Build dominators
void Dominators();

@@ -74,9 +74,11 @@ struct OopFlow : public ResourceObj {
// this block.
Block *_b; // Block for this struct
OopFlow *_next; // Next free OopFlow
// or NULL if dead/conflict
Compile* C;

OopFlow( short *callees, Node **defs ) : _callees(callees), _defs(defs),
_b(NULL), _next(NULL) { }
OopFlow( short *callees, Node **defs, Compile* c ) : _callees(callees), _defs(defs),
_b(NULL), _next(NULL), C(c) { }

// Given reaching-defs for this block start, compute it for this block end
void compute_reach( PhaseRegAlloc *regalloc, int max_reg, Dict *safehash );
@@ -88,7 +90,7 @@ struct OopFlow : public ResourceObj {
void clone( OopFlow *flow, int max_size);

// Make a new OopFlow from scratch
static OopFlow *make( Arena *A, int max_size );
static OopFlow *make( Arena *A, int max_size, Compile* C );

// Build an oopmap from the current flow info
OopMap *build_oop_map( Node *n, int max_reg, PhaseRegAlloc *regalloc, int* live );
@@ -180,11 +182,11 @@ void OopFlow::clone( OopFlow *flow, int max_size ) {
}

//------------------------------make-------------------------------------------
OopFlow *OopFlow::make( Arena *A, int max_size ) {
OopFlow *OopFlow::make( Arena *A, int max_size, Compile* C ) {
short *callees = NEW_ARENA_ARRAY(A,short,max_size+1);
Node **defs = NEW_ARENA_ARRAY(A,Node*,max_size+1);
debug_only( memset(defs,0,(max_size+1)*sizeof(Node*)) );
OopFlow *flow = new (A) OopFlow(callees+1, defs+1);
OopFlow *flow = new (A) OopFlow(callees+1, defs+1, C);
assert( &flow->_callees[OptoReg::Bad] == callees, "Ok to index at OptoReg::Bad" );
assert( &flow->_defs [OptoReg::Bad] == defs , "Ok to index at OptoReg::Bad" );
return flow;
@@ -288,7 +290,7 @@ OopMap *OopFlow::build_oop_map( Node *n, int max_reg, PhaseRegAlloc *regalloc, i
m = m->in(idx);
}
}
guarantee( 0, "must find derived/base pair" );
}
found: ;
Node *base = n->in(i+1); // Base is other half of pair
@@ -347,6 +349,13 @@ OopMap *OopFlow::build_oop_map( Node *n, int max_reg, PhaseRegAlloc *regalloc, i
} else {
// Other - some reaching non-oop value
omap->set_value( r);
#ifdef ASSERT
if( t->isa_rawptr() && C->cfg()->_raw_oops.member(def) ) {
def->dump();
n->dump();
assert(false, "there should be an oop in OopMap instead of a live raw oop at safepoint");
}
#endif
}

}
@@ -562,7 +571,7 @@ void Compile::BuildOopMaps() {

// Do the first block 'by hand' to prime the worklist
Block *entry = _cfg->_blocks[1];
OopFlow *rootflow = OopFlow::make(A,max_reg);
OopFlow *rootflow = OopFlow::make(A,max_reg,this);
// Initialize to 'bottom' (not 'top')
memset( rootflow->_callees, OptoReg::Bad, max_reg*sizeof(short) );
memset( rootflow->_defs , 0, max_reg*sizeof(Node*) );
@@ -628,7 +637,7 @@ void Compile::BuildOopMaps() {
// Carry it forward.
} else { // Draw a new OopFlow from the freelist
if( !free_list )
free_list = OopFlow::make(A,max_reg);
free_list = OopFlow::make(A,max_reg,C);
flow = free_list;
assert( flow->_b == NULL, "oopFlow is not free" );
free_list = flow->_next;

@@ -104,7 +104,9 @@ void C2Compiler::compile_method(ciEnv* env,
initialize();
}
bool subsume_loads = true;
bool do_escape_analysis = DoEscapeAnalysis;
bool do_escape_analysis = DoEscapeAnalysis &&
!(env->jvmti_can_hotswap_or_post_breakpoint() ||
env->jvmti_can_examine_or_deopt_anywhere());
while (!env->failing()) {
// Attempt to compile while subsuming loads into machine instructions.
Compile C(env, this, target, entry_bci, subsume_loads, do_escape_analysis);

@@ -1130,6 +1130,9 @@ void PhaseCFG::schedule_late(VectorSet &visited, Node_List &stack) {
Node *def = self->in(1);
if (def != NULL && def->bottom_type()->base() == Type::RawPtr) {
early->add_inst(self);
#ifdef ASSERT
_raw_oops.push(def);
#endif
continue;
}
break;

@@ -1378,7 +1378,7 @@ void GraphKit::pre_barrier(Node* ctl,
Node* adr,
uint adr_idx,
Node *val,
const Type* val_type,
const TypeOopPtr* val_type,
BasicType bt) {
BarrierSet* bs = Universe::heap()->barrier_set();
set_control(ctl);
@@ -1436,7 +1436,7 @@ Node* GraphKit::store_oop_to_object(Node* ctl,
Node* adr,
const TypePtr* adr_type,
Node *val,
const Type* val_type,
const TypeOopPtr* val_type,
BasicType bt) {
uint adr_idx = C->get_alias_index(adr_type);
Node* store;
@@ -1451,7 +1451,7 @@ Node* GraphKit::store_oop_to_array(Node* ctl,
Node* adr,
const TypePtr* adr_type,
Node *val,
const Type* val_type,
const TypeOopPtr* val_type,
BasicType bt) {
uint adr_idx = C->get_alias_index(adr_type);
Node* store;
@@ -1466,12 +1466,29 @@ Node* GraphKit::store_oop_to_unknown(Node* ctl,
Node* adr,
const TypePtr* adr_type,
Node *val,
const Type* val_type,
BasicType bt) {
uint adr_idx = C->get_alias_index(adr_type);
Node* store;
Compile::AliasType* at = C->alias_type(adr_type);
const TypeOopPtr* val_type = NULL;
if (adr_type->isa_instptr()) {
if (at->field() != NULL) {
// known field. This code is a copy of the do_put_xxx logic.
ciField* field = at->field();
if (!field->type()->is_loaded()) {
val_type = TypeInstPtr::BOTTOM;
} else {
val_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
}
}
} else if (adr_type->isa_aryptr()) {
val_type = adr_type->is_aryptr()->elem()->make_oopptr();
}
if (val_type == NULL) {
val_type = TypeInstPtr::BOTTOM;
}

uint adr_idx = at->index();
pre_barrier(ctl, obj, adr, adr_idx, val, val_type, bt);
store = store_to_memory(control(), adr, val, bt, adr_idx);
Node* store = store_to_memory(control(), adr, val, bt, adr_idx);
post_barrier(control(), store, obj, adr, adr_idx, val, bt, true);
return store;
}
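// Note on the new shape of store_oop_to_unknown(): the caller no longer
// supplies val_type; it is recovered from the address type instead. A known
// instance field yields TypeOopPtr::make_from_klass() (or TypeInstPtr::BOTTOM
// while the field's type is unloaded), an array element yields the declared
// element oop type, and anything else falls back to TypeInstPtr::BOTTOM.
// That is why the call sites below shrink from
//   store_oop_to_unknown(ctl, obj, adr, adr_type, val, val->bottom_type(), bt)
// to
//   store_oop_to_unknown(ctl, obj, adr, adr_type, val, bt)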
@@ -3202,7 +3219,7 @@ void GraphKit::g1_write_barrier_pre(Node* obj,
Node* adr,
uint alias_idx,
Node* val,
const Type* val_type,
const TypeOopPtr* val_type,
BasicType bt) {
IdealKit ideal(gvn(), control(), merged_memory(), true);
#define __ ideal.

@@ -454,7 +454,7 @@ class GraphKit : public Phase {
Node* adr, // actual address to store val at
const TypePtr* adr_type,
Node* val,
const Type* val_type,
const TypeOopPtr* val_type,
BasicType bt);

Node* store_oop_to_array(Node* ctl,
@@ -462,7 +462,7 @@ class GraphKit : public Phase {
Node* adr, // actual address to store val at
const TypePtr* adr_type,
Node* val,
const Type* val_type,
const TypeOopPtr* val_type,
BasicType bt);

// Could be an array or object we don't know at compile time (unsafe ref.)
@@ -471,12 +471,11 @@ class GraphKit : public Phase {
Node* adr, // actual address to store val at
const TypePtr* adr_type,
Node* val,
const Type* val_type,
BasicType bt);

// For the few cases where the barriers need special help
void pre_barrier(Node* ctl, Node* obj, Node* adr, uint adr_idx,
Node* val, const Type* val_type, BasicType bt);
Node* val, const TypeOopPtr* val_type, BasicType bt);

void post_barrier(Node* ctl, Node* store, Node* obj, Node* adr, uint adr_idx,
Node* val, BasicType bt, bool use_precise);
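// The narrowed val_type parameter (const Type* -> const TypeOopPtr*) states
// in the signature what the barriers already assumed: a pre-barrier only
// ever sees oop-typed values. Call sites holding a plain Type* now convert
// explicitly, as inline_unsafe_CAS() does below:
//
//   pre_barrier(control(), base, adr, alias_idx, newval,
//               value_type->make_oopptr(), T_OBJECT);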
@@ -599,7 +598,7 @@ class GraphKit : public Phase {
Node* adr,
uint alias_idx,
Node* val,
const Type* val_type,
const TypeOopPtr* val_type,
BasicType bt);

void g1_write_barrier_post(Node* store,

@@ -165,6 +165,7 @@ class LibraryCallKit : public GraphKit {
bool inline_native_getLength();
bool inline_array_copyOf(bool is_copyOfRange);
bool inline_array_equals();
void copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark);
bool inline_native_clone(bool is_virtual);
bool inline_native_Reflection_getCallerClass();
bool inline_native_AtomicLong_get();
@@ -181,7 +182,6 @@ class LibraryCallKit : public GraphKit {
Node* src, Node* src_offset,
Node* dest, Node* dest_offset,
Node* copy_length,
int nargs, // arguments on stack for debug info
bool disjoint_bases = false,
bool length_never_negative = false,
RegionNode* slow_region = NULL);
@@ -202,17 +202,16 @@ class LibraryCallKit : public GraphKit {
void generate_slow_arraycopy(const TypePtr* adr_type,
Node* src, Node* src_offset,
Node* dest, Node* dest_offset,
Node* copy_length,
int nargs);
Node* copy_length);
Node* generate_checkcast_arraycopy(const TypePtr* adr_type,
Node* dest_elem_klass,
Node* src, Node* src_offset,
Node* dest, Node* dest_offset,
Node* copy_length, int nargs);
Node* copy_length);
Node* generate_generic_arraycopy(const TypePtr* adr_type,
Node* src, Node* src_offset,
Node* dest, Node* dest_offset,
Node* copy_length, int nargs);
Node* copy_length);
void generate_unchecked_arraycopy(const TypePtr* adr_type,
BasicType basic_elem_type,
bool disjoint_bases,
@@ -2178,9 +2177,8 @@ bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, Bas
// Possibly an oop being stored to Java heap or native memory
if (!TypePtr::NULL_PTR->higher_equal(_gvn.type(heap_base_oop))) {
// oop to Java heap.
(void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, val->bottom_type(), type);
(void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type);
} else {

// We can't tell at compile time if we are storing in the Java heap or outside
// of it. So we need to emit code to conditionally do the proper type of
// store.
@@ -2189,7 +2187,7 @@ bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, Bas
kit.declares_done();
// QQQ who knows what probability is here??
kit.if_then(heap_base_oop, BoolTest::ne, null(), PROB_UNLIKELY(0.999)); {
(void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, val->bottom_type(), type);
(void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type);
} kit.else_(); {
(void) store_to_memory(control(), adr, val, type, adr_type, is_volatile);
} kit.end_if();
@@ -2394,7 +2392,7 @@ bool LibraryCallKit::inline_unsafe_CAS(BasicType type) {
case T_OBJECT:
// reference stores need a store barrier.
// (They don't if CAS fails, but it isn't worth checking.)
pre_barrier(control(), base, adr, alias_idx, newval, value_type, T_OBJECT);
pre_barrier(control(), base, adr, alias_idx, newval, value_type->make_oopptr(), T_OBJECT);
#ifdef _LP64
if (adr->bottom_type()->is_ptr_to_narrowoop()) {
Node *newval_enc = _gvn.transform(new (C, 2) EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
@@ -2489,7 +2487,7 @@ bool LibraryCallKit::inline_unsafe_ordered_store(BasicType type) {
bool require_atomic_access = true;
Node* store;
if (type == T_OBJECT) // reference stores need a store barrier.
store = store_oop_to_unknown(control(), base, adr, adr_type, val, value_type, type);
store = store_oop_to_unknown(control(), base, adr, adr_type, val, type);
else {
store = store_to_memory(control(), adr, val, type, adr_type, require_atomic_access);
}
@@ -3230,7 +3228,8 @@ bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) {
Node* orig_tail = _gvn.transform( new(C, 3) SubINode(orig_length, start) );
Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);

Node* newcopy = new_array(klass_node, length, nargs);
const bool raw_mem_only = true;
Node* newcopy = new_array(klass_node, length, nargs, raw_mem_only);
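// raw_mem_only keeps the fresh array typed as raw memory while the copy
// fills it in; it is only published as a real oop once the copy completes
// (see the secondary InitializeNode barrier in generate_arraycopy below).
// A sketch of the pattern this enables:
//
//   const bool raw_mem_only = true;
//   Node* newcopy = new_array(klass_node, length, nargs, raw_mem_only);
//   // ... bulk copy into newcopy while it is still raw ...
//   // ... memory barrier; newcopy then becomes visible as an oop ...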

// Generate a direct call to the right arraycopy function(s).
// We know the copy is disjoint but we might not know if the
@@ -3241,7 +3240,7 @@ bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) {
bool length_never_negative = true;
generate_arraycopy(TypeAryPtr::OOPS, T_OBJECT,
original, start, newcopy, intcon(0), moved,
nargs, disjoint_bases, length_never_negative);
disjoint_bases, length_never_negative);

push(newcopy);
}
@@ -3883,6 +3882,98 @@ bool LibraryCallKit::inline_unsafe_copyMemory() {
return true;
}

//------------------------copy_to_clone-----------------------------------
// Helper function for inline_native_clone.
void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark) {
assert(obj_size != NULL, "");
Node* raw_obj = alloc_obj->in(1);
assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");

if (ReduceBulkZeroing) {
// We will be completely responsible for initializing this object -
// mark Initialize node as complete.
AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);
// The object was just allocated - there should be no stores yet!
guarantee(alloc != NULL && alloc->maybe_set_complete(&_gvn), "");
}

// Cast to Object for arraycopy.
// We can't use the original CheckCastPP since it should be moved
// after the arraycopy to prevent stores flowing above it.
Node* new_obj = new(C, 2) CheckCastPPNode(alloc_obj->in(0), raw_obj,
TypeInstPtr::NOTNULL);
new_obj = _gvn.transform(new_obj);
// Substitute in the locally valid dest_oop.
replace_in_map(alloc_obj, new_obj);

// Copy the fastest available way.
// TODO: generate fields copies for small objects instead.
Node* src = obj;
Node* dest = new_obj;
Node* size = _gvn.transform(obj_size);

// Exclude the header but include array length to copy by 8-byte words.
// Can't use base_offset_in_bytes(bt) since basic type is unknown.
int base_off = is_array ? arrayOopDesc::length_offset_in_bytes() :
instanceOopDesc::base_offset_in_bytes();
// base_off:
// 8 - 32-bit VM
// 12 - 64-bit VM, compressed oops
// 16 - 64-bit VM, normal oops
if (base_off % BytesPerLong != 0) {
assert(UseCompressedOops, "");
if (is_array) {
// Exclude length to copy by 8-byte words.
base_off += sizeof(int);
} else {
// Include klass to copy by 8-byte words.
base_off = instanceOopDesc::klass_offset_in_bytes();
}
assert(base_off % BytesPerLong == 0, "expect 8 bytes alignment");
}
src = basic_plus_adr(src, base_off);
dest = basic_plus_adr(dest, base_off);

// Compute the length also, if needed:
Node* countx = size;
countx = _gvn.transform( new (C, 3) SubXNode(countx, MakeConX(base_off)) );
countx = _gvn.transform( new (C, 3) URShiftXNode(countx, intcon(LogBytesPerLong) ));
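// Worked example of the size arithmetic above (standalone sketch; the
// object size is an assumed value, the offsets mirror the 8/12/16 table):
//
//   const int BytesPerLong = 8, LogBytesPerLong = 3;
//   int base_off = 12;          // 64-bit VM, compressed oops, instance case
//   if (base_off % BytesPerLong != 0)
//     base_off = 8;             // back up to the klass offset: copy the
//                               // klass word plus the gap as one 8-byte word
//   long size   = 32;           // assumed instance size in bytes
//   long countx = (size - base_off) >> LogBytesPerLong;  // 24 >> 3 == 3 words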

const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
bool disjoint_bases = true;
generate_unchecked_arraycopy(raw_adr_type, T_LONG, disjoint_bases,
src, NULL, dest, NULL, countx);

// If necessary, emit some card marks afterwards. (Non-arrays only.)
if (card_mark) {
assert(!is_array, "");
// Put in store barrier for any and all oops we are sticking
// into this object. (We could avoid this if we could prove
// that the object type contains no oop fields at all.)
Node* no_particular_value = NULL;
Node* no_particular_field = NULL;
int raw_adr_idx = Compile::AliasIdxRaw;
post_barrier(control(),
memory(raw_adr_type),
new_obj,
no_particular_field,
raw_adr_idx,
no_particular_value,
T_OBJECT,
false);
}

// Move the original CheckCastPP after arraycopy.
_gvn.hash_delete(alloc_obj);
alloc_obj->set_req(0, control());
// Replace raw memory edge with new CheckCastPP to have a live oop
// at safepoints instead of raw value.
assert(new_obj->is_CheckCastPP() && new_obj->in(1) == alloc_obj->in(1), "sanity");
alloc_obj->set_req(1, new_obj); // cast to the original type
_gvn.hash_find_insert(alloc_obj); // put back into GVN table
// Restore in the locally valid dest_oop.
replace_in_map(new_obj, alloc_obj);
}

//------------------------inline_native_clone----------------------------
// Here are the simple edge cases:
@@ -3917,8 +4008,9 @@ bool LibraryCallKit::inline_native_clone(bool is_virtual) {
// paths into result_reg:
enum {
_slow_path = 1, // out-of-line call to clone method (virtual or not)
_objArray_path, // plain allocation, plus arrayof_oop_arraycopy
_fast_path, // plain allocation, plus a CopyArray operation
_objArray_path, // plain array allocation, plus arrayof_oop_arraycopy
_array_path, // plain array allocation, plus arrayof_long_arraycopy
_instance_path, // plain instance allocation, plus arrayof_long_arraycopy
PATH_LIMIT
};
RegionNode* result_reg = new(C, PATH_LIMIT) RegionNode(PATH_LIMIT);
@@ -3933,18 +4025,6 @@ bool LibraryCallKit::inline_native_clone(bool is_virtual) {
int raw_adr_idx = Compile::AliasIdxRaw;
const bool raw_mem_only = true;

// paths into alloc_reg (on the fast path, just before the CopyArray):
enum { _typeArray_alloc = 1, _instance_alloc, ALLOC_LIMIT };
RegionNode* alloc_reg = new(C, ALLOC_LIMIT) RegionNode(ALLOC_LIMIT);
PhiNode* alloc_val = new(C, ALLOC_LIMIT) PhiNode(alloc_reg, raw_adr_type);
PhiNode* alloc_siz = new(C, ALLOC_LIMIT) PhiNode(alloc_reg, TypeX_X);
PhiNode* alloc_i_o = new(C, ALLOC_LIMIT) PhiNode(alloc_reg, Type::ABIO);
PhiNode* alloc_mem = new(C, ALLOC_LIMIT) PhiNode(alloc_reg, Type::MEMORY,
raw_adr_type);
record_for_igvn(alloc_reg);

bool card_mark = false; // (see below)

Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
if (array_ctl != NULL) {
// It's an array.
@@ -3954,16 +4034,6 @@ bool LibraryCallKit::inline_native_clone(bool is_virtual) {
Node* obj_size = NULL;
Node* alloc_obj = new_array(obj_klass, obj_length, nargs,
raw_mem_only, &obj_size);
assert(obj_size != NULL, "");
Node* raw_obj = alloc_obj->in(1);
assert(raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
if (ReduceBulkZeroing) {
AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);
if (alloc != NULL) {
// We will be completely responsible for initializing this object.
alloc->maybe_set_complete(&_gvn);
}
}

if (!use_ReduceInitialCardMarks()) {
// If it is an oop array, it requires very special treatment,
@@ -3977,7 +4047,7 @@ bool LibraryCallKit::inline_native_clone(bool is_virtual) {
bool length_never_negative = true;
generate_arraycopy(TypeAryPtr::OOPS, T_OBJECT,
obj, intcon(0), alloc_obj, intcon(0),
obj_length, nargs,
obj_length,
disjoint_bases, length_never_negative);
result_reg->init_req(_objArray_path, control());
result_val->init_req(_objArray_path, alloc_obj);
@@ -3992,19 +4062,24 @@ bool LibraryCallKit::inline_native_clone(bool is_virtual) {
// the object.

// Otherwise, there are no card marks to worry about.
alloc_val->init_req(_typeArray_alloc, raw_obj);
alloc_siz->init_req(_typeArray_alloc, obj_size);
alloc_reg->init_req(_typeArray_alloc, control());
alloc_i_o->init_req(_typeArray_alloc, i_o());
alloc_mem->init_req(_typeArray_alloc, memory(raw_adr_type));

if (!stopped()) {
copy_to_clone(obj, alloc_obj, obj_size, true, false);

// Present the results of the copy.
result_reg->init_req(_array_path, control());
result_val->init_req(_array_path, alloc_obj);
result_i_o ->set_req(_array_path, i_o());
result_mem ->set_req(_array_path, reset_memory());
}
}

// We only go to the fast case code if we pass a number of guards.
// We only go to the instance fast case code if we pass a number of guards.
// The paths which do not pass are accumulated in the slow_region.
RegionNode* slow_region = new (C, 1) RegionNode(1);
record_for_igvn(slow_region);
if (!stopped()) {
// It's an instance. Make the slow-path tests.
// It's an instance (we did array above). Make the slow-path tests.
// If this is a virtual call, we generate a funny guard. We grab
// the vtable entry corresponding to clone() from the target object.
// If the target method which we are calling happens to be the
@@ -4031,25 +4106,14 @@ bool LibraryCallKit::inline_native_clone(bool is_virtual) {
PreserveJVMState pjvms(this);
Node* obj_size = NULL;
Node* alloc_obj = new_instance(obj_klass, NULL, raw_mem_only, &obj_size);
assert(obj_size != NULL, "");
Node* raw_obj = alloc_obj->in(1);
assert(raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
if (ReduceBulkZeroing) {
AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);
if (alloc != NULL && !alloc->maybe_set_complete(&_gvn))
alloc = NULL;
}
if (!use_ReduceInitialCardMarks()) {
// Put in store barrier for any and all oops we are sticking
// into this object. (We could avoid this if we could prove
// that the object type contains no oop fields at all.)
card_mark = true;
}
alloc_val->init_req(_instance_alloc, raw_obj);
alloc_siz->init_req(_instance_alloc, obj_size);
alloc_reg->init_req(_instance_alloc, control());
alloc_i_o->init_req(_instance_alloc, i_o());
alloc_mem->init_req(_instance_alloc, memory(raw_adr_type));

copy_to_clone(obj, alloc_obj, obj_size, false, !use_ReduceInitialCardMarks());

// Present the results of the slow call.
result_reg->init_req(_instance_path, control());
result_val->init_req(_instance_path, alloc_obj);
result_i_o ->set_req(_instance_path, i_o());
result_mem ->set_req(_instance_path, reset_memory());
}

// Generate code for the slow case. We make a call to clone().
@@ -4065,82 +4129,12 @@ bool LibraryCallKit::inline_native_clone(bool is_virtual) {
result_mem ->set_req(_slow_path, reset_memory());
}

// The object is allocated, as an array and/or an instance. Now copy it.
set_control( _gvn.transform(alloc_reg) );
set_i_o( _gvn.transform(alloc_i_o) );
set_memory( _gvn.transform(alloc_mem), raw_adr_type );
Node* raw_obj = _gvn.transform(alloc_val);

if (!stopped()) {
// Copy the fastest available way.
// (No need for PreserveJVMState, since we're using it all up now.)
// TODO: generate fields/elements copies for small objects instead.
Node* src = obj;
Node* dest = raw_obj;
Node* size = _gvn.transform(alloc_siz);

// Exclude the header.
int base_off = instanceOopDesc::base_offset_in_bytes();
if (UseCompressedOops) {
assert(base_off % BytesPerLong != 0, "base with compressed oops");
// With compressed oops base_offset_in_bytes is 12 which creates
// the gap since countx is rounded by 8 bytes below.
// Copy klass and the gap.
base_off = instanceOopDesc::klass_offset_in_bytes();
}
src = basic_plus_adr(src, base_off);
dest = basic_plus_adr(dest, base_off);

// Compute the length also, if needed:
Node* countx = size;
countx = _gvn.transform( new (C, 3) SubXNode(countx, MakeConX(base_off)) );
countx = _gvn.transform( new (C, 3) URShiftXNode(countx, intcon(LogBytesPerLong) ));

// Select an appropriate instruction to initialize the range.
// The CopyArray instruction (if supported) can be optimized
// into a discrete set of scalar loads and stores.
bool disjoint_bases = true;
generate_unchecked_arraycopy(raw_adr_type, T_LONG, disjoint_bases,
src, NULL, dest, NULL, countx);

// Now that the object is properly initialized, type it as an oop.
// Use a secondary InitializeNode memory barrier.
InitializeNode* init = insert_mem_bar_volatile(Op_Initialize, raw_adr_idx,
raw_obj)->as_Initialize();
init->set_complete(&_gvn); // (there is no corresponding AllocateNode)
Node* new_obj = new(C, 2) CheckCastPPNode(control(), raw_obj,
TypeInstPtr::NOTNULL);
new_obj = _gvn.transform(new_obj);

// If necessary, emit some card marks afterwards. (Non-arrays only.)
if (card_mark) {
Node* no_particular_value = NULL;
Node* no_particular_field = NULL;
post_barrier(control(),
memory(raw_adr_type),
new_obj,
no_particular_field,
raw_adr_idx,
no_particular_value,
T_OBJECT,
false);
}
// Present the results of the slow call.
result_reg->init_req(_fast_path, control());
result_val->init_req(_fast_path, new_obj);
result_i_o ->set_req(_fast_path, i_o());
result_mem ->set_req(_fast_path, reset_memory());
}

// Return the combined state.
set_control( _gvn.transform(result_reg) );
set_i_o( _gvn.transform(result_i_o) );
set_all_memory( _gvn.transform(result_mem) );

// Cast the result to a sharper type, since we know what clone does.
Node* new_obj = _gvn.transform(result_val);
Node* cast = new (C, 2) CheckCastPPNode(control(), new_obj, toop);
push(_gvn.transform(cast));
push(_gvn.transform(result_val));

return true;
}
@ -4279,8 +4273,7 @@ bool LibraryCallKit::inline_arraycopy() {
|
||||
|
||||
// Call StubRoutines::generic_arraycopy stub.
|
||||
generate_arraycopy(TypeRawPtr::BOTTOM, T_CONFLICT,
|
||||
src, src_offset, dest, dest_offset, length,
|
||||
nargs);
|
||||
src, src_offset, dest, dest_offset, length);
|
||||
|
||||
// Do not let reads from the destination float above the arraycopy.
|
||||
// Since we cannot type the arrays, we don't know which slices
|
||||
@ -4303,8 +4296,7 @@ bool LibraryCallKit::inline_arraycopy() {
|
||||
// The component types are not the same or are not recognized. Punt.
|
||||
// (But, avoid the native method wrapper to JVM_ArrayCopy.)
|
||||
generate_slow_arraycopy(TypePtr::BOTTOM,
|
||||
src, src_offset, dest, dest_offset, length,
|
||||
nargs);
|
||||
src, src_offset, dest, dest_offset, length);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -4361,7 +4353,7 @@ bool LibraryCallKit::inline_arraycopy() {
|
||||
const TypePtr* adr_type = TypeAryPtr::get_array_body_type(dest_elem);
|
||||
generate_arraycopy(adr_type, dest_elem,
|
||||
src, src_offset, dest, dest_offset, length,
|
||||
nargs, false, false, slow_region);
|
||||
false, false, slow_region);
|
||||
|
||||
return true;
|
||||
}
|
||||
@ -4406,7 +4398,6 @@ LibraryCallKit::generate_arraycopy(const TypePtr* adr_type,
|
||||
Node* src, Node* src_offset,
|
||||
Node* dest, Node* dest_offset,
|
||||
Node* copy_length,
|
||||
int nargs,
|
||||
bool disjoint_bases,
|
||||
bool length_never_negative,
|
||||
RegionNode* slow_region) {
|
||||
@ -4418,7 +4409,6 @@ LibraryCallKit::generate_arraycopy(const TypePtr* adr_type,
|
||||
|
||||
Node* original_dest = dest;
|
||||
AllocateArrayNode* alloc = NULL; // used for zeroing, if needed
|
||||
Node* raw_dest = NULL; // used before zeroing, if needed
|
||||
bool must_clear_dest = false;
|
||||
|
||||
// See if this is the initialization of a newly-allocated array.
|
||||
@ -4437,15 +4427,18 @@ LibraryCallKit::generate_arraycopy(const TypePtr* adr_type,
|
||||
// "You break it, you buy it."
|
||||
InitializeNode* init = alloc->initialization();
|
||||
assert(init->is_complete(), "we just did this");
|
||||
assert(dest->Opcode() == Op_CheckCastPP, "sanity");
|
||||
assert(dest->is_CheckCastPP(), "sanity");
|
||||
assert(dest->in(0)->in(0) == init, "dest pinned");
|
||||
raw_dest = dest->in(1); // grab the raw pointer!
|
||||
original_dest = dest;
|
||||
dest = raw_dest;
|
||||
|
||||
// Cast to Object for arraycopy.
|
||||
// We can't use the original CheckCastPP since it should be moved
|
||||
// after the arraycopy to prevent stores flowing above it.
|
||||
Node* new_obj = new(C, 2) CheckCastPPNode(dest->in(0), dest->in(1),
|
||||
TypeInstPtr::NOTNULL);
|
||||
dest = _gvn.transform(new_obj);
|
||||
// Substitute in the locally valid dest_oop.
|
||||
replace_in_map(original_dest, dest);
|
||||
adr_type = TypeRawPtr::BOTTOM; // all initializations are into raw memory
|
||||
// Decouple the original InitializeNode, turning it into a simple membar.
|
||||
// We will build a new one at the end of this routine.
|
||||
init->set_req(InitializeNode::RawAddress, top());
|
||||
// From this point on, every exit path is responsible for
|
||||
// initializing any non-copied parts of the object to zero.
|
||||
must_clear_dest = true;
|
||||
@ -4488,7 +4481,7 @@ LibraryCallKit::generate_arraycopy(const TypePtr* adr_type,
|
||||
assert(!must_clear_dest, "");
|
||||
Node* cv = generate_generic_arraycopy(adr_type,
|
||||
src, src_offset, dest, dest_offset,
|
||||
copy_length, nargs);
|
||||
copy_length);
|
||||
if (cv == NULL) cv = intcon(-1); // failure (no stub available)
|
||||
checked_control = control();
|
||||
checked_i_o = i_o();
|
||||
@ -4507,16 +4500,24 @@ LibraryCallKit::generate_arraycopy(const TypePtr* adr_type,
|
||||
generate_negative_guard(copy_length, slow_region);
|
||||
}
|
||||
|
||||
// copy_length is 0.
|
||||
if (!stopped() && must_clear_dest) {
|
||||
Node* dest_length = alloc->in(AllocateNode::ALength);
|
||||
if (_gvn.eqv_uncast(copy_length, dest_length)
|
||||
|| _gvn.find_int_con(dest_length, 1) <= 0) {
|
||||
// There is no zeroing to do.
|
||||
// There is no zeroing to do. No need for a secondary raw memory barrier.
|
||||
} else {
|
||||
// Clear the whole thing since there are no source elements to copy.
|
||||
generate_clear_array(adr_type, dest, basic_elem_type,
|
||||
intcon(0), NULL,
|
||||
alloc->in(AllocateNode::AllocSize));
|
||||
// Use a secondary InitializeNode as raw memory barrier.
|
||||
// Currently it is needed only on this path since other
|
||||
// paths have stub or runtime calls as raw memory barriers.
|
||||
InitializeNode* init = insert_mem_bar_volatile(Op_Initialize,
|
||||
Compile::AliasIdxRaw,
|
||||
top())->as_Initialize();
|
||||
init->set_complete(&_gvn); // (there is no corresponding AllocateNode)
|
||||
}
|
||||
}
|
||||
|
||||
@ -4638,8 +4639,7 @@ LibraryCallKit::generate_arraycopy(const TypePtr* adr_type,
|
||||
Node* cv = generate_checkcast_arraycopy(adr_type,
|
||||
dest_elem_klass,
|
||||
src, src_offset, dest, dest_offset,
|
||||
copy_length,
|
||||
nargs);
|
||||
copy_length);
|
||||
if (cv == NULL) cv = intcon(-1); // failure (no stub available)
|
||||
checked_control = control();
|
||||
checked_i_o = i_o();
|
||||
@ -4701,8 +4701,8 @@ LibraryCallKit::generate_arraycopy(const TypePtr* adr_type,
|
||||
slow_i_o2 ->init_req(1, slow_i_o);
|
||||
slow_mem2 ->init_req(1, slow_mem);
|
||||
slow_reg2 ->init_req(2, control());
|
||||
slow_i_o2 ->init_req(2, i_o());
|
||||
slow_mem2 ->init_req(2, memory(adr_type));
|
||||
slow_i_o2 ->init_req(2, checked_i_o);
|
||||
slow_mem2 ->init_req(2, checked_mem);
|
||||
|
||||
slow_control = _gvn.transform(slow_reg2);
|
||||
slow_i_o = _gvn.transform(slow_i_o2);
|
||||
@ -4747,21 +4747,9 @@ LibraryCallKit::generate_arraycopy(const TypePtr* adr_type,
|
||||
alloc->in(AllocateNode::AllocSize));
|
||||
}
|
||||
|
||||
if (dest != original_dest) {
|
||||
// Promote from rawptr to oop, so it looks right in the call's GC map.
|
||||
dest = _gvn.transform( new(C,2) CheckCastPPNode(control(), dest,
|
||||
TypeInstPtr::NOTNULL) );
|
||||
|
||||
// Edit the call's debug-info to avoid referring to original_dest.
|
||||
// (The problem with original_dest is that it isn't ready until
|
||||
// after the InitializeNode completes, but this stuff is before.)
|
||||
// Substitute in the locally valid dest_oop.
|
||||
replace_in_map(original_dest, dest);
|
||||
}
|
||||
|
||||
generate_slow_arraycopy(adr_type,
|
||||
src, src_offset, dest, dest_offset,
|
||||
copy_length, nargs);
|
||||
copy_length);
|
||||
|
||||
result_region->init_req(slow_call_path, control());
|
||||
result_i_o ->init_req(slow_call_path, i_o());
|
||||
@ -4781,16 +4769,16 @@ LibraryCallKit::generate_arraycopy(const TypePtr* adr_type,
|
||||
|
||||
if (dest != original_dest) {
|
||||
// Pin the "finished" array node after the arraycopy/zeroing operations.
|
||||
// Use a secondary InitializeNode memory barrier.
|
||||
InitializeNode* init = insert_mem_bar_volatile(Op_Initialize,
|
||||
Compile::AliasIdxRaw,
|
||||
raw_dest)->as_Initialize();
|
||||
init->set_complete(&_gvn); // (there is no corresponding AllocateNode)
|
||||
_gvn.hash_delete(original_dest);
|
||||
original_dest->set_req(0, control());
|
||||
// Replace raw memory edge with new CheckCastPP to have a live oop
|
||||
// at safepoints instead of raw value.
|
||||
assert(dest->is_CheckCastPP() && dest->in(1) == original_dest->in(1), "sanity");
|
||||
original_dest->set_req(1, dest); // cast to the original type
|
||||
_gvn.hash_find_insert(original_dest); // put back into GVN table
|
||||
// Restore in the locally valid dest_oop.
|
||||
replace_in_map(dest, original_dest);
|
||||
}
|
||||
|
||||
// The memory edges above are precise in order to model effects around
|
||||
// array copies accurately to allow value numbering of field loads around
|
||||
// arraycopy. Such field loads, both before and after, are common in Java
|
||||
@ -5074,16 +5062,13 @@ void
|
||||
LibraryCallKit::generate_slow_arraycopy(const TypePtr* adr_type,
|
||||
Node* src, Node* src_offset,
|
||||
Node* dest, Node* dest_offset,
|
||||
Node* copy_length,
|
||||
int nargs) {
|
||||
_sp += nargs; // any deopt will start just before call to enclosing method
|
||||
Node* copy_length) {
|
||||
Node* call = make_runtime_call(RC_NO_LEAF | RC_UNCOMMON,
|
||||
OptoRuntime::slow_arraycopy_Type(),
|
||||
OptoRuntime::slow_arraycopy_Java(),
|
||||
"slow_arraycopy", adr_type,
|
||||
src, src_offset, dest, dest_offset,
|
||||
copy_length);
|
||||
_sp -= nargs;
|
||||
|
||||
// Handle exceptions thrown by this fellow:
|
||||
make_slow_call_ex(call, env()->Throwable_klass(), false);
|
||||
@ -5095,8 +5080,7 @@ LibraryCallKit::generate_checkcast_arraycopy(const TypePtr* adr_type,
|
||||
Node* dest_elem_klass,
|
||||
Node* src, Node* src_offset,
|
||||
Node* dest, Node* dest_offset,
|
||||
Node* copy_length,
|
||||
int nargs) {
|
||||
Node* copy_length) {
|
||||
if (stopped()) return NULL;
|
||||
|
||||
address copyfunc_addr = StubRoutines::checkcast_arraycopy();
|
||||
@ -5137,8 +5121,7 @@ Node*
|
||||
LibraryCallKit::generate_generic_arraycopy(const TypePtr* adr_type,
|
||||
Node* src, Node* src_offset,
|
||||
Node* dest, Node* dest_offset,
|
||||
Node* copy_length,
|
||||
int nargs) {
|
||||
Node* copy_length) {
|
||||
if (stopped()) return NULL;
|
||||
|
||||
address copyfunc_addr = StubRoutines::generic_arraycopy();
|
||||
|
@ -667,7 +667,6 @@ static bool merge_point_too_heavy(Compile* C, Node* region) {
  }
}

#ifdef _LP64
static bool merge_point_safe(Node* region) {
  // 4799512: Stop split_if_with_blocks from splitting a block with a ConvI2LNode
  // having a PhiNode input. This sidesteps the dangerous case where the split

@ -676,20 +675,25 @@ static bool merge_point_safe(Node* region) {
  // uses.
  // A better fix for this problem can be found in the BugTraq entry, but
  // expediency for Mantis demands this hack.
  // 6855164: If the merge point has a FastLockNode with a PhiNode input, we stop
  // split_if_with_blocks from splitting a block because we could not move around
  // the FastLockNode.
  for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
    Node* n = region->fast_out(i);
    if (n->is_Phi()) {
      for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
        Node* m = n->fast_out(j);
        if (m->Opcode() == Op_ConvI2L) {
        if (m->is_FastLock())
          return false;
        }
#ifdef _LP64
        if (m->Opcode() == Op_ConvI2L)
          return false;
#endif
      }
    }
  }
  return true;
}
#endif


//------------------------------place_near_use---------------------------------

@ -771,12 +775,10 @@ void PhaseIdealLoop::split_if_with_blocks_post( Node *n ) {
      if( get_loop(n_ctrl->in(j)) != n_loop )
        return;

#ifdef _LP64
  // Check for safety of the merge point.
  if( !merge_point_safe(n_ctrl) ) {
    return;
  }
#endif

  // Split compare 'n' through the merge point if it is profitable
  Node *phi = split_thru_phi( n, n_ctrl, policy );
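Editor's note: merge_point_safe() now also refuses to split a block whose merge point feeds a FastLockNode through a Phi (bug 6855164), and the check applies on all platforms rather than only under _LP64. A hedged Java sketch of the kind of shape that yields a Phi-fed FastLock (hypothetical names, for illustration only):

    static int work(boolean flag, Object a, Object b) {
        Object lock = flag ? a : b;   // a Phi merges the two inputs
        synchronized (lock) {         // the FastLock consumes that Phi
            return lock.hashCode();
        }
    }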
@ -141,6 +141,10 @@ void Matcher::verify_new_nodes_only(Node* xroot) {

//---------------------------match---------------------------------------------
void Matcher::match( ) {
  if( MaxLabelRootDepth < 100 ) { // Too small?
    assert(false, "invalid MaxLabelRootDepth, increase it to 100 minimum");
    MaxLabelRootDepth = 100;
  }
  // One-time initialization of some register masks.
  init_spill_mask( C->root()->in(1) );
  _return_addr_mask = return_addr();
@ -430,31 +430,28 @@ Node *AndINode::Identity( PhaseTransform *phase ) {
  // x & x => x
  if (phase->eqv(in(1), in(2))) return in(1);

  Node *load = in(1);
  const TypeInt *t2 = phase->type( in(2) )->isa_int();
  if( t2 && t2->is_con() ) {
  Node* in1 = in(1);
  uint op = in1->Opcode();
  const TypeInt* t2 = phase->type(in(2))->isa_int();
  if (t2 && t2->is_con()) {
    int con = t2->get_con();
    // Masking off high bits which are always zero is useless.
    const TypeInt* t1 = phase->type( in(1) )->isa_int();
    if (t1 != NULL && t1->_lo >= 0) {
      jint t1_support = ((jint)1 << (1 + log2_intptr(t1->_hi))) - 1;
      jint t1_support = right_n_bits(1 + log2_intptr(t1->_hi));
      if ((t1_support & con) == t1_support)
        return load;
        return in1;
    }
    uint lop = load->Opcode();
    if( lop == Op_LoadUS &&
        con == 0x0000FFFF ) // Already zero-extended
      return load;
    // Masking off the high bits of a unsigned-shift-right is not
    // needed either.
    if( lop == Op_URShiftI ) {
      const TypeInt *t12 = phase->type( load->in(2) )->isa_int();
      if( t12 && t12->is_con() ) { // Shift is by a constant
    if (op == Op_URShiftI) {
      const TypeInt* t12 = phase->type(in1->in(2))->isa_int();
      if (t12 && t12->is_con()) { // Shift is by a constant
        int shift = t12->get_con();
        shift &= BitsPerJavaInteger - 1;  // semantics of Java shifts
        int mask = max_juint >> shift;
        if( (mask&con) == mask ) // If AND is useless, skip it
          return load;
        if ((mask & con) == mask)  // If AND is useless, skip it
          return in1;
      }
    }
  }
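Editor's note: AndINode::Identity removes masks that provably cannot change the value. A minimal Java illustration (not from the changeset) of the zero-extended-load case handled above: a char load already yields a value in 0..0xFFFF, so the explicit mask is redundant and the AND node is dropped.

    static int f(char[] cs) {
        // LoadUS is already zero-extended; '& 0xFFFF' is an identity
        return cs[0] & 0xFFFF;
    }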
@ -476,26 +473,17 @@ Node *AndINode::Ideal(PhaseGVN *phase, bool can_reshape) {
    return new (phase->C, 3) AndINode(load,phase->intcon(mask&0xFFFF));

  // Masking bits off of a Short? Loading a Character does some masking
  if( lop == Op_LoadS &&
      (mask & 0xFFFF0000) == 0 ) {
  if (lop == Op_LoadS && (mask & 0xFFFF0000) == 0 ) {
    Node *ldus = new (phase->C, 3) LoadUSNode(load->in(MemNode::Control),
                                              load->in(MemNode::Memory),
                                              load->in(MemNode::Address),
                                              load->adr_type());
                                              load->in(MemNode::Memory),
                                              load->in(MemNode::Address),
                                              load->adr_type());
    ldus = phase->transform(ldus);
    return new (phase->C, 3) AndINode(ldus, phase->intcon(mask&0xFFFF));
    return new (phase->C, 3) AndINode(ldus, phase->intcon(mask & 0xFFFF));
  }

  // Masking sign bits off of a Byte? Do an unsigned byte load.
  if (lop == Op_LoadB && mask == 0x000000FF) {
    return new (phase->C, 3) LoadUBNode(load->in(MemNode::Control),
                                        load->in(MemNode::Memory),
                                        load->in(MemNode::Address),
                                        load->adr_type());
  }

  // Masking sign bits off of a Byte plus additional lower bits? Do
  // an unsigned byte load plus an and.
  // Masking sign bits off of a Byte? Do an unsigned byte load plus
  // an and.
  if (lop == Op_LoadB && (mask & 0xFFFFFF00) == 0) {
    Node* ldub = new (phase->C, 3) LoadUBNode(load->in(MemNode::Control),
                                              load->in(MemNode::Memory),
@ -605,8 +593,13 @@ Node *AndLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node* in1 = in(1);
  uint op = in1->Opcode();

  // Masking sign bits off of an integer? Do an unsigned integer to long load.
  if (op == Op_ConvI2L && in1->in(1)->Opcode() == Op_LoadI && mask == 0x00000000FFFFFFFFL) {
  // Masking sign bits off of an integer? Do an unsigned integer to
  // long load.
  // NOTE: This check must be *before* we try to convert the AndLNode
  // to an AndINode and commute it with ConvI2LNode because
  // 0xFFFFFFFFL masks the whole integer and we get a sign extension,
  // which is wrong.
  if (op == Op_ConvI2L && in1->in(1)->Opcode() == Op_LoadI && mask == CONST64(0x00000000FFFFFFFF)) {
    Node* load = in1->in(1);
    return new (phase->C, 3) LoadUI2LNode(load->in(MemNode::Control),
                                          load->in(MemNode::Memory),

@ -614,9 +607,22 @@ Node *AndLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
                                          load->adr_type());
  }

  // Are we masking a long that was converted from an int with a mask
  // that fits in 32-bits? Commute them and use an AndINode.
  if (op == Op_ConvI2L && (mask & CONST64(0xFFFFFFFF00000000)) == 0) {
    // If we are doing an UI2L conversion (i.e. the mask is
    // 0x00000000FFFFFFFF) we cannot convert the AndL to an AndI
    // because the AndI would be optimized away later in Identity.
    if (mask != CONST64(0x00000000FFFFFFFF)) {
      Node* andi = new (phase->C, 3) AndINode(in1->in(1), phase->intcon(mask));
      andi = phase->transform(andi);
      return new (phase->C, 2) ConvI2LNode(andi);
    }
  }

  // Masking off sign bits? Dont make them!
  if (op == Op_RShiftL) {
    const TypeInt *t12 = phase->type(in1->in(2))->isa_int();
    const TypeInt* t12 = phase->type(in1->in(2))->isa_int();
    if( t12 && t12->is_con() ) { // Shift is by a constant
      int shift = t12->get_con();
      shift &= BitsPerJavaLong - 1;  // semantics of Java shifts

@ -626,7 +632,7 @@ Node *AndLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
      if( (sign_bits_mask & mask) == 0 ) {
        // Use zero-fill shift instead
        Node *zshift = phase->transform(new (phase->C, 3) URShiftLNode(in1->in(1), in1->in(2)));
        return new (phase->C, 3) AndLNode( zshift, in(2) );
        return new (phase->C, 3) AndLNode(zshift, in(2));
      }
    }
  }
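Editor's note: the ordering comment above is the heart of the 5057225 fix. For the full-width mask the AndL must become a LoadUI2L; commuting it to a 32-bit AndI through the ConvI2L would let Identity erase the AND, leaving a sign-extending conversion. A short Java illustration (it mirrors the new Test5057225 regression test added below):

    static long unsignedInt(int[] ia) {
        // For ia[0] == -1 the correct result is 0xFFFFFFFFL
        // (4294967295L); a sign-extending ConvI2L would yield -1L.
        return ia[0] & 0xFFFFFFFFL;
    }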
@ -1565,7 +1565,7 @@ void Parse::do_one_bytecode() {
    c = pop();                  // Oop to store
    b = pop();                  // index (already used)
    a = pop();                  // the array itself
    const Type* elemtype = _gvn.type(a)->is_aryptr()->elem();
    const TypeOopPtr* elemtype = _gvn.type(a)->is_aryptr()->elem()->make_oopptr();
    const TypeAryPtr* adr_type = TypeAryPtr::OOPS;
    Node* store = store_oop_to_array(control(), a, d, adr_type, c, elemtype, T_OBJECT);
    break;
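Editor's note: the aastore path now carries the element type as a TypeOopPtr. In source terms, the element type being sharpened here is the one checked by the bytecode's store check, e.g. (illustrative only):

    Number[] a = new Number[1];
    a[0] = Integer.valueOf(42); // aastore checks Integer against the
                                // array's element type, Number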
@ -222,7 +222,7 @@ void Parse::do_put_xxx(const TypePtr* obj_type, Node* obj, ciField* field, bool
  // Store the value.
  Node* store;
  if (bt == T_OBJECT) {
    const TypePtr* field_type;
    const TypeOopPtr* field_type;
    if (!field->type()->is_loaded()) {
      field_type = TypeInstPtr::BOTTOM;
    } else {
@ -361,7 +361,7 @@ Node* Parse::expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, in
    guarantee(length_con >= 0, "non-constant multianewarray");
    ciArrayKlass* array_klass_1 = array_klass->as_obj_array_klass()->element_klass()->as_array_klass();
    const TypePtr* adr_type = TypeAryPtr::OOPS;
    const Type* elemtype = _gvn.type(array)->is_aryptr()->elem();
    const TypeOopPtr* elemtype = _gvn.type(array)->is_aryptr()->elem()->make_oopptr();
    const intptr_t header = arrayOopDesc::base_offset_in_bytes(T_OBJECT);
    for (jint i = 0; i < length_con; i++) {
      Node* elem = expand_multianewarray(array_klass_1, &lengths[1], ndimensions-1, nargs);
@ -487,6 +487,23 @@ bool Type::is_nan() const {
  return false;
}

//----------------------interface_vs_oop---------------------------------------
#ifdef ASSERT
bool Type::interface_vs_oop(const Type *t) const {
  bool result = false;

  const TypeInstPtr* this_inst = this->isa_instptr();
  const TypeInstPtr*    t_inst =    t->isa_instptr();
  if( this_inst && this_inst->is_loaded() && t_inst && t_inst->is_loaded() ) {
    bool this_interface = this_inst->klass()->is_interface();
    bool    t_interface =    t_inst->klass()->is_interface();
    result = this_interface ^ t_interface;
  }

  return result;
}
#endif

//------------------------------meet-------------------------------------------
// Compute the MEET of two types.  NOT virtual.  It enforces that meet is
// commutative and the lattice is symmetric.

@ -507,16 +524,8 @@ const Type *Type::meet( const Type *t ) const {
  // Interface meet Oop is Not Symmetric:
  // Interface:AnyNull meet Oop:AnyNull == Interface:AnyNull
  // Interface:NotNull meet Oop:NotNull == java/lang/Object:NotNull
  const TypeInstPtr* this_inst = this->isa_instptr();
  const TypeInstPtr*    t_inst =    t->isa_instptr();
  bool interface_vs_oop = false;
  if( this_inst && this_inst->is_loaded() && t_inst && t_inst->is_loaded() ) {
    bool this_interface = this_inst->klass()->is_interface();
    bool    t_interface =    t_inst->klass()->is_interface();
    interface_vs_oop = this_interface ^ t_interface;
  }

  if( !interface_vs_oop && (t2t != t->_dual || t2this != _dual) ) {
  if( !interface_vs_oop(t) && (t2t != t->_dual || t2this != _dual) ) {
    tty->print_cr("=== Meet Not Symmetric ===");
    tty->print("t   =                   ");         t->dump(); tty->cr();
    tty->print("this=                   ");            dump(); tty->cr();
@ -1800,6 +1809,17 @@ int TypeAry::hash(void) const {
  return (intptr_t)_elem + (intptr_t)_size;
}

//----------------------interface_vs_oop---------------------------------------
#ifdef ASSERT
bool TypeAry::interface_vs_oop(const Type *t) const {
  const TypeAry* t_ary = t->is_ary();
  if (t_ary) {
    return _elem->interface_vs_oop(t_ary->_elem);
  }
  return false;
}
#endif

//------------------------------dump2------------------------------------------
#ifndef PRODUCT
void TypeAry::dump2( Dict &d, uint depth, outputStream *st ) const {
@ -3389,6 +3409,17 @@ const Type *TypeAryPtr::xdual() const {
  return new TypeAryPtr( dual_ptr(), _const_oop, _ary->dual()->is_ary(),_klass, _klass_is_exact, dual_offset(), dual_instance_id() );
}

//----------------------interface_vs_oop---------------------------------------
#ifdef ASSERT
bool TypeAryPtr::interface_vs_oop(const Type *t) const {
  const TypeAryPtr* t_aryptr = t->isa_aryptr();
  if (t_aryptr) {
    return _ary->interface_vs_oop(t_aryptr->_ary);
  }
  return false;
}
#endif

//------------------------------dump2------------------------------------------
#ifndef PRODUCT
void TypeAryPtr::dump2( Dict &d, uint depth, outputStream *st ) const {
@ -3453,27 +3484,27 @@ const TypeNarrowOop* TypeNarrowOop::make(const TypePtr* type) {
//------------------------------hash-------------------------------------------
// Type-specific hashing function.
int TypeNarrowOop::hash(void) const {
  return _ooptype->hash() + 7;
  return _ptrtype->hash() + 7;
}


bool TypeNarrowOop::eq( const Type *t ) const {
  const TypeNarrowOop* tc = t->isa_narrowoop();
  if (tc != NULL) {
    if (_ooptype->base() != tc->_ooptype->base()) {
    if (_ptrtype->base() != tc->_ptrtype->base()) {
      return false;
    }
    return tc->_ooptype->eq(_ooptype);
    return tc->_ptrtype->eq(_ptrtype);
  }
  return false;
}

bool TypeNarrowOop::singleton(void) const {    // TRUE if type is a singleton
  return _ooptype->singleton();
  return _ptrtype->singleton();
}

bool TypeNarrowOop::empty(void) const {
  return _ooptype->empty();
  return _ptrtype->empty();
}

//------------------------------xmeet------------------------------------------
@ -3507,7 +3538,7 @@ const Type *TypeNarrowOop::xmeet( const Type *t ) const {
    return this;

  case NarrowOop: {
    const Type* result = _ooptype->xmeet(t->make_ptr());
    const Type* result = _ptrtype->xmeet(t->make_ptr());
    if (result->isa_ptr()) {
      return TypeNarrowOop::make(result->is_ptr());
    }

@ -3523,13 +3554,13 @@ const Type *TypeNarrowOop::xmeet( const Type *t ) const {
}

const Type *TypeNarrowOop::xdual() const {    // Compute dual right now.
  const TypePtr* odual = _ooptype->dual()->is_ptr();
  const TypePtr* odual = _ptrtype->dual()->is_ptr();
  return new TypeNarrowOop(odual);
}

const Type *TypeNarrowOop::filter( const Type *kills ) const {
  if (kills->isa_narrowoop()) {
    const Type* ft =_ooptype->filter(kills->is_narrowoop()->_ooptype);
    const Type* ft =_ptrtype->filter(kills->is_narrowoop()->_ptrtype);
    if (ft->empty())
      return Type::TOP;           // Canonical empty value

    if (ft->isa_ptr()) {

@ -3537,7 +3568,7 @@ const Type *TypeNarrowOop::filter( const Type *kills ) const {
    }
    return ft;
  } else if (kills->isa_ptr()) {
    const Type* ft = _ooptype->join(kills);
    const Type* ft = _ptrtype->join(kills);
    if (ft->empty())
      return Type::TOP;           // Canonical empty value
    return ft;

@ -3548,13 +3579,13 @@ const Type *TypeNarrowOop::filter( const Type *kills ) const {


intptr_t TypeNarrowOop::get_con() const {
  return _ooptype->get_con();
  return _ptrtype->get_con();
}

#ifndef PRODUCT
void TypeNarrowOop::dump2( Dict & d, uint depth, outputStream *st ) const {
  st->print("narrowoop: ");
  _ooptype->dump2(d, depth, st);
  _ptrtype->dump2(d, depth, st);
}
#endif
@ -190,6 +190,11 @@ public:
  // Currently, it also works around limitations involving interface types.
  virtual const Type *filter( const Type *kills ) const;

#ifdef ASSERT
  // One type is interface, the other is oop
  virtual bool interface_vs_oop(const Type *t) const;
#endif

  // Returns true if this pointer points at memory which contains a
  // compressed oop references.
  bool is_ptr_to_narrowoop() const;

@ -227,6 +232,11 @@ public:

  // Returns this ptr type or the equivalent ptr type for this compressed pointer.
  const TypePtr* make_ptr() const;

  // Returns this oopptr type or the equivalent oopptr type for this compressed pointer.
  // Asserts if the underlying type is not an oopptr or narrowoop.
  const TypeOopPtr* make_oopptr() const;

  // Returns this compressed pointer or the equivalent compressed version
  // of this pointer type.
  const TypeNarrowOop* make_narrowoop() const;
@ -546,6 +556,10 @@ public:
  virtual const Type *xmeet( const Type *t ) const;
  virtual const Type *xdual() const;    // Compute dual right now.
  bool ary_must_be_exact() const;  // true if arrays of such are never generic
#ifdef ASSERT
  // One type is interface, the other is oop
  virtual bool interface_vs_oop(const Type *t) const;
#endif
#ifndef PRODUCT
  virtual void dump2( Dict &d, uint, outputStream *st  ) const; // Specialized per-Type dumping
#endif

@ -867,6 +881,10 @@ public:
  }
  static const TypeAryPtr *_array_body_type[T_CONFLICT+1];
  // sharpen the type of an int which is used as an array size
#ifdef ASSERT
  // One type is interface, the other is oop
  virtual bool interface_vs_oop(const Type *t) const;
#endif
#ifndef PRODUCT
  virtual void dump2( Dict &d, uint depth, outputStream *st ) const; // Specialized per-Type dumping
#endif
@ -919,13 +937,13 @@ public:
// between the normal and the compressed form.
class TypeNarrowOop : public Type {
protected:
  const TypePtr* _ooptype; // Could be TypePtr::NULL_PTR
  const TypePtr* _ptrtype; // Could be TypePtr::NULL_PTR

  TypeNarrowOop( const TypePtr* ooptype): Type(NarrowOop),
    _ooptype(ooptype) {
    assert(ooptype->offset() == 0 ||
           ooptype->offset() == OffsetBot ||
           ooptype->offset() == OffsetTop, "no real offsets");
  TypeNarrowOop( const TypePtr* ptrtype): Type(NarrowOop),
    _ptrtype(ptrtype) {
    assert(ptrtype->offset() == 0 ||
           ptrtype->offset() == OffsetBot ||
           ptrtype->offset() == OffsetTop, "no real offsets");
  }
public:
  virtual bool eq( const Type *t ) const;

@ -949,8 +967,8 @@ public:
  }

  // returns the equivalent ptr type for this compressed pointer
  const TypePtr *make_oopptr() const {
    return _ooptype;
  const TypePtr *get_ptrtype() const {
    return _ptrtype;
  }

  static const TypeNarrowOop *BOTTOM;

@ -1137,10 +1155,14 @@ inline const TypeKlassPtr *Type::is_klassptr() const {
}

inline const TypePtr* Type::make_ptr() const {
  return (_base == NarrowOop) ? is_narrowoop()->make_oopptr() :
  return (_base == NarrowOop) ? is_narrowoop()->get_ptrtype() :
                                (isa_ptr() ? is_ptr() : NULL);
}

inline const TypeOopPtr* Type::make_oopptr() const {
  return (_base == NarrowOop) ? is_narrowoop()->get_ptrtype()->is_oopptr() : is_oopptr();
}

inline const TypeNarrowOop* Type::make_narrowoop() const {
  return (_base == NarrowOop) ? is_narrowoop() :
                                (isa_ptr() ? TypeNarrowOop::make(is_ptr()) : NULL);
@ -606,6 +606,7 @@ JvmtiEnvBase::count_locked_objects(JavaThread *java_thread, Handle hobj) {
  if (!mons->is_empty()) {
    for (int i = 0; i < mons->length(); i++) {
      MonitorInfo *mi = mons->at(i);
      if (mi->owner_is_scalar_replaced()) continue;

      // see if owner of the monitor is our object
      if (mi->owner() != NULL && mi->owner() == hobj()) {

@ -726,6 +727,8 @@ JvmtiEnvBase::get_locked_objects_in_frame(JavaThread* calling_thread, JavaThread
    for (int i = 0; i < mons->length(); i++) {
      MonitorInfo *mi = mons->at(i);

      if (mi->owner_is_scalar_replaced()) continue;

      oop obj = mi->owner();
      if (obj == NULL) {
        // this monitor doesn't have an owning object so skip it
@ -121,6 +121,7 @@ static GrowableArray<MonitorInfo*>* get_or_compute_monitor_info(JavaThread* thre
    // Walk monitors youngest to oldest
    for (int i = len - 1; i >= 0; i--) {
      MonitorInfo* mon_info = monitors->at(i);
      if (mon_info->owner_is_scalar_replaced()) continue;
      oop owner = mon_info->owner();
      if (owner != NULL) {
        info->append(mon_info);

@ -694,6 +695,7 @@ void BiasedLocking::preserve_marks() {
        // Walk monitors youngest to oldest
        for (int i = len - 1; i >= 0; i--) {
          MonitorInfo* mon_info = monitors->at(i);
          if (mon_info->owner_is_scalar_replaced()) continue;
          oop owner = mon_info->owner();
          if (owner != NULL) {
            markOop mark = owner->mark();
@ -933,7 +933,7 @@ static void collect_monitors(compiledVFrame* cvf, GrowableArray<Handle>* objects
  GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
  for (int i = 0; i < monitors->length(); i++) {
    MonitorInfo* mon_info = monitors->at(i);
    if (mon_info->owner() != NULL && !mon_info->eliminated()) {
    if (!mon_info->eliminated() && mon_info->owner() != NULL) {
      objects_to_revoke->append(Handle(mon_info->owner()));
    }
  }
@ -1994,6 +1994,10 @@ class CommandLineFlags {
  product_rw(bool, PrintHeapAtGC, false,                                    \
          "Print heap layout before and after each GC")                     \
                                                                            \
  product_rw(bool, PrintHeapAtGCExtended, false,                            \
          "Prints extended information about the layout of the heap "      \
          "when -XX:+PrintHeapAtGC is set")                                 \
                                                                            \
  product(bool, PrintHeapAtSIGBREAK, true,                                  \
          "Print heap layout in response to SIGBREAK")                      \
                                                                            \
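Editor's note: per the flag description, the new switch only adds detail on top of the existing one, so a typical invocation (MyApp is a placeholder) would be:

    java -XX:+PrintHeapAtGC -XX:+PrintHeapAtGCExtended MyApp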
@ -146,8 +146,9 @@ StackValue* StackValue::create_stack_value(const frame* fr, const RegisterMap* r
      value.jl = ((ConstantLongValue *)sv)->value();
      return new StackValue(value.p);
#endif
    } else if (sv->is_object()) {
      return new StackValue(((ObjectValue *)sv)->value());
    } else if (sv->is_object()) { // Scalar replaced object in compiled frame
      Handle ov = ((ObjectValue *)sv)->value();
      return new StackValue(ov, (ov.is_null()) ? 1 : 0);
    }

    // Unknown ScopeValue type
@ -34,9 +34,11 @@ class StackValue : public ResourceObj {
    _i = value;
  }

  StackValue(Handle value) {
  StackValue(Handle value, intptr_t scalar_replaced = 0) {
    _type = T_OBJECT;
    _i = scalar_replaced;
    _o = value;
    assert(_i == 0 || _o.is_null(), "not null object should not be marked as scalar replaced");
  }

  StackValue() {

@ -56,6 +58,11 @@ class StackValue : public ResourceObj {
    return _o;
  }

  bool obj_is_scalar_replaced() const {
    assert(type() == T_OBJECT, "type check");
    return _i != 0;
  }

  void set_obj(Handle value) {
    assert(type() == T_OBJECT, "type check");
    _o = value;
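Editor's note: a StackValue can now record that its object was scalar replaced; the assert forbids marking a non-null handle. The Java shape that produces such values is an allocation the compiler proves never escapes, e.g. (a hedged sketch, hypothetical method):

    static int len() {
        StringBuilder sb = new StringBuilder(); // never escapes this method
        sb.append("ab");
        // Escape analysis may scalar replace sb; at a deoptimization
        // point the debug info then describes it as an ObjectValue
        // rather than a live heap oop.
        return sb.length();
    }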
@ -106,6 +106,7 @@ GrowableArray<MonitorInfo*>* javaVFrame::locked_monitors() {

  for (int index = (mons->length()-1); index >= 0; index--) {
    MonitorInfo* monitor = mons->at(index);
    if (monitor->eliminated() && is_compiled_frame()) continue; // skip eliminated monitor
    oop obj = monitor->owner();
    if (obj == NULL) continue; // skip unowned monitor
    //

@ -162,6 +163,18 @@ void javaVFrame::print_lock_info_on(outputStream* st, int frame_count) {
    bool found_first_monitor = false;
    for (int index = (mons->length()-1); index >= 0; index--) {
      MonitorInfo* monitor = mons->at(index);
      if (monitor->eliminated() && is_compiled_frame()) { // Eliminated in compiled code
        if (monitor->owner_is_scalar_replaced()) {
          Klass* k = Klass::cast(monitor->owner_klass());
          st->print("\t- eliminated <owner is scalar replaced> (a %s)", k->external_name());
        } else {
          oop obj = monitor->owner();
          if (obj != NULL) {
            print_locked_object_class_name(st, obj, "eliminated");
          }
        }
        continue;
      }
      if (monitor->owner() != NULL) {

        // First, assume we have the monitor locked. If we haven't found an

@ -171,11 +184,11 @@ void javaVFrame::print_lock_info_on(outputStream* st, int frame_count) {

        const char *lock_state = "locked"; // assume we have the monitor locked
        if (!found_first_monitor && frame_count == 0) {
        markOop mark = monitor->owner()->mark();
        if (mark->has_monitor() &&
            mark->monitor() == thread()->current_pending_monitor()) {
          markOop mark = monitor->owner()->mark();
          if (mark->has_monitor() &&
              mark->monitor() == thread()->current_pending_monitor()) {
            lock_state = "waiting to lock";
          }
        }

        found_first_monitor = true;
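Editor's note: with this change a thread dump can label a monitor whose owner no longer exists as a heap object. Instantiating the format string above, a frame line would render roughly as (class name hypothetical):

    - eliminated <owner is scalar replaced> (a java.lang.StringBuilder)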
@ -206,7 +219,7 @@ GrowableArray<MonitorInfo*>* interpretedVFrame::monitors() const {
  for (BasicObjectLock* current = (fr().previous_monitor_in_interpreter_frame(fr().interpreter_frame_monitor_begin()));
       current >= fr().interpreter_frame_monitor_end();
       current = fr().previous_monitor_in_interpreter_frame(current)) {
    result->push(new MonitorInfo(current->obj(), current->lock(), false));
    result->push(new MonitorInfo(current->obj(), current->lock(), false, false));
  }
  return result;
}

@ -531,8 +544,18 @@ void javaVFrame::print() {
    tty->print_cr("\tmonitor list:");
    for (int index = (list->length()-1); index >= 0; index--) {
      MonitorInfo* monitor = list->at(index);
      tty->print("\t  obj\t"); monitor->owner()->print_value();
      tty->print("(" INTPTR_FORMAT ")", (address)monitor->owner());
      tty->print("\t  obj\t");
      if (monitor->owner_is_scalar_replaced()) {
        Klass* k = Klass::cast(monitor->owner_klass());
        tty->print("( is scalar replaced %s)", k->external_name());
      } else if (monitor->owner() == NULL) {
        tty->print("( null )");
      } else {
        monitor->owner()->print_value();
        tty->print("(" INTPTR_FORMAT ")", (address)monitor->owner());
      }
      if (monitor->eliminated() && is_compiled_frame())
        tty->print(" ( lock is eliminated )");
      tty->cr();
      tty->print("\t  ");
      monitor->lock()->print_on(tty);
@ -230,18 +230,36 @@ class MonitorInfo : public ResourceObj {
 private:
  oop        _owner; // the object owning the monitor
  BasicLock* _lock;
  oop        _owner_klass; // klass if owner was scalar replaced
  bool       _eliminated;
  bool       _owner_is_scalar_replaced;
 public:
  // Constructor
  MonitorInfo(oop owner, BasicLock* lock, bool eliminated) {
    _owner = owner;
  MonitorInfo(oop owner, BasicLock* lock, bool eliminated, bool owner_is_scalar_replaced) {
    if (!owner_is_scalar_replaced) {
      _owner = owner;
      _owner_klass = NULL;
    } else {
      assert(eliminated, "monitor should be eliminated for scalar replaced object");
      _owner = NULL;
      _owner_klass = owner;
    }
    _lock  = lock;
    _eliminated = eliminated;
    _owner_is_scalar_replaced = owner_is_scalar_replaced;
  }
  // Accessors
  oop        owner() const { return _owner; }
  oop owner() const {
    assert(!_owner_is_scalar_replaced, "should not be called for scalar replaced object");
    return _owner;
  }
  klassOop owner_klass() const {
    assert(_owner_is_scalar_replaced, "should not be called for not scalar replaced object");
    return (klassOop)_owner_klass;
  }
  BasicLock* lock()  const { return _lock;  }
  bool eliminated()  const { return _eliminated; }
  bool owner_is_scalar_replaced() const { return _owner_is_scalar_replaced; }
};

class vframeStreamCommon : StackObj {
@ -61,6 +61,7 @@ void vframeArrayElement::fill_in(compiledVFrame* vf) {
    // Migrate the BasicLocks from the stack to the monitor chunk
    for (index = 0; index < list->length(); index++) {
      MonitorInfo* monitor = list->at(index);
      assert(!monitor->owner_is_scalar_replaced(), "object should be reallocated already");
      assert(monitor->owner() == NULL || (!monitor->owner()->is_unlocked() && !monitor->owner()->has_bias_pattern()), "object must be null or locked, and unbiased");
      BasicObjectLock* dest = _monitors->at(index);
      dest->set_obj(monitor->owner());

@ -89,6 +90,7 @@ void vframeArrayElement::fill_in(compiledVFrame* vf) {
    StackValue* value = locs->at(index);
    switch(value->type()) {
      case T_OBJECT:
        assert(!value->obj_is_scalar_replaced(), "object should be reallocated already");
        // preserve object type
        _locals->add( new StackValue((intptr_t) (value->get_obj()()), T_OBJECT ));
        break;

@ -113,6 +115,7 @@ void vframeArrayElement::fill_in(compiledVFrame* vf) {
    StackValue* value = exprs->at(index);
    switch(value->type()) {
      case T_OBJECT:
        assert(!value->obj_is_scalar_replaced(), "object should be reallocated already");
        // preserve object type
        _expressions->add( new StackValue((intptr_t) (value->get_obj()()), T_OBJECT ));
        break;
@ -190,7 +190,7 @@ GrowableArray<MonitorInfo*>* compiledVFrame::monitors() const {
    // Casting away const
    frame& fr = (frame&) _fr;
    MonitorInfo* info = new MonitorInfo(fr.compiled_synchronized_native_monitor_owner(nm),
                                        fr.compiled_synchronized_native_monitor(nm), false);
                                        fr.compiled_synchronized_native_monitor(nm), false, false);
    monitors->push(info);
    return monitors;
  }

@ -201,8 +201,20 @@ GrowableArray<MonitorInfo*>* compiledVFrame::monitors() const {
  GrowableArray<MonitorInfo*>* result = new GrowableArray<MonitorInfo*>(monitors->length());
  for (int index = 0; index < monitors->length(); index++) {
    MonitorValue* mv = monitors->at(index);
    StackValue *owner_sv = create_stack_value(mv->owner()); // it is an oop
    result->push(new MonitorInfo(owner_sv->get_obj()(), resolve_monitor_lock(mv->basic_lock()), mv->eliminated()));
    ScopeValue*   ov = mv->owner();
    StackValue *owner_sv = create_stack_value(ov); // it is an oop
    if (ov->is_object() && owner_sv->obj_is_scalar_replaced()) { // The owner object was scalar replaced
      assert(mv->eliminated(), "monitor should be eliminated for scalar replaced object");
      // Put klass for scalar replaced object.
      ScopeValue* kv = ((ObjectValue *)ov)->klass();
      assert(kv->is_constant_oop(), "klass should be oop constant for scalar replaced object");
      KlassHandle k(((ConstantOopReadValue*)kv)->value()());
      result->push(new MonitorInfo(k->as_klassOop(), resolve_monitor_lock(mv->basic_lock()),
                                   mv->eliminated(), true));
    } else {
      result->push(new MonitorInfo(owner_sv->get_obj()(), resolve_monitor_lock(mv->basic_lock()),
                                   mv->eliminated(), false));
    }
  }
  return result;
}
hotspot/test/compiler/5057225/Test5057225.java  (new file, 140 lines)
@ -0,0 +1,140 @@
/*
 * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 */

/**
 * @test
 * @bug 5057225
 * @summary Remove useless I2L conversions
 *
 * @run main/othervm -Xcomp -XX:CompileOnly=Test5057225.doload Test5057225
 */

import java.net.URLClassLoader;

public class Test5057225 {
    static byte[]  ba = new byte[]  { -1 };
    static short[] sa = new short[] { -1 };
    static int[]   ia = new int[]   { -1 };

    static final long[] BYTE_MASKS = {
         0x0FL,
         0x7FL,  // 7-bit
         0xFFL,
    };

    static final long[] SHORT_MASKS = {
        0x000FL,
        0x007FL,  // 7-bit
        0x00FFL,
        0x0FFFL,
        0x3FFFL,  // 14-bit
        0x7FFFL,  // 15-bit
        0xFFFFL,
    };

    static final long[] INT_MASKS = {
        0x0000000FL,
        0x0000007FL,  // 7-bit
        0x000000FFL,
        0x00000FFFL,
        0x00003FFFL,  // 14-bit
        0x00007FFFL,  // 15-bit
        0x0000FFFFL,
        0x00FFFFFFL,
        0x7FFFFFFFL,  // 31-bit
        0xFFFFFFFFL,
    };

    public static void main(String[] args) throws Exception {
        for (int i = 0; i < BYTE_MASKS.length; i++) {
            System.setProperty("value", "" + BYTE_MASKS[i]);
            loadAndRunClass("Test5057225$loadUB2L");
        }

        for (int i = 0; i < SHORT_MASKS.length; i++) {
            System.setProperty("value", "" + SHORT_MASKS[i]);
            loadAndRunClass("Test5057225$loadUS2L");
        }

        for (int i = 0; i < INT_MASKS.length; i++) {
            System.setProperty("value", "" + INT_MASKS[i]);
            loadAndRunClass("Test5057225$loadUI2L");
        }
    }

    static void check(long result, long expected) {
        if (result != expected)
            throw new InternalError(result + " != " + expected);
    }

    static void loadAndRunClass(String classname) throws Exception {
        Class cl = Class.forName(classname);
        URLClassLoader apploader = (URLClassLoader) cl.getClassLoader();
        ClassLoader loader = new URLClassLoader(apploader.getURLs(), apploader.getParent());
        Class c = loader.loadClass(classname);
        Runnable r = (Runnable) c.newInstance();
        r.run();
    }

    public static class loadUB2L implements Runnable {
        static final long MASK;
        static {
            long value = 0;
            try {
                value = Long.decode(System.getProperty("value"));
            } catch (Throwable e) {}
            MASK = value;
        }

        public void run() { check(doload(ba), MASK); }
        static long doload(byte[] ba) { return ba[0] & MASK; }
    }

    public static class loadUS2L implements Runnable {
        static final long MASK;
        static {
            long value = 0;
            try {
                value = Long.decode(System.getProperty("value"));
            } catch (Throwable e) {}
            MASK = value;
        }

        public void run() { check(doload(sa), MASK); }
        static long doload(short[] sa) { return sa[0] & MASK; }
    }

    public static class loadUI2L implements Runnable {
        static final long MASK;
        static {
            long value = 0;
            try {
                value = Long.decode(System.getProperty("value"));
            } catch (Throwable e) {}
            MASK = value;
        }

        public void run() { check(doload(ia), MASK); }
        static long doload(int[] ia) { return ia[0] & MASK; }
    }
}
hotspot/test/compiler/6837094/Test.java  (new file, 94 lines)
@ -0,0 +1,94 @@
/*
 * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

/**
 * @test
 * @bug 6837094
 * @summary False positive for "meet not symmetric" failure
 *
 * @run main/othervm -Xbatch -XX:CompileOnly=Test.collectIs,Test$Factory$1.getArray,Test$Factory$2.getArray Test
 */

import java.util.Set;
import java.util.HashSet;

public class Test {

    private interface Factory<M extends Interface> {
        Factory<Child0> Zero = new Factory<Child0>() {
            public Child0[] getArray() { return new Child0[1]; }
        };

        Factory<Child1> One = new Factory<Child1>() {
            public Child1[] getArray() { return new Child1[1]; }
        };

        M[] getArray();
    }

    /**
     * C2 asserts when compiling this method. Bimorphic inlining happens at
     * the getArray call site. A Phi in the catch block tries to join the meet
     * type from the inline site (Parent[]) with the type expected by CI
     * (Interface[]).
     *
     * C2 throws an assert when it doesn't need to.
     */
    private static <I extends Interface> void collectIs(
            Factory<I> factory, Set<Interface> s) {
        for (I i : factory.getArray()) {
            try {
                s.add(i);
            } catch (Exception e) {
            }
        }
    }

    static public void main(String argv[]) {
        Set<Interface> s = new HashSet();

        for (int i = 0; i < 25000; i++) {
            collectIs(Factory.Zero, s);
            collectIs(Factory.One, s);
        }
    }
}

/**
 * Establish necessary class hierarchy
 */

interface Interface {
}

class Parent {
}

class Child0 extends Parent implements Interface {
}

class Child1 extends Parent implements Interface {
}

class Child2 extends Parent implements Interface {
}
hotspot/test/compiler/6849574/Test.java  (new file, 44 lines)
@ -0,0 +1,44 @@
/*
 * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

/**
 * @test
 * @bug 6849574
 * @summary VM crash using NonBlockingHashMap (high_scale_lib)
 *
 * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+VerifyBeforeGC Test
 */

import java.util.concurrent.atomic.*;

public class Test extends Thread {

    public static void main(String[] args) {
        AtomicReferenceArray a = new AtomicReferenceArray(10000);
        for (int i = 0; i < 100000; i++) {
            a.getAndSet(9999, new Object());
            if (i > 99990) System.gc();
        }
    }
}
hotspot/test/compiler/6855164/Test.java  (new file, 55 lines)
@ -0,0 +1,55 @@
/*
 * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 */

/*
 * @test
 * @bug 6855164
 * @summary SIGSEGV during compilation of method involving loop over CharSequence
 * @run main/othervm -Xbatch Test
 */

public class Test{
    public static void main(String[] args) throws Exception {
        StringBuffer builder = new StringBuffer();

        for(int i = 0; i < 100; i++)
            builder.append("I am the very model of a modern major general\n");

        for(int j = 0; j < builder.length(); j++){
            previousSpaceIndex(builder, j);
        }
    }

    private static final int previousSpaceIndex(CharSequence sb, int seek) {
        seek--;
        while (seek > 0) {
            if (sb.charAt(seek) == ' ') {
                while (seek > 0 && sb.charAt(seek - 1) == ' ')
                    seek--;
                return seek;
            }
            seek--;
        }
        return 0;
    }
}
@ -38,3 +38,4 @@ d5a1223e961891564de25c39fba6f2442d0fb045 jdk7-b57
f72c0dc047b9b2e797beee68ae0b50decb1f020d jdk7-b61
12e11fab9a839a9666a996a8f9a02fd8fa03aab6 jdk7-b62
2ed6ed6b5bfc7dd724925b90dbb31223df59c25d jdk7-b63
a50217eb3ee10b9f9547e0708e5c9625405083ef jdk7-b64