Coleen Phillimore 2009-07-27 17:23:52 -04:00
commit 3e69a71772
1528 changed files with 435337 additions and 58480 deletions

View File

@ -35,3 +35,7 @@ b44f05654c26fcd1f995e712992f9b07ffd7c0c6 jdk7-b57
d60a9ce3c3eabf28f5d50ae839d18be04a551bc2 jdk7-b58
c33e7d38c9210741dbc285507403a4b20bd802a0 jdk7-b59
5a10e4d0b14d7beac53a7b2213ae6864afe1fd3e jdk7-b60
dbb955b1ee59b876dd1f133952b557b48b1d7732 jdk7-b61
6107cbff3130c747d243c25a7874cd59db5744a8 jdk7-b62
dfd8506f74c3731bb169ce93c72612d78ee0413b jdk7-b63
d22867c5f1b295a0a2b3b4bc8999a2676f6e20c3 jdk7-b64

View File

@ -35,3 +35,7 @@ ffd09e767dfa6d21466183a400f72cf62d53297f jdk7-b57
59b497130f82ec809c245ffb5e521e3a5fabf8af jdk7-b58
030142474602b4a067662fffc0c8e541de5a78df jdk7-b59
39565502682c7085369bd09e51640919dc741097 jdk7-b60
472c21584cfd7e9c0229ad6a100366a5c03d2976 jdk7-b61
c7ed15ab92ce36a09d264a5e34025884b2d7607f jdk7-b62
57f7e028c7ad1806500ae89eb3f4cd9a51b10e18 jdk7-b63
269c1ec4435dfb7b452ae6e3bdde005d55c5c830 jdk7-b64

View File

@ -72,6 +72,7 @@
<li>Linux and Solaris:
<ul>
<li><a href="#cups">CUPS Include files</a> </li>
<li><a href="#xrender">XRender Include files</a></li>
</ul>
</li>
<li>Linux only:
@ -119,6 +120,7 @@
<th>Base OS and Architecture</th>
<th>OS</th>
<th>C/C++ Compiler</th>
<th>BOOT JDK</th>
</tr>
</thead>
<tbody>
@ -126,57 +128,65 @@
<td>Linux X86 (32-bit)</td>
<td>Fedora 9</td>
<td>gcc 4 </td>
<td>JDK 6u14 FCS </td>
</tr>
<tr>
<td>Linux X64 (64-bit)</td>
<td>Fedora 9</td>
<td>gcc 4 </td>
<td>JDK 6u14 FCS </td>
</tr>
<tr>
<td>Solaris SPARC (32-bit)</td>
<td>Solaris 10 + patches
<td>Solaris 10u2 + patches
<br>
See <a href="http://sunsolve.sun.com/pub-cgi/show.pl?target=patches/JavaSE" target="_blank">
SunSolve</a> for patch downloads.
</td>
<td>Sun Studio 12</td>
<td>JDK 6u14 FCS </td>
</tr>
<tr>
<td>Solaris SPARCV9 (64-bit)</td>
<td>Solaris 10 + patches
<td>Solaris 10u2 + patches
<br>
See <a href="http://sunsolve.sun.com/pub-cgi/show.pl?target=patches/JavaSE" target="_blank">
SunSolve</a> for patch downloads.
</td>
<td>Sun Studio 12</td>
<td>JDK 6u14 FCS </td>
</tr>
<tr>
<td>Solaris X86 (32-bit)</td>
<td>Solaris 10 + patches
<td>Solaris 10u2 + patches
<br>
See <a href="http://sunsolve.sun.com/pub-cgi/show.pl?target=patches/JavaSE" target="_blank">
SunSolve</a> for patch downloads.
</td>
<td>Sun Studio 12</td>
<td>JDK 6u14 FCS </td>
</tr>
<tr>
<td>Solaris X64 (64-bit)</td>
<td>Solaris 10 + patches
<td>Solaris 10u2 + patches
<br>
See <a href="http://sunsolve.sun.com/pub-cgi/show.pl?target=patches/JavaSE" target="_blank">
SunSolve</a> for patch downloads.
</td>
<td>Sun Studio 12</td>
<td>JDK 6u14 FCS </td>
</tr>
<tr>
<td>Windows X86 (32-bit)</td>
<td>Windows XP</td>
<td>Microsoft Visual Studio C++ 2008 Standard Edition</td>
<td>JDK 6u14 FCS </td>
</tr>
<tr>
<td>Windows X64 (64-bit)</td>
<td>Windows Server 2003 - Enterprise x64 Edition</td>
<td>Microsoft Platform SDK - April 2005</td>
<td>JDK 6u14 FCS </td>
</tr>
</tbody>
</table>
@ -656,6 +666,9 @@
<a href="#cups">CUPS Include files</a>, set
<tt><a href="#ALT_CUPS_HEADERS_PATH">ALT_CUPS_HEADERS_PATH</a></tt>.
</li>
<li>
Install the <a href="#xrender">XRender Include files</a>.
</li>
<li>
Install the
<a href="#jibx">JIBX Libraries</a>, set
@ -1032,6 +1045,27 @@
CUPS Header files.
</blockquote>
<!-- ------------------------------------------------------ -->
<h4><a name="xrender">XRender Extension Headers (Solaris &amp; Linux)</a></h4>
<blockquote>
<p>
<strong>Solaris:</strong>
XRender header files are required for building the
OpenJDK on Solaris.
The XRender header file is included with the other X11 header files
in the package <strong>SFWxwinc</strong> on sufficiently recent versions of
Solaris and is installed as
<tt>/usr/X11/include/X11/extensions/Xrender.h</tt>.
</p><p>
<strong>Linux:</strong>
XRender header files are required for building the
OpenJDK on Linux.
The Linux header files are usually available from an "Xrender"
development package; it is recommended that you use the package
provided by the particular Linux distribution you are using.
</p>
</blockquote>
<!-- ------------------------------------------------------ -->
<h4><a name="freetype">FreeType 2</a></h4>
<blockquote>
Version 2.3 or newer of FreeType is required for building the OpenJDK.

View File

@ -35,3 +35,7 @@ bec82237d694f9802b820fa11bbb4f7fa9bf8e77 jdk7-b52
2e3b8edab3ef55406494d3dd562e06882e6fc15e jdk7-b58
7e6b2b55c00cc523b468272353ada3979adbbf16 jdk7-b59
f1e1cccbd13aa96d2d8bd872782ff764010bc22c jdk7-b60
e906b16a12a9a63b615898afa5d9673cbd1c5ab8 jdk7-b61
65b66117dbd70a493e9644aeb4033cf95a4e3c99 jdk7-b62
d20e45cd539f20405ff843652069cfd7550c5ab3 jdk7-b63
047dd27fddb607f8135296b3754131f6e13cb8c7 jdk7-b64

View File

@ -106,7 +106,12 @@ endif
SOURCE_LEVEL = 5
BOOTSTRAP_TARGET_LEVEL = 5
TARGET_LEVEL = 6
ifdef TARGET_CLASS_VERSION
TARGET_LEVEL = $(TARGET_CLASS_VERSION)
else
TARGET_LEVEL = 6
endif
ifndef TARGET_JAVA
TARGET_JAVA = java

View File

@ -365,10 +365,10 @@ ifndef COPYRIGHT_YEAR
COPYRIGHT_YEAR = 2007
endif
RC_FLAGS += -d "J2SE_BUILD_ID=$(FULL_VERSION)" \
-d "J2SE_COMPANY=$(COMPANY_NAME)" \
-d "J2SE_COMPONENT=$(PRODUCT_NAME) Platform SE binary" \
-d "J2SE_VER=$(JDK_MINOR_VERSION).$(JDK_MICRO_VERSION).$(J2SE_UPDATE_VER).$(COOKED_BUILD_NUMBER)" \
-d "J2SE_COPYRIGHT=Copyright \xA9 $(COPYRIGHT_YEAR)" \
-d "J2SE_NAME=$(PRODUCT_NAME) Platform SE $(JDK_MINOR_VERSION) $(J2SE_UPDATE_META_TAG)" \
-d "J2SE_FVER=$(JDK_VERSION)"
RC_FLAGS += -d "JDK_BUILD_ID=$(FULL_VERSION)" \
-d "JDK_COMPANY=$(COMPANY_NAME)" \
-d "JDK_COMPONENT=$(PRODUCT_NAME) Platform SE binary" \
-d "JDK_VER=$(JDK_MINOR_VERSION).$(JDK_MICRO_VERSION).$(JDK_UPDATE_VER).$(COOKED_BUILD_NUMBER)" \
-d "JDK_COPYRIGHT=Copyright \xA9 $(COPYRIGHT_YEAR)" \
-d "JDK_NAME=$(PRODUCT_NAME) Platform SE $(JDK_MINOR_VERSION) $(JDK_UPDATE_META_TAG)" \
-d "JDK_FVER=$(JDK_VERSION)"

View File

@ -158,9 +158,9 @@ ifndef LOCAL_RESOURCE_FILE
endif
@$(ECHO) Created $@
RC_FLAGS += /D "J2SE_FNAME=$(LIBRARY).dll" \
/D "J2SE_INTERNAL_NAME=$(LIBRARY)" \
/D "J2SE_FTYPE=0x2L"
RC_FLAGS += /D "JDK_FNAME=$(LIBRARY).dll" \
/D "JDK_INTERNAL_NAME=$(LIBRARY)" \
/D "JDK_FTYPE=0x2L"
$(OBJDIR)/$(LIBRARY).res: $(VERSIONINFO_RESOURCE)
ifndef LOCAL_RESOURCE_FILE

View File

@ -32,6 +32,12 @@ PACKAGE = com.sun.tools.corba.se.idl
PRODUCT = sun
include $(BUILDDIR)/common/Defs.gmk
# This program must contain a manifest that defines the execution level
# needed to follow standard Vista User Access Control Guidelines
# This must be set before Program.gmk is included
#
BUILD_MANIFEST=true
#
# Files
#

View File

@ -37,8 +37,8 @@ LANGUAGE LANG_NEUTRAL, SUBLANG_NEUTRAL
//
VS_VERSION_INFO VERSIONINFO
FILEVERSION J2SE_FVER
PRODUCTVERSION J2SE_FVER
FILEVERSION JDK_FVER
PRODUCTVERSION JDK_FVER
FILEFLAGSMASK 0x3fL
#ifdef _DEBUG
FILEFLAGS 0x1L
@ -48,22 +48,22 @@ VS_VERSION_INFO VERSIONINFO
// FILEOS 0x4 is Win32, 0x40004 is Win32 NT only
FILEOS 0x4L
// FILETYPE should be 0x1 for .exe and 0x2 for .dll
FILETYPE J2SE_FTYPE
FILETYPE JDK_FTYPE
FILESUBTYPE 0x0L
BEGIN
BLOCK "StringFileInfo"
BEGIN
BLOCK "000004b0"
BEGIN
VALUE "CompanyName", XSTR(J2SE_COMPANY) "\0"
VALUE "FileDescription", XSTR(J2SE_COMPONENT) "\0"
VALUE "FileVersion", XSTR(J2SE_VER) "\0"
VALUE "Full Version", XSTR(J2SE_BUILD_ID) "\0"
VALUE "InternalName", XSTR(J2SE_INTERNAL_NAME) "\0"
VALUE "LegalCopyright", XSTR(J2SE_COPYRIGHT) "\0"
VALUE "OriginalFilename", XSTR(J2SE_FNAME) "\0"
VALUE "ProductName", XSTR(J2SE_NAME) "\0"
VALUE "ProductVersion", XSTR(J2SE_VER) "\0"
VALUE "CompanyName", XSTR(JDK_COMPANY) "\0"
VALUE "FileDescription", XSTR(JDK_COMPONENT) "\0"
VALUE "FileVersion", XSTR(JDK_VER) "\0"
VALUE "Full Version", XSTR(JDK_BUILD_ID) "\0"
VALUE "InternalName", XSTR(JDK_INTERNAL_NAME) "\0"
VALUE "LegalCopyright", XSTR(JDK_COPYRIGHT) "\0"
VALUE "OriginalFilename", XSTR(JDK_FNAME) "\0"
VALUE "ProductName", XSTR(JDK_NAME) "\0"
VALUE "ProductVersion", XSTR(JDK_VER) "\0"
END
END
BLOCK "VarFileInfo"

View File

@ -35,3 +35,7 @@ f4cbf78110c726919f46b59a3b054c54c7e889b4 jdk7-b57
53d9bf689e80fcc76b221bbe6c5d58e08b80cbc6 jdk7-b58
c55be0c7bd32c016c52218eb4c8b5da8a75450b5 jdk7-b59
a77eddcd510c3972717c025cfcef9a60bfa4ecac jdk7-b60
27b728fd1281ab62e9d7e4424f8bbb6ca438d803 jdk7-b61
a88386380bdaaa5ab4ffbedf22c57bac5dbec034 jdk7-b62
32c83fb84370a35344676991a48440378e6b6c8a jdk7-b63
ba36394eb84b949b31212bdb32a518a8f92bab5b jdk7-b64

View File

@ -24,23 +24,64 @@
package sun.jvm.hotspot.code;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.runtime.VM;
import sun.jvm.hotspot.utilities.*;
public class DebugInfoReadStream extends CompressedReadStream {
private NMethod code;
private int InvocationEntryBCI;
private List objectPool; // ArrayList<ObjectValue>
public DebugInfoReadStream(NMethod code, int offset) {
super(code.scopesDataBegin(), offset);
InvocationEntryBCI = VM.getVM().getInvocationEntryBCI();
this.code = code;
this.objectPool = null;
}
public DebugInfoReadStream(NMethod code, int offset, List objectPool) {
super(code.scopesDataBegin(), offset);
InvocationEntryBCI = VM.getVM().getInvocationEntryBCI();
this.code = code;
this.objectPool = objectPool;
}
public OopHandle readOopHandle() {
return code.getOopAt(readInt());
}
ScopeValue readObjectValue() {
int id = readInt();
if (Assert.ASSERTS_ENABLED) {
Assert.that(objectPool != null, "object pool does not exist");
for (Iterator itr = objectPool.iterator(); itr.hasNext();) {
ObjectValue ov = (ObjectValue) itr.next();
Assert.that(ov.id() != id, "should not be read twice");
}
}
ObjectValue result = new ObjectValue(id);
// Cache the object since an object field could reference it.
objectPool.add(result);
result.readObject(this);
return result;
}
ScopeValue getCachedObject() {
int id = readInt();
Assert.that(objectPool != null, "object pool does not exist");
for (Iterator itr = objectPool.iterator(); itr.hasNext();) {
ObjectValue ov = (ObjectValue) itr.next();
if (ov.id() == id) {
return ov;
}
}
Assert.that(false, "should not reach here");
return null;
}
public int readBCI() {
return readInt() + InvocationEntryBCI;
}
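
A note on the two constructors above: object values can reference themselves and each other, so the decoder registers each ObjectValue in the shared pool before its fields are read. A minimal standalone sketch of that id-based caching protocol (toy names, not the actual SA classes):

import java.util.ArrayList;
import java.util.List;

// Toy model: the first occurrence of an object carries its id plus its full
// contents (OBJECT_CODE) and is cached before its fields are decoded, so a
// field may refer back to the object itself; later occurrences carry only
// the id (OBJECT_ID_CODE) and are resolved from the cache.
class ObjectPoolModel {
    static final class Obj { final int id; Obj(int id) { this.id = id; } }

    private final List<Obj> pool = new ArrayList<Obj>();

    Obj readObjectValue(int id) {
        Obj result = new Obj(id);
        pool.add(result);  // cache first ...
        // ... then decode fields, which may reference 'result' by its id
        return result;
    }

    Obj getCachedObject(int id) {
        for (Obj o : pool) {
            if (o.id == id) return o;
        }
        throw new IllegalStateException("object " + id + " not decoded yet");
    }
}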

View File

@ -29,6 +29,7 @@ import java.io.*;
public class MonitorValue {
private ScopeValue owner;
private Location basicLock;
private boolean eliminated;
// FIXME: not useful yet
// MonitorValue(ScopeValue* owner, Location basic_lock);
@ -36,10 +37,12 @@ public class MonitorValue {
public MonitorValue(DebugInfoReadStream stream) {
basicLock = new Location(stream);
owner = ScopeValue.readFrom(stream);
eliminated= stream.readBoolean();
}
public ScopeValue owner() { return owner; }
public Location basicLock() { return basicLock; }
public boolean eliminated() { return eliminated; }
// FIXME: not yet implementable
// void write_on(DebugInfoWriteStream* stream);
@ -50,5 +53,8 @@ public class MonitorValue {
tty.print(",");
basicLock().printOn(tty);
tty.print("}");
if (eliminated) {
tty.print(" (eliminated)");
}
}
}

View File

@ -0,0 +1,93 @@
/*
* Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
package sun.jvm.hotspot.code;
import java.io.*;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.utilities.*;
/** An ObjectValue describes an object eliminated by escape analysis. */
public class ObjectValue extends ScopeValue {
private int id;
private ScopeValue klass;
private List fieldsValue; // ArrayList<ScopeValue>
// Field "boolean visited" is not implemented here since
// it is used only during debug info creation.
public ObjectValue(int id) {
this.id = id;
klass = null;
fieldsValue = new ArrayList();
}
public boolean isObject() { return true; }
public int id() { return id; }
public ScopeValue getKlass() { return klass; }
public List getFieldsValue() { return fieldsValue; }
public ScopeValue getFieldAt(int i) { return (ScopeValue)fieldsValue.get(i); }
public int fieldsSize() { return fieldsValue.size(); }
// Field "value" is always NULL here since it is used
// only during deoptimization of a compiled frame
// pointing to a reallocated object.
public OopHandle getValue() { return null; }
/** Serialization of debugging information */
void readObject(DebugInfoReadStream stream) {
klass = readFrom(stream);
Assert.that(klass.isConstantOop(), "should be constant klass oop");
int length = stream.readInt();
for (int i = 0; i < length; i++) {
ScopeValue val = readFrom(stream);
fieldsValue.add(val);
}
}
// Printing
public void print() {
printOn(System.out);
}
public void printOn(PrintStream tty) {
tty.print("scalarObj[" + id + "]");
}
void printFieldsOn(PrintStream tty) {
if (fieldsValue.size() > 0) {
((ScopeValue)fieldsValue.get(0)).printOn(tty);
}
for (int i = 1; i < fieldsValue.size(); i++) {
tty.print(", ");
((ScopeValue)fieldsValue.get(i)).printOn(tty);
}
}
};
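
For context, this is the kind of Java source the class describes (illustrative only): escape analysis can scalar-replace an allocation that never leaves the method, leaving no object on the heap, so a debugger must rebuild it from the recorded klass and field values.

class ScalarReplacementExample {
    static final class Point {
        final int x, y;
        Point(int x, int y) { this.x = x; this.y = y; }
    }

    static int lengthSquared(int x, int y) {
        Point p = new Point(x, y);    // never escapes: may be scalar replaced
        return p.x * p.x + p.y * p.y; // fields live in registers/stack slots
    }
}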

View File

@ -27,8 +27,10 @@ package sun.jvm.hotspot.code;
import java.io.*;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.oops.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.utilities.*;
/** ScopeDescs contain the information that makes source-level
debugging of nmethods possible; each scopeDesc describes a method
@ -45,10 +47,31 @@ public class ScopeDesc {
private int localsDecodeOffset;
private int expressionsDecodeOffset;
private int monitorsDecodeOffset;
/** Scalar replaced objects pool */
private List objects; // ArrayList<ScopeValue>
public ScopeDesc(NMethod code, int decodeOffset) {
this.code = code;
this.decodeOffset = decodeOffset;
this.objects = decodeObjectValues(DebugInformationRecorder.SERIALIZED_NULL);
// Decode header
DebugInfoReadStream stream = streamAt(decodeOffset);
senderDecodeOffset = stream.readInt();
method = (Method) VM.getVM().getObjectHeap().newOop(stream.readOopHandle());
bci = stream.readBCI();
// Decode offsets for body and sender
localsDecodeOffset = stream.readInt();
expressionsDecodeOffset = stream.readInt();
monitorsDecodeOffset = stream.readInt();
}
public ScopeDesc(NMethod code, int decodeOffset, int objectDecodeOffset) {
this.code = code;
this.decodeOffset = decodeOffset;
this.objects = decodeObjectValues(objectDecodeOffset);
// Decode header
DebugInfoReadStream stream = streamAt(decodeOffset);
@ -81,6 +104,11 @@ public class ScopeDesc {
return decodeMonitorValues(monitorsDecodeOffset);
}
/** Returns a List&lt;ObjectValue&gt; */
public List getObjects() {
return objects;
}
/** Stack walking. Returns null if this is the outermost scope. */
public ScopeDesc sender() {
if (isTop()) {
@ -131,7 +159,7 @@ public class ScopeDesc {
//
private DebugInfoReadStream streamAt(int decodeOffset) {
return new DebugInfoReadStream(code, decodeOffset);
return new DebugInfoReadStream(code, decodeOffset, objects);
}
/** Returns a List&lt;ScopeValue&gt; or null if no values were present */
@ -161,4 +189,22 @@ public class ScopeDesc {
}
return res;
}
/** Returns a List&lt;ObjectValue&gt; or null if no values were present */
private List decodeObjectValues(int decodeOffset) {
if (decodeOffset == DebugInformationRecorder.SERIALIZED_NULL) {
return null;
}
List res = new ArrayList();
DebugInfoReadStream stream = new DebugInfoReadStream(code, decodeOffset, res);
int length = stream.readInt();
for (int i = 0; i < length; i++) {
// Object values are pushed to the 'res' list during read so that
// an object's fields can reference it (OBJECT_ID_CODE).
ScopeValue.readFrom(stream);
// res.add(ScopeValue.readFrom(stream));
}
Assert.that(res.size() == length, "inconsistent debug information");
return res;
}
}
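
A hedged sketch of how a SA client could list the scalar-replaced objects of a scope through the accessors added above (it assumes a ScopeDesc from an attached SA session, so it is illustrative rather than runnable on its own):

// Illustrative: 'scope' is a sun.jvm.hotspot.code.ScopeDesc from a live
// SA session; getObjects() returns null when nothing was scalar replaced.
List objects = scope.getObjects();
if (objects != null) {
    for (Iterator it = objects.iterator(); it.hasNext(); ) {
        ObjectValue ov = (ObjectValue) it.next();
        System.out.print("scalar replaced object #" + ov.id() + ": ");
        ov.printOn(System.out);
        System.out.println(" (" + ov.fieldsSize() + " fields)");
    }
}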

View File

@ -49,12 +49,15 @@ public abstract class ScopeValue {
static final int CONSTANT_OOP_CODE = 2;
static final int CONSTANT_LONG_CODE = 3;
static final int CONSTANT_DOUBLE_CODE = 4;
static final int CONSTANT_OBJECT_CODE = 5;
static final int CONSTANT_OBJECT_ID_CODE = 6;
public boolean isLocation() { return false; }
public boolean isConstantInt() { return false; }
public boolean isConstantDouble() { return false; }
public boolean isConstantLong() { return false; }
public boolean isConstantOop() { return false; }
public boolean isObject() { return false; }
public static ScopeValue readFrom(DebugInfoReadStream stream) {
switch (stream.readInt()) {
@ -68,6 +71,10 @@ public abstract class ScopeValue {
return new ConstantLongValue(stream);
case CONSTANT_DOUBLE_CODE:
return new ConstantDoubleValue(stream);
case CONSTANT_OBJECT_CODE:
return stream.readObjectValue();
case CONSTANT_OBJECT_ID_CODE:
return stream.getCachedObject();
default:
Assert.that(false, "should not reach here");
return null;

View File

@ -249,6 +249,7 @@ public class ObjectReferenceImpl extends ValueImpl implements ObjectReference {
OopHandle givenHandle = obj.getHandle();
for (Iterator itr = monitors.iterator(); itr.hasNext();) {
MonitorInfo mi = (MonitorInfo) itr.next();
if (mi.eliminated() && frame.isCompiledFrame()) continue; // skip eliminated monitor
if (givenHandle.equals(mi.owner())) {
res++;
}

View File

@ -301,6 +301,9 @@ public class ThreadReferenceImpl extends ObjectReferenceImpl
List frameMonitors = frame.getMonitors(); // List<MonitorInfo>
for (Iterator miItr = frameMonitors.iterator(); miItr.hasNext(); ) {
sun.jvm.hotspot.runtime.MonitorInfo mi = (sun.jvm.hotspot.runtime.MonitorInfo) miItr.next();
if (mi.eliminated() && frame.isCompiledFrame()) {
continue; // skip eliminated monitor
}
OopHandle obj = mi.owner();
if (obj == null) {
// this monitor doesn't have an owning object so skip it

View File

@ -131,8 +131,18 @@ public class CompiledVFrame extends JavaVFrame {
List result = new ArrayList(monitors.size());
for (int i = 0; i < monitors.size(); i++) {
MonitorValue mv = (MonitorValue) monitors.get(i);
StackValue ownerSV = createStackValue(mv.owner()); // it is an oop
result.add(new MonitorInfo(ownerSV.getObject(), resolveMonitorLock(mv.basicLock())));
ScopeValue ov = mv.owner();
StackValue ownerSV = createStackValue(ov); // it is an oop
if (ov.isObject()) { // The owner object was scalar replaced
Assert.that(mv.eliminated() && ownerSV.objIsScalarReplaced(), "monitor should be eliminated for scalar replaced object");
// Put klass for scalar replaced object.
ScopeValue kv = ((ObjectValue)ov).getKlass();
Assert.that(kv.isConstantOop(), "klass should be oop constant for scalar replaced object");
OopHandle k = ((ConstantOopReadValue)kv).getValue();
result.add(new MonitorInfo(k, resolveMonitorLock(mv.basicLock()), mv.eliminated(), true));
} else {
result.add(new MonitorInfo(ownerSV.getObject(), resolveMonitorLock(mv.basicLock()), mv.eliminated(), false));
}
}
return result;
}
@ -212,12 +222,12 @@ public class CompiledVFrame extends JavaVFrame {
// long or is unused. He always saves a long. Here we know
// a long was saved, but we only want an narrow oop back. Narrow the
// saved long to the narrow oop that the JVM wants.
return new StackValue(valueAddr.getCompOopHandleAt(VM.getVM().getIntSize()));
return new StackValue(valueAddr.getCompOopHandleAt(VM.getVM().getIntSize()), 0);
} else {
return new StackValue(valueAddr.getCompOopHandleAt(0));
return new StackValue(valueAddr.getCompOopHandleAt(0), 0);
}
} else if( loc.holdsOop() ) { // Holds an oop?
return new StackValue(valueAddr.getOopHandleAt(0));
return new StackValue(valueAddr.getOopHandleAt(0), 0);
} else if( loc.holdsDouble() ) {
// Double value in a single stack slot
return new StackValue(valueAddr.getJIntAt(0) & 0xFFFFFFFF);
@ -277,7 +287,7 @@ public class CompiledVFrame extends JavaVFrame {
return new StackValue(((ConstantIntValue) sv).getValue() & 0xFFFFFFFF);
} else if (sv.isConstantOop()) {
// constant oop
return new StackValue(((ConstantOopReadValue) sv).getValue());
return new StackValue(((ConstantOopReadValue) sv).getValue(), 0);
} else if (sv.isConstantDouble()) {
// Constant double in a single stack slot
double d = ((ConstantDoubleValue) sv).getValue();
@ -285,6 +295,9 @@ public class CompiledVFrame extends JavaVFrame {
} else if (VM.getVM().isLP64() && sv.isConstantLong()) {
// Constant long in a single stack slot
return new StackValue(((ConstantLongValue) sv).getValue() & 0xFFFFFFFF);
} else if (sv.isObject()) {
// Scalar replaced object in compiled frame
return new StackValue(((ObjectValue)sv).getValue(), 1);
}
// Unknown ScopeValue type

View File

@ -61,7 +61,7 @@ public class InterpretedVFrame extends JavaVFrame {
StackValue sv;
if (oopMask.isOop(i)) {
// oop value
sv = new StackValue(addr.getOopHandleAt(0));
sv = new StackValue(addr.getOopHandleAt(0), 0);
} else {
// integer
// Fetch a signed integer the size of a stack slot
@ -95,7 +95,7 @@ public class InterpretedVFrame extends JavaVFrame {
StackValue sv;
if (oopMask.isOop(i + nofLocals)) {
// oop value
sv = new StackValue(addr.getOopHandleAt(0));
sv = new StackValue(addr.getOopHandleAt(0), 0);
} else {
// integer
// Fetch a signed integer the size of a stack slot
@ -113,7 +113,7 @@ public class InterpretedVFrame extends JavaVFrame {
for (BasicObjectLock current = getFrame().interpreterFrameMonitorEnd();
current.address().lessThan(getFrame().interpreterFrameMonitorBegin().address());
current = getFrame().nextMonitorInInterpreterFrame(current)) {
result.add(new MonitorInfo(current.obj(), current.lock()));
result.add(new MonitorInfo(current.obj(), current.lock(), false, false));
}
return result;
}

View File

@ -25,16 +25,39 @@
package sun.jvm.hotspot.runtime;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.utilities.*;
public class MonitorInfo {
private OopHandle owner;
private BasicLock lock;
private OopHandle ownerKlass;
private boolean eliminated;
private boolean ownerIsScalarReplaced;
public MonitorInfo(OopHandle owner, BasicLock lock) {
this.owner = owner;
this.lock = lock;
public MonitorInfo(OopHandle owner, BasicLock lock, boolean eliminated, boolean ownerIsScalarReplaced) {
if (!ownerIsScalarReplaced) {
this.owner = owner;
this.ownerKlass = null;
} else {
Assert.that(eliminated, "monitor should be eliminated for scalar replaced object");
this.owner = null;
this.ownerKlass = owner;
}
this.eliminated = eliminated;
this.ownerIsScalarReplaced = ownerIsScalarReplaced;
}
public OopHandle owner() {
Assert.that(!ownerIsScalarReplaced, "should not be called for scalar replaced object");
return owner;
}
public OopHandle ownerKlass() {
Assert.that(ownerIsScalarReplaced, "should not be called for a non-scalar-replaced object");
return ownerKlass;
}
public OopHandle owner() { return owner; }
public BasicLock lock() { return lock; }
public boolean eliminated() { return eliminated; }
public boolean ownerIsScalarReplaced() { return ownerIsScalarReplaced; }
}
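
The asserts above encode a contract that every caller in this change follows: test eliminated() (and ownerIsScalarReplaced()) before asking for an owner oop. The JDI call sites earlier in this commit reduce to this pattern:

// Condensed from ObjectReferenceImpl/ThreadReferenceImpl above: a monitor
// eliminated in a compiled frame has no real owner oop to report.
for (Iterator it = frame.getMonitors().iterator(); it.hasNext(); ) {
    MonitorInfo mi = (MonitorInfo) it.next();
    if (mi.eliminated() && frame.isCompiledFrame()) {
        continue; // skip eliminated monitor (owner may be scalar replaced)
    }
    OopHandle obj = mi.owner(); // safe: owner was not scalar replaced here
    // ... use obj ...
}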

View File

@ -37,9 +37,11 @@ public class StackValue {
type = BasicType.getTConflict();
}
public StackValue(OopHandle h) {
public StackValue(OopHandle h, long scalar_replaced) {
handleValue = h;
type = BasicType.getTObject();
integerValue = scalar_replaced;
Assert.that(integerValue == 0 || handleValue == null, "non-null object should not be marked as scalar replaced");
}
public StackValue(long i) {
@ -59,6 +61,13 @@ public class StackValue {
return handleValue;
}
boolean objIsScalarReplaced() {
if (Assert.ASSERTS_ENABLED) {
Assert.that(type == BasicType.getTObject(), "type check");
}
return integerValue != 0;
}
public long getInteger() {
if (Assert.ASSERTS_ENABLED) {
Assert.that(type == BasicType.getTInt(), "type check");

View File

@ -161,6 +161,8 @@ public class OopTreeNodeAdapter extends FieldTreeNodeAdapter {
child = new OopTreeNodeAdapter(field.getValue(getObj()), field.getID(), getTreeTableMode());
} catch (AddressException e) {
child = new BadOopTreeNodeAdapter(field.getValueAsOopHandle(getObj()), field.getID(), getTreeTableMode());
} catch (UnknownOopException e) {
child = new BadOopTreeNodeAdapter(field.getValueAsOopHandle(getObj()), field.getID(), getTreeTableMode());
}
}
++curField;

View File

@ -135,6 +135,10 @@ public class JSJavaThread extends JSJavaInstance {
List frameMonitors = frame.getMonitors(); // List<MonitorInfo>
for (Iterator miItr = frameMonitors.iterator(); miItr.hasNext(); ) {
MonitorInfo mi = (MonitorInfo) miItr.next();
if (mi.eliminated() && frame.isCompiledFrame()) {
continue; // skip eliminated monitor
}
OopHandle obj = mi.owner();
if (obj == null) {
// this monitor doesn't have an owning object so skip it

View File

@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2009
HS_MAJOR_VER=16
HS_MINOR_VER=0
HS_BUILD_NUMBER=04
HS_BUILD_NUMBER=06
JDK_MAJOR_VER=1
JDK_MINOR_VER=7

View File

@ -68,7 +68,9 @@ endif
# CFLAGS_WARN holds compiler options to suppress/enable warnings.
# Compiler warnings are treated as errors
CFLAGS_WARN = +w -errwarn
ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 509), 1)
CFLAGS_WARN = +w -errwarn
endif
CFLAGS += $(CFLAGS_WARN)
ifeq ("${Platform_compiler}", "sparcWorks")

View File

@ -41,7 +41,7 @@ ifeq ($(COMPILER_REV_NUMERIC),509)
endif
# Workaround SS11 bug 6345274 (all platforms) (Fixed in SS11 patch and SS12)
ifeq ($(COMPILER_REV_NUMERIC),508))
ifeq ($(COMPILER_REV_NUMERIC),508)
OPT_CFLAGS/ciTypeFlow.o = $(OPT_CFLAGS/O2)
endif # COMPILER_REV_NUMERIC == 508

View File

@ -4208,6 +4208,7 @@ void MacroAssembler::g1_write_barrier_pre(Register obj, Register index, int offs
PtrQueue::byte_offset_of_active()),
tmp);
}
// Check on whether to annul.
br_on_reg_cond(rc_z, /*annul*/false, Assembler::pt, tmp, filtered);
delayed() -> nop();
@ -4215,13 +4216,13 @@ void MacroAssembler::g1_write_barrier_pre(Register obj, Register index, int offs
// satb_log_barrier_work1(tmp, offset);
if (index == noreg) {
if (Assembler::is_simm13(offset)) {
ld_ptr(obj, offset, tmp);
load_heap_oop(obj, offset, tmp);
} else {
set(offset, tmp);
ld_ptr(obj, tmp, tmp);
load_heap_oop(obj, tmp, tmp);
}
} else {
ld_ptr(obj, index, tmp);
load_heap_oop(obj, index, tmp);
}
// satb_log_barrier_work2(obj, tmp, offset);

View File

@ -371,7 +371,7 @@ void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
}
__ move(value.result(), array_addr, null_check_info);
if (obj_store) {
// Is this precise?
// Precise card mark
post_barrier(LIR_OprFact::address(array_addr), value.result());
}
}
@ -685,11 +685,8 @@ void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
LIR_Opr result = rlock_result(x);
__ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), result);
if (type == objectType) { // Write-barrier needed for Object fields.
#ifdef PRECISE_CARDMARK
// Precise card mark since could either be object or array
post_barrier(addr, val.result());
#else
post_barrier(obj.result(), val.result());
#endif // PRECISE_CARDMARK
}
}

View File

@ -1891,17 +1891,17 @@ RegMask Matcher::modL_proj_mask() {
// The intptr_t operand types, defined by textual substitution.
// (Cf. opto/type.hpp. This lets us avoid many, many other ifdefs.)
#ifdef _LP64
#define immX immL
#define immX13 immL13
#define immX13m7 immL13m7
#define iRegX iRegL
#define g1RegX g1RegL
#define immX immL
#define immX13 immL13
#define immX13m7 immL13m7
#define iRegX iRegL
#define g1RegX g1RegL
#else
#define immX immI
#define immX13 immI13
#define immX13m7 immI13m7
#define iRegX iRegI
#define g1RegX g1RegI
#define immX immI
#define immX13 immI13
#define immX13m7 immI13m7
#define iRegX iRegI
#define g1RegX g1RegI
#endif
//----------ENCODING BLOCK-----------------------------------------------------
@ -3446,6 +3446,15 @@ operand immI() %{
interface(CONST_INTER);
%}
// Integer Immediate: 8-bit
operand immI8() %{
predicate(Assembler::is_simm(n->get_int(), 8));
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// Integer Immediate: 13-bit
operand immI13() %{
predicate(Assembler::is_simm13(n->get_int()));
@ -3466,6 +3475,15 @@ operand immI13m7() %{
interface(CONST_INTER);
%}
// Integer Immediate: 16-bit
operand immI16() %{
predicate(Assembler::is_simm(n->get_int(), 16));
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// Unsigned (positive) Integer Immediate: 13-bit
operand immU13() %{
predicate((0 <= n->get_int()) && Assembler::is_simm13(n->get_int()));
@ -5544,7 +5562,7 @@ instruct loadUB(iRegI dst, memory mem) %{
ins_encode %{
__ ldub($mem$$Address, $dst$$Register);
%}
ins_pipe(iload_mask_mem);
ins_pipe(iload_mem);
%}
// Load Unsigned Byte (8bit UNsigned) into a Long Register
@ -5557,7 +5575,22 @@ instruct loadUB2L(iRegL dst, memory mem) %{
ins_encode %{
__ ldub($mem$$Address, $dst$$Register);
%}
ins_pipe(iload_mask_mem);
ins_pipe(iload_mem);
%}
// Load Unsigned Byte (8 bit UNsigned) with 8-bit mask into Long Register
instruct loadUB2L_immI8(iRegL dst, memory mem, immI8 mask) %{
match(Set dst (ConvI2L (AndI (LoadUB mem) mask)));
ins_cost(MEMORY_REF_COST + DEFAULT_COST);
size(2*4);
format %{ "LDUB $mem,$dst\t# ubyte & 8-bit mask -> long\n\t"
"AND $dst,$mask,$dst" %}
ins_encode %{
__ ldub($mem$$Address, $dst$$Register);
__ and3($dst$$Register, $mask$$constant, $dst$$Register);
%}
ins_pipe(iload_mem);
%}
// Load Short (16bit signed)
@ -5610,7 +5643,7 @@ instruct loadUS(iRegI dst, memory mem) %{
ins_encode %{
__ lduh($mem$$Address, $dst$$Register);
%}
ins_pipe(iload_mask_mem);
ins_pipe(iload_mem);
%}
// Load Unsigned Short/Char (16 bit UNsigned) to Byte (8 bit signed)
@ -5636,7 +5669,56 @@ instruct loadUS2L(iRegL dst, memory mem) %{
ins_encode %{
__ lduh($mem$$Address, $dst$$Register);
%}
ins_pipe(iload_mask_mem);
ins_pipe(iload_mem);
%}
// Load Unsigned Short/Char (16bit UNsigned) with mask 0xFF into a Long Register
instruct loadUS2L_immI_255(iRegL dst, indOffset13m7 mem, immI_255 mask) %{
match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
ins_cost(MEMORY_REF_COST);
size(4);
format %{ "LDUB $mem+1,$dst\t! ushort/char & 0xFF -> long" %}
ins_encode %{
__ ldub($mem$$Address, $dst$$Register, 1); // LSB is index+1 on BE
%}
ins_pipe(iload_mem);
%}
// Load Unsigned Short/Char (16bit UNsigned) with a 13-bit mask into a Long Register
instruct loadUS2L_immI13(iRegL dst, memory mem, immI13 mask) %{
match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
ins_cost(MEMORY_REF_COST + DEFAULT_COST);
size(2*4);
format %{ "LDUH $mem,$dst\t! ushort/char & 13-bit mask -> long\n\t"
"AND $dst,$mask,$dst" %}
ins_encode %{
Register Rdst = $dst$$Register;
__ lduh($mem$$Address, Rdst);
__ and3(Rdst, $mask$$constant, Rdst);
%}
ins_pipe(iload_mem);
%}
// Load Unsigned Short/Char (16bit UNsigned) with a 16-bit mask into a Long Register
instruct loadUS2L_immI16(iRegL dst, memory mem, immI16 mask, iRegL tmp) %{
match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
effect(TEMP dst, TEMP tmp);
ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST);
size(3*4);
format %{ "LDUH $mem,$dst\t! ushort/char & 16-bit mask -> long\n\t"
"SET $mask,$tmp\n\t"
"AND $dst,$tmp,$dst" %}
ins_encode %{
Register Rdst = $dst$$Register;
Register Rtmp = $tmp$$Register;
__ lduh($mem$$Address, Rdst);
__ set($mask$$constant, Rtmp);
__ and3(Rdst, Rtmp, Rdst);
%}
ins_pipe(iload_mem);
%}
// Load Integer
@ -5718,6 +5800,68 @@ instruct loadI2L(iRegL dst, memory mem) %{
ins_encode %{
__ ldsw($mem$$Address, $dst$$Register);
%}
ins_pipe(iload_mask_mem);
%}
// Load Integer with mask 0xFF into a Long Register
instruct loadI2L_immI_255(iRegL dst, indOffset13m7 mem, immI_255 mask) %{
match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
ins_cost(MEMORY_REF_COST);
size(4);
format %{ "LDUB $mem+3,$dst\t! int & 0xFF -> long" %}
ins_encode %{
__ ldub($mem$$Address, $dst$$Register, 3); // LSB is index+3 on BE
%}
ins_pipe(iload_mem);
%}
// Load Integer with mask 0xFFFF into a Long Register
instruct loadI2L_immI_65535(iRegL dst, indOffset13m7 mem, immI_65535 mask) %{
match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
ins_cost(MEMORY_REF_COST);
size(4);
format %{ "LDUH $mem+2,$dst\t! int & 0xFFFF -> long" %}
ins_encode %{
__ lduh($mem$$Address, $dst$$Register, 2); // LSW is index+2 on BE
%}
ins_pipe(iload_mem);
%}
// Load Integer with a 13-bit mask into a Long Register
instruct loadI2L_immI13(iRegL dst, memory mem, immI13 mask) %{
match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
ins_cost(MEMORY_REF_COST + DEFAULT_COST);
size(2*4);
format %{ "LDUW $mem,$dst\t! int & 13-bit mask -> long\n\t"
"AND $dst,$mask,$dst" %}
ins_encode %{
Register Rdst = $dst$$Register;
__ lduw($mem$$Address, Rdst);
__ and3(Rdst, $mask$$constant, Rdst);
%}
ins_pipe(iload_mem);
%}
// Load Integer with a 32-bit mask into a Long Register
instruct loadI2L_immI(iRegL dst, memory mem, immI mask, iRegL tmp) %{
match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
effect(TEMP dst, TEMP tmp);
ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST);
size(3*4);
format %{ "LDUW $mem,$dst\t! int & 32-bit mask -> long\n\t"
"SET $mask,$tmp\n\t"
"AND $dst,$tmp,$dst" %}
ins_encode %{
Register Rdst = $dst$$Register;
Register Rtmp = $tmp$$Register;
__ lduw($mem$$Address, Rdst);
__ set($mask$$constant, Rtmp);
__ and3(Rdst, Rtmp, Rdst);
%}
ins_pipe(iload_mem);
%}
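
For reference, these match rules target conversion-plus-mask shapes that are common in Java code, along the lines of the following (illustrative; the exact ideal-graph shape depends on earlier ideal transforms such as LoadB-to-LoadUB conversion):

class MaskedLoadShapes {
    static long low7(byte[] b, int i)   { return b[i] & 0x7F; }       // ubyte & 8-bit mask -> long
    static long char13(char[] c, int i) { return c[i] & 0x1FFF; }     // ushort/char & 13-bit mask -> long
    static long int16(int[] a, int i)   { return a[i] & 0xFFFF; }     // int & 0xFFFF -> long
    static long int32(int[] a, int i)   { return a[i] & 0x7FFF0F0F; } // int & 32-bit mask -> long
}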

View File

@ -6805,14 +6805,18 @@ void MacroAssembler::g1_write_barrier_pre(Register obj,
jcc(Assembler::equal, done);
// if (x.f == NULL) goto done;
cmpptr(Address(obj, 0), NULL_WORD);
#ifdef _LP64
load_heap_oop(tmp2, Address(obj, 0));
#else
movptr(tmp2, Address(obj, 0));
#endif
cmpptr(tmp2, (int32_t) NULL_WORD);
jcc(Assembler::equal, done);
// Can we store original value in the thread's buffer?
LP64_ONLY(movslq(tmp, index);)
movptr(tmp2, Address(obj, 0));
#ifdef _LP64
movslq(tmp, index);
cmpq(tmp, 0);
#else
cmpl(index, 0);
@ -6834,8 +6838,7 @@ void MacroAssembler::g1_write_barrier_pre(Register obj,
if(tosca_live) push(rax);
push(obj);
#ifdef _LP64
movq(c_rarg0, Address(obj, 0));
call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), c_rarg0, r15_thread);
call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), tmp2, r15_thread);
#else
push(thread);
call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), tmp2, thread);

View File

@ -1372,6 +1372,8 @@ void InterpreterMacroAssembler::profile_null_seen(Register mdp) {
// If no method data exists, go to profile_continue.
test_method_data_pointer(mdp, profile_continue);
set_mdp_flag_at(mdp, BitData::null_seen_byte_constant());
// The method data pointer needs to be updated.
int mdp_delta = in_bytes(BitData::bit_data_size());
if (TypeProfileCasts) {

View File

@ -1409,6 +1409,8 @@ void InterpreterMacroAssembler::profile_null_seen(Register mdp) {
// If no method data exists, go to profile_continue.
test_method_data_pointer(mdp, profile_continue);
set_mdp_flag_at(mdp, BitData::null_seen_byte_constant());
// The method data pointer needs to be updated.
int mdp_delta = in_bytes(BitData::bit_data_size());
if (TypeProfileCasts) {

View File

@ -269,11 +269,11 @@ void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
#ifndef PRODUCT
void trace_method_handle_stub(const char* adaptername,
oop mh,
oopDesc* mh,
intptr_t* entry_sp,
intptr_t* saved_sp) {
// called as a leaf from native code: do not block the JVM!
printf("MH %s "PTR_FORMAT" "PTR_FORMAT" "INTX_FORMAT"\n", adaptername, mh, entry_sp, entry_sp - saved_sp);
printf("MH %s "PTR_FORMAT" "PTR_FORMAT" "INTX_FORMAT"\n", adaptername, (void*)mh, entry_sp, entry_sp - saved_sp);
}
#endif //PRODUCT

View File

@ -1302,22 +1302,19 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
const Register ic_reg = rax;
const Register receiver = j_rarg0;
const Register tmp = rdx;
Label ok;
Label exception_pending;
assert_different_registers(ic_reg, receiver, rscratch1);
__ verify_oop(receiver);
__ push(tmp); // spill (any other registers free here???)
__ load_klass(tmp, receiver);
__ cmpq(ic_reg, tmp);
__ load_klass(rscratch1, receiver);
__ cmpq(ic_reg, rscratch1);
__ jcc(Assembler::equal, ok);
__ pop(tmp);
__ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
__ bind(ok);
__ pop(tmp);
// Verified entry point must be aligned
__ align(8);

View File

@ -709,7 +709,7 @@ class StubGenerator: public StubCodeGenerator {
//
// Input:
// start - starting address
// end - element count
// count - element count
void gen_write_ref_array_pre_barrier(Register start, Register count) {
assert_different_registers(start, count);
BarrierSet* bs = Universe::heap()->barrier_set();
@ -757,7 +757,6 @@ class StubGenerator: public StubCodeGenerator {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post)));
__ addptr(rsp, 2*wordSize);
__ popa();
}
break;

View File

@ -1207,9 +1207,9 @@ class StubGenerator: public StubCodeGenerator {
__ pusha(); // push registers (overkill)
// must compute element count unless barrier set interface is changed (other platforms supply count)
assert_different_registers(start, end, scratch);
__ lea(scratch, Address(end, wordSize));
__ subptr(scratch, start);
__ shrptr(scratch, LogBytesPerWord);
__ lea(scratch, Address(end, BytesPerHeapOop));
__ subptr(scratch, start); // subtract start to get #bytes
__ shrptr(scratch, LogBytesPerHeapOop); // convert to element count
__ mov(c_rarg0, start);
__ mov(c_rarg1, scratch);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post)));
@ -1225,6 +1225,7 @@ class StubGenerator: public StubCodeGenerator {
Label L_loop;
__ shrptr(start, CardTableModRefBS::card_shift);
__ addptr(end, BytesPerHeapOop);
__ shrptr(end, CardTableModRefBS::card_shift);
__ subptr(end, start); // number of bytes to copy
@ -2251,6 +2252,7 @@ class StubGenerator: public StubCodeGenerator {
// and report their number to the caller.
assert_different_registers(rax, r14_length, count, to, end_to, rcx);
__ lea(end_to, to_element_addr);
__ addptr(end_to, -heapOopSize); // make an inclusive end pointer
gen_write_ref_array_post_barrier(to, end_to, rscratch1);
__ movptr(rax, r14_length); // original oops
__ addptr(rax, count); // K = (original - remaining) oops
@ -2259,7 +2261,7 @@ class StubGenerator: public StubCodeGenerator {
// Come here on success only.
__ BIND(L_do_card_marks);
__ addptr(end_to, -wordSize); // make an inclusive end pointer
__ addptr(end_to, -heapOopSize); // make an inclusive end pointer
gen_write_ref_array_post_barrier(to, end_to, rscratch1);
__ xorptr(rax, rax); // return 0 on success

View File

@ -6885,8 +6885,9 @@ instruct loadB(xRegI dst, memory mem) %{
%}
// Load Byte (8bit signed) into Long Register
instruct loadB2L(eRegL dst, memory mem) %{
instruct loadB2L(eRegL dst, memory mem, eFlagsReg cr) %{
match(Set dst (ConvI2L (LoadB mem)));
effect(KILL cr);
ins_cost(375);
format %{ "MOVSX8 $dst.lo,$mem\t# byte -> long\n\t"
@ -6917,22 +6918,40 @@ instruct loadUB(xRegI dst, memory mem) %{
%}
// Load Unsigned Byte (8 bit UNsigned) into Long Register
instruct loadUB2L(eRegL dst, memory mem)
%{
instruct loadUB2L(eRegL dst, memory mem, eFlagsReg cr) %{
match(Set dst (ConvI2L (LoadUB mem)));
effect(KILL cr);
ins_cost(250);
format %{ "MOVZX8 $dst.lo,$mem\t# ubyte -> long\n\t"
"XOR $dst.hi,$dst.hi" %}
ins_encode %{
__ movzbl($dst$$Register, $mem$$Address);
__ xorl(HIGH_FROM_LOW($dst$$Register), HIGH_FROM_LOW($dst$$Register));
Register Rdst = $dst$$Register;
__ movzbl(Rdst, $mem$$Address);
__ xorl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rdst));
%}
ins_pipe(ialu_reg_mem);
%}
// Load Unsigned Byte (8 bit UNsigned) with mask into Long Register
instruct loadUB2L_immI8(eRegL dst, memory mem, immI8 mask, eFlagsReg cr) %{
match(Set dst (ConvI2L (AndI (LoadUB mem) mask)));
effect(KILL cr);
format %{ "MOVZX8 $dst.lo,$mem\t# ubyte & 8-bit mask -> long\n\t"
"XOR $dst.hi,$dst.hi\n\t"
"AND $dst.lo,$mask" %}
ins_encode %{
Register Rdst = $dst$$Register;
__ movzbl(Rdst, $mem$$Address);
__ xorl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rdst));
__ andl(Rdst, $mask$$constant);
%}
ins_pipe(ialu_reg_mem);
%}
// Load Short (16bit signed)
instruct loadS(eRegI dst, memory mem) %{
match(Set dst (LoadS mem));
@ -6960,8 +6979,9 @@ instruct loadS2B(eRegI dst, memory mem, immI_24 twentyfour) %{
%}
// Load Short (16bit signed) into Long Register
instruct loadS2L(eRegL dst, memory mem) %{
instruct loadS2L(eRegL dst, memory mem, eFlagsReg cr) %{
match(Set dst (ConvI2L (LoadS mem)));
effect(KILL cr);
ins_cost(375);
format %{ "MOVSX $dst.lo,$mem\t# short -> long\n\t"
@ -7004,8 +7024,9 @@ instruct loadUS2B(eRegI dst, memory mem, immI_24 twentyfour) %{
%}
// Load Unsigned Short/Char (16 bit UNsigned) into Long Register
instruct loadUS2L(eRegL dst, memory mem) %{
instruct loadUS2L(eRegL dst, memory mem, eFlagsReg cr) %{
match(Set dst (ConvI2L (LoadUS mem)));
effect(KILL cr);
ins_cost(250);
format %{ "MOVZX $dst.lo,$mem\t# ushort/char -> long\n\t"
@ -7019,6 +7040,38 @@ instruct loadUS2L(eRegL dst, memory mem) %{
ins_pipe(ialu_reg_mem);
%}
// Load Unsigned Short/Char (16 bit UNsigned) with mask 0xFF into Long Register
instruct loadUS2L_immI_255(eRegL dst, memory mem, immI_255 mask, eFlagsReg cr) %{
match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
effect(KILL cr);
format %{ "MOVZX8 $dst.lo,$mem\t# ushort/char & 0xFF -> long\n\t"
"XOR $dst.hi,$dst.hi" %}
ins_encode %{
Register Rdst = $dst$$Register;
__ movzbl(Rdst, $mem$$Address);
__ xorl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rdst));
%}
ins_pipe(ialu_reg_mem);
%}
// Load Unsigned Short/Char (16 bit UNsigned) with a 16-bit mask into Long Register
instruct loadUS2L_immI16(eRegL dst, memory mem, immI16 mask, eFlagsReg cr) %{
match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
effect(KILL cr);
format %{ "MOVZX $dst.lo, $mem\t# ushort/char & 16-bit mask -> long\n\t"
"XOR $dst.hi,$dst.hi\n\t"
"AND $dst.lo,$mask" %}
ins_encode %{
Register Rdst = $dst$$Register;
__ movzwl(Rdst, $mem$$Address);
__ xorl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rdst));
__ andl(Rdst, $mask$$constant);
%}
ins_pipe(ialu_reg_mem);
%}
// Load Integer
instruct loadI(eRegI dst, memory mem) %{
match(Set dst (LoadI mem));
@ -7082,8 +7135,9 @@ instruct loadI2US(eRegI dst, memory mem, immI_65535 mask) %{
%}
// Load Integer into Long Register
instruct loadI2L(eRegL dst, memory mem) %{
instruct loadI2L(eRegL dst, memory mem, eFlagsReg cr) %{
match(Set dst (ConvI2L (LoadI mem)));
effect(KILL cr);
ins_cost(375);
format %{ "MOV $dst.lo,$mem\t# int -> long\n\t"
@ -7099,9 +7153,57 @@ instruct loadI2L(eRegL dst, memory mem) %{
ins_pipe(ialu_reg_mem);
%}
// Load Integer with mask 0xFF into Long Register
instruct loadI2L_immI_255(eRegL dst, memory mem, immI_255 mask, eFlagsReg cr) %{
match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
effect(KILL cr);
format %{ "MOVZX8 $dst.lo,$mem\t# int & 0xFF -> long\n\t"
"XOR $dst.hi,$dst.hi" %}
ins_encode %{
Register Rdst = $dst$$Register;
__ movzbl(Rdst, $mem$$Address);
__ xorl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rdst));
%}
ins_pipe(ialu_reg_mem);
%}
// Load Integer with mask 0xFFFF into Long Register
instruct loadI2L_immI_65535(eRegL dst, memory mem, immI_65535 mask, eFlagsReg cr) %{
match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
effect(KILL cr);
format %{ "MOVZX $dst.lo,$mem\t# int & 0xFFFF -> long\n\t"
"XOR $dst.hi,$dst.hi" %}
ins_encode %{
Register Rdst = $dst$$Register;
__ movzwl(Rdst, $mem$$Address);
__ xorl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rdst));
%}
ins_pipe(ialu_reg_mem);
%}
// Load Integer with 32-bit mask into Long Register
instruct loadI2L_immI(eRegL dst, memory mem, immI mask, eFlagsReg cr) %{
match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
effect(KILL cr);
format %{ "MOV $dst.lo,$mem\t# int & 32-bit mask -> long\n\t"
"XOR $dst.hi,$dst.hi\n\t"
"AND $dst.lo,$mask" %}
ins_encode %{
Register Rdst = $dst$$Register;
__ movl(Rdst, $mem$$Address);
__ xorl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rdst));
__ andl(Rdst, $mask$$constant);
%}
ins_pipe(ialu_reg_mem);
%}
// Load Unsigned Integer into Long Register
instruct loadUI2L(eRegL dst, memory mem) %{
instruct loadUI2L(eRegL dst, memory mem, eFlagsReg cr) %{
match(Set dst (LoadUI2L mem));
effect(KILL cr);
ins_cost(250);
format %{ "MOV $dst.lo,$mem\t# uint -> long\n\t"
@ -7695,6 +7797,17 @@ instruct storeL(long_memory mem, eRegL src) %{
ins_pipe( ialu_mem_long_reg );
%}
// Store Long to Integer
instruct storeL2I(memory mem, eRegL src) %{
match(Set mem (StoreI mem (ConvL2I src)));
format %{ "MOV $mem,$src.lo\t# long -> int" %}
ins_encode %{
__ movl($mem$$Address, $src$$Register);
%}
ins_pipe(ialu_mem_reg);
%}
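
The new storeL2I rule covers a narrowing store such as the following (illustrative), letting the low word of the long be stored directly instead of going through a separate ConvL2I result register:

class NarrowingStoreShape {
    static void storeLow(int[] a, int i, long v) {
        a[i] = (int) v; // StoreI of (ConvL2I src): a single MOV of src.lo
    }
}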
// Volatile Store Long. Must be atomic, so move it into
// the FP TOS and then do a 64-bit FIST. Has to probe the
// target address before the store (for null-ptr checks)

View File

@ -6444,6 +6444,21 @@ instruct loadUB2L(rRegL dst, memory mem)
ins_pipe(ialu_reg_mem);
%}
// Load Unsigned Byte (8 bit UNsigned) with an 8-bit mask into Long Register
instruct loadUB2L_immI8(rRegL dst, memory mem, immI8 mask, rFlagsReg cr) %{
match(Set dst (ConvI2L (AndI (LoadUB mem) mask)));
effect(KILL cr);
format %{ "movzbq $dst, $mem\t# ubyte & 8-bit mask -> long\n\t"
"andl $dst, $mask" %}
ins_encode %{
Register Rdst = $dst$$Register;
__ movzbq(Rdst, $mem$$Address);
__ andl(Rdst, $mask$$constant);
%}
ins_pipe(ialu_reg_mem);
%}
// Load Short (16 bit signed)
instruct loadS(rRegI dst, memory mem)
%{
@ -6528,6 +6543,32 @@ instruct loadUS2L(rRegL dst, memory mem)
ins_pipe(ialu_reg_mem);
%}
// Load Unsigned Short/Char (16 bit UNsigned) with mask 0xFF into Long Register
instruct loadUS2L_immI_255(rRegL dst, memory mem, immI_255 mask) %{
match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
format %{ "movzbq $dst, $mem\t# ushort/char & 0xFF -> long" %}
ins_encode %{
__ movzbq($dst$$Register, $mem$$Address);
%}
ins_pipe(ialu_reg_mem);
%}
// Load Unsigned Short/Char (16 bit UNsigned) with mask into Long Register
instruct loadUS2L_immI16(rRegL dst, memory mem, immI16 mask, rFlagsReg cr) %{
match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
effect(KILL cr);
format %{ "movzwq $dst, $mem\t# ushort/char & 16-bit mask -> long\n\t"
"andl $dst, $mask" %}
ins_encode %{
Register Rdst = $dst$$Register;
__ movzwq(Rdst, $mem$$Address);
__ andl(Rdst, $mask$$constant);
%}
ins_pipe(ialu_reg_mem);
%}
// Load Integer
instruct loadI(rRegI dst, memory mem)
%{
@ -6606,6 +6647,43 @@ instruct loadI2L(rRegL dst, memory mem)
ins_pipe(ialu_reg_mem);
%}
// Load Integer with mask 0xFF into Long Register
instruct loadI2L_immI_255(rRegL dst, memory mem, immI_255 mask) %{
match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
format %{ "movzbq $dst, $mem\t# int & 0xFF -> long" %}
ins_encode %{
__ movzbq($dst$$Register, $mem$$Address);
%}
ins_pipe(ialu_reg_mem);
%}
// Load Integer with mask 0xFFFF into Long Register
instruct loadI2L_immI_65535(rRegL dst, memory mem, immI_65535 mask) %{
match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
format %{ "movzwq $dst, $mem\t# int & 0xFFFF -> long" %}
ins_encode %{
__ movzwq($dst$$Register, $mem$$Address);
%}
ins_pipe(ialu_reg_mem);
%}
// Load Integer with a 32-bit mask into Long Register
instruct loadI2L_immI(rRegL dst, memory mem, immI mask, rFlagsReg cr) %{
match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
effect(KILL cr);
format %{ "movl $dst, $mem\t# int & 32-bit mask -> long\n\t"
"andl $dst, $mask" %}
ins_encode %{
Register Rdst = $dst$$Register;
__ movl(Rdst, $mem$$Address);
__ andl(Rdst, $mask$$constant);
%}
ins_pipe(ialu_reg_mem);
%}
// Load Unsigned Integer into Long Register
instruct loadUI2L(rRegL dst, memory mem)
%{
@ -11673,8 +11751,9 @@ instruct convI2L_reg_reg(rRegL dst, rRegI src)
ins_cost(125);
format %{ "movslq $dst, $src\t# i2l" %}
opcode(0x63); // needs REX.W
ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst,src));
ins_encode %{
__ movslq($dst$$Register, $src$$Register);
%}
ins_pipe(ialu_reg_reg);
%}

View File

@ -1,5 +1,5 @@
/*
* Copyright 1999-2005 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -39,7 +39,6 @@ public class Database {
private HashMap<String,String> platformDepFiles;
private long threshold;
private int nOuterFiles;
private int nPrecompiledFiles;
private boolean missingOk;
private Platform plat;
/** These allow you to specify files not in the include database
@ -62,7 +61,6 @@ public class Database {
threshold = t;
nOuterFiles = 0;
nPrecompiledFiles = 0;
missingOk = false;
firstFile = null;
lastFile = null;
@ -343,7 +341,6 @@ public class Database {
plat.getGIFileTemplate().getInvDir() +
list.getName() +
"\"");
nPrecompiledFiles += 1;
}
}
inclFile.println();
@ -408,22 +405,22 @@ public class Database {
gd.println();
}
if (nPrecompiledFiles > 0) {
// write Precompiled_Files = ...
gd.println("Precompiled_Files = \\");
for (Iterator iter = grandInclude.iterator(); iter.hasNext(); ) {
FileList list = (FileList) iter.next();
// write Precompiled_Files = ...
gd.println("Precompiled_Files = \\");
for (Iterator iter = grandInclude.iterator(); iter.hasNext(); ) {
FileList list = (FileList) iter.next();
if (list.getCount() >= threshold) {
gd.println(list.getName() + " \\");
String platformDep = platformDepFiles.get(list.getName());
if (platformDep != null) {
// make sure changes to the platform dependent file will
// cause regeneration of the pch file.
gd.println(platformDep + " \\");
// make sure changes to the platform dependent file will
// cause regeneration of the pch file.
gd.println(platformDep + " \\");
}
}
gd.println();
gd.println();
}
gd.println();
gd.println();
gd.println("DTraced_Files = \\");
for (Iterator iter = outerFiles.iterator(); iter.hasNext(); ) {
@ -483,7 +480,6 @@ public class Database {
}
if (plat.includeGIDependencies()
&& nPrecompiledFiles > 0
&& anII.getUseGrandInclude()) {
gd.println(" $(Precompiled_Files) \\");
}

View File

@ -420,6 +420,13 @@ Form::DataType InstructForm::is_ideal_load() const {
return _matrule->is_ideal_load();
}
// Return 'true' if this instruction matches an ideal 'LoadKlass' node
bool InstructForm::skip_antidep_check() const {
if( _matrule == NULL ) return false;
return _matrule->skip_antidep_check();
}
// Return 'true' if this instruction matches an ideal 'Load?' node
Form::DataType InstructForm::is_ideal_store() const {
if( _matrule == NULL ) return Form::none;
@ -567,6 +574,8 @@ bool InstructForm::rematerialize(FormDict &globals, RegisterForm *registers ) {
// loads from memory, so must check for anti-dependence
bool InstructForm::needs_anti_dependence_check(FormDict &globals) const {
if ( skip_antidep_check() ) return false;
// Machine independent loads must be checked for anti-dependences
if( is_ideal_load() != Form::none ) return true;
@ -3957,6 +3966,28 @@ Form::DataType MatchRule::is_ideal_load() const {
}
bool MatchRule::skip_antidep_check() const {
// Some loads operate on what is effectively immutable memory so we
// should skip the anti dep computations. For some of these nodes
// the rewritable field keeps the anti dep logic from triggering but
// for certain kinds of LoadKlass it does not since they are
// actually reading memory which could be rewritten by the runtime,
// though never by generated code. This disables it uniformly for
// the nodes that behave like this: LoadKlass, LoadNKlass and
// LoadRange.
if ( _opType && (strcmp(_opType,"Set") == 0) && _rChild ) {
const char *opType = _rChild->_opType;
if (strcmp("LoadKlass", opType) == 0 ||
strcmp("LoadNKlass", opType) == 0 ||
strcmp("LoadRange", opType) == 0) {
return true;
}
}
return false;
}
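
A concrete case (illustrative): an array's length is read with a LoadRange node, and since the length of a Java array can never change, such a load can be scheduled past unrelated stores without an anti-dependence check.

class AntiDepExample {
    static int sum(int[] a) {
        int s = 0;
        for (int i = 0; i < a.length; i++) { // a.length is a LoadRange;
            s += a[i];                       // its value is immutable, so no
        }                                    // anti-dependence check is needed
        return s;
    }
}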
Form::DataType MatchRule::is_ideal_store() const {
Form::DataType ideal_store = Form::none;

View File

@ -158,6 +158,9 @@ public:
virtual Form::CallType is_ideal_call() const; // matches ideal 'Call'
virtual Form::DataType is_ideal_load() const; // node matches ideal 'LoadXNode'
// Should antidep checks be disabled for this Instruct
// See definition of MatchRule::skip_antidep_check
bool skip_antidep_check() const;
virtual Form::DataType is_ideal_store() const;// node matches ideal 'StoreXNode'
bool is_ideal_mem() const { return is_ideal_load() != Form::none || is_ideal_store() != Form::none; }
virtual uint two_address(FormDict &globals); // output reg must match input reg
@ -1003,6 +1006,9 @@ public:
bool is_ideal_loopEnd() const; // node matches ideal 'LoopEnd'
bool is_ideal_bool() const; // node matches ideal 'Bool'
Form::DataType is_ideal_load() const;// node matches ideal 'LoadXNode'
// Should antidep checks be disabled for this rule
// See definition of MatchRule::skip_antidep_check
bool skip_antidep_check() const;
Form::DataType is_ideal_store() const;// node matches ideal 'StoreXNode'
// Check if 'mRule2' is a cisc-spill variant of this MatchRule

View File

@ -1367,11 +1367,11 @@ void ArchDesc::declareClasses(FILE *fp) {
else if (!strcmp(oper->ideal_type(_globalNames), "ConN")) {
// Access the locally stored constant
fprintf(fp," virtual intptr_t constant() const {");
fprintf(fp, " return _c0->make_oopptr()->get_con();");
fprintf(fp, " return _c0->get_ptrtype()->get_con();");
fprintf(fp, " }\n");
// Generate query to determine if this pointer is an oop
fprintf(fp," virtual bool constant_is_oop() const {");
fprintf(fp, " return _c0->make_oopptr()->isa_oop_ptr();");
fprintf(fp, " return _c0->get_ptrtype()->isa_oop_ptr();");
fprintf(fp, " }\n");
}
else if (!strcmp(oper->ideal_type(_globalNames), "ConL")) {

View File

@ -1534,12 +1534,8 @@ void LIRGenerator::do_StoreField(StoreField* x) {
}
if (is_oop) {
#ifdef PRECISE_CARDMARK
// Precise cardmarks don't work
post_barrier(LIR_OprFact::address(address), value.result());
#else
// Store to object so mark the card of the header
post_barrier(object.result(), value.result());
#endif // PRECISE_CARDMARK
}
if (is_volatile && os::is_MP()) {

View File

@ -3237,6 +3237,16 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
this_klass->set_minor_version(minor_version);
this_klass->set_major_version(major_version);
// Set up methodOop::intrinsic_id as soon as we know the names of methods.
// (We used to do this lazily, but now we query it in Rewriter,
// which is eagerly done for every method, so we might as well do it now,
// when everything is fresh in memory.)
if (methodOopDesc::klass_id_for_intrinsics(this_klass->as_klassOop()) != vmSymbols::NO_SID) {
for (int j = 0; j < methods->length(); j++) {
((methodOop)methods->obj_at(j))->init_intrinsic_id();
}
}
if (cached_class_file_bytes != NULL) {
// JVMTI: we have an instanceKlass now, tell it about the cached bytes
this_klass->set_cached_class_file(cached_class_file_bytes,

View File

@ -513,9 +513,6 @@
//
// for Emacs: (let ((c-backslash-column 120) (c-backslash-max-column 120)) (c-backslash-region (point) (point-max) nil t))
#define VM_INTRINSICS_DO(do_intrinsic, do_class, do_name, do_signature, do_alias) \
do_intrinsic(_Object_init, java_lang_Object, object_initializer_name, void_method_signature, F_R) \
/* (symbol object_initializer_name defined above) */ \
\
do_intrinsic(_hashCode, java_lang_Object, hashCode_name, void_int_signature, F_R) \
do_name( hashCode_name, "hashCode") \
do_intrinsic(_getClass, java_lang_Object, getClass_name, void_class_signature, F_R) \
@ -635,9 +632,6 @@
do_intrinsic(_equalsC, java_util_Arrays, equals_name, equalsC_signature, F_S) \
do_signature(equalsC_signature, "([C[C)Z") \
\
do_intrinsic(_invoke, java_lang_reflect_Method, invoke_name, object_array_object_object_signature, F_R) \
/* (symbols invoke_name and invoke_signature defined above) */ \
\
do_intrinsic(_compareTo, java_lang_String, compareTo_name, string_int_signature, F_R) \
do_name( compareTo_name, "compareTo") \
do_intrinsic(_indexOf, java_lang_String, indexOf_name, string_int_signature, F_R) \
@ -656,8 +650,6 @@
do_name( attemptUpdate_name, "attemptUpdate") \
do_signature(attemptUpdate_signature, "(JJ)Z") \
\
do_intrinsic(_fillInStackTrace, java_lang_Throwable, fillInStackTrace_name, void_throwable_signature, F_RNY) \
\
/* support for sun.misc.Unsafe */ \
do_class(sun_misc_Unsafe, "sun/misc/Unsafe") \
\
@ -819,10 +811,22 @@
do_name( prefetchReadStatic_name, "prefetchReadStatic") \
do_intrinsic(_prefetchWriteStatic, sun_misc_Unsafe, prefetchWriteStatic_name, prefetch_signature, F_SN) \
do_name( prefetchWriteStatic_name, "prefetchWriteStatic") \
/*== LAST_COMPILER_INLINE*/ \
/*the compiler does have special inlining code for these; bytecode inline is just fine */ \
\
do_intrinsic(_fillInStackTrace, java_lang_Throwable, fillInStackTrace_name, void_throwable_signature, F_RNY) \
\
do_intrinsic(_Object_init, java_lang_Object, object_initializer_name, void_method_signature, F_R) \
/* (symbol object_initializer_name defined above) */ \
\
do_intrinsic(_invoke, java_lang_reflect_Method, invoke_name, object_array_object_object_signature, F_R) \
/* (symbols invoke_name and invoke_signature defined above) */ \
\
/*end*/
// Class vmSymbols
class vmSymbols: AllStatic {
@ -935,6 +939,7 @@ class vmIntrinsics: AllStatic {
#undef VM_INTRINSIC_ENUM
ID_LIMIT,
LAST_COMPILER_INLINE = _prefetchWriteStatic,
FIRST_ID = _none + 1
};
@ -972,4 +977,7 @@ public:
static Flags flags_for(ID id);
static const char* short_name_as_C_string(ID id, char* buf, int size);
// Access to intrinsic methods:
static methodOop method_for(ID id);
};
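The reshuffle above moves _fillInStackTrace, _Object_init and _invoke below the LAST_COMPILER_INLINE marker, so a single range check can now separate compiler-inlined intrinsics from the ones that only get bytecode inlining. A sketch of that sentinel trick with a made-up subset of IDs:

enum ID {
  _none,
  _hashCode, _getClass, _prefetchWriteStatic,   // compiler-inlined group
  LAST_COMPILER_INLINE = _prefetchWriteStatic,
  _fillInStackTrace, _Object_init, _invoke,     // bytecode inlining only
  ID_LIMIT,
  FIRST_ID = _none + 1
};

// IDs at or below the sentinel get special compiler inlining code.
inline bool is_compiler_inline(ID id) {
  return FIRST_ID <= id && id <= LAST_COMPILER_INLINE;
}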

View File

@ -379,7 +379,15 @@ void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
if ( loc != NULL ) {
oop *base_loc = fr->oopmapreg_to_location(omv.content_reg(), reg_map);
oop *derived_loc = loc;
derived_oop_fn(base_loc, derived_loc);
oop val = *base_loc;
if (val == (oop)NULL || Universe::is_narrow_oop_base(val)) {
// Ignore NULL oops and decoded NULL narrow oops which
// are equal to Universe::narrow_oop_base when a narrow oop
// implicit null check is used in compiled code.
// The narrow_oop_base could be NULL or be the address
// of the page below the heap, depending on the compressed oops mode.
} else
derived_oop_fn(base_loc, derived_loc);
}
oms.next();
} while (!oms.is_done());
@ -394,6 +402,15 @@ void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
oop* loc = fr->oopmapreg_to_location(omv.reg(),reg_map);
if ( loc != NULL ) {
if ( omv.type() == OopMapValue::oop_value ) {
oop val = *loc;
if (val == (oop)NULL || Universe::is_narrow_oop_base(val)) {
// Ignore NULL oops and decoded NULL narrow oops which
// are equal to Universe::narrow_oop_base when a narrow oop
// implicit null check is used in compiled code.
// The narrow_oop_base could be NULL or be the address
// of the page below the heap, depending on the compressed oops mode.
continue;
}
#ifdef ASSERT
if ((((uintptr_t)loc & (sizeof(*loc)-1)) != 0) ||
!Universe::heap()->is_in_or_null(*loc)) {
@ -410,6 +427,8 @@ void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
#endif // ASSERT
oop_fn->do_oop(loc);
} else if ( omv.type() == OopMapValue::value_value ) {
assert((*loc) == (oop)NULL || !Universe::is_narrow_oop_base(*loc),
"found invalid value pointer");
value_fn->do_oop(loc);
} else if ( omv.type() == OopMapValue::narrowoop_value ) {
narrowOop *nl = (narrowOop*)loc;
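Why the new filter also screens out Universe::narrow_oop_base: a narrow oop decodes as base + (value << shift), so a narrow NULL (value 0) decodes to the base address itself, not to NULL. A self-contained sketch of that arithmetic, with the field names assumed:

#include <cstdint>

typedef uint32_t narrowOop;

struct NarrowOopMode {
  uint8_t* base;    // NULL, or the page just below the heap
  int      shift;   // log2 of object alignment, typically 3

  void* decode(narrowOop v) const {
    // value 0 ("narrow NULL") decodes to base, not to NULL
    return base + (static_cast<uintptr_t>(v) << shift);
  }
  bool is_narrow_oop_base(const void* p) const {
    return p == base;   // what the oop-map iteration now filters out
  }
};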

View File

@ -233,6 +233,10 @@ class OopMapSet : public ResourceObj {
int heap_size() const;
void copy_to(address addr);
// Methods oops_do() and all_do() filter out NULL oops and
// oop == Universe::narrow_oop_base() before passing oops
// to closures.
// Iterates through frame for a compiled method
static void oops_do (const frame* fr,
const RegisterMap* reg_map, OopClosure* f);

View File

@ -42,35 +42,40 @@ protected:
BufferLength = 1024
};
oop *_buffer[BufferLength];
oop **_buffer_top;
oop **_buffer_curr;
StarTask _buffer[BufferLength];
StarTask* _buffer_top;
StarTask* _buffer_curr;
OopClosure *_oc;
double _closure_app_seconds;
OopClosure* _oc;
double _closure_app_seconds;
void process_buffer () {
double start = os::elapsedTime();
for (oop **curr = _buffer; curr < _buffer_curr; ++curr) {
_oc->do_oop(*curr);
for (StarTask* curr = _buffer; curr < _buffer_curr; ++curr) {
if (curr->is_narrow()) {
assert(UseCompressedOops, "Error");
_oc->do_oop((narrowOop*)(*curr));
} else {
_oc->do_oop((oop*)(*curr));
}
}
_buffer_curr = _buffer;
_closure_app_seconds += (os::elapsedTime() - start);
}
public:
virtual void do_oop(narrowOop* p) {
guarantee(false, "NYI");
}
virtual void do_oop(oop *p) {
template <class T> inline void do_oop_work(T* p) {
if (_buffer_curr == _buffer_top) {
process_buffer();
}
*_buffer_curr = p;
StarTask new_ref(p);
*_buffer_curr = new_ref;
++_buffer_curr;
}
public:
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
virtual void do_oop(oop* p) { do_oop_work(p); }
void done () {
if (_buffer_curr > _buffer) {
process_buffer();
@ -88,18 +93,17 @@ public:
class BufferingOopsInGenClosure: public OopsInGenClosure {
BufferingOopClosure _boc;
OopsInGenClosure* _oc;
public:
protected:
template <class T> inline void do_oop_work(T* p) {
assert(generation()->is_in_reserved((void*)p), "Must be in!");
_boc.do_oop(p);
}
public:
BufferingOopsInGenClosure(OopsInGenClosure *oc) :
_boc(oc), _oc(oc) {}
virtual void do_oop(narrowOop* p) {
guarantee(false, "NYI");
}
virtual void do_oop(oop* p) {
assert(generation()->is_in_reserved(p), "Must be in!");
_boc.do_oop(p);
}
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
virtual void do_oop(oop* p) { do_oop_work(p); }
void done() {
_boc.done();
@ -130,14 +134,14 @@ private:
BufferLength = 1024
};
oop *_buffer[BufferLength];
oop **_buffer_top;
oop **_buffer_curr;
StarTask _buffer[BufferLength];
StarTask* _buffer_top;
StarTask* _buffer_curr;
HeapRegion *_hr_buffer[BufferLength];
HeapRegion **_hr_curr;
HeapRegion* _hr_buffer[BufferLength];
HeapRegion** _hr_curr;
OopsInHeapRegionClosure *_oc;
OopsInHeapRegionClosure* _oc;
double _closure_app_seconds;
void process_buffer () {
@ -146,15 +150,20 @@ private:
"the two lengths should be the same");
double start = os::elapsedTime();
HeapRegion **hr_curr = _hr_buffer;
HeapRegion *hr_prev = NULL;
for (oop **curr = _buffer; curr < _buffer_curr; ++curr) {
HeapRegion *region = *hr_curr;
HeapRegion** hr_curr = _hr_buffer;
HeapRegion* hr_prev = NULL;
for (StarTask* curr = _buffer; curr < _buffer_curr; ++curr) {
HeapRegion* region = *hr_curr;
if (region != hr_prev) {
_oc->set_region(region);
hr_prev = region;
}
_oc->do_oop(*curr);
if (curr->is_narrow()) {
assert(UseCompressedOops, "Error");
_oc->do_oop((narrowOop*)(*curr));
} else {
_oc->do_oop((oop*)(*curr));
}
++hr_curr;
}
_buffer_curr = _buffer;
@ -163,17 +172,16 @@ private:
}
public:
virtual void do_oop(narrowOop *p) {
guarantee(false, "NYI");
}
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
virtual void do_oop( oop* p) { do_oop_work(p); }
virtual void do_oop(oop *p) {
template <class T> void do_oop_work(T* p) {
if (_buffer_curr == _buffer_top) {
assert(_hr_curr > _hr_buffer, "_hr_curr should be consistent with _buffer_curr");
process_buffer();
}
*_buffer_curr = p;
StarTask new_ref(p);
*_buffer_curr = new_ref;
++_buffer_curr;
*_hr_curr = _from;
++_hr_curr;
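The StarTask that replaces the raw oop* buffer entries above packs either an oop* or a narrowOop* into a single word, discriminated by a low tag bit that pointer alignment leaves free. A hedged sketch of the idea; HotSpot's actual encoding may differ in detail:

#include <cassert>
#include <cstdint>

class oopDesc; typedef oopDesc* oop;
typedef uint32_t narrowOop;

class StarTask {
  uintptr_t _holder;                      // pointer value | tag bit
  static const uintptr_t NARROW_TAG = 1;  // bit 0 is free: pointers are aligned
public:
  StarTask() : _holder(0) {}
  StarTask(oop* p) : _holder(reinterpret_cast<uintptr_t>(p)) {}
  StarTask(narrowOop* p)
      : _holder(reinterpret_cast<uintptr_t>(p) | NARROW_TAG) {
    assert((reinterpret_cast<uintptr_t>(p) & NARROW_TAG) == 0);
  }
  bool is_narrow() const { return (_holder & NARROW_TAG) != 0; }
  // Callers check is_narrow() first, then cast to the matching width.
  operator oop*() const       { return reinterpret_cast<oop*>(_holder); }
  operator narrowOop*() const {
    return reinterpret_cast<narrowOop*>(_holder & ~NARROW_TAG);
  }
};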

View File

@ -452,13 +452,10 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
_regionStack.allocate(G1MarkRegionStackSize);
// Create & start a ConcurrentMark thread.
if (G1ConcMark) {
_cmThread = new ConcurrentMarkThread(this);
assert(cmThread() != NULL, "CM Thread should have been created");
assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
} else {
_cmThread = NULL;
}
_cmThread = new ConcurrentMarkThread(this);
assert(cmThread() != NULL, "CM Thread should have been created");
assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
_g1h = G1CollectedHeap::heap();
assert(CGC_lock != NULL, "Where's the CGC_lock?");
assert(_markBitMap1.covers(rs), "_markBitMap1 inconsistency");
@ -783,18 +780,18 @@ public:
bool do_barrier) : _cm(cm), _g1h(g1h),
_do_barrier(do_barrier) { }
virtual void do_oop(narrowOop* p) {
guarantee(false, "NYI");
}
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
virtual void do_oop( oop* p) { do_oop_work(p); }
virtual void do_oop(oop* p) {
oop thisOop = *p;
if (thisOop != NULL) {
assert(thisOop->is_oop() || thisOop->mark() == NULL,
template <class T> void do_oop_work(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
assert(obj->is_oop() || obj->mark() == NULL,
"expected an oop, possibly with mark word displaced");
HeapWord* addr = (HeapWord*)thisOop;
HeapWord* addr = (HeapWord*)obj;
if (_g1h->is_in_g1_reserved(addr)) {
_cm->grayRoot(thisOop);
_cm->grayRoot(obj);
}
}
if (_do_barrier) {
@ -850,16 +847,6 @@ void ConcurrentMark::checkpointRootsInitial() {
double start = os::elapsedTime();
GCOverheadReporter::recordSTWStart(start);
// If there has not been a GC[n-1] since last GC[n] cycle completed,
// precede our marking with a collection of all
// younger generations to keep floating garbage to a minimum.
// YSR: we won't do this for now -- it's an optimization to be
// done post-beta.
// YSR: ignoring weak refs for now; will do at bug fixing stage
// EVM: assert(discoveredRefsAreClear());
G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
g1p->record_concurrent_mark_init_start();
checkpointRootsInitialPre();
@ -1135,6 +1122,13 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
return;
}
if (VerifyDuringGC) {
HandleMark hm; // handle scope
gclog_or_tty->print(" VerifyDuringGC:(before)");
Universe::heap()->prepare_for_verify();
Universe::verify(true, false, true);
}
G1CollectorPolicy* g1p = g1h->g1_policy();
g1p->record_concurrent_mark_remark_start();
@ -1159,10 +1153,12 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
JavaThread::satb_mark_queue_set().set_active_all_threads(false);
if (VerifyDuringGC) {
g1h->prepare_for_verify();
g1h->verify(/* allow_dirty */ true,
/* silent */ false,
/* use_prev_marking */ false);
HandleMark hm; // handle scope
gclog_or_tty->print(" VerifyDuringGC:(after)");
Universe::heap()->prepare_for_verify();
Universe::heap()->verify(/* allow_dirty */ true,
/* silent */ false,
/* use_prev_marking */ false);
}
}
@ -1233,6 +1229,41 @@ public:
CardTableModRefBS::card_shift);
}
// It takes a region that's not empty (i.e., it has at least one
// live object in it) and sets its corresponding bit on the region
// bitmap to 1. If the region is "starts humongous" it will also set
// to 1 the bits on the region bitmap that correspond to its
// associated "continues humongous" regions.
void set_bit_for_region(HeapRegion* hr) {
assert(!hr->continuesHumongous(), "should have filtered those out");
size_t index = hr->hrs_index();
if (!hr->startsHumongous()) {
// Normal (non-humongous) case: just set the bit.
_region_bm->par_at_put((BitMap::idx_t) index, true);
} else {
// Starts humongous case: calculate how many regions are part of
// this humongous region and then set the bit range. It might
// have been a bit more efficient to look at the object that
// spans these humongous regions to calculate their number from
// the object's size. However, it's a good idea to calculate
// this based on the metadata itself, and not the region
// contents, so that this code is not aware of what goes into
// the humongous regions (in case this changes in the future).
G1CollectedHeap* g1h = G1CollectedHeap::heap();
size_t end_index = index + 1;
while (end_index < g1h->n_regions()) {
HeapRegion* chr = g1h->region_at(end_index);
if (!chr->continuesHumongous()) {
break;
}
end_index += 1;
}
_region_bm->par_at_put_range((BitMap::idx_t) index,
(BitMap::idx_t) end_index, true);
}
}
bool doHeapRegion(HeapRegion* hr) {
if (_co_tracker != NULL)
_co_tracker->update();
@ -1241,13 +1272,13 @@ public:
_start_vtime_sec = os::elapsedVTime();
if (hr->continuesHumongous()) {
HeapRegion* hum_start = hr->humongous_start_region();
// If the head region of the humongous region has been determined
// to be alive, then all the tail regions should be marked
// as live as well.
if (_region_bm->at(hum_start->hrs_index())) {
_region_bm->par_at_put(hr->hrs_index(), 1);
}
// We will ignore these here and process them when their
// associated "starts humongous" region is processed (see
// set_bit_for_region()). Note that we cannot rely on their
// associated "starts humongous" region to have their bit set to
// 1 since, due to the region chunking in the parallel region
// iteration, a "continues humongous" region might be visited
// before its associated "starts humongous".
return false;
}
@ -1343,14 +1374,14 @@ public:
intptr_t(uintptr_t(tp) >> CardTableModRefBS::card_shift);
mark_card_num_range(start_card_num, last_card_num);
// This definitely means the region has live objects.
_region_bm->par_at_put(hr->hrs_index(), 1);
set_bit_for_region(hr);
}
}
hr->add_to_marked_bytes(marked_bytes);
// Update the live region bitmap.
if (marked_bytes > 0) {
_region_bm->par_at_put(hr->hrs_index(), 1);
set_bit_for_region(hr);
}
hr->set_top_at_conc_mark_count(nextTop);
_tot_live += hr->next_live_bytes();
@ -1623,6 +1654,15 @@ void ConcurrentMark::cleanup() {
return;
}
if (VerifyDuringGC) {
HandleMark hm; // handle scope
gclog_or_tty->print(" VerifyDuringGC:(before)");
Universe::heap()->prepare_for_verify();
Universe::verify(/* allow dirty */ true,
/* silent */ false,
/* prev marking */ true);
}
_cleanup_co_tracker.disable();
G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
@ -1755,10 +1795,12 @@ void ConcurrentMark::cleanup() {
g1h->increment_total_collections();
if (VerifyDuringGC) {
g1h->prepare_for_verify();
g1h->verify(/* allow_dirty */ true,
/* silent */ false,
/* use_prev_marking */ true);
HandleMark hm; // handle scope
gclog_or_tty->print(" VerifyDuringGC:(after)");
Universe::heap()->prepare_for_verify();
Universe::verify(/* allow dirty */ true,
/* silent */ false,
/* prev marking */ true);
}
}
@ -1817,12 +1859,11 @@ class G1CMKeepAliveClosure: public OopClosure {
_g1(g1), _cm(cm),
_bitMap(bitMap) {}
void do_oop(narrowOop* p) {
guarantee(false, "NYI");
}
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
virtual void do_oop( oop* p) { do_oop_work(p); }
void do_oop(oop* p) {
oop thisOop = *p;
template <class T> void do_oop_work(T* p) {
oop thisOop = oopDesc::load_decode_heap_oop(p);
HeapWord* addr = (HeapWord*)thisOop;
if (_g1->is_in_g1_reserved(addr) && _g1->is_obj_ill(thisOop)) {
_bitMap->mark(addr);
@ -1981,12 +2022,11 @@ public:
ReachablePrinterOopClosure(CMBitMapRO* bitmap, outputStream* out) :
_bitmap(bitmap), _g1h(G1CollectedHeap::heap()), _out(out) { }
void do_oop(narrowOop* p) {
guarantee(false, "NYI");
}
void do_oop(narrowOop* p) { do_oop_work(p); }
void do_oop( oop* p) { do_oop_work(p); }
void do_oop(oop* p) {
oop obj = *p;
template <class T> void do_oop_work(T* p) {
oop obj = oopDesc::load_decode_heap_oop(p);
const char* str = NULL;
const char* str2 = "";
@ -2128,6 +2168,7 @@ void ConcurrentMark::deal_with_reference(oop obj) {
HeapWord* objAddr = (HeapWord*) obj;
assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
if (_g1h->is_in_g1_reserved(objAddr)) {
tmp_guarantee_CM( obj != NULL, "is_in_g1_reserved should ensure this" );
HeapRegion* hr = _g1h->heap_region_containing(obj);
@ -2345,7 +2386,7 @@ class CSMarkOopClosure: public OopClosure {
}
}
bool drain() {
template <class T> bool drain() {
while (_ms_ind > 0) {
oop obj = pop();
assert(obj != NULL, "Since index was non-zero.");
@ -2359,9 +2400,8 @@ class CSMarkOopClosure: public OopClosure {
}
// Now process this portion of this one.
int lim = MIN2(next_arr_ind, len);
assert(!UseCompressedOops, "This needs to be fixed");
for (int j = arr_ind; j < lim; j++) {
do_oop(aobj->obj_at_addr<oop>(j));
do_oop(aobj->obj_at_addr<T>(j));
}
} else {
@ -2388,13 +2428,13 @@ public:
FREE_C_HEAP_ARRAY(jint, _array_ind_stack);
}
void do_oop(narrowOop* p) {
guarantee(false, "NYI");
}
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
virtual void do_oop( oop* p) { do_oop_work(p); }
void do_oop(oop* p) {
oop obj = *p;
if (obj == NULL) return;
template <class T> void do_oop_work(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
if (oopDesc::is_null(heap_oop)) return;
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
if (obj->is_forwarded()) {
// If the object has already been forwarded, we have to make sure
// that it's marked. So follow the forwarding pointer. Note that
@ -2443,7 +2483,11 @@ public:
oop obj = oop(addr);
if (!obj->is_forwarded()) {
if (!_oop_cl.push(obj)) return false;
if (!_oop_cl.drain()) return false;
if (UseCompressedOops) {
if (!_oop_cl.drain<narrowOop>()) return false;
} else {
if (!_oop_cl.drain<oop>()) return false;
}
}
// Otherwise...
return true;
@ -2601,9 +2645,6 @@ void ConcurrentMark::disable_co_trackers() {
// abandon current marking iteration due to a Full GC
void ConcurrentMark::abort() {
// If we're not marking, nothing to do.
if (!G1ConcMark) return;
// Clear all marks to force marking thread to do nothing
_nextMarkBitMap->clearAll();
// Empty mark stack
@ -2779,14 +2820,14 @@ private:
CMTask* _task;
public:
void do_oop(narrowOop* p) {
guarantee(false, "NYI");
}
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
virtual void do_oop( oop* p) { do_oop_work(p); }
void do_oop(oop* p) {
template <class T> void do_oop_work(T* p) {
tmp_guarantee_CM( _g1h->is_in_g1_reserved((HeapWord*) p), "invariant" );
tmp_guarantee_CM( !_g1h->heap_region_containing((HeapWord*) p)->is_on_free_list(), "invariant" );
oop obj = *p;
oop obj = oopDesc::load_decode_heap_oop(p);
if (_cm->verbose_high())
gclog_or_tty->print_cr("[%d] we're looking at location "
"*"PTR_FORMAT" = "PTR_FORMAT,
@ -2932,6 +2973,7 @@ void CMTask::deal_with_reference(oop obj) {
++_refs_reached;
HeapWord* objAddr = (HeapWord*) obj;
assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
if (_g1h->is_in_g1_reserved(objAddr)) {
tmp_guarantee_CM( obj != NULL, "is_in_g1_reserved should ensure this" );
HeapRegion* hr = _g1h->heap_region_containing(obj);
@ -2995,6 +3037,7 @@ void CMTask::deal_with_reference(oop obj) {
void CMTask::push(oop obj) {
HeapWord* objAddr = (HeapWord*) obj;
tmp_guarantee_CM( _g1h->is_in_g1_reserved(objAddr), "invariant" );
tmp_guarantee_CM( !_g1h->heap_region_containing(objAddr)->is_on_free_list(), "invariant" );
tmp_guarantee_CM( !_g1h->is_obj_ill(obj), "invariant" );
tmp_guarantee_CM( _nextMarkBitMap->isMarked(objAddr), "invariant" );
@ -3240,6 +3283,8 @@ void CMTask::drain_local_queue(bool partially) {
tmp_guarantee_CM( _g1h->is_in_g1_reserved((HeapWord*) obj),
"invariant" );
tmp_guarantee_CM( !_g1h->heap_region_containing(obj)->is_on_free_list(),
"invariant" );
scan_object(obj);
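A shape that recurs through this whole file: each do_oop(narrowOop*) stub that used to trap with guarantee(false, "NYI") becomes a one-line virtual override forwarding to a template workhorse, because C++ does not allow a virtual member function to be a template. A self-contained sketch of the pattern, with toy types standing in for the oop machinery:

#include <cstdint>

class oopDesc; typedef oopDesc* oop;
typedef uint32_t narrowOop;

class OopClosure {
public:
  virtual void do_oop(oop* p) = 0;
  virtual void do_oop(narrowOop* p) = 0;
  virtual ~OopClosure() {}
};

class CountNonNullClosure : public OopClosure {
  int _count;
  // The workhorse is a template, so it cannot itself be virtual...
  template <class T> void do_oop_work(T* p) {
    if (*p != 0) _count++;   // stands in for load/decode/process
  }
public:
  CountNonNullClosure() : _count(0) {}
  // ...so two thin virtual overrides forward to it instead.
  virtual void do_oop(oop* p)       { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  int count() const { return _count; }
};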

View File

@ -763,6 +763,7 @@ private:
CMBitMap* _nextMarkBitMap;
// the task queue of this task
CMTaskQueue* _task_queue;
private:
// the task queue set---needed for stealing
CMTaskQueueSet* _task_queues;
// indicates whether the task has been claimed---this is only for

View File

@ -424,7 +424,7 @@ G1BlockOffsetArray::forward_to_block_containing_addr_slow(HeapWord* q,
while (n <= next_boundary) {
q = n;
oop obj = oop(q);
if (obj->klass() == NULL) return q;
if (obj->klass_or_null() == NULL) return q;
n += obj->size();
}
assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
@ -436,7 +436,7 @@ G1BlockOffsetArray::forward_to_block_containing_addr_slow(HeapWord* q,
while (n <= next_boundary) {
q = n;
oop obj = oop(q);
if (obj->klass() == NULL) return q;
if (obj->klass_or_null() == NULL) return q;
n += _sp->block_size(q);
}
assert(q <= next_boundary && n > next_boundary, "Consequence of loop");

View File

@ -96,14 +96,14 @@ forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
while (n <= addr) {
q = n;
oop obj = oop(q);
if (obj->klass() == NULL) return q;
if (obj->klass_or_null() == NULL) return q;
n += obj->size();
}
} else {
while (n <= addr) {
q = n;
oop obj = oop(q);
if (obj->klass() == NULL) return q;
if (obj->klass_or_null() == NULL) return q;
n += _sp->block_size(q);
}
}
@ -115,7 +115,7 @@ forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
inline HeapWord*
G1BlockOffsetArray::forward_to_block_containing_addr(HeapWord* q,
const void* addr) {
if (oop(q)->klass() == NULL) return q;
if (oop(q)->klass_or_null() == NULL) return q;
HeapWord* n = q + _sp->block_size(q);
// In the normal case, where the query "addr" is a card boundary, and the
// offset table chunks are the same size as cards, the block starting at

File diff suppressed because it is too large

View File

@ -56,8 +56,8 @@ class ConcurrentZFThread;
# define IF_G1_DETAILED_STATS(code)
#endif
typedef GenericTaskQueue<oop*> RefToScanQueue;
typedef GenericTaskQueueSet<oop*> RefToScanQueueSet;
typedef GenericTaskQueue<StarTask> RefToScanQueue;
typedef GenericTaskQueueSet<StarTask> RefToScanQueueSet;
typedef int RegionIdx_t; // needs to hold [ 0..max_regions() )
typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion )
@ -700,6 +700,9 @@ public:
size_t g1_reserved_obj_bytes() { return _g1_reserved.byte_size(); }
virtual size_t capacity() const;
virtual size_t used() const;
// This should be called when we're not holding the heap lock. The
// result might be a bit inaccurate.
size_t used_unlocked() const;
size_t recalculate_used() const;
#ifndef PRODUCT
size_t recalculate_used_regions() const;
@ -1061,8 +1064,14 @@ public:
// Override; it uses the "prev" marking information
virtual void verify(bool allow_dirty, bool silent);
// Default behavior by calling print(tty);
virtual void print() const;
// This calls print_on(st, PrintHeapAtGCExtended).
virtual void print_on(outputStream* st) const;
// If extended is true, it will print out information for all
// regions in the heap by calling print_on_extended(st).
virtual void print_on(outputStream* st, bool extended) const;
virtual void print_on_extended(outputStream* st) const;
virtual void print_gc_threads_on(outputStream* st) const;
virtual void gc_threads_do(ThreadClosure* tc) const;
@ -1265,6 +1274,552 @@ public:
};
// Local Variables: ***
// c-indentation-style: gnu ***
// End: ***
#define use_local_bitmaps 1
#define verify_local_bitmaps 0
#define oop_buffer_length 256
#ifndef PRODUCT
class GCLabBitMap;
class GCLabBitMapClosure: public BitMapClosure {
private:
ConcurrentMark* _cm;
GCLabBitMap* _bitmap;
public:
GCLabBitMapClosure(ConcurrentMark* cm,
GCLabBitMap* bitmap) {
_cm = cm;
_bitmap = bitmap;
}
virtual bool do_bit(size_t offset);
};
#endif // !PRODUCT
class GCLabBitMap: public BitMap {
private:
ConcurrentMark* _cm;
int _shifter;
size_t _bitmap_word_covers_words;
// beginning of the heap
HeapWord* _heap_start;
// this is the actual start of the GCLab
HeapWord* _real_start_word;
// this is the actual end of the GCLab
HeapWord* _real_end_word;
// this is the first word, possibly located before the actual start
// of the GCLab, that corresponds to the first bit of the bitmap
HeapWord* _start_word;
// size of a GCLab in words
size_t _gclab_word_size;
static int shifter() {
return MinObjAlignment - 1;
}
// how many heap words does a single bitmap word correspond to?
static size_t bitmap_word_covers_words() {
return BitsPerWord << shifter();
}
static size_t gclab_word_size() {
return G1ParallelGCAllocBufferSize / HeapWordSize;
}
static size_t bitmap_size_in_bits() {
size_t bits_in_bitmap = gclab_word_size() >> shifter();
// We are going to ensure that the beginning of a word in this
// bitmap also corresponds to the beginning of a word in the
// global marking bitmap. To handle the case where a GCLab
// starts from the middle of the bitmap, we need to add enough
// space (i.e. up to a bitmap word) to ensure that we have
// enough bits in the bitmap.
return bits_in_bitmap + BitsPerWord - 1;
}
public:
GCLabBitMap(HeapWord* heap_start)
: BitMap(bitmap_size_in_bits()),
_cm(G1CollectedHeap::heap()->concurrent_mark()),
_shifter(shifter()),
_bitmap_word_covers_words(bitmap_word_covers_words()),
_heap_start(heap_start),
_gclab_word_size(gclab_word_size()),
_real_start_word(NULL),
_real_end_word(NULL),
_start_word(NULL)
{
guarantee( size_in_words() >= bitmap_size_in_words(),
"just making sure");
}
inline unsigned heapWordToOffset(HeapWord* addr) {
unsigned offset = (unsigned) pointer_delta(addr, _start_word) >> _shifter;
assert(offset < size(), "offset should be within bounds");
return offset;
}
inline HeapWord* offsetToHeapWord(size_t offset) {
HeapWord* addr = _start_word + (offset << _shifter);
assert(_real_start_word <= addr && addr < _real_end_word, "invariant");
return addr;
}
bool fields_well_formed() {
bool ret1 = (_real_start_word == NULL) &&
(_real_end_word == NULL) &&
(_start_word == NULL);
if (ret1)
return true;
bool ret2 = _real_start_word >= _start_word &&
_start_word < _real_end_word &&
(_real_start_word + _gclab_word_size) == _real_end_word &&
(_start_word + _gclab_word_size + _bitmap_word_covers_words)
> _real_end_word;
return ret2;
}
inline bool mark(HeapWord* addr) {
guarantee(use_local_bitmaps, "invariant");
assert(fields_well_formed(), "invariant");
if (addr >= _real_start_word && addr < _real_end_word) {
assert(!isMarked(addr), "should not have already been marked");
// first mark it on the bitmap
at_put(heapWordToOffset(addr), true);
return true;
} else {
return false;
}
}
inline bool isMarked(HeapWord* addr) {
guarantee(use_local_bitmaps, "invariant");
assert(fields_well_formed(), "invariant");
return at(heapWordToOffset(addr));
}
void set_buffer(HeapWord* start) {
guarantee(use_local_bitmaps, "invariant");
clear();
assert(start != NULL, "invariant");
_real_start_word = start;
_real_end_word = start + _gclab_word_size;
size_t diff =
pointer_delta(start, _heap_start) % _bitmap_word_covers_words;
_start_word = start - diff;
assert(fields_well_formed(), "invariant");
}
#ifndef PRODUCT
void verify() {
// verify that the marks have been propagated
GCLabBitMapClosure cl(_cm, this);
iterate(&cl);
}
#endif // PRODUCT
void retire() {
guarantee(use_local_bitmaps, "invariant");
assert(fields_well_formed(), "invariant");
if (_start_word != NULL) {
CMBitMap* mark_bitmap = _cm->nextMarkBitMap();
// this means that the bitmap was set up for the GCLab
assert(_real_start_word != NULL && _real_end_word != NULL, "invariant");
mark_bitmap->mostly_disjoint_range_union(this,
0, // always start from the start of the bitmap
_start_word,
size_in_words());
_cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word));
#ifndef PRODUCT
if (use_local_bitmaps && verify_local_bitmaps)
verify();
#endif // PRODUCT
} else {
assert(_real_start_word == NULL && _real_end_word == NULL, "invariant");
}
}
static size_t bitmap_size_in_words() {
return (bitmap_size_in_bits() + BitsPerWord - 1) / BitsPerWord;
}
};
class G1ParGCAllocBuffer: public ParGCAllocBuffer {
private:
bool _retired;
bool _during_marking;
GCLabBitMap _bitmap;
public:
G1ParGCAllocBuffer() :
ParGCAllocBuffer(G1ParallelGCAllocBufferSize / HeapWordSize),
_during_marking(G1CollectedHeap::heap()->mark_in_progress()),
_bitmap(G1CollectedHeap::heap()->reserved_region().start()),
_retired(false)
{ }
inline bool mark(HeapWord* addr) {
guarantee(use_local_bitmaps, "invariant");
assert(_during_marking, "invariant");
return _bitmap.mark(addr);
}
inline void set_buf(HeapWord* buf) {
if (use_local_bitmaps && _during_marking)
_bitmap.set_buffer(buf);
ParGCAllocBuffer::set_buf(buf);
_retired = false;
}
inline void retire(bool end_of_gc, bool retain) {
if (_retired)
return;
if (use_local_bitmaps && _during_marking) {
_bitmap.retire();
}
ParGCAllocBuffer::retire(end_of_gc, retain);
_retired = true;
}
};
class G1ParScanThreadState : public StackObj {
protected:
G1CollectedHeap* _g1h;
RefToScanQueue* _refs;
DirtyCardQueue _dcq;
CardTableModRefBS* _ct_bs;
G1RemSet* _g1_rem;
typedef GrowableArray<StarTask> OverflowQueue;
OverflowQueue* _overflowed_refs;
G1ParGCAllocBuffer _alloc_buffers[GCAllocPurposeCount];
ageTable _age_table;
size_t _alloc_buffer_waste;
size_t _undo_waste;
OopsInHeapRegionClosure* _evac_failure_cl;
G1ParScanHeapEvacClosure* _evac_cl;
G1ParScanPartialArrayClosure* _partial_scan_cl;
int _hash_seed;
int _queue_num;
int _term_attempts;
#if G1_DETAILED_STATS
int _pushes, _pops, _steals, _steal_attempts;
int _overflow_pushes;
#endif
double _start;
double _start_strong_roots;
double _strong_roots_time;
double _start_term;
double _term_time;
// Map from young-age-index (0 == not young, 1 is youngest) to
// surviving words. base is what we get back from the malloc call
size_t* _surviving_young_words_base;
// this points into the array, as we use the first few entries for padding
size_t* _surviving_young_words;
#define PADDING_ELEM_NUM (64 / sizeof(size_t))
void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
DirtyCardQueue& dirty_card_queue() { return _dcq; }
CardTableModRefBS* ctbs() { return _ct_bs; }
template <class T> void immediate_rs_update(HeapRegion* from, T* p, int tid) {
if (!from->is_survivor()) {
_g1_rem->par_write_ref(from, p, tid);
}
}
template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
// If the new value of the field points into the same region, or
// "from" is the to-space, we don't need to include it in the Rset updates.
if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
size_t card_index = ctbs()->index_for(p);
// If the card hasn't been added to the buffer, do it.
if (ctbs()->mark_card_deferred(card_index)) {
dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
}
}
}
public:
G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num);
~G1ParScanThreadState() {
FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
}
RefToScanQueue* refs() { return _refs; }
OverflowQueue* overflowed_refs() { return _overflowed_refs; }
ageTable* age_table() { return &_age_table; }
G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
return &_alloc_buffers[purpose];
}
size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
size_t undo_waste() { return _undo_waste; }
template <class T> void push_on_queue(T* ref) {
assert(ref != NULL, "invariant");
assert(has_partial_array_mask(ref) ||
_g1h->obj_in_cs(oopDesc::load_decode_heap_oop(ref)), "invariant");
#ifdef ASSERT
if (has_partial_array_mask(ref)) {
oop p = clear_partial_array_mask(ref);
// Verify that we point into the CS
assert(_g1h->obj_in_cs(p), "Should be in CS");
}
#endif
if (!refs()->push(ref)) {
overflowed_refs()->push(ref);
IF_G1_DETAILED_STATS(note_overflow_push());
} else {
IF_G1_DETAILED_STATS(note_push());
}
}
void pop_from_queue(StarTask& ref) {
if (refs()->pop_local(ref)) {
assert((oop*)ref != NULL, "pop_local() returned true");
assert(UseCompressedOops || !ref.is_narrow(), "Error");
assert(has_partial_array_mask((oop*)ref) ||
_g1h->obj_in_cs(ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)ref)
: oopDesc::load_decode_heap_oop((oop*)ref)),
"invariant");
IF_G1_DETAILED_STATS(note_pop());
} else {
StarTask null_task;
ref = null_task;
}
}
void pop_from_overflow_queue(StarTask& ref) {
StarTask new_ref = overflowed_refs()->pop();
assert((oop*)new_ref != NULL, "pop() from a local non-empty stack");
assert(UseCompressedOops || !new_ref.is_narrow(), "Error");
assert(has_partial_array_mask((oop*)new_ref) ||
_g1h->obj_in_cs(new_ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)new_ref)
: oopDesc::load_decode_heap_oop((oop*)new_ref)),
"invariant");
ref = new_ref;
}
int refs_to_scan() { return refs()->size(); }
int overflowed_refs_to_scan() { return overflowed_refs()->length(); }
template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
if (G1DeferredRSUpdate) {
deferred_rs_update(from, p, tid);
} else {
immediate_rs_update(from, p, tid);
}
}
HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
HeapWord* obj = NULL;
if (word_sz * 100 <
(size_t)(G1ParallelGCAllocBufferSize / HeapWordSize) *
ParallelGCBufferWastePct) {
G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
add_to_alloc_buffer_waste(alloc_buf->words_remaining());
alloc_buf->retire(false, false);
HeapWord* buf =
_g1h->par_allocate_during_gc(purpose, G1ParallelGCAllocBufferSize / HeapWordSize);
if (buf == NULL) return NULL; // Let caller handle allocation failure.
// Otherwise.
alloc_buf->set_buf(buf);
obj = alloc_buf->allocate(word_sz);
assert(obj != NULL, "buffer was definitely big enough...");
} else {
obj = _g1h->par_allocate_during_gc(purpose, word_sz);
}
return obj;
}
HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
if (obj != NULL) return obj;
return allocate_slow(purpose, word_sz);
}
void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
if (alloc_buffer(purpose)->contains(obj)) {
assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
"should contain whole object");
alloc_buffer(purpose)->undo_allocation(obj, word_sz);
} else {
CollectedHeap::fill_with_object(obj, word_sz);
add_to_undo_waste(word_sz);
}
}
void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
_evac_failure_cl = evac_failure_cl;
}
OopsInHeapRegionClosure* evac_failure_closure() {
return _evac_failure_cl;
}
void set_evac_closure(G1ParScanHeapEvacClosure* evac_cl) {
_evac_cl = evac_cl;
}
void set_partial_scan_closure(G1ParScanPartialArrayClosure* partial_scan_cl) {
_partial_scan_cl = partial_scan_cl;
}
int* hash_seed() { return &_hash_seed; }
int queue_num() { return _queue_num; }
int term_attempts() { return _term_attempts; }
void note_term_attempt() { _term_attempts++; }
#if G1_DETAILED_STATS
int pushes() { return _pushes; }
int pops() { return _pops; }
int steals() { return _steals; }
int steal_attempts() { return _steal_attempts; }
int overflow_pushes() { return _overflow_pushes; }
void note_push() { _pushes++; }
void note_pop() { _pops++; }
void note_steal() { _steals++; }
void note_steal_attempt() { _steal_attempts++; }
void note_overflow_push() { _overflow_pushes++; }
#endif
void start_strong_roots() {
_start_strong_roots = os::elapsedTime();
}
void end_strong_roots() {
_strong_roots_time += (os::elapsedTime() - _start_strong_roots);
}
double strong_roots_time() { return _strong_roots_time; }
void start_term_time() {
note_term_attempt();
_start_term = os::elapsedTime();
}
void end_term_time() {
_term_time += (os::elapsedTime() - _start_term);
}
double term_time() { return _term_time; }
double elapsed() {
return os::elapsedTime() - _start;
}
size_t* surviving_young_words() {
// We add one to hide entry 0, which accumulates surviving words for
// age -1 regions (i.e. non-young ones)
return _surviving_young_words;
}
void retire_alloc_buffers() {
for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
size_t waste = _alloc_buffers[ap].words_remaining();
add_to_alloc_buffer_waste(waste);
_alloc_buffers[ap].retire(true, false);
}
}
private:
template <class T> void deal_with_reference(T* ref_to_scan) {
if (has_partial_array_mask(ref_to_scan)) {
_partial_scan_cl->do_oop_nv(ref_to_scan);
} else {
// Note: we can use "raw" versions of "region_containing" because
// "obj_to_scan" is definitely in the heap, and is not in a
// humongous region.
HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
_evac_cl->set_region(r);
_evac_cl->do_oop_nv(ref_to_scan);
}
}
public:
void trim_queue() {
// I've replicated the loop twice, first to drain the overflow
// queue, second to drain the task queue. This is better than
// having a single loop, which checks both conditions and, inside
// it, either pops the overflow queue or the task queue, as each
// loop is tighter. Also, the decision to drain the overflow queue
// first is not arbitrary, as the overflow queue is not visible
// to the other workers, whereas the task queue is. So, we want to
// drain the "invisible" entries first, while allowing the other
// workers to potentially steal the "visible" entries.
while (refs_to_scan() > 0 || overflowed_refs_to_scan() > 0) {
while (overflowed_refs_to_scan() > 0) {
StarTask ref_to_scan;
assert((oop*)ref_to_scan == NULL, "Constructed above");
pop_from_overflow_queue(ref_to_scan);
// We shouldn't have pushed it on the queue if it was not
// pointing into the CSet.
assert((oop*)ref_to_scan != NULL, "Follows from inner loop invariant");
if (ref_to_scan.is_narrow()) {
assert(UseCompressedOops, "Error");
narrowOop* p = (narrowOop*)ref_to_scan;
assert(!has_partial_array_mask(p) &&
_g1h->obj_in_cs(oopDesc::load_decode_heap_oop(p)), "sanity");
deal_with_reference(p);
} else {
oop* p = (oop*)ref_to_scan;
assert((has_partial_array_mask(p) && _g1h->obj_in_cs(clear_partial_array_mask(p))) ||
_g1h->obj_in_cs(oopDesc::load_decode_heap_oop(p)), "sanity");
deal_with_reference(p);
}
}
while (refs_to_scan() > 0) {
StarTask ref_to_scan;
assert((oop*)ref_to_scan == NULL, "Constructed above");
pop_from_queue(ref_to_scan);
if ((oop*)ref_to_scan != NULL) {
if (ref_to_scan.is_narrow()) {
assert(UseCompressedOops, "Error");
narrowOop* p = (narrowOop*)ref_to_scan;
assert(!has_partial_array_mask(p) &&
_g1h->obj_in_cs(oopDesc::load_decode_heap_oop(p)), "sanity");
deal_with_reference(p);
} else {
oop* p = (oop*)ref_to_scan;
assert((has_partial_array_mask(p) && _g1h->obj_in_cs(clear_partial_array_mask(p))) ||
_g1h->obj_in_cs(oopDesc::load_decode_heap_oop(p)), "sanity");
deal_with_reference(p);
}
}
}
}
}
};
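On allocate_slow above: the buffer is only retired and refilled when the request is small against the waste budget (word_sz * 100 < buffer_size * ParallelGCBufferWastePct); larger requests go straight to the shared allocator so a nearly full buffer is not discarded for them. A compact sketch of that policy under assumed sizes, with a toy allocator standing in for par_allocate_during_gc:

#include <cstdint>
#include <cstdlib>

typedef uintptr_t HeapWord;

static const size_t buffer_words = 4096;
static const size_t waste_pct    = 10;  // stand-in for ParallelGCBufferWastePct

// Toy stand-in for the shared GC allocator.
static HeapWord* shared_allocate(size_t word_sz) {
  return static_cast<HeapWord*>(std::malloc(word_sz * sizeof(HeapWord)));
}

struct AllocBuffer {
  HeapWord* _top; HeapWord* _end;
  AllocBuffer() : _top(0), _end(0) {}
  HeapWord* allocate(size_t n) {
    if (_top + n > _end) return 0;      // does not fit in this buffer
    HeapWord* r = _top; _top += n; return r;
  }
  void set_buf(HeapWord* b) { _top = b; _end = b + buffer_words; }
  void retire() { /* the real code fills the unused tail with a dummy object */ }
};

HeapWord* allocate_slow(AllocBuffer* buf, size_t word_sz) {
  if (word_sz * 100 < buffer_words * waste_pct) {
    // Small request: retire the buffer, wasting its tail, and refill.
    buf->retire();
    HeapWord* b = shared_allocate(buffer_words);
    if (b == 0) return 0;               // caller handles allocation failure
    buf->set_buf(b);
    return buf->allocate(word_sz);
  }
  // Large request: allocate it directly and keep the current buffer.
  return shared_allocate(word_sz);
}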

View File

@ -293,10 +293,6 @@ void G1CollectorPolicy::init() {
if (G1SteadyStateUsed < 50) {
vm_exit_during_initialization("G1SteadyStateUsed must be at least 50%.");
}
if (UseConcMarkSweepGC) {
vm_exit_during_initialization("-XX:+UseG1GC is incompatible with "
"-XX:+UseConcMarkSweepGC.");
}
initialize_gc_policy_counters();

View File

@ -1097,6 +1097,10 @@ public:
_recorded_survivor_tail = tail;
}
size_t recorded_survivor_regions() {
return _recorded_survivor_regions;
}
void record_thread_age_table(ageTable* age_table)
{
_survivors_age_table.merge_par(age_table);

View File

@ -42,18 +42,6 @@ public:
virtual void set_region(HeapRegion* from) { _from = from; }
};
class G1ScanAndBalanceClosure : public OopClosure {
G1CollectedHeap* _g1;
static int _nq;
public:
G1ScanAndBalanceClosure(G1CollectedHeap* g1) : _g1(g1) { }
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p) { guarantee(false, "NYI"); }
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p) { guarantee(false, "NYI"); }
};
class G1ParClosureSuper : public OopsInHeapRegionClosure {
protected:
G1CollectedHeap* _g1;
@ -69,34 +57,32 @@ class G1ParScanClosure : public G1ParClosureSuper {
public:
G1ParScanClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
G1ParClosureSuper(g1, par_scan_state) { }
void do_oop_nv(oop* p); // should be made inline
inline void do_oop_nv(narrowOop* p) { guarantee(false, "NYI"); }
template <class T> void do_oop_nv(T* p);
virtual void do_oop(oop* p) { do_oop_nv(p); }
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};
#define G1_PARTIAL_ARRAY_MASK 1
#define G1_PARTIAL_ARRAY_MASK 0x2
inline bool has_partial_array_mask(oop* ref) {
return (intptr_t) ref & G1_PARTIAL_ARRAY_MASK;
template <class T> inline bool has_partial_array_mask(T* ref) {
return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
}
inline oop* set_partial_array_mask(oop obj) {
return (oop*) ((intptr_t) obj | G1_PARTIAL_ARRAY_MASK);
template <class T> inline T* set_partial_array_mask(T obj) {
assert(((uintptr_t)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
return (T*) ((uintptr_t)obj | G1_PARTIAL_ARRAY_MASK);
}
inline oop clear_partial_array_mask(oop* ref) {
return oop((intptr_t) ref & ~G1_PARTIAL_ARRAY_MASK);
template <class T> inline oop clear_partial_array_mask(T* ref) {
return oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
}
class G1ParScanPartialArrayClosure : public G1ParClosureSuper {
G1ParScanClosure _scanner;
template <class T> void process_array_chunk(oop obj, int start, int end);
public:
G1ParScanPartialArrayClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
G1ParClosureSuper(g1, par_scan_state), _scanner(g1, par_scan_state) { }
void do_oop_nv(oop* p);
void do_oop_nv(narrowOop* p) { guarantee(false, "NYI"); }
template <class T> void do_oop_nv(T* p);
virtual void do_oop(oop* p) { do_oop_nv(p); }
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};
@ -105,7 +91,7 @@ public:
class G1ParCopyHelper : public G1ParClosureSuper {
G1ParScanClosure *_scanner;
protected:
void mark_forwardee(oop* p);
template <class T> void mark_forwardee(T* p);
oop copy_to_survivor_space(oop obj);
public:
G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,
@ -117,36 +103,35 @@ template<bool do_gen_barrier, G1Barrier barrier,
bool do_mark_forwardee, bool skip_cset_test>
class G1ParCopyClosure : public G1ParCopyHelper {
G1ParScanClosure _scanner;
void do_oop_work(oop* p);
void do_oop_work(narrowOop* p) { guarantee(false, "NYI"); }
template <class T> void do_oop_work(T* p);
public:
G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
_scanner(g1, par_scan_state), G1ParCopyHelper(g1, par_scan_state, &_scanner) { }
inline void do_oop_nv(oop* p) {
template <class T> void do_oop_nv(T* p) {
do_oop_work(p);
if (do_mark_forwardee)
mark_forwardee(p);
}
inline void do_oop_nv(narrowOop* p) { guarantee(false, "NYI"); }
virtual void do_oop(oop* p) { do_oop_nv(p); }
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};
typedef G1ParCopyClosure<false, G1BarrierNone, false, false> G1ParScanExtRootClosure;
typedef G1ParCopyClosure<true, G1BarrierNone, false, false> G1ParScanPermClosure;
typedef G1ParCopyClosure<false, G1BarrierRS, false, false> G1ParScanHeapRSClosure;
typedef G1ParCopyClosure<false, G1BarrierNone, true, false> G1ParScanAndMarkExtRootClosure;
typedef G1ParCopyClosure<true, G1BarrierNone, true, false> G1ParScanAndMarkPermClosure;
typedef G1ParCopyClosure<false, G1BarrierRS, false, false> G1ParScanHeapRSClosure;
typedef G1ParCopyClosure<false, G1BarrierRS, true, false> G1ParScanAndMarkHeapRSClosure;
// This is the only case when we set skip_cset_test. Basically, this
// closure is (should?) only be called directly while we're draining
// the overflow and task queues. In that case we know that the
// reference in question points into the collection set, otherwise we
// would not have pushed it on the queue.
typedef G1ParCopyClosure<false, G1BarrierEvac, false, true> G1ParScanHeapEvacClosure;
// would not have pushed it on the queue. The following is defined in
// g1_specialized_oop_closures.hpp.
// typedef G1ParCopyClosure<false, G1BarrierEvac, false, true> G1ParScanHeapEvacClosure;
// We need a separate closure to handle references during evacuation
// failure processing, as it cannot assume that the reference already
// points to the collection set (like G1ParScanHeapEvacClosure does).
// failure processing, as we cannot assume that the reference already
// points into the collection set (like G1ParScanHeapEvacClosure does).
typedef G1ParCopyClosure<false, G1BarrierEvac, false, false> G1ParScanHeapEvacFailureClosure;
class FilterIntoCSClosure: public OopClosure {
@ -158,10 +143,9 @@ public:
G1CollectedHeap* g1, OopClosure* oc) :
_dcto_cl(dcto_cl), _g1(g1), _oc(oc)
{}
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p) { guarantee(false, "NYI"); }
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p) { guarantee(false, "NYI"); }
template <class T> void do_oop_nv(T* p);
virtual void do_oop(oop* p) { do_oop_nv(p); }
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
bool apply_to_weak_ref_discovered_field() { return true; }
bool do_header() { return false; }
};
@ -174,10 +158,9 @@ public:
OopsInHeapRegionClosure* oc) :
_g1(g1), _oc(oc)
{}
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p) { guarantee(false, "NYI"); }
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p) { guarantee(false, "NYI"); }
template <class T> void do_oop_nv(T* p);
virtual void do_oop(oop* p) { do_oop_nv(p); }
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
bool apply_to_weak_ref_discovered_field() { return true; }
bool do_header() { return false; }
void set_region(HeapRegion* from) {
@ -195,10 +178,9 @@ public:
ConcurrentMark* cm)
: _g1(g1), _oc(oc), _cm(cm) { }
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p) { guarantee(false, "NYI"); }
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p) { guarantee(false, "NYI"); }
template <class T> void do_oop_nv(T* p);
virtual void do_oop(oop* p) { do_oop_nv(p); }
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
bool apply_to_weak_ref_discovered_field() { return true; }
bool do_header() { return false; }
void set_region(HeapRegion* from) {
@ -213,10 +195,9 @@ class FilterOutOfRegionClosure: public OopClosure {
int _out_of_region;
public:
FilterOutOfRegionClosure(HeapRegion* r, OopClosure* oc);
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p) { guarantee(false, "NYI"); }
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p) { guarantee(false, "NYI"); }
template <class T> void do_oop_nv(T* p);
virtual void do_oop(oop* p) { do_oop_nv(p); }
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
bool apply_to_weak_ref_discovered_field() { return true; }
bool do_header() { return false; }
int out_of_region() { return _out_of_region; }
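On the mask change above (0x1 becomes 0x2): queue entries are now StarTask words whose lowest bit can already mean "this is a narrowOop*", so the partial-array tag plausibly moves up to bit 1, which 4-byte alignment also leaves free. A round-trip demo of the templatified helpers, with the oop type stubbed:

#include <cassert>
#include <cstdint>

class oopDesc; typedef oopDesc* oop;

#define G1_PARTIAL_ARRAY_MASK 0x2

template <class T> inline bool has_partial_array_mask(T* ref) {
  return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
}
template <class T> inline T* set_partial_array_mask(T obj) {
  assert(((uintptr_t)obj & G1_PARTIAL_ARRAY_MASK) == 0);  // no information loss
  return (T*)((uintptr_t)obj | G1_PARTIAL_ARRAY_MASK);
}
template <class T> inline oop clear_partial_array_mask(T* ref) {
  return oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
}

int main() {
  oop obj = (oop)0x10000;                          // any 4-byte-aligned address
  oop* tagged = set_partial_array_mask(obj);
  assert(has_partial_array_mask(tagged));          // the tag is visible...
  assert(clear_partial_array_mask(tagged) == obj); // ...and fully reversible
  return 0;
}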

View File

@ -31,9 +31,10 @@
// perf-critical inner loop.
#define FILTERINTOCSCLOSURE_DOHISTOGRAMCOUNT 0
inline void FilterIntoCSClosure::do_oop_nv(oop* p) {
oop obj = *p;
if (obj != NULL && _g1->obj_in_cs(obj)) {
template <class T> inline void FilterIntoCSClosure::do_oop_nv(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop) &&
_g1->obj_in_cs(oopDesc::decode_heap_oop_not_null(heap_oop))) {
_oc->do_oop(p);
#if FILTERINTOCSCLOSURE_DOHISTOGRAMCOUNT
_dcto_cl->incr_count();
@ -41,44 +42,32 @@ inline void FilterIntoCSClosure::do_oop_nv(oop* p) {
}
}
inline void FilterIntoCSClosure::do_oop(oop* p)
{
do_oop_nv(p);
}
#define FILTEROUTOFREGIONCLOSURE_DOHISTOGRAMCOUNT 0
inline void FilterOutOfRegionClosure::do_oop_nv(oop* p) {
oop obj = *p;
HeapWord* obj_hw = (HeapWord*)obj;
if (obj_hw != NULL && (obj_hw < _r_bottom || obj_hw >= _r_end)) {
_oc->do_oop(p);
template <class T> inline void FilterOutOfRegionClosure::do_oop_nv(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
HeapWord* obj_hw = (HeapWord*)oopDesc::decode_heap_oop_not_null(heap_oop);
if (obj_hw < _r_bottom || obj_hw >= _r_end) {
_oc->do_oop(p);
#if FILTEROUTOFREGIONCLOSURE_DOHISTOGRAMCOUNT
_out_of_region++;
_out_of_region++;
#endif
}
}
}
inline void FilterOutOfRegionClosure::do_oop(oop* p)
{
do_oop_nv(p);
}
inline void FilterInHeapRegionAndIntoCSClosure::do_oop_nv(oop* p) {
oop obj = *p;
if (obj != NULL && _g1->obj_in_cs(obj))
template <class T> inline void FilterInHeapRegionAndIntoCSClosure::do_oop_nv(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop) &&
_g1->obj_in_cs(oopDesc::decode_heap_oop_not_null(heap_oop)))
_oc->do_oop(p);
}
inline void FilterInHeapRegionAndIntoCSClosure::do_oop(oop* p)
{
do_oop_nv(p);
}
inline void FilterAndMarkInHeapRegionAndIntoCSClosure::do_oop_nv(oop* p) {
oop obj = *p;
if (obj != NULL) {
template <class T> inline void FilterAndMarkInHeapRegionAndIntoCSClosure::do_oop_nv(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
HeapRegion* hr = _g1->heap_region_containing((HeapWord*) obj);
if (hr != NULL) {
if (hr->in_collection_set())
@ -89,24 +78,29 @@ inline void FilterAndMarkInHeapRegionAndIntoCSClosure::do_oop_nv(oop* p) {
}
}
inline void FilterAndMarkInHeapRegionAndIntoCSClosure::do_oop(oop* p)
{
do_oop_nv(p);
}
// This closure is applied to the fields of the objects that have just been copied.
template <class T> inline void G1ParScanClosure::do_oop_nv(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
inline void G1ScanAndBalanceClosure::do_oop_nv(oop* p) {
RefToScanQueue* q;
if (ParallelGCThreads > 0) {
// Deal the work out equally.
_nq = (_nq + 1) % ParallelGCThreads;
q = _g1->task_queue(_nq);
} else {
q = _g1->task_queue(0);
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
if (_g1->in_cset_fast_test(obj)) {
// We're not going to even bother checking whether the object is
// already forwarded or not, as this usually causes an immediate
// stall. We'll try to prefetch the object (for write, given that
// we might need to install the forwarding reference) and we'll
// get back to it when we pop it from the queue
Prefetch::write(obj->mark_addr(), 0);
Prefetch::read(obj->mark_addr(), (HeapWordSize*2));
// slightly paranoid test; I'm trying to catch potential
// problems before we go into push_on_queue to know where the
// problem is coming from
assert(obj == oopDesc::load_decode_heap_oop(p),
"p should still be pointing to obj");
_par_scan_state->push_on_queue(p);
} else {
_par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
}
}
bool nooverflow = q->push(p);
guarantee(nooverflow, "Overflow during popularity region processing");
}
inline void G1ScanAndBalanceClosure::do_oop(oop* p) {
do_oop_nv(p);
}
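The new G1ParScanClosure body above prefetches the candidate object's mark word for write before queueing the location, betting that the cache line will be resident by the time the entry is popped and a forwarding pointer must be installed. A toy sketch of the prefetch-then-queue pattern; the intrinsic assumes an x86 toolchain and the types are stand-ins:

#include <deque>
#include <xmmintrin.h>   // _mm_prefetch

struct Obj { long mark; long fields[8]; };

static std::deque<Obj**> scan_queue;

inline void scan_field(Obj** p, bool points_into_cset) {
  Obj* obj = *p;
  if (obj == 0 || !points_into_cset) return;
  // Warm the header line now; the copying code will CAS a forwarding
  // pointer into it when this entry is finally popped from the queue.
  _mm_prefetch(reinterpret_cast<const char*>(&obj->mark), _MM_HINT_T0);
  scan_queue.push_back(p);   // queue the location, not the value
}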

View File

@ -65,11 +65,10 @@ public:
void set_region(HeapRegion* from) {
_blk->set_region(from);
}
virtual void do_oop(narrowOop* p) {
guarantee(false, "NYI");
}
virtual void do_oop(oop* p) {
oop obj = *p;
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
virtual void do_oop( oop* p) { do_oop_work(p); }
template <class T> void do_oop_work(T* p) {
oop obj = oopDesc::load_decode_heap_oop(p);
if (_g1->obj_in_cs(obj)) _blk->do_oop(p);
}
bool apply_to_weak_ref_discovered_field() { return true; }
@ -110,11 +109,10 @@ class VerifyRSCleanCardOopClosure: public OopClosure {
public:
VerifyRSCleanCardOopClosure(G1CollectedHeap* g1) : _g1(g1) {}
virtual void do_oop(narrowOop* p) {
guarantee(false, "NYI");
}
virtual void do_oop(oop* p) {
oop obj = *p;
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
virtual void do_oop( oop* p) { do_oop_work(p); }
template <class T> void do_oop_work(T* p) {
oop obj = oopDesc::load_decode_heap_oop(p);
HeapRegion* to = _g1->heap_region_containing(obj);
guarantee(to == NULL || !to->in_collection_set(),
"Missed a rem set member.");
@ -129,9 +127,9 @@ HRInto_G1RemSet::HRInto_G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
{
_seq_task = new SubTasksDone(NumSeqTasks);
guarantee(n_workers() > 0, "There should be some workers");
_new_refs = NEW_C_HEAP_ARRAY(GrowableArray<oop*>*, n_workers());
_new_refs = NEW_C_HEAP_ARRAY(GrowableArray<OopOrNarrowOopStar>*, n_workers());
for (uint i = 0; i < n_workers(); i++) {
_new_refs[i] = new (ResourceObj::C_HEAP) GrowableArray<oop*>(8192,true);
_new_refs[i] = new (ResourceObj::C_HEAP) GrowableArray<OopOrNarrowOopStar>(8192,true);
}
}
@ -140,7 +138,7 @@ HRInto_G1RemSet::~HRInto_G1RemSet() {
for (uint i = 0; i < n_workers(); i++) {
delete _new_refs[i];
}
FREE_C_HEAP_ARRAY(GrowableArray<oop*>*, _new_refs);
FREE_C_HEAP_ARRAY(GrowableArray<OopOrNarrowOopStar>*, _new_refs);
}
void CountNonCleanMemRegionClosure::do_MemRegion(MemRegion mr) {
@ -428,15 +426,15 @@ public:
}
};
void
HRInto_G1RemSet::scanNewRefsRS(OopsInHeapRegionClosure* oc,
int worker_i) {
template <class T> void
HRInto_G1RemSet::scanNewRefsRS_work(OopsInHeapRegionClosure* oc,
int worker_i) {
double scan_new_refs_start_sec = os::elapsedTime();
G1CollectedHeap* g1h = G1CollectedHeap::heap();
CardTableModRefBS* ct_bs = (CardTableModRefBS*) (g1h->barrier_set());
for (int i = 0; i < _new_refs[worker_i]->length(); i++) {
oop* p = _new_refs[worker_i]->at(i);
oop obj = *p;
T* p = (T*) _new_refs[worker_i]->at(i);
oop obj = oopDesc::load_decode_heap_oop(p);
// *p was in the collection set when p was pushed on "_new_refs", but
// another thread may have processed this location from an RS, so it
// might not point into the CS any longer. If so, it's obviously been
@ -549,11 +547,10 @@ class UpdateRSetOopsIntoCSImmediate : public OopClosure {
G1CollectedHeap* _g1;
public:
UpdateRSetOopsIntoCSImmediate(G1CollectedHeap* g1) : _g1(g1) { }
virtual void do_oop(narrowOop* p) {
guarantee(false, "NYI");
}
virtual void do_oop(oop* p) {
HeapRegion* to = _g1->heap_region_containing(*p);
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
virtual void do_oop( oop* p) { do_oop_work(p); }
template <class T> void do_oop_work(T* p) {
HeapRegion* to = _g1->heap_region_containing(oopDesc::load_decode_heap_oop(p));
if (to->in_collection_set()) {
to->rem_set()->add_reference(p, 0);
}
@ -567,11 +564,10 @@ class UpdateRSetOopsIntoCSDeferred : public OopClosure {
public:
UpdateRSetOopsIntoCSDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
_g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) { }
virtual void do_oop(narrowOop* p) {
guarantee(false, "NYI");
}
virtual void do_oop(oop* p) {
oop obj = *p;
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
virtual void do_oop( oop* p) { do_oop_work(p); }
template <class T> void do_oop_work(T* p) {
oop obj = oopDesc::load_decode_heap_oop(p);
if (_g1->obj_in_cs(obj)) {
size_t card_index = _ct_bs->index_for(p);
if (_ct_bs->mark_card_deferred(card_index)) {
@ -581,10 +577,10 @@ public:
}
};
void HRInto_G1RemSet::new_refs_iterate(OopClosure* cl) {
template <class T> void HRInto_G1RemSet::new_refs_iterate_work(OopClosure* cl) {
for (size_t i = 0; i < n_workers(); i++) {
for (int j = 0; j < _new_refs[i]->length(); j++) {
oop* p = _new_refs[i]->at(j);
T* p = (T*) _new_refs[i]->at(j);
cl->do_oop(p);
}
}

View File

@ -62,10 +62,12 @@ public:
// If "this" is of the given subtype, return "this", else "NULL".
virtual HRInto_G1RemSet* as_HRInto_G1RemSet() { return NULL; }
// Record, if necessary, the fact that *p (where "p" is in region "from")
// has changed to its new value.
// Record, if necessary, the fact that *p (where "p" is in region "from",
// and is, a fortiori, required to be non-NULL) has changed to its new value.
virtual void write_ref(HeapRegion* from, oop* p) = 0;
virtual void write_ref(HeapRegion* from, narrowOop* p) = 0;
virtual void par_write_ref(HeapRegion* from, oop* p, int tid) = 0;
virtual void par_write_ref(HeapRegion* from, narrowOop* p, int tid) = 0;
// Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region
// or card, respectively, such that a region or card with a corresponding
@ -105,7 +107,9 @@ public:
// Nothing is necessary in the version below.
void write_ref(HeapRegion* from, oop* p) {}
void write_ref(HeapRegion* from, narrowOop* p) {}
void par_write_ref(HeapRegion* from, oop* p, int tid) {}
void par_write_ref(HeapRegion* from, narrowOop* p, int tid) {}
void scrub(BitMap* region_bm, BitMap* card_bm) {}
void scrub_par(BitMap* region_bm, BitMap* card_bm,
@ -143,8 +147,19 @@ protected:
// their references into the collection summarized in "_new_refs".
bool _par_traversal_in_progress;
void set_par_traversal(bool b) { _par_traversal_in_progress = b; }
GrowableArray<oop*>** _new_refs;
void new_refs_iterate(OopClosure* cl);
GrowableArray<OopOrNarrowOopStar>** _new_refs;
template <class T> void new_refs_iterate_work(OopClosure* cl);
void new_refs_iterate(OopClosure* cl) {
if (UseCompressedOops) {
new_refs_iterate_work<narrowOop>(cl);
} else {
new_refs_iterate_work<oop>(cl);
}
}
protected:
template <class T> void write_ref_nv(HeapRegion* from, T* p);
template <class T> void par_write_ref_nv(HeapRegion* from, T* p, int tid);
public:
// This is called to reset dual hash tables after the gc pause
@ -161,7 +176,14 @@ public:
void prepare_for_oops_into_collection_set_do();
void cleanup_after_oops_into_collection_set_do();
void scanRS(OopsInHeapRegionClosure* oc, int worker_i);
void scanNewRefsRS(OopsInHeapRegionClosure* oc, int worker_i);
template <class T> void scanNewRefsRS_work(OopsInHeapRegionClosure* oc, int worker_i);
void scanNewRefsRS(OopsInHeapRegionClosure* oc, int worker_i) {
if (UseCompressedOops) {
scanNewRefsRS_work<narrowOop>(oc, worker_i);
} else {
scanNewRefsRS_work<oop>(oc, worker_i);
}
}
void updateRS(int worker_i);
HeapRegion* calculateStartRegion(int i);
@ -172,12 +194,22 @@ public:
// Record, if necessary, the fact that *p (where "p" is in region "from",
// which is required to be non-NULL) has changed to a new non-NULL value.
inline void write_ref(HeapRegion* from, oop* p);
// The "_nv" version is the same; it exists just so that it is not virtual.
inline void write_ref_nv(HeapRegion* from, oop* p);
// [Below the virtual version calls a non-virtual protected
// workhorse that is templatified for narrow vs wide oop.]
inline void write_ref(HeapRegion* from, oop* p) {
write_ref_nv(from, p);
}
inline void write_ref(HeapRegion* from, narrowOop* p) {
write_ref_nv(from, p);
}
inline void par_write_ref(HeapRegion* from, oop* p, int tid) {
par_write_ref_nv(from, p, tid);
}
inline void par_write_ref(HeapRegion* from, narrowOop* p, int tid) {
par_write_ref_nv(from, p, tid);
}
inline bool self_forwarded(oop obj);
inline void par_write_ref(HeapRegion* from, oop* p, int tid);
bool self_forwarded(oop obj);
void scrub(BitMap* region_bm, BitMap* card_bm);
void scrub_par(BitMap* region_bm, BitMap* card_bm,
@ -208,6 +240,9 @@ class UpdateRSOopClosure: public OopClosure {
HeapRegion* _from;
HRInto_G1RemSet* _rs;
int _worker_i;
template <class T> void do_oop_work(T* p);
public:
UpdateRSOopClosure(HRInto_G1RemSet* rs, int worker_i = 0) :
_from(NULL), _rs(rs), _worker_i(worker_i) {
@ -219,11 +254,10 @@ public:
_from = from;
}
virtual void do_oop(narrowOop* p);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
virtual void do_oop(oop* p) { do_oop_work(p); }
// Override: this closure is idempotent.
// bool idempotent() { return true; }
bool apply_to_weak_ref_discovered_field() { return true; }
};
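new_refs_iterate and scanNewRefsRS above settle the narrow-versus-wide question once, at the entry point, by branching on UseCompressedOops and instantiating the template workhorse at the matching width; the inner loops then cast each stored untyped location accordingly. A minimal sketch of that dispatch, with the flag and element types as stand-ins:

#include <cstdint>
#include <vector>

class oopDesc; typedef oopDesc* oop;
typedef uint32_t narrowOop;

static bool UseCompressedOops = false;   // stand-in for the runtime flag
static std::vector<void*> new_refs;      // untyped oop locations, as in the diff

template <class T> static int count_live_work() {
  int live = 0;
  for (size_t i = 0; i < new_refs.size(); i++) {
    T* p = (T*) new_refs[i];             // reinterpret at the chosen width
    if (*p != 0) live++;
  }
  return live;
}

static int count_live() {
  // One branch at the boundary, monomorphic loops inside.
  return UseCompressedOops ? count_live_work<narrowOop>()
                           : count_live_work<oop>();
}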

View File

@ -30,12 +30,8 @@ inline size_t G1RemSet::n_workers() {
}
}
inline void HRInto_G1RemSet::write_ref_nv(HeapRegion* from, oop* p) {
par_write_ref(from, p, 0);
}
inline void HRInto_G1RemSet::write_ref(HeapRegion* from, oop* p) {
write_ref_nv(from, p);
template <class T> inline void HRInto_G1RemSet::write_ref_nv(HeapRegion* from, T* p) {
par_write_ref_nv(from, p, 0);
}
inline bool HRInto_G1RemSet::self_forwarded(oop obj) {
@ -43,8 +39,8 @@ inline bool HRInto_G1RemSet::self_forwarded(oop obj) {
return result;
}
inline void HRInto_G1RemSet::par_write_ref(HeapRegion* from, oop* p, int tid) {
oop obj = *p;
template <class T> inline void HRInto_G1RemSet::par_write_ref_nv(HeapRegion* from, T* p, int tid) {
oop obj = oopDesc::load_decode_heap_oop(p);
#ifdef ASSERT
// can't do because of races
// assert(obj == NULL || obj->is_oop(), "expected an oop");
@ -71,7 +67,7 @@ inline void HRInto_G1RemSet::par_write_ref(HeapRegion* from, oop* p, int tid) {
// false during the evacuation failure handling.
if (_par_traversal_in_progress &&
to->in_collection_set() && !self_forwarded(obj)) {
_new_refs[tid]->push(p);
_new_refs[tid]->push((void*)p);
// Deferred updates to the Cset are either discarded (in the normal case),
// or processed (if an evacuation failure occurs) at the end
// of the collection.
@ -89,11 +85,7 @@ inline void HRInto_G1RemSet::par_write_ref(HeapRegion* from, oop* p, int tid) {
}
}
inline void UpdateRSOopClosure::do_oop(narrowOop* p) {
guarantee(false, "NYI");
}
inline void UpdateRSOopClosure::do_oop(oop* p) {
template <class T> inline void UpdateRSOopClosure::do_oop_work(T* p) {
assert(_from != NULL, "from region must be non-NULL");
_rs->par_write_ref(_from, p, _worker_i);
}
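
A hedged sketch of the load-then-decode discipline that replaces the old raw dereference (oop obj = *p) above: the slot is read once in its own encoding, NULL is filtered before widening, and only non-null values are decoded. The base, shift, and names are assumptions for illustration, not HotSpot's:

#include <cassert>
#include <cstdint>
#include <cstdio>

alignas(8) static char heap[1 << 20];
static char* const narrow_base  = heap;   // assumed compression base
static const int   narrow_shift = 3;      // 8-byte object alignment

typedef uint32_t narrow_ref;

inline bool is_null(narrow_ref v) { return v == 0; }       // 0 reserved for NULL
inline bool is_null(void* v)      { return v == nullptr; }

inline void* decode_not_null(narrow_ref v) {
  assert(!is_null(v));
  return narrow_base + (uint64_t(v) << narrow_shift);
}
inline void* decode_not_null(void* v) { assert(!is_null(v)); return v; }

// Templated barrier body: works unchanged for both slot widths.
template <class T>
void* load_decode(T* p) {
  T raw = *p;                                // one load of the raw slot
  if (is_null(raw)) return nullptr;          // NULL filtered before decoding
  return decode_not_null(raw);
}

int main() {
  narrow_ref slot = narrow_ref(4096 >> narrow_shift);  // encodes heap+4096
  std::printf("decoded: %p (heap+4096 = %p)\n",
              load_decode(&slot), (void*)(heap + 4096));
}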


@ -34,6 +34,7 @@ G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(MemRegion whole_heap,
void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
assert(pre_val->is_oop_or_null(true), "Error");
if (!JavaThread::satb_mark_queue_set().active()) return;
Thread* thr = Thread::current();
if (thr->is_Java_thread()) {
@ -46,32 +47,31 @@ void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
}
// When we know the current java thread:
void
G1SATBCardTableModRefBS::write_ref_field_pre_static(void* field,
oop newVal,
template <class T> void
G1SATBCardTableModRefBS::write_ref_field_pre_static(T* field,
oop new_val,
JavaThread* jt) {
if (!JavaThread::satb_mark_queue_set().active()) return;
assert(!UseCompressedOops, "Else will need to modify this to deal with narrowOop");
oop preVal = *(oop*)field;
if (preVal != NULL) {
jt->satb_mark_queue().enqueue(preVal);
T heap_oop = oopDesc::load_heap_oop(field);
if (!oopDesc::is_null(heap_oop)) {
oop pre_val = oopDesc::decode_heap_oop_not_null(heap_oop);
assert(pre_val->is_oop(true /* ignore mark word */), "Error");
jt->satb_mark_queue().enqueue(pre_val);
}
}
void
G1SATBCardTableModRefBS::write_ref_array_pre(MemRegion mr) {
template <class T> void
G1SATBCardTableModRefBS::write_ref_array_pre_work(T* dst, int count) {
if (!JavaThread::satb_mark_queue_set().active()) return;
assert(!UseCompressedOops, "Else will need to modify this to deal with narrowOop");
oop* elem_ptr = (oop*)mr.start();
while ((HeapWord*)elem_ptr < mr.end()) {
oop elem = *elem_ptr;
if (elem != NULL) enqueue(elem);
elem_ptr++;
T* elem_ptr = dst;
for (int i = 0; i < count; i++, elem_ptr++) {
T heap_oop = oopDesc::load_heap_oop(elem_ptr);
if (!oopDesc::is_null(heap_oop)) {
enqueue(oopDesc::decode_heap_oop_not_null(heap_oop));
}
}
}
G1SATBCardTableLoggingModRefBS::
G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
int max_covered_regions) :


@ -47,31 +47,41 @@ public:
// This notes that we don't need to access any BarrierSet data
// structures, so this can be called from a static context.
static void write_ref_field_pre_static(void* field, oop newVal) {
assert(!UseCompressedOops, "Else needs to be templatized");
oop preVal = *((oop*)field);
if (preVal != NULL) {
enqueue(preVal);
template <class T> static void write_ref_field_pre_static(T* field, oop newVal) {
T heap_oop = oopDesc::load_heap_oop(field);
if (!oopDesc::is_null(heap_oop)) {
enqueue(oopDesc::decode_heap_oop(heap_oop));
}
}
// When we know the current java thread:
static void write_ref_field_pre_static(void* field, oop newVal,
JavaThread* jt);
template <class T> static void write_ref_field_pre_static(T* field, oop newVal,
JavaThread* jt);
// We export this to make it available in cases where the static
// type of the barrier set is known. Note that it is non-virtual.
inline void inline_write_ref_field_pre(void* field, oop newVal) {
template <class T> inline void inline_write_ref_field_pre(T* field, oop newVal) {
write_ref_field_pre_static(field, newVal);
}
// This is the more general virtual version.
void write_ref_field_pre_work(void* field, oop new_val) {
// These are the more general virtual versions.
virtual void write_ref_field_pre_work(oop* field, oop new_val) {
inline_write_ref_field_pre(field, new_val);
}
virtual void write_ref_field_pre_work(narrowOop* field, oop new_val) {
inline_write_ref_field_pre(field, new_val);
}
virtual void write_ref_field_pre_work(void* field, oop new_val) {
guarantee(false, "Not needed");
}
virtual void write_ref_array_pre(MemRegion mr);
template <class T> void write_ref_array_pre_work(T* dst, int count);
virtual void write_ref_array_pre(oop* dst, int count) {
write_ref_array_pre_work(dst, count);
}
virtual void write_ref_array_pre(narrowOop* dst, int count) {
write_ref_array_pre_work(dst, count);
}
};
// Adds card-table logging to the post-barrier.
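
The rewritten barrier above implements SATB ("snapshot at the beginning") discovery: the value being overwritten is enqueued so concurrent marking still traces the old object graph, and the field and array forms share one NULL filter. A self-contained sketch of that shape, with illustrative stand-ins for the queue machinery:

#include <cstdio>
#include <vector>

struct Obj {};

static std::vector<Obj*> satb_queue;    // per-thread queue in the real VM
static bool marking_active = true;      // satb_mark_queue_set().active() stand-in

inline void enqueue(Obj* pre_val) { satb_queue.push_back(pre_val); }

// Field form: remember the previous value, then perform the store.
void write_ref_field(Obj** field, Obj* new_val) {
  if (marking_active) {
    Obj* pre_val = *field;              // the value about to be overwritten
    if (pre_val != nullptr) enqueue(pre_val);
  }
  *field = new_val;
}

// Array form: the same NULL filter, element by element, before a bulk store.
void write_ref_array_pre(Obj** dst, int count) {
  if (!marking_active) return;
  for (int i = 0; i < count; i++) {
    if (dst[i] != nullptr) enqueue(dst[i]);
  }
}

int main() {
  Obj a, b;
  Obj* field = &a;
  write_ref_field(&field, &b);
  std::printf("queued %zu old value(s)\n", satb_queue.size());
}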


@ -80,9 +80,6 @@
develop(bool, G1TraceConcurrentRefinement, false, \
"Trace G1 concurrent refinement") \
\
develop(bool, G1ConcMark, true, \
"If true, run concurrent marking for G1") \
\
product(intx, G1MarkStackSize, 2 * 1024 * 1024, \
"Size of the mark stack for concurrent marking.") \
\


@ -37,14 +37,12 @@ template<bool do_gen_barrier, G1Barrier barrier,
class G1ParCopyClosure;
class G1ParScanClosure;
typedef G1ParCopyClosure<false, G1BarrierEvac, false, true>
G1ParScanHeapEvacClosure;
typedef G1ParCopyClosure<false, G1BarrierEvac, false, true> G1ParScanHeapEvacClosure;
class FilterIntoCSClosure;
class FilterOutOfRegionClosure;
class FilterInHeapRegionAndIntoCSClosure;
class FilterAndMarkInHeapRegionAndIntoCSClosure;
class G1ScanAndBalanceClosure;
#ifdef FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES
#error "FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES already defined."
@ -56,8 +54,7 @@ class G1ScanAndBalanceClosure;
f(FilterIntoCSClosure,_nv) \
f(FilterOutOfRegionClosure,_nv) \
f(FilterInHeapRegionAndIntoCSClosure,_nv) \
f(FilterAndMarkInHeapRegionAndIntoCSClosure,_nv) \
f(G1ScanAndBalanceClosure,_nv)
f(FilterAndMarkInHeapRegionAndIntoCSClosure,_nv)
#ifdef FURTHER_SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES
#error "FURTHER_SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES already defined."


@ -66,16 +66,16 @@ public:
bool failures() { return _failures; }
int n_failures() { return _n_failures; }
virtual void do_oop(narrowOop* p) {
guarantee(false, "NYI");
}
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
virtual void do_oop( oop* p) { do_oop_work(p); }
void do_oop(oop* p) {
template <class T> void do_oop_work(T* p) {
assert(_containing_obj != NULL, "Precondition");
assert(!_g1h->is_obj_dead_cond(_containing_obj, _use_prev_marking),
"Precondition");
oop obj = *p;
if (obj != NULL) {
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
bool failed = false;
if (!_g1h->is_in_closed_subset(obj) ||
_g1h->is_obj_dead_cond(obj, _use_prev_marking)) {
@ -106,8 +106,8 @@ public:
}
if (!_g1h->full_collection()) {
HeapRegion* from = _g1h->heap_region_containing(p);
HeapRegion* to = _g1h->heap_region_containing(*p);
HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
HeapRegion* to = _g1h->heap_region_containing(obj);
if (from != NULL && to != NULL &&
from != to &&
!to->isHumongous()) {
@ -534,13 +534,13 @@ HeapRegion::object_iterate_mem_careful(MemRegion mr,
// Otherwise, find the obj that extends onto mr.start().
assert(cur <= mr.start()
&& (oop(cur)->klass() == NULL ||
&& (oop(cur)->klass_or_null() == NULL ||
cur + oop(cur)->size() > mr.start()),
"postcondition of block_start");
oop obj;
while (cur < mr.end()) {
obj = oop(cur);
if (obj->klass() == NULL) {
if (obj->klass_or_null() == NULL) {
// Ran into an unparseable point.
return cur;
} else if (!g1h->is_obj_dead(obj)) {
@ -577,7 +577,7 @@ oops_on_card_seq_iterate_careful(MemRegion mr,
assert(cur <= mr.start(), "Postcondition");
while (cur <= mr.start()) {
if (oop(cur)->klass() == NULL) {
if (oop(cur)->klass_or_null() == NULL) {
// Ran into an unparseable point.
return cur;
}
@ -591,7 +591,7 @@ oops_on_card_seq_iterate_careful(MemRegion mr,
obj = oop(cur);
// If we finish this loop...
assert(cur <= mr.start()
&& obj->klass() != NULL
&& obj->klass_or_null() != NULL
&& cur + obj->size() > mr.start(),
"Loop postcondition");
if (!g1h->is_obj_dead(obj)) {
@ -601,7 +601,7 @@ oops_on_card_seq_iterate_careful(MemRegion mr,
HeapWord* next;
while (cur < mr.end()) {
obj = oop(cur);
if (obj->klass() == NULL) {
if (obj->klass_or_null() == NULL) {
// Ran into an unparseable point.
return cur;
};
@ -703,7 +703,7 @@ void HeapRegion::verify(bool allow_dirty, bool use_prev_marking) const {
}
if (vl_cl.failures()) {
gclog_or_tty->print_cr("Heap:");
G1CollectedHeap::heap()->print();
G1CollectedHeap::heap()->print_on(gclog_or_tty, true /* extended */);
gclog_or_tty->print_cr("");
}
if (VerifyDuringGC &&
@ -781,8 +781,13 @@ void G1OffsetTableContigSpace::set_saved_mark() {
// will pick up the right saved_mark_word() as the high water mark
// of the region. Either way, the behaviour will be correct.
ContiguousSpace::set_saved_mark();
OrderAccess::storestore();
_gc_time_stamp = curr_gc_time_stamp;
OrderAccess::fence();
// The following fence is to force a flush of the writes above, but
// is strictly not needed because when an allocating worker thread
// calls set_saved_mark() it does so under the ParGCRareEvent_lock;
// when the lock is released, the write will be flushed.
// OrderAccess::fence();
}
}
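
The storestore change above is about publication order: the saved mark must become visible before the time stamp that advertises it, or a reader seeing the new stamp could pick up a stale mark. A standalone C++11 sketch of the same ordering via a release fence; the types and the reader side are illustrative:

#include <atomic>
#include <cstdio>

static std::atomic<void*>    saved_mark{nullptr};
static std::atomic<unsigned> gc_time_stamp{0};

void set_saved_mark(void* top, unsigned curr_stamp) {
  saved_mark.store(top, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_release); // storestore: mark first
  gc_time_stamp.store(curr_stamp, std::memory_order_relaxed);
}

void* saved_mark_if_current(unsigned expected_stamp) {
  if (gc_time_stamp.load(std::memory_order_acquire) == expected_stamp) {
    return saved_mark.load(std::memory_order_relaxed);  // guaranteed current
  }
  return nullptr;   // stamp not published yet; caller must fall back
}

int main() {
  static char word[8];
  set_saved_mark(word, 1);
  std::printf("mark for stamp 1: %p\n", saved_mark_if_current(1));
}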


@ -126,7 +126,7 @@ protected:
}
}
void add_reference_work(oop* from, bool par) {
void add_reference_work(OopOrNarrowOopStar from, bool par) {
// Must make this robust in case "from" is not in "_hr", because of
// concurrency.
@ -173,11 +173,11 @@ public:
_bm.clear();
}
void add_reference(oop* from) {
void add_reference(OopOrNarrowOopStar from) {
add_reference_work(from, /*parallel*/ true);
}
void seq_add_reference(oop* from) {
void seq_add_reference(OopOrNarrowOopStar from) {
add_reference_work(from, /*parallel*/ false);
}
@ -220,7 +220,7 @@ public:
}
// Requires "from" to be in "hr()".
bool contains_reference(oop* from) const {
bool contains_reference(OopOrNarrowOopStar from) const {
assert(hr()->is_in_reserved(from), "Precondition.");
size_t card_ind = pointer_delta(from, hr()->bottom(),
CardTableModRefBS::card_size);
@ -394,7 +394,7 @@ public:
void set_next(PosParPRT* nxt) { _next = nxt; }
PosParPRT** next_addr() { return &_next; }
void add_reference(oop* from, int tid) {
void add_reference(OopOrNarrowOopStar from, int tid) {
// Expand if necessary.
PerRegionTable** pt = par_tables();
if (par_tables() == NULL && tid > 0 && hr()->is_gc_alloc_region()) {
@ -447,7 +447,7 @@ public:
return res;
}
bool contains_reference(oop* from) const {
bool contains_reference(OopOrNarrowOopStar from) const {
if (PerRegionTable::contains_reference(from)) return true;
if (_par_tables != NULL) {
for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) {
@ -564,12 +564,15 @@ void OtherRegionsTable::print_from_card_cache() {
}
#endif
void OtherRegionsTable::add_reference(oop* from, int tid) {
void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
size_t cur_hrs_ind = hr()->hrs_index();
#if HRRS_VERBOSE
gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
from, *from);
from,
UseCompressedOops
? oopDesc::load_decode_heap_oop((narrowOop*)from)
: oopDesc::load_decode_heap_oop((oop*)from));
#endif
int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);
@ -1021,13 +1024,13 @@ bool OtherRegionsTable::del_single_region_table(size_t ind,
}
}
bool OtherRegionsTable::contains_reference(oop* from) const {
bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const {
// Cast away const in this case.
MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag);
return contains_reference_locked(from);
}
bool OtherRegionsTable::contains_reference_locked(oop* from) const {
bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
HeapRegion* hr = _g1h->heap_region_containing_raw(from);
if (hr == NULL) return false;
RegionIdx_t hr_ind = (RegionIdx_t) hr->hrs_index();
@ -1288,24 +1291,24 @@ bool HeapRegionRemSetIterator::has_next(size_t& card_index) {
oop** HeapRegionRemSet::_recorded_oops = NULL;
HeapWord** HeapRegionRemSet::_recorded_cards = NULL;
HeapRegion** HeapRegionRemSet::_recorded_regions = NULL;
int HeapRegionRemSet::_n_recorded = 0;
OopOrNarrowOopStar* HeapRegionRemSet::_recorded_oops = NULL;
HeapWord** HeapRegionRemSet::_recorded_cards = NULL;
HeapRegion** HeapRegionRemSet::_recorded_regions = NULL;
int HeapRegionRemSet::_n_recorded = 0;
HeapRegionRemSet::Event* HeapRegionRemSet::_recorded_events = NULL;
int* HeapRegionRemSet::_recorded_event_index = NULL;
int HeapRegionRemSet::_n_recorded_events = 0;
void HeapRegionRemSet::record(HeapRegion* hr, oop* f) {
void HeapRegionRemSet::record(HeapRegion* hr, OopOrNarrowOopStar f) {
if (_recorded_oops == NULL) {
assert(_n_recorded == 0
&& _recorded_cards == NULL
&& _recorded_regions == NULL,
"Inv");
_recorded_oops = NEW_C_HEAP_ARRAY(oop*, MaxRecorded);
_recorded_cards = NEW_C_HEAP_ARRAY(HeapWord*, MaxRecorded);
_recorded_regions = NEW_C_HEAP_ARRAY(HeapRegion*, MaxRecorded);
_recorded_oops = NEW_C_HEAP_ARRAY(OopOrNarrowOopStar, MaxRecorded);
_recorded_cards = NEW_C_HEAP_ARRAY(HeapWord*, MaxRecorded);
_recorded_regions = NEW_C_HEAP_ARRAY(HeapRegion*, MaxRecorded);
}
if (_n_recorded == MaxRecorded) {
gclog_or_tty->print_cr("Filled up 'recorded' (%d).", MaxRecorded);
@ -1408,21 +1411,21 @@ void HeapRegionRemSet::test() {
HeapRegionRemSet* hrrs = hr0->rem_set();
// Make three references from region 0x101...
hrrs->add_reference((oop*)hr1_start);
hrrs->add_reference((oop*)hr1_mid);
hrrs->add_reference((oop*)hr1_last);
hrrs->add_reference((OopOrNarrowOopStar)hr1_start);
hrrs->add_reference((OopOrNarrowOopStar)hr1_mid);
hrrs->add_reference((OopOrNarrowOopStar)hr1_last);
hrrs->add_reference((oop*)hr2_start);
hrrs->add_reference((oop*)hr2_mid);
hrrs->add_reference((oop*)hr2_last);
hrrs->add_reference((OopOrNarrowOopStar)hr2_start);
hrrs->add_reference((OopOrNarrowOopStar)hr2_mid);
hrrs->add_reference((OopOrNarrowOopStar)hr2_last);
hrrs->add_reference((oop*)hr3_start);
hrrs->add_reference((oop*)hr3_mid);
hrrs->add_reference((oop*)hr3_last);
hrrs->add_reference((OopOrNarrowOopStar)hr3_start);
hrrs->add_reference((OopOrNarrowOopStar)hr3_mid);
hrrs->add_reference((OopOrNarrowOopStar)hr3_last);
// Now cause a coarsening.
hrrs->add_reference((oop*)hr4->bottom());
hrrs->add_reference((oop*)hr5->bottom());
hrrs->add_reference((OopOrNarrowOopStar)hr4->bottom());
hrrs->add_reference((OopOrNarrowOopStar)hr5->bottom());
// Now, does iteration yield these three?
HeapRegionRemSetIterator iter;
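
All of these tables are keyed by card: a fixed 512-byte window of the heap identified by shifting the address. A small sketch of the card arithmetic used above; the constant matches HotSpot's default card_shift of 9, the region itself is made up:

#include <cstdint>
#include <cstdio>

static const int       card_shift = 9;
static const uintptr_t card_size  = uintptr_t(1) << card_shift;   // 512 bytes

// Global card index, as in "uintptr_t(from) >> CardTableModRefBS::card_shift".
inline uintptr_t card_index(const void* p) {
  return uintptr_t(p) >> card_shift;
}

// Card index relative to a region's bottom, as the per-region bitmaps use
// (pointer_delta(from, bottom, card_size) in the code above).
inline uintptr_t card_within_region(const void* p, const void* bottom) {
  return (uintptr_t(p) - uintptr_t(bottom)) / card_size;
}

int main() {
  alignas(512) static char region[4096];
  std::printf("+0   -> card %ju\n", (uintmax_t)card_within_region(region,       region));
  std::printf("+511 -> card %ju\n", (uintmax_t)card_within_region(region + 511, region));
  std::printf("+512 -> card %ju\n", (uintmax_t)card_within_region(region + 512, region));
}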


@ -116,9 +116,9 @@ public:
// For now. Could "expand" some tables in the future, so that this made
// sense.
void add_reference(oop* from, int tid);
void add_reference(OopOrNarrowOopStar from, int tid);
void add_reference(oop* from) {
void add_reference(OopOrNarrowOopStar from) {
return add_reference(from, 0);
}
@ -140,8 +140,8 @@ public:
static size_t static_mem_size();
static size_t fl_mem_size();
bool contains_reference(oop* from) const;
bool contains_reference_locked(oop* from) const;
bool contains_reference(OopOrNarrowOopStar from) const;
bool contains_reference_locked(OopOrNarrowOopStar from) const;
void clear();
@ -192,10 +192,10 @@ private:
// Unused unless G1RecordHRRSOops is true.
static const int MaxRecorded = 1000000;
static oop** _recorded_oops;
static HeapWord** _recorded_cards;
static HeapRegion** _recorded_regions;
static int _n_recorded;
static OopOrNarrowOopStar* _recorded_oops;
static HeapWord** _recorded_cards;
static HeapRegion** _recorded_regions;
static int _n_recorded;
static const int MaxRecordedEvents = 1000;
static Event* _recorded_events;
@ -231,13 +231,13 @@ public:
/* Used in the sequential case. Returns "true" iff this addition causes
the size limit to be reached. */
void add_reference(oop* from) {
void add_reference(OopOrNarrowOopStar from) {
_other_regions.add_reference(from);
}
/* Used in the parallel case. Returns "true" iff this addition causes
the size limit to be reached. */
void add_reference(oop* from, int tid) {
void add_reference(OopOrNarrowOopStar from, int tid) {
_other_regions.add_reference(from, tid);
}
@ -301,7 +301,7 @@ public:
return OtherRegionsTable::fl_mem_size();
}
bool contains_reference(oop* from) const {
bool contains_reference(OopOrNarrowOopStar from) const {
return _other_regions.contains_reference(from);
}
void print() const;
@ -329,7 +329,7 @@ public:
}
#endif
static void record(HeapRegion* hr, oop* f);
static void record(HeapRegion* hr, OopOrNarrowOopStar f);
static void print_recorded();
static void record_event(Event evnt);


@ -43,6 +43,18 @@ void ObjPtrQueue::apply_closure_to_buffer(ObjectClosure* cl,
}
}
}
#ifdef ASSERT
void ObjPtrQueue::verify_oops_in_buffer() {
if (_buf == NULL) return;
for (size_t i = _index; i < _sz; i += oopSize) {
oop obj = (oop)_buf[byte_index_to_index((int)i)];
assert(obj != NULL && obj->is_oop(true /* ignore mark word */),
"Not an oop");
}
}
#endif
#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER
@ -66,6 +78,7 @@ void SATBMarkQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
void SATBMarkQueueSet::handle_zero_index_for_thread(JavaThread* t) {
DEBUG_ONLY(t->satb_mark_queue().verify_oops_in_buffer();)
t->satb_mark_queue().handle_zero_index();
}
@ -143,7 +156,7 @@ void SATBMarkQueueSet::abandon_partial_marking() {
}
_completed_buffers_tail = NULL;
_n_completed_buffers = 0;
debug_only(assert_completed_buffer_list_len_correct_locked());
DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
}
while (buffers_to_delete != NULL) {
CompletedBufferNode* nd = buffers_to_delete;


@ -39,6 +39,7 @@ public:
static void apply_closure_to_buffer(ObjectClosure* cl,
void** buf, size_t index, size_t sz);
void verify_oops_in_buffer() NOT_DEBUG_RETURN;
};


@ -27,6 +27,7 @@
bufferingOopClosure.hpp genOopClosures.hpp
bufferingOopClosure.hpp generation.hpp
bufferingOopClosure.hpp os.hpp
bufferingOopClosure.hpp taskqueue.hpp
cardTableRS.cpp concurrentMark.hpp
cardTableRS.cpp g1SATBCardTableModRefBS.hpp
@ -139,7 +140,7 @@ g1CollectedHeap.cpp concurrentZFThread.hpp
g1CollectedHeap.cpp g1CollectedHeap.inline.hpp
g1CollectedHeap.cpp g1CollectorPolicy.hpp
g1CollectedHeap.cpp g1MarkSweep.hpp
g1CollectedHeap.cpp g1RemSet.hpp
g1CollectedHeap.cpp g1RemSet.inline.hpp
g1CollectedHeap.cpp g1OopClosures.inline.hpp
g1CollectedHeap.cpp genOopClosures.inline.hpp
g1CollectedHeap.cpp gcLocker.inline.hpp
@ -151,13 +152,14 @@ g1CollectedHeap.cpp icBuffer.hpp
g1CollectedHeap.cpp isGCActiveMark.hpp
g1CollectedHeap.cpp oop.inline.hpp
g1CollectedHeap.cpp oop.pcgc.inline.hpp
g1CollectedHeap.cpp parGCAllocBuffer.hpp
g1CollectedHeap.cpp vm_operations_g1.hpp
g1CollectedHeap.cpp vmThread.hpp
g1CollectedHeap.hpp barrierSet.hpp
g1CollectedHeap.hpp g1RemSet.hpp
g1CollectedHeap.hpp heapRegion.hpp
g1CollectedHeap.hpp memRegion.hpp
g1CollectedHeap.hpp parGCAllocBuffer.hpp
g1CollectedHeap.hpp sharedHeap.hpp
g1CollectedHeap.inline.hpp concurrentMark.hpp
@ -245,6 +247,7 @@ g1RemSet.cpp intHisto.hpp
g1RemSet.cpp iterator.hpp
g1RemSet.cpp oop.inline.hpp
g1RemSet.inline.hpp oop.inline.hpp
g1RemSet.inline.hpp g1RemSet.hpp
g1RemSet.inline.hpp heapRegionRemSet.hpp
@ -255,6 +258,7 @@ g1SATBCardTableModRefBS.cpp thread.hpp
g1SATBCardTableModRefBS.cpp thread_<os_family>.inline.hpp
g1SATBCardTableModRefBS.cpp satbQueue.hpp
g1SATBCardTableModRefBS.hpp oop.inline.hpp
g1SATBCardTableModRefBS.hpp cardTableModRefBS.hpp
g1SATBCardTableModRefBS.hpp memRegion.hpp


@ -31,9 +31,10 @@ void CardTableModRefBS::par_non_clean_card_iterate_work(Space* sp, MemRegion mr,
bool clear,
int n_threads) {
if (n_threads > 0) {
assert(n_threads == (int)ParallelGCThreads, "# worker threads != # requested!");
// Make sure the LNC array is valid for the space.
assert((n_threads == 1 && ParallelGCThreads == 0) ||
n_threads <= (int)ParallelGCThreads,
"# worker threads != # requested!");
// Make sure the LNC array is valid for the space.
jbyte** lowest_non_clean;
uintptr_t lowest_non_clean_base_chunk_index;
size_t lowest_non_clean_chunk_size;


@ -885,7 +885,7 @@ void ParallelScavengeHeap::print_tracing_info() const {
}
void ParallelScavengeHeap::verify(bool allow_dirty, bool silent) {
void ParallelScavengeHeap::verify(bool allow_dirty, bool silent, bool option /* ignored */) {
// Why do we need the total_collections()-filter below?
if (total_collections() > 0) {
if (!silent) {


@ -217,7 +217,7 @@ class ParallelScavengeHeap : public CollectedHeap {
virtual void gc_threads_do(ThreadClosure* tc) const;
virtual void print_tracing_info() const;
void verify(bool allow_dirty, bool silent);
void verify(bool allow_dirty, bool silent, bool /* option */);
void print_heap_change(size_t prev_used);


@ -117,6 +117,7 @@ inline void PSPromotionManager::process_popped_location_depth(StarTask p) {
process_array_chunk(old);
} else {
if (p.is_narrow()) {
assert(UseCompressedOops, "Error");
PSScavenge::copy_and_push_safe_barrier(this, (narrowOop*)p);
} else {
PSScavenge::copy_and_push_safe_barrier(this, (oop*)p);


@ -533,7 +533,7 @@ class CollectedHeap : public CHeapObj {
virtual void print_tracing_info() const = 0;
// Heap verification
virtual void verify(bool allow_dirty, bool silent) = 0;
virtual void verify(bool allow_dirty, bool silent, bool option) = 0;
// Non product verification and debugging.
#ifndef PRODUCT


@ -554,7 +554,6 @@ ciEnv.cpp jvmtiExport.hpp
ciEnv.cpp linkResolver.hpp
ciEnv.cpp methodDataOop.hpp
ciEnv.cpp objArrayKlass.hpp
ciEnv.cpp oop.hpp
ciEnv.cpp oop.inline.hpp
ciEnv.cpp oop.inline2.hpp
ciEnv.cpp oopFactory.hpp
@ -785,7 +784,6 @@ ciObjectFactory.hpp growableArray.hpp
ciSignature.cpp allocation.inline.hpp
ciSignature.cpp ciSignature.hpp
ciSignature.cpp ciUtilities.hpp
ciSignature.cpp oop.hpp
ciSignature.cpp oop.inline.hpp
ciSignature.cpp signature.hpp
@ -952,7 +950,6 @@ classLoadingService.hpp perfData.hpp
classify.cpp classify.hpp
classify.cpp systemDictionary.hpp
classify.hpp oop.hpp
classify.hpp oop.inline.hpp
codeBlob.cpp allocation.inline.hpp
@ -1187,7 +1184,6 @@ compilerOracle.cpp handles.inline.hpp
compilerOracle.cpp jniHandles.hpp
compilerOracle.cpp klass.hpp
compilerOracle.cpp methodOop.hpp
compilerOracle.cpp oop.hpp
compilerOracle.cpp oop.inline.hpp
compilerOracle.cpp oopFactory.hpp
compilerOracle.cpp resourceArea.hpp
@ -1631,7 +1627,6 @@ frame.cpp methodDataOop.hpp
frame.cpp methodOop.hpp
frame.cpp monitorChunk.hpp
frame.cpp nativeInst_<arch>.hpp
frame.cpp oop.hpp
frame.cpp oop.inline.hpp
frame.cpp oop.inline2.hpp
frame.cpp oopMapCache.hpp
@ -1799,7 +1794,6 @@ generation.cpp genOopClosures.inline.hpp
generation.cpp generation.hpp
generation.cpp generation.inline.hpp
generation.cpp java.hpp
generation.cpp oop.hpp
generation.cpp oop.inline.hpp
generation.cpp spaceDecorator.hpp
generation.cpp space.inline.hpp
@ -2272,7 +2266,6 @@ java.cpp jvmtiExport.hpp
java.cpp memprofiler.hpp
java.cpp methodOop.hpp
java.cpp objArrayOop.hpp
java.cpp oop.hpp
java.cpp oop.inline.hpp
java.cpp oopFactory.hpp
java.cpp sharedRuntime.hpp
@ -2949,7 +2942,7 @@ mutex_<os_family>.inline.hpp thread_<os_family>.inline.hpp
nativeInst_<arch>.cpp assembler_<arch>.inline.hpp
nativeInst_<arch>.cpp handles.hpp
nativeInst_<arch>.cpp nativeInst_<arch>.hpp
nativeInst_<arch>.cpp oop.hpp
nativeInst_<arch>.cpp oop.inline.hpp
nativeInst_<arch>.cpp ostream.hpp
nativeInst_<arch>.cpp resourceArea.hpp
nativeInst_<arch>.cpp sharedRuntime.hpp
@ -3844,7 +3837,7 @@ stackMapTable.hpp stackMapFrame.hpp
stackValue.cpp debugInfo.hpp
stackValue.cpp frame.inline.hpp
stackValue.cpp handles.inline.hpp
stackValue.cpp oop.hpp
stackValue.cpp oop.inline.hpp
stackValue.cpp stackValue.hpp
stackValue.hpp handles.hpp
@ -4332,7 +4325,6 @@ typeArrayOop.hpp typeArrayKlass.hpp
unhandledOops.cpp collectedHeap.hpp
unhandledOops.cpp gcLocker.inline.hpp
unhandledOops.cpp globalDefinitions.hpp
unhandledOops.cpp oop.hpp
unhandledOops.cpp oop.inline.hpp
unhandledOops.cpp thread.hpp
unhandledOops.cpp unhandledOops.hpp
@ -4468,7 +4460,6 @@ vframe.cpp javaClasses.hpp
vframe.cpp nmethod.hpp
vframe.cpp objectMonitor.hpp
vframe.cpp objectMonitor.inline.hpp
vframe.cpp oop.hpp
vframe.cpp oop.inline.hpp
vframe.cpp oopMapCache.hpp
vframe.cpp pcDesc.hpp
@ -4580,7 +4571,6 @@ vmThread.cpp events.hpp
vmThread.cpp interfaceSupport.hpp
vmThread.cpp methodOop.hpp
vmThread.cpp mutexLocker.hpp
vmThread.cpp oop.hpp
vmThread.cpp oop.inline.hpp
vmThread.cpp os.hpp
vmThread.cpp resourceArea.hpp


@ -47,7 +47,7 @@ dump.cpp javaCalls.hpp
dump.cpp javaClasses.hpp
dump.cpp loaderConstraints.hpp
dump.cpp methodDataOop.hpp
dump.cpp oop.hpp
dump.cpp oop.inline.hpp
dump.cpp oopFactory.hpp
dump.cpp resourceArea.hpp
dump.cpp signature.hpp
@ -237,7 +237,7 @@ serialize.cpp compactingPermGenGen.hpp
serialize.cpp compiledICHolderOop.hpp
serialize.cpp methodDataOop.hpp
serialize.cpp objArrayOop.hpp
serialize.cpp oop.hpp
serialize.cpp oop.inline.hpp
serialize.cpp symbolTable.hpp
serialize.cpp systemDictionary.hpp
@ -295,7 +295,7 @@ vmStructs.cpp nmethod.hpp
vmStructs.cpp objArrayKlass.hpp
vmStructs.cpp objArrayKlassKlass.hpp
vmStructs.cpp objArrayOop.hpp
vmStructs.cpp oop.hpp
vmStructs.cpp oop.inline.hpp
vmStructs.cpp oopMap.hpp
vmStructs.cpp pcDesc.hpp
vmStructs.cpp perfMemory.hpp


@ -273,6 +273,7 @@ Rewriter::Rewriter(instanceKlassHandle klass, TRAPS)
compute_index_maps();
if (RegisterFinalizersAtInit && _klass->name() == vmSymbols::java_lang_Object()) {
bool did_rewrite = false;
int i = _methods->length();
while (i-- > 0) {
methodOop method = (methodOop)_methods->obj_at(i);
@ -281,9 +282,11 @@ Rewriter::Rewriter(instanceKlassHandle klass, TRAPS)
// object for finalization if needed.
methodHandle m(THREAD, method);
rewrite_Object_init(m, CHECK);
did_rewrite = true;
break;
}
}
assert(did_rewrite, "must find Object::<init> to rewrite it");
}
// rewrite methods, in two passes


@ -25,12 +25,27 @@
# include "incls/_precompiled.incl"
# include "incls/_barrierSet.cpp.incl"
// count is in HeapWord's
// count is number of array elements being written
void BarrierSet::static_write_ref_array_pre(HeapWord* start, size_t count) {
Universe::heap()->barrier_set()->write_ref_array_pre(MemRegion(start, start + count));
assert(count <= (size_t)max_intx, "count too large");
#if 0
warning("Pre: \t" INTPTR_FORMAT "[" SIZE_FORMAT "]\t",
start, count);
#endif
if (UseCompressedOops) {
Universe::heap()->barrier_set()->write_ref_array_pre((narrowOop*)start, (int)count);
} else {
Universe::heap()->barrier_set()->write_ref_array_pre( (oop*)start, (int)count);
}
}
// count is in HeapWord's
// count is number of array elements being written
void BarrierSet::static_write_ref_array_post(HeapWord* start, size_t count) {
Universe::heap()->barrier_set()->write_ref_array_work(MemRegion(start, start + count));
assert(count <= (size_t)max_intx, "count too large");
HeapWord* end = start + objArrayOopDesc::array_size((int)count);
#if 0
warning("Post:\t" INTPTR_FORMAT "[" SIZE_FORMAT "] : [" INTPTR_FORMAT","INTPTR_FORMAT")\t",
start, count, start, end);
#endif
Universe::heap()->barrier_set()->write_ref_array_work(MemRegion(start, end));
}
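
With count now meaning array elements rather than HeapWords, the post barrier recomputes the covered address range itself via objArrayOopDesc::array_size. A sketch of that sizing arithmetic under an assumed 64-bit layout (8-byte words, 16-byte array header, 4-byte compressed vs. 8-byte wide elements — assumptions, not guaranteed layout):

#include <cstddef>
#include <cstdio>

static const size_t heap_word   = 8;    // assumed HeapWord size
static const size_t header_size = 16;   // assumed objArray header size

size_t array_size_in_words(int count, bool compressed) {
  size_t elem  = compressed ? 4 : 8;
  size_t bytes = header_size + size_t(count) * elem;
  return (bytes + heap_word - 1) / heap_word;   // round up to whole words
}

int main() {
  // 10 elements: wide refs need 12 words; compressed refs fit in 7.
  std::printf("wide:   %zu words\n", array_size_in_words(10, false));
  std::printf("narrow: %zu words\n", array_size_in_words(10, true));
}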


@ -81,9 +81,13 @@ public:
// barrier types. Semantically, it should be thought of as a call to the
// virtual "_work" function below, which must implement the barrier.)
// First the pre-write versions...
inline void write_ref_field_pre(void* field, oop new_val);
template <class T> inline void write_ref_field_pre(T* field, oop new_val);
private:
// Keep this private so as to catch violations at build time.
virtual void write_ref_field_pre_work( void* field, oop new_val) { guarantee(false, "Not needed"); };
protected:
virtual void write_ref_field_pre_work(void* field, oop new_val) {};
virtual void write_ref_field_pre_work( oop* field, oop new_val) {};
virtual void write_ref_field_pre_work(narrowOop* field, oop new_val) {};
public:
// ...then the post-write version.
@ -117,12 +121,17 @@ public:
virtual void read_ref_array(MemRegion mr) = 0;
virtual void read_prim_array(MemRegion mr) = 0;
virtual void write_ref_array_pre(MemRegion mr) {}
virtual void write_ref_array_pre( oop* dst, int length) {}
virtual void write_ref_array_pre(narrowOop* dst, int length) {}
inline void write_ref_array(MemRegion mr);
// Static versions, suitable for calling from generated code.
static void static_write_ref_array_pre(HeapWord* start, size_t count);
static void static_write_ref_array_post(HeapWord* start, size_t count);
// Narrow oop versions of the above; count is # of array elements being written,
// starting with "start", which is HeapWord-aligned.
static void static_write_ref_array_pre_narrow(HeapWord* start, size_t count);
static void static_write_ref_array_post_narrow(HeapWord* start, size_t count);
protected:
virtual void write_ref_array_work(MemRegion mr) = 0;


@ -23,10 +23,10 @@
*/
// Inline functions of BarrierSet, which de-virtualize certain
// performance-critical calls when when the barrier is the most common
// performance-critical calls when the barrier is the most common
// card-table kind.
void BarrierSet::write_ref_field_pre(void* field, oop new_val) {
template <class T> void BarrierSet::write_ref_field_pre(T* field, oop new_val) {
if (kind() == CardTableModRef) {
((CardTableModRefBS*)this)->inline_write_ref_field_pre(field, new_val);
} else {


@ -287,7 +287,7 @@ public:
// these functions here for performance.
protected:
void write_ref_field_work(oop obj, size_t offset, oop newVal);
void write_ref_field_work(void* field, oop newVal);
virtual void write_ref_field_work(void* field, oop newVal);
public:
bool has_write_ref_array_opt() { return true; }
@ -317,10 +317,10 @@ public:
// *** Card-table-barrier-specific things.
inline void inline_write_ref_field_pre(void* field, oop newVal) {}
template <class T> inline void inline_write_ref_field_pre(T* field, oop newVal) {}
inline void inline_write_ref_field(void* field, oop newVal) {
jbyte* byte = byte_for(field);
template <class T> inline void inline_write_ref_field(T* field, oop newVal) {
jbyte* byte = byte_for((void*)field);
*byte = dirty_card;
}
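
For contrast with the G1 pre-barrier, the templatized store barrier above is the classic card-table post-barrier: dirty the card covering the field, whatever the slot width — byte_for() only needs the raw address. A standalone sketch; the card values follow HotSpot's 0-dirty / -1-clean convention, the heap layout is invented:

#include <cstdint>
#include <cstring>
#include <cstdio>

static const int card_shift = 9;                  // 512-byte cards
alignas(512) static char heap[1 << 16];
static int8_t cards[(1 << 16) >> card_shift];
static const int8_t dirty_card = 0;
static const int8_t clean_card = -1;

inline int8_t* byte_for(const void* p) {
  return &cards[(uintptr_t(p) - uintptr_t(heap)) >> card_shift];
}

template <class T>
inline void inline_write_ref_field(T* field, void* /*new_val*/) {
  *byte_for(field) = dirty_card;                  // dirty the card, not the object
}

int main() {
  std::memset(cards, clean_card, sizeof(cards));
  void** slot = reinterpret_cast<void**>(heap + 1000);
  inline_write_ref_field(slot, nullptr);
  std::printf("card %d: %s\n", 1000 >> card_shift,
              *byte_for(slot) == dirty_card ? "dirty" : "clean");
}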


@ -1194,7 +1194,7 @@ GCStats* GenCollectedHeap::gc_stats(int level) const {
return _gens[level]->gc_stats();
}
void GenCollectedHeap::verify(bool allow_dirty, bool silent) {
void GenCollectedHeap::verify(bool allow_dirty, bool silent, bool option /* ignored */) {
if (!silent) {
gclog_or_tty->print("permgen ");
}


@ -325,7 +325,7 @@ public:
void prepare_for_verify();
// Override.
void verify(bool allow_dirty, bool silent);
void verify(bool allow_dirty, bool silent, bool /* option */);
// Override.
void print() const;


@ -57,7 +57,7 @@ class OopsInGenClosure : public OopClosure {
template <class T> void do_barrier(T* p);
// Version for use by closures that may be called in parallel code.
void par_do_barrier(oop* p);
template <class T> void par_do_barrier(T* p);
public:
OopsInGenClosure() : OopClosure(NULL),


@ -40,18 +40,20 @@ inline void OopsInGenClosure::set_generation(Generation* gen) {
template <class T> inline void OopsInGenClosure::do_barrier(T* p) {
assert(generation()->is_in_reserved(p), "expected ref in generation");
assert(!oopDesc::is_null(*p), "expected non-null object");
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
T heap_oop = oopDesc::load_heap_oop(p);
assert(!oopDesc::is_null(heap_oop), "expected non-null oop");
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
// If p points to a younger generation, mark the card.
if ((HeapWord*)obj < _gen_boundary) {
_rs->inline_write_ref_field_gc(p, obj);
}
}
inline void OopsInGenClosure::par_do_barrier(oop* p) {
template <class T> inline void OopsInGenClosure::par_do_barrier(T* p) {
assert(generation()->is_in_reserved(p), "expected ref in generation");
oop obj = *p;
assert(obj != NULL, "expected non-null object");
T heap_oop = oopDesc::load_heap_oop(p);
assert(!oopDesc::is_null(heap_oop), "expected non-null oop");
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
// If p points to a younger generation, mark the card.
if ((HeapWord*)obj < gen_boundary()) {
rs()->write_ref_field_gc_par(p, obj);
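
do_barrier() and par_do_barrier() above share one filter: record the store only when the referent sits below the generation boundary, i.e. an old-to-young pointer. A minimal sketch of that filter with illustrative addresses:

#include <cstdio>

static char  heap[4096];
static char* gen_boundary = heap + 2048;  // young gen occupies [heap, boundary)
static bool  card_dirtied = false;

void do_barrier(void** p) {
  void* obj = *p;
  if (obj == nullptr) return;             // nothing recorded for NULL
  if (static_cast<char*>(obj) < gen_boundary) {
    card_dirtied = true;                  // stand-in for inline_write_ref_field_gc
  }
}

int main() {
  void*  young_obj = heap + 100;          // below the boundary
  void** old_slot  = reinterpret_cast<void**>(heap + 3000);
  *old_slot = young_obj;
  do_barrier(old_slot);
  std::printf("card dirtied: %s\n", card_dirtied ? "yes" : "no");
}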


@ -1013,12 +1013,19 @@ ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
// discovered_addr.
oop current_head = refs_list.head();
// Note: In the case of G1, this pre-barrier is strictly
// Note: In the case of G1, this specific pre-barrier is strictly
// not necessary because the only case we are interested in
// here is when *discovered_addr is NULL, so this will expand to
// nothing. As a result, I am just manually eliding this out for G1.
// here is when *discovered_addr is NULL (see the CAS further below),
// so this will expand to nothing. As a result, we have manually
// elided this out for G1, but left in the test for some future
// collector that might have need for a pre-barrier here.
if (_discovered_list_needs_barrier && !UseG1GC) {
_bs->write_ref_field_pre((void*)discovered_addr, current_head); guarantee(false, "Needs to be fixed: YSR");
if (UseCompressedOops) {
_bs->write_ref_field_pre((narrowOop*)discovered_addr, current_head);
} else {
_bs->write_ref_field_pre((oop*)discovered_addr, current_head);
}
guarantee(false, "Need to check non-G1 collector");
}
oop retest = oopDesc::atomic_compare_exchange_oop(current_head, discovered_addr,
NULL);
@ -1029,9 +1036,8 @@ ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
refs_list.set_head(obj);
refs_list.inc_length(1);
if (_discovered_list_needs_barrier) {
_bs->write_ref_field((void*)discovered_addr, current_head); guarantee(false, "Needs to be fixed: YSR");
_bs->write_ref_field((void*)discovered_addr, current_head);
}
} else {
// If retest was non NULL, another thread beat us to it:
// The reference has already been discovered...
@ -1177,11 +1183,16 @@ bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
// pre-value, we can safely elide the pre-barrier here for the case of G1.
assert(discovered == NULL, "control point invariant");
if (_discovered_list_needs_barrier && !UseG1GC) { // safe to elide for G1
_bs->write_ref_field_pre((oop*)discovered_addr, current_head);
if (UseCompressedOops) {
_bs->write_ref_field_pre((narrowOop*)discovered_addr, current_head);
} else {
_bs->write_ref_field_pre((oop*)discovered_addr, current_head);
}
guarantee(false, "Need to check non-G1 collector");
}
oop_store_raw(discovered_addr, current_head);
if (_discovered_list_needs_barrier) {
_bs->write_ref_field((oop*)discovered_addr, current_head);
_bs->write_ref_field((void*)discovered_addr, current_head);
}
list->set_head(obj);
list->inc_length(1);
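
The MT discovery path above claims a reference by CAS-ing the old list head over an expected NULL in the reference's own field — which is exactly why the pre-barrier is elidable there: the value the CAS overwrites is known to be NULL. A deliberately simplified single-list, single-mutator sketch using std::atomic; the real code's multi-list, sentinel, and barrier details are omitted:

#include <atomic>
#include <cstdio>

struct Ref { std::atomic<Ref*> discovered{nullptr}; };

static Ref sentinel;                        // non-NULL tail marker

struct DiscoveredList {
  std::atomic<Ref*> head{&sentinel};
  int length = 0;
};

bool try_discover(DiscoveredList& list, Ref* obj) {
  Ref* current_head = list.head.load();
  Ref* expected = nullptr;
  // Only the thread that still sees discovered == NULL gets to link obj.
  if (!obj->discovered.compare_exchange_strong(expected, current_head)) {
    return false;                           // another thread discovered it first
  }
  list.head.store(obj);                     // single mutator in this sketch
  list.length++;
  return true;
}

int main() {
  DiscoveredList list;
  Ref r;
  std::printf("first:  %s\n", try_discover(list, &r) ? "discovered" : "already");
  std::printf("second: %s\n", try_discover(list, &r) ? "discovered" : "already");
}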


@ -106,6 +106,7 @@ class Space: public CHeapObj {
virtual void set_end(HeapWord* value) { _end = value; }
virtual HeapWord* saved_mark_word() const { return _saved_mark_word; }
void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }
MemRegionClosure* preconsumptionDirtyCardClosure() const {


@ -1170,7 +1170,7 @@ void Universe::print_heap_after_gc(outputStream* st) {
st->print_cr("}");
}
void Universe::verify(bool allow_dirty, bool silent) {
void Universe::verify(bool allow_dirty, bool silent, bool option) {
if (SharedSkipVerify) {
return;
}
@ -1194,7 +1194,7 @@ void Universe::verify(bool allow_dirty, bool silent) {
if (!silent) gclog_or_tty->print("[Verifying ");
if (!silent) gclog_or_tty->print("threads ");
Threads::verify();
heap()->verify(allow_dirty, silent);
heap()->verify(allow_dirty, silent, option);
if (!silent) gclog_or_tty->print("syms ");
SymbolTable::verify();


@ -343,6 +343,7 @@ class Universe: AllStatic {
// For UseCompressedOops
static address* narrow_oop_base_addr() { return &_narrow_oop._base; }
static address narrow_oop_base() { return _narrow_oop._base; }
static bool is_narrow_oop_base(void* addr) { return (narrow_oop_base() == (address)addr); }
static int narrow_oop_shift() { return _narrow_oop._shift; }
static void set_narrow_oop_base(address base) { _narrow_oop._base = base; }
static void set_narrow_oop_shift(int shift) { _narrow_oop._shift = shift; }
@ -398,7 +399,7 @@ class Universe: AllStatic {
// Debugging
static bool verify_in_progress() { return _verify_in_progress; }
static void verify(bool allow_dirty = true, bool silent = false);
static void verify(bool allow_dirty = true, bool silent = false, bool option = true);
static int verify_count() { return _verify_count; }
static void print();
static void print_on(outputStream* st);


@ -28,13 +28,14 @@
template <class T>
static void specialized_oop_follow_contents(instanceRefKlass* ref, oop obj) {
T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
oop referent = oopDesc::load_decode_heap_oop(referent_addr);
T heap_oop = oopDesc::load_heap_oop(referent_addr);
debug_only(
if(TraceReferenceGC && PrintGCDetails) {
gclog_or_tty->print_cr("instanceRefKlass::oop_follow_contents " INTPTR_FORMAT, obj);
}
)
if (referent != NULL) {
if (!oopDesc::is_null(heap_oop)) {
oop referent = oopDesc::decode_heap_oop_not_null(heap_oop);
if (!referent->is_gc_marked() &&
MarkSweep::ref_processor()->
discover_reference(obj, ref->reference_type())) {
@ -81,13 +82,14 @@ static void specialized_oop_follow_contents(instanceRefKlass* ref,
ParCompactionManager* cm,
oop obj) {
T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
oop referent = oopDesc::load_decode_heap_oop(referent_addr);
T heap_oop = oopDesc::load_heap_oop(referent_addr);
debug_only(
if(TraceReferenceGC && PrintGCDetails) {
gclog_or_tty->print_cr("instanceRefKlass::oop_follow_contents " INTPTR_FORMAT, obj);
}
)
if (referent != NULL) {
if (!oopDesc::is_null(heap_oop)) {
oop referent = oopDesc::decode_heap_oop_not_null(heap_oop);
if (PSParallelCompact::mark_bitmap()->is_unmarked(referent) &&
PSParallelCompact::ref_processor()->
discover_reference(obj, ref->reference_type())) {
@ -182,9 +184,10 @@ int instanceRefKlass::oop_adjust_pointers(oop obj) {
} \
\
T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj); \
oop referent = oopDesc::load_decode_heap_oop(referent_addr); \
if (referent != NULL && contains(referent_addr)) { \
T heap_oop = oopDesc::load_heap_oop(referent_addr); \
if (!oopDesc::is_null(heap_oop) && contains(referent_addr)) { \
ReferenceProcessor* rp = closure->_ref_processor; \
oop referent = oopDesc::decode_heap_oop_not_null(heap_oop); \
if (!referent->is_gc_marked() && (rp != NULL) && \
rp->discover_reference(obj, reference_type())) { \
return size; \


@ -68,7 +68,7 @@ methodOop methodKlass::allocate(constMethodHandle xconst,
m->set_constants(NULL);
m->set_max_stack(0);
m->set_max_locals(0);
m->clear_intrinsic_id_cache();
m->set_intrinsic_id(vmIntrinsics::_none);
m->set_method_data(NULL);
m->set_interpreter_throwout_count(0);
m->set_vtable_index(methodOopDesc::garbage_vtable_index);


@ -962,26 +962,39 @@ methodHandle methodOopDesc:: clone_with_new_data(methodHandle m, u_char* new_cod
return newm;
}
vmIntrinsics::ID methodOopDesc::compute_intrinsic_id() const {
assert(vmIntrinsics::_none == 0, "correct coding of default case");
const uintptr_t max_cache_uint = right_n_bits((int)(sizeof(_intrinsic_id_cache) * BitsPerByte));
assert((uintptr_t)vmIntrinsics::ID_LIMIT <= max_cache_uint, "else fix cache size");
vmSymbols::SID methodOopDesc::klass_id_for_intrinsics(klassOop holder) {
// if loader is not the default loader (i.e., != NULL), we can't know the intrinsics
// because we are not loading from core libraries
if (instanceKlass::cast(method_holder())->class_loader() != NULL) return vmIntrinsics::_none;
if (instanceKlass::cast(holder)->class_loader() != NULL)
return vmSymbols::NO_SID; // regardless of name, no intrinsics here
// see if the klass name is well-known:
symbolOop klass_name = instanceKlass::cast(method_holder())->name();
vmSymbols::SID klass_id = vmSymbols::find_sid(klass_name);
if (klass_id == vmSymbols::NO_SID) return vmIntrinsics::_none;
symbolOop klass_name = instanceKlass::cast(holder)->name();
return vmSymbols::find_sid(klass_name);
}
void methodOopDesc::init_intrinsic_id() {
assert(_intrinsic_id == vmIntrinsics::_none, "do this just once");
const uintptr_t max_id_uint = right_n_bits((int)(sizeof(_intrinsic_id) * BitsPerByte));
assert((uintptr_t)vmIntrinsics::ID_LIMIT <= max_id_uint, "else fix size");
// the klass name is well-known:
vmSymbols::SID klass_id = klass_id_for_intrinsics(method_holder());
assert(klass_id != vmSymbols::NO_SID, "caller responsibility");
// ditto for method and signature:
vmSymbols::SID name_id = vmSymbols::find_sid(name());
if (name_id == vmSymbols::NO_SID) return vmIntrinsics::_none;
if (name_id == vmSymbols::NO_SID) return;
vmSymbols::SID sig_id = vmSymbols::find_sid(signature());
if (sig_id == vmSymbols::NO_SID) return vmIntrinsics::_none;
if (sig_id == vmSymbols::NO_SID) return;
jshort flags = access_flags().as_short();
vmIntrinsics::ID id = vmIntrinsics::find_id(klass_id, name_id, sig_id, flags);
if (id != vmIntrinsics::_none) {
set_intrinsic_id(id);
return;
}
// A few slightly irregular cases:
switch (klass_id) {
case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_StrictMath):
@ -992,15 +1005,18 @@ vmIntrinsics::ID methodOopDesc::compute_intrinsic_id() const {
case vmSymbols::VM_SYMBOL_ENUM_NAME(sqrt_name):
// pretend it is the corresponding method in the non-strict class:
klass_id = vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_Math);
id = vmIntrinsics::find_id(klass_id, name_id, sig_id, flags);
break;
}
}
// return intrinsic id if any
return vmIntrinsics::find_id(klass_id, name_id, sig_id, flags);
if (id != vmIntrinsics::_none) {
// Set up its iid. It is an alias method.
set_intrinsic_id(id);
return;
}
}
// These two methods are static since a GC may move the methodOopDesc
bool methodOopDesc::load_signature_classes(methodHandle m, TRAPS) {
bool sig_is_loaded = true;


@ -104,7 +104,7 @@ class methodOopDesc : public oopDesc {
u2 _max_stack; // Maximum number of entries on the expression stack
u2 _max_locals; // Number of local variables used by this method
u2 _size_of_parameters; // size of the parameter block (receiver + arguments) in words
u1 _intrinsic_id_cache; // Cache for intrinsic_id; 0 or 1+vmInt::ID
u1 _intrinsic_id; // vmSymbols::intrinsic_id (0 == _none)
u1 _highest_tier_compile; // Highest compile level this method has ever seen.
u2 _interpreter_throwout_count; // Count of times method was exited via exception while interpreting
u2 _number_of_breakpoints; // fullspeed debugging support
@ -224,8 +224,6 @@ class methodOopDesc : public oopDesc {
int highest_tier_compile() { return _highest_tier_compile;}
void set_highest_tier_compile(int level) { _highest_tier_compile = level;}
void clear_intrinsic_id_cache() { _intrinsic_id_cache = 0; }
// Count of times method was exited via exception while interpreting
void interpreter_throwout_increment() {
if (_interpreter_throwout_count < 65534) {
@ -571,18 +569,12 @@ class methodOopDesc : public oopDesc {
void set_cached_itable_index(int index) { instanceKlass::cast(method_holder())->set_cached_itable_index(method_idnum(), index); }
// Support for inlining of intrinsic methods
vmIntrinsics::ID intrinsic_id() const { // returns zero if not an intrinsic
const u1& cache = _intrinsic_id_cache;
if (cache != 0) {
return (vmIntrinsics::ID)(cache - 1);
} else {
vmIntrinsics::ID id = compute_intrinsic_id();
*(u1*)&cache = ((u1) id) + 1; // force the cache to be non-const
vmIntrinsics::verify_method(id, (methodOop) this);
assert((vmIntrinsics::ID)(cache - 1) == id, "proper conversion");
return id;
}
}
vmIntrinsics::ID intrinsic_id() const { return (vmIntrinsics::ID) _intrinsic_id; }
void set_intrinsic_id(vmIntrinsics::ID id) { _intrinsic_id = (u1) id; }
// Helper routines for intrinsic_id() and vmIntrinsics::method().
void init_intrinsic_id(); // updates from _none if a match
static vmSymbols::SID klass_id_for_intrinsics(klassOop holder);
// On-stack replacement support
bool has_osr_nmethod() { return instanceKlass::cast(method_holder())->lookup_osr_nmethod(this, InvocationEntryBci) != NULL; }
@ -635,9 +627,6 @@ class methodOopDesc : public oopDesc {
void set_size_of_parameters(int size) { _size_of_parameters = size; }
private:
// Helper routine for intrinsic_id().
vmIntrinsics::ID compute_intrinsic_id() const;
// Inlined elements
address* native_function_addr() const { assert(is_native(), "must be native"); return (address*) (this+1); }
address* signature_handler_addr() const { return native_function_addr() + 1; }
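
The methodOop change above replaces a lazy "0 means uncomputed, otherwise id+1" cache with an eagerly initialized one-byte field in which 0 (_none) is itself a valid id. A sketch of storing an enum in a u1 with the fits-in-a-byte guard that init_intrinsic_id() asserts; the enum values are invented:

#include <cassert>
#include <cstdint>
#include <cstdio>

enum IntrinsicId : int { id_none = 0, id_hashCode, id_sqrt, ID_LIMIT };
static_assert(ID_LIMIT <= 0xff, "else fix the field size");  // right_n_bits check

struct Method {
  uint8_t intrinsic_id_ = id_none;          // u1 _intrinsic_id

  void init_intrinsic_id(IntrinsicId resolved) {
    assert(intrinsic_id_ == id_none && "do this just once");
    intrinsic_id_ = uint8_t(resolved);      // may legitimately stay id_none
  }
  IntrinsicId intrinsic_id() const { return IntrinsicId(intrinsic_id_); }
};

int main() {
  Method m;
  m.init_intrinsic_id(id_sqrt);
  std::printf("intrinsic id = %d\n", int(m.intrinsic_id()));
}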


@ -85,8 +85,6 @@ oop objArrayKlass::multi_allocate(int rank, jint* sizes, TRAPS) {
template <class T> void objArrayKlass::do_copy(arrayOop s, T* src,
arrayOop d, T* dst, int length, TRAPS) {
const size_t word_len = objArrayOopDesc::array_size(length);
BarrierSet* bs = Universe::heap()->barrier_set();
// For performance reasons, we assume that the write barrier we
// are using has optimized modes for arrays of references. At least one
@ -94,11 +92,10 @@ template <class T> void objArrayKlass::do_copy(arrayOop s, T* src,
assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
MemRegion dst_mr = MemRegion((HeapWord*)dst, word_len);
if (s == d) {
// since source and destination are equal we do not need conversion checks.
assert(length > 0, "sanity check");
bs->write_ref_array_pre(dst_mr);
bs->write_ref_array_pre(dst, length);
Copy::conjoint_oops_atomic(src, dst, length);
} else {
// We have to make sure all elements conform to the destination array
@ -106,7 +103,7 @@ template <class T> void objArrayKlass::do_copy(arrayOop s, T* src,
klassOop stype = objArrayKlass::cast(s->klass())->element_klass();
if (stype == bound || Klass::cast(stype)->is_subtype_of(bound)) {
// elements are guaranteed to be subtypes, so no check necessary
bs->write_ref_array_pre(dst_mr);
bs->write_ref_array_pre(dst, length);
Copy::conjoint_oops_atomic(src, dst, length);
} else {
// slow case: need individual subtype checks
@ -138,6 +135,7 @@ template <class T> void objArrayKlass::do_copy(arrayOop s, T* src,
}
}
}
const size_t word_len = objArrayOopDesc::array_size(length);
bs->write_ref_array(MemRegion((HeapWord*)dst, word_len));
}
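
do_copy() above now hands (dst, length) in elements to the pre-barrier and keeps two copy paths: a bulk move when every source element is statically a subtype of the destination's bound, and a checked per-element loop otherwise. A toy sketch of that structure, with subtyping reduced to an integer tag:

#include <cstdio>
#include <cstring>

struct Elem { int type_tag; };

void write_ref_array_pre(Elem** /*dst*/, int /*count*/) {
  // old values would be enqueued here, before any store
}

// Returns false after a partial copy when an element fails the check,
// mirroring the ArrayStoreException path.
bool checked_copy(Elem** src, Elem** dst, int length,
                  int dst_bound_tag, bool src_guaranteed) {
  write_ref_array_pre(dst, length);
  if (src_guaranteed) {
    std::memmove(dst, src, sizeof(Elem*) * length);   // fast path, no checks
    return true;
  }
  for (int i = 0; i < length; i++) {                  // slow path
    if (src[i] != nullptr && src[i]->type_tag != dst_bound_tag) return false;
    dst[i] = src[i];
  }
  return true;
}

int main() {
  Elem a{1}, b{1};
  Elem* src[2] = {&a, &b};
  Elem* dst[2] = {nullptr, nullptr};
  std::printf("copy ok: %s\n", checked_copy(src, dst, 2, 1, false) ? "yes" : "no");
}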


@ -148,12 +148,14 @@ inline bool oopDesc::is_null(narrowOop obj) { return obj == 0; }
inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
assert(!is_null(v), "oop value can never be zero");
assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
address base = Universe::narrow_oop_base();
int shift = Universe::narrow_oop_shift();
uint64_t pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
uint64_t result = pd >> shift;
assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
assert(decode_heap_oop(result) == v, "reversibility");
return (narrowOop)result;
}
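
The added assert above pins down encode/decode reversibility: a compressed oop is (addr - base) >> shift, the shifted delta must still fit in 32 bits, and decoding must give back the original address. A standalone sketch with an assumed base and a shift of 3:

#include <cassert>
#include <cstdint>
#include <cstdio>

alignas(8) static char heap[1 << 20];
static char* const narrow_base  = heap;   // Universe::narrow_oop_base() stand-in
static const int   narrow_shift = 3;

typedef uint32_t narrow_ref;

narrow_ref encode_not_null(void* v) {
  uint64_t pd     = uint64_t(static_cast<char*>(v) - narrow_base);
  uint64_t result = pd >> narrow_shift;
  assert((result & UINT64_C(0xffffffff00000000)) == 0 && "narrow oop overflow");
  return narrow_ref(result);
}

void* decode(narrow_ref v) {
  return narrow_base + (uint64_t(v) << narrow_shift);
}

int main() {
  void* p = heap + 4096;
  narrow_ref n = encode_not_null(p);
  assert(decode(n) == p && "reversibility");
  std::printf("%p -> 0x%x -> %p\n", p, n, decode(n));
}
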
@ -449,7 +451,7 @@ inline void update_barrier_set(void* p, oop v) {
oopDesc::bs()->write_ref_field(p, v);
}
inline void update_barrier_set_pre(void* p, oop v) {
template <class T> inline void update_barrier_set_pre(T* p, oop v) {
oopDesc::bs()->write_ref_field_pre(p, v);
}
@ -459,15 +461,15 @@ template <class T> inline void oop_store(T* p, oop v) {
} else {
update_barrier_set_pre(p, v);
oopDesc::encode_store_heap_oop(p, v);
update_barrier_set(p, v);
update_barrier_set((void*)p, v); // cast away type
}
}
template <class T> inline void oop_store(volatile T* p, oop v) {
update_barrier_set_pre((void*)p, v);
update_barrier_set_pre((T*)p, v); // cast away volatile
// Used by release_obj_field_put, so use release_store_ptr.
oopDesc::release_encode_store_heap_oop(p, v);
update_barrier_set((void*)p, v);
update_barrier_set((void*)p, v); // cast away type
}
template <class T> inline void oop_store_without_check(T* p, oop v) {

View File

@ -29,6 +29,7 @@
typedef juint narrowOop; // Offset instead of address for an oop within a java object
typedef class klassOopDesc* wideKlassOop; // to keep SA happy and unhandled oop
// detector happy.
typedef void* OopOrNarrowOopStar;
#ifndef CHECK_UNHANDLED_OOPS


@ -357,6 +357,9 @@ PhaseCFG::PhaseCFG( Arena *a, RootNode *r, Matcher &m ) :
#ifndef PRODUCT
, _trace_opto_pipelining(TraceOptoPipelining || C->method_has_option("TraceOptoPipelining"))
#endif
#ifdef ASSERT
, _raw_oops(a)
#endif
{
ResourceMark rm;
// I'll need a few machine-specific GotoNodes. Make an Ideal GotoNode,


@ -380,6 +380,10 @@ class PhaseCFG : public Phase {
bool _trace_opto_pipelining; // tracing flag
#endif
#ifdef ASSERT
Unique_Node_List _raw_oops;
#endif
// Build dominators
void Dominators();
