Lana Steuck 2012-09-05 13:50:00 -07:00
commit 51b47903db
1578 changed files with 421077 additions and 9580 deletions
.hgtags
.hgtags-top-repo
corba
hotspot
.hgtags
agent
make
src

@ -174,3 +174,5 @@ cecd7026f30cbd83b0601925a7a5e059aec98138 jdk8-b49
38fe5ab028908cf64dd73a43336ba3211577bfc3 jdk8-b50
382651d28f2502d371eca751962232c0e535e57a jdk8-b51
b67041a6cb508da18d2f5c7687e6a31e08bea4fc jdk8-b52
c7aa5cca1c01689a7b1a92411daf83684af05a33 jdk8-b53
7c6aa31ff1b2ae48c1c686ebe1aadf0c3da5be15 jdk8-b54

@ -174,3 +174,5 @@ c97b99424815c43818e3cc3ffcdd1a60f3198b52 jdk8-b49
2fd67618b9a3c847780ed7b9d228e862b6e2824c jdk8-b50
57c0aee7309050b9d6cfcbd202dc704e9260b377 jdk8-b51
8d24def5ceb3b8f2e857f2e18b2804fc59eecf8d jdk8-b52
febd7ff5280067ca482faaeb9418ae88764c1a35 jdk8-b53
c1a277c6022affbc6855bdfb039511e73fbe2395 jdk8-b54

@ -174,3 +174,5 @@ fe44e58a6bdbeae350ce96aafb49770a5dca5d8a jdk8-b49
d20d9eb9f093adbf392918c703960ad24c93a331 jdk8-b50
9b0f841ca9f7ee9bacf16a5ab41c4f829276bc6b jdk8-b51
80689ff9cb499837513f18a1136dac7f0686cd55 jdk8-b52
63aeb7a2472fb299134ad7388e0a111a5340b02d jdk8-b53
16c82fc74695bab9b9e0fb05c086a5a08ba0082f jdk8-b54

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2004, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,7 +25,10 @@
package com.sun.corba.se.impl.transport;
import java.util.Hashtable;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import org.omg.CORBA.CompletionStatus;
import org.omg.CORBA.SystemException;
@ -68,7 +71,7 @@ public class CorbaResponseWaitingRoomImpl
private CorbaConnection connection;
// Maps requestId to an OutCallDesc.
private Hashtable out_calls = null; // REVISIT - use int hastable/map
final private Map<Integer, OutCallDesc> out_calls;
public CorbaResponseWaitingRoomImpl(ORB orb, CorbaConnection connection)
{
@ -76,7 +79,8 @@ public class CorbaResponseWaitingRoomImpl
wrapper = ORBUtilSystemException.get( orb,
CORBALogDomains.RPC_TRANSPORT ) ;
this.connection = connection;
out_calls = new Hashtable();
out_calls =
Collections.synchronizedMap(new HashMap<Integer, OutCallDesc>());
}
////////////////////////////////////////////////////
@ -139,7 +143,7 @@ public class CorbaResponseWaitingRoomImpl
return null;
}
OutCallDesc call = (OutCallDesc)out_calls.get(requestId);
OutCallDesc call = out_calls.get(requestId);
if (call == null) {
throw wrapper.nullOutCall(CompletionStatus.COMPLETED_MAYBE);
}
@ -197,7 +201,7 @@ public class CorbaResponseWaitingRoomImpl
LocateReplyOrReplyMessage header = (LocateReplyOrReplyMessage)
inputObject.getMessageHeader();
Integer requestId = new Integer(header.getRequestId());
OutCallDesc call = (OutCallDesc) out_calls.get(requestId);
OutCallDesc call = out_calls.get(requestId);
if (orb.transportDebugFlag) {
dprint(".responseReceived: id/"
@ -248,7 +252,6 @@ public class CorbaResponseWaitingRoomImpl
public int numberRegistered()
{
// Note: Hashtable.size() is not synchronized
return out_calls.size();
}
@ -264,29 +267,41 @@ public class CorbaResponseWaitingRoomImpl
dprint(".signalExceptionToAllWaiters: " + systemException);
}
OutCallDesc call;
java.util.Enumeration e = out_calls.elements();
while(e.hasMoreElements()) {
call = (OutCallDesc) e.nextElement();
synchronized (out_calls) {
if (orb.transportDebugFlag) {
dprint(".signalExceptionToAllWaiters: out_calls size :" +
out_calls.size());
}
synchronized(call.done){
// anything waiting for BufferManagerRead's fragment queue
// needs to be cancelled
CorbaMessageMediator corbaMsgMediator =
(CorbaMessageMediator)call.messageMediator;
CDRInputObject inputObject =
(CDRInputObject)corbaMsgMediator.getInputObject();
// IMPORTANT: If inputObject is null, then no need to tell
// BufferManagerRead to cancel request processing.
if (inputObject != null) {
BufferManagerReadStream bufferManager =
(BufferManagerReadStream)inputObject.getBufferManager();
int requestId = corbaMsgMediator.getRequestId();
bufferManager.cancelProcessing(requestId);
for (OutCallDesc call : out_calls.values()) {
if (orb.transportDebugFlag) {
dprint(".signalExceptionToAllWaiters: signaling " +
call);
}
synchronized(call.done) {
try {
// anything waiting for BufferManagerRead's fragment queue
// needs to be cancelled
CorbaMessageMediator corbaMsgMediator =
(CorbaMessageMediator)call.messageMediator;
CDRInputObject inputObject =
(CDRInputObject)corbaMsgMediator.getInputObject();
// IMPORTANT: If inputObject is null, then no need to tell
// BufferManagerRead to cancel request processing.
if (inputObject != null) {
BufferManagerReadStream bufferManager =
(BufferManagerReadStream)inputObject.getBufferManager();
int requestId = corbaMsgMediator.getRequestId();
bufferManager.cancelProcessing(requestId);
}
} catch (Exception e) {
} finally {
// attempt to wake up waiting threads in all cases
call.inputObject = null;
call.exception = systemException;
call.done.notifyAll();
}
}
call.inputObject = null;
call.exception = systemException;
call.done.notify();
}
}
}
@ -294,7 +309,7 @@ public class CorbaResponseWaitingRoomImpl
public MessageMediator getMessageMediator(int requestId)
{
Integer id = new Integer(requestId);
OutCallDesc call = (OutCallDesc) out_calls.get(id);
OutCallDesc call = out_calls.get(id);
if (call == null) {
// This can happen when getting early reply fragments for a
// request which has completed (e.g., client marshaling error).

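The CorbaResponseWaitingRoomImpl change above replaces the legacy Hashtable with a Collections.synchronizedMap over a typed HashMap. Two details matter: a synchronized map only guards individual calls, so the new signalExceptionToAllWaiters iterates inside a synchronized (out_calls) block, and it wakes waiters with notifyAll() instead of notify() so no thread parked on a call is missed; the try/finally additionally guarantees the wake-up even if cancelProcessing throws. A minimal, self-contained sketch of the same locking shape follows; Waiter is a hypothetical stand-in for OutCallDesc, not the CORBA class.

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

public class SynchronizedMapIterationSketch {
    // Hypothetical stand-in for OutCallDesc: one monitor object per pending call.
    static final class Waiter {
        final Object done = new Object();
        volatile Exception exception;
    }

    private final Map<Integer, Waiter> outCalls =
        Collections.synchronizedMap(new HashMap<Integer, Waiter>());

    void register(int requestId) {
        outCalls.put(requestId, new Waiter()); // individual operations are already thread-safe
    }

    void signalAll(Exception cause) {
        // Iterating values() is NOT atomic by itself: Collections.synchronizedMap
        // requires holding the map's own monitor while iterating, which is
        // exactly what the diff above does.
        synchronized (outCalls) {
            for (Waiter w : outCalls.values()) {
                synchronized (w.done) {
                    w.exception = cause;
                    w.done.notifyAll(); // notifyAll, not notify: wake every waiter
                }
            }
        }
    }
}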
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1521,7 +1521,7 @@ public class SocketOrChannelConnectionImpl
// connection and give them the SystemException;
responseWaitingRoom.signalExceptionToAllWaiters(systemException);
} finally {
if (contactInfo != null) {
((OutboundConnectionCache)getConnectionCache()).remove(contactInfo);
} else if (acceptor != null) {
@ -1542,7 +1542,6 @@ public class SocketOrChannelConnectionImpl
writeUnlock();
} finally {
if (orb.transportDebugFlag) {
dprint(".purgeCalls<-: "
+ minor_code + "/" + die + "/" + lockHeld

@ -270,3 +270,7 @@ e3619706a7253540a2d94e9e841acaab8ace7038 jdk8-b49
663fc23da8d51c4c0552cbcb17ffc85f5869d4fd jdk8-b51
4c8f2a12e757e7a808aa85827573e09f75d7459f hs24-b20
6d0436885201db3f581523344a734793bb989549 jdk8-b52
54240c1b8e87758f28da2c6a569a926fd9e0910a jdk8-b53
9e3ae661284dc04185b029d85440fe7811f1ed07 hs24-b21
e8fb566b94667f88462164defa654203f0ab6820 jdk8-b54
09ea7e0752b306b8ae74713aeb4eb6263e1c6836 hs24-b22

@ -26,7 +26,7 @@
# This file sets common environment variables for all SA scripts
OS=`uname`
STARTDIR=`dirname $0`
STARTDIR=`(cd \`dirname $0 \`; pwd)`
ARCH=`uname -m`
if [ "x$SA_JAVA" = "x" ]; then

@ -25,10 +25,11 @@
. `dirname $0`/saenv.sh
if [ -f $STARTDIR/sa.jar ] ; then
CP=$STARTDIR/sa.jar
if [ -f $STARTDIR/../lib/sa-jdi.jar ] ; then
CP=$STARTDIR/../lib/sa-jdi.jar
else
CP=$STARTDIR/../build/classes
fi
$SA_JAVA -classpath $CP ${OPTIONS} -Djava.rmi.server.codebase=file:/$CP -Djava.security.policy=$STARTDIR\/grantAll.policy sun.jvm.hotspot.DebugServer $*
$STARTDIR/java -classpath $CP ${OPTIONS} -Djava.rmi.server.codebase=file://$CP -Djava.security.policy=${STARTDIR}/grantAll.policy sun.jvm.hotspot.DebugServer $*

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2007, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -55,11 +55,11 @@ static jmethodID listAdd_ID = 0;
#define THROW_NEW_DEBUGGER_EXCEPTION_(str, value) { throw_new_debugger_exception(env, str); return value; }
#define THROW_NEW_DEBUGGER_EXCEPTION(str) { throw_new_debugger_exception(env, str); return;}
static void throw_new_debugger_exception(JNIEnv* env, const char* errMsg) {
void throw_new_debugger_exception(JNIEnv* env, const char* errMsg) {
(*env)->ThrowNew(env, (*env)->FindClass(env, "sun/jvm/hotspot/debugger/DebuggerException"), errMsg);
}
static struct ps_prochandle* get_proc_handle(JNIEnv* env, jobject this_obj) {
struct ps_prochandle* get_proc_handle(JNIEnv* env, jobject this_obj) {
jlong ptr = (*env)->GetLongField(env, this_obj, p_ps_prochandle_ID);
return (struct ps_prochandle*)(intptr_t)ptr;
}
@ -280,6 +280,7 @@ JNIEXPORT jbyteArray JNICALL Java_sun_jvm_hotspot_debugger_linux_LinuxDebuggerLo
return (err == PS_OK)? array : 0;
}
#if defined(i386) || defined(ia64) || defined(amd64) || defined(sparc) || defined(sparcv9)
JNIEXPORT jlongArray JNICALL Java_sun_jvm_hotspot_debugger_linux_LinuxDebuggerLocal_getThreadIntegerRegisterSet0
(JNIEnv *env, jobject this_obj, jint lwp_id) {
@ -410,3 +411,4 @@ JNIEXPORT jlongArray JNICALL Java_sun_jvm_hotspot_debugger_linux_LinuxDebuggerLo
(*env)->ReleaseLongArrayElements(env, array, regs, JNI_COMMIT);
return array;
}
#endif

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2007, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,10 +25,15 @@
#ifndef _LIBPROC_H_
#define _LIBPROC_H_
#include <jni.h>
#include <unistd.h>
#include <stdint.h>
#include "proc_service.h"
#if defined(arm) || defined(ppc)
#include "libproc_md.h"
#endif
#if defined(sparc) || defined(sparcv9)
/*
If _LP64 is defined ptrace.h should be taken from /usr/include/asm-sparc64
@ -139,4 +144,8 @@ uintptr_t lookup_symbol(struct ps_prochandle* ph, const char* object_name,
// address->nearest symbol lookup. return NULL for no symbol
const char* symbol_for_pc(struct ps_prochandle* ph, uintptr_t addr, uintptr_t* poffset);
struct ps_prochandle* get_proc_handle(JNIEnv* env, jobject this_obj);
void throw_new_debugger_exception(JNIEnv* env, const char* errMsg);
#endif // _LIBPROC_H_

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -549,7 +549,13 @@ public class HotSpotAgent {
machDesc = new MachineDescriptionSPARC32Bit();
}
} else {
throw new DebuggerException("Linux only supported on x86/ia64/amd64/sparc/sparc64");
try {
machDesc = (MachineDescription)
Class.forName("sun.jvm.hotspot.debugger.MachineDescription" +
cpu.toUpperCase()).newInstance();
} catch (Exception e) {
throw new DebuggerException("Linux not supported on machine type " + cpu);
}
}
LinuxDebuggerLocal dbg =

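The HotSpotAgent hunk above turns a hard failure on unknown CPUs into a reflective lookup of a MachineDescription subclass named after the CPU string, so alternate (possibly closed) ports can plug in without touching this file. The same naming-convention lookup recurs in the BugSpotAgent, LinuxThreadContextFactory, ProcDebuggerLocal, RemoteDebuggerClient, Threads, and HTMLGenerator hunks below. A hedged sketch of the convention; the helper class is hypothetical and only the shape of the code is asserted:

// Hypothetical helper showing the lookup-by-naming-convention used above.
final class ConventionLookup {
    static Object instantiate(String classPrefix, String cpu) {
        try {
            // e.g. "sun.jvm.hotspot.debugger.MachineDescription" + "SPARC"
            Class<?> c = Class.forName(classPrefix + cpu.toUpperCase());
            return c.newInstance();   // pre-JDK-9 idiom, matching the diff
        } catch (Exception e) {
            // Any reflective failure collapses to the old "not supported" error.
            throw new RuntimeException("machine type " + cpu + " not yet supported", e);
        }
    }
}

// Usage, as in the hunk above:
//   machDesc = (MachineDescription) ConventionLookup.instantiate(
//       "sun.jvm.hotspot.debugger.MachineDescription", cpu);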
@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -737,9 +737,16 @@ public class BugSpotAgent {
machDesc = new MachineDescriptionSPARC32Bit();
}
} else {
throw new DebuggerException("Linux only supported on x86/ia64/amd64/sparc/sparc64");
try {
machDesc = (MachineDescription)
Class.forName("sun.jvm.hotspot.debugger.MachineDescription" +
cpu.toUpperCase()).newInstance();
} catch (Exception e) {
throw new DebuggerException("unsupported machine type");
}
}
// Note we do not use a cache for the local debugger in server
// mode; it will be taken care of on the client side (once remote
// debugging is implemented).

@ -24,6 +24,8 @@
package sun.jvm.hotspot.debugger;
import sun.jvm.hotspot.debugger.cdbg.*;
/** This is a placeholder interface for a thread's context, containing
only integer registers (no floating-point ones). What it contains
is platform-dependent. Not all registers are guaranteed to be
@ -54,4 +56,6 @@ public interface ThreadContext {
/** Set the value of the specified register (0..getNumRegisters() -
1) as an Address */
public void setRegisterAsAddress(int index, Address value);
public CFrame getTopFrame(Debugger dbg);
}

@ -25,6 +25,7 @@
package sun.jvm.hotspot.debugger.amd64;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.debugger.cdbg.*;
/** Specifies the thread context on amd64 platforms; only a sub-portion
* of the context is guaranteed to be present on all operating
@ -98,6 +99,10 @@ public abstract class AMD64ThreadContext implements ThreadContext {
return data[index];
}
public CFrame getTopFrame(Debugger dbg) {
return null;
}
/** This can't be implemented in this class since we would have to
* tie the implementation to, for example, the debugging system */
public abstract void setRegisterAsAddress(int index, Address value);

@ -25,6 +25,7 @@
package sun.jvm.hotspot.debugger.ia64;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.debugger.cdbg.*;
/** Specifies the thread context on ia64 platform; only a sub-portion
of the context is guaranteed to be present on all operating
@ -172,6 +173,10 @@ public abstract class IA64ThreadContext implements ThreadContext {
return data[index];
}
public CFrame getTopFrame(Debugger dbg) {
return null;
}
/** This can't be implemented in this class since we would have to
tie the implementation to, for example, the debugging system */
public abstract void setRegisterAsAddress(int index, Address value);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2006, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -107,7 +107,9 @@ class LinuxCDebugger implements CDebugger {
if (pc == null) return null;
return new LinuxSPARCCFrame(dbg, sp, pc, LinuxDebuggerLocal.getAddressSize());
} else {
throw new DebuggerException(cpu + " is not yet supported");
// Runtime exception thrown by LinuxThreadContextFactory if unknown cpu
ThreadContext context = (ThreadContext) thread.getContext();
return context.getTopFrame(dbg);
}
}

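The LinuxCDebugger change above stops throwing for unrecognized CPUs and instead defers to the thread context, using the getTopFrame(Debugger) method added to ThreadContext earlier in this changeset; the amd64, ia64, sparc, and x86 contexts all stub it out with return null. A minimal sketch of that fallback shape, with placeholder types (the *Sketch names are illustrative, not the SA classes):

interface CFrameSketch {}
interface DebuggerSketch {}

interface ThreadContextSketch {
    // Added in this commit: a null return means "this port does not support
    // native stack walking"; callers must tolerate it.
    CFrameSketch getTopFrame(DebuggerSketch dbg);
}

class DefaultContextSketch implements ThreadContextSketch {
    public CFrameSketch getTopFrame(DebuggerSketch dbg) {
        return null; // matches the stubs added to the amd64/ia64/sparc/x86 contexts
    }
}

class CDebuggerSketch {
    CFrameSketch topFrameFor(ThreadContextSketch context, DebuggerSketch dbg) {
        // Unknown CPU: instead of throwing, defer to the thread context.
        return context.getTopFrame(dbg);
    }
}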
@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2006, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,6 +24,7 @@
package sun.jvm.hotspot.debugger.linux;
import java.lang.reflect.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.debugger.linux.amd64.*;
import sun.jvm.hotspot.debugger.linux.ia64.*;
@ -41,8 +42,16 @@ class LinuxThreadContextFactory {
return new LinuxIA64ThreadContext(dbg);
} else if (cpu.equals("sparc")) {
return new LinuxSPARCThreadContext(dbg);
} else {
throw new RuntimeException("cpu " + cpu + " is not yet supported");
} else {
try {
Class tcc = Class.forName("sun.jvm.hotspot.debugger.linux." +
cpu.toLowerCase() + ".Linux" + cpu.toUpperCase() +
"ThreadContext");
Constructor[] ctcc = tcc.getConstructors();
return (ThreadContext)ctcc[0].newInstance(dbg);
} catch (Exception e) {
throw new RuntimeException("cpu " + cpu + " is not yet supported");
}
}
}
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2008, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,6 +27,7 @@ package sun.jvm.hotspot.debugger.proc;
import java.io.*;
import java.net.*;
import java.util.*;
import java.lang.reflect.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.debugger.cdbg.*;
import sun.jvm.hotspot.debugger.proc.amd64.*;
@ -86,7 +87,16 @@ public class ProcDebuggerLocal extends DebuggerBase implements ProcDebugger {
pcRegIndex = AMD64ThreadContext.RIP;
fpRegIndex = AMD64ThreadContext.RBP;
} else {
try {
Class tfc = Class.forName("sun.jvm.hotspot.debugger.proc." +
cpu.toLowerCase() + ".Proc" + cpu.toUpperCase() +
"ThreadFactory");
Constructor[] ctfc = tfc.getConstructors();
threadFactory = (ProcThreadFactory)ctfc[0].newInstance(this);
} catch (Exception e) {
throw new RuntimeException("Thread access for CPU architecture " + PlatformInfo.getCPU() + " not yet supported");
// Note: pcRegIndex and fpRegIndex do not appear to be referenced
}
}
if (useCache) {
// Cache portion of the remote process's address space.
@ -375,7 +385,11 @@ public class ProcDebuggerLocal extends DebuggerBase implements ProcDebugger {
int pagesize = getPageSize0();
if (pagesize == -1) {
// return the hard coded default value.
pagesize = (PlatformInfo.getCPU().equals("x86"))? 4096 : 8192;
if (PlatformInfo.getCPU().equals("sparc") ||
PlatformInfo.getCPU().equals("amd64") )
pagesize = 8192;
else
pagesize = 4096;
}
return pagesize;
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,6 +26,7 @@ package sun.jvm.hotspot.debugger.remote;
import java.rmi.*;
import java.util.*;
import java.lang.reflect.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.debugger.cdbg.*;
@ -70,7 +71,18 @@ public class RemoteDebuggerClient extends DebuggerBase implements JVMDebugger {
cacheNumPages = parseCacheNumPagesProperty(cacheSize / cachePageSize);
unalignedAccessesOkay = true;
} else {
throw new DebuggerException("Thread access for CPU architecture " + cpu + " not yet supported");
try {
Class tf = Class.forName("sun.jvm.hotspot.debugger.remote." +
cpu.toLowerCase() + ".Remote" + cpu.toUpperCase() +
"ThreadFactory");
Constructor[] ctf = tf.getConstructors();
threadFactory = (RemoteThreadFactory)ctf[0].newInstance(this);
} catch (Exception e) {
throw new DebuggerException("Thread access for CPU architecture " + cpu + " not yet supported");
}
cachePageSize = 4096;
cacheNumPages = parseCacheNumPagesProperty(cacheSize / cachePageSize);
unalignedAccessesOkay = false;
}
// Cache portion of the remote process's address space.

@ -25,6 +25,7 @@
package sun.jvm.hotspot.debugger.sparc;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.debugger.cdbg.*;
/** Currently provides just the minimal information necessary to get
stack traces working. FIXME: currently hardwired for v9 -- will
@ -124,6 +125,10 @@ public abstract class SPARCThreadContext implements ThreadContext {
return data[index];
}
public CFrame getTopFrame(Debugger dbg) {
return null;
}
/** This can't be implemented in this class since we would have to
tie the implementation to, for example, the debugging system */
public abstract void setRegisterAsAddress(int index, Address value);

@ -25,6 +25,7 @@
package sun.jvm.hotspot.debugger.x86;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.debugger.cdbg.*;
/** Specifies the thread context on x86 platforms; only a sub-portion
of the context is guaranteed to be present on all operating
@ -109,6 +110,10 @@ public abstract class X86ThreadContext implements ThreadContext {
return data[index];
}
public CFrame getTopFrame(Debugger dbg) {
return null;
}
/** This can't be implemented in this class since we would have to
tie the implementation to, for example, the debugging system */
public abstract void setRegisterAsAddress(int index, Address value);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -91,6 +91,16 @@ public class Threads {
access = new LinuxAMD64JavaThreadPDAccess();
} else if (cpu.equals("sparc")) {
access = new LinuxSPARCJavaThreadPDAccess();
} else {
try {
access = (JavaThreadPDAccess)
Class.forName("sun.jvm.hotspot.runtime.linux_" +
cpu.toLowerCase() + ".Linux" + cpu.toUpperCase() +
"JavaThreadPDAccess").newInstance();
} catch (Exception e) {
throw new RuntimeException("OS/CPU combination " + os + "/" + cpu +
" not yet supported");
}
}
} else if (os.equals("bsd")) {
if (cpu.equals("x86")) {

@ -92,6 +92,8 @@ public class VM {
private boolean usingServerCompiler;
/** Flag indicating whether UseTLAB is turned on */
private boolean useTLAB;
/** Flag indicating whether invokedynamic support is on */
private boolean enableInvokeDynamic;
/** alignment constants */
private boolean isLP64;
private int bytesPerLong;
@ -317,6 +319,7 @@ public class VM {
}
useTLAB = (db.lookupIntConstant("UseTLAB").intValue() != 0);
enableInvokeDynamic = (db.lookupIntConstant("EnableInvokeDynamic").intValue() != 0);
if (debugger != null) {
isLP64 = debugger.getMachineDescription().isLP64();
@ -552,6 +555,10 @@ public class VM {
return useTLAB;
}
public boolean getEnableInvokeDynamic() {
return enableInvokeDynamic;
}
public TypeDataBase getTypeDataBase() {
return db;
}

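The VM hunk above exposes the target VM's EnableInvokeDynamic flag to SA code by reading the exported int constant once during attach. A caller would gate invokedynamic-aware logic on the new accessor, roughly as follows; the surrounding tool class is hypothetical:

import sun.jvm.hotspot.runtime.VM;

class InvokeDynamicAwareToolSketch {
    void inspect() {
        // The accessor added above reflects the target VM's EnableInvokeDynamic
        // flag, read once from the exported "EnableInvokeDynamic" int constant.
        if (VM.getVM().getEnableInvokeDynamic()) {
            // ... safe to walk invokedynamic call-site metadata here ...
        }
    }
}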
@ -204,7 +204,13 @@ public class HTMLGenerator implements /* imports */ ClassConstants {
} else if (cpu.equals("ia64")) {
cpuHelper = new IA64Helper();
} else {
try {
cpuHelper = (CPUHelper)Class.forName("sun.jvm.hotspot.asm." +
cpu.toLowerCase() + "." + cpu.toUpperCase() +
"Helper").newInstance();
} catch (Exception e) {
throw new RuntimeException("cpu '" + cpu + "' is not yet supported!");
}
}
}

@ -0,0 +1,31 @@
/*
* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.utilities;
public interface AltPlatformInfo {
// Additional cpu types can be tested via this interface
public boolean knownCPU(String cpu);
}

@ -64,6 +64,13 @@ public class PlatformInfo {
} else if (cpu.equals("ia64") || cpu.equals("amd64") || cpu.equals("x86_64")) {
return cpu;
} else {
try {
Class pic = Class.forName("sun.jvm.hotspot.utilities.PlatformInfoClosed");
AltPlatformInfo api = (AltPlatformInfo)pic.newInstance();
if (api.knownCPU(cpu)) {
return cpu;
}
} catch (Exception e) {}
throw new UnsupportedPlatformException("CPU type " + cpu + " not yet supported");
}
}

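PlatformInfo now probes for an optional extension before rejecting an unknown CPU: if a class named sun.jvm.hotspot.utilities.PlatformInfoClosed exists and implements the new AltPlatformInfo interface from the file above, it gets the first say. A sketch of the probe; PlatformInfoClosed is not in the open repository, and the sketch substitutes RuntimeException for the UnsupportedPlatformException thrown in the hunk to stay self-contained:

import sun.jvm.hotspot.utilities.AltPlatformInfo;

final class ClosedCpuProbeSketch {
    static String knownOrThrow(String cpu) {
        try {
            // The closed class is looked up by name, so open builds need not ship it.
            Class<?> pic = Class.forName("sun.jvm.hotspot.utilities.PlatformInfoClosed");
            AltPlatformInfo api = (AltPlatformInfo) pic.newInstance();
            if (api.knownCPU(cpu)) {
                return cpu; // the alternate port claims this CPU
            }
        } catch (Exception e) {
            // Class not present (the normal case in OpenJDK): fall through.
        }
        throw new RuntimeException("CPU type " + cpu + " not yet supported");
    }
}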
@ -22,6 +22,14 @@
#
#
ifeq ($(HS_ALT_MAKE),)
ifneq ($(OPENJDK),true)
HS_ALT_MAKE=$(GAMMADIR)/make/closed
else
HS_ALT_MAKE=NO_SUCH_PATH
endif
endif
# The common definitions for hotspot builds.
# Optionally include SPEC file generated by configure.
@ -327,3 +335,4 @@ EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jmm.h
ifndef JAVASE_EMBEDDED
EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jfr.h
endif

@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2012
HS_MAJOR_VER=24
HS_MINOR_VER=0
HS_BUILD_NUMBER=20
HS_BUILD_NUMBER=22
JDK_MAJOR_VER=1
JDK_MINOR_VER=8

@ -38,7 +38,7 @@ jprt.need.sibling.build=false
# This tells jprt what default release we want to build
jprt.hotspot.default.release=jdk7
jprt.hotspot.default.release=jdk8
jprt.tools.default.release=${jprt.submit.option.release?${jprt.submit.option.release}:${jprt.hotspot.default.release}}
@ -54,77 +54,77 @@ jprt.sync.push=false
# Define the Solaris platforms we want for the various releases
jprt.my.solaris.sparc.jdk8=solaris_sparc_5.10
jprt.my.solaris.sparc.jdk7=solaris_sparc_5.10
jprt.my.solaris.sparc.jdk7u6=${jprt.my.solaris.sparc.jdk7}
jprt.my.solaris.sparc.jdk7u8=${jprt.my.solaris.sparc.jdk7}
jprt.my.solaris.sparc=${jprt.my.solaris.sparc.${jprt.tools.default.release}}
jprt.my.solaris.sparcv9.jdk8=solaris_sparcv9_5.10
jprt.my.solaris.sparcv9.jdk7=solaris_sparcv9_5.10
jprt.my.solaris.sparcv9.jdk7u6=${jprt.my.solaris.sparcv9.jdk7}
jprt.my.solaris.sparcv9.jdk7u8=${jprt.my.solaris.sparcv9.jdk7}
jprt.my.solaris.sparcv9=${jprt.my.solaris.sparcv9.${jprt.tools.default.release}}
jprt.my.solaris.i586.jdk8=solaris_i586_5.10
jprt.my.solaris.i586.jdk7=solaris_i586_5.10
jprt.my.solaris.i586.jdk7u6=${jprt.my.solaris.i586.jdk7}
jprt.my.solaris.i586.jdk7u8=${jprt.my.solaris.i586.jdk7}
jprt.my.solaris.i586=${jprt.my.solaris.i586.${jprt.tools.default.release}}
jprt.my.solaris.x64.jdk8=solaris_x64_5.10
jprt.my.solaris.x64.jdk7=solaris_x64_5.10
jprt.my.solaris.x64.jdk7u6=${jprt.my.solaris.x64.jdk7}
jprt.my.solaris.x64.jdk7u8=${jprt.my.solaris.x64.jdk7}
jprt.my.solaris.x64=${jprt.my.solaris.x64.${jprt.tools.default.release}}
jprt.my.linux.i586.jdk8=linux_i586_2.6
jprt.my.linux.i586.jdk7=linux_i586_2.6
jprt.my.linux.i586.jdk7u6=${jprt.my.linux.i586.jdk7}
jprt.my.linux.i586.jdk7u8=${jprt.my.linux.i586.jdk7}
jprt.my.linux.i586=${jprt.my.linux.i586.${jprt.tools.default.release}}
jprt.my.linux.x64.jdk8=linux_x64_2.6
jprt.my.linux.x64.jdk7=linux_x64_2.6
jprt.my.linux.x64.jdk7u6=${jprt.my.linux.x64.jdk7}
jprt.my.linux.x64.jdk7u8=${jprt.my.linux.x64.jdk7}
jprt.my.linux.x64=${jprt.my.linux.x64.${jprt.tools.default.release}}
jprt.my.linux.ppc.jdk8=linux_ppc_2.6
jprt.my.linux.ppc.jdk7=linux_ppc_2.6
jprt.my.linux.ppc.jdk7u6=${jprt.my.linux.ppc.jdk7}
jprt.my.linux.ppc.jdk7u8=${jprt.my.linux.ppc.jdk7}
jprt.my.linux.ppc=${jprt.my.linux.ppc.${jprt.tools.default.release}}
jprt.my.linux.ppcv2.jdk8=linux_ppcv2_2.6
jprt.my.linux.ppcv2.jdk7=linux_ppcv2_2.6
jprt.my.linux.ppcv2.jdk7u6=${jprt.my.linux.ppcv2.jdk7}
jprt.my.linux.ppcv2.jdk7u8=${jprt.my.linux.ppcv2.jdk7}
jprt.my.linux.ppcv2=${jprt.my.linux.ppcv2.${jprt.tools.default.release}}
jprt.my.linux.ppcsflt.jdk8=linux_ppcsflt_2.6
jprt.my.linux.ppcsflt.jdk7=linux_ppcsflt_2.6
jprt.my.linux.ppcsflt.jdk7u6=${jprt.my.linux.ppcsflt.jdk7}
jprt.my.linux.ppcsflt.jdk7u8=${jprt.my.linux.ppcsflt.jdk7}
jprt.my.linux.ppcsflt=${jprt.my.linux.ppcsflt.${jprt.tools.default.release}}
jprt.my.linux.armvfp.jdk8=linux_armvfp_2.6
jprt.my.linux.armvfp.jdk7=linux_armvfp_2.6
jprt.my.linux.armvfp.jdk7u6=${jprt.my.linux.armvfp.jdk7}
jprt.my.linux.armvfp.jdk7u8=${jprt.my.linux.armvfp.jdk7}
jprt.my.linux.armvfp=${jprt.my.linux.armvfp.${jprt.tools.default.release}}
jprt.my.linux.armv6.jdk8=linux_armv6_2.6
jprt.my.linux.armv6.jdk7=linux_armv6_2.6
jprt.my.linux.armv6.jdk7u6=${jprt.my.linux.armv6.jdk7}
jprt.my.linux.armv6.jdk7u8=${jprt.my.linux.armv6.jdk7}
jprt.my.linux.armv6=${jprt.my.linux.armv6.${jprt.tools.default.release}}
jprt.my.linux.armsflt.jdk8=linux_armsflt_2.6
jprt.my.linux.armsflt.jdk7=linux_armsflt_2.6
jprt.my.linux.armsflt.jdk7u6=${jprt.my.linux.armsflt.jdk7}
jprt.my.linux.armsflt.jdk7u8=${jprt.my.linux.armsflt.jdk7}
jprt.my.linux.armsflt=${jprt.my.linux.armsflt.${jprt.tools.default.release}}
jprt.my.macosx.x64.jdk8=macosx_x64_10.7
jprt.my.macosx.x64.jdk7=macosx_x64_10.7
jprt.my.macosx.x64.jdk7u6=${jprt.my.macosx.x64.jdk7}
jprt.my.macosx.x64.jdk7u8=${jprt.my.macosx.x64.jdk7}
jprt.my.macosx.x64=${jprt.my.macosx.x64.${jprt.tools.default.release}}
jprt.my.windows.i586.jdk8=windows_i586_5.1
jprt.my.windows.i586.jdk7=windows_i586_5.1
jprt.my.windows.i586.jdk7u6=${jprt.my.windows.i586.jdk7}
jprt.my.windows.i586.jdk7u8=${jprt.my.windows.i586.jdk7}
jprt.my.windows.i586=${jprt.my.windows.i586.${jprt.tools.default.release}}
jprt.my.windows.x64.jdk8=windows_x64_5.2
jprt.my.windows.x64.jdk7=windows_x64_5.2
jprt.my.windows.x64.jdk7u6=${jprt.my.windows.x64.jdk7}
jprt.my.windows.x64.jdk7u8=${jprt.my.windows.x64.jdk7}
jprt.my.windows.x64=${jprt.my.windows.x64.${jprt.tools.default.release}}
# Standard list of jprt build targets for this source tree
@ -159,7 +159,7 @@ jprt.build.targets.all=${jprt.build.targets.standard}, \
jprt.build.targets.jdk8=${jprt.build.targets.all}
jprt.build.targets.jdk7=${jprt.build.targets.all}
jprt.build.targets.jdk7u6=${jprt.build.targets.all}
jprt.build.targets.jdk7u8=${jprt.build.targets.all}
jprt.build.targets=${jprt.build.targets.${jprt.tools.default.release}}
# Subset lists of test targets for this source tree
@ -452,7 +452,7 @@ jprt.test.targets.embedded= \
jprt.test.targets.jdk8=${jprt.test.targets.standard}
jprt.test.targets.jdk7=${jprt.test.targets.standard}
jprt.test.targets.jdk7u6=${jprt.test.targets.jdk7}
jprt.test.targets.jdk7u8=${jprt.test.targets.jdk7}
jprt.test.targets=${jprt.test.targets.${jprt.tools.default.release}}
# The default test/Makefile targets that should be run
@ -512,7 +512,7 @@ jprt.make.rule.test.targets.embedded = \
jprt.make.rule.test.targets.jdk8=${jprt.make.rule.test.targets.standard}
jprt.make.rule.test.targets.jdk7=${jprt.make.rule.test.targets.standard}
jprt.make.rule.test.targets.jdk7u6=${jprt.make.rule.test.targets.jdk7}
jprt.make.rule.test.targets.jdk7u8=${jprt.make.rule.test.targets.jdk7}
jprt.make.rule.test.targets=${jprt.make.rule.test.targets.${jprt.tools.default.release}}
# 7155453: Work-around to prevent popups on OSX from blocking test completion

@ -1,5 +1,5 @@
#
# Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -133,8 +133,10 @@ $(GENERATEDFILES): refresh_adfiles
# Note that product files are updated via "mv", which is atomic.
TEMPDIR := $(OUTDIR)/mktmp$(shell echo $$$$)
# Debuggable by default
CFLAGS += -g
ifneq ($(DEBUG_BINARIES), true)
# Debuggable by default (unless already done by DEBUG_BINARIES)
CFLAGS += -g
endif
# Pass -D flags into ADLC.
ADLCFLAGS += $(SYSDEFS)

@ -295,6 +295,8 @@ ADD_SA_BINARIES/ia64 =
ADD_SA_BINARIES/arm =
ADD_SA_BINARIES/zero =
-include $(HS_ALT_MAKE)/linux/makefiles/defs.make
EXPORT_LIST += $(ADD_SA_BINARIES/$(HS_ARCH))

@ -215,45 +215,44 @@ AOUT_FLAGS += -Xlinker -export-dynamic
#------------------------------------------------------------------------
# Debug flags
# Use the stabs format for debugging information (this is the default
# on gcc-2.91). It's good enough, has all the information about line
# numbers and local variables, and libjvm_g.so is only about 16M.
# Change this back to "-g" if you want the most expressive format.
# (warning: that could easily inflate libjvm_g.so to 150M!)
# Note: The Itanium gcc compiler crashes when using -gstabs.
DEBUG_CFLAGS/ia64 = -g
DEBUG_CFLAGS/amd64 = -g
DEBUG_CFLAGS/arm = -g
DEBUG_CFLAGS/ppc = -g
DEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),)
DEBUG_CFLAGS += -gstabs
endif
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
FASTDEBUG_CFLAGS/ia64 = -g
FASTDEBUG_CFLAGS/amd64 = -g
FASTDEBUG_CFLAGS/arm = -g
FASTDEBUG_CFLAGS/ppc = -g
FASTDEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
ifeq ($(FASTDEBUG_CFLAGS/$(BUILDARCH)),)
FASTDEBUG_CFLAGS += -gstabs
endif
OPT_CFLAGS/ia64 = -g
OPT_CFLAGS/amd64 = -g
OPT_CFLAGS/arm = -g
OPT_CFLAGS/ppc = -g
OPT_CFLAGS += $(OPT_CFLAGS/$(BUILDARCH))
ifeq ($(OPT_CFLAGS/$(BUILDARCH)),)
OPT_CFLAGS += -gstabs
endif
endif
# DEBUG_BINARIES overrides everything, use full -g debug information
# DEBUG_BINARIES uses full -g debug information for all configs
ifeq ($(DEBUG_BINARIES), true)
DEBUG_CFLAGS = -g
CFLAGS += $(DEBUG_CFLAGS)
CFLAGS += -g
else
# Use the stabs format for debugging information (this is the default
# on gcc-2.91). It's good enough, has all the information about line
# numbers and local variables, and libjvm_g.so is only about 16M.
# Change this back to "-g" if you want the most expressive format.
# (warning: that could easily inflate libjvm_g.so to 150M!)
# Note: The Itanium gcc compiler crashes when using -gstabs.
DEBUG_CFLAGS/ia64 = -g
DEBUG_CFLAGS/amd64 = -g
DEBUG_CFLAGS/arm = -g
DEBUG_CFLAGS/ppc = -g
DEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),)
DEBUG_CFLAGS += -gstabs
endif
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
FASTDEBUG_CFLAGS/ia64 = -g
FASTDEBUG_CFLAGS/amd64 = -g
FASTDEBUG_CFLAGS/arm = -g
FASTDEBUG_CFLAGS/ppc = -g
FASTDEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
ifeq ($(FASTDEBUG_CFLAGS/$(BUILDARCH)),)
FASTDEBUG_CFLAGS += -gstabs
endif
OPT_CFLAGS/ia64 = -g
OPT_CFLAGS/amd64 = -g
OPT_CFLAGS/arm = -g
OPT_CFLAGS/ppc = -g
OPT_CFLAGS += $(OPT_CFLAGS/$(BUILDARCH))
ifeq ($(OPT_CFLAGS/$(BUILDARCH)),)
OPT_CFLAGS += -gstabs
endif
endif
endif
# If we are building HEADLESS, pass on to VM

@ -30,10 +30,16 @@
include $(GAMMADIR)/make/linux/makefiles/rules.make
include $(GAMMADIR)/make/defs.make
include $(GAMMADIR)/make/altsrc.make
AGENT_DIR = $(GAMMADIR)/agent
include $(GAMMADIR)/make/sa.files
-include $(HS_ALT_MAKE)/linux/makefiles/sa.make
TOPDIR = $(shell echo `pwd`)
GENERATED = $(TOPDIR)/../generated
@ -52,17 +58,15 @@ SA_BUILD_VERSION_PROP = "sun.jvm.hotspot.runtime.VM.saBuildVersion=$(SA_BUILD_VE
SA_PROPERTIES = $(SA_CLASSDIR)/sa.properties
# if $(AGENT_DIR) does not exist, we don't build SA
# also, we don't build SA on Itanium, PowerPC, ARM or zero.
# also, we don't build SA on Itanium or zero.
all:
if [ -d $(AGENT_DIR) -a "$(SRCARCH)" != "ia64" \
-a "$(SRCARCH)" != "arm" \
-a "$(SRCARCH)" != "ppc" \
-a "$(SRCARCH)" != "zero" ] ; then \
$(MAKE) -f sa.make $(GENERATED)/sa-jdi.jar; \
fi
$(GENERATED)/sa-jdi.jar: $(AGENT_FILES)
$(GENERATED)/sa-jdi.jar:: $(AGENT_FILES)
$(QUIETLY) echo "Making $@"
$(QUIETLY) if [ "$(BOOT_JAVA_HOME)" = "" ]; then \
echo "ALT_BOOTDIR, BOOTDIR or JAVA_HOME needs to be defined to build SA"; \
@ -111,3 +115,5 @@ clean:
rm -rf $(SA_CLASSDIR)
rm -rf $(GENERATED)/sa-jdi.jar
rm -rf $(AGENT_FILES_LIST)
-include $(HS_ALT_MAKE)/linux/makefiles/sa-rules.make

@ -21,6 +21,8 @@
# questions.
#
#
include $(GAMMADIR)/make/defs.make
include $(GAMMADIR)/make/altsrc.make
# Rules to build serviceability agent library, used by vm.make
@ -48,6 +50,8 @@ SASRCFILES = $(SASRCDIR)/salibelf.c \
$(SASRCDIR)/ps_core.c \
$(SASRCDIR)/LinuxDebuggerLocal.c
-include $(HS_ALT_MAKE)/linux/makefiles/saproc.make
SAMAPFILE = $(SASRCDIR)/mapfile
DEST_SAPROC = $(JDK_LIBDIR)/$(LIBSAPROC)
@ -60,15 +64,19 @@ ifeq ($(DEBUG_BINARIES), true)
endif
# if $(AGENT_DIR) does not exist, we don't build SA
# also, we don't build SA on Itanium, PPC, ARM or zero.
# also, we don't build SA on Itanium or zero.
ifneq ($(wildcard $(AGENT_DIR)),)
ifneq ($(filter-out ia64 arm ppc zero,$(SRCARCH)),)
ifneq ($(filter-out ia64 zero,$(SRCARCH)),)
BUILDLIBSAPROC = $(LIBSAPROC)
endif
endif
ifneq ($(ALT_SASRCDIR),)
ALT_SAINCDIR=-I$(ALT_SASRCDIR)
else
ALT_SAINCDIR=
endif
SA_LFLAGS = $(MAPFLAG:FILENAME=$(SAMAPFILE)) $(LDFLAGS_HASH_STYLE)
$(LIBSAPROC): $(SASRCFILES) $(SAMAPFILE)
@ -84,6 +92,7 @@ $(LIBSAPROC): $(SASRCFILES) $(SAMAPFILE)
-I$(GENERATED) \
-I$(BOOT_JAVA_HOME)/include \
-I$(BOOT_JAVA_HOME)/include/$(Platform_os_family) \
$(ALT_SAINCDIR) \
$(SASRCFILES) \
$(SA_LFLAGS) \
$(SA_DEBUG_CFLAGS) \

@ -32,7 +32,7 @@ ifneq ($(OSNAME), windows)
ifndef LP64
PARTIAL_NONPIC=1
endif
PIC_ARCH = ppc
PIC_ARCH = ppc arm
ifneq ("$(filter $(PIC_ARCH),$(BUILDARCH))","")
PARTIAL_NONPIC=0
endif

@ -188,14 +188,22 @@ ifdef COOKED_BUILD_NUMBER
MAKE_ARGS += JDK_BUILD_NUMBER=$(COOKED_BUILD_NUMBER)
endif
NMAKE= MAKEFLAGS= MFLAGS= nmake /NOLOGO
NMAKE= MAKEFLAGS= MFLAGS= nmake -NOLOGO
ifndef SYSTEM_UNAME
SYSTEM_UNAME := $(shell uname)
export SYSTEM_UNAME
endif
# Check for CYGWIN
ifneq (,$(findstring CYGWIN,$(shell uname)))
ifneq (,$(findstring CYGWIN,$(SYSTEM_UNAME)))
USING_CYGWIN=true
else
USING_CYGWIN=false
endif
# Check for MinGW
ifneq (,$(findstring MINGW,$(SYSTEM_UNAME)))
USING_MINGW=true
endif
# FIXUP: The subdirectory for a debug build is NOT the same on all platforms
VM_DEBUG=debug
@ -208,11 +216,16 @@ ifeq ($(USING_CYGWIN), true)
ABS_BOOTDIR := $(subst /,\\,$(shell /bin/cygpath -m -a "$(BOOTDIR)"))
ABS_GAMMADIR := $(subst /,\\,$(shell /bin/cygpath -m -a "$(GAMMADIR)"))
ABS_OS_MAKEFILE := $(shell /bin/cygpath -m -a "$(HS_MAKE_DIR)/$(OSNAME)")/build.make
else
ABS_OUTPUTDIR := $(subst /,\\,$(shell $(CD) $(OUTPUTDIR);$(PWD)))
ABS_BOOTDIR := $(subst /,\\,$(shell $(CD) $(BOOTDIR);$(PWD)))
ABS_GAMMADIR := $(subst /,\\,$(shell $(CD) $(GAMMADIR);$(PWD)))
ABS_OS_MAKEFILE := $(subst /,\\,$(shell $(CD) $(HS_MAKE_DIR)/$(OSNAME);$(PWD))/build.make)
else ifeq ($(USING_MINGW), true)
ABS_OUTPUTDIR := $(shell $(CD) $(OUTPUTDIR);$(PWD))
ABS_BOOTDIR := $(shell $(CD) $(BOOTDIR);$(PWD))
ABS_GAMMADIR := $(shell $(CD) $(GAMMADIR);$(PWD))
ABS_OS_MAKEFILE := $(shell $(CD) $(HS_MAKE_DIR)/$(OSNAME);$(PWD))/build.make
else
ABS_OUTPUTDIR := $(subst /,\\,$(shell $(CD) $(OUTPUTDIR);$(PWD)))
ABS_BOOTDIR := $(subst /,\\,$(shell $(CD) $(BOOTDIR);$(PWD)))
ABS_GAMMADIR := $(subst /,\\,$(shell $(CD) $(GAMMADIR);$(PWD)))
ABS_OS_MAKEFILE := $(subst /,\\,$(shell $(CD) $(HS_MAKE_DIR)/$(OSNAME);$(PWD))/build.make)
endif
# Disable building SA on windows until we are sure

@ -23,14 +23,15 @@
#
# These are the commands used externally to compile and run.
# The \ are used here for traditional Windows apps and " quoted to get
# past the Unix-like shell:
!ifdef BootStrapDir
RUN_JAVA=$(BootStrapDir)\bin\java
RUN_JAVAP=$(BootStrapDir)\bin\javap
RUN_JAVAH=$(BootStrapDir)\bin\javah
RUN_JAR=$(BootStrapDir)\bin\jar
COMPILE_JAVAC=$(BootStrapDir)\bin\javac $(BOOTSTRAP_JAVAC_FLAGS)
COMPILE_RMIC=$(BootStrapDir)\bin\rmic
RUN_JAVA="$(BootStrapDir)\bin\java"
RUN_JAVAP="$(BootStrapDir)\bin\javap"
RUN_JAVAH="$(BootStrapDir)\bin\javah"
RUN_JAR="$(BootStrapDir)\bin\jar"
COMPILE_JAVAC="$(BootStrapDir)\bin\javac" $(BOOTSTRAP_JAVAC_FLAGS)
COMPILE_RMIC="$(BootStrapDir)\bin\rmic"
BOOT_JAVA_HOME=$(BootStrapDir)
!else
RUN_JAVA=java

@ -36,37 +36,37 @@ checkAndBuildSA::
!include $(WorkSpace)/make/windows/makefiles/rules.make
!include $(WorkSpace)/make/sa.files
GENERATED = ..\generated
GENERATED = ../generated
# tools.jar is needed by the JDI - SA binding
SA_CLASSPATH = $(BOOT_JAVA_HOME)\lib\tools.jar
SA_CLASSPATH = $(BOOT_JAVA_HOME)/lib/tools.jar
SA_CLASSDIR = $(GENERATED)\saclasses
SA_CLASSDIR = $(GENERATED)/saclasses
SA_BUILD_VERSION_PROP = sun.jvm.hotspot.runtime.VM.saBuildVersion=$(SA_BUILD_VERSION)
SA_PROPERTIES = $(SA_CLASSDIR)\sa.properties
SA_PROPERTIES = $(SA_CLASSDIR)/sa.properties
default:: $(GENERATED)\sa-jdi.jar
default:: $(GENERATED)/sa-jdi.jar
# Remove the space between $(SA_BUILD_VERSION_PROP) and > below as it adds a white space
# at the end of SA version string and causes a version mismatch with the target VM version.
$(GENERATED)\sa-jdi.jar: $(AGENT_FILES:/=\)
@if not exist $(SA_CLASSDIR) mkdir $(SA_CLASSDIR)
@echo ...Building sa-jdi.jar
$(GENERATED)/sa-jdi.jar: $(AGENT_FILES)
$(QUIETLY) mkdir -p $(SA_CLASSDIR)
@echo ...Building sa-jdi.jar into $(SA_CLASSDIR)
@echo ...$(COMPILE_JAVAC) -classpath $(SA_CLASSPATH) -d $(SA_CLASSDIR) ....
@$(COMPILE_JAVAC) -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) $(AGENT_FILES:/=\)
@$(COMPILE_JAVAC) -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) $(AGENT_FILES)
$(COMPILE_RMIC) -classpath $(SA_CLASSDIR) -d $(SA_CLASSDIR) sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer
$(QUIETLY) echo $(SA_BUILD_VERSION_PROP)> $(SA_PROPERTIES)
$(QUIETLY) rm -f $(SA_CLASSDIR)/sun/jvm/hotspot/utilities/soql/sa.js
$(QUIETLY) cp $(AGENT_SRC_DIR)/sun/jvm/hotspot/utilities/soql/sa.js $(SA_CLASSDIR)/sun/jvm/hotspot/utilities/soql
$(QUIETLY) rm -rf $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources
$(QUIETLY) mkdir $(SA_CLASSDIR)\sun\jvm\hotspot\ui\resources
$(QUIETLY) mkdir $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources
$(QUIETLY) cp $(AGENT_SRC_DIR)/sun/jvm/hotspot/ui/resources/*.png $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources
$(QUIETLY) cp -r $(AGENT_SRC_DIR)/images/* $(SA_CLASSDIR)
$(RUN_JAR) cf $@ -C $(SA_CLASSDIR) .
$(RUN_JAR) uf $@ -C $(AGENT_SRC_DIR:/=\) META-INF\services\com.sun.jdi.connect.Connector
$(RUN_JAR) uf $@ -C $(AGENT_SRC_DIR) META-INF/services/com.sun.jdi.connect.Connector
$(RUN_JAVAH) -classpath $(SA_CLASSDIR) -jni sun.jvm.hotspot.debugger.windbg.WindbgDebuggerLocal
$(RUN_JAVAH) -classpath $(SA_CLASSDIR) -jni sun.jvm.hotspot.debugger.x86.X86ThreadContext
$(RUN_JAVAH) -classpath $(SA_CLASSDIR) -jni sun.jvm.hotspot.debugger.ia64.IA64ThreadContext
@ -85,27 +85,27 @@ checkAndBuildSA:: $(SAWINDBG)
# will be useful to have the assertion checks in place
!if "$(BUILDARCH)" == "ia64"
SA_CFLAGS = /nologo $(MS_RUNTIME_OPTION) /W3 $(GX_OPTION) /Od /D "WIN32" /D "WIN64" /D "_WINDOWS" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
SA_CFLAGS = -nologo $(MS_RUNTIME_OPTION) -W3 $(GX_OPTION) -Od -D "WIN32" -D "WIN64" -D "_WINDOWS" -D "_DEBUG" -D "_CONSOLE" -D "_MBCS" -YX -FD -c
!elseif "$(BUILDARCH)" == "amd64"
SA_CFLAGS = /nologo $(MS_RUNTIME_OPTION) /W3 $(GX_OPTION) /Od /D "WIN32" /D "WIN64" /D "_WINDOWS" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
SA_CFLAGS = -nologo $(MS_RUNTIME_OPTION) -W3 $(GX_OPTION) -Od -D "WIN32" -D "WIN64" -D "_WINDOWS" -D "_DEBUG" -D "_CONSOLE" -D "_MBCS" -YX -FD -c
!if "$(COMPILER_NAME)" == "VS2005"
# On amd64, VS2005 compiler requires bufferoverflowU.lib on the link command line,
# otherwise we get missing __security_check_cookie externals at link time.
SA_LD_FLAGS = bufferoverflowU.lib
!endif
!else
SA_CFLAGS = /nologo $(MS_RUNTIME_OPTION) /W3 /Gm $(GX_OPTION) /Od /D "WIN32" /D "_WINDOWS" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /GZ /c
SA_CFLAGS = -nologo $(MS_RUNTIME_OPTION) -W3 -Gm $(GX_OPTION) -Od -D "WIN32" -D "_WINDOWS" -D "_DEBUG" -D "_CONSOLE" -D "_MBCS" -YX -FD -GZ -c
!if "$(ENABLE_FULL_DEBUG_SYMBOLS)" == "1"
SA_CFLAGS = $(SA_CFLAGS) /ZI
SA_CFLAGS = $(SA_CFLAGS) -ZI
!endif
!endif
!if "$(MT)" != ""
SA_LD_FLAGS = /manifest $(SA_LD_FLAGS)
SA_LD_FLAGS = -manifest $(SA_LD_FLAGS)
!endif
SASRCFILE = $(AGENT_DIR)/src/os/win32/windbg/sawindbg.cpp
SA_LFLAGS = $(SA_LD_FLAGS) /nologo /subsystem:console /machine:$(MACHINE)
SA_LFLAGS = $(SA_LD_FLAGS) -nologo -subsystem:console -machine:$(MACHINE)
!if "$(ENABLE_FULL_DEBUG_SYMBOLS)" == "1"
SA_LFLAGS = $(SA_LFLAGS) /map /debug
SA_LFLAGS = $(SA_LFLAGS) -map -debug
!endif
# Note that we do not keep sawindbg.obj around as it would then
@ -117,15 +117,15 @@ SA_LFLAGS = $(SA_LFLAGS) /map /debug
$(SAWINDBG): $(SASRCFILE)
set INCLUDE=$(SA_INCLUDE)$(INCLUDE)
$(CXX) @<<
/I"$(BootStrapDir)/include" /I"$(BootStrapDir)/include/win32"
/I"$(GENERATED)" $(SA_CFLAGS)
-I"$(BootStrapDir)/include" -I"$(BootStrapDir)/include/win32"
-I"$(GENERATED)" $(SA_CFLAGS)
$(SASRCFILE)
/out:$*.obj
-out:$*.obj
<<
set LIB=$(SA_LIB)$(LIB)
$(LD) /out:$@ /DLL $*.obj dbgeng.lib $(SA_LFLAGS)
$(LD) -out:$@ -DLL $*.obj dbgeng.lib $(SA_LFLAGS)
!if "$(MT)" != ""
$(MT) /manifest $(@F).manifest /outputresource:$(@F);#2
$(MT) -manifest $(@F).manifest -outputresource:$(@F);#2
!endif
!if "$(ENABLE_FULL_DEBUG_SYMBOLS)" == "1"
!if "$(ZIP_DEBUGINFO_FILES)" == "1"
@ -136,6 +136,6 @@ $(SAWINDBG): $(SASRCFILE)
-@rm -f $*.obj
cleanall :
rm -rf $(GENERATED:\=/)/saclasses
rm -rf $(GENERATED:\=/)/sa-jdi.jar
rm -rf $(GENERATED)/saclasses
rm -rf $(GENERATED)/sa-jdi.jar
!endif

@ -36,11 +36,12 @@ CXX=cl.exe
!ifdef SUBDIRS
# \ is used below because $(MAKE) is nmake here, which expects Windows paths
$(SUBDIRS): FORCE
@if not exist $@ mkdir $@
@if not exist $@\local.make echo # Empty > $@\local.make
@echo nmake $(ACTION) in $(DIR)\$@
cd $@ && $(MAKE) /NOLOGO /f $(WorkSpace)\make\windows\makefiles\$@.make $(ACTION) DIR=$(DIR)\$@ BUILD_FLAVOR=$(BUILD_FLAVOR)
@if not exist $@/local.make echo # Empty > $@/local.make
@echo nmake $(ACTION) in $(DIR)/$@
cd $@ && $(MAKE) -NOLOGO -f $(WorkSpace)\make\windows\makefiles\$@.make $(ACTION) DIR=$(DIR)\$@ BUILD_FLAVOR=$(BUILD_FLAVOR)
!endif
# Creates the needed directory

@ -108,7 +108,7 @@ ProjectCreatorIDEOptions = $(ProjectCreatorIDEOptions) \
-define HOTSPOT_VM_DISTRO=\\\"$(HOTSPOT_VM_DISTRO)\\\"
$(HOTSPOTBUILDSPACE)/$(ProjectFile): $(HOTSPOTBUILDSPACE)/classes/ProjectCreator.class
@$(RUN_JAVA) -Djava.class.path=$(HOTSPOTBUILDSPACE)/classes ProjectCreator WinGammaPlatform$(VcVersion) $(ProjectCreatorIDEOptions)
@$(RUN_JAVA) -Djava.class.path="$(HOTSPOTBUILDSPACE)/classes" ProjectCreator WinGammaPlatform$(VcVersion) $(ProjectCreatorIDEOptions)
clean:
@rm -rf $(HOTSPOTBUILDSPACE)/classes

@ -435,85 +435,6 @@ void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
}
void G1UnsafeGetObjSATBBarrierStub::emit_code(LIR_Assembler* ce) {
// At this point we know that offset == referent_offset.
//
// So we might have to emit:
// if (src == null) goto continuation.
//
// and we definitely have to emit:
// if (klass(src).reference_type == REF_NONE) goto continuation
// if (!marking_active) goto continuation
// if (pre_val == null) goto continuation
// call pre_barrier(pre_val)
// goto continuation
//
__ bind(_entry);
assert(src()->is_register(), "sanity");
Register src_reg = src()->as_register();
if (gen_src_check()) {
// The original src operand was not a constant.
// Generate src == null?
if (__ is_in_wdisp16_range(_continuation)) {
__ br_null(src_reg, /*annul*/false, Assembler::pt, _continuation);
} else {
__ cmp(src_reg, G0);
__ brx(Assembler::equal, false, Assembler::pt, _continuation);
}
__ delayed()->nop();
}
// Generate src->_klass->_reference_type() == REF_NONE)?
assert(tmp()->is_register(), "sanity");
Register tmp_reg = tmp()->as_register();
__ load_klass(src_reg, tmp_reg);
Address ref_type_adr(tmp_reg, instanceKlass::reference_type_offset());
__ ldub(ref_type_adr, tmp_reg);
// _reference_type field is of type ReferenceType (enum)
assert(REF_NONE == 0, "check this code");
__ cmp_zero_and_br(Assembler::equal, tmp_reg, _continuation, /*annul*/false, Assembler::pt);
__ delayed()->nop();
// Is marking active?
assert(thread()->is_register(), "precondition");
Register thread_reg = thread()->as_pointer_register();
Address in_progress(thread_reg, in_bytes(JavaThread::satb_mark_queue_offset() +
PtrQueue::byte_offset_of_active()));
if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
__ ld(in_progress, tmp_reg);
} else {
assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
__ ldsb(in_progress, tmp_reg);
}
__ cmp_zero_and_br(Assembler::equal, tmp_reg, _continuation, /*annul*/false, Assembler::pt);
__ delayed()->nop();
// val == null?
assert(val()->is_register(), "Precondition.");
Register val_reg = val()->as_register();
if (__ is_in_wdisp16_range(_continuation)) {
__ br_null(val_reg, /*annul*/false, Assembler::pt, _continuation);
} else {
__ cmp(val_reg, G0);
__ brx(Assembler::equal, false, Assembler::pt, _continuation);
}
__ delayed()->nop();
__ call(Runtime1::entry_for(Runtime1::Runtime1::g1_pre_barrier_slow_id));
__ delayed()->mov(val_reg, G4);
__ br(Assembler::always, false, Assembler::pt, _continuation);
__ delayed()->nop();
}
jbyte* G1PostBarrierStub::_byte_map_base = NULL;
jbyte* G1PostBarrierStub::byte_map_base_slow() {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -75,4 +75,43 @@ define_pd_global(bool, UseMembar, false);
// GC Ergo Flags
define_pd_global(intx, CMSYoungGenPerWorker, 16*M); // default max size of CMS young gen, per GC worker thread
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
\
product(intx, UseVIS, 99, \
"Highest supported VIS instructions set on Sparc") \
\
product(bool, UseCBCond, false, \
"Use compare and branch instruction on SPARC") \
\
product(bool, UseBlockZeroing, false, \
"Use special cpu instructions for block zeroing") \
\
product(intx, BlockZeroingLowLimit, 2048, \
"Minimum size in bytes when block zeroing will be used") \
\
product(bool, UseBlockCopy, false, \
"Use special cpu instructions for block copy") \
\
product(intx, BlockCopyLowLimit, 2048, \
"Minimum size in bytes when block copy will be used") \
\
develop(bool, UseV8InstrsOnly, false, \
"Use SPARC-V8 Compliant instruction subset") \
\
product(bool, UseNiagaraInstrs, false, \
"Use Niagara-efficient instruction subset") \
\
develop(bool, UseCASForSwap, false, \
"Do not use swap instructions, but only CAS (in a loop) on SPARC")\
\
product(uintx, ArraycopySrcPrefetchDistance, 0, \
"Distance to prefetch source array in arracopy") \
\
product(uintx, ArraycopyDstPrefetchDistance, 0, \
"Distance to prefetch destination array in arracopy") \
\
develop(intx, V8AtomicOperationUnderLockSpinCount, 50, \
"Number of times to spin wait on a v8 atomic operation lock") \
#endif // CPU_SPARC_VM_GLOBALS_SPARC_HPP

@ -106,10 +106,10 @@ void VM_Version::initialize() {
if (FLAG_IS_DEFAULT(OptoLoopAlignment)) {
FLAG_SET_DEFAULT(OptoLoopAlignment, 4);
}
// When using CMS, we cannot use memset() in BOT updates because
// the sun4v/CMT version in libc_psr uses BIS which exposes
// "phantom zeros" to concurrent readers. See 6948537.
if (FLAG_IS_DEFAULT(UseMemSetInBOT) && UseConcMarkSweepGC) {
// When using CMS or G1, we cannot use memset() in BOT updates
// because the sun4v/CMT version in libc_psr uses BIS which
// exposes "phantom zeros" to concurrent readers. See 6948537.
if (FLAG_IS_DEFAULT(UseMemSetInBOT) && (UseConcMarkSweepGC || UseG1GC)) {
FLAG_SET_DEFAULT(UseMemSetInBOT, false);
}
#ifdef _LP64

File diff suppressed because it is too large

@ -617,6 +617,7 @@ private:
VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F) {
simd_prefix(dst, xnoreg, src, pre, opc);
}
void simd_prefix(Address dst, XMMRegister src, VexSimdPrefix pre) {
simd_prefix(src, dst, pre);
}
@ -626,16 +627,10 @@ private:
simd_prefix(dst, nds, src, pre, VEX_OPCODE_0F, rex_w);
}
int simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src,
VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F,
bool rex_w = false, bool vector256 = false);
int simd_prefix_and_encode(XMMRegister dst, XMMRegister src,
VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F) {
return simd_prefix_and_encode(dst, xnoreg, src, pre, opc);
}
// Move/convert 32-bit integer value.
int simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, Register src,
VexSimdPrefix pre) {
@ -677,6 +672,15 @@ private:
void emit_arith(int op1, int op2, Register dst, jobject obj);
void emit_arith(int op1, int op2, Register dst, Register src);
void emit_simd_arith(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre);
void emit_simd_arith(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre);
void emit_simd_arith_nonds(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre);
void emit_simd_arith_nonds(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre);
void emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds,
Address src, VexSimdPrefix pre, bool vector256);
void emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds,
XMMRegister src, VexSimdPrefix pre, bool vector256);
void emit_operand(Register reg,
Register base, Register index, Address::ScaleFactor scale,
int disp,
@ -891,12 +895,6 @@ private:
void andq(Register dst, Address src);
void andq(Register dst, Register src);
// Bitwise Logical AND of Packed Double-Precision Floating-Point Values
void andpd(XMMRegister dst, XMMRegister src);
// Bitwise Logical AND of Packed Single-Precision Floating-Point Values
void andps(XMMRegister dst, XMMRegister src);
void bsfl(Register dst, Register src);
void bsrl(Register dst, Register src);
@ -1436,10 +1434,6 @@ private:
void prefetcht2(Address src);
void prefetchw(Address src);
// POR - Bitwise logical OR
void por(XMMRegister dst, XMMRegister src);
void por(XMMRegister dst, Address src);
// Shuffle Packed Doublewords
void pshufd(XMMRegister dst, XMMRegister src, int mode);
void pshufd(XMMRegister dst, Address src, int mode);
@ -1448,9 +1442,6 @@ private:
void pshuflw(XMMRegister dst, XMMRegister src, int mode);
void pshuflw(XMMRegister dst, Address src, int mode);
// Shift Right by bits Logical Quadword Immediate
void psrlq(XMMRegister dst, int shift);
// Shift Right by bytes Logical DoubleQuadword Immediate
void psrldq(XMMRegister dst, int shift);
@ -1475,10 +1466,6 @@ private:
void pushq(Address src);
// Xor Packed Byte Integer Values
void pxor(XMMRegister dst, Address src);
void pxor(XMMRegister dst, XMMRegister src);
void rcll(Register dst, int imm8);
void rclq(Register dst, int imm8);
@ -1601,15 +1588,10 @@ private:
void xorq(Register dst, Address src);
void xorq(Register dst, Register src);
// Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
void xorpd(XMMRegister dst, XMMRegister src);
// Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
void xorps(XMMRegister dst, XMMRegister src);
void set_byte_if_not_zero(Register dst); // sets reg to 1 if not zero, otherwise 0
// AVX 3-operands scalar instructions (encoded with VEX prefix)
void vaddsd(XMMRegister dst, XMMRegister nds, Address src);
void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vaddss(XMMRegister dst, XMMRegister nds, Address src);
@ -1627,14 +1609,147 @@ private:
void vsubss(XMMRegister dst, XMMRegister nds, Address src);
void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src);
// AVX Vector instructions.
void vandpd(XMMRegister dst, XMMRegister nds, Address src);
void vandps(XMMRegister dst, XMMRegister nds, Address src);
void vxorpd(XMMRegister dst, XMMRegister nds, Address src);
void vxorps(XMMRegister dst, XMMRegister nds, Address src);
//====================VECTOR ARITHMETIC=====================================
// Add Packed Floating-Point Values
void addpd(XMMRegister dst, XMMRegister src);
void addps(XMMRegister dst, XMMRegister src);
void vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vaddps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vaddpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
void vaddps(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
// Subtract Packed Floating-Point Values
void subpd(XMMRegister dst, XMMRegister src);
void subps(XMMRegister dst, XMMRegister src);
void vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vsubpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
void vsubps(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
// Multiply Packed Floating-Point Values
void mulpd(XMMRegister dst, XMMRegister src);
void mulps(XMMRegister dst, XMMRegister src);
void vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vmulpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
void vmulps(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
// Divide Packed Floating-Point Values
void divpd(XMMRegister dst, XMMRegister src);
void divps(XMMRegister dst, XMMRegister src);
void vdivpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vdivps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vdivpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
void vdivps(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
// Bitwise Logical AND of Packed Floating-Point Values
void andpd(XMMRegister dst, XMMRegister src);
void andps(XMMRegister dst, XMMRegister src);
void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vandpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
void vandps(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
// Bitwise Logical XOR of Packed Floating-Point Values
void xorpd(XMMRegister dst, XMMRegister src);
void xorps(XMMRegister dst, XMMRegister src);
void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vxorpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
void vxorps(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
// Add packed integers
void paddb(XMMRegister dst, XMMRegister src);
void paddw(XMMRegister dst, XMMRegister src);
void paddd(XMMRegister dst, XMMRegister src);
void paddq(XMMRegister dst, XMMRegister src);
void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vpaddq(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vpaddb(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
void vpaddw(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
void vpaddd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
void vpaddq(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
// Sub packed integers
void psubb(XMMRegister dst, XMMRegister src);
void psubw(XMMRegister dst, XMMRegister src);
void psubd(XMMRegister dst, XMMRegister src);
void psubq(XMMRegister dst, XMMRegister src);
void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vpsubq(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vpsubb(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
void vpsubw(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
void vpsubd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
void vpsubq(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
// Multiply packed integers (only shorts and ints)
void pmullw(XMMRegister dst, XMMRegister src);
void pmulld(XMMRegister dst, XMMRegister src);
void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vpmullw(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
void vpmulld(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
// Shift left packed integers
void psllw(XMMRegister dst, int shift);
void pslld(XMMRegister dst, int shift);
void psllq(XMMRegister dst, int shift);
void psllw(XMMRegister dst, XMMRegister shift);
void pslld(XMMRegister dst, XMMRegister shift);
void psllq(XMMRegister dst, XMMRegister shift);
void vpsllw(XMMRegister dst, XMMRegister src, int shift, bool vector256);
void vpslld(XMMRegister dst, XMMRegister src, int shift, bool vector256);
void vpsllq(XMMRegister dst, XMMRegister src, int shift, bool vector256);
void vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
void vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
void vpsllq(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
// Logical shift right packed integers
void psrlw(XMMRegister dst, int shift);
void psrld(XMMRegister dst, int shift);
void psrlq(XMMRegister dst, int shift);
void psrlw(XMMRegister dst, XMMRegister shift);
void psrld(XMMRegister dst, XMMRegister shift);
void psrlq(XMMRegister dst, XMMRegister shift);
void vpsrlw(XMMRegister dst, XMMRegister src, int shift, bool vector256);
void vpsrld(XMMRegister dst, XMMRegister src, int shift, bool vector256);
void vpsrlq(XMMRegister dst, XMMRegister src, int shift, bool vector256);
void vpsrlw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
void vpsrld(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
void vpsrlq(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
// Arithmetic shift right packed integers (only shorts and ints, no instructions for longs)
void psraw(XMMRegister dst, int shift);
void psrad(XMMRegister dst, int shift);
void psraw(XMMRegister dst, XMMRegister shift);
void psrad(XMMRegister dst, XMMRegister shift);
void vpsraw(XMMRegister dst, XMMRegister src, int shift, bool vector256);
void vpsrad(XMMRegister dst, XMMRegister src, int shift, bool vector256);
void vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
void vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
// And packed integers
void pand(XMMRegister dst, XMMRegister src);
void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vpand(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
// Or packed integers
void por(XMMRegister dst, XMMRegister src);
void vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vpor(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
// Xor packed integers
void pxor(XMMRegister dst, XMMRegister src);
void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vpxor(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
// Copy the low 128 bits into the high 128 bits of YMM registers.
void vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src);
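For orientation, here is a minimal sketch (not the HotSpot encoder; register-encoding details are simplified) of what the `vector256` flag threaded through the declarations above controls: it selects the VEX.L bit, which switches an AVX instruction between 128-bit XMM and 256-bit YMM operation.

#include <cstdint>
#include <cstdio>

// Second byte of a 2-byte VEX prefix (0xC5 <byte>):
//   bit 7    : ~R     inverted REX.R extension bit
//   bits 6-3 : ~vvvv  inverted encoding of the nds source register
//   bit 2    : L      vector length: 0 = 128-bit (xmm), 1 = 256-bit (ymm)
//   bits 1-0 : pp     implied prefix: 0 = none, 1 = 66, 2 = F3, 3 = F2
uint8_t vex2_byte1(bool rex_r, uint8_t nds_enc, bool vector256, uint8_t pp) {
  uint8_t b = 0;
  b |= (rex_r ? 0 : 1) << 7;        // ~R
  b |= ((~nds_enc) & 0xF) << 3;     // ~vvvv
  b |= (vector256 ? 1 : 0) << 2;    // L: the only bit vector256 touches
  b |= (pp & 0x3);                  // pp
  return b;
}

int main() {
  printf("128-bit form: 0x%02x\n", vex2_byte1(false, 1, false, 0));
  printf("256-bit form: 0x%02x\n", vex2_byte1(false, 1, true,  0));
}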
@ -2532,11 +2647,13 @@ public:
void vaddss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddss(dst, nds, src); }
void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
void vandpd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vandpd(dst, nds, src); }
void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vandpd(dst, nds, src, vector256); }
void vandpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { Assembler::vandpd(dst, nds, src, vector256); }
void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256);
void vandps(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vandps(dst, nds, src); }
void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src);
void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vandps(dst, nds, src, vector256); }
void vandps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { Assembler::vandps(dst, nds, src, vector256); }
void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256);
void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); }
void vdivsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivsd(dst, nds, src); }
@ -2565,12 +2682,12 @@ public:
// AVX Vector instructions
void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vxorpd(dst, nds, src, vector256); }
void vxorpd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vxorpd(dst, nds, src); }
void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
void vxorpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { Assembler::vxorpd(dst, nds, src, vector256); }
void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256);
void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vxorps(dst, nds, src, vector256); }
void vxorps(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vxorps(dst, nds, src); }
void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src);
void vxorps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { Assembler::vxorps(dst, nds, src, vector256); }
void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256);
void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
if (UseAVX > 1 || !vector256) // vpxor 256 bit is available only in AVX2
@ -2578,6 +2695,12 @@ public:
else
Assembler::vxorpd(dst, nds, src, vector256);
}
void vpxor(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
if (UseAVX > 1 || !vector256) // vpxor 256 bit is available only in AVX2
Assembler::vpxor(dst, nds, src, vector256);
else
Assembler::vxorpd(dst, nds, src, vector256);
}
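The fallback above relies on XOR being bit-identical across SIMD domains; a small host-side check (an illustration, not part of the patch) using SSE2 intrinsics:

#include <immintrin.h>
#include <cstdint>
#include <cstdio>
#include <cstring>

// pxor (integer domain) and xorpd (FP domain) compute the same bits; only
// the micro-architectural bypass latency differs, so vxorpd is a safe
// stand-in when 256-bit vpxor is unavailable (AVX without AVX2).
int main() {
  alignas(16) uint64_t a[2] = {0x0123456789abcdefULL, 0xfedcba9876543210ULL};
  alignas(16) uint64_t b[2] = {0xffffffffffffffffULL, 0x00000000ffffffffULL};

  __m128i xi = _mm_xor_si128(_mm_load_si128((const __m128i*)a),
                             _mm_load_si128((const __m128i*)b)); // pxor
  __m128d xd = _mm_xor_pd(_mm_load_pd((const double*)a),
                          _mm_load_pd((const double*)b));        // xorpd

  printf("same bits: %s\n", memcmp(&xi, &xd, sizeof(xi)) == 0 ? "yes" : "no");
}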
// Move packed integer values from the low 128 bits to the high 128 bits of a 256-bit vector.
void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {

@ -488,68 +488,6 @@ void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
}
void G1UnsafeGetObjSATBBarrierStub::emit_code(LIR_Assembler* ce) {
// At this point we know that offset == referent_offset.
//
// So we might have to emit:
// if (src == null) goto continuation.
//
// and we definitely have to emit:
// if (klass(src).reference_type == REF_NONE) goto continuation
// if (!marking_active) goto continuation
// if (pre_val == null) goto continuation
// call pre_barrier(pre_val)
// goto continuation
//
__ bind(_entry);
assert(src()->is_register(), "sanity");
Register src_reg = src()->as_register();
if (gen_src_check()) {
// The original src operand was not a constant.
// Generate src == null?
__ cmpptr(src_reg, (int32_t) NULL_WORD);
__ jcc(Assembler::equal, _continuation);
}
// Generate src->_klass->_reference_type == REF_NONE)?
assert(tmp()->is_register(), "sanity");
Register tmp_reg = tmp()->as_register();
__ load_klass(tmp_reg, src_reg);
Address ref_type_adr(tmp_reg, instanceKlass::reference_type_offset());
__ cmpb(ref_type_adr, REF_NONE);
__ jcc(Assembler::equal, _continuation);
// Is marking active?
assert(thread()->is_register(), "precondition");
Register thread_reg = thread()->as_pointer_register();
Address in_progress(thread_reg, in_bytes(JavaThread::satb_mark_queue_offset() +
PtrQueue::byte_offset_of_active()));
if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
__ cmpl(in_progress, 0);
} else {
assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
__ cmpb(in_progress, 0);
}
__ jcc(Assembler::equal, _continuation);
// val == null?
assert(val()->is_register(), "Precondition.");
Register val_reg = val()->as_register();
__ cmpptr(val_reg, (int32_t) NULL_WORD);
__ jcc(Assembler::equal, _continuation);
ce->store_parameter(val()->as_register(), 0);
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id)));
__ jmp(_continuation);
}
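A toy model (plain C++ stand-ins for the emitted guards, not HotSpot code) of the check sequence this stub encoded, which the rest of this change re-generates inline in LIRGenerator:

#include <cstdio>

enum { REF_NONE = 0 };

// The pre-barrier runs only if every guard falls through; any guard that
// fails jumps to the continuation instead.
bool needs_pre_barrier(const void* src, int klass_reference_type,
                       bool marking_active, const void* pre_val,
                       bool gen_src_check) {
  if (gen_src_check && src == NULL) return false;     // src == null
  if (klass_reference_type == REF_NONE) return false; // not a Reference
  if (!marking_active) return false;                  // SATB marking inactive
  if (pre_val == NULL) return false;                  // nothing to record
  return true;                                        // call pre_barrier(pre_val)
}

int main() {
  int x = 0;
  printf("%d\n", needs_pre_barrier(&x, 1, true, &x, true)); // prints 1
}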
jbyte* G1PostBarrierStub::_byte_map_base = NULL;
jbyte* G1PostBarrierStub::byte_map_base_slow() {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -78,4 +78,53 @@ define_pd_global(bool, UseMembar, false);
// GC Ergo Flags
define_pd_global(intx, CMSYoungGenPerWorker, 64*M); // default max size of CMS young gen, per GC worker thread
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
\
develop(bool, IEEEPrecision, true, \
"Enables IEEE precision (for INTEL only)") \
\
product(intx, FenceInstruction, 0, \
"(Unsafe,Unstable) Experimental") \
\
product(intx, ReadPrefetchInstr, 0, \
"Prefetch instruction to prefetch ahead") \
\
product(bool, UseStoreImmI16, true, \
"Use store immediate 16-bits value instruction on x86") \
\
product(intx, UseAVX, 99, \
"Highest supported AVX instructions set on x86/x64") \
\
diagnostic(bool, UseIncDec, true, \
"Use INC, DEC instructions on x86") \
\
product(bool, UseNewLongLShift, false, \
"Use optimized bitwise shift left") \
\
product(bool, UseAddressNop, false, \
"Use '0F 1F [addr]' NOP instructions on x86 cpus") \
\
product(bool, UseXmmLoadAndClearUpper, true, \
"Load low part of XMM register and clear upper part") \
\
product(bool, UseXmmRegToRegMoveAll, false, \
"Copy all XMM register bits when moving value between registers") \
\
product(bool, UseXmmI2D, false, \
"Use SSE2 CVTDQ2PD instruction to convert Integer to Double") \
\
product(bool, UseXmmI2F, false, \
"Use SSE2 CVTDQ2PS instruction to convert Integer to Float") \
\
product(bool, UseUnalignedLoadStores, false, \
"Use SSE2 MOVDQU instruction for Arraycopy") \
\
/* assembler */ \
product(bool, Use486InstrsOnly, false, \
"Use 80486 Compliant instruction subset") \
\
product(bool, UseCountLeadingZerosInstruction, false, \
"Use count leading zeros instruction") \
#endif // CPU_X86_VM_GLOBALS_X86_HPP
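The ARCH_FLAGS table above is an X-macro: one flag list expanded with different callbacks. A self-contained sketch of the idiom (the flag names here are made up):

#include <cstdio>

#define MY_ARCH_FLAGS(develop, product)                       \
  product(bool, UseWidget,   true,  "Use the widget")         \
  product(int,  WidgetSize,  64,    "Widget size in bytes")   \
  develop(bool, TraceWidget, false, "Trace widget activity")

// One expansion defines the globals...
#define DEFINE_FLAG(type, name, value, doc) type name = value;
MY_ARCH_FLAGS(DEFINE_FLAG, DEFINE_FLAG)

// ...another walks the same table to print them.
#define PRINT_FLAG(type, name, value, doc) \
  printf("%-12s default=" #value " (%s)\n", #name, doc);

int main() {
  MY_ARCH_FLAGS(PRINT_FLAG, PRINT_FLAG)
}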

File diff suppressed because it is too large

@ -1367,22 +1367,6 @@ int emit_deopt_handler(CodeBuffer& cbuf) {
return offset;
}
const bool Matcher::match_rule_supported(int opcode) {
if (!has_match_rule(opcode))
return false;
switch (opcode) {
case Op_PopCountI:
case Op_PopCountL:
if (!UsePopCountInstruction)
return false;
break;
}
return true; // Per default match rules are supported.
}
int Matcher::regnum_to_fpu_offset(int regnum) {
return regnum - 32; // The FP registers are in the second chunk
}

@ -1513,22 +1513,6 @@ int emit_deopt_handler(CodeBuffer& cbuf)
return offset;
}
const bool Matcher::match_rule_supported(int opcode) {
if (!has_match_rule(opcode))
return false;
switch (opcode) {
case Op_PopCountI:
case Op_PopCountL:
if (!UsePopCountInstruction)
return false;
break;
}
return true; // Per default match rules are supported.
}
int Matcher::regnum_to_fpu_offset(int regnum)
{
return regnum - 32; // The FP registers are in the second chunk
@ -6427,6 +6411,31 @@ instruct castP2X(rRegL dst, rRegP src)
ins_pipe(ialu_reg_reg); // XXX
%}
// Convert oop into int for vectors alignment masking
instruct convP2I(rRegI dst, rRegP src)
%{
match(Set dst (ConvL2I (CastP2X src)));
format %{ "movl $dst, $src\t# ptr -> int" %}
ins_encode %{
__ movl($dst$$Register, $src$$Register);
%}
ins_pipe(ialu_reg_reg); // XXX
%}
// Convert compressed oop into int for vectors alignment masking
// in case of 32-bit oops (heap < 4GB).
instruct convN2I(rRegI dst, rRegN src)
%{
predicate(Universe::narrow_oop_shift() == 0);
match(Set dst (ConvL2I (CastP2X (DecodeN src))));
format %{ "movl $dst, $src\t# compressed ptr -> int" %}
ins_encode %{
__ movl($dst$$Register, $src$$Register);
%}
ins_pipe(ialu_reg_reg); // XXX
%}
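What convN2I exploits, sketched in plain C++ (assuming the zero-based heap model): with narrow_oop_shift() == 0, a compressed oop is numerically equal to the pointer's low 32 bits, so a single movl is a correct decode for masking purposes.

#include <cstdint>
#include <cstdio>

// General compressed-oop decode: oop = base + (narrow << shift).
uint64_t decode_oop(uint32_t narrow_oop, uint64_t heap_base, unsigned shift) {
  return heap_base + ((uint64_t)narrow_oop << shift);
}

int main() {
  // Heap below 4 GB: base == 0 and shift == 0, so decoding is the identity.
  printf("0x%llx\n", (unsigned long long)decode_oop(0x1234abcdu, 0, 0));
}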
// Convert oop pointer into compressed form
instruct encodeHeapOop(rRegN dst, rRegP src, rFlagsReg cr) %{
@ -10049,11 +10058,10 @@ instruct MoveD2L_reg_reg(rRegL dst, regD src) %{
ins_pipe( pipe_slow );
%}
// The next instructions have long latency and use Int unit. Set high cost.
instruct MoveI2F_reg_reg(regF dst, rRegI src) %{
match(Set dst (MoveI2F src));
effect(DEF dst, USE src);
ins_cost(300);
ins_cost(100);
format %{ "movd $dst,$src\t# MoveI2F" %}
ins_encode %{
__ movdl($dst$$XMMRegister, $src$$Register);
@ -10064,7 +10072,7 @@ instruct MoveI2F_reg_reg(regF dst, rRegI src) %{
instruct MoveL2D_reg_reg(regD dst, rRegL src) %{
match(Set dst (MoveL2D src));
effect(DEF dst, USE src);
ins_cost(300);
ins_cost(100);
format %{ "movd $dst,$src\t# MoveL2D" %}
ins_encode %{
__ movdq($dst$$XMMRegister, $src$$Register);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -60,4 +60,7 @@ define_pd_global(bool, UseMembar, false);
// GC Ergo Flags
define_pd_global(intx, CMSYoungGenPerWorker, 16*M); // default max size of CMS young gen, per GC worker thread
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct)
#endif // CPU_ZERO_VM_GLOBALS_ZERO_HPP

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -202,7 +202,7 @@ class Bsd {
static void fast_thread_clock_init(void);
#endif
static bool supports_monotonic_clock() {
static inline bool supports_monotonic_clock() {
return _clock_gettime != NULL;
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -178,7 +178,7 @@ class Linux {
// fast POSIX clocks support
static void fast_thread_clock_init(void);
static bool supports_monotonic_clock() {
static inline bool supports_monotonic_clock() {
return _clock_gettime != NULL;
}
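A minimal sketch of the probe behind supports_monotonic_clock(), as a standalone program (assumes a POSIX system; on older glibc, link with -ldl/-lrt): clock_gettime is resolved dynamically, so the inline accessor only has to test the cached function pointer.

#include <dlfcn.h>
#include <ctime>
#include <cstdio>

typedef int (*clock_gettime_fn)(clockid_t, struct timespec*);

int main() {
  // Resolve at runtime instead of linking directly, mirroring the
  // "_clock_gettime != NULL" test in the accessor above.
  clock_gettime_fn cg =
      (clock_gettime_fn)dlsym(RTLD_DEFAULT, "clock_gettime");
  if (cg != NULL) {
    timespec ts;
    if (cg(CLOCK_MONOTONIC, &ts) == 0)
      printf("monotonic: %ld.%09ld\n", (long)ts.tv_sec, (long)ts.tv_nsec);
  } else {
    puts("no clock_gettime; fall back to a non-monotonic time source");
  }
}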

@ -29,7 +29,7 @@
# inside Emacs".
#
# If the first parameter is "-dbx", HotSpot will be launched inside dbx.
#
# If the first parameter is "-valgrind", HotSpot will be launched
# inside Valgrind (http://valgrind.kde.org) using the Memcheck skin,
# and with memory leak detection enabled. This currently (2005jan19)
@ -45,19 +45,19 @@
# This is the name of the gdb binary to use
if [ ! "$GDB" ]
then
GDB=gdb
fi
# This is the name of the dbx binary to use
if [ ! "$DBX" ]
then
DBX=dbx
fi
# This is the name of the Valgrind binary to use
if [ ! "$VALGRIND" ]
then
VALGRIND=valgrind
fi
@ -98,7 +98,7 @@ esac
JDK=
if [ "${ALT_JAVA_HOME}" = "" ]; then
. ${MYDIR}/jdkpath.sh
else
JDK=${ALT_JAVA_HOME%%/jre};
fi
@ -114,22 +114,34 @@ fi
# any.
JRE=$JDK/jre
JAVA_HOME=$JDK
ARCH=@@LIBARCH@@
export JAVA_HOME
ARCH=@@LIBARCH@@
SBP=${MYDIR}:${JRE}/lib/${ARCH}
# Set up a suitable LD_LIBRARY_PATH
if [ -z "$LD_LIBRARY_PATH" ]
# Set up a suitable LD_LIBRARY_PATH or DYLD_LIBRARY_PATH
OS=`uname -s`
if [ "${OS}" = "Darwin" ]
then
LD_LIBRARY_PATH="$SBP"
if [ -z "$DYLD_LIBRARY_PATH" ]
then
DYLD_LIBRARY_PATH="$SBP"
else
DYLD_LIBRARY_PATH="$SBP:$DYLD_LIBRARY_PATH"
fi
export DYLD_LIBRARY_PATH
else
LD_LIBRARY_PATH="$SBP:$LD_LIBRARY_PATH"
# not 'Darwin'
if [ -z "$LD_LIBRARY_PATH" ]
then
LD_LIBRARY_PATH="$SBP"
else
LD_LIBRARY_PATH="$SBP:$LD_LIBRARY_PATH"
fi
export LD_LIBRARY_PATH
fi
export LD_LIBRARY_PATH
export JAVA_HOME
JPARMS="$@ $JAVA_ARGS";
# Locate the gamma development launcher

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -626,45 +626,6 @@ static void printDOF(void* dof) {
}
}
/**
* This prints out hex data in a 'windbg' or 'xxd' form, where each line is:
* <hex-address>: 8 * <hex-halfword> <ascii translation>
* example:
* 0000000: 7f44 4f46 0102 0102 0000 0000 0000 0000 .DOF............
* 0000010: 0000 0000 0000 0040 0000 0020 0000 0005 .......@... ....
* 0000020: 0000 0000 0000 0040 0000 0000 0000 015d .......@.......]
* ...
*/
static void printDOFRawData(void* dof) {
size_t size = ((dof_hdr_t*)dof)->dofh_loadsz;
size_t limit = (size + 16) / 16 * 16;
for (size_t i = 0; i < limit; ++i) {
if (i % 16 == 0) {
tty->print("%07x:", i);
}
if (i % 2 == 0) {
tty->print(" ");
}
if (i < size) {
tty->print("%02x", ((unsigned char*)dof)[i]);
} else {
tty->print(" ");
}
if ((i + 1) % 16 == 0) {
tty->print(" ");
for (size_t j = 0; j < 16; ++j) {
size_t idx = i + j - 15;
char c = ((char*)dof)[idx];
if (idx < size) {
tty->print("%c", c >= 32 && c <= 126 ? c : '.');
}
}
tty->print_cr("");
}
}
tty->print_cr("");
}
static void printDOFHelper(dof_helper_t* helper) {
tty->print_cr("// dof_helper_t {");
tty->print_cr("// dofhp_mod = \"%s\"", helper->dofhp_mod);
@ -672,7 +633,8 @@ static void printDOFHelper(dof_helper_t* helper) {
tty->print_cr("// dofhp_dof = 0x%016llx", helper->dofhp_dof);
printDOF((void*)helper->dofhp_dof);
tty->print_cr("// }");
printDOFRawData((void*)helper->dofhp_dof);
size_t len = ((dof_hdr_t*)helper->dofhp_dof)->dofh_loadsz;
tty->print_data((void*)helper->dofhp_dof, len, true);
}
#else // ndef HAVE_DTRACE_H

@ -574,71 +574,6 @@ class G1PreBarrierStub: public CodeStub {
#endif // PRODUCT
};
// This G1 barrier code stub is used in Unsafe.getObject.
// It generates a sequence of guards around the SATB
// barrier code that are used to detect when we have
// the referent field of a Reference object.
// The first check is assumed to have been generated
// in the code generated for Unsafe.getObject().
class G1UnsafeGetObjSATBBarrierStub: public CodeStub {
private:
LIR_Opr _val;
LIR_Opr _src;
LIR_Opr _tmp;
LIR_Opr _thread;
bool _gen_src_check;
public:
// A G1 barrier that is guarded by generated guards that determine whether
// val (which is the result of Unsafe.getObject()) should be recorded in an
// SATB log buffer. We could be reading the referent field of a Reference object
// using Unsafe.getObject() and we need to record the referent.
//
// * val is the operand returned by the unsafe.getObject routine.
// * src is the base object
// * tmp is a temp used to load the klass of src, and then reference type
// * thread is the thread object.
G1UnsafeGetObjSATBBarrierStub(LIR_Opr val, LIR_Opr src,
LIR_Opr tmp, LIR_Opr thread,
bool gen_src_check) :
_val(val), _src(src),
_tmp(tmp), _thread(thread),
_gen_src_check(gen_src_check)
{
assert(_val->is_register(), "should have already been loaded");
assert(_src->is_register(), "should have already been loaded");
assert(_tmp->is_register(), "should be a temporary register");
}
LIR_Opr val() const { return _val; }
LIR_Opr src() const { return _src; }
LIR_Opr tmp() const { return _tmp; }
LIR_Opr thread() const { return _thread; }
bool gen_src_check() const { return _gen_src_check; }
virtual void emit_code(LIR_Assembler* e);
virtual void visit(LIR_OpVisitState* visitor) {
visitor->do_slow_case();
visitor->do_input(_val);
visitor->do_input(_src);
visitor->do_input(_thread);
visitor->do_temp(_tmp);
}
#ifndef PRODUCT
virtual void print_name(outputStream* out) const { out->print("G1UnsafeGetObjSATBBarrierStub"); }
#endif // PRODUCT
};
class G1PostBarrierStub: public CodeStub {
private:
LIR_Opr _addr;

@ -1646,19 +1646,15 @@ Dependencies* GraphBuilder::dependency_recorder() const {
void GraphBuilder::invoke(Bytecodes::Code code) {
const bool has_receiver =
code == Bytecodes::_invokespecial ||
code == Bytecodes::_invokevirtual ||
code == Bytecodes::_invokeinterface;
const bool is_invokedynamic = (code == Bytecodes::_invokedynamic);
bool will_link;
ciMethod* target = stream()->get_method(will_link);
ciSignature* declared_signature = NULL;
ciMethod* target = stream()->get_method(will_link, &declared_signature);
ciKlass* holder = stream()->get_declared_method_holder();
const Bytecodes::Code bc_raw = stream()->cur_bc_raw();
assert(declared_signature != NULL, "cannot be null");
// FIXME bail out for now
if ((bc_raw == Bytecodes::_invokehandle || is_invokedynamic) && !will_link) {
if (Bytecodes::has_optional_appendix(bc_raw) && !will_link) {
BAILOUT("unlinked call site (FIXME needs patching or recompile support)");
}
@ -1690,8 +1686,12 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
// convert them directly to an invokespecial or invokestatic.
if (target->is_loaded() && !target->is_abstract() && target->can_be_statically_bound()) {
switch (bc_raw) {
case Bytecodes::_invokevirtual: code = Bytecodes::_invokespecial; break;
case Bytecodes::_invokehandle: code = Bytecodes::_invokestatic; break;
case Bytecodes::_invokevirtual:
code = Bytecodes::_invokespecial;
break;
case Bytecodes::_invokehandle:
code = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokespecial;
break;
}
}
@ -1840,7 +1840,7 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
bool success = false;
if (target->is_method_handle_intrinsic()) {
// method handle invokes
success = for_method_handle_inline(target);
success = try_method_handle_inline(target);
} else {
// static binding => check if callee is ok
success = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL), code, better_receiver);
@ -1877,12 +1877,14 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
// inlining not successful => standard invoke
bool is_loaded = target->is_loaded();
ValueType* result_type = as_ValueType(target->return_type());
// We require the debug info to be the "state before" because
// invokedynamics may deoptimize.
ValueStack* state_before = is_invokedynamic ? copy_state_before() : copy_state_exhandling();
ValueType* result_type = as_ValueType(declared_signature->return_type());
ValueStack* state_before = copy_state_exhandling();
// The bytecode (code) might change in this method so we are checking this very late.
const bool has_receiver =
code == Bytecodes::_invokespecial ||
code == Bytecodes::_invokevirtual ||
code == Bytecodes::_invokeinterface;
Values* args = state()->pop_arguments(target->arg_size_no_receiver());
Value recv = has_receiver ? apop() : NULL;
int vtable_index = methodOopDesc::invalid_vtable_index;
@ -3058,7 +3060,7 @@ GraphBuilder::GraphBuilder(Compilation* compilation, IRScope* scope)
case vmIntrinsics::_Reference_get:
{
if (UseG1GC) {
{
// With java.lang.ref.Reference.get() we must go through the
// intrinsic - when G1 is enabled - even when get() is the root
// method of the compile so that, if necessary, the value in
@ -3070,6 +3072,9 @@ GraphBuilder::GraphBuilder(Compilation* compilation, IRScope* scope)
// object removed from the list of discovered references during
// reference processing.
// We also need the intrinsic to prevent commoning reads from this field
// across safepoints, since the GC can change its value.
// Set up a stream so that appending instructions works properly.
ciBytecodeStream s(scope->method());
s.reset_to_bci(0);
@ -3226,7 +3231,6 @@ const char* GraphBuilder::should_not_inline(ciMethod* callee) const {
bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) {
if (!InlineNatives ) INLINE_BAILOUT("intrinsic method inlining disabled");
if (callee->is_synchronized()) {
// We don't currently support any synchronized intrinsics
return false;
@ -3234,9 +3238,13 @@ bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) {
// callee seems like a good candidate
// determine id
vmIntrinsics::ID id = callee->intrinsic_id();
if (!InlineNatives && id != vmIntrinsics::_Reference_get) {
// InlineNatives does not control Reference.get
INLINE_BAILOUT("intrinsic method inlining disabled");
}
bool preserves_state = false;
bool cantrap = true;
vmIntrinsics::ID id = callee->intrinsic_id();
switch (id) {
case vmIntrinsics::_arraycopy:
if (!InlineArrayCopy) return false;
@ -3376,11 +3384,10 @@ bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) {
return true;
case vmIntrinsics::_Reference_get:
// It is only when G1 is enabled that we absolutely
// need to use the intrinsic version of Reference.get()
// so that the value in the referent field, if necessary,
// can be registered by the pre-barrier code.
if (!UseG1GC) return false;
// Use the intrinsic version of Reference.get() so that the value in
// the referent field can be registered by the G1 pre-barrier code.
// It also prevents commoning reads from this field across safepoints,
// since the GC can change its value.
preserves_state = true;
break;
@ -3816,7 +3823,7 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, Bytecode
}
bool GraphBuilder::for_method_handle_inline(ciMethod* callee) {
bool GraphBuilder::try_method_handle_inline(ciMethod* callee) {
ValueStack* state_before = state()->copy_for_parsing();
vmIntrinsics::ID iid = callee->intrinsic_id();
switch (iid) {
@ -3851,7 +3858,7 @@ bool GraphBuilder::for_method_handle_inline(ciMethod* callee) {
// If the target is another method handle invoke, try recursively to get
// a better target.
if (target->is_method_handle_intrinsic()) {
if (for_method_handle_inline(target)) {
if (try_method_handle_inline(target)) {
return true;
}
} else {

@ -346,7 +346,7 @@ class GraphBuilder VALUE_OBJ_CLASS_SPEC {
const char* should_not_inline(ciMethod* callee) const;
// JSR 292 support
bool for_method_handle_inline(ciMethod* callee);
bool try_method_handle_inline(ciMethod* callee);
// helpers
void inline_bailout(const char* msg);

@ -369,9 +369,6 @@ Invoke::Invoke(Bytecodes::Code code, ValueType* result_type, Value recv, Values*
_signature = new BasicTypeList(number_of_arguments() + (has_receiver() ? 1 : 0));
if (has_receiver()) {
_signature->append(as_BasicType(receiver()->type()));
} else if (is_invokedynamic()) {
// Add the synthetic MethodHandle argument to the signature.
_signature->append(T_OBJECT);
}
for (int i = 0; i < number_of_arguments(); i++) {
ValueType* t = argument_at(i)->type();

@ -448,10 +448,10 @@ void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
switch (op->code()) {
case lir_static_call:
case lir_dynamic_call:
call(op, relocInfo::static_call_type);
break;
case lir_optvirtual_call:
case lir_dynamic_call:
call(op, relocInfo::opt_virtual_call_type);
break;
case lir_icvirtual_call:
@ -460,7 +460,9 @@ void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
case lir_virtual_call:
vtable_call(op);
break;
default: ShouldNotReachHere();
default:
fatal(err_msg_res("unexpected op code: %s", op->name()));
break;
}
// JSR 292

@ -920,7 +920,8 @@ LIR_Opr LIRGenerator::round_item(LIR_Opr opr) {
LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
assert(type2size[t] == type2size[value->type()], "size mismatch");
assert(type2size[t] == type2size[value->type()],
err_msg_res("size mismatch: t=%s, value->type()=%s", type2name(t), type2name(value->type())));
if (!value->is_register()) {
// force into a register
LIR_Opr r = new_register(value->type());
@ -2176,9 +2177,9 @@ void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
off.load_item();
src.load_item();
LIR_Opr reg = rlock_result(x, x->basic_type());
LIR_Opr value = rlock_result(x, x->basic_type());
get_Object_unsafe(reg, src.result(), off.result(), type, x->is_volatile());
get_Object_unsafe(value, src.result(), off.result(), type, x->is_volatile());
#ifndef SERIALGC
// We might be reading the value of the referent field of a
@ -2191,19 +2192,16 @@ void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
// if (offset == java_lang_ref_Reference::referent_offset) {
// if (src != NULL) {
// if (klass(src)->reference_type() != REF_NONE) {
// pre_barrier(..., reg, ...);
// pre_barrier(..., value, ...);
// }
// }
// }
//
// The first non-constant check of either the offset or
// the src operand will be done here; the remainder
// will take place in the generated code stub.
if (UseG1GC && type == T_OBJECT) {
bool gen_code_stub = true; // Assume we need to generate the slow code stub.
bool gen_offset_check = true; // Assume the code stub has to generate the offset guard.
bool gen_source_check = true; // Assume the code stub has to check the src object for null.
bool gen_pre_barrier = true; // Assume we need to generate pre_barrier.
bool gen_offset_check = true; // Assume we need to generate the offset guard.
bool gen_source_check = true; // Assume we need to check the src object for null.
bool gen_type_check = true; // Assume we need to check the reference_type.
if (off.is_constant()) {
jlong off_con = (off.type()->is_int() ?
@ -2215,7 +2213,7 @@ void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
// The constant offset is something other than referent_offset.
// We can skip generating/checking the remaining guards and
// skip generation of the code stub.
gen_code_stub = false;
gen_pre_barrier = false;
} else {
// The constant offset is the same as referent_offset -
// we do not need to generate a runtime offset check.
@ -2224,11 +2222,11 @@ void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
}
// We don't need to generate stub if the source object is an array
if (gen_code_stub && src.type()->is_array()) {
gen_code_stub = false;
if (gen_pre_barrier && src.type()->is_array()) {
gen_pre_barrier = false;
}
if (gen_code_stub) {
if (gen_pre_barrier) {
// We still need to continue with the checks.
if (src.is_constant()) {
ciObject* src_con = src.get_jobject_constant();
@ -2236,7 +2234,7 @@ void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
if (src_con->is_null_object()) {
// The constant src object is null - We can skip
// generating the code stub.
gen_code_stub = false;
gen_pre_barrier = false;
} else {
// Non-null constant source object. We still have to generate
// the slow stub - but we don't need to generate the runtime
@ -2245,20 +2243,28 @@ void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
}
}
}
if (gen_pre_barrier && !PatchALot) {
// Can the klass of the object be statically determined to be
// a sub-class of Reference?
ciType* type = src.value()->declared_type();
if ((type != NULL) && type->is_loaded()) {
if (type->is_subtype_of(compilation()->env()->Reference_klass())) {
gen_type_check = false;
} else if (type->is_klass() &&
!compilation()->env()->Object_klass()->is_subtype_of(type->as_klass())) {
// Not Reference and not Object klass.
gen_pre_barrier = false;
}
}
}
if (gen_code_stub) {
// Temporaries.
LIR_Opr src_klass = new_register(T_OBJECT);
// Get the thread pointer for the pre-barrier
LIR_Opr thread = getThreadPointer();
CodeStub* stub;
if (gen_pre_barrier) {
LabelObj* Lcont = new LabelObj();
// We may need to generate runtime checks here. Let's start with
// the offset check.
if (gen_offset_check) {
// if (offset == referent_offset) -> slow code stub
// if (offset != referent_offset) -> continue
// If offset is an int then we can do the comparison with the
// referent_offset constant; otherwise we need to move
// referent_offset into a temporary register and generate
@ -2273,43 +2279,36 @@ void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
referent_off = new_register(T_LONG);
__ move(LIR_OprFact::longConst(java_lang_ref_Reference::referent_offset), referent_off);
}
__ cmp(lir_cond_equal, off.result(), referent_off);
// Optionally generate "src == null" check.
stub = new G1UnsafeGetObjSATBBarrierStub(reg, src.result(),
src_klass, thread,
gen_source_check);
__ branch(lir_cond_equal, as_BasicType(off.type()), stub);
} else {
if (gen_source_check) {
// offset is a const and equals referent offset
// if (source != null) -> slow code stub
__ cmp(lir_cond_notEqual, src.result(), LIR_OprFact::oopConst(NULL));
// Since we are generating the "if src == null" guard here,
// there is no need to generate the "src == null" check again.
stub = new G1UnsafeGetObjSATBBarrierStub(reg, src.result(),
src_klass, thread,
false);
__ branch(lir_cond_notEqual, T_OBJECT, stub);
} else {
// We have statically determined that offset == referent_offset
// && src != null so we unconditionally branch to code stub
// to perform the guards and record reg in the SATB log buffer.
stub = new G1UnsafeGetObjSATBBarrierStub(reg, src.result(),
src_klass, thread,
false);
__ branch(lir_cond_always, T_ILLEGAL, stub);
}
__ cmp(lir_cond_notEqual, off.result(), referent_off);
__ branch(lir_cond_notEqual, as_BasicType(off.type()), Lcont->label());
}
// Continuation point
__ branch_destination(stub->continuation());
if (gen_source_check) {
// offset is a const and equals referent offset
// if (source == null) -> continue
__ cmp(lir_cond_equal, src.result(), LIR_OprFact::oopConst(NULL));
__ branch(lir_cond_equal, T_OBJECT, Lcont->label());
}
LIR_Opr src_klass = new_register(T_OBJECT);
if (gen_type_check) {
// We have determined that offset == referent_offset && src != null.
// if (src->_klass->_reference_type == REF_NONE) -> continue
__ move(new LIR_Address(src.result(), oopDesc::klass_offset_in_bytes(), T_OBJECT), src_klass);
LIR_Address* reference_type_addr = new LIR_Address(src_klass, in_bytes(instanceKlass::reference_type_offset()), T_BYTE);
LIR_Opr reference_type = new_register(T_INT);
__ move(reference_type_addr, reference_type);
__ cmp(lir_cond_equal, reference_type, LIR_OprFact::intConst(REF_NONE));
__ branch(lir_cond_equal, T_INT, Lcont->label());
}
{
// We have determined that src->_klass->_reference_type != REF_NONE
// so register the value in the referent field with the pre-barrier.
pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
value /* pre_val */,
false /* do_load */,
false /* patch */,
NULL /* info */);
}
__ branch_destination(Lcont->label());
}
}
#endif // SERIALGC
@ -2664,8 +2663,9 @@ void LIRGenerator::do_OsrEntry(OsrEntry* x) {
void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
int i = (x->has_receiver() || x->is_invokedynamic()) ? 1 : 0;
for (; i < args->length(); i++) {
assert(args->length() == arg_list->length(),
err_msg_res("args=%d, arg_list=%d", args->length(), arg_list->length()));
for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
LIRItem* param = args->at(i);
LIR_Opr loc = arg_list->at(i);
if (loc->is_register()) {
@ -2705,15 +2705,9 @@ LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
LIRItem* receiver = new LIRItem(x->receiver(), this);
argument_items->append(receiver);
}
if (x->is_invokedynamic()) {
// Insert a dummy for the synthetic MethodHandle argument.
argument_items->append(NULL);
}
int idx = x->has_receiver() ? 1 : 0;
for (int i = 0; i < x->number_of_arguments(); i++) {
LIRItem* param = new LIRItem(x->argument_at(i), this);
argument_items->append(param);
idx += (param->type()->is_double_word() ? 2 : 1);
}
return argument_items;
}
@ -2758,9 +2752,6 @@ void LIRGenerator::do_Invoke(Invoke* x) {
CodeEmitInfo* info = state_for(x, x->state());
// invokedynamics can deoptimize.
CodeEmitInfo* deopt_info = x->is_invokedynamic() ? state_for(x, x->state_before()) : NULL;
invoke_load_arguments(x, args, arg_list);
if (x->has_receiver()) {
@ -2809,41 +2800,8 @@ void LIRGenerator::do_Invoke(Invoke* x) {
}
break;
case Bytecodes::_invokedynamic: {
ciBytecodeStream bcs(x->scope()->method());
bcs.force_bci(x->state()->bci());
assert(bcs.cur_bc() == Bytecodes::_invokedynamic, "wrong stream");
ciCPCache* cpcache = bcs.get_cpcache();
// Get CallSite offset from constant pool cache pointer.
int index = bcs.get_method_index();
size_t call_site_offset = cpcache->get_f1_offset(index);
// Load CallSite object from constant pool cache.
LIR_Opr call_site = new_register(objectType);
__ oop2reg(cpcache->constant_encoding(), call_site);
__ move_wide(new LIR_Address(call_site, call_site_offset, T_OBJECT), call_site);
// If this invokedynamic call site hasn't been executed yet in
// the interpreter, the CallSite object in the constant pool
// cache is still null and we need to deoptimize.
if (cpcache->is_f1_null_at(index)) {
// Only deoptimize if the CallSite object is still null; we don't
// recompile methods in C1 after deoptimization so this call site
// might be resolved the next time we execute it after OSR.
DeoptimizeStub* deopt_stub = new DeoptimizeStub(deopt_info);
__ cmp(lir_cond_equal, call_site, LIR_OprFact::oopConst(NULL));
__ branch(lir_cond_equal, T_OBJECT, deopt_stub);
}
// Use the receiver register for the synthetic MethodHandle
// argument.
receiver = LIR_Assembler::receiverOpr();
// Load target MethodHandle from CallSite object.
__ load(new LIR_Address(call_site, java_lang_invoke_CallSite::target_offset_in_bytes(), T_OBJECT), receiver);
__ call_dynamic(target, receiver, result_register,
SharedRuntime::get_resolve_opt_virtual_call_stub(),
SharedRuntime::get_resolve_static_call_stub(),
arg_list, info);
break;
}

@ -190,7 +190,7 @@ Value ValueMap::find_insert(Value x) {
LoadField* lf = value->as_LoadField(); \
bool must_kill = lf != NULL \
&& lf->field()->holder() == field->holder() \
&& lf->field()->offset() == field->offset();
&& (all_offsets || lf->field()->offset() == field->offset());
#define MUST_KILL_EXCEPTION(must_kill, entry, value) \
assert(entry->nesting() < nesting(), "must not find bigger nesting than current"); \
@ -205,7 +205,7 @@ void ValueMap::kill_array(ValueType* type) {
GENERIC_KILL_VALUE(MUST_KILL_ARRAY);
}
void ValueMap::kill_field(ciField* field) {
void ValueMap::kill_field(ciField* field, bool all_offsets) {
GENERIC_KILL_VALUE(MUST_KILL_FIELD);
}
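A toy model (hypothetical types, not the C1 ones) of the new kill_field contract: a store invalidates value-numbered loads of the same holder, and when the access still needs patching the offset is unknown, so all_offsets forces every load of that holder to be killed.

#include <cstdio>
#include <map>
#include <string>
#include <utility>

typedef std::pair<std::string, int> FieldKey;   // (holder, offset)
static std::map<FieldKey, int> cached_loads;    // LoadField -> value number

void kill_field(const std::string& holder, int offset, bool all_offsets) {
  for (std::map<FieldKey, int>::iterator it = cached_loads.begin();
       it != cached_loads.end(); ) {
    bool must_kill = it->first.first == holder &&
                     (all_offsets || it->first.second == offset);
    if (must_kill) cached_loads.erase(it++); else ++it;
  }
}

int main() {
  cached_loads[std::make_pair(std::string("A"), 8)] = 1;
  cached_loads[std::make_pair(std::string("A"), 16)] = 2;
  kill_field("A", 8, false);                 // kills only (A, 8)
  kill_field("A", 0, true);                  // offset untrusted: kills the rest
  printf("%d\n", (int)cached_loads.size());  // prints 0
}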
@ -280,9 +280,9 @@ class ShortLoopOptimizer : public ValueNumberingVisitor {
ValueMap* value_map_of(BlockBegin* block) { return _gvn->value_map_of(block); }
// implementation for abstract methods of ValueNumberingVisitor
void kill_memory() { _too_complicated_loop = true; }
void kill_field(ciField* field) { current_map()->kill_field(field); };
void kill_array(ValueType* type) { current_map()->kill_array(type); };
void kill_memory() { _too_complicated_loop = true; }
void kill_field(ciField* field, bool all_offsets) { current_map()->kill_field(field, all_offsets); };
void kill_array(ValueType* type) { current_map()->kill_array(type); };
public:
ShortLoopOptimizer(GlobalValueNumbering* gvn)

@ -114,7 +114,7 @@ class ValueMap: public CompilationResourceObj {
Value find_insert(Value x);
void kill_memory();
void kill_field(ciField* field);
void kill_field(ciField* field, bool all_offsets);
void kill_array(ValueType* type);
void kill_exception();
void kill_map(ValueMap* map);
@ -136,7 +136,7 @@ class ValueNumberingVisitor: public InstructionVisitor {
protected:
// called by visitor functions for instructions that kill values
virtual void kill_memory() = 0;
virtual void kill_field(ciField* field) = 0;
virtual void kill_field(ciField* field, bool all_offsets) = 0;
virtual void kill_array(ValueType* type) = 0;
// visitor functions
@ -148,7 +148,7 @@ class ValueNumberingVisitor: public InstructionVisitor {
x->field()->is_volatile()) {
kill_memory();
} else {
kill_field(x->field());
kill_field(x->field(), x->needs_patching());
}
}
void do_StoreIndexed (StoreIndexed* x) { kill_array(x->type()); }
@ -214,9 +214,9 @@ class ValueNumberingEffects: public ValueNumberingVisitor {
public:
// implementation for abstract methods of ValueNumberingVisitor
void kill_memory() { _map->kill_memory(); }
void kill_field(ciField* field) { _map->kill_field(field); }
void kill_array(ValueType* type) { _map->kill_array(type); }
void kill_memory() { _map->kill_memory(); }
void kill_field(ciField* field, bool all_offsets) { _map->kill_field(field, all_offsets); }
void kill_array(ValueType* type) { _map->kill_array(type); }
ValueNumberingEffects(ValueMap* map): _map(map) {}
};
@ -234,9 +234,9 @@ class GlobalValueNumbering: public ValueNumberingVisitor {
void set_value_map_of(BlockBegin* block, ValueMap* map) { assert(value_map_of(block) == NULL, ""); _value_maps.at_put(block->linear_scan_number(), map); }
// implementation for abstract methods of ValueNumberingVisitor
void kill_memory() { current_map()->kill_memory(); }
void kill_field(ciField* field) { current_map()->kill_field(field); }
void kill_array(ValueType* type) { current_map()->kill_array(type); }
void kill_memory() { current_map()->kill_memory(); }
void kill_field(ciField* field, bool all_offsets) { current_map()->kill_field(field, all_offsets); }
void kill_array(ValueType* type) { current_map()->kill_array(type); }
// main entry point that performs global value numbering
GlobalValueNumbering(IR* ir);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -156,18 +156,12 @@
develop(bool, CanonicalizeNodes, true, \
"Canonicalize graph nodes") \
\
develop(bool, CanonicalizeExperimental, false, \
"Canonicalize graph nodes, experimental code") \
\
develop(bool, PrintCanonicalization, false, \
"Print graph node canonicalization") \
\
develop(bool, UseTableRanges, true, \
"Faster versions of lookup table using ranges") \
\
develop(bool, UseFastExceptionHandling, true, \
"Faster handling of exceptions") \
\
develop_pd(bool, RoundFPResults, \
"Indicates whether rounding is needed for floating point results")\
\
@ -224,9 +218,6 @@
develop(bool, PinAllInstructions, false, \
"All instructions are pinned") \
\
develop(bool, ValueStackPinStackAll, true, \
"Pinning in ValueStack pin everything") \
\
develop(bool, UseFastNewInstance, true, \
"Use fast inlined instance allocation") \
\

@ -236,12 +236,16 @@ void BCEscapeAnalyzer::invoke(StateInfo &state, Bytecodes::Code code, ciMethod*
ciInstanceKlass* callee_holder = ciEnv::get_instance_klass_for_declared_method_holder(holder);
ciInstanceKlass* actual_recv = callee_holder;
// some methods are obviously bindable without any type checks so
// convert them directly to an invokespecial.
// Some methods are obviously bindable without any type checks so
// convert them directly to an invokespecial or invokestatic.
if (target->is_loaded() && !target->is_abstract() && target->can_be_statically_bound()) {
switch (code) {
case Bytecodes::_invokevirtual: code = Bytecodes::_invokespecial; break;
case Bytecodes::_invokehandle: code = Bytecodes::_invokestatic; break;
case Bytecodes::_invokevirtual:
code = Bytecodes::_invokespecial;
break;
case Bytecodes::_invokehandle:
code = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokespecial;
break;
}
}
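The rewrite rule itself, isolated (a sketch; the enum is a stand-in for Bytecodes::Code): provably monomorphic calls are downgraded to a direct form, and invokehandle splits on whether the target is static.

#include <cstdio>

enum Code { code_invokevirtual, code_invokespecial,
            code_invokestatic,  code_invokehandle };

Code static_bind(Code code, bool target_is_static) {
  switch (code) {
  case code_invokevirtual: return code_invokespecial;
  case code_invokehandle:  return target_is_static ? code_invokestatic
                                                   : code_invokespecial;
  default:                 return code; // already direct, or truly virtual
  }
}

int main() {
  printf("%d\n", static_bind(code_invokehandle, true) == code_invokestatic);
}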
@ -826,8 +830,8 @@ void BCEscapeAnalyzer::iterate_one_block(ciBlock *blk, StateInfo &state, Growabl
break;
case Bytecodes::_getstatic:
case Bytecodes::_getfield:
{ bool will_link;
ciField* field = s.get_field(will_link);
{ bool ignored_will_link;
ciField* field = s.get_field(ignored_will_link);
BasicType field_type = field->type()->basic_type();
if (s.cur_bc() != Bytecodes::_getstatic) {
set_method_escape(state.apop());
@ -865,16 +869,21 @@ void BCEscapeAnalyzer::iterate_one_block(ciBlock *blk, StateInfo &state, Growabl
case Bytecodes::_invokestatic:
case Bytecodes::_invokedynamic:
case Bytecodes::_invokeinterface:
{ bool will_link;
ciMethod* target = s.get_method(will_link);
ciKlass* holder = s.get_declared_method_holder();
{ bool ignored_will_link;
ciSignature* declared_signature = NULL;
ciMethod* target = s.get_method(ignored_will_link, &declared_signature);
ciKlass* holder = s.get_declared_method_holder();
assert(declared_signature != NULL, "cannot be null");
// Push the appendix argument, if there is one.
if (s.has_appendix()) {
state.apush(unknown_obj);
}
// Pass in raw bytecode because we need to see invokehandle instructions.
invoke(state, s.cur_bc_raw(), target, holder);
ciType* return_type = target->return_type();
// We are using the return type of the declared signature here because
// it might be a more concrete type than the one from the target (e.g.
// for invokedynamic and invokehandle).
ciType* return_type = declared_signature->return_type();
if (!return_type->is_primitive_type()) {
state.apush(unknown_obj);
} else if (return_type->is_one_word()) {

@ -738,91 +738,81 @@ methodOop ciEnv::lookup_method(instanceKlass* accessor,
ciMethod* ciEnv::get_method_by_index_impl(constantPoolHandle cpool,
int index, Bytecodes::Code bc,
ciInstanceKlass* accessor) {
int holder_index = cpool->klass_ref_index_at(index);
bool holder_is_accessible;
ciKlass* holder = get_klass_by_index_impl(cpool, holder_index, holder_is_accessible, accessor);
ciInstanceKlass* declared_holder = get_instance_klass_for_declared_method_holder(holder);
if (bc == Bytecodes::_invokedynamic) {
ConstantPoolCacheEntry* secondary_entry = cpool->cache()->secondary_entry_at(index);
const bool is_resolved = !secondary_entry->is_f1_null();
// FIXME: code generation could allow for null (unlinked) call site
// The call site could be made patchable as follows:
// Load the appendix argument from the constant pool.
// Test the appendix argument and jump to a known deopt routine if it is null.
// Jump through a patchable call site, which is initially a deopt routine.
// Patch the call site to the nmethod entry point of the static compiled lambda form.
// As with other two-component call sites, both values must be independently verified.
// Get the method's name and signature.
Symbol* name_sym = cpool->name_ref_at(index);
Symbol* sig_sym = cpool->signature_ref_at(index);
if (cpool->has_preresolution()
|| (holder == ciEnv::MethodHandle_klass() &&
MethodHandles::is_signature_polymorphic_name(holder->get_klassOop(), name_sym))) {
// Short-circuit lookups for JSR 292-related call sites.
// That is, do not rely only on name-based lookups, because they may fail
// if the names are not resolvable in the boot class loader (7056328).
switch (bc) {
case Bytecodes::_invokevirtual:
case Bytecodes::_invokeinterface:
case Bytecodes::_invokespecial:
case Bytecodes::_invokestatic:
{
oop appendix_oop = NULL;
methodOop m = constantPoolOopDesc::method_at_if_loaded(cpool, index);
if (m != NULL) {
return get_object(m)->as_method();
}
}
break;
if (is_resolved) {
// Get the invoker methodOop and the extra argument from the constant pool.
methodOop adapter = secondary_entry->f2_as_vfinal_method();
return get_object(adapter)->as_method();
}
}
if (holder_is_accessible) { // Our declared holder is loaded.
instanceKlass* lookup = declared_holder->get_instanceKlass();
methodOop m = lookup_method(accessor->get_instanceKlass(), lookup, name_sym, sig_sym, bc);
if (m != NULL &&
(bc == Bytecodes::_invokestatic
? instanceKlass::cast(m->method_holder())->is_not_initialized()
: !instanceKlass::cast(m->method_holder())->is_loaded())) {
m = NULL;
}
if (m != NULL) {
// We found the method.
return get_object(m)->as_method();
}
}
// Either the declared holder was not loaded, or the method could
// not be found. Create a dummy ciMethod to represent the failed
// lookup.
ciSymbol* name = get_symbol(name_sym);
ciSymbol* signature = get_symbol(sig_sym);
return get_unloaded_method(declared_holder, name, signature, accessor);
}
// ------------------------------------------------------------------
// ciEnv::get_fake_invokedynamic_method_impl
ciMethod* ciEnv::get_fake_invokedynamic_method_impl(constantPoolHandle cpool,
int index, Bytecodes::Code bc,
ciInstanceKlass* accessor) {
// Compare the following logic with InterpreterRuntime::resolve_invokedynamic.
assert(bc == Bytecodes::_invokedynamic, "must be invokedynamic");
ConstantPoolCacheEntry* secondary_entry = cpool->cache()->secondary_entry_at(index);
bool is_resolved = !secondary_entry->is_f1_null();
// FIXME: code generation could allow for null (unlinked) call site
// The call site could be made patchable as follows:
// Load the appendix argument from the constant pool.
// Test the appendix argument and jump to a known deopt routine if it is null.
// Jump through a patchable call site, which is initially a deopt routine.
// Patch the call site to the nmethod entry point of the static compiled lambda form.
// As with other two-component call sites, both values must be independently verified.
// Call site might not be resolved yet.
// Stop the code path here with an unlinked method.
if (!is_resolved) {
// Fake a method that is equivalent to a declared method.
ciInstanceKlass* holder = get_object(SystemDictionary::MethodHandle_klass())->as_instance_klass();
ciSymbol* name = ciSymbol::invokeBasic_name();
ciSymbol* signature = get_symbol(cpool->signature_ref_at(index));
return get_unloaded_method(holder, name, signature, accessor);
}
} else {
const int holder_index = cpool->klass_ref_index_at(index);
bool holder_is_accessible;
ciKlass* holder = get_klass_by_index_impl(cpool, holder_index, holder_is_accessible, accessor);
ciInstanceKlass* declared_holder = get_instance_klass_for_declared_method_holder(holder);
// Get the invoker methodOop and the extra argument from the constant pool.
methodOop adapter = secondary_entry->f2_as_vfinal_method();
return get_object(adapter)->as_method();
// Get the method's name and signature.
Symbol* name_sym = cpool->name_ref_at(index);
Symbol* sig_sym = cpool->signature_ref_at(index);
if (cpool->has_preresolution()
|| (holder == ciEnv::MethodHandle_klass() &&
MethodHandles::is_signature_polymorphic_name(holder->get_klassOop(), name_sym))) {
// Short-circuit lookups for JSR 292-related call sites.
// That is, do not rely only on name-based lookups, because they may fail
// if the names are not resolvable in the boot class loader (7056328).
switch (bc) {
case Bytecodes::_invokevirtual:
case Bytecodes::_invokeinterface:
case Bytecodes::_invokespecial:
case Bytecodes::_invokestatic:
{
methodOop m = constantPoolOopDesc::method_at_if_loaded(cpool, index);
if (m != NULL) {
return get_object(m)->as_method();
}
}
break;
}
}
if (holder_is_accessible) { // Our declared holder is loaded.
instanceKlass* lookup = declared_holder->get_instanceKlass();
methodOop m = lookup_method(accessor->get_instanceKlass(), lookup, name_sym, sig_sym, bc);
if (m != NULL &&
(bc == Bytecodes::_invokestatic
? instanceKlass::cast(m->method_holder())->is_not_initialized()
: !instanceKlass::cast(m->method_holder())->is_loaded())) {
m = NULL;
}
if (m != NULL) {
// We found the method.
return get_object(m)->as_method();
}
}
// Either the declared holder was not loaded, or the method could
// not be found. Create a dummy ciMethod to represent the failed
// lookup.
ciSymbol* name = get_symbol(name_sym);
ciSymbol* signature = get_symbol(sig_sym);
return get_unloaded_method(declared_holder, name, signature, accessor);
}
}
@ -853,11 +843,7 @@ ciInstanceKlass* ciEnv::get_instance_klass_for_declared_method_holder(ciKlass* m
ciMethod* ciEnv::get_method_by_index(constantPoolHandle cpool,
int index, Bytecodes::Code bc,
ciInstanceKlass* accessor) {
if (bc == Bytecodes::_invokedynamic) {
GUARDED_VM_ENTRY(return get_fake_invokedynamic_method_impl(cpool, index, bc, accessor);)
} else {
GUARDED_VM_ENTRY(return get_method_by_index_impl( cpool, index, bc, accessor);)
}
GUARDED_VM_ENTRY(return get_method_by_index_impl(cpool, index, bc, accessor);)
}

@ -152,9 +152,6 @@ private:
ciMethod* get_method_by_index_impl(constantPoolHandle cpool,
int method_index, Bytecodes::Code bc,
ciInstanceKlass* loading_klass);
ciMethod* get_fake_invokedynamic_method_impl(constantPoolHandle cpool,
int index, Bytecodes::Code bc,
ciInstanceKlass* accessor);
// Helper methods
bool check_klass_accessibility(ciKlass* accessing_klass,

@ -1215,9 +1215,10 @@ void ciMethod::print_impl(outputStream* st) {
holder()->print_name_on(st);
st->print(" signature=");
signature()->as_symbol()->print_symbol_on(st);
st->print(" arg_size=%d", arg_size());
if (is_loaded()) {
st->print(" loaded=true flags=");
st->print(" loaded=true");
st->print(" arg_size=%d", arg_size());
st->print(" flags=");
flags().print_member_flags(st);
} else {
st->print(" loaded=false");

@ -355,11 +355,23 @@ int ciBytecodeStream::get_method_index() {
// ciBytecodeStream::get_method
//
// If this is a method invocation bytecode, get the invoked method.
ciMethod* ciBytecodeStream::get_method(bool& will_link) {
// Additionally return the declared signature to get more concrete
// type information if required (Cf. invokedynamic and invokehandle).
ciMethod* ciBytecodeStream::get_method(bool& will_link, ciSignature* *declared_signature_result) {
VM_ENTRY_MARK;
ciEnv* env = CURRENT_ENV;
constantPoolHandle cpool(_method->get_methodOop()->constants());
ciMethod* m = CURRENT_ENV->get_method_by_index(cpool, get_method_index(), cur_bc(), _holder);
ciMethod* m = env->get_method_by_index(cpool, get_method_index(), cur_bc(), _holder);
will_link = m->is_loaded();
// Get declared method signature and return it.
if (has_optional_appendix()) {
const int sig_index = get_method_signature_index();
Symbol* sig_sym = cpool->symbol_at(sig_index);
ciKlass* pool_holder = env->get_object(cpool->pool_holder())->as_klass();
(*declared_signature_result) = new (env->arena()) ciSignature(pool_holder, cpool, env->get_symbol(sig_sym));
} else {
(*declared_signature_result) = m->signature();
}
return m;
}
@ -418,24 +430,6 @@ int ciBytecodeStream::get_method_holder_index() {
return cpool->klass_ref_index_at(get_method_index());
}
// ------------------------------------------------------------------
// ciBytecodeStream::get_declared_method_signature
//
// Get the declared signature of the currently referenced method.
//
// This is always the same as the signature of the resolved method
// itself, except for _invokehandle and _invokedynamic calls.
//
ciSignature* ciBytecodeStream::get_declared_method_signature() {
int sig_index = get_method_signature_index();
VM_ENTRY_MARK;
ciEnv* env = CURRENT_ENV;
constantPoolHandle cpool(_method->get_methodOop()->constants());
Symbol* sig_sym = cpool->symbol_at(sig_index);
ciKlass* pool_holder = env->get_object(cpool->pool_holder())->as_klass();
return new (env->arena()) ciSignature(pool_holder, cpool, env->get_symbol(sig_sym));
}
// ------------------------------------------------------------------
// ciBytecodeStream::get_method_signature_index
//
@ -443,11 +437,12 @@ ciSignature* ciBytecodeStream::get_declared_method_signature() {
// referenced by the current bytecode. Used for generating
// deoptimization information.
int ciBytecodeStream::get_method_signature_index() {
VM_ENTRY_MARK;
constantPoolOop cpool = _holder->get_instanceKlass()->constants();
int method_index = get_method_index();
int name_and_type_index = cpool->name_and_type_ref_index_at(method_index);
return cpool->signature_ref_index_at(name_and_type_index);
GUARDED_VM_ENTRY(
constantPoolOop cpool = _holder->get_instanceKlass()->constants();
const int method_index = get_method_index();
const int name_and_type_index = cpool->name_and_type_ref_index_at(method_index);
return cpool->signature_ref_index_at(name_and_type_index);
)
}
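Switching from VM_ENTRY_MARK to GUARDED_VM_ENTRY wraps the whole lookup in a guard that is set up and torn down around the enclosed statements, even on the early return. A self-contained sketch of that statement-guard pattern; the Guard type and macro here are stand-ins, not the HotSpot definitions:

    #include <cstdio>

    struct Guard {               // stands in for the VM-entry state transition
      Guard()  { std::puts("enter VM"); }
      ~Guard() { std::puts("leave VM"); }
    };

    // Run 'body' with a Guard alive for its whole duration; a 'return'
    // inside the body still triggers the destructor on the way out.
    #define GUARDED(body) { Guard g__; body }

    int signature_index(int method_index) {
      GUARDED(
        return method_index + 1;   // placeholder for the constant-pool lookup
      )
    }

    int main() { return signature_index(41) == 42 ? 0 : 1; }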
// ------------------------------------------------------------------

@ -151,6 +151,8 @@ public:
  // Does this instruction contain an index which refers into the CP cache?
bool has_cache_index() const { return Bytecodes::uses_cp_cache(cur_bc_raw()); }
bool has_optional_appendix() { return Bytecodes::has_optional_appendix(cur_bc_raw()); }
int get_index_u1() const {
return bytecode().get_index_u1(cur_bc_raw());
}
@ -257,13 +259,11 @@ public:
int get_field_holder_index();
int get_field_signature_index();
// If this is a method invocation bytecode, get the invoked method.
ciMethod* get_method(bool& will_link);
ciMethod* get_method(bool& will_link, ciSignature* *declared_signature_result);
bool has_appendix();
ciObject* get_appendix();
ciKlass* get_declared_method_holder();
int get_method_holder_index();
ciSignature* get_declared_method_signature();
int get_method_signature_index();
ciCPCache* get_cpcache() const;

@ -643,9 +643,11 @@ void ciTypeFlow::StateVector::do_getstatic(ciBytecodeStream* str) {
// ------------------------------------------------------------------
// ciTypeFlow::StateVector::do_invoke
void ciTypeFlow::StateVector::do_invoke(ciBytecodeStream* str,
bool has_receiver_foo) {
bool has_receiver) {
bool will_link;
ciMethod* callee = str->get_method(will_link);
ciSignature* declared_signature = NULL;
ciMethod* callee = str->get_method(will_link, &declared_signature);
assert(declared_signature != NULL, "cannot be null");
if (!will_link) {
// We weren't able to find the method.
if (str->cur_bc() == Bytecodes::_invokedynamic) {
@ -658,22 +660,12 @@ void ciTypeFlow::StateVector::do_invoke(ciBytecodeStream* str,
trap(str, unloaded_holder, str->get_method_holder_index());
}
} else {
// TODO Use Bytecode_invoke after metadata changes.
//Bytecode_invoke inv(str->method(), str->cur_bci());
//const bool has_receiver = callee->is_loaded() ? !callee->is_static() : inv.has_receiver();
Bytecode inv(str);
Bytecodes::Code code = inv.invoke_code();
const bool has_receiver = callee->is_loaded() ? !callee->is_static() : code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic;
ciSignature* signature = callee->signature();
ciSignatureStream sigstr(signature);
// Push appendix argument, if one.
if (str->has_appendix()) {
ciObject* appendix = str->get_appendix();
push_object(appendix->klass());
}
int arg_size = signature->size();
int stack_base = stack_size() - arg_size;
// We are using the declared signature here because it might be
// different from the callee signature (Cf. invokedynamic and
// invokehandle).
ciSignatureStream sigstr(declared_signature);
const int arg_size = declared_signature->size();
const int stack_base = stack_size() - arg_size;
int i = 0;
for( ; !sigstr.at_return_type(); sigstr.next()) {
ciType* type = sigstr.type();
@ -689,7 +681,6 @@ void ciTypeFlow::StateVector::do_invoke(ciBytecodeStream* str,
for (int j = 0; j < arg_size; j++) {
pop();
}
assert(!callee->is_loaded() || has_receiver == !callee->is_static(), "mismatch");
if (has_receiver) {
// Check this?
pop_object();
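do_invoke now sizes its argument pops from the declared signature and pushes the appendix before counting, rather than consulting the callee's own signature. A toy operand-stack model of that bookkeeping (plain C++, illustration only):

    #include <cassert>
    #include <vector>

    int main() {
      std::vector<int> stack = {7, 8, 9};            // ..., arg0, arg1, arg2
      bool has_appendix = true;

      if (has_appendix) stack.push_back(42);         // appendix rides on top

      // A declared signature for an appendix call already counts the appendix.
      int arg_size = 3 + (has_appendix ? 1 : 0);
      int stack_base = (int)stack.size() - arg_size; // args occupy [stack_base, top)
      assert(stack_base == 0);

      for (int j = 0; j < arg_size; j++) stack.pop_back();
      assert(stack.empty());
    }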

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,9 +32,9 @@
#include "utilities/globalDefinitions.hpp"
StackMapFrame::StackMapFrame(u2 max_locals, u2 max_stack, ClassVerifier* v) :
_offset(0), _locals_size(0), _stack_size(0), _flags(0),
_max_locals(max_locals), _max_stack(max_stack),
_verifier(v) {
_offset(0), _locals_size(0), _stack_size(0),
_stack_mark(0), _flags(0), _max_locals(max_locals),
_max_stack(max_stack), _verifier(v) {
Thread* thr = v->thread();
_locals = NEW_RESOURCE_ARRAY_IN_THREAD(thr, VerificationType, max_locals);
_stack = NEW_RESOURCE_ARRAY_IN_THREAD(thr, VerificationType, max_stack);
@ -157,17 +157,17 @@ void StackMapFrame::copy_stack(const StackMapFrame* src) {
}
}
bool StackMapFrame::is_assignable_to(
// Returns the location of the first mismatch, or 'len' if there are no
// mismatches
int StackMapFrame::is_assignable_to(
VerificationType* from, VerificationType* to, int32_t len, TRAPS) const {
for (int32_t i = 0; i < len; i++) {
bool subtype = to[i].is_assignable_from(
from[i], verifier(), THREAD);
if (!subtype) {
return false;
int32_t i = 0;
for (i = 0; i < len; i++) {
if (!to[i].is_assignable_from(from[i], verifier(), THREAD)) {
break;
}
}
return true;
return i;
}
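The helper now reports the first offending slot instead of a bare bool, which is what lets the caller build a precise ErrorContext. The same contract in miniature, with a toy assignability test standing in for is_assignable_from:

    #include <cassert>
    #include <vector>

    // Returns the index of the first element of 'from' not assignable to the
    // corresponding element of 'to', or from.size() if all match.
    // Assumes the two vectors have equal length.
    static size_t first_mismatch(const std::vector<int>& from,
                                 const std::vector<int>& to) {
      size_t i = 0;
      for (; i < from.size(); i++) {
        if (from[i] > to[i]) break;    // stand-in for is_assignable_from
      }
      return i;
    }

    int main() {
      assert(first_mismatch({1, 2, 3}, {1, 2, 3}) == 3);   // no mismatch -> len
      assert(first_mismatch({1, 9, 3}, {1, 2, 3}) == 1);   // first bad slot
    }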
bool StackMapFrame::has_flag_match_exception(
@ -209,50 +209,84 @@ bool StackMapFrame::has_flag_match_exception(
}
bool StackMapFrame::is_assignable_to(
const StackMapFrame* target, bool is_exception_handler, TRAPS) const {
if (_max_locals != target->max_locals() ||
_stack_size != target->stack_size()) {
const StackMapFrame* target, bool is_exception_handler,
ErrorContext* ctx, TRAPS) const {
if (_max_locals != target->max_locals()) {
*ctx = ErrorContext::locals_size_mismatch(
_offset, (StackMapFrame*)this, (StackMapFrame*)target);
return false;
}
if (_stack_size != target->stack_size()) {
*ctx = ErrorContext::stack_size_mismatch(
_offset, (StackMapFrame*)this, (StackMapFrame*)target);
return false;
}
// Only need to compare type elements up to target->locals() or target->stack().
// The remaining type elements in this state can be ignored because they are
// assignable to bogus type.
bool match_locals = is_assignable_to(
_locals, target->locals(), target->locals_size(), CHECK_false);
bool match_stack = is_assignable_to(
_stack, target->stack(), _stack_size, CHECK_false);
bool match_flags = (_flags | target->flags()) == target->flags();
int mismatch_loc;
mismatch_loc = is_assignable_to(
_locals, target->locals(), target->locals_size(), THREAD);
if (mismatch_loc != target->locals_size()) {
*ctx = ErrorContext::bad_type(target->offset(),
TypeOrigin::local(mismatch_loc, (StackMapFrame*)this),
TypeOrigin::sm_local(mismatch_loc, (StackMapFrame*)target));
return false;
}
mismatch_loc = is_assignable_to(_stack, target->stack(), _stack_size, THREAD);
if (mismatch_loc != _stack_size) {
*ctx = ErrorContext::bad_type(target->offset(),
TypeOrigin::stack(mismatch_loc, (StackMapFrame*)this),
TypeOrigin::sm_stack(mismatch_loc, (StackMapFrame*)target));
return false;
}
return match_locals && match_stack &&
(match_flags || (is_exception_handler && has_flag_match_exception(target)));
bool match_flags = (_flags | target->flags()) == target->flags();
if (match_flags || is_exception_handler && has_flag_match_exception(target)) {
return true;
} else {
*ctx = ErrorContext::bad_flags(target->offset(),
(StackMapFrame*)this, (StackMapFrame*)target);
return false;
}
}
VerificationType StackMapFrame::pop_stack_ex(VerificationType type, TRAPS) {
if (_stack_size <= 0) {
verifier()->verify_error(_offset, "Operand stack underflow");
verifier()->verify_error(
ErrorContext::stack_underflow(_offset, this),
"Operand stack underflow");
return VerificationType::bogus_type();
}
VerificationType top = _stack[--_stack_size];
bool subtype = type.is_assignable_from(
top, verifier(), CHECK_(VerificationType::bogus_type()));
if (!subtype) {
verifier()->verify_error(_offset, "Bad type on operand stack");
verifier()->verify_error(
ErrorContext::bad_type(_offset, stack_top_ctx(),
TypeOrigin::implicit(type)),
"Bad type on operand stack");
return VerificationType::bogus_type();
}
NOT_PRODUCT( _stack[_stack_size] = VerificationType::bogus_type(); )
return top;
}
VerificationType StackMapFrame::get_local(
int32_t index, VerificationType type, TRAPS) {
if (index >= _max_locals) {
verifier()->verify_error(_offset, "Local variable table overflow");
verifier()->verify_error(
ErrorContext::bad_local_index(_offset, index),
"Local variable table overflow");
return VerificationType::bogus_type();
}
bool subtype = type.is_assignable_from(_locals[index],
verifier(), CHECK_(VerificationType::bogus_type()));
if (!subtype) {
verifier()->verify_error(_offset, "Bad local variable type");
verifier()->verify_error(
ErrorContext::bad_type(_offset,
TypeOrigin::local(index, this),
TypeOrigin::implicit(type)),
"Bad local variable type");
return VerificationType::bogus_type();
}
if(index >= _locals_size) { _locals_size = index + 1; }
@ -264,23 +298,37 @@ void StackMapFrame::get_local_2(
assert(type1.is_long() || type1.is_double(), "must be long/double");
assert(type2.is_long2() || type2.is_double2(), "must be long/double_2");
if (index >= _locals_size - 1) {
verifier()->verify_error(_offset, "get long/double overflows locals");
verifier()->verify_error(
ErrorContext::bad_local_index(_offset, index),
"get long/double overflows locals");
return;
}
bool subtype1 = type1.is_assignable_from(
_locals[index], verifier(), CHECK);
bool subtype2 = type2.is_assignable_from(
_locals[index+1], verifier(), CHECK);
if (!subtype1 || !subtype2) {
verifier()->verify_error(_offset, "Bad local variable type");
return;
bool subtype = type1.is_assignable_from(_locals[index], verifier(), CHECK);
if (!subtype) {
verifier()->verify_error(
ErrorContext::bad_type(_offset,
TypeOrigin::local(index, this), TypeOrigin::implicit(type1)),
"Bad local variable type");
} else {
subtype = type2.is_assignable_from(_locals[index + 1], verifier(), CHECK);
if (!subtype) {
/* Unreachable? All local store routines convert a split long or double
* into a TOP during the store. So we should never end up seeing an
* orphaned half. */
verifier()->verify_error(
ErrorContext::bad_type(_offset,
TypeOrigin::local(index + 1, this), TypeOrigin::implicit(type2)),
"Bad local variable type");
}
}
}
void StackMapFrame::set_local(int32_t index, VerificationType type, TRAPS) {
assert(!type.is_check(), "Must be a real type");
if (index >= _max_locals) {
verifier()->verify_error("Local variable table overflow", _offset);
verifier()->verify_error(
ErrorContext::bad_local_index(_offset, index),
"Local variable table overflow");
return;
}
// If type at index is double or long, set the next location to be unusable
@ -310,7 +358,9 @@ void StackMapFrame::set_local_2(
assert(type1.is_long() || type1.is_double(), "must be long/double");
assert(type2.is_long2() || type2.is_double2(), "must be long/double_2");
if (index >= _max_locals - 1) {
verifier()->verify_error("Local variable table overflow", _offset);
verifier()->verify_error(
ErrorContext::bad_local_index(_offset, index),
"Local variable table overflow");
return;
}
// If type at index+1 is double or long, set the next location to be unusable
@ -336,21 +386,30 @@ void StackMapFrame::set_local_2(
}
}
#ifndef PRODUCT
void StackMapFrame::print() const {
tty->print_cr("stackmap_frame[%d]:", _offset);
tty->print_cr("flags = 0x%x", _flags);
tty->print("locals[%d] = { ", _locals_size);
for (int32_t i = 0; i < _locals_size; i++) {
_locals[i].print_on(tty);
}
tty->print_cr(" }");
tty->print("stack[%d] = { ", _stack_size);
for (int32_t j = 0; j < _stack_size; j++) {
_stack[j].print_on(tty);
}
tty->print_cr(" }");
TypeOrigin StackMapFrame::stack_top_ctx() {
return TypeOrigin::stack(_stack_size, this);
}
#endif
void StackMapFrame::print_on(outputStream* str) const {
str->indent().print_cr("bci: @%d", _offset);
str->indent().print_cr("flags: {%s }",
flag_this_uninit() ? " flagThisUninit" : "");
str->indent().print("locals: {");
for (int32_t i = 0; i < _locals_size; ++i) {
str->print(" ");
_locals[i].print_on(str);
if (i != _locals_size - 1) {
str->print(",");
}
}
str->print_cr(" }");
str->indent().print("stack: {");
for (int32_t j = 0; j < _stack_size; ++j) {
str->print(" ");
_stack[j].print_on(str);
if (j != _stack_size - 1) {
str->print(",");
}
}
str->print_cr(" }");
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,6 +34,8 @@
// A StackMapFrame represents one frame in the stack map attribute.
class TypeContext;
enum {
FLAG_THIS_UNINIT = 0x01
};
@ -47,6 +49,10 @@ class StackMapFrame : public ResourceObj {
int32_t _locals_size; // number of valid type elements in _locals
int32_t _stack_size; // number of valid type elements in _stack
int32_t _stack_mark; // Records the size of the stack prior to an
// instruction modification, to allow rewinding
// when/if an error occurs.
int32_t _max_locals;
int32_t _max_stack;
@ -56,6 +62,31 @@ class StackMapFrame : public ResourceObj {
ClassVerifier* _verifier; // the verifier verifying this method
StackMapFrame(const StackMapFrame& cp) :
_offset(cp._offset), _locals_size(cp._locals_size),
_stack_size(cp._stack_size), _stack_mark(cp._stack_mark),
_max_locals(cp._max_locals), _max_stack(cp._max_stack),
_flags(cp._flags) {
_locals = NEW_RESOURCE_ARRAY(VerificationType, _max_locals);
for (int i = 0; i < _max_locals; ++i) {
if (i < _locals_size) {
_locals[i] = cp._locals[i];
} else {
_locals[i] = VerificationType::bogus_type();
}
}
int ss = MAX2(_stack_size, _stack_mark);
_stack = NEW_RESOURCE_ARRAY(VerificationType, _max_stack);
for (int i = 0; i < _max_stack; ++i) {
if (i < ss) {
_stack[i] = cp._stack[i];
} else {
_stack[i] = VerificationType::bogus_type();
}
}
_verifier = NULL;
}
public:
// constructors
@ -77,16 +108,21 @@ class StackMapFrame : public ResourceObj {
ClassVerifier* v) : _offset(offset), _flags(flags),
_locals_size(locals_size),
_stack_size(stack_size),
_stack_mark(-1),
_max_locals(max_locals),
_max_stack(max_stack),
_locals(locals), _stack(stack),
_verifier(v) { }
static StackMapFrame* copy(StackMapFrame* smf) {
return new StackMapFrame(*smf);
}
inline void set_offset(int32_t offset) { _offset = offset; }
inline void set_verifier(ClassVerifier* v) { _verifier = v; }
inline void set_flags(u1 flags) { _flags = flags; }
inline void set_locals_size(u2 locals_size) { _locals_size = locals_size; }
inline void set_stack_size(u2 stack_size) { _stack_size = stack_size; }
inline void set_stack_size(u2 stack_size) { _stack_size = _stack_mark = stack_size; }
inline void clear_stack() { _stack_size = 0; }
inline int32_t offset() const { return _offset; }
inline ClassVerifier* verifier() const { return _verifier; }
@ -134,14 +170,37 @@ class StackMapFrame : public ResourceObj {
void copy_stack(const StackMapFrame* src);
// Return true if this stack map frame is assignable to target.
bool is_assignable_to(const StackMapFrame* target,
bool is_exception_handler, TRAPS) const;
bool is_assignable_to(
const StackMapFrame* target, bool is_exception_handler,
ErrorContext* ctx, TRAPS) const;
inline void set_mark() {
#ifdef DEBUG
// Put bogus type to indicate it's no longer valid.
if (_stack_mark != -1) {
for (int i = _stack_mark; i >= _stack_size; --i) {
_stack[i] = VerificationType::bogus_type();
}
}
#endif // def DEBUG
_stack_mark = _stack_size;
}
  // Used when an error occurs and we want to reset the stack to the state
  // it was in before operands were popped off.
void restore() {
if (_stack_mark != -1) {
_stack_size = _stack_mark;
}
}
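_stack_mark snapshots the stack depth before an instruction mutates it, and restore() rewinds to that depth for error reporting; the popped slots stay physically intact until overwritten, so rewinding re-exposes them. The pattern reduced to a fixed array, as a sketch:

    #include <cassert>

    struct MarkedStack {
      int data[8] = {0};
      int size = 0, mark = -1;                 // -1: no snapshot yet
      void push(int v) { data[size++] = v; }
      void pop()       { --size; }             // slot stays valid until overwritten
      void set_mark()  { mark = size; }
      void restore()   { if (mark != -1) size = mark; }
    };

    int main() {
      MarkedStack s;
      s.push(1); s.push(2); s.push(3);
      s.set_mark();         // remember depth 3 before simulating an instruction
      s.pop(); s.pop();     // the instruction consumes operands...
      s.restore();          // ...an error occurred: rewind to the snapshot
      assert(s.size == 3 && s.data[2] == 3);
    }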
// Push type into stack type array.
inline void push_stack(VerificationType type, TRAPS) {
assert(!type.is_check(), "Must be a real type");
if (_stack_size >= _max_stack) {
verifier()->verify_error(_offset, "Operand stack overflow");
verifier()->verify_error(
ErrorContext::stack_overflow(_offset, this),
"Operand stack overflow");
return;
}
_stack[_stack_size++] = type;
@ -152,7 +211,9 @@ class StackMapFrame : public ResourceObj {
assert(type1.is_long() || type1.is_double(), "must be long/double");
assert(type2.is_long2() || type2.is_double2(), "must be long/double_2");
if (_stack_size >= _max_stack - 1) {
verifier()->verify_error(_offset, "Operand stack overflow");
verifier()->verify_error(
ErrorContext::stack_overflow(_offset, this),
"Operand stack overflow");
return;
}
_stack[_stack_size++] = type1;
@ -162,13 +223,12 @@ class StackMapFrame : public ResourceObj {
// Pop and return the top type on stack without verifying.
inline VerificationType pop_stack(TRAPS) {
if (_stack_size <= 0) {
verifier()->verify_error(_offset, "Operand stack underflow");
verifier()->verify_error(
ErrorContext::stack_underflow(_offset, this),
"Operand stack underflow");
return VerificationType::bogus_type();
}
// Put bogus type to indicate it's no longer valid.
// Added to make it consistent with the other pop_stack method.
VerificationType top = _stack[--_stack_size];
NOT_PRODUCT( _stack[_stack_size] = VerificationType::bogus_type(); )
return top;
}
@ -180,8 +240,7 @@ class StackMapFrame : public ResourceObj {
bool subtype = type.is_assignable_from(
top, verifier(), CHECK_(VerificationType::bogus_type()));
if (subtype) {
_stack_size --;
NOT_PRODUCT( _stack[_stack_size] = VerificationType::bogus_type(); )
--_stack_size;
return top;
}
}
@ -199,8 +258,6 @@ class StackMapFrame : public ResourceObj {
bool subtype2 = type2.is_assignable_from(top2, verifier(), CHECK);
if (subtype1 && subtype2) {
_stack_size -= 2;
NOT_PRODUCT( _stack[_stack_size] = VerificationType::bogus_type(); )
NOT_PRODUCT( _stack[_stack_size+1] = VerificationType::bogus_type(); )
return;
}
}
@ -208,6 +265,14 @@ class StackMapFrame : public ResourceObj {
pop_stack_ex(type2, THREAD);
}
VerificationType local_at(int index) {
return _locals[index];
}
VerificationType stack_at(int index) {
return _stack[index];
}
// Uncommon case that throws exceptions.
VerificationType pop_stack_ex(VerificationType type, TRAPS);
@ -226,13 +291,14 @@ class StackMapFrame : public ResourceObj {
// Private auxiliary method used only in is_assignable_to(StackMapFrame).
  // Returns the location of the first mismatch, or 'len' if there are no mismatches.
bool is_assignable_to(
int is_assignable_to(
VerificationType* src, VerificationType* target, int32_t len, TRAPS) const;
bool has_flag_match_exception(const StackMapFrame* target) const;
// Debugging
void print() const PRODUCT_RETURN;
TypeOrigin stack_top_ctx();
void print_on(outputStream* str) const;
};
#endif // SHARE_VM_CLASSFILE_STACKMAPFRAME_HPP

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -46,7 +46,9 @@ StackMapTable::StackMapTable(StackMapReader* reader, StackMapFrame* init_frame,
_frame_array[i] = frame;
int offset = frame->offset();
if (offset >= code_len || code_data[offset] == 0) {
frame->verifier()->verify_error("StackMapTable error: bad offset");
frame->verifier()->verify_error(
ErrorContext::bad_stackmap(i, frame),
"StackMapTable error: bad offset");
return;
}
pre_frame = frame;
@ -68,12 +70,9 @@ int StackMapTable::get_index_from_offset(int32_t offset) const {
bool StackMapTable::match_stackmap(
StackMapFrame* frame, int32_t target,
bool match, bool update, TRAPS) const {
bool match, bool update, ErrorContext* ctx, TRAPS) const {
int index = get_index_from_offset(target);
return match_stackmap(
frame, target, index, match,
update, CHECK_VERIFY_(frame->verifier(), false));
return match_stackmap(frame, target, index, match, update, ctx, THREAD);
}
// Match and/or update current_frame to the frame in stackmap table with
@ -88,23 +87,23 @@ bool StackMapTable::match_stackmap(
// unconditional branch: true true
bool StackMapTable::match_stackmap(
StackMapFrame* frame, int32_t target, int32_t frame_index,
bool match, bool update, TRAPS) const {
bool match, bool update, ErrorContext* ctx, TRAPS) const {
if (frame_index < 0 || frame_index >= _frame_count) {
frame->verifier()->verify_error(frame->offset(),
"Expecting a stackmap frame at branch target %d", target);
*ctx = ErrorContext::missing_stackmap(frame->offset());
frame->verifier()->verify_error(
*ctx, "Expecting a stackmap frame at branch target %d", target);
return false;
}
bool result = true;
StackMapFrame *stackmap_frame = _frame_array[frame_index];
bool result = true;
if (match) {
// when checking handler target, match == true && update == false
bool is_exception_handler = !update;
// Has direct control flow from last instruction, need to match the two
// frames.
result = frame->is_assignable_to(
stackmap_frame, is_exception_handler,
CHECK_VERIFY_(frame->verifier(), false));
result = frame->is_assignable_to(stackmap_frame, is_exception_handler,
ctx, CHECK_VERIFY_(frame->verifier(), result));
}
if (update) {
// Use the frame in stackmap table as current frame
@ -125,11 +124,12 @@ bool StackMapTable::match_stackmap(
void StackMapTable::check_jump_target(
StackMapFrame* frame, int32_t target, TRAPS) const {
ErrorContext ctx;
bool match = match_stackmap(
frame, target, true, false, CHECK_VERIFY(frame->verifier()));
frame, target, true, false, &ctx, CHECK_VERIFY(frame->verifier()));
if (!match || (target < 0 || target >= _code_length)) {
frame->verifier()->verify_error(frame->offset(),
"Inconsistent stackmap frames at branch target %d", target);
frame->verifier()->verify_error(ctx,
"Inconsistent stackmap frames at branch target %d", target);
return;
}
// check if uninitialized objects exist on backward branches
@ -139,25 +139,25 @@ void StackMapTable::check_jump_target(
void StackMapTable::check_new_object(
const StackMapFrame* frame, int32_t target, TRAPS) const {
if (frame->offset() > target && frame->has_new_object()) {
frame->verifier()->verify_error(frame->offset(),
"Uninitialized object exists on backward branch %d", target);
frame->verifier()->verify_error(
ErrorContext::bad_code(frame->offset()),
"Uninitialized object exists on backward branch %d", target);
return;
}
}
#ifndef PRODUCT
void StackMapTable::print() const {
tty->print_cr("StackMapTable: frame_count = %d", _frame_count);
tty->print_cr("table = { ");
for (int32_t i = 0; i < _frame_count; i++) {
_frame_array[i]->print();
void StackMapTable::print_on(outputStream* str) const {
str->indent().print_cr("StackMapTable: frame_count = %d", _frame_count);
str->indent().print_cr("table = { ");
{
streamIndentor si(str);
for (int32_t i = 0; i < _frame_count; ++i) {
_frame_array[i]->print_on(str);
}
}
tty->print_cr(" }");
str->print_cr(" }");
}
#endif
int32_t StackMapReader::chop(
VerificationType* locals, int32_t length, int32_t chops) {
if (locals == NULL) return -1;

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,6 +26,7 @@
#define SHARE_VM_CLASSFILE_STACKMAPTABLE_HPP
#include "classfile/stackMapFrame.hpp"
#include "classfile/verifier.hpp"
#include "memory/allocation.hpp"
#include "oops/constantPoolOop.hpp"
#include "oops/methodOop.hpp"
@ -73,12 +74,12 @@ class StackMapTable : public StackObj {
// specified offset. Return true if the two frames match.
bool match_stackmap(
StackMapFrame* current_frame, int32_t offset,
bool match, bool update, TRAPS) const;
bool match, bool update, ErrorContext* ctx, TRAPS) const;
// Match and/or update current_frame to the frame in stackmap table with
// specified offset and frame index. Return true if the two frames match.
bool match_stackmap(
StackMapFrame* current_frame, int32_t offset, int32_t frame_index,
bool match, bool update, TRAPS) const;
bool match, bool update, ErrorContext* ctx, TRAPS) const;
// Check jump instructions. Make sure there are no uninitialized
// instances on backward branch.
@ -93,8 +94,7 @@ class StackMapTable : public StackObj {
void check_new_object(
const StackMapFrame* frame, int32_t target, TRAPS) const;
// Debugging
void print() const PRODUCT_RETURN;
void print_on(outputStream* str) const;
};
class StackMapStream : StackObj {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -135,7 +135,6 @@ class verification_type_info {
!is_object() && !is_uninitialized()));
}
#ifdef ASSERT
void print_on(outputStream* st) {
switch (tag()) {
case ITEM_Top: st->print("Top"); break;
@ -154,14 +153,13 @@ class verification_type_info {
assert(false, "Bad verification_type_info");
}
}
#endif
};
#define FOR_EACH_STACKMAP_FRAME_TYPE(macro, arg1, arg2) \
macro(same_frame, arg1, arg2) \
macro(same_frame_extended, arg1, arg2) \
macro(same_frame_1_stack_item_frame, arg1, arg2) \
macro(same_frame_1_stack_item_extended, arg1, arg2) \
macro(same_locals_1_stack_item_frame, arg1, arg2) \
macro(same_locals_1_stack_item_extended, arg1, arg2) \
macro(chop_frame, arg1, arg2) \
macro(append_frame, arg1, arg2) \
macro(full_frame, arg1, arg2)
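FOR_EACH_STACKMAP_FRAME_TYPE is an X-macro: a single list of frame types expanded repeatedly to stamp out the as_xxx/is_xxx and dispatch boilerplate, which is why the rename to same_locals_1_stack_item only had to touch the list once. The idiom in a self-contained form:

    #include <cstdio>

    #define FOR_EACH_FRAME_TYPE(macro) \
      macro(same_frame)                \
      macro(chop_frame)                \
      macro(full_frame)

    // Expansion 1: an enum tag per frame type.
    #define AS_ENUM(t) t##_tag,
    enum FrameTag { FOR_EACH_FRAME_TYPE(AS_ENUM) };

    // Expansion 2: a printable name per frame type.
    #define AS_NAME(t) #t,
    static const char* frame_names[] = { FOR_EACH_FRAME_TYPE(AS_NAME) };

    int main() {
      std::printf("%s\n", frame_names[chop_frame_tag]);   // prints "chop_frame"
    }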
@ -203,9 +201,8 @@ class stack_map_frame {
// that we don't read past a particular memory limit. It returns false
// if any part of the data structure is outside the specified memory bounds.
inline bool verify(address start, address end) const;
#ifdef ASSERT
inline void print_on(outputStream* st) const;
#endif
inline void print_on(outputStream* st, int current_offset) const;
// Create as_xxx and is_xxx methods for the subtypes
#define FRAME_TYPE_DECL(stackmap_frame_type, arg1, arg2) \
@ -263,11 +260,9 @@ class same_frame : public stack_map_frame {
return true;
}
#ifdef ASSERT
void print_on(outputStream* st) const {
st->print("same_frame(%d)", offset_delta());
void print_on(outputStream* st, int current_offset = -1) const {
st->print("same_frame(@%d)", offset_delta() + current_offset);
}
#endif
};
class same_frame_extended : public stack_map_frame {
@ -311,14 +306,12 @@ class same_frame_extended : public stack_map_frame {
return frame_type_addr() + size() <= end;
}
#ifdef ASSERT
void print_on(outputStream* st) const {
st->print("same_frame_extended(%d)", offset_delta());
void print_on(outputStream* st, int current_offset = -1) const {
st->print("same_frame_extended(@%d)", offset_delta() + current_offset);
}
#endif
};
class same_frame_1_stack_item_frame : public stack_map_frame {
class same_locals_1_stack_item_frame : public stack_map_frame {
private:
address type_addr() const { return frame_type_addr() + sizeof(u1); }
@ -332,14 +325,14 @@ class same_frame_1_stack_item_frame : public stack_map_frame {
return tag >= 64 && tag < 128;
}
static same_frame_1_stack_item_frame* at(address addr) {
static same_locals_1_stack_item_frame* at(address addr) {
assert(is_frame_type(*addr), "Wrong frame id");
return (same_frame_1_stack_item_frame*)addr;
return (same_locals_1_stack_item_frame*)addr;
}
static same_frame_1_stack_item_frame* create_at(
static same_locals_1_stack_item_frame* create_at(
address addr, int offset_delta, verification_type_info* vti) {
same_frame_1_stack_item_frame* sm = (same_frame_1_stack_item_frame*)addr;
same_locals_1_stack_item_frame* sm = (same_locals_1_stack_item_frame*)addr;
sm->set_offset_delta(offset_delta);
if (vti != NULL) {
sm->set_type(vti);
@ -382,16 +375,15 @@ class same_frame_1_stack_item_frame : public stack_map_frame {
return types()->verify(start, end);
}
#ifdef ASSERT
void print_on(outputStream* st) const {
st->print("same_frame_1_stack_item_frame(%d,", offset_delta());
void print_on(outputStream* st, int current_offset = -1) const {
st->print("same_locals_1_stack_item_frame(@%d,",
offset_delta() + current_offset);
types()->print_on(st);
st->print(")");
}
#endif
};
class same_frame_1_stack_item_extended : public stack_map_frame {
class same_locals_1_stack_item_extended : public stack_map_frame {
private:
address offset_delta_addr() const { return frame_type_addr() + sizeof(u1); }
address type_addr() const { return offset_delta_addr() + sizeof(u2); }
@ -403,15 +395,15 @@ class same_frame_1_stack_item_extended : public stack_map_frame {
return tag == _frame_id;
}
static same_frame_1_stack_item_extended* at(address addr) {
static same_locals_1_stack_item_extended* at(address addr) {
assert(is_frame_type(*addr), "Wrong frame id");
return (same_frame_1_stack_item_extended*)addr;
return (same_locals_1_stack_item_extended*)addr;
}
static same_frame_1_stack_item_extended* create_at(
static same_locals_1_stack_item_extended* create_at(
address addr, int offset_delta, verification_type_info* vti) {
same_frame_1_stack_item_extended* sm =
(same_frame_1_stack_item_extended*)addr;
same_locals_1_stack_item_extended* sm =
(same_locals_1_stack_item_extended*)addr;
sm->set_frame_type(_frame_id);
sm->set_offset_delta(offset_delta);
if (vti != NULL) {
@ -448,13 +440,12 @@ class same_frame_1_stack_item_extended : public stack_map_frame {
return type_addr() < end && types()->verify(start, end);
}
#ifdef ASSERT
void print_on(outputStream* st) const {
st->print("same_frame_1_stack_item_extended(%d,", offset_delta());
void print_on(outputStream* st, int current_offset = -1) const {
st->print("same_locals_1_stack_item_extended(@%d,",
offset_delta() + current_offset);
types()->print_on(st);
st->print(")");
}
#endif
};
class chop_frame : public stack_map_frame {
@ -517,11 +508,9 @@ class chop_frame : public stack_map_frame {
return frame_type_addr() + size() <= end;
}
#ifdef ASSERT
void print_on(outputStream* st) const {
st->print("chop_frame(%d,%d)", offset_delta(), chops());
void print_on(outputStream* st, int current_offset = -1) const {
st->print("chop_frame(@%d,%d)", offset_delta() + current_offset, chops());
}
#endif
};
class append_frame : public stack_map_frame {
@ -618,9 +607,8 @@ class append_frame : public stack_map_frame {
return false;
}
#ifdef ASSERT
void print_on(outputStream* st) const {
st->print("append_frame(%d,", offset_delta());
void print_on(outputStream* st, int current_offset = -1) const {
st->print("append_frame(@%d,", offset_delta() + current_offset);
verification_type_info* vti = types();
for (int i = 0; i < number_of_types(); ++i) {
vti->print_on(st);
@ -631,7 +619,6 @@ class append_frame : public stack_map_frame {
}
st->print(")");
}
#endif
};
class full_frame : public stack_map_frame {
@ -774,9 +761,8 @@ class full_frame : public stack_map_frame {
return true;
}
#ifdef ASSERT
void print_on(outputStream* st) const {
st->print("full_frame(%d,{", offset_delta());
void print_on(outputStream* st, int current_offset = -1) const {
st->print("full_frame(@%d,{", offset_delta() + current_offset);
verification_type_info* vti = locals();
for (int i = 0; i < num_locals(); ++i) {
vti->print_on(st);
@ -798,7 +784,6 @@ class full_frame : public stack_map_frame {
}
st->print("})");
}
#endif
};
#define VIRTUAL_DISPATCH(stack_frame_type, func_name, args) \
@ -852,11 +837,9 @@ bool stack_map_frame::verify(address start, address end) const {
return false;
}
#ifdef ASSERT
void stack_map_frame::print_on(outputStream* st) const {
FOR_EACH_STACKMAP_FRAME_TYPE(VOID_VIRTUAL_DISPATCH, print_on, (st));
void stack_map_frame::print_on(outputStream* st, int offs = -1) const {
FOR_EACH_STACKMAP_FRAME_TYPE(VOID_VIRTUAL_DISPATCH, print_on, (st, offs));
}
#endif
#undef VIRTUAL_DISPATCH
#undef VOID_VIRTUAL_DISPATCH
@ -873,16 +856,46 @@ stack_frame_type* stack_map_frame::as_##stack_frame_type() const { \
FOR_EACH_STACKMAP_FRAME_TYPE(AS_SUBTYPE_DEF, x, x)
#undef AS_SUBTYPE_DEF
class stack_map_table {
private:
address number_of_entries_addr() const {
return (address)this;
}
address entries_addr() const {
return number_of_entries_addr() + sizeof(u2);
}
protected:
// No constructors - should be 'private', but GCC issues a warning if it is
stack_map_table() {}
stack_map_table(const stack_map_table&) {}
public:
static stack_map_table* at(address addr) {
return (stack_map_table*)addr;
}
u2 number_of_entries() const {
return Bytes::get_Java_u2(number_of_entries_addr());
}
stack_map_frame* entries() const {
return stack_map_frame::at(entries_addr());
}
void set_number_of_entries(u2 num) {
Bytes::put_Java_u2(number_of_entries_addr(), num);
}
};
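The new stack_map_table class overlays typed accessors on raw classfile bytes and reads multi-byte fields big-endian, in the style of Bytes::get_Java_u2. A freestanding equivalent of that accessor pattern:

    #include <cassert>
    #include <cstdint>

    // Classfile fields are big-endian; assemble a u2 from two raw bytes.
    static uint16_t get_java_u2(const uint8_t* p) {
      return (uint16_t)((p[0] << 8) | p[1]);
    }

    int main() {
      // number_of_entries (u2) followed by the first frame's tag byte.
      const uint8_t table[] = { 0x00, 0x03, 0x40 };
      assert(get_java_u2(table) == 3);              // three stack map entries
      const uint8_t* entries = table + sizeof(uint16_t);
      assert(*entries == 0x40);                     // first frame tag
    }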
class stack_map_table_attribute {
private:
address name_index_addr() const {
return (address)this; }
address attribute_length_addr() const {
return name_index_addr() + sizeof(u2); }
address number_of_entries_addr() const {
address stack_map_table_addr() const {
return attribute_length_addr() + sizeof(u4); }
address entries_addr() const {
return number_of_entries_addr() + sizeof(u2); }
protected:
// No constructors - should be 'private', but GCC issues a warning if it is
@ -896,17 +909,11 @@ class stack_map_table_attribute {
}
u2 name_index() const {
    return Bytes::get_Java_u2(name_index_addr()); }
u4 attribute_length() const {
return Bytes::get_Java_u4(attribute_length_addr()); }
u2 number_of_entries() const {
return Bytes::get_Java_u2(number_of_entries_addr()); }
stack_map_frame* entries() const {
return stack_map_frame::at(entries_addr());
}
static size_t header_size() {
return sizeof(u2) + sizeof(u4);
return Bytes::get_Java_u4(attribute_length_addr()); }
stack_map_table* table() const {
return stack_map_table::at(stack_map_table_addr());
}
void set_name_index(u2 idx) {
@ -915,9 +922,8 @@ class stack_map_table_attribute {
void set_attribute_length(u4 len) {
Bytes::put_Java_u4(attribute_length_addr(), len);
}
void set_number_of_entries(u2 num) {
Bytes::put_Java_u2(number_of_entries_addr(), num);
}
};
#undef FOR_EACH_STACKMAP_FRAME_TYPE
#endif // SHARE_VM_CLASSFILE_STACKMAPTABLEFORMAT_HPP

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -110,34 +110,34 @@ VerificationType VerificationType::get_component(ClassVerifier *context, TRAPS)
}
}
#ifndef PRODUCT
void VerificationType::print_on(outputStream* st) const {
switch (_u._data) {
case Bogus: st->print(" bogus "); break;
case Category1: st->print(" category1 "); break;
case Category2: st->print(" category2 "); break;
case Category2_2nd: st->print(" category2_2nd "); break;
case Boolean: st->print(" boolean "); break;
case Byte: st->print(" byte "); break;
case Short: st->print(" short "); break;
case Char: st->print(" char "); break;
case Integer: st->print(" integer "); break;
case Float: st->print(" float "); break;
case Long: st->print(" long "); break;
case Double: st->print(" double "); break;
case Long_2nd: st->print(" long_2nd "); break;
case Double_2nd: st->print(" double_2nd "); break;
case Null: st->print(" null "); break;
case Bogus: st->print("top"); break;
case Category1: st->print("category1"); break;
case Category2: st->print("category2"); break;
case Category2_2nd: st->print("category2_2nd"); break;
case Boolean: st->print("boolean"); break;
case Byte: st->print("byte"); break;
case Short: st->print("short"); break;
case Char: st->print("char"); break;
case Integer: st->print("integer"); break;
case Float: st->print("float"); break;
case Long: st->print("long"); break;
case Double: st->print("double"); break;
case Long_2nd: st->print("long_2nd"); break;
case Double_2nd: st->print("double_2nd"); break;
case Null: st->print("null"); break;
case ReferenceQuery: st->print("reference type"); break;
case Category1Query: st->print("category1 type"); break;
case Category2Query: st->print("category2 type"); break;
case Category2_2ndQuery: st->print("category2_2nd type"); break;
default:
if (is_uninitialized_this()) {
st->print(" uninitializedThis ");
st->print("uninitializedThis");
} else if (is_uninitialized()) {
st->print(" uninitialized %d ", bci());
st->print("uninitialized %d", bci());
} else {
st->print(" class %s ", name()->as_klass_external_name());
name()->print_value_on(st);
}
}
}
#endif

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -157,7 +157,7 @@ class VerificationType VALUE_OBJ_CLASS_SPEC {
// For reference types, store the actual Symbol
static VerificationType reference_type(Symbol* sh) {
assert(((uintptr_t)sh & 0x3) == 0, "Oops must be aligned");
assert(((uintptr_t)sh & 0x3) == 0, "Symbols must be aligned");
// If the above assert fails in the future because oop* isn't aligned,
// then this type encoding system will have to change to have a tag value
  // to discriminate between oops and primitives.
@ -303,7 +303,7 @@ class VerificationType VALUE_OBJ_CLASS_SPEC {
return index;
}
void print_on(outputStream* st) const PRODUCT_RETURN;
void print_on(outputStream* st) const;
private:

File diff suppressed because it is too large

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -88,18 +88,178 @@ class StackMapTable;
#define CHECK_VERIFY_(verifier, result) \
CHECK_(result)); if ((verifier)->has_error()) return (result); (0
class TypeOrigin VALUE_OBJ_CLASS_SPEC {
private:
typedef enum {
CF_LOCALS, // Comes from the current frame locals
CF_STACK, // Comes from the current frame expression stack
SM_LOCALS, // Comes from stackmap locals
SM_STACK, // Comes from stackmap expression stack
CONST_POOL, // Comes from the constant pool
SIG, // Comes from method signature
IMPLICIT, // Comes implicitly from code or context
BAD_INDEX, // No type, but the index is bad
FRAME_ONLY, // No type, context just contains the frame
NONE
} Origin;
Origin _origin;
u2 _index; // local, stack, or constant pool index
StackMapFrame* _frame; // source frame if CF or SM
VerificationType _type; // The actual type
TypeOrigin(
Origin origin, u2 index, StackMapFrame* frame, VerificationType type)
: _origin(origin), _index(index), _frame(frame), _type(type) {}
public:
TypeOrigin() : _origin(NONE), _index(0), _frame(NULL) {}
static TypeOrigin null();
static TypeOrigin local(u2 index, StackMapFrame* frame);
static TypeOrigin stack(u2 index, StackMapFrame* frame);
static TypeOrigin sm_local(u2 index, StackMapFrame* frame);
static TypeOrigin sm_stack(u2 index, StackMapFrame* frame);
static TypeOrigin cp(u2 index, VerificationType vt);
static TypeOrigin signature(VerificationType vt);
static TypeOrigin bad_index(u2 index);
static TypeOrigin implicit(VerificationType t);
static TypeOrigin frame(StackMapFrame* frame);
void reset_frame();
void details(outputStream* ss) const;
void print_frame(outputStream* ss) const;
const StackMapFrame* frame() const { return _frame; }
bool is_valid() const { return _origin != NONE; }
u2 index() const { return _index; }
#ifdef ASSERT
void print_on(outputStream* str) const;
#endif
};
class ErrorContext VALUE_OBJ_CLASS_SPEC {
private:
typedef enum {
INVALID_BYTECODE, // There was a problem with the bytecode
WRONG_TYPE, // Type value was not as expected
FLAGS_MISMATCH, // Frame flags are not assignable
BAD_CP_INDEX, // Invalid constant pool index
BAD_LOCAL_INDEX, // Invalid local index
LOCALS_SIZE_MISMATCH, // Frames have differing local counts
STACK_SIZE_MISMATCH, // Frames have different stack sizes
STACK_OVERFLOW, // Attempt to push onto a full expression stack
    STACK_UNDERFLOW,      // Attempt to pop an empty expression stack
    MISSING_STACKMAP,     // No stackmap for this location, but there should be one
BAD_STACKMAP, // Format error in stackmap
NO_FAULT, // No error
UNKNOWN
} FaultType;
int _bci;
FaultType _fault;
TypeOrigin _type;
TypeOrigin _expected;
ErrorContext(int bci, FaultType fault) :
_bci(bci), _fault(fault) {}
ErrorContext(int bci, FaultType fault, TypeOrigin type) :
_bci(bci), _fault(fault), _type(type) {}
ErrorContext(int bci, FaultType fault, TypeOrigin type, TypeOrigin exp) :
_bci(bci), _fault(fault), _type(type), _expected(exp) {}
public:
ErrorContext() : _bci(-1), _fault(NO_FAULT) {}
static ErrorContext bad_code(u2 bci) {
return ErrorContext(bci, INVALID_BYTECODE);
}
static ErrorContext bad_type(u2 bci, TypeOrigin type) {
return ErrorContext(bci, WRONG_TYPE, type);
}
static ErrorContext bad_type(u2 bci, TypeOrigin type, TypeOrigin exp) {
return ErrorContext(bci, WRONG_TYPE, type, exp);
}
static ErrorContext bad_flags(u2 bci, StackMapFrame* frame) {
return ErrorContext(bci, FLAGS_MISMATCH, TypeOrigin::frame(frame));
}
static ErrorContext bad_flags(u2 bci, StackMapFrame* cur, StackMapFrame* sm) {
return ErrorContext(bci, FLAGS_MISMATCH,
TypeOrigin::frame(cur), TypeOrigin::frame(sm));
}
static ErrorContext bad_cp_index(u2 bci, u2 index) {
return ErrorContext(bci, BAD_CP_INDEX, TypeOrigin::bad_index(index));
}
static ErrorContext bad_local_index(u2 bci, u2 index) {
return ErrorContext(bci, BAD_LOCAL_INDEX, TypeOrigin::bad_index(index));
}
static ErrorContext locals_size_mismatch(
u2 bci, StackMapFrame* frame0, StackMapFrame* frame1) {
return ErrorContext(bci, LOCALS_SIZE_MISMATCH,
TypeOrigin::frame(frame0), TypeOrigin::frame(frame1));
}
static ErrorContext stack_size_mismatch(
u2 bci, StackMapFrame* frame0, StackMapFrame* frame1) {
return ErrorContext(bci, STACK_SIZE_MISMATCH,
TypeOrigin::frame(frame0), TypeOrigin::frame(frame1));
}
static ErrorContext stack_overflow(u2 bci, StackMapFrame* frame) {
return ErrorContext(bci, STACK_OVERFLOW, TypeOrigin::frame(frame));
}
static ErrorContext stack_underflow(u2 bci, StackMapFrame* frame) {
return ErrorContext(bci, STACK_UNDERFLOW, TypeOrigin::frame(frame));
}
static ErrorContext missing_stackmap(u2 bci) {
return ErrorContext(bci, MISSING_STACKMAP);
}
static ErrorContext bad_stackmap(int index, StackMapFrame* frame) {
return ErrorContext(0, BAD_STACKMAP, TypeOrigin::frame(frame));
}
bool is_valid() const { return _fault != NO_FAULT; }
int bci() const { return _bci; }
void reset_frames() {
_type.reset_frame();
_expected.reset_frame();
}
void details(outputStream* ss, methodOop method) const;
#ifdef ASSERT
void print_on(outputStream* str) const {
str->print("error_context(%d, %d,", _bci, _fault);
_type.print_on(str);
str->print(",");
_expected.print_on(str);
str->print(")");
}
#endif
private:
void location_details(outputStream* ss, methodOop method) const;
void reason_details(outputStream* ss) const;
void frame_details(outputStream* ss) const;
void bytecode_details(outputStream* ss, methodOop method) const;
void handler_details(outputStream* ss, methodOop method) const;
void stackmap_details(outputStream* ss, methodOop method) const;
};
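ErrorContext keeps its constructors private and exposes intent-revealing static factories (bad_type, stack_overflow, and so on), so each call site names the fault it reports instead of passing bare enum values. The named-constructor idiom in isolation, with illustrative names:

    #include <cassert>

    class Fault {
      int _bci;
      int _kind;                       // 0 = none, 1 = overflow, 2 = underflow
      Fault(int bci, int kind) : _bci(bci), _kind(kind) {}
     public:
      Fault() : _bci(-1), _kind(0) {}
      static Fault overflow(int bci)  { return Fault(bci, 1); }
      static Fault underflow(int bci) { return Fault(bci, 2); }
      bool is_valid() const { return _kind != 0; }
      int  bci()      const { return _bci; }
    };

    int main() {
      Fault f = Fault::overflow(17);   // the site names the fault
      assert(f.is_valid() && f.bci() == 17);
    }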
// A new instance of this class is created for each class being verified
class ClassVerifier : public StackObj {
private:
Thread* _thread;
GrowableArray<Symbol*>* _symbols; // keep a list of symbols created
Symbol* _exception_type;
char* _message;
size_t _message_buffer_len;
GrowableArray<Symbol*>* _symbols; // keep a list of symbols created
ErrorContext _error_context; // contains information about an error
void verify_method(methodHandle method, TRAPS);
char* generate_code_data(methodHandle m, u4 code_length, TRAPS);
void verify_exception_handler_table(u4 code_length, char* code_data, int& min, int& max, TRAPS);
void verify_exception_handler_table(u4 code_length, char* code_data,
int& min, int& max, TRAPS);
void verify_local_variable_table(u4 code_length, char* code_data, TRAPS);
VerificationType cp_ref_index_to_type(
@ -111,10 +271,10 @@ class ClassVerifier : public StackObj {
instanceKlassHandle this_class, klassOop target_class,
Symbol* field_name, Symbol* field_sig, bool is_method);
void verify_cp_index(constantPoolHandle cp, int index, TRAPS);
void verify_cp_type(
int index, constantPoolHandle cp, unsigned int types, TRAPS);
void verify_cp_class_type(int index, constantPoolHandle cp, TRAPS);
void verify_cp_index(u2 bci, constantPoolHandle cp, int index, TRAPS);
void verify_cp_type(u2 bci, int index, constantPoolHandle cp,
unsigned int types, TRAPS);
void verify_cp_class_type(u2 bci, int index, constantPoolHandle cp, TRAPS);
u2 verify_stackmap_table(
u2 stackmap_index, u2 bci, StackMapFrame* current_frame,
@ -137,7 +297,7 @@ class ClassVerifier : public StackObj {
constantPoolHandle cp, TRAPS);
void verify_invoke_init(
RawBytecodeStream* bcs, VerificationType ref_class_type,
RawBytecodeStream* bcs, u2 ref_index, VerificationType ref_class_type,
StackMapFrame* current_frame, u4 code_length, bool* this_uninit,
constantPoolHandle cp, TRAPS);
@ -147,10 +307,11 @@ class ClassVerifier : public StackObj {
constantPoolHandle cp, TRAPS);
VerificationType get_newarray_type(u2 index, u2 bci, TRAPS);
void verify_anewarray(
u2 index, constantPoolHandle cp, StackMapFrame* current_frame, TRAPS);
void verify_anewarray(u2 bci, u2 index, constantPoolHandle cp,
StackMapFrame* current_frame, TRAPS);
void verify_return_value(
VerificationType return_type, VerificationType type, u2 offset, TRAPS);
VerificationType return_type, VerificationType type, u2 offset,
StackMapFrame* current_frame, TRAPS);
void verify_iload (u2 index, StackMapFrame* current_frame, TRAPS);
void verify_lload (u2 index, StackMapFrame* current_frame, TRAPS);
@ -189,7 +350,7 @@ class ClassVerifier : public StackObj {
};
// constructor
ClassVerifier(instanceKlassHandle klass, char* msg, size_t msg_len, TRAPS);
ClassVerifier(instanceKlassHandle klass, TRAPS);
// destructor
~ClassVerifier();
@ -207,13 +368,17 @@ class ClassVerifier : public StackObj {
// Return status modes
Symbol* result() const { return _exception_type; }
bool has_error() const { return result() != NULL; }
char* exception_message() {
stringStream ss;
ss.print(_message);
_error_context.details(&ss, _method());
return ss.as_string();
}
// Called when verify or class format errors are encountered.
// May throw an exception based upon the mode.
void verify_error(u2 offset, const char* fmt, ...);
void verify_error(const char* fmt, ...);
void verify_error(ErrorContext ctx, const char* fmt, ...);
void class_format_error(const char* fmt, ...);
void format_error_message(const char* fmt, int offset, va_list args);
klassOop load_class(Symbol* name, TRAPS);
@ -228,10 +393,11 @@ class ClassVerifier : public StackObj {
  // their reference counts need to be decremented when the verifier object
// goes out of scope. Since these symbols escape the scope in which they're
// created, we can't use a TempNewSymbol.
Symbol* create_temporary_symbol(const Symbol* s, int begin, int end, TRAPS);
Symbol* create_temporary_symbol(
const Symbol* s, int begin, int end, TRAPS);
Symbol* create_temporary_symbol(const char *s, int length, TRAPS);
static bool _verify_verbose; // for debugging
TypeOrigin ref_ctx(const char* str, TRAPS);
};
inline int ClassVerifier::change_sig_to_verificationType(

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -159,14 +159,30 @@ private:
"right address out of range");
assert(left < right, "Heap addresses out of order");
size_t num_cards = pointer_delta(right, left) >> LogN_words;
memset(&_offset_array[index_for(left)], offset, num_cards);
if (UseMemSetInBOT) {
memset(&_offset_array[index_for(left)], offset, num_cards);
} else {
size_t i = index_for(left);
const size_t end = i + num_cards;
for (; i < end; i++) {
_offset_array[i] = offset;
}
}
}
void set_offset_array(size_t left, size_t right, u_char offset) {
assert(right < _vs.committed_size(), "right address out of range");
    assert(left <= right, "indexes out of order");
size_t num_cards = right - left + 1;
memset(&_offset_array[left], offset, num_cards);
if (UseMemSetInBOT) {
memset(&_offset_array[left], offset, num_cards);
} else {
size_t i = left;
const size_t end = i + num_cards;
for (; i < end; i++) {
_offset_array[i] = offset;
}
}
}
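With UseMemSetInBOT off, the block-offset table fills its byte array with an explicit loop instead of memset, which matters on the sun4v hardware called out later in this change. The two equivalent fills behind a flag, as a sketch:

    #include <cassert>
    #include <cstddef>
    #include <cstring>

    static bool use_memset = false;    // stand-in for the UseMemSetInBOT flag

    static void fill(unsigned char* a, size_t left, size_t num, unsigned char v) {
      if (use_memset) {
        std::memset(&a[left], v, num);
      } else {
        for (size_t i = left, end = left + num; i < end; i++) {
          a[i] = v;                    // byte-for-byte, same effect as memset
        }
      }
    }

    int main() {
      unsigned char card[8] = {0};
      fill(card, 2, 3, 0x5a);
      assert(card[1] == 0 && card[2] == 0x5a && card[4] == 0x5a && card[5] == 0);
    }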
void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {

@ -1246,6 +1246,31 @@ void G1CollectedHeap::print_hrs_post_compaction() {
heap_region_iterate(&cl);
}
double G1CollectedHeap::verify(bool guard, const char* msg) {
double verify_time_ms = 0.0;
if (guard && total_collections() >= VerifyGCStartAt) {
double verify_start = os::elapsedTime();
HandleMark hm; // Discard invalid handles created during verification
gclog_or_tty->print(msg);
prepare_for_verify();
Universe::verify(false /* silent */, VerifyOption_G1UsePrevMarking);
verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
}
return verify_time_ms;
}
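The new verify() helper folds the guard check, the timing, and the log prefix into one place and returns elapsed milliseconds, so verify_before_gc and verify_after_gc only record the number. The shape of that helper with a standard clock, names illustrative:

    #include <chrono>
    #include <cstdio>

    // Run 'work' only when 'guard' holds; return elapsed wall time in ms.
    template <typename F>
    double timed_if(bool guard, const char* msg, F work) {
      if (!guard) return 0.0;
      auto start = std::chrono::steady_clock::now();
      std::fputs(msg, stdout);
      work();
      std::chrono::duration<double, std::milli> dt =
          std::chrono::steady_clock::now() - start;
      return dt.count();
    }

    int main() {
      double ms = timed_if(true, " VerifyBeforeGC:", [] { /* verification */ });
      std::printf(" %.3f ms\n", ms);
    }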
void G1CollectedHeap::verify_before_gc() {
double verify_time_ms = verify(VerifyBeforeGC, " VerifyBeforeGC:");
g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
}
void G1CollectedHeap::verify_after_gc() {
double verify_time_ms = verify(VerifyAfterGC, " VerifyAfterGC:");
g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
}
bool G1CollectedHeap::do_collection(bool explicit_gc,
bool clear_all_soft_refs,
size_t word_size) {
@ -1304,14 +1329,8 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
size_t g1h_prev_used = used();
assert(used() == recalculate_used(), "Should be equal");
if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
HandleMark hm; // Discard invalid handles created during verification
gclog_or_tty->print(" VerifyBeforeGC:");
prepare_for_verify();
Universe::verify(/* silent */ false,
/* option */ VerifyOption_G1UsePrevMarking);
verify_before_gc();
}
pre_full_gc_dump();
COMPILER2_PRESENT(DerivedPointerTable::clear());
@ -1378,14 +1397,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
MemoryService::track_memory_usage();
if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
HandleMark hm; // Discard invalid handles created during verification
gclog_or_tty->print(" VerifyAfterGC:");
prepare_for_verify();
Universe::verify(/* silent */ false,
/* option */ VerifyOption_G1UsePrevMarking);
}
verify_after_gc();
assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
ref_processor_stw()->verify_no_references_recorded();
@ -1891,6 +1903,8 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
_young_list(new YoungList(this)),
_gc_time_stamp(0),
_retained_old_gc_alloc_region(NULL),
_survivor_plab_stats(YoungPLABSize, PLABWeight),
_old_plab_stats(OldPLABSize, PLABWeight),
_expand_heap_after_alloc_failure(true),
_surviving_young_words(NULL),
_old_marking_cycles_started(0),
@ -1931,7 +1945,18 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
clear_cset_start_regions();
// Initialize the G1EvacuationFailureALot counters and flags.
NOT_PRODUCT(reset_evacuation_should_fail();)
guarantee(_task_queues != NULL, "task_queues allocation failure.");
#ifdef SPARC
// Issue a stern warning, but allow use for experimentation and debugging.
if (VM_Version::is_sun4v() && UseMemSetInBOT) {
assert(!FLAG_IS_DEFAULT(UseMemSetInBOT), "Error");
warning("Experimental flag -XX:+UseMemSetInBOT is known to cause instability"
" on sun4v; please understand that you are using at your own risk!");
}
#endif
}
jint G1CollectedHeap::initialize() {
@ -2317,8 +2342,7 @@ void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
n_completed_buffers++;
}
g1_policy()->phase_times()->record_update_rs_processed_buffers(worker_i,
(double) n_completed_buffers);
g1_policy()->phase_times()->record_update_rs_processed_buffers(worker_i, n_completed_buffers);
dcqs.clear_n_completed_buffers();
assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
}
@ -3580,15 +3604,11 @@ size_t G1CollectedHeap::pending_card_num() {
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
size_t buffer_size = dcqs.buffer_size();
size_t buffer_num = dcqs.completed_buffers_num();
return buffer_size * buffer_num + extra_cards;
}
size_t G1CollectedHeap::max_pending_card_num() {
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
size_t buffer_size = dcqs.buffer_size();
size_t buffer_num = dcqs.completed_buffers_num();
int thread_num = Threads::number_of_threads();
return (buffer_num + thread_num) * buffer_size;
// PtrQueueSet::buffer_size() and PtrQueue:size() return sizes
// in bytes - not the number of 'entries'. We need to convert
// into a number of cards.
return (buffer_size * buffer_num + extra_cards) / oopSize;
}
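The fix divides by oopSize because buffer_size here is a byte count, not an entry count; the removed code multiplied sizes as if they were entries. Worked arithmetic under assumed values (the constants are made up for illustration):

    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t oop_size    = 8;     // bytes per entry on a 64-bit VM (assumed)
      const size_t buffer_size = 1024;  // bytes per completed buffer (assumed)
      const size_t buffer_num  = 4;
      const size_t extra_cards = 16;    // also a byte count here (assumed)

      // Total bytes across the buffers, then bytes -> number of card entries.
      size_t cards = (buffer_size * buffer_num + extra_cards) / oop_size;
      assert(cards == 514);             // (4096 + 16) / 8, not 4112 "entries"
    }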
size_t G1CollectedHeap::cards_scanned() {
@ -3729,8 +3749,9 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
workers()->active_workers() : 1);
g1_policy()->phase_times()->note_gc_start(os::elapsedTime(), active_workers,
g1_policy()->gcs_are_young(), g1_policy()->during_initial_mark_pause(), gc_cause());
double pause_start_sec = os::elapsedTime();
g1_policy()->phase_times()->note_gc_start(active_workers);
bool initial_mark_gc = g1_policy()->during_initial_mark_pause();
TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
@ -3759,13 +3780,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
increment_total_collections(false /* full gc */);
increment_gc_time_stamp();
if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
HandleMark hm; // Discard invalid handles created during verification
gclog_or_tty->print(" VerifyBeforeGC:");
prepare_for_verify();
Universe::verify(/* silent */ false,
/* option */ VerifyOption_G1UsePrevMarking);
}
verify_before_gc();
COMPILER2_PRESENT(DerivedPointerTable::clear());
@ -3978,10 +3993,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
true /* verify_fingers */);
_cm->note_end_of_gc();
// Collect thread local data to allow the ergonomics to use
// the collected information
g1_policy()->phase_times()->collapse_par_times();
// This timing is only used by the ergonomics to handle our pause target.
// It is unclear why this should not include the full pause. We will
// investigate this in CR 7178365.
@ -4014,13 +4025,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
// scanning cards (see CR 7039627).
increment_gc_time_stamp();
if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
HandleMark hm; // Discard invalid handles created during verification
gclog_or_tty->print(" VerifyAfterGC:");
prepare_for_verify();
Universe::verify(/* silent */ false,
/* option */ VerifyOption_G1UsePrevMarking);
}
verify_after_gc();
assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
ref_processor_stw()->verify_no_references_recorded();
@ -4044,10 +4049,35 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
gc_epilogue(false);
g1_policy()->phase_times()->note_gc_end(os::elapsedTime());
if (G1Log::fine()) {
if (PrintGCTimeStamps) {
gclog_or_tty->stamp();
gclog_or_tty->print(": ");
}
// We have to do this after we decide whether to expand the heap or not.
GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
.append(g1_policy()->gcs_are_young() ? " (young)" : " (mixed)")
.append(initial_mark_gc ? " (initial-mark)" : "");
double pause_time_sec = os::elapsedTime() - pause_start_sec;
if (G1Log::finer()) {
if (evacuation_failed()) {
gc_cause_str.append(" (to-space exhausted)");
}
gclog_or_tty->print_cr("[%s, %3.7f secs]", (const char*)gc_cause_str, pause_time_sec);
g1_policy()->phase_times()->note_gc_end();
g1_policy()->phase_times()->print(pause_time_sec);
g1_policy()->print_detailed_heap_transition();
} else {
if (evacuation_failed()) {
gc_cause_str.append("--");
}
gclog_or_tty->print("[%s", (const char*)gc_cause_str);
g1_policy()->print_heap_transition();
gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
}
}
}
  // It is not yet safe to tell the concurrent mark to
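The logging rewrite composes the cause string with chained append() calls ("(young)", "(initial-mark)", "(to-space exhausted)") and prints once at the end. A tiny chaining builder of the same shape; GCCauseString itself is HotSpot-internal:

    #include <cassert>
    #include <string>

    struct CauseString {
      std::string s;
      explicit CauseString(const char* base) : s(base) {}
      CauseString& append(const char* part) { s += part; return *this; }  // chainable
    };

    int main() {
      bool young = true, initial_mark = false;
      CauseString c = CauseString("GC pause")
                          .append(young ? " (young)" : " (mixed)")
                          .append(initial_mark ? " (initial-mark)" : "");
      assert(c.s == "GC pause (young)");
    }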
@ -4099,17 +4129,22 @@ size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
size_t gclab_word_size;
switch (purpose) {
case GCAllocForSurvived:
gclab_word_size = YoungPLABSize;
gclab_word_size = _survivor_plab_stats.desired_plab_sz();
break;
case GCAllocForTenured:
gclab_word_size = OldPLABSize;
gclab_word_size = _old_plab_stats.desired_plab_sz();
break;
default:
assert(false, "unknown GCAllocPurpose");
gclab_word_size = OldPLABSize;
gclab_word_size = _old_plab_stats.desired_plab_sz();
break;
}
return gclab_word_size;
// Prevent humongous PLAB sizes for two reasons:
  //   * PLABs are allocated using similar paths to oops, but should
// never be in a humongous region
// * Allowing humongous PLABs needlessly churns the region free lists
return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
}
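// [Editorial sketch -- not part of this changeset.] The MIN2 clamp above
// keeps a PLAB from ever reaching humongous size. A minimal standalone
// illustration of the same idea (cap_plab_words is an invented name):
static size_t cap_plab_words(size_t desired_words,
                             size_t humongous_threshold_words) {
  // A PLAB at or above the humongous threshold would have to live in a
  // humongous region and would needlessly churn the free lists, so clamp it.
  return MIN2(humongous_threshold_words, desired_words);
}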
void G1CollectedHeap::init_mutator_alloc_region() {
@ -4165,6 +4200,11 @@ void G1CollectedHeap::release_gc_alloc_regions() {
// want either way so no reason to check explicitly for either
// condition.
_retained_old_gc_alloc_region = _old_gc_alloc_region.release();
if (ResizePLAB) {
_survivor_plab_stats.adjust_desired_plab_sz();
_old_plab_stats.adjust_desired_plab_sz();
}
}
void G1CollectedHeap::abandon_gc_alloc_regions() {
@ -4527,7 +4567,15 @@ oop G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
word_sz);
HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz);
oop obj = oop(obj_ptr);
#ifndef PRODUCT
// Should this evacuation fail?
if (_g1->evacuation_should_fail()) {
if (obj_ptr != NULL) {
_par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
obj_ptr = NULL;
}
}
#endif // !PRODUCT
if (obj_ptr == NULL) {
// This will either forward-to-self, or detect that someone else has
@ -4536,6 +4584,8 @@ oop G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
return _g1->handle_evacuation_failure_par(cl, old);
}
oop obj = oop(obj_ptr);
// We're going to allocate linearly, so might as well prefetch ahead.
Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
@ -4847,7 +4897,7 @@ public:
evac.do_void();
double elapsed_ms = (os::elapsedTime()-start)*1000.0;
double term_ms = pss.term_time()*1000.0;
_g1h->g1_policy()->phase_times()->record_obj_copy_time(worker_id, elapsed_ms-term_ms);
_g1h->g1_policy()->phase_times()->add_obj_copy_time(worker_id, elapsed_ms-term_ms);
_g1h->g1_policy()->phase_times()->record_termination(worker_id, term_ms, pss.term_attempts());
}
_g1h->g1_policy()->record_thread_age_table(pss.age_table());
@ -4975,27 +5025,28 @@ g1_process_strong_roots(bool collecting_perm_gen,
buf_scan_non_heap_roots.done();
buf_scan_perm.done();
double ext_roots_end = os::elapsedTime();
g1_policy()->phase_times()->reset_obj_copy_time(worker_i);
double obj_copy_time_sec = buf_scan_perm.closure_app_seconds() +
buf_scan_non_heap_roots.closure_app_seconds();
g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
double ext_root_time_ms =
((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0;
((os::elapsedTime() - ext_roots_start) - obj_copy_time_sec) * 1000.0;
g1_policy()->phase_times()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
// During conc marking we have to filter the per-thread SATB buffers
// to make sure we remove any oops into the CSet (which will show up
// as implicitly live).
double satb_filtering_ms = 0.0;
if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers)) {
if (mark_in_progress()) {
double satb_filter_start = os::elapsedTime();
JavaThread::satb_mark_queue_set().filter_thread_buffers();
satb_filtering_ms = (os::elapsedTime() - satb_filter_start) * 1000.0;
}
}
double satb_filtering_ms = (os::elapsedTime() - ext_roots_end) * 1000.0;
g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
// Now scan the complement of the collection set.
@ -5540,6 +5591,9 @@ void G1CollectedHeap::evacuate_collection_set() {
_expand_heap_after_alloc_failure = true;
set_evacuation_failed(false);
// Should G1EvacuationFailureALot be in effect for this GC?
NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
g1_rem_set()->prepare_for_oops_into_collection_set_do();
concurrent_g1_refine()->set_use_cache(false);
concurrent_g1_refine()->clear_hot_cache_claimed_index();
@ -5631,11 +5685,11 @@ void G1CollectedHeap::evacuate_collection_set() {
if (evacuation_failed()) {
remove_self_forwarding_pointers();
if (G1Log::finer()) {
gclog_or_tty->print(" (to-space exhausted)");
} else if (G1Log::fine()) {
gclog_or_tty->print("--");
}
// Reset the G1EvacuationFailureALot counters and flags
// Note: the values are reset only when an actual
// evacuation failure occurs.
NOT_PRODUCT(reset_evacuation_should_fail();)
}
// Enqueue any remaining references remaining on the STW

@ -33,7 +33,7 @@
#include "gc_implementation/g1/heapRegionSeq.hpp"
#include "gc_implementation/g1/heapRegionSets.hpp"
#include "gc_implementation/shared/hSpaceCounters.hpp"
#include "gc_implementation/parNew/parGCAllocBuffer.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "memory/barrierSet.hpp"
#include "memory/memRegion.hpp"
#include "memory/sharedHeap.hpp"
@ -278,10 +278,33 @@ private:
// survivor objects.
SurvivorGCAllocRegion _survivor_gc_alloc_region;
// PLAB sizing policy for survivors.
PLABStats _survivor_plab_stats;
// Alloc region used to satisfy allocation requests by the GC for
// old objects.
OldGCAllocRegion _old_gc_alloc_region;
// PLAB sizing policy for tenured objects.
PLABStats _old_plab_stats;
PLABStats* stats_for_purpose(GCAllocPurpose purpose) {
PLABStats* stats = NULL;
switch (purpose) {
case GCAllocForSurvived:
stats = &_survivor_plab_stats;
break;
case GCAllocForTenured:
stats = &_old_plab_stats;
break;
default:
assert(false, "unrecognized GCAllocPurpose");
}
return stats;
}
// The last old region we allocated to during the last GC.
// Typically, it is not full so we should re-use it during the next GC.
HeapRegion* _retained_old_gc_alloc_region;
@ -314,7 +337,7 @@ private:
G1MonitoringSupport* _g1mm;
// Determines PLAB size for a particular allocation purpose.
static size_t desired_plab_sz(GCAllocPurpose purpose);
size_t desired_plab_sz(GCAllocPurpose purpose);
// Outside of GC pauses, the number of bytes used in all regions other
// than the current allocation region.
@ -382,6 +405,10 @@ private:
// heap after a compaction.
void print_hrs_post_compaction();
double verify(bool guard, const char* msg);
void verify_before_gc();
void verify_after_gc();
// These are macros so that, if the assert fires, we get the correct
// line number, file, etc.
@ -888,6 +915,39 @@ protected:
oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj);
void handle_evacuation_failure_common(oop obj, markOop m);
#ifndef PRODUCT
// Support for forcing evacuation failures. Analogous to
// PromotionFailureALot for the other collectors.
// Records whether G1EvacuationFailureALot should be in effect
// for the current GC
bool _evacuation_failure_alot_for_current_gc;
// Used to record the GC number for interval checking when
// determining whether G1EvacuationFailureALot is in effect
// for the current GC.
size_t _evacuation_failure_alot_gc_number;
// Count of the number of evacuations between failures.
volatile size_t _evacuation_failure_alot_count;
// Set whether G1EvacuationFailureALot should be in effect
// for the current GC (based upon the type of GC and which
// command line flags are set).
inline bool evacuation_failure_alot_for_gc_type(bool gcs_are_young,
bool during_initial_mark,
bool during_marking);
inline void set_evacuation_failure_alot_for_current_gc();
// Return true if it's time to cause an evacuation failure.
inline bool evacuation_should_fail();
// Reset the G1EvacuationFailureALot counters. Should be called at
// the end of an evacuation pause in which an evacuation failure occurred.
inline void reset_evacuation_should_fail();
#endif // !PRODUCT
// ("Weak") Reference processing support.
//
// G1 has 2 instances of the reference processor class. One
@ -1683,7 +1743,6 @@ public:
void stop_conc_gc_threads();
size_t pending_card_num();
size_t max_pending_card_num();
size_t cards_scanned();
protected:
@ -1811,19 +1870,19 @@ public:
}
HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
HeapWord* obj = NULL;
size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
assert(gclab_word_size == alloc_buf->word_sz(),
"dynamic resizing is not supported");
add_to_alloc_buffer_waste(alloc_buf->words_remaining());
alloc_buf->retire(false, false);
alloc_buf->flush_stats_and_retire(_g1h->stats_for_purpose(purpose),
false /* end_of_gc */,
false /* retain */);
HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
if (buf == NULL) return NULL; // Let caller handle allocation failure.
// Otherwise.
alloc_buf->set_word_size(gclab_word_size);
alloc_buf->set_buf(buf);
obj = alloc_buf->allocate(word_sz);
@ -1908,7 +1967,9 @@ public:
for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
size_t waste = _alloc_buffers[ap]->words_remaining();
add_to_alloc_buffer_waste(waste);
_alloc_buffers[ap]->retire(true, false);
_alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
true /* end_of_gc */,
false /* retain */);
}
}
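// [Editorial sketch -- not part of this changeset.] allocate_slow() above
// follows the usual retire-and-refill PLAB pattern: if the request is small
// relative to a full buffer, retire the current buffer (flushing its stats
// into the PLABStats for that purpose) and install a fresh one; otherwise
// return NULL so the caller allocates the object outside any buffer. A
// simplified model with invented names (Plab, allocate_new_plab):
//
//   HeapWord* plab_allocate_slow(Plab* buf, size_t word_sz,
//                                size_t plab_word_sz, uint waste_pct) {
//     if (word_sz * 100 < plab_word_sz * waste_pct) {
//       buf->retire();                                   // unused words -> waste
//       HeapWord* mem = allocate_new_plab(plab_word_sz); // may return NULL
//       if (mem == NULL) return NULL;                    // caller handles failure
//       buf->set_buf(mem, plab_word_sz);
//       return buf->allocate(word_sz);
//     }
//     return NULL;  // too large to amortize; allocate it directly instead
//   }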

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -138,7 +138,7 @@ inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
return _task_queues->queue(i);
}
inline bool G1CollectedHeap::isMarkedPrev(oop obj) const {
return _cm->prevMarkBitMap()->isMarked((HeapWord *)obj);
}
@ -146,4 +146,77 @@ inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
}
#ifndef PRODUCT
// Support for G1EvacuationFailureALot
inline bool
G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool gcs_are_young,
bool during_initial_mark,
bool during_marking) {
bool res = false;
if (during_marking) {
res |= G1EvacuationFailureALotDuringConcMark;
}
if (during_initial_mark) {
res |= G1EvacuationFailureALotDuringInitialMark;
}
if (gcs_are_young) {
res |= G1EvacuationFailureALotDuringYoungGC;
} else {
// GCs are mixed
res |= G1EvacuationFailureALotDuringMixedGC;
}
return res;
}
inline void
G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() {
if (G1EvacuationFailureALot) {
// Note we can't assert that _evacuation_failure_alot_for_current_gc
// is clear here. It may have been set during a previous GC but that GC
// did not copy enough objects (i.e. G1EvacuationFailureALotCount) to
// trigger an evacuation failure and clear the flags and counts.
// Check if we have gone over the interval.
const size_t gc_num = total_collections();
const size_t elapsed_gcs = gc_num - _evacuation_failure_alot_gc_number;
_evacuation_failure_alot_for_current_gc = (elapsed_gcs >= G1EvacuationFailureALotInterval);
// Now check if G1EvacuationFailureALot is enabled for the current GC type.
const bool gcs_are_young = g1_policy()->gcs_are_young();
const bool during_im = g1_policy()->during_initial_mark_pause();
const bool during_marking = mark_in_progress();
_evacuation_failure_alot_for_current_gc &=
evacuation_failure_alot_for_gc_type(gcs_are_young,
during_im,
during_marking);
}
}
inline bool
G1CollectedHeap::evacuation_should_fail() {
if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
return false;
}
// G1EvacuationFailureALot is in effect for the current GC.
// Access to _evacuation_failure_alot_count is not atomic;
// the value does not have to be exact.
if (++_evacuation_failure_alot_count < G1EvacuationFailureALotCount) {
return false;
}
_evacuation_failure_alot_count = 0;
return true;
}
inline void G1CollectedHeap::reset_evacuation_should_fail() {
if (G1EvacuationFailureALot) {
_evacuation_failure_alot_gc_number = total_collections();
_evacuation_failure_alot_count = 0;
_evacuation_failure_alot_for_current_gc = false;
}
}
#endif // #ifndef PRODUCT
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
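[Editorial sketch -- not part of this changeset.] The injection logic above
composes an interval check (whole GCs) with a count check (individual object
copies). A stripped-down model of the same two-level counter, with invented
names and the default develop values (interval 5, count 1000) baked in:

  struct EvacFailureInjector {
    size_t _last_fail_gc;     // GC number when a failure was last injected
    size_t _copies;           // successful copies since the last failure
    bool   _armed;            // decided once per GC

    void set_for_current_gc(size_t gc_num, bool gc_type_enabled) {
      // Arm only if at least 5 GCs have elapsed and this GC type is enabled.
      _armed = (gc_num - _last_fail_gc >= 5) && gc_type_enabled;
    }
    bool should_fail() {
      if (!_armed) return false;
      if (++_copies < 1000) return false;
      _copies = 0;            // fail this copy and restart the count
      return true;
    }
    void reset_after_failure(size_t gc_num) {
      _last_fail_gc = gc_num; // re-base the interval at the failing GC
      _copies = 0;
      _armed = false;
    }
  };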

@ -90,7 +90,6 @@ G1CollectorPolicy::G1CollectorPolicy() :
_alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
_prev_collection_pause_end_ms(0.0),
_pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
_rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
_cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
_young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
@ -197,7 +196,6 @@ G1CollectorPolicy::G1CollectorPolicy() :
int index = MIN2(_parallel_gc_threads - 1, 7);
_pending_card_diff_seq->add(0.0);
_rs_length_diff_seq->add(rs_length_diff_defaults[index]);
_cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
_young_cards_per_entry_ratio_seq->add(
@ -657,7 +655,7 @@ double G1CollectorPolicy::predict_survivor_regions_evac_time() {
for (HeapRegion * r = _recorded_survivor_head;
r != NULL && r != _recorded_survivor_tail->get_next_young_region();
r = r->get_next_young_region()) {
survivor_regions_evac_time += predict_region_elapsed_time_ms(r, true);
survivor_regions_evac_time += predict_region_elapsed_time_ms(r, gcs_are_young());
}
return survivor_regions_evac_time;
}
@ -797,13 +795,12 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
_trace_gen0_time_data.record_start_collection(s_w_t_ms);
_stop_world_start = 0.0;
phase_times()->_cur_collection_start_sec = start_time_sec;
phase_times()->record_cur_collection_start_sec(start_time_sec);
_cur_collection_pause_used_at_start_bytes = start_used;
_cur_collection_pause_used_regions_at_start = _g1->used_regions();
_pending_cards = _g1->pending_card_num();
_max_pending_cards = _g1->max_pending_card_num();
_bytes_in_collection_set_before_gc = 0;
_collection_set_bytes_used_before = 0;
_bytes_copied_during_gc = 0;
YoungList* young_list = _g1->young_list();
@ -950,7 +947,7 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
_trace_gen0_time_data.record_end_collection(pause_time_ms, phase_times());
// this is where we update the allocation rate of the application
double app_time_ms =
(phase_times()->_cur_collection_start_sec * 1000.0 - _prev_collection_pause_end_ms);
(phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
if (app_time_ms < MIN_TIMER_GRANULARITY) {
// This usually happens due to the timer not having the required
// granularity. Some Linuxes are the usual culprits.
@ -1036,15 +1033,9 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
// do that for any other surv rate groups
if (update_stats) {
size_t diff = 0;
if (_max_pending_cards >= _pending_cards) {
diff = _max_pending_cards - _pending_cards;
}
_pending_card_diff_seq->add((double) diff);
double cost_per_card_ms = 0.0;
if (_pending_cards > 0) {
cost_per_card_ms = phase_times()->_update_rs_time / (double) _pending_cards;
cost_per_card_ms = phase_times()->average_last_update_rs_time() / (double) _pending_cards;
_cost_per_card_ms_seq->add(cost_per_card_ms);
}
@ -1052,7 +1043,7 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
double cost_per_entry_ms = 0.0;
if (cards_scanned > 10) {
cost_per_entry_ms = phase_times()->_scan_rs_time / (double) cards_scanned;
cost_per_entry_ms = phase_times()->average_last_scan_rs_time() / (double) cards_scanned;
if (_last_gc_was_young) {
_cost_per_entry_ms_seq->add(cost_per_entry_ms);
} else {
@ -1092,7 +1083,7 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
size_t copied_bytes = surviving_bytes;
double cost_per_byte_ms = 0.0;
if (copied_bytes > 0) {
cost_per_byte_ms = phase_times()->_obj_copy_time / (double) copied_bytes;
cost_per_byte_ms = phase_times()->average_last_obj_copy_time() / (double) copied_bytes;
if (_in_marking_window) {
_cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
} else {
@ -1101,21 +1092,22 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
}
double all_other_time_ms = pause_time_ms -
(phase_times()->_update_rs_time + phase_times()->_scan_rs_time + phase_times()->_obj_copy_time + phase_times()->_termination_time);
(phase_times()->average_last_update_rs_time() + phase_times()->average_last_scan_rs_time()
+ phase_times()->average_last_obj_copy_time() + phase_times()->average_last_termination_time());
double young_other_time_ms = 0.0;
if (young_cset_region_length() > 0) {
young_other_time_ms =
phase_times()->_recorded_young_cset_choice_time_ms +
phase_times()->_recorded_young_free_cset_time_ms;
phase_times()->young_cset_choice_time_ms() +
phase_times()->young_free_cset_time_ms();
_young_other_cost_per_region_ms_seq->add(young_other_time_ms /
(double) young_cset_region_length());
}
double non_young_other_time_ms = 0.0;
if (old_cset_region_length() > 0) {
non_young_other_time_ms =
phase_times()->_recorded_non_young_cset_choice_time_ms +
phase_times()->_recorded_non_young_free_cset_time_ms;
phase_times()->non_young_cset_choice_time_ms() +
phase_times()->non_young_free_cset_time_ms();
_non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
(double) old_cset_region_length());
@ -1126,9 +1118,9 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
_constant_other_time_ms_seq->add(constant_other_time_ms);
double survival_ratio = 0.0;
if (_bytes_in_collection_set_before_gc > 0) {
if (_collection_set_bytes_used_before > 0) {
survival_ratio = (double) _bytes_copied_during_gc /
(double) _bytes_in_collection_set_before_gc;
(double) _collection_set_bytes_used_before;
}
_pending_cards_seq->add((double) _pending_cards);
@ -1142,7 +1134,8 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
// Note that _mmu_tracker->max_gc_time() returns the time in seconds.
double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
adjust_concurrent_refinement(phase_times()->_update_rs_time, phase_times()->_update_rs_processed_buffers, update_rs_time_goal_ms);
adjust_concurrent_refinement(phase_times()->average_last_update_rs_time(),
phase_times()->sum_last_update_rs_processed_buffers(), update_rs_time_goal_ms);
_collectionSetChooser->verify();
}
@ -1153,7 +1146,11 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
proper_unit_for_byte_size((bytes))
void G1CollectorPolicy::print_heap_transition() {
if (G1Log::finer()) {
_g1->print_size_transition(gclog_or_tty,
_cur_collection_pause_used_at_start_bytes, _g1->used(), _g1->capacity());
}
void G1CollectorPolicy::print_detailed_heap_transition() {
YoungList* young_list = _g1->young_list();
size_t eden_bytes = young_list->eden_used_bytes();
size_t survivor_bytes = young_list->survivor_used_bytes();
@ -1180,11 +1177,6 @@ void G1CollectorPolicy::print_heap_transition() {
EXT_SIZE_PARAMS(capacity));
_prev_eden_capacity = eden_capacity;
} else if (G1Log::fine()) {
_g1->print_size_transition(gclog_or_tty,
_cur_collection_pause_used_at_start_bytes,
_g1->used(), _g1->capacity());
}
}
void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
@ -1228,18 +1220,6 @@ void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
dcqs.notify_if_necessary();
}
double
G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
size_t rs_length = predict_rs_length_diff();
size_t card_num;
if (gcs_are_young()) {
card_num = predict_young_card_num(rs_length);
} else {
card_num = predict_non_young_card_num(rs_length);
}
return predict_base_elapsed_time_ms(pending_cards, card_num);
}
double
G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
size_t scanned_cards) {
@ -1250,27 +1230,15 @@ G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
}
double
G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
bool young) {
size_t rs_length = hr->rem_set()->occupied();
G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
size_t rs_length = predict_rs_length_diff();
size_t card_num;
if (gcs_are_young()) {
card_num = predict_young_card_num(rs_length);
} else {
card_num = predict_non_young_card_num(rs_length);
}
size_t bytes_to_copy = predict_bytes_to_copy(hr);
double region_elapsed_time_ms =
predict_rs_scan_time_ms(card_num) +
predict_object_copy_time_ms(bytes_to_copy);
if (young)
region_elapsed_time_ms += predict_young_other_time_ms(1);
else
region_elapsed_time_ms += predict_non_young_other_time_ms(1);
return region_elapsed_time_ms;
return predict_base_elapsed_time_ms(pending_cards, card_num);
}
size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
@ -1286,6 +1254,35 @@ size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
return bytes_to_copy;
}
double
G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
bool for_young_gc) {
size_t rs_length = hr->rem_set()->occupied();
size_t card_num;
// Predicting the number of cards is based on which type of GC
// we're predicting for.
if (for_young_gc) {
card_num = predict_young_card_num(rs_length);
} else {
card_num = predict_non_young_card_num(rs_length);
}
size_t bytes_to_copy = predict_bytes_to_copy(hr);
double region_elapsed_time_ms =
predict_rs_scan_time_ms(card_num) +
predict_object_copy_time_ms(bytes_to_copy);
// The prediction of the "other" time for this region is based
// upon the region type and NOT the GC type.
if (hr->is_young()) {
region_elapsed_time_ms += predict_young_other_time_ms(1);
} else {
region_elapsed_time_ms += predict_non_young_other_time_ms(1);
}
return region_elapsed_time_ms;
}
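// [Editorial note] Example of the distinction above: during a mixed GC
// (for_young_gc == false), a survivor region gets its card count predicted
// with the non-young model (the GC type) but its "other" time predicted
// with the young model, because the region itself is young.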
void
G1CollectorPolicy::init_cset_region_lengths(uint eden_cset_region_length,
uint survivor_cset_region_length) {
@ -1342,22 +1339,6 @@ size_t G1CollectorPolicy::expansion_amount() {
}
}
class CountCSClosure: public HeapRegionClosure {
G1CollectorPolicy* _g1_policy;
public:
CountCSClosure(G1CollectorPolicy* g1_policy) :
_g1_policy(g1_policy) {}
bool doHeapRegion(HeapRegion* r) {
_g1_policy->_bytes_in_collection_set_before_gc += r->used();
return false;
}
};
void G1CollectorPolicy::count_CS_bytes_used() {
CountCSClosure cs_closure(this);
_g1->collection_set_iterate(&cs_closure);
}
void G1CollectorPolicy::print_tracing_info() const {
_trace_gen0_time_data.print();
_trace_gen1_time_data.print();
@ -1696,7 +1677,7 @@ void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_l
// retiring the current allocation region) or a concurrent
// refine thread (RSet sampling).
double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
size_t used_bytes = hr->used();
_inc_cset_recorded_rs_lengths += rs_length;
_inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
@ -1731,7 +1712,7 @@ void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr,
_inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;
double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
_inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;
@ -1854,8 +1835,7 @@ bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
}
void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
// Set this here - in case we're not doing young collections.
double non_young_start_time_sec = os::elapsedTime();
double young_start_time_sec = os::elapsedTime();
YoungList* young_list = _g1->young_list();
finalize_incremental_cset_building();
@ -1869,17 +1849,14 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
double predicted_pause_time_ms = base_time_ms;
double time_remaining_ms = target_pause_time_ms - base_time_ms;
ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
ergo_verbose4(ErgoCSetConstruction | ErgoHigh,
"start choosing CSet",
ergo_format_size("_pending_cards")
ergo_format_ms("predicted base time")
ergo_format_ms("remaining time")
ergo_format_ms("target pause time"),
base_time_ms, time_remaining_ms, target_pause_time_ms);
_pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);
HeapRegion* hr;
double young_start_time_sec = os::elapsedTime();
_collection_set_bytes_used_before = 0;
_last_gc_was_young = gcs_are_young() ? true : false;
if (_last_gc_was_young) {
@ -1895,7 +1872,8 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
uint survivor_region_length = young_list->survivor_length();
uint eden_region_length = young_list->length() - survivor_region_length;
init_cset_region_lengths(eden_region_length, survivor_region_length);
hr = young_list->first_survivor_region();
HeapRegion* hr = young_list->first_survivor_region();
while (hr != NULL) {
assert(hr->is_survivor(), "badly formed young list");
hr->set_young();
@ -1923,11 +1901,10 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
double young_end_time_sec = os::elapsedTime();
phase_times()->_recorded_young_cset_choice_time_ms =
(young_end_time_sec - young_start_time_sec) * 1000.0;
phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);
// We are doing young collections so reset this.
non_young_start_time_sec = young_end_time_sec;
// Set the start of the non-young choice time.
double non_young_start_time_sec = young_end_time_sec;
if (!gcs_are_young()) {
CollectionSetChooser* cset_chooser = _collectionSetChooser;
@ -1937,6 +1914,7 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
uint expensive_region_num = 0;
bool check_time_remaining = adaptive_young_list_length();
HeapRegion* hr = cset_chooser->peek();
while (hr != NULL) {
if (old_cset_region_length() >= max_old_cset_length) {
@ -1950,7 +1928,7 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
break;
}
double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
double predicted_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
if (check_time_remaining) {
if (predicted_time_ms > time_remaining_ms) {
// Too expensive for the current CSet.
@ -2025,8 +2003,6 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
stop_incremental_cset_building();
count_CS_bytes_used();
ergo_verbose5(ErgoCSetConstruction,
"finish choosing CSet",
ergo_format_region("eden")
@ -2039,8 +2015,7 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
predicted_pause_time_ms, target_pause_time_ms);
double non_young_end_time_sec = os::elapsedTime();
phase_times()->_recorded_non_young_cset_choice_time_ms =
(non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
}
void TraceGen0TimeData::record_start_collection(double time_to_stop_the_world_ms) {
@ -2059,25 +2034,25 @@ void TraceGen0TimeData::record_end_collection(double pause_time_ms, G1GCPhaseTim
if(TraceGen0Time) {
_total.add(pause_time_ms);
_other.add(pause_time_ms - phase_times->accounted_time_ms());
_root_region_scan_wait.add(phase_times->_root_region_scan_wait_time_ms);
_parallel.add(phase_times->_cur_collection_par_time_ms);
_ext_root_scan.add(phase_times->_ext_root_scan_time);
_satb_filtering.add(phase_times->_satb_filtering_time);
_update_rs.add(phase_times->_update_rs_time);
_scan_rs.add(phase_times->_scan_rs_time);
_obj_copy.add(phase_times->_obj_copy_time);
_termination.add(phase_times->_termination_time);
_root_region_scan_wait.add(phase_times->root_region_scan_wait_time_ms());
_parallel.add(phase_times->cur_collection_par_time_ms());
_ext_root_scan.add(phase_times->average_last_ext_root_scan_time());
_satb_filtering.add(phase_times->average_last_satb_filtering_times_ms());
_update_rs.add(phase_times->average_last_update_rs_time());
_scan_rs.add(phase_times->average_last_scan_rs_time());
_obj_copy.add(phase_times->average_last_obj_copy_time());
_termination.add(phase_times->average_last_termination_time());
double parallel_known_time = phase_times->_ext_root_scan_time +
phase_times->_satb_filtering_time +
phase_times->_update_rs_time +
phase_times->_scan_rs_time +
phase_times->_obj_copy_time +
phase_times->_termination_time;
double parallel_known_time = phase_times->average_last_ext_root_scan_time() +
phase_times->average_last_satb_filtering_times_ms() +
phase_times->average_last_update_rs_time() +
phase_times->average_last_scan_rs_time() +
phase_times->average_last_obj_copy_time() +
phase_times->average_last_termination_time();
double parallel_other_time = phase_times->_cur_collection_par_time_ms - parallel_known_time;
double parallel_other_time = phase_times->cur_collection_par_time_ms() - parallel_known_time;
_parallel_other.add(parallel_other_time);
_clear_ct.add(phase_times->_cur_clear_ct_time_ms);
_clear_ct.add(phase_times->cur_clear_ct_time_ms());
}
}

@ -228,7 +228,6 @@ private:
TruncatedSeq* _alloc_rate_ms_seq;
double _prev_collection_pause_end_ms;
TruncatedSeq* _pending_card_diff_seq;
TruncatedSeq* _rs_length_diff_seq;
TruncatedSeq* _cost_per_card_ms_seq;
TruncatedSeq* _young_cards_per_entry_ratio_seq;
@ -295,7 +294,6 @@ private:
double _pause_time_target_ms;
size_t _pending_cards;
size_t _max_pending_cards;
public:
// Accessors
@ -325,28 +323,6 @@ public:
_max_rs_lengths = rs_lengths;
}
size_t predict_pending_card_diff() {
double prediction = get_new_neg_prediction(_pending_card_diff_seq);
if (prediction < 0.00001) {
return 0;
} else {
return (size_t) prediction;
}
}
size_t predict_pending_cards() {
size_t max_pending_card_num = _g1->max_pending_card_num();
size_t diff = predict_pending_card_diff();
size_t prediction;
if (diff > max_pending_card_num) {
prediction = max_pending_card_num;
} else {
prediction = max_pending_card_num - diff;
}
return prediction;
}
size_t predict_rs_length_diff() {
return (size_t) get_new_prediction(_rs_length_diff_seq);
}
@ -439,7 +415,7 @@ public:
double predict_base_elapsed_time_ms(size_t pending_cards,
size_t scanned_cards);
size_t predict_bytes_to_copy(HeapRegion* hr);
double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);
double predict_region_elapsed_time_ms(HeapRegion* hr, bool for_young_gc);
void set_recorded_rs_lengths(size_t rs_lengths);
@ -495,12 +471,6 @@ public:
}
private:
size_t _bytes_in_collection_set_before_gc;
size_t _bytes_copied_during_gc;
// Used to count used bytes in CS.
friend class CountCSClosure;
// Statistics kept per GC stoppage, pause or full.
TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;
@ -514,9 +484,13 @@ private:
// The number of bytes in the collection set before the pause. Set from
// the incrementally built collection set at the start of an evacuation
// pause.
// pause, and incremented in finalize_cset() when adding old regions
// (if any) to the collection set.
size_t _collection_set_bytes_used_before;
// The number of bytes copied during the GC.
size_t _bytes_copied_during_gc;
// The associated information that is maintained while the incremental
// collection set is being built with young regions. Used to populate
// the recorded info for the evacuation pause.
@ -646,9 +620,6 @@ private:
bool predict_will_fit(uint young_length, double base_time_ms,
uint base_free_regions, double target_pause_time_ms);
// Count the number of bytes used in the CS.
void count_CS_bytes_used();
public:
G1CollectorPolicy();
@ -666,10 +637,6 @@ public:
// higher, recalculate the young list target length prediction.
void revise_young_list_target_length_if_necessary();
size_t bytes_in_collection_set() {
return _bytes_in_collection_set_before_gc;
}
// This should be called after the heap is resized.
void record_new_heap_size(uint new_number_of_regions);
@ -716,6 +683,7 @@ public:
void record_collection_pause_end(double pause_time);
void print_heap_transition();
void print_detailed_heap_transition();
// Record the fact that a full collection occurred.
void record_full_collection_start();

@ -125,6 +125,7 @@ public:
#define ergo_format_double(_name_) ", " _name_ ": %1.2f"
#define ergo_format_perc(_name_) ", " _name_ ": %1.2f %%"
#define ergo_format_ms(_name_) ", " _name_ ": %1.2f ms"
#define ergo_format_size(_name_) ", " _name_ ": "SIZE_FORMAT
// Double parameter format strings
#define ergo_format_byte_perc(_name_) \

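[Editorial note] These ergo_format_* macros rely on C string-literal
concatenation, so adjacent macro invocations fuse into a single printf-style
format string. For example, in the finalize_cset() hunk earlier,

  ergo_format_size("_pending_cards") ergo_format_ms("predicted base time")

expands to

  ", _pending_cards: " SIZE_FORMAT ", predicted base time: %1.2f ms"

which ergo_verbose4() then fills with its four arguments. (SIZE_FORMAT is
HotSpot's printf specifier for size_t.)
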
@ -79,119 +79,145 @@ public:
}
};
G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
_max_gc_threads(max_gc_threads),
_min_clear_cc_time_ms(-1.0),
_max_clear_cc_time_ms(-1.0),
_cur_clear_cc_time_ms(0.0),
_cum_clear_cc_time_ms(0.0),
_num_cc_clears(0L)
{
assert(max_gc_threads > 0, "Must have some GC threads");
_par_last_gc_worker_start_times_ms = new double[_max_gc_threads];
_par_last_ext_root_scan_times_ms = new double[_max_gc_threads];
_par_last_satb_filtering_times_ms = new double[_max_gc_threads];
_par_last_update_rs_times_ms = new double[_max_gc_threads];
_par_last_update_rs_processed_buffers = new double[_max_gc_threads];
_par_last_scan_rs_times_ms = new double[_max_gc_threads];
_par_last_obj_copy_times_ms = new double[_max_gc_threads];
_par_last_termination_times_ms = new double[_max_gc_threads];
_par_last_termination_attempts = new double[_max_gc_threads];
_par_last_gc_worker_end_times_ms = new double[_max_gc_threads];
_par_last_gc_worker_times_ms = new double[_max_gc_threads];
_par_last_gc_worker_other_times_ms = new double[_max_gc_threads];
}
void G1GCPhaseTimes::note_gc_start(double pause_start_time_sec, uint active_gc_threads,
bool is_young_gc, bool is_initial_mark_gc, GCCause::Cause gc_cause) {
assert(active_gc_threads > 0, "The number of threads must be > 0");
assert(active_gc_threads <= _max_gc_threads, "The number of active threads must be <= the max nubmer of threads");
_active_gc_threads = active_gc_threads;
_pause_start_time_sec = pause_start_time_sec;
_is_young_gc = is_young_gc;
_is_initial_mark_gc = is_initial_mark_gc;
_gc_cause = gc_cause;
#ifdef ASSERT
// initialise the timing data to something well known so that we can spot
// if something is not set properly
for (uint i = 0; i < _max_gc_threads; ++i) {
_par_last_gc_worker_start_times_ms[i] = -1234.0;
_par_last_ext_root_scan_times_ms[i] = -1234.0;
_par_last_satb_filtering_times_ms[i] = -1234.0;
_par_last_update_rs_times_ms[i] = -1234.0;
_par_last_update_rs_processed_buffers[i] = -1234.0;
_par_last_scan_rs_times_ms[i] = -1234.0;
_par_last_obj_copy_times_ms[i] = -1234.0;
_par_last_termination_times_ms[i] = -1234.0;
_par_last_termination_attempts[i] = -1234.0;
_par_last_gc_worker_end_times_ms[i] = -1234.0;
_par_last_gc_worker_times_ms[i] = -1234.0;
_par_last_gc_worker_other_times_ms[i] = -1234.0;
}
#endif
}
void G1GCPhaseTimes::note_gc_end(double pause_end_time_sec) {
if (G1Log::fine()) {
double pause_time_ms = (pause_end_time_sec - _pause_start_time_sec) * MILLIUNITS;
for (uint i = 0; i < _active_gc_threads; i++) {
_par_last_gc_worker_times_ms[i] = _par_last_gc_worker_end_times_ms[i] -
_par_last_gc_worker_start_times_ms[i];
double worker_known_time = _par_last_ext_root_scan_times_ms[i] +
_par_last_satb_filtering_times_ms[i] +
_par_last_update_rs_times_ms[i] +
_par_last_scan_rs_times_ms[i] +
_par_last_obj_copy_times_ms[i] +
_par_last_termination_times_ms[i];
_par_last_gc_worker_other_times_ms[i] = _par_last_gc_worker_times_ms[i] -
worker_known_time;
}
print(pause_time_ms);
template <class T>
void WorkerDataArray<T>::print(int level, const char* title) {
if (_length == 1) {
// No need for min, max, average and sum for only one worker
LineBuffer buf(level);
buf.append("[%s: ", title);
buf.append(_print_format, _data[0]);
buf.append_and_print_cr("]");
return;
}
}
T min = _data[0];
T max = _data[0];
T sum = 0;
void G1GCPhaseTimes::print_par_stats(int level,
const char* str,
double* data,
bool showDecimals) {
double min = data[0], max = data[0];
double total = 0.0;
LineBuffer buf(level);
buf.append("[%s (ms):", str);
for (uint i = 0; i < _active_gc_threads; ++i) {
double val = data[i];
if (val < min)
min = val;
if (val > max)
max = val;
total += val;
buf.append("[%s:", title);
for (uint i = 0; i < _length; ++i) {
T val = _data[i];
min = MIN2(val, min);
max = MAX2(val, max);
sum += val;
if (G1Log::finest()) {
if (showDecimals) {
buf.append(" %.1lf", val);
} else {
buf.append(" %d", (int)val);
}
buf.append(" ");
buf.append(_print_format, val);
}
}
if (G1Log::finest()) {
buf.append_and_print_cr("");
}
double avg = total / (double) _active_gc_threads;
if (showDecimals) {
buf.append_and_print_cr(" Min: %.1lf, Avg: %.1lf, Max: %.1lf, Diff: %.1lf, Sum: %.1lf]",
min, avg, max, max - min, total);
} else {
buf.append_and_print_cr(" Min: %d, Avg: %d, Max: %d, Diff: %d, Sum: %d]",
(int)min, (int)avg, (int)max, (int)max - (int)min, (int)total);
double avg = (double)sum / (double)_length;
buf.append(" Min: ");
buf.append(_print_format, min);
buf.append(", Avg: ");
buf.append("%.1lf", avg); // Always print average as a double
buf.append(", Max: ");
buf.append(_print_format, max);
buf.append(", Diff: ");
buf.append(_print_format, max - min);
if (_print_sum) {
// for things like the start and end times the sum is not
// that relevant
buf.append(", Sum: ");
buf.append(_print_format, sum);
}
buf.append_and_print_cr("]");
}
#ifdef ASSERT
template <class T>
void WorkerDataArray<T>::reset() {
for (uint i = 0; i < _length; i++) {
_data[i] = (T)-1;
}
}
template <class T>
void WorkerDataArray<T>::verify() {
for (uint i = 0; i < _length; i++) {
assert(_data[i] >= (T)0, err_msg("Invalid data for worker %d", i));
}
}
#endif
G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
_max_gc_threads(max_gc_threads),
_min_clear_cc_time_ms(-1.0),
_max_clear_cc_time_ms(-1.0),
_cur_clear_cc_time_ms(0.0),
_cum_clear_cc_time_ms(0.0),
_num_cc_clears(0L),
_last_gc_worker_start_times_ms(_max_gc_threads, "%.1lf", false),
_last_ext_root_scan_times_ms(_max_gc_threads, "%.1lf"),
_last_satb_filtering_times_ms(_max_gc_threads, "%.1lf"),
_last_update_rs_times_ms(_max_gc_threads, "%.1lf"),
_last_update_rs_processed_buffers(_max_gc_threads, "%d"),
_last_scan_rs_times_ms(_max_gc_threads, "%.1lf"),
_last_obj_copy_times_ms(_max_gc_threads, "%.1lf"),
_last_termination_times_ms(_max_gc_threads, "%.1lf"),
_last_termination_attempts(_max_gc_threads, SIZE_FORMAT),
_last_gc_worker_end_times_ms(_max_gc_threads, "%.1lf", false),
_last_gc_worker_times_ms(_max_gc_threads, "%.1lf"),
_last_gc_worker_other_times_ms(_max_gc_threads, "%.1lf")
{
assert(max_gc_threads > 0, "Must have some GC threads");
}
void G1GCPhaseTimes::note_gc_start(uint active_gc_threads) {
assert(active_gc_threads > 0, "The number of threads must be > 0");
assert(active_gc_threads <= _max_gc_threads, "The number of active threads must be <= the max nubmer of threads");
_active_gc_threads = active_gc_threads;
_last_gc_worker_start_times_ms.reset();
_last_ext_root_scan_times_ms.reset();
_last_satb_filtering_times_ms.reset();
_last_update_rs_times_ms.reset();
_last_update_rs_processed_buffers.reset();
_last_scan_rs_times_ms.reset();
_last_obj_copy_times_ms.reset();
_last_termination_times_ms.reset();
_last_termination_attempts.reset();
_last_gc_worker_end_times_ms.reset();
_last_gc_worker_times_ms.reset();
_last_gc_worker_other_times_ms.reset();
}
void G1GCPhaseTimes::note_gc_end() {
_last_gc_worker_start_times_ms.verify();
_last_ext_root_scan_times_ms.verify();
_last_satb_filtering_times_ms.verify();
_last_update_rs_times_ms.verify();
_last_update_rs_processed_buffers.verify();
_last_scan_rs_times_ms.verify();
_last_obj_copy_times_ms.verify();
_last_termination_times_ms.verify();
_last_termination_attempts.verify();
_last_gc_worker_end_times_ms.verify();
for (uint i = 0; i < _active_gc_threads; i++) {
double worker_time = _last_gc_worker_end_times_ms.get(i) - _last_gc_worker_start_times_ms.get(i);
_last_gc_worker_times_ms.set(i, worker_time);
double worker_known_time = _last_ext_root_scan_times_ms.get(i) +
_last_satb_filtering_times_ms.get(i) +
_last_update_rs_times_ms.get(i) +
_last_scan_rs_times_ms.get(i) +
_last_obj_copy_times_ms.get(i) +
_last_termination_times_ms.get(i);
double worker_other_time = worker_time - worker_known_time;
_last_gc_worker_other_times_ms.set(i, worker_other_time);
}
_last_gc_worker_times_ms.verify();
_last_gc_worker_other_times_ms.verify();
}
void G1GCPhaseTimes::print_stats(int level, const char* str, double value) {
@ -202,73 +228,6 @@ void G1GCPhaseTimes::print_stats(int level, const char* str, double value, int w
LineBuffer(level).append_and_print_cr("[%s: %.1lf ms, GC Workers: %d]", str, value, workers);
}
void G1GCPhaseTimes::print_stats(int level, const char* str, int value) {
LineBuffer(level).append_and_print_cr("[%s: %d]", str, value);
}
double G1GCPhaseTimes::avg_value(double* data) {
if (G1CollectedHeap::use_parallel_gc_threads()) {
double ret = 0.0;
for (uint i = 0; i < _active_gc_threads; ++i) {
ret += data[i];
}
return ret / (double) _active_gc_threads;
} else {
return data[0];
}
}
double G1GCPhaseTimes::max_value(double* data) {
if (G1CollectedHeap::use_parallel_gc_threads()) {
double ret = data[0];
for (uint i = 1; i < _active_gc_threads; ++i) {
if (data[i] > ret) {
ret = data[i];
}
}
return ret;
} else {
return data[0];
}
}
double G1GCPhaseTimes::sum_of_values(double* data) {
if (G1CollectedHeap::use_parallel_gc_threads()) {
double sum = 0.0;
for (uint i = 0; i < _active_gc_threads; i++) {
sum += data[i];
}
return sum;
} else {
return data[0];
}
}
double G1GCPhaseTimes::max_sum(double* data1, double* data2) {
double ret = data1[0] + data2[0];
if (G1CollectedHeap::use_parallel_gc_threads()) {
for (uint i = 1; i < _active_gc_threads; ++i) {
double data = data1[i] + data2[i];
if (data > ret) {
ret = data;
}
}
}
return ret;
}
void G1GCPhaseTimes::collapse_par_times() {
_ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
_satb_filtering_time = avg_value(_par_last_satb_filtering_times_ms);
_update_rs_time = avg_value(_par_last_update_rs_times_ms);
_update_rs_processed_buffers =
sum_of_values(_par_last_update_rs_processed_buffers);
_scan_rs_time = avg_value(_par_last_scan_rs_times_ms);
_obj_copy_time = avg_value(_par_last_obj_copy_times_ms);
_termination_time = avg_value(_par_last_termination_times_ms);
}
double G1GCPhaseTimes::accounted_time_ms() {
// Subtract the root region scanning wait time. It's initialized to
// zero at the start of the pause.
@ -286,58 +245,37 @@ double G1GCPhaseTimes::accounted_time_ms() {
return misc_time_ms;
}
void G1GCPhaseTimes::print(double pause_time_ms) {
if (PrintGCTimeStamps) {
gclog_or_tty->stamp();
gclog_or_tty->print(": ");
}
GCCauseString gc_cause_str = GCCauseString("GC pause", _gc_cause)
.append(_is_young_gc ? " (young)" : " (mixed)")
.append(_is_initial_mark_gc ? " (initial-mark)" : "");
gclog_or_tty->print_cr("[%s, %3.7f secs]", (const char*)gc_cause_str, pause_time_ms / 1000.0);
if (!G1Log::finer()) {
return;
}
void G1GCPhaseTimes::print(double pause_time_sec) {
if (_root_region_scan_wait_time_ms > 0.0) {
print_stats(1, "Root Region Scan Waiting", _root_region_scan_wait_time_ms);
}
if (G1CollectedHeap::use_parallel_gc_threads()) {
print_stats(1, "Parallel Time", _cur_collection_par_time_ms, _active_gc_threads);
print_par_stats(2, "GC Worker Start", _par_last_gc_worker_start_times_ms);
print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms);
if (_satb_filtering_time > 0.0) {
print_par_stats(2, "SATB Filtering", _par_last_satb_filtering_times_ms);
_last_gc_worker_start_times_ms.print(2, "GC Worker Start (ms)");
_last_ext_root_scan_times_ms.print(2, "Ext Root Scanning (ms)");
if (_last_satb_filtering_times_ms.sum() > 0.0) {
_last_satb_filtering_times_ms.print(2, "SATB Filtering (ms)");
}
print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
_last_update_rs_times_ms.print(2, "Update RS (ms)");
_last_update_rs_processed_buffers.print(3, "Processed Buffers");
_last_scan_rs_times_ms.print(2, "Scan RS (ms)");
_last_obj_copy_times_ms.print(2, "Object Copy (ms)");
_last_termination_times_ms.print(2, "Termination (ms)");
if (G1Log::finest()) {
print_par_stats(3, "Processed Buffers", _par_last_update_rs_processed_buffers,
false /* showDecimals */);
_last_termination_attempts.print(3, "Termination Attempts");
}
print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
print_par_stats(2, "Termination", _par_last_termination_times_ms);
if (G1Log::finest()) {
print_par_stats(3, "Termination Attempts", _par_last_termination_attempts,
false /* showDecimals */);
}
print_par_stats(2, "GC Worker Other", _par_last_gc_worker_other_times_ms);
print_par_stats(2, "GC Worker Total", _par_last_gc_worker_times_ms);
print_par_stats(2, "GC Worker End", _par_last_gc_worker_end_times_ms);
_last_gc_worker_other_times_ms.print(2, "GC Worker Other (ms)");
_last_gc_worker_times_ms.print(2, "GC Worker Total (ms)");
_last_gc_worker_end_times_ms.print(2, "GC Worker End (ms)");
} else {
print_stats(1, "Ext Root Scanning", _ext_root_scan_time);
if (_satb_filtering_time > 0.0) {
print_stats(1, "SATB Filtering", _satb_filtering_time);
_last_ext_root_scan_times_ms.print(1, "Ext Root Scanning (ms)");
if (_last_satb_filtering_times_ms.sum() > 0.0) {
_last_satb_filtering_times_ms.print(1, "SATB Filtering (ms)");
}
print_stats(1, "Update RS", _update_rs_time);
if (G1Log::finest()) {
print_stats(2, "Processed Buffers", (int)_update_rs_processed_buffers);
}
print_stats(1, "Scan RS", _scan_rs_time);
print_stats(1, "Object Copying", _obj_copy_time);
_last_update_rs_times_ms.print(1, "Update RS (ms)");
_last_update_rs_processed_buffers.print(2, "Processed Buffers");
_last_scan_rs_times_ms.print(1, "Scan RS (ms)");
_last_obj_copy_times_ms.print(1, "Object Copy (ms)");
}
print_stats(1, "Code Root Fixup", _cur_collection_code_root_fixup_time_ms);
print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
@ -350,8 +288,11 @@ void G1GCPhaseTimes::print(double pause_time_ms) {
print_stats(1, "Avg Clear CC", _cum_clear_cc_time_ms / ((double)_num_cc_clears));
}
}
double misc_time_ms = pause_time_ms - accounted_time_ms();
double misc_time_ms = pause_time_sec * MILLIUNITS - accounted_time_ms();
print_stats(1, "Other", misc_time_ms);
if (_cur_verify_before_time_ms > 0.0) {
print_stats(2, "Verify Before", _cur_verify_before_time_ms);
}
print_stats(2, "Choose CSet",
(_recorded_young_cset_choice_time_ms +
_recorded_non_young_cset_choice_time_ms));
@ -360,6 +301,9 @@ void G1GCPhaseTimes::print(double pause_time_ms) {
print_stats(2, "Free CSet",
(_recorded_young_free_cset_time_ms +
_recorded_non_young_free_cset_time_ms));
if (_cur_verify_after_time_ms > 0.0) {
print_stats(2, "Verify After", _cur_verify_after_time_ms);
}
}
void G1GCPhaseTimes::record_cc_clear_time_ms(double ms) {

@ -28,52 +28,109 @@
#include "memory/allocation.hpp"
#include "gc_interface/gcCause.hpp"
template <class T>
class WorkerDataArray : public CHeapObj<mtGC> {
T* _data;
uint _length;
const char* _print_format;
bool _print_sum;
// We cache the sum and average so that they only have to be calculated once.
// This is not done in an MT-safe way. It is intended to allow single-
// threaded code to call sum() and average() multiple times in any order
// without having to worry about the cost.
bool _has_new_data;
T _sum;
double _average;
public:
WorkerDataArray(uint length, const char* print_format, bool print_sum = true) :
_length(length), _print_format(print_format), _print_sum(print_sum), _has_new_data(true) {
assert(length > 0, "Must have some workers to store data for");
_data = NEW_C_HEAP_ARRAY(T, _length, mtGC);
}
~WorkerDataArray() {
FREE_C_HEAP_ARRAY(T, _data, mtGC);
}
void set(uint worker_i, T value) {
assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
assert(_data[worker_i] == (T)-1, err_msg("Overwriting data for worker %d", worker_i));
_data[worker_i] = value;
_has_new_data = true;
}
T get(uint worker_i) {
assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
assert(_data[worker_i] != (T)-1, err_msg("No data to add to for worker %d", worker_i));
return _data[worker_i];
}
void add(uint worker_i, T value) {
assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
assert(_data[worker_i] != (T)-1, err_msg("No data to add to for worker %d", worker_i));
_data[worker_i] += value;
_has_new_data = true;
}
double average(){
if (_has_new_data) {
calculate_totals();
}
return _average;
}
T sum() {
if (_has_new_data) {
calculate_totals();
}
return _sum;
}
void print(int level, const char* title);
void reset() PRODUCT_RETURN;
void verify() PRODUCT_RETURN;
private:
void calculate_totals(){
_sum = (T)0;
for (uint i = 0; i < _length; ++i) {
_sum += _data[i];
}
_average = (double)_sum / (double)_length;
_has_new_data = false;
}
};
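// [Editorial sketch -- not part of this changeset.] Typical single-threaded
// use of WorkerDataArray, assuming a hypothetical 4-worker pause; the format
// string matches the constructor arguments used by G1GCPhaseTimes below:
//
//   WorkerDataArray<double> times_ms(4, "%.1lf");
//   times_ms.reset();                      // debug builds assert on overwrites
//   for (uint i = 0; i < 4; i++) {
//     times_ms.set(i, 1.5 * (i + 1));      // one sample per worker
//   }
//   double avg = times_ms.average();       // 3.75; memoized until next set()/add()
//   times_ms.print(2, "Example Phase (ms)");  // prints Min/Avg/Max/Diff/Sum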
class G1GCPhaseTimes : public CHeapObj<mtGC> {
friend class G1CollectorPolicy;
friend class TraceGen0TimeData;
private:
uint _active_gc_threads;
uint _max_gc_threads;
GCCause::Cause _gc_cause;
bool _is_young_gc;
bool _is_initial_mark_gc;
double _pause_start_time_sec;
double* _par_last_gc_worker_start_times_ms;
double* _par_last_ext_root_scan_times_ms;
double* _par_last_satb_filtering_times_ms;
double* _par_last_update_rs_times_ms;
double* _par_last_update_rs_processed_buffers;
double* _par_last_scan_rs_times_ms;
double* _par_last_obj_copy_times_ms;
double* _par_last_termination_times_ms;
double* _par_last_termination_attempts;
double* _par_last_gc_worker_end_times_ms;
double* _par_last_gc_worker_times_ms;
double* _par_last_gc_worker_other_times_ms;
WorkerDataArray<double> _last_gc_worker_start_times_ms;
WorkerDataArray<double> _last_ext_root_scan_times_ms;
WorkerDataArray<double> _last_satb_filtering_times_ms;
WorkerDataArray<double> _last_update_rs_times_ms;
WorkerDataArray<int> _last_update_rs_processed_buffers;
WorkerDataArray<double> _last_scan_rs_times_ms;
WorkerDataArray<double> _last_obj_copy_times_ms;
WorkerDataArray<double> _last_termination_times_ms;
WorkerDataArray<size_t> _last_termination_attempts;
WorkerDataArray<double> _last_gc_worker_end_times_ms;
WorkerDataArray<double> _last_gc_worker_times_ms;
WorkerDataArray<double> _last_gc_worker_other_times_ms;
double _cur_collection_par_time_ms;
double _cur_collection_code_root_fixup_time_ms;
double _cur_clear_ct_time_ms;
double _cur_ref_proc_time_ms;
double _cur_ref_enq_time_ms;
// Helper methods for detailed logging
void print_par_stats(int level, const char* str, double* data, bool showDecimals = true);
void print_stats(int level, const char* str, double value);
void print_stats(int level, const char* str, double value, int workers);
void print_stats(int level, const char* str, int value);
double avg_value(double* data);
double max_value(double* data);
double sum_of_values(double* data);
double max_sum(double* data1, double* data2);
double accounted_time_ms();
// Card Table Count Cache stats
double _min_clear_cc_time_ms; // min
double _max_clear_cc_time_ms; // max
@ -81,19 +138,6 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
double _cum_clear_cc_time_ms; // cumulative clearing time
jlong _num_cc_clears; // number of times the card count cache has been cleared
// The following instance variables are directly accessed by G1CollectorPolicy
// and TraceGen0TimeData. This is why those classes are declared friends.
// An alternative is to add getters and setters for all of these fields.
// It might also be possible to restructure the code to reduce these
// dependencies.
double _ext_root_scan_time;
double _satb_filtering_time;
double _update_rs_time;
double _update_rs_processed_buffers;
double _scan_rs_time;
double _obj_copy_time;
double _termination_time;
double _cur_collection_start_sec;
double _root_region_scan_wait_time_ms;
@ -103,79 +147,58 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
double _recorded_young_free_cset_time_ms;
double _recorded_non_young_free_cset_time_ms;
void print(double pause_time_ms);
double _cur_verify_before_time_ms;
double _cur_verify_after_time_ms;
// Helper methods for detailed logging
void print_stats(int level, const char* str, double value);
void print_stats(int level, const char* str, double value, int workers);
public:
G1GCPhaseTimes(uint max_gc_threads);
void note_gc_start(double pause_start_time_sec, uint active_gc_threads,
bool is_young_gc, bool is_initial_mark_gc, GCCause::Cause gc_cause);
void note_gc_end(double pause_end_time_sec);
void collapse_par_times();
void note_gc_start(uint active_gc_threads);
void note_gc_end();
void print(double pause_time_sec);
void record_gc_worker_start_time(uint worker_i, double ms) {
assert(worker_i >= 0, "worker index must be > 0");
assert(worker_i < _active_gc_threads, "worker index out of bounds");
_par_last_gc_worker_start_times_ms[worker_i] = ms;
_last_gc_worker_start_times_ms.set(worker_i, ms);
}
void record_ext_root_scan_time(uint worker_i, double ms) {
assert(worker_i >= 0, "worker index must be > 0");
assert(worker_i < _active_gc_threads, "worker index out of bounds");
_par_last_ext_root_scan_times_ms[worker_i] = ms;
_last_ext_root_scan_times_ms.set(worker_i, ms);
}
void record_satb_filtering_time(uint worker_i, double ms) {
assert(worker_i >= 0, "worker index must be > 0");
assert(worker_i < _active_gc_threads, "worker index out of bounds");
_par_last_satb_filtering_times_ms[worker_i] = ms;
_last_satb_filtering_times_ms.set(worker_i, ms);
}
void record_update_rs_time(uint worker_i, double ms) {
assert(worker_i >= 0, "worker index must be > 0");
assert(worker_i < _active_gc_threads, "worker index out of bounds");
_par_last_update_rs_times_ms[worker_i] = ms;
_last_update_rs_times_ms.set(worker_i, ms);
}
void record_update_rs_processed_buffers (uint worker_i,
double processed_buffers) {
assert(worker_i >= 0, "worker index must be > 0");
assert(worker_i < _active_gc_threads, "worker index out of bounds");
_par_last_update_rs_processed_buffers[worker_i] = processed_buffers;
void record_update_rs_processed_buffers(uint worker_i, int processed_buffers) {
_last_update_rs_processed_buffers.set(worker_i, processed_buffers);
}
void record_scan_rs_time(uint worker_i, double ms) {
assert(worker_i >= 0, "worker index must be > 0");
assert(worker_i < _active_gc_threads, "worker index out of bounds");
_par_last_scan_rs_times_ms[worker_i] = ms;
}
void reset_obj_copy_time(uint worker_i) {
assert(worker_i >= 0, "worker index must be > 0");
assert(worker_i < _active_gc_threads, "worker index out of bounds");
_par_last_obj_copy_times_ms[worker_i] = 0.0;
}
void reset_obj_copy_time() {
reset_obj_copy_time(0);
_last_scan_rs_times_ms.set(worker_i, ms);
}
void record_obj_copy_time(uint worker_i, double ms) {
assert(worker_i >= 0, "worker index must be > 0");
assert(worker_i < _active_gc_threads, "worker index out of bounds");
_par_last_obj_copy_times_ms[worker_i] += ms;
_last_obj_copy_times_ms.set(worker_i, ms);
}
void add_obj_copy_time(uint worker_i, double ms) {
_last_obj_copy_times_ms.add(worker_i, ms);
}
void record_termination(uint worker_i, double ms, size_t attempts) {
assert(worker_i >= 0, "worker index must be > 0");
assert(worker_i < _active_gc_threads, "worker index out of bounds");
_par_last_termination_times_ms[worker_i] = ms;
_par_last_termination_attempts[worker_i] = (double) attempts;
_last_termination_times_ms.set(worker_i, ms);
_last_termination_attempts.set(worker_i, attempts);
}
void record_gc_worker_end_time(uint worker_i, double ms) {
assert(worker_i >= 0, "worker index must be > 0");
assert(worker_i < _active_gc_threads, "worker index out of bounds");
_par_last_gc_worker_end_times_ms[worker_i] = ms;
_last_gc_worker_end_times_ms.set(worker_i, ms);
}
void record_clear_ct_time(double ms) {
@ -211,6 +234,88 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
void record_non_young_free_cset_time_ms(double time_ms) {
_recorded_non_young_free_cset_time_ms = time_ms;
}
void record_young_cset_choice_time_ms(double time_ms) {
_recorded_young_cset_choice_time_ms = time_ms;
}
void record_non_young_cset_choice_time_ms(double time_ms) {
_recorded_non_young_cset_choice_time_ms = time_ms;
}
void record_cur_collection_start_sec(double time_ms) {
_cur_collection_start_sec = time_ms;
}
void record_verify_before_time_ms(double time_ms) {
_cur_verify_before_time_ms = time_ms;
}
void record_verify_after_time_ms(double time_ms) {
_cur_verify_after_time_ms = time_ms;
}
double accounted_time_ms();
double cur_collection_start_sec() {
return _cur_collection_start_sec;
}
double cur_collection_par_time_ms() {
return _cur_collection_par_time_ms;
}
double cur_clear_ct_time_ms() {
return _cur_clear_ct_time_ms;
}
double root_region_scan_wait_time_ms() {
return _root_region_scan_wait_time_ms;
}
double young_cset_choice_time_ms() {
return _recorded_young_cset_choice_time_ms;
}
double young_free_cset_time_ms() {
return _recorded_young_free_cset_time_ms;
}
double non_young_cset_choice_time_ms() {
return _recorded_non_young_cset_choice_time_ms;
}
double non_young_free_cset_time_ms() {
return _recorded_non_young_free_cset_time_ms;
}
double average_last_update_rs_time() {
return _last_update_rs_times_ms.average();
}
int sum_last_update_rs_processed_buffers() {
return _last_update_rs_processed_buffers.sum();
}
double average_last_scan_rs_time(){
return _last_scan_rs_times_ms.average();
}
double average_last_obj_copy_time() {
return _last_obj_copy_times_ms.average();
}
double average_last_termination_time() {
return _last_termination_times_ms.average();
}
double average_last_ext_root_scan_time() {
return _last_ext_root_scan_times_ms.average();
}
double average_last_satb_filtering_times_ms() {
return _last_satb_filtering_times_ms.average();
}
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1GCPHASETIMESLOG_HPP
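The new G1GCPhaseTimes code above funnels per-worker measurements through
set()/add() and reads them back with sum()/average(); the container behind
those calls is not defined in any hunk shown here. A minimal sketch of an
interface consistent with the call sites (every detail below is an
assumption, not the class this commit actually adds):

    #include <cassert>
    #include <cstddef>

    // Sketch only: a bounds-checked per-worker array matching the
    // set/add/sum/average calls used by G1GCPhaseTimes in this diff.
    template <class T>
    class PerWorkerArray {
      T*     _data;
      size_t _length;
     public:
      explicit PerWorkerArray(size_t length)
        : _data(new T[length]()), _length(length) {}
      ~PerWorkerArray() { delete[] _data; }

      void set(size_t worker_i, T value) {
        assert(worker_i < _length, "worker index out of bounds");
        _data[worker_i] = value;
      }
      void add(size_t worker_i, T value) {
        assert(worker_i < _length, "worker index out of bounds");
        _data[worker_i] += value;
      }
      T sum() const {
        T s = T();
        for (size_t i = 0; i < _length; i++) { s += _data[i]; }
        return s;
      }
      double average() const {
        return _length == 0 ? 0.0 : (double) sum() / (double) _length;
      }
    };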

@ -324,7 +324,7 @@ void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
if (G1UseParallelRSetUpdating || (worker_i == 0)) {
updateRS(&into_cset_dcq, worker_i);
} else {
_g1p->phase_times()->record_update_rs_processed_buffers(worker_i, 0.0);
_g1p->phase_times()->record_update_rs_processed_buffers(worker_i, 0);
_g1p->phase_times()->record_update_rs_time(worker_i, 0.0);
}
if (G1UseParallelRSetScanning || (worker_i == 0)) {

@ -287,17 +287,17 @@
"The number of times we'll force an overflow during " \
"concurrent marking") \
\
develop(uintx, G1DefaultMinNewGenPercent, 20, \
experimental(uintx, G1DefaultMinNewGenPercent, 20, \
"Percentage (0-100) of the heap size to use as minimum " \
"young gen size.") \
\
develop(uintx, G1DefaultMaxNewGenPercent, 80, \
experimental(uintx, G1DefaultMaxNewGenPercent, 80, \
"Percentage (0-100) of the heap size to use as maximum " \
"young gen size.") \
\
develop(uintx, G1OldCSetRegionLiveThresholdPercent, 90, \
experimental(uintx, G1OldCSetRegionLiveThresholdPercent, 90, \
"Threshold for regions to be added to the collection set. " \
"Regions with more live bytes that this will not be collected.") \
"Regions with more live bytes than this will not be collected.") \
\
product(uintx, G1HeapWastePercent, 5, \
"Amount of space, expressed as a percentage of the heap size, " \
@ -306,12 +306,40 @@
product(uintx, G1MixedGCCountTarget, 4, \
"The target number of mixed GCs after a marking cycle.") \
\
develop(uintx, G1OldCSetRegionThresholdPercent, 10, \
experimental(uintx, G1OldCSetRegionThresholdPercent, 10, \
"An upper bound for the number of old CSet regions expressed " \
"as a percentage of the heap size.") \
\
experimental(ccstr, G1LogLevel, NULL, \
"Log level for G1 logging: fine, finer, finest")
"Log level for G1 logging: fine, finer, finest") \
\
notproduct(bool, G1EvacuationFailureALot, false, \
"Force use of evacuation failure handling during certain " \
"evacuation pauses") \
\
develop(uintx, G1EvacuationFailureALotCount, 1000, \
"Number of successful evacuations between evacuation failures " \
"occurring at object copying") \
\
develop(uintx, G1EvacuationFailureALotInterval, 5, \
"Total collections between forced triggering of evacuation " \
"failures") \
\
develop(bool, G1EvacuationFailureALotDuringConcMark, true, \
"Force use of evacuation failure handling during evacuation " \
"pauses when marking is in progress") \
\
develop(bool, G1EvacuationFailureALotDuringInitialMark, true, \
"Force use of evacuation failure handling during initial mark " \
"evacuation pauses") \
\
develop(bool, G1EvacuationFailureALotDuringYoungGC, true, \
"Force use of evacuation failure handling during young " \
"evacuation pauses") \
\
develop(bool, G1EvacuationFailureALotDuringMixedGC, true, \
"Force use of evacuation failure handling during mixed " \
"evacuation pauses")
G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
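Both flag tables touched by this commit (G1_FLAGS above, C2_FLAGS further
down) follow the X-macro pattern: the table is itself a macro that applies
whichever per-kind macros the expansion site passes in, so a single table
can generate declarations, definitions, or documentation. A minimal
self-contained illustration, with every name invented for the sketch:

    #include <cstdio>

    // The table: each entry dispatches to one of the macros passed in.
    #define MY_FLAGS(develop, product)                              \
      develop(bool, MyDevFlag,  true, "only meaningful in debug")   \
      product(int,  MyProdFlag, 42,   "visible in product builds")

    // First expansion: declare one variable per entry.
    #define DECLARE_DEV_FLAG(type, name, value, doc)  extern type name;
    #define DECLARE_PROD_FLAG(type, name, value, doc) extern type name;
    MY_FLAGS(DECLARE_DEV_FLAG, DECLARE_PROD_FLAG)

    // Second expansion of the same table: define and initialize them.
    #define DEFINE_DEV_FLAG(type, name, value, doc)  type name = value;
    #define DEFINE_PROD_FLAG(type, name, value, doc) type name = value;
    MY_FLAGS(DEFINE_DEV_FLAG, DEFINE_PROD_FLAG)

    int main() {
      printf("MyDevFlag=%d MyProdFlag=%d\n", (int) MyDevFlag, MyProdFlag);
      return 0;
    }

In HotSpot the same trick is what lets product builds expand develop()
entries into constants while debug builds get real, settable variables.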

@ -384,10 +384,17 @@ void HeapRegion::par_clear() {
}
void HeapRegion::calc_gc_efficiency() {
// GC efficiency is the ratio of how much space would be
// reclaimed over how long we predict it would take to reclaim it.
G1CollectedHeap* g1h = G1CollectedHeap::heap();
G1CollectorPolicy* g1p = g1h->g1_policy();
_gc_efficiency = (double) reclaimable_bytes() /
g1p->predict_region_elapsed_time_ms(this, false);
// Retrieve a prediction of the elapsed time for this region for
// a mixed gc because the region will only be evacuated during a
// mixed gc.
double region_elapsed_time_ms =
g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
_gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
}
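To make the efficiency ratio concrete, a worked example with invented
numbers (not taken from this commit):

    #include <cstdio>

    int main() {
      // Region A: 4 MB reclaimable, predicted mixed-GC time 2.0 ms.
      // Region B: 6 MB reclaimable, predicted mixed-GC time 6.0 ms.
      double reclaimable_a = 4.0 * 1024 * 1024, predicted_a_ms = 2.0;
      double reclaimable_b = 6.0 * 1024 * 1024, predicted_b_ms = 6.0;
      printf("A: %.0f bytes/ms\n", reclaimable_a / predicted_a_ms); // ~2 MB/ms
      printf("B: %.0f bytes/ms\n", reclaimable_b / predicted_b_ms); // ~1 MB/ms
      // A scores twice B's efficiency, so A is the better collection-set
      // candidate even though B has more reclaimable space in total.
      return 0;
    }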
void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {

@ -24,11 +24,11 @@
#include "precompiled.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
#include "gc_implementation/parNew/parGCAllocBuffer.hpp"
#include "gc_implementation/parNew/parNewGeneration.hpp"
#include "gc_implementation/parNew/parOopClosures.inline.hpp"
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "gc_implementation/shared/ageTable.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/defNewGeneration.inline.hpp"
#include "memory/genCollectedHeap.hpp"
@ -453,7 +453,8 @@ void ParScanThreadStateSet::flush()
// retire the last buffer.
par_scan_state.to_space_alloc_buffer()->
flush_stats_and_retire(_gen.plab_stats(),
false /* !retain */);
true /* end_of_gc */,
false /* retain */);
// Every thread has its own age table. We need to merge
// them all into one.

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,7 +25,7 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP
#include "gc_implementation/parNew/parGCAllocBuffer.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "memory/defNewGeneration.hpp"
#include "utilities/taskqueue.hpp"

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "gc_implementation/parNew/parGCAllocBuffer.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "memory/sharedHeap.hpp"
#include "oops/arrayOop.hpp"
#include "oops/oop.inline.hpp"
@ -110,9 +110,7 @@ void PLABStats::adjust_desired_plab_sz() {
plab_sz = align_object_size(plab_sz);
// Latch the result
if (PrintPLAB) gclog_or_tty->print(" desired_plab_sz = %d) ", plab_sz);
if (ResizePLAB) {
_desired_plab_sz = plab_sz;
}
_desired_plab_sz = plab_sz;
// Now clear the accumulators for next round:
// note this needs to be fixed in the case where we
// are retaining across scavenges. FIX ME !!! XXX

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -121,14 +121,14 @@ public:
// Flush the stats supporting ergonomic sizing of PLAB's
void flush_stats(PLABStats* stats);
void flush_stats_and_retire(PLABStats* stats, bool retain) {
void flush_stats_and_retire(PLABStats* stats, bool end_of_gc, bool retain) {
// We flush the stats first in order to get a reading of
// unused space in the last buffer.
if (ResizePLAB) {
flush_stats(stats);
}
// Retire the last allocation buffer.
retire(true, retain);
retire(end_of_gc, retain);
}
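The net effect of the signature change is that the caller, not the buffer,
now decides whether a retire happens at end-of-GC. A compact sketch of the
new parameter threading, with stub types standing in for the real classes
in this diff:

    #include <cstdio>

    struct PLABStats { /* accumulators elided */ };

    struct Buffer {
      bool resize_plab;  // stand-in for the ResizePLAB flag

      void flush_stats(PLABStats*) { printf("stats flushed\n"); }
      void retire(bool end_of_gc, bool retain) {
        printf("retire(end_of_gc=%d, retain=%d)\n", end_of_gc, retain);
      }
      void flush_stats_and_retire(PLABStats* stats, bool end_of_gc,
                                  bool retain) {
        if (resize_plab) {
          flush_stats(stats);       // read unused space before retiring
        }
        retire(end_of_gc, retain);  // no longer hard-coded retire(true, ...)
      }
    };

    int main() {
      PLABStats stats;
      Buffer buf = { true };
      // The call shape ParScanThreadStateSet::flush() uses in this diff:
      buf.flush_stats_and_retire(&stats, true /* end_of_gc */,
                                 false /* retain */);
      return 0;
    }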
// Force future allocations to fail and queries for contains()

@ -424,6 +424,8 @@ class Bytecodes: AllStatic {
|| code == _fconst_0 || code == _dconst_0); }
static bool is_invoke (Code code) { return (_invokevirtual <= code && code <= _invokedynamic); }
static bool has_optional_appendix(Code code) { return code == _invokedynamic || code == _invokehandle; }
static int compute_flags (const char* format, int more_flags = 0); // compute the flags
static int flags (int code, bool is_wide) {
assert(code == (u_char)code, "must be a byte");

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
#include "gc_implementation/parNew/parGCAllocBuffer.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/generation.inline.hpp"

@ -353,27 +353,9 @@
develop(bool, StressRecompilation, false, \
"Recompile each compiled method without subsuming loads or escape analysis.") \
\
/* controls for tier 1 compilations */ \
\
develop(bool, Tier1CountInvocations, true, \
"Generate code, during tier 1, to update invocation counter") \
\
product(intx, Tier1Inline, false, \
"enable inlining during tier 1") \
\
product(intx, Tier1MaxInlineSize, 8, \
"maximum bytecode size of a method to be inlined, during tier 1") \
\
product(intx, Tier1FreqInlineSize, 35, \
"max bytecode size of a frequent method to be inlined, tier 1") \
\
develop(intx, ImplicitNullCheckThreshold, 3, \
"Don't do implicit null checks if NPE's in a method exceeds limit") \
\
/* controls for loop optimization */ \
product(intx, Tier1LoopOptsCount, 0, \
"Set level of loop optimization for tier 1 compiles") \
\
product(intx, LoopOptsCount, 43, \
"Set level of loop optimization for tier 1 compiles") \
\
@ -505,6 +487,116 @@
\
product(bool, BlockLayoutRotateLoops, true, \
"Allow back branches to be fall throughs in the block layour") \
\
develop(bool, InlineReflectionGetCallerClass, true, \
"inline sun.reflect.Reflection.getCallerClass(), known to be part "\
"of base library DLL") \
\
develop(bool, InlineObjectCopy, true, \
"inline Object.clone and Arrays.copyOf[Range] intrinsics") \
\
develop(bool, SpecialStringCompareTo, true, \
"special version of string compareTo") \
\
develop(bool, SpecialStringIndexOf, true, \
"special version of string indexOf") \
\
develop(bool, SpecialStringEquals, true, \
"special version of string equals") \
\
develop(bool, SpecialArraysEquals, true, \
"special version of Arrays.equals(char[],char[])") \
\
develop(bool, BailoutToInterpreterForThrows, false, \
"Compiled methods which throws/catches exceptions will be " \
"deopt and intp.") \
\
develop(bool, ConvertCmpD2CmpF, true, \
"Convert cmpD to cmpF when one input is constant in float range") \
\
develop(bool, ConvertFloat2IntClipping, true, \
"Convert float2int clipping idiom to integer clipping") \
\
develop(bool, Use24BitFPMode, true, \
"Set 24-bit FPU mode on a per-compile basis ") \
\
develop(bool, Use24BitFP, true, \
"use FP instructions that produce 24-bit precise results") \
\
develop(bool, MonomorphicArrayCheck, true, \
"Uncommon-trap array store checks that require full type check") \
\
notproduct(bool, TracePhaseCCP, false, \
"Print progress during Conditional Constant Propagation") \
\
develop(bool, PrintDominators, false, \
"Print out dominator trees for GVN") \
\
notproduct(bool, TraceSpilling, false, \
"Trace spilling") \
\
notproduct(bool, TraceTypeProfile, false, \
"Trace type profile") \
\
develop(bool, PoisonOSREntry, true, \
"Detect abnormal calls to OSR code") \
\
product(bool, UseCondCardMark, false, \
"Check for already marked card before updating card table") \
\
develop(bool, SoftMatchFailure, trueInProduct, \
"If the DFA fails to match a node, print a message and bail out") \
\
develop(bool, InlineAccessors, true, \
"inline accessor methods (get/set)") \
\
product(intx, TypeProfileMajorReceiverPercent, 90, \
"% of major receiver type to all profiled receivers") \
\
notproduct(bool, TimeCompiler2, false, \
"detailed time the compiler (requires +TimeCompiler)") \
\
diagnostic(bool, PrintIntrinsics, false, \
"prints attempted and successful inlining of intrinsics") \
\
diagnostic(ccstrlist, DisableIntrinsic, "", \
"do not expand intrinsics whose (internal) names appear here") \
\
develop(bool, StressReflectiveCode, false, \
"Use inexact types at allocations, etc., to test reflection") \
\
diagnostic(bool, DebugInlinedCalls, true, \
"If false, restricts profiled locations to the root method only") \
\
notproduct(bool, VerifyLoopOptimizations, false, \
"verify major loop optimizations") \
\
diagnostic(bool, ProfileDynamicTypes, true, \
"do extra type profiling and use it more aggressively") \
\
develop(bool, TraceIterativeGVN, false, \
"Print progress during Iterative Global Value Numbering") \
\
develop(bool, VerifyIterativeGVN, false, \
"Verify Def-Use modifications during sparse Iterative Global " \
"Value Numbering") \
\
notproduct(bool, TraceCISCSpill, false, \
"Trace allocators use of cisc spillable instructions") \
\
product(bool, SplitIfBlocks, true, \
"Clone compares and control flow through merge points to fold " \
"some branches") \
\
develop(intx, FreqCountInvocations, 1, \
"Scaling factor for branch frequencies (deprecated)") \
\
product(intx, AliasLevel, 3, \
"0 for no aliasing, 1 for oop/field/static/array split, " \
"2 for class split, 3 for unique instances") \
\
develop(bool, VerifyAliases, false, \
"perform extra checks on the results of alias analysis") \
C2_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG)

@ -158,74 +158,6 @@ JVMState* DirectCallGenerator::generate(JVMState* jvms) {
return kit.transfer_exceptions_into_jvms();
}
//---------------------------DynamicCallGenerator-----------------------------
// Internal class which handles all out-of-line invokedynamic calls.
class DynamicCallGenerator : public CallGenerator {
public:
DynamicCallGenerator(ciMethod* method)
: CallGenerator(method)
{
}
virtual JVMState* generate(JVMState* jvms);
};
JVMState* DynamicCallGenerator::generate(JVMState* jvms) {
GraphKit kit(jvms);
Compile* C = kit.C;
PhaseGVN& gvn = kit.gvn();
if (C->log() != NULL) {
C->log()->elem("dynamic_call bci='%d'", jvms->bci());
}
// Get the constant pool cache from the caller class.
ciMethod* caller_method = jvms->method();
ciBytecodeStream str(caller_method);
str.force_bci(jvms->bci()); // Set the stream to the invokedynamic bci.
assert(str.cur_bc() == Bytecodes::_invokedynamic, "wrong place to issue a dynamic call!");
ciCPCache* cpcache = str.get_cpcache();
// Get the offset of the CallSite from the constant pool cache
// pointer.
int index = str.get_method_index();
size_t call_site_offset = cpcache->get_f1_offset(index);
// Load the CallSite object from the constant pool cache.
const TypeOopPtr* cpcache_type = TypeOopPtr::make_from_constant(cpcache); // returns TypeAryPtr of type T_OBJECT
const TypeOopPtr* call_site_type = TypeOopPtr::make_from_klass(C->env()->CallSite_klass());
Node* cpcache_adr = kit.makecon(cpcache_type);
Node* call_site_adr = kit.basic_plus_adr(cpcache_adr, call_site_offset);
// The oops in the constant pool cache are not compressed; load them as raw pointers.
Node* call_site = kit.make_load(kit.control(), call_site_adr, call_site_type, T_ADDRESS, Compile::AliasIdxRaw);
// Load the target MethodHandle from the CallSite object.
const TypeOopPtr* target_type = TypeOopPtr::make_from_klass(C->env()->MethodHandle_klass());
Node* target_mh_adr = kit.basic_plus_adr(call_site, java_lang_invoke_CallSite::target_offset_in_bytes());
Node* target_mh = kit.make_load(kit.control(), target_mh_adr, target_type, T_OBJECT);
address resolve_stub = SharedRuntime::get_resolve_opt_virtual_call_stub();
CallStaticJavaNode* call = new (C, tf()->domain()->cnt()) CallStaticJavaNode(tf(), resolve_stub, method(), kit.bci());
// invokedynamic is treated as an optimized invokevirtual.
call->set_optimized_virtual(true);
// Take extra care (in the presence of argument motion) not to trash the SP:
call->set_method_handle_invoke(true);
// Pass the target MethodHandle as first argument and shift the
// other arguments.
call->init_req(0 + TypeFunc::Parms, target_mh);
uint nargs = call->method()->arg_size();
for (uint i = 1; i < nargs; i++) {
Node* arg = kit.argument(i - 1);
call->init_req(i + TypeFunc::Parms, arg);
}
kit.set_edges_for_java_call(call);
Node* ret = kit.set_results_for_java_call(call);
kit.push_node(method()->return_type()->basic_type(), ret);
return kit.transfer_exceptions_into_jvms();
}
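The crux of the removed generator is the argument shift: the CallSite's
target MethodHandle becomes an implicit leading argument, and every
bytecode-level argument moves up one input slot. A small illustration of
that index mapping (the Parms and nargs values are invented):

    #include <cstdio>

    int main() {
      const int Parms = 4;  // stand-in for TypeFunc::Parms
      const int nargs = 3;  // stand-in for call->method()->arg_size()
      printf("req[%d] = target_mh\n", Parms + 0);
      for (int i = 1; i < nargs; i++) {
        // argument(i - 1) from the caller lands one slot higher in the call.
        printf("req[%d] = argument(%d)\n", Parms + i, i - 1);
      }
      return 0;
    }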
//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
@ -328,12 +260,6 @@ CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
return new VirtualCallGenerator(m, vtable_index);
}
CallGenerator* CallGenerator::for_dynamic_call(ciMethod* m) {
assert(m->is_compiled_lambda_form(), "for_dynamic_call mismatch");
//@@ FIXME: this should be done via a direct call
return new DynamicCallGenerator(m);
}
// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
CallGenerator* _inline_cg;
@ -347,7 +273,7 @@ class LateInlineCallGenerator : public DirectCallGenerator {
// Convert the CallStaticJava into an inline
virtual void do_late_inline();
JVMState* generate(JVMState* jvms) {
virtual JVMState* generate(JVMState* jvms) {
// Record that this call site should be revisited once the main
// parse is finished.
Compile::current()->add_late_inline(this);
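So generate() only records the site here and otherwise emits an ordinary
direct call; Compile revisits the recorded generators once the main parse
has finished. A minimal sketch of that defer-and-replay pattern, with all
names invented (this is not the C2 implementation):

    #include <cstdio>
    #include <vector>

    struct LateTask {
      int site_id;
      void do_late_inline() {
        printf("late-inlining call site %d\n", site_id);
      }
    };

    // Stand-in for Compile: collects tasks while parsing, replays them after.
    struct Context {
      std::vector<LateTask*> _late;
      void add_late_inline(LateTask* t) { _late.push_back(t); }
      void replay_late_inlines() {
        for (size_t i = 0; i < _late.size(); i++) {
          _late[i]->do_late_inline();
        }
      }
    };

    int main() {
      Context c;
      LateTask a = { 1 }, b = { 2 };
      c.add_late_inline(&a);    // during the main parse: just record
      c.add_late_inline(&b);
      c.replay_late_inlines();  // after parsing: revisit and inline
      return 0;
    }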

Some files were not shown because too many files have changed in this diff