J. Duke 2017-07-05 17:12:12 +02:00
commit a9d8e5b02c
261 changed files with 7004 additions and 4960 deletions

View File

@ -67,3 +67,4 @@ cf26288a114be67c39f2758959ce50b60f5ae330 jdk7-b85
425ba3efabbfe0b188105c10aaf7c3c8fa8d1a38 jdk7-b90
97d8b6c659c29c8493a8b2b72c2796a021a8cf79 jdk7-b91
5f5c33d417f3a14706b09a4a95e65fa7b6fa54d6 jdk7-b92
5fc102ff48f0e787ce9cc77249841d5ff0941b75 jdk7-b93

View File

@ -970,9 +970,9 @@ including non-open portions.
So for now you should be able to build with either VS2003 or VS2010.
We do not guarantee that VS2008 will work, although there is sufficient
makefile support to make at least basic JDK builds plausible.
Visual Studio 2010 Express compilers are likely to be able to build all the
"open" sources, with only small adjustments, but this has yet to be made
to work. Also we have not yet seen the 7.1 Windows SDK with the 64 bit
Visual Studio 2010 Express compilers are now able to build all the
open source repositories, but this is 32 bit only, since
we have not yet seen the 7.1 Windows SDK with the 64 bit
compilers. <b>END WARNING.</b>
<p>
The 32-bit OpenJDK Windows build

View File

@ -67,3 +67,4 @@ bb4424c5e778b842c064a8b1aa902b35f4397654 jdk7-b89
56ce07b0eb47b93a98a72adef0f21e602c460623 jdk7-b90
bcd2fc089227559ac5be927923609fac29f067fa jdk7-b91
930582f667a13391cd0b3e41e8cb760f55e3a5c0 jdk7-b92
9718d624864c29dca44373d541e93cdd309a994f jdk7-b93

View File

@ -30,8 +30,7 @@
BUILDDIR = ../..
include $(BUILDDIR)/common/Defs.gmk
SUBDIRS = org core
SUBDIRS = org
all build clean clobber::
$(SUBDIRS-loop)
$(RM) -r $(CLASSBINDIR)/com/sun/corba/se/internal/io

View File

@ -1,101 +0,0 @@
#
# Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Sun designates this
# particular file as subject to the "Classpath" exception as provided
# by Sun in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
# CA 95054 USA or visit www.sun.com if you need additional information or
# have any questions.
#
#
# Makefile for building RMI/IIOP
#
BUILDDIR = ../../..
PACKAGE = com.sun.corba.se.internal.io
PRODUCT = sun
LIBRARY = ioser12
include $(BUILDDIR)/common/Defs.gmk
#
# Use mapfile
#
FILES_m = mapfile-vers
include $(BUILDDIR)/common/Mapfile-vers.gmk
#
# Files to compile.
#
CORBA_JMK_DIRECTORY=$(TOPDIR)/make/com/sun/corba/minclude/
include $(CORBA_JMK_DIRECTORY)javax_rmi.jmk
include $(CORBA_JMK_DIRECTORY)javax_rmi_CORBA.jmk
include $(CORBA_JMK_DIRECTORY)javax_transaction.jmk
include $(CORBA_JMK_DIRECTORY)javax_activity.jmk
include $(CORBA_JMK_DIRECTORY)ioser_io.jmk
include $(CORBA_JMK_DIRECTORY)sun_corba.jmk
ifdef STANDALONE_CORBA_WS
# FIXUP: What is this all about?
OTHER_LDFLAGS=-L$(BOOTDIR)/jre/lib/$(ARCH) -L$(BOOTDIR)/jre/lib/$(LIBARCH)/native_threads -ljvm
OTHER_INCLUDES+=-ICClassHeaders -I$(BOOTDIR)/include -I$(BOOTDIR)/include/$(PLATFORM)
else
OTHER_LDLIBS=$(JVMLIB)
OTHER_INCLUDES+=-ICClassHeaders -I$(BOOTDIR)/include -I$(BOOTDIR)/include/$(PLATFORM)
endif
FILES_c = ioser.c
FILES_java = \
$(javax_rmi_java) \
$(javax_rmi_CORBA_java) \
$(javax_transaction_java) \
$(javax_activity_java) \
$(IOSER_IO_java) \
$(sun_corba_java)
#
# Generate header files for.
#
FILES_export = \
com/sun/corba/se/internal/io/IIOPInputStream.java \
com/sun/corba/se/internal/io/IIOPOutputStream.java \
com/sun/corba/se/internal/io/ObjectStreamClass.java \
com/sun/corba/se/internal/io/LibraryManager.java
#
# Resources
#
LOCALE_SET_DEFINITION = jre
RESOURCE_BUNDLES_PROPERTIES = \
com/sun/corba/se/impl/orbutil/resources/sunorb.properties
#
# Rules
#
include $(BUILDDIR)/common/Library.gmk
#
# Extra clean rules because we build more than one package.
#
clean:: classheaders.clean objects.clean
$(RM) -r $(CLASSBINDIR)/javax/rmi
$(RM) -r $(CLASSBINDIR)/javax/transaction
$(RM) -r $(CLASSBINDIR)/javax/activity
$(RM) -r $(CLASSBINDIR)/com/sun/corba/se/impl
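For orientation: the FILES_export list in this deleted Makefile drives JNI header generation into the CClassHeaders directory named in OTHER_INCLUDES, and those headers declare the native entry points implemented by the ioser.c file removed later in this commit. A minimal sketch of one such declaration (not the verbatim generated header; the parameter list mirrors the setIntField implementation in ioser.c):

#include <jni.h>

/* Declaration of the setIntField native exported from libioser12.
 * The mangled name encodes the package-qualified class and method name. */
JNIEXPORT void JNICALL Java_com_sun_corba_se_internal_io_IIOPInputStream_setIntField
  (JNIEnv *, jobject, jobject, jclass, jstring, jstring, jint);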

View File

@ -1,80 +0,0 @@
#
# Copyright 2005 Sun Microsystems, Inc. All Rights Reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Sun designates this
# particular file as subject to the "Classpath" exception as provided
# by Sun in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
# CA 95054 USA or visit www.sun.com if you need additional information or
# have any questions.
#
# Define library interface.
SUNWprivate_1.1 {
global:
Java_com_sun_corba_se_internal_io_IIOPInputStream_allocateNewObject;
Java_com_sun_corba_se_internal_io_IIOPInputStream_loadClass;
Java_com_sun_corba_se_internal_io_IIOPInputStream_readObject;
Java_com_sun_corba_se_internal_io_IIOPInputStream_setBooleanField;
Java_com_sun_corba_se_internal_io_IIOPInputStream_setBooleanFieldOpt;
Java_com_sun_corba_se_internal_io_IIOPInputStream_setByteField;
Java_com_sun_corba_se_internal_io_IIOPInputStream_setByteFieldOpt;
Java_com_sun_corba_se_internal_io_IIOPInputStream_setCharField;
Java_com_sun_corba_se_internal_io_IIOPInputStream_setCharFieldOpt;
Java_com_sun_corba_se_internal_io_IIOPInputStream_setDoubleField;
Java_com_sun_corba_se_internal_io_IIOPInputStream_setDoubleFieldOpt;
Java_com_sun_corba_se_internal_io_IIOPInputStream_setFloatField;
Java_com_sun_corba_se_internal_io_IIOPInputStream_setFloatFieldOpt;
Java_com_sun_corba_se_internal_io_IIOPInputStream_setIntField;
Java_com_sun_corba_se_internal_io_IIOPInputStream_setIntFieldOpt;
Java_com_sun_corba_se_internal_io_IIOPInputStream_setLongField;
Java_com_sun_corba_se_internal_io_IIOPInputStream_setLongFieldOpt;
Java_com_sun_corba_se_internal_io_IIOPInputStream_setObjectField;
Java_com_sun_corba_se_internal_io_IIOPInputStream_setObjectFieldOpt;
Java_com_sun_corba_se_internal_io_IIOPInputStream_setShortField;
Java_com_sun_corba_se_internal_io_IIOPInputStream_setShortFieldOpt;
Java_com_sun_corba_se_internal_io_IIOPInputStream_throwExceptionType;
Java_com_sun_corba_se_internal_io_IIOPOutputStream_getBooleanField;
Java_com_sun_corba_se_internal_io_IIOPOutputStream_getBooleanFieldOpt;
Java_com_sun_corba_se_internal_io_IIOPOutputStream_getByteField;
Java_com_sun_corba_se_internal_io_IIOPOutputStream_getByteFieldOpt;
Java_com_sun_corba_se_internal_io_IIOPOutputStream_getCharField;
Java_com_sun_corba_se_internal_io_IIOPOutputStream_getCharFieldOpt;
Java_com_sun_corba_se_internal_io_IIOPOutputStream_getDoubleField;
Java_com_sun_corba_se_internal_io_IIOPOutputStream_getDoubleFieldOpt;
Java_com_sun_corba_se_internal_io_IIOPOutputStream_getFloatField;
Java_com_sun_corba_se_internal_io_IIOPOutputStream_getFloatFieldOpt;
Java_com_sun_corba_se_internal_io_IIOPOutputStream_getIntField;
Java_com_sun_corba_se_internal_io_IIOPOutputStream_getIntFieldOpt;
Java_com_sun_corba_se_internal_io_IIOPOutputStream_getLongField;
Java_com_sun_corba_se_internal_io_IIOPOutputStream_getLongFieldOpt;
Java_com_sun_corba_se_internal_io_IIOPOutputStream_getObjectField;
Java_com_sun_corba_se_internal_io_IIOPOutputStream_getObjectFieldOpt;
Java_com_sun_corba_se_internal_io_IIOPOutputStream_getShortField;
Java_com_sun_corba_se_internal_io_IIOPOutputStream_getShortFieldOpt;
Java_com_sun_corba_se_internal_io_IIOPOutputStream_throwExceptionType;
Java_com_sun_corba_se_internal_io_IIOPOutputStream_writeObject;
Java_com_sun_corba_se_internal_io_LibraryManager_getMajorVersion;
Java_com_sun_corba_se_internal_io_LibraryManager_getMinorVersion;
Java_com_sun_corba_se_internal_io_LibraryManager_setEnableOverride;
Java_com_sun_corba_se_internal_io_ObjectStreamClass_hasStaticInitializer;
Java_com_sun_corba_se_internal_io_ObjectStreamField_getFieldIDNative;
Java_com_sun_corba_se_internal_util_JDKClassLoader_specialLoadClass;
local:
*;
};

View File

@ -1,66 +0,0 @@
/*
* Copyright 2003 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Sun designates this
* particular file as subject to the "Classpath" exception as provided
* by Sun in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*/
package com.sun.corba.se.internal.io;
public class IIOPInputStream {
private static native Object allocateNewObject(Class aclass,
Class initclass)
throws InstantiationException, IllegalAccessException;
/* Create a pending exception. This is needed to get around
* the fact that the *Delegate methods do not explicitly
* declare that they throw exceptions.
*
* This native methods creates an exception of the given type with
* the given message string and posts it to the pending queue.
*/
private static native void throwExceptionType(Class c, String message);
/* The following native methods of the form set*Field are used
* to set private, protected, and package private fields
* of an Object.
*/
private static native void setObjectField(Object o, Class c, String fieldName, String fieldSig, Object v);
private static native void setBooleanField(Object o, Class c, String fieldName, String fieldSig, boolean v);
private static native void setByteField(Object o, Class c, String fieldName, String fieldSig, byte v);
private static native void setCharField(Object o, Class c, String fieldName, String fieldSig, char v);
private static native void setShortField(Object o, Class c, String fieldName, String fieldSig, short v);
private static native void setIntField(Object o, Class c, String fieldName, String fieldSig, int v);
private static native void setLongField(Object o, Class c, String fieldName, String fieldSig, long v);
private static native void setFloatField(Object o, Class c, String fieldName, String fieldSig, float v);
private static native void setDoubleField(Object o, Class c, String fieldName, String fieldSig, double v);
private static native void readObject(Object obj, Class asClass, Object ois);
private static native void setObjectFieldOpt(Object o, long fieldID, Object v);
private static native void setBooleanFieldOpt(Object o, long fieldID, boolean v);
private static native void setByteFieldOpt(Object o, long fieldID, byte v);
private static native void setCharFieldOpt(Object o, long fieldID, char v);
private static native void setShortFieldOpt(Object o, long fieldID, short v);
private static native void setIntFieldOpt(Object o, long fieldID, int v);
private static native void setLongFieldOpt(Object o, long fieldID, long v);
private static native void setFloatFieldOpt(Object o, long fieldID, float v);
private static native void setDoubleFieldOpt(Object o, long fieldID, double v);
}
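The paired set*Field / set*FieldOpt natives above reflect two lookup strategies: the plain variants resolve the field by name and signature on every call, while the Opt variants take a jfieldID that was obtained once through ObjectStreamField.getFieldIDNative (implemented in ioser.c later in this commit) and reuse it. A minimal JNI sketch of that cached-ID pattern, with hypothetical helper names, assuming the caller stores the returned long on the Java side:

#include <jni.h>

/* Hypothetical illustration of the cached-field-ID pattern behind the *Opt natives:
 * resolve the jfieldID once, then set the field by ID on each subsequent call. */
static jlong cache_int_field_id(JNIEnv *env, jclass clazz,
                                const char *name, const char *sig) {
    jfieldID id = (*env)->GetFieldID(env, clazz, name, sig);  /* e.g. "count", "I" */
    return (jlong)id;   /* returned to Java as a long, as getFieldIDNative does */
}

static void set_int_by_cached_id(JNIEnv *env, jobject obj, jlong fieldID, jint value) {
    (*env)->SetIntField(env, obj, (jfieldID)fieldID, value);  /* mirrors setIntFieldOpt */
}

Resolving the ID once and passing it back as a long avoids the per-call string conversions and GetFieldID lookups that the non-Opt setters in ioser.c perform.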

View File

@ -1,51 +0,0 @@
/*
* Copyright 2003 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Sun designates this
* particular file as subject to the "Classpath" exception as provided
* by Sun in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*/
package com.sun.corba.se.internal.io;
public class IIOPOutputStream {
/* Create a pending exception. This is needed to get around
* the fact that the *Delegate methods do not explicitly
* declare that they throw exceptions.
*
* This native method creates an exception of the given type with
* the given message string and posts it to the pending queue.
*/
private static native void throwExceptionType(Class c, String message);
private static native Object getObjectFieldOpt(Object o, long fieldID);
private static native boolean getBooleanFieldOpt(Object o, long fieldID);
private static native byte getByteFieldOpt(Object o, long fieldID);
private static native char getCharFieldOpt(Object o, long fieldID);
private static native short getShortFieldOpt(Object o, long fieldID);
private static native int getIntFieldOpt(Object o, long fieldID);
private static native long getLongFieldOpt(Object o, long fieldID);
private static native float getFloatFieldOpt(Object o, long fieldID);
private static native double getDoubleFieldOpt(Object o, long fieldID);
private static native void writeObject(Object obj, Class asClass, Object oos) throws IllegalAccessException;
}

View File

@ -1,862 +0,0 @@
/*
* Copyright 1998-2002 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Sun designates this
* particular file as subject to the "Classpath" exception as provided
* by Sun in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*/
#include "jni.h"
#include "com_sun_corba_se_internal_io_IIOPInputStream.h"
#include "com_sun_corba_se_internal_io_IIOPOutputStream.h"
#include "com_sun_corba_se_internal_io_ObjectStreamClass.h"
#include "com_sun_corba_se_internal_io_LibraryManager.h"
#define MAJOR_VERSION 1
#define MINOR_VERSION 11 /*sun.4296963 ibm.11861*/
static char *copyright[] = {
"Licensed Materials - Property of IBM and Sun",
"RMI-IIOP v1.0",
"Copyright IBM Corp. 1998 1999 All Rights Reserved",
"Copyright 1998-1999 Sun Microsystems, Inc. 901 San Antonio Road,",
"Palo Alto, CA 94303, U.S.A. All rights reserved."
};
/*
* Class: com_sun_corba_se_internal_io_LibraryManager
* Method: getMajorVersion
* Signature: ()I
*/
JNIEXPORT jint JNICALL Java_com_sun_corba_se_internal_io_LibraryManager_getMajorVersion
(JNIEnv *env, jclass this)
{
return MAJOR_VERSION;
}
/*
* Class: com_sun_corba_se_internal_io_LibraryManager
* Method: getMinorVersion
* Signature: ()I
*/
JNIEXPORT jint JNICALL Java_com_sun_corba_se_internal_io_LibraryManager_getMinorVersion
(JNIEnv *env, jclass this)
{
return MINOR_VERSION;
}
/*
* Class: com_sun_corba_se_internal_io_LibraryManager
* Method: setEnableOverride
* Signature: (Ljava/lang/Class;Ljava/lang/Object;)Z
*/
JNIEXPORT jboolean JNICALL Java_com_sun_corba_se_internal_io_LibraryManager_setEnableOverride
(JNIEnv *env, jclass this, jclass targetClass, jobject instance)
{
jfieldID fieldID = (*env)->GetFieldID(env, targetClass,
"enableSubclassImplementation",
"Z");
(*env)->SetBooleanField(env, instance, fieldID, JNI_TRUE);
return (*env)->GetBooleanField(env, instance, fieldID);
}
/*
* Class: com_sun_corba_se_internal_io_IIOPOutputStream
* Method: throwExceptionType
* Signature: (Ljava/lang/Class;Ljava/lang/String;)V
*
* Construct and throw the given exception using the given message.
*/
JNIEXPORT void JNICALL Java_com_sun_corba_se_internal_io_IIOPOutputStream_throwExceptionType
(JNIEnv *env, jobject obj, jclass c, jstring mssg)
{
const char* strMsg = (*env)->GetStringUTFChars(env, mssg, 0L);
(*env)->ThrowNew(env, c, strMsg);
(*env)->ReleaseStringUTFChars(env, mssg, strMsg);
return;
}
/*
* Class: com_sun_corba_se_internal_io_IIOPInputStream
* Method: throwExceptionType
* Signature: (Ljava/lang/Class;Ljava/lang/String;)V
*
* Construct and throw the given exception using the given message.
*/
JNIEXPORT void JNICALL Java_com_sun_corba_se_internal_io_IIOPInputStream_throwExceptionType
(JNIEnv *env, jobject obj, jclass c, jstring mssg)
{
const char* strMsg = (*env)->GetStringUTFChars(env, mssg, 0L);
(*env)->ThrowNew(env, c, strMsg);
(*env)->ReleaseStringUTFChars(env, mssg, strMsg);
return;
}
JNIEXPORT jobject JNICALL
Java_com_sun_corba_se_internal_io_IIOPInputStream_allocateNewObject (JNIEnv * env,
jclass this,
jclass aclass,
jclass initclass)
{
jmethodID cid;
/**
* Get the method ID of the default constructor of
* initclass, which is the first non-Serializable
* superclass.
*/
cid = (*env)->GetMethodID(env, initclass, "<init>", "()V");
if (cid == NULL) {
/* exception thrown */
return NULL;
}
/**
* Allocates an object of type aclass and calls the
* initclass default constructor (found above)
*/
return (*env)->NewObject(env, aclass, cid);
}
/* DEPRECATED - This is no longer used.
*
* Find the first class loader up the stack and use its class to call
* FindClassFromClass to resolve the specified class
* name. The code is similar to that of java.lang.currentClassLoader
*/
JNIEXPORT jclass JNICALL
Java_com_sun_corba_se_internal_io_IIOPInputStream_loadClass (JNIEnv * env,
jobject this,
jclass curClass,
jstring currClassName)
{
return 0L;
}
#include "com_sun_corba_se_internal_io_ObjectStreamClass.h"
/*
* Class: com_sun_corba_se_internal_io_ObjectStreamClass
* Method: hasStaticInitializer
* Signature: (Ljava/lang/Class;)Z
*
* If the method <clinit> ()V is defined true is returned.
* Otherwise, false is returned.
*/
JNIEXPORT jboolean JNICALL
Java_com_sun_corba_se_internal_io_ObjectStreamClass_hasStaticInitializer(JNIEnv *env, jclass this,
jclass clazz)
{
jclass superclazz = NULL;
jmethodID superclinit = NULL;
jmethodID clinit = (*env)->GetStaticMethodID(env, clazz,
"<clinit>", "()V");
if (clinit == NULL || (*env)->ExceptionOccurred(env)) {
(*env)->ExceptionClear(env);
return 0;
}
/* Ask the superclass the same question
* If the answer is the same then the constructor is from a superclass.
* If different, it's really defined on the subclass.
*/
superclazz = (*env)->GetSuperclass(env, clazz);
if ((*env)->ExceptionOccurred(env)) {
return 0;
}
if (superclazz == NULL)
return 1;
superclinit = (*env)->GetStaticMethodID(env, superclazz,
"<clinit>", "()V");
if ((*env)->ExceptionOccurred(env)) {
(*env)->ExceptionClear(env);
superclinit = NULL;
}
return (superclinit != clinit);
}
/*
* Class: com_sun_corba_se_internal_io_IIOPInputStream
* Method: readObject
* Signature: (Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/Object;)V
*/
JNIEXPORT void JNICALL Java_com_sun_corba_se_internal_io_IIOPInputStream_readObject
(JNIEnv *env, jobject this, jobject obj, jclass cls, jobject ois)
{
jthrowable exc;
jclass newExcCls;
jmethodID mid = (*env)->GetMethodID(env, cls, "readObject", "(Ljava/io/ObjectInputStream;)V");
if (mid == 0)
return;
(*env)->CallNonvirtualVoidMethod(env, obj, cls, mid, ois);
exc = (*env)->ExceptionOccurred(env);
if (exc) {
(*env)->ExceptionDescribe(env);
(*env)->ExceptionClear(env);
newExcCls = (*env)->FindClass(env, "java/io/IOException");
if (newExcCls == 0) /* Unable to find the new exception class, give up. */
return;
(*env)->ThrowNew(env, newExcCls, "Serializable readObject method failed internally");
return;
}
return;
}
/*
* Class: com_sun_corba_se_internal_io_IIOPOutputStream
* Method: writeObject
* Signature: (Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/Object;)V
*/
JNIEXPORT void JNICALL Java_com_sun_corba_se_internal_io_IIOPOutputStream_writeObject
(JNIEnv *env, jobject this, jobject obj, jclass cls, jobject oos)
{
jthrowable exc;
jclass newExcCls;
jmethodID mid = (*env)->GetMethodID(env, cls, "writeObject", "(Ljava/io/ObjectOutputStream;)V");
if (mid == 0)
return;
(*env)->CallNonvirtualVoidMethod(env, obj, cls, mid, oos);
exc = (*env)->ExceptionOccurred(env);
if (exc) {
(*env)->ExceptionDescribe(env);
(*env)->ExceptionClear(env);
newExcCls = (*env)->FindClass(env, "java/io/IOException");
if (newExcCls == 0) /* Unable to find the new exception class, give up. */
return;
(*env)->ThrowNew(env, newExcCls, "Serializable readObject method failed internally");
return;
}
return;
}
/*
* Class: com_sun_corba_se_internal_io_IIOPOutputStream
* Method: getObjectField
* Signature: (Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/String;Ljava/lang/String;)Ljava/lang/Object;
*/
JNIEXPORT jobject JNICALL Java_com_sun_corba_se_internal_io_IIOPOutputStream_getObjectField
(JNIEnv *env, jobject this, jobject obj, jclass clazz, jstring fieldName, jstring fieldSig)
{
const char *strFieldName = (*env)->GetStringUTFChars(env, fieldName, 0L);
const char *strFieldSig = (*env)->GetStringUTFChars(env, fieldSig, 0L);
jfieldID fieldID = (*env)->GetFieldID(env, clazz, strFieldName, strFieldSig);
(*env)->ReleaseStringUTFChars(env, fieldName, strFieldName);
(*env)->ReleaseStringUTFChars(env, fieldSig, strFieldSig);
return (*env)->GetObjectField(env, obj, fieldID);
}
/*
* Class: com_sun_corba_se_internal_io_IIOPOutputStream
* Method: getBooleanField
* Signature: (Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/String;Ljava/lang/String;)Z
*/
JNIEXPORT jboolean JNICALL Java_com_sun_corba_se_internal_io_IIOPOutputStream_getBooleanField
(JNIEnv *env, jobject this, jobject obj, jclass clazz, jstring fieldName, jstring fieldSig)
{
const char* strFieldName = (*env)->GetStringUTFChars(env, fieldName, 0L);
const char* strFieldSig = (*env)->GetStringUTFChars(env, fieldSig, 0L);
jfieldID fieldID = (*env)->GetFieldID(env, clazz, strFieldName, strFieldSig);
(*env)->ReleaseStringUTFChars(env, fieldName, strFieldName);
(*env)->ReleaseStringUTFChars(env, fieldSig, strFieldSig);
return (*env)->GetBooleanField(env, obj, fieldID);
}
/*
* Class: com_sun_corba_se_internal_io_IIOPOutputStream
* Method: getByteField
* Signature: (Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/String;Ljava/lang/String;)B
*/
JNIEXPORT jbyte JNICALL Java_com_sun_corba_se_internal_io_IIOPOutputStream_getByteField
(JNIEnv *env, jobject this, jobject obj, jclass clazz, jstring fieldName, jstring fieldSig)
{
const char* strFieldName = (*env)->GetStringUTFChars(env, fieldName, 0L);
const char* strFieldSig = (*env)->GetStringUTFChars(env, fieldSig, 0L);
jfieldID fieldID = (*env)->GetFieldID(env, clazz, strFieldName, strFieldSig);
(*env)->ReleaseStringUTFChars(env, fieldName, strFieldName);
(*env)->ReleaseStringUTFChars(env, fieldSig, strFieldSig);
return (*env)->GetByteField(env, obj, fieldID);
}
/*
* Class: com_sun_corba_se_internal_io_IIOPOutputStream
* Method: getCharField
* Signature: (Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/String;Ljava/lang/String;)C
*/
JNIEXPORT jchar JNICALL Java_com_sun_corba_se_internal_io_IIOPOutputStream_getCharField
(JNIEnv *env, jobject this, jobject obj, jclass clazz, jstring fieldName, jstring fieldSig)
{
const char* strFieldName = (*env)->GetStringUTFChars(env, fieldName, 0L);
const char* strFieldSig = (*env)->GetStringUTFChars(env, fieldSig, 0L);
jfieldID fieldID = (*env)->GetFieldID(env, clazz, strFieldName, strFieldSig);
(*env)->ReleaseStringUTFChars(env, fieldName, strFieldName);
(*env)->ReleaseStringUTFChars(env, fieldSig, strFieldSig);
return (*env)->GetCharField(env, obj, fieldID);
}
/*
* Class: com_sun_corba_se_internal_io_IIOPOutputStream
* Method: getShortField
* Signature: (Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/String;Ljava/lang/String;)S
*/
JNIEXPORT jshort JNICALL Java_com_sun_corba_se_internal_io_IIOPOutputStream_getShortField
(JNIEnv *env, jobject this, jobject obj, jclass clazz, jstring fieldName, jstring fieldSig)
{
const char* strFieldName = (*env)->GetStringUTFChars(env, fieldName, 0L);
const char* strFieldSig = (*env)->GetStringUTFChars(env, fieldSig, 0L);
jfieldID fieldID = (*env)->GetFieldID(env, clazz, strFieldName, strFieldSig);
(*env)->ReleaseStringUTFChars(env, fieldName, strFieldName);
(*env)->ReleaseStringUTFChars(env, fieldSig, strFieldSig);
return (*env)->GetShortField(env, obj, fieldID);
}
/*
* Class: com_sun_corba_se_internal_io_IIOPOutputStream
* Method: getIntField
* Signature: (Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/String;Ljava/lang/String;)I
*/
JNIEXPORT jint JNICALL Java_com_sun_corba_se_internal_io_IIOPOutputStream_getIntField
(JNIEnv *env, jobject this, jobject obj, jclass clazz, jstring fieldName, jstring fieldSig)
{
const char* strFieldName = (*env)->GetStringUTFChars(env, fieldName, 0L);
const char* strFieldSig = (*env)->GetStringUTFChars(env, fieldSig, 0L);
jfieldID fieldID = (*env)->GetFieldID(env, clazz, strFieldName, strFieldSig);
(*env)->ReleaseStringUTFChars(env, fieldName, strFieldName);
(*env)->ReleaseStringUTFChars(env, fieldSig, strFieldSig);
return (*env)->GetIntField(env, obj, fieldID);
}
/*
* Class: com_sun_corba_se_internal_io_IIOPOutputStream
* Method: getLongField
* Signature: (Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/String;Ljava/lang/String;)J
*/
JNIEXPORT jlong JNICALL Java_com_sun_corba_se_internal_io_IIOPOutputStream_getLongField
(JNIEnv *env, jobject this, jobject obj, jclass clazz, jstring fieldName, jstring fieldSig)
{
const char* strFieldName = (*env)->GetStringUTFChars(env, fieldName, 0L);
const char* strFieldSig = (*env)->GetStringUTFChars(env, fieldSig, 0L);
jfieldID fieldID = (*env)->GetFieldID(env, clazz, strFieldName, strFieldSig);
(*env)->ReleaseStringUTFChars(env, fieldName, strFieldName);
(*env)->ReleaseStringUTFChars(env, fieldSig, strFieldSig);
return (*env)->GetLongField(env, obj, fieldID);
}
/*
* Class: com_sun_corba_se_internal_io_IIOPOutputStream
* Method: getFloatField
* Signature: (Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/String;Ljava/lang/String;)F
*/
JNIEXPORT jfloat JNICALL Java_com_sun_corba_se_internal_io_IIOPOutputStream_getFloatField
(JNIEnv *env, jobject this, jobject obj, jclass clazz, jstring fieldName, jstring fieldSig)
{
const char* strFieldName = (*env)->GetStringUTFChars(env, fieldName, 0L);
const char* strFieldSig = (*env)->GetStringUTFChars(env, fieldSig, 0L);
jfieldID fieldID = (*env)->GetFieldID(env, clazz, strFieldName, strFieldSig);
(*env)->ReleaseStringUTFChars(env, fieldName, strFieldName);
(*env)->ReleaseStringUTFChars(env, fieldSig, strFieldSig);
return (*env)->GetFloatField(env, obj, fieldID);
}
/*
* Class: com_sun_corba_se_internal_io_IIOPOutputStream
* Method: getDoubleField
* Signature: (Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/String;Ljava/lang/String;)D
*/
JNIEXPORT jdouble JNICALL Java_com_sun_corba_se_internal_io_IIOPOutputStream_getDoubleField
(JNIEnv *env, jobject this, jobject obj, jclass clazz, jstring fieldName, jstring fieldSig)
{
const char* strFieldName = (*env)->GetStringUTFChars(env, fieldName, 0L);
const char* strFieldSig = (*env)->GetStringUTFChars(env, fieldSig, 0L);
jfieldID fieldID = (*env)->GetFieldID(env, clazz, strFieldName, strFieldSig);
(*env)->ReleaseStringUTFChars(env, fieldName, strFieldName);
(*env)->ReleaseStringUTFChars(env, fieldSig, strFieldSig);
return (*env)->GetDoubleField(env, obj, fieldID);
}
/*
* Class: com_sun_corba_se_internal_io_IIOPInputStream
* Method: setObjectField
* Signature: (Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/String;Ljava/lang/String;Ljava/lang/Object;)V
*/
JNIEXPORT void JNICALL Java_com_sun_corba_se_internal_io_IIOPInputStream_setObjectField
(JNIEnv *env, jobject this, jobject obj, jclass clazz, jstring fieldName, jstring fieldSig, jobject v)
{
const char* strFieldName = (*env)->GetStringUTFChars(env, fieldName, 0L);
const char* strFieldSig = (*env)->GetStringUTFChars(env, fieldSig, 0L);
jfieldID fieldID = (*env)->GetFieldID(env, clazz, strFieldName, strFieldSig);
(*env)->ReleaseStringUTFChars(env, fieldName, strFieldName);
(*env)->ReleaseStringUTFChars(env, fieldSig, strFieldSig);
(*env)->SetObjectField(env, obj, fieldID, v);
}
/*
* Class: com_sun_corba_se_internal_io_IIOPInputStream
* Method: setBooleanField
* Signature: (Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/String;Ljava/lang/String;Z)V
*/
JNIEXPORT void JNICALL Java_com_sun_corba_se_internal_io_IIOPInputStream_setBooleanField
(JNIEnv *env, jobject this, jobject obj, jclass clazz, jstring fieldName, jstring fieldSig, jboolean v)
{
const char* strFieldName = (*env)->GetStringUTFChars(env, fieldName, 0L);
const char* strFieldSig = (*env)->GetStringUTFChars(env, fieldSig, 0L);
jfieldID fieldID = (*env)->GetFieldID(env, clazz, strFieldName, strFieldSig);
(*env)->ReleaseStringUTFChars(env, fieldName, strFieldName);
(*env)->ReleaseStringUTFChars(env, fieldSig, strFieldSig);
(*env)->SetBooleanField(env, obj, fieldID, v);
}
/*
* Class: com_sun_corba_se_internal_io_IIOPInputStream
* Method: setByteField
* Signature: (Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/String;Ljava/lang/String;B)V
*/
JNIEXPORT void JNICALL Java_com_sun_corba_se_internal_io_IIOPInputStream_setByteField
(JNIEnv *env, jobject this, jobject obj, jclass clazz, jstring fieldName, jstring fieldSig, jbyte v)
{
const char* strFieldName = (*env)->GetStringUTFChars(env, fieldName, 0L);
const char* strFieldSig = (*env)->GetStringUTFChars(env, fieldSig, 0L);
jfieldID fieldID = (*env)->GetFieldID(env, clazz, strFieldName, strFieldSig);
(*env)->ReleaseStringUTFChars(env, fieldName, strFieldName);
(*env)->ReleaseStringUTFChars(env, fieldSig, strFieldSig);
(*env)->SetByteField(env, obj, fieldID, v);
}
/*
* Class: com_sun_corba_se_internal_io_IIOPInputStream
* Method: setCharField
* Signature: (Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/String;Ljava/lang/String;C)V
*/
JNIEXPORT void JNICALL Java_com_sun_corba_se_internal_io_IIOPInputStream_setCharField
(JNIEnv *env, jobject this, jobject obj, jclass clazz, jstring fieldName, jstring fieldSig, jchar v)
{
const char* strFieldName = (*env)->GetStringUTFChars(env, fieldName, 0L);
const char* strFieldSig = (*env)->GetStringUTFChars(env, fieldSig, 0L);
jfieldID fieldID = (*env)->GetFieldID(env, clazz, strFieldName, strFieldSig);
(*env)->ReleaseStringUTFChars(env, fieldName, strFieldName);
(*env)->ReleaseStringUTFChars(env, fieldSig, strFieldSig);
(*env)->SetCharField(env, obj, fieldID, v);
}
/*
* Class: com_sun_corba_se_internal_io_IIOPInputStream
* Method: setShortField
* Signature: (Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/String;Ljava/lang/String;S)V
*/
JNIEXPORT void JNICALL Java_com_sun_corba_se_internal_io_IIOPInputStream_setShortField
(JNIEnv *env, jobject this, jobject obj, jclass clazz, jstring fieldName, jstring fieldSig, jshort v)
{
const char* strFieldName = (*env)->GetStringUTFChars(env, fieldName, 0L);
const char* strFieldSig = (*env)->GetStringUTFChars(env, fieldSig, 0L);
jfieldID fieldID = (*env)->GetFieldID(env, clazz, strFieldName, strFieldSig);
(*env)->ReleaseStringUTFChars(env, fieldName, strFieldName);
(*env)->ReleaseStringUTFChars(env, fieldSig, strFieldSig);
(*env)->SetShortField(env, obj, fieldID, v);
}
/*
* Class: com_sun_corba_se_internal_io_IIOPInputStream
* Method: setIntField
* Signature: (Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/String;Ljava/lang/String;I)V
*/
JNIEXPORT void JNICALL Java_com_sun_corba_se_internal_io_IIOPInputStream_setIntField
(JNIEnv *env, jobject this, jobject obj, jclass clazz, jstring fieldName, jstring fieldSig, jint v)
{
const char* strFieldName = (*env)->GetStringUTFChars(env, fieldName, 0L);
const char* strFieldSig = (*env)->GetStringUTFChars(env, fieldSig, 0L);
jfieldID fieldID = (*env)->GetFieldID(env, clazz, strFieldName, strFieldSig);
(*env)->ReleaseStringUTFChars(env, fieldName, strFieldName);
(*env)->ReleaseStringUTFChars(env, fieldSig, strFieldSig);
(*env)->SetIntField(env, obj, fieldID, v);
}
/*
* Class: com_sun_corba_se_internal_io_IIOPInputStream
* Method: setLongField
* Signature: (Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/String;Ljava/lang/String;J)V
*/
JNIEXPORT void JNICALL Java_com_sun_corba_se_internal_io_IIOPInputStream_setLongField
(JNIEnv *env, jobject this, jobject obj, jclass clazz, jstring fieldName, jstring fieldSig, jlong v)
{
const char* strFieldName = (*env)->GetStringUTFChars(env, fieldName, 0L);
const char* strFieldSig = (*env)->GetStringUTFChars(env, fieldSig, 0L);
jfieldID fieldID = (*env)->GetFieldID(env, clazz, strFieldName, strFieldSig);
(*env)->ReleaseStringUTFChars(env, fieldName, strFieldName);
(*env)->ReleaseStringUTFChars(env, fieldSig, strFieldSig);
(*env)->SetLongField(env, obj, fieldID, v);
}
/*
* Class: com_sun_corba_se_internal_io_IIOPInputStream
* Method: setFloatField
* Signature: (Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/String;Ljava/lang/String;F)V
*/
JNIEXPORT void JNICALL Java_com_sun_corba_se_internal_io_IIOPInputStream_setFloatField
(JNIEnv *env, jobject this, jobject obj, jclass clazz, jstring fieldName, jstring fieldSig, jfloat v)
{
const char* strFieldName = (*env)->GetStringUTFChars(env, fieldName, 0L);
const char* strFieldSig = (*env)->GetStringUTFChars(env, fieldSig, 0L);
jfieldID fieldID = (*env)->GetFieldID(env, clazz, strFieldName, strFieldSig);
(*env)->ReleaseStringUTFChars(env, fieldName, strFieldName);
(*env)->ReleaseStringUTFChars(env, fieldSig, strFieldSig);
(*env)->SetFloatField(env, obj, fieldID, v);
}
/*
* Class: com_sun_corba_se_internal_io_IIOPInputStream
* Method: setDoubleField
* Signature: (Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/String;Ljava/lang/String;D)V
*/
JNIEXPORT void JNICALL Java_com_sun_corba_se_internal_io_IIOPInputStream_setDoubleField
(JNIEnv *env, jobject this, jobject obj, jclass clazz, jstring fieldName, jstring fieldSig, jdouble v)
{
const char* strFieldName = (*env)->GetStringUTFChars(env, fieldName, 0L);
const char* strFieldSig = (*env)->GetStringUTFChars(env, fieldSig, 0L);
jfieldID fieldID = (*env)->GetFieldID(env, clazz, strFieldName, strFieldSig);
(*env)->ReleaseStringUTFChars(env, fieldName, strFieldName);
(*env)->ReleaseStringUTFChars(env, fieldSig, strFieldSig);
(*env)->SetDoubleField(env, obj, fieldID, v);
}
/*
* Class: com_sun_corba_se_internal_util_JDKClassLoader
* Method: specialLoadClass
* Signature: (Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/String;)Ljava/lang/Class;
*/
JNIEXPORT jclass JNICALL Java_com_sun_corba_se_internal_util_JDKClassLoader_specialLoadClass
(JNIEnv *env, jclass this, jobject target, jclass cls, jstring clsName)
{
jthrowable exc;
jclass streamTargetCls;
jmethodID mid;
jclass result;
streamTargetCls = (*env)->FindClass(env, "java/io/ObjectInputStream");
mid = (*env)->GetMethodID(env, streamTargetCls, "loadClass0", "(Ljava/lang/Class;Ljava/lang/String;)Ljava/lang/Class;");
if (mid == 0)
return 0L;
result = (jclass) (*env)->CallNonvirtualObjectMethod(env, target, streamTargetCls, mid, cls, clsName);
exc = (*env)->ExceptionOccurred(env);
if (exc) {
return 0L;
}
return result;
}
/*
* Class: com_sun_corba_se_internal_io_IIOPOutputStream
* Method: getObjectFieldOpt
* Signature: (Ljava/lang/Object;J)Ljava/lang/Object;
*/
JNIEXPORT jobject JNICALL Java_com_sun_corba_se_internal_io_IIOPOutputStream_getObjectFieldOpt
(JNIEnv *env, jobject this, jobject obj, jlong fieldID)
{
return (*env)->GetObjectField(env, obj, (jfieldID)fieldID);
}
/*
* Class: com_sun_corba_se_internal_io_IIOPOutputStream
* Method: getBooleanFieldOpt
* Signature: (Ljava/lang/Object;J)Z
*/
JNIEXPORT jboolean JNICALL Java_com_sun_corba_se_internal_io_IIOPOutputStream_getBooleanFieldOpt
(JNIEnv *env, jobject this, jobject obj, jlong fieldID)
{
return (*env)->GetBooleanField(env, obj, (jfieldID)fieldID);
}
/*
* Class: com_sun_corba_se_internal_io_IIOPOutputStream
* Method: getByteFieldOpt
* Signature: (Ljava/lang/Object;J)B
*/
JNIEXPORT jbyte JNICALL Java_com_sun_corba_se_internal_io_IIOPOutputStream_getByteFieldOpt
(JNIEnv *env, jobject this, jobject obj, jlong fieldID)
{
return (*env)->GetByteField(env, obj, (jfieldID)fieldID);
}
/*
* Class: com_sun_corba_se_internal_io_IIOPOutputStream
* Method: getCharFieldOpt
* Signature: (Ljava/lang/Object;J)C
*/
JNIEXPORT jchar JNICALL Java_com_sun_corba_se_internal_io_IIOPOutputStream_getCharFieldOpt
(JNIEnv *env, jobject this, jobject obj, jlong fieldID)
{
return (*env)->GetCharField(env, obj, (jfieldID)fieldID);
}
/*
* Class: com_sun_corba_se_internal_io_IIOPOutputStream
* Method: getShortFieldOpt
* Signature: (Ljava/lang/Object;J)S
*/
JNIEXPORT jshort JNICALL Java_com_sun_corba_se_internal_io_IIOPOutputStream_getShortFieldOpt
(JNIEnv *env, jobject this, jobject obj, jlong fieldID)
{
return (*env)->GetShortField(env, obj, (jfieldID)fieldID);
}
/*
* Class: com_sun_corba_se_internal_io_IIOPOutputStream
* Method: getIntFieldOpt
* Signature: (Ljava/lang/Object;J)I
*/
JNIEXPORT jint JNICALL Java_com_sun_corba_se_internal_io_IIOPOutputStream_getIntFieldOpt
(JNIEnv *env, jobject this, jobject obj, jlong fieldID)
{
return (*env)->GetIntField(env, obj, (jfieldID)fieldID);
}
/*
* Class: com_sun_corba_se_internal_io_IIOPOutputStream
* Method: getLongFieldOpt
* Signature: (Ljava/lang/Object;J)J
*/
JNIEXPORT jlong JNICALL Java_com_sun_corba_se_internal_io_IIOPOutputStream_getLongFieldOpt
(JNIEnv *env, jobject this, jobject obj, jlong fieldID)
{
return (*env)->GetLongField(env, obj, (jfieldID)fieldID);
}
/*
* Class: com_sun_corba_se_internal_io_IIOPOutputStream
* Method: getFloatFieldOpt
* Signature: (Ljava/lang/Object;J)F
*/
JNIEXPORT jfloat JNICALL Java_com_sun_corba_se_internal_io_IIOPOutputStream_getFloatFieldOpt
(JNIEnv *env, jobject this, jobject obj, jlong fieldID)
{
return (*env)->GetFloatField(env, obj, (jfieldID)fieldID);
}
/*
* Class: com_sun_corba_se_internal_io_IIOPOutputStream
* Method: getDoubleFieldOpt
* Signature: (Ljava/lang/Object;J)D
*/
JNIEXPORT jdouble JNICALL Java_com_sun_corba_se_internal_io_IIOPOutputStream_getDoubleFieldOpt
(JNIEnv *env, jobject this, jobject obj, jlong fieldID)
{
return (*env)->GetDoubleField(env, obj, (jfieldID)fieldID);
}
/*
* Class: com_sun_corba_se_internal_io_IIOPInputStream
* Method: setObjectFieldOpt
* Signature: (Ljava/lang/Object;JLjava/lang/Object;)V
*/
JNIEXPORT void JNICALL Java_com_sun_corba_se_internal_io_IIOPInputStream_setObjectFieldOpt
(JNIEnv *env, jobject this, jobject obj, jlong fieldID, jobject v)
{
(*env)->SetObjectField(env, obj, (jfieldID)fieldID, v);
}
/*
* Class: com_sun_corba_se_internal_io_IIOPInputStream
* Method: setBooleanFieldOpt
* Signature: (Ljava/lang/Object;JZ)V
*/
JNIEXPORT void JNICALL Java_com_sun_corba_se_internal_io_IIOPInputStream_setBooleanFieldOpt
(JNIEnv *env, jobject this, jobject obj, jlong fieldID, jboolean v)
{
(*env)->SetBooleanField(env, obj, (jfieldID)fieldID, v);
}
/*
* Class: com_sun_corba_se_internal_io_IIOPInputStream
* Method: setByteFieldOpt
* Signature: (Ljava/lang/Object;JB)V
*/
JNIEXPORT void JNICALL Java_com_sun_corba_se_internal_io_IIOPInputStream_setByteFieldOpt
(JNIEnv *env, jobject this, jobject obj, jlong fieldID, jbyte v)
{
(*env)->SetByteField(env, obj, (jfieldID)fieldID, v);
}
/*
* Class: com_sun_corba_se_internal_io_IIOPInputStream
* Method: setCharFieldOpt
* Signature: (Ljava/lang/Object;JC)V
*/
JNIEXPORT void JNICALL Java_com_sun_corba_se_internal_io_IIOPInputStream_setCharFieldOpt
(JNIEnv *env, jobject this, jobject obj, jlong fieldID, jchar v)
{
(*env)->SetCharField(env, obj, (jfieldID)fieldID, v);
}
/*
* Class: com_sun_corba_se_internal_io_IIOPInputStream
* Method: setShortFieldOpt
* Signature: (Ljava/lang/Object;JS)V
*/
JNIEXPORT void JNICALL Java_com_sun_corba_se_internal_io_IIOPInputStream_setShortFieldOpt
(JNIEnv *env, jobject this, jobject obj, jlong fieldID, jshort v)
{
(*env)->SetShortField(env, obj, (jfieldID)fieldID, v);
}
/*
* Class: com_sun_corba_se_internal_io_IIOPInputStream
* Method: setIntFieldOpt
* Signature: (Ljava/lang/Object;JI)V
*/
JNIEXPORT void JNICALL Java_com_sun_corba_se_internal_io_IIOPInputStream_setIntFieldOpt
(JNIEnv *env, jobject this, jobject obj, jlong fieldID, jint v)
{
(*env)->SetIntField(env, obj, (jfieldID)fieldID, v);
}
/*
* Class: com_sun_corba_se_internal_io_IIOPInputStream
* Method: setLongFieldOpt
* Signature: (Ljava/lang/Object;JJ)V
*/
JNIEXPORT void JNICALL Java_com_sun_corba_se_internal_io_IIOPInputStream_setLongFieldOpt
(JNIEnv *env, jobject this, jobject obj, jlong fieldID, jlong v)
{
(*env)->SetLongField(env, obj, (jfieldID)fieldID, v);
}
/*
* Class: com_sun_corba_se_internal_io_IIOPInputStream
* Method: setFloatFieldOpt
* Signature: (Ljava/lang/Object;JF)V
*/
JNIEXPORT void JNICALL Java_com_sun_corba_se_internal_io_IIOPInputStream_setFloatFieldOpt
(JNIEnv *env, jobject this, jobject obj, jlong fieldID, jfloat v)
{
(*env)->SetFloatField(env, obj, (jfieldID)fieldID, v);
}
/*
* Class: com_sun_corba_se_internal_io_IIOPInputStream
* Method: setDoubleFieldOpt
* Signature: (Ljava/lang/Object;JD)V
*/
JNIEXPORT void JNICALL Java_com_sun_corba_se_internal_io_IIOPInputStream_setDoubleFieldOpt
(JNIEnv *env, jobject this, jobject obj, jlong fieldID, jdouble v)
{
(*env)->SetDoubleField(env, obj, (jfieldID)fieldID, v);
}
/*
* Class: com_sun_corba_se_internal_io_IIOPObjectStreamField
* Method: getFieldID
* Signature: (Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/String;Ljava/lang/String;)X
*/
JNIEXPORT jlong JNICALL Java_com_sun_corba_se_internal_io_ObjectStreamField_getFieldIDNative
(JNIEnv *env, jobject this, jclass clazz, jstring fieldName, jstring fieldSig)
{
const char* strFieldName = (*env)->GetStringUTFChars(env, fieldName, 0L);
const char* strFieldSig = (*env)->GetStringUTFChars(env, fieldSig, 0L);
jfieldID fieldID = (*env)->GetFieldID(env, clazz, strFieldName, strFieldSig);
(*env)->ReleaseStringUTFChars(env, fieldName, strFieldName);
(*env)->ReleaseStringUTFChars(env, fieldSig, strFieldSig);
return (jlong)fieldID;
}

View File

@ -93,3 +93,5 @@ e7e7e36ccdb5d56edd47e5744351202d38f3b7ad jdk7-b87
e0a1a502e402dbe7bf2d9102b4084a7e79a99a9b jdk7-b91
25f53b53aaa3eb8b2d5391a1e8de9a76ae1dd8a2 hs18-b03
3221d1887d30341bedfdac1dbf365ea41beff20f jdk7-b92
310cdbc355355a13aa53c002b6bde4a8c5ba67ff hs18-b04
9d865fc2f644fdd9a0108fd6407944ee610aadd9 jdk7-b93

View File

@ -28,5 +28,5 @@
# Don't put quotes (fail windows build).
HOTSPOT_VM_DISTRO=Java HotSpot(TM)
COMPANY_NAME=Sun Microsystems, Inc.
COMPANY_NAME=Oracle Corporation
PRODUCT_NAME=Java(TM) Platform SE

View File

@ -33,9 +33,9 @@
# Don't put quotes (fail windows build).
HOTSPOT_VM_COPYRIGHT=Copyright 2010
HS_MAJOR_VER=18
HS_MAJOR_VER=19
HS_MINOR_VER=0
HS_BUILD_NUMBER=04
HS_BUILD_NUMBER=01
JDK_MAJOR_VER=1
JDK_MINOR_VER=7

View File

@ -51,6 +51,8 @@ jprt.my.solaris.sparc.jdk6=solaris_sparc_5.8
jprt.my.solaris.sparc.jdk6perf=solaris_sparc_5.8
jprt.my.solaris.sparc.jdk6u10=solaris_sparc_5.8
jprt.my.solaris.sparc.jdk6u14=solaris_sparc_5.8
jprt.my.solaris.sparc.jdk6u18=solaris_sparc_5.8
jprt.my.solaris.sparc.jdk6u20=solaris_sparc_5.8
jprt.my.solaris.sparc=${jprt.my.solaris.sparc.${jprt.tools.default.release}}
jprt.my.solaris.sparcv9.jdk7=solaris_sparcv9_5.10
@ -58,6 +60,8 @@ jprt.my.solaris.sparcv9.jdk6=solaris_sparcv9_5.8
jprt.my.solaris.sparcv9.jdk6perf=solaris_sparcv9_5.8
jprt.my.solaris.sparcv9.jdk6u10=solaris_sparcv9_5.8
jprt.my.solaris.sparcv9.jdk6u14=solaris_sparcv9_5.8
jprt.my.solaris.sparcv9.jdk6u18=solaris_sparcv9_5.8
jprt.my.solaris.sparcv9.jdk6u20=solaris_sparcv9_5.8
jprt.my.solaris.sparcv9=${jprt.my.solaris.sparcv9.${jprt.tools.default.release}}
jprt.my.solaris.i586.jdk7=solaris_i586_5.10
@ -65,6 +69,8 @@ jprt.my.solaris.i586.jdk6=solaris_i586_5.8
jprt.my.solaris.i586.jdk6perf=solaris_i586_5.8
jprt.my.solaris.i586.jdk6u10=solaris_i586_5.8
jprt.my.solaris.i586.jdk6u14=solaris_i586_5.8
jprt.my.solaris.i586.jdk6u18=solaris_i586_5.8
jprt.my.solaris.i586.jdk6u20=solaris_i586_5.8
jprt.my.solaris.i586=${jprt.my.solaris.i586.${jprt.tools.default.release}}
jprt.my.solaris.x64.jdk7=solaris_x64_5.10
@ -72,6 +78,8 @@ jprt.my.solaris.x64.jdk6=solaris_x64_5.10
jprt.my.solaris.x64.jdk6perf=solaris_x64_5.10
jprt.my.solaris.x64.jdk6u10=solaris_x64_5.10
jprt.my.solaris.x64.jdk6u14=solaris_x64_5.10
jprt.my.solaris.x64.jdk6u18=solaris_x64_5.10
jprt.my.solaris.x64.jdk6u20=solaris_x64_5.10
jprt.my.solaris.x64=${jprt.my.solaris.x64.${jprt.tools.default.release}}
jprt.my.linux.i586.jdk7=linux_i586_2.6
@ -79,6 +87,8 @@ jprt.my.linux.i586.jdk6=linux_i586_2.4
jprt.my.linux.i586.jdk6perf=linux_i586_2.4
jprt.my.linux.i586.jdk6u10=linux_i586_2.4
jprt.my.linux.i586.jdk6u14=linux_i586_2.4
jprt.my.linux.i586.jdk6u18=linux_i586_2.4
jprt.my.linux.i586.jdk6u20=linux_i586_2.4
jprt.my.linux.i586=${jprt.my.linux.i586.${jprt.tools.default.release}}
jprt.my.linux.x64.jdk7=linux_x64_2.6
@ -86,6 +96,8 @@ jprt.my.linux.x64.jdk6=linux_x64_2.4
jprt.my.linux.x64.jdk6perf=linux_x64_2.4
jprt.my.linux.x64.jdk6u10=linux_x64_2.4
jprt.my.linux.x64.jdk6u14=linux_x64_2.4
jprt.my.linux.x64.jdk6u18=linux_x64_2.4
jprt.my.linux.x64.jdk6u20=linux_x64_2.4
jprt.my.linux.x64=${jprt.my.linux.x64.${jprt.tools.default.release}}
jprt.my.windows.i586.jdk7=windows_i586_5.0
@ -93,6 +105,8 @@ jprt.my.windows.i586.jdk6=windows_i586_5.0
jprt.my.windows.i586.jdk6perf=windows_i586_5.0
jprt.my.windows.i586.jdk6u10=windows_i586_5.0
jprt.my.windows.i586.jdk6u14=windows_i586_5.0
jprt.my.windows.i586.jdk6u18=windows_i586_5.0
jprt.my.windows.i586.jdk6u20=windows_i586_5.0
jprt.my.windows.i586=${jprt.my.windows.i586.${jprt.tools.default.release}}
jprt.my.windows.x64.jdk7=windows_x64_5.2
@ -100,6 +114,8 @@ jprt.my.windows.x64.jdk6=windows_x64_5.2
jprt.my.windows.x64.jdk6perf=windows_x64_5.2
jprt.my.windows.x64.jdk6u10=windows_x64_5.2
jprt.my.windows.x64.jdk6u14=windows_x64_5.2
jprt.my.windows.x64.jdk6u18=windows_x64_5.2
jprt.my.windows.x64.jdk6u20=windows_x64_5.2
jprt.my.windows.x64=${jprt.my.windows.x64.${jprt.tools.default.release}}
# Standard list of jprt build targets for this source tree

View File

@ -1,5 +1,5 @@
/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -2333,6 +2333,18 @@ void MacroAssembler::lcmp( Register Ra, Register Rb, Register Rresult) {
#endif
void MacroAssembler::load_sized_value(Address src, Register dst,
size_t size_in_bytes, bool is_signed) {
switch (size_in_bytes) {
case 8: ldx(src, dst); break;
case 4: ld( src, dst); break;
case 2: is_signed ? ldsh(src, dst) : lduh(src, dst); break;
case 1: is_signed ? ldsb(src, dst) : ldub(src, dst); break;
default: ShouldNotReachHere();
}
}
void MacroAssembler::float_cmp( bool is_float, int unordered_result,
FloatRegister Fa, FloatRegister Fb,
Register Rresult) {
@ -2625,40 +2637,103 @@ RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_ad
}
void MacroAssembler::regcon_inc_ptr( RegisterOrConstant& dest, RegisterOrConstant src, Register temp ) {
assert(dest.register_or_noreg() != G0, "lost side effect");
if ((src.is_constant() && src.as_constant() == 0) ||
(src.is_register() && src.as_register() == G0)) {
// do nothing
} else if (dest.is_register()) {
add(dest.as_register(), ensure_simm13_or_reg(src, temp), dest.as_register());
} else if (src.is_constant()) {
intptr_t res = dest.as_constant() + src.as_constant();
dest = RegisterOrConstant(res); // side effect seen by caller
RegisterOrConstant MacroAssembler::regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
assert(d.register_or_noreg() != G0, "lost side effect");
if ((s2.is_constant() && s2.as_constant() == 0) ||
(s2.is_register() && s2.as_register() == G0)) {
// Do nothing, just move value.
if (s1.is_register()) {
if (d.is_constant()) d = temp;
mov(s1.as_register(), d.as_register());
return d;
} else {
return s1;
}
}
if (s1.is_register()) {
assert_different_registers(s1.as_register(), temp);
if (d.is_constant()) d = temp;
andn(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
return d;
} else {
assert(temp != noreg, "cannot handle constant += register");
add(src.as_register(), ensure_simm13_or_reg(dest, temp), temp);
dest = RegisterOrConstant(temp); // side effect seen by caller
if (s2.is_register()) {
assert_different_registers(s2.as_register(), temp);
if (d.is_constant()) d = temp;
set(s1.as_constant(), temp);
andn(temp, s2.as_register(), d.as_register());
return d;
} else {
intptr_t res = s1.as_constant() & ~s2.as_constant();
return res;
}
}
}
void MacroAssembler::regcon_sll_ptr( RegisterOrConstant& dest, RegisterOrConstant src, Register temp ) {
assert(dest.register_or_noreg() != G0, "lost side effect");
if (!is_simm13(src.constant_or_zero()))
src = (src.as_constant() & 0xFF);
if ((src.is_constant() && src.as_constant() == 0) ||
(src.is_register() && src.as_register() == G0)) {
// do nothing
} else if (dest.is_register()) {
sll_ptr(dest.as_register(), src, dest.as_register());
} else if (src.is_constant()) {
intptr_t res = dest.as_constant() << src.as_constant();
dest = RegisterOrConstant(res); // side effect seen by caller
RegisterOrConstant MacroAssembler::regcon_inc_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
assert(d.register_or_noreg() != G0, "lost side effect");
if ((s2.is_constant() && s2.as_constant() == 0) ||
(s2.is_register() && s2.as_register() == G0)) {
// Do nothing, just move value.
if (s1.is_register()) {
if (d.is_constant()) d = temp;
mov(s1.as_register(), d.as_register());
return d;
} else {
return s1;
}
}
if (s1.is_register()) {
assert_different_registers(s1.as_register(), temp);
if (d.is_constant()) d = temp;
add(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
return d;
} else {
assert(temp != noreg, "cannot handle constant <<= register");
set(dest.as_constant(), temp);
sll_ptr(temp, src, temp);
dest = RegisterOrConstant(temp); // side effect seen by caller
if (s2.is_register()) {
assert_different_registers(s2.as_register(), temp);
if (d.is_constant()) d = temp;
add(s2.as_register(), ensure_simm13_or_reg(s1, temp), d.as_register());
return d;
} else {
intptr_t res = s1.as_constant() + s2.as_constant();
return res;
}
}
}
RegisterOrConstant MacroAssembler::regcon_sll_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
assert(d.register_or_noreg() != G0, "lost side effect");
if (!is_simm13(s2.constant_or_zero()))
s2 = (s2.as_constant() & 0xFF);
if ((s2.is_constant() && s2.as_constant() == 0) ||
(s2.is_register() && s2.as_register() == G0)) {
// Do nothing, just move value.
if (s1.is_register()) {
if (d.is_constant()) d = temp;
mov(s1.as_register(), d.as_register());
return d;
} else {
return s1;
}
}
if (s1.is_register()) {
assert_different_registers(s1.as_register(), temp);
if (d.is_constant()) d = temp;
sll_ptr(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
return d;
} else {
if (s2.is_register()) {
assert_different_registers(s2.as_register(), temp);
if (d.is_constant()) d = temp;
set(s1.as_constant(), temp);
sll_ptr(temp, s2.as_register(), d.as_register());
return d;
} else {
intptr_t res = s1.as_constant() << s2.as_constant();
return res;
}
}
}
@ -2708,8 +2783,8 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
// Adjust recv_klass by scaled itable_index, so we can free itable_index.
RegisterOrConstant itable_offset = itable_index;
regcon_sll_ptr(itable_offset, exact_log2(itableMethodEntry::size() * wordSize));
regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes());
itable_offset = regcon_sll_ptr(itable_index, exact_log2(itableMethodEntry::size() * wordSize), itable_offset);
itable_offset = regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes(), itable_offset);
add(recv_klass, ensure_simm13_or_reg(itable_offset, sethi_temp), recv_klass);
// for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
@ -2805,7 +2880,7 @@ void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
assert_different_registers(sub_klass, super_klass, temp_reg);
if (super_check_offset.is_register()) {
assert_different_registers(sub_klass, super_klass,
assert_different_registers(sub_klass, super_klass, temp_reg,
super_check_offset.as_register());
} else if (must_load_sco) {
assert(temp2_reg != noreg, "supply either a temp or a register offset");
@ -2855,6 +2930,8 @@ void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
// The super check offset is always positive...
lduw(super_klass, sco_offset, temp2_reg);
super_check_offset = RegisterOrConstant(temp2_reg);
// super_check_offset is register.
assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset.as_register());
}
ld_ptr(sub_klass, super_check_offset, temp_reg);
cmp(super_klass, temp_reg);
@ -3014,11 +3091,10 @@ void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
}
void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_reg,
Register temp_reg,
Label& wrong_method_type) {
if (UseCompressedOops) unimplemented("coop"); // field accesses must decode
assert_different_registers(mtype_reg, mh_reg, temp_reg);
// compare method type against that of the receiver
RegisterOrConstant mhtype_offset = delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg);
@ -3029,10 +3105,33 @@ void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_re
}
void MacroAssembler::jump_to_method_handle_entry(Register mh_reg, Register temp_reg) {
// A method handle has a "vmslots" field which gives the size of its
// argument list in JVM stack slots. This field is either located directly
// in every method handle, or else is indirectly accessed through the
// method handle's MethodType. This macro hides the distinction.
void MacroAssembler::load_method_handle_vmslots(Register vmslots_reg, Register mh_reg,
Register temp_reg) {
assert_different_registers(vmslots_reg, mh_reg, temp_reg);
if (UseCompressedOops) unimplemented("coop"); // field accesses must decode
// load mh.type.form.vmslots
if (java_dyn_MethodHandle::vmslots_offset_in_bytes() != 0) {
// hoist vmslots into every mh to avoid dependent load chain
ld( Address(mh_reg, delayed_value(java_dyn_MethodHandle::vmslots_offset_in_bytes, temp_reg)), vmslots_reg);
} else {
Register temp2_reg = vmslots_reg;
ld_ptr(Address(mh_reg, delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg)), temp2_reg);
ld_ptr(Address(temp2_reg, delayed_value(java_dyn_MethodType::form_offset_in_bytes, temp_reg)), temp2_reg);
ld( Address(temp2_reg, delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, temp_reg)), vmslots_reg);
}
}
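A minimal sketch of the same two-way lookup in plain C++; the struct and field names below are illustrative stand-ins for the offset-based loads above, not types from this patch.

// Illustrative stand-ins only; the shapes are assumed, mirroring the two load paths above.
struct MethodTypeFormSketch { int vmslots; };
struct MethodTypeSketch     { MethodTypeFormSketch* form; };
struct MethodHandleSketch   { int vmslots; MethodTypeSketch* type; };
static int load_vmslots_sketch(const MethodHandleSketch* mh, bool vmslots_hoisted) {
  if (vmslots_hoisted) {
    return mh->vmslots;               // single load: a copy is cached in every handle
  }
  return mh->type->form->vmslots;     // dependent load chain through the MethodType
}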
void MacroAssembler::jump_to_method_handle_entry(Register mh_reg, Register temp_reg, bool emit_delayed_nop) {
assert(mh_reg == G3_method_handle, "caller must put MH object in G3");
assert_different_registers(mh_reg, temp_reg);
if (UseCompressedOops) unimplemented("coop"); // field accesses must decode
// pick out the interpreted side of the handler
ld_ptr(mh_reg, delayed_value(java_dyn_MethodHandle::vmentry_offset_in_bytes, temp_reg), temp_reg);
@ -3043,17 +3142,18 @@ void MacroAssembler::jump_to_method_handle_entry(Register mh_reg, Register temp_
// for the various stubs which take control at this point,
// see MethodHandles::generate_method_handle_stub
// (Can any caller use this delay slot? If so, add an option for suppression.)
delayed()->nop();
// Some callers can fill the delay slot.
if (emit_delayed_nop) {
delayed()->nop();
}
}
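A hedged usage sketch of the new emit_delayed_nop flag, written as a stub-generator caller would; the delay-slot mov is a made-up placeholder, not code from this patch.

// Suppress the trailing nop and fill the branch delay slot ourselves.
__ jump_to_method_handle_entry(G3_method_handle, O1_scratch, /*emit_delayed_nop*/ false);
__ delayed()->mov(G0, O0);   // hypothetical delay-slot filler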
RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot,
int extra_slot_offset) {
// cf. TemplateTable::prepare_invoke(), if (load_receiver).
int stackElementSize = Interpreter::stackElementWords() * wordSize;
int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
assert(offset1 - offset == stackElementSize, "correct arithmetic");
int stackElementSize = Interpreter::stackElementSize;
int offset = extra_slot_offset * stackElementSize;
if (arg_slot.is_constant()) {
offset += arg_slot.as_constant() * stackElementSize;
return offset;
@ -3067,6 +3167,11 @@ RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot,
}
Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
int extra_slot_offset) {
return Address(Gargs, argument_offset(arg_slot, extra_slot_offset));
}
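A worked example of the constant case, assuming an LP64 build where Interpreter::stackElementSize is 8 (an assumption, not stated in this hunk).

// Illustrative arithmetic only; 8-byte stack elements are assumed.
const int kStackElementSize = 8;
const int arg_slot          = 2;   // tos[2], counting back from the last argument
const int extra_slot_offset = 1;
const int offset = extra_slot_offset * kStackElementSize
                 + arg_slot          * kStackElementSize;   // == 24
// argument_address(2, 1) therefore resolves to Address(Gargs, 24).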
void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
Register temp_reg,
@ -4082,7 +4187,7 @@ static int EnqueueCodeSize = 128 DEBUG_ONLY( + 256); // Instructions?
// make it work.
static void check_index(int ind) {
assert(0 <= ind && ind <= 64*K && ((ind % oopSize) == 0),
"Invariants.")
"Invariants.");
}
static void generate_satb_log_enqueue(bool with_frame) {

View File

@ -1,5 +1,5 @@
/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1062,7 +1062,7 @@ class Assembler : public AbstractAssembler {
}
void assert_not_delayed(const char* msg) {
#ifdef CHECK_DELAY
assert_msg ( delay_state == no_delay, msg);
assert(delay_state == no_delay, msg);
#endif
}
@ -1380,24 +1380,25 @@ public:
// pp 181
void and3( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3 ) | rs1(s1) | rs2(s2) ); }
void and3( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void and3( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3 ) | rs1(s1) | rs2(s2) ); }
void and3( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void andcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
void andcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void andn( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3 ) | rs1(s1) | rs2(s2) ); }
void andn( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void andn( Register s1, RegisterOrConstant s2, Register d);
void andncc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
void andncc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void or3( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3 ) | rs1(s1) | rs2(s2) ); }
void or3( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void or3( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3 ) | rs1(s1) | rs2(s2) ); }
void or3( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void orcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
void orcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void orn( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3) | rs1(s1) | rs2(s2) ); }
void orn( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void orncc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
void orncc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void xor3( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3 ) | rs1(s1) | rs2(s2) ); }
void xor3( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void xor3( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3 ) | rs1(s1) | rs2(s2) ); }
void xor3( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void xorcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
void xorcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void xnor( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xnor_op3 ) | rs1(s1) | rs2(s2) ); }
@ -2026,8 +2027,8 @@ public:
inline void st_ptr(Register d, Register s1, ByteSize simm13a);
#endif
// ld_long will perform ld for 32 bit VM's and ldx for 64 bit VM's
// st_long will perform st for 32 bit VM's and stx for 64 bit VM's
// ld_long will perform ldd for 32 bit VM's and ldx for 64 bit VM's
// st_long will perform std for 32 bit VM's and stx for 64 bit VM's
inline void ld_long(Register s1, Register s2, Register d);
inline void ld_long(Register s1, int simm13a, Register d);
inline void ld_long(Register s1, RegisterOrConstant s2, Register d);
@ -2038,23 +2039,19 @@ public:
inline void st_long(Register d, const Address& a, int offset = 0);
// Helpers for address formation.
// They update the dest in place, whether it is a register or constant.
// They emit no code at all if src is a constant zero.
// If dest is a constant and src is a register, the temp argument
// is required, and becomes the result.
// If dest is a register and src is a non-simm13 constant,
// the temp argument is required, and is used to materialize the constant.
void regcon_inc_ptr( RegisterOrConstant& dest, RegisterOrConstant src,
Register temp = noreg );
void regcon_sll_ptr( RegisterOrConstant& dest, RegisterOrConstant src,
Register temp = noreg );
// - They emit only a move if s2 is a constant zero.
// - If dest is a constant and either s1 or s2 is a register, the temp argument is required and becomes the result.
// - If dest is a register and either s1 or s2 is a non-simm13 constant, the temp argument is required and used to materialize the constant.
RegisterOrConstant regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg);
RegisterOrConstant regcon_inc_ptr( RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg);
RegisterOrConstant regcon_sll_ptr( RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg);
RegisterOrConstant ensure_simm13_or_reg(RegisterOrConstant roc, Register Rtemp) {
guarantee(Rtemp != noreg, "constant offset overflow");
if (is_simm13(roc.constant_or_zero()))
return roc; // register or short constant
set(roc.as_constant(), Rtemp);
return RegisterOrConstant(Rtemp);
RegisterOrConstant ensure_simm13_or_reg(RegisterOrConstant src, Register temp) {
if (is_simm13(src.constant_or_zero()))
return src; // register or short constant
guarantee(temp != noreg, "constant offset overflow");
set(src.as_constant(), temp);
return temp;
}
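A hedged usage sketch of ensure_simm13_or_reg from inside MacroAssembler code; the constants and the choice of L0 as scratch are assumptions.

// A 13-bit signed immediate is returned unchanged; anything larger is
// materialized into the scratch register first.
RegisterOrConstant small = RegisterOrConstant(100);      // fits simm13
RegisterOrConstant large = RegisterOrConstant(100000);   // does not fit simm13
small = ensure_simm13_or_reg(small, L0);                 // still the constant 100
large = ensure_simm13_or_reg(large, L0);                 // becomes RegisterOrConstant(L0)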
// --------------------------------------------------
@ -2303,6 +2300,9 @@ public:
void lcmp( Register Ra, Register Rb, Register Rresult);
#endif
// Loading values by size and signed-ness
void load_sized_value(Address src, Register dst, size_t size_in_bytes, bool is_signed);
void float_cmp( bool is_float, int unordered_result,
FloatRegister Fa, FloatRegister Fb,
Register Rresult);
@ -2421,12 +2421,16 @@ public:
void check_method_handle_type(Register mtype_reg, Register mh_reg,
Register temp_reg,
Label& wrong_method_type);
void jump_to_method_handle_entry(Register mh_reg, Register temp_reg);
void load_method_handle_vmslots(Register vmslots_reg, Register mh_reg,
Register temp_reg);
void jump_to_method_handle_entry(Register mh_reg, Register temp_reg, bool emit_delayed_nop = true);
// offset relative to Gargs of argument at tos[arg_slot].
// (arg_slot == 0 means the last argument, not the first).
RegisterOrConstant argument_offset(RegisterOrConstant arg_slot,
int extra_slot_offset = 0);
// Address of Gargs and argument_offset.
Address argument_address(RegisterOrConstant arg_slot,
int extra_slot_offset = 0);
// Stack overflow checking

View File

@ -206,12 +206,17 @@ inline void Assembler::ld( Register s1, RegisterOrConstant s2, Register d) { ld
inline void Assembler::ldd( Register s1, RegisterOrConstant s2, Register d) { ldd( Address(s1, s2), d); }
// form effective addresses this way:
inline void Assembler::add( Register s1, RegisterOrConstant s2, Register d, int offset) {
if (s2.is_register()) add(s1, s2.as_register(), d);
inline void Assembler::add(Register s1, RegisterOrConstant s2, Register d, int offset) {
if (s2.is_register()) add(s1, s2.as_register(), d);
else { add(s1, s2.as_constant() + offset, d); offset = 0; }
if (offset != 0) add(d, offset, d);
}
inline void Assembler::andn(Register s1, RegisterOrConstant s2, Register d) {
if (s2.is_register()) andn(s1, s2.as_register(), d);
else andn(s1, s2.as_constant(), d);
}
inline void Assembler::ldstub( Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldstub( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

View File

@ -1,5 +1,5 @@
/*
* Copyright 2002 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2002-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,7 +26,7 @@
// fail with a guarantee ("not enough space for interpreter generation");
// if too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI and TaggedStackInterpreter
// Max size with JVMTI
// QQQ this is probably way too large for c++ interpreter

View File

@ -620,7 +620,7 @@ bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
// stack frames shouldn't be much larger than max_stack elements
if (fp() - sp() > 1024 + m->max_stack()*Interpreter::stackElementSize()) {
if (fp() - sp() > 1024 + m->max_stack()*Interpreter::stackElementSize) {
return false;
}

View File

@ -50,7 +50,6 @@ void InterpreterMacroAssembler::compute_extra_locals_size_in_bytes(Register args
// Any changes should also be applied to CodeEmitter::emit_osr_entry().
assert_different_registers(args_size, locals_size);
// max_locals*2 for TAGS. Assumes that args_size has already been adjusted.
if (TaggedStackInterpreter) sll(locals_size, 1, locals_size);
subcc(locals_size, args_size, delta);// extra space for non-arguments locals in words
// Use br/mov combination because it works on both V8 and V9 and is
// faster.
@ -319,7 +318,7 @@ void InterpreterMacroAssembler::load_unaligned_double(Register r1, int offset, F
ldf(FloatRegisterImpl::D, r1, offset, d);
#else
ldf(FloatRegisterImpl::S, r1, offset, d);
ldf(FloatRegisterImpl::S, r1, offset + Interpreter::stackElementSize(), d->successor());
ldf(FloatRegisterImpl::S, r1, offset + Interpreter::stackElementSize, d->successor());
#endif
}
@ -330,10 +329,10 @@ void InterpreterMacroAssembler::store_unaligned_double(FloatRegister d, Register
#ifdef _LP64
stf(FloatRegisterImpl::D, d, r1, offset);
// store something more useful here
debug_only(stx(G0, r1, offset+Interpreter::stackElementSize());)
debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);)
#else
stf(FloatRegisterImpl::S, d, r1, offset);
stf(FloatRegisterImpl::S, d->successor(), r1, offset + Interpreter::stackElementSize());
stf(FloatRegisterImpl::S, d->successor(), r1, offset + Interpreter::stackElementSize);
#endif
}
@ -345,7 +344,7 @@ void InterpreterMacroAssembler::load_unaligned_long(Register r1, int offset, Reg
ldx(r1, offset, rd);
#else
ld(r1, offset, rd);
ld(r1, offset + Interpreter::stackElementSize(), rd->successor());
ld(r1, offset + Interpreter::stackElementSize, rd->successor());
#endif
}
@ -356,138 +355,62 @@ void InterpreterMacroAssembler::store_unaligned_long(Register l, Register r1, in
#ifdef _LP64
stx(l, r1, offset);
// store something more useful here
debug_only(stx(G0, r1, offset+Interpreter::stackElementSize());)
debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);)
#else
st(l, r1, offset);
st(l->successor(), r1, offset + Interpreter::stackElementSize());
st(l->successor(), r1, offset + Interpreter::stackElementSize);
#endif
}
#ifdef ASSERT
void InterpreterMacroAssembler::verify_stack_tag(frame::Tag t,
Register r,
Register scratch) {
if (TaggedStackInterpreter) {
Label ok, long_ok;
ld_ptr(Lesp, Interpreter::expr_tag_offset_in_bytes(0), r);
if (t == frame::TagCategory2) {
cmp(r, G0);
brx(Assembler::equal, false, Assembler::pt, long_ok);
delayed()->ld_ptr(Lesp, Interpreter::expr_tag_offset_in_bytes(1), r);
stop("stack long/double tag value bad");
bind(long_ok);
cmp(r, G0);
} else if (t == frame::TagValue) {
cmp(r, G0);
} else {
assert_different_registers(r, scratch);
mov(t, scratch);
cmp(r, scratch);
}
brx(Assembler::equal, false, Assembler::pt, ok);
delayed()->nop();
// Also compare if the stack value is zero, then the tag might
// not have been set coming from deopt.
ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), r);
cmp(r, G0);
brx(Assembler::equal, false, Assembler::pt, ok);
delayed()->nop();
stop("Stack tag value is bad");
bind(ok);
}
}
#endif // ASSERT
void InterpreterMacroAssembler::pop_i(Register r) {
assert_not_delayed();
// Uses destination register r for scratch
debug_only(verify_stack_tag(frame::TagValue, r));
ld(Lesp, Interpreter::expr_offset_in_bytes(0), r);
inc(Lesp, Interpreter::stackElementSize());
inc(Lesp, Interpreter::stackElementSize);
debug_only(verify_esp(Lesp));
}
void InterpreterMacroAssembler::pop_ptr(Register r, Register scratch) {
assert_not_delayed();
// Uses destination register r for scratch
debug_only(verify_stack_tag(frame::TagReference, r, scratch));
ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), r);
inc(Lesp, Interpreter::stackElementSize());
inc(Lesp, Interpreter::stackElementSize);
debug_only(verify_esp(Lesp));
}
void InterpreterMacroAssembler::pop_l(Register r) {
assert_not_delayed();
// Uses destination register r for scratch
debug_only(verify_stack_tag(frame::TagCategory2, r));
load_unaligned_long(Lesp, Interpreter::expr_offset_in_bytes(0), r);
inc(Lesp, 2*Interpreter::stackElementSize());
inc(Lesp, 2*Interpreter::stackElementSize);
debug_only(verify_esp(Lesp));
}
void InterpreterMacroAssembler::pop_f(FloatRegister f, Register scratch) {
assert_not_delayed();
debug_only(verify_stack_tag(frame::TagValue, scratch));
ldf(FloatRegisterImpl::S, Lesp, Interpreter::expr_offset_in_bytes(0), f);
inc(Lesp, Interpreter::stackElementSize());
inc(Lesp, Interpreter::stackElementSize);
debug_only(verify_esp(Lesp));
}
void InterpreterMacroAssembler::pop_d(FloatRegister f, Register scratch) {
assert_not_delayed();
debug_only(verify_stack_tag(frame::TagCategory2, scratch));
load_unaligned_double(Lesp, Interpreter::expr_offset_in_bytes(0), f);
inc(Lesp, 2*Interpreter::stackElementSize());
inc(Lesp, 2*Interpreter::stackElementSize);
debug_only(verify_esp(Lesp));
}
// (Note use register first, then decrement so dec can be done during store stall)
void InterpreterMacroAssembler::tag_stack(Register r) {
if (TaggedStackInterpreter) {
st_ptr(r, Lesp, Interpreter::tag_offset_in_bytes());
}
}
void InterpreterMacroAssembler::tag_stack(frame::Tag t, Register r) {
if (TaggedStackInterpreter) {
assert (frame::TagValue == 0, "TagValue must be zero");
if (t == frame::TagValue) {
st_ptr(G0, Lesp, Interpreter::tag_offset_in_bytes());
} else if (t == frame::TagCategory2) {
st_ptr(G0, Lesp, Interpreter::tag_offset_in_bytes());
// Tag next slot down too
st_ptr(G0, Lesp, -Interpreter::stackElementSize() + Interpreter::tag_offset_in_bytes());
} else {
assert_different_registers(r, O3);
mov(t, O3);
st_ptr(O3, Lesp, Interpreter::tag_offset_in_bytes());
}
}
}
void InterpreterMacroAssembler::push_i(Register r) {
assert_not_delayed();
debug_only(verify_esp(Lesp));
tag_stack(frame::TagValue, r);
st( r, Lesp, Interpreter::value_offset_in_bytes());
dec( Lesp, Interpreter::stackElementSize());
st(r, Lesp, 0);
dec(Lesp, Interpreter::stackElementSize);
}
void InterpreterMacroAssembler::push_ptr(Register r) {
assert_not_delayed();
tag_stack(frame::TagReference, r);
st_ptr( r, Lesp, Interpreter::value_offset_in_bytes());
dec( Lesp, Interpreter::stackElementSize());
}
void InterpreterMacroAssembler::push_ptr(Register r, Register tag) {
assert_not_delayed();
tag_stack(tag);
st_ptr(r, Lesp, Interpreter::value_offset_in_bytes());
dec( Lesp, Interpreter::stackElementSize());
st_ptr(r, Lesp, 0);
dec(Lesp, Interpreter::stackElementSize);
}
// remember: our convention for longs in SPARC is:
@ -497,33 +420,28 @@ void InterpreterMacroAssembler::push_ptr(Register r, Register tag) {
void InterpreterMacroAssembler::push_l(Register r) {
assert_not_delayed();
debug_only(verify_esp(Lesp));
tag_stack(frame::TagCategory2, r);
// Longs are in stored in memory-correct order, even if unaligned.
// and may be separated by stack tags.
int offset = -Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
// Longs are stored in memory-correct order, even if unaligned.
int offset = -Interpreter::stackElementSize;
store_unaligned_long(r, Lesp, offset);
dec(Lesp, 2 * Interpreter::stackElementSize());
dec(Lesp, 2 * Interpreter::stackElementSize);
}
void InterpreterMacroAssembler::push_f(FloatRegister f) {
assert_not_delayed();
debug_only(verify_esp(Lesp));
tag_stack(frame::TagValue, Otos_i);
stf(FloatRegisterImpl::S, f, Lesp, Interpreter::value_offset_in_bytes());
dec(Lesp, Interpreter::stackElementSize());
stf(FloatRegisterImpl::S, f, Lesp, 0);
dec(Lesp, Interpreter::stackElementSize);
}
void InterpreterMacroAssembler::push_d(FloatRegister d) {
assert_not_delayed();
debug_only(verify_esp(Lesp));
tag_stack(frame::TagCategory2, Otos_i);
// Longs are in stored in memory-correct order, even if unaligned.
// and may be separated by stack tags.
int offset = -Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
// Longs are stored in memory-correct order, even if unaligned.
int offset = -Interpreter::stackElementSize;
store_unaligned_double(d, Lesp, offset);
dec(Lesp, 2 * Interpreter::stackElementSize());
dec(Lesp, 2 * Interpreter::stackElementSize);
}
@ -561,30 +479,18 @@ void InterpreterMacroAssembler::pop(TosState state) {
}
// Tagged stack helpers for swap and dup
void InterpreterMacroAssembler::load_ptr_and_tag(int n, Register val,
Register tag) {
// Helpers for swap and dup
void InterpreterMacroAssembler::load_ptr(int n, Register val) {
ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(n), val);
if (TaggedStackInterpreter) {
ld_ptr(Lesp, Interpreter::expr_tag_offset_in_bytes(n), tag);
}
}
void InterpreterMacroAssembler::store_ptr_and_tag(int n, Register val,
Register tag) {
void InterpreterMacroAssembler::store_ptr(int n, Register val) {
st_ptr(val, Lesp, Interpreter::expr_offset_in_bytes(n));
if (TaggedStackInterpreter) {
st_ptr(tag, Lesp, Interpreter::expr_tag_offset_in_bytes(n));
}
}
void InterpreterMacroAssembler::load_receiver(Register param_count,
Register recv) {
sll(param_count, Interpreter::logStackElementSize(), param_count);
if (TaggedStackInterpreter) {
add(param_count, Interpreter::value_offset_in_bytes(), param_count); // get obj address
}
sll(param_count, Interpreter::logStackElementSize, param_count);
ld_ptr(Lesp, param_count, recv); // gets receiver Oop
}
@ -605,7 +511,6 @@ void InterpreterMacroAssembler::empty_expression_stack() {
// Compute max expression stack+register save area
lduh(Lmethod, in_bytes(methodOopDesc::max_stack_offset()), Gframe_size); // Load max stack.
if (TaggedStackInterpreter) sll ( Gframe_size, 1, Gframe_size); // max_stack * 2 for TAGS
add( Gframe_size, frame::memory_parameter_word_sp_offset, Gframe_size );
//
@ -814,22 +719,39 @@ void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(
}
void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register tmp, int bcp_offset) {
void InterpreterMacroAssembler::get_cache_index_at_bcp(Register cache, Register tmp,
int bcp_offset, bool giant_index) {
assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
if (!giant_index) {
get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
} else {
assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic");
get_4_byte_integer_at_bcp(bcp_offset, cache, tmp);
assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
xor3(tmp, -1, tmp); // convert to plain index
}
}
void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register tmp,
int bcp_offset, bool giant_index) {
assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
assert_different_registers(cache, tmp);
assert_not_delayed();
get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
// convert from field index to ConstantPoolCacheEntry index
// and from word index to byte offset
get_cache_index_at_bcp(cache, tmp, bcp_offset, giant_index);
// convert from field index to ConstantPoolCacheEntry index and from
// word index to byte offset
sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
add(LcpoolCache, tmp, cache);
}
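A minimal arithmetic illustration of the secondary-index decoding above; plain ints stand in for the cache index, nothing here is taken from the cpCache code itself.

// invokedynamic stores the secondary index as its bitwise complement,
// so xor with -1 (equivalently, ~) recovers the plain index.
int encoded = ~123;           // what the 4-byte operand holds (-124)
int decoded = encoded ^ -1;   // what xor3(tmp, -1, tmp) computes: 123
// matches the assert: decode_secondary_index(~123) == 123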
void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset) {
void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
int bcp_offset, bool giant_index) {
assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
assert_different_registers(cache, tmp);
assert_not_delayed();
assert(!giant_index,"NYI");
get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
// convert from field index to ConstantPoolCacheEntry index
// and from word index to byte offset
@ -1675,15 +1597,31 @@ void InterpreterMacroAssembler::profile_final_call(Register scratch) {
// Count a virtual call in the bytecodes.
void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
Register scratch) {
Register scratch,
bool receiver_can_be_null) {
if (ProfileInterpreter) {
Label profile_continue;
// If no method data exists, go to profile_continue.
test_method_data_pointer(profile_continue);
Label skip_receiver_profile;
if (receiver_can_be_null) {
Label not_null;
tst(receiver);
brx(Assembler::notZero, false, Assembler::pt, not_null);
delayed()->nop();
// We are making a call. Increment the count for null receiver.
increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
ba(false, skip_receiver_profile);
delayed()->nop();
bind(not_null);
}
// Record the receiver type.
record_klass_in_profile(receiver, scratch, true);
bind(skip_receiver_profile);
// The method data pointer needs to be updated to reflect the new target.
update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
@ -1985,51 +1923,11 @@ void InterpreterMacroAssembler::add_monitor_to_stack( bool stack_is_empty,
}
// Locals
#ifdef ASSERT
void InterpreterMacroAssembler::verify_local_tag(frame::Tag t,
Register base,
Register scratch,
int n) {
if (TaggedStackInterpreter) {
Label ok, long_ok;
// Use dst for scratch
assert_different_registers(base, scratch);
ld_ptr(base, Interpreter::local_tag_offset_in_bytes(n), scratch);
if (t == frame::TagCategory2) {
cmp(scratch, G0);
brx(Assembler::equal, false, Assembler::pt, long_ok);
delayed()->ld_ptr(base, Interpreter::local_tag_offset_in_bytes(n+1), scratch);
stop("local long/double tag value bad");
bind(long_ok);
// compare second half tag
cmp(scratch, G0);
} else if (t == frame::TagValue) {
cmp(scratch, G0);
} else {
assert_different_registers(O3, base, scratch);
mov(t, O3);
cmp(scratch, O3);
}
brx(Assembler::equal, false, Assembler::pt, ok);
delayed()->nop();
// Also compare if the local value is zero, then the tag might
// not have been set coming from deopt.
ld_ptr(base, Interpreter::local_offset_in_bytes(n), scratch);
cmp(scratch, G0);
brx(Assembler::equal, false, Assembler::pt, ok);
delayed()->nop();
stop("Local tag value is bad");
bind(ok);
}
}
#endif // ASSERT
void InterpreterMacroAssembler::access_local_ptr( Register index, Register dst ) {
assert_not_delayed();
sll(index, Interpreter::logStackElementSize(), index);
sll(index, Interpreter::logStackElementSize, index);
sub(Llocals, index, index);
debug_only(verify_local_tag(frame::TagReference, index, dst));
ld_ptr(index, Interpreter::value_offset_in_bytes(), dst);
ld_ptr(index, 0, dst);
// Note: index must hold the effective address--the iinc template uses it
}
@ -2037,27 +1935,24 @@ void InterpreterMacroAssembler::access_local_ptr( Register index, Register dst )
void InterpreterMacroAssembler::access_local_returnAddress(Register index,
Register dst ) {
assert_not_delayed();
sll(index, Interpreter::logStackElementSize(), index);
sll(index, Interpreter::logStackElementSize, index);
sub(Llocals, index, index);
debug_only(verify_local_tag(frame::TagValue, index, dst));
ld_ptr(index, Interpreter::value_offset_in_bytes(), dst);
ld_ptr(index, 0, dst);
}
void InterpreterMacroAssembler::access_local_int( Register index, Register dst ) {
assert_not_delayed();
sll(index, Interpreter::logStackElementSize(), index);
sll(index, Interpreter::logStackElementSize, index);
sub(Llocals, index, index);
debug_only(verify_local_tag(frame::TagValue, index, dst));
ld(index, Interpreter::value_offset_in_bytes(), dst);
ld(index, 0, dst);
// Note: index must hold the effective address--the iinc template uses it
}
void InterpreterMacroAssembler::access_local_long( Register index, Register dst ) {
assert_not_delayed();
sll(index, Interpreter::logStackElementSize(), index);
sll(index, Interpreter::logStackElementSize, index);
sub(Llocals, index, index);
debug_only(verify_local_tag(frame::TagCategory2, index, dst));
// First half stored at index n+1 (which grows down from Llocals[n])
load_unaligned_long(index, Interpreter::local_offset_in_bytes(1), dst);
}
@ -2065,18 +1960,16 @@ void InterpreterMacroAssembler::access_local_long( Register index, Register dst
void InterpreterMacroAssembler::access_local_float( Register index, FloatRegister dst ) {
assert_not_delayed();
sll(index, Interpreter::logStackElementSize(), index);
sll(index, Interpreter::logStackElementSize, index);
sub(Llocals, index, index);
debug_only(verify_local_tag(frame::TagValue, index, G1_scratch));
ldf(FloatRegisterImpl::S, index, Interpreter::value_offset_in_bytes(), dst);
ldf(FloatRegisterImpl::S, index, 0, dst);
}
void InterpreterMacroAssembler::access_local_double( Register index, FloatRegister dst ) {
assert_not_delayed();
sll(index, Interpreter::logStackElementSize(), index);
sll(index, Interpreter::logStackElementSize, index);
sub(Llocals, index, index);
debug_only(verify_local_tag(frame::TagCategory2, index, G1_scratch));
load_unaligned_double(index, Interpreter::local_offset_in_bytes(1), dst);
}
@ -2102,94 +1995,60 @@ void InterpreterMacroAssembler::check_for_regarea_stomp(Register Rindex, int off
}
#endif // ASSERT
void InterpreterMacroAssembler::tag_local(frame::Tag t,
Register base,
Register src,
int n) {
if (TaggedStackInterpreter) {
// have to store zero because local slots can be reused (rats!)
if (t == frame::TagValue) {
st_ptr(G0, base, Interpreter::local_tag_offset_in_bytes(n));
} else if (t == frame::TagCategory2) {
st_ptr(G0, base, Interpreter::local_tag_offset_in_bytes(n));
st_ptr(G0, base, Interpreter::local_tag_offset_in_bytes(n+1));
} else {
// assert that we don't stomp the value in 'src'
// O3 is arbitrary because it's not used.
assert_different_registers(src, base, O3);
mov( t, O3);
st_ptr(O3, base, Interpreter::local_tag_offset_in_bytes(n));
}
}
}
void InterpreterMacroAssembler::store_local_int( Register index, Register src ) {
assert_not_delayed();
sll(index, Interpreter::logStackElementSize(), index);
sll(index, Interpreter::logStackElementSize, index);
sub(Llocals, index, index);
debug_only(check_for_regarea_stomp(index, Interpreter::value_offset_in_bytes(), FP, G1_scratch, G4_scratch);)
tag_local(frame::TagValue, index, src);
st(src, index, Interpreter::value_offset_in_bytes());
debug_only(check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);)
st(src, index, 0);
}
void InterpreterMacroAssembler::store_local_ptr( Register index, Register src,
Register tag ) {
void InterpreterMacroAssembler::store_local_ptr( Register index, Register src ) {
assert_not_delayed();
sll(index, Interpreter::logStackElementSize(), index);
sll(index, Interpreter::logStackElementSize, index);
sub(Llocals, index, index);
#ifdef ASSERT
check_for_regarea_stomp(index, Interpreter::value_offset_in_bytes(), FP, G1_scratch, G4_scratch);
#endif
st_ptr(src, index, Interpreter::value_offset_in_bytes());
// Store tag register directly
if (TaggedStackInterpreter) {
st_ptr(tag, index, Interpreter::tag_offset_in_bytes());
}
#ifdef ASSERT
check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);
#endif
st_ptr(src, index, 0);
}
void InterpreterMacroAssembler::store_local_ptr( int n, Register src,
Register tag ) {
st_ptr(src, Llocals, Interpreter::local_offset_in_bytes(n));
if (TaggedStackInterpreter) {
st_ptr(tag, Llocals, Interpreter::local_tag_offset_in_bytes(n));
}
void InterpreterMacroAssembler::store_local_ptr( int n, Register src ) {
st_ptr(src, Llocals, Interpreter::local_offset_in_bytes(n));
}
void InterpreterMacroAssembler::store_local_long( Register index, Register src ) {
assert_not_delayed();
sll(index, Interpreter::logStackElementSize(), index);
sll(index, Interpreter::logStackElementSize, index);
sub(Llocals, index, index);
#ifdef ASSERT
#ifdef ASSERT
check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch);
#endif
tag_local(frame::TagCategory2, index, src);
#endif
store_unaligned_long(src, index, Interpreter::local_offset_in_bytes(1)); // which is n+1
}
void InterpreterMacroAssembler::store_local_float( Register index, FloatRegister src ) {
assert_not_delayed();
sll(index, Interpreter::logStackElementSize(), index);
sll(index, Interpreter::logStackElementSize, index);
sub(Llocals, index, index);
#ifdef ASSERT
check_for_regarea_stomp(index, Interpreter::value_offset_in_bytes(), FP, G1_scratch, G4_scratch);
#endif
tag_local(frame::TagValue, index, G1_scratch);
stf(FloatRegisterImpl::S, src, index, Interpreter::value_offset_in_bytes());
#ifdef ASSERT
check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);
#endif
stf(FloatRegisterImpl::S, src, index, 0);
}
void InterpreterMacroAssembler::store_local_double( Register index, FloatRegister src ) {
assert_not_delayed();
sll(index, Interpreter::logStackElementSize(), index);
sll(index, Interpreter::logStackElementSize, index);
sub(Llocals, index, index);
#ifdef ASSERT
#ifdef ASSERT
check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch);
#endif
tag_local(frame::TagCategory2, index, G1_scratch);
#endif
store_unaligned_double(src, index, Interpreter::local_offset_in_bytes(1));
}

View File

@ -149,7 +149,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
void push_i( Register r = Otos_i);
void push_ptr( Register r = Otos_i);
void push_ptr( Register r, Register tag);
void push_l( Register r = Otos_l1);
void push_f(FloatRegister f = Ftos_f);
void push_d(FloatRegister f = Ftos_d1);
@ -159,17 +158,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
void push(TosState state); // transition state -> vtos
void empty_expression_stack(); // resets both Lesp and SP
// Support for Tagged Stacks
void tag_stack(frame::Tag t, Register r);
void tag_stack(Register tag);
void tag_local(frame::Tag t, Register src, Register base, int n = 0);
#ifdef ASSERT
void verify_sp(Register Rsp, Register Rtemp);
void verify_esp(Register Resp); // verify that Lesp points to a word in the temp stack
void verify_stack_tag(frame::Tag t, Register r, Register scratch = G0);
void verify_local_tag(frame::Tag t, Register base, Register scr, int n = 0);
#endif // ASSERT
public:
@ -191,8 +182,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
Register Rdst,
setCCOrNot should_set_CC = dont_set_CC );
void get_cache_and_index_at_bcp(Register cache, Register tmp, int bcp_offset);
void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset);
void get_cache_and_index_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false);
void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false);
void get_cache_index_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false);
// common code
@ -241,17 +233,17 @@ class InterpreterMacroAssembler: public MacroAssembler {
void check_for_regarea_stomp( Register Rindex, int offset, Register Rlimit, Register Rscratch, Register Rscratch1);
#endif // ASSERT
void store_local_int( Register index, Register src );
void store_local_ptr( Register index, Register src, Register tag = Otos_l2 );
void store_local_ptr( int n, Register src, Register tag = Otos_l2 );
void store_local_ptr( Register index, Register src );
void store_local_ptr( int n, Register src );
void store_local_long( Register index, Register src );
void store_local_float( Register index, FloatRegister src );
void store_local_double( Register index, FloatRegister src );
// Tagged stack helpers for swap and dup
void load_ptr_and_tag(int n, Register val, Register tag);
void store_ptr_and_tag(int n, Register val, Register tag);
// Helpers for swap and dup
void load_ptr(int n, Register val);
void store_ptr(int n, Register val);
// Tagged stack helper for getting receiver in register.
// Helper for getting receiver in register.
void load_receiver(Register param_count, Register recv);
static int top_most_monitor_byte_offset(); // offset in bytes to top of monitor block
@ -304,7 +296,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
void profile_not_taken_branch(Register scratch);
void profile_call(Register scratch);
void profile_final_call(Register scratch);
void profile_virtual_call(Register receiver, Register scratch);
void profile_virtual_call(Register receiver, Register scratch, bool receiver_can_be_null = false);
void profile_ret(TosState state, Register return_bci, Register scratch);
void profile_null_seen(Register scratch);
void profile_typecheck(Register klass, Register scratch);

View File

@ -1,5 +1,5 @@
/*
* Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1998-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -43,19 +43,6 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_long() {
Argument jni_arg(jni_offset(), false);
Register Rtmp = O0;
#ifdef ASSERT
if (TaggedStackInterpreter) {
// check at least one tag is okay
Label ok;
__ ld_ptr(Llocals, Interpreter::local_tag_offset_in_bytes(offset() + 1), Rtmp);
__ cmp(Rtmp, G0);
__ brx(Assembler::equal, false, Assembler::pt, ok);
__ delayed()->nop();
__ stop("Native object has bad tag value");
__ bind(ok);
}
#endif // ASSERT
#ifdef _LP64
__ ldx(Llocals, Interpreter::local_offset_in_bytes(offset() + 1), Rtmp);
__ store_long_argument(Rtmp, jni_arg);
@ -107,18 +94,6 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_object() {
Address h_arg = Address(Llocals, Interpreter::local_offset_in_bytes(offset()));
__ ld_ptr(h_arg, Rtmp1);
#ifdef ASSERT
if (TaggedStackInterpreter) {
// check we have the obj and not the tag
Label ok;
__ mov(frame::TagReference, Rtmp3);
__ cmp(Rtmp1, Rtmp3);
__ brx(Assembler::notEqual, true, Assembler::pt, ok);
__ delayed()->nop();
__ stop("Native object passed tag by mistake");
__ bind(ok);
}
#endif // ASSERT
if (!do_NULL_check) {
__ add(h_arg.base(), h_arg.disp(), Rtmp2);
} else {
@ -168,17 +143,9 @@ class SlowSignatureHandler: public NativeSignatureIterator {
long_sig = 3
};
#ifdef ASSERT
void verify_tag(frame::Tag t) {
assert(!TaggedStackInterpreter ||
*(intptr_t*)(_from+Interpreter::local_tag_offset_in_bytes(0)) == t, "wrong tag");
}
#endif // ASSERT
virtual void pass_int() {
*_to++ = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
debug_only(verify_tag(frame::TagValue));
_from -= Interpreter::stackElementSize();
_from -= Interpreter::stackElementSize;
add_signature( non_float );
}
@ -186,31 +153,27 @@ class SlowSignatureHandler: public NativeSignatureIterator {
// pass address of from
intptr_t *from_addr = (intptr_t*)(_from + Interpreter::local_offset_in_bytes(0));
*_to++ = (*from_addr == 0) ? NULL : (intptr_t) from_addr;
debug_only(verify_tag(frame::TagReference));
_from -= Interpreter::stackElementSize();
_from -= Interpreter::stackElementSize;
add_signature( non_float );
}
#ifdef _LP64
virtual void pass_float() {
*_to++ = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
debug_only(verify_tag(frame::TagValue));
_from -= Interpreter::stackElementSize();
_from -= Interpreter::stackElementSize;
add_signature( float_sig );
}
virtual void pass_double() {
*_to++ = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
debug_only(verify_tag(frame::TagValue));
_from -= 2*Interpreter::stackElementSize();
_from -= 2*Interpreter::stackElementSize;
add_signature( double_sig );
}
virtual void pass_long() {
_to[0] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
debug_only(verify_tag(frame::TagValue));
_to += 1;
_from -= 2*Interpreter::stackElementSize();
_from -= 2*Interpreter::stackElementSize;
add_signature( long_sig );
}
#else
@ -218,9 +181,8 @@ class SlowSignatureHandler: public NativeSignatureIterator {
virtual void pass_long() {
_to[0] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
_to[1] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(0));
debug_only(verify_tag(frame::TagValue));
_to += 2;
_from -= 2*Interpreter::stackElementSize();
_from -= 2*Interpreter::stackElementSize;
add_signature( non_float );
}
#endif // _LP64

View File

@ -235,19 +235,17 @@ address InterpreterGenerator::generate_abstract_entry(void) {
}
// Method handle invoker
// Dispatch a method of the form java.dyn.MethodHandles::invoke(...)
address InterpreterGenerator::generate_method_handle_entry(void) {
if (!EnableMethodHandles) {
return generate_abstract_entry();
}
return generate_abstract_entry(); //6815692//
return MethodHandles::generate_method_handle_interpreter_entry(_masm);
}
//----------------------------------------------------------------------------------------------------
// Entry points & stack frame layout
//

View File

@ -1,5 +1,5 @@
/*
* Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,33 +24,13 @@
public:
// Support for Tagged Stacks
static int expr_offset_in_bytes(int i) { return stackElementSize * i + wordSize; }
// Stack index relative to tos (which points at value)
static int expr_index_at(int i) {
return stackElementWords() * i;
}
static int expr_tag_index_at(int i) {
assert(TaggedStackInterpreter, "should not call this");
// tag is one word above java stack element
return stackElementWords() * i + 1;
}
static int expr_offset_in_bytes(int i) { return stackElementSize()*i + wordSize; }
static int expr_tag_offset_in_bytes (int i) {
assert(TaggedStackInterpreter, "should not call this");
return expr_offset_in_bytes(i) + wordSize;
}
static int expr_index_at(int i) { return stackElementWords * i; }
// Already negated by c++ interpreter
static int local_index_at(int i) {
assert(i<=0, "local direction already negated");
return stackElementWords() * i + (value_offset_in_bytes()/wordSize);
}
static int local_tag_index_at(int i) {
assert(i<=0, "local direction already negated");
assert(TaggedStackInterpreter, "should not call this");
return stackElementWords() * i + (tag_offset_in_bytes()/wordSize);
static int local_index_at(int i) {
assert(i <= 0, "local direction already negated");
return stackElementWords * i;
}
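A worked example of the simplified offsets, assuming an LP64 VM where wordSize == 8, stackElementSize == 8 and stackElementWords == 1 (assumed values, not taken from this header).

// Illustrative arithmetic only, under the assumptions stated above.
const int kWordSize          = 8;
const int kStackElementSize  = 8;
const int kStackElementWords = 1;
static_assert(kStackElementSize * 0 + kWordSize == 8,  "expr_offset_in_bytes(0)");
static_assert(kStackElementSize * 2 + kWordSize == 24, "expr_offset_in_bytes(2)");
static_assert(kStackElementWords * -1 == -1,           "local_index_at(-1)");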

View File

@ -1,5 +1,5 @@
/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2008-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,6 +29,9 @@
address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm,
address interpreted_entry) {
// Just before the actual machine code entry point, allocate space
// for a MethodHandleEntry::Data record, so that we can manage everything
// from one base pointer.
__ align(wordSize);
address target = __ pc() + sizeof(Data);
while (__ pc() < target) {
@ -59,12 +62,891 @@ MethodHandleEntry* MethodHandleEntry::finish_compiled_entry(MacroAssembler* _mas
// Code generation
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) {
ShouldNotReachHere(); //NYI, 6815692
return NULL;
// I5_savedSP: sender SP (must preserve)
// G4 (Gargs): incoming argument list (must preserve)
// G5_method: invoke methodOop; becomes method type.
// G3_method_handle: receiver method handle (must load from sp[MethodTypeForm.vmslots])
// O0, O1: garbage temps, blown away
Register O0_argslot = O0;
Register O1_scratch = O1;
// emit WrongMethodType path first, to enable back-branch from main path
Label wrong_method_type;
__ bind(wrong_method_type);
__ jump_to(AddressLiteral(Interpreter::throw_WrongMethodType_entry()), O1_scratch);
__ delayed()->nop();
// here's where control starts out:
__ align(CodeEntryAlignment);
address entry_point = __ pc();
// fetch the MethodType from the method handle into G5_method_type
{
Register tem = G5_method;
assert(tem == G5_method_type, "yes, it's the same register");
for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) {
__ ld_ptr(Address(tem, *pchase), G5_method_type);
}
}
// given the MethodType, find out where the MH argument is buried
__ ld_ptr(Address(G5_method_type, __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, O1_scratch)), O0_argslot);
__ ldsw( Address(O0_argslot, __ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, O1_scratch)), O0_argslot);
__ ld_ptr(__ argument_address(O0_argslot), G3_method_handle);
__ check_method_handle_type(G5_method_type, G3_method_handle, O1_scratch, wrong_method_type);
__ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
return entry_point;
}
#ifdef ASSERT
static void verify_argslot(MacroAssembler* _masm, Register argslot_reg, Register temp_reg, const char* error_message) {
// Verify that argslot lies within (Gargs, FP].
Label L_ok, L_bad;
#ifdef _LP64
__ add(FP, STACK_BIAS, temp_reg);
__ cmp(argslot_reg, temp_reg);
#else
__ cmp(argslot_reg, FP);
#endif
__ brx(Assembler::greaterUnsigned, false, Assembler::pn, L_bad);
__ delayed()->nop();
__ cmp(Gargs, argslot_reg);
__ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
__ delayed()->nop();
__ bind(L_bad);
__ stop(error_message);
__ bind(L_ok);
}
#endif
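The branch logic above amounts to a single range check; a hedged C-style restatement (STACK_BIAS handling elided, helper name made up).

// Accept the slot pointer only if it lies between the incoming argument
// area and the frame pointer.
static bool argslot_in_frame_sketch(char* gargs, char* argslot, char* fp) {
  return gargs <= argslot && argslot <= fp;
}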
// Helper to insert argument slots into the stack.
// arg_slots must be a multiple of stack_move_unit() and <= 0
void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
RegisterOrConstant arg_slots,
int arg_mask,
Register argslot_reg,
Register temp_reg, Register temp2_reg, Register temp3_reg) {
assert(temp3_reg != noreg, "temp3 required");
assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
(!arg_slots.is_register() ? Gargs : arg_slots.as_register()));
#ifdef ASSERT
verify_argslot(_masm, argslot_reg, temp_reg, "insertion point must fall within current frame");
if (arg_slots.is_register()) {
Label L_ok, L_bad;
__ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
__ br(Assembler::greater, false, Assembler::pn, L_bad);
__ delayed()->nop();
__ btst(-stack_move_unit() - 1, arg_slots.as_register());
__ br(Assembler::zero, false, Assembler::pt, L_ok);
__ delayed()->nop();
__ bind(L_bad);
__ stop("assert arg_slots <= 0 and clear low bits");
__ bind(L_ok);
} else {
assert(arg_slots.as_constant() <= 0, "");
assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
}
#endif // ASSERT
#ifdef _LP64
if (arg_slots.is_register()) {
// Was arg_slots register loaded as signed int?
Label L_ok;
__ sll(arg_slots.as_register(), BitsPerInt, temp_reg);
__ sra(temp_reg, BitsPerInt, temp_reg);
__ cmp(arg_slots.as_register(), temp_reg);
__ br(Assembler::equal, false, Assembler::pt, L_ok);
__ delayed()->nop();
__ stop("arg_slots register not loaded as signed int");
__ bind(L_ok);
}
#endif
// Make space on the stack for the inserted argument(s).
// Then pull down everything shallower than argslot_reg.
// The stacked return address gets pulled down with everything else.
// That is, copy [sp, argslot) downward by -size words. In pseudo-code:
// sp -= size;
// for (temp = sp + size; temp < argslot; temp++)
// temp[-size] = temp[0]
// argslot -= size;
RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);
// Keep the stack pointer 2*wordSize aligned.
const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg);
__ add(SP, masked_offset, SP);
__ mov(Gargs, temp_reg); // source pointer for copy
__ add(Gargs, offset, Gargs);
{
Label loop;
__ bind(loop);
// pull one word down each time through the loop
__ ld_ptr(Address(temp_reg, 0), temp2_reg);
__ st_ptr(temp2_reg, Address(temp_reg, offset));
__ add(temp_reg, wordSize, temp_reg);
__ cmp(temp_reg, argslot_reg);
__ brx(Assembler::less, false, Assembler::pt, loop);
__ delayed()->nop(); // FILLME
}
// Now move the argslot down, to point to the opened-up space.
__ add(argslot_reg, offset, argslot_reg);
}
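A plain C++ restatement of the pseudo-code above, with 'size' as a positive count of inserted words (the assembler expresses the same thing as a non-positive slot count); the function name is made up.

// Slide everything shallower than argslot down by 'size' words, opening a gap;
// the stack grows toward lower addresses, so the new SP is sp - size.
static intptr_t* insert_slots_sketch(intptr_t* sp, intptr_t* argslot, int size) {
  for (intptr_t* p = sp; p < argslot; p++) {
    p[-size] = p[0];
  }
  return argslot - size;   // the opened-up insertion point
}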
// Helper to remove argument slots from the stack.
// arg_slots must be a multiple of stack_move_unit() and >= 0
void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
RegisterOrConstant arg_slots,
Register argslot_reg,
Register temp_reg, Register temp2_reg, Register temp3_reg) {
assert(temp3_reg != noreg, "temp3 required");
assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
(!arg_slots.is_register() ? Gargs : arg_slots.as_register()));
RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);
#ifdef ASSERT
// Verify that [argslot..argslot+size) lies within (Gargs, FP).
__ add(argslot_reg, offset, temp2_reg);
verify_argslot(_masm, temp2_reg, temp_reg, "deleted argument(s) must fall within current frame");
if (arg_slots.is_register()) {
Label L_ok, L_bad;
__ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
__ br(Assembler::less, false, Assembler::pn, L_bad);
__ delayed()->nop();
__ btst(-stack_move_unit() - 1, arg_slots.as_register());
__ br(Assembler::zero, false, Assembler::pt, L_ok);
__ delayed()->nop();
__ bind(L_bad);
__ stop("assert arg_slots >= 0 and clear low bits");
__ bind(L_ok);
} else {
assert(arg_slots.as_constant() >= 0, "");
assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
}
#endif // ASSERT
// Pull up everything shallower than argslot.
// Then remove the excess space on the stack.
// The stacked return address gets pulled up with everything else.
// That is, copy [sp, argslot) upward by size words. In pseudo-code:
// for (temp = argslot-1; temp >= sp; --temp)
// temp[size] = temp[0]
// argslot += size;
// sp += size;
__ sub(argslot_reg, wordSize, temp_reg); // source pointer for copy
{
Label loop;
__ bind(loop);
// pull one word up each time through the loop
__ ld_ptr(Address(temp_reg, 0), temp2_reg);
__ st_ptr(temp2_reg, Address(temp_reg, offset));
__ sub(temp_reg, wordSize, temp_reg);
__ cmp(temp_reg, Gargs);
__ brx(Assembler::greaterEqual, false, Assembler::pt, loop);
__ delayed()->nop(); // FILLME
}
// Now move the argslot up, to point to the just-copied block.
__ add(Gargs, offset, Gargs);
// And adjust the argslot address to point at the deletion point.
__ add(argslot_reg, offset, argslot_reg);
// Keep the stack pointer 2*wordSize aligned.
const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg);
__ add(SP, masked_offset, SP);
}
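The matching upward copy, again as a hedged plain C++ sketch of the pseudo-code above; 'size' is a positive count of removed words and the function name is made up.

// Slide everything shallower than argslot up by 'size' words, closing the gap;
// the new SP is sp + size.
static intptr_t* remove_slots_sketch(intptr_t* sp, intptr_t* argslot, int size) {
  for (intptr_t* p = argslot - 1; p >= sp; --p) {
    p[size] = p[0];
  }
  return argslot + size;   // the adjusted slot pointer (the deletion point)
}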
#ifndef PRODUCT
extern "C" void print_method_handle(oop mh);
void trace_method_handle_stub(const char* adaptername,
oop mh) {
#if 0
intptr_t* entry_sp,
intptr_t* saved_sp,
intptr_t* saved_bp) {
// called as a leaf from native code: do not block the JVM!
intptr_t* last_sp = (intptr_t*) saved_bp[frame::interpreter_frame_last_sp_offset];
intptr_t* base_sp = (intptr_t*) saved_bp[frame::interpreter_frame_monitor_block_top_offset];
printf("MH %s mh="INTPTR_FORMAT" sp=("INTPTR_FORMAT"+"INTX_FORMAT") stack_size="INTX_FORMAT" bp="INTPTR_FORMAT"\n",
adaptername, (intptr_t)mh, (intptr_t)entry_sp, (intptr_t)(saved_sp - entry_sp), (intptr_t)(base_sp - last_sp), (intptr_t)saved_bp);
if (last_sp != saved_sp)
printf("*** last_sp="INTPTR_FORMAT"\n", (intptr_t)last_sp);
#endif
printf("MH %s mh="INTPTR_FORMAT"\n", adaptername, (intptr_t) mh);
print_method_handle(mh);
}
#endif // PRODUCT
// which conversion op types are implemented here?
int MethodHandles::adapter_conversion_ops_supported_mask() {
return ((1<<sun_dyn_AdapterMethodHandle::OP_RETYPE_ONLY)
|(1<<sun_dyn_AdapterMethodHandle::OP_RETYPE_RAW)
|(1<<sun_dyn_AdapterMethodHandle::OP_CHECK_CAST)
|(1<<sun_dyn_AdapterMethodHandle::OP_PRIM_TO_PRIM)
|(1<<sun_dyn_AdapterMethodHandle::OP_REF_TO_PRIM)
|(1<<sun_dyn_AdapterMethodHandle::OP_SWAP_ARGS)
|(1<<sun_dyn_AdapterMethodHandle::OP_ROT_ARGS)
|(1<<sun_dyn_AdapterMethodHandle::OP_DUP_ARGS)
|(1<<sun_dyn_AdapterMethodHandle::OP_DROP_ARGS)
//|(1<<sun_dyn_AdapterMethodHandle::OP_SPREAD_ARGS) //BUG!
);
// FIXME: MethodHandlesTest gets a crash if we enable OP_SPREAD_ARGS.
}
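A trivial hedged illustration of testing one bit in a mask built this way; the helper is made up, not an existing VM function.

// True when conversion op 'op' has its bit set in the supported-ops mask.
static bool conv_op_supported_sketch(int mask, int op) {
  return (mask & (1 << op)) != 0;
}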
//------------------------------------------------------------------------------
// MethodHandles::generate_method_handle_stub
//
// Generate an "entry" field for a method handle.
// This determines how the method handle will respond to calls.
void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
ShouldNotReachHere(); //NYI, 6815692
// Here is the register state during an interpreted call,
// as set up by generate_method_handle_interpreter_entry():
// - G5: garbage temp (was MethodHandle.invoke methodOop, unused)
// - G3: receiver method handle
// - O5_savedSP: sender SP (must preserve)
Register O0_argslot = O0;
Register O1_scratch = O1;
Register O2_scratch = O2;
Register O3_scratch = O3;
Register G5_index = G5;
guarantee(java_dyn_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");
// Some handy addresses:
Address G5_method_fie( G5_method, in_bytes(methodOopDesc::from_interpreted_offset()));
Address G3_mh_vmtarget( G3_method_handle, java_dyn_MethodHandle::vmtarget_offset_in_bytes());
Address G3_dmh_vmindex( G3_method_handle, sun_dyn_DirectMethodHandle::vmindex_offset_in_bytes());
Address G3_bmh_vmargslot( G3_method_handle, sun_dyn_BoundMethodHandle::vmargslot_offset_in_bytes());
Address G3_bmh_argument( G3_method_handle, sun_dyn_BoundMethodHandle::argument_offset_in_bytes());
Address G3_amh_vmargslot( G3_method_handle, sun_dyn_AdapterMethodHandle::vmargslot_offset_in_bytes());
Address G3_amh_argument ( G3_method_handle, sun_dyn_AdapterMethodHandle::argument_offset_in_bytes());
Address G3_amh_conversion(G3_method_handle, sun_dyn_AdapterMethodHandle::conversion_offset_in_bytes());
const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
if (have_entry(ek)) {
__ nop(); // empty stubs make SG sick
return;
}
address interp_entry = __ pc();
if (UseCompressedOops) __ unimplemented("UseCompressedOops");
#ifndef PRODUCT
if (TraceMethodHandles) {
// save: Gargs, O5_savedSP
__ save(SP, -16*wordSize, SP);
__ set((intptr_t) entry_name(ek), O0);
__ mov(G3_method_handle, O1);
__ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, trace_method_handle_stub));
__ restore(SP, 16*wordSize, SP);
}
#endif // PRODUCT
switch ((int) ek) {
case _raise_exception:
{
// Not a real MH entry, but rather shared code for raising an
// exception. Extra local arguments are passed in scratch
// registers, as required type in O3, failing object (or NULL)
// in O2, failing bytecode type in O1.
__ mov(O5_savedSP, SP); // Cut the stack back to where the caller started.
// Push arguments as if coming from the interpreter.
Register O0_scratch = O0_argslot;
int stackElementSize = Interpreter::stackElementSize;
// Make space on the stack for the arguments.
__ sub(SP, 4*stackElementSize, SP);
__ sub(Gargs, 3*stackElementSize, Gargs);
//__ sub(Lesp, 3*stackElementSize, Lesp);
// void raiseException(int code, Object actual, Object required)
__ st( O1_scratch, Address(Gargs, 2*stackElementSize)); // code
__ st_ptr(O2_scratch, Address(Gargs, 1*stackElementSize)); // actual
__ st_ptr(O3_scratch, Address(Gargs, 0*stackElementSize)); // required
Label no_method;
// FIXME: fill in _raise_exception_method with a suitable sun.dyn method
__ set(AddressLiteral((address) &_raise_exception_method), G5_method);
__ ld_ptr(Address(G5_method, 0), G5_method);
__ tst(G5_method);
__ brx(Assembler::zero, false, Assembler::pn, no_method);
__ delayed()->nop();
int jobject_oop_offset = 0;
__ ld_ptr(Address(G5_method, jobject_oop_offset), G5_method);
__ tst(G5_method);
__ brx(Assembler::zero, false, Assembler::pn, no_method);
__ delayed()->nop();
__ verify_oop(G5_method);
__ jump_indirect_to(G5_method_fie, O1_scratch);
__ delayed()->nop();
// If we get here, the Java runtime did not do its job of creating the exception.
// Do something that at least causes a valid throw from the interpreter.
__ bind(no_method);
__ unimplemented("_raise_exception no method");
}
break;
case _invokestatic_mh:
case _invokespecial_mh:
{
__ ld_ptr(G3_mh_vmtarget, G5_method); // target is a methodOop
__ verify_oop(G5_method);
// Same as TemplateTable::invokestatic or invokespecial,
// minus the CP setup and profiling:
if (ek == _invokespecial_mh) {
// Must load & check the first argument before entering the target method.
__ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
__ ld_ptr(__ argument_address(O0_argslot), G3_method_handle);
__ null_check(G3_method_handle);
__ verify_oop(G3_method_handle);
}
__ jump_indirect_to(G5_method_fie, O1_scratch);
__ delayed()->nop();
}
break;
case _invokevirtual_mh:
{
// Same as TemplateTable::invokevirtual,
// minus the CP setup and profiling:
// Pick out the vtable index and receiver offset from the MH,
// and then we can discard it:
__ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
__ ldsw(G3_dmh_vmindex, G5_index);
// Note: The verifier allows us to ignore G3_mh_vmtarget.
__ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
__ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());
// Get receiver klass:
Register O0_klass = O0_argslot;
__ load_klass(G3_method_handle, O0_klass);
__ verify_oop(O0_klass);
// Get target methodOop & entry point:
const int base = instanceKlass::vtable_start_offset() * wordSize;
assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
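// method = receiver_klass->vtable()[vmindex]; vtable entries are one word
// each (asserted above), so the index is scaled by wordSize below.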
__ sll_ptr(G5_index, LogBytesPerWord, G5_index);
__ add(O0_klass, G5_index, O0_klass);
Address vtable_entry_addr(O0_klass, base + vtableEntry::method_offset_in_bytes());
__ ld_ptr(vtable_entry_addr, G5_method);
__ verify_oop(G5_method);
__ jump_indirect_to(G5_method_fie, O1_scratch);
__ delayed()->nop();
}
break;
case _invokeinterface_mh:
{
// Same as TemplateTable::invokeinterface,
// minus the CP setup and profiling:
__ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
Register O1_intf = O1_scratch;
__ ld_ptr(G3_mh_vmtarget, O1_intf);
__ ldsw(G3_dmh_vmindex, G5_index);
__ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
__ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());
// Get receiver klass:
Register O0_klass = O0_argslot;
__ load_klass(G3_method_handle, O0_klass);
__ verify_oop(O0_klass);
// Get interface:
Label no_such_interface;
__ verify_oop(O1_intf);
__ lookup_interface_method(O0_klass, O1_intf,
// Note: next two args must be the same:
G5_index, G5_method,
O2_scratch,
O3_scratch,
no_such_interface);
__ verify_oop(G5_method);
__ jump_indirect_to(G5_method_fie, O1_scratch);
__ delayed()->nop();
__ bind(no_such_interface);
// Throw an exception.
// For historical reasons, it will be IncompatibleClassChangeError.
__ unimplemented("not tested yet");
__ ld_ptr(Address(O1_intf, java_mirror_offset), O3_scratch); // required interface
__ mov(O0_klass, O2_scratch); // bad receiver
__ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O0_argslot);
__ delayed()->mov(Bytecodes::_invokeinterface, O1_scratch); // who is complaining?
}
break;
case _bound_ref_mh:
case _bound_int_mh:
case _bound_long_mh:
case _bound_ref_direct_mh:
case _bound_int_direct_mh:
case _bound_long_direct_mh:
{
const bool direct_to_method = (ek >= _bound_ref_direct_mh);
BasicType arg_type = T_ILLEGAL;
int arg_mask = _INSERT_NO_MASK;
int arg_slots = -1;
get_ek_bound_mh_info(ek, arg_type, arg_mask, arg_slots);
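// (Presumably arg_slots is one stack word for int/ref bound arguments and
// two for long, matching the two store paths below.)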
// Make room for the new argument:
__ ldsw(G3_bmh_vmargslot, O0_argslot);
__ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
insert_arg_slots(_masm, arg_slots * stack_move_unit(), arg_mask, O0_argslot, O1_scratch, O2_scratch, G5_index);
// Store bound argument into the new stack slot:
__ ld_ptr(G3_bmh_argument, O1_scratch);
if (arg_type == T_OBJECT) {
__ st_ptr(O1_scratch, Address(O0_argslot, 0));
} else {
Address prim_value_addr(O1_scratch, java_lang_boxing_object::value_offset_in_bytes(arg_type));
__ load_sized_value(prim_value_addr, O2_scratch, type2aelembytes(arg_type), is_signed_subword_type(arg_type));
if (arg_slots == 2) {
__ unimplemented("not yet tested");
#ifndef _LP64
__ signx(O2_scratch, O3_scratch); // Sign extend
#endif
__ st_long(O2_scratch, Address(O0_argslot, 0)); // Uses O2/O3 on !_LP64
} else {
__ st_ptr( O2_scratch, Address(O0_argslot, 0));
}
}
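// Tail-call the bound target: directly into the methodOop for the
// *_direct_mh kinds, otherwise through the next method handle layer.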
if (direct_to_method) {
__ ld_ptr(G3_mh_vmtarget, G5_method); // target is a methodOop
__ verify_oop(G5_method);
__ jump_indirect_to(G5_method_fie, O1_scratch);
__ delayed()->nop();
} else {
__ ld_ptr(G3_mh_vmtarget, G3_method_handle); // target is a methodOop
__ verify_oop(G3_method_handle);
__ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
}
}
break;
case _adapter_retype_only:
case _adapter_retype_raw:
// Immediately jump to the next MH layer:
__ ld_ptr(G3_mh_vmtarget, G3_method_handle);
__ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
// This is OK when all parameter types widen.
// It is also OK when a return type narrows.
break;
case _adapter_check_cast:
{
// Temps:
Register G5_klass = G5_index; // Interesting AMH data.
// Check a reference argument before jumping to the next layer of MH:
__ ldsw(G3_amh_vmargslot, O0_argslot);
Address vmarg = __ argument_address(O0_argslot);
// What class are we casting to?
__ ld_ptr(G3_amh_argument, G5_klass); // This is a Class object!
__ ld_ptr(Address(G5_klass, java_lang_Class::klass_offset_in_bytes()), G5_klass);
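// (G5_klass now holds the raw klass unwrapped from the java.lang.Class
// mirror; it is the type required by the subtype check below.)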
Label done;
__ ld_ptr(vmarg, O1_scratch);
__ tst(O1_scratch);
__ brx(Assembler::zero, false, Assembler::pn, done); // No cast if null.
__ delayed()->nop();
__ load_klass(O1_scratch, O1_scratch);
// Live at this point:
// - G5_klass : klass required by the target method
// - O1_scratch : argument klass to test
// - G3_method_handle: adapter method handle
__ check_klass_subtype(O1_scratch, G5_klass, O0_argslot, O2_scratch, done);
// If we get here, the type check failed!
__ ldsw(G3_amh_vmargslot, O0_argslot); // reload argslot field
__ ld_ptr(G3_amh_argument, O3_scratch); // required class
__ ld_ptr(vmarg, O2_scratch); // bad object
__ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O0_argslot);
__ delayed()->mov(Bytecodes::_checkcast, O1_scratch); // who is complaining?
__ bind(done);
// Get the new MH:
__ ld_ptr(G3_mh_vmtarget, G3_method_handle);
__ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
}
break;
case _adapter_prim_to_prim:
case _adapter_ref_to_prim:
// Handled completely by optimized cases.
__ stop("init_AdapterMethodHandle should not issue this");
break;
case _adapter_opt_i2i: // optimized subcase of adapt_prim_to_prim
//case _adapter_opt_f2i: // optimized subcase of adapt_prim_to_prim
case _adapter_opt_l2i: // optimized subcase of adapt_prim_to_prim
case _adapter_opt_unboxi: // optimized subcase of adapt_ref_to_prim
{
// Perform an in-place conversion to int or an int subword.
__ ldsw(G3_amh_vmargslot, O0_argslot);
Address vmarg = __ argument_address(O0_argslot);
Address value;
bool value_left_justified = false;
switch (ek) {
case _adapter_opt_i2i:
case _adapter_opt_l2i:
__ unimplemented(entry_name(ek));
value = vmarg;
break;
case _adapter_opt_unboxi:
{
// Load the value up from the heap.
__ ld_ptr(vmarg, O1_scratch);
int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT);
#ifdef ASSERT
for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
if (is_subword_type(BasicType(bt)))
assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(BasicType(bt)), "");
}
#endif
__ null_check(O1_scratch, value_offset);
value = Address(O1_scratch, value_offset);
#ifdef _BIG_ENDIAN
// Values stored in objects are packed.
value_left_justified = true;
#endif
}
break;
default:
ShouldNotReachHere();
}
// This check is required on _BIG_ENDIAN
Register G5_vminfo = G5_index;
__ ldsw(G3_amh_conversion, G5_vminfo);
assert(CONV_VMINFO_SHIFT == 0, "preshifted");
// Original 32-bit vmdata word must be of this form:
// | MBZ:6 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 |
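// (For example, an int->short narrowing presumably carries a vminfo shift
// count of 16 here: shift left 16, then arithmetic-shift right 16.)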
__ lduw(value, O1_scratch);
if (!value_left_justified)
__ sll(O1_scratch, G5_vminfo, O1_scratch);
Label zero_extend, done;
__ btst(CONV_VMINFO_SIGN_FLAG, G5_vminfo);
__ br(Assembler::zero, false, Assembler::pn, zero_extend);
__ delayed()->nop();
// this path is taken for int->byte, int->short
__ sra(O1_scratch, G5_vminfo, O1_scratch);
__ ba(false, done);
__ delayed()->nop();
__ bind(zero_extend);
// this is taken for int->char
__ srl(O1_scratch, G5_vminfo, O1_scratch);
__ bind(done);
__ st(O1_scratch, vmarg);
// Get the new MH:
__ ld_ptr(G3_mh_vmtarget, G3_method_handle);
__ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
}
break;
case _adapter_opt_i2l: // optimized subcase of adapt_prim_to_prim
case _adapter_opt_unboxl: // optimized subcase of adapt_ref_to_prim
{
// Perform an in-place int-to-long or ref-to-long conversion.
__ ldsw(G3_amh_vmargslot, O0_argslot);
// On a big-endian machine we duplicate the slot and store the MSW
// in the first slot.
__ add(Gargs, __ argument_offset(O0_argslot, 1), O0_argslot);
insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK, O0_argslot, O1_scratch, O2_scratch, G5_index);
Address arg_lsw(O0_argslot, 0);
Address arg_msw(O0_argslot, -Interpreter::stackElementSize);
switch (ek) {
case _adapter_opt_i2l:
{
__ ldsw(arg_lsw, O2_scratch); // Load LSW
#ifndef _LP64
__ signx(O2_scratch, O3_scratch); // Sign extend
#endif
__ st_long(O2_scratch, arg_msw); // Uses O2/O3 on !_LP64
}
break;
case _adapter_opt_unboxl:
{
// Load the value up from the heap.
__ ld_ptr(arg_lsw, O1_scratch);
int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_LONG);
assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE), "");
__ null_check(O1_scratch, value_offset);
__ ld_long(Address(O1_scratch, value_offset), O2_scratch); // Uses O2/O3 on !_LP64
__ st_long(O2_scratch, arg_msw);
}
break;
default:
ShouldNotReachHere();
}
__ ld_ptr(G3_mh_vmtarget, G3_method_handle);
__ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
}
break;
case _adapter_opt_f2d: // optimized subcase of adapt_prim_to_prim
case _adapter_opt_d2f: // optimized subcase of adapt_prim_to_prim
{
// perform an in-place floating primitive conversion
__ unimplemented(entry_name(ek));
}
break;
case _adapter_prim_to_ref:
__ unimplemented(entry_name(ek)); // %%% FIXME: NYI
break;
case _adapter_swap_args:
case _adapter_rot_args:
// handled completely by optimized cases
__ stop("init_AdapterMethodHandle should not issue this");
break;
case _adapter_opt_swap_1:
case _adapter_opt_swap_2:
case _adapter_opt_rot_1_up:
case _adapter_opt_rot_1_down:
case _adapter_opt_rot_2_up:
case _adapter_opt_rot_2_down:
{
int swap_bytes = 0, rotate = 0;
get_ek_adapter_opt_swap_rot_info(ek, swap_bytes, rotate);
// 'argslot' is the position of the first argument to swap.
__ ldsw(G3_amh_vmargslot, O0_argslot);
__ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
// 'vminfo' is the second.
Register O1_destslot = O1_scratch;
__ ldsw(G3_amh_conversion, O1_destslot);
assert(CONV_VMINFO_SHIFT == 0, "preshifted");
__ and3(O1_destslot, CONV_VMINFO_MASK, O1_destslot);
__ add(Gargs, __ argument_offset(O1_destslot), O1_destslot);
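// O0_argslot and O1_destslot now hold the addresses of the two argument
// positions; either swap them word-for-word or rotate the span between them.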
if (!rotate) {
for (int i = 0; i < swap_bytes; i += wordSize) {
__ ld_ptr(Address(O0_argslot, i), O2_scratch);
__ ld_ptr(Address(O1_destslot, i), O3_scratch);
__ st_ptr(O3_scratch, Address(O0_argslot, i));
__ st_ptr(O2_scratch, Address(O1_destslot, i));
}
} else {
// Save the first chunk, which is going to get overwritten.
switch (swap_bytes) {
case 4 : __ lduw(Address(O0_argslot, 0), O2_scratch); break;
case 16: __ ldx( Address(O0_argslot, 8), O3_scratch); //fall-thru
case 8 : __ ldx( Address(O0_argslot, 0), O2_scratch); break;
default: ShouldNotReachHere();
}
if (rotate > 0) {
// Rotate upward.
__ sub(O0_argslot, swap_bytes, O0_argslot);
#if ASSERT
{
// Verify that argslot > destslot, by at least swap_bytes.
Label L_ok;
__ cmp(O0_argslot, O1_destslot);
__ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, L_ok);
__ delayed()->nop();
__ stop("source must be above destination (upward rotation)");
__ bind(L_ok);
}
#endif
// Work argslot down to destslot, copying contiguous data upwards.
// Pseudo-code:
// argslot = src_addr - swap_bytes
// destslot = dest_addr
// while (argslot >= destslot) {
// *(argslot + swap_bytes) = *(argslot + 0);
// argslot--;
// }
Label loop;
__ bind(loop);
__ ld_ptr(Address(O0_argslot, 0), G5_index);
__ st_ptr(G5_index, Address(O0_argslot, swap_bytes));
__ sub(O0_argslot, wordSize, O0_argslot);
__ cmp(O0_argslot, O1_destslot);
__ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, loop);
__ delayed()->nop(); // FILLME
} else {
__ add(O0_argslot, swap_bytes, O0_argslot);
#if ASSERT
{
// Verify that argslot < destslot, by at least swap_bytes.
Label L_ok;
__ cmp(O0_argslot, O1_destslot);
__ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
__ delayed()->nop();
__ stop("source must be below destination (downward rotation)");
__ bind(L_ok);
}
#endif
// Work argslot up to destslot, copying contiguous data downwards.
// Pseudo-code:
// argslot = src_addr + swap_bytes
// destslot = dest_addr
// while (argslot <= destslot) {
// *(argslot - swap_bytes) = *(argslot + 0);
// argslot++;
// }
Label loop;
__ bind(loop);
__ ld_ptr(Address(O0_argslot, 0), G5_index);
__ st_ptr(G5_index, Address(O0_argslot, -swap_bytes));
__ add(O0_argslot, wordSize, O0_argslot);
__ cmp(O0_argslot, O1_destslot);
__ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, loop);
__ delayed()->nop(); // FILLME
}
// Store the original first chunk into the destination slot, now free.
switch (swap_bytes) {
case 4 : __ stw(O2_scratch, Address(O1_destslot, 0)); break;
case 16: __ stx(O3_scratch, Address(O1_destslot, 8)); // fall-thru
case 8 : __ stx(O2_scratch, Address(O1_destslot, 0)); break;
default: ShouldNotReachHere();
}
}
__ ld_ptr(G3_mh_vmtarget, G3_method_handle);
__ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
}
break;
case _adapter_dup_args:
{
// 'argslot' is the position of the first argument to duplicate.
__ ldsw(G3_amh_vmargslot, O0_argslot);
__ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
// 'stack_move' is negative number of words to duplicate.
Register G5_stack_move = G5_index;
__ ldsw(G3_amh_conversion, G5_stack_move);
__ sra(G5_stack_move, CONV_STACK_MOVE_SHIFT, G5_stack_move);
// Remember the old Gargs (argslot[0]).
Register O1_oldarg = O1_scratch;
__ mov(Gargs, O1_oldarg);
// Move Gargs down to make room for dups.
__ sll_ptr(G5_stack_move, LogBytesPerWord, G5_stack_move);
__ add(Gargs, G5_stack_move, Gargs);
// Compute the new Gargs (argslot[0]).
Register O2_newarg = O2_scratch;
__ mov(Gargs, O2_newarg);
// Copy from oldarg[0...] down to newarg[0...]
// Pseudo-code:
// O1_oldarg = old-Gargs
// O2_newarg = new-Gargs
// O0_argslot = argslot
// while (O2_newarg < O1_oldarg) *O2_newarg++ = *O0_argslot++
Label loop;
__ bind(loop);
__ ld_ptr(Address(O0_argslot, 0), O3_scratch);
__ st_ptr(O3_scratch, Address(O2_newarg, 0));
__ add(O0_argslot, wordSize, O0_argslot);
__ add(O2_newarg, wordSize, O2_newarg);
__ cmp(O2_newarg, O1_oldarg);
__ brx(Assembler::less, false, Assembler::pt, loop);
__ delayed()->nop(); // FILLME
__ ld_ptr(G3_mh_vmtarget, G3_method_handle);
__ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
}
break;
case _adapter_drop_args:
{
// 'argslot' is the position of the first argument to nuke.
__ ldsw(G3_amh_vmargslot, O0_argslot);
__ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
// 'stack_move' is number of words to drop.
Register G5_stack_move = G5_index;
__ ldsw(G3_amh_conversion, G5_stack_move);
__ sra(G5_stack_move, CONV_STACK_MOVE_SHIFT, G5_stack_move);
remove_arg_slots(_masm, G5_stack_move, O0_argslot, O1_scratch, O2_scratch, O3_scratch);
__ ld_ptr(G3_mh_vmtarget, G3_method_handle);
__ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
}
break;
case _adapter_collect_args:
__ unimplemented(entry_name(ek)); // %%% FIXME: NYI
break;
case _adapter_spread_args:
// Handled completely by optimized cases.
__ stop("init_AdapterMethodHandle should not issue this");
break;
case _adapter_opt_spread_0:
case _adapter_opt_spread_1:
case _adapter_opt_spread_more:
{
// spread an array out into a group of arguments
__ unimplemented(entry_name(ek));
}
break;
case _adapter_flyby:
case _adapter_ricochet:
__ unimplemented(entry_name(ek)); // %%% FIXME: NYI
break;
default:
ShouldNotReachHere();
}
address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);
__ unimplemented(entry_name(ek)); // %%% FIXME: NYI
init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, me_cookie));
}

View File

@ -547,17 +547,11 @@ class AdapterGenerator {
void set_Rdisp(Register r) { Rdisp = r; }
void patch_callers_callsite();
void tag_c2i_arg(frame::Tag t, Register base, int st_off, Register scratch);
// base+st_off points to top of argument
int arg_offset(const int st_off) { return st_off + Interpreter::value_offset_in_bytes(); }
int arg_offset(const int st_off) { return st_off; }
int next_arg_offset(const int st_off) {
return st_off - Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
}
int tag_offset(const int st_off) { return st_off + Interpreter::tag_offset_in_bytes(); }
int next_tag_offset(const int st_off) {
return st_off - Interpreter::stackElementSize() + Interpreter::tag_offset_in_bytes();
return st_off - Interpreter::stackElementSize;
}
// Argument slot values may be loaded first into a register because
@ -565,9 +559,6 @@ class AdapterGenerator {
RegisterOrConstant arg_slot(const int st_off);
RegisterOrConstant next_arg_slot(const int st_off);
RegisterOrConstant tag_slot(const int st_off);
RegisterOrConstant next_tag_slot(const int st_off);
// Stores long into offset pointed to by base
void store_c2i_long(Register r, Register base,
const int st_off, bool is_stack);
@ -653,23 +644,6 @@ void AdapterGenerator::patch_callers_callsite() {
__ bind(L);
}
void AdapterGenerator::tag_c2i_arg(frame::Tag t, Register base, int st_off,
Register scratch) {
if (TaggedStackInterpreter) {
RegisterOrConstant slot = tag_slot(st_off);
// have to store zero because local slots can be reused (rats!)
if (t == frame::TagValue) {
__ st_ptr(G0, base, slot);
} else if (t == frame::TagCategory2) {
__ st_ptr(G0, base, slot);
__ st_ptr(G0, base, next_tag_slot(st_off));
} else {
__ mov(t, scratch);
__ st_ptr(scratch, base, slot);
}
}
}
RegisterOrConstant AdapterGenerator::arg_slot(const int st_off) {
RegisterOrConstant roc(arg_offset(st_off));
@ -682,17 +656,6 @@ RegisterOrConstant AdapterGenerator::next_arg_slot(const int st_off) {
}
RegisterOrConstant AdapterGenerator::tag_slot(const int st_off) {
RegisterOrConstant roc(tag_offset(st_off));
return __ ensure_simm13_or_reg(roc, Rdisp);
}
RegisterOrConstant AdapterGenerator::next_tag_slot(const int st_off) {
RegisterOrConstant roc(next_tag_offset(st_off));
return __ ensure_simm13_or_reg(roc, Rdisp);
}
// Stores long into offset pointed to by base
void AdapterGenerator::store_c2i_long(Register r, Register base,
const int st_off, bool is_stack) {
@ -718,19 +681,16 @@ void AdapterGenerator::store_c2i_long(Register r, Register base,
}
#endif // COMPILER2
#endif // _LP64
tag_c2i_arg(frame::TagCategory2, base, st_off, r);
}
void AdapterGenerator::store_c2i_object(Register r, Register base,
const int st_off) {
__ st_ptr (r, base, arg_slot(st_off));
tag_c2i_arg(frame::TagReference, base, st_off, r);
}
void AdapterGenerator::store_c2i_int(Register r, Register base,
const int st_off) {
__ st (r, base, arg_slot(st_off));
tag_c2i_arg(frame::TagValue, base, st_off, r);
}
// Stores into offset pointed to by base
@ -745,13 +705,11 @@ void AdapterGenerator::store_c2i_double(VMReg r_2,
__ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
__ stf(FloatRegisterImpl::S, r_2->as_FloatRegister(), base, arg_slot(st_off) );
#endif
tag_c2i_arg(frame::TagCategory2, base, st_off, G1_scratch);
}
void AdapterGenerator::store_c2i_float(FloatRegister f, Register base,
const int st_off) {
__ stf(FloatRegisterImpl::S, f, base, arg_slot(st_off));
tag_c2i_arg(frame::TagValue, base, st_off, G1_scratch);
}
void AdapterGenerator::gen_c2i_adapter(
@ -786,14 +744,14 @@ void AdapterGenerator::gen_c2i_adapter(
// Since all args are passed on the stack, total_args_passed*wordSize is the
// space we need. Add in varargs area needed by the interpreter. Round up
// to stack alignment.
const int arg_size = total_args_passed * Interpreter::stackElementSize();
const int arg_size = total_args_passed * Interpreter::stackElementSize;
const int varargs_area =
(frame::varargs_offset - frame::register_save_words)*wordSize;
const int extraspace = round_to(arg_size + varargs_area, 2*wordSize);
int bias = STACK_BIAS;
const int interp_arg_offset = frame::varargs_offset*wordSize +
(total_args_passed-1)*Interpreter::stackElementSize();
(total_args_passed-1)*Interpreter::stackElementSize;
Register base = SP;
@ -814,7 +772,7 @@ void AdapterGenerator::gen_c2i_adapter(
// First write G1 (if used) to where ever it must go
for (int i=0; i<total_args_passed; i++) {
const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize()) + bias;
const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize) + bias;
VMReg r_1 = regs[i].first();
VMReg r_2 = regs[i].second();
if (r_1 == G1_scratch->as_VMReg()) {
@ -831,7 +789,7 @@ void AdapterGenerator::gen_c2i_adapter(
// Now write the args into the outgoing interpreter space
for (int i=0; i<total_args_passed; i++) {
const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize()) + bias;
const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize) + bias;
VMReg r_1 = regs[i].first();
VMReg r_2 = regs[i].second();
if (!r_1->is_valid()) {
@ -900,7 +858,7 @@ void AdapterGenerator::gen_c2i_adapter(
#endif // _LP64
__ mov((frame::varargs_offset)*wordSize -
1*Interpreter::stackElementSize()+bias+BytesPerWord, G1);
1*Interpreter::stackElementSize+bias+BytesPerWord, G1);
// Jump to the interpreter just as if interpreter was doing it.
__ jmpl(G3_scratch, 0, G0);
// Setup Lesp for the call. Cannot actually set Lesp as the current Lesp
@ -1051,7 +1009,7 @@ void AdapterGenerator::gen_i2c_adapter(
// ldx/lddf optimizations.
// Load in argument order going down.
const int ld_off = (total_args_passed-i)*Interpreter::stackElementSize();
const int ld_off = (total_args_passed-i)*Interpreter::stackElementSize;
set_Rdisp(G1_scratch);
VMReg r_1 = regs[i].first();
@ -1120,7 +1078,7 @@ void AdapterGenerator::gen_i2c_adapter(
for (int i=0; i<total_args_passed; i++) {
if (regs[i].first()->is_Register() && regs[i].second()->is_valid()) {
// Load in argument order going down
int ld_off = (total_args_passed-i)*Interpreter::stackElementSize();
int ld_off = (total_args_passed-i)*Interpreter::stackElementSize;
// Need to marshal 64-bit value from misaligned Lesp loads
Register r = regs[i].first()->as_Register()->after_restore();
if (r == G1 || r == G4) {
@ -3062,7 +3020,7 @@ int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals)
"test and remove; got more parms than locals");
if (callee_locals < callee_parameters)
return 0; // No adjustment for negative locals
int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords();
int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
return round_to(diff, WordsPerLong);
}

View File

@ -139,7 +139,7 @@ class StubGenerator: public StubCodeGenerator {
__ ld_ptr(parameter_size.as_address(), t); // get parameter size (in words)
__ add(t, frame::memory_parameter_word_sp_offset, t); // add space for save area (in words)
__ round_to(t, WordsPerLong); // make sure it is multiple of 2 (in words)
__ sll(t, Interpreter::logStackElementSize(), t); // compute number of bytes
__ sll(t, Interpreter::logStackElementSize, t); // compute number of bytes
__ neg(t); // negate so it can be used with save
__ save(SP, t, SP); // setup new frame
}
@ -191,19 +191,13 @@ class StubGenerator: public StubCodeGenerator {
// copy parameters if any
Label loop;
__ BIND(loop);
// Store tag first.
if (TaggedStackInterpreter) {
__ ld_ptr(src, 0, tmp);
__ add(src, BytesPerWord, src); // get next
__ st_ptr(tmp, dst, Interpreter::tag_offset_in_bytes());
}
// Store parameter value
__ ld_ptr(src, 0, tmp);
__ add(src, BytesPerWord, src);
__ st_ptr(tmp, dst, Interpreter::value_offset_in_bytes());
__ st_ptr(tmp, dst, 0);
__ deccc(cnt);
__ br(Assembler::greater, false, Assembler::pt, loop);
__ delayed()->sub(dst, Interpreter::stackElementSize(), dst);
__ delayed()->sub(dst, Interpreter::stackElementSize, dst);
// done
__ BIND(exit);
@ -220,7 +214,7 @@ class StubGenerator: public StubCodeGenerator {
// setup parameters
const Register t = G3_scratch;
__ ld_ptr(parameter_size.as_in().as_address(), t); // get parameter size (in words)
__ sll(t, Interpreter::logStackElementSize(), t); // compute number of bytes
__ sll(t, Interpreter::logStackElementSize, t); // compute number of bytes
__ sub(FP, t, Gargs); // setup parameter pointer
#ifdef _LP64
__ add( Gargs, STACK_BIAS, Gargs ); // Account for LP64 stack bias
@ -2917,6 +2911,16 @@ class StubGenerator: public StubCodeGenerator {
// arraycopy stubs used by compilers
generate_arraycopy_stubs();
// generic method handle stubs
if (EnableMethodHandles && SystemDictionary::MethodHandle_klass() != NULL) {
for (MethodHandles::EntryKind ek = MethodHandles::_EK_FIRST;
ek < MethodHandles::_EK_LIMIT;
ek = MethodHandles::EntryKind(1 + (int)ek)) {
StubCodeMark mark(this, "MethodHandle", MethodHandles::entry_name(ek));
MethodHandles::generate_method_handle_stub(_masm, ek);
}
}
// Don't initialize the platform math functions since sparc
// doesn't have intrinsics for these operations.
}

View File

@ -151,8 +151,10 @@ address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
address compiled_entry = __ pc();
TosState incoming_state = state;
Label cont;
address compiled_entry = __ pc();
address entry = __ pc();
#if !defined(_LP64) && defined(COMPILER2)
@ -165,12 +167,11 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
// do this here. Unfortunately if we did a rethrow we'd see an machepilog node
// first which would move g1 -> O0/O1 and destroy the exception we were throwing.
if( state == ltos ) {
__ srl (G1, 0,O1);
__ srlx(G1,32,O0);
if (incoming_state == ltos) {
__ srl (G1, 0, O1);
__ srlx(G1, 32, O0);
}
#endif /* !_LP64 && COMPILER2 */
#endif // !_LP64 && COMPILER2
__ bind(cont);
@ -182,17 +183,32 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ mov(Llast_SP, SP); // Remove any adapter added stack space.
Label L_got_cache, L_giant_index;
const Register cache = G3_scratch;
const Register size = G1_scratch;
if (EnableInvokeDynamic) {
__ ldub(Address(Lbcp, 0), G1_scratch); // Load current bytecode.
__ cmp(G1_scratch, Bytecodes::_invokedynamic);
__ br(Assembler::equal, false, Assembler::pn, L_giant_index);
__ delayed()->nop();
}
__ get_cache_and_index_at_bcp(cache, G1_scratch, 1);
__ bind(L_got_cache);
__ ld_ptr(cache, constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::flags_offset(), size);
__ and3(size, 0xFF, size); // argument size in words
__ sll(size, Interpreter::logStackElementSize(), size); // each argument size in bytes
__ sll(size, Interpreter::logStackElementSize, size); // each argument size in bytes
__ add(Lesp, size, Lesp); // pop arguments
__ dispatch_next(state, step);
// out of the main line of code...
if (EnableInvokeDynamic) {
__ bind(L_giant_index);
__ get_cache_and_index_at_bcp(cache, G1_scratch, 1, true);
__ ba(false, L_got_cache);
__ delayed()->nop();
}
return entry;
}
@ -479,7 +495,7 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
// Set the saved SP after the register window save
//
assert_different_registers(Gargs, Glocals_size, Gframe_size, O5_savedSP);
__ sll(Glocals_size, Interpreter::logStackElementSize(), Otmp1);
__ sll(Glocals_size, Interpreter::logStackElementSize, Otmp1);
__ add(Gargs, Otmp1, Gargs);
if (native_call) {
@ -495,7 +511,7 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
__ lduh( size_of_locals, Otmp1 );
__ sub( Otmp1, Glocals_size, Glocals_size );
__ round_to( Glocals_size, WordsPerLong );
__ sll( Glocals_size, Interpreter::logStackElementSize(), Glocals_size );
__ sll( Glocals_size, Interpreter::logStackElementSize, Glocals_size );
// see if the frame is greater than one page in size. If so,
// then we need to verify there is enough stack space remaining
@ -503,7 +519,7 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
__ lduh( max_stack, Gframe_size );
__ add( Gframe_size, extra_space, Gframe_size );
__ round_to( Gframe_size, WordsPerLong );
__ sll( Gframe_size, Interpreter::logStackElementSize(), Gframe_size);
__ sll( Gframe_size, Interpreter::logStackElementSize, Gframe_size);
// Add in java locals size for stack overflow check only
__ add( Gframe_size, Glocals_size, Gframe_size );
@ -1218,8 +1234,8 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
// be updated!
__ lduh( size_of_locals, O2 );
__ lduh( size_of_parameters, O1 );
__ sll( O2, Interpreter::logStackElementSize(), O2);
__ sll( O1, Interpreter::logStackElementSize(), O1 );
__ sll( O2, Interpreter::logStackElementSize, O2);
__ sll( O1, Interpreter::logStackElementSize, O1 );
__ sub( Llocals, O2, O2 );
__ sub( Llocals, O1, O1 );
@ -1454,8 +1470,8 @@ static int size_activation_helper(int callee_extra_locals, int max_stack, int mo
round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
// callee_locals and max_stack are counts, not the size in frame.
const int locals_size =
round_to(callee_extra_locals * Interpreter::stackElementWords(), WordsPerLong);
const int max_stack_words = max_stack * Interpreter::stackElementWords();
round_to(callee_extra_locals * Interpreter::stackElementWords, WordsPerLong);
const int max_stack_words = max_stack * Interpreter::stackElementWords;
return (round_to((max_stack_words
//6815692//+ methodOopDesc::extra_stack_words()
+ rounded_vm_local_words
@ -1554,11 +1570,11 @@ int AbstractInterpreter::layout_activation(methodOop method,
// preallocate stack space
intptr_t* esp = monitors - 1 -
(tempcount * Interpreter::stackElementWords()) -
(tempcount * Interpreter::stackElementWords) -
popframe_extra_args;
int local_words = method->max_locals() * Interpreter::stackElementWords();
int parm_words = method->size_of_parameters() * Interpreter::stackElementWords();
int local_words = method->max_locals() * Interpreter::stackElementWords;
int parm_words = method->size_of_parameters() * Interpreter::stackElementWords;
NEEDS_CLEANUP;
intptr_t* locals;
if (caller->is_interpreted_frame()) {
@ -1646,7 +1662,7 @@ int AbstractInterpreter::layout_activation(methodOop method,
BasicObjectLock* mp = (BasicObjectLock*)monitors;
assert(interpreter_frame->interpreter_frame_method() == method, "method matches");
assert(interpreter_frame->interpreter_frame_local_at(9) == (intptr_t *)((intptr_t)locals - (9 * Interpreter::stackElementSize())+Interpreter::value_offset_in_bytes()), "locals match");
assert(interpreter_frame->interpreter_frame_local_at(9) == (intptr_t *)((intptr_t)locals - (9 * Interpreter::stackElementSize)), "locals match");
assert(interpreter_frame->interpreter_frame_monitor_end() == mp, "monitor_end matches");
assert(((intptr_t *)interpreter_frame->interpreter_frame_monitor_begin()) == ((intptr_t *)mp)+monitor_size, "monitor_begin matches");
assert(interpreter_frame->interpreter_frame_tos_address()-1 == esp, "esp matches");
@ -1742,7 +1758,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// Compute size of arguments for saving when returning to deoptimized caller
__ lduh(Lmethod, in_bytes(methodOopDesc::size_of_parameters_offset()), Gtmp1);
__ sll(Gtmp1, Interpreter::logStackElementSize(), Gtmp1);
__ sll(Gtmp1, Interpreter::logStackElementSize, Gtmp1);
__ sub(Llocals, Gtmp1, Gtmp2);
__ add(Gtmp2, wordSize, Gtmp2);
// Save these arguments

View File

@ -1,5 +1,5 @@
/*
* Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,7 +29,8 @@
// fail with a guarantee ("not enough space for interpreter generation");
// if too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI and TaggedStackInterpreter
// Max size with JVMTI
#ifdef _LP64
// The sethi() instruction generates lots more instructions when shell
// stack limit is unlimited, so that's why this is much bigger.

View File

@ -1,5 +1,5 @@
/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -580,7 +580,6 @@ void TemplateTable::saload() {
void TemplateTable::iload(int n) {
transition(vtos, itos);
debug_only(__ verify_local_tag(frame::TagValue, Llocals, Otos_i, n));
__ ld( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
}
@ -588,7 +587,6 @@ void TemplateTable::iload(int n) {
void TemplateTable::lload(int n) {
transition(vtos, ltos);
assert(n+1 < Argument::n_register_parameters, "would need more code");
debug_only(__ verify_local_tag(frame::TagCategory2, Llocals, Otos_l, n));
__ load_unaligned_long(Llocals, Interpreter::local_offset_in_bytes(n+1), Otos_l);
}
@ -596,7 +594,6 @@ void TemplateTable::lload(int n) {
void TemplateTable::fload(int n) {
transition(vtos, ftos);
assert(n < Argument::n_register_parameters, "would need more code");
debug_only(__ verify_local_tag(frame::TagValue, Llocals, G3_scratch, n));
__ ldf( FloatRegisterImpl::S, Llocals, Interpreter::local_offset_in_bytes(n), Ftos_f );
}
@ -604,14 +601,12 @@ void TemplateTable::fload(int n) {
void TemplateTable::dload(int n) {
transition(vtos, dtos);
FloatRegister dst = Ftos_d;
debug_only(__ verify_local_tag(frame::TagCategory2, Llocals, G3_scratch, n));
__ load_unaligned_double(Llocals, Interpreter::local_offset_in_bytes(n+1), dst);
}
void TemplateTable::aload(int n) {
transition(vtos, atos);
debug_only(__ verify_local_tag(frame::TagReference, Llocals, Otos_i, n));
__ ld_ptr( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
}
@ -707,12 +702,11 @@ void TemplateTable::dstore() {
void TemplateTable::astore() {
transition(vtos, vtos);
// astore tos can also be a returnAddress, so load and store the tag too
__ load_ptr_and_tag(0, Otos_i, Otos_l2);
__ inc(Lesp, Interpreter::stackElementSize());
__ load_ptr(0, Otos_i);
__ inc(Lesp, Interpreter::stackElementSize);
__ verify_oop_or_return_address(Otos_i, G3_scratch);
locals_index(G3_scratch);
__ store_local_ptr( G3_scratch, Otos_i, Otos_l2 );
__ store_local_ptr(G3_scratch, Otos_i);
}
@ -750,12 +744,11 @@ void TemplateTable::wide_dstore() {
void TemplateTable::wide_astore() {
transition(vtos, vtos);
// astore tos can also be a returnAddress, so load and store the tag too
__ load_ptr_and_tag(0, Otos_i, Otos_l2);
__ inc(Lesp, Interpreter::stackElementSize());
__ load_ptr(0, Otos_i);
__ inc(Lesp, Interpreter::stackElementSize);
__ verify_oop_or_return_address(Otos_i, G3_scratch);
locals_index_wide(G3_scratch);
__ store_local_ptr( G3_scratch, Otos_i, Otos_l2 );
__ store_local_ptr(G3_scratch, Otos_i);
}
@ -845,13 +838,13 @@ void TemplateTable::aastore() {
do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i, G3_scratch, _bs->kind(), true);
__ ba(false,done);
__ delayed()->inc(Lesp, 3* Interpreter::stackElementSize()); // adj sp (pops array, index and value)
__ delayed()->inc(Lesp, 3* Interpreter::stackElementSize); // adj sp (pops array, index and value)
__ bind(is_null);
do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), G0, G4_scratch, _bs->kind(), true);
__ profile_null_seen(G3_scratch);
__ inc(Lesp, 3* Interpreter::stackElementSize()); // adj sp (pops array, index and value)
__ inc(Lesp, 3* Interpreter::stackElementSize); // adj sp (pops array, index and value)
__ bind(done);
}
@ -884,7 +877,6 @@ void TemplateTable::sastore() {
void TemplateTable::istore(int n) {
transition(itos, vtos);
__ tag_local(frame::TagValue, Llocals, Otos_i, n);
__ st(Otos_i, Llocals, Interpreter::local_offset_in_bytes(n));
}
@ -892,7 +884,6 @@ void TemplateTable::istore(int n) {
void TemplateTable::lstore(int n) {
transition(ltos, vtos);
assert(n+1 < Argument::n_register_parameters, "only handle register cases");
__ tag_local(frame::TagCategory2, Llocals, Otos_l, n);
__ store_unaligned_long(Otos_l, Llocals, Interpreter::local_offset_in_bytes(n+1));
}
@ -901,7 +892,6 @@ void TemplateTable::lstore(int n) {
void TemplateTable::fstore(int n) {
transition(ftos, vtos);
assert(n < Argument::n_register_parameters, "only handle register cases");
__ tag_local(frame::TagValue, Llocals, Otos_l, n);
__ stf(FloatRegisterImpl::S, Ftos_f, Llocals, Interpreter::local_offset_in_bytes(n));
}
@ -909,30 +899,28 @@ void TemplateTable::fstore(int n) {
void TemplateTable::dstore(int n) {
transition(dtos, vtos);
FloatRegister src = Ftos_d;
__ tag_local(frame::TagCategory2, Llocals, Otos_l, n);
__ store_unaligned_double(src, Llocals, Interpreter::local_offset_in_bytes(n+1));
}
void TemplateTable::astore(int n) {
transition(vtos, vtos);
// astore tos can also be a returnAddress, so load and store the tag too
__ load_ptr_and_tag(0, Otos_i, Otos_l2);
__ inc(Lesp, Interpreter::stackElementSize());
__ load_ptr(0, Otos_i);
__ inc(Lesp, Interpreter::stackElementSize);
__ verify_oop_or_return_address(Otos_i, G3_scratch);
__ store_local_ptr( n, Otos_i, Otos_l2 );
__ store_local_ptr(n, Otos_i);
}
void TemplateTable::pop() {
transition(vtos, vtos);
__ inc(Lesp, Interpreter::stackElementSize());
__ inc(Lesp, Interpreter::stackElementSize);
}
void TemplateTable::pop2() {
transition(vtos, vtos);
__ inc(Lesp, 2 * Interpreter::stackElementSize());
__ inc(Lesp, 2 * Interpreter::stackElementSize);
}
@ -940,8 +928,8 @@ void TemplateTable::dup() {
transition(vtos, vtos);
// stack: ..., a
// load a and tag
__ load_ptr_and_tag(0, Otos_i, Otos_l2);
__ push_ptr(Otos_i, Otos_l2);
__ load_ptr(0, Otos_i);
__ push_ptr(Otos_i);
// stack: ..., a, a
}
@ -949,11 +937,11 @@ void TemplateTable::dup() {
void TemplateTable::dup_x1() {
transition(vtos, vtos);
// stack: ..., a, b
__ load_ptr_and_tag(1, G3_scratch, G4_scratch); // get a
__ load_ptr_and_tag(0, Otos_l1, Otos_l2); // get b
__ store_ptr_and_tag(1, Otos_l1, Otos_l2); // put b
__ store_ptr_and_tag(0, G3_scratch, G4_scratch); // put a - like swap
__ push_ptr(Otos_l1, Otos_l2); // push b
__ load_ptr( 1, G3_scratch); // get a
__ load_ptr( 0, Otos_l1); // get b
__ store_ptr(1, Otos_l1); // put b
__ store_ptr(0, G3_scratch); // put a - like swap
__ push_ptr(Otos_l1); // push b
// stack: ..., b, a, b
}
@ -962,27 +950,27 @@ void TemplateTable::dup_x2() {
transition(vtos, vtos);
// stack: ..., a, b, c
// get c and push on stack, reuse registers
__ load_ptr_and_tag(0, G3_scratch, G4_scratch); // get c
__ push_ptr(G3_scratch, G4_scratch); // push c with tag
__ load_ptr( 0, G3_scratch); // get c
__ push_ptr(G3_scratch); // push c with tag
// stack: ..., a, b, c, c (c in reg) (Lesp - 4)
// (stack offsets n+1 now)
__ load_ptr_and_tag(3, Otos_l1, Otos_l2); // get a
__ store_ptr_and_tag(3, G3_scratch, G4_scratch); // put c at 3
__ load_ptr( 3, Otos_l1); // get a
__ store_ptr(3, G3_scratch); // put c at 3
// stack: ..., c, b, c, c (a in reg)
__ load_ptr_and_tag(2, G3_scratch, G4_scratch); // get b
__ store_ptr_and_tag(2, Otos_l1, Otos_l2); // put a at 2
__ load_ptr( 2, G3_scratch); // get b
__ store_ptr(2, Otos_l1); // put a at 2
// stack: ..., c, a, c, c (b in reg)
__ store_ptr_and_tag(1, G3_scratch, G4_scratch); // put b at 1
__ store_ptr(1, G3_scratch); // put b at 1
// stack: ..., c, a, b, c
}
void TemplateTable::dup2() {
transition(vtos, vtos);
__ load_ptr_and_tag(1, G3_scratch, G4_scratch); // get a
__ load_ptr_and_tag(0, Otos_l1, Otos_l2); // get b
__ push_ptr(G3_scratch, G4_scratch); // push a
__ push_ptr(Otos_l1, Otos_l2); // push b
__ load_ptr(1, G3_scratch); // get a
__ load_ptr(0, Otos_l1); // get b
__ push_ptr(G3_scratch); // push a
__ push_ptr(Otos_l1); // push b
// stack: ..., a, b, a, b
}
@ -990,17 +978,17 @@ void TemplateTable::dup2() {
void TemplateTable::dup2_x1() {
transition(vtos, vtos);
// stack: ..., a, b, c
__ load_ptr_and_tag(1, Lscratch, G1_scratch); // get b
__ load_ptr_and_tag(2, Otos_l1, Otos_l2); // get a
__ store_ptr_and_tag(2, Lscratch, G1_scratch); // put b at a
__ load_ptr( 1, Lscratch); // get b
__ load_ptr( 2, Otos_l1); // get a
__ store_ptr(2, Lscratch); // put b at a
// stack: ..., b, b, c
__ load_ptr_and_tag(0, G3_scratch, G4_scratch); // get c
__ store_ptr_and_tag(1, G3_scratch, G4_scratch); // put c at b
__ load_ptr( 0, G3_scratch); // get c
__ store_ptr(1, G3_scratch); // put c at b
// stack: ..., b, c, c
__ store_ptr_and_tag(0, Otos_l1, Otos_l2); // put a at c
__ store_ptr(0, Otos_l1); // put a at c
// stack: ..., b, c, a
__ push_ptr(Lscratch, G1_scratch); // push b
__ push_ptr(G3_scratch, G4_scratch); // push c
__ push_ptr(Lscratch); // push b
__ push_ptr(G3_scratch); // push c
// stack: ..., b, c, a, b, c
}
@ -1010,18 +998,18 @@ void TemplateTable::dup2_x1() {
void TemplateTable::dup2_x2() {
transition(vtos, vtos);
// stack: ..., a, b, c, d
__ load_ptr_and_tag(1, Lscratch, G1_scratch); // get c
__ load_ptr_and_tag(3, Otos_l1, Otos_l2); // get a
__ store_ptr_and_tag(3, Lscratch, G1_scratch); // put c at 3
__ store_ptr_and_tag(1, Otos_l1, Otos_l2); // put a at 1
__ load_ptr( 1, Lscratch); // get c
__ load_ptr( 3, Otos_l1); // get a
__ store_ptr(3, Lscratch); // put c at 3
__ store_ptr(1, Otos_l1); // put a at 1
// stack: ..., c, b, a, d
__ load_ptr_and_tag(2, G3_scratch, G4_scratch); // get b
__ load_ptr_and_tag(0, Otos_l1, Otos_l2); // get d
__ store_ptr_and_tag(0, G3_scratch, G4_scratch); // put b at 0
__ store_ptr_and_tag(2, Otos_l1, Otos_l2); // put d at 2
__ load_ptr( 2, G3_scratch); // get b
__ load_ptr( 0, Otos_l1); // get d
__ store_ptr(0, G3_scratch); // put b at 0
__ store_ptr(2, Otos_l1); // put d at 2
// stack: ..., c, d, a, b
__ push_ptr(Lscratch, G1_scratch); // push c
__ push_ptr(Otos_l1, Otos_l2); // push d
__ push_ptr(Lscratch); // push c
__ push_ptr(Otos_l1); // push d
// stack: ..., c, d, a, b, c, d
}
@ -1029,10 +1017,10 @@ void TemplateTable::dup2_x2() {
void TemplateTable::swap() {
transition(vtos, vtos);
// stack: ..., a, b
__ load_ptr_and_tag(1, G3_scratch, G4_scratch); // get a
__ load_ptr_and_tag(0, Otos_l1, Otos_l2); // get b
__ store_ptr_and_tag(0, G3_scratch, G4_scratch); // put b
__ store_ptr_and_tag(1, Otos_l1, Otos_l2); // put a
__ load_ptr( 1, G3_scratch); // get a
__ load_ptr( 0, Otos_l1); // get b
__ store_ptr(0, G3_scratch); // put b
__ store_ptr(1, Otos_l1); // put a
// stack: ..., b, a
}
@ -1045,9 +1033,9 @@ void TemplateTable::iop2(Operation op) {
case sub: __ sub(O1, Otos_i, Otos_i); break;
// %%%%% Mul may not exist: better to call .mul?
case mul: __ smul(O1, Otos_i, Otos_i); break;
case _and: __ and3(O1, Otos_i, Otos_i); break;
case _or: __ or3(O1, Otos_i, Otos_i); break;
case _xor: __ xor3(O1, Otos_i, Otos_i); break;
case _and: __ and3(O1, Otos_i, Otos_i); break;
case _or: __ or3(O1, Otos_i, Otos_i); break;
case _xor: __ xor3(O1, Otos_i, Otos_i); break;
case shl: __ sll(O1, Otos_i, Otos_i); break;
case shr: __ sra(O1, Otos_i, Otos_i); break;
case ushr: __ srl(O1, Otos_i, Otos_i); break;
@ -1061,17 +1049,17 @@ void TemplateTable::lop2(Operation op) {
__ pop_l(O2);
switch (op) {
#ifdef _LP64
case add: __ add(O2, Otos_l, Otos_l); break;
case sub: __ sub(O2, Otos_l, Otos_l); break;
case _and: __ and3( O2, Otos_l, Otos_l); break;
case _or: __ or3( O2, Otos_l, Otos_l); break;
case _xor: __ xor3( O2, Otos_l, Otos_l); break;
case add: __ add(O2, Otos_l, Otos_l); break;
case sub: __ sub(O2, Otos_l, Otos_l); break;
case _and: __ and3(O2, Otos_l, Otos_l); break;
case _or: __ or3(O2, Otos_l, Otos_l); break;
case _xor: __ xor3(O2, Otos_l, Otos_l); break;
#else
case add: __ addcc(O3, Otos_l2, Otos_l2); __ addc(O2, Otos_l1, Otos_l1); break;
case sub: __ subcc(O3, Otos_l2, Otos_l2); __ subc(O2, Otos_l1, Otos_l1); break;
case _and: __ and3( O3, Otos_l2, Otos_l2); __ and3( O2, Otos_l1, Otos_l1); break;
case _or: __ or3( O3, Otos_l2, Otos_l2); __ or3( O2, Otos_l1, Otos_l1); break;
case _xor: __ xor3( O3, Otos_l2, Otos_l2); __ xor3( O2, Otos_l1, Otos_l1); break;
case _and: __ and3(O3, Otos_l2, Otos_l2); __ and3(O2, Otos_l1, Otos_l1); break;
case _or: __ or3(O3, Otos_l2, Otos_l2); __ or3(O2, Otos_l1, Otos_l1); break;
case _xor: __ xor3(O3, Otos_l2, Otos_l2); __ xor3(O2, Otos_l1, Otos_l1); break;
#endif
default: ShouldNotReachHere();
}
@ -1307,7 +1295,7 @@ void TemplateTable::iinc() {
__ ldsb(Lbcp, 2, O2); // load constant
__ access_local_int(G3_scratch, Otos_i);
__ add(Otos_i, O2, Otos_i);
__ st(Otos_i, G3_scratch, Interpreter::value_offset_in_bytes()); // access_local_int puts E.A. in G3_scratch
__ st(Otos_i, G3_scratch, 0); // access_local_int puts E.A. in G3_scratch
}
@ -1317,7 +1305,7 @@ void TemplateTable::wide_iinc() {
__ get_2_byte_integer_at_bcp( 4, O2, O3, InterpreterMacroAssembler::Signed);
__ access_local_int(G3_scratch, Otos_i);
__ add(Otos_i, O3, Otos_i);
__ st(Otos_i, G3_scratch, Interpreter::value_offset_in_bytes()); // access_local_int puts E.A. in G3_scratch
__ st(Otos_i, G3_scratch, 0); // access_local_int puts E.A. in G3_scratch
}
@ -1555,7 +1543,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
// Bump Lbcp to target of JSR
__ add(Lbcp, O1_disp, Lbcp);
// Push returnAddress for "ret" on stack
__ push_ptr(Otos_i, G0); // push ptr sized thing plus 0 for tag.
__ push_ptr(Otos_i);
// And away we go!
__ dispatch_next(vtos);
return;
@ -1963,19 +1951,30 @@ void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constrain
// ----------------------------------------------------------------------------
void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register index) {
assert(byte_no == 1 || byte_no == 2, "byte_no out of range");
bool is_invokedynamic = (bytecode() == Bytecodes::_invokedynamic);
// Depends on cpCacheOop layout!
const int shift_count = (1 + byte_no)*BitsPerByte;
Label resolved;
__ get_cache_and_index_at_bcp(Rcache, index, 1);
__ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::indices_offset(), Lbyte_code);
__ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
if (is_invokedynamic) {
// We are resolved if the f1 field contains a non-null CallSite object.
__ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::f1_offset(), Lbyte_code);
__ tst(Lbyte_code);
__ br(Assembler::notEqual, false, Assembler::pt, resolved);
__ delayed()->set((int)bytecode(), O1);
} else {
__ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::indices_offset(), Lbyte_code);
__ srl( Lbyte_code, shift_count, Lbyte_code );
__ and3( Lbyte_code, 0xFF, Lbyte_code );
__ cmp( Lbyte_code, (int)bytecode());
__ br( Assembler::equal, false, Assembler::pt, resolved);
__ delayed()->set((int)bytecode(), O1);
__ srl( Lbyte_code, shift_count, Lbyte_code );
__ and3( Lbyte_code, 0xFF, Lbyte_code );
__ cmp( Lbyte_code, (int)bytecode());
__ br( Assembler::equal, false, Assembler::pt, resolved);
__ delayed()->set((int)bytecode(), O1);
}
address entry;
switch (bytecode()) {
@ -1987,12 +1986,13 @@ void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Regist
case Bytecodes::_invokespecial : // fall through
case Bytecodes::_invokestatic : // fall through
case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
default : ShouldNotReachHere(); break;
}
// first time invocation - must resolve first
__ call_VM(noreg, entry, O1);
// Update registers with resolved info
__ get_cache_and_index_at_bcp(Rcache, index, 1);
__ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
__ bind(resolved);
}
@ -2742,7 +2742,7 @@ void TemplateTable::fast_xaccess(TosState state) {
Register Rflags = G4_scratch;
Register Rreceiver = Lscratch;
__ ld_ptr(Llocals, Interpreter::value_offset_in_bytes(), Rreceiver);
__ ld_ptr(Llocals, 0, Rreceiver);
// access constant pool cache (is resolved)
__ get_cache_and_index_at_bcp(Rcache, G4_scratch, 2);
@ -3130,7 +3130,42 @@ void TemplateTable::invokedynamic(int byte_no) {
return;
}
__ stop("invokedynamic NYI");//6815692//
// G5: CallSite object (f1)
// XX: unused (f2)
// G3: receiver address
// XX: flags (unused)
Register G5_callsite = G5_method;
Register Rscratch = G3_scratch;
Register Rtemp = G1_scratch;
Register Rret = Lscratch;
load_invoke_cp_cache_entry(byte_no, G5_callsite, noreg, Rret, false);
__ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
__ verify_oop(G5_callsite);
// profile this call
__ profile_call(O4);
// get return address
AddressLiteral table(Interpreter::return_5_addrs_by_index_table());
__ set(table, Rtemp);
__ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type
// Make sure we don't need to mask Rret for tosBits after the above shift
ConstantPoolCacheEntry::verify_tosBits();
__ sll(Rret, LogBytesPerWord, Rret);
__ ld_ptr(Rtemp, Rret, Rret); // get return address
__ ld_ptr(G5_callsite, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, Rscratch), G3_method_handle);
__ null_check(G3_method_handle);
// Adjust Rret first so Llast_SP can be same as Rret
__ add(Rret, -frame::pc_return_offset, O7);
__ add(Lesp, BytesPerWord, Gargs); // setup parameter pointer
__ jump_to_method_handle_entry(G3_method_handle, Rtemp, /* emit_delayed_nop */ false);
// Record SP so we can remove any stack space allocated by adapter transition
__ delayed()->mov(SP, Llast_SP);
}
@ -3649,7 +3684,7 @@ void TemplateTable::multianewarray() {
transition(vtos, atos);
// put ndims * wordSize into Lscratch
__ ldub( Lbcp, 3, Lscratch);
__ sll( Lscratch, Interpreter::logStackElementSize(), Lscratch);
__ sll( Lscratch, Interpreter::logStackElementSize, Lscratch);
// Lesp points past last_dim, so set to O1 to first_dim address
__ add( Lesp, Lscratch, O1);
call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), O1);

View File

@ -104,6 +104,12 @@ void VM_Version::initialize() {
if (FLAG_IS_DEFAULT(OptoLoopAlignment)) {
FLAG_SET_DEFAULT(OptoLoopAlignment, 4);
}
// When using CMS, we cannot use memset() in BOT updates because
// the sun4v/CMT version in libc_psr uses BIS which exposes
// "phantom zeros" to concurrent readers. See 6948537.
if (FLAG_IS_DEFAULT(UseMemSetInBOT) && UseConcMarkSweepGC) {
FLAG_SET_DEFAULT(UseMemSetInBOT, false);
}
}
// Use hardware population count instruction if available.

View File

@ -1,5 +1,5 @@
/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -6492,24 +6492,19 @@ int MacroAssembler::load_unsigned_short(Register dst, Address src) {
}
void MacroAssembler::load_sized_value(Register dst, Address src,
int size_in_bytes, bool is_signed) {
switch (size_in_bytes ^ (is_signed ? -1 : 0)) {
size_t size_in_bytes, bool is_signed) {
switch (size_in_bytes) {
#ifndef _LP64
// For case 8, caller is responsible for manually loading
// the second word into another register.
case ~8: // fall through:
case 8: movl( dst, src ); break;
case 8: movl(dst, src); break;
#else
case ~8: // fall through:
case 8: movq( dst, src ); break;
case 8: movq(dst, src); break;
#endif
case ~4: // fall through:
case 4: movl( dst, src ); break;
case ~2: load_signed_short( dst, src ); break;
case 2: load_unsigned_short( dst, src ); break;
case ~1: load_signed_byte( dst, src ); break;
case 1: load_unsigned_byte( dst, src ); break;
default: ShouldNotReachHere();
case 4: movl(dst, src); break;
case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
case 1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
default: ShouldNotReachHere();
}
}
@ -7706,6 +7701,7 @@ void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_re
// method handle's MethodType. This macro hides the distinction.
void MacroAssembler::load_method_handle_vmslots(Register vmslots_reg, Register mh_reg,
Register temp_reg) {
assert_different_registers(vmslots_reg, mh_reg, temp_reg);
if (UseCompressedOops) unimplemented(); // field accesses must decode
// load mh.type.form.vmslots
if (java_dyn_MethodHandle::vmslots_offset_in_bytes() != 0) {
@ -7744,7 +7740,7 @@ void MacroAssembler::jump_to_method_handle_entry(Register mh_reg, Register temp_
Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
int extra_slot_offset) {
// cf. TemplateTable::prepare_invoke(), if (load_receiver).
int stackElementSize = Interpreter::stackElementSize();
int stackElementSize = Interpreter::stackElementSize;
int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
#ifdef ASSERT
int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
@ -7975,7 +7971,7 @@ class FPU_State {
case 2: return "special";
case 3: return "empty";
}
ShouldNotReachHere()
ShouldNotReachHere();
return NULL;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1511,7 +1511,7 @@ class MacroAssembler: public Assembler {
void extend_sign(Register hi, Register lo);
// Loading values by size and signed-ness
void load_sized_value(Register dst, Address src, int size_in_bytes, bool is_signed);
void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed);
// Support for inc/dec with optimal instruction selection depending on value

View File

@ -2838,7 +2838,7 @@ void LIR_Assembler::emit_static_call_stub() {
// On 64bit this will die since it will take a movq & jmp, must be only a jmp
__ jump(RuntimeAddress(__ pc()));
assert(__ offset() - start <= call_stub_size, "stub too big")
assert(__ offset() - start <= call_stub_size, "stub too big");
__ end_a_stub();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,6 +28,6 @@
// Size of interpreter code. Increase if too small. Interpreter will
// fail with a guarantee ("not enough space for interpreter generation");
// if too small.
// Run with +PrintInterpreterSize to get the VM to print out the size.
// Max size with JVMTI and TaggedStackInterpreter
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
const static int InterpreterCodeSize = 168 * 1024;

View File

@ -291,8 +291,8 @@ BasicObjectLock* frame::interpreter_frame_monitor_begin() const {
BasicObjectLock* frame::interpreter_frame_monitor_end() const {
BasicObjectLock* result = (BasicObjectLock*) *addr_at(interpreter_frame_monitor_block_top_offset);
// make sure the pointer points inside the frame
assert((intptr_t) fp() > (intptr_t) result, "result must < than frame pointer");
assert((intptr_t) sp() <= (intptr_t) result, "result must >= than stack pointer");
assert(sp() <= (intptr_t*) result, "monitor end should be above the stack pointer");
assert((intptr_t*) result < fp(), "monitor end should be strictly below the frame pointer");
return result;
}
@ -502,7 +502,7 @@ bool frame::interpreter_frame_equals_unpacked_fp(intptr_t* fp) {
// When unpacking an optimized frame the frame pointer is
// adjusted with:
int diff = (method->max_locals() - method->size_of_parameters()) *
Interpreter::stackElementWords();
Interpreter::stackElementWords;
return _fp == (fp - diff);
}
@ -542,7 +542,7 @@ bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
// stack frames shouldn't be much larger than max_stack elements
if (fp() - sp() > 1024 + m->max_stack()*Interpreter::stackElementSize()) {
if (fp() - sp() > 1024 + m->max_stack()*Interpreter::stackElementSize) {
return false;
}
@ -594,7 +594,7 @@ BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result)
#ifdef AMD64
// This is times two because we do a push(ltos) after pushing XMM0
// and that takes two interpreter stack slots.
tos_addr += 2 * Interpreter::stackElementWords();
tos_addr += 2 * Interpreter::stackElementWords;
#else
tos_addr += 2;
#endif // AMD64

View File

@ -265,89 +265,30 @@ void InterpreterMacroAssembler::d2ieee() {
// Java Expression Stack
#ifdef ASSERT
void InterpreterMacroAssembler::verify_stack_tag(frame::Tag t) {
if (TaggedStackInterpreter) {
Label okay;
cmpptr(Address(rsp, wordSize), (int32_t)t);
jcc(Assembler::equal, okay);
// Also compare if the stack value is zero, then the tag might
// not have been set coming from deopt.
cmpptr(Address(rsp, 0), 0);
jcc(Assembler::equal, okay);
stop("Java Expression stack tag value is bad");
bind(okay);
}
}
#endif // ASSERT
void InterpreterMacroAssembler::pop_ptr(Register r) {
debug_only(verify_stack_tag(frame::TagReference));
pop(r);
if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
}
void InterpreterMacroAssembler::pop_ptr(Register r, Register tag) {
pop(r);
// Tag may not be reference for jsr, can be returnAddress
if (TaggedStackInterpreter) pop(tag);
}
void InterpreterMacroAssembler::pop_i(Register r) {
debug_only(verify_stack_tag(frame::TagValue));
pop(r);
if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
}
void InterpreterMacroAssembler::pop_l(Register lo, Register hi) {
debug_only(verify_stack_tag(frame::TagValue));
pop(lo);
if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
debug_only(verify_stack_tag(frame::TagValue));
pop(hi);
if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
}
void InterpreterMacroAssembler::pop_f() {
debug_only(verify_stack_tag(frame::TagValue));
fld_s(Address(rsp, 0));
addptr(rsp, 1 * wordSize);
if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
}
void InterpreterMacroAssembler::pop_d() {
// Write double to stack contiguously and load into ST0
pop_dtos_to_rsp();
fld_d(Address(rsp, 0));
addptr(rsp, 2 * wordSize);
}
// Pop the top of the java expression stack to execution stack (which
// happens to be the same place).
void InterpreterMacroAssembler::pop_dtos_to_rsp() {
if (TaggedStackInterpreter) {
// Pop double value into scratch registers
debug_only(verify_stack_tag(frame::TagValue));
pop(rax);
addptr(rsp, 1* wordSize);
debug_only(verify_stack_tag(frame::TagValue));
pop(rdx);
addptr(rsp, 1* wordSize);
push(rdx);
push(rax);
}
}
void InterpreterMacroAssembler::pop_ftos_to_rsp() {
if (TaggedStackInterpreter) {
debug_only(verify_stack_tag(frame::TagValue));
pop(rax);
addptr(rsp, 1 * wordSize);
push(rax); // ftos is at rsp
}
}
void InterpreterMacroAssembler::pop(TosState state) {
switch (state) {
case atos: pop_ptr(rax); break;
@ -365,54 +306,28 @@ void InterpreterMacroAssembler::pop(TosState state) {
}
void InterpreterMacroAssembler::push_ptr(Register r) {
if (TaggedStackInterpreter) push(frame::TagReference);
push(r);
}
void InterpreterMacroAssembler::push_ptr(Register r, Register tag) {
if (TaggedStackInterpreter) push(tag); // tag first
push(r);
}
void InterpreterMacroAssembler::push_i(Register r) {
if (TaggedStackInterpreter) push(frame::TagValue);
push(r);
}
void InterpreterMacroAssembler::push_l(Register lo, Register hi) {
if (TaggedStackInterpreter) push(frame::TagValue);
push(hi);
if (TaggedStackInterpreter) push(frame::TagValue);
push(lo);
}
void InterpreterMacroAssembler::push_f() {
if (TaggedStackInterpreter) push(frame::TagValue);
// Do not schedule for no AGI! Never write beyond rsp!
subptr(rsp, 1 * wordSize);
fstp_s(Address(rsp, 0));
}
void InterpreterMacroAssembler::push_d(Register r) {
if (TaggedStackInterpreter) {
// Double values are stored as:
// tag
// high
// tag
// low
push(frame::TagValue);
subptr(rsp, 3 * wordSize);
fstp_d(Address(rsp, 0));
// move high word up to slot n-1
movl(r, Address(rsp, 1*wordSize));
movl(Address(rsp, 2*wordSize), r);
// move tag
movl(Address(rsp, 1*wordSize), frame::TagValue);
} else {
// Do not schedule for no AGI! Never write beyond rsp!
subptr(rsp, 2 * wordSize);
fstp_d(Address(rsp, 0));
}
// Do not schedule for no AGI! Never write beyond rsp!
subptr(rsp, 2 * wordSize);
fstp_d(Address(rsp, 0));
}
@ -433,118 +348,15 @@ void InterpreterMacroAssembler::push(TosState state) {
}
// Tagged stack helpers for swap and dup
void InterpreterMacroAssembler::load_ptr_and_tag(int n, Register val,
Register tag) {
// Helpers for swap and dup
void InterpreterMacroAssembler::load_ptr(int n, Register val) {
movptr(val, Address(rsp, Interpreter::expr_offset_in_bytes(n)));
if (TaggedStackInterpreter) {
movptr(tag, Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)));
}
}
void InterpreterMacroAssembler::store_ptr_and_tag(int n, Register val,
Register tag) {
void InterpreterMacroAssembler::store_ptr(int n, Register val) {
movptr(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val);
if (TaggedStackInterpreter) {
movptr(Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)), tag);
}
}
// Tagged local support
void InterpreterMacroAssembler::tag_local(frame::Tag tag, int n) {
if (TaggedStackInterpreter) {
if (tag == frame::TagCategory2) {
movptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n+1)), (int32_t)frame::TagValue);
movptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)frame::TagValue);
} else {
movptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)tag);
}
}
}
void InterpreterMacroAssembler::tag_local(frame::Tag tag, Register idx) {
if (TaggedStackInterpreter) {
if (tag == frame::TagCategory2) {
movptr(Address(rdi, idx, Interpreter::stackElementScale(),
Interpreter::local_tag_offset_in_bytes(1)), (int32_t)frame::TagValue);
movptr(Address(rdi, idx, Interpreter::stackElementScale(),
Interpreter::local_tag_offset_in_bytes(0)), (int32_t)frame::TagValue);
} else {
movptr(Address(rdi, idx, Interpreter::stackElementScale(),
Interpreter::local_tag_offset_in_bytes(0)), (int32_t)tag);
}
}
}
void InterpreterMacroAssembler::tag_local(Register tag, Register idx) {
if (TaggedStackInterpreter) {
// can only be TagValue or TagReference
movptr(Address(rdi, idx, Interpreter::stackElementScale(),
Interpreter::local_tag_offset_in_bytes(0)), tag);
}
}
void InterpreterMacroAssembler::tag_local(Register tag, int n) {
if (TaggedStackInterpreter) {
// can only be TagValue or TagReference
movptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), tag);
}
}
#ifdef ASSERT
void InterpreterMacroAssembler::verify_local_tag(frame::Tag tag, int n) {
if (TaggedStackInterpreter) {
frame::Tag t = tag;
if (tag == frame::TagCategory2) {
Label nbl;
t = frame::TagValue; // change to what is stored in locals
cmpptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n+1)), (int32_t)t);
jcc(Assembler::equal, nbl);
stop("Local tag is bad for long/double");
bind(nbl);
}
Label notBad;
cmpptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)t);
jcc(Assembler::equal, notBad);
// Also compare if the local value is zero, then the tag might
// not have been set coming from deopt.
cmpptr(Address(rdi, Interpreter::local_offset_in_bytes(n)), 0);
jcc(Assembler::equal, notBad);
stop("Local tag is bad");
bind(notBad);
}
}
void InterpreterMacroAssembler::verify_local_tag(frame::Tag tag, Register idx) {
if (TaggedStackInterpreter) {
frame::Tag t = tag;
if (tag == frame::TagCategory2) {
Label nbl;
t = frame::TagValue; // change to what is stored in locals
cmpptr(Address(rdi, idx, Interpreter::stackElementScale(),
Interpreter::local_tag_offset_in_bytes(1)), (int32_t)t);
jcc(Assembler::equal, nbl);
stop("Local tag is bad for long/double");
bind(nbl);
}
Label notBad;
cmpl(Address(rdi, idx, Interpreter::stackElementScale(),
Interpreter::local_tag_offset_in_bytes(0)), (int32_t)t);
jcc(Assembler::equal, notBad);
// Also compare if the local value is zero, then the tag might
// not have been set coming from deopt.
cmpptr(Address(rdi, idx, Interpreter::stackElementScale(),
Interpreter::local_offset_in_bytes(0)), 0);
jcc(Assembler::equal, notBad);
stop("Local tag is bad");
bind(notBad);
}
}
#endif // ASSERT
void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point) {
MacroAssembler::call_VM_leaf_base(entry_point, 0);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -85,16 +85,12 @@ class InterpreterMacroAssembler: public MacroAssembler {
void d2ieee(); // truncate dtos to 64bits
void pop_ptr(Register r = rax);
void pop_ptr(Register r, Register tag);
void pop_i(Register r = rax);
void pop_l(Register lo = rax, Register hi = rdx);
void pop_f();
void pop_d();
void pop_ftos_to_rsp();
void pop_dtos_to_rsp();
void push_ptr(Register r = rax);
void push_ptr(Register r, Register tag);
void push_i(Register r = rax);
void push_l(Register lo = rax, Register hi = rdx);
void push_d(Register r = rax);
@ -112,33 +108,15 @@ class InterpreterMacroAssembler: public MacroAssembler {
void pop(void* v ); // Add unimplemented ambiguous method
void push(void* v ); // Add unimplemented ambiguous method
DEBUG_ONLY(void verify_stack_tag(frame::Tag t);)
#endif // CC_INTERP
#ifndef CC_INTERP
void empty_expression_stack() {
movptr(rsp, Address(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize));
// NULL last_sp until next java call
movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
void empty_expression_stack() {
movptr(rsp, Address(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize));
// NULL last_sp until next java call
movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
}
// Tagged stack helpers for swap and dup
void load_ptr_and_tag(int n, Register val, Register tag);
void store_ptr_and_tag(int n, Register val, Register tag);
// Tagged Local support
void tag_local(frame::Tag tag, int n);
void tag_local(Register tag, int n);
void tag_local(frame::Tag tag, Register idx);
void tag_local(Register tag, Register idx);
#ifdef ASSERT
void verify_local_tag(frame::Tag tag, int n);
void verify_local_tag(frame::Tag tag, Register idx);
#endif // ASSERT
// Helpers for swap and dup
void load_ptr(int n, Register val);
void store_ptr(int n, Register val);
// Super call_VM calls - correspond to MacroAssembler::call_VM(_leaf) calls
void super_call_VM_leaf(address entry_point);

View File

@ -264,113 +264,51 @@ void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
// Java Expression Stack
#ifdef ASSERT
// Verifies that the stack tag matches. Must be called before the stack
// value is popped off the stack.
void InterpreterMacroAssembler::verify_stack_tag(frame::Tag t) {
if (TaggedStackInterpreter) {
frame::Tag tag = t;
if (t == frame::TagCategory2) {
tag = frame::TagValue;
Label hokay;
cmpptr(Address(rsp, 3*wordSize), (int32_t)tag);
jcc(Assembler::equal, hokay);
stop("Java Expression stack tag high value is bad");
bind(hokay);
}
Label okay;
cmpptr(Address(rsp, wordSize), (int32_t)tag);
jcc(Assembler::equal, okay);
// Also compare if the stack value is zero, then the tag might
// not have been set coming from deopt.
cmpptr(Address(rsp, 0), 0);
jcc(Assembler::equal, okay);
stop("Java Expression stack tag value is bad");
bind(okay);
}
}
#endif // ASSERT
void InterpreterMacroAssembler::pop_ptr(Register r) {
debug_only(verify_stack_tag(frame::TagReference));
pop(r);
if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
}
void InterpreterMacroAssembler::pop_ptr(Register r, Register tag) {
pop(r);
if (TaggedStackInterpreter) pop(tag);
}
void InterpreterMacroAssembler::pop_i(Register r) {
// XXX can't use pop currently, upper half non clean
debug_only(verify_stack_tag(frame::TagValue));
movl(r, Address(rsp, 0));
addptr(rsp, wordSize);
if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
}
void InterpreterMacroAssembler::pop_l(Register r) {
debug_only(verify_stack_tag(frame::TagCategory2));
movq(r, Address(rsp, 0));
addptr(rsp, 2 * Interpreter::stackElementSize());
addptr(rsp, 2 * Interpreter::stackElementSize);
}
void InterpreterMacroAssembler::pop_f(XMMRegister r) {
debug_only(verify_stack_tag(frame::TagValue));
movflt(r, Address(rsp, 0));
addptr(rsp, wordSize);
if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
}
void InterpreterMacroAssembler::pop_d(XMMRegister r) {
debug_only(verify_stack_tag(frame::TagCategory2));
movdbl(r, Address(rsp, 0));
addptr(rsp, 2 * Interpreter::stackElementSize());
addptr(rsp, 2 * Interpreter::stackElementSize);
}
void InterpreterMacroAssembler::push_ptr(Register r) {
if (TaggedStackInterpreter) push(frame::TagReference);
push(r);
}
void InterpreterMacroAssembler::push_ptr(Register r, Register tag) {
if (TaggedStackInterpreter) push(tag);
push(r);
}
void InterpreterMacroAssembler::push_i(Register r) {
if (TaggedStackInterpreter) push(frame::TagValue);
push(r);
}
void InterpreterMacroAssembler::push_l(Register r) {
if (TaggedStackInterpreter) {
push(frame::TagValue);
subptr(rsp, 1 * wordSize);
push(frame::TagValue);
subptr(rsp, 1 * wordSize);
} else {
subptr(rsp, 2 * wordSize);
}
subptr(rsp, 2 * wordSize);
movq(Address(rsp, 0), r);
}
void InterpreterMacroAssembler::push_f(XMMRegister r) {
if (TaggedStackInterpreter) push(frame::TagValue);
subptr(rsp, wordSize);
movflt(Address(rsp, 0), r);
}
void InterpreterMacroAssembler::push_d(XMMRegister r) {
if (TaggedStackInterpreter) {
push(frame::TagValue);
subptr(rsp, 1 * wordSize);
push(frame::TagValue);
subptr(rsp, 1 * wordSize);
} else {
subptr(rsp, 2 * wordSize);
}
subptr(rsp, 2 * wordSize);
movdbl(Address(rsp, 0), r);
}
@ -407,118 +345,16 @@ void InterpreterMacroAssembler::push(TosState state) {
}
// Tagged stack helpers for swap and dup
void InterpreterMacroAssembler::load_ptr_and_tag(int n, Register val,
Register tag) {
// Helpers for swap and dup
void InterpreterMacroAssembler::load_ptr(int n, Register val) {
movptr(val, Address(rsp, Interpreter::expr_offset_in_bytes(n)));
if (TaggedStackInterpreter) {
movptr(tag, Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)));
}
}
void InterpreterMacroAssembler::store_ptr_and_tag(int n, Register val,
Register tag) {
void InterpreterMacroAssembler::store_ptr(int n, Register val) {
movptr(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val);
if (TaggedStackInterpreter) {
movptr(Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)), tag);
}
}
// Tagged local support
void InterpreterMacroAssembler::tag_local(frame::Tag tag, int n) {
if (TaggedStackInterpreter) {
if (tag == frame::TagCategory2) {
movptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n+1)),
(int32_t)frame::TagValue);
movptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n)),
(int32_t)frame::TagValue);
} else {
movptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)tag);
}
}
}
void InterpreterMacroAssembler::tag_local(frame::Tag tag, Register idx) {
if (TaggedStackInterpreter) {
if (tag == frame::TagCategory2) {
movptr(Address(r14, idx, Address::times_8,
Interpreter::local_tag_offset_in_bytes(1)), (int32_t)frame::TagValue);
movptr(Address(r14, idx, Address::times_8,
Interpreter::local_tag_offset_in_bytes(0)), (int32_t)frame::TagValue);
} else {
movptr(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)),
(int32_t)tag);
}
}
}
void InterpreterMacroAssembler::tag_local(Register tag, Register idx) {
if (TaggedStackInterpreter) {
// can only be TagValue or TagReference
movptr(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)), tag);
}
}
void InterpreterMacroAssembler::tag_local(Register tag, int n) {
if (TaggedStackInterpreter) {
// can only be TagValue or TagReference
movptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), tag);
}
}
#ifdef ASSERT
void InterpreterMacroAssembler::verify_local_tag(frame::Tag tag, int n) {
if (TaggedStackInterpreter) {
frame::Tag t = tag;
if (tag == frame::TagCategory2) {
Label nbl;
t = frame::TagValue; // change to what is stored in locals
cmpptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n+1)), (int32_t)t);
jcc(Assembler::equal, nbl);
stop("Local tag is bad for long/double");
bind(nbl);
}
Label notBad;
cmpq(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)t);
jcc(Assembler::equal, notBad);
// Also compare if the local value is zero, then the tag might
// not have been set coming from deopt.
cmpptr(Address(r14, Interpreter::local_offset_in_bytes(n)), 0);
jcc(Assembler::equal, notBad);
stop("Local tag is bad");
bind(notBad);
}
}
void InterpreterMacroAssembler::verify_local_tag(frame::Tag tag, Register idx) {
if (TaggedStackInterpreter) {
frame::Tag t = tag;
if (tag == frame::TagCategory2) {
Label nbl;
t = frame::TagValue; // change to what is stored in locals
cmpptr(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(1)), (int32_t)t);
jcc(Assembler::equal, nbl);
stop("Local tag is bad for long/double");
bind(nbl);
}
Label notBad;
cmpptr(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)), (int32_t)t);
jcc(Assembler::equal, notBad);
// Also compare if the local value is zero, then the tag might
// not have been set coming from deopt.
cmpptr(Address(r14, idx, Address::times_8, Interpreter::local_offset_in_bytes(0)), 0);
jcc(Assembler::equal, notBad);
stop("Local tag is bad");
bind(notBad);
}
}
#endif // ASSERT
void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point) {
MacroAssembler::call_VM_leaf_base(entry_point, 0);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -120,38 +120,16 @@ class InterpreterMacroAssembler: public MacroAssembler {
void pop(TosState state); // transition vtos -> state
void push(TosState state); // transition state -> vtos
// Tagged stack support, pop and push both tag and value.
void pop_ptr(Register r, Register tag);
void push_ptr(Register r, Register tag);
#endif // CC_INTERP
DEBUG_ONLY(void verify_stack_tag(frame::Tag t);)
#ifndef CC_INTERP
// Tagged stack helpers for swap and dup
void load_ptr_and_tag(int n, Register val, Register tag);
void store_ptr_and_tag(int n, Register val, Register tag);
// Tagged Local support
void tag_local(frame::Tag tag, int n);
void tag_local(Register tag, int n);
void tag_local(frame::Tag tag, Register idx);
void tag_local(Register tag, Register idx);
#ifdef ASSERT
void verify_local_tag(frame::Tag tag, int n);
void verify_local_tag(frame::Tag tag, Register idx);
#endif // ASSERT
void empty_expression_stack()
{
void empty_expression_stack() {
movptr(rsp, Address(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize));
// NULL last_sp until next java call
movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
}
// Helpers for swap and dup
void load_ptr(int n, Register val);
void store_ptr(int n, Register val);
// Super call_VM calls - correspond to MacroAssembler::call_VM(_leaf) calls
void super_call_VM_leaf(address entry_point);
void super_call_VM_leaf(address entry_point, Register arg_1);

View File

@ -1,5 +1,5 @@
/*
* Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1998-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -86,33 +86,23 @@ class SlowSignatureHandler: public NativeSignatureIterator {
address _from;
intptr_t* _to;
#ifdef ASSERT
void verify_tag(frame::Tag t) {
assert(!TaggedStackInterpreter ||
*(intptr_t*)(_from+Interpreter::local_tag_offset_in_bytes(0)) == t, "wrong tag");
}
#endif // ASSERT
virtual void pass_int() {
*_to++ = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
debug_only(verify_tag(frame::TagValue));
_from -= Interpreter::stackElementSize();
_from -= Interpreter::stackElementSize;
}
virtual void pass_long() {
_to[0] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
_to[1] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(0));
debug_only(verify_tag(frame::TagValue));
_to += 2;
_from -= 2*Interpreter::stackElementSize();
_from -= 2*Interpreter::stackElementSize;
}
virtual void pass_object() {
// pass address of from
intptr_t from_addr = (intptr_t)(_from + Interpreter::local_offset_in_bytes(0));
*_to++ = (*(intptr_t*)from_addr == 0) ? NULL_WORD : from_addr;
debug_only(verify_tag(frame::TagReference));
_from -= Interpreter::stackElementSize();
_from -= Interpreter::stackElementSize;
}
public:
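
    The slow signature handler above copies each Java argument out of the interpreter's locals area and steps _from downward by one stack element per slot, or two for longs and doubles. A rough sketch of that walk with the untagged, one-word-per-slot layout this change assumes (illustrative only, not the VM's actual argument layout):

        #include <cstdint>
        #include <cstdio>

        // Illustrative constants: with stack tags removed, one locals slot is a
        // single machine word (assumption made for this sketch).
        static const int kWordSize         = sizeof(intptr_t);
        static const int kStackElementSize = kWordSize;

        // Walk a downward-growing argument area the way the signature handler does:
        // ints and references consume one slot, longs and doubles consume two.
        static void walk_args(const char* signature, intptr_t* from) {
          for (const char* s = signature; *s != '\0'; ++s) {
            int slots = (*s == 'J' || *s == 'D') ? 2 : 1;   // JVM descriptors: J = long, D = double
            std::printf("arg '%c' read at %p (%d slot%s)\n",
                        *s, (void*)from, slots, slots == 2 ? "s" : "");
            from -= slots * kStackElementSize / kWordSize;  // step down past the argument
          }
        }

        int main() {
          intptr_t locals[8] = {0};
          walk_args("IJD", &locals[7]);  // hypothetical signature: int, long, double
          return 0;
        }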

View File

@ -1,5 +1,5 @@
/*
* Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -293,18 +293,10 @@ class SlowSignatureHandler
intptr_t* _fp_identifiers;
unsigned int _num_args;
#ifdef ASSERT
void verify_tag(frame::Tag t) {
assert(!TaggedStackInterpreter ||
*(intptr_t*)(_from+Interpreter::local_tag_offset_in_bytes(0)) == t, "wrong tag");
}
#endif // ASSERT
virtual void pass_int()
{
jint from_obj = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
debug_only(verify_tag(frame::TagValue));
_from -= Interpreter::stackElementSize();
_from -= Interpreter::stackElementSize;
if (_num_args < Argument::n_int_register_parameters_c-1) {
*_reg_args++ = from_obj;
@ -317,8 +309,7 @@ class SlowSignatureHandler
virtual void pass_long()
{
intptr_t from_obj = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
debug_only(verify_tag(frame::TagValue));
_from -= 2*Interpreter::stackElementSize();
_from -= 2*Interpreter::stackElementSize;
if (_num_args < Argument::n_int_register_parameters_c-1) {
*_reg_args++ = from_obj;
@ -331,8 +322,7 @@ class SlowSignatureHandler
virtual void pass_object()
{
intptr_t *from_addr = (intptr_t*)(_from + Interpreter::local_offset_in_bytes(0));
debug_only(verify_tag(frame::TagReference));
_from -= Interpreter::stackElementSize();
_from -= Interpreter::stackElementSize;
if (_num_args < Argument::n_int_register_parameters_c-1) {
*_reg_args++ = (*from_addr == 0) ? NULL : (intptr_t) from_addr;
_num_args++;
@ -344,8 +334,7 @@ class SlowSignatureHandler
virtual void pass_float()
{
jint from_obj = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
debug_only(verify_tag(frame::TagValue));
_from -= Interpreter::stackElementSize();
_from -= Interpreter::stackElementSize;
if (_num_args < Argument::n_float_register_parameters_c-1) {
*_reg_args++ = from_obj;
@ -359,8 +348,7 @@ class SlowSignatureHandler
virtual void pass_double()
{
intptr_t from_obj = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
debug_only(verify_tag(frame::TagValue));
_from -= 2*Interpreter::stackElementSize();
_from -= 2*Interpreter::stackElementSize;
if (_num_args < Argument::n_float_register_parameters_c-1) {
*_reg_args++ = from_obj;
@ -397,18 +385,10 @@ class SlowSignatureHandler
unsigned int _num_int_args;
unsigned int _num_fp_args;
#ifdef ASSERT
void verify_tag(frame::Tag t) {
assert(!TaggedStackInterpreter ||
*(intptr_t*)(_from+Interpreter::local_tag_offset_in_bytes(0)) == t, "wrong tag");
}
#endif // ASSERT
virtual void pass_int()
{
jint from_obj = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
debug_only(verify_tag(frame::TagValue));
_from -= Interpreter::stackElementSize();
_from -= Interpreter::stackElementSize;
if (_num_int_args < Argument::n_int_register_parameters_c-1) {
*_int_args++ = from_obj;
@ -421,8 +401,7 @@ class SlowSignatureHandler
virtual void pass_long()
{
intptr_t from_obj = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
debug_only(verify_tag(frame::TagValue));
_from -= 2*Interpreter::stackElementSize();
_from -= 2*Interpreter::stackElementSize;
if (_num_int_args < Argument::n_int_register_parameters_c-1) {
*_int_args++ = from_obj;
@ -435,8 +414,7 @@ class SlowSignatureHandler
virtual void pass_object()
{
intptr_t *from_addr = (intptr_t*)(_from + Interpreter::local_offset_in_bytes(0));
debug_only(verify_tag(frame::TagReference));
_from -= Interpreter::stackElementSize();
_from -= Interpreter::stackElementSize;
if (_num_int_args < Argument::n_int_register_parameters_c-1) {
*_int_args++ = (*from_addr == 0) ? NULL : (intptr_t)from_addr;
@ -449,8 +427,7 @@ class SlowSignatureHandler
virtual void pass_float()
{
jint from_obj = *(jint*)(_from+Interpreter::local_offset_in_bytes(0));
debug_only(verify_tag(frame::TagValue));
_from -= Interpreter::stackElementSize();
_from -= Interpreter::stackElementSize;
if (_num_fp_args < Argument::n_float_register_parameters_c) {
*_fp_args++ = from_obj;
@ -463,7 +440,7 @@ class SlowSignatureHandler
virtual void pass_double()
{
intptr_t from_obj = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
_from -= 2*Interpreter::stackElementSize();
_from -= 2*Interpreter::stackElementSize;
if (_num_fp_args < Argument::n_float_register_parameters_c) {
*_fp_args++ = from_obj;

View File

@ -1,5 +1,5 @@
/*
* Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,39 +31,16 @@
// the fpu stack.
static const int return_sentinel;
static Address::ScaleFactor stackElementScale() {
return TaggedStackInterpreter? Address::times_8 : Address::times_4;
}
static Address::ScaleFactor stackElementScale() { return Address::times_4; }
// Offset from rsp (which points to the last stack element)
static int expr_offset_in_bytes(int i) { return stackElementSize()*i ; }
static int expr_tag_offset_in_bytes(int i) {
assert(TaggedStackInterpreter, "should not call this");
return expr_offset_in_bytes(i) + wordSize;
}
// Support for Tagged Stacks
static int expr_offset_in_bytes(int i) { return stackElementSize * i; }
// Stack index relative to tos (which points at value)
static int expr_index_at(int i) {
return stackElementWords() * i;
}
static int expr_tag_index_at(int i) {
assert(TaggedStackInterpreter, "should not call this");
// tag is one word above java stack element
return stackElementWords() * i + 1;
}
static int expr_index_at(int i) { return stackElementWords * i; }
// Already negated by c++ interpreter
static int local_index_at(int i) {
assert(i<=0, "local direction already negated");
return stackElementWords() * i + (value_offset_in_bytes()/wordSize);
}
static int local_tag_index_at(int i) {
assert(i<=0, "local direction already negated");
assert(TaggedStackInterpreter, "should not call this");
return stackElementWords() * i + (tag_offset_in_bytes()/wordSize);
static int local_index_at(int i) {
assert(i <= 0, "local direction already negated");
return stackElementWords * i;
}
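
    With the tag slots gone, the offset helpers above collapse to plain one-word-per-element arithmetic. A small sketch of how those offsets work out, assuming a 4-byte word as on 32-bit x86 (values chosen purely for illustration):

        #include <cassert>

        // Illustrative constants assuming a 4-byte word, as on 32-bit x86.
        static const int wordSize          = 4;
        static const int stackElementSize  = wordSize;   // one word per element, no tag slot
        static const int stackElementWords = 1;

        // Mirrors the simplified helpers above: element i of the expression stack
        // lives i words above rsp, and locals are indexed downward from the base.
        static int expr_offset_in_bytes(int i) { return stackElementSize * i; }
        static int expr_index_at(int i)        { return stackElementWords * i; }
        static int local_index_at(int i) {
          assert(i <= 0 && "local direction already negated");
          return stackElementWords * i;
        }

        int main() {
          assert(expr_offset_in_bytes(2) == 8);   // second element is 8 bytes above rsp
          assert(expr_index_at(3) == 3);
          assert(local_index_at(-1) == -1);
          return 0;
        }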

View File

@ -1,5 +1,5 @@
/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -131,14 +131,7 @@ address InterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKin
// java methods. Interpreter::method_kind(...) will select
// this entry point for the corresponding methods in JDK 1.3.
// get argument
if (TaggedStackInterpreter) {
__ pushl(Address(rsp, 3*wordSize)); // push hi (and note rsp -= wordSize)
__ pushl(Address(rsp, 2*wordSize)); // push lo
__ fld_d(Address(rsp, 0)); // get double in ST0
__ addptr(rsp, 2*wordSize);
} else {
__ fld_d(Address(rsp, 1*wordSize));
}
__ fld_d(Address(rsp, 1*wordSize));
switch (kind) {
case Interpreter::java_lang_math_sin :
__ trigfunc('s');

View File

@ -127,7 +127,8 @@ void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
RegisterOrConstant arg_slots,
int arg_mask,
Register rax_argslot,
Register rbx_temp, Register rdx_temp) {
Register rbx_temp, Register rdx_temp, Register temp3_reg) {
assert(temp3_reg == noreg, "temp3 not required");
assert_different_registers(rax_argslot, rbx_temp, rdx_temp,
(!arg_slots.is_register() ? rsp : arg_slots.as_register()));
@ -185,7 +186,8 @@ void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
RegisterOrConstant arg_slots,
Register rax_argslot,
Register rbx_temp, Register rdx_temp) {
Register rbx_temp, Register rdx_temp, Register temp3_reg) {
assert(temp3_reg == noreg, "temp3 not required");
assert_different_registers(rax_argslot, rbx_temp, rdx_temp,
(!arg_slots.is_register() ? rsp : arg_slots.as_register()));
@ -260,6 +262,22 @@ void trace_method_handle_stub(const char* adaptername,
}
#endif //PRODUCT
// which conversion op types are implemented here?
int MethodHandles::adapter_conversion_ops_supported_mask() {
return ((1<<sun_dyn_AdapterMethodHandle::OP_RETYPE_ONLY)
|(1<<sun_dyn_AdapterMethodHandle::OP_RETYPE_RAW)
|(1<<sun_dyn_AdapterMethodHandle::OP_CHECK_CAST)
|(1<<sun_dyn_AdapterMethodHandle::OP_PRIM_TO_PRIM)
|(1<<sun_dyn_AdapterMethodHandle::OP_REF_TO_PRIM)
|(1<<sun_dyn_AdapterMethodHandle::OP_SWAP_ARGS)
|(1<<sun_dyn_AdapterMethodHandle::OP_ROT_ARGS)
|(1<<sun_dyn_AdapterMethodHandle::OP_DUP_ARGS)
|(1<<sun_dyn_AdapterMethodHandle::OP_DROP_ARGS)
//|(1<<sun_dyn_AdapterMethodHandle::OP_SPREAD_ARGS) //BUG!
);
// FIXME: MethodHandlesTest gets a crash if we enable OP_SPREAD_ARGS.
}
// Generate an "entry" field for a method handle.
// This determines how the method handle will respond to calls.
void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
@ -498,7 +516,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
#ifndef _LP64
if (arg_slots == 2) {
__ movl(rdx_temp, prim_value_addr.plus_disp(wordSize));
__ movl(Address(rax_argslot, Interpreter::stackElementSize()), rdx_temp);
__ movl(Address(rax_argslot, Interpreter::stackElementSize), rdx_temp);
}
#endif //_LP64
}
@ -594,7 +612,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ lea(rax_argslot, __ argument_address(rax_argslot, 1));
remove_arg_slots(_masm, -stack_move_unit(),
rax_argslot, rbx_temp, rdx_temp);
vmarg = Address(rax_argslot, -Interpreter::stackElementSize());
vmarg = Address(rax_argslot, -Interpreter::stackElementSize);
__ movl(rdx_temp, vmarg);
}
break;
@ -663,8 +681,8 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ lea(rax_argslot, __ argument_address(rax_argslot, 1));
insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK,
rax_argslot, rbx_temp, rdx_temp);
Address vmarg1(rax_argslot, -Interpreter::stackElementSize());
Address vmarg2 = vmarg1.plus_disp(Interpreter::stackElementSize());
Address vmarg1(rax_argslot, -Interpreter::stackElementSize);
Address vmarg2 = vmarg1.plus_disp(Interpreter::stackElementSize);
switch (ek) {
case _adapter_opt_i2l:
@ -716,7 +734,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK,
rax_argslot, rbx_temp, rdx_temp);
}
Address vmarg(rax_argslot, -Interpreter::stackElementSize());
Address vmarg(rax_argslot, -Interpreter::stackElementSize);
#ifdef _LP64
if (ek == _adapter_opt_f2d) {
@ -1014,7 +1032,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
// Array length checks out. Now insert any required stack slots.
if (length_constant == -1) {
// Form a pointer to the end of the affected region.
__ lea(rdx_argslot_limit, Address(rax_argslot, Interpreter::stackElementSize()));
__ lea(rdx_argslot_limit, Address(rax_argslot, Interpreter::stackElementSize));
// 'stack_move' is negative number of words to insert
Register rdi_stack_move = rdi;
__ movl2ptr(rdi_stack_move, rcx_amh_conversion);
@ -1052,7 +1070,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ movptr(rbx_temp, Address(rsi_source, 0));
__ movptr(Address(rax_argslot, 0), rbx_temp);
__ addptr(rsi_source, type2aelembytes(elem_type));
__ addptr(rax_argslot, Interpreter::stackElementSize());
__ addptr(rax_argslot, Interpreter::stackElementSize);
__ cmpptr(rax_argslot, rdx_argslot_limit);
__ jccb(Assembler::less, loop);
} else if (length_constant == 0) {
@ -1065,7 +1083,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ movptr(rbx_temp, Address(rsi_array, elem_offset));
__ movptr(Address(rax_argslot, slot_offset), rbx_temp);
elem_offset += type2aelembytes(elem_type);
slot_offset += Interpreter::stackElementSize();
slot_offset += Interpreter::stackElementSize;
}
}
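
    The adapter_conversion_ops_supported_mask() routine earlier in this file advertises which adapter conversion ops the port implements by setting one bit per op; callers test the bit for the op they need. A hedged sketch of building and querying such a mask (the op numbering below is invented for the example, it is not the sun_dyn_AdapterMethodHandle numbering):

        #include <cassert>

        // Hypothetical op codes, standing in for sun_dyn_AdapterMethodHandle::OP_* values.
        enum ConversionOp {
          OP_RETYPE_ONLY = 0,
          OP_CHECK_CAST  = 1,
          OP_SWAP_ARGS   = 2,
          OP_SPREAD_ARGS = 3   // deliberately left out of the mask below
        };

        static int supported_ops_mask() {
          return (1 << OP_RETYPE_ONLY)
               | (1 << OP_CHECK_CAST)
               | (1 << OP_SWAP_ARGS);
          // OP_SPREAD_ARGS intentionally omitted, mirroring the disabled op above.
        }

        static bool op_supported(int op) {
          return (supported_ops_mask() & (1 << op)) != 0;
        }

        int main() {
          assert(op_supported(OP_CHECK_CAST));
          assert(!op_supported(OP_SPREAD_ARGS));
          return 0;
        }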

View File

@ -503,34 +503,9 @@ static void patch_callers_callsite(MacroAssembler *masm) {
}
// Helper function to put tags in interpreter stack.
static void tag_stack(MacroAssembler *masm, const BasicType sig, int st_off) {
if (TaggedStackInterpreter) {
int tag_offset = st_off + Interpreter::expr_tag_offset_in_bytes(0);
if (sig == T_OBJECT || sig == T_ARRAY) {
__ movptr(Address(rsp, tag_offset), frame::TagReference);
} else if (sig == T_LONG || sig == T_DOUBLE) {
int next_tag_offset = st_off + Interpreter::expr_tag_offset_in_bytes(1);
__ movptr(Address(rsp, next_tag_offset), frame::TagValue);
__ movptr(Address(rsp, tag_offset), frame::TagValue);
} else {
__ movptr(Address(rsp, tag_offset), frame::TagValue);
}
}
}
// Double and long values with Tagged stacks are not contiguous.
static void move_c2i_double(MacroAssembler *masm, XMMRegister r, int st_off) {
int next_off = st_off - Interpreter::stackElementSize();
if (TaggedStackInterpreter) {
__ movdbl(Address(rsp, next_off), r);
// Move top half up and put tag in the middle.
__ movl(rdi, Address(rsp, next_off+wordSize));
__ movl(Address(rsp, st_off), rdi);
tag_stack(masm, T_DOUBLE, next_off);
} else {
__ movdbl(Address(rsp, next_off), r);
}
int next_off = st_off - Interpreter::stackElementSize;
__ movdbl(Address(rsp, next_off), r);
}
static void gen_c2i_adapter(MacroAssembler *masm,
@ -560,7 +535,7 @@ static void gen_c2i_adapter(MacroAssembler *masm,
// Since all args are passed on the stack, total_args_passed * interpreter_
// stack_element_size is the
// space we need.
int extraspace = total_args_passed * Interpreter::stackElementSize();
int extraspace = total_args_passed * Interpreter::stackElementSize;
// Get return address
__ pop(rax);
@ -578,8 +553,8 @@ static void gen_c2i_adapter(MacroAssembler *masm,
}
// st_off points to lowest address on stack.
int st_off = ((total_args_passed - 1) - i) * Interpreter::stackElementSize();
int next_off = st_off - Interpreter::stackElementSize();
int st_off = ((total_args_passed - 1) - i) * Interpreter::stackElementSize;
int next_off = st_off - Interpreter::stackElementSize;
// Say 4 args:
// i st_off
@ -601,7 +576,6 @@ static void gen_c2i_adapter(MacroAssembler *masm,
if (!r_2->is_valid()) {
__ movl(rdi, Address(rsp, ld_off));
__ movptr(Address(rsp, st_off), rdi);
tag_stack(masm, sig_bt[i], st_off);
} else {
// ld_off == LSW, ld_off+VMRegImpl::stack_slot_size == MSW
@ -619,13 +593,11 @@ static void gen_c2i_adapter(MacroAssembler *masm,
__ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
#endif // _LP64
tag_stack(masm, sig_bt[i], next_off);
}
} else if (r_1->is_Register()) {
Register r = r_1->as_Register();
if (!r_2->is_valid()) {
__ movl(Address(rsp, st_off), r);
tag_stack(masm, sig_bt[i], st_off);
} else {
// long/double in gpr
NOT_LP64(ShouldNotReachHere());
@ -639,17 +611,14 @@ static void gen_c2i_adapter(MacroAssembler *masm,
__ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
__ movptr(Address(rsp, next_off), r);
tag_stack(masm, sig_bt[i], next_off);
} else {
__ movptr(Address(rsp, st_off), r);
tag_stack(masm, sig_bt[i], st_off);
}
}
} else {
assert(r_1->is_XMMRegister(), "");
if (!r_2->is_valid()) {
__ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
tag_stack(masm, sig_bt[i], st_off);
} else {
assert(sig_bt[i] == T_DOUBLE || sig_bt[i] == T_LONG, "wrong type");
move_c2i_double(masm, r_1->as_XMMRegister(), st_off);
@ -665,20 +634,9 @@ static void gen_c2i_adapter(MacroAssembler *masm,
}
// For tagged stacks, double or long value aren't contiguous on the stack
// so get them contiguous for the xmm load
static void move_i2c_double(MacroAssembler *masm, XMMRegister r, Register saved_sp, int ld_off) {
int next_val_off = ld_off - Interpreter::stackElementSize();
if (TaggedStackInterpreter) {
// use tag slot temporarily for MSW
__ movptr(rsi, Address(saved_sp, ld_off));
__ movptr(Address(saved_sp, next_val_off+wordSize), rsi);
__ movdbl(r, Address(saved_sp, next_val_off));
// restore tag
__ movptr(Address(saved_sp, next_val_off+wordSize), frame::TagValue);
} else {
__ movdbl(r, Address(saved_sp, next_val_off));
}
int next_val_off = ld_off - Interpreter::stackElementSize;
__ movdbl(r, Address(saved_sp, next_val_off));
}
static void gen_i2c_adapter(MacroAssembler *masm,
@ -797,9 +755,9 @@ static void gen_i2c_adapter(MacroAssembler *masm,
assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
"scrambled load targets?");
// Load in argument order going down.
int ld_off = (total_args_passed - i)*Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
int ld_off = (total_args_passed - i) * Interpreter::stackElementSize;
// Point to interpreter value (vs. tag)
int next_off = ld_off - Interpreter::stackElementSize();
int next_off = ld_off - Interpreter::stackElementSize;
//
//
//
@ -2322,7 +2280,7 @@ nmethod *SharedRuntime::generate_dtrace_nmethod(
// this function returns the adjust size (in number of words) to a c2i adapter
// activation for use during deoptimization
int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
return (callee_locals - callee_parameters) * Interpreter::stackElementWords();
return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
}
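
    In the c2i adapter above, each incoming argument is stored at st_off, counted down from the top of the freshly reserved interpreter argument area; two-slot values also use next_off, one element below. A small sketch of that offset arithmetic with the untagged slot size (a 4-byte word is assumed here only for illustration):

        #include <cstdio>

        static const int wordSize         = 4;            // illustrative 32-bit word
        static const int stackElementSize = wordSize;     // one word per interpreter slot

        int main() {
          const int total_args_passed = 4;
          // Space reserved for the interpreter-style argument area, as in the adapter above.
          int extraspace = total_args_passed * stackElementSize;
          std::printf("extraspace = %d bytes\n", extraspace);

          for (int i = 0; i < total_args_passed; i++) {
            // st_off points at the slot for argument i; next_off is the slot below it,
            // used for the second half of longs and doubles.
            int st_off   = ((total_args_passed - 1) - i) * stackElementSize;
            int next_off = st_off - stackElementSize;
            std::printf("arg %d: st_off=%d next_off=%d\n", i, st_off, next_off);
          }
          return 0;
        }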

View File

@ -452,22 +452,6 @@ static void patch_callers_callsite(MacroAssembler *masm) {
__ bind(L);
}
// Helper function to put tags in interpreter stack.
static void tag_stack(MacroAssembler *masm, const BasicType sig, int st_off) {
if (TaggedStackInterpreter) {
int tag_offset = st_off + Interpreter::expr_tag_offset_in_bytes(0);
if (sig == T_OBJECT || sig == T_ARRAY) {
__ movptr(Address(rsp, tag_offset), (int32_t) frame::TagReference);
} else if (sig == T_LONG || sig == T_DOUBLE) {
int next_tag_offset = st_off + Interpreter::expr_tag_offset_in_bytes(1);
__ movptr(Address(rsp, next_tag_offset), (int32_t) frame::TagValue);
__ movptr(Address(rsp, tag_offset), (int32_t) frame::TagValue);
} else {
__ movptr(Address(rsp, tag_offset), (int32_t) frame::TagValue);
}
}
}
static void gen_c2i_adapter(MacroAssembler *masm,
int total_args_passed,
@ -489,7 +473,7 @@ static void gen_c2i_adapter(MacroAssembler *masm,
// we also account for the return address location since
// we store it first rather than hold it in rax across all the shuffling
int extraspace = (total_args_passed * Interpreter::stackElementSize()) + wordSize;
int extraspace = (total_args_passed * Interpreter::stackElementSize) + wordSize;
// stack is aligned, keep it that way
extraspace = round_to(extraspace, 2*wordSize);
@ -513,9 +497,8 @@ static void gen_c2i_adapter(MacroAssembler *masm,
}
// offset to start parameters
int st_off = (total_args_passed - i) * Interpreter::stackElementSize() +
Interpreter::value_offset_in_bytes();
int next_off = st_off - Interpreter::stackElementSize();
int st_off = (total_args_passed - i) * Interpreter::stackElementSize;
int next_off = st_off - Interpreter::stackElementSize;
// Say 4 args:
// i st_off
@ -543,7 +526,6 @@ static void gen_c2i_adapter(MacroAssembler *masm,
// sign extend??
__ movl(rax, Address(rsp, ld_off));
__ movptr(Address(rsp, st_off), rax);
tag_stack(masm, sig_bt[i], st_off);
} else {
@ -560,10 +542,8 @@ static void gen_c2i_adapter(MacroAssembler *masm,
__ mov64(rax, CONST64(0xdeadffffdeadaaaa));
__ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
tag_stack(masm, sig_bt[i], next_off);
} else {
__ movq(Address(rsp, st_off), rax);
tag_stack(masm, sig_bt[i], st_off);
}
}
} else if (r_1->is_Register()) {
@ -572,7 +552,6 @@ static void gen_c2i_adapter(MacroAssembler *masm,
// must be only an int (or less ) so move only 32bits to slot
// why not sign extend??
__ movl(Address(rsp, st_off), r);
tag_stack(masm, sig_bt[i], st_off);
} else {
// Two VMREgs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
// T_DOUBLE and T_LONG use two slots in the interpreter
@ -584,10 +563,8 @@ static void gen_c2i_adapter(MacroAssembler *masm,
__ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
__ movq(Address(rsp, next_off), r);
tag_stack(masm, sig_bt[i], next_off);
} else {
__ movptr(Address(rsp, st_off), r);
tag_stack(masm, sig_bt[i], st_off);
}
}
} else {
@ -595,7 +572,6 @@ static void gen_c2i_adapter(MacroAssembler *masm,
if (!r_2->is_valid()) {
// only a float use just part of the slot
__ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
tag_stack(masm, sig_bt[i], st_off);
} else {
#ifdef ASSERT
// Overwrite the unused slot with known junk
@ -603,7 +579,6 @@ static void gen_c2i_adapter(MacroAssembler *masm,
__ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
__ movdbl(Address(rsp, next_off), r_1->as_XMMRegister());
tag_stack(masm, sig_bt[i], next_off);
}
}
}
@ -688,9 +663,9 @@ static void gen_i2c_adapter(MacroAssembler *masm,
assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
"scrambled load targets?");
// Load in argument order going down.
int ld_off = (total_args_passed - i)*Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
int ld_off = (total_args_passed - i)*Interpreter::stackElementSize;
// Point to interpreter value (vs. tag)
int next_off = ld_off - Interpreter::stackElementSize();
int next_off = ld_off - Interpreter::stackElementSize;
//
//
//
@ -2535,7 +2510,7 @@ nmethod *SharedRuntime::generate_dtrace_nmethod(MacroAssembler *masm,
// this function returns the adjust size (in number of words) to a c2i adapter
// activation for use during deoptimization
int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
return (callee_locals - callee_parameters) * Interpreter::stackElementWords();
return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
}

View File

@ -139,7 +139,7 @@ class StubGenerator: public StubCodeGenerator {
// stub code
__ enter();
__ movptr(rcx, parameter_size); // parameter counter
__ shlptr(rcx, Interpreter::logStackElementSize()); // convert parameter count to bytes
__ shlptr(rcx, Interpreter::logStackElementSize); // convert parameter count to bytes
__ addptr(rcx, locals_count_in_bytes); // reserve space for register saves
__ subptr(rsp, rcx);
__ andptr(rsp, -(StackAlignmentInBytes)); // Align stack
@ -194,12 +194,6 @@ class StubGenerator: public StubCodeGenerator {
__ xorptr(rbx, rbx);
__ BIND(loop);
if (TaggedStackInterpreter) {
__ movptr(rax, Address(rdx, rcx, Interpreter::stackElementScale(),
-2*wordSize)); // get tag
__ movptr(Address(rsp, rbx, Interpreter::stackElementScale(),
Interpreter::expr_tag_offset_in_bytes(0)), rax); // store tag
}
// get parameter
__ movptr(rax, Address(rdx, rcx, Interpreter::stackElementScale(), -wordSize));

View File

@ -278,11 +278,6 @@ class StubGenerator: public StubCodeGenerator {
__ movptr(c_rarg2, parameters); // parameter pointer
__ movl(c_rarg1, c_rarg3); // parameter counter is in c_rarg1
__ BIND(loop);
if (TaggedStackInterpreter) {
__ movl(rax, Address(c_rarg2, 0)); // get tag
__ addptr(c_rarg2, wordSize); // advance to next tag
__ push(rax); // pass tag
}
__ movptr(rax, Address(c_rarg2, 0));// get parameter
__ addptr(c_rarg2, wordSize); // advance to next parameter
__ decrementl(c_rarg1); // decrement counter

View File

@ -1,5 +1,5 @@
/*
* Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,8 +28,8 @@
// Size of interpreter code. Increase if too small. Interpreter will
// fail with a guarantee ("not enough space for interpreter generation");
// if too small.
// Run with +PrintInterpreterSize to get the VM to print out the size.
// Max size with JVMTI and TaggedStackInterpreter
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
#ifdef AMD64
const static int InterpreterCodeSize = 200 * 1024;
#else

View File

@ -305,7 +305,6 @@ address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type
case T_FLOAT :
{ const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
__ pop(t); // remove return address first
__ pop_dtos_to_rsp();
// Must return a result for interpreter or compiler. In SSE
// mode, results are returned in xmm0 and the FPU stack must
// be empty.
@ -468,7 +467,7 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
// see if the frame is greater than one page in size. If so,
// then we need to verify there is enough stack space remaining
// for the additional locals.
__ cmpl(rdx, (page_size - overhead_size)/Interpreter::stackElementSize());
__ cmpl(rdx, (page_size - overhead_size)/Interpreter::stackElementSize);
__ jcc(Assembler::belowEqual, after_frame_check);
// compute rsp as if this were going to be the last frame on
@ -882,7 +881,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ get_method(method);
__ verify_oop(method);
__ load_unsigned_short(t, Address(method, methodOopDesc::size_of_parameters_offset()));
__ shlptr(t, Interpreter::logStackElementSize());
__ shlptr(t, Interpreter::logStackElementSize);
__ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror
__ subptr(rsp, t);
__ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
@ -1225,9 +1224,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
__ testl(rdx, rdx);
__ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
__ bind(loop);
if (TaggedStackInterpreter) {
__ push((int32_t)NULL_WORD); // push tag
}
__ push((int32_t)NULL_WORD); // initialize local variables
__ decrement(rdx); // until everything initialized
__ jcc(Assembler::greater, loop);
@ -1463,7 +1459,7 @@ int AbstractInterpreter::size_top_interpreter_activation(methodOop method) {
const int extra_stack = methodOopDesc::extra_stack_entries();
const int method_stack = (method->max_locals() + method->max_stack() + extra_stack) *
Interpreter::stackElementWords();
Interpreter::stackElementWords;
return overhead_size + method_stack + stub_code;
}
@ -1487,9 +1483,9 @@ int AbstractInterpreter::layout_activation(methodOop method,
// NOTE: return size is in words not bytes
// fixed size of an interpreter frame:
int max_locals = method->max_locals() * Interpreter::stackElementWords();
int max_locals = method->max_locals() * Interpreter::stackElementWords;
int extra_locals = (method->max_locals() - method->size_of_parameters()) *
Interpreter::stackElementWords();
Interpreter::stackElementWords;
int overhead = frame::sender_sp_offset - frame::interpreter_frame_initial_sp_offset;
@ -1499,9 +1495,9 @@ int AbstractInterpreter::layout_activation(methodOop method,
int size = overhead +
((callee_locals - callee_param_count)*Interpreter::stackElementWords()) +
((callee_locals - callee_param_count)*Interpreter::stackElementWords) +
(moncount*frame::interpreter_frame_monitor_size()) +
tempcount*Interpreter::stackElementWords() + popframe_extra_args;
tempcount*Interpreter::stackElementWords + popframe_extra_args;
if (interpreter_frame != NULL) {
#ifdef ASSERT
@ -1525,7 +1521,7 @@ int AbstractInterpreter::layout_activation(methodOop method,
// Set last_sp
intptr_t* rsp = (intptr_t*) monbot -
tempcount*Interpreter::stackElementWords() -
tempcount*Interpreter::stackElementWords -
popframe_extra_args;
interpreter_frame->interpreter_frame_set_last_sp(rsp);
@ -1625,7 +1621,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
__ get_method(rax);
__ verify_oop(rax);
__ load_unsigned_short(rax, Address(rax, in_bytes(methodOopDesc::size_of_parameters_offset())));
__ shlptr(rax, Interpreter::logStackElementSize());
__ shlptr(rax, Interpreter::logStackElementSize);
__ restore_locals();
__ subptr(rdi, rax);
__ addptr(rdi, wordSize);
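
    The layout_activation arithmetic above sizes an interpreter frame in words: fixed overhead, the callee's extra locals, monitors, and pending expression-stack temps. A back-of-the-envelope sketch of that sum with untagged one-word stack elements (all counts below are made-up example values, and monitor_size_words stands in for the platform-dependent frame::interpreter_frame_monitor_size()):

        #include <cstdio>

        static const int stackElementWords = 1;  // one word per stack element, no tags

        // Mirrors the frame-size sum above.
        static int interpreter_frame_size_in_words(int overhead,
                                                   int callee_locals,
                                                   int callee_param_count,
                                                   int moncount,
                                                   int monitor_size_words,
                                                   int tempcount,
                                                   int popframe_extra_args) {
          return overhead
               + (callee_locals - callee_param_count) * stackElementWords
               + moncount * monitor_size_words
               + tempcount * stackElementWords
               + popframe_extra_args;
        }

        int main() {
          // Example: 8 words of overhead, 5 locals of which 2 are parameters,
          // 1 monitor of 2 words, 3 expression-stack temps, no popframe args.
          int words = interpreter_frame_size_in_words(8, 5, 2, 1, 2, 3, 0);
          std::printf("frame size = %d words\n", words);   // prints 16
          return 0;
        }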

View File

@ -199,7 +199,6 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
in_bytes(constantPoolCacheOopDesc::base_offset()) +
3 * wordSize));
__ andl(rbx, 0xFF);
if (TaggedStackInterpreter) __ shll(rbx, 1); // 2 slots per parameter.
__ lea(rsp, Address(rsp, rbx, Address::times_8));
__ dispatch_next(state, step);
@ -417,7 +416,7 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
// see if the frame is greater than one page in size. If so,
// then we need to verify there is enough stack space remaining
// for the additional locals.
__ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize());
__ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize);
__ jcc(Assembler::belowEqual, after_frame_check);
// compute rsp as if this were going to be the last frame on
@ -428,7 +427,7 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
// locals + overhead, in bytes
__ mov(rax, rdx);
__ shlptr(rax, Interpreter::logStackElementSize()); // 2 slots per parameter.
__ shlptr(rax, Interpreter::logStackElementSize); // 2 slots per parameter.
__ addptr(rax, overhead_size);
#ifdef ASSERT
@ -759,7 +758,6 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// for natives the size of locals is zero
// compute beginning of parameters (r14)
if (TaggedStackInterpreter) __ shll(rcx, 1); // 2 slots per parameter.
__ lea(r14, Address(rsp, rcx, Address::times_8, -wordSize));
// add 2 zero-initialized slots for native calls
@ -865,7 +863,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ load_unsigned_short(t,
Address(method,
methodOopDesc::size_of_parameters_offset()));
__ shll(t, Interpreter::logStackElementSize());
__ shll(t, Interpreter::logStackElementSize);
__ subptr(rsp, t);
__ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
@ -1228,7 +1226,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
__ pop(rax);
// compute beginning of parameters (r14)
if (TaggedStackInterpreter) __ shll(rcx, 1); // 2 slots per parameter.
__ lea(r14, Address(rsp, rcx, Address::times_8, -wordSize));
// rdx - # of additional locals
@ -1239,7 +1236,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
__ testl(rdx, rdx);
__ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
__ bind(loop);
if (TaggedStackInterpreter) __ push((int) NULL_WORD); // push tag
__ push((int) NULL_WORD); // initialize local variables
__ decrementl(rdx); // until everything initialized
__ jcc(Assembler::greater, loop);
@ -1486,7 +1482,7 @@ int AbstractInterpreter::size_top_interpreter_activation(methodOop method) {
const int stub_code = frame::entry_frame_after_call_words;
const int extra_stack = methodOopDesc::extra_stack_entries();
const int method_stack = (method->max_locals() + method->max_stack() + extra_stack) *
Interpreter::stackElementWords();
Interpreter::stackElementWords;
return (overhead_size + method_stack + stub_code);
}
@ -1507,9 +1503,9 @@ int AbstractInterpreter::layout_activation(methodOop method,
// It is also guaranteed to be walkable even though it is in a skeletal state
// fixed size of an interpreter frame:
int max_locals = method->max_locals() * Interpreter::stackElementWords();
int max_locals = method->max_locals() * Interpreter::stackElementWords;
int extra_locals = (method->max_locals() - method->size_of_parameters()) *
Interpreter::stackElementWords();
Interpreter::stackElementWords;
int overhead = frame::sender_sp_offset -
frame::interpreter_frame_initial_sp_offset;
@ -1518,9 +1514,9 @@ int AbstractInterpreter::layout_activation(methodOop method,
// for the callee's params we only need to account for the extra
// locals.
int size = overhead +
(callee_locals - callee_param_count)*Interpreter::stackElementWords() +
(callee_locals - callee_param_count)*Interpreter::stackElementWords +
moncount * frame::interpreter_frame_monitor_size() +
tempcount* Interpreter::stackElementWords() + popframe_extra_args;
tempcount* Interpreter::stackElementWords + popframe_extra_args;
if (interpreter_frame != NULL) {
#ifdef ASSERT
if (!EnableMethodHandles)
@ -1544,7 +1540,7 @@ int AbstractInterpreter::layout_activation(methodOop method,
// Set last_sp
intptr_t* esp = (intptr_t*) monbot -
tempcount*Interpreter::stackElementWords() -
tempcount*Interpreter::stackElementWords -
popframe_extra_args;
interpreter_frame->interpreter_frame_set_last_sp(esp);
@ -1650,7 +1646,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
__ get_method(rax);
__ load_unsigned_short(rax, Address(rax, in_bytes(methodOopDesc::
size_of_parameters_offset())));
__ shll(rax, Interpreter::logStackElementSize());
__ shll(rax, Interpreter::logStackElementSize);
__ restore_locals(); // XXX do we need this?
__ subptr(r14, rax);
__ addptr(r14, wordSize);

View File

@ -50,7 +50,7 @@ static inline Address daddress(int n) { return laddress(n); }
static inline Address aaddress(int n) { return iaddress(n); }
static inline Address iaddress(Register r) {
return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::value_offset_in_bytes());
return Address(rdi, r, Interpreter::stackElementScale());
}
static inline Address laddress(Register r) {
return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(1));
@ -59,12 +59,9 @@ static inline Address haddress(Register r) {
return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(0));
}
static inline Address faddress(Register r) { return iaddress(r); };
static inline Address daddress(Register r) {
assert(!TaggedStackInterpreter, "This doesn't work");
return laddress(r);
};
static inline Address aaddress(Register r) { return iaddress(r); };
static inline Address faddress(Register r) { return iaddress(r); }
static inline Address daddress(Register r) { return laddress(r); }
static inline Address aaddress(Register r) { return iaddress(r); }
// expression stack
// (Note: Must not use symmetric equivalents at_rsp_m1/2 since they store
@ -448,7 +445,6 @@ void TemplateTable::iload() {
// Get the local value into tos
locals_index(rbx);
__ movl(rax, iaddress(rbx));
debug_only(__ verify_local_tag(frame::TagValue, rbx));
}
@ -456,18 +452,15 @@ void TemplateTable::fast_iload2() {
transition(vtos, itos);
locals_index(rbx);
__ movl(rax, iaddress(rbx));
debug_only(__ verify_local_tag(frame::TagValue, rbx));
__ push(itos);
locals_index(rbx, 3);
__ movl(rax, iaddress(rbx));
debug_only(__ verify_local_tag(frame::TagValue, rbx));
}
void TemplateTable::fast_iload() {
transition(vtos, itos);
locals_index(rbx);
__ movl(rax, iaddress(rbx));
debug_only(__ verify_local_tag(frame::TagValue, rbx));
}
@ -476,7 +469,6 @@ void TemplateTable::lload() {
locals_index(rbx);
__ movptr(rax, laddress(rbx));
NOT_LP64(__ movl(rdx, haddress(rbx)));
debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
}
@ -484,26 +476,13 @@ void TemplateTable::fload() {
transition(vtos, ftos);
locals_index(rbx);
__ fld_s(faddress(rbx));
debug_only(__ verify_local_tag(frame::TagValue, rbx));
}
void TemplateTable::dload() {
transition(vtos, dtos);
locals_index(rbx);
if (TaggedStackInterpreter) {
// Get double out of locals array, onto temp stack and load with
// float instruction into ST0
__ movl(rax, laddress(rbx));
__ movl(rdx, haddress(rbx));
__ push(rdx); // push hi first
__ push(rax);
__ fld_d(Address(rsp, 0));
__ addptr(rsp, 2*wordSize);
debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
} else {
__ fld_d(daddress(rbx));
}
__ fld_d(daddress(rbx));
}
@ -511,7 +490,6 @@ void TemplateTable::aload() {
transition(vtos, atos);
locals_index(rbx);
__ movptr(rax, aaddress(rbx));
debug_only(__ verify_local_tag(frame::TagReference, rbx));
}
@ -527,7 +505,6 @@ void TemplateTable::wide_iload() {
transition(vtos, itos);
locals_index_wide(rbx);
__ movl(rax, iaddress(rbx));
debug_only(__ verify_local_tag(frame::TagValue, rbx));
}
@ -536,7 +513,6 @@ void TemplateTable::wide_lload() {
locals_index_wide(rbx);
__ movptr(rax, laddress(rbx));
NOT_LP64(__ movl(rdx, haddress(rbx)));
debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
}
@ -544,26 +520,13 @@ void TemplateTable::wide_fload() {
transition(vtos, ftos);
locals_index_wide(rbx);
__ fld_s(faddress(rbx));
debug_only(__ verify_local_tag(frame::TagValue, rbx));
}
void TemplateTable::wide_dload() {
transition(vtos, dtos);
locals_index_wide(rbx);
if (TaggedStackInterpreter) {
// Get double out of locals array, onto temp stack and load with
// float instruction into ST0
__ movl(rax, laddress(rbx));
__ movl(rdx, haddress(rbx));
__ push(rdx); // push hi first
__ push(rax);
__ fld_d(Address(rsp, 0));
__ addl(rsp, 2*wordSize);
debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
} else {
__ fld_d(daddress(rbx));
}
__ fld_d(daddress(rbx));
}
@ -571,7 +534,6 @@ void TemplateTable::wide_aload() {
transition(vtos, atos);
locals_index_wide(rbx);
__ movptr(rax, aaddress(rbx));
debug_only(__ verify_local_tag(frame::TagReference, rbx));
}
void TemplateTable::index_check(Register array, Register index) {
@ -672,7 +634,6 @@ void TemplateTable::fast_icaload() {
// load index out of locals
locals_index(rbx);
__ movl(rax, iaddress(rbx));
debug_only(__ verify_local_tag(frame::TagValue, rbx));
// rdx: array
index_check(rdx, rax);
@ -695,7 +656,6 @@ void TemplateTable::saload() {
void TemplateTable::iload(int n) {
transition(vtos, itos);
__ movl(rax, iaddress(n));
debug_only(__ verify_local_tag(frame::TagValue, n));
}
@ -703,39 +663,24 @@ void TemplateTable::lload(int n) {
transition(vtos, ltos);
__ movptr(rax, laddress(n));
NOT_LP64(__ movptr(rdx, haddress(n)));
debug_only(__ verify_local_tag(frame::TagCategory2, n));
}
void TemplateTable::fload(int n) {
transition(vtos, ftos);
__ fld_s(faddress(n));
debug_only(__ verify_local_tag(frame::TagValue, n));
}
void TemplateTable::dload(int n) {
transition(vtos, dtos);
if (TaggedStackInterpreter) {
// Get double out of locals array, onto temp stack and load with
// float instruction into ST0
__ movl(rax, laddress(n));
__ movl(rdx, haddress(n));
__ push(rdx); // push hi first
__ push(rax);
__ fld_d(Address(rsp, 0));
__ addptr(rsp, 2*wordSize); // reset rsp
debug_only(__ verify_local_tag(frame::TagCategory2, n));
} else {
__ fld_d(daddress(n));
}
__ fld_d(daddress(n));
}
void TemplateTable::aload(int n) {
transition(vtos, atos);
__ movptr(rax, aaddress(n));
debug_only(__ verify_local_tag(frame::TagReference, n));
}
@ -809,7 +754,6 @@ void TemplateTable::istore() {
transition(itos, vtos);
locals_index(rbx);
__ movl(iaddress(rbx), rax);
__ tag_local(frame::TagValue, rbx);
}
@ -818,7 +762,6 @@ void TemplateTable::lstore() {
locals_index(rbx);
__ movptr(laddress(rbx), rax);
NOT_LP64(__ movptr(haddress(rbx), rdx));
__ tag_local(frame::TagCategory2, rbx);
}
@ -826,34 +769,21 @@ void TemplateTable::fstore() {
transition(ftos, vtos);
locals_index(rbx);
__ fstp_s(faddress(rbx));
__ tag_local(frame::TagValue, rbx);
}
void TemplateTable::dstore() {
transition(dtos, vtos);
locals_index(rbx);
if (TaggedStackInterpreter) {
// Store double on stack and reload into locals nonadjacently
__ subptr(rsp, 2 * wordSize);
__ fstp_d(Address(rsp, 0));
__ pop(rax);
__ pop(rdx);
__ movptr(laddress(rbx), rax);
__ movptr(haddress(rbx), rdx);
__ tag_local(frame::TagCategory2, rbx);
} else {
__ fstp_d(daddress(rbx));
}
__ fstp_d(daddress(rbx));
}
void TemplateTable::astore() {
transition(vtos, vtos);
__ pop_ptr(rax, rdx); // will need to pop tag too
__ pop_ptr(rax);
locals_index(rbx);
__ movptr(aaddress(rbx), rax);
__ tag_local(rdx, rbx); // need to store same tag in local may be returnAddr
}
@ -862,7 +792,6 @@ void TemplateTable::wide_istore() {
__ pop_i(rax);
locals_index_wide(rbx);
__ movl(iaddress(rbx), rax);
__ tag_local(frame::TagValue, rbx);
}
@ -872,7 +801,6 @@ void TemplateTable::wide_lstore() {
locals_index_wide(rbx);
__ movptr(laddress(rbx), rax);
NOT_LP64(__ movl(haddress(rbx), rdx));
__ tag_local(frame::TagCategory2, rbx);
}
@ -888,10 +816,9 @@ void TemplateTable::wide_dstore() {
void TemplateTable::wide_astore() {
transition(vtos, vtos);
__ pop_ptr(rax, rdx);
__ pop_ptr(rax);
locals_index_wide(rbx);
__ movptr(aaddress(rbx), rax);
__ tag_local(rdx, rbx);
}
@ -990,7 +917,7 @@ void TemplateTable::aastore() {
// Pop stack arguments
__ bind(done);
__ addptr(rsp, 3 * Interpreter::stackElementSize());
__ addptr(rsp, 3 * Interpreter::stackElementSize);
}
@ -1024,7 +951,6 @@ void TemplateTable::sastore() {
void TemplateTable::istore(int n) {
transition(itos, vtos);
__ movl(iaddress(n), rax);
__ tag_local(frame::TagValue, n);
}
@ -1032,58 +958,45 @@ void TemplateTable::lstore(int n) {
transition(ltos, vtos);
__ movptr(laddress(n), rax);
NOT_LP64(__ movptr(haddress(n), rdx));
__ tag_local(frame::TagCategory2, n);
}
void TemplateTable::fstore(int n) {
transition(ftos, vtos);
__ fstp_s(faddress(n));
__ tag_local(frame::TagValue, n);
}
void TemplateTable::dstore(int n) {
transition(dtos, vtos);
if (TaggedStackInterpreter) {
__ subptr(rsp, 2 * wordSize);
__ fstp_d(Address(rsp, 0));
__ pop(rax);
__ pop(rdx);
__ movl(laddress(n), rax);
__ movl(haddress(n), rdx);
__ tag_local(frame::TagCategory2, n);
} else {
__ fstp_d(daddress(n));
}
__ fstp_d(daddress(n));
}
void TemplateTable::astore(int n) {
transition(vtos, vtos);
__ pop_ptr(rax, rdx);
__ pop_ptr(rax);
__ movptr(aaddress(n), rax);
__ tag_local(rdx, n);
}
void TemplateTable::pop() {
transition(vtos, vtos);
__ addptr(rsp, Interpreter::stackElementSize());
__ addptr(rsp, Interpreter::stackElementSize);
}
void TemplateTable::pop2() {
transition(vtos, vtos);
__ addptr(rsp, 2*Interpreter::stackElementSize());
__ addptr(rsp, 2*Interpreter::stackElementSize);
}
void TemplateTable::dup() {
transition(vtos, vtos);
// stack: ..., a
__ load_ptr_and_tag(0, rax, rdx);
__ push_ptr(rax, rdx);
__ load_ptr(0, rax);
__ push_ptr(rax);
// stack: ..., a, a
}
@ -1091,11 +1004,11 @@ void TemplateTable::dup() {
void TemplateTable::dup_x1() {
transition(vtos, vtos);
// stack: ..., a, b
__ load_ptr_and_tag(0, rax, rdx); // load b
__ load_ptr_and_tag(1, rcx, rbx); // load a
__ store_ptr_and_tag(1, rax, rdx); // store b
__ store_ptr_and_tag(0, rcx, rbx); // store a
__ push_ptr(rax, rdx); // push b
__ load_ptr( 0, rax); // load b
__ load_ptr( 1, rcx); // load a
__ store_ptr(1, rax); // store b
__ store_ptr(0, rcx); // store a
__ push_ptr(rax); // push b
// stack: ..., b, a, b
}
@ -1103,15 +1016,15 @@ void TemplateTable::dup_x1() {
void TemplateTable::dup_x2() {
transition(vtos, vtos);
// stack: ..., a, b, c
__ load_ptr_and_tag(0, rax, rdx); // load c
__ load_ptr_and_tag(2, rcx, rbx); // load a
__ store_ptr_and_tag(2, rax, rdx); // store c in a
__ push_ptr(rax, rdx); // push c
__ load_ptr( 0, rax); // load c
__ load_ptr( 2, rcx); // load a
__ store_ptr(2, rax); // store c in a
__ push_ptr(rax); // push c
// stack: ..., c, b, c, c
__ load_ptr_and_tag(2, rax, rdx); // load b
__ store_ptr_and_tag(2, rcx, rbx); // store a in b
__ load_ptr( 2, rax); // load b
__ store_ptr(2, rcx); // store a in b
// stack: ..., c, a, c, c
__ store_ptr_and_tag(1, rax, rdx); // store b in c
__ store_ptr(1, rax); // store b in c
// stack: ..., c, a, b, c
}
@ -1119,10 +1032,10 @@ void TemplateTable::dup_x2() {
void TemplateTable::dup2() {
transition(vtos, vtos);
// stack: ..., a, b
__ load_ptr_and_tag(1, rax, rdx); // load a
__ push_ptr(rax, rdx); // push a
__ load_ptr_and_tag(1, rax, rdx); // load b
__ push_ptr(rax, rdx); // push b
__ load_ptr(1, rax); // load a
__ push_ptr(rax); // push a
__ load_ptr(1, rax); // load b
__ push_ptr(rax); // push b
// stack: ..., a, b, a, b
}
@ -1130,17 +1043,17 @@ void TemplateTable::dup2() {
void TemplateTable::dup2_x1() {
transition(vtos, vtos);
// stack: ..., a, b, c
__ load_ptr_and_tag(0, rcx, rbx); // load c
__ load_ptr_and_tag(1, rax, rdx); // load b
__ push_ptr(rax, rdx); // push b
__ push_ptr(rcx, rbx); // push c
__ load_ptr( 0, rcx); // load c
__ load_ptr( 1, rax); // load b
__ push_ptr(rax); // push b
__ push_ptr(rcx); // push c
// stack: ..., a, b, c, b, c
__ store_ptr_and_tag(3, rcx, rbx); // store c in b
__ store_ptr(3, rcx); // store c in b
// stack: ..., a, c, c, b, c
__ load_ptr_and_tag(4, rcx, rbx); // load a
__ store_ptr_and_tag(2, rcx, rbx); // store a in 2nd c
__ load_ptr( 4, rcx); // load a
__ store_ptr(2, rcx); // store a in 2nd c
// stack: ..., a, c, a, b, c
__ store_ptr_and_tag(4, rax, rdx); // store b in a
__ store_ptr(4, rax); // store b in a
// stack: ..., b, c, a, b, c
// stack: ..., b, c, a, b, c
}
@ -1149,19 +1062,19 @@ void TemplateTable::dup2_x1() {
void TemplateTable::dup2_x2() {
transition(vtos, vtos);
// stack: ..., a, b, c, d
__ load_ptr_and_tag(0, rcx, rbx); // load d
__ load_ptr_and_tag(1, rax, rdx); // load c
__ push_ptr(rax, rdx); // push c
__ push_ptr(rcx, rbx); // push d
__ load_ptr( 0, rcx); // load d
__ load_ptr( 1, rax); // load c
__ push_ptr(rax); // push c
__ push_ptr(rcx); // push d
// stack: ..., a, b, c, d, c, d
__ load_ptr_and_tag(4, rax, rdx); // load b
__ store_ptr_and_tag(2, rax, rdx); // store b in d
__ store_ptr_and_tag(4, rcx, rbx); // store d in b
__ load_ptr( 4, rax); // load b
__ store_ptr(2, rax); // store b in d
__ store_ptr(4, rcx); // store d in b
// stack: ..., a, d, c, b, c, d
__ load_ptr_and_tag(5, rcx, rbx); // load a
__ load_ptr_and_tag(3, rax, rdx); // load c
__ store_ptr_and_tag(3, rcx, rbx); // store a in c
__ store_ptr_and_tag(5, rax, rdx); // store c in a
__ load_ptr( 5, rcx); // load a
__ load_ptr( 3, rax); // load c
__ store_ptr(3, rcx); // store a in c
__ store_ptr(5, rax); // store c in a
// stack: ..., c, d, a, b, c, d
// stack: ..., c, d, a, b, c, d
}
@ -1170,10 +1083,10 @@ void TemplateTable::dup2_x2() {
void TemplateTable::swap() {
transition(vtos, vtos);
// stack: ..., a, b
__ load_ptr_and_tag(1, rcx, rbx); // load a
__ load_ptr_and_tag(0, rax, rdx); // load b
__ store_ptr_and_tag(0, rcx, rbx); // store a in b
__ store_ptr_and_tag(1, rax, rdx); // store b in a
__ load_ptr( 1, rcx); // load a
__ load_ptr( 0, rax); // load b
__ store_ptr(0, rcx); // store a in b
__ store_ptr(1, rax); // store b in a
// stack: ..., b, a
}
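
The dup/swap templates above are easiest to follow next to the "// stack: ..." diagrams in their comments. Below is an illustrative, self-contained C++ model of two of those shuffles (dup_x1 and swap) against a plain vector standing in for the expression stack; it is editorial, not part of this changeset, and the helper names are made up.

#include <cassert>
#include <vector>

typedef std::vector<long> Stack;

static void dup_x1(Stack& s) {              // ..., a, b  ->  ..., b, a, b
  long b = s[s.size() - 1];
  long a = s[s.size() - 2];
  s[s.size() - 2] = b;
  s[s.size() - 1] = a;
  s.push_back(b);
}

static void swap_top(Stack& s) {            // ..., a, b  ->  ..., b, a
  long b = s[s.size() - 1];
  long a = s[s.size() - 2];
  s[s.size() - 1] = a;
  s[s.size() - 2] = b;
}

int main() {
  Stack s;
  s.push_back(1);                           // a
  s.push_back(2);                           // b
  dup_x1(s);                                // stack is now b, a, b == 2, 1, 2
  assert(s.size() == 3 && s[0] == 2 && s[1] == 1 && s[2] == 2);
  swap_top(s);                              // top two swap: 2, 2, 1
  assert(s[1] == 2 && s[2] == 1);
  return 0;
}
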
@ -1181,12 +1094,12 @@ void TemplateTable::swap() {
void TemplateTable::iop2(Operation op) {
transition(itos, itos);
switch (op) {
case add : __ pop_i(rdx); __ addl (rax, rdx); break;
case add : __ pop_i(rdx); __ addl (rax, rdx); break;
case sub : __ mov(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
case mul : __ pop_i(rdx); __ imull(rax, rdx); break;
case _and : __ pop_i(rdx); __ andl (rax, rdx); break;
case _or : __ pop_i(rdx); __ orl (rax, rdx); break;
case _xor : __ pop_i(rdx); __ xorl (rax, rdx); break;
case mul : __ pop_i(rdx); __ imull(rax, rdx); break;
case _and : __ pop_i(rdx); __ andl (rax, rdx); break;
case _or : __ pop_i(rdx); __ orl (rax, rdx); break;
case _xor : __ pop_i(rdx); __ xorl (rax, rdx); break;
case shl : __ mov(rcx, rax); __ pop_i(rax); __ shll (rax); break; // implicit masking of lower 5 bits by Intel shift instr.
case shr : __ mov(rcx, rax); __ pop_i(rax); __ sarl (rax); break; // implicit masking of lower 5 bits by Intel shift instr.
case ushr : __ mov(rcx, rax); __ pop_i(rax); __ shrl (rax); break; // implicit masking of lower 5 bits by Intel shift instr.
@ -1199,13 +1112,13 @@ void TemplateTable::lop2(Operation op) {
transition(ltos, ltos);
__ pop_l(rbx, rcx);
switch (op) {
case add : __ addl(rax, rbx); __ adcl(rdx, rcx); break;
case sub : __ subl(rbx, rax); __ sbbl(rcx, rdx);
__ mov(rax, rbx); __ mov(rdx, rcx); break;
case _and: __ andl(rax, rbx); __ andl(rdx, rcx); break;
case _or : __ orl (rax, rbx); __ orl (rdx, rcx); break;
case _xor: __ xorl(rax, rbx); __ xorl(rdx, rcx); break;
default : ShouldNotReachHere();
case add : __ addl(rax, rbx); __ adcl(rdx, rcx); break;
case sub : __ subl(rbx, rax); __ sbbl(rcx, rdx);
__ mov (rax, rbx); __ mov (rdx, rcx); break;
case _and : __ andl(rax, rbx); __ andl(rdx, rcx); break;
case _or : __ orl (rax, rbx); __ orl (rdx, rcx); break;
case _xor : __ xorl(rax, rbx); __ xorl(rdx, rcx); break;
default : ShouldNotReachHere();
}
}
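
For the 32-bit lop2 case above, the addl/adcl pair builds a 64-bit add out of two 32-bit halves. A small worked example of that carry propagation (plain C++, not HotSpot code):

#include <cstdint>
#include <cstdio>

static void add64(uint32_t alo, uint32_t ahi, uint32_t blo, uint32_t bhi,
                  uint32_t& rlo, uint32_t& rhi) {
  rlo = alo + blo;                       // addl: low words
  uint32_t carry = (rlo < alo) ? 1 : 0;  // carry out of the low-word add
  rhi = ahi + bhi + carry;               // adcl: high words plus carry
}

int main() {
  uint32_t lo, hi;
  add64(0xFFFFFFFFu, 0x00000000u, 0x00000001u, 0x00000000u, lo, hi);
  std::printf("result = 0x%08X%08X\n", (unsigned) hi, (unsigned) lo);  // 0x0000000100000000
  return 0;
}
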
@ -1299,7 +1212,6 @@ void TemplateTable::lushr() {
void TemplateTable::fop2(Operation op) {
transition(ftos, ftos);
__ pop_ftos_to_rsp(); // pop ftos into rsp
switch (op) {
case add: __ fadd_s (at_rsp()); break;
case sub: __ fsubr_s(at_rsp()); break;
@ -1315,7 +1227,6 @@ void TemplateTable::fop2(Operation op) {
void TemplateTable::dop2(Operation op) {
transition(dtos, dtos);
__ pop_dtos_to_rsp(); // pop dtos into rsp
switch (op) {
case add: __ fadd_d (at_rsp()); break;
@ -1557,10 +1468,8 @@ void TemplateTable::lcmp() {
void TemplateTable::float_cmp(bool is_float, int unordered_result) {
if (is_float) {
__ pop_ftos_to_rsp();
__ fld_s(at_rsp());
} else {
__ pop_dtos_to_rsp();
__ fld_d(at_rsp());
__ pop(rdx);
}
@ -2854,7 +2763,6 @@ void TemplateTable::fast_xaccess(TosState state) {
transition(vtos, state);
// get receiver
__ movptr(rax, aaddress(0));
debug_only(__ verify_local_tag(frame::TagReference, 0));
// access constant pool cache
__ get_cache_and_index_at_bcp(rcx, rdx, 2);
__ movptr(rbx, Address(rcx,

View File

@ -58,7 +58,7 @@ static inline Address aaddress(int n) {
}
static inline Address iaddress(Register r) {
return Address(r14, r, Address::times_8, Interpreter::value_offset_in_bytes());
return Address(r14, r, Address::times_8);
}
static inline Address laddress(Register r) {
@ -418,7 +418,6 @@ void TemplateTable::ldc2_w() {
void TemplateTable::locals_index(Register reg, int offset) {
__ load_unsigned_byte(reg, at_bcp(offset));
__ negptr(reg);
if (TaggedStackInterpreter) __ shlptr(reg, 1); // index = index*2
}
void TemplateTable::iload() {
@ -460,53 +459,45 @@ void TemplateTable::iload() {
// Get the local value into tos
locals_index(rbx);
__ movl(rax, iaddress(rbx));
debug_only(__ verify_local_tag(frame::TagValue, rbx));
}
void TemplateTable::fast_iload2() {
transition(vtos, itos);
locals_index(rbx);
__ movl(rax, iaddress(rbx));
debug_only(__ verify_local_tag(frame::TagValue, rbx));
__ push(itos);
locals_index(rbx, 3);
__ movl(rax, iaddress(rbx));
debug_only(__ verify_local_tag(frame::TagValue, rbx));
}
void TemplateTable::fast_iload() {
transition(vtos, itos);
locals_index(rbx);
__ movl(rax, iaddress(rbx));
debug_only(__ verify_local_tag(frame::TagValue, rbx));
}
void TemplateTable::lload() {
transition(vtos, ltos);
locals_index(rbx);
__ movq(rax, laddress(rbx));
debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
}
void TemplateTable::fload() {
transition(vtos, ftos);
locals_index(rbx);
__ movflt(xmm0, faddress(rbx));
debug_only(__ verify_local_tag(frame::TagValue, rbx));
}
void TemplateTable::dload() {
transition(vtos, dtos);
locals_index(rbx);
__ movdbl(xmm0, daddress(rbx));
debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
}
void TemplateTable::aload() {
transition(vtos, atos);
locals_index(rbx);
__ movptr(rax, aaddress(rbx));
debug_only(__ verify_local_tag(frame::TagReference, rbx));
}
void TemplateTable::locals_index_wide(Register reg) {
@ -514,42 +505,36 @@ void TemplateTable::locals_index_wide(Register reg) {
__ bswapl(reg);
__ shrl(reg, 16);
__ negptr(reg);
if (TaggedStackInterpreter) __ shlptr(reg, 1); // index = index*2
}
void TemplateTable::wide_iload() {
transition(vtos, itos);
locals_index_wide(rbx);
__ movl(rax, iaddress(rbx));
debug_only(__ verify_local_tag(frame::TagValue, rbx));
}
void TemplateTable::wide_lload() {
transition(vtos, ltos);
locals_index_wide(rbx);
__ movq(rax, laddress(rbx));
debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
}
void TemplateTable::wide_fload() {
transition(vtos, ftos);
locals_index_wide(rbx);
__ movflt(xmm0, faddress(rbx));
debug_only(__ verify_local_tag(frame::TagValue, rbx));
}
void TemplateTable::wide_dload() {
transition(vtos, dtos);
locals_index_wide(rbx);
__ movdbl(xmm0, daddress(rbx));
debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
}
void TemplateTable::wide_aload() {
transition(vtos, atos);
locals_index_wide(rbx);
__ movptr(rax, aaddress(rbx));
debug_only(__ verify_local_tag(frame::TagReference, rbx));
}
void TemplateTable::index_check(Register array, Register index) {
@ -654,7 +639,6 @@ void TemplateTable::fast_icaload() {
// load index out of locals
locals_index(rbx);
__ movl(rax, iaddress(rbx));
debug_only(__ verify_local_tag(frame::TagValue, rbx));
// eax: index
// rdx: array
@ -681,31 +665,26 @@ void TemplateTable::saload() {
void TemplateTable::iload(int n) {
transition(vtos, itos);
__ movl(rax, iaddress(n));
debug_only(__ verify_local_tag(frame::TagValue, n));
}
void TemplateTable::lload(int n) {
transition(vtos, ltos);
__ movq(rax, laddress(n));
debug_only(__ verify_local_tag(frame::TagCategory2, n));
}
void TemplateTable::fload(int n) {
transition(vtos, ftos);
__ movflt(xmm0, faddress(n));
debug_only(__ verify_local_tag(frame::TagValue, n));
}
void TemplateTable::dload(int n) {
transition(vtos, dtos);
__ movdbl(xmm0, daddress(n));
debug_only(__ verify_local_tag(frame::TagCategory2, n));
}
void TemplateTable::aload(int n) {
transition(vtos, atos);
__ movptr(rax, aaddress(n));
debug_only(__ verify_local_tag(frame::TagReference, n));
}
void TemplateTable::aload_0() {
@ -791,36 +770,31 @@ void TemplateTable::istore() {
transition(itos, vtos);
locals_index(rbx);
__ movl(iaddress(rbx), rax);
__ tag_local(frame::TagValue, rbx);
}
void TemplateTable::lstore() {
transition(ltos, vtos);
locals_index(rbx);
__ movq(laddress(rbx), rax);
__ tag_local(frame::TagCategory2, rbx);
}
void TemplateTable::fstore() {
transition(ftos, vtos);
locals_index(rbx);
__ movflt(faddress(rbx), xmm0);
__ tag_local(frame::TagValue, rbx);
}
void TemplateTable::dstore() {
transition(dtos, vtos);
locals_index(rbx);
__ movdbl(daddress(rbx), xmm0);
__ tag_local(frame::TagCategory2, rbx);
}
void TemplateTable::astore() {
transition(vtos, vtos);
__ pop_ptr(rax, rdx); // will need to pop tag too
__ pop_ptr(rax);
locals_index(rbx);
__ movptr(aaddress(rbx), rax);
__ tag_local(rdx, rbx); // store tag from stack, might be returnAddr
}
void TemplateTable::wide_istore() {
@ -828,7 +802,6 @@ void TemplateTable::wide_istore() {
__ pop_i();
locals_index_wide(rbx);
__ movl(iaddress(rbx), rax);
__ tag_local(frame::TagValue, rbx);
}
void TemplateTable::wide_lstore() {
@ -836,7 +809,6 @@ void TemplateTable::wide_lstore() {
__ pop_l();
locals_index_wide(rbx);
__ movq(laddress(rbx), rax);
__ tag_local(frame::TagCategory2, rbx);
}
void TemplateTable::wide_fstore() {
@ -844,7 +816,6 @@ void TemplateTable::wide_fstore() {
__ pop_f();
locals_index_wide(rbx);
__ movflt(faddress(rbx), xmm0);
__ tag_local(frame::TagValue, rbx);
}
void TemplateTable::wide_dstore() {
@ -852,15 +823,13 @@ void TemplateTable::wide_dstore() {
__ pop_d();
locals_index_wide(rbx);
__ movdbl(daddress(rbx), xmm0);
__ tag_local(frame::TagCategory2, rbx);
}
void TemplateTable::wide_astore() {
transition(vtos, vtos);
__ pop_ptr(rax, rdx); // will need to pop tag too
__ pop_ptr(rax);
locals_index_wide(rbx);
__ movptr(aaddress(rbx), rax);
__ tag_local(rdx, rbx); // store tag from stack, might be returnAddr
}
void TemplateTable::iastore() {
@ -972,7 +941,7 @@ void TemplateTable::aastore() {
// Pop stack arguments
__ bind(done);
__ addptr(rsp, 3 * Interpreter::stackElementSize());
__ addptr(rsp, 3 * Interpreter::stackElementSize);
}
void TemplateTable::bastore() {
@ -1010,130 +979,125 @@ void TemplateTable::sastore() {
void TemplateTable::istore(int n) {
transition(itos, vtos);
__ movl(iaddress(n), rax);
__ tag_local(frame::TagValue, n);
}
void TemplateTable::lstore(int n) {
transition(ltos, vtos);
__ movq(laddress(n), rax);
__ tag_local(frame::TagCategory2, n);
}
void TemplateTable::fstore(int n) {
transition(ftos, vtos);
__ movflt(faddress(n), xmm0);
__ tag_local(frame::TagValue, n);
}
void TemplateTable::dstore(int n) {
transition(dtos, vtos);
__ movdbl(daddress(n), xmm0);
__ tag_local(frame::TagCategory2, n);
}
void TemplateTable::astore(int n) {
transition(vtos, vtos);
__ pop_ptr(rax, rdx);
__ pop_ptr(rax);
__ movptr(aaddress(n), rax);
__ tag_local(rdx, n);
}
void TemplateTable::pop() {
transition(vtos, vtos);
__ addptr(rsp, Interpreter::stackElementSize());
__ addptr(rsp, Interpreter::stackElementSize);
}
void TemplateTable::pop2() {
transition(vtos, vtos);
__ addptr(rsp, 2 * Interpreter::stackElementSize());
__ addptr(rsp, 2 * Interpreter::stackElementSize);
}
void TemplateTable::dup() {
transition(vtos, vtos);
__ load_ptr_and_tag(0, rax, rdx);
__ push_ptr(rax, rdx);
__ load_ptr(0, rax);
__ push_ptr(rax);
// stack: ..., a, a
}
void TemplateTable::dup_x1() {
transition(vtos, vtos);
// stack: ..., a, b
__ load_ptr_and_tag(0, rax, rdx); // load b
__ load_ptr_and_tag(1, rcx, rbx); // load a
__ store_ptr_and_tag(1, rax, rdx); // store b
__ store_ptr_and_tag(0, rcx, rbx); // store a
__ push_ptr(rax, rdx); // push b
__ load_ptr( 0, rax); // load b
__ load_ptr( 1, rcx); // load a
__ store_ptr(1, rax); // store b
__ store_ptr(0, rcx); // store a
__ push_ptr(rax); // push b
// stack: ..., b, a, b
}
void TemplateTable::dup_x2() {
transition(vtos, vtos);
// stack: ..., a, b, c
__ load_ptr_and_tag(0, rax, rdx); // load c
__ load_ptr_and_tag(2, rcx, rbx); // load a
__ store_ptr_and_tag(2, rax, rdx); // store c in a
__ push_ptr(rax, rdx); // push c
__ load_ptr( 0, rax); // load c
__ load_ptr( 2, rcx); // load a
__ store_ptr(2, rax); // store c in a
__ push_ptr(rax); // push c
// stack: ..., c, b, c, c
__ load_ptr_and_tag(2, rax, rdx); // load b
__ store_ptr_and_tag(2, rcx, rbx); // store a in b
__ load_ptr( 2, rax); // load b
__ store_ptr(2, rcx); // store a in b
// stack: ..., c, a, c, c
__ store_ptr_and_tag(1, rax, rdx); // store b in c
__ store_ptr(1, rax); // store b in c
// stack: ..., c, a, b, c
}
void TemplateTable::dup2() {
transition(vtos, vtos);
// stack: ..., a, b
__ load_ptr_and_tag(1, rax, rdx); // load a
__ push_ptr(rax, rdx); // push a
__ load_ptr_and_tag(1, rax, rdx); // load b
__ push_ptr(rax, rdx); // push b
__ load_ptr(1, rax); // load a
__ push_ptr(rax); // push a
__ load_ptr(1, rax); // load b
__ push_ptr(rax); // push b
// stack: ..., a, b, a, b
}
void TemplateTable::dup2_x1() {
transition(vtos, vtos);
// stack: ..., a, b, c
__ load_ptr_and_tag(0, rcx, rbx); // load c
__ load_ptr_and_tag(1, rax, rdx); // load b
__ push_ptr(rax, rdx); // push b
__ push_ptr(rcx, rbx); // push c
__ load_ptr( 0, rcx); // load c
__ load_ptr( 1, rax); // load b
__ push_ptr(rax); // push b
__ push_ptr(rcx); // push c
// stack: ..., a, b, c, b, c
__ store_ptr_and_tag(3, rcx, rbx); // store c in b
__ store_ptr(3, rcx); // store c in b
// stack: ..., a, c, c, b, c
__ load_ptr_and_tag(4, rcx, rbx); // load a
__ store_ptr_and_tag(2, rcx, rbx); // store a in 2nd c
__ load_ptr( 4, rcx); // load a
__ store_ptr(2, rcx); // store a in 2nd c
// stack: ..., a, c, a, b, c
__ store_ptr_and_tag(4, rax, rdx); // store b in a
__ store_ptr(4, rax); // store b in a
// stack: ..., b, c, a, b, c
}
void TemplateTable::dup2_x2() {
transition(vtos, vtos);
// stack: ..., a, b, c, d
__ load_ptr_and_tag(0, rcx, rbx); // load d
__ load_ptr_and_tag(1, rax, rdx); // load c
__ push_ptr(rax, rdx); // push c
__ push_ptr(rcx, rbx); // push d
__ load_ptr( 0, rcx); // load d
__ load_ptr( 1, rax); // load c
__ push_ptr(rax); // push c
__ push_ptr(rcx); // push d
// stack: ..., a, b, c, d, c, d
__ load_ptr_and_tag(4, rax, rdx); // load b
__ store_ptr_and_tag(2, rax, rdx); // store b in d
__ store_ptr_and_tag(4, rcx, rbx); // store d in b
__ load_ptr( 4, rax); // load b
__ store_ptr(2, rax); // store b in d
__ store_ptr(4, rcx); // store d in b
// stack: ..., a, d, c, b, c, d
__ load_ptr_and_tag(5, rcx, rbx); // load a
__ load_ptr_and_tag(3, rax, rdx); // load c
__ store_ptr_and_tag(3, rcx, rbx); // store a in c
__ store_ptr_and_tag(5, rax, rdx); // store c in a
__ load_ptr( 5, rcx); // load a
__ load_ptr( 3, rax); // load c
__ store_ptr(3, rcx); // store a in c
__ store_ptr(5, rax); // store c in a
// stack: ..., c, d, a, b, c, d
}
void TemplateTable::swap() {
transition(vtos, vtos);
// stack: ..., a, b
__ load_ptr_and_tag(1, rcx, rbx); // load a
__ load_ptr_and_tag(0, rax, rdx); // load b
__ store_ptr_and_tag(0, rcx, rbx); // store a in b
__ store_ptr_and_tag(1, rax, rdx); // store b in a
__ load_ptr( 1, rcx); // load a
__ load_ptr( 0, rax); // load b
__ store_ptr(0, rcx); // store a in b
__ store_ptr(1, rax); // store b in a
// stack: ..., b, a
}
@ -1156,12 +1120,12 @@ void TemplateTable::iop2(Operation op) {
void TemplateTable::lop2(Operation op) {
transition(ltos, ltos);
switch (op) {
case add : __ pop_l(rdx); __ addptr (rax, rdx); break;
case sub : __ mov(rdx, rax); __ pop_l(rax); __ subptr (rax, rdx); break;
case _and : __ pop_l(rdx); __ andptr (rax, rdx); break;
case _or : __ pop_l(rdx); __ orptr (rax, rdx); break;
case _xor : __ pop_l(rdx); __ xorptr (rax, rdx); break;
default : ShouldNotReachHere();
case add : __ pop_l(rdx); __ addptr(rax, rdx); break;
case sub : __ mov(rdx, rax); __ pop_l(rax); __ subptr(rax, rdx); break;
case _and : __ pop_l(rdx); __ andptr(rax, rdx); break;
case _or : __ pop_l(rdx); __ orptr (rax, rdx); break;
case _xor : __ pop_l(rdx); __ xorptr(rax, rdx); break;
default : ShouldNotReachHere();
}
}
@ -1250,7 +1214,7 @@ void TemplateTable::fop2(Operation op) {
switch (op) {
case add:
__ addss(xmm0, at_rsp());
__ addptr(rsp, Interpreter::stackElementSize());
__ addptr(rsp, Interpreter::stackElementSize);
break;
case sub:
__ movflt(xmm1, xmm0);
@ -1259,7 +1223,7 @@ void TemplateTable::fop2(Operation op) {
break;
case mul:
__ mulss(xmm0, at_rsp());
__ addptr(rsp, Interpreter::stackElementSize());
__ addptr(rsp, Interpreter::stackElementSize);
break;
case div:
__ movflt(xmm1, xmm0);
@ -1282,7 +1246,7 @@ void TemplateTable::dop2(Operation op) {
switch (op) {
case add:
__ addsd(xmm0, at_rsp());
__ addptr(rsp, 2 * Interpreter::stackElementSize());
__ addptr(rsp, 2 * Interpreter::stackElementSize);
break;
case sub:
__ movdbl(xmm1, xmm0);
@ -1291,7 +1255,7 @@ void TemplateTable::dop2(Operation op) {
break;
case mul:
__ mulsd(xmm0, at_rsp());
__ addptr(rsp, 2 * Interpreter::stackElementSize());
__ addptr(rsp, 2 * Interpreter::stackElementSize);
break;
case div:
__ movdbl(xmm1, xmm0);
@ -2782,7 +2746,6 @@ void TemplateTable::fast_xaccess(TosState state) {
// get receiver
__ movptr(rax, aaddress(0));
debug_only(__ verify_local_tag(frame::TagReference, 0));
// access constant pool cache
__ get_cache_and_index_at_bcp(rcx, rdx, 2);
__ movptr(rbx,
@ -2858,7 +2821,6 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no)
if (load_receiver) {
__ movl(recv, flags);
__ andl(recv, 0xFF);
if (TaggedStackInterpreter) __ shll(recv, 1); // index*2
Address recv_addr(rsp, recv, Address::times_8, -Interpreter::expr_offset_in_bytes(1));
__ movptr(recv, recv_addr);
__ verify_oop(recv);
@ -3610,13 +3572,11 @@ void TemplateTable::multianewarray() {
__ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions
// last dim is on top of stack; we want address of first one:
// first_addr = last_addr + (ndims - 1) * wordSize
if (TaggedStackInterpreter) __ shll(rax, 1); // index*2
__ lea(c_rarg1, Address(rsp, rax, Address::times_8, -wordSize));
call_VM(rax,
CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray),
c_rarg1);
__ load_unsigned_byte(rbx, at_bcp(3));
if (TaggedStackInterpreter) __ shll(rbx, 1); // index*2
__ lea(rsp, Address(rsp, rbx, Address::times_8));
}
#endif // !CC_INTERP
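
As a quick check of the multianewarray address comment above (first_addr = last_addr + (ndims - 1) * wordSize), here is a worked example with hypothetical numbers; it assumes wordSize == 8 and one stack slot per dimension, matching the times_8 addressing in the lea:

#include <cstdio>

int main() {
  const unsigned long wordSize  = 8;           // one stack slot per dimension
  const unsigned long last_addr = 0x7ffd0000;  // hypothetical rsp, last (topmost) dimension
  const int ndims = 3;
  unsigned long first_addr = last_addr + (ndims - 1) * wordSize;
  std::printf("first dimension word at 0x%lx\n", first_addr);  // 0x7ffd0010
  return 0;
}
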

View File

@ -37,15 +37,18 @@
thread->reset_last_Java_frame(); \
fixup_after_potential_safepoint()
void CppInterpreter::normal_entry(methodOop method, intptr_t UNUSED, TRAPS) {
int CppInterpreter::normal_entry(methodOop method, intptr_t UNUSED, TRAPS) {
JavaThread *thread = (JavaThread *) THREAD;
// Allocate and initialize our frame.
InterpreterFrame *frame = InterpreterFrame::build(method, CHECK);
InterpreterFrame *frame = InterpreterFrame::build(method, CHECK_0);
thread->push_zero_frame(frame);
// Execute those bytecodes!
main_loop(0, THREAD);
// No deoptimized frames on the stack
return 0;
}
void CppInterpreter::main_loop(int recurse, TRAPS) {
@ -165,7 +168,7 @@ void CppInterpreter::main_loop(int recurse, TRAPS) {
stack->push(result[-i]);
}
void CppInterpreter::native_entry(methodOop method, intptr_t UNUSED, TRAPS) {
int CppInterpreter::native_entry(methodOop method, intptr_t UNUSED, TRAPS) {
// Make sure method is native and not abstract
assert(method->is_native() && !method->is_abstract(), "should be");
@ -173,7 +176,7 @@ void CppInterpreter::native_entry(methodOop method, intptr_t UNUSED, TRAPS) {
ZeroStack *stack = thread->zero_stack();
// Allocate and initialize our frame
InterpreterFrame *frame = InterpreterFrame::build(method, CHECK);
InterpreterFrame *frame = InterpreterFrame::build(method, CHECK_0);
thread->push_zero_frame(frame);
interpreterState istate = frame->interpreter_state();
intptr_t *locals = istate->locals();
@ -430,25 +433,26 @@ void CppInterpreter::native_entry(methodOop method, intptr_t UNUSED, TRAPS) {
ShouldNotReachHere();
}
}
// No deoptimized frames on the stack
return 0;
}
void CppInterpreter::accessor_entry(methodOop method, intptr_t UNUSED, TRAPS) {
int CppInterpreter::accessor_entry(methodOop method, intptr_t UNUSED, TRAPS) {
JavaThread *thread = (JavaThread *) THREAD;
ZeroStack *stack = thread->zero_stack();
intptr_t *locals = stack->sp();
// Drop into the slow path if we need a safepoint check
if (SafepointSynchronize::do_call_back()) {
normal_entry(method, 0, THREAD);
return;
return normal_entry(method, 0, THREAD);
}
// Load the object pointer and drop into the slow path
// if we have a NullPointerException
oop object = LOCALS_OBJECT(0);
if (object == NULL) {
normal_entry(method, 0, THREAD);
return;
return normal_entry(method, 0, THREAD);
}
// Read the field index from the bytecode, which looks like this:
@ -470,15 +474,14 @@ void CppInterpreter::accessor_entry(methodOop method, intptr_t UNUSED, TRAPS) {
constantPoolCacheOop cache = method->constants()->cache();
ConstantPoolCacheEntry* entry = cache->entry_at(index);
if (!entry->is_resolved(Bytecodes::_getfield)) {
normal_entry(method, 0, THREAD);
return;
return normal_entry(method, 0, THREAD);
}
// Get the result and push it onto the stack
switch (entry->flag_state()) {
case ltos:
case dtos:
stack->overflow_check(1, CHECK);
stack->overflow_check(1, CHECK_0);
stack->alloc(wordSize);
break;
}
@ -558,20 +561,25 @@ void CppInterpreter::accessor_entry(methodOop method, intptr_t UNUSED, TRAPS) {
ShouldNotReachHere();
}
}
// No deoptimized frames on the stack
return 0;
}
void CppInterpreter::empty_entry(methodOop method, intptr_t UNUSED, TRAPS) {
int CppInterpreter::empty_entry(methodOop method, intptr_t UNUSED, TRAPS) {
JavaThread *thread = (JavaThread *) THREAD;
ZeroStack *stack = thread->zero_stack();
// Drop into the slow path if we need a safepoint check
if (SafepointSynchronize::do_call_back()) {
normal_entry(method, 0, THREAD);
return;
return normal_entry(method, 0, THREAD);
}
// Pop our parameters
stack->set_sp(stack->sp() + method->size_of_parameters());
// No deoptimized frames on the stack
return 0;
}
InterpreterFrame *InterpreterFrame::build(const methodOop method, TRAPS) {
@ -833,7 +841,7 @@ int AbstractInterpreter::layout_activation(methodOop method,
int callee_extra_locals = callee_locals - callee_param_count;
if (interpreter_frame) {
intptr_t *locals = interpreter_frame->sp() + method->max_locals();
intptr_t *locals = interpreter_frame->fp() + method->max_locals();
interpreterState istate = interpreter_frame->get_interpreterState();
intptr_t *monitor_base = (intptr_t*) istate;
intptr_t *stack_base = monitor_base - monitor_words;

View File

@ -29,10 +29,10 @@
public:
// Method entries
static void normal_entry(methodOop method, intptr_t UNUSED, TRAPS);
static void native_entry(methodOop method, intptr_t UNUSED, TRAPS);
static void accessor_entry(methodOop method, intptr_t UNUSED, TRAPS);
static void empty_entry(methodOop method, intptr_t UNUSED, TRAPS);
static int normal_entry(methodOop method, intptr_t UNUSED, TRAPS);
static int native_entry(methodOop method, intptr_t UNUSED, TRAPS);
static int accessor_entry(methodOop method, intptr_t UNUSED, TRAPS);
static int empty_entry(methodOop method, intptr_t UNUSED, TRAPS);
public:
// Main loop of normal_entry

View File

@ -1,6 +1,6 @@
/*
* Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2008, 2009 Red Hat, Inc.
* Copyright 2008, 2009, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -41,20 +41,30 @@ class ZeroEntry {
}
private:
typedef void (*NormalEntryFunc)(methodOop method,
intptr_t base_pc,
TRAPS);
typedef void (*OSREntryFunc)(methodOop method,
address osr_buf,
intptr_t base_pc,
TRAPS);
typedef int (*NormalEntryFunc)(methodOop method,
intptr_t base_pc,
TRAPS);
typedef int (*OSREntryFunc)(methodOop method,
address osr_buf,
intptr_t base_pc,
TRAPS);
public:
void invoke(methodOop method, TRAPS) const {
((NormalEntryFunc) entry_point())(method, (intptr_t) this, THREAD);
maybe_deoptimize(
((NormalEntryFunc) entry_point())(method, (intptr_t) this, THREAD),
THREAD);
}
void invoke_osr(methodOop method, address osr_buf, TRAPS) const {
((OSREntryFunc) entry_point())(method, osr_buf, (intptr_t) this, THREAD);
maybe_deoptimize(
((OSREntryFunc) entry_point())(method, osr_buf, (intptr_t) this, THREAD),
THREAD);
}
private:
static void maybe_deoptimize(int deoptimized_frames, TRAPS) {
if (deoptimized_frames)
CppInterpreter::main_loop(deoptimized_frames - 1, THREAD);
}
public:
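
The ZeroEntry change above turns the entry functions into int-returning routines and routes their result through maybe_deoptimize. A minimal sketch of that handshake with stand-in names (not the real CppInterpreter API):

#include <cstdio>

static void main_loop(int recurse) {            // stands in for CppInterpreter::main_loop
  std::printf("re-entering interpreter for %d deoptimized frame(s)\n", recurse + 1);
}

static int some_entry(bool deoptimized) {       // stands in for normal_entry and friends
  return deoptimized ? 1 : 0;                   // "no deoptimized frames on the stack" == 0
}

static void invoke(bool deoptimized) {          // stands in for ZeroEntry::invoke
  int frames = some_entry(deoptimized);
  if (frames)
    main_loop(frames - 1);                      // mirrors maybe_deoptimize above
}

int main() {
  invoke(false);                                // common case: entry returned 0, nothing to do
  invoke(true);                                 // deopt case: resume the frames left behind
  return 0;
}
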

View File

@ -1,6 +1,6 @@
/*
* Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2007, 2008, 2009 Red Hat, Inc.
* Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -36,6 +36,10 @@ bool frame::is_interpreted_frame() const {
return zeroframe()->is_interpreter_frame();
}
bool frame::is_fake_stub_frame() const {
return zeroframe()->is_fake_stub_frame();
}
frame frame::sender_for_entry_frame(RegisterMap *map) const {
assert(zeroframe()->is_entry_frame(), "wrong type of frame");
assert(map != NULL, "map must be set");
@ -44,14 +48,14 @@ frame frame::sender_for_entry_frame(RegisterMap *map) const {
"sender should be next Java frame");
map->clear();
assert(map->include_argument_oops(), "should be set by clear");
return frame(sender_sp(), sp() + 1);
return frame(zeroframe()->next(), sender_sp());
}
frame frame::sender_for_nonentry_frame(RegisterMap *map) const {
assert(zeroframe()->is_interpreter_frame() ||
zeroframe()->is_shark_frame() ||
zeroframe()->is_fake_stub_frame(), "wrong type of frame");
return frame(sender_sp(), sp() + 1);
return frame(zeroframe()->next(), sender_sp());
}
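
With this change a Zero frame carries its ZeroFrame pointer directly, and a sender is found by following zeroframe()->next(). A small sketch of that walk with hypothetical types, not the real Zero classes:

#include <cstdio>

struct ZeroFrameModel {                 // hypothetical stand-in for ZeroFrame
  ZeroFrameModel* next;                 // next older frame; NULL at the entry frame
  const char*     kind;
};

static void walk_senders(const ZeroFrameModel* top) {
  for (const ZeroFrameModel* f = top; f != NULL; f = f->next)
    std::printf("frame: %s\n", f->kind);
}

int main() {
  ZeroFrameModel entry = { NULL,   "entry frame" };
  ZeroFrameModel mid   = { &entry, "interpreter frame" };
  ZeroFrameModel top   = { &mid,   "interpreter frame" };
  walk_senders(&top);
  return 0;
}
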
frame frame::sender(RegisterMap* map) const {
@ -172,8 +176,8 @@ void frame::zero_print_on_error(int frame_index,
char *valuebuf = buf + buflen;
// Print each word of the frame
for (intptr_t *addr = fp(); addr <= sp(); addr++) {
int offset = sp() - addr;
for (intptr_t *addr = sp(); addr <= fp(); addr++) {
int offset = fp() - addr;
// Fill in default values, then try and improve them
snprintf(fieldbuf, buflen, "word[%d]", offset);

View File

@ -1,6 +1,6 @@
/*
* Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2007, 2008, 2009 Red Hat, Inc.
* Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,17 +32,18 @@
// Constructor
public:
frame(intptr_t* sp, intptr_t* fp);
frame(ZeroFrame* zeroframe, intptr_t* sp);
// The sp of a Zero frame is the address of the highest word in
// that frame. We keep track of the lowest address too, so the
// boundaries of the frame are available for debug printing.
private:
intptr_t* _fp;
ZeroFrame* _zeroframe;
public:
const ZeroFrame *zeroframe() const {
return _zeroframe;
}
intptr_t* fp() const {
return _fp;
return (intptr_t *) zeroframe();
}
#ifdef CC_INTERP
@ -50,10 +51,6 @@
#endif // CC_INTERP
public:
const ZeroFrame *zeroframe() const {
return (ZeroFrame *) sp();
}
const EntryFrame *zero_entryframe() const {
return zeroframe()->as_entry_frame();
}
@ -64,6 +61,9 @@
return zeroframe()->as_shark_frame();
}
public:
bool is_fake_stub_frame() const;
public:
frame sender_for_nonentry_frame(RegisterMap* map) const;

View File

@ -1,6 +1,6 @@
/*
* Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2007, 2008, 2009 Red Hat, Inc.
* Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,16 +26,16 @@
// Constructors
inline frame::frame() {
_zeroframe = NULL;
_sp = NULL;
_fp = NULL;
_pc = NULL;
_cb = NULL;
_deopt_state = unknown;
}
inline frame::frame(intptr_t* sp, intptr_t* fp) {
inline frame::frame(ZeroFrame* zf, intptr_t* sp) {
_zeroframe = zf;
_sp = sp;
_fp = fp;
switch (zeroframe()->type()) {
case ZeroFrame::ENTRY_FRAME:
_pc = StubRoutines::call_stub_return_pc();
@ -66,7 +66,7 @@ inline frame::frame(intptr_t* sp, intptr_t* fp) {
// Accessors
inline intptr_t* frame::sender_sp() const {
return (intptr_t *) zeroframe()->next();
return fp() + 1;
}
inline intptr_t* frame::link() const {
@ -120,7 +120,7 @@ inline jint frame::interpreter_frame_expression_stack_direction() {
// we can distinguish identity and younger/older relationship. NULL
// represents an invalid (incomparable) frame.
inline intptr_t* frame::id() const {
return sp();
return fp();
}
inline JavaCallWrapper* frame::entry_frame_call_wrapper() const {

View File

@ -1,5 +1,5 @@
/*
* Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2007, 2008 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -36,26 +36,14 @@
public:
static int expr_index_at(int i) {
return stackElementWords() * i;
}
static int expr_tag_index_at(int i) {
assert(TaggedStackInterpreter, "should not call this");
Unimplemented();
return stackElementWords * i;
}
static int expr_offset_in_bytes(int i) {
return stackElementSize() * i;
}
static int expr_tag_offset_in_bytes(int i) {
assert(TaggedStackInterpreter, "should not call this");
Unimplemented();
return stackElementSize * i;
}
static int local_index_at(int i) {
assert(i <= 0, "local direction already negated");
return stackElementWords() * i + (value_offset_in_bytes() / wordSize);
}
static int local_tag_index_at(int i) {
assert(TaggedStackInterpreter, "should not call this");
Unimplemented();
return stackElementWords * i;
}

View File

@ -1,6 +1,6 @@
/*
* Copyright 2003-2005 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2007, 2008 Red Hat, Inc.
* Copyright 2007, 2008, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,21 +23,31 @@
*
*/
private:
ZeroFrame* volatile _last_Java_fp;
public:
// Each arch must define reset, save, restore
// These are used by objects that only care about:
// 1 - initializing a new state (thread creation, javaCalls)
// 2 - saving a current state (javaCalls)
// 3 - restoring an old state (javaCalls)
// Note that whenever _last_Java_sp != NULL other anchor fields
// must be valid. The profiler apparently depends on this.
void clear() {
// clearing _last_Java_sp must be first
_last_Java_sp = NULL;
// fence?
_last_Java_fp = NULL;
_last_Java_pc = NULL;
}
void copy(JavaFrameAnchor* src) {
set(src->_last_Java_sp, src->_last_Java_pc, src->_last_Java_fp);
}
void set(intptr_t* sp, address pc, ZeroFrame* fp) {
// In order to make sure the transition state is valid for "this"
// We must clear _last_Java_sp before copying the rest of the new
// data
@ -46,13 +56,14 @@
// previous version (pd_cache_state) don't NULL _last_Java_sp
// unless the value is changing
//
if (_last_Java_sp != src->_last_Java_sp)
if (_last_Java_sp != sp)
_last_Java_sp = NULL;
_last_Java_pc = src->_last_Java_pc;
_last_Java_fp = fp;
_last_Java_pc = pc;
// Must be last so profiler will always see valid frame if
// has_last_frame() is true
_last_Java_sp = src->_last_Java_sp;
_last_Java_sp = sp;
}
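
The ordering the comments above insist on (clear _last_Java_sp first, publish it last) is the usual publish-last discipline for an anchor that a profiler may observe concurrently. A stand-in sketch of the idea, not the real JavaFrameAnchor:

#include <atomic>
#include <cstdint>

struct AnchorModel {                              // hypothetical stand-in type
  std::atomic<std::intptr_t*> last_sp;            // observers test this word first
  std::atomic<void*>          last_fp;            // only trusted while last_sp is non-NULL

  void set(std::intptr_t* sp, void* fp) {
    last_sp.store(nullptr);                       // invalidate the anchor before changing anything
    last_fp.store(fp);                            // update the dependent field
    last_sp.store(sp);                            // publish last: observers see a consistent pair
  }
};

int main() {
  AnchorModel a;
  a.last_sp.store(nullptr);
  a.last_fp.store(nullptr);
  std::intptr_t word = 0;
  a.set(&word, &a);
  return a.last_sp.load() == &word ? 0 : 1;
}
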
bool walkable() {
@ -67,6 +78,10 @@
return _last_Java_sp;
}
void set_last_Java_sp(intptr_t* sp) {
_last_Java_sp = sp;
ZeroFrame* last_Java_fp() const {
return _last_Java_fp;
}
static ByteSize last_Java_fp_offset() {
return byte_offset_of(JavaFrameAnchor, _last_Java_fp);
}

View File

@ -26,6 +26,10 @@
#include "incls/_precompiled.incl"
#include "incls/_methodHandles_zero.cpp.incl"
int MethodHandles::adapter_conversion_ops_supported_mask() {
ShouldNotCallThis();
}
void MethodHandles::generate_method_handle_stub(MacroAssembler* masm,
MethodHandles::EntryKind ek) {
ShouldNotCallThis();

View File

@ -26,12 +26,18 @@
#include "incls/_precompiled.incl"
#include "incls/_stack_zero.cpp.incl"
int ZeroStack::suggest_size(Thread *thread) const {
assert(needs_setup(), "already set up");
return align_size_down(abi_stack_available(thread) / 2, wordSize);
}
void ZeroStack::handle_overflow(TRAPS) {
JavaThread *thread = (JavaThread *) THREAD;
// Set up the frame anchor if it isn't already
bool has_last_Java_frame = thread->has_last_Java_frame();
if (!has_last_Java_frame) {
intptr_t *sp = thread->zero_stack()->sp();
ZeroFrame *frame = thread->top_zero_frame();
while (frame) {
if (frame->is_shark_frame())
@ -44,13 +50,14 @@ void ZeroStack::handle_overflow(TRAPS) {
break;
}
sp = ((intptr_t *) frame) + 1;
frame = frame->next();
}
if (frame == NULL)
fatal("unrecoverable stack overflow");
thread->set_last_Java_frame(frame);
thread->set_last_Java_frame(frame, sp);
}
// Throw the exception
@ -71,3 +78,9 @@ void ZeroStack::handle_overflow(TRAPS) {
if (!has_last_Java_frame)
thread->reset_last_Java_frame();
}
#ifndef PRODUCT
void ZeroStack::zap(int c) {
memset(_base, c, available_words() * wordSize);
}
#endif // PRODUCT

View File

@ -42,6 +42,8 @@ class ZeroStack {
return _base == NULL;
}
int suggest_size(Thread *thread) const;
void setup(void *mem, size_t size) {
assert(needs_setup(), "already set up");
assert(!(size & WordAlignmentMask), "unaligned");
@ -67,6 +69,9 @@ class ZeroStack {
_sp = new_sp;
}
int total_words() const {
return _top - _base;
}
int available_words() const {
return _sp - _base;
}
@ -89,11 +94,15 @@ class ZeroStack {
int shadow_pages_size() const {
return _shadow_pages_size;
}
int abi_stack_available(Thread *thread) const;
public:
void overflow_check(int required_words, TRAPS);
static void handle_overflow(TRAPS);
public:
void zap(int c) PRODUCT_RETURN;
public:
static ByteSize base_offset() {
return byte_offset_of(ZeroStack, _base);

View File

@ -25,19 +25,24 @@
// This function should match SharkStack::CreateStackOverflowCheck
inline void ZeroStack::overflow_check(int required_words, TRAPS) {
JavaThread *thread = (JavaThread *) THREAD;
// Check the Zero stack
if (required_words > available_words()) {
if (available_words() < required_words) {
handle_overflow(THREAD);
return;
}
// Check the ABI stack
address stack_top = thread->stack_base() - thread->stack_size();
int free_stack = ((address) &stack_top) - stack_top;
if (free_stack < shadow_pages_size()) {
if (abi_stack_available(THREAD) < 0) {
handle_overflow(THREAD);
return;
}
}
// This method returns the amount of ABI stack available for us
// to use under normal circumstances. Note that the returned
// value can be negative.
inline int ZeroStack::abi_stack_available(Thread *thread) const {
int stack_used = thread->stack_base() - (address) &stack_used;
int stack_free = thread->stack_size() - stack_used;
return stack_free - shadow_pages_size();
}
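
abi_stack_available() estimates free native stack by taking the address of a local, which is assumed to sit near the current top of a downward-growing stack. A tiny standalone illustration of that trick (not HotSpot code):

#include <cstddef>
#include <cstdio>

static const char* stack_base;        // captured near the top of the thread's stack

static std::ptrdiff_t stack_used() {
  char marker;                        // lives in the current frame, lower on the stack
  return stack_base - &marker;        // positive on a downward-growing stack
}

int main() {
  char anchor;
  stack_base = &anchor;               // pretend this is the thread's stack base
  std::printf("approx bytes of stack used below main: %td\n", stack_used());
  return 0;
}
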

View File

@ -51,10 +51,7 @@ class StubGenerator: public StubCodeGenerator {
// Set up the stack if necessary
bool stack_needs_teardown = false;
if (stack->needs_setup()) {
size_t stack_used = thread->stack_base() - (address) &stack_used;
size_t stack_free = thread->stack_size() - stack_used;
size_t zero_stack_size = align_size_down(stack_free / 2, wordSize);
size_t zero_stack_size = stack->suggest_size(thread);
stack->setup(alloca(zero_stack_size), zero_stack_size);
stack_needs_teardown = true;
}

View File

@ -461,7 +461,7 @@ bool AttachListener::is_init_trigger() {
if (init_at_startup() || is_initialized()) {
return false; // initialized at startup or already initialized
}
char fn[128];
char fn[PATH_MAX+1];
sprintf(fn, ".attach_pid%d", os::current_process_id());
int ret;
struct stat64 st;

View File

@ -2305,7 +2305,7 @@ void linux_wrap_code(char* base, size_t size) {
return;
}
char buf[40];
char buf[PATH_MAX+1];
int num = Atomic::add(1, &cnt);
snprintf(buf, sizeof(buf), "%s/hs-vm-%d-%d",
@ -3495,7 +3495,8 @@ void os::Linux::set_signal_handler(int sig, bool set_installed) {
// libjsig also interposes the sigaction() call below and saves the
// old sigaction on it own.
} else {
fatal2("Encountered unexpected pre-existing sigaction handler %#lx for signal %d.", (long)oldhand, sig);
fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
"%#lx for signal %d.", (long)oldhand, sig));
}
}
@ -3817,7 +3818,8 @@ void os::init(void) {
Linux::set_page_size(sysconf(_SC_PAGESIZE));
if (Linux::page_size() == -1) {
fatal1("os_linux.cpp: os::init: sysconf failed (%s)", strerror(errno));
fatal(err_msg("os_linux.cpp: os::init: sysconf failed (%s)",
strerror(errno)));
}
init_page_sizes((size_t) Linux::page_size());
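
The os_linux changes above all follow one conversion: the fixed-arity fatal1/fatal2 calls become fatal(err_msg(...)), formatting the message before handing it to the reporting function. A rough stand-in sketch of that pattern (names are hypothetical, not the real VM utilities):

#include <cstdarg>
#include <cstdio>
#include <cstdlib>

static const char* err_msg_like(const char* fmt, ...) {   // stand-in for err_msg()
  static char buf[256];                                    // the real VM formats into a resource buffer
  va_list ap;
  va_start(ap, fmt);
  std::vsnprintf(buf, sizeof(buf), fmt, ap);
  va_end(ap);
  return buf;
}

static void fatal_like(const char* msg) {                  // stand-in for fatal(); the VM aborts here
  std::fprintf(stderr, "fatal error: %s\n", msg);
  std::exit(EXIT_FAILURE);
}

int main() {
  int err = 42;                                             // pretend errno/GetLastError() value
  fatal_like(err_msg_like("heap walk aborted with error %d", err));
  return 0;                                                 // not reached
}
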

View File

@ -592,7 +592,7 @@ bool AttachListener::is_init_trigger() {
if (init_at_startup() || is_initialized()) {
return false; // initialized at startup or already initialized
}
char fn[128];
char fn[PATH_MAX+1];
sprintf(fn, ".attach_pid%d", os::current_process_id());
int ret;
struct stat64 st;

View File

@ -1567,7 +1567,8 @@ int os::allocate_thread_local_storage() {
// treat %g2 as a caller-save register, preserving it in a %lN.
thread_key_t tk;
if (thr_keycreate( &tk, NULL ) )
fatal1("os::allocate_thread_local_storage: thr_keycreate failed (%s)", strerror(errno));
fatal(err_msg("os::allocate_thread_local_storage: thr_keycreate failed "
"(%s)", strerror(errno)));
return int(tk);
}
@ -1585,7 +1586,8 @@ void os::thread_local_storage_at_put(int index, void* value) {
if (errno == ENOMEM) {
vm_exit_out_of_memory(SMALLINT, "thr_setspecific: out of swap space");
} else {
fatal1("os::thread_local_storage_at_put: thr_setspecific failed (%s)", strerror(errno));
fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed "
"(%s)", strerror(errno)));
}
} else {
ThreadLocalStorage::set_thread_in_slot ((Thread *) value) ;
@ -1738,7 +1740,7 @@ jlong getTimeMillis() {
jlong os::javaTimeMillis() {
timeval t;
if (gettimeofday( &t, NULL) == -1)
fatal1("os::javaTimeMillis: gettimeofday (%s)", strerror(errno));
fatal(err_msg("os::javaTimeMillis: gettimeofday (%s)", strerror(errno)));
return jlong(t.tv_sec) * 1000 + jlong(t.tv_usec) / 1000;
}
@ -4233,7 +4235,8 @@ void os::Solaris::set_signal_handler(int sig, bool set_installed, bool oktochain
// libjsig also interposes the sigaction() call below and saves the
// old sigaction on it own.
} else {
fatal2("Encountered unexpected pre-existing sigaction handler %#lx for signal %d.", (long)oldhand, sig);
fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
"%#lx for signal %d.", (long)oldhand, sig));
}
}
@ -4764,7 +4767,8 @@ void os::init(void) {
page_size = sysconf(_SC_PAGESIZE);
if (page_size == -1)
fatal1("os_solaris.cpp: os::init: sysconf failed (%s)", strerror(errno));
fatal(err_msg("os_solaris.cpp: os::init: sysconf failed (%s)",
strerror(errno)));
init_page_sizes((size_t) page_size);
Solaris::initialize_system_info();
@ -4775,7 +4779,7 @@ void os::init(void) {
int fd = open("/dev/zero", O_RDWR);
if (fd < 0) {
fatal1("os::init: cannot open /dev/zero (%s)", strerror(errno));
fatal(err_msg("os::init: cannot open /dev/zero (%s)", strerror(errno)));
} else {
Solaris::set_dev_zero_fd(fd);

View File

@ -47,7 +47,8 @@ ThreadCritical::ThreadCritical() {
thread_t owner = thr_self();
if (global_mut_owner != owner) {
if (os::Solaris::mutex_lock(&global_mut))
fatal1("ThreadCritical::ThreadCritical: mutex_lock failed (%s)", strerror(errno));
fatal(err_msg("ThreadCritical::ThreadCritical: mutex_lock failed (%s)",
strerror(errno)));
assert(global_mut_count == 0, "must have clean count");
assert(global_mut_owner == -1, "must have clean owner");
}
@ -66,7 +67,8 @@ ThreadCritical::~ThreadCritical() {
if (global_mut_count == 0) {
global_mut_owner = -1;
if (os::Solaris::mutex_unlock(&global_mut))
fatal1("ThreadCritical::~ThreadCritical: mutex_unlock failed (%s)", strerror(errno));
fatal(err_msg("ThreadCritical::~ThreadCritical: mutex_unlock failed "
"(%s)", strerror(errno)));
}
} else {
assert (Threads::number_of_threads() == 0, "valid only during initialization");

View File

@ -724,7 +724,7 @@ jlong offset() {
java_origin.wMilliseconds = 0;
FILETIME jot;
if (!SystemTimeToFileTime(&java_origin, &jot)) {
fatal1("Error = %d\nWindows error", GetLastError());
fatal(err_msg("Error = %d\nWindows error", GetLastError()));
}
_calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
_has_calculated_offset = 1;
@ -4095,7 +4095,7 @@ bool os::check_heap(bool force) {
}
int err = GetLastError();
if (err != ERROR_NO_MORE_ITEMS && err != ERROR_CALL_NOT_IMPLEMENTED) {
fatal1("heap walk aborted with error %d", err);
fatal(err_msg("heap walk aborted with error %d", err));
}
HeapUnlock(heap);
}

View File

@ -153,7 +153,7 @@ static void current_stack_region(address* bottom, size_t* size) {
if (rslt == ENOMEM) {
vm_exit_out_of_memory(0, "pthread_getattr_np");
} else {
fatal1("pthread_getattr_np failed with errno = %d", rslt);
fatal(err_msg("pthread_getattr_np failed with errno = %d", rslt));
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright 2000-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,9 +30,9 @@ define_pd_global(bool, DontYieldALot, false);
define_pd_global(intx, ThreadStackSize, 1024); // 0 => use system default
define_pd_global(intx, VMThreadStackSize, 1024);
#else
// ThreadStackSize 320 allows TaggedStackInterpreter and a couple of test cases
// to run while keeping the number of threads that can be created high.
// System default ThreadStackSize appears to be 512 which is too big.
// ThreadStackSize 320 allows a couple of test cases to run while
// keeping the number of threads that can be created high. System
// default ThreadStackSize appears to be 512 which is too big.
define_pd_global(intx, ThreadStackSize, 320);
define_pd_global(intx, VMThreadStackSize, 512);
#endif // AMD64

View File

@ -680,7 +680,7 @@ static void current_stack_region(address * bottom, size_t * size) {
if (rslt == ENOMEM) {
vm_exit_out_of_memory(0, "pthread_getattr_np");
} else {
fatal1("pthread_getattr_np failed with errno = %d", rslt);
fatal(err_msg("pthread_getattr_np failed with errno = %d", rslt));
}
}

View File

@ -288,7 +288,7 @@ static void current_stack_region(address *bottom, size_t *size) {
vm_exit_out_of_memory(0, "pthread_getattr_np");
}
else {
fatal1("pthread_getattr_np failed with errno = %d", res);
fatal(err_msg("pthread_getattr_np failed with errno = %d", res));
}
}
@ -296,7 +296,7 @@ static void current_stack_region(address *bottom, size_t *size) {
size_t stack_bytes;
res = pthread_attr_getstack(&attr, (void **) &stack_bottom, &stack_bytes);
if (res != 0) {
fatal1("pthread_attr_getstack failed with errno = %d", res);
fatal(err_msg("pthread_attr_getstack failed with errno = %d", res));
}
address stack_top = stack_bottom + stack_bytes;
@ -308,7 +308,7 @@ static void current_stack_region(address *bottom, size_t *size) {
size_t guard_bytes;
res = pthread_attr_getguardsize(&attr, &guard_bytes);
if (res != 0) {
fatal1("pthread_attr_getguardsize failed with errno = %d", res);
fatal(err_msg("pthread_attr_getguardsize failed with errno = %d", res));
}
int guard_pages = align_size_up(guard_bytes, page_bytes) / page_bytes;
assert(guard_bytes == guard_pages * page_bytes, "unaligned guard");

View File

@ -68,19 +68,30 @@
public:
void set_last_Java_frame() {
set_last_Java_frame(top_zero_frame());
set_last_Java_frame(top_zero_frame(), zero_stack()->sp());
}
void reset_last_Java_frame() {
set_last_Java_frame(NULL);
frame_anchor()->zap();
}
void set_last_Java_frame(ZeroFrame* frame) {
frame_anchor()->set_last_Java_sp((intptr_t *) frame);
void set_last_Java_frame(ZeroFrame* fp, intptr_t* sp) {
frame_anchor()->set(sp, NULL, fp);
}
public:
ZeroFrame* last_Java_fp() {
return frame_anchor()->last_Java_fp();
}
private:
frame pd_last_frame() {
assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
return frame(last_Java_sp(), zero_stack()->sp());
return frame(last_Java_fp(), last_Java_sp());
}
public:
static ByteSize last_Java_fp_offset() {
return byte_offset_of(JavaThread, _anchor) +
JavaFrameAnchor::last_Java_fp_offset();
}
public:

View File

@ -1,5 +1,5 @@
/*
* Copyright 2000-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,8 +31,8 @@ define_pd_global(intx, ThreadStackSize, 1024); // 0 => use system defau
define_pd_global(intx, VMThreadStackSize, 1024);
define_pd_global(uintx,JVMInvokeMethodSlack, 8*K);
#else
// ThreadStackSize 320 allows TaggedStackInterpreter and a couple of test cases
// to run while keeping the number of threads that can be created high.
// ThreadStackSize 320 allows a couple of test cases to run while
// keeping the number of threads that can be created high.
define_pd_global(intx, ThreadStackSize, 320);
define_pd_global(intx, VMThreadStackSize, 512);
define_pd_global(uintx,JVMInvokeMethodSlack, 10*K);

View File

@ -721,8 +721,8 @@ void ArchDesc::build_pipe_classes(FILE *fp_cpp) {
fprintf(fp_cpp, " }\n");
fprintf(fp_cpp, "#endif\n\n");
#endif
fprintf(fp_cpp, " assert(this, \"NULL pipeline info\")\n");
fprintf(fp_cpp, " assert(pred, \"NULL predecessor pipline info\")\n\n");
fprintf(fp_cpp, " assert(this, \"NULL pipeline info\");\n");
fprintf(fp_cpp, " assert(pred, \"NULL predecessor pipline info\");\n\n");
fprintf(fp_cpp, " if (pred->hasFixedLatency())\n return (pred->fixedLatency());\n\n");
fprintf(fp_cpp, " // If this is not an operand, then assume a dependence with 0 latency\n");
fprintf(fp_cpp, " if (opnd > _read_stage_count)\n return (0);\n\n");

View File

@ -43,7 +43,8 @@ AbstractAssembler::AbstractAssembler(CodeBuffer* code) {
_code_pos = cs->end();
_oop_recorder= code->oop_recorder();
if (_code_begin == NULL) {
vm_exit_out_of_memory1(0, "CodeCache: no room for %s", code->name());
vm_exit_out_of_memory(0, err_msg("CodeCache: no room for %s",
code->name()));
}
}

View File

@ -1062,7 +1062,7 @@ class LIR_OpJavaCall: public LIR_OpCall {
is_invokedynamic() // An invokedynamic is always a MethodHandle call site.
||
(method()->holder()->name() == ciSymbol::java_dyn_MethodHandle() &&
method()->name() == ciSymbol::invoke_name());
methodOopDesc::is_method_handle_invoke_name(method()->name()->sid()));
}
intptr_t vtable_offset() const {

View File

@ -731,26 +731,29 @@ ciMethod* ciEnv::get_method_by_index_impl(constantPoolHandle cpool,
// ciEnv::get_fake_invokedynamic_method_impl
ciMethod* ciEnv::get_fake_invokedynamic_method_impl(constantPoolHandle cpool,
int index, Bytecodes::Code bc) {
// Compare the following logic with InterpreterRuntime::resolve_invokedynamic.
assert(bc == Bytecodes::_invokedynamic, "must be invokedynamic");
// Get the CallSite from the constant pool cache.
ConstantPoolCacheEntry* cpc_entry = cpool->cache()->secondary_entry_at(index);
assert(cpc_entry != NULL && cpc_entry->is_secondary_entry(), "sanity");
Handle call_site = cpc_entry->f1();
bool is_resolved = cpool->cache()->main_entry_at(index)->is_resolved(bc);
if (is_resolved && (oop) cpool->cache()->secondary_entry_at(index)->f1() == NULL)
// FIXME: code generation could allow for null (unlinked) call site
is_resolved = false;
// Call site might not be linked yet.
if (call_site.is_null()) {
// Call site might not be resolved yet. We could create a real invoker method from the
// compiler, but it is simpler to stop the code path here with an unlinked method.
if (!is_resolved) {
ciInstanceKlass* mh_klass = get_object(SystemDictionary::MethodHandle_klass())->as_instance_klass();
ciSymbol* sig_sym = get_object(cpool->signature_ref_at(index))->as_symbol();
return get_unloaded_method(mh_klass, ciSymbol::invoke_name(), sig_sym);
ciSymbol* sig_sym = get_object(cpool->signature_ref_at(index))->as_symbol();
return get_unloaded_method(mh_klass, ciSymbol::invokeExact_name(), sig_sym);
}
// Get the methodOop from the CallSite.
methodOop method_oop = (methodOop) java_dyn_CallSite::vmmethod(call_site());
assert(method_oop != NULL, "sanity");
assert(method_oop->is_method_handle_invoke(), "consistent");
// Get the invoker methodOop from the constant pool.
intptr_t f2_value = cpool->cache()->main_entry_at(index)->f2();
methodOop signature_invoker = methodOop(f2_value);
assert(signature_invoker != NULL && signature_invoker->is_method() && signature_invoker->is_method_handle_invoke(),
"correct result from LinkResolver::resolve_invokedynamic");
return get_object(method_oop)->as_method();
return get_object(signature_invoker)->as_method();
}

View File

@ -103,7 +103,7 @@ void ciObjectFactory::init_shared_objects() {
for (i = vmSymbols::FIRST_SID; i < vmSymbols::SID_LIMIT; i++) {
symbolHandle sym_handle = vmSymbolHandles::symbol_handle_at((vmSymbols::SID) i);
assert(vmSymbols::find_sid(sym_handle()) == i, "1-1 mapping");
ciSymbol* sym = new (_arena) ciSymbol(sym_handle);
ciSymbol* sym = new (_arena) ciSymbol(sym_handle, (vmSymbols::SID) i);
init_ident_of(sym);
_shared_ci_symbols[i] = sym;
}
@ -273,7 +273,8 @@ ciObject* ciObjectFactory::create_new_object(oop o) {
if (o->is_symbol()) {
symbolHandle h_o(THREAD, (symbolOop)o);
return new (arena()) ciSymbol(h_o);
assert(vmSymbols::find_sid(h_o()) == vmSymbols::NO_SID, "");
return new (arena()) ciSymbol(h_o, vmSymbols::NO_SID);
} else if (o->is_klass()) {
KlassHandle h_k(THREAD, (klassOop)o);
Klass* k = ((klassOop)o)->klass_part();

View File

@ -29,7 +29,17 @@
// ciSymbol::ciSymbol
//
// Preallocated handle variant. Used with handles from vmSymbolHandles.
ciSymbol::ciSymbol(symbolHandle h_s) : ciObject(h_s) {
ciSymbol::ciSymbol(symbolHandle h_s, vmSymbols::SID sid)
: ciObject(h_s), _sid(sid)
{
assert(sid_ok(), "must be in vmSymbols");
}
// Normal case for non-famous symbols.
ciSymbol::ciSymbol(symbolOop s)
: ciObject(s), _sid(vmSymbols::NO_SID)
{
assert(sid_ok(), "must not be in vmSymbols");
}
// ciSymbol

View File

@ -36,8 +36,11 @@ class ciSymbol : public ciObject {
friend class ciObjArrayKlass;
private:
ciSymbol(symbolOop s) : ciObject(s) {}
ciSymbol(symbolHandle s); // for use with vmSymbolHandles
const vmSymbols::SID _sid;
DEBUG_ONLY( bool sid_ok() { return vmSymbols::find_sid(get_symbolOop()) == _sid; } )
ciSymbol(symbolOop s); // normal case, for symbols not mentioned in vmSymbols
ciSymbol(symbolHandle s, vmSymbols::SID sid); // for use with vmSymbolHandles
symbolOop get_symbolOop() const { return (symbolOop)get_oop(); }
@ -52,6 +55,9 @@ private:
static ciSymbol* make_impl(const char* s);
public:
// The enumeration ID from vmSymbols, or vmSymbols::NO_SID if none.
vmSymbols::SID sid() const { return _sid; }
// The text of the symbol as a null-terminated utf8 string.
const char* as_utf8();
int utf8_length();
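
These ciSymbol changes cache the vmSymbols enumeration ID (SID) in the wrapper at construction and check it in debug builds. A rough self-contained sketch of the same caching-plus-assert idea, with invented names (ExampleSymbol, lookup_sid) rather than the real ci classes:

#include <cassert>
#include <string>
#include <unordered_map>

enum ExampleSID { NO_SID = 0, SID_invokeExact = 1, SID_invokeGeneric = 2 };

// Assumption: a lookup that mirrors vmSymbols::find_sid() for a few names.
static ExampleSID lookup_sid(const std::string& s) {
  static const std::unordered_map<std::string, ExampleSID> table = {
    {"invokeExact", SID_invokeExact}, {"invokeGeneric", SID_invokeGeneric}};
  auto it = table.find(s);
  return it == table.end() ? NO_SID : it->second;
}

class ExampleSymbol {
  const std::string _text;
  const ExampleSID  _sid;   // cached at construction, like ciSymbol::_sid
 public:
  ExampleSymbol(const std::string& text, ExampleSID sid)
      : _text(text), _sid(sid) {
    // Mirrors the sid_ok() debug check: the cached id must agree with lookup.
    assert(lookup_sid(_text) == _sid && "must match the symbol table");
  }
  ExampleSID sid() const { return _sid; }
};

int main() {
  ExampleSymbol known("invokeExact", SID_invokeExact); // preallocated variant
  ExampleSymbol other("someRandomName", NO_SID);       // normal, non-famous case
  return known.sid() == SID_invokeExact && other.sid() == NO_SID ? 0 : 1;
}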

View File

@ -334,7 +334,8 @@ constantPoolHandle ClassFileParser::parse_constant_pool(TRAPS) {
}
break;
default:
fatal1("bad constant pool tag value %u", cp->tag_at(index).value());
fatal(err_msg("bad constant pool tag value %u",
cp->tag_at(index).value()));
ShouldNotReachHere();
break;
} // end of switch
@ -1837,7 +1838,8 @@ methodHandle ClassFileParser::parse_method(constantPoolHandle cp, bool is_interf
_has_vanilla_constructor = true;
}
if (EnableMethodHandles && m->is_method_handle_invoke()) {
if (EnableMethodHandles && (m->is_method_handle_invoke() ||
m->is_method_handle_adapter())) {
THROW_MSG_(vmSymbols::java_lang_VirtualMachineError(),
"Method handle invokers must be defined internally to the VM", nullHandle);
}

View File

@ -127,7 +127,7 @@ void DictionaryEntry::add_protection_domain(oop protection_domain) {
bool Dictionary::do_unloading(BoolObjectClosure* is_alive) {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint")
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
bool class_was_unloaded = false;
int index = 0; // Defined here for portability! Do not move
@ -561,10 +561,11 @@ SymbolPropertyTable::SymbolPropertyTable(int table_size, HashtableBucket* t,
SymbolPropertyEntry* SymbolPropertyTable::find_entry(int index, unsigned int hash,
symbolHandle sym) {
assert(index == index_for(sym), "incorrect index?");
symbolHandle sym,
intptr_t sym_mode) {
assert(index == index_for(sym, sym_mode), "incorrect index?");
for (SymbolPropertyEntry* p = bucket(index); p != NULL; p = p->next()) {
if (p->hash() == hash && p->symbol() == sym()) {
if (p->hash() == hash && p->symbol() == sym() && p->symbol_mode() == sym_mode) {
return p;
}
}
@ -573,12 +574,12 @@ SymbolPropertyEntry* SymbolPropertyTable::find_entry(int index, unsigned int has
SymbolPropertyEntry* SymbolPropertyTable::add_entry(int index, unsigned int hash,
symbolHandle sym) {
symbolHandle sym, intptr_t sym_mode) {
assert_locked_or_safepoint(SystemDictionary_lock);
assert(index == index_for(sym), "incorrect index?");
assert(find_entry(index, hash, sym) == NULL, "no double entry");
assert(index == index_for(sym, sym_mode), "incorrect index?");
assert(find_entry(index, hash, sym, sym_mode) == NULL, "no double entry");
SymbolPropertyEntry* p = new_entry(hash, sym());
SymbolPropertyEntry* p = new_entry(hash, sym(), sym_mode);
Hashtable::add_entry(index, p);
return p;
}

View File

@ -223,12 +223,16 @@ class DictionaryEntry : public HashtableEntry {
class SymbolPropertyEntry : public HashtableEntry {
friend class VMStructs;
private:
intptr_t _symbol_mode; // secondary key
oop _property_oop;
address _property_data;
public:
symbolOop symbol() const { return (symbolOop) literal(); }
intptr_t symbol_mode() const { return _symbol_mode; }
void set_symbol_mode(intptr_t m) { _symbol_mode = m; }
oop property_oop() const { return _property_oop; }
void set_property_oop(oop p) { _property_oop = p; }
@ -248,6 +252,7 @@ class SymbolPropertyEntry : public HashtableEntry {
void print_on(outputStream* st) const {
symbol()->print_value_on(st);
st->print("/mode="INTX_FORMAT, symbol_mode());
st->print(" -> ");
bool printed = false;
if (property_oop() != NULL) {
@ -285,8 +290,9 @@ private:
ShouldNotReachHere();
}
SymbolPropertyEntry* new_entry(unsigned int hash, symbolOop symbol) {
SymbolPropertyEntry* new_entry(unsigned int hash, symbolOop symbol, intptr_t symbol_mode) {
SymbolPropertyEntry* entry = (SymbolPropertyEntry*) Hashtable::new_entry(hash, symbol);
entry->set_symbol_mode(symbol_mode);
entry->set_property_oop(NULL);
entry->set_property_data(NULL);
return entry;
@ -300,16 +306,20 @@ public:
Hashtable::free_entry(entry);
}
unsigned int compute_hash(symbolHandle sym) {
unsigned int compute_hash(symbolHandle sym, intptr_t symbol_mode) {
// Use the regular identity_hash.
return Hashtable::compute_hash(sym);
return Hashtable::compute_hash(sym) ^ symbol_mode;
}
int index_for(symbolHandle name, intptr_t symbol_mode) {
return hash_to_index(compute_hash(name, symbol_mode));
}
// need not be locked; no state change
SymbolPropertyEntry* find_entry(int index, unsigned int hash, symbolHandle name);
SymbolPropertyEntry* find_entry(int index, unsigned int hash, symbolHandle name, intptr_t name_mode);
// must be done under SystemDictionary_lock
SymbolPropertyEntry* add_entry(int index, unsigned int hash, symbolHandle name);
SymbolPropertyEntry* add_entry(int index, unsigned int hash, symbolHandle name, intptr_t name_mode);
// GC support
void oops_do(OopClosure* f);
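
The table now keys entries on the pair (symbol, symbol_mode) and folds the mode into the hash with an XOR. A small illustrative sketch of that two-part-key idea using standard containers; SymbolMode, Key, and KeyHash are invented for the example:

#include <cstdint>
#include <functional>
#include <iostream>
#include <string>
#include <unordered_map>
#include <utility>

using SymbolMode = intptr_t;                       // secondary key, as in the hunk
using Key = std::pair<std::string, SymbolMode>;

struct KeyHash {
  size_t operator()(const Key& k) const {
    // Same spirit as compute_hash(sym) ^ symbol_mode in the diff above.
    return std::hash<std::string>()(k.first) ^ static_cast<size_t>(k.second);
  }
};

int main() {
  std::unordered_map<Key, std::string, KeyHash> table;  // stand-in property table
  // The same signature can map to different entries under different modes.
  table[{"(I)I", 0}] = "invokeExact invoker";
  table[{"(I)I", 1}] = "invokeGeneric invoker";
  std::cout << table[{"(I)I", 0}] << " / " << table[{"(I)I", 1}] << "\n";
}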

View File

@ -2446,24 +2446,20 @@ oop java_dyn_MethodTypeForm::erasedType(oop mtform) {
// Support for java_dyn_CallSite
int java_dyn_CallSite::_type_offset;
int java_dyn_CallSite::_target_offset;
int java_dyn_CallSite::_vmmethod_offset;
int java_dyn_CallSite::_caller_method_offset;
int java_dyn_CallSite::_caller_bci_offset;
void java_dyn_CallSite::compute_offsets() {
if (!EnableInvokeDynamic) return;
klassOop k = SystemDictionary::CallSite_klass();
if (k != NULL) {
compute_offset(_type_offset, k, vmSymbols::type_name(), vmSymbols::java_dyn_MethodType_signature(), true);
compute_offset(_target_offset, k, vmSymbols::target_name(), vmSymbols::java_dyn_MethodHandle_signature(), true);
compute_offset(_vmmethod_offset, k, vmSymbols::vmmethod_name(), vmSymbols::object_signature(), true);
compute_offset(_target_offset, k, vmSymbols::target_name(), vmSymbols::java_dyn_MethodHandle_signature());
compute_offset(_caller_method_offset, k, vmSymbols::vmmethod_name(), vmSymbols::sun_dyn_MemberName_signature());
compute_offset(_caller_bci_offset, k, vmSymbols::vmindex_name(), vmSymbols::int_signature());
}
}
oop java_dyn_CallSite::type(oop site) {
return site->obj_field(_type_offset);
}
oop java_dyn_CallSite::target(oop site) {
return site->obj_field(_target_offset);
}
@ -2472,12 +2468,20 @@ void java_dyn_CallSite::set_target(oop site, oop target) {
site->obj_field_put(_target_offset, target);
}
oop java_dyn_CallSite::vmmethod(oop site) {
return site->obj_field(_vmmethod_offset);
oop java_dyn_CallSite::caller_method(oop site) {
return site->obj_field(_caller_method_offset);
}
void java_dyn_CallSite::set_vmmethod(oop site, oop ref) {
site->obj_field_put(_vmmethod_offset, ref);
void java_dyn_CallSite::set_caller_method(oop site, oop ref) {
site->obj_field_put(_caller_method_offset, ref);
}
jint java_dyn_CallSite::caller_bci(oop site) {
return site->int_field(_caller_bci_offset);
}
void java_dyn_CallSite::set_caller_bci(oop site, jint bci) {
site->int_field_put(_caller_bci_offset, bci);
}

View File

@ -1068,21 +1068,22 @@ class java_dyn_CallSite: AllStatic {
friend class JavaClasses;
private:
static int _type_offset;
static int _target_offset;
static int _vmmethod_offset;
static int _caller_method_offset;
static int _caller_bci_offset;
static void compute_offsets();
public:
// Accessors
static oop type(oop site);
static oop target(oop site);
static void set_target(oop site, oop target);
static oop vmmethod(oop site);
static void set_vmmethod(oop site, oop ref);
static oop caller_method(oop site);
static void set_caller_method(oop site, oop ref);
static jint caller_bci(oop site);
static void set_caller_bci(oop site, jint bci);
// Testers
static bool is_subclass(klassOop klass) {
@ -1094,8 +1095,8 @@ public:
// Accessors for code generation:
static int target_offset_in_bytes() { return _target_offset; }
static int type_offset_in_bytes() { return _type_offset; }
static int vmmethod_offset_in_bytes() { return _vmmethod_offset; }
static int caller_method_offset_in_bytes() { return _caller_method_offset; }
static int caller_bci_offset_in_bytes() { return _caller_bci_offset; }
};
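
The accessor style used here, a byte offset per Java field discovered at startup plus typed get/set helpers, can be sketched in isolation roughly like this (ExampleObject and the hard-coded offset are stand-ins for an oop and the value a compute_offsets() analogue would find):

#include <cstdint>
#include <cstring>
#include <iostream>

// Pretend "object": a flat byte buffer, indexed by field offsets, standing in
// for an oop whose layout is discovered at runtime.
struct ExampleObject { unsigned char data[32]; };

static int _caller_bci_offset = 8;   // assumption: filled in at startup

static int32_t caller_bci(const ExampleObject* site) {
  int32_t v;
  std::memcpy(&v, site->data + _caller_bci_offset, sizeof(v));
  return v;
}

static void set_caller_bci(ExampleObject* site, int32_t bci) {
  std::memcpy(site->data + _caller_bci_offset, &bci, sizeof(bci));
}

int main() {
  ExampleObject site = {};
  set_caller_bci(&site, 42);              // mirrors java_dyn_CallSite::set_caller_bci
  std::cout << caller_bci(&site) << "\n"; // prints 42
}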

View File

@ -103,7 +103,7 @@ LoaderConstraintEntry** LoaderConstraintTable::find_loader_constraint(
void LoaderConstraintTable::purge_loader_constraints(BoolObjectClosure* is_alive) {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint")
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
// Remove unloaded entries from constraint table
for (int index = 0; index < table_size(); index++) {
LoaderConstraintEntry** p = bucket_addr(index);

View File

@ -102,7 +102,7 @@ void ResolutionErrorTable::always_strong_classes_do(OopClosure* blk) {
// Remove unloaded entries from the table
void ResolutionErrorTable::purge_resolution_errors(BoolObjectClosure* is_alive) {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint")
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
for (int i = 0; i < table_size(); i++) {
for (ResolutionErrorEntry** p = bucket_addr(i); *p != NULL; ) {
ResolutionErrorEntry* entry = *p;

View File

@ -2341,118 +2341,150 @@ char* SystemDictionary::check_signature_loaders(symbolHandle signature,
}
methodOop SystemDictionary::find_method_handle_invoke(symbolHandle signature,
Handle class_loader,
Handle protection_domain,
methodOop SystemDictionary::find_method_handle_invoke(symbolHandle name,
symbolHandle signature,
KlassHandle accessing_klass,
TRAPS) {
if (!EnableMethodHandles) return NULL;
assert(class_loader.is_null() && protection_domain.is_null(),
"cannot load specialized versions of MethodHandle.invoke");
if (invoke_method_table() == NULL) {
// create this side table lazily
_invoke_method_table = new SymbolPropertyTable(_invoke_method_size);
}
unsigned int hash = invoke_method_table()->compute_hash(signature);
vmSymbols::SID name_id = vmSymbols::find_sid(name());
assert(name_id != vmSymbols::NO_SID, "must be a known name");
unsigned int hash = invoke_method_table()->compute_hash(signature, name_id);
int index = invoke_method_table()->hash_to_index(hash);
SymbolPropertyEntry* spe = invoke_method_table()->find_entry(index, hash, signature);
SymbolPropertyEntry* spe = invoke_method_table()->find_entry(index, hash, signature, name_id);
methodHandle non_cached_result;
if (spe == NULL || spe->property_oop() == NULL) {
spe = NULL;
// Must create lots of stuff here, but outside of the SystemDictionary lock.
if (THREAD->is_Compiler_thread())
return NULL; // do not attempt from within compiler
Handle mt = compute_method_handle_type(signature(),
class_loader, protection_domain,
CHECK_NULL);
bool found_on_bcp = false;
Handle mt = find_method_handle_type(signature(), accessing_klass, found_on_bcp, CHECK_NULL);
KlassHandle mh_klass = SystemDictionaryHandles::MethodHandle_klass();
methodHandle m = methodOopDesc::make_invoke_method(mh_klass, signature,
methodHandle m = methodOopDesc::make_invoke_method(mh_klass, name, signature,
mt, CHECK_NULL);
// Now grab the lock. We might have to throw away the new method,
// if a racing thread has managed to install one at the same time.
{
if (found_on_bcp) {
MutexLocker ml(SystemDictionary_lock, Thread::current());
spe = invoke_method_table()->find_entry(index, hash, signature);
spe = invoke_method_table()->find_entry(index, hash, signature, name_id);
if (spe == NULL)
spe = invoke_method_table()->add_entry(index, hash, signature);
spe = invoke_method_table()->add_entry(index, hash, signature, name_id);
if (spe->property_oop() == NULL)
spe->set_property_oop(m());
} else {
non_cached_result = m;
}
}
methodOop m = (methodOop) spe->property_oop();
assert(m->is_method(), "");
return m;
if (spe != NULL && spe->property_oop() != NULL) {
assert(spe->property_oop()->is_method(), "");
return (methodOop) spe->property_oop();
} else {
return non_cached_result();
}
}
// Ask Java code to find or construct a java.dyn.MethodType for the given
// signature, as interpreted relative to the given class loader.
// Because of class loader constraints, all method handle usage must be
// consistent with this loader.
Handle SystemDictionary::compute_method_handle_type(symbolHandle signature,
Handle class_loader,
Handle protection_domain,
TRAPS) {
Handle SystemDictionary::find_method_handle_type(symbolHandle signature,
KlassHandle accessing_klass,
bool& return_bcp_flag,
TRAPS) {
Handle class_loader, protection_domain;
bool is_on_bcp = true; // keep this true as long as we can materialize from the boot classloader
Handle empty;
int npts = ArgumentCount(signature()).size();
objArrayHandle pts = oopFactory::new_objArray(SystemDictionary::Class_klass(), npts, CHECK_(empty));
int arg = 0;
Handle rt; // the return type from the signature
for (SignatureStream ss(signature()); !ss.is_done(); ss.next()) {
oop mirror;
if (!ss.is_object()) {
mirror = Universe::java_mirror(ss.type());
} else {
symbolOop name_oop = ss.as_symbol(CHECK_(empty));
symbolHandle name(THREAD, name_oop);
klassOop klass = resolve_or_fail(name,
class_loader, protection_domain,
true, CHECK_(empty));
mirror = Klass::cast(klass)->java_mirror();
oop mirror = NULL;
if (is_on_bcp) {
mirror = ss.as_java_mirror(class_loader, protection_domain,
SignatureStream::ReturnNull, CHECK_(empty));
if (mirror == NULL) {
// fall back from BCP to accessing_klass
if (accessing_klass.not_null()) {
class_loader = Handle(THREAD, instanceKlass::cast(accessing_klass())->class_loader());
protection_domain = Handle(THREAD, instanceKlass::cast(accessing_klass())->protection_domain());
}
is_on_bcp = false;
}
}
if (!is_on_bcp) {
// Resolve, throwing a real error if it doesn't work.
mirror = ss.as_java_mirror(class_loader, protection_domain,
SignatureStream::NCDFError, CHECK_(empty));
}
if (ss.at_return_type())
rt = Handle(THREAD, mirror);
else
pts->obj_at_put(arg++, mirror);
// Check accessibility.
if (ss.is_object() && accessing_klass.not_null()) {
klassOop sel_klass = java_lang_Class::as_klassOop(mirror);
// Emulate constantPoolOopDesc::verify_constant_pool_resolve.
if (Klass::cast(sel_klass)->oop_is_objArray())
sel_klass = objArrayKlass::cast(sel_klass)->bottom_klass();
if (Klass::cast(sel_klass)->oop_is_instance()) {
KlassHandle sel_kh(THREAD, sel_klass);
LinkResolver::check_klass_accessability(accessing_klass, sel_kh, CHECK_(empty));
}
}
}
assert(arg == npts, "");
// call MethodType java.dyn.MethodType::makeImpl(Class rt, Class[] pts, false, true)
bool varargs = false, trusted = true;
// call sun.dyn.MethodHandleNatives::findMethodType(Class rt, Class[] pts) -> MethodType
JavaCallArguments args(Handle(THREAD, rt()));
args.push_oop(pts());
args.push_int(false);
args.push_int(trusted);
JavaValue result(T_OBJECT);
JavaCalls::call_static(&result,
SystemDictionary::MethodType_klass(),
vmSymbols::makeImpl_name(), vmSymbols::makeImpl_signature(),
SystemDictionary::MethodHandleNatives_klass(),
vmSymbols::findMethodHandleType_name(),
vmSymbols::findMethodHandleType_signature(),
&args, CHECK_(empty));
// report back to the caller with the MethodType and the "on_bcp" flag
return_bcp_flag = is_on_bcp;
return Handle(THREAD, (oop) result.get_jobject());
}
// Ask Java code to find or construct a java.dyn.CallSite for the given
// name and signature, as interpreted relative to the given class loader.
Handle SystemDictionary::make_dynamic_call_site(KlassHandle caller,
int caller_method_idnum,
int caller_bci,
Handle SystemDictionary::make_dynamic_call_site(Handle bootstrap_method,
symbolHandle name,
methodHandle mh_invdyn,
methodHandle signature_invoker,
Handle info,
methodHandle caller_method,
int caller_bci,
TRAPS) {
Handle empty;
// call java.dyn.CallSite::makeSite(caller, name, mtype, cmid, cbci)
Handle caller_mname = MethodHandles::new_MemberName(CHECK_(empty));
MethodHandles::init_MemberName(caller_mname(), caller_method());
// call sun.dyn.MethodHandleNatives::makeDynamicCallSite(bootm, name, mtype, info, caller_mname, caller_pos)
oop name_str_oop = StringTable::intern(name(), CHECK_(empty)); // not a handle!
JavaCallArguments args(Handle(THREAD, caller->java_mirror()));
JavaCallArguments args(Handle(THREAD, bootstrap_method()));
args.push_oop(name_str_oop);
args.push_oop(mh_invdyn->method_handle_type());
args.push_int(caller_method_idnum);
args.push_oop(signature_invoker->method_handle_type());
args.push_oop(info());
args.push_oop(caller_mname());
args.push_int(caller_bci);
JavaValue result(T_OBJECT);
JavaCalls::call_static(&result,
SystemDictionary::CallSite_klass(),
vmSymbols::makeSite_name(), vmSymbols::makeSite_signature(),
SystemDictionary::MethodHandleNatives_klass(),
vmSymbols::makeDynamicCallSite_name(),
vmSymbols::makeDynamicCallSite_signature(),
&args, CHECK_(empty));
oop call_site_oop = (oop) result.get_jobject();
assert(call_site_oop->is_oop()
/*&& java_dyn_CallSite::is_instance(call_site_oop)*/, "must be sane");
java_dyn_CallSite::set_vmmethod(call_site_oop, mh_invdyn());
if (TraceMethodHandles) {
#ifndef PRODUCT
tty->print_cr("Linked invokedynamic bci=%d site="INTPTR_FORMAT":", caller_bci, call_site_oop);
@ -2463,9 +2495,7 @@ Handle SystemDictionary::make_dynamic_call_site(KlassHandle caller,
return call_site_oop;
}
Handle SystemDictionary::find_bootstrap_method(KlassHandle caller,
KlassHandle search_bootstrap_klass,
TRAPS) {
Handle SystemDictionary::find_bootstrap_method(KlassHandle caller, TRAPS) {
Handle empty;
if (!caller->oop_is_instance()) return empty;
@ -2476,57 +2506,12 @@ Handle SystemDictionary::find_bootstrap_method(KlassHandle caller,
if (TraceMethodHandles) {
tty->print_cr("bootstrap method for "PTR_FORMAT" cached as "PTR_FORMAT":", ik(), boot_method_oop);
}
NOT_PRODUCT(if (!boot_method_oop->is_oop()) { tty->print_cr("*** boot MH of "PTR_FORMAT" = "PTR_FORMAT, ik(), boot_method_oop); ik()->print(); });
assert(boot_method_oop->is_oop()
&& java_dyn_MethodHandle::is_instance(boot_method_oop), "must be sane");
return Handle(THREAD, boot_method_oop);
}
boot_method_oop = NULL; // GC safety
// call java.dyn.Linkage::findBootstrapMethod(caller, sbk)
JavaCallArguments args(Handle(THREAD, ik->java_mirror()));
if (search_bootstrap_klass.is_null())
args.push_oop(Handle());
else
args.push_oop(search_bootstrap_klass->java_mirror());
JavaValue result(T_OBJECT);
JavaCalls::call_static(&result,
SystemDictionary::Linkage_klass(),
vmSymbols::findBootstrapMethod_name(),
vmSymbols::findBootstrapMethod_signature(),
&args, CHECK_(empty));
boot_method_oop = (oop) result.get_jobject();
if (boot_method_oop != NULL) {
if (TraceMethodHandles) {
#ifndef PRODUCT
tty->print_cr("--------");
tty->print_cr("bootstrap method for "PTR_FORMAT" computed as "PTR_FORMAT":", ik(), boot_method_oop);
ik()->print();
boot_method_oop->print();
tty->print_cr("========");
#endif //PRODUCT
}
assert(boot_method_oop->is_oop()
&& java_dyn_MethodHandle::is_instance(boot_method_oop), "must be sane");
// probably no race conditions, but let's be careful:
if (Atomic::cmpxchg_ptr(boot_method_oop, ik->adr_bootstrap_method(), NULL) == NULL)
ik->set_bootstrap_method(boot_method_oop);
else
boot_method_oop = ik->bootstrap_method();
} else {
if (TraceMethodHandles) {
#ifndef PRODUCT
tty->print_cr("--------");
tty->print_cr("bootstrap method for "PTR_FORMAT" computed as NULL:", ik());
ik()->print();
tty->print_cr("========");
#endif //PRODUCT
}
boot_method_oop = ik->bootstrap_method();
}
return Handle(THREAD, boot_method_oop);
return empty;
}
// Since the identity hash code for symbols changes when the symbols are
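
find_method_handle_invoke above builds the invoker method outside the SystemDictionary lock, then re-checks the table under the lock and either installs the new method or keeps the one a racing thread installed first. A generic sketch of that create-outside-the-lock, install-under-the-lock pattern; std::mutex, Invoker, and find_or_make_invoker are stand-ins, not the VM's types:

#include <iostream>
#include <map>
#include <memory>
#include <mutex>
#include <string>

struct Invoker { std::string signature; };

static std::mutex table_lock;                                  // stand-in for SystemDictionary_lock
static std::map<std::string, std::shared_ptr<Invoker>> table;  // stand-in for the invoke_method_table

std::shared_ptr<Invoker> find_or_make_invoker(const std::string& sig) {
  {
    std::lock_guard<std::mutex> g(table_lock);
    auto it = table.find(sig);
    if (it != table.end()) return it->second;      // fast path: already cached
  }
  // Expensive construction happens outside the lock, as in the hunk above.
  auto fresh = std::make_shared<Invoker>(Invoker{sig});
  std::lock_guard<std::mutex> g(table_lock);
  auto it = table.find(sig);
  if (it != table.end()) return it->second;        // a racing thread won; keep its entry
  table[sig] = fresh;                              // otherwise install the new one
  return fresh;
}

int main() {
  std::cout << find_or_make_invoker("(I)I")->signature << "\n";
}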

View File

@ -136,6 +136,7 @@ class SymbolPropertyTable;
template(MethodHandle_klass, java_dyn_MethodHandle, Opt) \
template(MemberName_klass, sun_dyn_MemberName, Opt) \
template(MethodHandleImpl_klass, sun_dyn_MethodHandleImpl, Opt) \
template(MethodHandleNatives_klass, sun_dyn_MethodHandleNatives, Opt) \
template(AdapterMethodHandle_klass, sun_dyn_AdapterMethodHandle, Opt) \
template(BoundMethodHandle_klass, sun_dyn_BoundMethodHandle, Opt) \
template(DirectMethodHandle_klass, sun_dyn_DirectMethodHandle, Opt) \
@ -463,29 +464,28 @@ public:
// JSR 292
// find the java.dyn.MethodHandles::invoke method for a given signature
static methodOop find_method_handle_invoke(symbolHandle signature,
Handle class_loader,
Handle protection_domain,
static methodOop find_method_handle_invoke(symbolHandle name,
symbolHandle signature,
KlassHandle accessing_klass,
TRAPS);
// ask Java to compute the java.dyn.MethodType object for a given signature
static Handle compute_method_handle_type(symbolHandle signature,
Handle class_loader,
Handle protection_domain,
TRAPS);
// ask Java to compute a java.dyn.MethodType object for a given signature
static Handle find_method_handle_type(symbolHandle signature,
KlassHandle accessing_klass,
bool& return_bcp_flag,
TRAPS);
// ask Java to create a dynamic call site, while linking an invokedynamic op
static Handle make_dynamic_call_site(KlassHandle caller,
int caller_method_idnum,
int caller_bci,
static Handle make_dynamic_call_site(Handle bootstrap_method,
// Callee information:
symbolHandle name,
methodHandle mh_invoke,
methodHandle signature_invoker,
Handle info,
// Caller information:
methodHandle caller_method,
int caller_bci,
TRAPS);
// coordinate with Java about bootstrap methods
static Handle find_bootstrap_method(KlassHandle caller,
// This argument is non-null only when a
// classfile attribute has been found:
KlassHandle search_bootstrap_klass,
TRAPS);
static Handle find_bootstrap_method(KlassHandle caller, TRAPS);
// Utility for printing loader "name" as part of tracing constraints
static const char* loader_name(oop loader) {

View File

@ -137,6 +137,7 @@
template(java_lang_CloneNotSupportedException, "java/lang/CloneNotSupportedException") \
template(java_lang_IllegalAccessException, "java/lang/IllegalAccessException") \
template(java_lang_IllegalArgumentException, "java/lang/IllegalArgumentException") \
template(java_lang_IllegalStateException, "java/lang/IllegalStateException") \
template(java_lang_IllegalMonitorStateException, "java/lang/IllegalMonitorStateException") \
template(java_lang_IllegalThreadStateException, "java/lang/IllegalThreadStateException") \
template(java_lang_IndexOutOfBoundsException, "java/lang/IndexOutOfBoundsException") \
@ -201,6 +202,11 @@
template(newField_signature, "(Lsun/reflect/FieldInfo;)Ljava/lang/reflect/Field;") \
template(newMethod_name, "newMethod") \
template(newMethod_signature, "(Lsun/reflect/MethodInfo;)Ljava/lang/reflect/Method;") \
/* the following two names must be in order: */ \
template(invokeExact_name, "invokeExact") \
template(invokeGeneric_name, "invokeGeneric") \
template(invokeVarargs_name, "invokeVarargs") \
template(star_name, "*") /*not really a name*/ \
template(invoke_name, "invoke") \
template(override_name, "override") \
template(parameterTypes_name, "parameterTypes") \
@ -231,16 +237,17 @@
template(java_dyn_MethodTypeForm, "java/dyn/MethodTypeForm") \
template(java_dyn_MethodTypeForm_signature, "Ljava/dyn/MethodTypeForm;") \
template(sun_dyn_MemberName, "sun/dyn/MemberName") \
template(sun_dyn_MemberName_signature, "Lsun/dyn/MemberName;") \
template(sun_dyn_MethodHandleImpl, "sun/dyn/MethodHandleImpl") \
template(sun_dyn_MethodHandleNatives, "sun/dyn/MethodHandleNatives") \
template(sun_dyn_AdapterMethodHandle, "sun/dyn/AdapterMethodHandle") \
template(sun_dyn_BoundMethodHandle, "sun/dyn/BoundMethodHandle") \
template(sun_dyn_DirectMethodHandle, "sun/dyn/DirectMethodHandle") \
template(makeImpl_name, "makeImpl") /*MethodType::makeImpl*/ \
template(makeImpl_signature, "(Ljava/lang/Class;[Ljava/lang/Class;ZZ)Ljava/dyn/MethodType;") \
template(makeSite_name, "makeSite") /*CallSite::makeSite*/ \
template(makeSite_signature, "(Ljava/lang/Class;Ljava/lang/String;Ljava/dyn/MethodType;II)Ljava/dyn/CallSite;") \
template(findBootstrapMethod_name, "findBootstrapMethod") \
template(findBootstrapMethod_signature, "(Ljava/lang/Class;Ljava/lang/Class;)Ljava/dyn/MethodHandle;") \
/* internal up-calls made only by the JVM, via class sun.dyn.MethodHandleNatives: */ \
template(findMethodHandleType_name, "findMethodHandleType") \
template(findMethodHandleType_signature, "(Ljava/lang/Class;[Ljava/lang/Class;)Ljava/dyn/MethodType;") \
template(makeDynamicCallSite_name, "makeDynamicCallSite") \
template(makeDynamicCallSite_signature, "(Ljava/dyn/MethodHandle;Ljava/lang/String;Ljava/dyn/MethodType;Ljava/lang/Object;Lsun/dyn/MemberName;I)Ljava/dyn/CallSite;") \
NOT_LP64( do_alias(machine_word_signature, int_signature) ) \
LP64_ONLY( do_alias(machine_word_signature, long_signature) ) \
\
@ -408,8 +415,9 @@
template(void_classloader_signature, "()Ljava/lang/ClassLoader;") \
template(void_object_signature, "()Ljava/lang/Object;") \
template(void_class_signature, "()Ljava/lang/Class;") \
template(void_string_signature, "()Ljava/lang/String;") \
template(object_array_object_object_signature, "(Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object;")\
template(void_string_signature, "()Ljava/lang/String;") \
template(object_array_object_signature, "([Ljava/lang/Object;)Ljava/lang/Object;") \
template(object_object_array_object_signature, "(Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object;")\
template(exception_void_signature, "(Ljava/lang/Exception;)V") \
template(protectiondomain_signature, "[Ljava/security/ProtectionDomain;") \
template(accesscontrolcontext_signature, "Ljava/security/AccessControlContext;") \
@ -863,11 +871,15 @@
do_intrinsic(_Object_init, java_lang_Object, object_initializer_name, void_method_signature, F_R) \
/* (symbol object_initializer_name defined above) */ \
\
do_intrinsic(_invoke, java_lang_reflect_Method, invoke_name, object_array_object_object_signature, F_R) \
do_intrinsic(_invoke, java_lang_reflect_Method, invoke_name, object_object_array_object_signature, F_R) \
/* (symbols invoke_name and invoke_signature defined above) */ \
do_intrinsic(_checkSpreadArgument, sun_dyn_MethodHandleImpl, checkSpreadArgument_name, checkSpreadArgument_signature, F_S) \
do_name( checkSpreadArgument_name, "checkSpreadArgument") \
do_name( checkSpreadArgument_signature, "(Ljava/lang/Object;I)V") \
do_intrinsic(_invokeExact, java_dyn_MethodHandle, invokeExact_name, object_array_object_signature, F_RN) \
do_intrinsic(_invokeGeneric, java_dyn_MethodHandle, invokeGeneric_name, object_array_object_signature, F_RN) \
do_intrinsic(_invokeVarargs, java_dyn_MethodHandle, invokeVarargs_name, object_array_object_signature, F_R) \
do_intrinsic(_invokeDynamic, java_dyn_InvokeDynamic, star_name, object_array_object_signature, F_SN) \
\
/* unboxing methods: */ \
do_intrinsic(_booleanValue, java_lang_Boolean, booleanValue_name, void_boolean_signature, F_R) \
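
The comment above requires invokeExact and invokeGeneric to stay adjacent in the generated symbol enumeration. An ordering constraint like that can be turned into a compile-time check; the enum below is invented purely for illustration:

// Invented enumeration standing in for the generated SID list.
enum ExampleSID {
  SID_invokeExact,
  SID_invokeGeneric,   // must stay immediately after invokeExact
  SID_invokeVarargs
};

// C++11 static_assert; older code would use a negative-array-size trick instead.
static_assert(SID_invokeGeneric == SID_invokeExact + 1,
              "invokeExact/invokeGeneric must be adjacent");

int main() { return 0; }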

View File

@ -221,6 +221,6 @@ void ImplicitExceptionTable::verify(nmethod *nm) const {
for (uint i = 0; i < len(); i++) {
if ((*adr(i) > (unsigned int)nm->code_size()) ||
(*(adr(i)+1) > (unsigned int)nm->code_size()))
fatal1("Invalid offset in ImplicitExceptionTable at %lx", _data);
fatal(err_msg("Invalid offset in ImplicitExceptionTable at " PTR_FORMAT, _data));
}
}

View File

@ -1534,7 +1534,8 @@ void nmethod::do_unloading(BoolObjectClosure* is_alive,
}
}
ic->set_to_clean();
assert(ic->cached_oop() == NULL, "cached oop in IC should be cleared")
assert(ic->cached_oop() == NULL,
"cached oop in IC should be cleared");
}
}
}
@ -2123,7 +2124,7 @@ void nmethod::verify() {
ResourceMark rm;
if (!CodeCache::contains(this)) {
fatal1("nmethod at " INTPTR_FORMAT " not in zone", this);
fatal(err_msg("nmethod at " INTPTR_FORMAT " not in zone", this));
}
if(is_native_method() )
@ -2131,7 +2132,8 @@ void nmethod::verify() {
nmethod* nm = CodeCache::find_nmethod(verified_entry_point());
if (nm != this) {
fatal1("findNMethod did not find this nmethod (" INTPTR_FORMAT ")", this);
fatal(err_msg("findNMethod did not find this nmethod (" INTPTR_FORMAT ")",
this));
}
for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {

View File

@ -62,7 +62,9 @@ StubQueue::StubQueue(StubInterface* stub_interface, int buffer_size,
Mutex* lock, const char* name) : _mutex(lock) {
intptr_t size = round_to(buffer_size, 2*BytesPerWord);
BufferBlob* blob = BufferBlob::create(name, size);
if( blob == NULL ) vm_exit_out_of_memory1(size, "CodeCache: no room for %s", name);
if( blob == NULL) {
vm_exit_out_of_memory(size, err_msg("CodeCache: no room for %s", name));
}
_stub_interface = stub_interface;
_buffer_size = blob->instructions_size();
_buffer_limit = blob->instructions_size();

View File

@ -45,7 +45,9 @@ void* VtableStub::operator new(size_t size, int code_size) {
if (_chunk == NULL || _chunk + real_size > _chunk_end) {
const int bytes = chunk_factor * real_size + pd_code_alignment();
BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
if( blob == NULL ) vm_exit_out_of_memory1(bytes, "CodeCache: no room for %s", "vtable chunks");
if (blob == NULL) {
vm_exit_out_of_memory(bytes, "CodeCache: no room for vtable chunks");
}
_chunk = blob->instructions_begin();
_chunk_end = _chunk + bytes;
VTune::register_stub("vtable stub", _chunk, _chunk_end);
@ -189,7 +191,9 @@ extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int
instanceKlass* ik = instanceKlass::cast(klass);
klassVtable* vt = ik->vtable();
klass->print();
fatal3("bad compiled vtable dispatch: receiver " INTPTR_FORMAT ", index %d (vtable length %d)", (address)receiver, index, vt->length());
fatal(err_msg("bad compiled vtable dispatch: receiver " INTPTR_FORMAT ", "
"index %d (vtable length %d)",
(address)receiver, index, vt->length()));
}
#endif // Product

View File

@ -310,7 +310,7 @@ class CompileBroker: AllStatic {
static AbstractCompiler* compiler(int level ) {
if (level == CompLevel_fast_compile) return _compilers[0];
assert(level == CompLevel_highest_tier, "what level?")
assert(level == CompLevel_highest_tier, "what level?");
return _compilers[1];
}

Some files were not shown because too many files have changed in this diff.