Yong Jeffrey Huang 2010-04-11 21:04:45 -07:00
commit 6798c200f0
1260 changed files with 76358 additions and 11110 deletions

.hgtags
View File

@ -52,3 +52,13 @@ ce74bd35ce948d629a356e168797f44b593b1578 jdk7-b73
946518568340c4e511549318f19f47f06b7f5f9b jdk7-b75
09e0b33177af2b98a03c9ca19eedf61440bd1cf6 jdk7-b76
1d0121b741f029dc4b828e4b36ba6fda92907dd7 jdk7-b77
4061c66ba1af1a2e27c2c839ba887407dd3ce050 jdk7-b78
e9c98378f6b9256c0595ef2985ca5899f0c0e274 jdk7-b79
e6abd38682d237306d6c147c17538ec9e7f8e3a7 jdk7-b80
dcc938ac40cc45f1ef454d76020b5db5d943001c jdk7-b81
a30062be6d9ca1d48579826f870f85974300004e jdk7-b82
34c8199936a1682aa8587857f44cfaf37c2b6381 jdk7-b83
b1e55627a6980b9508854ed0c0f21d4f981b4494 jdk7-b84
b6f633a93ae0ec4555ff4bf756f5e2150c9bdede jdk7-b85
c94d9cc81f495d97817eba9d71b84fc45f7661a5 jdk7-b86
b7456c473862048fa70ed8092313a4ef0a55d403 jdk7-b87

View File

@ -52,3 +52,13 @@ e1b972ff53cd58f825791f8ed9b2deffd16e768c jdk7-b68
d1516b9f23954b29b8e76e6f4efc467c08c78133 jdk7-b75
c8b63075403d53a208104a8a6ea5072c1cb66aab jdk7-b76
1f17ca8353babb13f4908c1f87d11508232518c8 jdk7-b77
ab4ae8f4514693a9fe17ca2fec0239d8f8450d2c jdk7-b78
20aeeb51713990dbea6929a2e100a8bbf5df70d4 jdk7-b79
a3242906c7747b5d9bcc3d118c7c3c69aa40f4b7 jdk7-b80
8403096d1fe7ff5318df9708cfec84a3fd3e1cf9 jdk7-b81
e1176f86805fe07fd9fb9da065dc51b47712ce76 jdk7-b82
6880a3af9addb41541e80ebe8cde6f79ec402a58 jdk7-b83
2f3ea057d1ad56cf3b269cdc4de2741411151982 jdk7-b84
cf26288a114be67c39f2758959ce50b60f5ae330 jdk7-b85
433a60a9c0bf1b26ee7e65cebaa89c541f497aed jdk7-b86
6b1069f53fbc30663ccef49d78c31bb7d6967bde jdk7-b87

View File

@ -52,3 +52,13 @@ b751c528c55560cf2adeaeef24b39ca1f4d1cbf7 jdk7-b73
0fb137085952c8e47878e240d1cb40f14de463c4 jdk7-b75
937144222e2219939101b0129d26a872a7956b13 jdk7-b76
6881f0383f623394b5ec73f27a5f329ff55d0467 jdk7-b77
a7f7276b48cd74d8eb1baa83fbf3d1ef4a2603c8 jdk7-b78
ec0421b5703b677e2226cf4bf7ae4eaafd8061c5 jdk7-b79
0336e70ca0aeabc783cc01658f36cb6e27ea7934 jdk7-b80
e08a42a2a94d97ea8eedb187a94dbff822c8fbba jdk7-b81
1e8c1bfad1abb4b81407a0f2645e0fb85764ca48 jdk7-b82
fde0df7a2384f7fe33204a79678989807d9c2b98 jdk7-b83
68c8961a82e4a3ad2a67991e5d834192a81eb4cd jdk7-b84
c67a9df7bc0ca291f08f9a9cc05cb78ea15d25e6 jdk7-b85
6253e28826d16cf1aecc39ce04c8de1f6bf2df5f jdk7-b86
09a41111a401d327f65e453384d976a10154d9ea jdk7-b87

View File

@ -123,7 +123,7 @@ public abstract class Expression
/**
* Coerces a number to the target type of this expression.
* @parm number The number to coerce.
* @param obj The number to coerce.
* @return the value of number coerced to the (target) type of
* this expression.
**/
@ -142,7 +142,7 @@ public abstract class Expression
/**
* Coerces an integral value (BigInteger) to its corresponding unsigned
* representation, if the target type of this expression is unsigned.
* @parm b The BigInteger to be coerced.
* @param b The BigInteger to be coerced.
* @return the value of an integral type coerced to its corresponding
* unsigned integral type, if the target type of this expression is
* unsigned.
@ -170,7 +170,7 @@ public abstract class Expression
/**
* Coerces an integral value (BigInteger) to its corresponding signed
* representation, if the target type of this expression is signed.
* @parm b The BigInteger to be coerced.
* @param b The BigInteger to be coerced.
* @return the value of an integral type coerced to its corresponding
* signed integral type, if the target type of this expression is
* signed.

View File

@ -161,7 +161,7 @@ public class PortableRemoteObject {
* happens implicitly when the object is sent or received as an argument
* on a remote method call, but in some circumstances it is useful to
* perform this action by making an explicit call. See the
* {@link Stub#connect} method for more information.
* {@link javax.rmi.CORBA.Stub#connect} method for more information.
* @param target the object to connect.
* @param source a previously connected object.
* @throws RemoteException if <code>source</code> is not connected

View File

@ -31,7 +31,7 @@ package org.omg.CORBA;
* indicate whether policies should replace the
* existing policies of an <code>Object</code> or be added to them.
* <P>
* The method {@link omg.org.CORBA.Object._set_policy_override} takes
* The method {@link org.omg.CORBA.Object#_set_policy_override} takes
* either <code>SetOverrideType.SET_OVERRIDE</code> or
* <code>SetOverrideType.ADD_OVERRIDE</code> as its second argument.
* The method <code>_set_policy_override</code>

View File

@ -545,8 +545,6 @@ public class TCKind {
* @param _value the <code>int</code> to convert. It must be one of
* the <code>int</code> constants in the class
* <code>TCKind</code>.
* @return a new <code>TCKind</code> instance whose <code>value</code>
* field matches the given <code>int</code>
*/
@Deprecated
protected TCKind(int _value){

View File

@ -56,7 +56,7 @@ public final class UnknownUserException extends UserException {
* Constructs an <code>UnknownUserException</code> object that contains the given
* <code>Any</code> object.
*
* @ param a an <code>Any</code> object that contains a user exception returned
* @param a an <code>Any</code> object that contains a user exception returned
* by the server
*/
public UnknownUserException(Any a) {

View File

@ -43,7 +43,6 @@ public class ServantObject
/** The real servant. The local stub may cast this field to the expected type, and then
* invoke the operation directly. Note, the object may or may not be the actual servant
* instance.
* @return The real servant
*/
public java.lang.Object servant;
}

View File

@ -256,7 +256,7 @@ module CosNaming
*
* @param n Name of the object <p>
*
* @parm obj The Object to rebind with the given name <p>
* @param obj The Object to rebind with the given name <p>
*
* @exception org.omg.CosNaming.NamingContextPackage.NotFound Indicates the name does not identify a binding.<p>
*

View File

@ -1730,7 +1730,7 @@ module PortableInterceptor {
* <p>
* Any number of components may exist with the same component ID.
*
* @param a_component The IOP.TaggedComponent to add.
* @param tagged_component The IOP.TaggedComponent to add.
*/
void add_ior_component
(in IOP::TaggedComponent tagged_component);
@ -1744,7 +1744,7 @@ module PortableInterceptor {
* <p>
* Any number of components may exist with the same component ID.
*
* @param a_component The <code>IOP.TaggedComponent</code> to add.
* @param tagged_component The <code>IOP.TaggedComponent</code> to add.
* @param profile_id The profile id of the profile to
* which this component will be added.
* @exception BAD_PARAM thrown, with a standard minor code of 29, if the

View File

@ -52,3 +52,37 @@ f4b900403d6e4b0af51447bd13bbe23fe3a1dac7 jdk7-b74
d8dd291a362acb656026a9c0a9da48501505a1e7 jdk7-b75
9174bb32e934965288121f75394874eeb1fcb649 jdk7-b76
455105fc81d941482f8f8056afaa7aa0949c9300 jdk7-b77
e703499b4b51e3af756ae77c3d5e8b3058a14e4e jdk7-b78
a5a6adfca6ecefb5894a848debabfe442ff50e25 jdk7-b79
3003ddd1d4330b06cb4691ae74d600d3685899eb jdk7-b80
1f9b07674480c224828852ffe137beea36b3cab5 jdk7-b81
1999f5b12482d66c8b0daf6709daea4f51893a04 jdk7-b82
a94714c550658fd6741793ef036cb9625dc2ab1a hs17-b01
faf94d94786b621f8e13cbcc941ca69c6d967c3f hs17-b02
f4b900403d6e4b0af51447bd13bbe23fe3a1dac7 hs17-b03
d8dd291a362acb656026a9c0a9da48501505a1e7 hs17-b04
9174bb32e934965288121f75394874eeb1fcb649 hs17-b05
a5a6adfca6ecefb5894a848debabfe442ff50e25 hs17-b06
3003ddd1d4330b06cb4691ae74d600d3685899eb hs17-b07
1f9b07674480c224828852ffe137beea36b3cab5 hs17-b08
ff3232b68fbb35185b338d7ff4695b52460243f3 hs17-b09
981375ca07b7f0605f92f57aad95122e8c385a4d hs16-b01
f4cbf78110c726919f46b59a3b054c54c7e889b4 hs16-b02
07c1c01e031513bfe6a7d17c6cf30d2752824ae9 hs16-b03
08f86fa55a31113df626a75c8a626e66a543a1bd hs16-b04
32c83fb84370a35344676991a48440378e6b6c8a hs16-b05
ba313800759b678979434d6da8ed3bf49eb8bea4 hs16-b06
3c0f729815607e1678bd0c41ae68494c700dcc71 hs16-b07
ac59d4e6dae51ac5fc31a9a4940d1857f91161b1 hs16-b08
3f844a28c5f4912bd04043b44f21b25b0805ffc2 hs15-b01
1605bb4eb5a7a1703b13d5b077a22cc665fe45f7 hs15-b02
2581d90c6c9b2012da930eb4742add94a03069a0 hs15-b03
9ab385cb0c42997e16a7761ebcd25c90560a2714 hs15-b04
fafab5d5349c7c066d677538db67a1ee0fb33bd2 hs15-b05
3f370a32906eb5ba993fabd7b4279be7f31052b9 jdk7-b83
ffc8d176b84bcfb5ac21302b4feb3b0c0d69b97c jdk7-b84
6c9796468b91dcbb39e09dfa1baf9779ac45eb66 jdk7-b85
418bc80ce13995149eadc9eecbba21d7a9fa02ae hs17-b10
bf823ef06b4f211e66988d76a2e2669be5c0820e jdk7-b86
07226e9eab8f74b37346b32715f829a2ef2c3188 hs18-b01
e7e7e36ccdb5d56edd47e5744351202d38f3b7ad jdk7-b87

View File

@ -174,7 +174,7 @@ lib_info* add_lib_info_fd(struct ps_prochandle* ph, const char* libname, int fd,
return NULL;
}
newlib->symtab = build_symtab(newlib->fd);
newlib->symtab = build_symtab(newlib->fd, libname);
if (newlib->symtab == NULL) {
print_debug("symbol table build failed for %s\n", newlib->name);
}

View File

@ -53,8 +53,274 @@ typedef struct symtab {
struct hsearch_data *hash_table;
} symtab_t;
// read symbol table from given fd.
struct symtab* build_symtab(int fd) {
// Directory that contains global debuginfo files. In theory it
// should be possible to change this, but in a Java environment there
// is no obvious place to put a user interface to do it. Maybe this
// could be set with an environment variable.
static const char debug_file_directory[] = "/usr/lib/debug";
/* The CRC used in gnu_debuglink, retrieved from
http://sourceware.org/gdb/current/onlinedocs/gdb/Separate-Debug-Files.html#Separate-Debug-Files. */
unsigned int gnu_debuglink_crc32 (unsigned int crc,
unsigned char *buf, size_t len)
{
static const unsigned int crc32_table[256] =
{
0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419,
0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4,
0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07,
0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856,
0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4,
0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3,
0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a,
0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599,
0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190,
0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f,
0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e,
0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed,
0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3,
0xfbd44c65, 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a,
0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5,
0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, 0xbe0b1010,
0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17,
0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6,
0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615,
0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 0xf00f9344,
0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a,
0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1,
0xa6bc5767, 0x3fb506dd, 0x48b2364b, 0xd80d2bda, 0xaf0a1b4c,
0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef,
0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe,
0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31,
0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c,
0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b,
0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1,
0x18b74777, 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 0xa00ae278,
0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7,
0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, 0x40df0b66,
0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605,
0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8,
0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b,
0x2d02ef8d
};
unsigned char *end;
crc = ~crc & 0xffffffff;
for (end = buf + len; buf < end; ++buf)
crc = crc32_table[(crc ^ *buf) & 0xff] ^ (crc >> 8);
return ~crc & 0xffffffff;
}
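This is the standard CRC-32 (the reflected 0xEDB88320 polynomial) with the usual pre- and post-inversion, so a caller can either feed the whole file at once or accumulate chunk by chunk exactly as open_debug_file() below does; the two give the same result. A minimal sketch, assuming the whole file is already in memory (data and data_len are illustrative names, not from this patch):

  /* One-shot use, equivalent to the chunked read() loop in open_debug_file(): */
  unsigned int crc = gnu_debuglink_crc32(0, data, data_len);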
/* Open a debuginfo file and check its CRC. If it exists and the CRC
matches return its fd. */
static int
open_debug_file (const char *pathname, unsigned int crc)
{
unsigned int file_crc = 0;
unsigned char buffer[8 * 1024];
int fd = pathmap_open(pathname);
if (fd < 0)
return -1;
lseek(fd, 0, SEEK_SET);
for (;;) {
int len = read(fd, buffer, sizeof buffer);
if (len <= 0)
break;
file_crc = gnu_debuglink_crc32(file_crc, buffer, len);
}
if (crc == file_crc)
return fd;
else {
close(fd);
return -1;
}
}
/* Find an ELF section. */
static struct elf_section *find_section_by_name(char *name,
int fd,
ELF_EHDR *ehdr,
ELF_SHDR *shbuf,
struct elf_section *scn_cache)
{
ELF_SHDR* cursct = NULL;
char *strtab;
int cnt;
if (scn_cache[ehdr->e_shstrndx].c_data == NULL) {
if ((scn_cache[ehdr->e_shstrndx].c_data
= read_section_data(fd, ehdr, cursct)) == NULL) {
return NULL;
}
}
strtab = scn_cache[ehdr->e_shstrndx].c_data;
for (cursct = shbuf, cnt = 0;
cnt < ehdr->e_shnum;
cnt++, cursct++) {
if (strcmp(cursct->sh_name + strtab, name) == 0) {
scn_cache[cnt].c_data = read_section_data(fd, ehdr, cursct);
return &scn_cache[cnt];
}
}
return NULL;
}
/* Look for a ".gnu_debuglink" section. If one exists, try to open a
suitable debuginfo file. */
static int open_file_from_debug_link(const char *name,
int fd,
ELF_EHDR *ehdr,
ELF_SHDR *shbuf,
struct elf_section *scn_cache)
{
int debug_fd;
struct elf_section *debug_link = find_section_by_name(".gnu_debuglink", fd, ehdr,
shbuf, scn_cache);
if (debug_link == NULL)
return -1;
char *debug_filename = debug_link->c_data;
int offset = (strlen(debug_filename) + 4) >> 2;
static unsigned int crc;
crc = ((unsigned int*)debug_link->c_data)[offset];
char *debug_pathname = malloc(strlen(debug_filename)
+ strlen(name)
+ strlen(".debug/")
+ strlen(debug_file_directory)
+ 2);
strcpy(debug_pathname, name);
char *last_slash = strrchr(debug_pathname, '/');
if (last_slash == NULL)
return -1;
/* Look in the same directory as the object. */
strcpy(last_slash+1, debug_filename);
debug_fd = open_debug_file(debug_pathname, crc);
if (debug_fd >= 0) {
free(debug_pathname);
return debug_fd;
}
/* Look in a subdirectory named ".debug". */
strcpy(last_slash+1, ".debug/");
strcat(last_slash, debug_filename);
debug_fd = open_debug_file(debug_pathname, crc);
if (debug_fd >= 0) {
free(debug_pathname);
return debug_fd;
}
/* Look in /usr/lib/debug + the full pathname. */
strcpy(debug_pathname, debug_file_directory);
strcat(debug_pathname, name);
last_slash = strrchr(debug_pathname, '/');
strcpy(last_slash+1, debug_filename);
debug_fd = open_debug_file(debug_pathname, crc);
if (debug_fd >= 0) {
free(debug_pathname);
return debug_fd;
}
free(debug_pathname);
return -1;
}
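A .gnu_debuglink section stores the debuginfo file name, NUL-terminated and padded to a 4-byte boundary, followed by a 4-byte CRC of that file; that layout is what the (strlen(debug_filename) + 4) >> 2 word index above picks out. For illustration (names assumed, not from this commit): a link name of "libjvm.debug" is 12 bytes, name plus NUL pads to 16 bytes, and the CRC sits at 32-bit word index (12 + 4) >> 2 = 4. For an object at /some/dir/libjvm.so the function then probes, in order:

  /some/dir/libjvm.debug                 (same directory as the object)
  /some/dir/.debug/libjvm.debug          (a ".debug" subdirectory)
  /usr/lib/debug/some/dir/libjvm.debug   (debug_file_directory plus the full path)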
static struct symtab* build_symtab_internal(int fd, const char *filename, bool try_debuginfo);
/* Look for a ".gnu_debuglink" section. If one exists, try to open a
suitable debuginfo file and read a symbol table from it. */
static struct symtab *build_symtab_from_debug_link(const char *name,
int fd,
ELF_EHDR *ehdr,
ELF_SHDR *shbuf,
struct elf_section *scn_cache)
{
fd = open_file_from_debug_link(name, fd, ehdr, shbuf, scn_cache);
if (fd >= 0) {
struct symtab *symtab = build_symtab_internal(fd, NULL, /* try_debuginfo */ false);
close(fd);
return symtab;
}
return NULL;
}
// Given a build_id, find the associated debuginfo file
static char *
build_id_to_debug_filename (size_t size, unsigned char *data)
{
char *filename, *s;
filename = malloc(strlen (debug_file_directory) + (sizeof "/.build-id/" - 1) + 1
+ 2 * size + (sizeof ".debug" - 1) + 1);
s = filename + sprintf (filename, "%s/.build-id/", debug_file_directory);
if (size > 0)
{
size--;
s += sprintf (s, "%02x", *data++);
}
if (size > 0)
*s++ = '/';
while (size-- > 0)
s += sprintf (s, "%02x", *data++);
strcpy (s, ".debug");
return filename;
}
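This encodes the GDB .build-id convention: the first build-ID byte names a subdirectory, the remaining bytes name the file, and a .debug suffix is appended. A hypothetical 20-byte (SHA-1) build ID beginning ab 54 c1 ... would therefore map to:

  /usr/lib/debug/.build-id/ab/54c1<remaining 17 bytes as hex>.debug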
// Read a build ID note. Try to open any associated debuginfo file
// and return its symtab
static struct symtab* build_symtab_from_build_id(Elf64_Nhdr *note)
{
int fd;
struct symtab *symtab = NULL;
unsigned char *bytes
= (unsigned char*)(note+1) + note->n_namesz;
unsigned char *filename
= (build_id_to_debug_filename (note->n_descsz, bytes));
fd = pathmap_open(filename);
if (fd >= 0) {
symtab = build_symtab_internal(fd, NULL, /* try_debuginfo */ false);
close(fd);
}
free(filename);
return symtab;
}
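The (unsigned char*)(note+1) + note->n_namesz arithmetic assumes the descriptor starts immediately after the name. ELF rounds n_namesz up to 4-byte alignment, but the GNU note name "GNU\0" is exactly 4 bytes, so no padding is skipped here. A layout sketch (the 20-byte descriptor is typical for SHA-1 build IDs and assumed for illustration):

  Elf64_Nhdr { n_namesz = 4, n_descsz = 20, n_type = NT_GNU_BUILD_ID }
  'G' 'N' 'U' '\0'       <- name, n_namesz bytes
  <20 build-ID bytes>    <- descriptor; this is what "bytes" points at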
// read symbol table from given fd. If try_debuginfo is true, also
// try to open an associated debuginfo file
static struct symtab* build_symtab_internal(int fd, const char *filename, bool try_debuginfo) {
ELF_EHDR ehdr;
char *names = NULL;
struct symtab* symtab = NULL;
@ -66,6 +332,7 @@ struct symtab* build_symtab(int fd) {
ELF_SHDR* cursct = NULL;
ELF_PHDR* phbuf = NULL;
ELF_PHDR* phdr = NULL;
int sym_section = SHT_DYNSYM;
uintptr_t baseaddr = (uintptr_t)-1;
@ -90,18 +357,23 @@ struct symtab* build_symtab(int fd) {
for (cursct = shbuf, cnt = 0; cnt < ehdr.e_shnum; cnt++) {
scn_cache[cnt].c_shdr = cursct;
if (cursct->sh_type == SHT_SYMTAB || cursct->sh_type == SHT_STRTAB) {
if (cursct->sh_type == SHT_SYMTAB || cursct->sh_type == SHT_STRTAB
|| cursct->sh_type == SHT_NOTE || cursct->sh_type == SHT_DYNSYM) {
if ( (scn_cache[cnt].c_data = read_section_data(fd, &ehdr, cursct)) == NULL) {
goto quit;
}
}
if (cursct->sh_type == SHT_SYMTAB) {
// Full symbol table available so use that
sym_section = cursct->sh_type;
}
cursct++;
}
for (cnt = 1; cnt < ehdr.e_shnum; cnt++) {
ELF_SHDR *shdr = scn_cache[cnt].c_shdr;
if (shdr->sh_type == SHT_SYMTAB) {
if (shdr->sh_type == sym_section) {
ELF_SYM *syms;
int j, n, rslt;
size_t size;
@ -163,6 +435,45 @@ struct symtab* build_symtab(int fd) {
}
}
// Look for a separate debuginfo file.
if (try_debuginfo) {
// We prefer a debug symtab to an object's own symtab, so look in
// the debuginfo file. We stash a copy of the old symtab in case
// there is no debuginfo.
struct symtab* prev_symtab = symtab;
symtab = NULL;
#ifdef NT_GNU_BUILD_ID
// First we look for a Build ID
for (cursct = shbuf, cnt = 0;
symtab == NULL && cnt < ehdr.e_shnum;
cnt++) {
if (cursct->sh_type == SHT_NOTE) {
Elf64_Nhdr *note = (Elf64_Nhdr *)scn_cache[cnt].c_data;
if (note->n_type == NT_GNU_BUILD_ID) {
symtab = build_symtab_from_build_id(note);
}
}
cursct++;
}
#endif
// Then, if that doesn't work, the debug link
if (symtab == NULL) {
symtab = build_symtab_from_debug_link(filename, fd, &ehdr, shbuf,
scn_cache);
}
// If we still haven't found a symtab, use the object's own symtab.
if (symtab != NULL) {
if (prev_symtab != NULL)
destroy_symtab(prev_symtab);
} else {
symtab = prev_symtab;
}
}
quit:
if (shbuf) free(shbuf);
if (phbuf) free(phbuf);
@ -177,6 +488,11 @@ quit:
return symtab;
}
struct symtab* build_symtab(int fd, const char *filename) {
return build_symtab_internal(fd, filename, /* try_debuginfo */ true);
}
void destroy_symtab(struct symtab* symtab) {
if (!symtab) return;
if (symtab->strs) free(symtab->strs);

View File

@ -32,7 +32,7 @@
struct symtab;
// build symbol table for a given ELF file descriptor
struct symtab* build_symtab(int fd);
struct symtab* build_symtab(int fd, const char *filename);
// destroy the symbol table
void destroy_symtab(struct symtab* symtab);
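A hedged caller sketch of the widened API (path and error handling are illustrative, not from this commit). build_symtab() enables the debuginfo search; the internal reads of a debuginfo file's own symtab pass try_debuginfo = false, so the search cannot recurse:

  #include <fcntl.h>
  #include <unistd.h>

  const char *path = "/some/dir/libjvm.so";
  int fd = open(path, O_RDONLY);
  if (fd >= 0) {
    struct symtab *st = build_symtab(fd, path); /* may pull symbols from a separate debuginfo file */
    if (st != NULL)
      destroy_symtab(st);
    close(fd);
  }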

View File

@ -63,12 +63,12 @@ public class SystemDictionary {
javaSystemLoaderField = type.getOopField("_java_system_loader");
nofBuckets = db.lookupIntConstant("SystemDictionary::_nof_buckets").intValue();
objectKlassField = type.getOopField(WK_KLASS("object_klass"));
classLoaderKlassField = type.getOopField(WK_KLASS("classloader_klass"));
stringKlassField = type.getOopField(WK_KLASS("string_klass"));
systemKlassField = type.getOopField(WK_KLASS("system_klass"));
threadKlassField = type.getOopField(WK_KLASS("thread_klass"));
threadGroupKlassField = type.getOopField(WK_KLASS("threadGroup_klass"));
objectKlassField = type.getOopField(WK_KLASS("Object_klass"));
classLoaderKlassField = type.getOopField(WK_KLASS("ClassLoader_klass"));
stringKlassField = type.getOopField(WK_KLASS("String_klass"));
systemKlassField = type.getOopField(WK_KLASS("System_klass"));
threadKlassField = type.getOopField(WK_KLASS("Thread_klass"));
threadGroupKlassField = type.getOopField(WK_KLASS("ThreadGroup_klass"));
}
// These WK functions must follow the definitions in systemDictionary.hpp:

View File

@ -1,5 +1,5 @@
#
# Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved.
# Copyright 2005-2010 Sun Microsystems, Inc. All Rights Reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -312,10 +312,13 @@ endif
$(EXPORT_LIB_DIR)/%.jar: $(GEN_DIR)/%.jar
$(install-file)
# Include files (jvmti.h, jni.h, $(JDK_INCLUDE_SUBDIR)/jni_md.h, jmm.h)
# Include files (jvmti.h, jvmticmlr.h, jni.h, $(JDK_INCLUDE_SUBDIR)/jni_md.h, jmm.h)
$(EXPORT_INCLUDE_DIR)/%: $(GEN_DIR)/jvmtifiles/%
$(install-file)
$(EXPORT_INCLUDE_DIR)/%: $(HS_SRC_DIR)/share/vm/code/%
$(install-file)
$(EXPORT_INCLUDE_DIR)/%: $(HS_SRC_DIR)/share/vm/prims/%
$(install-file)

View File

@ -1,5 +1,5 @@
#
# Copyright 2006-2008 Sun Microsystems, Inc. All Rights Reserved.
# Copyright 2006-2010 Sun Microsystems, Inc. All Rights Reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -261,6 +261,7 @@ EXPORT_JRE_LIB_ARCH_DIR = $(EXPORT_JRE_LIB_DIR)/$(LIBARCH)
# Common export list of files
EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jvmti.h
EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jvmticmlr.h
EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jni.h
EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/$(JDK_INCLUDE_SUBDIR)/jni_md.h
EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jmm.h

View File

@ -31,11 +31,11 @@
#
# Don't put quotes (fail windows build).
HOTSPOT_VM_COPYRIGHT=Copyright 2009
HOTSPOT_VM_COPYRIGHT=Copyright 2010
HS_MAJOR_VER=17
HS_MAJOR_VER=18
HS_MINOR_VER=0
HS_BUILD_NUMBER=05
HS_BUILD_NUMBER=02
JDK_MAJOR_VER=1
JDK_MINOR_VER=7

View File

@ -0,0 +1,5 @@
#!/bin/sh
nm --defined-only $* | awk '
{ if ($3 ~ /^_ZTV/ || $3 ~ /^gHotSpotVM/) print "\t" $3 ";" }
'
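The new helper pipes nm --defined-only through awk, keeping vtable symbols (names starting with _ZTV) and the exported gHotSpotVM* globals, and prints each one as a linker map entry. Illustrative output (symbol names assumed):

  	_ZTV10JavaThread;
  	gHotSpotVMStructs;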

View File

@ -38,7 +38,7 @@ _JUNK_ := $(shell echo -e >&2 ""\
"Please use 'make jvmg' to build debug JVM. \n" \
"----------------------------------------------------------------------\n")
G_SUFFIX =
G_SUFFIX = _g
VERSION = debug
SYSDEFS += -DASSERT -DDEBUG
PICFLAGS = DEFAULT

View File

@ -58,7 +58,7 @@ CFLAGS$(HOTSPARC_GENERIC) += $(OPT_CFLAGS/BYFILE)
# Linker mapfile
MAPFILE = $(GAMMADIR)/make/linux/makefiles/mapfile-vers-debug
G_SUFFIX =
G_SUFFIX = _g
VERSION = optimized
SYSDEFS += -DASSERT -DFASTDEBUG
PICFLAGS = DEFAULT

View File

@ -25,9 +25,12 @@
# Rules to build signal interposition library, used by vm.make
# libjsig[_g].so: signal interposition library
JSIG = jsig$(G_SUFFIX)
JSIG = jsig
LIBJSIG = lib$(JSIG).so
JSIG_G = $(JSIG)$(G_SUFFIX)
LIBJSIG_G = lib$(JSIG_G).so
JSIGSRCDIR = $(GAMMADIR)/src/os/$(Platform_os_family)/vm
DEST_JSIG = $(JDK_LIBDIR)/$(LIBJSIG)
@ -50,6 +53,7 @@ $(LIBJSIG): $(JSIGSRCDIR)/jsig.c $(LIBJSIG_MAPFILE)
@echo Making signal interposition lib...
$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
$(LFLAGS_JSIG) $(JSIG_DEBUG_CFLAGS) -o $@ $< -ldl
$(QUIETLY) [ -f $(LIBJSIG_G) ] || { ln -s $@ $(LIBJSIG_G); }
install_jsig: $(LIBJSIG)
@echo "Copying $(LIBJSIG) to $(DEST_JSIG)"

View File

@ -35,7 +35,7 @@ CFLAGS += $(DEBUG_CFLAGS/BYFILE)
# Linker mapfile
MAPFILE = $(GAMMADIR)/make/linux/makefiles/mapfile-vers-debug
G_SUFFIX =
G_SUFFIX = _g
VERSION = debug
SYSDEFS += -DASSERT -DDEBUG
PICFLAGS = DEFAULT

View File

@ -25,7 +25,9 @@
# Rules to build gamma launcher, used by vm.make
# gamma[_g]: launcher
LAUNCHER = gamma$(G_SUFFIX)
LAUNCHER = gamma
LAUNCHER_G = $(LAUNCHER)$(G_SUFFIX)
LAUNCHERDIR = $(GAMMADIR)/src/os/$(Platform_os_family)/launcher
LAUNCHERFLAGS = $(ARCHFLAG) \
@ -70,4 +72,5 @@ $(LAUNCHER): $(LAUNCHER.o) $(LIBJVM) $(LAUNCHER_MAPFILE)
$(LINK_LAUNCHER/PRE_HOOK) \
$(LINK_LAUNCHER) $(LFLAGS_LAUNCHER) -o $@ $(LAUNCHER.o) $(LIBS_LAUNCHER); \
$(LINK_LAUNCHER/POST_HOOK) \
[ -f $(LAUNCHER_G) ] || { ln -s $@ $(LAUNCHER_G); }; \
}

View File

@ -290,6 +290,9 @@ SUNWprivate_1.1 {
# This is for Forte Analyzer profiling support.
AsyncGetCallTrace;
# INSERT VTABLE SYMBOLS HERE
local:
*;
};

View File

@ -285,6 +285,9 @@ SUNWprivate_1.1 {
# This is for Forte Analyzer profiling support.
AsyncGetCallTrace;
# INSERT VTABLE SYMBOLS HERE
local:
*;
};

View File

@ -25,9 +25,13 @@
# Rules to build serviceability agent library, used by vm.make
# libsaproc[_g].so: serviceability agent
SAPROC = saproc$(G_SUFFIX)
SAPROC = saproc
LIBSAPROC = lib$(SAPROC).so
SAPROC_G = $(SAPROC)$(G_SUFFIX)
LIBSAPROC_G = lib$(SAPROC_G).so
AGENT_DIR = $(GAMMADIR)/agent
SASRCDIR = $(AGENT_DIR)/src/os/$(Platform_os_family)
@ -75,6 +79,7 @@ $(LIBSAPROC): $(SASRCFILES) $(SAMAPFILE)
$(SA_DEBUG_CFLAGS) \
-o $@ \
-lthread_db
$(QUIETLY) [ -f $(LIBSAPROC_G) ] || { ln -s $@ $(LIBSAPROC_G); }
install_saproc: checkAndBuildSA
$(QUIETLY) if [ -e $(LIBSAPROC) ] ; then \

View File

@ -113,21 +113,29 @@ include $(MAKEFILES_DIR)/dtrace.make
#----------------------------------------------------------------------
# JVM
JVM = jvm$(G_SUFFIX)
LIBJVM = lib$(JVM).so
JVM = jvm
LIBJVM = lib$(JVM).so
LIBJVM_G = lib$(JVM)$(G_SUFFIX).so
JVM_OBJ_FILES = $(Obj_Files)
vm_version.o: $(filter-out vm_version.o,$(JVM_OBJ_FILES))
mapfile : $(MAPFILE)
mapfile : $(MAPFILE) vm.def
rm -f $@
cat $^ > $@
awk '{ if ($$0 ~ "INSERT VTABLE SYMBOLS HERE") \
{ system ("cat vm.def"); } \
else \
{ print $$0 } \
}' > $@ < $(MAPFILE)
mapfile_reorder : mapfile $(REORDERFILE)
rm -f $@
cat $^ > $@
vm.def: $(Res_Files) $(Obj_Files)
sh $(GAMMADIR)/make/linux/makefiles/build_vm_def.sh *.o > $@
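The rewritten mapfile rule splices vm.def into the version script at the "INSERT VTABLE SYMBOLS HERE" marker rather than simply concatenating the files, so the generated entries land inside the SUNWprivate_1.1 scope ahead of the new local: *; catch-all shown earlier in this commit. A sketch of the spliced result (the vtable entry is illustrative):

  SUNWprivate_1.1 {
          # This is for Forte Analyzer profiling support.
          AsyncGetCallTrace;
          _ZTV10JavaThread;     # spliced from vm.def
    local:
          *;
  };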
ifeq ($(ZERO_LIBARCH), ppc64)
STATIC_CXX = false
else
@ -201,6 +209,7 @@ $(LIBJVM): $(LIBJVM.o) $(LIBJVM_MAPFILE) $(LD_SCRIPT)
$(LFLAGS_VM) -o $@ $(LIBJVM.o) $(LIBS_VM); \
$(LINK_LIB.CC/POST_HOOK) \
rm -f $@.1; ln -s $@ $@.1; \
[ -f $(LIBJVM_G) ] || { ln -s $@ $(LIBJVM_G); ln -s $@.1 $(LIBJVM_G).1; }; \
if [ -x /usr/sbin/selinuxenabled ] ; then \
/usr/sbin/selinuxenabled; \
if [ $$? = 0 ] ; then \

View File

@ -54,7 +54,7 @@ _JUNK_ := $(shell echo >&2 ""\
"Please use 'gnumake jvmg' to build debug JVM. \n" \
"-------------------------------------------------------------------------\n")
G_SUFFIX =
G_SUFFIX = _g
VERSION = debug
SYSDEFS += -DASSERT -DDEBUG
PICFLAGS = DEFAULT

View File

@ -24,8 +24,8 @@
# Rules to build jvm_db/dtrace, used by vm.make
# we build libjvm_dtrace/libjvm_db/dtrace for COMPILER1 and COMPILER2
# but not for CORE configuration
# We build libjvm_dtrace/libjvm_db/dtrace for COMPILER1 and COMPILER2
# but not for CORE or KERNEL configurations.
ifneq ("${TYPE}", "CORE")
ifneq ("${TYPE}", "KERNEL")
@ -37,12 +37,13 @@ dtraceCheck:
else
JVM_DB = libjvm_db
LIBJVM_DB = libjvm$(G_SUFFIX)_db.so
LIBJVM_DB = libjvm_db.so
LIBJVM_DB_G = libjvm$(G_SUFFIX)_db.so
JVM_DTRACE = jvm_dtrace
LIBJVM_DTRACE = libjvm$(G_SUFFIX)_dtrace.so
LIBJVM_DTRACE = libjvm_dtrace.so
LIBJVM_DTRACE_G = libjvm$(G_SUFFIX)_dtrace.so
JVMOFFS = JvmOffsets
JVMOFFS.o = $(JVMOFFS).o
@ -77,7 +78,7 @@ LFLAGS_JVM_DB += -D_REENTRANT $(PICFLAG)
LFLAGS_JVM_DTRACE += -D_REENTRANT $(PICFLAG)
else
LFLAGS_JVM_DB += -mt $(PICFLAG) -xnolib
LFLAGS_JVM_DTRACE += -mt $(PICFLAG) -xnolib
LFLAGS_JVM_DTRACE += -mt $(PICFLAG) -xnolib -ldl
endif
ISA = $(subst i386,i486,$(shell isainfo -n))
@ -86,18 +87,24 @@ ISA = $(subst i386,i486,$(shell isainfo -n))
ifneq ("${ISA}","${BUILDARCH}")
XLIBJVM_DB = 64/$(LIBJVM_DB)
XLIBJVM_DB_G = 64/$(LIBJVM_DB_G)
XLIBJVM_DTRACE = 64/$(LIBJVM_DTRACE)
XLIBJVM_DTRACE_G = 64/$(LIBJVM_DTRACE_G)
$(XLIBJVM_DB): $(DTRACE_SRCDIR)/$(JVM_DB).c $(JVMOFFS).h $(LIBJVM_DB_MAPFILE)
@echo Making $@
$(QUIETLY) mkdir -p 64/ ; \
$(CC) $(SYMFLAG) $(ARCHFLAG/$(ISA)) -D$(TYPE) -I. -I$(GENERATED) \
$(SHARED_FLAG) $(LFLAGS_JVM_DB) -o $@ $(DTRACE_SRCDIR)/$(JVM_DB).c -lc
[ -f $(XLIBJVM_DB_G) ] || { ln -s $(LIBJVM_DB) $(XLIBJVM_DB_G); }
$(XLIBJVM_DTRACE): $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE)
@echo Making $@
$(QUIETLY) mkdir -p 64/ ; \
$(CC) $(SYMFLAG) $(ARCHFLAG/$(ISA)) -D$(TYPE) -I. \
$(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c -lc -lthread -ldoor
[ -f $(XLIBJVM_DTRACE_G) ] || { ln -s $(LIBJVM_DTRACE) $(XLIBJVM_DTRACE_G); }
endif # ifneq ("${ISA}","${BUILDARCH}")
ifdef USE_GCC
@ -142,11 +149,13 @@ $(LIBJVM_DB): $(DTRACE_SRCDIR)/$(JVM_DB).c $(JVMOFFS.o) $(XLIBJVM_DB) $(LIBJVM_D
@echo Making $@
$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I. -I$(GENERATED) \
$(SHARED_FLAG) $(LFLAGS_JVM_DB) -o $@ $(DTRACE_SRCDIR)/$(JVM_DB).c -lc
[ -f $(LIBJVM_DB_G) ] || { ln -s $@ $(LIBJVM_DB_G); }
$(LIBJVM_DTRACE): $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(XLIBJVM_DTRACE) $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE)
@echo Making $@
$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I. \
$(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c -lc -lthread -ldoor
[ -f $(LIBJVM_DTRACE_G) ] || { ln -s $@ $(LIBJVM_DTRACE_G); }
$(DTRACE).d: $(DTRACE_SRCDIR)/hotspot.d $(DTRACE_SRCDIR)/hotspot_jni.d \
$(DTRACE_SRCDIR)/hs_private.d $(DTRACE_SRCDIR)/jhelper.d

View File

@ -90,7 +90,6 @@ endif # Platform_compiler == sparcWorks
# for this method for now. (fix this when dtrace bug 6258412 is fixed)
OPT_CFLAGS/ciEnv.o = $(OPT_CFLAGS) -xinline=no%__1cFciEnvbFpost_compiled_method_load_event6MpnHnmethod__v_
# (OPT_CFLAGS/SLOWER is also available, to alter compilation of buggy files)
# If you set HOTSPARC_GENERIC=yes, you disable all OPT_CFLAGS settings
@ -115,8 +114,7 @@ MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers \
# and mustn't be otherwise.
MAPFILE_DTRACE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-$(TYPE)
G_SUFFIX =
G_SUFFIX = _g
VERSION = optimized
SYSDEFS += -DASSERT -DFASTDEBUG -DCHECK_UNHANDLED_OOPS
PICFLAGS = DEFAULT

View File

@ -25,8 +25,11 @@
# Rules to build signal interposition library, used by vm.make
# libjsig[_g].so: signal interposition library
JSIG = jsig$(G_SUFFIX)
LIBJSIG = lib$(JSIG).so
JSIG = jsig
LIBJSIG = lib$(JSIG).so
JSIG_G = $(JSIG)$(G_SUFFIX)
LIBJSIG_G = lib$(JSIG_G).so
JSIGSRCDIR = $(GAMMADIR)/src/os/$(Platform_os_family)/vm
@ -46,6 +49,7 @@ $(LIBJSIG): $(JSIGSRCDIR)/jsig.c $(LIBJSIG_MAPFILE)
@echo Making signal interposition lib...
$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
$(LFLAGS_JSIG) -o $@ $< -ldl
[ -f $(LIBJSIG_G) ] || { ln -s $@ $(LIBJSIG_G); }
install_jsig: $(LIBJSIG)
@echo "Copying $(LIBJSIG) to $(DEST_JSIG)"

View File

@ -51,7 +51,7 @@ MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers \
# and mustn't be otherwise.
MAPFILE_DTRACE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-$(TYPE)
G_SUFFIX =
G_SUFFIX = _g
VERSION = debug
SYSDEFS += -DASSERT -DDEBUG
PICFLAGS = DEFAULT

View File

@ -25,7 +25,8 @@
# Rules to build gamma launcher, used by vm.make
# gamma[_g]: launcher
LAUNCHER = gamma$(G_SUFFIX)
LAUNCHER = gamma
LAUNCHER_G = $(LAUNCHER)$(G_SUFFIX)
LAUNCHERDIR = $(GAMMADIR)/src/os/$(Platform_os_family)/launcher
LAUNCHERFLAGS = $(ARCHFLAG) \
@ -88,5 +89,6 @@ $(LAUNCHER): $(LAUNCHER.o) $(LIBJVM) $(LAUNCHER_MAPFILE)
$(LINK_LAUNCHER/PRE_HOOK) \
$(LINK_LAUNCHER) $(LFLAGS_LAUNCHER) -o $@ $(LAUNCHER.o) $(LIBS_LAUNCHER); \
$(LINK_LAUNCHER/POST_HOOK) \
[ -f $(LAUNCHER_G) ] || { ln -s $@ $(LAUNCHER_G); }; \
;; \
esac

View File

@ -25,9 +25,13 @@
# Rules to build serviceability agent library, used by vm.make
# libsaproc[_g].so: serviceability agent
SAPROC = saproc$(G_SUFFIX)
SAPROC = saproc
LIBSAPROC = lib$(SAPROC).so
SAPROC_G = $(SAPROC)$(G_SUFFIX)
LIBSAPROC_G = lib$(SAPROC_G).so
AGENT_DIR = $(GAMMADIR)/agent
SASRCDIR = $(AGENT_DIR)/src/os/$(Platform_os_family)/proc
@ -69,6 +73,7 @@ $(LIBSAPROC): $(SASRCFILES) $(SAMAPFILE)
$(SA_LFLAGS) \
-o $@ \
-ldl -ldemangle -lthread -lc
[ -f $(LIBSAPROC_G) ] || { ln -s $@ $(LIBSAPROC_G); }
install_saproc: checkAndBuildSA
$(QUIETLY) if [ -f $(LIBSAPROC) ] ; then \

View File

@ -281,8 +281,6 @@ else
OPT_CFLAGS=-xO4 $(EXTRA_OPT_CFLAGS)
endif
CFLAGS += $(GAMMADIR)/src/os_cpu/solaris_sparc/vm/solaris_sparc.il
endif # sparc
ifeq ("${Platform_arch_model}", "x86_32")
@ -293,13 +291,14 @@ OPT_CFLAGS=-xtarget=pentium $(EXTRA_OPT_CFLAGS)
# [phh] Is this still true for 6.1?
OPT_CFLAGS+=-xO3
CFLAGS += $(GAMMADIR)/src/os_cpu/solaris_x86/vm/solaris_x86_32.il
endif # 32bit x86
# no more exceptions
CFLAGS/NOEX=-noex
# Inline functions
CFLAGS += $(GAMMADIR)/src/os_cpu/solaris_${Platform_arch}/vm/solaris_${Platform_arch_model}.il
# Reduce code bloat by reverting back to 5.0 behavior for static initializers
CFLAGS += -Qoption ccfe -one_static_init
@ -312,6 +311,15 @@ PICFLAG/DEFAULT = $(PICFLAG)
PICFLAG/BETTER = $(PICFLAG/DEFAULT)
PICFLAG/BYFILE = $(PICFLAG/$@)$(PICFLAG/DEFAULT$(PICFLAG/$@))
# Use $(MAPFLAG:FILENAME=real_file_name) to specify a map file.
MAPFLAG = -M FILENAME
# Use $(SONAMEFLAG:SONAME=soname) to specify the intrinsic name of a shared obj
SONAMEFLAG = -h SONAME
# Build shared library
SHARED_FLAG = -G
# Would be better if these weren't needed, since we link with CC, but
# at present removing them causes run-time errors
LFLAGS += -library=Crun

View File

@ -108,11 +108,16 @@ ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 505), 1)
# older libm before libCrun, just to make sure it's found and used first.
LIBS += -lsocket -lsched -ldl $(LIBM) -lCrun -lthread -ldoor -lc
else
ifeq ($(COMPILER_REV_NUMERIC), 502)
# SC6.1 has its own libm.so: specifying anything else provokes a name conflict.
LIBS += -ldl -lthread -lsocket -lm -lsched -ldoor
else
LIBS += -ldl -lthread -lsocket $(LIBM) -lsched -ldoor
endif
endif # 502
endif # 505
else
LIBS += -lsocket -lsched -ldl $(LIBM) -lthread -lc
endif
endif # sparcWorks
# By default, link the *.o into the library, not the executable.
LINK_INTO$(LINK_INTO) = LIBJVM
@ -126,8 +131,9 @@ include $(MAKEFILES_DIR)/dtrace.make
#----------------------------------------------------------------------
# JVM
JVM = jvm$(G_SUFFIX)
LIBJVM = lib$(JVM).so
JVM = jvm
LIBJVM = lib$(JVM).so
LIBJVM_G = lib$(JVM)$(G_SUFFIX).so
JVM_OBJ_FILES = $(Obj_Files) $(DTRACE_OBJS)
@ -173,11 +179,12 @@ $(LIBJVM): $(LIBJVM.o) $(LIBJVM_MAPFILE)
-sbfast|-xsbfast) \
;; \
*) \
echo Linking vm...; \
$(LINK_LIB.CC/PRE_HOOK) \
$(LINK_VM) $(LFLAGS_VM) -o $@ $(LIBJVM.o) $(LIBS_VM); \
$(LINK_LIB.CC/POST_HOOK) \
rm -f $@.1; ln -s $@ $@.1; \
echo Linking vm...; \
$(LINK_LIB.CC/PRE_HOOK) \
$(LINK_VM) $(LFLAGS_VM) -o $@ $(LIBJVM.o) $(LIBS_VM); \
$(LINK_LIB.CC/POST_HOOK) \
rm -f $@.1; ln -s $@ $@.1; \
[ -f $(LIBJVM_G) ] || { ln -s $@ $(LIBJVM_G); ln -s $@.1 $(LIBJVM_G).1; }; \
;; \
esac

View File

@ -28,6 +28,9 @@ REM
REM Since we don't have uname and we could be cross-compiling,
REM Use the compiler to determine which ARCH we are building
REM
REM Note: Running this batch file from the Windows command shell requires
REM that "grep" be accessible on the PATH. An MKS install does this.
REM
cl 2>&1 | grep "IA-64" >NUL
if %errorlevel% == 0 goto isia64
cl 2>&1 | grep "AMD64" >NUL
@ -57,11 +60,12 @@ if not "%7" == "" goto usage
if "%1" == "product" goto test1
if "%1" == "debug" goto test1
if "%1" == "fastdebug" goto test1
if "%1" == "tree" goto test1
goto usage
:test1
if "%2" == "core" goto test2
if "%2" == "kernel" goto test2
if "%2" == "kernel" goto test2
if "%2" == "compiler1" goto test2
if "%2" == "compiler2" goto test2
if "%2" == "tiered" goto test2
@ -70,6 +74,7 @@ if "%2" == "adlc" goto build_adlc
goto usage
:test2
if "%1" == "tree" goto build_tree
REM check_j2se_version
REM jvmti.make requires J2SE 1.4.x or newer.
REM If not found then fail fast.
@ -93,6 +98,10 @@ goto end
nmake -f %3/make/windows/build.make Variant=compiler2 WorkSpace=%3 BootStrapDir=%4 BuildUser="%USERNAME%" HOTSPOT_BUILD_VERSION=%5 ADLC_ONLY=1 %1
goto end
:build_tree
nmake -f %3/make/windows/build.make Variant=%2 WorkSpace=%3 BootStrapDir=%4 BuildUser="%USERNAME%" HOTSPOT_BUILD_VERSION="%5" %1
goto end
:usage
echo Usage: build flavor version workspace bootstrap_dir [build_id] [windbg_home]
echo.
@ -100,8 +109,10 @@ echo where:
echo flavor is "product", "debug" or "fastdebug",
echo version is "core", "kernel", "compiler1", "compiler2", or "tiered",
echo workspace is source directory without trailing slash,
echo bootstrap_dir is a full path to echo a JDK in which bin/java
echo and bin/javac are present and working, and echo build_id is an
echo bootstrap_dir is a full path to a JDK in which bin/java
echo and bin/javac are present and working, and build_id is an
echo optional build identifier displayed by java -version
exit /b 1
:end
exit /b %errorlevel%
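For illustration, a hypothetical invocation of the new tree flavor, which only creates the variant directory structure via the build_tree target (paths assumed, not from this commit):

  build tree compiler2 C:\hotspot C:\jdk1.6.0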

View File

@ -27,6 +27,9 @@
# environment variables (Variant, WorkSpace, BootStrapDir, BuildUser, HOTSPOT_BUILD_VERSION)
# are passed in as command line arguments.
# Note: Running nmake or build.bat from the Windows command shell requires
# that "sh" be accessible on the PATH. An MKS install does this.
# SA components are built if BUILD_WIN_SA=1 is specified.
# See notes in README. This produces files:
# 1. sa-jdi.jar - This is built before building jvm.dll
@ -233,6 +236,12 @@ develop: checks $(variantDir) $(variantDir)\local.make sanity
cd $(variantDir)
nmake -nologo -f $(WorkSpace)\make\windows\makefiles\top.make BUILD_FLAVOR=product DEVELOP=1 ARCH=$(ARCH)
# target to create just the directory structure
tree: checks $(variantDir) $(variantDir)\local.make sanity
mkdir $(variantDir)\product
mkdir $(variantDir)\debug
mkdir $(variantDir)\fastdebug
sanity:
@ echo;
@ cd $(variantDir)

View File

@ -36,6 +36,9 @@ REM
REM Since we don't have uname and we could be cross-compiling,
REM Use the compiler to determine which ARCH we are building
REM
REM Note: Running this batch file from the Windows command shell requires
REM that "grep" be accessible on the PATH. An MKS install does this.
REM
cl 2>&1 | grep "IA-64" >NUL
if %errorlevel% == 0 goto isia64
cl 2>&1 | grep "AMD64" >NUL

View File

@ -22,6 +22,8 @@
#
#
set -e
# This shell script echoes "MSC_VER=<munged version of cl>"
# It ignores the micro version component.
# Examples:
@ -38,17 +40,20 @@
# sh, and it has been found that sometimes `which sh` fails.
if [ "x$HotSpotMksHome" != "x" ]; then
MKS_HOME="$HotSpotMksHome"
TOOL_DIR="$HotSpotMksHome"
else
SH=`which sh`
MKS_HOME=`dirname "$SH"`
# HotSpotMksHome is not set so use the directory that contains "sh".
# This works with both MKS and Cygwin.
SH=`which sh`
TOOL_DIR=`dirname "$SH"`
fi
HEAD="$MKS_HOME/head"
ECHO="$MKS_HOME/echo"
EXPR="$MKS_HOME/expr"
CUT="$MKS_HOME/cut"
SED="$MKS_HOME/sed"
DIRNAME="$TOOL_DIR/dirname"
HEAD="$TOOL_DIR/head"
ECHO="$TOOL_DIR/echo"
EXPR="$TOOL_DIR/expr"
CUT="$TOOL_DIR/cut"
SED="$TOOL_DIR/sed"
if [ "x$FORCE_MSC_VER" != "x" ]; then
echo "MSC_VER=$FORCE_MSC_VER"
@ -70,7 +75,15 @@ fi
if [ "x$FORCE_LINK_VER" != "x" ]; then
echo "LINK_VER=$FORCE_LINK_VER"
else
LINK_VER_RAW=`link 2>&1 | "$HEAD" -n 1 | "$SED" 's/.*Version[\ ]*\([0-9][0-9.]*\).*/\1/'`
# use the "link" command that is co-located with the "cl" command
cl_cmd=`which cl`
if [ "x$cl_cmd" != "x" ]; then
link_cmd=`$DIRNAME "$cl_cmd"`/link
else
# which can't find "cl" so just use whichever "link" we find
link_cmd="link"
fi
LINK_VER_RAW=`"$link_cmd" 2>&1 | "$HEAD" -n 1 | "$SED" 's/.*Version[\ ]*\([0-9][0-9.]*\).*/\1/'`
LINK_VER_MAJOR=`"$ECHO" $LINK_VER_RAW | "$CUT" -d'.' -f1`
LINK_VER_MINOR=`"$ECHO" $LINK_VER_RAW | "$CUT" -d'.' -f2`
LINK_VER_MICRO=`"$ECHO" $LINK_VER_RAW | "$CUT" -d'.' -f3`

View File

@ -1,5 +1,5 @@
/*
* Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -377,6 +377,16 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
}
void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
__ call(SharedRuntime::deopt_blob()->unpack_with_reexecution());
__ delayed()->nop();
ce->add_call_info_here(_info);
debug_only(__ should_not_reach_here());
}
void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
//---------------slow case: call to native-----------------
__ bind(_entry);

View File

@ -1,5 +1,5 @@
/*
* Copyright 1999-2006 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -143,3 +143,6 @@
static bool is_caller_save_register (LIR_Opr reg);
static bool is_caller_save_register (Register r);
// JSR 292
static LIR_Opr& method_handle_invoke_SP_save_opr() { return L7_opr; }

View File

@ -1,5 +1,5 @@
/*
* Copyright 2000-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -189,14 +189,17 @@ void LIR_Assembler::osr_entry() {
Register OSR_buf = osrBufferPointer()->as_register();
{ assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
int monitor_offset = BytesPerWord * method()->max_locals() +
(BasicObjectLock::size() * BytesPerWord) * (number_of_locks - 1);
(2 * BytesPerWord) * (number_of_locks - 1);
// SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
// the OSR buffer using 2 word entries: first the lock and then
// the oop.
for (int i = 0; i < number_of_locks; i++) {
int slot_offset = monitor_offset - ((i * BasicObjectLock::size()) * BytesPerWord);
int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
// verify the interpreter's monitor has a non-null object
{
Label L;
__ ld_ptr(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes(), O7);
__ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
__ cmp(G0, O7);
__ br(Assembler::notEqual, false, Assembler::pt, L);
__ delayed()->nop();
@ -205,9 +208,9 @@ void LIR_Assembler::osr_entry() {
}
#endif // ASSERT
// Copy the lock field into the compiled activation.
__ ld_ptr(OSR_buf, slot_offset + BasicObjectLock::lock_offset_in_bytes(), O7);
__ ld_ptr(OSR_buf, slot_offset + 0, O7);
__ st_ptr(O7, frame_map()->address_for_monitor_lock(i));
__ ld_ptr(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes(), O7);
__ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
__ st_ptr(O7, frame_map()->address_for_monitor_object(i));
}
}
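Worked offsets under stated assumptions (values illustrative, not from the patch): on 64-bit SPARC with BytesPerWord = 8, max_locals = 4 and number_of_locks = 2 give monitor_offset = 8*4 + 16*(2-1) = 48, so slot 0 reads its lock word at OSR-buffer offset 48 and its oop at 56, while slot 1 reads them at 32 and 40.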
@ -354,7 +357,7 @@ void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register hdr,
}
void LIR_Assembler::emit_exception_handler() {
int LIR_Assembler::emit_exception_handler() {
// if the last instruction is a call (typically to do a throw which
// is coming at the end after block reordering) the return address
// must still point into the code area in order to avoid assertion
@ -370,28 +373,22 @@ void LIR_Assembler::emit_exception_handler() {
if (handler_base == NULL) {
// not enough space left for the handler
bailout("exception handler overflow");
return;
return -1;
}
#ifdef ASSERT
int offset = code_offset();
#endif // ASSERT
compilation()->offsets()->set_value(CodeOffsets::Exceptions, code_offset());
if (compilation()->has_exception_handlers() || compilation()->env()->jvmti_can_post_exceptions()) {
__ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
__ delayed()->nop();
}
__ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type);
__ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
__ delayed()->nop();
debug_only(__ stop("should have gone to the caller");)
assert(code_offset() - offset <= exception_handler_size, "overflow");
__ end_a_stub();
return offset;
}
void LIR_Assembler::emit_deopt_handler() {
int LIR_Assembler::emit_deopt_handler() {
// if the last instruction is a call (typically to do a throw which
// is coming at the end after block reordering) the return address
// must still point into the code area in order to avoid assertion
@ -405,23 +402,18 @@ void LIR_Assembler::emit_deopt_handler() {
if (handler_base == NULL) {
// not enough space left for the handler
bailout("deopt handler overflow");
return;
return -1;
}
#ifdef ASSERT
int offset = code_offset();
#endif // ASSERT
compilation()->offsets()->set_value(CodeOffsets::Deopt, code_offset());
AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack());
__ JUMP(deopt_blob, G3_scratch, 0); // sethi;jmp
__ delayed()->nop();
assert(code_offset() - offset <= deopt_handler_size, "overflow");
debug_only(__ stop("should have gone to the caller");)
__ end_a_stub();
return offset;
}
@ -688,29 +680,29 @@ void LIR_Assembler::align_call(LIR_Code) {
}
void LIR_Assembler::call(address entry, relocInfo::relocType rtype, CodeEmitInfo* info) {
__ call(entry, rtype);
void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
__ call(op->addr(), rtype);
// the peephole pass fills the delay slot
}
void LIR_Assembler::ic_call(address entry, CodeEmitInfo* info) {
void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
RelocationHolder rspec = virtual_call_Relocation::spec(pc());
__ set_oop((jobject)Universe::non_oop_word(), G5_inline_cache_reg);
__ relocate(rspec);
__ call(entry, relocInfo::none);
__ call(op->addr(), relocInfo::none);
// the peephole pass fills the delay slot
}
void LIR_Assembler::vtable_call(int vtable_offset, CodeEmitInfo* info) {
add_debug_info_for_null_check_here(info);
void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
add_debug_info_for_null_check_here(op->info());
__ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), G3_scratch);
if (__ is_simm13(vtable_offset) ) {
__ ld_ptr(G3_scratch, vtable_offset, G5_method);
if (__ is_simm13(op->vtable_offset())) {
__ ld_ptr(G3_scratch, op->vtable_offset(), G5_method);
} else {
// This will generate 2 instructions
__ set(vtable_offset, G5_method);
__ set(op->vtable_offset(), G5_method);
// ld_ptr, set_hi, set
__ ld_ptr(G3_scratch, G5_method, G5_method);
}
@ -720,6 +712,16 @@ void LIR_Assembler::vtable_call(int vtable_offset, CodeEmitInfo* info) {
}
void LIR_Assembler::preserve_SP(LIR_OpJavaCall* op) {
Unimplemented();
}
void LIR_Assembler::restore_SP(LIR_OpJavaCall* op) {
Unimplemented();
}
// load with 32-bit displacement
int LIR_Assembler::load(Register s, int disp, Register d, BasicType ld_type, CodeEmitInfo *info) {
int load_offset = code_offset();
@ -953,9 +955,11 @@ int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType typ
} else {
#ifdef _LP64
assert(base != to_reg->as_register_lo(), "can't handle this");
assert(O7 != to_reg->as_register_lo(), "can't handle this");
__ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_lo());
__ lduw(base, offset + lo_word_offset_in_bytes, O7); // in case O7 is base or offset, use it last
__ sllx(to_reg->as_register_lo(), 32, to_reg->as_register_lo());
__ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
__ or3(to_reg->as_register_lo(), O7, to_reg->as_register_lo());
#else
if (base == to_reg->as_register_lo()) {
__ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
@ -976,8 +980,8 @@ int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType typ
FloatRegister reg = to_reg->as_double_reg();
// split unaligned loads
if (unaligned || PatchALot) {
__ ldf(FloatRegisterImpl::S, base, offset + BytesPerWord, reg->successor());
__ ldf(FloatRegisterImpl::S, base, offset, reg);
__ ldf(FloatRegisterImpl::S, base, offset + 4, reg->successor());
__ ldf(FloatRegisterImpl::S, base, offset, reg);
} else {
__ ldf(FloatRegisterImpl::D, base, offset, to_reg->as_double_reg());
}
@ -1068,7 +1072,8 @@ void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
LIR_Const* c = src->as_constant_ptr();
switch (c->type()) {
case T_INT:
case T_FLOAT: {
case T_FLOAT:
case T_ADDRESS: {
Register src_reg = O7;
int value = c->as_jint_bits();
if (value == 0) {
@ -1124,7 +1129,8 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
}
switch (c->type()) {
case T_INT:
case T_FLOAT: {
case T_FLOAT:
case T_ADDRESS: {
LIR_Opr tmp = FrameMap::O7_opr;
int value = c->as_jint_bits();
if (value == 0) {
@ -1196,6 +1202,7 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
switch (c->type()) {
case T_INT:
case T_ADDRESS:
{
jint con = c->as_jint();
if (to_reg->is_single_cpu()) {
@ -2200,6 +2207,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
Register len = O2;
__ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
LP64_ONLY(__ sra(src_pos, 0, src_pos);) //higher 32bits must be null
if (shift == 0) {
__ add(src_ptr, src_pos, src_ptr);
} else {
@ -2208,6 +2216,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
}
__ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
LP64_ONLY(__ sra(dst_pos, 0, dst_pos);) //higher 32bits must be null
if (shift == 0) {
__ add(dst_ptr, dst_pos, dst_ptr);
} else {
@ -2729,9 +2738,6 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
}
Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
__ lduw(counter_addr, tmp1);
__ add(tmp1, DataLayout::counter_increment, tmp1);
__ stw(tmp1, counter_addr);
Bytecodes::Code bc = method->java_code_at_bci(bci);
// Perform additional virtual call profiling for invokevirtual and
// invokeinterface bytecodes
@ -2821,15 +2827,23 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
__ set(DataLayout::counter_increment, tmp1);
__ st_ptr(tmp1, mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
mdo_offset_bias);
if (i < (VirtualCallData::row_limit() - 1)) {
__ br(Assembler::always, false, Assembler::pt, update_done);
__ delayed()->nop();
}
__ br(Assembler::always, false, Assembler::pt, update_done);
__ delayed()->nop();
__ bind(next_test);
}
// Receiver did not match any saved receiver and there is no empty row for it.
// Increment total counter to indicate polymorphic case.
__ lduw(counter_addr, tmp1);
__ add(tmp1, DataLayout::counter_increment, tmp1);
__ stw(tmp1, counter_addr);
__ bind(update_done);
}
} else {
// Static call
__ lduw(counter_addr, tmp1);
__ add(tmp1, DataLayout::counter_increment, tmp1);
__ stw(tmp1, counter_addr);
}
}

View File

@ -144,17 +144,17 @@ LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
if (index->is_register()) {
// apply the shift and accumulate the displacement
if (shift > 0) {
LIR_Opr tmp = new_register(T_INT);
LIR_Opr tmp = new_pointer_register();
__ shift_left(index, shift, tmp);
index = tmp;
}
if (disp != 0) {
LIR_Opr tmp = new_register(T_INT);
LIR_Opr tmp = new_pointer_register();
if (Assembler::is_simm13(disp)) {
__ add(tmp, LIR_OprFact::intConst(disp), tmp);
__ add(tmp, LIR_OprFact::intptrConst(disp), tmp);
index = tmp;
} else {
__ move(LIR_OprFact::intConst(disp), tmp);
__ move(LIR_OprFact::intptrConst(disp), tmp);
__ add(tmp, index, tmp);
index = tmp;
}
@ -162,8 +162,8 @@ LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
}
} else if (disp != 0 && !Assembler::is_simm13(disp)) {
// index is illegal so replace it with the displacement loaded into a register
index = new_register(T_INT);
__ move(LIR_OprFact::intConst(disp), index);
index = new_pointer_register();
__ move(LIR_OprFact::intptrConst(disp), index);
disp = 0;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -42,17 +42,6 @@ void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
}
void C1_MacroAssembler::method_exit(bool restore_frame) {
// this code must be structured this way so that the return
// instruction can be a safepoint.
if (restore_frame) {
restore();
}
retl();
delayed()->nop();
}
void C1_MacroAssembler::explicit_null_check(Register base) {
Unimplemented();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -677,7 +677,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ add(I7, frame::pc_return_offset, Oissuing_pc->after_save());
__ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
Oissuing_pc->after_save());
G2_thread, Oissuing_pc->after_save());
__ verify_not_null_oop(Oexception->after_save());
__ jmp(O0, 0);
__ delayed()->restore();
@ -985,7 +985,6 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
void Runtime1::generate_handle_exception(StubAssembler* sasm, OopMapSet* oop_maps, OopMap* oop_map, bool) {
Label no_deopt;
Label no_handler;
__ verify_not_null_oop(Oexception);
@ -1003,9 +1002,14 @@ void Runtime1::generate_handle_exception(StubAssembler* sasm, OopMapSet* oop_map
// whether it had a handler or not we will deoptimize
// by entering the deopt blob with a pending exception.
#ifdef ASSERT
Label done;
__ tst(O0);
__ br(Assembler::zero, false, Assembler::pn, no_handler);
__ br(Assembler::notZero, false, Assembler::pn, done);
__ delayed()->nop();
__ stop("should have found address");
__ bind(done);
#endif
// restore the registers that were saved at the beginning and jump to the exception handler.
restore_live_registers(sasm);
@ -1013,20 +1017,6 @@ void Runtime1::generate_handle_exception(StubAssembler* sasm, OopMapSet* oop_map
__ jmp(O0, 0);
__ delayed()->restore();
__ bind(no_handler);
__ mov(L0, I7); // restore return address
// restore exception oop
__ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception->after_save());
__ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
__ restore();
AddressLiteral exc(Runtime1::entry_for(Runtime1::unwind_exception_id));
__ jump_to(exc, G4);
__ delayed()->nop();
oop_maps->add_gc_map(call_offset, oop_map);
}

View File

@ -22,10 +22,9 @@
*
*/
//
// Sets the default values for platform dependent flags used by the client compiler.
// (see c1_globals.hpp)
//
#ifndef TIERED
define_pd_global(bool, BackgroundCompilation, true );
define_pd_global(bool, CICompileOSR, true );
@ -48,27 +47,24 @@ define_pd_global(intx, OnStackReplacePercentage, 1400 );
define_pd_global(bool, UseTLAB, true );
define_pd_global(bool, ProfileInterpreter, false);
define_pd_global(intx, FreqInlineSize, 325 );
define_pd_global(intx, NewRatio, 8 ); // Design center runs on 1.3.1
define_pd_global(bool, ResizeTLAB, true );
define_pd_global(intx, ReservedCodeCacheSize, 32*M );
define_pd_global(intx, CodeCacheExpansionSize, 32*K );
define_pd_global(uintx,CodeCacheMinBlockLength, 1);
define_pd_global(uintx, PermSize, 12*M );
define_pd_global(uintx, MaxPermSize, 64*M );
define_pd_global(bool, NeverActAsServerClassMachine, true);
define_pd_global(uintx,PermSize, 12*M );
define_pd_global(uintx,MaxPermSize, 64*M );
define_pd_global(bool, NeverActAsServerClassMachine, true );
define_pd_global(intx, NewSizeThreadIncrease, 16*K );
define_pd_global(uintx, DefaultMaxRAM, 1*G);
define_pd_global(uint64_t,MaxRAM, 1ULL*G);
define_pd_global(intx, InitialCodeCacheSize, 160*K);
#endif // TIERED
#endif // !TIERED
define_pd_global(bool, UseTypeProfile, false);
define_pd_global(bool, RoundFPResults, false);
define_pd_global(bool, LIRFillDelaySlots, true);
define_pd_global(bool, LIRFillDelaySlots, true );
define_pd_global(bool, OptimizeSinglePrecision, false);
define_pd_global(bool, CSEArrayLength, true);
define_pd_global(bool, CSEArrayLength, true );
define_pd_global(bool, TwoOperandLIRForm, false);
define_pd_global(intx, SafepointPollOffset, 0);
define_pd_global(intx, SafepointPollOffset, 0 );

View File

@ -59,7 +59,6 @@ define_pd_global(intx, FLOATPRESSURE, 52); // C2 on V9 gets to u
define_pd_global(intx, FreqInlineSize, 175);
define_pd_global(intx, INTPRESSURE, 48); // large register set
define_pd_global(intx, InteriorEntryAlignment, 16); // = CodeEntryAlignment
define_pd_global(intx, NewRatio, 2);
define_pd_global(intx, NewSizeThreadIncrease, ScaleForWordSize(4*K));
// The default setting 16/16 seems to work best.
// (For _228_jack 16/16 is 2% better than 4/4, 16/4, 32/32, 32/16, or 16/32.)
@ -83,25 +82,25 @@ define_pd_global(bool, OptoScheduling, true);
// sequence of instructions to load a 64 bit pointer.
//
// InitialCodeCacheSize derived from specjbb2000 run.
define_pd_global(intx, InitialCodeCacheSize, 2048*K); // Integral multiple of CodeCacheExpansionSize
define_pd_global(intx, ReservedCodeCacheSize, 48*M);
define_pd_global(intx, CodeCacheExpansionSize, 64*K);
define_pd_global(intx, InitialCodeCacheSize, 2048*K); // Integral multiple of CodeCacheExpansionSize
define_pd_global(intx, ReservedCodeCacheSize, 48*M);
define_pd_global(intx, CodeCacheExpansionSize, 64*K);
// Ergonomics related flags
define_pd_global(uintx, DefaultMaxRAM, 32*G);
define_pd_global(uint64_t,MaxRAM, 128ULL*G);
#else
// InitialCodeCacheSize derived from specjbb2000 run.
define_pd_global(intx, InitialCodeCacheSize, 1536*K); // Integral multiple of CodeCacheExpansionSize
define_pd_global(intx, ReservedCodeCacheSize, 32*M);
define_pd_global(intx, CodeCacheExpansionSize, 32*K);
define_pd_global(intx, InitialCodeCacheSize, 1536*K); // Integral multiple of CodeCacheExpansionSize
define_pd_global(intx, ReservedCodeCacheSize, 32*M);
define_pd_global(intx, CodeCacheExpansionSize, 32*K);
// Ergonomics related flags
define_pd_global(uintx, DefaultMaxRAM, 1*G);
define_pd_global(uint64_t,MaxRAM, 4ULL*G);
#endif
define_pd_global(uintx,CodeCacheMinBlockLength, 4);
define_pd_global(uintx,CodeCacheMinBlockLength, 4);
// Heap related flags
define_pd_global(uintx, PermSize, ScaleForWordSize(16*M));
define_pd_global(uintx, MaxPermSize, ScaleForWordSize(64*M));
define_pd_global(uintx,PermSize, ScaleForWordSize(16*M));
define_pd_global(uintx,MaxPermSize, ScaleForWordSize(64*M));
// Ergonomics related flags
define_pd_global(bool, NeverActAsServerClassMachine, false);

@ -1,5 +1,5 @@
/*
* Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -366,8 +366,9 @@ frame::frame(intptr_t* sp, intptr_t* younger_sp, bool younger_frame_adjusted_sta
// as get_original_pc() needs correct value for unextended_sp()
if (_pc != NULL) {
_cb = CodeCache::find_blob(_pc);
if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
_pc = ((nmethod*)_cb)->get_original_pc(this);
address original_pc = nmethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
_pc = original_pc;
_deopt_state = is_deoptimized;
} else {
_deopt_state = not_deoptimized;
@ -519,9 +520,9 @@ void frame::patch_pc(Thread* thread, address pc) {
_cb = CodeCache::find_blob(pc);
*O7_addr() = pc - pc_return_offset;
_cb = CodeCache::find_blob(_pc);
if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
address orig = ((nmethod*)_cb)->get_original_pc(this);
assert(orig == _pc, "expected original to be stored before patching");
address original_pc = nmethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
assert(original_pc == _pc, "expected original to be stored before patching");
_deopt_state = is_deoptimized;
} else {
_deopt_state = not_deoptimized;
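The pattern this hunk installs at every frame constructor can be read in isolation. A minimal standalone sketch (simplified stand-in types, not HotSpot code; only get_deopt_original_pc and the deopt-state handling are taken from the diff):

#include <cstdio>

typedef const char* address;

struct frame;

// Stand-in for nmethod: remembers which PC the deopt patch installed
// and which PC it replaced.
struct nmethod {
  address deopt_pc;
  address original_pc;
  bool is_deopt_pc(address pc) const { return pc == deopt_pc; }
  static address get_deopt_original_pc(const frame* fr);
};

struct frame {
  address  pc;
  nmethod* nm;           // stand-in for CodeCache::find_blob(_pc)
  bool     deoptimized;

  void init(address p, nmethod* n) {
    pc = p; nm = n;
    // One helper call replaces the old open-coded
    // "_cb->is_nmethod() && is_deopt_pc(_pc)" test.
    address original = nmethod::get_deopt_original_pc(this);
    if (original != nullptr) {
      pc = original;     // report the pre-patch PC
      deoptimized = true;
    } else {
      deoptimized = false;
    }
  }
};

address nmethod::get_deopt_original_pc(const frame* fr) {
  nmethod* nm = fr->nm;
  if (nm != nullptr && nm->is_deopt_pc(fr->pc)) return nm->original_pc;
  return nullptr;
}

int main() {
  const char* patched = "patched-pc";
  nmethod nm = { patched, "original-pc" };
  frame fr;
  fr.init(patched, &nm);
  std::printf("deoptimized=%d pc=%s\n", (int)fr.deoptimized, fr.pc);
  return 0;
}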

@ -22,10 +22,8 @@
*
*/
//
// Sets the default values for platform dependent flags used by the runtime system.
// (see globals.hpp)
//
// For sparc we do not do callbacks when a thread is in the interpreter, because the
// interpreter dispatch needs at least two instructions - first to load the dispatch address
@ -41,26 +39,23 @@ define_pd_global(bool, NeedsDeoptSuspend, true); // register window ma
define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks
define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs passed to check cast
define_pd_global(intx, CodeEntryAlignment, 32);
define_pd_global(uintx, TLABSize, 0);
define_pd_global(uintx, NewSize, ScaleForWordSize((2048 * K) + (2 * (64 * K))));
define_pd_global(intx, SurvivorRatio, 8);
define_pd_global(intx, InlineFrequencyCount, 50); // we can use more inlining on the SPARC
define_pd_global(intx, InlineSmallCode, 1500);
define_pd_global(intx, CodeEntryAlignment, 32);
define_pd_global(intx, InlineFrequencyCount, 50); // we can use more inlining on the SPARC
define_pd_global(intx, InlineSmallCode, 1500);
#ifdef _LP64
// Stack slots are 2X larger in LP64 than in the 32 bit VM.
define_pd_global(intx, ThreadStackSize, 1024);
define_pd_global(intx, VMThreadStackSize, 1024);
define_pd_global(intx, ThreadStackSize, 1024);
define_pd_global(intx, VMThreadStackSize, 1024);
#else
define_pd_global(intx, ThreadStackSize, 512);
define_pd_global(intx, VMThreadStackSize, 512);
define_pd_global(intx, ThreadStackSize, 512);
define_pd_global(intx, VMThreadStackSize, 512);
#endif
define_pd_global(intx, StackYellowPages, 2);
define_pd_global(intx, StackRedPages, 1);
define_pd_global(intx, StackShadowPages, 3 DEBUG_ONLY(+1));
define_pd_global(intx, PreInflateSpin, 40); // Determined by running design center
define_pd_global(intx, PreInflateSpin, 40); // Determined by running design center
define_pd_global(bool, RewriteBytecodes, true);
define_pd_global(bool, RewriteFrequentPairs, true);

@ -1,5 +1,5 @@
/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -244,9 +244,10 @@ void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg)
}
void InterpreterMacroAssembler::super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1) {
void InterpreterMacroAssembler::super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
mov(arg_1, O0);
MacroAssembler::call_VM_leaf_base(thread_cache, entry_point, 1);
mov(arg_2, O1);
MacroAssembler::call_VM_leaf_base(thread_cache, entry_point, 2);
}
#endif /* CC_INTERP */
@ -1681,11 +1682,8 @@ void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
// If no method data exists, go to profile_continue.
test_method_data_pointer(profile_continue);
// We are making a call. Increment the count.
increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
// Record the receiver type.
record_klass_in_profile(receiver, scratch);
record_klass_in_profile(receiver, scratch, true);
// The method data pointer needs to be updated to reflect the new target.
update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
@ -1695,9 +1693,13 @@ void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
void InterpreterMacroAssembler::record_klass_in_profile_helper(
Register receiver, Register scratch,
int start_row, Label& done) {
if (TypeProfileWidth == 0)
int start_row, Label& done, bool is_virtual_call) {
if (TypeProfileWidth == 0) {
if (is_virtual_call) {
increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
}
return;
}
int last_row = VirtualCallData::row_limit() - 1;
assert(start_row <= last_row, "must be work left to do");
@ -1714,6 +1716,7 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
// See if the receiver is receiver[n].
int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row));
test_mdp_data_at(recvr_offset, receiver, next_test, scratch);
// delayed()->tst(scratch);
// The receiver is receiver[n]. Increment count[n].
int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));
@ -1723,20 +1726,31 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
bind(next_test);
if (test_for_null_also) {
Label found_null;
// Failed the equality check on receiver[n]... Test for null.
if (start_row == last_row) {
// The only thing left to do is handle the null case.
brx(Assembler::notZero, false, Assembler::pt, done);
delayed()->nop();
if (is_virtual_call) {
brx(Assembler::zero, false, Assembler::pn, found_null);
delayed()->nop();
// Receiver did not match any saved receiver and there is no empty row for it.
// Increment total counter to indicate polymorphic case.
increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
ba(false, done);
delayed()->nop();
bind(found_null);
} else {
brx(Assembler::notZero, false, Assembler::pt, done);
delayed()->nop();
}
break;
}
// Since null is rare, make it the branch-taken case.
Label found_null;
brx(Assembler::zero, false, Assembler::pn, found_null);
delayed()->nop();
// Put all the "Case 3" tests here.
record_klass_in_profile_helper(receiver, scratch, start_row + 1, done);
record_klass_in_profile_helper(receiver, scratch, start_row + 1, done, is_virtual_call);
// Found a null. Keep searching for a matching receiver,
// but remember that this is an empty (unused) slot.
@ -1753,16 +1767,18 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
mov(DataLayout::counter_increment, scratch);
set_mdp_data_at(count_offset, scratch);
ba(false, done);
delayed()->nop();
if (start_row > 0) {
ba(false, done);
delayed()->nop();
}
}
void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
Register scratch) {
Register scratch, bool is_virtual_call) {
assert(ProfileInterpreter, "must be profiling");
Label done;
record_klass_in_profile_helper(receiver, scratch, 0, done);
record_klass_in_profile_helper(receiver, scratch, 0, done, is_virtual_call);
bind (done);
}
@ -1840,7 +1856,7 @@ void InterpreterMacroAssembler::profile_typecheck(Register klass,
mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
// Record the object type.
record_klass_in_profile(klass, scratch);
record_klass_in_profile(klass, scratch, false);
}
// The method data pointer needs to be updated.
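The policy the assembly above implements is easier to see in plain C++. A sketch under simplified assumptions (fixed two-row table; names only loosely mirror VirtualCallData/CounterData):

#include <cstdint>
#include <cstdio>

const int row_limit = 2;   // stand-in for VirtualCallData::row_limit()

struct CallProfile {
  const void* receiver[row_limit];   // nullptr == empty row
  uint32_t    count[row_limit];
  uint32_t    poly_count;            // shared CounterData-style counter

  void record(const void* klass, bool is_virtual_call) {
    for (int row = 0; row < row_limit; row++) {
      if (receiver[row] == klass) { count[row]++; return; }  // known receiver
      if (receiver[row] == nullptr) {                        // free row: claim it
        receiver[row] = klass;
        count[row] = 1;
        return;
      }
    }
    // No match and no empty row: only virtual calls bump the shared
    // counter (the polymorphic case); profile_typecheck passes false
    // and leaves it alone, as in the diff.
    if (is_virtual_call) poly_count++;
  }
};

int main() {
  struct Klass {} a, b, c;
  CallProfile p = {};                // zero-initialize all rows
  p.record(&a, true); p.record(&a, true);
  p.record(&b, true);
  p.record(&c, true);                // overflows the two rows
  std::printf("count0=%u count1=%u poly=%u\n",
              p.count[0], p.count[1], p.poly_count);
  return 0;
}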

@ -1,5 +1,5 @@
/*
* Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -121,7 +121,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
bool check_exception = true);
#ifndef CC_INTERP
void super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1);
void super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2);
// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass. Blows registers tmp1, tmp2 and tmp3.
@ -290,9 +290,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
void test_mdp_data_at(int offset, Register value, Label& not_equal_continue,
Register scratch);
void record_klass_in_profile(Register receiver, Register scratch);
void record_klass_in_profile(Register receiver, Register scratch, bool is_virtual_call);
void record_klass_in_profile_helper(Register receiver, Register scratch,
int start_row, Label& done);
int start_row, Label& done, bool is_virtual_call);
void update_mdp_by_offset(int offset_of_disp, Register scratch);
void update_mdp_by_offset(Register reg, int offset_of_disp,

@ -1,5 +1,5 @@
/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -394,6 +394,11 @@ address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter:
}
bool AbstractInterpreter::can_be_compiled(methodHandle m) {
// No special entry points that preclude compilation
return true;
}
// This method tells the deoptimizer how big an interpreted frame must be:
int AbstractInterpreter::size_activation(methodOop method,
int tempcount,

@ -1,5 +1,5 @@
/*
* Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -851,10 +851,10 @@ void AdapterGenerator::gen_c2i_adapter(
__ set(reg2offset(r_1) + extraspace + bias, ld_off);
#else
int ld_off = reg2offset(r_1) + extraspace + bias;
#endif // _LP64
#ifdef ASSERT
G1_forced = true;
#endif // ASSERT
#endif // _LP64
r_1 = G1_scratch->as_VMReg();// as part of the load/store shuffle
if (!r_2->is_valid()) __ ld (base, ld_off, G1_scratch);
else __ ldx(base, ld_off, G1_scratch);
@ -865,9 +865,11 @@ void AdapterGenerator::gen_c2i_adapter(
if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
store_c2i_object(r, base, st_off);
} else if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
#ifndef _LP64
if (TieredCompilation) {
assert(G1_forced || sig_bt[i] != T_LONG, "should not use register args for longs");
}
#endif // _LP64
store_c2i_long(r, base, st_off, r_2->is_stack());
} else {
store_c2i_int(r, base, st_off);
@ -1189,7 +1191,8 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
// VMReg max_arg,
int comp_args_on_stack, // VMRegStackSlots
const BasicType *sig_bt,
const VMRegPair *regs) {
const VMRegPair *regs,
AdapterFingerPrint* fingerprint) {
address i2c_entry = __ pc();
AdapterGenerator agen(masm);
@ -1258,7 +1261,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
agen.gen_c2i_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
__ flush();
return new AdapterHandlerEntry(i2c_entry, c2i_entry, c2i_unverified_entry);
return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
}

@ -1,5 +1,5 @@
//
// Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved.
// Copyright 1998-2010 Sun Microsystems, Inc. All Rights Reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@ -1803,8 +1803,9 @@ void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
// to implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;
// Do floats take an entire double register or just half?
const bool Matcher::float_in_double = false;
// Are floats converted to double when stored to stack during deoptimization?
// Sparc does not handle callee-save floats.
bool Matcher::float_in_double() { return false; }
// Do ints take an entire long register or just half?
// Note that we if-def off of _LP64.
@ -1885,6 +1886,10 @@ RegMask Matcher::modL_proj_mask() {
return RegMask();
}
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
return RegMask();
}
%}
@ -6664,7 +6669,7 @@ instruct cmovII_imm(cmpOp cmp, flagsReg icc, iRegI dst, immI11 src) %{
ins_pipe(ialu_imm);
%}
instruct cmovII_U_reg(cmpOpU cmp, flagsRegU icc, iRegI dst, iRegI src) %{
instruct cmovIIu_reg(cmpOpU cmp, flagsRegU icc, iRegI dst, iRegI src) %{
match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
ins_cost(150);
size(4);
@ -6673,7 +6678,7 @@ instruct cmovII_U_reg(cmpOpU cmp, flagsRegU icc, iRegI dst, iRegI src) %{
ins_pipe(ialu_reg);
%}
instruct cmovII_U_imm(cmpOpU cmp, flagsRegU icc, iRegI dst, immI11 src) %{
instruct cmovIIu_imm(cmpOpU cmp, flagsRegU icc, iRegI dst, immI11 src) %{
match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
ins_cost(140);
size(4);
@ -6719,6 +6724,16 @@ instruct cmovNI_reg(cmpOp cmp, flagsReg icc, iRegN dst, iRegN src) %{
ins_pipe(ialu_reg);
%}
// This instruction also works with CmpN so we don't need cmovNN_reg.
instruct cmovNIu_reg(cmpOpU cmp, flagsRegU icc, iRegN dst, iRegN src) %{
match(Set dst (CMoveN (Binary cmp icc) (Binary dst src)));
ins_cost(150);
size(4);
format %{ "MOV$cmp $icc,$src,$dst" %}
ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
ins_pipe(ialu_reg);
%}
instruct cmovNF_reg(cmpOpF cmp, flagsRegF fcc, iRegN dst, iRegN src) %{
match(Set dst (CMoveN (Binary cmp fcc) (Binary dst src)));
ins_cost(150);
@ -6756,6 +6771,16 @@ instruct cmovPI_reg(cmpOp cmp, flagsReg icc, iRegP dst, iRegP src) %{
ins_pipe(ialu_reg);
%}
instruct cmovPIu_reg(cmpOpU cmp, flagsRegU icc, iRegP dst, iRegP src) %{
match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
ins_cost(150);
size(4);
format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %}
ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
ins_pipe(ialu_reg);
%}
instruct cmovPI_imm(cmpOp cmp, flagsReg icc, iRegP dst, immP0 src) %{
match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
ins_cost(140);
@ -6766,6 +6791,16 @@ instruct cmovPI_imm(cmpOp cmp, flagsReg icc, iRegP dst, immP0 src) %{
ins_pipe(ialu_imm);
%}
instruct cmovPIu_imm(cmpOpU cmp, flagsRegU icc, iRegP dst, immP0 src) %{
match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
ins_cost(140);
size(4);
format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %}
ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) );
ins_pipe(ialu_imm);
%}
instruct cmovPF_reg(cmpOpF cmp, flagsRegF fcc, iRegP dst, iRegP src) %{
match(Set dst (CMoveP (Binary cmp fcc) (Binary dst src)));
ins_cost(150);
@ -6805,6 +6840,17 @@ instruct cmovFI_reg(cmpOp cmp, flagsReg icc, regF dst, regF src) %{
ins_pipe(int_conditional_float_move);
%}
instruct cmovFIu_reg(cmpOpU cmp, flagsRegU icc, regF dst, regF src) %{
match(Set dst (CMoveF (Binary cmp icc) (Binary dst src)));
ins_cost(150);
size(4);
format %{ "FMOVS$cmp $icc,$src,$dst" %}
opcode(0x101);
ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
ins_pipe(int_conditional_float_move);
%}
// Conditional move,
instruct cmovFF_reg(cmpOpF cmp, flagsRegF fcc, regF dst, regF src) %{
match(Set dst (CMoveF (Binary cmp fcc) (Binary dst src)));
@ -6838,6 +6884,17 @@ instruct cmovDI_reg(cmpOp cmp, flagsReg icc, regD dst, regD src) %{
ins_pipe(int_conditional_double_move);
%}
instruct cmovDIu_reg(cmpOpU cmp, flagsRegU icc, regD dst, regD src) %{
match(Set dst (CMoveD (Binary cmp icc) (Binary dst src)));
ins_cost(150);
size(4);
format %{ "FMOVD$cmp $icc,$src,$dst" %}
opcode(0x102);
ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
ins_pipe(int_conditional_double_move);
%}
// Conditional move,
instruct cmovDF_reg(cmpOpF cmp, flagsRegF fcc, regD dst, regD src) %{
match(Set dst (CMoveD (Binary cmp fcc) (Binary dst src)));
@ -6877,6 +6934,17 @@ instruct cmovLI_reg(cmpOp cmp, flagsReg icc, iRegL dst, iRegL src) %{
%}
instruct cmovLIu_reg(cmpOpU cmp, flagsRegU icc, iRegL dst, iRegL src) %{
match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
ins_cost(150);
size(4);
format %{ "MOV$cmp $icc,$src,$dst\t! long" %}
ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
ins_pipe(ialu_reg);
%}
instruct cmovLF_reg(cmpOpF cmp, flagsRegF fcc, iRegL dst, iRegL src) %{
match(Set dst (CMoveL (Binary cmp fcc) (Binary dst src)));
ins_cost(150);

@ -1,5 +1,5 @@
/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -379,7 +379,7 @@ class StubGenerator: public StubCodeGenerator {
__ save_frame(0); // compensates for compiler weakness
__ add(O7->after_save(), frame::pc_return_offset, Lscratch); // save the issuing PC
BLOCK_COMMENT("call exception_handler_for_return_address");
__ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), Lscratch);
__ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), G2_thread, Lscratch);
__ mov(O0, handler_reg);
__ restore(); // compensates for compiler weakness
@ -2862,6 +2862,9 @@ class StubGenerator: public StubCodeGenerator {
// arraycopy stubs used by compilers
generate_arraycopy_stubs();
// Don't initialize the platform math functions since sparc
// doesn't have intrinsics for these operations.
}

@ -1,5 +1,5 @@
/*
* Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,8 +37,13 @@ static bool returns_to_call_stub(address return_pc) {
enum /* platform_dependent_constants */ {
// %%%%%%%% May be able to shrink this a lot
code_size1 = 20000, // simply increase if too small (assembler will crash if too small)
code_size2 = 20000 // simply increase if too small (assembler will crash if too small)
code_size1 = 20000, // simply increase if too small (assembler will crash if too small)
code_size2 = 20000 // simply increase if too small (assembler will crash if too small)
};
// MethodHandles adapters
enum method_handles_platform_dependent_constants {
method_handles_adapters_code_size = 5000
};
class Sparc {

@ -1,5 +1,5 @@
/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -150,8 +150,7 @@ address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
}
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, bool unbox) {
assert(!unbox, "NYI");//6815692//
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
address compiled_entry = __ pc();
Label cont;
@ -1823,7 +1822,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
__ add(issuing_pc_addr, Oissuing_pc->after_save()); // likewise set I1 to a value local to the caller
__ super_call_VM_leaf(L7_thread_cache,
CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
Oissuing_pc->after_save());
G2_thread, Oissuing_pc->after_save());
// The caller's SP was adjusted upon method entry to accommodate
// the callee's non-argument locals. Undo that adjustment.
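For reference, the signature change threaded through all of these call sites, reconstructed from the removed and added lines (JavaThread is the real parameter type; the _old/_new suffixes exist only for this comparison):

struct JavaThread;
typedef unsigned char* address;

// before: the runtime re-derived the current thread itself
address exception_handler_for_return_address_old(address return_pc);
// after: the stub passes the thread (G2_thread on SPARC) explicitly
address exception_handler_for_return_address_new(JavaThread* thread,
                                                 address return_pc);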

@ -2251,6 +2251,7 @@ void Assembler::popf() {
emit_byte(0x9D);
}
#ifndef _LP64 // no 32bit push/pop on amd64
void Assembler::popl(Address dst) {
// NOTE: this will adjust the stack by 8 bytes on 64 bits
InstructionMark im(this);
@ -2258,6 +2259,7 @@ void Assembler::popl(Address dst) {
emit_byte(0x8F);
emit_operand(rax, dst);
}
#endif
void Assembler::prefetch_prefix(Address src) {
prefix(src);
@ -2428,6 +2430,7 @@ void Assembler::pushf() {
emit_byte(0x9C);
}
#ifndef _LP64 // no 32bit push/pop on amd64
void Assembler::pushl(Address src) {
// Note this will push 64 bits on 64-bit
InstructionMark im(this);
@ -2435,6 +2438,7 @@ void Assembler::pushl(Address src) {
emit_byte(0xFF);
emit_operand(rsi, src);
}
#endif
void Assembler::pxor(XMMRegister dst, Address src) {
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
@ -5591,7 +5595,12 @@ void MacroAssembler::align(int modulus) {
}
void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src) {
andpd(dst, as_Address(src));
if (reachable(src)) {
andpd(dst, as_Address(src));
} else {
lea(rscratch1, src);
andpd(dst, Address(rscratch1, 0));
}
}
void MacroAssembler::andptr(Register dst, int32_t imm32) {
@ -6078,11 +6087,21 @@ void MacroAssembler::cmpxchgptr(Register reg, Address adr) {
}
void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src) {
comisd(dst, as_Address(src));
if (reachable(src)) {
comisd(dst, as_Address(src));
} else {
lea(rscratch1, src);
comisd(dst, Address(rscratch1, 0));
}
}
void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src) {
comiss(dst, as_Address(src));
if (reachable(src)) {
comiss(dst, as_Address(src));
} else {
lea(rscratch1, src);
comiss(dst, Address(rscratch1, 0));
}
}
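The guard added around andpd/comisd/comiss is the standard RIP-relative reachability test. Its logic in isolation (a sketch; HotSpot's reachable() also consults relocation info, and the printf emitters here are hypothetical):

#include <cstdint>
#include <cstdio>

// Does target fit in the signed 32-bit displacement of [rip+disp32]?
static bool fits_in_disp32(int64_t target, int64_t next_ip) {
  int64_t disp = target - next_ip;
  return disp == (int64_t)(int32_t)disp;
}

static void emit_andpd(int64_t target, int64_t next_ip) {
  if (fits_in_disp32(target, next_ip)) {
    std::printf("andpd xmm0, [rip%+lld]\n", (long long)(target - next_ip));
  } else {
    // Too far away: materialize the address in a scratch register first.
    std::printf("lea r10, [0x%llx]\n", (unsigned long long)target);
    std::printf("andpd xmm0, [r10]\n");
  }
}

int main() {
  emit_andpd(0x401000, 0x400100);          // nearby literal: direct form
  emit_andpd(0x7f0000000000LL, 0x400100);  // far literal: scratch form
  return 0;
}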
@ -7647,7 +7666,7 @@ RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_ad
#ifdef ASSERT
Label L;
testl(tmp, tmp);
testptr(tmp, tmp);
jccb(Assembler::notZero, L);
hlt();
bind(L);
@ -8441,6 +8460,7 @@ void MacroAssembler::string_indexof(Register str1, Register str2,
subptr(str1, result); // Restore counter
shrl(str1, 1);
addl(cnt1, str1);
decrementl(cnt1);
lea(str1, Address(result, 2)); // Reload string
// Load substr

@ -1244,7 +1244,9 @@ private:
void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
void pcmpestri(XMMRegister xmm1, Address src, int imm8);
#ifndef _LP64 // no 32bit push/pop on amd64
void popl(Address dst);
#endif
#ifdef _LP64
void popq(Address dst);
@ -1285,7 +1287,9 @@ private:
// Interleave Low Bytes
void punpcklbw(XMMRegister dst, XMMRegister src);
#ifndef _LP64 // no 32bit push/pop on amd64
void pushl(Address src);
#endif
void pushq(Address src);

@ -1,5 +1,5 @@
/*
* Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -373,6 +373,14 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
}
void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
__ call(RuntimeAddress(SharedRuntime::deopt_blob()->unpack_with_reexecution()));
ce->add_call_info_here(_info);
debug_only(__ should_not_reach_here());
}
void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
__ bind(_entry);

@ -1,5 +1,5 @@
/*
* Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -126,3 +126,6 @@
assert(i >= 0 && i < nof_caller_save_xmm_regs, "out of bounds");
return _caller_save_xmm_regs[i];
}
// JSR 292
static LIR_Opr& method_handle_invoke_SP_save_opr() { return rbp_opr; }

@ -1,5 +1,5 @@
/*
* Copyright 2000-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -301,22 +301,25 @@ void LIR_Assembler::osr_entry() {
Register OSR_buf = osrBufferPointer()->as_pointer_register();
{ assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
int monitor_offset = BytesPerWord * method()->max_locals() +
(BasicObjectLock::size() * BytesPerWord) * (number_of_locks - 1);
(2 * BytesPerWord) * (number_of_locks - 1);
// SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
// the OSR buffer using 2 word entries: first the lock and then
// the oop.
for (int i = 0; i < number_of_locks; i++) {
int slot_offset = monitor_offset - ((i * BasicObjectLock::size()) * BytesPerWord);
int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
// verify the interpreter's monitor has a non-null object
{
Label L;
__ cmpptr(Address(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD);
__ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), (int32_t)NULL_WORD);
__ jcc(Assembler::notZero, L);
__ stop("locked object is NULL");
__ bind(L);
}
#endif
__ movptr(rbx, Address(OSR_buf, slot_offset + BasicObjectLock::lock_offset_in_bytes()));
__ movptr(rbx, Address(OSR_buf, slot_offset + 0));
__ movptr(frame_map()->address_for_monitor_lock(i), rbx);
__ movptr(rbx, Address(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes()));
__ movptr(rbx, Address(OSR_buf, slot_offset + 1*BytesPerWord));
__ movptr(frame_map()->address_for_monitor_object(i), rbx);
}
}
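The new offsets are plain arithmetic; a runnable restatement (assuming 64-bit words, which the diff itself does not depend on):

#include <cstdio>

const int BytesPerWord = 8;   // assumption: LP64 build

// OSR_migration_begin() packs each BasicObjectLock as two words,
// lock first, then oop, below the max_locals area.
int monitor_slot_offset(int max_locals, int number_of_locks, int i) {
  int monitor_offset = BytesPerWord * max_locals
                     + (2 * BytesPerWord) * (number_of_locks - 1);
  return monitor_offset - (i * 2) * BytesPerWord;
}

int main() {
  for (int i = 0; i < 3; i++) {
    int slot = monitor_slot_offset(/*max_locals=*/4, /*locks=*/3, i);
    std::printf("monitor %d: lock at %d, oop at %d\n",
                i, slot + 0, slot + 1 * BytesPerWord);
  }
  return 0;
}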
@ -415,13 +418,12 @@ int LIR_Assembler::initial_frame_size_in_bytes() {
}
void LIR_Assembler::emit_exception_handler() {
int LIR_Assembler::emit_exception_handler() {
// if the last instruction is a call (typically to do a throw which
// is coming at the end after block reordering) the return address
// must still point into the code area in order to avoid assertion
// failures when searching for the corresponding bci => add a nop
// (was bug 5/14/1999 - gri)
__ nop();
// generate code for exception handler
@ -429,61 +431,36 @@ void LIR_Assembler::emit_exception_handler() {
if (handler_base == NULL) {
// not enough space left for the handler
bailout("exception handler overflow");
return;
return -1;
}
#ifdef ASSERT
int offset = code_offset();
#endif // ASSERT
compilation()->offsets()->set_value(CodeOffsets::Exceptions, code_offset());
// if the method does not have an exception handler, then there is
// no reason to search for one
if (compilation()->has_exception_handlers() || compilation()->env()->jvmti_can_post_exceptions()) {
// the exception oop and pc are in rax, and rdx
// no other registers need to be preserved, so invalidate them
__ invalidate_registers(false, true, true, false, true, true);
// check that there is really an exception
__ verify_not_null_oop(rax);
// search an exception handler (rax: exception oop, rdx: throwing pc)
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_nofpu_id)));
// if the call returns here, then the exception handler for the particular
// exception doesn't exist -> unwind activation and forward exception to caller
}
// the exception oop is in rax,
// the exception oop and pc are in rax, and rdx
// no other registers need to be preserved, so invalidate them
__ invalidate_registers(false, true, true, true, true, true);
__ invalidate_registers(false, true, true, false, true, true);
// check that there is really an exception
__ verify_not_null_oop(rax);
// unlock the receiver/klass if necessary
// rax,: exception
ciMethod* method = compilation()->method();
if (method->is_synchronized() && GenerateSynchronizationCode) {
monitorexit(FrameMap::rbx_oop_opr, FrameMap::rcx_opr, SYNC_header, 0, rax);
}
// search an exception handler (rax: exception oop, rdx: throwing pc)
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_nofpu_id)));
// unwind activation and forward exception to caller
// rax,: exception
__ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
__ stop("should not reach here");
assert(code_offset() - offset <= exception_handler_size, "overflow");
__ end_a_stub();
return offset;
}
void LIR_Assembler::emit_deopt_handler() {
int LIR_Assembler::emit_deopt_handler() {
// if the last instruction is a call (typically to do a throw which
// is coming at the end after block reordering) the return address
// must still point into the code area in order to avoid assertion
// failures when searching for the corresponding bci => add a nop
// (was bug 5/14/1999 - gri)
__ nop();
// generate code for exception handler
@ -491,23 +468,19 @@ void LIR_Assembler::emit_deopt_handler() {
if (handler_base == NULL) {
// not enough space left for the handler
bailout("deopt handler overflow");
return;
return -1;
}
#ifdef ASSERT
int offset = code_offset();
#endif // ASSERT
compilation()->offsets()->set_value(CodeOffsets::Deopt, code_offset());
InternalAddress here(__ pc());
__ pushptr(here.addr());
__ pushptr(here.addr());
__ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
assert(code_offset() - offset <= deopt_handler_size, "overflow");
__ end_a_stub();
return offset;
}
@ -600,7 +573,7 @@ void LIR_Assembler::return_op(LIR_Opr result) {
}
// Pop the stack before the safepoint code
__ leave();
__ remove_frame(initial_frame_size_in_bytes());
bool result_is_oop = result->is_valid() ? result->is_oop() : false;
@ -655,7 +628,8 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
LIR_Const* c = src->as_constant_ptr();
switch (c->type()) {
case T_INT: {
case T_INT:
case T_ADDRESS: {
assert(patch_code == lir_patch_none, "no patching handled here");
__ movl(dest->as_register(), c->as_jint());
break;
@ -738,6 +712,7 @@ void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
switch (c->type()) {
case T_INT: // fall through
case T_FLOAT:
case T_ADDRESS:
__ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
break;
@ -773,6 +748,7 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
switch (type) {
case T_INT: // fall through
case T_FLOAT:
case T_ADDRESS:
__ movl(as_Address(addr), c->as_jint_bits());
break;
@ -785,7 +761,13 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
ShouldNotReachHere();
__ movoop(as_Address(addr, noreg), c->as_jobject());
} else {
#ifdef _LP64
__ movoop(rscratch1, c->as_jobject());
null_check_here = code_offset();
__ movptr(as_Address_lo(addr), rscratch1);
#else
__ movoop(as_Address(addr), c->as_jobject());
#endif
}
}
break;
@ -1118,8 +1100,14 @@ void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
__ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
__ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
} else {
#ifndef _LP64
__ pushl(frame_map()->address_for_slot(src ->single_stack_ix()));
__ popl (frame_map()->address_for_slot(dest->single_stack_ix()));
#else
// no pushl on 64 bits
__ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix()));
__ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1);
#endif
}
} else if (src->is_double_stack()) {
@ -2733,6 +2721,7 @@ void LIR_Assembler::align_call(LIR_Code code) {
switch (code) {
case lir_static_call:
case lir_optvirtual_call:
case lir_dynamic_call:
offset += NativeCall::displacement_offset;
break;
case lir_icvirtual_call:
@ -2748,30 +2737,41 @@ void LIR_Assembler::align_call(LIR_Code code) {
}
void LIR_Assembler::call(address entry, relocInfo::relocType rtype, CodeEmitInfo* info) {
void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
"must be aligned");
__ call(AddressLiteral(entry, rtype));
add_call_info(code_offset(), info);
__ call(AddressLiteral(op->addr(), rtype));
add_call_info(code_offset(), op->info(), op->is_method_handle_invoke());
}
void LIR_Assembler::ic_call(address entry, CodeEmitInfo* info) {
void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
RelocationHolder rh = virtual_call_Relocation::spec(pc());
__ movoop(IC_Klass, (jobject)Universe::non_oop_word());
assert(!os::is_MP() ||
(__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
"must be aligned");
__ call(AddressLiteral(entry, rh));
add_call_info(code_offset(), info);
__ call(AddressLiteral(op->addr(), rh));
add_call_info(code_offset(), op->info(), op->is_method_handle_invoke());
}
/* Currently, vtable-dispatch is only enabled for sparc platforms */
void LIR_Assembler::vtable_call(int vtable_offset, CodeEmitInfo* info) {
void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
ShouldNotReachHere();
}
void LIR_Assembler::preserve_SP(LIR_OpJavaCall* op) {
__ movptr(FrameMap::method_handle_invoke_SP_save_opr()->as_register(), rsp);
}
void LIR_Assembler::restore_SP(LIR_OpJavaCall* op) {
__ movptr(rsp, FrameMap::method_handle_invoke_SP_save_opr()->as_register());
}
void LIR_Assembler::emit_static_call_stub() {
address call_pc = __ pc();
address stub = __ start_a_stub(call_stub_size);
@ -2824,10 +2824,12 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit
} else {
unwind_id = Runtime1::handle_exception_nofpu_id;
}
__ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
} else {
unwind_id = Runtime1::unwind_exception_id;
// remove the activation
__ remove_frame(initial_frame_size_in_bytes());
__ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
}
__ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
// enough room for two byte trap
__ nop();
@ -3136,8 +3138,10 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
#ifdef _LP64
assert_different_registers(c_rarg0, dst, dst_pos, length);
__ movl2ptr(src_pos, src_pos); //higher 32bits must be null
__ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
assert_different_registers(c_rarg1, length);
__ movl2ptr(dst_pos, dst_pos); //higher 32bits must be null
__ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
__ mov(c_rarg2, length);
@ -3202,7 +3206,6 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
Register mdo = op->mdo()->as_register();
__ movoop(mdo, md->constant_encoding());
Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
__ addl(counter_addr, DataLayout::counter_increment);
Bytecodes::Code bc = method->java_code_at_bci(bci);
// Perform additional virtual call profiling for invokevirtual and
// invokeinterface bytecodes
@ -3269,14 +3272,18 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
__ jcc(Assembler::notEqual, next_test);
__ movptr(recv_addr, recv);
__ movl(Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))), DataLayout::counter_increment);
if (i < (VirtualCallData::row_limit() - 1)) {
__ jmp(update_done);
}
__ jmp(update_done);
__ bind(next_test);
}
// Receiver did not match any saved receiver and there is no empty row for it.
// Increment total counter to indicate polymorphic case.
__ addl(counter_addr, DataLayout::counter_increment);
__ bind(update_done);
}
} else {
// Static call
__ addl(counter_addr, DataLayout::counter_increment);
}
}

@ -755,8 +755,19 @@ void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
}
LIR_Opr addr = new_pointer_register();
__ move(obj.result(), addr);
__ add(addr, offset.result(), addr);
LIR_Address* a;
if(offset.result()->is_constant()) {
a = new LIR_Address(obj.result(),
NOT_LP64(offset.result()->as_constant_ptr()->as_jint()) LP64_ONLY((int)offset.result()->as_constant_ptr()->as_jlong()),
as_BasicType(type));
} else {
a = new LIR_Address(obj.result(),
offset.result(),
LIR_Address::times_1,
0,
as_BasicType(type));
}
__ leal(LIR_OprFact::address(a), addr);
if (type == objectType) { // Write-barrier needed for Object fields.
// Do the pre-write barrier, if any.
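The shape of the replacement addressing logic, as a standalone sketch (Addr is a simplified stand-in for LIR_Address):

#include <cstdint>
#include <cstdio>

struct Addr { int base; int index; int64_t disp; };   // index -1 == none

// A constant offset folds into the displacement; a variable offset
// becomes base + index*1. leal then forms the effective address in one
// step, replacing the old move-then-add sequence.
Addr cas_field_address(int obj_reg, bool offset_is_constant,
                       int64_t const_offset, int offset_reg) {
  if (offset_is_constant)
    return Addr{ obj_reg, -1, const_offset };
  return Addr{ obj_reg, offset_reg, 0 };
}

int main() {
  Addr a = cas_field_address(/*obj_reg=*/1, true, 24, -1);
  Addr b = cas_field_address(1, false, 0, /*offset_reg=*/2);
  std::printf("[r%d + %lld] and [r%d + r%d*1]\n",
              a.base, (long long)a.disp, b.base, b.index);
  return 0;
}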

@ -1,5 +1,5 @@
/*
* Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -317,14 +317,6 @@ void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
}
void C1_MacroAssembler::method_exit(bool restore_frame) {
if (restore_frame) {
leave();
}
ret(0);
}
void C1_MacroAssembler::build_frame(int frame_size_in_bytes) {
// Make sure there is enough stack space for this method's activation.
// Note that we do this before doing an enter(). This matches the
@ -333,7 +325,7 @@ void C1_MacroAssembler::build_frame(int frame_size_in_bytes) {
// between the two compilers.
generate_stack_overflow_check(frame_size_in_bytes);
enter();
push(rbp);
#ifdef TIERED
// c2 leaves fpu stack dirty. Clean it on entry
if (UseSSE < 2 ) {
@ -344,6 +336,12 @@ void C1_MacroAssembler::build_frame(int frame_size_in_bytes) {
}
void C1_MacroAssembler::remove_frame(int frame_size_in_bytes) {
increment(rsp, frame_size_in_bytes); // Does not emit code for frame_size == 0
pop(rbp);
}
void C1_MacroAssembler::unverified_entry(Register receiver, Register ic_klass) {
if (C1Breakpoint) int3();
inline_cache_check(receiver, ic_klass);

@ -1,5 +1,5 @@
/*
* Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -688,18 +688,21 @@ void Runtime1::generate_handle_exception(StubAssembler *sasm, OopMapSet* oop_map
int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
oop_maps->add_gc_map(call_offset, oop_map);
// rax,: handler address or NULL if no handler exists
// rax,: handler address
// will be the deopt blob if nmethod was deoptimized while we looked up
// handler regardless of whether handler existed in the nmethod.
// only rax, is valid at this time, all other registers have been destroyed by the runtime call
__ invalidate_registers(false, true, true, true, true, true);
#ifdef ASSERT
// Do we have an exception handler in the nmethod?
Label no_handler;
Label done;
__ testptr(rax, rax);
__ jcc(Assembler::zero, no_handler);
__ jcc(Assembler::notZero, done);
__ stop("no handler found");
__ bind(done);
#endif
// exception handler found
// patch the return address -> the stub will directly return to the exception handler
@ -712,36 +715,14 @@ void Runtime1::generate_handle_exception(StubAssembler *sasm, OopMapSet* oop_map
__ leave();
__ ret(0);
__ bind(no_handler);
// no exception handler found in this method, so the exception is
// forwarded to the caller (using the unwind code of the nmethod)
// there is no need to restore the registers
// restore the real return address that was saved before the RT-call
__ movptr(real_return_addr, Address(rsp, temp_1_off * VMRegImpl::stack_slot_size));
__ movptr(Address(rbp, 1*BytesPerWord), real_return_addr);
// load address of JavaThread object for thread-local data
NOT_LP64(__ get_thread(thread);)
// restore exception oop into rax, (convention for unwind code)
__ movptr(exception_oop, Address(thread, JavaThread::exception_oop_offset()));
// clear exception fields in JavaThread because they are no longer needed
// (fields must be cleared because they are processed by GC otherwise)
__ movptr(Address(thread, JavaThread::exception_oop_offset()), NULL_WORD);
__ movptr(Address(thread, JavaThread::exception_pc_offset()), NULL_WORD);
// pop the stub frame off
__ leave();
generate_unwind_exception(sasm);
__ stop("should not reach here");
}
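The assert-only rewrite relies on the invariant stated in the comment: the runtime call now always returns a handler address, yielding the deopt blob if the nmethod was deoptimized in the meantime. A sketch of the resulting control flow (the _stub function is a hypothetical stand-in):

#include <cassert>
#include <cstdio>

typedef unsigned char* address;

// Stand-in for the exception_handler_for_pc runtime call. After this
// change it never returns nullptr: a deoptimized nmethod yields the
// deopt blob's entry instead of "no handler".
static address exception_handler_for_pc_stub() {
  static unsigned char deopt_blob_entry[1];
  return deopt_blob_entry;
}

int main() {
  address handler = exception_handler_for_pc_stub();
  assert(handler != nullptr && "no handler found");   // debug builds only
  std::printf("patch return address to %p and return to it\n",
              (void*)handler);
  return 0;
}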
void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
// incoming parameters
const Register exception_oop = rax;
// callee-saved copy of exception_oop during runtime call
const Register exception_oop_callee_saved = NOT_LP64(rsi) LP64_ONLY(r14);
// other registers used in this stub
const Register exception_pc = rdx;
const Register handler_addr = rbx;
@ -769,38 +750,39 @@ void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
// clear the FPU stack in case any FPU results are left behind
__ empty_FPU_stack();
// leave activation of nmethod
__ leave();
// store return address (is on top of stack after leave)
// save exception_oop in callee-saved register to preserve it during runtime calls
__ verify_not_null_oop(exception_oop);
__ movptr(exception_oop_callee_saved, exception_oop);
NOT_LP64(__ get_thread(thread);)
// Get return address (is on top of stack after leave).
__ movptr(exception_pc, Address(rsp, 0));
__ verify_oop(exception_oop);
// save exception oop from rax, to stack before call
__ push(exception_oop);
// search the exception handler address of the caller (using the return address)
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), exception_pc);
// rax,: exception handler address of the caller
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
// rax: exception handler address of the caller
// only rax, is valid at this time, all other registers have been destroyed by the call
__ invalidate_registers(false, true, true, true, true, true);
// Only RAX and RSI are valid at this time, all other registers have been destroyed by the call.
__ invalidate_registers(false, true, true, true, false, true);
// move result of call into correct register
__ movptr(handler_addr, rax);
// restore exception oop in rax, (required convention of exception handler)
__ pop(exception_oop);
// Restore exception oop to RAX (required convention of exception handler).
__ movptr(exception_oop, exception_oop_callee_saved);
__ verify_oop(exception_oop);
// verify that there is really a valid exception in rax
__ verify_not_null_oop(exception_oop);
// get throwing pc (= return address).
// rdx has been destroyed by the call, so it must be set again
// the pop is also necessary to simulate the effect of a ret(0)
__ pop(exception_pc);
// verify that there is really a valid exception in rax,
__ verify_not_null_oop(exception_oop);
// Restore SP from BP if the exception PC is a MethodHandle call site.
NOT_LP64(__ get_thread(thread);)
__ cmpl(Address(thread, JavaThread::is_method_handle_exception_offset()), 0);
__ cmovptr(Assembler::notEqual, rsp, rbp);
// continue at exception handler (return address removed)
// note: do *not* remove arguments when unwinding the
@ -808,9 +790,9 @@ void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
// all arguments on the stack when entering the
// runtime to determine the exception handler
// (GC happens at call site with arguments!)
// rax,: exception oop
// rax: exception oop
// rdx: throwing pc
// rbx,: exception handler
// rbx: exception handler
__ jmp(handler_addr);
}

@ -22,10 +22,8 @@
*
*/
//
// Sets the default values for platform dependent flags used by the client compiler.
// (see c1_globals.hpp)
//
#ifndef TIERED
define_pd_global(bool, BackgroundCompilation, true );
@ -48,27 +46,24 @@ define_pd_global(intx, Tier4BackEdgeThreshold, 100000);
define_pd_global(intx, OnStackReplacePercentage, 933 );
define_pd_global(intx, FreqInlineSize, 325 );
define_pd_global(intx, NewRatio, 12 );
define_pd_global(intx, NewSizeThreadIncrease, 4*K );
define_pd_global(intx, InitialCodeCacheSize, 160*K);
define_pd_global(intx, ReservedCodeCacheSize, 32*M );
define_pd_global(bool, ProfileInterpreter, false);
define_pd_global(intx, CodeCacheExpansionSize, 32*K );
define_pd_global(uintx,CodeCacheMinBlockLength, 1);
define_pd_global(uintx, PermSize, 12*M );
define_pd_global(uintx, MaxPermSize, 64*M );
define_pd_global(bool, NeverActAsServerClassMachine, true);
define_pd_global(uintx, DefaultMaxRAM, 1*G);
define_pd_global(uintx,PermSize, 12*M );
define_pd_global(uintx,MaxPermSize, 64*M );
define_pd_global(bool, NeverActAsServerClassMachine, true );
define_pd_global(uint64_t,MaxRAM, 1ULL*G);
define_pd_global(bool, CICompileOSR, true );
#endif // TIERED
#endif // !TIERED
define_pd_global(bool, UseTypeProfile, false);
define_pd_global(bool, RoundFPResults, true );
define_pd_global(bool, LIRFillDelaySlots, false);
define_pd_global(bool, OptimizeSinglePrecision, true);
define_pd_global(bool, OptimizeSinglePrecision, true );
define_pd_global(bool, CSEArrayLength, false);
define_pd_global(bool, TwoOperandLIRForm, true);
define_pd_global(bool, TwoOperandLIRForm, true );
define_pd_global(intx, SafepointPollOffset, 256);
define_pd_global(intx, SafepointPollOffset, 256 );

@ -22,7 +22,6 @@
*
*/
//
// Sets the default values for platform dependent flags used by the server compiler.
// (see c2_globals.hpp). Alpha-sorted.
@ -46,8 +45,8 @@ define_pd_global(intx, CompileThreshold, 1000);
define_pd_global(intx, CompileThreshold, 10000);
#endif // TIERED
define_pd_global(intx, Tier2CompileThreshold, 10000);
define_pd_global(intx, Tier3CompileThreshold, 20000 );
define_pd_global(intx, Tier4CompileThreshold, 40000 );
define_pd_global(intx, Tier3CompileThreshold, 20000);
define_pd_global(intx, Tier4CompileThreshold, 40000);
define_pd_global(intx, BackEdgeThreshold, 100000);
define_pd_global(intx, Tier2BackEdgeThreshold, 100000);
@ -61,7 +60,6 @@ define_pd_global(intx, FreqInlineSize, 325);
#ifdef AMD64
define_pd_global(intx, INTPRESSURE, 13);
define_pd_global(intx, InteriorEntryAlignment, 16);
define_pd_global(intx, NewRatio, 2);
define_pd_global(intx, NewSizeThreadIncrease, ScaleForWordSize(4*K));
define_pd_global(intx, LoopUnrollLimit, 60);
// InitialCodeCacheSize derived from specjbb2000 run.
@ -69,19 +67,18 @@ define_pd_global(intx, InitialCodeCacheSize, 2496*K); // Integral multip
define_pd_global(intx, CodeCacheExpansionSize, 64*K);
// Ergonomics related flags
define_pd_global(uintx, DefaultMaxRAM, 32*G);
define_pd_global(uint64_t,MaxRAM, 128ULL*G);
#else
define_pd_global(intx, INTPRESSURE, 6);
define_pd_global(intx, InteriorEntryAlignment, 4);
define_pd_global(intx, NewRatio, 8); // Design center runs on 1.3.1
define_pd_global(intx, NewSizeThreadIncrease, 4*K);
define_pd_global(intx, LoopUnrollLimit, 50); // Design center runs on 1.3.1
define_pd_global(intx, LoopUnrollLimit, 50); // Design center runs on 1.3.1
// InitialCodeCacheSize derived from specjbb2000 run.
define_pd_global(intx, InitialCodeCacheSize, 2304*K); // Integral multiple of CodeCacheExpansionSize
define_pd_global(intx, CodeCacheExpansionSize, 32*K);
// Ergonomics related flags
define_pd_global(uintx, DefaultMaxRAM, 1*G);
define_pd_global(uint64_t,MaxRAM, 4ULL*G);
#endif // AMD64
define_pd_global(intx, OptoLoopAlignment, 16);
define_pd_global(intx, RegisterCostAreaRatio, 16000);
@ -97,8 +94,8 @@ define_pd_global(intx, ReservedCodeCacheSize, 48*M);
define_pd_global(uintx,CodeCacheMinBlockLength, 4);
// Heap related flags
define_pd_global(uintx, PermSize, ScaleForWordSize(16*M));
define_pd_global(uintx, MaxPermSize, ScaleForWordSize(64*M));
define_pd_global(uintx,PermSize, ScaleForWordSize(16*M));
define_pd_global(uintx,MaxPermSize, ScaleForWordSize(64*M));
// Ergonomics related flags
define_pd_global(bool, NeverActAsServerClassMachine, false);

@ -1,5 +1,5 @@
/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -222,9 +222,9 @@ void frame::patch_pc(Thread* thread, address pc) {
}
((address *)sp())[-1] = pc;
_cb = CodeCache::find_blob(pc);
if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
address orig = (((nmethod*)_cb)->get_original_pc(this));
assert(orig == _pc, "expected original to be stored before patching");
address original_pc = nmethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
assert(original_pc == _pc, "expected original PC to be stored before patching");
_deopt_state = is_deoptimized;
// leave _pc as is
} else {
@ -323,13 +323,63 @@ frame frame::sender_for_entry_frame(RegisterMap* map) const {
return fr;
}
//------------------------------------------------------------------------------
// frame::verify_deopt_original_pc
//
// Verifies the calculated original PC of a deoptimization PC for the
// given unextended SP. The unextended SP might also be the saved SP
// for MethodHandle call sites.
#if ASSERT
void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp, bool is_method_handle_return) {
frame fr;
// This is ugly, but it's better than changing {get,set}_original_pc
// to take an SP value as argument. And it's only a debugging
// method anyway.
fr._unextended_sp = unextended_sp;
address original_pc = nm->get_original_pc(&fr);
assert(nm->code_contains(original_pc), "original PC must be in nmethod");
assert(nm->is_method_handle_return(original_pc) == is_method_handle_return, "must be");
}
#endif
//------------------------------------------------------------------------------
// frame::sender_for_interpreter_frame
frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
// sp is the raw sp from the sender after adapter or interpreter extension
intptr_t* sp = (intptr_t*) addr_at(sender_sp_offset);
// SP is the raw SP from the sender after adapter or interpreter
// extension.
intptr_t* sender_sp = this->sender_sp();
// This is the sp before any possible extension (adapter/locals).
intptr_t* unextended_sp = interpreter_frame_sender_sp();
// Stored FP.
intptr_t* saved_fp = link();
address sender_pc = this->sender_pc();
CodeBlob* sender_cb = CodeCache::find_blob_unsafe(sender_pc);
assert(sender_cb, "sanity");
nmethod* sender_nm = sender_cb->as_nmethod_or_null();
if (sender_nm != NULL) {
// If the sender PC is a deoptimization point, get the original
// PC. For MethodHandle call site the unextended_sp is stored in
// saved_fp.
if (sender_nm->is_deopt_mh_entry(sender_pc)) {
DEBUG_ONLY(verify_deopt_mh_original_pc(sender_nm, saved_fp));
unextended_sp = saved_fp;
}
else if (sender_nm->is_deopt_entry(sender_pc)) {
DEBUG_ONLY(verify_deopt_original_pc(sender_nm, unextended_sp));
}
else if (sender_nm->is_method_handle_return(sender_pc)) {
unextended_sp = saved_fp;
}
}
// The interpreter and compiler(s) always save EBP/RBP in a known
// location on entry. We must record where that location is
// so that if EBP/RBP was live on callout from c2 we can find
@ -351,29 +401,52 @@ frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
}
#endif // AMD64
}
#endif /* COMPILER2 */
return frame(sp, unextended_sp, link(), sender_pc());
#endif // COMPILER2
return frame(sender_sp, unextended_sp, saved_fp, sender_pc);
}
//------------------------------sender_for_compiled_frame-----------------------
//------------------------------------------------------------------------------
// frame::sender_for_compiled_frame
frame frame::sender_for_compiled_frame(RegisterMap* map) const {
assert(map != NULL, "map must be set");
const bool c1_compiled = _cb->is_compiled_by_c1();
// frame owned by optimizing compiler
intptr_t* sender_sp = NULL;
assert(_cb->frame_size() >= 0, "must have non-zero frame size");
sender_sp = unextended_sp() + _cb->frame_size();
intptr_t* sender_sp = unextended_sp() + _cb->frame_size();
intptr_t* unextended_sp = sender_sp;
// On Intel the return_address is always the word on the stack
address sender_pc = (address) *(sender_sp-1);
// This is the saved value of ebp which may or may not really be an fp.
// it is only an fp if the sender is an interpreter frame (or c1?)
// This is the saved value of EBP which may or may not really be an FP.
// It is only an FP if the sender is an interpreter frame (or C1?).
intptr_t* saved_fp = (intptr_t*) *(sender_sp - frame::sender_sp_offset);
intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset);
// If we are returning to a compiled MethodHandle call site, the
// saved_fp will in fact be a saved value of the unextended SP. The
// simplest way to tell whether we are returning to such a call site
// is as follows:
CodeBlob* sender_cb = CodeCache::find_blob_unsafe(sender_pc);
assert(sender_cb, "sanity");
nmethod* sender_nm = sender_cb->as_nmethod_or_null();
if (sender_nm != NULL) {
// If the sender PC is a deoptimization point, get the original
// PC. For MethodHandle call site the unextended_sp is stored in
// saved_fp.
if (sender_nm->is_deopt_mh_entry(sender_pc)) {
DEBUG_ONLY(verify_deopt_mh_original_pc(sender_nm, saved_fp));
unextended_sp = saved_fp;
}
else if (sender_nm->is_deopt_entry(sender_pc)) {
DEBUG_ONLY(verify_deopt_original_pc(sender_nm, unextended_sp));
}
else if (sender_nm->is_method_handle_return(sender_pc)) {
unextended_sp = saved_fp;
}
}
if (map->update_map()) {
// Tell GC to use argument oopmaps for some runtime stubs that need it.
@ -383,7 +456,7 @@ frame frame::sender_for_compiled_frame(RegisterMap* map) const {
if (_cb->oop_maps() != NULL) {
OopMapSet::update_register_map(this, map);
}
// Since the prolog does the save and restore of epb there is no oopmap
// Since the prolog does the save and restore of EBP there is no oopmap
// for it so we must fill in its location as if there was an oopmap entry
// since if our caller was compiled code there could be live jvm state in it.
map->set_location(rbp->as_VMReg(), (address) (sender_sp - frame::sender_sp_offset));
@ -399,9 +472,12 @@ frame frame::sender_for_compiled_frame(RegisterMap* map) const {
}
assert(sender_sp != sp(), "must have changed");
return frame(sender_sp, saved_fp, sender_pc);
return frame(sender_sp, unextended_sp, saved_fp, sender_pc);
}
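The unextended-SP selection above appears twice (interpreter and compiled senders); isolated, it reads as below (a sketch; booleans stand in for the nmethod::is_* queries):

#include <cstdint>

intptr_t* choose_unextended_sp(bool at_deopt_mh_entry,
                               bool at_deopt_entry,
                               bool at_mh_return,
                               intptr_t* unextended_sp,
                               intptr_t* saved_fp) {
  // At a MethodHandle call site the word that normally holds the saved
  // EBP/RBP holds the caller's unextended SP instead.
  if (at_deopt_mh_entry) return saved_fp;
  // A plain deopt entry keeps the SP computed from the frame size; the
  // DEBUG_ONLY call merely validates it.
  if (at_deopt_entry)    return unextended_sp;
  if (at_mh_return)      return saved_fp;
  return unextended_sp;
}

int main() {
  intptr_t stack[2];
  // deopt entry at a MethodHandle call site: pick the saved-FP slot
  return choose_unextended_sp(true, false, false, &stack[0], &stack[1])
         == &stack[1] ? 0 : 1;
}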
//------------------------------------------------------------------------------
// frame::sender
frame frame::sender(RegisterMap* map) const {
// Default is we don't have to follow them. The sender_for_xxx will
// update it accordingly

@ -163,6 +163,14 @@
return (intptr_t*) addr_at(offset);
}
#if ASSERT
// Used in frame::sender_for_{interpreter,compiled}_frame
static void verify_deopt_original_pc( nmethod* nm, intptr_t* unextended_sp, bool is_method_handle_return = false);
static void verify_deopt_mh_original_pc(nmethod* nm, intptr_t* unextended_sp) {
verify_deopt_original_pc(nm, unextended_sp, true);
}
#endif
public:
// Constructors


@ -1,5 +1,5 @@
/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,32 +35,35 @@ inline frame::frame() {
_deopt_state = unknown;
}
inline frame:: frame(intptr_t* sp, intptr_t* fp, address pc) {
inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
_sp = sp;
_unextended_sp = sp;
_fp = fp;
_pc = pc;
assert(pc != NULL, "no pc?");
_cb = CodeCache::find_blob(pc);
_deopt_state = not_deoptimized;
if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
_pc = (((nmethod*)_cb)->get_original_pc(this));
address original_pc = nmethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
_pc = original_pc;
_deopt_state = is_deoptimized;
} else {
_deopt_state = not_deoptimized;
}
}
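// Editorial aside -- a hedged reconstruction of the helper this constructor
// now leans on; the real definition lives with nmethod, so treat this as the
// assumed shape rather than the authoritative code:
inline address nmethod_get_deopt_original_pc_sketch(const frame* fr) {
  if (fr->cb() == NULL)  return NULL;          // no code blob => not deopted
  nmethod* nm = fr->cb()->as_nmethod_or_null();
  if (nm != NULL && nm->is_deopt_pc(fr->pc()))
    return nm->get_original_pc(fr);            // recover the pre-deopt PC
  return NULL;
}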
inline frame:: frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
_sp = sp;
_unextended_sp = unextended_sp;
_fp = fp;
_pc = pc;
assert(pc != NULL, "no pc?");
_cb = CodeCache::find_blob(pc);
_deopt_state = not_deoptimized;
if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
_pc = (((nmethod*)_cb)->get_original_pc(this));
address original_pc = nmethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
_pc = original_pc;
assert(((nmethod*)_cb)->code_contains(_pc), "original PC must be in nmethod");
_deopt_state = is_deoptimized;
} else {
_deopt_state = not_deoptimized;
@ -86,9 +89,9 @@ inline frame::frame(intptr_t* sp, intptr_t* fp) {
_cb = CodeCache::find_blob(_pc);
_deopt_state = not_deoptimized;
if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
_pc = (((nmethod*)_cb)->get_original_pc(this));
address original_pc = nmethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
_pc = original_pc;
_deopt_state = is_deoptimized;
} else {
_deopt_state = not_deoptimized;
@ -225,11 +228,13 @@ inline methodOop* frame::interpreter_frame_method_addr() const {
// top of expression stack
inline intptr_t* frame::interpreter_frame_tos_address() const {
intptr_t* last_sp = interpreter_frame_last_sp();
if (last_sp == NULL ) {
if (last_sp == NULL) {
return sp();
} else {
// sp() may have been extended by an adapter
assert(last_sp < fp() && last_sp >= sp(), "bad tos");
// sp() may have been extended or shrunk by an adapter. At least
// check that we don't fall behind the legal region.
// For top deoptimized frame last_sp == interpreter_frame_monitor_end.
assert(last_sp <= (intptr_t*) interpreter_frame_monitor_end(), "bad tos");
return last_sp;
}
}


@ -22,17 +22,16 @@
*
*/
//
// Sets the default values for platform dependent flags used by the runtime system.
// (see globals.hpp)
//
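// Editorial note -- roughly what each line below expands to in a product
// build (a sketch of the globals.hpp machinery; debug builds differ):
//   #define define_pd_global(type, name, value) \
//     const type pd_##name = value;
// i.e. every entry pins the platform-dependent default that the shared
// globals.hpp then uses as the flag's initial value.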
define_pd_global(bool, ConvertSleepToYield, true);
define_pd_global(bool, ShareVtableStubs, true);
define_pd_global(bool, CountInterpCalls, true);
define_pd_global(bool, ConvertSleepToYield, true);
define_pd_global(bool, ShareVtableStubs, true);
define_pd_global(bool, CountInterpCalls, true);
define_pd_global(bool, NeedsDeoptSuspend, false); // only register window machines need this
define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks
define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs passed to check cast
define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks
define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs passed to check cast
// See 4827828 for this change. There is no globals_core_i486.hpp. I can't
// assign a different value for C2 without touching a number of files. Use
@ -42,29 +41,24 @@ define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NUL
// the uep and the vep don't get real alignment but just slop on by,
// only assured that the entry instruction meets the 5-byte size requirement.
#ifdef COMPILER2
define_pd_global(intx, CodeEntryAlignment, 32);
define_pd_global(intx, CodeEntryAlignment, 32);
#else
define_pd_global(intx, CodeEntryAlignment, 16);
define_pd_global(intx, CodeEntryAlignment, 16);
#endif // COMPILER2
define_pd_global(intx, InlineFrequencyCount, 100);
define_pd_global(intx, InlineSmallCode, 1000);
define_pd_global(bool, NeedsDeoptSuspend, false); // only register window machines need this
define_pd_global(uintx, TLABSize, 0);
define_pd_global(intx, StackYellowPages, 2);
define_pd_global(intx, StackRedPages, 1);
#ifdef AMD64
define_pd_global(uintx, NewSize, ScaleForWordSize(2048 * K));
// Very large C++ stack frames occur in solaris-amd64 optimized builds
// due to lack of optimization caused by C++ compiler bugs
define_pd_global(intx, StackShadowPages, SOLARIS_ONLY(20) NOT_SOLARIS(6) DEBUG_ONLY(+2));
#else
define_pd_global(uintx, NewSize, 1024 * K);
define_pd_global(intx, StackShadowPages, 3 DEBUG_ONLY(+1));
#endif // AMD64
define_pd_global(intx, InlineFrequencyCount, 100);
define_pd_global(intx, InlineSmallCode, 1000);
define_pd_global(intx, PreInflateSpin, 10);
define_pd_global(intx, StackYellowPages, 2);
define_pd_global(intx, StackRedPages, 1);
define_pd_global(intx, PreInflateSpin, 10);
define_pd_global(bool, RewriteBytecodes, true);
define_pd_global(bool, RewriteFrequentPairs, true);


@ -1,5 +1,5 @@
/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -196,6 +196,9 @@ void InterpreterMacroAssembler::get_cache_index_at_bcp(Register reg, int bcp_off
} else {
assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic");
movl(reg, Address(rsi, bcp_offset));
// Check if the secondary index definition is still ~x, otherwise
// we have to change the following assembler code to calculate the
// plain index.
assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
notl(reg); // convert to plain index
}
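// Editorial aside: the assert above pins the assumed encoding -- a secondary
// (invokedynamic) cache index is stored as the bitwise complement of the
// plain index, so decoding is a single NOT (sketch name is hypothetical):
static int decode_secondary_index_sketch(int encoded) { return ~encoded; }
// decode_secondary_index_sketch(~123) == 123, matching the assert above.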
@ -1236,17 +1239,19 @@ void InterpreterMacroAssembler::profile_virtual_call(Register receiver, Register
// If no method data exists, go to profile_continue.
test_method_data_pointer(mdp, profile_continue);
// We are making a call. Increment the count.
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
Label skip_receiver_profile;
if (receiver_can_be_null) {
Label not_null;
testptr(receiver, receiver);
jcc(Assembler::zero, skip_receiver_profile);
jccb(Assembler::notZero, not_null);
// We are making a call. Increment the count for null receiver.
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
jmp(skip_receiver_profile);
bind(not_null);
}
// Record the receiver type.
record_klass_in_profile(receiver, mdp, reg2);
record_klass_in_profile(receiver, mdp, reg2, true);
bind(skip_receiver_profile);
// The method data pointer needs to be updated to reflect the new target.
@ -1260,10 +1265,14 @@ void InterpreterMacroAssembler::profile_virtual_call(Register receiver, Register
void InterpreterMacroAssembler::record_klass_in_profile_helper(
Register receiver, Register mdp,
Register reg2,
int start_row, Label& done) {
if (TypeProfileWidth == 0)
Register reg2, int start_row,
Label& done, bool is_virtual_call) {
if (TypeProfileWidth == 0) {
if (is_virtual_call) {
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
}
return;
}
int last_row = VirtualCallData::row_limit() - 1;
assert(start_row <= last_row, "must be work left to do");
@ -1291,19 +1300,28 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
bind(next_test);
if (row == start_row) {
Label found_null;
// Failed the equality check on receiver[n]... Test for null.
testptr(reg2, reg2);
if (start_row == last_row) {
// The only thing left to do is handle the null case.
jcc(Assembler::notZero, done);
if (is_virtual_call) {
jccb(Assembler::zero, found_null);
// Receiver did not match any saved receiver and there is no empty row for it.
// Increment total counter to indicate polymorphic case.
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
jmp(done);
bind(found_null);
} else {
jcc(Assembler::notZero, done);
}
break;
}
// Since null is rare, make it the branch-taken case.
Label found_null;
jcc(Assembler::zero, found_null);
// Put all the "Case 3" tests here.
record_klass_in_profile_helper(receiver, mdp, reg2, start_row + 1, done);
record_klass_in_profile_helper(receiver, mdp, reg2, start_row + 1, done, is_virtual_call);
// Found a null. Keep searching for a matching receiver,
// but remember that this is an empty (unused) slot.
@ -1320,16 +1338,18 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
movptr(reg2, (int32_t)DataLayout::counter_increment);
set_mdp_data_at(mdp, count_offset, reg2);
jmp(done);
if (start_row > 0) {
jmp(done);
}
}
void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
Register mdp,
Register reg2) {
Register mdp, Register reg2,
bool is_virtual_call) {
assert(ProfileInterpreter, "must be profiling");
Label done;
record_klass_in_profile_helper(receiver, mdp, reg2, 0, done);
record_klass_in_profile_helper(receiver, mdp, reg2, 0, done, is_virtual_call);
bind (done);
}
@ -1422,7 +1442,7 @@ void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass,
mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
// Record the object type.
record_klass_in_profile(klass, mdp, reg2);
record_klass_in_profile(klass, mdp, reg2, false);
assert(reg2 == rdi, "we know how to fix this blown reg");
restore_locals(); // Restore EDI
}


@ -213,10 +213,10 @@ class InterpreterMacroAssembler: public MacroAssembler {
Label& not_equal_continue);
void record_klass_in_profile(Register receiver, Register mdp,
Register reg2);
Register reg2, bool is_virtual_call);
void record_klass_in_profile_helper(Register receiver, Register mdp,
Register reg2,
int start_row, Label& done);
Register reg2, int start_row,
Label& done, bool is_virtual_call);
void update_mdp_by_offset(Register mdp_in, int offset_of_offset);
void update_mdp_by_offset(Register mdp_in, Register reg, int offset_of_disp);


@ -1,5 +1,5 @@
/*
* Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -185,12 +185,30 @@ void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(
}
void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
int bcp_offset,
bool giant_index) {
assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
if (!giant_index) {
load_unsigned_short(index, Address(r13, bcp_offset));
} else {
assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic");
movl(index, Address(r13, bcp_offset));
// Check if the secondary index definition is still ~x, otherwise
// we have to change the following assembler code to calculate the
// plain index.
assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
notl(index); // convert to plain index
}
}
void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
Register index,
int bcp_offset) {
assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
int bcp_offset,
bool giant_index) {
assert(cache != index, "must use different registers");
load_unsigned_short(index, Address(r13, bcp_offset));
get_cache_index_at_bcp(index, bcp_offset, giant_index);
movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
// convert from field index to ConstantPoolCacheEntry index
@ -200,10 +218,10 @@ void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
Register tmp,
int bcp_offset) {
assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
int bcp_offset,
bool giant_index) {
assert(cache != tmp, "must use different register");
load_unsigned_short(tmp, Address(r13, bcp_offset));
get_cache_index_at_bcp(tmp, bcp_offset, giant_index);
assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
// convert from field index to ConstantPoolCacheEntry index
// and from word offset to byte offset
@ -1236,18 +1254,28 @@ void InterpreterMacroAssembler::profile_final_call(Register mdp) {
void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
Register mdp,
Register reg2) {
Register reg2,
bool receiver_can_be_null) {
if (ProfileInterpreter) {
Label profile_continue;
// If no method data exists, go to profile_continue.
test_method_data_pointer(mdp, profile_continue);
// We are making a call. Increment the count.
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
Label skip_receiver_profile;
if (receiver_can_be_null) {
Label not_null;
testptr(receiver, receiver);
jccb(Assembler::notZero, not_null);
// We are making a call. Increment the count for null receiver.
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
jmp(skip_receiver_profile);
bind(not_null);
}
// Record the receiver type.
record_klass_in_profile(receiver, mdp, reg2);
record_klass_in_profile(receiver, mdp, reg2, true);
bind(skip_receiver_profile);
// The method data pointer needs to be updated to reflect the new target.
update_mdp_by_constant(mdp,
@ -1270,10 +1298,14 @@ void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
// See below for example code.
void InterpreterMacroAssembler::record_klass_in_profile_helper(
Register receiver, Register mdp,
Register reg2,
int start_row, Label& done) {
if (TypeProfileWidth == 0)
Register reg2, int start_row,
Label& done, bool is_virtual_call) {
if (TypeProfileWidth == 0) {
if (is_virtual_call) {
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
}
return;
}
int last_row = VirtualCallData::row_limit() - 1;
assert(start_row <= last_row, "must be work left to do");
@ -1301,19 +1333,28 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
bind(next_test);
if (test_for_null_also) {
Label found_null;
// Failed the equality check on receiver[n]... Test for null.
testptr(reg2, reg2);
if (start_row == last_row) {
// The only thing left to do is handle the null case.
jcc(Assembler::notZero, done);
if (is_virtual_call) {
jccb(Assembler::zero, found_null);
// Receiver did not match any saved receiver and there is no empty row for it.
// Increment total counter to indicate polymorphic case.
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
jmp(done);
bind(found_null);
} else {
jcc(Assembler::notZero, done);
}
break;
}
// Since null is rare, make it the branch-taken case.
Label found_null;
jcc(Assembler::zero, found_null);
// Put all the "Case 3" tests here.
record_klass_in_profile_helper(receiver, mdp, reg2, start_row + 1, done);
record_klass_in_profile_helper(receiver, mdp, reg2, start_row + 1, done, is_virtual_call);
// Found a null. Keep searching for a matching receiver,
// but remember that this is an empty (unused) slot.
@ -1330,7 +1371,9 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
movl(reg2, DataLayout::counter_increment);
set_mdp_data_at(mdp, count_offset, reg2);
jmp(done);
if (start_row > 0) {
jmp(done);
}
}
// Example state machine code for three profile rows:
@ -1342,7 +1385,7 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
// if (row[1].rec != NULL) {
// // degenerate decision tree, rooted at row[2]
// if (row[2].rec == rec) { row[2].incr(); goto done; }
// if (row[2].rec != NULL) { goto done; } // overflow
// if (row[2].rec != NULL) { count.incr(); goto done; } // overflow
// row[2].init(rec); goto done;
// } else {
// // remember row[1] is empty
@ -1355,14 +1398,15 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
// if (row[2].rec == rec) { row[2].incr(); goto done; }
// row[0].init(rec); goto done;
// }
// done:
void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
Register mdp,
Register reg2) {
Register mdp, Register reg2,
bool is_virtual_call) {
assert(ProfileInterpreter, "must be profiling");
Label done;
record_klass_in_profile_helper(receiver, mdp, reg2, 0, done);
record_klass_in_profile_helper(receiver, mdp, reg2, 0, done, is_virtual_call);
bind (done);
}
@ -1458,7 +1502,7 @@ void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass,
mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
// Record the object type.
record_klass_in_profile(klass, mdp, reg2);
record_klass_in_profile(klass, mdp, reg2, false);
}
update_mdp_by_constant(mdp, mdp_delta);


@ -95,9 +95,10 @@ class InterpreterMacroAssembler: public MacroAssembler {
void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);
void get_cache_and_index_at_bcp(Register cache, Register index,
int bcp_offset);
int bcp_offset, bool giant_index = false);
void get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
int bcp_offset);
int bcp_offset, bool giant_index = false);
void get_cache_index_at_bcp(Register index, int bcp_offset, bool giant_index = false);
void pop_ptr(Register r = rax);
@ -221,10 +222,10 @@ class InterpreterMacroAssembler: public MacroAssembler {
Label& not_equal_continue);
void record_klass_in_profile(Register receiver, Register mdp,
Register reg2);
Register reg2, bool is_virtual_call);
void record_klass_in_profile_helper(Register receiver, Register mdp,
Register reg2,
int start_row, Label& done);
Register reg2, int start_row,
Label& done, bool is_virtual_call);
void update_mdp_by_offset(Register mdp_in, int offset_of_offset);
void update_mdp_by_offset(Register mdp_in, Register reg, int offset_of_disp);
@ -236,7 +237,8 @@ class InterpreterMacroAssembler: public MacroAssembler {
void profile_call(Register mdp);
void profile_final_call(Register mdp);
void profile_virtual_call(Register receiver, Register mdp,
Register scratch2);
Register scratch2,
bool receiver_can_be_null = false);
void profile_ret(Register return_bci, Register mdp);
void profile_null_seen(Register mdp);
void profile_typecheck(Register mdp, Register klass, Register scratch);


@ -277,12 +277,11 @@ address InterpreterGenerator::generate_abstract_entry(void) {
address entry_point = __ pc();
// abstract method entry
// remove return address. Not really needed, since exception
// handling throws away expression stack
__ pop(rbx);
// adjust stack to what a normal return would do
__ mov(rsp, r13);
// pop return address, reset last_sp to NULL
__ empty_expression_stack();
__ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
__ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
// throw exception
__ call_VM(noreg, CAST_FROM_FN_PTR(address,
@ -300,7 +299,10 @@ address InterpreterGenerator::generate_method_handle_entry(void) {
if (!EnableMethodHandles) {
return generate_abstract_entry();
}
return generate_abstract_entry(); //6815692//
address entry_point = MethodHandles::generate_method_handle_interpreter_entry(_masm);
return entry_point;
}


@ -1,5 +1,5 @@
/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -60,14 +60,14 @@ MethodHandleEntry* MethodHandleEntry::finish_compiled_entry(MacroAssembler* _mas
}
#ifdef ASSERT
static void verify_argslot(MacroAssembler* _masm, Register rax_argslot,
static void verify_argslot(MacroAssembler* _masm, Register argslot_reg,
const char* error_message) {
// Verify that argslot lies within (rsp, rbp].
Label L_ok, L_bad;
__ cmpptr(rax_argslot, rbp);
__ jcc(Assembler::above, L_bad);
__ cmpptr(rsp, rax_argslot);
__ jcc(Assembler::below, L_ok);
__ cmpptr(argslot_reg, rbp);
__ jccb(Assembler::above, L_bad);
__ cmpptr(rsp, argslot_reg);
__ jccb(Assembler::below, L_ok);
__ bind(L_bad);
__ stop(error_message);
__ bind(L_ok);
@ -136,9 +136,9 @@ void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
if (arg_slots.is_register()) {
Label L_ok, L_bad;
__ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD);
__ jcc(Assembler::greater, L_bad);
__ jccb(Assembler::greater, L_bad);
__ testl(arg_slots.as_register(), -stack_move_unit() - 1);
__ jcc(Assembler::zero, L_ok);
__ jccb(Assembler::zero, L_ok);
__ bind(L_bad);
__ stop("assert arg_slots <= 0 and clear low bits");
__ bind(L_ok);
@ -173,27 +173,11 @@ void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
__ movptr(Address(rdx_temp, arg_slots, Address::times_ptr), rbx_temp);
__ addptr(rdx_temp, wordSize);
__ cmpptr(rdx_temp, rax_argslot);
__ jcc(Assembler::less, loop);
__ jccb(Assembler::less, loop);
}
// Now move the argslot down, to point to the opened-up space.
__ lea(rax_argslot, Address(rax_argslot, arg_slots, Address::times_ptr));
if (TaggedStackInterpreter && arg_mask != _INSERT_NO_MASK) {
// The caller has specified a bitmask of tags to put into the opened space.
// This only works when the arg_slots value is an assembly-time constant.
int constant_arg_slots = arg_slots.as_constant() / stack_move_unit();
int tag_offset = Interpreter::tag_offset_in_bytes() - Interpreter::value_offset_in_bytes();
for (int slot = 0; slot < constant_arg_slots; slot++) {
BasicType slot_type = ((arg_mask & (1 << slot)) == 0 ? T_OBJECT : T_INT);
int slot_offset = Interpreter::stackElementSize() * slot;
Address tag_addr(rax_argslot, slot_offset + tag_offset);
__ movptr(tag_addr, frame::tag_for_basic_type(slot_type));
}
// Note that the new argument slots are tagged properly but contain
// garbage at this point. The value portions must be initialized
// by the caller. (Especially references!)
}
}
// Helper to remove argument slots from the stack.
@ -206,24 +190,15 @@ void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
(!arg_slots.is_register() ? rsp : arg_slots.as_register()));
#ifdef ASSERT
{
// Verify that [argslot..argslot+size) lies within (rsp, rbp).
Label L_ok, L_bad;
__ lea(rbx_temp, Address(rax_argslot, arg_slots, Address::times_ptr));
__ cmpptr(rbx_temp, rbp);
__ jcc(Assembler::above, L_bad);
__ cmpptr(rsp, rax_argslot);
__ jcc(Assembler::below, L_ok);
__ bind(L_bad);
__ stop("deleted argument(s) must fall within current frame");
__ bind(L_ok);
}
// Verify that [argslot..argslot+size) lies within (rsp, rbp).
__ lea(rbx_temp, Address(rax_argslot, arg_slots, Address::times_ptr));
verify_argslot(_masm, rbx_temp, "deleted argument(s) must fall within current frame");
if (arg_slots.is_register()) {
Label L_ok, L_bad;
__ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD);
__ jcc(Assembler::less, L_bad);
__ jccb(Assembler::less, L_bad);
__ testl(arg_slots.as_register(), -stack_move_unit() - 1);
__ jcc(Assembler::zero, L_ok);
__ jccb(Assembler::zero, L_ok);
__ bind(L_bad);
__ stop("assert arg_slots >= 0 and clear low bits");
__ bind(L_ok);
@ -258,7 +233,7 @@ void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
__ movptr(Address(rdx_temp, arg_slots, Address::times_ptr), rbx_temp);
__ addptr(rdx_temp, -wordSize);
__ cmpptr(rdx_temp, rsp);
__ jcc(Assembler::greaterEqual, loop);
__ jccb(Assembler::greaterEqual, loop);
}
// Now move the argslot up, to point to the just-copied block.
@ -268,8 +243,9 @@ void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
}
#ifndef PRODUCT
extern "C" void print_method_handle(oop mh);
void trace_method_handle_stub(const char* adaptername,
oopDesc* mh,
oop mh,
intptr_t* entry_sp,
intptr_t* saved_sp,
intptr_t* saved_bp) {
@ -280,6 +256,7 @@ void trace_method_handle_stub(const char* adaptername,
adaptername, (intptr_t)mh, (intptr_t)entry_sp, (intptr_t)(saved_sp - entry_sp), (intptr_t)(base_sp - last_sp), (intptr_t)saved_bp);
if (last_sp != saved_sp)
printf("*** last_sp="INTPTR_FORMAT"\n", (intptr_t)last_sp);
if (Verbose) print_method_handle(mh);
}
#endif //PRODUCT
@ -319,12 +296,6 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
Address rcx_amh_conversion( rcx_recv, sun_dyn_AdapterMethodHandle::conversion_offset_in_bytes() );
Address vmarg; // __ argument_address(vmargslot)
int tag_offset = -1;
if (TaggedStackInterpreter) {
tag_offset = Interpreter::tag_offset_in_bytes() - Interpreter::value_offset_in_bytes();
assert(tag_offset = wordSize, "stack grows as expected");
}
const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
if (have_entry(ek)) {
@ -370,11 +341,8 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ mov(rsp, rsi); // cut the stack back to where the caller started
// Repush the arguments as if coming from the interpreter.
if (TaggedStackInterpreter) __ push(frame::tag_for_basic_type(T_INT));
__ push(rdx_code);
if (TaggedStackInterpreter) __ push(frame::tag_for_basic_type(T_OBJECT));
__ push(rcx_fail);
if (TaggedStackInterpreter) __ push(frame::tag_for_basic_type(T_OBJECT));
__ push(rax_want);
Register rbx_method = rbx_temp;
@ -382,11 +350,11 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
// FIXME: fill in _raise_exception_method with a suitable sun.dyn method
__ movptr(rbx_method, ExternalAddress((address) &_raise_exception_method));
__ testptr(rbx_method, rbx_method);
__ jcc(Assembler::zero, no_method);
__ jccb(Assembler::zero, no_method);
int jobject_oop_offset = 0;
__ movptr(rbx_method, Address(rbx_method, jobject_oop_offset)); // dereference the jobject
__ testptr(rbx_method, rbx_method);
__ jcc(Assembler::zero, no_method);
__ jccb(Assembler::zero, no_method);
__ verify_oop(rbx_method);
__ push(rdi_pc); // and restore caller PC
__ jmp(rbx_method_fie);
@ -395,7 +363,6 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
// Do something that at least causes a valid throw from the interpreter.
__ bind(no_method);
__ pop(rax_want);
if (TaggedStackInterpreter) __ pop(rcx_fail);
__ pop(rcx_fail);
__ push(rax_want);
__ push(rcx_fail);
@ -448,7 +415,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
rbx_index, Address::times_ptr,
base + vtableEntry::method_offset_in_bytes());
Register rbx_method = rbx_temp;
__ movl(rbx_method, vtable_entry_addr);
__ movptr(rbx_method, vtable_entry_addr);
__ verify_oop(rbx_method);
__ jmp(rbx_method_fie);
@ -508,18 +475,10 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
case _bound_long_direct_mh:
{
bool direct_to_method = (ek >= _bound_ref_direct_mh);
BasicType arg_type = T_ILLEGAL;
if (ek == _bound_long_mh || ek == _bound_long_direct_mh) {
arg_type = T_LONG;
} else if (ek == _bound_int_mh || ek == _bound_int_direct_mh) {
arg_type = T_INT;
} else {
assert(ek == _bound_ref_mh || ek == _bound_ref_direct_mh, "must be ref");
arg_type = T_OBJECT;
}
int arg_slots = type2size[arg_type];
int arg_mask = (arg_type == T_OBJECT ? _INSERT_REF_MASK :
arg_slots == 1 ? _INSERT_INT_MASK : _INSERT_LONG_MASK);
BasicType arg_type = T_ILLEGAL;
int arg_mask = _INSERT_NO_MASK;
int arg_slots = -1;
get_ek_bound_mh_info(ek, arg_type, arg_mask, arg_slots);
// make room for the new argument:
__ movl(rax_argslot, rcx_bmh_vmargslot);
@ -533,16 +492,15 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
if (arg_type == T_OBJECT) {
__ movptr(Address(rax_argslot, 0), rbx_temp);
} else {
__ load_sized_value(rbx_temp, prim_value_addr,
__ load_sized_value(rdx_temp, prim_value_addr,
type2aelembytes(arg_type), is_signed_subword_type(arg_type));
__ movptr(Address(rax_argslot, 0), rbx_temp);
__ movptr(Address(rax_argslot, 0), rdx_temp);
#ifndef _LP64
if (arg_slots == 2) {
__ movl(rbx_temp, prim_value_addr.plus_disp(wordSize));
__ movl(Address(rax_argslot, Interpreter::stackElementSize()), rbx_temp);
__ movl(rdx_temp, prim_value_addr.plus_disp(wordSize));
__ movl(Address(rax_argslot, Interpreter::stackElementSize()), rdx_temp);
}
#endif //_LP64
break;
}
if (direct_to_method) {
@ -583,8 +541,8 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
Label done;
__ movptr(rdx_temp, vmarg);
__ testl(rdx_temp, rdx_temp);
__ jcc(Assembler::zero, done); // no cast if null
__ testptr(rdx_temp, rdx_temp);
__ jccb(Assembler::zero, done); // no cast if null
__ load_klass(rdx_temp, rdx_temp);
// live at this point:
@ -659,13 +617,10 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
}
break;
default:
assert(false, "");
ShouldNotReachHere();
}
goto finish_int_conversion;
}
finish_int_conversion:
{
// Do the requested conversion and store the value.
Register rbx_vminfo = rbx_temp;
__ movl(rbx_vminfo, rcx_amh_conversion);
assert(CONV_VMINFO_SHIFT == 0, "preshifted");
@ -675,24 +630,24 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
// (now we are done with the old MH)
// original 32-bit vmdata word must be of this form:
// | MBZ:16 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 |
__ xchgl(rcx, rbx_vminfo); // free rcx for shifts
// | MBZ:6 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 |
__ xchgptr(rcx, rbx_vminfo); // free rcx for shifts
__ shll(rdx_temp /*, rcx*/);
Label zero_extend, done;
__ testl(rcx, CONV_VMINFO_SIGN_FLAG);
__ jcc(Assembler::zero, zero_extend);
__ jccb(Assembler::zero, zero_extend);
// this path is taken for int->byte, int->short
__ sarl(rdx_temp /*, rcx*/);
__ jmp(done);
__ jmpb(done);
__ bind(zero_extend);
// this is taken for int->char
__ shrl(rdx_temp /*, rcx*/);
__ bind(done);
__ movptr(vmarg, rdx_temp);
__ xchgl(rcx, rbx_vminfo); // restore rcx_recv
__ movl(vmarg, rdx_temp); // Store the value.
__ xchgptr(rcx, rbx_vminfo); // restore rcx_recv
__ jump_to_method_handle_entry(rcx_recv, rdx_temp);
}
@ -714,9 +669,14 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
switch (ek) {
case _adapter_opt_i2l:
{
#ifdef _LP64
__ movslq(rdx_temp, vmarg1); // Load sign-extended
__ movq(vmarg1, rdx_temp); // Store into first slot
#else
__ movl(rdx_temp, vmarg1);
__ sarl(rdx_temp, 31); // __ extend_sign()
__ sarl(rdx_temp, BitsPerInt - 1); // __ extend_sign()
__ movl(vmarg2, rdx_temp); // store second word
#endif
}
break;
case _adapter_opt_unboxl:
@ -726,14 +686,19 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_LONG);
assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE), "");
__ null_check(rdx_temp, value_offset);
#ifdef _LP64
__ movq(rbx_temp, Address(rdx_temp, value_offset));
__ movq(vmarg1, rbx_temp);
#else
__ movl(rbx_temp, Address(rdx_temp, value_offset + 0*BytesPerInt));
__ movl(rdx_temp, Address(rdx_temp, value_offset + 1*BytesPerInt));
__ movl(vmarg1, rbx_temp);
__ movl(vmarg2, rdx_temp);
#endif
}
break;
default:
assert(false, "");
ShouldNotReachHere();
}
__ movptr(rcx_recv, rcx_mh_vmtarget);
@ -767,19 +732,8 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
if (ek == _adapter_opt_f2d) {
__ fld_s(vmarg); // load float to ST0
__ fstp_s(vmarg); // store single
} else if (!TaggedStackInterpreter) {
__ fld_d(vmarg); // load double to ST0
__ fstp_s(vmarg); // store single
} else {
Address vmarg_tag = vmarg.plus_disp(tag_offset);
Address vmarg2 = vmarg.plus_disp(Interpreter::stackElementSize());
// vmarg2_tag does not participate in this code
Register rbx_tag = rbx_temp;
__ movl(rbx_tag, vmarg_tag); // preserve tag
__ movl(rdx_temp, vmarg2); // get second word of double
__ movl(vmarg_tag, rdx_temp); // align with first word
__ fld_d(vmarg); // load double to ST0
__ movl(vmarg_tag, rbx_tag); // restore tag
__ fstp_s(vmarg); // store single
}
#endif //_LP64
@ -811,19 +765,8 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
case _adapter_opt_rot_2_up:
case _adapter_opt_rot_2_down:
{
int rotate = 0, swap_slots = 0;
switch ((int)ek) {
case _adapter_opt_swap_1: swap_slots = 1; break;
case _adapter_opt_swap_2: swap_slots = 2; break;
case _adapter_opt_rot_1_up: swap_slots = 1; rotate++; break;
case _adapter_opt_rot_1_down: swap_slots = 1; rotate--; break;
case _adapter_opt_rot_2_up: swap_slots = 2; rotate++; break;
case _adapter_opt_rot_2_down: swap_slots = 2; rotate--; break;
default: assert(false, "");
}
// the real size of the move must be doubled if TaggedStackInterpreter:
int swap_bytes = (int)( swap_slots * Interpreter::stackElementWords() * wordSize );
int swap_bytes = 0, rotate = 0;
get_ek_adapter_opt_swap_rot_info(ek, swap_bytes, rotate);
// 'argslot' is the position of the first argument to swap
__ movl(rax_argslot, rcx_amh_vmargslot);
@ -861,7 +804,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
// Verify that argslot > destslot, by at least swap_bytes.
Label L_ok;
__ cmpptr(rax_argslot, rbx_destslot);
__ jcc(Assembler::aboveEqual, L_ok);
__ jccb(Assembler::aboveEqual, L_ok);
__ stop("source must be above destination (upward rotation)");
__ bind(L_ok);
}
@ -877,7 +820,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ movptr(Address(rax_argslot, swap_bytes), rdx_temp);
__ addptr(rax_argslot, -wordSize);
__ cmpptr(rax_argslot, rbx_destslot);
__ jcc(Assembler::aboveEqual, loop);
__ jccb(Assembler::aboveEqual, loop);
} else {
__ addptr(rax_argslot, swap_bytes);
#ifdef ASSERT
@ -885,7 +828,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
// Verify that argslot < destslot, by at least swap_bytes.
Label L_ok;
__ cmpptr(rax_argslot, rbx_destslot);
__ jcc(Assembler::belowEqual, L_ok);
__ jccb(Assembler::belowEqual, L_ok);
__ stop("source must be below destination (downward rotation)");
__ bind(L_ok);
}
@ -901,7 +844,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ movptr(Address(rax_argslot, -swap_bytes), rdx_temp);
__ addptr(rax_argslot, wordSize);
__ cmpptr(rax_argslot, rbx_destslot);
__ jcc(Assembler::belowEqual, loop);
__ jccb(Assembler::belowEqual, loop);
}
// pop the original first chunk into the destination slot, now free
@ -924,8 +867,8 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
// 'stack_move' is negative number of words to duplicate
Register rdx_stack_move = rdx_temp;
__ movl(rdx_stack_move, rcx_amh_conversion);
__ sarl(rdx_stack_move, CONV_STACK_MOVE_SHIFT);
__ movl2ptr(rdx_stack_move, rcx_amh_conversion);
__ sarptr(rdx_stack_move, CONV_STACK_MOVE_SHIFT);
int argslot0_num = 0;
Address argslot0 = __ argument_address(RegisterOrConstant(argslot0_num));
@ -967,7 +910,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ addptr(rax_argslot, wordSize);
__ addptr(rdx_newarg, wordSize);
__ cmpptr(rdx_newarg, rbx_oldarg);
__ jcc(Assembler::less, loop);
__ jccb(Assembler::less, loop);
__ pop(rdi); // restore temp
@ -987,8 +930,8 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
// 'stack_move' is number of words to drop
Register rdi_stack_move = rdi;
__ movl(rdi_stack_move, rcx_amh_conversion);
__ sarl(rdi_stack_move, CONV_STACK_MOVE_SHIFT);
__ movl2ptr(rdi_stack_move, rcx_amh_conversion);
__ sarptr(rdi_stack_move, CONV_STACK_MOVE_SHIFT);
remove_arg_slots(_masm, rdi_stack_move,
rax_argslot, rbx_temp, rdx_temp);
@ -1013,11 +956,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
case _adapter_opt_spread_more:
{
// spread an array out into a group of arguments
int length_constant = -1;
switch (ek) {
case _adapter_opt_spread_0: length_constant = 0; break;
case _adapter_opt_spread_1: length_constant = 1; break;
}
int length_constant = get_ek_adapter_opt_spread_info(ek);
// find the address of the array argument
__ movl(rax_argslot, rcx_amh_vmargslot);
@ -1078,8 +1017,8 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ lea(rdx_argslot_limit, Address(rax_argslot, Interpreter::stackElementSize()));
// 'stack_move' is negative number of words to insert
Register rdi_stack_move = rdi;
__ movl(rdi_stack_move, rcx_amh_conversion);
__ sarl(rdi_stack_move, CONV_STACK_MOVE_SHIFT);
__ movl2ptr(rdi_stack_move, rcx_amh_conversion);
__ sarptr(rdi_stack_move, CONV_STACK_MOVE_SHIFT);
Register rsi_temp = rsi_array; // spill this
insert_arg_slots(_masm, rdi_stack_move, -1,
rax_argslot, rbx_temp, rsi_temp);
@ -1113,13 +1052,9 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ movptr(rbx_temp, Address(rsi_source, 0));
__ movptr(Address(rax_argslot, 0), rbx_temp);
__ addptr(rsi_source, type2aelembytes(elem_type));
if (TaggedStackInterpreter) {
__ movptr(Address(rax_argslot, tag_offset),
frame::tag_for_basic_type(elem_type));
}
__ addptr(rax_argslot, Interpreter::stackElementSize());
__ cmpptr(rax_argslot, rdx_argslot_limit);
__ jcc(Assembler::less, loop);
__ jccb(Assembler::less, loop);
} else if (length_constant == 0) {
__ bind(skip_array_check);
// nothing to copy
@ -1130,11 +1065,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ movptr(rbx_temp, Address(rsi_array, elem_offset));
__ movptr(Address(rax_argslot, slot_offset), rbx_temp);
elem_offset += type2aelembytes(elem_type);
if (TaggedStackInterpreter) {
__ movptr(Address(rax_argslot, slot_offset + tag_offset),
frame::tag_for_basic_type(elem_type));
}
slot_offset += Interpreter::stackElementSize();
slot_offset += Interpreter::stackElementSize();
}
}


@ -43,11 +43,11 @@ ExceptionBlob* OptoRuntime::_exception_blob;
// This code is entered with a jmp.
//
// Arguments:
// rax,: exception oop
// rax: exception oop
// rdx: exception pc
//
// Results:
// rax,: exception oop
// rax: exception oop
// rdx: exception pc in caller or ???
// destination: exception handler of caller
//
@ -113,17 +113,17 @@ void OptoRuntime::generate_exception_blob() {
__ addptr(rsp, return_off * wordSize); // Epilog!
__ pop(rdx); // Exception pc
// rax: exception handler for given <exception oop/exception pc>
// rax,: exception handler for given <exception oop/exception pc>
// Restore SP from BP if the exception PC is a MethodHandle call.
__ cmpl(Address(rcx, JavaThread::is_method_handle_exception_offset()), 0);
__ cmovptr(Assembler::notEqual, rsp, rbp);
// We have a handler in rax, (could be deopt blob)
// rdx - throwing pc, deopt blob will need it.
__ push(rax);
// rcx contains handler address
__ get_thread(rcx); // TLS
// Get the exception
__ movptr(rax, Address(rcx, JavaThread::exception_oop_offset()));
// Get the exception pc in case we are deoptimized
@ -137,7 +137,7 @@ void OptoRuntime::generate_exception_blob() {
__ pop(rcx);
// rax,: exception oop
// rax: exception oop
// rcx: exception handler
// rdx: exception pc
__ jmp (rcx);


@ -1,5 +1,5 @@
/*
* Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -907,7 +907,8 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
int total_args_passed,
int comp_args_on_stack,
const BasicType *sig_bt,
const VMRegPair *regs) {
const VMRegPair *regs,
AdapterFingerPrint* fingerprint) {
address i2c_entry = __ pc();
gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
@ -954,7 +955,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
__ flush();
return new AdapterHandlerEntry(i2c_entry, c2i_entry, c2i_unverified_entry);
return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
}
int SharedRuntime::c_calling_convention(const BasicType *sig_bt,


@ -1,5 +1,5 @@
/*
* Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -638,6 +638,10 @@ static void gen_i2c_adapter(MacroAssembler *masm,
__ movptr(rax, Address(rsp, 0));
// Must preserve original SP for loading incoming arguments because
// we need to align the outgoing SP for compiled code.
__ movptr(r11, rsp);
// Cut-out for having no stack args. Since up to 2 int/oop args are passed
// in registers, we will occasionally have no stack args.
int comp_words_on_stack = 0;
@ -661,6 +665,10 @@ static void gen_i2c_adapter(MacroAssembler *masm,
// as far as the placement of the call instruction
__ push(rax);
// Put saved SP in another register
const Register saved_sp = rax;
__ movptr(saved_sp, r11);
// Will jump to the compiled code just as if compiled code was doing it.
// Pre-load the register-jump target early, to schedule it better.
__ movptr(r11, Address(rbx, in_bytes(methodOopDesc::from_compiled_offset())));
@ -680,11 +688,7 @@ static void gen_i2c_adapter(MacroAssembler *masm,
assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
"scrambled load targets?");
// Load in argument order going down.
// int ld_off = (total_args_passed + comp_words_on_stack -i)*wordSize;
// base ld_off on r13 (sender_sp) as the stack alignment makes offsets from rsp
// unpredictable
int ld_off = ((total_args_passed - 1) - i)*Interpreter::stackElementSize();
int ld_off = (total_args_passed - i)*Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
// Point to interpreter value (vs. tag)
int next_off = ld_off - Interpreter::stackElementSize();
//
@ -699,10 +703,14 @@ static void gen_i2c_adapter(MacroAssembler *masm,
if (r_1->is_stack()) {
// Convert stack slot to an SP offset (+ wordSize to account for return address )
int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;
// We can use r13 as a temp here because compiled code doesn't need r13 as an input
// and if we end up going thru a c2i because of a miss a reasonable value of r13
// will be generated.
if (!r_2->is_valid()) {
// sign extend???
__ movl(rax, Address(r13, ld_off));
__ movptr(Address(rsp, st_off), rax);
__ movl(r13, Address(saved_sp, ld_off));
__ movptr(Address(rsp, st_off), r13);
} else {
//
// We are using two optoregs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE
@ -715,9 +723,9 @@ static void gen_i2c_adapter(MacroAssembler *masm,
// ld_off is MSW so get LSW
const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
next_off : ld_off;
__ movq(rax, Address(r13, offset));
__ movq(r13, Address(saved_sp, offset));
// st_off is LSW (i.e. reg.first())
__ movq(Address(rsp, st_off), rax);
__ movq(Address(rsp, st_off), r13);
}
} else if (r_1->is_Register()) { // Register argument
Register r = r_1->as_Register();
@ -732,16 +740,16 @@ static void gen_i2c_adapter(MacroAssembler *masm,
next_off : ld_off;
// this can be a misaligned move
__ movq(r, Address(r13, offset));
__ movq(r, Address(saved_sp, offset));
} else {
// sign extend and use a full word?
__ movl(r, Address(r13, ld_off));
__ movl(r, Address(saved_sp, ld_off));
}
} else {
if (!r_2->is_valid()) {
__ movflt(r_1->as_XMMRegister(), Address(r13, ld_off));
__ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
} else {
__ movdbl(r_1->as_XMMRegister(), Address(r13, next_off));
__ movdbl(r_1->as_XMMRegister(), Address(saved_sp, next_off));
}
}
}
@ -770,7 +778,8 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
int total_args_passed,
int comp_args_on_stack,
const BasicType *sig_bt,
const VMRegPair *regs) {
const VMRegPair *regs,
AdapterFingerPrint* fingerprint) {
address i2c_entry = __ pc();
gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
@ -816,7 +825,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
__ flush();
return new AdapterHandlerEntry(i2c_entry, c2i_entry, c2i_unverified_entry);
return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
}
int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
@ -3319,6 +3328,10 @@ void OptoRuntime::generate_exception_blob() {
// rax: exception handler
// Restore SP from BP if the exception PC is a MethodHandle call.
__ cmpl(Address(r15_thread, JavaThread::is_method_handle_exception_offset()), 0);
__ cmovptr(Assembler::notEqual, rsp, rbp);
// We have a handler in rax (could be deopt blob).
__ mov(r8, rax);


@ -1,5 +1,5 @@
/*
* Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -369,7 +369,7 @@ class StubGenerator: public StubCodeGenerator {
// The pending exception in Thread is converted into a Java-level exception.
//
// Contract with Java-level exception handlers:
// rax,: exception
// rax: exception
// rdx: throwing pc
//
// NOTE: At entry of this stub, exception-pc must be on stack !!
@ -377,6 +377,12 @@ class StubGenerator: public StubCodeGenerator {
address generate_forward_exception() {
StubCodeMark mark(this, "StubRoutines", "forward exception");
address start = __ pc();
const Register thread = rcx;
// other registers used in this stub
const Register exception_oop = rax;
const Register handler_addr = rbx;
const Register exception_pc = rdx;
// Upon entry, the sp points to the return address returning into Java
// (interpreted or compiled) code; i.e., the return address becomes the
@ -389,8 +395,8 @@ class StubGenerator: public StubCodeGenerator {
#ifdef ASSERT
// make sure this code is only executed if there is a pending exception
{ Label L;
__ get_thread(rcx);
__ cmpptr(Address(rcx, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
__ get_thread(thread);
__ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
__ jcc(Assembler::notEqual, L);
__ stop("StubRoutines::forward exception: no pending exception (1)");
__ bind(L);
@ -398,33 +404,40 @@ class StubGenerator: public StubCodeGenerator {
#endif
// compute exception handler into rbx,
__ movptr(rax, Address(rsp, 0));
__ get_thread(thread);
__ movptr(exception_pc, Address(rsp, 0));
BLOCK_COMMENT("call exception_handler_for_return_address");
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rax);
__ mov(rbx, rax);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
__ mov(handler_addr, rax);
// setup rax, & rdx, remove return address & clear pending exception
__ get_thread(rcx);
__ pop(rdx);
__ movptr(rax, Address(rcx, Thread::pending_exception_offset()));
__ movptr(Address(rcx, Thread::pending_exception_offset()), NULL_WORD);
// setup rax & rdx, remove return address & clear pending exception
__ get_thread(thread);
__ pop(exception_pc);
__ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
__ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
#ifdef ASSERT
// make sure exception is set
{ Label L;
__ testptr(rax, rax);
__ testptr(exception_oop, exception_oop);
__ jcc(Assembler::notEqual, L);
__ stop("StubRoutines::forward exception: no pending exception (2)");
__ bind(L);
}
#endif
// Verify that there is really a valid exception in RAX.
__ verify_oop(exception_oop);
// Restore SP from BP if the exception PC is a MethodHandle call site.
__ cmpl(Address(thread, JavaThread::is_method_handle_exception_offset()), 0);
__ cmovptr(Assembler::notEqual, rsp, rbp);
// continue at exception handler (return address removed)
// rax,: exception
// rbx,: exception handler
// rax: exception
// rbx: exception handler
// rdx: throwing pc
__ verify_oop(rax);
__ jmp(rbx);
__ jmp(handler_addr);
return start;
}
@ -718,10 +731,8 @@ class StubGenerator: public StubCodeGenerator {
case BarrierSet::G1SATBCTLogging:
{
__ pusha(); // push registers
__ push(count);
__ push(start);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre)));
__ addptr(rsp, 2*wordSize);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre),
start, count);
__ popa();
}
break;
@ -752,10 +763,8 @@ class StubGenerator: public StubCodeGenerator {
case BarrierSet::G1SATBCTLogging:
{
__ pusha(); // push registers
__ push(count);
__ push(start);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post)));
__ addptr(rsp, 2*wordSize);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post),
start, count);
__ popa();
}
break;
@ -2030,6 +2039,54 @@ class StubGenerator: public StubCodeGenerator {
entry_checkcast_arraycopy);
}
void generate_math_stubs() {
{
StubCodeMark mark(this, "StubRoutines", "log");
StubRoutines::_intrinsic_log = (double (*)(double)) __ pc();
__ fld_d(Address(rsp, 4));
__ flog();
__ ret(0);
}
{
StubCodeMark mark(this, "StubRoutines", "log10");
StubRoutines::_intrinsic_log10 = (double (*)(double)) __ pc();
__ fld_d(Address(rsp, 4));
__ flog10();
__ ret(0);
}
{
StubCodeMark mark(this, "StubRoutines", "sin");
StubRoutines::_intrinsic_sin = (double (*)(double)) __ pc();
__ fld_d(Address(rsp, 4));
__ trigfunc('s');
__ ret(0);
}
{
StubCodeMark mark(this, "StubRoutines", "cos");
StubRoutines::_intrinsic_cos = (double (*)(double)) __ pc();
__ fld_d(Address(rsp, 4));
__ trigfunc('c');
__ ret(0);
}
{
StubCodeMark mark(this, "StubRoutines", "tan");
StubRoutines::_intrinsic_tan = (double (*)(double)) __ pc();
__ fld_d(Address(rsp, 4));
__ trigfunc('t');
__ ret(0);
}
// The intrinsic versions of these seem to return the same value as
// the strict versions.
StubRoutines::_intrinsic_exp = SharedRuntime::dexp;
StubRoutines::_intrinsic_pow = SharedRuntime::dpow;
}
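// Usage note (editorial, hedged): on 32-bit x86 these stubs follow the C
// calling convention for double(double) -- argument on the stack just past
// the return address (hence fld_d(Address(rsp, 4))), result left in ST0 --
// so once registered they behave like ordinary C functions:
//   double (*fast_log)(double) = StubRoutines::_intrinsic_log;
//   double y = fast_log(2.718281828459045);   // ~1.0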
public:
// Information about frame layout at time of blocking runtime call.
// Note that we only have to preserve callee-saved registers since
@ -2219,15 +2276,7 @@ class StubGenerator: public StubCodeGenerator {
// arraycopy stubs used by compilers
generate_arraycopy_stubs();
// generic method handle stubs
if (EnableMethodHandles && SystemDictionary::MethodHandle_klass() != NULL) {
for (MethodHandles::EntryKind ek = MethodHandles::_EK_FIRST;
ek < MethodHandles::_EK_LIMIT;
ek = MethodHandles::EntryKind(1 + (int)ek)) {
StubCodeMark mark(this, "MethodHandle", MethodHandles::entry_name(ek));
MethodHandles::generate_method_handle_stub(_masm, ek);
}
}
generate_math_stubs();
}


@ -1,5 +1,5 @@
/*
* Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -466,7 +466,7 @@ class StubGenerator: public StubCodeGenerator {
BLOCK_COMMENT("call exception_handler_for_return_address");
__ call_VM_leaf(CAST_FROM_FN_PTR(address,
SharedRuntime::exception_handler_for_return_address),
c_rarg0);
r15_thread, c_rarg0);
__ mov(rbx, rax);
// setup rax & rdx, remove return address & clear pending exception
@ -1172,7 +1172,7 @@ class StubGenerator: public StubCodeGenerator {
__ movptr(c_rarg0, addr);
__ movptr(c_rarg1, count);
}
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre)));
__ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
__ popa();
}
break;
@ -1212,7 +1212,7 @@ class StubGenerator: public StubCodeGenerator {
__ shrptr(scratch, LogBytesPerHeapOop); // convert to element count
__ mov(c_rarg0, start);
__ mov(c_rarg1, scratch);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post)));
__ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 2);
__ popa();
}
break;
@ -2731,6 +2731,79 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy;
}
void generate_math_stubs() {
{
StubCodeMark mark(this, "StubRoutines", "log");
StubRoutines::_intrinsic_log = (double (*)(double)) __ pc();
__ subq(rsp, 8);
__ movdbl(Address(rsp, 0), xmm0);
__ fld_d(Address(rsp, 0));
__ flog();
__ fstp_d(Address(rsp, 0));
__ movdbl(xmm0, Address(rsp, 0));
__ addq(rsp, 8);
__ ret(0);
}
{
StubCodeMark mark(this, "StubRoutines", "log10");
StubRoutines::_intrinsic_log10 = (double (*)(double)) __ pc();
__ subq(rsp, 8);
__ movdbl(Address(rsp, 0), xmm0);
__ fld_d(Address(rsp, 0));
__ flog10();
__ fstp_d(Address(rsp, 0));
__ movdbl(xmm0, Address(rsp, 0));
__ addq(rsp, 8);
__ ret(0);
}
{
StubCodeMark mark(this, "StubRoutines", "sin");
StubRoutines::_intrinsic_sin = (double (*)(double)) __ pc();
__ subq(rsp, 8);
__ movdbl(Address(rsp, 0), xmm0);
__ fld_d(Address(rsp, 0));
__ trigfunc('s');
__ fstp_d(Address(rsp, 0));
__ movdbl(xmm0, Address(rsp, 0));
__ addq(rsp, 8);
__ ret(0);
}
{
StubCodeMark mark(this, "StubRoutines", "cos");
StubRoutines::_intrinsic_cos = (double (*)(double)) __ pc();
__ subq(rsp, 8);
__ movdbl(Address(rsp, 0), xmm0);
__ fld_d(Address(rsp, 0));
__ trigfunc('c');
__ fstp_d(Address(rsp, 0));
__ movdbl(xmm0, Address(rsp, 0));
__ addq(rsp, 8);
__ ret(0);
}
{
StubCodeMark mark(this, "StubRoutines", "tan");
StubRoutines::_intrinsic_tan = (double (*)(double)) __ pc();
__ subq(rsp, 8);
__ movdbl(Address(rsp, 0), xmm0);
__ fld_d(Address(rsp, 0));
__ trigfunc('t');
__ fstp_d(Address(rsp, 0));
__ movdbl(xmm0, Address(rsp, 0));
__ addq(rsp, 8);
__ ret(0);
}
// The intrinsic versions of these seem to return the same value as
// the strict versions.
StubRoutines::_intrinsic_exp = SharedRuntime::dexp;
StubRoutines::_intrinsic_pow = SharedRuntime::dpow;
}
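// Editorial note on the memory round-trip in each stub above: in the x86-64
// ABI the argument and result travel in xmm0, but flog/flog10/trigfunc are
// x87 instructions operating on ST0, and there is no direct xmm<->x87 move,
// hence (sketch of the pattern, mirroring the code above):
//   subq rsp, 8 ; movdbl [rsp], xmm0 ; fld_d [rsp]     // xmm0 -> ST0
//   ... x87 computation ...
//   fstp_d [rsp] ; movdbl xmm0, [rsp] ; addq rsp, 8    // ST0 -> xmm0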
#undef __
#define __ masm->
@ -2935,6 +3008,8 @@ class StubGenerator: public StubCodeGenerator {
// arraycopy stubs used by compilers
generate_arraycopy_stubs();
generate_math_stubs();
}
public:


@ -1,5 +1,5 @@
/*
* Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,6 +31,11 @@ enum platform_dependent_constants {
code_size2 = 22000 // simply increase if too small (assembler will crash if too small)
};
// MethodHandles adapters
enum method_handles_platform_dependent_constants {
method_handles_adapters_code_size = 5000
};
class x86 {
friend class StubGenerator;
friend class VMStructs;


@ -1,5 +1,5 @@
/*
* Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,12 +28,14 @@
static bool returns_to_call_stub(address return_pc) { return return_pc == _call_stub_return_address; }
enum platform_dependent_constants
{
code_size1 = 19000, // simply increase if too small (assembler will
// crash if too small)
code_size2 = 22000 // simply increase if too small (assembler will
// crash if too small)
enum platform_dependent_constants {
code_size1 = 19000, // simply increase if too small (assembler will crash if too small)
code_size2 = 22000 // simply increase if too small (assembler will crash if too small)
};
// MethodHandles adapters
enum method_handles_platform_dependent_constants {
method_handles_adapters_code_size = 13000
};
class x86 {


@ -1,5 +1,5 @@
/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -155,15 +155,8 @@ address TemplateInterpreterGenerator::generate_continuation_for(TosState state)
}
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, bool unbox) {
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
TosState incoming_state = state;
if (EnableInvokeDynamic) {
if (unbox) {
incoming_state = atos;
}
} else {
assert(!unbox, "old behavior");
}
Label interpreter_entry;
address compiled_entry = __ pc();
@ -216,46 +209,6 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ restore_bcp();
__ restore_locals();
Label L_fail;
if (unbox && state != atos) {
// cast and unbox
BasicType type = as_BasicType(state);
if (type == T_BYTE) type = T_BOOLEAN; // FIXME
KlassHandle boxk = SystemDictionaryHandles::box_klass(type);
__ mov32(rbx, ExternalAddress((address) boxk.raw_value()));
__ testl(rax, rax);
Label L_got_value, L_get_value;
// convert nulls to zeroes (avoid NPEs here)
if (!(type == T_FLOAT || type == T_DOUBLE)) {
// if rax already contains zero bits, forge ahead
__ jcc(Assembler::zero, L_got_value);
} else {
__ jcc(Assembler::notZero, L_get_value);
__ fldz();
__ jmp(L_got_value);
}
__ bind(L_get_value);
__ cmp32(rbx, Address(rax, oopDesc::klass_offset_in_bytes()));
__ jcc(Assembler::notEqual, L_fail);
int offset = java_lang_boxing_object::value_offset_in_bytes(type);
// Cf. TemplateTable::getfield_or_static
switch (type) {
case T_BYTE: // fall through:
case T_BOOLEAN: __ load_signed_byte(rax, Address(rax, offset)); break;
case T_CHAR: __ load_unsigned_short(rax, Address(rax, offset)); break;
case T_SHORT: __ load_signed_short(rax, Address(rax, offset)); break;
case T_INT: __ movl(rax, Address(rax, offset)); break;
case T_FLOAT: __ fld_s(Address(rax, offset)); break;
case T_DOUBLE: __ fld_d(Address(rax, offset)); break;
// Access to java.lang.Double.value does not need to be atomic:
case T_LONG: { __ movl(rdx, Address(rax, offset + 4));
__ movl(rax, Address(rax, offset + 0)); } break;
default: ShouldNotReachHere();
}
__ bind(L_got_value);
}
Label L_got_cache, L_giant_index;
if (EnableInvokeDynamic) {
__ cmpb(Address(rsi, 0), Bytecodes::_invokedynamic);
@ -263,32 +216,6 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
}
__ get_cache_and_index_at_bcp(rbx, rcx, 1, false);
__ bind(L_got_cache);
if (unbox && state == atos) {
// insert a casting conversion, to keep verifier sane
Label L_ok, L_ok_pops;
__ testl(rax, rax);
__ jcc(Assembler::zero, L_ok);
__ push(rax); // save the object to check
__ push(rbx); // save CP cache reference
__ movl(rdx, Address(rax, oopDesc::klass_offset_in_bytes()));
__ movl(rbx, Address(rbx, rcx,
Address::times_4, constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::f1_offset()));
__ movl(rbx, Address(rbx, __ delayed_value(sun_dyn_CallSiteImpl::type_offset_in_bytes, rcx)));
__ movl(rbx, Address(rbx, __ delayed_value(java_dyn_MethodType::rtype_offset_in_bytes, rcx)));
__ movl(rax, Address(rbx, __ delayed_value(java_lang_Class::klass_offset_in_bytes, rcx)));
__ check_klass_subtype(rdx, rax, rbx, L_ok_pops);
__ pop(rcx); // pop and discard CP cache
__ mov(rbx, rax); // target supertype into rbx for L_fail
__ pop(rax); // failed object into rax for L_fail
__ jmp(L_fail);
__ bind(L_ok_pops);
// restore pushed temp regs:
__ pop(rbx);
__ pop(rax);
__ bind(L_ok);
}
__ movl(rbx, Address(rbx, rcx,
Address::times_ptr, constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::flags_offset()));
@ -301,14 +228,6 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ bind(L_giant_index);
__ get_cache_and_index_at_bcp(rbx, rcx, 1, true);
__ jmp(L_got_cache);
if (unbox) {
__ bind(L_fail);
__ push(rbx); // missed klass (required)
__ push(rax); // bad object (actual)
__ movptr(rdx, ExternalAddress((address) &Interpreter::_throw_WrongMethodType_entry));
__ call(rdx);
}
}
return entry;
@ -1512,6 +1431,23 @@ address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter:
}
// These should never be compiled since the interpreter will prefer
// the compiled version to the intrinsic version.
bool AbstractInterpreter::can_be_compiled(methodHandle m) {
switch (method_kind(m)) {
case Interpreter::java_lang_math_sin : // fall thru
case Interpreter::java_lang_math_cos : // fall thru
case Interpreter::java_lang_math_tan : // fall thru
case Interpreter::java_lang_math_abs : // fall thru
case Interpreter::java_lang_math_log : // fall thru
case Interpreter::java_lang_math_log10 : // fall thru
case Interpreter::java_lang_math_sqrt :
return false;
default:
return true;
}
}
// How much stack a method activation needs in words.
int AbstractInterpreter::size_top_interpreter_activation(methodOop method) {
@ -1569,7 +1505,10 @@ int AbstractInterpreter::layout_activation(methodOop method,
if (interpreter_frame != NULL) {
#ifdef ASSERT
assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
if (!EnableMethodHandles)
// @@@ FIXME: Should we correct interpreter_frame_sender_sp in the calling sequences?
// Probably, since deoptimization doesn't work yet.
assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)");
#endif
@ -1611,6 +1550,7 @@ int AbstractInterpreter::layout_activation(methodOop method,
void TemplateInterpreterGenerator::generate_throw_exception() {
// Entry point in previous activation (i.e., if the caller was interpreted)
Interpreter::_rethrow_exception_entry = __ pc();
const Register thread = rcx;
// Restore sp to interpreter_frame_last_sp even though we are going
// to empty the expression stack for the exception processing.
@ -1659,10 +1599,10 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// Set the popframe_processing bit in pending_popframe_condition indicating that we are
// currently handling popframe, so that call_VMs that may happen later do not trigger new
// popframe handling cycles.
__ get_thread(rcx);
__ movl(rdx, Address(rcx, JavaThread::popframe_condition_offset()));
__ get_thread(thread);
__ movl(rdx, Address(thread, JavaThread::popframe_condition_offset()));
__ orl(rdx, JavaThread::popframe_processing_bit);
__ movl(Address(rcx, JavaThread::popframe_condition_offset()), rdx);
__ movl(Address(thread, JavaThread::popframe_condition_offset()), rdx);
{
// Check to see whether we are returning to a deoptimized frame.
@ -1690,8 +1630,8 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
__ subptr(rdi, rax);
__ addptr(rdi, wordSize);
// Save these arguments
__ get_thread(rcx);
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), rcx, rax, rdi);
__ get_thread(thread);
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), thread, rax, rdi);
__ remove_activation(vtos, rdx,
/* throw_monitor_exception */ false,
@ -1699,8 +1639,8 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
/* notify_jvmdi */ false);
// Inform deoptimization that it is responsible for restoring these arguments
__ get_thread(rcx);
__ movl(Address(rcx, JavaThread::popframe_condition_offset()), JavaThread::popframe_force_deopt_reexecution_bit);
__ get_thread(thread);
__ movl(Address(thread, JavaThread::popframe_condition_offset()), JavaThread::popframe_force_deopt_reexecution_bit);
// Continue in deoptimization handler
__ jmp(rdx);
@ -1726,12 +1666,12 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// expression stack if necessary.
__ mov(rax, rsp);
__ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
__ get_thread(rcx);
__ get_thread(thread);
// PC must point into interpreter here
__ set_last_Java_frame(rcx, noreg, rbp, __ pc());
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), rcx, rax, rbx);
__ get_thread(rcx);
__ reset_last_Java_frame(rcx, true, true);
__ set_last_Java_frame(thread, noreg, rbp, __ pc());
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx);
__ get_thread(thread);
__ reset_last_Java_frame(thread, true, true);
// Restore the last_sp and null it out
__ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
__ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
@ -1745,8 +1685,8 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
}
// Clear the popframe condition flag
__ get_thread(rcx);
__ movl(Address(rcx, JavaThread::popframe_condition_offset()), JavaThread::popframe_inactive);
__ get_thread(thread);
__ movl(Address(thread, JavaThread::popframe_condition_offset()), JavaThread::popframe_inactive);
__ dispatch_next(vtos);
// end of PopFrame support
@ -1755,27 +1695,27 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// preserve exception over this code sequence
__ pop_ptr(rax);
__ get_thread(rcx);
__ movptr(Address(rcx, JavaThread::vm_result_offset()), rax);
__ get_thread(thread);
__ movptr(Address(thread, JavaThread::vm_result_offset()), rax);
// remove the activation (without doing throws on illegalMonitorExceptions)
__ remove_activation(vtos, rdx, false, true, false);
// restore exception
__ get_thread(rcx);
__ movptr(rax, Address(rcx, JavaThread::vm_result_offset()));
__ movptr(Address(rcx, JavaThread::vm_result_offset()), NULL_WORD);
__ get_thread(thread);
__ movptr(rax, Address(thread, JavaThread::vm_result_offset()));
__ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
__ verify_oop(rax);
// In between activations - previous activation type unknown yet
// compute continuation point - the continuation point expects
// the following registers set up:
//
// rax,: exception
// rax: exception
// rdx: return address/pc that threw exception
// rsp: expression stack of caller
// rbp,: rbp, of caller
// rbp: rbp, of caller
__ push(rax); // save exception
__ push(rdx); // save return address
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rdx);
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, rdx);
__ mov(rbx, rax); // save exception handler
__ pop(rdx); // restore return address
__ pop(rax); // restore exception
@ -1789,6 +1729,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
address entry = __ pc();
const Register thread = rcx;
__ restore_bcp();
__ restore_locals();
@ -1796,8 +1737,8 @@ address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state
__ empty_FPU_stack();
__ load_earlyret_value(state);
__ get_thread(rcx);
__ movptr(rcx, Address(rcx, JavaThread::jvmti_thread_state_offset()));
__ get_thread(thread);
__ movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
const Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());
// Clear the earlyret state


@ -1,5 +1,5 @@
/*
* Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -100,21 +100,26 @@ address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
return entry;
}
// Arguments are: required type in rarg1, failing object (or NULL) in rarg2
// Arguments are: required type at TOS+8, failing object (or NULL) at TOS+4.
address TemplateInterpreterGenerator::generate_WrongMethodType_handler() {
address entry = __ pc();
__ pop(c_rarg2); // failing object is at TOS
__ pop(c_rarg1); // required type is at TOS+8
// expression stack must be empty before entering the VM if an
// exception happened
__ verify_oop(c_rarg1);
__ verify_oop(c_rarg2);
// Various method handle types use interpreter registers as temps.
__ restore_bcp();
__ restore_locals();
// Expression stack must be empty before entering the VM for an exception.
__ empty_expression_stack();
__ call_VM(noreg,
CAST_FROM_FN_PTR(address,
InterpreterRuntime::
throw_WrongMethodTypeException),
InterpreterRuntime::throw_WrongMethodTypeException),
// pass required type, failing object (or NULL)
c_rarg1, c_rarg2);
return entry;
@ -166,8 +171,7 @@ address TemplateInterpreterGenerator::generate_continuation_for(TosState state)
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
int step, bool unbox) {
assert(!unbox, "NYI");//6815692//
int step) {
// amd64 doesn't need to do anything special about compiled returns
// to the interpreter so the code that exists on x86 to place a sentinel
@ -183,15 +187,29 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ restore_bcp();
__ restore_locals();
__ get_cache_and_index_at_bcp(rbx, rcx, 1);
Label L_got_cache, L_giant_index;
if (EnableInvokeDynamic) {
__ cmpb(Address(r13, 0), Bytecodes::_invokedynamic);
__ jcc(Assembler::equal, L_giant_index);
}
__ get_cache_and_index_at_bcp(rbx, rcx, 1, false);
__ bind(L_got_cache);
__ movl(rbx, Address(rbx, rcx,
Address::times_8,
Address::times_ptr,
in_bytes(constantPoolCacheOopDesc::base_offset()) +
3 * wordSize));
__ andl(rbx, 0xFF);
if (TaggedStackInterpreter) __ shll(rbx, 1); // 2 slots per parameter.
__ lea(rsp, Address(rsp, rbx, Address::times_8));
__ dispatch_next(state, step);
// out of the main line of code...
if (EnableInvokeDynamic) {
__ bind(L_giant_index);
__ get_cache_and_index_at_bcp(rbx, rcx, 1, true);
__ jmp(L_got_cache);
}
return entry;
}
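The argument pop above, restated in plain C++ (a sketch; pop_arguments is an illustrative helper, not HotSpot code): the low byte of the cache flags word holds the parameter slot count, which the stub scales by the stack element size.

#include <stdint.h>
static intptr_t* pop_arguments(intptr_t* sp, uint32_t flags, bool tagged) {
  int slots = flags & 0xFF;   // __ andl(rbx, 0xFF)
  if (tagged) slots <<= 1;    // TaggedStackInterpreter: 2 slots per parameter
  return sp + slots;          // __ lea(rsp, Address(rsp, rbx, Address::times_8))
}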
@ -431,8 +449,12 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
__ addptr(rax, stack_base);
__ subptr(rax, stack_size);
// Use the maximum number of pages we might bang.
const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
(StackRedPages+StackYellowPages);
// add in the red and yellow zone sizes
__ addptr(rax, (StackRedPages + StackYellowPages) * page_size);
__ addptr(rax, max_pages * page_size);
// check against the current stack bottom
__ cmpptr(rsp, rax);
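Restated for clarity (a sketch; the parameters mirror the StackShadowPages, StackRedPages, and StackYellowPages globals used above): the check now bangs max(shadow, red + yellow) pages past the computed stack bottom instead of only red + yellow.

#include <algorithm>
// Bang distance in pages, per the new code above (illustrative helper).
static int bang_pages(int shadow, int red, int yellow) {
  return std::max(shadow, red + yellow);
}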
@ -1434,6 +1456,23 @@ address AbstractInterpreterGenerator::generate_method_entry(
generate_normal_entry(synchronized);
}
// These should never be compiled since the interpreter will prefer
// the compiled version to the intrinsic version.
bool AbstractInterpreter::can_be_compiled(methodHandle m) {
switch (method_kind(m)) {
case Interpreter::java_lang_math_sin : // fall thru
case Interpreter::java_lang_math_cos : // fall thru
case Interpreter::java_lang_math_tan : // fall thru
case Interpreter::java_lang_math_abs : // fall thru
case Interpreter::java_lang_math_log : // fall thru
case Interpreter::java_lang_math_log10 : // fall thru
case Interpreter::java_lang_math_sqrt :
return false;
default:
return true;
}
}
// How much stack a method activation needs in words.
int AbstractInterpreter::size_top_interpreter_activation(methodOop method) {
const int entry_size = frame::interpreter_frame_monitor_size();
@ -1484,8 +1523,10 @@ int AbstractInterpreter::layout_activation(methodOop method,
tempcount* Interpreter::stackElementWords() + popframe_extra_args;
if (interpreter_frame != NULL) {
#ifdef ASSERT
assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(),
"Frame not properly walkable");
if (!EnableMethodHandles)
// @@@ FIXME: Should we correct interpreter_frame_sender_sp in the calling sequences?
// Probably, since deoptimization doesn't work yet.
assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)");
#endif
@ -1700,7 +1741,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
__ push(rdx); // save return address
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
SharedRuntime::exception_handler_for_return_address),
rdx);
r15_thread, rdx);
__ mov(rbx, rax); // save exception handler
__ pop(rdx); // restore return address
__ pop(rax); // restore exception


@ -1,5 +1,5 @@
/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -2890,9 +2890,6 @@ void TemplateTable::count_calls(Register method, Register temp) {
void TemplateTable::prepare_invoke(Register method, Register index, int byte_no) {
bool is_invdyn_bootstrap = (byte_no < 0);
if (is_invdyn_bootstrap) byte_no = -byte_no;
// determine flags
Bytecodes::Code code = bytecode();
const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
@ -2907,8 +2904,6 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no)
const Register flags = rdx;
assert_different_registers(method, index, recv, flags);
assert(!is_invdyn_bootstrap || is_invokedynamic, "byte_no<0 hack only for invdyn");
// save 'interpreter return address'
__ save_bcp();
@ -2920,12 +2915,8 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no)
__ andl(recv, 0xFF);
// recv count is 0 based?
Address recv_addr(rsp, recv, Interpreter::stackElementScale(), -Interpreter::expr_offset_in_bytes(1));
if (is_invokedynamic) {
__ lea(recv, recv_addr);
} else {
__ movptr(recv, recv_addr);
__ verify_oop(recv);
}
__ movptr(recv, recv_addr);
__ verify_oop(recv);
}
// do null check if needed
@ -2944,9 +2935,7 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no)
// load return address
{
address table_addr;
if (is_invdyn_bootstrap)
table_addr = (address)Interpreter::return_5_unbox_addrs_by_index_table();
else if (is_invokeinterface || is_invokedynamic)
if (is_invokeinterface || is_invokedynamic)
table_addr = (address)Interpreter::return_5_addrs_by_index_table();
else
table_addr = (address)Interpreter::return_3_addrs_by_index_table();
@ -3153,54 +3142,10 @@ void TemplateTable::invokedynamic(int byte_no) {
__ profile_call(rsi);
}
Label handle_unlinked_site;
__ movptr(rcx, Address(rax, __ delayed_value(sun_dyn_CallSiteImpl::target_offset_in_bytes, rcx)));
__ testptr(rcx, rcx);
__ jcc(Assembler::zero, handle_unlinked_site);
__ movptr(rcx, Address(rax, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, rcx)));
__ null_check(rcx);
__ prepare_to_jump_from_interpreted();
__ jump_to_method_handle_entry(rcx, rdx);
// Initial calls come here...
__ bind(handle_unlinked_site);
__ pop(rcx); // remove return address pushed by prepare_invoke
// box stacked arguments into an array for the bootstrap method
address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::bootstrap_invokedynamic);
__ restore_bcp(); // rsi must be correct for call_VM
__ call_VM(rax, entry, rax);
__ movl(rdi, rax); // protect bootstrap MH from prepare_invoke
// recompute return address
__ restore_bcp(); // rsi must be correct for prepare_invoke
prepare_invoke(rax, rbx, -byte_no); // smashes rcx, rdx
// rax: CallSite object (f1)
// rbx: unused (f2)
// rdi: bootstrap MH
// rdx: flags
// now load up the arglist, which has been neatly boxed
__ get_thread(rcx);
__ movptr(rdx, Address(rcx, JavaThread::vm_result_2_offset()));
__ movptr(Address(rcx, JavaThread::vm_result_2_offset()), NULL_WORD);
__ verify_oop(rdx);
// rdx = arglist
// save SP now, before we add the bootstrap call to the stack
// We must preserve a fiction that the original arguments are outgoing,
// because the return sequence will reset the stack to this point
// and then pop all those arguments. It seems error-prone to use
// a different argument list size just for bootstrapping.
__ prepare_to_jump_from_interpreted();
// Now let's play adapter, pushing the real arguments on the stack.
__ pop(rbx); // return PC
__ push(rdi); // boot MH
__ push(rax); // call site
__ push(rdx); // arglist
__ push(rbx); // return PC, again
__ mov(rcx, rdi);
__ jump_to_method_handle_entry(rcx, rdx);
}
//----------------------------------------------------------------------------------------------------


@ -1,5 +1,5 @@
/*
* Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -203,18 +203,15 @@ void TemplateTable::patch_bytecode(Bytecodes::Code bytecode, Register bc,
__ jcc(Assembler::notEqual, fast_patch);
__ get_method(scratch);
// Let breakpoint table handling rewrite to quicker bytecode
__ call_VM(noreg,
CAST_FROM_FN_PTR(address,
InterpreterRuntime::set_original_bytecode_at),
scratch, r13, bc);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), scratch, r13, bc);
#ifndef ASSERT
__ jmpb(patch_done);
__ bind(fast_patch);
}
#else
__ jmp(patch_done);
#endif
__ bind(fast_patch);
}
#ifdef ASSERT
Label okay;
__ load_unsigned_byte(scratch, at_bcp(0));
__ cmpl(scratch, (int) Bytecodes::java_code(bytecode));
@ -2054,26 +2051,28 @@ void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits
}
}
void TemplateTable::resolve_cache_and_index(int byte_no,
Register Rcache,
Register index) {
void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register index) {
assert(byte_no == 1 || byte_no == 2, "byte_no out of range");
bool is_invokedynamic = (bytecode() == Bytecodes::_invokedynamic);
const Register temp = rbx;
assert_different_registers(Rcache, index, temp);
const int shift_count = (1 + byte_no) * BitsPerByte;
Label resolved;
__ get_cache_and_index_at_bcp(Rcache, index, 1);
__ movl(temp, Address(Rcache,
index, Address::times_8,
constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::indices_offset()));
__ shrl(temp, shift_count);
// have we resolved this bytecode?
__ andl(temp, 0xFF);
__ cmpl(temp, (int) bytecode());
__ jcc(Assembler::equal, resolved);
__ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
if (is_invokedynamic) {
// we are resolved if the f1 field contains a non-null CallSite object
__ cmpptr(Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()), (int32_t) NULL_WORD);
__ jcc(Assembler::notEqual, resolved);
} else {
__ movl(temp, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
__ shrl(temp, shift_count);
// have we resolved this bytecode?
__ andl(temp, 0xFF);
__ cmpl(temp, (int) bytecode());
__ jcc(Assembler::equal, resolved);
}
// resolve first time through
address entry;
@ -2090,6 +2089,9 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
case Bytecodes::_invokeinterface:
entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke);
break;
case Bytecodes::_invokedynamic:
entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic);
break;
default:
ShouldNotReachHere();
break;
@ -2098,7 +2100,7 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
__ call_VM(noreg, entry, temp);
// Update registers with resolved info
__ get_cache_and_index_at_bcp(Rcache, index, 1);
__ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
__ bind(resolved);
}
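The two fast-path tests above can be restated in standalone C++ (a sketch; CacheEntrySketch is a stand-in for the constant pool cache entry layout the diff references, not a real HotSpot type):

#include <stddef.h>
#include <stdint.h>
struct CacheEntrySketch { void* f1; uint32_t indices; };
static bool is_resolved(const CacheEntrySketch& e, int byte_no,
                        uint8_t bc, bool is_invokedynamic) {
  if (is_invokedynamic)
    return e.f1 != NULL;                        // non-null CallSite => resolved
  int shift = (1 + byte_no) * 8;                // (1 + byte_no) * BitsPerByte
  return ((e.indices >> shift) & 0xFF) == bc;   // bytecode already recorded?
}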
@ -2832,15 +2834,14 @@ void TemplateTable::count_calls(Register method, Register temp) {
ShouldNotReachHere();
}
void TemplateTable::prepare_invoke(Register method,
Register index,
int byte_no,
Bytecodes::Code code) {
void TemplateTable::prepare_invoke(Register method, Register index, int byte_no) {
// determine flags
Bytecodes::Code code = bytecode();
const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
const bool is_invokespecial = code == Bytecodes::_invokespecial;
const bool load_receiver = code != Bytecodes::_invokestatic;
const bool load_receiver = (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic);
const bool receiver_null_check = is_invokespecial;
const bool save_flags = is_invokeinterface || is_invokevirtual;
// setup registers & access constant pool cache
@ -2858,8 +2859,8 @@ void TemplateTable::prepare_invoke(Register method,
__ movl(recv, flags);
__ andl(recv, 0xFF);
if (TaggedStackInterpreter) __ shll(recv, 1); // index*2
__ movptr(recv, Address(rsp, recv, Address::times_8,
-Interpreter::expr_offset_in_bytes(1)));
Address recv_addr(rsp, recv, Address::times_8, -Interpreter::expr_offset_in_bytes(1));
__ movptr(recv, recv_addr);
__ verify_oop(recv);
}
@ -2878,10 +2879,14 @@ void TemplateTable::prepare_invoke(Register method,
ConstantPoolCacheEntry::verify_tosBits();
// load return address
{
ExternalAddress return_5((address)Interpreter::return_5_addrs_by_index_table());
ExternalAddress return_3((address)Interpreter::return_3_addrs_by_index_table());
__ lea(rscratch1, (is_invokeinterface ? return_5 : return_3));
__ movptr(flags, Address(rscratch1, flags, Address::times_8));
address table_addr;
if (is_invokeinterface || is_invokedynamic)
table_addr = (address)Interpreter::return_5_addrs_by_index_table();
else
table_addr = (address)Interpreter::return_3_addrs_by_index_table();
ExternalAddress table(table_addr);
__ lea(rscratch1, table);
__ movptr(flags, Address(rscratch1, flags, Address::times_ptr));
}
// push return address
@ -2947,7 +2952,7 @@ void TemplateTable::invokevirtual_helper(Register index,
void TemplateTable::invokevirtual(int byte_no) {
transition(vtos, vtos);
prepare_invoke(rbx, noreg, byte_no, bytecode());
prepare_invoke(rbx, noreg, byte_no);
// rbx: index
// rcx: receiver
@ -2959,7 +2964,7 @@ void TemplateTable::invokevirtual(int byte_no) {
void TemplateTable::invokespecial(int byte_no) {
transition(vtos, vtos);
prepare_invoke(rbx, noreg, byte_no, bytecode());
prepare_invoke(rbx, noreg, byte_no);
// do the call
__ verify_oop(rbx);
__ profile_call(rax);
@ -2969,7 +2974,7 @@ void TemplateTable::invokespecial(int byte_no) {
void TemplateTable::invokestatic(int byte_no) {
transition(vtos, vtos);
prepare_invoke(rbx, noreg, byte_no, bytecode());
prepare_invoke(rbx, noreg, byte_no);
// do the call
__ verify_oop(rbx);
__ profile_call(rax);
@ -2983,7 +2988,7 @@ void TemplateTable::fast_invokevfinal(int byte_no) {
void TemplateTable::invokeinterface(int byte_no) {
transition(vtos, vtos);
prepare_invoke(rax, rbx, byte_no, bytecode());
prepare_invoke(rax, rbx, byte_no);
// rax: Interface
// rbx: index
@ -3072,7 +3077,24 @@ void TemplateTable::invokedynamic(int byte_no) {
return;
}
__ stop("invokedynamic NYI");//6815692//
prepare_invoke(rax, rbx, byte_no);
// rax: CallSite object (f1)
// rbx: unused (f2)
// rcx: receiver address
// rdx: flags (unused)
if (ProfileInterpreter) {
Label L;
// %%% should make a type profile for any invokedynamic that takes a ref argument
// profile this call
__ profile_call(r13);
}
__ movptr(rcx, Address(rax, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, rcx)));
__ null_check(rcx);
__ prepare_to_jump_from_interpreted();
__ jump_to_method_handle_entry(rcx, rdx);
}
@ -3212,17 +3234,19 @@ void TemplateTable::_new() {
__ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
__ store_klass_gap(rax, rcx); // zero klass gap for compressed oops
__ store_klass(rax, rsi); // store klass last
{
SkipIfEqual skip(_masm, &DTraceAllocProbes, false);
// Trigger dtrace event for fastpath
__ push(atos); // save the return value
__ call_VM_leaf(
CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
__ pop(atos); // restore the return value
}
__ jmp(done);
}
{
SkipIfEqual skip(_masm, &DTraceAllocProbes, false);
// Trigger dtrace event for fastpath
__ push(atos); // save the return value
__ call_VM_leaf(
CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
__ pop(atos); // restore the return value
}
// slow case
__ bind(slow_case);


@ -22,8 +22,7 @@
*
*/
static void prepare_invoke(Register method, Register index, int byte_no,
Bytecodes::Code code);
static void prepare_invoke(Register method, Register index, int byte_no);
static void invokevirtual_helper(Register index, Register recv,
Register flags);
static void volatile_barrier(Assembler::Membar_mask_bits order_constraint);


@ -255,6 +255,8 @@ void VM_Version::get_processor_features() {
if (!VM_Version::supports_sse2()) {
vm_exit_during_initialization("Unknown x64 processor: SSE2 not supported");
}
// in 64-bit mode the use of SSE2 is the minimum
if (UseSSE < 2) UseSSE = 2;
#endif
// If the OS doesn't support SSE, we can't use this feature even if the HW does


@ -235,6 +235,11 @@ reg_class xdb_reg7( XMM7a,XMM7b );
//----------SOURCE BLOCK-------------------------------------------------------
// This is a block of C++ code which provides values, functions, and
// definitions necessary in the rest of the architecture description
source_hpp %{
// Must be visible to the DFA in dfa_x86_32.cpp
extern bool is_operand_hi32_zero(Node* n);
%}
source %{
#define RELOC_IMM32 Assembler::imm_operand
#define RELOC_DISP32 Assembler::disp32_operand
@ -268,22 +273,36 @@ static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2], CON
static jlong *float_signflip_pool = double_quadword(&fp_signmask_pool[3*2], CONST64(0x8000000080000000), CONST64(0x8000000080000000));
static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], CONST64(0x8000000000000000), CONST64(0x8000000000000000));
// Offset hacking within calls.
static int pre_call_FPU_size() {
if (Compile::current()->in_24_bit_fp_mode())
return 6; // fldcw
return 0;
}
static int preserve_SP_size() {
return LP64_ONLY(1 +) 2; // [rex,] op, rm(reg/reg)
}
// !!!!! Special hack to get all types of calls to specify the byte offset
// from the start of the call to the point where the return address
// will point.
int MachCallStaticJavaNode::ret_addr_offset() {
return 5 + (Compile::current()->in_24_bit_fp_mode() ? 6 : 0); // 5 bytes from start of call to where return address points
int offset = 5 + pre_call_FPU_size(); // 5 bytes from start of call to where return address points
if (_method_handle_invoke)
offset += preserve_SP_size();
return offset;
}
int MachCallDynamicJavaNode::ret_addr_offset() {
return 10 + (Compile::current()->in_24_bit_fp_mode() ? 6 : 0); // 10 bytes from start of call to where return address points
return 10 + pre_call_FPU_size(); // 10 bytes from start of call to where return address points
}
static int sizeof_FFree_Float_Stack_All = -1;
int MachCallRuntimeNode::ret_addr_offset() {
assert(sizeof_FFree_Float_Stack_All != -1, "must have been emitted already");
return sizeof_FFree_Float_Stack_All + 5 + (Compile::current()->in_24_bit_fp_mode() ? 6 : 0);
return sizeof_FFree_Float_Stack_All + 5 + pre_call_FPU_size();
}
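As a cross-check on these constants, the standard x86 encodings involved (general instruction-set facts, not specific to this patch):

enum {
  call_rel32_size   = 5,  // E8 + imm32: the 5 bytes counted above
  fldcw_disp32_size = 6,  // D9 2D + disp32: pre_call_FPU_size()
  mov_rbp_rsp_size  = 2   // 89 E5, plus one REX byte on x86_64:
                          // preserve_SP_size() = LP64_ONLY(1 +) 2
};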
// Indicate if the safepoint node needs the polling page as an input.
@ -299,8 +318,16 @@ bool SafePointNode::needs_polling_address_input() {
// The address of the call instruction needs to be 4-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
int CallStaticJavaDirectNode::compute_padding(int current_offset) const {
if (Compile::current()->in_24_bit_fp_mode())
current_offset += 6; // skip fldcw in pre_call_FPU, if any
current_offset += pre_call_FPU_size(); // skip fldcw, if any
current_offset += 1; // skip call opcode byte
return round_to(current_offset, alignment_required()) - current_offset;
}
// The address of the call instruction needs to be 4-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
int CallStaticJavaHandleNode::compute_padding(int current_offset) const {
current_offset += pre_call_FPU_size(); // skip fldcw, if any
current_offset += preserve_SP_size(); // skip mov rbp, rsp
current_offset += 1; // skip call opcode byte
return round_to(current_offset, alignment_required()) - current_offset;
}
@ -308,8 +335,7 @@ int CallStaticJavaDirectNode::compute_padding(int current_offset) const {
// The address of the call instruction needs to be 4-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
int CallDynamicJavaDirectNode::compute_padding(int current_offset) const {
if (Compile::current()->in_24_bit_fp_mode())
current_offset += 6; // skip fldcw in pre_call_FPU, if any
current_offset += pre_call_FPU_size(); // skip fldcw, if any
current_offset += 5; // skip MOV instruction
current_offset += 1; // skip call opcode byte
return round_to(current_offset, alignment_required()) - current_offset;
@ -1418,8 +1444,10 @@ void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
// to implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = true;
// Do floats take an entire double register or just half?
const bool Matcher::float_in_double = true;
// Are floats converted to double when stored to stack during deoptimization?
// On x32 it is stored with conversion only when FPU is used for floats.
bool Matcher::float_in_double() { return (UseSSE == 0); }
// Do ints take an entire long register or just half?
const bool Matcher::int_in_long = false;
@ -1460,6 +1488,25 @@ RegMask Matcher::modL_proj_mask() {
return RegMask();
}
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
return EBP_REG_mask;
}
// Returns true if the high 32 bits of the value is known to be zero.
bool is_operand_hi32_zero(Node* n) {
int opc = n->Opcode();
if (opc == Op_LoadUI2L) {
return true;
}
if (opc == Op_AndL) {
Node* o2 = n->in(2);
if (o2->is_Con() && (o2->get_long() & 0xFFFFFFFF00000000LL) == 0LL) {
return true;
}
}
return false;
}
%}
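Two value shapes the predicate recognizes, restated as standalone C++ (a sketch; LoadUI2L and AndL are the ideal-graph opcodes tested above):

#include <stdint.h>
// LoadUI2L shape: a 32-bit unsigned load zero-extended to 64 bits.
static int64_t from_u32(uint32_t u)  { return (int64_t)u; }
// AndL shape: a long ANDed with a constant whose high 32 bits are clear.
static int64_t low_masked(int64_t x) { return x & 0x00000000FFFFFFFFLL; }
// In both cases bits 63..32 are provably zero, enabling the cheaper
// long-multiply rules added below.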
//----------ENCODING BLOCK-----------------------------------------------------
@ -1772,10 +1819,13 @@ encode %{
enc_class pre_call_FPU %{
// If method sets FPU control word restore it here
debug_only(int off0 = cbuf.code_size());
if( Compile::current()->in_24_bit_fp_mode() ) {
MacroAssembler masm(&cbuf);
masm.fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
}
debug_only(int off1 = cbuf.code_size());
assert(off1 - off0 == pre_call_FPU_size(), "correct size prediction");
%}
enc_class post_call_FPU %{
@ -1786,6 +1836,21 @@ encode %{
}
%}
enc_class preserve_SP %{
debug_only(int off0 = cbuf.code_size());
MacroAssembler _masm(&cbuf);
// RBP is preserved across all calls, even compiled calls.
// Use it to preserve RSP in places where the callee might change the SP.
__ movptr(rbp, rsp);
debug_only(int off1 = cbuf.code_size());
assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
%}
enc_class restore_SP %{
MacroAssembler _masm(&cbuf);
__ movptr(rsp, rbp);
%}
enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
// CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
// who we intended to call.
@ -8556,6 +8621,63 @@ instruct mulL_eReg(eADXRegL dst, eRegL src, eRegI tmp, eFlagsReg cr) %{
ins_pipe( pipe_slow );
%}
// Multiply Register Long where the left operand's high 32 bits are zero
instruct mulL_eReg_lhi0(eADXRegL dst, eRegL src, eRegI tmp, eFlagsReg cr) %{
predicate(is_operand_hi32_zero(n->in(1)));
match(Set dst (MulL dst src));
effect(KILL cr, TEMP tmp);
ins_cost(2*100+2*400);
// Basic idea: lo(result) = lo(x_lo * y_lo)
// hi(result) = hi(x_lo * y_lo) + lo(x_lo * y_hi) where lo(x_hi * y_lo) = 0 because x_hi = 0
format %{ "MOV $tmp,$src.hi\n\t"
"IMUL $tmp,EAX\n\t"
"MUL EDX:EAX,$src.lo\n\t"
"ADD EDX,$tmp" %}
ins_encode %{
__ movl($tmp$$Register, HIGH_FROM_LOW($src$$Register));
__ imull($tmp$$Register, rax);
__ mull($src$$Register);
__ addl(rdx, $tmp$$Register);
%}
ins_pipe( pipe_slow );
%}
// Multiply Register Long where the right operand's high 32 bits are zero
instruct mulL_eReg_rhi0(eADXRegL dst, eRegL src, eRegI tmp, eFlagsReg cr) %{
predicate(is_operand_hi32_zero(n->in(2)));
match(Set dst (MulL dst src));
effect(KILL cr, TEMP tmp);
ins_cost(2*100+2*400);
// Basic idea: lo(result) = lo(x_lo * y_lo)
// hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) where lo(x_lo * y_hi) = 0 because y_hi = 0
format %{ "MOV $tmp,$src.lo\n\t"
"IMUL $tmp,EDX\n\t"
"MUL EDX:EAX,$src.lo\n\t"
"ADD EDX,$tmp" %}
ins_encode %{
__ movl($tmp$$Register, $src$$Register);
__ imull($tmp$$Register, rdx);
__ mull($src$$Register);
__ addl(rdx, $tmp$$Register);
%}
ins_pipe( pipe_slow );
%}
// Multiply Register Long where the left and the right operands' high 32 bits are zero
instruct mulL_eReg_hi0(eADXRegL dst, eRegL src, eFlagsReg cr) %{
predicate(is_operand_hi32_zero(n->in(1)) && is_operand_hi32_zero(n->in(2)));
match(Set dst (MulL dst src));
effect(KILL cr);
ins_cost(1*400);
// Basic idea: lo(result) = lo(x_lo * y_lo)
// hi(result) = hi(x_lo * y_lo) where lo(x_hi * y_lo) = 0 and lo(x_lo * y_hi) = 0 because x_hi = 0 and y_hi = 0
format %{ "MUL EDX:EAX,$src.lo\n\t" %}
ins_encode %{
__ mull($src$$Register);
%}
ins_pipe( pipe_slow );
%}
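The identity behind all three rules, checked by a small standalone program (a sketch; uint32_t/uint64_t stand in for the 32-bit register halves):

#include <assert.h>
#include <stdint.h>
// low64(x*y) = x_lo*y_lo + ((x_lo*y_hi + x_hi*y_lo) << 32); each rule drops
// the cross terms its predicate proves are zero.
static uint64_t mul_lhi0(uint32_t x_lo, uint64_t y) {  // x_hi == 0 case
  uint32_t y_lo = (uint32_t)y, y_hi = (uint32_t)(y >> 32);
  uint64_t prod  = (uint64_t)x_lo * y_lo;   // MUL  EDX:EAX <- x_lo * y_lo
  uint32_t cross = x_lo * y_hi;             // IMUL tmp, keeps low 32 bits
  return prod + ((uint64_t)cross << 32);    // ADD  EDX, tmp
}
int main() {
  assert(mul_lhi0(7, 0x100000003ULL) == 7 * 0x100000003ULL);
  return 0;
}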
// Multiply Register Long by small constant
instruct mulL_eReg_con(eADXRegL dst, immL_127 src, eRegI tmp, eFlagsReg cr) %{
match(Set dst (MulL dst src));
@ -13406,6 +13528,7 @@ instruct cmovXX_reg_LEGT(cmpOp_commute cmp, flagsReg_long_LEGT flags, regX dst,
// compute_padding() functions will have to be adjusted.
instruct CallStaticJavaDirect(method meth) %{
match(CallStaticJava);
predicate(! ((CallStaticJavaNode*)n)->is_method_handle_invoke());
effect(USE meth);
ins_cost(300);
@ -13420,6 +13543,30 @@ instruct CallStaticJavaDirect(method meth) %{
ins_alignment(4);
%}
// Call Java Static Instruction (method handle version)
// Note: If this code changes, the corresponding ret_addr_offset() and
// compute_padding() functions will have to be adjusted.
instruct CallStaticJavaHandle(method meth, eBPRegP ebp) %{
match(CallStaticJava);
predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());
effect(USE meth);
// EBP is saved by all callees (for interpreter stack correction).
// We use it here for a similar purpose, in {preserve,restore}_SP.
ins_cost(300);
format %{ "CALL,static/MethodHandle " %}
opcode(0xE8); /* E8 cd */
ins_encode( pre_call_FPU,
preserve_SP,
Java_Static_Call( meth ),
restore_SP,
call_epilog,
post_call_FPU );
ins_pipe( pipe_slow );
ins_pc_relative(1);
ins_alignment(4);
%}
// Call Java Dynamic Instruction
// Note: If this code changes, the corresponding ret_addr_offset() and
// compute_padding() functions will have to be adjusted.


@ -551,12 +551,19 @@ source %{
#define __ _masm.
static int preserve_SP_size() {
return LP64_ONLY(1 +) 2; // [rex,] op, rm(reg/reg)
}
// !!!!! Special hack to get all types of calls to specify the byte offset
// from the start of the call to the point where the return address
// will point.
int MachCallStaticJavaNode::ret_addr_offset()
{
return 5; // 5 bytes from start of call to where return address points
int offset = 5; // 5 bytes from start of call to where return address points
if (_method_handle_invoke)
offset += preserve_SP_size();
return offset;
}
int MachCallDynamicJavaNode::ret_addr_offset()
@ -587,6 +594,15 @@ int CallStaticJavaDirectNode::compute_padding(int current_offset) const
return round_to(current_offset, alignment_required()) - current_offset;
}
// The address of the call instruction needs to be 4-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
int CallStaticJavaHandleNode::compute_padding(int current_offset) const
{
current_offset += preserve_SP_size(); // skip mov rbp, rsp
current_offset += 1; // skip call opcode byte
return round_to(current_offset, alignment_required()) - current_offset;
}
// The address of the call instruction needs to be 4-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
int CallDynamicJavaDirectNode::compute_padding(int current_offset) const
@ -2058,8 +2074,10 @@ void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {}
// implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = true;
// Do floats take an entire double register or just half?
const bool Matcher::float_in_double = true;
// Are floats converted to double when stored to stack during deoptimization?
// On x64 it is stored without conversion so we can use normal access.
bool Matcher::float_in_double() { return false; }
// Do ints take an entire long register or just half?
const bool Matcher::int_in_long = true;
@ -2113,6 +2131,10 @@ RegMask Matcher::modL_proj_mask() {
return LONG_RDX_REG_mask;
}
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
return PTR_RBP_REG_mask;
}
static Address build_address(int b, int i, int s, int d) {
Register index = as_Register(i);
Address::ScaleFactor scale = (Address::ScaleFactor)s;
@ -2608,6 +2630,21 @@ encode %{
RELOC_DISP32);
%}
enc_class preserve_SP %{
debug_only(int off0 = cbuf.code_size());
MacroAssembler _masm(&cbuf);
// RBP is preserved across all calls, even compiled calls.
// Use it to preserve RSP in places where the callee might change the SP.
__ movptr(rbp, rsp);
debug_only(int off1 = cbuf.code_size());
assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
%}
enc_class restore_SP %{
MacroAssembler _masm(&cbuf);
__ movptr(rsp, rbp);
%}
enc_class Java_Static_Call(method meth)
%{
// JAVA STATIC CALL
@ -12526,9 +12563,9 @@ instruct safePoint_poll(rFlagsReg cr)
// Call Java Static Instruction
// Note: If this code changes, the corresponding ret_addr_offset() and
// compute_padding() functions will have to be adjusted.
instruct CallStaticJavaDirect(method meth)
%{
instruct CallStaticJavaDirect(method meth) %{
match(CallStaticJava);
predicate(!((CallStaticJavaNode*) n)->is_method_handle_invoke());
effect(USE meth);
ins_cost(300);
@ -12540,6 +12577,28 @@ instruct CallStaticJavaDirect(method meth)
ins_alignment(4);
%}
// Call Java Static Instruction (method handle version)
// Note: If this code changes, the corresponding ret_addr_offset() and
// compute_padding() functions will have to be adjusted.
instruct CallStaticJavaHandle(method meth, rbp_RegP rbp) %{
match(CallStaticJava);
predicate(((CallStaticJavaNode*) n)->is_method_handle_invoke());
effect(USE meth);
// RBP is saved by all callees (for interpreter stack correction).
// We use it here for a similar purpose, in {preserve,restore}_SP.
ins_cost(300);
format %{ "call,static/MethodHandle " %}
opcode(0xE8); /* E8 cd */
ins_encode(preserve_SP,
Java_Static_Call(meth),
restore_SP,
call_epilog);
ins_pipe(pipe_slow);
ins_pc_relative(1);
ins_alignment(4);
%}
// Call Java Dynamic Instruction
// Note: If this code changes, the corresponding ret_addr_offset() and
// compute_padding() functions will have to be adjusted.


@ -1,6 +1,6 @@
/*
* Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2007, 2008, 2009 Red Hat, Inc.
* Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -145,7 +145,7 @@ void CppInterpreter::main_loop(int recurse, TRAPS) {
}
else if (istate->msg() == BytecodeInterpreter::return_from_method) {
// Copy the result into the caller's frame
result_slots = type2size[method->result_type()];
result_slots = type2size[result_type_of(method)];
assert(result_slots >= 0 && result_slots <= 2, "what?");
result = istate->stack() + result_slots;
break;
@ -204,6 +204,20 @@ void CppInterpreter::native_entry(methodOop method, intptr_t UNUSED, TRAPS) {
goto unwind_and_return;
}
// Update the invocation counter
if ((UseCompiler || CountCompiledCalls) && !method->is_synchronized()) {
thread->set_do_not_unlock();
InvocationCounter *counter = method->invocation_counter();
counter->increment();
if (counter->reached_InvocationLimit()) {
CALL_VM_NOCHECK(
InterpreterRuntime::frequency_counter_overflow(thread, NULL));
if (HAS_PENDING_EXCEPTION)
goto unwind_and_return;
}
thread->clr_do_not_unlock();
}
// Lock if necessary
BasicObjectLock *monitor;
monitor = NULL;
@ -231,7 +245,7 @@ void CppInterpreter::native_entry(methodOop method, intptr_t UNUSED, TRAPS) {
if (handlerAddr == NULL) {
CALL_VM_NOCHECK(InterpreterRuntime::prepare_native_call(thread, method));
if (HAS_PENDING_EXCEPTION)
goto unwind_and_return;
goto unlock_unwind_and_return;
handlerAddr = method->signature_handler();
assert(handlerAddr != NULL, "eh?");
@ -240,7 +254,7 @@ void CppInterpreter::native_entry(methodOop method, intptr_t UNUSED, TRAPS) {
CALL_VM_NOCHECK(handlerAddr =
InterpreterRuntime::slow_signature_handler(thread, method, NULL,NULL));
if (HAS_PENDING_EXCEPTION)
goto unwind_and_return;
goto unlock_unwind_and_return;
}
handler = \
InterpreterRuntime::SignatureHandler::from_handlerAddr(handlerAddr);
@ -351,10 +365,10 @@ void CppInterpreter::native_entry(methodOop method, intptr_t UNUSED, TRAPS) {
// Reset handle block
thread->active_handles()->clear();
// Unlock if necessary. It seems totally wrong that this
// is skipped in the event of an exception but apparently
// the template interpreter does this so we do too.
if (monitor && !HAS_PENDING_EXCEPTION) {
unlock_unwind_and_return:
// Unlock if necessary
if (monitor) {
BasicLock *lock = monitor->lock();
markOop header = lock->displaced_header();
oop rcvr = monitor->obj();
@ -380,9 +394,10 @@ void CppInterpreter::native_entry(methodOop method, intptr_t UNUSED, TRAPS) {
// Push our result
if (!HAS_PENDING_EXCEPTION) {
stack->set_sp(stack->sp() - type2size[method->result_type()]);
BasicType type = result_type_of(method);
stack->set_sp(stack->sp() - type2size[type]);
switch (method->result_type()) {
switch (type) {
case T_VOID:
break;
@ -693,6 +708,26 @@ int AbstractInterpreter::BasicType_as_index(BasicType type) {
return i;
}
BasicType CppInterpreter::result_type_of(methodOop method) {
BasicType t;
switch (method->result_index()) {
case 0 : t = T_BOOLEAN; break;
case 1 : t = T_CHAR; break;
case 2 : t = T_BYTE; break;
case 3 : t = T_SHORT; break;
case 4 : t = T_INT; break;
case 5 : t = T_LONG; break;
case 6 : t = T_VOID; break;
case 7 : t = T_FLOAT; break;
case 8 : t = T_DOUBLE; break;
case 9 : t = T_OBJECT; break;
default: ShouldNotReachHere();
}
assert(AbstractInterpreter::BasicType_as_index(t) == method->result_index(),
"out of step with AbstractInterpreter::BasicType_as_index");
return t;
}
address InterpreterGenerator::generate_empty_entry() {
if (!UseFastEmptyMethods)
return NULL;


@ -1,6 +1,6 @@
/*
* Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2007, 2008 Red Hat, Inc.
* Copyright 2007, 2008, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -41,3 +41,7 @@
private:
// Stack overflow checks
static bool stack_overflow_imminent(JavaThread *thread);
private:
// Fast result type determination
static BasicType result_type_of(methodOop method);


@ -36,11 +36,8 @@ bool frame::is_interpreted_frame() const {
return zeroframe()->is_interpreter_frame();
}
bool frame::is_fake_stub_frame() const {
return zeroframe()->is_fake_stub_frame();
}
frame frame::sender_for_entry_frame(RegisterMap *map) const {
assert(zeroframe()->is_entry_frame(), "wrong type of frame");
assert(map != NULL, "map must be set");
assert(!entry_frame_is_first(), "next Java fp must be non zero");
assert(entry_frame_call_wrapper()->anchor()->last_Java_sp() == sender_sp(),
@ -50,15 +47,10 @@ frame frame::sender_for_entry_frame(RegisterMap *map) const {
return frame(sender_sp(), sp() + 1);
}
frame frame::sender_for_interpreter_frame(RegisterMap *map) const {
return frame(sender_sp(), sp() + 1);
}
frame frame::sender_for_compiled_frame(RegisterMap *map) const {
return frame(sender_sp(), sp() + 1);
}
frame frame::sender_for_fake_stub_frame(RegisterMap *map) const {
frame frame::sender_for_nonentry_frame(RegisterMap *map) const {
assert(zeroframe()->is_interpreter_frame() ||
zeroframe()->is_shark_frame() ||
zeroframe()->is_fake_stub_frame(), "wrong type of frame");
return frame(sender_sp(), sp() + 1);
}
@ -69,17 +61,8 @@ frame frame::sender(RegisterMap* map) const {
if (is_entry_frame())
return sender_for_entry_frame(map);
if (is_interpreted_frame())
return sender_for_interpreter_frame(map);
if (is_compiled_frame())
return sender_for_compiled_frame(map);
if (is_fake_stub_frame())
return sender_for_fake_stub_frame(map);
ShouldNotReachHere();
else
return sender_for_nonentry_frame(map);
}
#ifdef CC_INTERP


@ -65,10 +65,7 @@
}
public:
bool is_fake_stub_frame() const;
public:
frame sender_for_fake_stub_frame(RegisterMap* map) const;
frame sender_for_nonentry_frame(RegisterMap* map) const;
public:
void zero_print_on_error(int index,


@ -1,6 +1,6 @@
/*
* Copyright 2000-2006 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2007, 2008, 2009 Red Hat, Inc.
* Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,10 +23,8 @@
*
*/
//
// Set the default values for platform dependent flags used by the
// runtime system. See globals.hpp for details of what they do.
//
define_pd_global(bool, ConvertSleepToYield, true);
define_pd_global(bool, ShareVtableStubs, true);
@ -37,19 +35,12 @@ define_pd_global(bool, ImplicitNullChecks, true);
define_pd_global(bool, UncommonNullCast, true);
define_pd_global(intx, CodeEntryAlignment, 32);
define_pd_global(uintx, TLABSize, 0);
#ifdef _LP64
define_pd_global(uintx, NewSize, ScaleForWordSize(2048 * K));
#else
define_pd_global(uintx, NewSize, ScaleForWordSize(1024 * K));
#endif // _LP64
define_pd_global(intx, InlineFrequencyCount, 100);
define_pd_global(intx, InlineSmallCode, 1000);
define_pd_global(intx, PreInflateSpin, 10);
define_pd_global(intx, StackYellowPages, 2);
define_pd_global(intx, StackRedPages, 1);
define_pd_global(intx, StackShadowPages, 3 LP64_ONLY(+3) DEBUG_ONLY(+3));
define_pd_global(intx, StackShadowPages, 5 LP64_ONLY(+1) DEBUG_ONLY(+3));
define_pd_global(bool, RewriteBytecodes, true);
define_pd_global(bool, RewriteFrequentPairs, true);
