J. Duke 2017-07-05 19:03:41 +02:00
commit b8c12c2e14
80 changed files with 1427 additions and 1559 deletions


@ -219,3 +219,4 @@ cb51fb4789ac0b8be4056482077ddfb8f3bd3805 jdk8-b91
785d07fe38901ecc1b7e0145e53e1c3da9361fee jdk8-b95
c156084add486f941c12d886a0b1b2854795d557 jdk8-b96
a1c1e8bf71f354f3aec0214cf13d6668811e021d jdk8-b97
0d0c983a817bbe8518a5ff201306334a8de267f2 jdk8-b98


@ -219,3 +219,4 @@ c8286839d0df04aba819ec4bef12b86babccf30e jdk8-b90
2cf36f43df36137980d9828cec27003ec10daeee jdk8-b95
3357c2776431d51a8de326a85e0f41420e40774f jdk8-b96
469995a8e97424f450c880606d689bf345277b19 jdk8-b97
3370fb6146e47a6cc05a213fc213e12fc0a38d07 jdk8-b98


@ -357,3 +357,5 @@ e6a4b8c71fa6f225bd989a34de2d0d0a656a8be8 jdk8-b96
2b9380b0bf0b649f40704735773e8956c2d88ba0 hs25-b39
d197d377ab2e016d024e8c86cb06a57bd7eae590 jdk8-b97
c9dd82da51ed34a28f7c6b3245163ee962e94572 hs25-b40
30b5b75c42ac5174b640fbef8aa87527668e8400 jdk8-b98
2b9946e10587f74ef75ae8145bea484df4a2738b hs25-b41


@ -49,7 +49,6 @@ public class ArrayKlass extends Klass {
higherDimension = new MetadataField(type.getAddressField("_higher_dimension"), 0);
lowerDimension = new MetadataField(type.getAddressField("_lower_dimension"), 0);
vtableLen = new CIntField(type.getCIntegerField("_vtable_len"), 0);
allocSize = new CIntField(type.getCIntegerField("_alloc_size"), 0);
componentMirror = new OopField(type.getOopField("_component_mirror"), 0);
javaLangCloneableName = null;
javaLangObjectName = null;
@ -64,7 +63,6 @@ public class ArrayKlass extends Klass {
private static MetadataField higherDimension;
private static MetadataField lowerDimension;
private static CIntField vtableLen;
private static CIntField allocSize;
private static OopField componentMirror;
public Klass getJavaSuper() {
@ -76,7 +74,6 @@ public class ArrayKlass extends Klass {
public Klass getHigherDimension() { return (Klass) higherDimension.getValue(this); }
public Klass getLowerDimension() { return (Klass) lowerDimension.getValue(this); }
public long getVtableLen() { return vtableLen.getValue(this); }
public long getAllocSize() { return allocSize.getValue(this); }
public Oop getComponentMirror() { return componentMirror.getValue(this); }
// constant class names - javaLangCloneable, javaIoSerializable, javaLangObject
@ -147,7 +144,6 @@ public class ArrayKlass extends Klass {
visitor.doMetadata(higherDimension, true);
visitor.doMetadata(lowerDimension, true);
visitor.doCInt(vtableLen, true);
visitor.doCInt(allocSize, true);
visitor.doOop(componentMirror, true);
}
}


@ -57,7 +57,6 @@ public class Klass extends Metadata implements ClassConstants {
accessFlags = new CIntField(type.getCIntegerField("_access_flags"), 0);
subklass = new MetadataField(type.getAddressField("_subklass"), 0);
nextSibling = new MetadataField(type.getAddressField("_next_sibling"), 0);
allocCount = new CIntField(type.getCIntegerField("_alloc_count"), 0);
LH_INSTANCE_SLOW_PATH_BIT = db.lookupIntConstant("Klass::_lh_instance_slow_path_bit").intValue();
LH_LOG2_ELEMENT_SIZE_SHIFT = db.lookupIntConstant("Klass::_lh_log2_element_size_shift").intValue();
@ -87,7 +86,6 @@ public class Klass extends Metadata implements ClassConstants {
private static CIntField accessFlags;
private static MetadataField subklass;
private static MetadataField nextSibling;
private static CIntField allocCount;
private Address getValue(AddressField field) {
return addr.getAddressAt(field.getOffset());
@ -108,7 +106,6 @@ public class Klass extends Metadata implements ClassConstants {
public AccessFlags getAccessFlagsObj(){ return new AccessFlags(getAccessFlags()); }
public Klass getSubklassKlass() { return (Klass) subklass.getValue(this); }
public Klass getNextSiblingKlass() { return (Klass) nextSibling.getValue(this); }
public long getAllocCount() { return allocCount.getValue(this); }
// computed access flags - takes care of inner classes etc.
// This is closer to actual source level than getAccessFlags() etc.
@ -172,7 +169,6 @@ public class Klass extends Metadata implements ClassConstants {
visitor.doCInt(accessFlags, true);
visitor.doMetadata(subklass, true);
visitor.doMetadata(nextSibling, true);
visitor.doCInt(allocCount, true);
}
public long getObjectSize() {


@ -221,7 +221,6 @@
_JVM_SetLength
_JVM_SetNativeThreadName
_JVM_SetPrimitiveArrayElement
_JVM_SetProtectionDomain
_JVM_SetSockOpt
_JVM_SetThreadPriority
_JVM_Sleep


@ -221,7 +221,6 @@
_JVM_SetLength
_JVM_SetNativeThreadName
_JVM_SetPrimitiveArrayElement
_JVM_SetProtectionDomain
_JVM_SetSockOpt
_JVM_SetThreadPriority
_JVM_Sleep


@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2013
HS_MAJOR_VER=25
HS_MINOR_VER=0
HS_BUILD_NUMBER=40
HS_BUILD_NUMBER=41
JDK_MAJOR_VER=1
JDK_MINOR_VER=8


@ -223,7 +223,6 @@ SUNWprivate_1.1 {
JVM_SetLength;
JVM_SetNativeThreadName;
JVM_SetPrimitiveArrayElement;
JVM_SetProtectionDomain;
JVM_SetSockOpt;
JVM_SetThreadPriority;
JVM_Sleep;


@ -223,7 +223,6 @@ SUNWprivate_1.1 {
JVM_SetLength;
JVM_SetNativeThreadName;
JVM_SetPrimitiveArrayElement;
JVM_SetProtectionDomain;
JVM_SetSockOpt;
JVM_SetThreadPriority;
JVM_Sleep;


@ -223,7 +223,6 @@ SUNWprivate_1.1 {
JVM_SetLength;
JVM_SetNativeThreadName;
JVM_SetPrimitiveArrayElement;
JVM_SetProtectionDomain;
JVM_SetSockOpt;
JVM_SetThreadPriority;
JVM_Sleep;


@ -1234,12 +1234,13 @@ bool os::address_is_in_vm(address addr) {
Dl_info dlinfo;
if (libjvm_base_addr == NULL) {
dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo);
libjvm_base_addr = (address)dlinfo.dli_fbase;
if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
libjvm_base_addr = (address)dlinfo.dli_fbase;
}
assert(libjvm_base_addr != NULL, "Cannot obtain base address for libjvm");
}
if (dladdr((void *)addr, &dlinfo)) {
if (dladdr((void *)addr, &dlinfo) != 0) {
if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
}
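
For illustration, a minimal standalone sketch (not part of this commit; names are hypothetical) of the convention the fix above enforces: dladdr() returns 0 on failure and nonzero on success, and the Dl_info fields are only meaningful after a nonzero return.

#define _GNU_SOURCE        // glibc requires this for dladdr(); not needed on BSD
#include <dlfcn.h>
#include <stdio.h>

// Returns 1 only if both lookups succeed and both addresses fall in the
// same loaded image (same dli_fbase), mirroring os::address_is_in_vm().
// The function-pointer-to-void* cast is sanctioned on POSIX platforms.
static int same_image(const void* probe, const void* addr) {
  Dl_info probe_info, addr_info;
  if (dladdr(probe, &probe_info) == 0) return 0;   // 0 means lookup failed
  if (dladdr(addr, &addr_info) == 0) return 0;
  return probe_info.dli_fbase == addr_info.dli_fbase;
}

int main() {
  printf("%d\n", same_image((const void*)&same_image, (const void*)&main));
  return 0;
}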
@ -1251,35 +1252,40 @@ bool os::address_is_in_vm(address addr) {
bool os::dll_address_to_function_name(address addr, char *buf,
int buflen, int *offset) {
// buf is not optional, but offset is optional
assert(buf != NULL, "sanity check");
Dl_info dlinfo;
char localbuf[MACH_MAXSYMLEN];
// dladdr will find names of dynamic functions only, but does
// it set dli_fbase with mach_header address when it "fails" ?
if (dladdr((void*)addr, &dlinfo) && dlinfo.dli_sname != NULL) {
if (buf != NULL) {
if(!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
if (dladdr((void*)addr, &dlinfo) != 0) {
// see if we have a matching symbol
if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
}
if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
return true;
}
if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
return true;
} else if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) {
if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
buf, buflen, offset, dlinfo.dli_fname)) {
return true;
// no matching symbol so try for just file info
if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
buf, buflen, offset, dlinfo.dli_fname)) {
return true;
}
}
}
// Handle non-dymanic manually:
if (dlinfo.dli_fbase != NULL &&
Decoder::decode(addr, localbuf, MACH_MAXSYMLEN, offset, dlinfo.dli_fbase)) {
if(!Decoder::demangle(localbuf, buf, buflen)) {
jio_snprintf(buf, buflen, "%s", localbuf);
// Handle non-dynamic manually:
if (dlinfo.dli_fbase != NULL &&
Decoder::decode(addr, localbuf, MACH_MAXSYMLEN, offset,
dlinfo.dli_fbase)) {
if (!Decoder::demangle(localbuf, buf, buflen)) {
jio_snprintf(buf, buflen, "%s", localbuf);
}
return true;
}
return true;
}
if (buf != NULL) buf[0] = '\0';
buf[0] = '\0';
if (offset != NULL) *offset = -1;
return false;
}
@ -1287,17 +1293,24 @@ bool os::dll_address_to_function_name(address addr, char *buf,
// ported from solaris version
bool os::dll_address_to_library_name(address addr, char* buf,
int buflen, int* offset) {
// buf is not optional, but offset is optional
assert(buf != NULL, "sanity check");
Dl_info dlinfo;
if (dladdr((void*)addr, &dlinfo)){
if (buf) jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
if (offset) *offset = addr - (address)dlinfo.dli_fbase;
return true;
} else {
if (buf) buf[0] = '\0';
if (offset) *offset = -1;
return false;
if (dladdr((void*)addr, &dlinfo) != 0) {
if (dlinfo.dli_fname != NULL) {
jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
}
if (dlinfo.dli_fbase != NULL && offset != NULL) {
*offset = addr - (address)dlinfo.dli_fbase;
}
return true;
}
buf[0] = '\0';
if (offset) *offset = -1;
return false;
}
// Loads .dll/.so and
@ -1520,49 +1533,50 @@ static bool _print_ascii_file(const char* filename, outputStream* st) {
}
void os::print_dll_info(outputStream *st) {
st->print_cr("Dynamic libraries:");
st->print_cr("Dynamic libraries:");
#ifdef RTLD_DI_LINKMAP
Dl_info dli;
void *handle;
Link_map *map;
Link_map *p;
Dl_info dli;
void *handle;
Link_map *map;
Link_map *p;
if (!dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli)) {
st->print_cr("Error: Cannot print dynamic libraries.");
return;
}
handle = dlopen(dli.dli_fname, RTLD_LAZY);
if (handle == NULL) {
st->print_cr("Error: Cannot print dynamic libraries.");
return;
}
dlinfo(handle, RTLD_DI_LINKMAP, &map);
if (map == NULL) {
st->print_cr("Error: Cannot print dynamic libraries.");
return;
}
if (dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli) == 0 ||
dli.dli_fname == NULL) {
st->print_cr("Error: Cannot print dynamic libraries.");
return;
}
handle = dlopen(dli.dli_fname, RTLD_LAZY);
if (handle == NULL) {
st->print_cr("Error: Cannot print dynamic libraries.");
return;
}
dlinfo(handle, RTLD_DI_LINKMAP, &map);
if (map == NULL) {
st->print_cr("Error: Cannot print dynamic libraries.");
return;
}
while (map->l_prev != NULL)
map = map->l_prev;
while (map->l_prev != NULL)
map = map->l_prev;
while (map != NULL) {
st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name);
map = map->l_next;
}
while (map != NULL) {
st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name);
map = map->l_next;
}
dlclose(handle);
dlclose(handle);
#elif defined(__APPLE__)
uint32_t count;
uint32_t i;
uint32_t count;
uint32_t i;
count = _dyld_image_count();
for (i = 1; i < count; i++) {
const char *name = _dyld_get_image_name(i);
intptr_t slide = _dyld_get_image_vmaddr_slide(i);
st->print_cr(PTR_FORMAT " \t%s", slide, name);
}
count = _dyld_image_count();
for (i = 1; i < count; i++) {
const char *name = _dyld_get_image_name(i);
intptr_t slide = _dyld_get_image_vmaddr_slide(i);
st->print_cr(PTR_FORMAT " \t%s", slide, name);
}
#else
st->print_cr("Error: Cannot print dynamic libraries.");
st->print_cr("Error: Cannot print dynamic libraries.");
#endif
}
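
As a hedged sketch of the RTLD_DI_LINKMAP branch above (not this commit's code; assumes a platform providing dlinfo(3) and a link map, e.g. Solaris or glibc with <link.h>):

#define _GNU_SOURCE
#include <dlfcn.h>
#include <link.h>
#include <stdio.h>

int main() {
  struct link_map* map = NULL;
  void* handle = dlopen(NULL, RTLD_LAZY);          // handle for the main program
  if (handle == NULL) return 1;
  if (dlinfo(handle, RTLD_DI_LINKMAP, &map) != 0 || map == NULL) {
    dlclose(handle);
    return 1;                                      // cannot print dynamic libraries
  }
  while (map->l_prev != NULL) map = map->l_prev;   // rewind to the head of the list
  for (; map != NULL; map = map->l_next) {
    printf("%p \t%s\n", (void*)map->l_addr, map->l_name);
  }
  dlclose(handle);
  return 0;
}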
@ -1707,8 +1721,11 @@ void os::jvm_path(char *buf, jint buflen) {
bool ret = dll_address_to_library_name(
CAST_FROM_FN_PTR(address, os::jvm_path),
dli_fname, sizeof(dli_fname), NULL);
assert(ret != 0, "cannot locate libjvm");
char *rp = realpath(dli_fname, buf);
assert(ret, "cannot locate libjvm");
char *rp = NULL;
if (ret && dli_fname[0] != '\0') {
rp = realpath(dli_fname, buf);
}
if (rp == NULL)
return;
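
The guard above reflects how realpath(3) behaves: it returns NULL on failure and only fills the output buffer on success, so the result must not be consumed unchecked. A hedged standalone sketch (hypothetical names, not this commit's code):

#include <limits.h>   // PATH_MAX
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char** argv) {
  char resolved[PATH_MAX];
  const char* candidate = (argc > 1) ? argv[1] : ".";
  // Resolve only when we actually have a non-empty name.
  if (candidate[0] != '\0' && realpath(candidate, resolved) != NULL) {
    printf("%s\n", resolved);
  } else {
    perror("realpath");   // errno describes the failure; 'resolved' is unspecified
  }
  return 0;
}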
@ -3747,20 +3764,20 @@ int os::Bsd::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex,
bool os::find(address addr, outputStream* st) {
Dl_info dlinfo;
memset(&dlinfo, 0, sizeof(dlinfo));
if (dladdr(addr, &dlinfo)) {
if (dladdr(addr, &dlinfo) != 0) {
st->print(PTR_FORMAT ": ", addr);
if (dlinfo.dli_sname != NULL) {
if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
st->print("%s+%#x", dlinfo.dli_sname,
addr - (intptr_t)dlinfo.dli_saddr);
} else if (dlinfo.dli_fname) {
} else if (dlinfo.dli_fbase != NULL) {
st->print("<offset %#x>", addr - (intptr_t)dlinfo.dli_fbase);
} else {
st->print("<absolute address>");
}
if (dlinfo.dli_fname) {
if (dlinfo.dli_fname != NULL) {
st->print(" in %s", dlinfo.dli_fname);
}
if (dlinfo.dli_fbase) {
if (dlinfo.dli_fbase != NULL) {
st->print(" at " PTR_FORMAT, dlinfo.dli_fbase);
}
st->cr();
@ -3773,7 +3790,7 @@ bool os::find(address addr, outputStream* st) {
if (!lowest) lowest = (address) dlinfo.dli_fbase;
if (begin < lowest) begin = lowest;
Dl_info dlinfo2;
if (dladdr(end, &dlinfo2) && dlinfo2.dli_saddr != dlinfo.dli_saddr
if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
&& end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
end = (address) dlinfo2.dli_saddr;
Disassembler::decode(begin, end, st);


@ -1682,12 +1682,13 @@ bool os::address_is_in_vm(address addr) {
Dl_info dlinfo;
if (libjvm_base_addr == NULL) {
dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo);
libjvm_base_addr = (address)dlinfo.dli_fbase;
if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
libjvm_base_addr = (address)dlinfo.dli_fbase;
}
assert(libjvm_base_addr != NULL, "Cannot obtain base address for libjvm");
}
if (dladdr((void *)addr, &dlinfo)) {
if (dladdr((void *)addr, &dlinfo) != 0) {
if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
}
@ -1696,24 +1697,30 @@ bool os::address_is_in_vm(address addr) {
bool os::dll_address_to_function_name(address addr, char *buf,
int buflen, int *offset) {
// buf is not optional, but offset is optional
assert(buf != NULL, "sanity check");
Dl_info dlinfo;
if (dladdr((void*)addr, &dlinfo) && dlinfo.dli_sname != NULL) {
if (buf != NULL) {
if(!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
if (dladdr((void*)addr, &dlinfo) != 0) {
// see if we have a matching symbol
if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
}
if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
return true;
}
if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
return true;
} else if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) {
if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
buf, buflen, offset, dlinfo.dli_fname)) {
return true;
// no matching symbol so try for just file info
if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
buf, buflen, offset, dlinfo.dli_fname)) {
return true;
}
}
}
if (buf != NULL) buf[0] = '\0';
buf[0] = '\0';
if (offset != NULL) *offset = -1;
return false;
}
@ -1764,6 +1771,9 @@ static int address_to_library_name_callback(struct dl_phdr_info *info,
bool os::dll_address_to_library_name(address addr, char* buf,
int buflen, int* offset) {
// buf is not optional, but offset is optional
assert(buf != NULL, "sanity check");
Dl_info dlinfo;
struct _address_to_library_name data;
@ -1782,15 +1792,20 @@ bool os::dll_address_to_library_name(address addr, char* buf,
// buf already contains library name
if (offset) *offset = addr - data.base;
return true;
} else if (dladdr((void*)addr, &dlinfo)){
if (buf) jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
if (offset) *offset = addr - (address)dlinfo.dli_fbase;
return true;
} else {
if (buf) buf[0] = '\0';
if (offset) *offset = -1;
return false;
}
if (dladdr((void*)addr, &dlinfo) != 0) {
if (dlinfo.dli_fname != NULL) {
jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
}
if (dlinfo.dli_fbase != NULL && offset != NULL) {
*offset = addr - (address)dlinfo.dli_fbase;
}
return true;
}
buf[0] = '\0';
if (offset) *offset = -1;
return false;
}
// Loads .dll/.so and
@ -2317,8 +2332,11 @@ void os::jvm_path(char *buf, jint buflen) {
bool ret = dll_address_to_library_name(
CAST_FROM_FN_PTR(address, os::jvm_path),
dli_fname, sizeof(dli_fname), NULL);
assert(ret != 0, "cannot locate libjvm");
char *rp = realpath(dli_fname, buf);
assert(ret, "cannot locate libjvm");
char *rp = NULL;
if (ret && dli_fname[0] != '\0') {
rp = realpath(dli_fname, buf);
}
if (rp == NULL)
return;
@ -4730,20 +4748,20 @@ int os::Linux::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mute
bool os::find(address addr, outputStream* st) {
Dl_info dlinfo;
memset(&dlinfo, 0, sizeof(dlinfo));
if (dladdr(addr, &dlinfo)) {
if (dladdr(addr, &dlinfo) != 0) {
st->print(PTR_FORMAT ": ", addr);
if (dlinfo.dli_sname != NULL) {
if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
st->print("%s+%#x", dlinfo.dli_sname,
addr - (intptr_t)dlinfo.dli_saddr);
} else if (dlinfo.dli_fname) {
} else if (dlinfo.dli_fbase != NULL) {
st->print("<offset %#x>", addr - (intptr_t)dlinfo.dli_fbase);
} else {
st->print("<absolute address>");
}
if (dlinfo.dli_fname) {
if (dlinfo.dli_fname != NULL) {
st->print(" in %s", dlinfo.dli_fname);
}
if (dlinfo.dli_fbase) {
if (dlinfo.dli_fbase != NULL) {
st->print(" at " PTR_FORMAT, dlinfo.dli_fbase);
}
st->cr();
@ -4756,7 +4774,7 @@ bool os::find(address addr, outputStream* st) {
if (!lowest) lowest = (address) dlinfo.dli_fbase;
if (begin < lowest) begin = lowest;
Dl_info dlinfo2;
if (dladdr(end, &dlinfo2) && dlinfo2.dli_saddr != dlinfo.dli_saddr
if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
&& end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
end = (address) dlinfo2.dli_saddr;
Disassembler::decode(begin, end, st);


@ -30,15 +30,6 @@
//
#define RUNTIME_OS_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct) \
\
product(bool, UseISM, false, \
"Use Intimate Shared Memory (Solaris Only)") \
\
product(bool, UsePermISM, false, \
"Obsolete flag for compatibility (same as UseISM)") \
\
product(bool, UseMPSS, true, \
"Use Multiple Page Size Support (Solaris 9 Only)") \
\
product(bool, UseExtendedFileIO, true, \
"Enable workaround for limitations of stdio FILE structure")


@ -115,45 +115,6 @@
// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
#ifdef _GNU_SOURCE
// See bug #6514594
extern "C" int madvise(caddr_t, size_t, int);
extern "C" int memcntl(caddr_t addr, size_t len, int cmd, caddr_t arg,
int attr, int mask);
#endif //_GNU_SOURCE
/*
MPSS Changes Start.
The JVM binary needs to be built and run on pre-Solaris 9
systems, but the constants needed by MPSS are only in Solaris 9
header files. They are textually replicated here to allow
building on earlier systems. Once building on Solaris 8 is
no longer a requirement, these #defines can be replaced by ordinary
system .h inclusion.
In earlier versions of the JDK and Solaris, we used ISM for large pages.
But ISM requires shared memory to achieve this and thus has many caveats.
MPSS is fully transparent and is a cleaner way to get large pages.
Although we still keep ISM for backward compatibility, as well as
to give the opportunity to use large pages on older systems, it is
recommended that MPSS be used for Solaris 9 and above.
*/
#ifndef MC_HAT_ADVISE
struct memcntl_mha {
uint_t mha_cmd; /* command(s) */
uint_t mha_flags;
size_t mha_pagesize;
};
#define MC_HAT_ADVISE 7 /* advise hat map size */
#define MHA_MAPSIZE_VA 0x1 /* set preferred page size */
#define MAP_ALIGN 0x200 /* addr specifies alignment */
#endif
// MPSS Changes End.
// Here are some liblgrp types from sys/lgrp_user.h to be able to
// compile on older systems without this header file.
@ -172,32 +133,6 @@ struct memcntl_mha {
# define LGRP_RSRC_MEM 1 /* memory resources */
#endif
// Some more macros from sys/mman.h that are not present in Solaris 8.
#ifndef MAX_MEMINFO_CNT
/*
* info_req request type definitions for meminfo
* request types starting with MEMINFO_V are used for Virtual addresses
* and should not be mixed with MEMINFO_PLGRP which is targeted for Physical
* addresses
*/
# define MEMINFO_SHIFT 16
# define MEMINFO_MASK (0xFF << MEMINFO_SHIFT)
# define MEMINFO_VPHYSICAL (0x01 << MEMINFO_SHIFT) /* get physical addr */
# define MEMINFO_VLGRP (0x02 << MEMINFO_SHIFT) /* get lgroup */
# define MEMINFO_VPAGESIZE (0x03 << MEMINFO_SHIFT) /* size of phys page */
# define MEMINFO_VREPLCNT (0x04 << MEMINFO_SHIFT) /* no. of replica */
# define MEMINFO_VREPL (0x05 << MEMINFO_SHIFT) /* physical replica */
# define MEMINFO_VREPL_LGRP (0x06 << MEMINFO_SHIFT) /* lgrp of replica */
# define MEMINFO_PLGRP (0x07 << MEMINFO_SHIFT) /* lgroup for paddr */
/* maximum number of addresses meminfo() can process at a time */
# define MAX_MEMINFO_CNT 256
/* maximum number of request types */
# define MAX_MEMINFO_REQ 31
#endif
// see thr_setprio(3T) for the basis of these numbers
#define MinimumPriority 0
#define NormalPriority 64
@ -1924,12 +1859,13 @@ bool os::address_is_in_vm(address addr) {
Dl_info dlinfo;
if (libjvm_base_addr == NULL) {
dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo);
libjvm_base_addr = (address)dlinfo.dli_fbase;
if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
libjvm_base_addr = (address)dlinfo.dli_fbase;
}
assert(libjvm_base_addr != NULL, "Cannot obtain base address for libjvm");
}
if (dladdr((void *)addr, &dlinfo)) {
if (dladdr((void *)addr, &dlinfo) != 0) {
if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
}
@ -1941,114 +1877,133 @@ static dladdr1_func_type dladdr1_func = NULL;
bool os::dll_address_to_function_name(address addr, char *buf,
int buflen, int * offset) {
// buf is not optional, but offset is optional
assert(buf != NULL, "sanity check");
Dl_info dlinfo;
// dladdr1_func was initialized in os::init()
if (dladdr1_func){
// yes, we have dladdr1
if (dladdr1_func != NULL) {
// yes, we have dladdr1
// Support for dladdr1 is checked at runtime; it may be
// available even if the vm is built on a machine that does
// not have dladdr1 support. Make sure there is a value for
// RTLD_DL_SYMENT.
#ifndef RTLD_DL_SYMENT
#define RTLD_DL_SYMENT 1
#endif
// Support for dladdr1 is checked at runtime; it may be
// available even if the vm is built on a machine that does
// not have dladdr1 support. Make sure there is a value for
// RTLD_DL_SYMENT.
#ifndef RTLD_DL_SYMENT
#define RTLD_DL_SYMENT 1
#endif
#ifdef _LP64
Elf64_Sym * info;
Elf64_Sym * info;
#else
Elf32_Sym * info;
Elf32_Sym * info;
#endif
if (dladdr1_func((void *)addr, &dlinfo, (void **)&info,
RTLD_DL_SYMENT)) {
if ((char *)dlinfo.dli_saddr + info->st_size > (char *)addr) {
if (buf != NULL) {
if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen))
jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
}
if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
return true;
}
}
if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) {
if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
buf, buflen, offset, dlinfo.dli_fname)) {
if (dladdr1_func((void *)addr, &dlinfo, (void **)&info,
RTLD_DL_SYMENT) != 0) {
// see if we have a matching symbol that covers our address
if (dlinfo.dli_saddr != NULL &&
(char *)dlinfo.dli_saddr + info->st_size > (char *)addr) {
if (dlinfo.dli_sname != NULL) {
if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
}
if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
return true;
}
}
if (buf != NULL) buf[0] = '\0';
if (offset != NULL) *offset = -1;
return false;
} else {
// no, only dladdr is available
if (dladdr((void *)addr, &dlinfo)) {
if (buf != NULL) {
if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen))
jio_snprintf(buf, buflen, dlinfo.dli_sname);
}
if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
return true;
} else if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) {
// no matching symbol so try for just file info
if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
buf, buflen, offset, dlinfo.dli_fname)) {
buf, buflen, offset, dlinfo.dli_fname)) {
return true;
}
}
if (buf != NULL) buf[0] = '\0';
if (offset != NULL) *offset = -1;
return false;
}
buf[0] = '\0';
if (offset != NULL) *offset = -1;
return false;
}
// no, only dladdr is available
if (dladdr((void *)addr, &dlinfo) != 0) {
// see if we have a matching symbol
if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
jio_snprintf(buf, buflen, dlinfo.dli_sname);
}
if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
return true;
}
// no matching symbol so try for just file info
if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
buf, buflen, offset, dlinfo.dli_fname)) {
return true;
}
}
}
buf[0] = '\0';
if (offset != NULL) *offset = -1;
return false;
}
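
A hedged sketch of the dladdr1()/RTLD_DL_SYMENT idea used above (a Solaris API, also present in glibc under _GNU_SOURCE; the helper name is hypothetical): the extra ELF symbol record supplies st_size, so the code can verify that the queried address really lies inside the symbol rather than merely after its start.

#define _GNU_SOURCE
#include <dlfcn.h>
#include <link.h>    // Elf32_Sym / Elf64_Sym

static bool addr_inside_symbol(void* addr, Dl_info* dlinfo) {
#ifdef _LP64
  Elf64_Sym* sym = NULL;
#else
  Elf32_Sym* sym = NULL;
#endif
  if (dladdr1(addr, dlinfo, (void**)&sym, RTLD_DL_SYMENT) == 0) return false;
  // A hit only counts if addr falls within [dli_saddr, dli_saddr + st_size).
  return dlinfo->dli_saddr != NULL && sym != NULL &&
         (char*)dlinfo->dli_saddr + sym->st_size > (char*)addr;
}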
bool os::dll_address_to_library_name(address addr, char* buf,
int buflen, int* offset) {
// buf is not optional, but offset is optional
assert(buf != NULL, "sanity check");
Dl_info dlinfo;
if (dladdr((void*)addr, &dlinfo)){
if (buf) jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
if (offset) *offset = addr - (address)dlinfo.dli_fbase;
return true;
} else {
if (buf) buf[0] = '\0';
if (offset) *offset = -1;
return false;
if (dladdr((void*)addr, &dlinfo) != 0) {
if (dlinfo.dli_fname != NULL) {
jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
}
if (dlinfo.dli_fbase != NULL && offset != NULL) {
*offset = addr - (address)dlinfo.dli_fbase;
}
return true;
}
buf[0] = '\0';
if (offset) *offset = -1;
return false;
}
// Prints the names and full paths of all opened dynamic libraries
// for current process
void os::print_dll_info(outputStream * st) {
Dl_info dli;
void *handle;
Link_map *map;
Link_map *p;
Dl_info dli;
void *handle;
Link_map *map;
Link_map *p;
st->print_cr("Dynamic libraries:"); st->flush();
st->print_cr("Dynamic libraries:"); st->flush();
if (!dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli)) {
st->print_cr("Error: Cannot print dynamic libraries.");
return;
}
handle = dlopen(dli.dli_fname, RTLD_LAZY);
if (handle == NULL) {
st->print_cr("Error: Cannot print dynamic libraries.");
return;
}
dlinfo(handle, RTLD_DI_LINKMAP, &map);
if (map == NULL) {
st->print_cr("Error: Cannot print dynamic libraries.");
return;
}
if (dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli) == 0 ||
dli.dli_fname == NULL) {
st->print_cr("Error: Cannot print dynamic libraries.");
return;
}
handle = dlopen(dli.dli_fname, RTLD_LAZY);
if (handle == NULL) {
st->print_cr("Error: Cannot print dynamic libraries.");
return;
}
dlinfo(handle, RTLD_DI_LINKMAP, &map);
if (map == NULL) {
st->print_cr("Error: Cannot print dynamic libraries.");
return;
}
while (map->l_prev != NULL)
map = map->l_prev;
while (map->l_prev != NULL)
map = map->l_prev;
while (map != NULL) {
st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name);
map = map->l_next;
}
while (map != NULL) {
st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name);
map = map->l_next;
}
dlclose(handle);
dlclose(handle);
}
// Loads .dll/.so and
@ -2475,7 +2430,12 @@ void os::jvm_path(char *buf, jint buflen) {
Dl_info dlinfo;
int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
assert(ret != 0, "cannot locate libjvm");
realpath((char *)dlinfo.dli_fname, buf);
if (ret != 0 && dlinfo.dli_fname != NULL) {
realpath((char *)dlinfo.dli_fname, buf);
} else {
buf[0] = '\0';
return;
}
if (Arguments::created_by_gamma_launcher()) {
// Support for the gamma launcher. Typical value for buf is
@ -2859,7 +2819,7 @@ int os::Solaris::commit_memory_impl(char* addr, size_t bytes,
size_t alignment_hint, bool exec) {
int err = Solaris::commit_memory_impl(addr, bytes, exec);
if (err == 0) {
if (UseMPSS && alignment_hint > (size_t)vm_page_size()) {
if (UseLargePages && (alignment_hint > (size_t)vm_page_size())) {
// If the large page size has been set and the VM
// is using large pages, use the large page size
// if it is smaller than the alignment hint. This is
@ -2878,7 +2838,7 @@ int os::Solaris::commit_memory_impl(char* addr, size_t bytes,
page_size = alignment_hint;
}
// Since this is a hint, ignore any failures.
(void)Solaris::set_mpss_range(addr, bytes, page_size);
(void)Solaris::setup_large_pages(addr, bytes, page_size);
}
}
return err;
@ -2921,8 +2881,8 @@ bool os::remove_stack_guard_pages(char* addr, size_t size) {
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
if (UseLargePages && UseMPSS) {
Solaris::set_mpss_range(addr, bytes, alignment_hint);
if (UseLargePages) {
Solaris::setup_large_pages(addr, bytes, alignment_hint);
}
}
@ -3321,47 +3281,8 @@ bool os::unguard_memory(char* addr, size_t bytes) {
}
// Large page support
// UseLargePages is the master flag to enable/disable large page memory.
// UseMPSS and UseISM are supported for compatibility reasons. Their combined
// effects can be described in the following table:
//
// UseLargePages UseMPSS UseISM
// false * * => UseLargePages is the master switch, turning
// it off will turn off both UseMPSS and
// UseISM. VM will not use large page memory
// regardless of the settings of UseMPSS/UseISM.
// true false false => Unless future Solaris provides other
// mechanism to use large page memory, this
// combination is equivalent to -UseLargePages,
// VM will not use large page memory
// true true false => JVM will use MPSS for large page memory.
// This is the default behavior.
// true false true => JVM will use ISM for large page memory.
// true true true => JVM will use ISM if it is available.
// Otherwise, JVM will fall back to MPSS.
// Because ISM is now available on all
// supported Solaris versions, this combination
// is equivalent to +UseISM -UseMPSS.
static size_t _large_page_size = 0;
bool os::Solaris::ism_sanity_check(bool warn, size_t * page_size) {
// x86 uses either 2M or 4M page, depending on whether PAE (Physical Address
// Extensions) mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. Sparc
// can support multiple page sizes.
// Don't bother to probe page size because getpagesizes() comes with MPSS.
// ISM is only recommended on old Solaris where there is no MPSS support.
// Simply choose a conservative value as default.
*page_size = LargePageSizeInBytes ? LargePageSizeInBytes :
SPARC_ONLY(4 * M) IA32_ONLY(4 * M) AMD64_ONLY(2 * M)
ARM_ONLY(2 * M);
// ISM is available on all supported Solaris versions
return true;
}
// Insertion sort for small arrays (descending order).
static void insertion_sort_descending(size_t* array, int len) {
for (int i = 0; i < len; i++) {
@ -3374,7 +3295,7 @@ static void insertion_sort_descending(size_t* array, int len) {
}
}
bool os::Solaris::mpss_sanity_check(bool warn, size_t * page_size) {
bool os::Solaris::mpss_sanity_check(bool warn, size_t* page_size) {
const unsigned int usable_count = VM_Version::page_size_count();
if (usable_count == 1) {
return false;
@ -3440,41 +3361,24 @@ bool os::Solaris::mpss_sanity_check(bool warn, size_t * page_size) {
}
void os::large_page_init() {
if (!UseLargePages) {
UseISM = false;
UseMPSS = false;
return;
if (UseLargePages) {
// print a warning if any large page related flag is specified on command line
bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
!FLAG_IS_DEFAULT(LargePageSizeInBytes);
UseLargePages = Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);
}
// print a warning if any large page related flag is specified on command line
bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
!FLAG_IS_DEFAULT(UseISM) ||
!FLAG_IS_DEFAULT(UseMPSS) ||
!FLAG_IS_DEFAULT(LargePageSizeInBytes);
UseISM = UseISM &&
Solaris::ism_sanity_check(warn_on_failure, &_large_page_size);
if (UseISM) {
// ISM disables MPSS to be compatible with old JDK behavior
UseMPSS = false;
_page_sizes[0] = _large_page_size;
_page_sizes[1] = vm_page_size();
}
UseMPSS = UseMPSS &&
Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);
UseLargePages = UseISM || UseMPSS;
}
bool os::Solaris::set_mpss_range(caddr_t start, size_t bytes, size_t align) {
bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes, size_t align) {
// Signal to OS that we want large pages for addresses
// from addr, addr + bytes
struct memcntl_mha mpss_struct;
mpss_struct.mha_cmd = MHA_MAPSIZE_VA;
mpss_struct.mha_pagesize = align;
mpss_struct.mha_flags = 0;
if (memcntl(start, bytes, MC_HAT_ADVISE,
(caddr_t) &mpss_struct, 0, 0) < 0) {
// Upon successful completion, memcntl() returns 0
if (memcntl(start, bytes, MC_HAT_ADVISE, (caddr_t) &mpss_struct, 0, 0)) {
debug_only(warning("Attempt to use MPSS failed."));
return false;
}
@ -3482,72 +3386,13 @@ bool os::Solaris::set_mpss_range(caddr_t start, size_t bytes, size_t align) {
}
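
For reference, a hedged Solaris-only sketch of the memcntl(2)/MC_HAT_ADVISE call that setup_large_pages() wraps (the function name here is hypothetical):

#include <sys/types.h>
#include <sys/mman.h>   // memcntl, MC_HAT_ADVISE, MHA_MAPSIZE_VA, struct memcntl_mha

static bool advise_page_size(caddr_t start, size_t bytes, size_t pagesize) {
  struct memcntl_mha mha;
  mha.mha_cmd = MHA_MAPSIZE_VA;   // set the preferred page size for the range
  mha.mha_flags = 0;
  mha.mha_pagesize = pagesize;
  // memcntl() returns 0 on success and -1 on failure (errno set); it is
  // only a hint, so callers may ignore a failure.
  return memcntl(start, bytes, MC_HAT_ADVISE, (caddr_t)&mha, 0, 0) == 0;
}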
char* os::reserve_memory_special(size_t size, char* addr, bool exec) {
// "exec" is passed in but not used. Creating the shared image for
// the code cache doesn't have an SHM_X executable permission to check.
assert(UseLargePages && UseISM, "only for ISM large pages");
char* retAddr = NULL;
int shmid;
key_t ismKey;
bool warn_on_failure = UseISM &&
(!FLAG_IS_DEFAULT(UseLargePages) ||
!FLAG_IS_DEFAULT(UseISM) ||
!FLAG_IS_DEFAULT(LargePageSizeInBytes)
);
char msg[128];
ismKey = IPC_PRIVATE;
// Create a large shared memory region to attach to based on size.
// Currently, size is the total size of the heap
shmid = shmget(ismKey, size, SHM_R | SHM_W | IPC_CREAT);
if (shmid == -1){
if (warn_on_failure) {
jio_snprintf(msg, sizeof(msg), "Failed to reserve shared memory (errno = %d).", errno);
warning(msg);
}
return NULL;
}
// Attach to the region
retAddr = (char *) shmat(shmid, 0, SHM_SHARE_MMU | SHM_R | SHM_W);
int err = errno;
// Remove shmid. If shmat() is successful, the actual shared memory segment
// will be deleted when it's detached by shmdt() or when the process
// terminates. If shmat() is not successful this will remove the shared
// segment immediately.
shmctl(shmid, IPC_RMID, NULL);
if (retAddr == (char *) -1) {
if (warn_on_failure) {
jio_snprintf(msg, sizeof(msg), "Failed to attach shared memory (errno = %d).", err);
warning(msg);
}
return NULL;
}
if ((retAddr != NULL) && UseNUMAInterleaving) {
numa_make_global(retAddr, size);
}
// The memory is committed
MemTracker::record_virtual_memory_reserve_and_commit((address)retAddr, size, mtNone, CURRENT_PC);
return retAddr;
fatal("os::reserve_memory_special should not be called on Solaris.");
return NULL;
}
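
The ISM path deleted above used the classic System V shared-memory sequence. As a hedged, generic sketch of that idiom (not this commit's code; helper name hypothetical):

#include <sys/ipc.h>
#include <sys/shm.h>
#include <stddef.h>

static char* attach_private_segment(size_t size) {
  // Create an anonymous segment; mode 0600 limits access to the owner.
  int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | 0600);
  if (shmid == -1) return NULL;
  char* addr = (char*)shmat(shmid, NULL, 0);
  // Mark the id for removal right away: if shmat() succeeded, the segment
  // lives until detached (shmdt) or process exit; if it failed, this frees
  // the segment immediately.
  shmctl(shmid, IPC_RMID, NULL);
  return (addr == (char*)-1) ? NULL : addr;
}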
bool os::release_memory_special(char* base, size_t bytes) {
MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
// detaching the SHM segment will also delete it, see reserve_memory_special()
int rslt = shmdt(base);
if (rslt == 0) {
tkr.record((address)base, bytes);
return true;
} else {
tkr.discard();
return false;
}
fatal("os::release_memory_special should not be called on Solaris.");
return false;
}
size_t os::large_page_size() {
@ -3557,11 +3402,11 @@ size_t os::large_page_size() {
// MPSS allows application to commit large page memory on demand; with ISM
// the entire memory region must be allocated as shared memory.
bool os::can_commit_large_page_memory() {
return UseISM ? false : true;
return true;
}
bool os::can_execute_large_page_memory() {
return UseISM ? false : true;
return true;
}
static int os_sleep(jlong millis, bool interruptible) {
@ -3835,28 +3680,6 @@ static bool priocntl_enable = false;
static const int criticalPrio = 60; // FX/60 is critical thread class/priority on T4
static int java_MaxPriority_to_os_priority = 0; // Saved mapping
// Call the version of priocntl suitable for all supported versions
// of Solaris. We need to call through this wrapper so that we can
// build on Solaris 9 and run on Solaris 8, 9 and 10.
//
// This code should be removed if we ever stop supporting Solaris 8
// and earlier releases.
static long priocntl_stub(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t arg);
typedef long (*priocntl_type)(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t arg);
static priocntl_type priocntl_ptr = priocntl_stub;
// Stub to set the value of the real pointer, and then call the real
// function.
static long priocntl_stub(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t arg) {
// Try Solaris 8- name only.
priocntl_type tmp = (priocntl_type)dlsym(RTLD_DEFAULT, "__priocntl");
guarantee(tmp != NULL, "priocntl function not found.");
priocntl_ptr = tmp;
return (*priocntl_ptr)(PC_VERSION, idtype, id, cmd, arg);
}
// lwp_priocntl_init
//
@ -3864,9 +3687,7 @@ static long priocntl_stub(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t
//
// Return errno or 0 if OK.
//
static
int lwp_priocntl_init ()
{
static int lwp_priocntl_init () {
int rslt;
pcinfo_t ClassInfo;
pcparms_t ParmInfo;
@ -3906,7 +3727,7 @@ int lwp_priocntl_init ()
strcpy(ClassInfo.pc_clname, "TS");
ClassInfo.pc_cid = -1;
rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
if (rslt < 0) return errno;
assert(ClassInfo.pc_cid != -1, "cid for TS class is -1");
tsLimits.schedPolicy = ClassInfo.pc_cid;
@ -3915,7 +3736,7 @@ int lwp_priocntl_init ()
strcpy(ClassInfo.pc_clname, "IA");
ClassInfo.pc_cid = -1;
rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
if (rslt < 0) return errno;
assert(ClassInfo.pc_cid != -1, "cid for IA class is -1");
iaLimits.schedPolicy = ClassInfo.pc_cid;
@ -3924,7 +3745,7 @@ int lwp_priocntl_init ()
strcpy(ClassInfo.pc_clname, "RT");
ClassInfo.pc_cid = -1;
rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
if (rslt < 0) return errno;
assert(ClassInfo.pc_cid != -1, "cid for RT class is -1");
rtLimits.schedPolicy = ClassInfo.pc_cid;
@ -3933,7 +3754,7 @@ int lwp_priocntl_init ()
strcpy(ClassInfo.pc_clname, "FX");
ClassInfo.pc_cid = -1;
rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
if (rslt < 0) return errno;
assert(ClassInfo.pc_cid != -1, "cid for FX class is -1");
fxLimits.schedPolicy = ClassInfo.pc_cid;
@ -3944,7 +3765,7 @@ int lwp_priocntl_init ()
// This will normally be IA, TS or, rarely, FX or RT.
memset(&ParmInfo, 0, sizeof(ParmInfo));
ParmInfo.pc_cid = PC_CLNULL;
rslt = (*priocntl_ptr) (PC_VERSION, P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
if (rslt < 0) return errno;
myClass = ParmInfo.pc_cid;
@ -3952,7 +3773,7 @@ int lwp_priocntl_init ()
// about the class.
ClassInfo.pc_cid = myClass;
ClassInfo.pc_clname[0] = 0;
rslt = (*priocntl_ptr) (PC_VERSION, (idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo);
rslt = priocntl((idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo);
if (rslt < 0) return errno;
if (ThreadPriorityVerbose) {
@ -3961,7 +3782,7 @@ int lwp_priocntl_init ()
memset(&ParmInfo, 0, sizeof(pcparms_t));
ParmInfo.pc_cid = PC_CLNULL;
rslt = (*priocntl_ptr)(PC_VERSION, P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
if (rslt < 0) return errno;
if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
@ -4065,7 +3886,7 @@ int set_lwp_class_and_priority(int ThreadID, int lwpid,
memset(&ParmInfo, 0, sizeof(pcparms_t));
ParmInfo.pc_cid = PC_CLNULL;
rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo);
rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo);
if (rslt < 0) return errno;
int cur_class = ParmInfo.pc_cid;
@ -4133,7 +3954,7 @@ int set_lwp_class_and_priority(int ThreadID, int lwpid,
return EINVAL; // no clue, punt
}
rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo);
rslt = priocntl(P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo);
if (ThreadPriorityVerbose && rslt) {
tty->print_cr ("PC_SETPARMS ->%d %d\n", rslt, errno);
}
@ -4152,7 +3973,7 @@ int set_lwp_class_and_priority(int ThreadID, int lwpid,
memset(&ReadBack, 0, sizeof(pcparms_t));
ReadBack.pc_cid = PC_CLNULL;
rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack);
rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack);
assert(rslt >= 0, "priocntl failed");
Actual = Expected = 0xBAD;
assert(ParmInfo.pc_cid == ReadBack.pc_cid, "cid's don't match");
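
A hedged Solaris sketch of the direct priocntl(2)/PC_GETCID lookup the commit switches to (helper name hypothetical); the old priocntl_stub indirection existed only so a binary built on Solaris 9 could still run on Solaris 8.

#include <sys/priocntl.h>
#include <string.h>
#include <errno.h>

// Look up a scheduling-class id (e.g. "TS", "IA", "RT", "FX") by name.
// Returns 0 and stores the id on success, or an errno value on failure.
static int class_id_for(const char* name, id_t* cid_out) {
  pcinfo_t info;
  memset(&info, 0, sizeof(info));
  strncpy(info.pc_clname, name, PC_CLNMSZ - 1);
  info.pc_cid = -1;
  if (priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&info) < 0) return errno;
  *cid_out = info.pc_cid;
  return 0;
}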
@ -5244,11 +5065,6 @@ uint_t os::Solaris::getisax(uint32_t* array, uint_t n) {
return _getisax(array, n);
}
// Symbol doesn't exist in Solaris 8 pset.h
#ifndef PS_MYID
#define PS_MYID -3
#endif
// int pset_getloadavg(psetid_t pset, double loadavg[], int nelem);
typedef long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem);
static pset_getloadavg_type pset_getloadavg_ptr = NULL;
@ -5418,20 +5234,6 @@ jint os::init_2(void) {
UseNUMA = false;
}
}
// ISM is not compatible with the NUMA allocator - it always allocates
// pages round-robin across the lgroups.
if (UseNUMA && UseLargePages && UseISM) {
if (!FLAG_IS_DEFAULT(UseNUMA)) {
if (FLAG_IS_DEFAULT(UseLargePages) && FLAG_IS_DEFAULT(UseISM)) {
UseLargePages = false;
} else {
warning("UseNUMA is not compatible with ISM large pages, disabling NUMA allocator");
UseNUMA = false;
}
} else {
UseNUMA = false;
}
}
if (!UseNUMA && ForceNUMA) {
UseNUMA = true;
}
@ -6077,24 +5879,20 @@ int os::loadavg(double loadavg[], int nelem) {
bool os::find(address addr, outputStream* st) {
Dl_info dlinfo;
memset(&dlinfo, 0, sizeof(dlinfo));
if (dladdr(addr, &dlinfo)) {
#ifdef _LP64
st->print("0x%016lx: ", addr);
#else
st->print("0x%08x: ", addr);
#endif
if (dlinfo.dli_sname != NULL)
if (dladdr(addr, &dlinfo) != 0) {
st->print(PTR_FORMAT ": ", addr);
if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
st->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
else if (dlinfo.dli_fname)
} else if (dlinfo.dli_fbase != NULL)
st->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
else
st->print("<absolute address>");
if (dlinfo.dli_fname) st->print(" in %s", dlinfo.dli_fname);
#ifdef _LP64
if (dlinfo.dli_fbase) st->print(" at 0x%016lx", dlinfo.dli_fbase);
#else
if (dlinfo.dli_fbase) st->print(" at 0x%08x", dlinfo.dli_fbase);
#endif
if (dlinfo.dli_fname != NULL) {
st->print(" in %s", dlinfo.dli_fname);
}
if (dlinfo.dli_fbase != NULL) {
st->print(" at " PTR_FORMAT, dlinfo.dli_fbase);
}
st->cr();
if (Verbose) {
@ -6105,7 +5903,7 @@ bool os::find(address addr, outputStream* st) {
if (!lowest) lowest = (address) dlinfo.dli_fbase;
if (begin < lowest) begin = lowest;
Dl_info dlinfo2;
if (dladdr(end, &dlinfo2) && dlinfo2.dli_saddr != dlinfo.dli_saddr
if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
&& end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
end = (address) dlinfo2.dli_saddr;
Disassembler::decode(begin, end, st);
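
The os::find() rewrite above replaces the duplicated #ifdef _LP64 printf strings with the PTR_FORMAT macro. A hedged standalone sketch of the same idea (MY_PTR_FORMAT is a hypothetical stand-in for HotSpot's PTR_FORMAT):

#include <inttypes.h>
#include <stdio.h>

// One format macro that is correct for both 32- and 64-bit pointers.
#define MY_PTR_FORMAT "0x%" PRIxPTR

static void print_ptr(const void* p) {
  printf(MY_PTR_FORMAT "\n", (uintptr_t)p);
}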


@ -106,8 +106,8 @@ class Solaris {
static meminfo_func_t _meminfo;
// Large Page Support--mpss.
static bool set_mpss_range(caddr_t start, size_t bytes, size_t align);
// Large Page Support
static bool setup_large_pages(caddr_t start, size_t bytes, size_t align);
static void init_thread_fpu_state(void);
@ -174,7 +174,6 @@ class Solaris {
static char* mmap_chunk(char *addr, size_t size, int flags, int prot);
static char* anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed);
static bool mpss_sanity_check(bool warn, size_t * page_size);
static bool ism_sanity_check (bool warn, size_t * page_size);
// Workaround for 4352906. thr_stksegment sometimes returns
// a bad value for the primordial thread's stack base when


@ -1420,34 +1420,40 @@ static int _locate_module_by_addr(int pid, char * mod_fname, address base_addr,
bool os::dll_address_to_library_name(address addr, char* buf,
int buflen, int* offset) {
// buf is not optional, but offset is optional
assert(buf != NULL, "sanity check");
// NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always
// return the full path to the DLL file, sometimes it returns path
// to the corresponding PDB file (debug info); sometimes it only
// returns partial path, which makes life painful.
struct _modinfo mi;
mi.addr = addr;
mi.full_path = buf;
mi.buflen = buflen;
int pid = os::current_process_id();
if (enumerate_modules(pid, _locate_module_by_addr, (void *)&mi)) {
// buf already contains path name
if (offset) *offset = addr - mi.base_addr;
return true;
} else {
if (buf) buf[0] = '\0';
if (offset) *offset = -1;
return false;
}
struct _modinfo mi;
mi.addr = addr;
mi.full_path = buf;
mi.buflen = buflen;
int pid = os::current_process_id();
if (enumerate_modules(pid, _locate_module_by_addr, (void *)&mi)) {
// buf already contains path name
if (offset) *offset = addr - mi.base_addr;
return true;
}
buf[0] = '\0';
if (offset) *offset = -1;
return false;
}
bool os::dll_address_to_function_name(address addr, char *buf,
int buflen, int *offset) {
// buf is not optional, but offset is optional
assert(buf != NULL, "sanity check");
if (Decoder::decode(addr, buf, buflen, offset)) {
return true;
}
if (offset != NULL) *offset = -1;
if (buf != NULL) buf[0] = '\0';
buf[0] = '\0';
return false;
}
@ -2689,6 +2695,19 @@ address os::win32::fast_jni_accessor_wrapper(BasicType type) {
}
#endif
#ifndef PRODUCT
void os::win32::call_test_func_with_wrapper(void (*funcPtr)(void)) {
// Install a win32 structured exception handler around the test
// function call so the VM can generate an error dump if needed.
__try {
(*funcPtr)();
} __except(topLevelExceptionFilter(
(_EXCEPTION_POINTERS*)_exception_info())) {
// Nothing to do.
}
}
#endif
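
A hedged, standalone illustration of the MSVC structured-exception-handling wrapper added above (the filter is simplified to EXCEPTION_EXECUTE_HANDLER; the real code routes through topLevelExceptionFilter so the VM can produce an error dump):

#include <windows.h>
#include <stdio.h>

static void call_guarded(void (*fn)(void)) {
  __try {
    fn();
  } __except (EXCEPTION_EXECUTE_HANDLER) {
    // A fault inside fn() (e.g. an access violation) lands here instead of
    // terminating the process; GetExceptionCode() is valid in this block.
    fprintf(stderr, "caught SEH exception 0x%08lx\n",
            (unsigned long)GetExceptionCode());
  }
}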
// Virtual Memory
int os::vm_page_size() { return os::win32::vm_page_size(); }


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -94,6 +94,10 @@ class win32 {
static address fast_jni_accessor_wrapper(BasicType);
#endif
#ifndef PRODUCT
static void call_test_func_with_wrapper(void (*funcPtr)(void));
#endif
// filter function to ignore faults on serializations page
static LONG WINAPI serialize_fault_filter(struct _EXCEPTION_POINTERS* e);
};


@ -106,4 +106,10 @@ inline size_t os::write(int fd, const void *buf, unsigned int nBytes) {
inline int os::close(int fd) {
return ::close(fd);
}
#ifndef PRODUCT
#define CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(f) \
os::win32::call_test_func_with_wrapper(f)
#endif
#endif // OS_WINDOWS_VM_OS_WINDOWS_INLINE_HPP


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -146,7 +146,7 @@ public:
// Public Methods
Form(int formType=0, int line=0)
: _next(NULL), _linenum(line), _ftype(formType) { };
~Form() {};
virtual ~Form() {};
virtual bool ideal_only() const {
assert(0,"Check of ideal status on non-instruction/operand form.\n");
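
Why ~Form() becomes virtual: deleting a derived form through a Form* without a virtual destructor is undefined behavior and, in practice, skips the derived destructor. A minimal hedged illustration with hypothetical types:

struct VBase {
  virtual ~VBase() {}              // virtual: safe to delete via VBase*
};

struct Derived : VBase {
  ~Derived() { /* release resources owned by the derived class */ }
};

void demo() {
  VBase* p = new Derived();
  delete p;   // runs ~Derived() then ~VBase(); without 'virtual' this is UB
}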


@ -318,17 +318,17 @@ class KeepAliveVisitor : public HierarchyVisitor<KeepAliveVisitor> {
}
};
// A method family contains a set of all methods that implement a single
// language-level method. Because of erasure, these methods may have different
// signatures. As members of the set are collected while walking over the
// erased method. As members of the set are collected while walking over the
// hierarchy, they are tagged with a qualification state. The qualification
// state for an erased method is set to disqualified if there exists a path
// from the root of hierarchy to the method that contains an interleaving
// language-equivalent method defined in an interface.
// erased method defined in an interface.
class MethodFamily : public ResourceObj {
private:
generic::MethodDescriptor* _descriptor; // language-level description
GrowableArray<Pair<Method*,QualifiedState> > _members;
ResourceHashtable<Method*, int> _member_index;
@ -358,15 +358,8 @@ class MethodFamily : public ResourceObj {
public:
MethodFamily(generic::MethodDescriptor* canonical_desc)
: _descriptor(canonical_desc), _selected_target(NULL),
_exception_message(NULL) {}
generic::MethodDescriptor* descriptor() const { return _descriptor; }
bool descriptor_matches(generic::MethodDescriptor* md, generic::Context* ctx) {
return descriptor()->covariant_match(md, ctx);
}
MethodFamily()
: _selected_target(NULL), _exception_message(NULL) {}
void set_target_if_empty(Method* m) {
if (_selected_target == NULL && !m->is_overpass()) {
@ -441,16 +434,10 @@ class MethodFamily : public ResourceObj {
}
#ifndef PRODUCT
void print_on(outputStream* str) const {
print_on(str, 0);
}
void print_on(outputStream* str, int indent) const {
void print_sig_on(outputStream* str, Symbol* signature, int indent) const {
streamIndentor si(str, indent * 2);
generic::Context ctx(NULL); // empty, as _descriptor already canonicalized
TempNewSymbol family = descriptor()->reify_signature(&ctx, Thread::current());
str->indent().print_cr("Logical Method %s:", family->as_C_string());
str->indent().print_cr("Logical Method %s:", signature->as_C_string());
streamIndentor si2(str);
for (int i = 0; i < _members.length(); ++i) {
@ -516,38 +503,94 @@ Symbol* MethodFamily::generate_conflicts_message(GrowableArray<Method*>* methods
return SymbolTable::new_symbol(ss.base(), (int)ss.size(), CHECK_NULL);
}
// A generic method family contains a set of all methods that implement a single
// language-level method. Because of erasure, these methods may have different
// signatures. As members of the set are collected while walking over the
// hierarchy, they are tagged with a qualification state. The qualification
// state for an erased method is set to disqualified if there exists a path
// from the root of hierarchy to the method that contains an interleaving
// language-equivalent method defined in an interface.
class GenericMethodFamily : public MethodFamily {
private:
generic::MethodDescriptor* _descriptor; // language-level description
public:
GenericMethodFamily(generic::MethodDescriptor* canonical_desc)
: _descriptor(canonical_desc) {}
generic::MethodDescriptor* descriptor() const { return _descriptor; }
bool descriptor_matches(generic::MethodDescriptor* md, generic::Context* ctx) {
return descriptor()->covariant_match(md, ctx);
}
#ifndef PRODUCT
Symbol* get_generic_sig() const {
generic::Context ctx(NULL); // empty, as _descriptor already canonicalized
TempNewSymbol sig = descriptor()->reify_signature(&ctx, Thread::current());
return sig;
}
#endif // ndef PRODUCT
};
class StateRestorer;
// StatefulMethodFamily is a wrapper around MethodFamily that maintains the
// StatefulMethodFamily is a wrapper around a MethodFamily that maintains the
// qualification state during hierarchy visitation, and applies that state
// when adding members to the MethodFamily.
// when adding members to the MethodFamily
class StatefulMethodFamily : public ResourceObj {
friend class StateRestorer;
private:
MethodFamily* _method;
QualifiedState _qualification_state;
void set_qualification_state(QualifiedState state) {
_qualification_state = state;
}
protected:
MethodFamily* _method_family;
public:
StatefulMethodFamily(generic::MethodDescriptor* md, generic::Context* ctx) {
_method = new MethodFamily(md->canonicalize(ctx));
_qualification_state = QUALIFIED;
StatefulMethodFamily() {
_method_family = new MethodFamily();
_qualification_state = QUALIFIED;
}
void set_target_if_empty(Method* m) { _method->set_target_if_empty(m); }
MethodFamily* get_method_family() { return _method; }
bool descriptor_matches(generic::MethodDescriptor* md, generic::Context* ctx) {
return _method->descriptor_matches(md, ctx);
StatefulMethodFamily(MethodFamily* mf) {
_method_family = mf;
_qualification_state = QUALIFIED;
}
void set_target_if_empty(Method* m) { _method_family->set_target_if_empty(m); }
MethodFamily* get_method_family() { return _method_family; }
StateRestorer* record_method_and_dq_further(Method* mo);
};
// StatefulGenericMethodFamily is a wrapper around GenericMethodFamily that maintains the
// qualification state during hierarchy visitation, and applies that state
// when adding members to the GenericMethodFamily.
class StatefulGenericMethodFamily : public StatefulMethodFamily {
public:
StatefulGenericMethodFamily(generic::MethodDescriptor* md, generic::Context* ctx)
: StatefulMethodFamily(new GenericMethodFamily(md->canonicalize(ctx))) {
}
GenericMethodFamily* get_method_family() {
return (GenericMethodFamily*)_method_family;
}
bool descriptor_matches(generic::MethodDescriptor* md, generic::Context* ctx) {
return get_method_family()->descriptor_matches(md, ctx);
}
};
class StateRestorer : public PseudoScopeMark {
private:
StatefulMethodFamily* _method;
@ -563,9 +606,9 @@ class StateRestorer : public PseudoScopeMark {
StateRestorer* StatefulMethodFamily::record_method_and_dq_further(Method* mo) {
StateRestorer* mark = new StateRestorer(this, _qualification_state);
if (_qualification_state == QUALIFIED) {
_method->record_qualified_method(mo);
_method_family->record_qualified_method(mo);
} else {
_method->record_disqualified_method(mo);
_method_family->record_disqualified_method(mo);
}
// Everything found "above" this method in the hierarchy walk is set to
// disqualified
@ -573,15 +616,15 @@ StateRestorer* StatefulMethodFamily::record_method_and_dq_further(Method* mo) {
return mark;
}
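
A hedged sketch of the record-then-restore idiom StateRestorer implements, simplified to a plain RAII guard (HotSpot's version restores through PseudoScopeMark rather than a destructor; the names here are hypothetical):

enum QualifiedState { QUALIFIED, DISQUALIFIED };

class QualificationGuard {
  QualifiedState& _state;
  QualifiedState  _saved;
 public:
  explicit QualificationGuard(QualifiedState& state)
      : _state(state), _saved(state) {
    _state = DISQUALIFIED;   // everything visited deeper is disqualified
  }
  ~QualificationGuard() { _state = _saved; }  // restore on scope exit
};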
class StatefulMethodFamilies : public ResourceObj {
class StatefulGenericMethodFamilies : public ResourceObj {
private:
GrowableArray<StatefulMethodFamily*> _methods;
GrowableArray<StatefulGenericMethodFamily*> _methods;
public:
StatefulMethodFamily* find_matching(
StatefulGenericMethodFamily* find_matching(
generic::MethodDescriptor* md, generic::Context* ctx) {
for (int i = 0; i < _methods.length(); ++i) {
StatefulMethodFamily* existing = _methods.at(i);
StatefulGenericMethodFamily* existing = _methods.at(i);
if (existing->descriptor_matches(md, ctx)) {
return existing;
}
@ -589,17 +632,17 @@ class StatefulMethodFamilies : public ResourceObj {
return NULL;
}
StatefulMethodFamily* find_matching_or_create(
StatefulGenericMethodFamily* find_matching_or_create(
generic::MethodDescriptor* md, generic::Context* ctx) {
StatefulMethodFamily* method = find_matching(md, ctx);
StatefulGenericMethodFamily* method = find_matching(md, ctx);
if (method == NULL) {
method = new StatefulMethodFamily(md, ctx);
method = new StatefulGenericMethodFamily(md, ctx);
_methods.append(method);
}
return method;
}
void extract_families_into(GrowableArray<MethodFamily*>* array) {
void extract_families_into(GrowableArray<GenericMethodFamily*>* array) {
for (int i = 0; i < _methods.length(); ++i) {
array->append(_methods.at(i)->get_method_family());
}
@ -683,26 +726,79 @@ static GrowableArray<EmptyVtableSlot*>* find_empty_vtable_slots(
return slots;
}
// Iterates over the superinterface type hierarchy looking for all methods
// with a specific erased signature.
class FindMethodsByErasedSig : public HierarchyVisitor<FindMethodsByErasedSig> {
private:
// Context data
Symbol* _method_name;
Symbol* _method_signature;
StatefulMethodFamily* _family;
public:
FindMethodsByErasedSig(Symbol* name, Symbol* signature) :
_method_name(name), _method_signature(signature),
_family(NULL) {}
void get_discovered_family(MethodFamily** family) {
if (_family != NULL) {
*family = _family->get_method_family();
} else {
*family = NULL;
}
}
void* new_node_data(InstanceKlass* cls) { return new PseudoScope(); }
void free_node_data(void* node_data) {
PseudoScope::cast(node_data)->destroy();
}
// Find all methods on this hierarchy that match this
// method's erased (name, signature)
bool visit() {
PseudoScope* scope = PseudoScope::cast(current_data());
InstanceKlass* iklass = current_class();
Method* m = iklass->find_method(_method_name, _method_signature);
if (m != NULL) {
if (_family == NULL) {
_family = new StatefulMethodFamily();
}
if (iklass->is_interface()) {
StateRestorer* restorer = _family->record_method_and_dq_further(m);
scope->add_mark(restorer);
} else {
// This is the rule that methods in classes "win" (bad word) over
// methods in interfaces. This works because of single inheritance
_family->set_target_if_empty(m);
}
}
return true;
}
};
// Iterates over the type hierarchy looking for all methods with a specific
// method name. The result of this is a set of method families each of
// which is populated with a set of methods that implement the same
// language-level signature.
class FindMethodsByName : public HierarchyVisitor<FindMethodsByName> {
class FindMethodsByGenericSig : public HierarchyVisitor<FindMethodsByGenericSig> {
private:
// Context data
Thread* THREAD;
generic::DescriptorCache* _cache;
Symbol* _method_name;
generic::Context* _ctx;
StatefulMethodFamilies _families;
StatefulGenericMethodFamilies _families;
public:
FindMethodsByName(generic::DescriptorCache* cache, Symbol* name,
FindMethodsByGenericSig(generic::DescriptorCache* cache, Symbol* name,
generic::Context* ctx, Thread* thread) :
_cache(cache), _method_name(name), _ctx(ctx), THREAD(thread) {}
void get_discovered_families(GrowableArray<MethodFamily*>* methods) {
void get_discovered_families(GrowableArray<GenericMethodFamily*>* methods) {
_families.extract_families_into(methods);
}
@ -733,7 +829,7 @@ class FindMethodsByName : public HierarchyVisitor<FindMethodsByName> {
// Find all methods on this hierarchy that match this method
// (name, signature). This class collects other families of this
// method name.
StatefulMethodFamily* family =
StatefulGenericMethodFamily* family =
_families.find_matching_or_create(md, _ctx);
if (klass->is_interface()) {
@ -752,8 +848,8 @@ class FindMethodsByName : public HierarchyVisitor<FindMethodsByName> {
};
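As intuition for what "same language-level signature" means here, a hypothetical Java sketch (invented names, not part of this changeset): two methods whose erased signatures differ but which the generic analysis would group into one family.
interface Box<T> {
    default T unwrap(T fallback) { return fallback; }
}
// Box<String>.unwrap erases to unwrap(Object), while this override
// erases to unwrap(String); the erased signatures differ, but both
// implement the same language-level signature, so they would land in
// a single GenericMethodFamily for StringBox.
interface StringBox extends Box<String> {
    default String unwrap(String fallback) { return "boxed"; }
}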
#ifndef PRODUCT
static void print_families(
GrowableArray<MethodFamily*>* methods, Symbol* match) {
static void print_generic_families(
GrowableArray<GenericMethodFamily*>* methods, Symbol* match) {
streamIndentor si(tty, 4);
if (methods->length() == 0) {
tty->indent();
@ -761,22 +857,87 @@ static void print_families(
}
for (int i = 0; i < methods->length(); ++i) {
tty->indent();
MethodFamily* lm = methods->at(i);
GenericMethodFamily* lm = methods->at(i);
if (lm->contains_signature(match)) {
tty->print_cr("<Matching>");
} else {
tty->print_cr("<Non-Matching>");
}
lm->print_on(tty, 1);
lm->print_sig_on(tty, lm->get_generic_sig(), 1);
}
}
#endif // ndef PRODUCT
static void merge_in_new_methods(InstanceKlass* klass,
GrowableArray<Method*>* new_methods, TRAPS);
static void create_overpasses(
GrowableArray<EmptyVtableSlot*>* slots, InstanceKlass* klass, TRAPS);
static void generate_generic_defaults(
InstanceKlass* klass, GrowableArray<EmptyVtableSlot*>* empty_slots,
EmptyVtableSlot* slot, int current_slot_index, TRAPS) {
if (slot->is_bound()) {
#ifndef PRODUCT
if (TraceDefaultMethods) {
streamIndentor si(tty, 4);
tty->indent().print_cr("Already bound to logical method:");
GenericMethodFamily* lm = (GenericMethodFamily*)(slot->get_binding());
lm->print_sig_on(tty, lm->get_generic_sig(), 1);
}
#endif // ndef PRODUCT
return; // covered by previous processing
}
generic::DescriptorCache cache;
generic::Context ctx(&cache);
FindMethodsByGenericSig visitor(&cache, slot->name(), &ctx, CHECK);
visitor.run(klass);
GrowableArray<GenericMethodFamily*> discovered_families;
visitor.get_discovered_families(&discovered_families);
#ifndef PRODUCT
if (TraceDefaultMethods) {
print_generic_families(&discovered_families, slot->signature());
}
#endif // ndef PRODUCT
// Find and populate any other slots that match the discovered families
for (int j = current_slot_index; j < empty_slots->length(); ++j) {
EmptyVtableSlot* open_slot = empty_slots->at(j);
if (slot->name() == open_slot->name()) {
for (int k = 0; k < discovered_families.length(); ++k) {
GenericMethodFamily* lm = discovered_families.at(k);
if (lm->contains_signature(open_slot->signature())) {
lm->determine_target(klass, CHECK);
open_slot->bind_family(lm);
}
}
}
}
}
static void generate_erased_defaults(
InstanceKlass* klass, GrowableArray<EmptyVtableSlot*>* empty_slots,
EmptyVtableSlot* slot, TRAPS) {
// sets up the set of methods with exactly the same erased signature
FindMethodsByErasedSig visitor(slot->name(), slot->signature());
visitor.run(klass);
MethodFamily* family;
visitor.get_discovered_family(&family);
if (family != NULL) {
family->determine_target(klass, CHECK);
slot->bind_family(family);
}
}
static void merge_in_new_methods(InstanceKlass* klass,
GrowableArray<Method*>* new_methods, TRAPS);
// This is the guts of the default methods implementation. This is called just
// after the classfile has been parsed if some ancestor has default methods.
//
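As a concrete, hypothetical Java-level picture (invented names) of the slot filling this pass performs:
interface Greeter {
    default String greet() { return "hi"; }
}
// Impl declares no greet(), so after parsing its vtable slot for
// greet() is empty; the pass binds that slot to the unique default.
// Had a second, unrelated interface contributed a conflicting default
// (possible under separate compilation), the slot would instead be
// filled with an exception-throwing overpass, so the conflict would
// surface only when greet() is actually invoked.
class Impl implements Greeter {
    public static void main(String[] args) {
        System.out.println(new Impl().greet()); // prints "hi"
    }
}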
@ -807,8 +968,6 @@ void DefaultMethods::generate_default_methods(
// whatever scope it's in.
ResourceMark rm(THREAD);
generic::DescriptorCache cache;
// Keep entire hierarchy alive for the duration of the computation
KeepAliveRegistrar keepAlive(THREAD);
KeepAliveVisitor loadKeepAlive(&keepAlive);
@ -837,47 +996,13 @@ void DefaultMethods::generate_default_methods(
tty->print_cr("");
}
#endif // ndef PRODUCT
if (slot->is_bound()) {
#ifndef PRODUCT
if (TraceDefaultMethods) {
streamIndentor si(tty, 4);
tty->indent().print_cr("Already bound to logical method:");
slot->get_binding()->print_on(tty, 1);
}
#endif // ndef PRODUCT
continue; // covered by previous processing
if (ParseGenericDefaults) {
generate_generic_defaults(klass, empty_slots, slot, i, CHECK);
} else {
generate_erased_defaults(klass, empty_slots, slot, CHECK);
}
generic::Context ctx(&cache);
FindMethodsByName visitor(&cache, slot->name(), &ctx, CHECK);
visitor.run(klass);
GrowableArray<MethodFamily*> discovered_families;
visitor.get_discovered_families(&discovered_families);
#ifndef PRODUCT
if (TraceDefaultMethods) {
print_families(&discovered_families, slot->signature());
}
#endif // ndef PRODUCT
// Find and populate any other slots that match the discovered families
for (int j = i; j < empty_slots->length(); ++j) {
EmptyVtableSlot* open_slot = empty_slots->at(j);
if (slot->name() == open_slot->name()) {
for (int k = 0; k < discovered_families.length(); ++k) {
MethodFamily* lm = discovered_families.at(k);
if (lm->contains_signature(open_slot->signature())) {
lm->determine_target(klass, CHECK);
open_slot->bind_family(lm);
}
}
}
}
}
}
#ifndef PRODUCT
if (TraceDefaultMethods) {
tty->print_cr("Creating overpasses...");
@ -893,7 +1018,6 @@ void DefaultMethods::generate_default_methods(
#endif // ndef PRODUCT
}
/**
* Generic analysis was used upon interface '_target' and found a unique
* default method candidate with generic signature '_method_desc'. This
@ -912,17 +1036,85 @@ void DefaultMethods::generate_default_methods(
* the selected method along that path.
*/
class ShadowChecker : public HierarchyVisitor<ShadowChecker> {
private:
generic::DescriptorCache* _cache;
protected:
Thread* THREAD;
InstanceKlass* _target;
Symbol* _method_name;
InstanceKlass* _method_holder;
generic::MethodDescriptor* _method_desc;
bool _found_shadow;
public:
ShadowChecker(Thread* thread, Symbol* name, InstanceKlass* holder,
InstanceKlass* target)
: THREAD(thread), _method_name(name), _method_holder(holder),
_target(target), _found_shadow(false) {}
void* new_node_data(InstanceKlass* cls) { return NULL; }
void free_node_data(void* data) { return; }
bool visit() {
InstanceKlass* ik = current_class();
if (ik == _target && current_depth() == 1) {
return false; // This was the specified super -- no need to search it
}
if (ik == _method_holder || ik == _target) {
// We found a path that should be examined to see if it shadows _method
if (path_has_shadow()) {
_found_shadow = true;
cancel_iteration();
}
return false; // no need to continue up hierarchy
}
return true;
}
virtual bool path_has_shadow() = 0;
bool found_shadow() { return _found_shadow; }
};
// Used for invokespecial.
// Invokespecial is allowed to invoke a concrete interface method
// and can be used to disambiguate among qualified candidates,
// which are methods in immediate superinterfaces,
// but may not be used to invoke a candidate that would be shadowed
// from the perspective of the caller.
// Invokespecial is also used in overpass generation today.
// We re-run the shadow checker because we can't distinguish this case,
// but it should return the same answer, since the overpass target
// is now the invokespecial caller.
class ErasedShadowChecker : public ShadowChecker {
private:
bool path_has_shadow() {
for (int i = current_depth() - 1; i > 0; --i) {
InstanceKlass* ik = class_at_depth(i);
if (ik->is_interface()) {
int end;
int start = ik->find_method_by_name(_method_name, &end);
if (start != -1) {
return true;
}
}
}
return false;
}
public:
ErasedShadowChecker(Thread* thread, Symbol* name, InstanceKlass* holder,
InstanceKlass* target)
: ShadowChecker(thread, name, holder, target) {}
};
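A hypothetical Java-level view (invented names, not from this changeset) of the shadowing rule the checkers enforce for invokespecial:
interface Top {
    default String who() { return "top"; }
}
interface Middle extends Top {
    default String who() { return "middle"; }
}
class User implements Top, Middle {
    String call() {
        // Legal: Middle is a direct superinterface and nothing shadows
        // its who() from User's point of view.
        return Middle.super.who();
        // By contrast, Top.super.who() would be rejected: Middle.who()
        // shadows Top.who() on the path User -> Middle -> Top, which is
        // what ErasedShadowChecker detects (javac applies the same rule
        // at compile time).
    }
}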
class GenericShadowChecker : public ShadowChecker {
private:
generic::DescriptorCache* _cache;
generic::MethodDescriptor* _method_desc;
bool path_has_shadow() {
generic::Context ctx(_cache);
@ -950,104 +1142,42 @@ class ShadowChecker : public HierarchyVisitor<ShadowChecker> {
public:
ShadowChecker(generic::DescriptorCache* cache, Thread* thread,
GenericShadowChecker(generic::DescriptorCache* cache, Thread* thread,
Symbol* name, InstanceKlass* holder, generic::MethodDescriptor* desc,
InstanceKlass* target)
: _cache(cache), THREAD(thread), _method_name(name), _method_holder(holder),
_method_desc(desc), _target(target), _found_shadow(false) {}
void* new_node_data(InstanceKlass* cls) { return NULL; }
void free_node_data(void* data) { return; }
bool visit() {
InstanceKlass* ik = current_class();
if (ik == _target && current_depth() == 1) {
return false; // This was the specified super -- no need to search it
}
if (ik == _method_holder || ik == _target) {
// We found a path that should be examined to see if it shadows _method
if (path_has_shadow()) {
_found_shadow = true;
cancel_iteration();
}
return false; // no need to continue up hierarchy
}
return true;
}
bool found_shadow() { return _found_shadow; }
: ShadowChecker(thread, name, holder, target) {
_cache = cache;
_method_desc = desc;
}
};
// This is called during linktime when we find an invokespecial call that
// refers to a direct superinterface. It indicates that we should find the
// default method in the hierarchy of that superinterface, and if that method
// would have been a candidate from the point of view of 'this' class, then we
// return that method.
Method* DefaultMethods::find_super_default(
Klass* cls, Klass* super, Symbol* method_name, Symbol* sig, TRAPS) {
ResourceMark rm(THREAD);
assert(cls != NULL && super != NULL, "Need real classes");
// Find the unique qualified candidate from the perspective of the super_class
// which is the resolved_klass, which must be an immediate superinterface
// of klass
Method* find_erased_super_default(InstanceKlass* current_class, InstanceKlass* super_class, Symbol* method_name, Symbol* sig, TRAPS) {
InstanceKlass* current_class = InstanceKlass::cast(cls);
InstanceKlass* direction = InstanceKlass::cast(super);
FindMethodsByErasedSig visitor(method_name, sig);
visitor.run(super_class); // find candidates from resolved_klass
// Keep entire hierarchy alive for the duration of the computation
KeepAliveRegistrar keepAlive(THREAD);
KeepAliveVisitor loadKeepAlive(&keepAlive);
loadKeepAlive.run(current_class);
MethodFamily* family;
visitor.get_discovered_family(&family);
#ifndef PRODUCT
if (TraceDefaultMethods) {
tty->print_cr("Finding super default method %s.%s%s from %s",
direction->name()->as_C_string(),
method_name->as_C_string(), sig->as_C_string(),
current_class->name()->as_C_string());
}
#endif // ndef PRODUCT
if (!direction->is_interface()) {
// We should not be here
return NULL;
if (family != NULL) {
family->determine_target(current_class, CHECK_NULL); // get target from current_class
}
generic::DescriptorCache cache;
generic::Context ctx(&cache);
// Prime the initial generic context for current -> direction
ctx.apply_type_arguments(current_class, direction, CHECK_NULL);
FindMethodsByName visitor(&cache, method_name, &ctx, CHECK_NULL);
visitor.run(direction);
GrowableArray<MethodFamily*> families;
visitor.get_discovered_families(&families);
#ifndef PRODUCT
if (TraceDefaultMethods) {
print_families(&families, sig);
}
#endif // ndef PRODUCT
MethodFamily* selected_family = NULL;
for (int i = 0; i < families.length(); ++i) {
MethodFamily* lm = families.at(i);
if (lm->contains_signature(sig)) {
lm->determine_target(current_class, CHECK_NULL);
selected_family = lm;
}
}
if (selected_family->has_target()) {
Method* target = selected_family->get_selected_target();
if (family->has_target()) {
Method* target = family->get_selected_target();
InstanceKlass* holder = InstanceKlass::cast(target->method_holder());
// Verify that the identified method is valid from the context of
// the current class
ShadowChecker checker(&cache, THREAD, target->name(),
holder, selected_family->descriptor(), direction);
// the current class, which is the caller class for invokespecial
// link resolution, i.e. ensure that it is not shadowed.
// You can use invokespecial to disambiguate interface methods, but
// you cannot use it to skip over an interface method that would shadow it.
ErasedShadowChecker checker(THREAD, target->name(), holder, super_class);
checker.run(current_class);
if (checker.found_shadow()) {
@ -1061,13 +1191,71 @@ Method* DefaultMethods::find_super_default(
} else {
#ifndef PRODUCT
if (TraceDefaultMethods) {
tty->print(" Returning ");
print_method(tty, target, true);
tty->print_cr("");
family->print_sig_on(tty, target->signature(), 1);
}
#endif // ndef PRODUCT
return target;
}
} else {
assert(family->throws_exception(), "must have target or throw");
THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(),
family->get_exception_message()->as_C_string(), NULL);
}
}
// super_class is assumed to be the direct super of current_class
Method* find_generic_super_default(InstanceKlass* current_class,
InstanceKlass* super_class,
Symbol* method_name, Symbol* sig, TRAPS) {
generic::DescriptorCache cache;
generic::Context ctx(&cache);
// Prime the initial generic context for current -> super_class
ctx.apply_type_arguments(current_class, super_class, CHECK_NULL);
FindMethodsByGenericSig visitor(&cache, method_name, &ctx, CHECK_NULL);
visitor.run(super_class);
GrowableArray<GenericMethodFamily*> families;
visitor.get_discovered_families(&families);
#ifndef PRODUCT
if (TraceDefaultMethods) {
print_generic_families(&families, sig);
}
#endif // ndef PRODUCT
GenericMethodFamily* selected_family = NULL;
for (int i = 0; i < families.length(); ++i) {
GenericMethodFamily* lm = families.at(i);
if (lm->contains_signature(sig)) {
lm->determine_target(current_class, CHECK_NULL);
selected_family = lm;
}
}
if (selected_family->has_target()) {
Method* target = selected_family->get_selected_target();
InstanceKlass* holder = InstanceKlass::cast(target->method_holder());
// Verify that the identified method is valid from the context of
// the current class
GenericShadowChecker checker(&cache, THREAD, target->name(),
holder, selected_family->descriptor(), super_class);
checker.run(current_class);
if (checker.found_shadow()) {
#ifndef PRODUCT
if (TraceDefaultMethods) {
tty->print_cr(" Only candidate found was shadowed.");
}
#endif // ndef PRODUCT
THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(),
"Accessible default method not found", NULL);
} else {
return target;
}
} else {
assert(selected_family->throws_exception(), "must have target or throw");
THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(),
@ -1075,6 +1263,71 @@ Method* DefaultMethods::find_super_default(
}
}
// This is called during linktime when we find an invokespecial call that
// refers to a direct superinterface. It indicates that we should find the
// default method in the hierarchy of that superinterface, and if that method
// would have been a candidate from the point of view of 'this' class, then we
// return that method.
// This logic assumes that the super is a direct superinterface of the caller
Method* DefaultMethods::find_super_default(
Klass* cls, Klass* super, Symbol* method_name, Symbol* sig, TRAPS) {
ResourceMark rm(THREAD);
assert(cls != NULL && super != NULL, "Need real classes");
InstanceKlass* current_class = InstanceKlass::cast(cls);
InstanceKlass* super_class = InstanceKlass::cast(super);
// Keep entire hierarchy alive for the duration of the computation
KeepAliveRegistrar keepAlive(THREAD);
KeepAliveVisitor loadKeepAlive(&keepAlive);
loadKeepAlive.run(current_class); // get hierarchy from current class
#ifndef PRODUCT
if (TraceDefaultMethods) {
tty->print_cr("Finding super default method %s.%s%s from %s",
super_class->name()->as_C_string(),
method_name->as_C_string(), sig->as_C_string(),
current_class->name()->as_C_string());
}
#endif // ndef PRODUCT
assert(super_class->is_interface(), "only call for default methods");
Method* target = NULL;
if (ParseGenericDefaults) {
target = find_generic_super_default(current_class, super_class,
method_name, sig, CHECK_NULL);
} else {
target = find_erased_super_default(current_class, super_class,
method_name, sig, CHECK_NULL);
}
#ifndef PRODUCT
if (target != NULL) {
if (TraceDefaultMethods) {
tty->print(" Returning ");
print_method(tty, target, true);
tty->print_cr("");
}
}
#endif // ndef PRODUCT
return target;
}
#ifndef PRODUCT
// Return true if broad type is a covariant return of narrow type
static bool covariant_return_type(BasicType narrow, BasicType broad) {
if (narrow == broad) {
return true;
}
if (broad == T_OBJECT) {
return true;
}
return false;
}
#endif // ndef PRODUCT
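The assertion relaxed below uses covariant_return_type(); at the Java level this corresponds to the ordinary covariant-return override. A minimal hypothetical sketch (invented names):
class Maker {
    Object make() { return new Object(); }
}
class StringMaker extends Maker {
    // Covariant override: String narrows Object, so a generated
    // redirect declared to return Object may legally forward here,
    // which is the case covariant_return_type() admits.
    @Override
    String make() { return "made"; }
}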
static int assemble_redirect(
BytecodeConstantPool* cp, BytecodeBuffer* buffer,
@ -1103,7 +1356,7 @@ static int assemble_redirect(
out.next();
}
assert(out.at_return_type(), "Parameter counts do not match");
assert(in.type() == out.type(), "Return types are not compatible");
assert(covariant_return_type(out.type(), in.type()), "Return types are not compatible");
if (parameter_count == 1 && (in.type() == T_LONG || in.type() == T_DOUBLE)) {
++parameter_count; // need room for return value
@ -1144,10 +1397,15 @@ static Method* new_method(
Symbol* sig, AccessFlags flags, int max_stack, int params,
ConstMethod::MethodType mt, TRAPS) {
address code_start = static_cast<address>(bytecodes->adr_at(0));
int code_length = bytecodes->length();
address code_start = 0;
int code_length = 0;
InlineTableSizes sizes;
if (bytecodes != NULL && bytecodes->length() > 0) {
code_start = static_cast<address>(bytecodes->adr_at(0));
code_length = bytecodes->length();
}
Method* m = Method::allocate(cp->pool_holder()->class_loader_data(),
code_length, flags, &sizes,
mt, CHECK_NULL);

View File

@ -234,6 +234,7 @@ class java_lang_Class : AllStatic {
static GrowableArray<Klass*>* _fixup_mirror_list;
static void set_init_lock(oop java_class, oop init_lock);
static void set_protection_domain(oop java_class, oop protection_domain);
public:
static void compute_offsets();
@ -272,7 +273,6 @@ class java_lang_Class : AllStatic {
// Support for embedded per-class oops
static oop protection_domain(oop java_class);
static void set_protection_domain(oop java_class, oop protection_domain);
static oop init_lock(oop java_class);
static objArrayOop signers(oop java_class);
static void set_signers(oop java_class, objArrayOop signers);

View File

@ -2017,12 +2017,6 @@ oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \
ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
// ugghh... how would one do this efficiently for a non-contiguous space?
guarantee(false, "NYI");
}
bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
return _smallLinearAllocBlock._word_size == 0;
}

View File

@ -396,7 +396,6 @@ class CompactibleFreeListSpace: public CompactibleSpace {
// iteration support for promotion
void save_marks();
bool no_allocs_since_save_marks();
void object_iterate_since_last_GC(ObjectClosure* cl);
// iteration support for sweeping
void save_sweep_limit() {

View File

@ -3129,26 +3129,6 @@ oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
void
ConcurrentMarkSweepGeneration::object_iterate_since_last_GC(ObjectClosure* blk)
{
// Not currently implemented; need to do the following. -- ysr.
// dld -- I think that is used for some sort of allocation profiler. So it
// really means the objects allocated by the mutator since the last
// GC. We could potentially implement this cheaply by recording only
// the direct allocations in a side data structure.
//
// I think we probably ought not to be required to support these
// iterations at any arbitrary point; I think there ought to be some
// call to enable/disable allocation profiling in a generation/space,
// and the iterator ought to return the objects allocated in the
// gen/space since the enable call, or the last iterator call (which
// will probably be at a GC.) That way, for gens like CM&S that would
// require some extra data structure to support this, we only pay the
// cost when it's in use...
cmsSpace()->object_iterate_since_last_GC(blk);
}
void
ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
cl->set_generation(this);

View File

@ -1273,7 +1273,6 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
// Iteration support and related enquiries
void save_marks();
bool no_allocs_since_save_marks();
void object_iterate_since_last_GC(ObjectClosure* cl);
void younger_refs_iterate(OopsInGenClosure* cl);
// Iteration support specific to CMS generations

View File

@ -54,7 +54,6 @@
#include "memory/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.pcgc.inline.hpp"
#include "runtime/aprofiler.hpp"
#include "runtime/vmThread.hpp"
size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
@ -2665,11 +2664,6 @@ void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
heap_region_iterate(&blk);
}
void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
// FIXME: is this right?
guarantee(false, "object_iterate_since_last_GC not supported by G1 heap");
}
// Calls a SpaceClosure on a HeapRegion.
class SpaceClosureRegionClosure: public HeapRegionClosure {
@ -3598,8 +3592,6 @@ G1CollectedHeap* G1CollectedHeap::heap() {
void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
// always_do_update_barrier = false;
assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
// Call allocation profiler
AllocationProfiler::iterate_since_last_gc();
// Fill TLAB's and such
ensure_parsability(true);
}

View File

@ -1360,11 +1360,6 @@ public:
object_iterate(cl);
}
// Iterate over all objects allocated since the last collection, calling
// "cl.do_object" on each. The heap must have been initialized properly
// to support this function, or else this call will fail.
virtual void object_iterate_since_last_GC(ObjectClosure* cl);
// Iterate over all spaces in use in the heap, in ascending address order.
virtual void space_iterate(SpaceClosure* cl);

View File

@ -43,7 +43,6 @@
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/aprofiler.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/synchronizer.hpp"

View File

@ -23,12 +23,14 @@
*/
#include "precompiled.hpp"
#include "gc_implementation/shared/copyFailedInfo.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/copyFailedInfo.hpp"
#include "gc_implementation/shared/objectCountEventSender.hpp"
#include "memory/heapInspection.hpp"
#include "memory/referenceProcessorStats.hpp"
#include "runtime/os.hpp"
#include "utilities/globalDefinitions.hpp"
#if INCLUDE_ALL_GCS
@ -38,7 +40,7 @@
#define assert_unset_gc_id() assert(_shared_gc_info.id() == SharedGCInfo::UNSET_GCID, "GC already started?")
#define assert_set_gc_id() assert(_shared_gc_info.id() != SharedGCInfo::UNSET_GCID, "GC not started?")
static jlong GCTracer_next_gc_id = 0;
static GCId GCTracer_next_gc_id = 0;
static GCId create_new_gc_id() {
return GCTracer_next_gc_id++;
}
@ -91,26 +93,38 @@ void GCTracer::report_gc_reference_stats(const ReferenceProcessorStats& rps) con
}
#if INCLUDE_SERVICES
void ObjectCountEventSenderClosure::do_cinfo(KlassInfoEntry* entry) {
if (should_send_event(entry)) {
send_event(entry);
class ObjectCountEventSenderClosure : public KlassInfoClosure {
const GCId _gc_id;
const double _size_threshold_percentage;
const size_t _total_size_in_words;
const jlong _timestamp;
public:
ObjectCountEventSenderClosure(GCId gc_id, size_t total_size_in_words, jlong timestamp) :
_gc_id(gc_id),
_size_threshold_percentage(ObjectCountCutOffPercent / 100),
_total_size_in_words(total_size_in_words),
_timestamp(timestamp)
{}
virtual void do_cinfo(KlassInfoEntry* entry) {
if (should_send_event(entry)) {
ObjectCountEventSender::send(entry, _gc_id, _timestamp);
}
}
}
void ObjectCountEventSenderClosure::send_event(KlassInfoEntry* entry) {
_gc_tracer->send_object_count_after_gc_event(entry->klass(), entry->count(),
entry->words() * BytesPerWord);
}
bool ObjectCountEventSenderClosure::should_send_event(KlassInfoEntry* entry) const {
double percentage_of_heap = ((double) entry->words()) / _total_size_in_words;
return percentage_of_heap > _size_threshold_percentage;
}
private:
bool should_send_event(const KlassInfoEntry* entry) const {
double percentage_of_heap = ((double) entry->words()) / _total_size_in_words;
return percentage_of_heap >= _size_threshold_percentage;
}
};
void GCTracer::report_object_count_after_gc(BoolObjectClosure* is_alive_cl) {
assert_set_gc_id();
assert(is_alive_cl != NULL, "Must supply function to check liveness");
if (should_send_object_count_after_gc_event()) {
if (ObjectCountEventSender::should_send_event()) {
ResourceMark rm;
KlassInfoTable cit(false);
@ -118,12 +132,13 @@ void GCTracer::report_object_count_after_gc(BoolObjectClosure* is_alive_cl) {
HeapInspection hi(false, false, false, NULL);
hi.populate_table(&cit, is_alive_cl);
ObjectCountEventSenderClosure event_sender(this, cit.size_of_instances_in_words());
jlong timestamp = os::elapsed_counter();
ObjectCountEventSenderClosure event_sender(_shared_gc_info.id(), cit.size_of_instances_in_words(), timestamp);
cit.iterate(&event_sender);
}
}
}
#endif
#endif // INCLUDE_SERVICES
void GCTracer::report_gc_heap_summary(GCWhen::Type when, const GCHeapSummary& heap_summary, const MetaspaceSummary& meta_space_summary) const {
assert_set_gc_id();

View File

@ -30,7 +30,6 @@
#include "gc_implementation/shared/gcWhen.hpp"
#include "gc_implementation/shared/copyFailedInfo.hpp"
#include "memory/allocation.hpp"
#include "memory/klassInfoClosure.hpp"
#include "memory/referenceType.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1YCTypes.hpp"
@ -113,7 +112,6 @@ class G1YoungGCInfo VALUE_OBJ_CLASS_SPEC {
#endif // INCLUDE_ALL_GCS
class GCTracer : public ResourceObj {
friend class ObjectCountEventSenderClosure;
protected:
SharedGCInfo _shared_gc_info;
@ -123,7 +121,6 @@ class GCTracer : public ResourceObj {
void report_gc_heap_summary(GCWhen::Type when, const GCHeapSummary& heap_summary, const MetaspaceSummary& meta_space_summary) const;
void report_gc_reference_stats(const ReferenceProcessorStats& rp) const;
void report_object_count_after_gc(BoolObjectClosure* object_filter) NOT_SERVICES_RETURN;
bool has_reported_gc_start() const;
protected:
@ -137,25 +134,6 @@ class GCTracer : public ResourceObj {
void send_meta_space_summary_event(GCWhen::Type when, const MetaspaceSummary& meta_space_summary) const;
void send_reference_stats_event(ReferenceType type, size_t count) const;
void send_phase_events(TimePartitions* time_partitions) const;
void send_object_count_after_gc_event(Klass* klass, jlong count, julong total_size) const NOT_SERVICES_RETURN;
bool should_send_object_count_after_gc_event() const;
};
class ObjectCountEventSenderClosure : public KlassInfoClosure {
GCTracer* _gc_tracer;
const double _size_threshold_percentage;
const size_t _total_size_in_words;
public:
ObjectCountEventSenderClosure(GCTracer* gc_tracer, size_t total_size_in_words) :
_gc_tracer(gc_tracer),
_size_threshold_percentage(ObjectCountCutOffPercent / 100),
_total_size_in_words(total_size_in_words)
{}
virtual void do_cinfo(KlassInfoEntry* entry);
protected:
virtual void send_event(KlassInfoEntry* entry);
private:
bool should_send_event(KlassInfoEntry* entry) const;
};
class YoungGCTracer : public GCTracer {

View File

@ -123,27 +123,6 @@ void OldGCTracer::send_concurrent_mode_failure_event() {
}
}
#if INCLUDE_SERVICES
void GCTracer::send_object_count_after_gc_event(Klass* klass, jlong count, julong total_size) const {
EventObjectCountAfterGC e;
if (e.should_commit()) {
e.set_gcId(_shared_gc_info.id());
e.set_class(klass);
e.set_count(count);
e.set_totalSize(total_size);
e.commit();
}
}
#endif
bool GCTracer::should_send_object_count_after_gc_event() const {
#if INCLUDE_TRACE
return Tracing::is_event_enabled(EventObjectCountAfterGC::eventId);
#else
return false;
#endif
}
#if INCLUDE_ALL_GCS
void G1NewTracer::send_g1_young_gc_event() {
EventGCG1GarbageCollection e(UNTIMED);

View File

@ -0,0 +1,55 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/shared/objectCountEventSender.hpp"
#include "memory/heapInspection.hpp"
#include "trace/tracing.hpp"
#include "utilities/globalDefinitions.hpp"
#if INCLUDE_SERVICES
void ObjectCountEventSender::send(const KlassInfoEntry* entry, GCId gc_id, jlong timestamp) {
assert(Tracing::is_event_enabled(EventObjectCountAfterGC::eventId),
"Only call this method if the event is enabled");
EventObjectCountAfterGC event(UNTIMED);
event.set_gcId(gc_id);
event.set_class(entry->klass());
event.set_count(entry->count());
event.set_totalSize(entry->words() * BytesPerWord);
event.set_endtime(timestamp);
event.commit();
}
bool ObjectCountEventSender::should_send_event() {
#if INCLUDE_TRACE
return Tracing::is_event_enabled(EventObjectCountAfterGC::eventId);
#else
return false;
#endif // INCLUDE_TRACE
}
#endif // INCLUDE_SERVICES

View File

@ -22,15 +22,23 @@
*
*/
#ifndef SHARE_VM_MEMORY_KLASSINFOCLOSURE_HPP
#define SHARE_VM_MEMORY_KLASSINFOCLOSURE_HPP
#ifndef SHARE_VM_OBJECT_COUNT_EVENT_SENDER_HPP
#define SHARE_VM_OBJECT_COUNT_EVENT_SENDER_HPP
#include "gc_implementation/shared/gcTrace.hpp"
#include "memory/allocation.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_SERVICES
class KlassInfoEntry;
class KlassInfoClosure : public StackObj {
class ObjectCountEventSender : public AllStatic {
public:
// Called for each KlassInfoEntry.
virtual void do_cinfo(KlassInfoEntry* cie) = 0;
static void send(const KlassInfoEntry* entry, GCId gc_id, jlong timestamp);
static bool should_send_event();
};
#endif // SHARE_VM_MEMORY_KLASSINFOCLOSURE_HPP
#endif // INCLUDE_SERVICES
#endif // SHARE_VM_OBJECT_COUNT_EVENT_SENDER_HPP

View File

@ -85,16 +85,16 @@ GCHeapSummary CollectedHeap::create_heap_summary() {
MetaspaceSummary CollectedHeap::create_metaspace_summary() {
const MetaspaceSizes meta_space(
0, /*MetaspaceAux::capacity_in_bytes(),*/
0, /*MetaspaceAux::used_in_bytes(),*/
MetaspaceAux::allocated_capacity_bytes(),
MetaspaceAux::allocated_used_bytes(),
MetaspaceAux::reserved_in_bytes());
const MetaspaceSizes data_space(
0, /*MetaspaceAux::capacity_in_bytes(Metaspace::NonClassType),*/
0, /*MetaspaceAux::used_in_bytes(Metaspace::NonClassType),*/
MetaspaceAux::allocated_capacity_bytes(Metaspace::NonClassType),
MetaspaceAux::allocated_used_bytes(Metaspace::NonClassType),
MetaspaceAux::reserved_in_bytes(Metaspace::NonClassType));
const MetaspaceSizes class_space(
0, /*MetaspaceAux::capacity_in_bytes(Metaspace::ClassType),*/
0, /*MetaspaceAux::used_in_bytes(Metaspace::ClassType),*/
MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType),
MetaspaceAux::allocated_used_bytes(Metaspace::ClassType),
MetaspaceAux::reserved_in_bytes(Metaspace::ClassType));
return MetaspaceSummary(meta_space, data_space, class_space);

View File

@ -236,10 +236,11 @@ class ChunkPool: public CHeapObj<mtInternal> {
size_t _num_used; // number of chunks currently checked out
const size_t _size; // size of each chunk (must be uniform)
// Our three static pools
// Our four static pools
static ChunkPool* _large_pool;
static ChunkPool* _medium_pool;
static ChunkPool* _small_pool;
static ChunkPool* _tiny_pool;
// return first element or null
void* get_first() {
@ -319,15 +320,18 @@ class ChunkPool: public CHeapObj<mtInternal> {
static ChunkPool* large_pool() { assert(_large_pool != NULL, "must be initialized"); return _large_pool; }
static ChunkPool* medium_pool() { assert(_medium_pool != NULL, "must be initialized"); return _medium_pool; }
static ChunkPool* small_pool() { assert(_small_pool != NULL, "must be initialized"); return _small_pool; }
static ChunkPool* tiny_pool() { assert(_tiny_pool != NULL, "must be initialized"); return _tiny_pool; }
static void initialize() {
_large_pool = new ChunkPool(Chunk::size + Chunk::aligned_overhead_size());
_medium_pool = new ChunkPool(Chunk::medium_size + Chunk::aligned_overhead_size());
_small_pool = new ChunkPool(Chunk::init_size + Chunk::aligned_overhead_size());
_tiny_pool = new ChunkPool(Chunk::tiny_size + Chunk::aligned_overhead_size());
}
static void clean() {
enum { BlocksToKeep = 5 };
_tiny_pool->free_all_but(BlocksToKeep);
_small_pool->free_all_but(BlocksToKeep);
_medium_pool->free_all_but(BlocksToKeep);
_large_pool->free_all_but(BlocksToKeep);
@ -337,6 +341,7 @@ class ChunkPool: public CHeapObj<mtInternal> {
ChunkPool* ChunkPool::_large_pool = NULL;
ChunkPool* ChunkPool::_medium_pool = NULL;
ChunkPool* ChunkPool::_small_pool = NULL;
ChunkPool* ChunkPool::_tiny_pool = NULL;
void chunkpool_init() {
ChunkPool::initialize();
@ -376,6 +381,7 @@ void* Chunk::operator new (size_t requested_size, AllocFailType alloc_failmode,
case Chunk::size: return ChunkPool::large_pool()->allocate(bytes, alloc_failmode);
case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes, alloc_failmode);
case Chunk::init_size: return ChunkPool::small_pool()->allocate(bytes, alloc_failmode);
case Chunk::tiny_size: return ChunkPool::tiny_pool()->allocate(bytes, alloc_failmode);
default: {
void* p = os::malloc(bytes, mtChunk, CALLER_PC);
if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
@ -392,6 +398,7 @@ void Chunk::operator delete(void* p) {
case Chunk::size: ChunkPool::large_pool()->free(c); break;
case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
case Chunk::init_size: ChunkPool::small_pool()->free(c); break;
case Chunk::tiny_size: ChunkPool::tiny_pool()->free(c); break;
default: os::free(c, mtChunk);
}
}

View File

@ -353,7 +353,8 @@ class Chunk: CHeapObj<mtChunk> {
slack = 20, // suspected sizeof(Chunk) + internal malloc headers
#endif
init_size = 1*K - slack, // Size of first chunk
tiny_size = 256 - slack, // Size of first chunk (tiny)
init_size = 1*K - slack, // Size of first chunk (normal aka small)
medium_size= 10*K - slack, // Size of medium-sized chunk
size = 32*K - slack, // Default size of an Arena chunk (following the first)
non_pool_size = init_size + 32 // An initial size which is not one of above

View File

@ -450,11 +450,6 @@ void DefNewGeneration::compute_new_size() {
}
}
void DefNewGeneration::object_iterate_since_last_GC(ObjectClosure* cl) {
// $$$ This may be wrong in case of "scavenge failure"?
eden()->object_iterate(cl);
}
void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
assert(false, "NYI -- are you sure you want to call this?");
}

View File

@ -252,7 +252,6 @@ protected:
// Iteration
void object_iterate(ObjectClosure* blk);
void object_iterate_since_last_GC(ObjectClosure* cl);
void younger_refs_iterate(OopsInGenClosure* cl);

View File

@ -42,7 +42,6 @@
#include "memory/space.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "runtime/aprofiler.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.hpp"
@ -873,12 +872,6 @@ void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
}
}
void GenCollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
for (int i = 0; i < _n_gens; i++) {
_gens[i]->object_iterate_since_last_GC(cl);
}
}
Space* GenCollectedHeap::space_containing(const void* addr) const {
for (int i = 0; i < _n_gens; i++) {
Space* res = _gens[i]->space_containing(addr);
@ -1186,8 +1179,6 @@ void GenCollectedHeap::gc_prologue(bool full) {
CollectedHeap::accumulate_statistics_all_tlabs();
ensure_parsability(true); // retire TLABs
// Call allocation profiler
AllocationProfiler::iterate_since_last_gc();
// Walk generations
GenGCPrologueClosure blk(full);
generation_iterate(&blk, false); // not old-to-young.

View File

@ -222,7 +222,6 @@ public:
void oop_iterate(MemRegion mr, ExtendedOopClosure* cl);
void object_iterate(ObjectClosure* cl);
void safe_object_iterate(ObjectClosure* cl);
void object_iterate_since_last_GC(ObjectClosure* cl);
Space* space_containing(const void* addr) const;
// A CollectedHeap is divided into a dense sequence of "blocks"; that is,

View File

@ -811,16 +811,6 @@ void OneContigSpaceCardGeneration::space_iterate(SpaceClosure* blk,
blk->do_space(_the_space);
}
void OneContigSpaceCardGeneration::object_iterate_since_last_GC(ObjectClosure* blk) {
// Deal with delayed initialization of _the_space,
// and lack of initialization of _last_gc.
if (_last_gc.space() == NULL) {
assert(the_space() != NULL, "shouldn't be NULL");
_last_gc = the_space()->bottom_mark();
}
the_space()->object_iterate_from(_last_gc, blk);
}
void OneContigSpaceCardGeneration::younger_refs_iterate(OopsInGenClosure* blk) {
blk->set_generation(this);
younger_refs_in_space_iterate(_the_space, blk);

View File

@ -551,12 +551,6 @@ class Generation: public CHeapObj<mtGC> {
// the heap. This defaults to object_iterate() unless overridden.
virtual void safe_object_iterate(ObjectClosure* cl);
// Iterate over all objects allocated in the generation since the last
// collection, calling "cl.do_object" on each. The generation must have
// been initialized properly to support this function, or else this call
// will fail.
virtual void object_iterate_since_last_GC(ObjectClosure* cl) = 0;
// Apply "cl->do_oop" to (the address of) all and only all the ref fields
// in the current generation that contain pointers to objects in younger
// generations. Objects allocated since the last "save_marks" call are
@ -724,7 +718,6 @@ class OneContigSpaceCardGeneration: public CardGeneration {
// Iteration
void object_iterate(ObjectClosure* blk);
void space_iterate(SpaceClosure* blk, bool usedOnly = false);
void object_iterate_since_last_GC(ObjectClosure* cl);
void younger_refs_iterate(OopsInGenClosure* blk);

View File

@ -26,7 +26,6 @@
#define SHARE_VM_MEMORY_HEAPINSPECTION_HPP
#include "memory/allocation.inline.hpp"
#include "memory/klassInfoClosure.hpp"
#include "oops/oop.inline.hpp"
#include "oops/annotations.hpp"
#include "utilities/macros.hpp"
@ -204,6 +203,12 @@ class KlassInfoEntry: public CHeapObj<mtInternal> {
const char* name() const;
};
class KlassInfoClosure : public StackObj {
public:
// Called for each KlassInfoEntry.
virtual void do_cinfo(KlassInfoEntry* cie) = 0;
};
class KlassInfoBucket: public CHeapObj<mtInternal> {
private:
KlassInfoEntry* _list;

View File

@ -166,11 +166,6 @@ public:
// Same as above, restricted to a memory region.
virtual void oop_iterate(MemRegion mr, ExtendedOopClosure* cl) = 0;
// Iterate over all objects allocated since the last collection, calling
// "cl->do_object" on each. The heap must have been initialized properly
// to support this function, or else this call will fail.
virtual void object_iterate_since_last_GC(ObjectClosure* cl) = 0;
// Iterate over all spaces in use in the heap, in an undefined order.
virtual void space_iterate(SpaceClosure* cl) = 0;

View File

@ -52,7 +52,6 @@
#include "oops/oop.inline.hpp"
#include "oops/typeArrayKlass.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/aprofiler.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/fprofiler.hpp"

View File

@ -71,7 +71,6 @@ Method* ArrayKlass::uncached_lookup_method(Symbol* name, Symbol* signature) cons
}
ArrayKlass::ArrayKlass(Symbol* name) {
set_alloc_size(0);
set_name(name);
set_super(Universe::is_bootstrapping() ? (Klass*)NULL : SystemDictionary::Object_klass());
@ -161,12 +160,6 @@ void ArrayKlass::array_klasses_do(void f(Klass* k)) {
}
}
void ArrayKlass::with_array_klasses_do(void f(Klass* k)) {
array_klasses_do(f);
}
// GC support
void ArrayKlass::oops_do(OopClosure* cl) {

View File

@ -39,7 +39,6 @@ class ArrayKlass: public Klass {
Klass* volatile _higher_dimension; // Refers the (n+1)'th-dimensional array (if present).
Klass* volatile _lower_dimension; // Refers the (n-1)'th-dimensional array (if present).
int _vtable_len; // size of vtable for this klass
juint _alloc_size; // allocation profiling support
oop _component_mirror; // component type, as a java/lang/Class
protected:
@ -65,10 +64,6 @@ class ArrayKlass: public Klass {
void set_lower_dimension(Klass* k) { _lower_dimension = k; }
Klass** adr_lower_dimension() { return (Klass**)&this->_lower_dimension;}
// Allocation profiling support
juint alloc_size() const { return _alloc_size; }
void set_alloc_size(juint n) { _alloc_size = n; }
// offset of first element, including any padding for the sake of alignment
int array_header_in_bytes() const { return layout_helper_header_size(layout_helper()); }
int log2_element_size() const { return layout_helper_log2_element_size(layout_helper()); }
@ -126,7 +121,6 @@ class ArrayKlass: public Klass {
// Iterators
void array_klasses_do(void f(Klass* k));
void array_klasses_do(void f(Klass* k, TRAPS), TRAPS);
void with_array_klasses_do(void f(Klass* k));
// GC support
virtual void oops_do(OopClosure* cl);

View File

@ -1321,12 +1321,6 @@ void InstanceKlass::array_klasses_do(void f(Klass* k)) {
ArrayKlass::cast(array_klasses())->array_klasses_do(f);
}
void InstanceKlass::with_array_klasses_do(void f(Klass* k)) {
f(this);
array_klasses_do(f);
}
#ifdef ASSERT
static int linear_search(Array<Method*>* methods, Symbol* name, Symbol* signature) {
int len = methods->length();

View File

@ -794,7 +794,6 @@ class InstanceKlass: public Klass {
void methods_do(void f(Method* method));
void array_klasses_do(void f(Klass* k));
void array_klasses_do(void f(Klass* k, TRAPS), TRAPS);
void with_array_klasses_do(void f(Klass* k));
bool super_types_do(SuperTypeClosure* blk);
// Casting from Klass*
@ -874,10 +873,6 @@ class InstanceKlass: public Klass {
}
}
// Allocation profiling support
juint alloc_size() const { return _alloc_count * size_helper(); }
void set_alloc_size(juint n) {}
// Use this to return the size of an instance in heap words:
int size_helper() const {
return layout_helper_to_size_helper(layout_helper());

View File

@ -168,7 +168,6 @@ Klass::Klass() {
set_subklass(NULL);
set_next_sibling(NULL);
set_next_link(NULL);
set_alloc_count(0);
TRACE_INIT_ID(this);
set_prototype_header(markOopDesc::prototype());
@ -543,12 +542,6 @@ Klass* Klass::array_klass_impl(bool or_null, TRAPS) {
return NULL;
}
void Klass::with_array_klasses_do(void f(Klass* k)) {
f(this);
}
oop Klass::class_loader() const { return class_loader_data()->class_loader(); }
const char* Klass::external_name() const {

View File

@ -79,7 +79,6 @@
// [last_biased_lock_bulk_revocation_time] (64 bits)
// [prototype_header]
// [biased_lock_revocation_count]
// [alloc_count ]
// [_modified_oops]
// [_accumulated_modified_oops]
// [trace_id]
@ -171,8 +170,6 @@ class Klass : public Metadata {
markOop _prototype_header; // Used when biased locking is both enabled and disabled for this type
jint _biased_lock_revocation_count;
juint _alloc_count; // allocation profiling support
TRACE_DEFINE_KLASS_TRACE_ID;
// Remembered sets support for the oops in the klasses.
@ -290,11 +287,6 @@ class Klass : public Metadata {
void set_next_sibling(Klass* s);
public:
// Allocation profiling support
juint alloc_count() const { return _alloc_count; }
void set_alloc_count(juint n) { _alloc_count = n; }
virtual juint alloc_size() const = 0;
virtual void set_alloc_size(juint n) = 0;
// Compiler support
static ByteSize super_offset() { return in_ByteSize(offset_of(Klass, _super)); }
@ -677,7 +669,6 @@ class Klass : public Metadata {
#endif // INCLUDE_ALL_GCS
virtual void array_klasses_do(void f(Klass* k)) {}
virtual void with_array_klasses_do(void f(Klass* k));
// Return self, except for abstract classes with exactly 1
// implementor. Then return the 1 concrete implementation.

View File

@ -5097,7 +5097,7 @@ _JNI_IMPORT_OR_EXPORT_ jint JNICALL JNI_CreateJavaVM(JavaVM **vm, void **penv, v
// function used to determine this will always return false. Atomic::xchg
// does not have this problem.
if (Atomic::xchg(1, &vm_created) == 1) {
return JNI_ERR; // already created, or create attempt in progress
return JNI_EEXIST; // already created, or create attempt in progress
}
if (Atomic::xchg(0, &safe_to_recreate_vm) == 0) {
return JNI_ERR; // someone tried and failed and retry not allowed.
@ -5138,9 +5138,21 @@ _JNI_IMPORT_OR_EXPORT_ jint JNICALL JNI_CreateJavaVM(JavaVM **vm, void **penv, v
event.commit();
}
#ifndef PRODUCT
#ifndef TARGET_OS_FAMILY_windows
#define CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(f) f()
#endif
// Check if we should compile all classes on bootclasspath
NOT_PRODUCT(if (CompileTheWorld) ClassLoader::compile_the_world();)
NOT_PRODUCT(if (ReplayCompiles) ciReplay::replay(thread);)
if (CompileTheWorld) ClassLoader::compile_the_world();
if (ReplayCompiles) ciReplay::replay(thread);
// Some platforms (like Win*) need a wrapper around these test
// functions in order to properly handle error conditions.
CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(test_error_handler);
CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(execute_internal_vm_tests);
#endif
// Since this is not a JVM_ENTRY we have to set the thread state manually before leaving.
ThreadStateTransition::transition_and_fence(thread, _thread_in_vm, _thread_in_native);
} else {
@ -5157,8 +5169,6 @@ _JNI_IMPORT_OR_EXPORT_ jint JNICALL JNI_CreateJavaVM(JavaVM **vm, void **penv, v
OrderAccess::release_store(&vm_created, 0);
}
NOT_PRODUCT(test_error_handler(ErrorHandlerTest));
NOT_PRODUCT(execute_internal_vm_tests());
return result;
}

View File

@ -1121,26 +1121,6 @@ JVM_ENTRY(jobject, JVM_GetProtectionDomain(JNIEnv *env, jclass cls))
JVM_END
// Obsolete since 1.2 (Class.setProtectionDomain removed), although
// still defined in core libraries as of 1.5.
JVM_ENTRY(void, JVM_SetProtectionDomain(JNIEnv *env, jclass cls, jobject protection_domain))
JVMWrapper("JVM_SetProtectionDomain");
if (JNIHandles::resolve(cls) == NULL) {
THROW(vmSymbols::java_lang_NullPointerException());
}
if (!java_lang_Class::is_primitive(JNIHandles::resolve(cls))) {
// Call is ignored for primitive types
Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve(cls));
// cls won't be an array, as this called only from ClassLoader.defineClass
if (k->oop_is_instance()) {
oop pd = JNIHandles::resolve(protection_domain);
assert(pd == NULL || pd->is_oop(), "just checking");
java_lang_Class::set_protection_domain(k->java_mirror(), pd);
}
}
JVM_END
static bool is_authorized(Handle context, instanceKlassHandle klass, TRAPS) {
// If there is a security manager and protection domain, check the access
// in the protection domain, otherwise it is authorized.

View File

@ -471,9 +471,6 @@ JVM_SetClassSigners(JNIEnv *env, jclass cls, jobjectArray signers);
JNIEXPORT jobject JNICALL
JVM_GetProtectionDomain(JNIEnv *env, jclass cls);
JNIEXPORT void JNICALL
JVM_SetProtectionDomain(JNIEnv *env, jclass cls, jobject protection_domain);
JNIEXPORT jboolean JNICALL
JVM_IsArrayClass(JNIEnv *env, jclass cls);

View File

@ -1,143 +0,0 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/space.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "runtime/aprofiler.hpp"
bool AllocationProfiler::_active = false;
GrowableArray<Klass*>* AllocationProfiler::_print_array = NULL;
class AllocProfClosure : public ObjectClosure {
public:
void do_object(oop obj) {
Klass* k = obj->klass();
k->set_alloc_count(k->alloc_count() + 1);
k->set_alloc_size(k->alloc_size() + obj->size());
}
};
void AllocationProfiler::iterate_since_last_gc() {
if (is_active()) {
AllocProfClosure blk;
GenCollectedHeap* heap = GenCollectedHeap::heap();
heap->object_iterate_since_last_GC(&blk);
}
}
void AllocationProfiler::engage() {
_active = true;
}
void AllocationProfiler::disengage() {
_active = false;
}
void AllocationProfiler::add_class_to_array(Klass* k) {
_print_array->append(k);
}
void AllocationProfiler::add_classes_to_array(Klass* k) {
// Iterate over klass and all array klasses for klass
k->with_array_klasses_do(&AllocationProfiler::add_class_to_array);
}
int AllocationProfiler::compare_classes(Klass** k1, Klass** k2) {
// Sort by total allocation size
return (*k2)->alloc_size() - (*k1)->alloc_size();
}
int AllocationProfiler::average(size_t alloc_size, int alloc_count) {
return (int) ((double) (alloc_size * BytesPerWord) / MAX2(alloc_count, 1) + 0.5);
}
void AllocationProfiler::sort_and_print_array(size_t cutoff) {
_print_array->sort(&AllocationProfiler::compare_classes);
tty->print_cr("________________Size"
"__Instances"
"__Average"
"__Class________________");
size_t total_alloc_size = 0;
int total_alloc_count = 0;
for (int index = 0; index < _print_array->length(); index++) {
Klass* k = _print_array->at(index);
size_t alloc_size = k->alloc_size();
if (alloc_size > cutoff) {
int alloc_count = k->alloc_count();
#ifdef PRODUCT
const char* name = k->external_name();
#else
const char* name = k->internal_name();
#endif
tty->print_cr("%20u %10u %8u %s",
alloc_size * BytesPerWord,
alloc_count,
average(alloc_size, alloc_count),
name);
total_alloc_size += alloc_size;
total_alloc_count += alloc_count;
}
k->set_alloc_count(0);
k->set_alloc_size(0);
}
tty->print_cr("%20u %10u %8u --total--",
total_alloc_size * BytesPerWord,
total_alloc_count,
average(total_alloc_size, total_alloc_count));
tty->cr();
}
void AllocationProfiler::print(size_t cutoff) {
ResourceMark rm;
assert(!is_active(), "AllocationProfiler cannot be active while printing profile");
tty->cr();
tty->print_cr("Allocation profile (sizes in bytes, cutoff = " SIZE_FORMAT " bytes):", cutoff * BytesPerWord);
tty->cr();
// Print regular instance klasses and basic type array klasses
_print_array = new GrowableArray<Klass*>(SystemDictionary::number_of_classes()*2);
SystemDictionary::classes_do(&add_classes_to_array);
Universe::basic_type_classes_do(&add_classes_to_array);
sort_and_print_array(cutoff);
// This used to print metadata in the permgen but since there isn't a permgen
// anymore, it is not yet implemented.
}

View File

@ -1,71 +0,0 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_RUNTIME_APROFILER_HPP
#define SHARE_VM_RUNTIME_APROFILER_HPP
#include "memory/allocation.hpp"
#include "memory/universe.hpp"
#include "oops/klass.hpp"
#include "utilities/top.hpp"
// A simple allocation profiler for Java. The profiler collects and prints
// the number and total size of instances allocated per class, including
// array classes.
//
// The profiler is currently global for all threads. It can be changed to a
// per-thread profiler by keeping a more elaborate data structure and calling
// iterate_since_last_scavenge at thread switches.
class AllocationProfiler: AllStatic {
friend class GenCollectedHeap;
friend class G1CollectedHeap;
friend class MarkSweep;
private:
static bool _active; // tells whether profiler is active
static GrowableArray<Klass*>* _print_array; // temporary array for printing
// Utility printing functions
static void add_class_to_array(Klass* k);
static void add_classes_to_array(Klass* k);
static int compare_classes(Klass** k1, Klass** k2);
static int average(size_t alloc_size, int alloc_count);
static void sort_and_print_array(size_t cutoff);
// Call for collecting allocation information. Called at scavenge, mark-sweep and disengage.
static void iterate_since_last_gc();
public:
// Start profiler
static void engage();
// Stop profiler
static void disengage();
// Tells whether profiler is active
static bool is_active() { return _active; }
// Print profile
static void print(size_t cutoff); // Cutoff in total allocation size (in words)
};
#endif // SHARE_VM_RUNTIME_APROFILER_HPP

View File

@ -68,7 +68,6 @@ char* Arguments::_java_command = NULL;
SystemProperty* Arguments::_system_properties = NULL;
const char* Arguments::_gc_log_filename = NULL;
bool Arguments::_has_profile = false;
bool Arguments::_has_alloc_profile = false;
uintx Arguments::_min_heap_size = 0;
Arguments::Mode Arguments::_mode = _mixed;
bool Arguments::_java_compiler = false;
@ -261,6 +260,9 @@ static ObsoleteFlag obsolete_jvm_flags[] = {
{ "PrintRevisitStats", JDK_Version::jdk(8), JDK_Version::jdk(9) },
{ "UseVectoredExceptions", JDK_Version::jdk(8), JDK_Version::jdk(9) },
{ "UseSplitVerifier", JDK_Version::jdk(8), JDK_Version::jdk(9) },
{ "UseISM", JDK_Version::jdk(8), JDK_Version::jdk(9) },
{ "UsePermISM", JDK_Version::jdk(8), JDK_Version::jdk(9) },
{ "UseMPSS", JDK_Version::jdk(8), JDK_Version::jdk(9) },
#ifdef PRODUCT
{ "DesiredMethodLimit",
JDK_Version::jdk_update(7, 2), JDK_Version::jdk(8) },
@ -1855,8 +1857,13 @@ bool Arguments::check_gc_consistency() {
"please refer to the release notes for the combinations "
"allowed\n");
status = false;
} else if (ReservedCodeCacheSize > 2*G) {
// Code cache size larger than MAXINT is not supported.
jio_fprintf(defaultStream::error_stream(),
"Invalid ReservedCodeCacheSize=%dM. Must be at most %uM.\n", ReservedCodeCacheSize/M,
(2*G)/M);
status = false;
}
return status;
}
@ -1986,23 +1993,6 @@ bool Arguments::check_vm_args_consistency() {
status = status && check_gc_consistency();
status = status && check_stack_pages();
if (_has_alloc_profile) {
if (UseParallelGC || UseParallelOldGC) {
jio_fprintf(defaultStream::error_stream(),
"error: invalid argument combination.\n"
"Allocation profiling (-Xaprof) cannot be used together with "
"Parallel GC (-XX:+UseParallelGC or -XX:+UseParallelOldGC).\n");
status = false;
}
if (UseConcMarkSweepGC) {
jio_fprintf(defaultStream::error_stream(),
"error: invalid argument combination.\n"
"Allocation profiling (-Xaprof) cannot be used together with "
"the CMS collector (-XX:+UseConcMarkSweepGC).\n");
status = false;
}
}
if (CMSIncrementalMode) {
if (!UseConcMarkSweepGC) {
jio_fprintf(defaultStream::error_stream(),
@ -2239,8 +2229,13 @@ bool Arguments::check_vm_args_consistency() {
"Invalid ReservedCodeCacheSize=%dK. Must be at least %uK.\n", ReservedCodeCacheSize/K,
min_code_cache_size/K);
status = false;
} else if (ReservedCodeCacheSize > 2*G) {
// Code cache size larger than MAXINT is not supported.
jio_fprintf(defaultStream::error_stream(),
"Invalid ReservedCodeCacheSize=%dM. Must be at most %uM.\n", ReservedCodeCacheSize/M,
(2*G)/M);
status = false;
}
return status;
}
@ -2700,9 +2695,6 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
"Flat profiling is not supported in this VM.\n");
return JNI_ERR;
#endif // INCLUDE_FPROF
// -Xaprof
} else if (match_option(option, "-Xaprof", &tail)) {
_has_alloc_profile = true;
// -Xconcurrentio
} else if (match_option(option, "-Xconcurrentio", &tail)) {
FLAG_SET_CMDLINE(bool, UseLWPSynchronization, true);
@ -2957,13 +2949,6 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
FLAG_SET_CMDLINE(bool, UseTLAB, true);
} else if (match_option(option, "-XX:-UseTLE", &tail)) {
FLAG_SET_CMDLINE(bool, UseTLAB, false);
SOLARIS_ONLY(
} else if (match_option(option, "-XX:+UsePermISM", &tail)) {
warning("-XX:+UsePermISM is obsolete.");
FLAG_SET_CMDLINE(bool, UseISM, true);
} else if (match_option(option, "-XX:-UsePermISM", &tail)) {
FLAG_SET_CMDLINE(bool, UseISM, false);
)
} else if (match_option(option, "-XX:+DisplayVMOutputToStderr", &tail)) {
FLAG_SET_CMDLINE(bool, DisplayVMOutputToStdout, false);
FLAG_SET_CMDLINE(bool, DisplayVMOutputToStderr, true);
@ -3136,8 +3121,6 @@ jint Arguments::finalize_vm_init_args(SysClassPath* scp_p, bool scp_assembly_req
// Note that large pages are enabled/disabled for both the
// Java heap and the code cache.
FLAG_SET_DEFAULT(UseLargePages, false);
SOLARIS_ONLY(FLAG_SET_DEFAULT(UseMPSS, false));
SOLARIS_ONLY(FLAG_SET_DEFAULT(UseISM, false));
}
// Tiered compilation is undefined with C1.
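UseISM, UsePermISM and UseMPSS become accepted-but-ignored obsolete flags in this change. A jtreg-style sketch using the same com.oracle.java.testlibrary helpers as the CheckUpperLimit test below (the exact warning text is an assumption):

```java
import com.oracle.java.testlibrary.*;

public class CheckObsoleteUseMPSS {
    public static void main(String[] args) throws Exception {
        // Obsolete flags are stripped with a warning rather than rejected.
        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseMPSS", "-version");
        OutputAnalyzer out = new OutputAnalyzer(pb.start());
        out.shouldContain("ignoring option UseMPSS"); // assumed warning text
        out.shouldHaveExitValue(0);                   // VM still starts normally
    }
}
```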

View File

@ -262,7 +262,6 @@ class Arguments : AllStatic {
// Option flags
static bool _has_profile;
static bool _has_alloc_profile;
static const char* _gc_log_filename;
static uintx _min_heap_size;
@ -464,9 +463,8 @@ class Arguments : AllStatic {
// -Xloggc:<file>, if not specified will be NULL
static const char* gc_log_filename() { return _gc_log_filename; }
// -Xprof/-Xaprof
// -Xprof
static bool has_profile() { return _has_profile; }
static bool has_alloc_profile() { return _has_alloc_profile; }
// -Xms, -Xmx
static uintx min_heap_size() { return _min_heap_size; }

View File

@ -175,6 +175,7 @@ define_pd_global(intx, InitialCodeCacheSize, 160*K);
define_pd_global(intx, ReservedCodeCacheSize, 32*M);
define_pd_global(intx, CodeCacheExpansionSize, 32*K);
define_pd_global(intx, CodeCacheMinBlockLength, 1);
define_pd_global(intx, CodeCacheMinimumUseSpace, 200*K);
define_pd_global(uintx,MetaspaceSize, ScaleForWordSize(4*M));
define_pd_global(bool, NeverActAsServerClassMachine, true);
define_pd_global(uint64_t,MaxRAM, 1ULL*G);
@ -3679,6 +3680,9 @@ class CommandLineFlags {
develop(bool, VerifyGenericSignatures, false, \
"Abort VM on erroneous or inconsistent generic signatures") \
\
product(bool, ParseGenericDefaults, false, \
"Parse generic signatures for default method handling") \
\
product(bool, UseVMInterruptibleIO, false, \
"(Unstable, Solaris-specific) Thread interrupt before or with " \
"EINTR for I/O operations results in OS_INTRPT. The default value"\

View File

@ -227,7 +227,7 @@ class HandleArea: public Arena {
HandleArea* _prev; // link to outer (older) area
public:
// Constructor
HandleArea(HandleArea* prev) {
HandleArea(HandleArea* prev) : Arena(Chunk::tiny_size) {
debug_only(_handle_mark_nesting = 0);
debug_only(_no_handle_mark_nesting = 0);
_prev = prev;

View File

@ -42,7 +42,6 @@
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/aprofiler.hpp"
#include "runtime/arguments.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/compilationPolicy.hpp"
@ -509,16 +508,6 @@ void before_exit(JavaThread * thread) {
}
}
if (Arguments::has_alloc_profile()) {
HandleMark hm;
// Do one last collection to enumerate all the objects
// allocated since the last one.
Universe::heap()->collect(GCCause::_allocation_profiler);
AllocationProfiler::disengage();
AllocationProfiler::print(0);
}
if (PrintBytecodeHistogram) {
BytecodeHistogram::print();
}

View File

@ -507,16 +507,16 @@ class os: AllStatic {
// Symbol lookup, find nearest function name; basically it implements
// dladdr() for all platforms. Name of the nearest function is copied
// to buf. Distance from its base address is returned as offset.
// to buf. Distance from its base address is optionally returned as offset.
// If function name is not found, buf[0] is set to '\0' and offset is
// set to -1.
// set to -1 (if offset is non-NULL).
static bool dll_address_to_function_name(address addr, char* buf,
int buflen, int* offset);
// Locate DLL/DSO. On success, full path of the library is copied to
// buf, and offset is set to be the distance between addr and the
// library's base address. On failure, buf[0] is set to '\0' and
// offset is set to -1.
// buf, and offset is optionally set to be the distance between addr
// and the library's base address. On failure, buf[0] is set to '\0'
// and offset is set to -1 (if offset is non-NULL).
static bool dll_address_to_library_name(address addr, char* buf,
int buflen, int* offset);

View File

@ -45,7 +45,6 @@
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "prims/privilegedStack.hpp"
#include "runtime/aprofiler.hpp"
#include "runtime/arguments.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/deoptimization.hpp"
@ -3677,7 +3676,6 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
}
if (Arguments::has_profile()) FlatProfiler::engage(main_thread, true);
if (Arguments::has_alloc_profile()) AllocationProfiler::engage();
if (MemProfiling) MemProfiler::engage();
StatSampler::engage();
if (CheckJNICalls) JniPeriodicChecker::engage();

View File

@ -263,7 +263,7 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
unchecked_c2_static_field) \
\
/******************************************************************/ \
/* OopDesc and Klass hierarchies (NOTE: MethodData* incomplete) */ \
/* OopDesc and Klass hierarchies (NOTE: MethodData* incomplete) */ \
/******************************************************************/ \
\
volatile_nonstatic_field(oopDesc, _mark, markOop) \
@ -274,21 +274,20 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
volatile_nonstatic_field(ArrayKlass, _higher_dimension, Klass*) \
volatile_nonstatic_field(ArrayKlass, _lower_dimension, Klass*) \
nonstatic_field(ArrayKlass, _vtable_len, int) \
nonstatic_field(ArrayKlass, _alloc_size, juint) \
nonstatic_field(ArrayKlass, _component_mirror, oop) \
nonstatic_field(CompiledICHolder, _holder_method, Method*) \
nonstatic_field(CompiledICHolder, _holder_method, Method*) \
nonstatic_field(CompiledICHolder, _holder_klass, Klass*) \
nonstatic_field(ConstantPool, _tags, Array<u1>*) \
nonstatic_field(ConstantPool, _cache, ConstantPoolCache*) \
nonstatic_field(ConstantPool, _cache, ConstantPoolCache*) \
nonstatic_field(ConstantPool, _pool_holder, InstanceKlass*) \
nonstatic_field(ConstantPool, _operands, Array<u2>*) \
nonstatic_field(ConstantPool, _length, int) \
nonstatic_field(ConstantPool, _resolved_references, jobject) \
nonstatic_field(ConstantPool, _reference_map, Array<u2>*) \
nonstatic_field(ConstantPoolCache, _length, int) \
nonstatic_field(ConstantPoolCache, _constant_pool, ConstantPool*) \
nonstatic_field(ConstantPoolCache, _constant_pool, ConstantPool*) \
nonstatic_field(InstanceKlass, _array_klasses, Klass*) \
nonstatic_field(InstanceKlass, _methods, Array<Method*>*) \
nonstatic_field(InstanceKlass, _methods, Array<Method*>*) \
nonstatic_field(InstanceKlass, _local_interfaces, Array<Klass*>*) \
nonstatic_field(InstanceKlass, _transitive_interfaces, Array<Klass*>*) \
nonstatic_field(InstanceKlass, _fields, Array<u2>*) \
@ -336,9 +335,8 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
nonstatic_field(Klass, _access_flags, AccessFlags) \
nonstatic_field(Klass, _subklass, Klass*) \
nonstatic_field(Klass, _next_sibling, Klass*) \
nonstatic_field(Klass, _alloc_count, juint) \
nonstatic_field(MethodData, _size, int) \
nonstatic_field(MethodData, _method, Method*) \
nonstatic_field(MethodData, _method, Method*) \
nonstatic_field(MethodData, _data_size, int) \
nonstatic_field(MethodData, _data[0], intptr_t) \
nonstatic_field(MethodData, _nof_decompiles, uint) \

View File

@ -486,7 +486,7 @@ int MemBaseline::malloc_sort_by_addr(const void* p1, const void* p2) {
const MemPointerRecord* mp1 = (const MemPointerRecord*)p1;
const MemPointerRecord* mp2 = (const MemPointerRecord*)p2;
int delta = UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
assert(delta != 0, "dup pointer");
assert(p1 == p2 || delta != 0, "dup pointer");
return delta;
}

View File

@ -470,7 +470,21 @@ class MemTracker : AllStatic {
static void check_NMT_load(Thread* thr) {
assert(thr != NULL, "Sanity check");
if (_slowdown_calling_thread && thr != _worker_thread) {
#ifdef _WINDOWS
// On Windows, os::NakedYield() does not work as well
// as os::yield_all()
os::yield_all();
#else
// On Solaris, os::yield_all() depends on os::sleep(),
// which requires the JavaThread to be in _thread_in_vm state.
// Transitioning the thread to _thread_in_vm state can be dangerous
// if the caller holds a lock, as it may deadlock with Threads_lock.
// So use NakedYield instead.
//
// On Linux and BSD, the NakedYield() and yield_all() implementations
// are the same.
os::NakedYield();
#endif
}
}

View File

@ -314,8 +314,8 @@ bool is_error_reported() {
#ifndef PRODUCT
#include <signal.h>
void test_error_handler(size_t test_num)
{
void test_error_handler() {
uintx test_num = ErrorHandlerTest;
if (test_num == 0) return;
// If asserts are disabled, use the corresponding guarantee instead.
@ -327,6 +327,8 @@ void test_error_handler(size_t test_num)
const char* const eol = os::line_separator();
const char* const msg = "this message should be truncated during formatting";
char * const dataPtr = NULL; // bad data pointer
const void (*funcPtr)(void) = (const void(*)()) 0xF; // bad function pointer
// Keep this in sync with test/runtime/6888954/vmerrors.sh.
switch (n) {
@ -348,11 +350,16 @@ void test_error_handler(size_t test_num)
case 9: ShouldNotCallThis();
case 10: ShouldNotReachHere();
case 11: Unimplemented();
// This is last because it does not generate an hs_err* file on Windows.
case 12: os::signal_raise(SIGSEGV);
// There's no guarantee the bad data pointer will crash us
// so "break" out to the ShouldNotReachHere().
case 12: *dataPtr = '\0'; break;
// There's no guarantee the bad function pointer will crash us
// so "break" out to the ShouldNotReachHere().
case 13: (*funcPtr)(); break;
default: ShouldNotReachHere();
default: tty->print_cr("ERROR: %d: unexpected test_num value.", n);
}
ShouldNotReachHere();
}
#endif // !PRODUCT

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -243,7 +243,7 @@ bool is_error_reported();
void set_error_reported();
/* Test assert(), fatal(), guarantee(), etc. */
NOT_PRODUCT(void test_error_handler(size_t test_num);)
NOT_PRODUCT(void test_error_handler();)
void pd_ps(frame f);
void pd_obfuscate_location(char *buf, size_t buflen);

View File

@ -908,10 +908,11 @@ void VMError::report_and_die() {
// This is not the first error, see if it happened in a different thread
// or in the same thread during error reporting.
if (first_error_tid != mytid) {
jio_snprintf(buffer, sizeof(buffer),
char msgbuf[64];
jio_snprintf(msgbuf, sizeof(msgbuf),
"[thread " INT64_FORMAT " also had an error]",
mytid);
out.print_raw_cr(buffer);
out.print_raw_cr(msgbuf);
// error reporting is not MT-safe, block current thread
os::infinite_sleep();

View File

@ -15,7 +15,7 @@
* @bug 8005956
* @summary C2: assert(!def_outside->member(r)) failed: Use of external LRG overlaps the same LRG defined in this block
*
* @run main PolynomialRoot
* @run main/timeout=300 PolynomialRoot
*/
public class PolynomialRoot {
@ -757,19 +757,26 @@ public static int root4(final double [] p,final double [] re_root,final double [
public static void main(final String [] args)
{
final long t0=System.currentTimeMillis();
final double eps=1e-6;
//checkRoots();
final java.util.Random r=new java.util.Random(-1381923);
printSpecialValues();
if (System.getProperty("os.arch").equals("x86") ||
System.getProperty("os.arch").equals("amd64") ||
System.getProperty("os.arch").equals("x86_64")){
final long t0=System.currentTimeMillis();
final double eps=1e-6;
//checkRoots();
final java.util.Random r=new java.util.Random(-1381923);
printSpecialValues();
final int n_tests=10000000;
//testRoots(2,n_tests,r,eps);
//testRoots(3,n_tests,r,eps);
testRoots(4,n_tests,r,eps);
final long t1=System.currentTimeMillis();
System.err.println("PolynomialRoot.main: "+n_tests+" tests OK done in "+(t1-t0)+" milliseconds. ver=$Id: PolynomialRoot.java,v 1.105 2012/08/18 00:00:05 mal Exp $");
}
final int n_tests=100000;
//testRoots(2,n_tests,r,eps);
//testRoots(3,n_tests,r,eps);
testRoots(4,n_tests,r,eps);
final long t1=System.currentTimeMillis();
System.err.println("PolynomialRoot.main: "+n_tests+" tests OK done in "+(t1-t0)+" milliseconds. ver=$Id: PolynomialRoot.java,v 1.105 2012/08/18 00:00:05 mal Exp $");
System.out.println("PASSED");
} else {
System.out.println("PASS test for non-x86");
}
}

View File

@ -0,0 +1,47 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8015635
* @summary Test ensures that the ReservedCodeCacheSize is at most MAXINT
* @library /testlibrary
*
*/
import com.oracle.java.testlibrary.*;
public class CheckUpperLimit {
public static void main(String[] args) throws Exception {
ProcessBuilder pb;
OutputAnalyzer out;
pb = ProcessTools.createJavaProcessBuilder("-XX:ReservedCodeCacheSize=2048m", "-version");
out = new OutputAnalyzer(pb.start());
out.shouldHaveExitValue(0);
pb = ProcessTools.createJavaProcessBuilder("-XX:ReservedCodeCacheSize=2049m", "-version");
out = new OutputAnalyzer(pb.start());
out.shouldContain("Invalid ReservedCodeCacheSize=");
out.shouldHaveExitValue(1);
}
}

View File

@ -1,5 +1,6 @@
# @test
# @bug 6888954
# @bug 8015884
# @summary exercise HotSpot error handling code
# @author John Coomes
# @run shell vmerrors.sh
@ -27,9 +28,24 @@ i=1
rc=0
assert_re='(assert|guarantee)[(](str|num).*failed: *'
# for bad_data_ptr_re:
# EXCEPTION_ACCESS_VIOLATION - Win-*
# SIGILL - MacOS X
# SIGSEGV - Linux-*, Solaris SPARC-*, Solaris X86-*
#
bad_data_ptr_re='(SIGILL|SIGSEGV|EXCEPTION_ACCESS_VIOLATION).* at pc='
#
# for bad_func_ptr_re:
# EXCEPTION_ACCESS_VIOLATION - Win-*
# SIGBUS - Solaris SPARC-64
# SIGSEGV - Linux-*, Solaris SPARC-32, Solaris X86-*
#
# Note: would like to use "pc=0x00*0f," in the pattern, but Solaris SPARC-*
# gets its signal at a PC in test_error_handler().
#
bad_func_ptr_re='(SIGBUS|SIGSEGV|EXCEPTION_ACCESS_VIOLATION).* at pc='
guarantee_re='guarantee[(](str|num).*failed: *'
fatal_re='fatal error: *'
signal_re='(SIGSEGV|EXCEPTION_ACCESS_VIOLATION).* at pc='
tail_1='.*expected null'
tail_2='.*num='
@ -39,8 +55,9 @@ for re in \
"${fatal_re}${tail_1}" "${fatal_re}${tail_2}" \
"${fatal_re}.*truncated" "ChunkPool::allocate" \
"ShouldNotCall" "ShouldNotReachHere" \
"Unimplemented" "$signal_re"
"Unimplemented" "$bad_data_ptr_re" \
"$bad_func_ptr_re"
do
i2=$i
[ $i -lt 10 ] && i2=0$i

View File

@ -2,21 +2,21 @@
# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.

View File

@ -219,3 +219,4 @@ a2a2a91075ad85becbe10a39d7fd04ef9bea8df5 jdk8-b92
42aa9f1828852bb8b77e98ec695211493ae0759d jdk8-b95
4a5d3cf2b3af1660db0237e8da324c140e534fa4 jdk8-b96
978a95239044f26dcc8a6d59246be07ad6ca6be2 jdk8-b97
c4908732fef5235f1b98cafe0ce507771ef7892c jdk8-b98

View File

@ -24,24 +24,23 @@
*/
package java.lang.invoke;
import java.io.Serializable;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import sun.invoke.util.Wrapper;
import static sun.invoke.util.Wrapper.*;
import static sun.invoke.util.Wrapper.forPrimitiveType;
import static sun.invoke.util.Wrapper.forWrapperType;
import static sun.invoke.util.Wrapper.isWrapperType;
/**
* Abstract implementation of a lambda metafactory which provides parameter unrolling and input validation.
* Abstract implementation of a lambda metafactory which provides parameter
* unrolling and input validation.
*
* @see LambdaMetafactory
*/
/* package */ abstract class AbstractValidatingLambdaMetafactory {
/*
* For context, the comments for the following fields are marked in quotes with their values, given this program:
* For context, the comments for the following fields are marked in quotes
* with their values, given this program:
* interface II<T> { Object foo(T x); }
* interface JJ<R extends Number> extends II<R> { }
* class CC { String impl(int i) { return "impl:"+i; }}
@ -54,9 +53,7 @@ import static sun.invoke.util.Wrapper.*;
final Class<?> targetClass; // The class calling the meta-factory via invokedynamic "class X"
final MethodType invokedType; // The type of the invoked method "(CC)II"
final Class<?> samBase; // The type of the returned instance "interface JJ"
final MethodHandle samMethod; // Raw method handle for the functional interface method
final MethodHandleInfo samInfo; // Info about the SAM method handle "MethodHandleInfo[9 II.foo(Object)Object]"
final Class<?> samClass; // Interface containing the SAM method "interface II"
final String samMethodName; // Name of the SAM method "foo"
final MethodType samMethodType; // Type of the SAM method "(Object)Object"
final MethodHandle implMethod; // Raw method handle for the implementation method
final MethodHandleInfo implInfo; // Info about the implementation method handle "MethodHandleInfo[5 CC.impl(int)String]"
@ -67,44 +64,64 @@ import static sun.invoke.util.Wrapper.*;
final MethodType instantiatedMethodType; // Instantiated erased functional interface method type "(Integer)Object"
final boolean isSerializable; // Should the returned instance be serializable
final Class<?>[] markerInterfaces; // Additional marker interfaces to be implemented
final MethodType[] additionalBridges; // Signatures of additional methods to bridge
/**
* Meta-factory constructor.
*
* @param caller Stacked automatically by VM; represents a lookup context with the accessibility privileges
* of the caller.
* @param invokedType Stacked automatically by VM; the signature of the invoked method, which includes the
* expected static type of the returned lambda object, and the static types of the captured
* arguments for the lambda. In the event that the implementation method is an instance method,
* the first argument in the invocation signature will correspond to the receiver.
* @param samMethod The primary method in the functional interface to which the lambda or method reference is
* being converted, represented as a method handle.
* @param implMethod The implementation method which should be called (with suitable adaptation of argument
* types, return types, and adjustment for captured arguments) when methods of the resulting
* functional interface instance are invoked.
* @param instantiatedMethodType The signature of the primary functional interface method after type variables
* are substituted with their instantiation from the capture site
* @param caller Stacked automatically by VM; represents a lookup context
* with the accessibility privileges of the caller.
* @param invokedType Stacked automatically by VM; the signature of the
* invoked method, which includes the expected static
* type of the returned lambda object, and the static
* types of the captured arguments for the lambda. In
* the event that the implementation method is an
* instance method, the first argument in the invocation
* signature will correspond to the receiver.
* @param samMethodName Name of the method in the functional interface to
* which the lambda or method reference is being
* converted, represented as a String.
* @param samMethodType Type of the method in the functional interface to
* which the lambda or method reference is being
* converted, represented as a MethodType.
* @param implMethod The implementation method which should be called
* (with suitable adaptation of argument types, return
* types, and adjustment for captured arguments) when
* methods of the resulting functional interface instance
* are invoked.
* @param instantiatedMethodType The signature of the primary functional
* interface method after type variables are
* substituted with their instantiation from
* the capture site
* @param isSerializable Should the lambda be made serializable? If set,
* either the target type or one of the additional SAM
* types must extend {@code Serializable}.
* @param markerInterfaces Additional interfaces which the lambda object
* should implement.
* @param additionalBridges Method types for additional signatures to be
* bridged to the implementation method
* @throws ReflectiveOperationException
* @throws LambdaConversionException If any of the meta-factory protocol invariants are violated
* @throws LambdaConversionException If any of the meta-factory protocol
* invariants are violated
*/
AbstractValidatingLambdaMetafactory(MethodHandles.Lookup caller,
MethodType invokedType,
MethodHandle samMethod,
String samMethodName,
MethodType samMethodType,
MethodHandle implMethod,
MethodType instantiatedMethodType,
int flags,
Class<?>[] markerInterfaces)
boolean isSerializable,
Class<?>[] markerInterfaces,
MethodType[] additionalBridges)
throws ReflectiveOperationException, LambdaConversionException {
this.targetClass = caller.lookupClass();
this.invokedType = invokedType;
this.samBase = invokedType.returnType();
this.samMethod = samMethod;
this.samInfo = new MethodHandleInfo(samMethod);
this.samClass = samInfo.getDeclaringClass();
this.samMethodType = samInfo.getMethodType();
this.samMethodName = samMethodName;
this.samMethodType = samMethodType;
this.implMethod = implMethod;
this.implInfo = new MethodHandleInfo(implMethod);
@ -118,32 +135,24 @@ import static sun.invoke.util.Wrapper.*;
implKind == MethodHandleInfo.REF_invokeInterface;
this.implDefiningClass = implInfo.getDeclaringClass();
this.implMethodType = implInfo.getMethodType();
this.instantiatedMethodType = instantiatedMethodType;
this.isSerializable = isSerializable;
this.markerInterfaces = markerInterfaces;
this.additionalBridges = additionalBridges;
if (!samClass.isInterface()) {
if (!samBase.isInterface()) {
throw new LambdaConversionException(String.format(
"Functional interface %s is not an interface",
samClass.getName()));
samBase.getName()));
}
boolean foundSerializableSupertype = Serializable.class.isAssignableFrom(samBase);
for (Class<?> c : markerInterfaces) {
if (!c.isInterface()) {
throw new LambdaConversionException(String.format(
"Marker interface %s is not an interface",
c.getName()));
}
foundSerializableSupertype |= Serializable.class.isAssignableFrom(c);
}
this.isSerializable = ((flags & LambdaMetafactory.FLAG_SERIALIZABLE) != 0)
|| foundSerializableSupertype;
if (isSerializable && !foundSerializableSupertype) {
markerInterfaces = Arrays.copyOf(markerInterfaces, markerInterfaces.length + 1);
markerInterfaces[markerInterfaces.length-1] = Serializable.class;
}
this.markerInterfaces = markerInterfaces;
}
/**
@ -153,20 +162,14 @@ import static sun.invoke.util.Wrapper.*;
* functional interface
* @throws ReflectiveOperationException
*/
abstract CallSite buildCallSite() throws ReflectiveOperationException, LambdaConversionException;
abstract CallSite buildCallSite()
throws ReflectiveOperationException, LambdaConversionException;
/**
* Check the meta-factory arguments for errors
* @throws LambdaConversionException if there are improper conversions
*/
void validateMetafactoryArgs() throws LambdaConversionException {
// Check target type is a subtype of class where SAM method is defined
if (!samClass.isAssignableFrom(samBase)) {
throw new LambdaConversionException(
String.format("Invalid target type %s for lambda conversion; not a subtype of functional interface %s",
samBase.getName(), samClass.getName()));
}
switch (implKind) {
case MethodHandleInfo.REF_invokeInterface:
case MethodHandleInfo.REF_invokeVirtual:
@ -265,9 +268,9 @@ import static sun.invoke.util.Wrapper.*;
}
/**
* Check type adaptability
* @param fromType
* @param toType
* Check type adaptability for parameter types.
* @param fromType Type to convert from
* @param toType Type to convert to
* @param strict If true, do strict checks, else allow that fromType may be parameterized
* @return True if 'fromType' can be passed to an argument of 'toType'
*/
@ -299,15 +302,14 @@ import static sun.invoke.util.Wrapper.*;
}
} else {
// both are reference types: fromType should be a superclass of toType.
return strict? toType.isAssignableFrom(fromType) : true;
return !strict || toType.isAssignableFrom(fromType);
}
}
}
/**
* Check type adaptability for return types -- special handling of void type) and parameterized fromType
* @param fromType
* @param toType
* Check type adaptability for return types --
* special handling of void type and parameterized fromType
* @return True if 'fromType' can be converted to 'toType'
*/
private boolean isAdaptableToAsReturn(Class<?> fromType, Class<?> toType) {
@ -338,89 +340,4 @@ import static sun.invoke.util.Wrapper.*;
}
***********************/
/**
* Find the functional interface method and corresponding abstract methods
* which should be bridged. The functional interface method and those to be
* bridged will have the same name and number of parameters. Check for
* matching default methods (non-abstract), the VM will create bridges for
* default methods; We don't have enough readily available type information
* to distinguish between where the functional interface method should be
* bridged and where the default method should be bridged; This situation is
* flagged.
*/
class MethodAnalyzer {
private final Method[] methods = samBase.getMethods();
private Method samMethod = null;
private final List<Method> methodsToBridge = new ArrayList<>(methods.length);
private boolean conflictFoundBetweenDefaultAndBridge = false;
MethodAnalyzer() {
String samMethodName = samInfo.getName();
Class<?>[] samParamTypes = samMethodType.parameterArray();
int samParamLength = samParamTypes.length;
Class<?> samReturnType = samMethodType.returnType();
Class<?> objectClass = Object.class;
List<Method> defaultMethods = new ArrayList<>(methods.length);
for (Method m : methods) {
if (m.getName().equals(samMethodName) && m.getDeclaringClass() != objectClass) {
Class<?>[] mParamTypes = m.getParameterTypes();
if (mParamTypes.length == samParamLength) {
// Method matches name and parameter length -- and is not Object
if (Modifier.isAbstract(m.getModifiers())) {
// Method is abstract
if (m.getReturnType().equals(samReturnType)
&& Arrays.equals(mParamTypes, samParamTypes)) {
// Exact match, this is the SAM method signature
samMethod = m;
} else if (!hasMatchingBridgeSignature(m)) {
// Record bridges, exclude methods with duplicate signatures
methodsToBridge.add(m);
}
} else {
// Record default methods for conflict testing
defaultMethods.add(m);
}
}
}
}
for (Method dm : defaultMethods) {
if (hasMatchingBridgeSignature(dm)) {
conflictFoundBetweenDefaultAndBridge = true;
break;
}
}
}
Method getSamMethod() {
return samMethod;
}
List<Method> getMethodsToBridge() {
return methodsToBridge;
}
boolean conflictFoundBetweenDefaultAndBridge() {
return conflictFoundBetweenDefaultAndBridge;
}
/**
* Search the list of previously found bridge methods to determine if there is a method with the same signature
* (return and parameter types) as the specified method.
*
* @param m The method to match
* @return True if the method was found, False otherwise
*/
private boolean hasMatchingBridgeSignature(Method m) {
Class<?>[] ptypes = m.getParameterTypes();
Class<?> rtype = m.getReturnType();
for (Method md : methodsToBridge) {
if (md.getReturnType().equals(rtype) && Arrays.equals(ptypes, md.getParameterTypes())) {
return true;
}
}
return false;
}
}
}
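The constructor now receives the SAM method as a name plus MethodType pair instead of a raw MethodHandle. For the II/JJ example quoted in the field comments above, the values a call site would carry look roughly like this (a runnable sketch; the Integer instantiation is illustrative):

```java
import java.lang.invoke.MethodType;

public class SamTypesDemo {
    public static void main(String[] args) {
        // For the example in the field comments above:
        //   interface II<T> { Object foo(T x); }
        //   interface JJ<R extends Number> extends II<R> { }
        // a JJ<Integer> lambda call site would carry:
        String samMethodName = "foo";                                // name of the SAM
        MethodType samMethodType =
                MethodType.methodType(Object.class, Object.class);   // erased: (Object)Object
        MethodType instantiatedMethodType =
                MethodType.methodType(Object.class, Integer.class);  // (Integer)Object
        System.out.println(samMethodName + " " + samMethodType
                + " instantiated as " + instantiatedMethodType);
    }
}
```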

View File

@ -25,22 +25,26 @@
package java.lang.invoke;
import java.lang.reflect.Constructor;
import java.lang.reflect.Method;
import java.security.ProtectionDomain;
import java.util.concurrent.atomic.AtomicInteger;
import jdk.internal.org.objectweb.asm.*;
import static jdk.internal.org.objectweb.asm.Opcodes.*;
import sun.misc.Unsafe;
import java.lang.reflect.Constructor;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.security.ProtectionDomain;
import java.util.concurrent.atomic.AtomicInteger;
import static jdk.internal.org.objectweb.asm.Opcodes.*;
/**
* Lambda metafactory implementation which dynamically creates an inner-class-like class per lambda callsite.
* Lambda metafactory implementation which dynamically creates an
* inner-class-like class per lambda callsite.
*
* @see LambdaMetafactory
*/
/* package */ final class InnerClassLambdaMetafactory extends AbstractValidatingLambdaMetafactory {
private static final Unsafe UNSAFE = Unsafe.getUnsafe();
private static final int CLASSFILE_VERSION = 51;
private static final String METHOD_DESCRIPTOR_VOID = Type.getMethodDescriptor(Type.VOID_TYPE);
private static final String NAME_MAGIC_ACCESSOR_IMPL = "java/lang/invoke/MagicLambdaImpl";
@ -54,7 +58,7 @@ import java.security.PrivilegedAction;
private static final String DESCR_CTOR_SERIALIZED_LAMBDA
= MethodType.methodType(void.class,
Class.class,
int.class, String.class, String.class, String.class,
String.class, String.class, String.class,
int.class, String.class, String.class, String.class,
String.class,
Object[].class).toMethodDescriptorString();
@ -77,36 +81,56 @@ import java.security.PrivilegedAction;
private final Type[] instantiatedArgumentTypes; // ASM types for the functional interface arguments
/**
* General meta-factory constructor, standard cases and allowing for uncommon options such as serialization.
* General meta-factory constructor, supporting both standard cases and
* allowing for uncommon options such as serialization or bridging.
*
* @param caller Stacked automatically by VM; represents a lookup context with the accessibility privileges
* of the caller.
* @param invokedType Stacked automatically by VM; the signature of the invoked method, which includes the
* expected static type of the returned lambda object, and the static types of the captured
* arguments for the lambda. In the event that the implementation method is an instance method,
* the first argument in the invocation signature will correspond to the receiver.
* @param samMethod The primary method in the functional interface to which the lambda or method reference is
* being converted, represented as a method handle.
* @param implMethod The implementation method which should be called (with suitable adaptation of argument
* types, return types, and adjustment for captured arguments) when methods of the resulting
* functional interface instance are invoked.
* @param instantiatedMethodType The signature of the primary functional interface method after type variables
* are substituted with their instantiation from the capture site
* @param flags A bitmask containing flags that may influence the translation of this lambda expression. Defined
* fields include FLAG_SERIALIZABLE.
* @param markerInterfaces Additional interfaces which the lambda object should implement.
* @param caller Stacked automatically by VM; represents a lookup context
* with the accessibility privileges of the caller.
* @param invokedType Stacked automatically by VM; the signature of the
* invoked method, which includes the expected static
* type of the returned lambda object, and the static
* types of the captured arguments for the lambda. In
* the event that the implementation method is an
* instance method, the first argument in the invocation
* signature will correspond to the receiver.
* @param samMethodName Name of the method in the functional interface to
* which the lambda or method reference is being
* converted, represented as a String.
* @param samMethodType Type of the method in the functional interface to
* which the lambda or method reference is being
* converted, represented as a MethodType.
* @param implMethod The implementation method which should be called (with
* suitable adaptation of argument types, return types,
* and adjustment for captured arguments) when methods of
* the resulting functional interface instance are invoked.
* @param instantiatedMethodType The signature of the primary functional
* interface method after type variables are
* substituted with their instantiation from
* the capture site
* @param isSerializable Should the lambda be made serializable? If set,
* either the target type or one of the additional SAM
* types must extend {@code Serializable}.
* @param markerInterfaces Additional interfaces which the lambda object
* should implement.
* @param additionalBridges Method types for additional signatures to be
* bridged to the implementation method
* @throws ReflectiveOperationException
* @throws LambdaConversionException If any of the meta-factory protocol invariants are violated
* @throws LambdaConversionException If any of the meta-factory protocol
* invariants are violated
*/
public InnerClassLambdaMetafactory(MethodHandles.Lookup caller,
MethodType invokedType,
MethodHandle samMethod,
String samMethodName,
MethodType samMethodType,
MethodHandle implMethod,
MethodType instantiatedMethodType,
int flags,
Class<?>[] markerInterfaces)
boolean isSerializable,
Class<?>[] markerInterfaces,
MethodType[] additionalBridges)
throws ReflectiveOperationException, LambdaConversionException {
super(caller, invokedType, samMethod, implMethod, instantiatedMethodType, flags, markerInterfaces);
super(caller, invokedType, samMethodName, samMethodType,
implMethod, instantiatedMethodType,
isSerializable, markerInterfaces, additionalBridges);
implMethodClassName = implDefiningClass.getName().replace('.', '/');
implMethodName = implInfo.getName();
implMethodDesc = implMethodType.toMethodDescriptorString();
@ -124,7 +148,8 @@ import java.security.PrivilegedAction;
for (int i = 0; i < argTypes.length; i++) {
argNames[i] = "arg$" + (i + 1);
}
instantiatedArgumentTypes = Type.getArgumentTypes(instantiatedMethodType.toMethodDescriptorString());
instantiatedArgumentTypes = Type.getArgumentTypes(
instantiatedMethodType.toMethodDescriptorString());
}
/**
@ -136,7 +161,8 @@ import java.security.PrivilegedAction;
* @return a CallSite, which, when invoked, will return an instance of the
* functional interface
* @throws ReflectiveOperationException
* @throws LambdaConversionException If properly formed functional interface is not found
* @throws LambdaConversionException If properly formed functional interface
* is not found
*/
@Override
CallSite buildCallSite() throws ReflectiveOperationException, LambdaConversionException {
@ -167,8 +193,8 @@ import java.security.PrivilegedAction;
} else {
return new ConstantCallSite(
MethodHandles.Lookup.IMPL_LOOKUP
.findConstructor(innerClass, constructorType)
.asType(constructorType.changeReturnType(samBase)));
.findConstructor(innerClass, constructorType)
.asType(constructorType.changeReturnType(samBase)));
}
}
@ -176,13 +202,20 @@ import java.security.PrivilegedAction;
* Generate a class file which implements the functional
* interface, define and return the class.
*
* @implNote The class that is generated does not include signature
* information for exceptions that may be present on the SAM method.
* This is to reduce classfile size, and is harmless as checked exceptions
* are erased anyway, no one will ever compile against this classfile,
* and we make no guarantees about the reflective properties of lambda
* objects.
*
* @return a Class which implements the functional interface
* @throws LambdaConversionException If properly formed functional interface is not found
* @throws LambdaConversionException If properly formed functional interface
* is not found
*/
private Class<?> spinInnerClass() throws LambdaConversionException {
String samName = samBase.getName().replace('.', '/');
String[] interfaces = new String[markerInterfaces.length + 1];
interfaces[0] = samName;
interfaces[0] = samBase.getName().replace('.', '/');
for (int i=0; i<markerInterfaces.length; i++) {
interfaces[i+1] = markerInterfaces[i].getName().replace('.', '/');
}
@ -192,35 +225,33 @@ import java.security.PrivilegedAction;
// Generate final fields to be filled in by constructor
for (int i = 0; i < argTypes.length; i++) {
FieldVisitor fv = cw.visitField(ACC_PRIVATE + ACC_FINAL, argNames[i], argTypes[i].getDescriptor(),
FieldVisitor fv = cw.visitField(ACC_PRIVATE + ACC_FINAL,
argNames[i],
argTypes[i].getDescriptor(),
null, null);
fv.visitEnd();
}
generateConstructor();
MethodAnalyzer ma = new MethodAnalyzer();
// Forward the SAM method
if (ma.getSamMethod() == null) {
throw new LambdaConversionException(String.format("Functional interface method not found: %s", samMethodType));
} else {
generateForwardingMethod(ma.getSamMethod(), false);
}
String methodDescriptor = samMethodType.toMethodDescriptorString();
MethodVisitor mv = cw.visitMethod(ACC_PUBLIC, samMethodName,
methodDescriptor, null, null);
new ForwardingMethodGenerator(mv).generate(methodDescriptor);
// Forward the bridges
// @@@ The commented-out code is temporary, pending the VM's ability to bridge all methods on request
// @@@ Once the VM can do fail-over, uncomment the !ma.wasDefaultMethodFound() test, and emit the appropriate
// @@@ classfile attribute to request custom bridging. See 8002092.
if (!ma.getMethodsToBridge().isEmpty() /* && !ma.conflictFoundBetweenDefaultAndBridge() */ ) {
for (Method m : ma.getMethodsToBridge()) {
generateForwardingMethod(m, true);
if (additionalBridges != null) {
for (MethodType mt : additionalBridges) {
methodDescriptor = mt.toMethodDescriptorString();
mv = cw.visitMethod(ACC_PUBLIC|ACC_BRIDGE, samMethodName,
methodDescriptor, null, null);
new ForwardingMethodGenerator(mv).generate(methodDescriptor);
}
}
if (isSerializable) {
if (isSerializable)
generateWriteReplace();
}
cw.visitEnd();
@ -229,11 +260,14 @@ import java.security.PrivilegedAction;
final byte[] classBytes = cw.toByteArray();
/*** Uncomment to dump the generated file
System.out.printf("Loaded: %s (%d bytes) %n", lambdaClassName, classBytes.length);
try (FileOutputStream fos = new FileOutputStream(lambdaClassName.replace('/', '.') + ".class")) {
System.out.printf("Loaded: %s (%d bytes) %n", lambdaClassName,
classBytes.length);
try (FileOutputStream fos = new FileOutputStream(lambdaClassName
.replace('/', '.') + ".class")) {
fos.write(classBytes);
} catch (IOException ex) {
PlatformLogger.getLogger(InnerClassLambdaMetafactory.class.getName()).severe(ex.getMessage(), ex);
PlatformLogger.getLogger(InnerClassLambdaMetafactory.class
.getName()).severe(ex.getMessage(), ex);
}
***/
@ -249,8 +283,9 @@ import java.security.PrivilegedAction;
}
);
return (Class<?>) Unsafe.getUnsafe().defineClass(lambdaClassName, classBytes, 0, classBytes.length,
loader, pd);
return UNSAFE.defineClass(lambdaClassName,
classBytes, 0, classBytes.length,
loader, pd);
}
/**
@ -258,19 +293,23 @@ import java.security.PrivilegedAction;
*/
private void generateConstructor() {
// Generate constructor
MethodVisitor ctor = cw.visitMethod(ACC_PRIVATE, NAME_CTOR, constructorDesc, null, null);
MethodVisitor ctor = cw.visitMethod(ACC_PRIVATE, NAME_CTOR,
constructorDesc, null, null);
ctor.visitCode();
ctor.visitVarInsn(ALOAD, 0);
ctor.visitMethodInsn(INVOKESPECIAL, NAME_MAGIC_ACCESSOR_IMPL, NAME_CTOR, METHOD_DESCRIPTOR_VOID);
ctor.visitMethodInsn(INVOKESPECIAL, NAME_MAGIC_ACCESSOR_IMPL, NAME_CTOR,
METHOD_DESCRIPTOR_VOID);
int lvIndex = 0;
for (int i = 0; i < argTypes.length; i++) {
ctor.visitVarInsn(ALOAD, 0);
ctor.visitVarInsn(argTypes[i].getOpcode(ILOAD), lvIndex + 1);
lvIndex += argTypes[i].getSize();
ctor.visitFieldInsn(PUTFIELD, lambdaClassName, argNames[i], argTypes[i].getDescriptor());
ctor.visitFieldInsn(PUTFIELD, lambdaClassName, argNames[i],
argTypes[i].getDescriptor());
}
ctor.visitInsn(RETURN);
ctor.visitMaxs(-1, -1); // Maxs computed by ClassWriter.COMPUTE_MAXS, these arguments ignored
// Maxs computed by ClassWriter.COMPUTE_MAXS, these arguments ignored
ctor.visitMaxs(-1, -1);
ctor.visitEnd();
}
@ -279,18 +318,18 @@ import java.security.PrivilegedAction;
*/
private void generateWriteReplace() {
TypeConvertingMethodAdapter mv
= new TypeConvertingMethodAdapter(cw.visitMethod(ACC_PRIVATE + ACC_FINAL,
NAME_METHOD_WRITE_REPLACE, DESCR_METHOD_WRITE_REPLACE,
null, null));
= new TypeConvertingMethodAdapter(
cw.visitMethod(ACC_PRIVATE + ACC_FINAL,
NAME_METHOD_WRITE_REPLACE, DESCR_METHOD_WRITE_REPLACE,
null, null));
mv.visitCode();
mv.visitTypeInsn(NEW, NAME_SERIALIZED_LAMBDA);
mv.visitInsn(DUP);;
mv.visitInsn(DUP);
mv.visitLdcInsn(Type.getType(targetClass));
mv.visitLdcInsn(samInfo.getReferenceKind());
mv.visitLdcInsn(invokedType.returnType().getName().replace('.', '/'));
mv.visitLdcInsn(samInfo.getName());
mv.visitLdcInsn(samInfo.getMethodType().toMethodDescriptorString());
mv.visitLdcInsn(samMethodName);
mv.visitLdcInsn(samMethodType.toMethodDescriptorString());
mv.visitLdcInsn(implInfo.getReferenceKind());
mv.visitLdcInsn(implInfo.getDeclaringClass().getName().replace('.', '/'));
mv.visitLdcInsn(implInfo.getName());
@ -303,35 +342,19 @@ import java.security.PrivilegedAction;
mv.visitInsn(DUP);
mv.iconst(i);
mv.visitVarInsn(ALOAD, 0);
mv.visitFieldInsn(GETFIELD, lambdaClassName, argNames[i], argTypes[i].getDescriptor());
mv.visitFieldInsn(GETFIELD, lambdaClassName, argNames[i],
argTypes[i].getDescriptor());
mv.boxIfTypePrimitive(argTypes[i]);
mv.visitInsn(AASTORE);
}
mv.visitMethodInsn(INVOKESPECIAL, NAME_SERIALIZED_LAMBDA, NAME_CTOR,
DESCR_CTOR_SERIALIZED_LAMBDA);
mv.visitInsn(ARETURN);
mv.visitMaxs(-1, -1); // Maxs computed by ClassWriter.COMPUTE_MAXS, these arguments ignored
// Maxs computed by ClassWriter.COMPUTE_MAXS, these arguments ignored
mv.visitMaxs(-1, -1);
mv.visitEnd();
}
/**
* Generate a method which calls the lambda implementation method,
* converting arguments, as needed.
* @param m The method whose signature should be generated
* @param isBridge True if this methods should be flagged as a bridge
*/
private void generateForwardingMethod(Method m, boolean isBridge) {
Class<?>[] exceptionTypes = m.getExceptionTypes();
String[] exceptionNames = new String[exceptionTypes.length];
for (int i = 0; i < exceptionTypes.length; i++) {
exceptionNames[i] = exceptionTypes[i].getName().replace('.', '/');
}
String methodDescriptor = Type.getMethodDescriptor(m);
int access = isBridge? ACC_PUBLIC | ACC_BRIDGE : ACC_PUBLIC;
MethodVisitor mv = cw.visitMethod(access, m.getName(), methodDescriptor, null, exceptionNames);
new ForwardingMethodGenerator(mv).generate(m);
}
/**
* This class generates a method body which calls the lambda implementation
* method, converting arguments, as needed.
@ -342,36 +365,39 @@ import java.security.PrivilegedAction;
super(mv);
}
void generate(Method m) throws InternalError {
void generate(String methodDescriptor) {
visitCode();
if (implKind == MethodHandleInfo.REF_newInvokeSpecial) {
visitTypeInsn(NEW, implMethodClassName);
visitInsn(DUP);;
visitInsn(DUP);
}
for (int i = 0; i < argTypes.length; i++) {
visitVarInsn(ALOAD, 0);
visitFieldInsn(GETFIELD, lambdaClassName, argNames[i], argTypes[i].getDescriptor());
visitFieldInsn(GETFIELD, lambdaClassName, argNames[i],
argTypes[i].getDescriptor());
}
convertArgumentTypes(Type.getArgumentTypes(m));
convertArgumentTypes(Type.getArgumentTypes(methodDescriptor));
// Invoke the method we want to forward to
visitMethodInsn(invocationOpcode(), implMethodClassName, implMethodName, implMethodDesc);
// Convert the return value (if any) and return it
// Note: if adapting from non-void to void, the 'return' instruction will pop the unneeded result
Type samReturnType = Type.getReturnType(m);
// Note: if adapting from non-void to void, the 'return'
// instruction will pop the unneeded result
Type samReturnType = Type.getReturnType(methodDescriptor);
convertType(implMethodReturnType, samReturnType, samReturnType);
visitInsn(samReturnType.getOpcode(Opcodes.IRETURN));
visitMaxs(-1, -1); // Maxs computed by ClassWriter.COMPUTE_MAXS, these arguments ignored
// Maxs computed by ClassWriter.COMPUTE_MAXS, these arguments ignored
visitMaxs(-1, -1);
visitEnd();
}
private void convertArgumentTypes(Type[] samArgumentTypes) {
int lvIndex = 0;
boolean samIncludesReceiver = implIsInstanceMethod && argTypes.length == 0;
boolean samIncludesReceiver = implIsInstanceMethod &&
argTypes.length == 0;
int samReceiverLength = samIncludesReceiver ? 1 : 0;
if (samIncludesReceiver) {
// push receiver
@ -395,7 +421,9 @@ import java.security.PrivilegedAction;
}
private void convertType(Type argType, Type targetType, Type functionalType) {
convertType(argType.getDescriptor(), targetType.getDescriptor(), functionalType.getDescriptor());
convertType(argType.getDescriptor(),
targetType.getDescriptor(),
functionalType.getDescriptor());
}
private int invocationOpcode() throws InternalError {
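The rewritten spinInnerClass() emits the SAM method from samMethodName/samMethodType plus any additionalBridges directly, instead of reflecting over samBase with the removed MethodAnalyzer. A hand-written Java analogue of the class shape it generates (a sketch only; the real class is emitted as bytecode and extends MagicLambdaImpl, and CC is the example class from the metafactory comments):

```java
import java.util.function.Function;

public class GeneratedShapeDemo {
    static class CC { String impl(int i) { return "impl:" + i; } }

    // Roughly the shape generated for "Function<Integer,String> f = cc::impl":
    static final class LambdaShape implements Function<Integer, String> {
        private final CC arg$1;                       // captured receiver (ctor-filled final field)
        LambdaShape(CC cc) { this.arg$1 = cc; }       // constructor generated by generateConstructor()
        @Override public String apply(Integer i) {   // forwarded SAM method
            return arg$1.impl(i);                     // unbox Integer -> int, invoke the impl method
        }
    }

    public static void main(String[] args) {
        System.out.println(new LambdaShape(new CC()).apply(42)); // prints impl:42
    }
}
```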

View File

@ -25,6 +25,9 @@
package java.lang.invoke;
import java.io.Serializable;
import java.util.Arrays;
/**
* <p>Bootstrap methods for converting lambda expressions and method references to functional interface objects.</p>
*
@ -44,16 +47,11 @@ package java.lang.invoke;
*
* <p>When parameterized types are used, the instantiated type of the functional interface method may be different
* from that in the functional interface. For example, consider
* <code>interface I&lt;T&gt; { int m(T x); }</code> if this functional interface type is used in a lambda
* <code>I&lt;Byte&gt; v = ...</code>, we need both the actual functional interface method which has the signature
* <code>(Object)int</code> and the erased instantiated type of the functional interface method (or simply
* {@code interface I<T> { int m(T x); }} if this functional interface type is used in a lambda
* {@code I<Byte> v = ...}, we need both the actual functional interface method which has the signature
* {@code (Object)int} and the erased instantiated type of the functional interface method (or simply
* <I>instantiated method type</I>), which has signature
* <code>(Byte)int</code>.
*
* <p>While functional interfaces only have a single abstract method from the language perspective (concrete
* methods in Object are and default methods may be present), at the bytecode level they may actually have multiple
* methods because of the need for bridge methods. Invoking any of these methods on the lambda object will result
* in invoking the implementation method.
* {@code (Byte)int}.
*
* <p>The argument list of the implementation method and the argument list of the functional interface method(s)
* may differ in several ways. The implementation methods may have additional arguments to accommodate arguments
@ -137,108 +135,147 @@ package java.lang.invoke;
* </tr>
* </table>
*
* The default bootstrap ({@link #metaFactory}) represents the common cases and uses an optimized protocol.
* Alternate bootstraps (e.g., {@link #altMetaFactory}) exist to support uncommon cases such as serialization
* The default bootstrap ({@link #metafactory}) represents the common cases and uses an optimized protocol.
* Alternate bootstraps (e.g., {@link #altMetafactory}) exist to support uncommon cases such as serialization
* or additional marker superinterfaces.
*
*/
public class LambdaMetafactory {
/** Flag for alternate metafactories indicating the lambda object is must to be serializable */
/** Flag for alternate metafactories indicating the lambda object
* must be serializable */
public static final int FLAG_SERIALIZABLE = 1 << 0;
/**
* Flag for alternate metafactories indicating the lambda object implements other marker interfaces
* Flag for alternate metafactories indicating the lambda object implements
* other marker interfaces
* besides Serializable
*/
public static final int FLAG_MARKERS = 1 << 1;
/**
* Flag for alternate metafactories indicating the lambda object requires
* additional bridge methods
*/
public static final int FLAG_BRIDGES = 1 << 2;
private static final Class<?>[] EMPTY_CLASS_ARRAY = new Class<?>[0];
private static final MethodType[] EMPTY_MT_ARRAY = new MethodType[0];
/**
* Standard meta-factory for conversion of lambda expressions or method references to functional interfaces.
* Standard meta-factory for conversion of lambda expressions or method
* references to functional interfaces.
*
* @param caller Stacked automatically by VM; represents a lookup context with the accessibility privileges
* of the caller.
* @param invokedName Stacked automatically by VM; the name of the invoked method as it appears at the call site.
* Currently unused.
* @param invokedType Stacked automatically by VM; the signature of the invoked method, which includes the
* expected static type of the returned lambda object, and the static types of the captured
* arguments for the lambda. In the event that the implementation method is an instance method,
* the first argument in the invocation signature will correspond to the receiver.
* @param samMethod The primary method in the functional interface to which the lambda or method reference is
* being converted, represented as a method handle.
* @param implMethod The implementation method which should be called (with suitable adaptation of argument
* types, return types, and adjustment for captured arguments) when methods of the resulting
* functional interface instance are invoked.
* @param instantiatedMethodType The signature of the primary functional interface method after type variables
* are substituted with their instantiation from the capture site
* @return a CallSite, which, when invoked, will return an instance of the functional interface
* @throws ReflectiveOperationException if the caller is not able to reconstruct one of the method handles
* @throws LambdaConversionException If any of the meta-factory protocol invariants are violated
* @param caller Stacked automatically by VM; represents a lookup context
* with the accessibility privileges of the caller.
* @param invokedName Stacked automatically by VM; the name of the invoked
* method as it appears at the call site.
* Used as the name of the functional interface method
* to which the lambda or method reference is being
* converted.
* @param invokedType Stacked automatically by VM; the signature of the
* invoked method, which includes the expected static
* type of the returned lambda object, and the static
* types of the captured arguments for the lambda.
* In the event that the implementation method is an
* instance method, the first argument in the invocation
* signature will correspond to the receiver.
* @param samMethodType MethodType of the method in the functional interface
* to which the lambda or method reference is being
* converted, represented as a MethodType.
* @param implMethod The implementation method which should be called
* (with suitable adaptation of argument types, return
* types, and adjustment for captured arguments) when
* methods of the resulting functional interface instance
* are invoked.
* @param instantiatedMethodType The signature of the primary functional
* interface method after type variables
* are substituted with their instantiation
* from the capture site
* @return a CallSite, which, when invoked, will return an instance of the
* functional interface
* @throws ReflectiveOperationException if the caller is not able to
* reconstruct one of the method handles
* @throws LambdaConversionException If any of the meta-factory protocol
* invariants are violated
*/
public static CallSite metaFactory(MethodHandles.Lookup caller,
public static CallSite metafactory(MethodHandles.Lookup caller,
String invokedName,
MethodType invokedType,
MethodHandle samMethod,
MethodType samMethodType,
MethodHandle implMethod,
MethodType instantiatedMethodType)
throws ReflectiveOperationException, LambdaConversionException {
AbstractValidatingLambdaMetafactory mf;
mf = new InnerClassLambdaMetafactory(caller, invokedType, samMethod, implMethod, instantiatedMethodType,
0, EMPTY_CLASS_ARRAY);
mf = new InnerClassLambdaMetafactory(caller, invokedType,
invokedName, samMethodType,
implMethod, instantiatedMethodType,
false, EMPTY_CLASS_ARRAY, EMPTY_MT_ARRAY);
mf.validateMetafactoryArgs();
return mf.buildCallSite();
}
/**
* Alternate meta-factory for conversion of lambda expressions or method references to functional interfaces,
* which supports serialization and other uncommon options.
* Alternate meta-factory for conversion of lambda expressions or method
* references to functional interfaces, which supports serialization and
* other uncommon options.
*
* The declared argument list for this method is:
*
* CallSite altMetaFactory(MethodHandles.Lookup caller,
* CallSite altMetafactory(MethodHandles.Lookup caller,
* String invokedName,
* MethodType invokedType,
* Object... args)
*
* but it behaves as if the argument list is:
*
* CallSite altMetaFactory(MethodHandles.Lookup caller,
* CallSite altMetafactory(MethodHandles.Lookup caller,
* String invokedName,
* MethodType invokedType,
* MethodHandle samMethod
* MethodType samMethodType
* MethodHandle implMethod,
* MethodType instantiatedMethodType,
* int flags,
* int markerInterfaceCount, // IF flags has MARKERS set
* Class... markerInterfaces // IF flags has MARKERS set
* int bridgeCount, // IF flags has BRIDGES set
* MethodType... bridges // IF flags has BRIDGES set
* )
*
*
* @param caller Stacked automatically by VM; represents a lookup context with the accessibility privileges
* of the caller.
* @param invokedName Stacked automatically by VM; the name of the invoked method as it appears at the call site.
* Currently unused.
* @param invokedType Stacked automatically by VM; the signature of the invoked method, which includes the
* expected static type of the returned lambda object, and the static types of the captured
* arguments for the lambda. In the event that the implementation method is an instance method,
* the first argument in the invocation signature will correspond to the receiver.
* @param args argument to pass, flags, marker interface count, and marker interfaces as described above
* @return a CallSite, which, when invoked, will return an instance of the functional interface
* @throws ReflectiveOperationException if the caller is not able to reconstruct one of the method handles
* @throws LambdaConversionException If any of the meta-factory protocol invariants are violated
* @param caller Stacked automatically by VM; represents a lookup context
* with the accessibility privileges of the caller.
* @param invokedName Stacked automatically by VM; the name of the invoked
* method as it appears at the call site.
* Used as the name of the functional interface method
* to which the lambda or method reference is being
* converted.
* @param invokedType Stacked automatically by VM; the signature of the
* invoked method, which includes the expected static
* type of the returned lambda object, and the static
* types of the captured arguments for the lambda.
* In the event that the implementation method is an
* instance method, the first argument in the invocation
* signature will correspond to the receiver.
* @param args flags and optional arguments, as described above
* @return a CallSite, which, when invoked, will return an instance of the
* functional interface
* @throws ReflectiveOperationException if the caller is not able to
* reconstruct one of the method handles
* @throws LambdaConversionException If any of the meta-factory protocol
* invariants are violated
*/
public static CallSite altMetaFactory(MethodHandles.Lookup caller,
public static CallSite altMetafactory(MethodHandles.Lookup caller,
String invokedName,
MethodType invokedType,
Object... args)
throws ReflectiveOperationException, LambdaConversionException {
MethodHandle samMethod = (MethodHandle)args[0];
MethodType samMethodType = (MethodType)args[0];
MethodHandle implMethod = (MethodHandle)args[1];
MethodType instantiatedMethodType = (MethodType)args[2];
int flags = (Integer) args[3];
Class<?>[] markerInterfaces;
MethodType[] bridges;
int argIndex = 4;
if ((flags & FLAG_MARKERS) != 0) {
int markerCount = (Integer) args[argIndex++];
@ -248,9 +285,33 @@ public class LambdaMetafactory {
}
else
markerInterfaces = EMPTY_CLASS_ARRAY;
AbstractValidatingLambdaMetafactory mf;
mf = new InnerClassLambdaMetafactory(caller, invokedType, samMethod, implMethod, instantiatedMethodType,
flags, markerInterfaces);
if ((flags & FLAG_BRIDGES) != 0) {
int bridgeCount = (Integer) args[argIndex++];
bridges = new MethodType[bridgeCount];
System.arraycopy(args, argIndex, bridges, 0, bridgeCount);
argIndex += bridgeCount;
}
else
bridges = EMPTY_MT_ARRAY;
boolean foundSerializableSupertype = Serializable.class.isAssignableFrom(invokedType.returnType());
for (Class<?> c : markerInterfaces)
foundSerializableSupertype |= Serializable.class.isAssignableFrom(c);
boolean isSerializable = ((flags & LambdaMetafactory.FLAG_SERIALIZABLE) != 0)
|| foundSerializableSupertype;
if (isSerializable && !foundSerializableSupertype) {
markerInterfaces = Arrays.copyOf(markerInterfaces, markerInterfaces.length + 1);
markerInterfaces[markerInterfaces.length-1] = Serializable.class;
}
AbstractValidatingLambdaMetafactory mf
= new InnerClassLambdaMetafactory(caller, invokedType,
invokedName, samMethodType,
implMethod,
instantiatedMethodType,
isSerializable,
markerInterfaces, bridges);
mf.validateMetafactoryArgs();
return mf.buildCallSite();
}
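
Similarly, the flag-driven tail of altMetafactory can be packed by hand following the layout documented above. A minimal sketch, again not part of this patch; it assumes the public FLAG_MARKERS constant and the new argument layout, and java.util.RandomAccess is used only as a convenient empty marker interface.

import java.lang.invoke.CallSite;
import java.lang.invoke.LambdaMetafactory;
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;
import java.util.RandomAccess;
import java.util.function.Function;

// Illustrative demo, not part of this changeset.
public class AltMetafactoryDemo {
    public static void main(String[] args) throws Throwable {
        MethodHandles.Lookup lookup = MethodHandles.lookup();
        MethodHandle impl = lookup.findStatic(Integer.class, "parseInt",
                MethodType.methodType(int.class, String.class));
        CallSite site = LambdaMetafactory.altMetafactory(
                lookup,
                "apply",
                MethodType.methodType(Function.class),
                // The Object... tail, laid out as the javadoc above describes:
                MethodType.methodType(Object.class, Object.class),   // samMethodType
                impl,                                                // implMethod
                MethodType.methodType(Integer.class, String.class),  // instantiatedMethodType
                LambdaMetafactory.FLAG_MARKERS,                      // flags
                1,                                                   // markerInterfaceCount
                RandomAccess.class);                                 // markerInterfaces
        @SuppressWarnings("unchecked")
        Function<String, Integer> parse =
                (Function<String, Integer>) site.getTarget().invoke();
        System.out.println(parse.apply("7"));               // prints 7
        System.out.println(parse instanceof RandomAccess);  // prints true
    }
}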

View File

@ -44,7 +44,6 @@ public final class SerializedLambda implements Serializable {
private final String functionalInterfaceClass;
private final String functionalInterfaceMethodName;
private final String functionalInterfaceMethodSignature;
private final int functionalInterfaceMethodKind;
private final String implClass;
private final String implMethodName;
private final String implMethodSignature;
@ -53,28 +52,32 @@ public final class SerializedLambda implements Serializable {
private final Object[] capturedArgs;
/**
* Create a {@code SerializedLambda} from the low-level information present
* at the lambda factory site.
*
* @param capturingClass The class in which the lambda expression appears
* @param functionalInterfaceMethodKind Method handle kind (see {@link MethodHandleInfo}) for the
* functional interface method handle present at the lambda factory site
* @param functionalInterfaceClass Name, in slash-delimited form, for the functional interface class present at the
* lambda factory site
* @param functionalInterfaceMethodName Name of the primary method for the functional interface present at the
* @param functionalInterfaceClass Name, in slash-delimited form, of the static
* type of the returned lambda object
* @param functionalInterfaceMethodName Name of the functional interface
* method present at the
* lambda factory site
* @param functionalInterfaceMethodSignature Signature of the primary method for the functional interface present
* at the lambda factory site
* @param functionalInterfaceMethodSignature Signature of the functional
* interface method present at
* the lambda factory site
* @param implMethodKind Method handle kind for the implementation method
* @param implClass Name, in slash-delimited form, for the class holding the implementation method
* @param implClass Name, in slash-delimited form, for the class holding
* the implementation method
* @param implMethodName Name of the implementation method
* @param implMethodSignature Signature of the implementation method
* @param instantiatedMethodType The signature of the primary functional interface method after type variables
* are substituted with their instantiation from the capture site
* @param capturedArgs The dynamic arguments to the lambda factory site, which represent variables captured by
* @param instantiatedMethodType The signature of the primary functional
* interface method after type variables
* are substituted with their instantiation
* from the capture site
* @param capturedArgs The dynamic arguments to the lambda factory site,
* which represent variables captured by
* the lambda
*/
public SerializedLambda(Class<?> capturingClass,
int functionalInterfaceMethodKind,
String functionalInterfaceClass,
String functionalInterfaceMethodName,
String functionalInterfaceMethodSignature,
@ -85,7 +88,6 @@ public final class SerializedLambda implements Serializable {
String instantiatedMethodType,
Object[] capturedArgs) {
this.capturingClass = capturingClass;
this.functionalInterfaceMethodKind = functionalInterfaceMethodKind;
this.functionalInterfaceClass = functionalInterfaceClass;
this.functionalInterfaceMethodName = functionalInterfaceMethodName;
this.functionalInterfaceMethodSignature = functionalInterfaceMethodSignature;
@ -106,10 +108,10 @@ public final class SerializedLambda implements Serializable {
}
/**
* Get the name of the functional interface class to which this
* Get the name of the invoked type to which this
* lambda has been converted
* @return the name of the functional interface this lambda has
* been converted to
* @return the name of the functional interface class to which
* this lambda has been converted
*/
public String getFunctionalInterfaceClass() {
return functionalInterfaceClass;
@ -134,17 +136,6 @@ public final class SerializedLambda implements Serializable {
return functionalInterfaceMethodSignature;
}
/**
* Get the method handle kind (see {@link MethodHandleInfo}) of
* the primary method for the functional interface to which this
* lambda has been converted
* @return the method handle kind of the primary method of
* the functional interface
*/
public int getFunctionalInterfaceMethodKind() {
return functionalInterfaceMethodKind;
}
/**
* Get the name of the class containing the implementation
* method.
@ -234,11 +225,17 @@ public final class SerializedLambda implements Serializable {
@Override
public String toString() {
return String.format("SerializedLambda[capturingClass=%s, functionalInterfaceMethod=%s %s.%s:%s, " +
"implementation=%s %s.%s:%s, instantiatedMethodType=%s, numCaptured=%d]",
capturingClass, MethodHandleInfo.getReferenceKindString(functionalInterfaceMethodKind),
functionalInterfaceClass, functionalInterfaceMethodName, functionalInterfaceMethodSignature,
MethodHandleInfo.getReferenceKindString(implMethodKind), implClass, implMethodName,
implMethodSignature, instantiatedMethodType, capturedArgs.length);
String implKind = MethodHandleInfo.getReferenceKindString(implMethodKind);
return String.format("SerializedLambda[%s=%s, %s=%s.%s:%s, " +
"%s=%s %s.%s:%s, %s=%s, %s=%d]",
"capturingClass", capturingClass,
"functionalInterfaceMethod", functionalInterfaceClass,
functionalInterfaceMethodName,
functionalInterfaceMethodSignature,
"implementation",
implKind,
implClass, implMethodName, implMethodSignature,
"instantiatedMethodType", instantiatedMethodType,
"numCaptured", capturedArgs.length);
}
}
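
For completeness: a serializable lambda's generated class carries a private writeReplace() method returning the SerializedLambda this file defines, so the toString() built above can be observed with a little core reflection. A sketch under those assumptions, with illustrative names; it is not part of this changeset.

import java.io.Serializable;
import java.lang.invoke.SerializedLambda;
import java.lang.reflect.Method;
import java.util.function.Function;

// Illustrative demo, not part of this changeset.
public class SerializedLambdaDemo {
    // A functional interface that is also Serializable, so the compiler
    // routes the lambda through altMetafactory with FLAG_SERIALIZABLE.
    interface SerFunction<T, R> extends Function<T, R>, Serializable {}

    public static void main(String[] args) throws Exception {
        SerFunction<String, Integer> f = Integer::parseInt;
        // The generated lambda class declares writeReplace() returning
        // a SerializedLambda describing the factory-site information.
        Method writeReplace = f.getClass().getDeclaredMethod("writeReplace");
        writeReplace.setAccessible(true);
        SerializedLambda sl = (SerializedLambda) writeReplace.invoke(f);
        System.out.println(sl);                             // the format built above
        System.out.println(sl.getImplMethodName());         // parseInt
        System.out.println(sl.getFunctionalInterfaceClass());
    }
}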

View File

@ -1,5 +1,5 @@
/*
* Copyright 2013 Sun Microsystems, Inc. All Rights Reserved.
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -16,10 +16,10 @@
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*/
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test