Merge
commit ec2e296ec1

.hgtags
@@ -248,3 +248,4 @@ b32e2219736e42baaf45daf0ad67ed34f6033799 jdk9-b02
 7f655f31f9bcee618cf832f08176ad8c1ed3fdd3 jdk9-b03
 099891b1d86f3719e116ac717ffdafc90d037fb7 jdk9-b04
 dd311791ad6895a3989020dd6c6c46db87972ab8 jdk9-b05
+85dbdc227c5e11429b4fc4a8ba763f50107edd6e jdk9-b06
@@ -248,3 +248,4 @@ cd3825b2983045784d6fc6d1729c799b08215752 jdk8-b120
 fd8d51bdf9aadf7ae83e65e8655c53581017c363 jdk9-b03
 cb4c3440bc2748101923e2488506e61009ab1bf5 jdk9-b04
 8c63f0b6ada282f27e3a80125e53c3be603f9af7 jdk9-b05
+d0b525cd31b87abeb6d5b7e3516953eeb13b323c jdk9-b06
@@ -248,3 +248,4 @@ a7d3638deb2f4e33217b1ecf889479e90f9e5b50 jdk9-b00
 d338b892a13db19b093f85cf5f949a4504e4d31f jdk9-b03
 1ed19de263e1e0772da0269118cdd9deeb9fff04 jdk9-b04
 167c39eb44731a5d66770d0f00e231164653a2ff jdk9-b05
+a4bf701ac316946c2e5e83138ad8e687da6a4b30 jdk9-b06
@@ -408,3 +408,4 @@ b188446de75bda5fc52d102cddf242c3ef5ecbdf jdk9-b02
 b2fee789d23f3cdabb3db4e51af43038e5692d3a jdk9-b03
 3812c088b9456ee22c933e88aee1ece71f4e783a jdk9-b04
 bdc5311e1db7598589b77015119b821bf8c828bd jdk9-b05
+52377a30a3f87b62d6135706997b8c7a47366e37 jdk9-b06
@@ -66,8 +66,8 @@ ifndef CC_INTERP
   FORCE_TIERED=1
 endif
 endif
-# C1 is not ported on ppc64, so we cannot build a tiered VM:
-ifeq ($(ARCH),ppc64)
+# C1 is not ported on ppc64(le), so we cannot build a tiered VM:
+ifneq (,$(filter $(ARCH),ppc64 pp64le))
   FORCE_TIERED=0
 endif
@@ -33,6 +33,11 @@ SLASH_JAVA ?= /java
 # ARCH can be set explicitly in spec.gmk
 ifndef ARCH
   ARCH := $(shell uname -m)
+  # Fold little endian PowerPC64 into big-endian (if ARCH is set in
+  # hotspot-spec.gmk, this will be done by the configure script).
+  ifeq ($(ARCH),ppc64le)
+    ARCH := ppc64
+  endif
 endif
 
 PATH_SEP ?= :
@@ -337,56 +337,20 @@ endif
 ifeq ($(DEBUG_BINARIES), true)
   CFLAGS += -g
 else
-  # Use the stabs format for debugging information (this is the default
-  # on gcc-2.91). It's good enough, has all the information about line
-  # numbers and local variables, and libjvm.so is only about 16M.
-  # Change this back to "-g" if you want the most expressive format.
-  # (warning: that could easily inflate libjvm.so to 150M!)
-  # Note: The Itanium gcc compiler crashes when using -gstabs.
   DEBUG_CFLAGS/ia64  = -g
   DEBUG_CFLAGS/amd64 = -g
   DEBUG_CFLAGS/arm   = -g
   DEBUG_CFLAGS/ppc   = -g
   DEBUG_CFLAGS/ppc64 = -g
   DEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
   ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),)
-    ifeq ($(USE_CLANG), true)
-      # Clang doesn't understand -gstabs
-      DEBUG_CFLAGS += -g
-    else
-      DEBUG_CFLAGS += -gstabs
-    endif
+    DEBUG_CFLAGS += -g
   endif
 
   ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
     FASTDEBUG_CFLAGS/ia64  = -g
     FASTDEBUG_CFLAGS/amd64 = -g
     FASTDEBUG_CFLAGS/arm   = -g
     FASTDEBUG_CFLAGS/ppc   = -g
     FASTDEBUG_CFLAGS/ppc64 = -g
-    FASTDEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
+    FASTDEBUG_CFLAGS += $(FASTDEBUG_CFLAGS/$(BUILDARCH))
     ifeq ($(FASTDEBUG_CFLAGS/$(BUILDARCH)),)
-      ifeq ($(USE_CLANG), true)
-        # Clang doesn't understand -gstabs
-        FASTDEBUG_CFLAGS += -g
-      else
-        FASTDEBUG_CFLAGS += -gstabs
-      endif
+      FASTDEBUG_CFLAGS += -g
    endif
 
     OPT_CFLAGS/ia64  = -g
     OPT_CFLAGS/amd64 = -g
     OPT_CFLAGS/arm   = -g
     OPT_CFLAGS/ppc   = -g
     OPT_CFLAGS/ppc64 = -g
 
     OPT_CFLAGS += $(OPT_CFLAGS/$(BUILDARCH))
     ifeq ($(OPT_CFLAGS/$(BUILDARCH)),)
-      ifeq ($(USE_CLANG), true)
-        # Clang doesn't understand -gstabs
-        OPT_CFLAGS += -g
-      else
-        OPT_CFLAGS += -gstabs
-      endif
+      OPT_CFLAGS += -g
     endif
   endif
 endif
@@ -26,14 +26,26 @@
 # make c code know it is on a 64 bit platform.
 CFLAGS += -D_LP64=1
 
-# fixes `relocation truncated to fit' error for gcc 4.1.
-CFLAGS += -mminimal-toc
+ifeq ($(origin OPENJDK_TARGET_CPU_ENDIAN),undefined)
+  # This can happen during hotspot standalone build. Set endianness from
+  # uname. We assume build and target machines are the same.
+  OPENJDK_TARGET_CPU_ENDIAN:=$(if $(filter ppc64le,$(shell uname -m)),little,big)
+endif
 
-# finds use ppc64 instructions, but schedule for power5
-CFLAGS += -mcpu=powerpc64 -mtune=power5 -minsert-sched-nops=regroup_exact -mno-multiple -mno-string
+ifeq ($(filter $(OPENJDK_TARGET_CPU_ENDIAN),big little),)
+  $(error OPENJDK_TARGET_CPU_ENDIAN value should be 'big' or 'little')
+endif
 
 # let linker find external 64 bit libs.
 LFLAGS_VM += -L/lib64
+ifeq ($(OPENJDK_TARGET_CPU_ENDIAN),big)
+  # fixes `relocation truncated to fit' error for gcc 4.1.
+  CFLAGS += -mminimal-toc
 
 # specify lib format.
 LFLAGS_VM += -Wl,-melf64ppc
+  # finds use ppc64 instructions, but schedule for power5
+  CFLAGS += -mcpu=powerpc64 -mtune=power5 -minsert-sched-nops=regroup_exact -mno-multiple -mno-string
+else
+  # Little endian machine uses ELFv2 ABI.
+  CFLAGS += -DVM_LITTLE_ENDIAN -DABI_ELFv2
+
+  # Use Power8, this is the first CPU to support PPC64 LE with ELFv2 ABI.
+  CFLAGS += -mcpu=power7 -mtune=power8 -minsert-sched-nops=regroup_exact -mno-multiple -mno-string
+endif
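The two makefile hunks above fold ppc64le into ARCH=ppc64 and then split the compiler flags on OPENJDK_TARGET_CPU_ENDIAN. As a minimal standalone sketch (not HotSpot code) of what that split means for compiled C++: VM_LITTLE_ENDIAN is the define added by the little-endian branch above, while the runtime probe and the program around it are illustrative assumptions.

#include <cstdint>
#include <cstdio>

// Independent runtime cross-check of the compile-time choice: on a
// little-endian machine the least significant byte is stored first.
static bool runtime_is_little_endian() {
  const uint32_t probe = 1;
  return *reinterpret_cast<const uint8_t*>(&probe) == 1;
}

int main() {
#if defined(VM_LITTLE_ENDIAN)
  std::printf("compiled for little-endian (ELFv2 ABI on ppc64le)\n");
#else
  std::printf("compiled for big-endian\n");
#endif
  std::printf("runtime check: %s-endian\n",
              runtime_is_little_endian() ? "little" : "big");
  return 0;
}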
@@ -1025,15 +1025,14 @@ class Assembler : public AbstractAssembler {
   }
 
   static void set_imm(int* instr, short s) {
-    short* p = ((short *)instr) + 1;
-    *p = s;
+    // imm is always in the lower 16 bits of the instruction,
+    // so this is endian-neutral. Same for the get_imm below.
+    uint32_t w = *(uint32_t *)instr;
+    *instr = (int)((w & ~0x0000FFFF) | (s & 0x0000FFFF));
   }
 
   static int get_imm(address a, int instruction_number) {
-    short imm;
-    short *p =((short *)a)+2*instruction_number+1;
-    imm = *p;
-    return (int)imm;
+    return (short)((int *)a)[instruction_number];
   }
 
   static inline int hi16_signed( int x) { return (int)(int16_t)(x >> 16); }
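The comment added in this hunk is the key point: the old set_imm patched the immediate through a halfword pointer at a fixed offset, which selects the wrong half of the word on little-endian PPC64. A hedged standalone sketch of the endian-neutral read-modify-write it was replaced with; the instruction encoding used here is only an example.

#include <cstdint>
#include <cassert>

// Patch the low 16 bits of a 32-bit instruction word. The masked
// read-modify-write is byte-order neutral; indexing halfwords through a
// short* would pick a different half of the word on a little-endian host.
static void set_imm_endian_neutral(int32_t* instr, short s) {
  uint32_t w = *reinterpret_cast<uint32_t*>(instr);
  *instr = static_cast<int32_t>((w & ~0x0000FFFFu) | static_cast<uint16_t>(s));
}

int main() {
  int32_t instr = 0x38600000;              // e.g. "li r3, 0"; imm in low 16 bits
  set_imm_endian_neutral(&instr, 0x1234);
  assert((instr & 0xFFFF) == 0x1234);      // immediate patched on any endianness
  assert((instr & ~0xFFFF) == 0x38600000); // opcode bits untouched
  return 0;
}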
@@ -35,6 +35,126 @@ class Bytes: AllStatic {
 
   // Can I count on address always being a pointer to an unsigned char? Yes.
 
+#if defined(VM_LITTLE_ENDIAN)
+
+  // Returns true, if the byte ordering used by Java is different from the native byte ordering
+  // of the underlying machine. For example, true for Intel x86, False, for Solaris on Sparc.
+  static inline bool is_Java_byte_ordering_different() { return true; }
+
+  // Forward declarations of the compiler-dependent implementation
+  static inline u2 swap_u2(u2 x);
+  static inline u4 swap_u4(u4 x);
+  static inline u8 swap_u8(u8 x);
+
+  static inline u2 get_native_u2(address p) {
+    return (intptr_t(p) & 1) == 0
+             ? *(u2*)p
+             : ( u2(p[1]) << 8 )
+               | ( u2(p[0]) );
+  }
+
+  static inline u4 get_native_u4(address p) {
+    switch (intptr_t(p) & 3) {
+      case 0:  return *(u4*)p;
+
+      case 2:  return ( u4( ((u2*)p)[1] ) << 16 )
+                    | ( u4( ((u2*)p)[0] ) );
+
+      default: return ( u4(p[3]) << 24 )
+                    | ( u4(p[2]) << 16 )
+                    | ( u4(p[1]) <<  8 )
+                    |   u4(p[0]);
+    }
+  }
+
+  static inline u8 get_native_u8(address p) {
+    switch (intptr_t(p) & 7) {
+      case 0:  return *(u8*)p;
+
+      case 4:  return ( u8( ((u4*)p)[1] ) << 32 )
+                    | ( u8( ((u4*)p)[0] ) );
+
+      case 2:  return ( u8( ((u2*)p)[3] ) << 48 )
+                    | ( u8( ((u2*)p)[2] ) << 32 )
+                    | ( u8( ((u2*)p)[1] ) << 16 )
+                    | ( u8( ((u2*)p)[0] ) );
+
+      default: return ( u8(p[7]) << 56 )
+                    | ( u8(p[6]) << 48 )
+                    | ( u8(p[5]) << 40 )
+                    | ( u8(p[4]) << 32 )
+                    | ( u8(p[3]) << 24 )
+                    | ( u8(p[2]) << 16 )
+                    | ( u8(p[1]) <<  8 )
+                    |   u8(p[0]);
+    }
+  }
+
+
+
+  static inline void put_native_u2(address p, u2 x) {
+    if ( (intptr_t(p) & 1) == 0 ) *(u2*)p = x;
+    else {
+      p[1] = x >> 8;
+      p[0] = x;
+    }
+  }
+
+  static inline void put_native_u4(address p, u4 x) {
+    switch ( intptr_t(p) & 3 ) {
+      case 0:  *(u4*)p = x;
+               break;
+
+      case 2:  ((u2*)p)[1] = x >> 16;
+               ((u2*)p)[0] = x;
+               break;
+
+      default: ((u1*)p)[3] = x >> 24;
+               ((u1*)p)[2] = x >> 16;
+               ((u1*)p)[1] = x >>  8;
+               ((u1*)p)[0] = x;
+               break;
+    }
+  }
+
+  static inline void put_native_u8(address p, u8 x) {
+    switch ( intptr_t(p) & 7 ) {
+      case 0:  *(u8*)p = x;
+               break;
+
+      case 4:  ((u4*)p)[1] = x >> 32;
+               ((u4*)p)[0] = x;
+               break;
+
+      case 2:  ((u2*)p)[3] = x >> 48;
+               ((u2*)p)[2] = x >> 32;
+               ((u2*)p)[1] = x >> 16;
+               ((u2*)p)[0] = x;
+               break;
+
+      default: ((u1*)p)[7] = x >> 56;
+               ((u1*)p)[6] = x >> 48;
+               ((u1*)p)[5] = x >> 40;
+               ((u1*)p)[4] = x >> 32;
+               ((u1*)p)[3] = x >> 24;
+               ((u1*)p)[2] = x >> 16;
+               ((u1*)p)[1] = x >>  8;
+               ((u1*)p)[0] = x;
+    }
+  }
+
+  // Efficient reading and writing of unaligned unsigned data in Java byte ordering (i.e. big-endian ordering)
+  // (no byte-order reversal is needed since Power CPUs are big-endian oriented).
+  static inline u2 get_Java_u2(address p) { return swap_u2(get_native_u2(p)); }
+  static inline u4 get_Java_u4(address p) { return swap_u4(get_native_u4(p)); }
+  static inline u8 get_Java_u8(address p) { return swap_u8(get_native_u8(p)); }
+
+  static inline void put_Java_u2(address p, u2 x) { put_native_u2(p, swap_u2(x)); }
+  static inline void put_Java_u4(address p, u4 x) { put_native_u4(p, swap_u4(x)); }
+  static inline void put_Java_u8(address p, u8 x) { put_native_u8(p, swap_u8(x)); }
+
+#else // !defined(VM_LITTLE_ENDIAN)
+
 // Returns true, if the byte ordering used by Java is different from the nativ byte ordering
 // of the underlying machine. For example, true for Intel x86, False, for Solaris on Sparc.
 static inline bool is_Java_byte_ordering_different() { return false; }
@@ -150,6 +270,12 @@ class Bytes: AllStatic {
   static inline void put_Java_u2(address p, u2 x) { put_native_u2(p, x); }
   static inline void put_Java_u4(address p, u4 x) { put_native_u4(p, x); }
   static inline void put_Java_u8(address p, u8 x) { put_native_u8(p, x); }
 
+#endif // VM_LITTLE_ENDIAN
+
 };
 
+#if defined(TARGET_OS_ARCH_linux_ppc)
+#include "bytes_linux_ppc.inline.hpp"
+#endif
+
 #endif // CPU_PPC_VM_BYTES_PPC_HPP
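A self-contained sketch of the alignment-dispatch idiom the new get_native_u4 above uses: aligned addresses take a single-load fast path, halfword-aligned addresses assemble from two u2 loads, and anything else falls back to byte assembly. The type aliases and test values are assumptions for the demo, and it presumes a little-endian host as in the new VM_LITTLE_ENDIAN branch.

#include <cstdint>
#include <cstdio>

typedef uint8_t  u1;
typedef uint16_t u2;
typedef uint32_t u4;

static u4 get_native_u4_demo(const u1* p) {
  switch (reinterpret_cast<uintptr_t>(p) & 3) {
    case 0:  return *reinterpret_cast<const u4*>(p);          // fully aligned
    case 2:  return (u4(reinterpret_cast<const u2*>(p)[1]) << 16)
                  | (u4(reinterpret_cast<const u2*>(p)[0]));  // halfword aligned
    default: return (u4(p[3]) << 24) | (u4(p[2]) << 16)
                  | (u4(p[1]) <<  8) |  u4(p[0]);             // byte-by-byte
  }
}

int main() {
  alignas(4) u1 buf[8] = {0x78, 0x56, 0x34, 0x12, 0xBC, 0x9A, 0, 0};
  std::printf("%08x\n", get_native_u4_demo(buf));     // 12345678 on little-endian
  std::printf("%08x\n", get_native_u4_demo(buf + 1)); // unaligned path: bc123456
  return 0;
}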
@@ -799,7 +799,13 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
       if (UseCompressedOops && !wide) {
         __ movl(as_Address(addr), (int32_t)NULL_WORD);
       } else {
+#ifdef _LP64
+        __ xorptr(rscratch1, rscratch1);
+        null_check_here = code_offset();
+        __ movptr(as_Address(addr), rscratch1);
+#else
         __ movptr(as_Address(addr), NULL_WORD);
+#endif
       }
     } else {
       if (is_literal_address(addr)) {
@@ -59,9 +59,9 @@ static BufferBlob* stub_blob;
 static const int stub_size = 600;
 
 extern "C" {
-  typedef void (*getPsrInfo_stub_t)(void*);
+  typedef void (*get_cpu_info_stub_t)(void*);
 }
-static getPsrInfo_stub_t getPsrInfo_stub = NULL;
+static get_cpu_info_stub_t get_cpu_info_stub = NULL;
 
 
 class VM_Version_StubGenerator: public StubCodeGenerator {
@@ -69,7 +69,7 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
 
   VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}
 
-  address generate_getPsrInfo() {
+  address generate_get_cpu_info() {
     // Flags to test CPU type.
     const uint32_t HS_EFL_AC = 0x40000;
     const uint32_t HS_EFL_ID = 0x200000;
@@ -81,13 +81,13 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
     Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4;
     Label sef_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, ext_cpuid7, done;
 
-    StubCodeMark mark(this, "VM_Version", "getPsrInfo_stub");
+    StubCodeMark mark(this, "VM_Version", "get_cpu_info_stub");
 #   define __ _masm->
 
     address start = __ pc();
 
     //
-    // void getPsrInfo(VM_Version::CpuidInfo* cpuid_info);
+    // void get_cpu_info(VM_Version::CpuidInfo* cpuid_info);
     //
     // LP64: rcx and rdx are first and second argument registers on windows
 
@@ -385,6 +385,14 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
 };
 
 
+void VM_Version::get_cpu_info_wrapper() {
+  get_cpu_info_stub(&_cpuid_info);
+}
+
+#ifndef CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED
+  #define CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(f) f()
+#endif
+
 void VM_Version::get_processor_features() {
 
   _cpu = 4; // 486 by default
@@ -395,7 +403,11 @@ void VM_Version::get_processor_features() {
 
   if (!Use486InstrsOnly) {
     // Get raw processor info
-    getPsrInfo_stub(&_cpuid_info);
+
+    // Some platforms (like Win*) need a wrapper around here
+    // in order to properly handle SEGV for YMM registers test.
+    CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(get_cpu_info_wrapper);
+
     assert_is_initialized();
     _cpu = extended_cpu_family();
     _model = extended_cpu_model();
@@ -986,14 +998,14 @@ void VM_Version::initialize() {
   ResourceMark rm;
   // Making this stub must be FIRST use of assembler
 
-  stub_blob = BufferBlob::create("getPsrInfo_stub", stub_size);
+  stub_blob = BufferBlob::create("get_cpu_info_stub", stub_size);
   if (stub_blob == NULL) {
-    vm_exit_during_initialization("Unable to allocate getPsrInfo_stub");
+    vm_exit_during_initialization("Unable to allocate get_cpu_info_stub");
   }
   CodeBuffer c(stub_blob);
   VM_Version_StubGenerator g(&c);
-  getPsrInfo_stub = CAST_TO_FN_PTR(getPsrInfo_stub_t,
-                                   g.generate_getPsrInfo());
+  get_cpu_info_stub = CAST_TO_FN_PTR(get_cpu_info_stub_t,
+                                     g.generate_get_cpu_info());
 
   get_processor_features();
 }
@@ -507,6 +507,7 @@ public:
   // The value used to check ymm register after signal handle
   static int ymm_test_value() { return 0xCAFEBABE; }
 
+  static void get_cpu_info_wrapper();
   static void set_cpuinfo_segv_addr(address pc) { _cpuinfo_segv_addr = pc; }
   static bool is_cpuinfo_segv_addr(address pc) { return _cpuinfo_segv_addr == pc; }
   static void set_cpuinfo_cont_addr(address pc) { _cpuinfo_cont_addr = pc; }
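A small model of the CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED pattern introduced above: platforms that must guard the CPU probe (Windows wraps it in a structured exception handler) define the macro themselves; everywhere else the #ifndef default degenerates to a plain call. The probe function below is a stand-in, not the real stub.

#include <cstdio>

static void get_cpu_info() { std::printf("probing cpuid...\n"); }

#ifndef CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED
  #define CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(f) f()   // default: direct call
#endif

int main() {
  // On Windows the macro would instead expand to
  // os::win32::call_test_func_with_wrapper(get_cpu_info).
  CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(get_cpu_info);
  return 0;
}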
@@ -1932,7 +1932,11 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
     {EM_SPARC32PLUS, EM_SPARC, ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
     {EM_SPARCV9, EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
     {EM_PPC, EM_PPC, ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
+#if defined(VM_LITTLE_ENDIAN)
+    {EM_PPC64, EM_PPC64, ELFCLASS64, ELFDATA2LSB, (char*)"Power PC 64"},
+#else
     {EM_PPC64, EM_PPC64, ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
+#endif
     {EM_ARM, EM_ARM, ELFCLASS32, ELFDATA2LSB, (char*)"ARM"},
     {EM_S390, EM_S390, ELFCLASSNONE, ELFDATA2MSB, (char*)"IBM System/390"},
     {EM_ALPHA, EM_ALPHA, ELFCLASS64, ELFDATA2LSB, (char*)"Alpha"},
@@ -2702,7 +2702,6 @@ address os::win32::fast_jni_accessor_wrapper(BasicType type) {
 }
 #endif
 
-#ifndef PRODUCT
 void os::win32::call_test_func_with_wrapper(void (*funcPtr)(void)) {
   // Install a win32 structured exception handler around the test
   // function call so the VM can generate an error dump if needed.
@@ -2713,7 +2712,6 @@ void os::win32::call_test_func_with_wrapper(void (*funcPtr)(void)) {
     // Nothing to do.
   }
 }
-#endif
 
 // Virtual Memory
 
@@ -101,9 +101,7 @@ class win32 {
   static address fast_jni_accessor_wrapper(BasicType);
 #endif
 
-#ifndef PRODUCT
   static void call_test_func_with_wrapper(void (*funcPtr)(void));
-#endif
 
   // filter function to ignore faults on serializations page
   static LONG WINAPI serialize_fault_filter(struct _EXCEPTION_POINTERS* e);
@@ -108,9 +108,7 @@ inline bool os::supports_monotonic_clock() {
   return win32::_has_performance_count;
 }
 
-#ifndef PRODUCT
-#define CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(f) \
-  os::win32::call_test_func_with_wrapper(f)
-#endif
+#define CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(f) \
+  os::win32::call_test_func_with_wrapper(f)
 
 #endif // OS_WINDOWS_VM_OS_WINDOWS_INLINE_HPP
hotspot/src/os_cpu/linux_ppc/vm/bytes_linux_ppc.inline.hpp (new file, 39 lines)
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2014 Google Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_PPC_VM_BYTES_LINUX_PPC_INLINE_HPP
+#define OS_CPU_LINUX_PPC_VM_BYTES_LINUX_PPC_INLINE_HPP
+
+#if defined(VM_LITTLE_ENDIAN)
+#include <byteswap.h>
+
+// Efficient swapping of data bytes from Java byte
+// ordering to native byte ordering and vice versa.
+inline u2 Bytes::swap_u2(u2 x) { return bswap_16(x); }
+inline u4 Bytes::swap_u4(u4 x) { return bswap_32(x); }
+inline u8 Bytes::swap_u8(u8 x) { return bswap_64(x); }
+#endif // VM_LITTLE_ENDIAN
+
+#endif // OS_CPU_LINUX_PPC_VM_BYTES_LINUX_PPC_INLINE_HPP
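What the bswap-based swappers in the new file provide, restated as a runnable sketch; the GCC/Clang builtins stand in for <byteswap.h> and are an assumption, not part of the changeset. On ppc64le, Java class-file data stays big-endian on disk, so every multi-byte read goes through one swap.

#include <cstdint>
#include <cassert>

static inline uint16_t swap_u2(uint16_t x) { return __builtin_bswap16(x); }
static inline uint32_t swap_u4(uint32_t x) { return __builtin_bswap32(x); }
static inline uint64_t swap_u8(uint64_t x) { return __builtin_bswap64(x); }

int main() {
  // 0xCAFEBABE is the class-file magic in Java (big-endian) ordering.
  // A native little-endian load of those bytes yields the reversed value;
  // one swap restores Java ordering.
  assert(swap_u4(0xBEBAFECA) == 0xCAFEBABE);
  assert(swap_u2(0x3412) == 0x1234);
  assert(swap_u8(0x0807060504030201ULL) == 0x0102030405060708ULL);
  return 0;
}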
@@ -243,7 +243,6 @@ int main(int argc, char *argv[])
   AD.addInclude(AD._CPP_file, "vmreg_arm.inline.hpp");
 #endif
 #ifdef TARGET_ARCH_ppc
-  AD.addInclude(AD._CPP_file, "assembler_ppc.inline.hpp");
   AD.addInclude(AD._CPP_file, "nativeInst_ppc.hpp");
   AD.addInclude(AD._CPP_file, "vmreg_ppc.inline.hpp");
 #endif
@@ -274,6 +273,7 @@ int main(int argc, char *argv[])
   AD.addInclude(AD._DFA_file, "opto/cfgnode.hpp"); // Use PROB_MAX in predicate.
   AD.addInclude(AD._DFA_file, "opto/matcher.hpp");
   AD.addInclude(AD._DFA_file, "opto/opcodes.hpp");
+  AD.addInclude(AD._DFA_file, "opto/convertnode.hpp");
   // Make sure each .cpp file starts with include lines:
   // files declaring and defining generators for Mach* Objects (hpp,cpp)
   // Generate the result files:
@@ -581,14 +581,14 @@ void ciMethod::assert_call_type_ok(int bci) {
  * Check whether profiling provides a type for the argument i to the
  * call at bci bci
  *
- * @param bci bci of the call
- * @param i argument number
- * @return profiled type
+ * @param [in]bci bci of the call
+ * @param [in]i argument number
+ * @param [out]type profiled type of argument, NULL if none
+ * @param [out]maybe_null true if null was seen for argument
+ * @return true if profiling exists
  *
  * If the profile reports that the argument may be null, return false
  * at least for now.
  */
-ciKlass* ciMethod::argument_profiled_type(int bci, int i) {
+bool ciMethod::argument_profiled_type(int bci, int i, ciKlass*& type, bool& maybe_null) {
   if (MethodData::profile_parameters() && method_data() != NULL && method_data()->is_mature()) {
     ciProfileData* data = method_data()->bci_to_data(bci);
     if (data != NULL) {
@@ -596,82 +596,77 @@ ciKlass* ciMethod::argument_profiled_type(int bci, int i) {
       assert_virtual_call_type_ok(bci);
       ciVirtualCallTypeData* call = (ciVirtualCallTypeData*)data->as_VirtualCallTypeData();
       if (i >= call->number_of_arguments()) {
-        return NULL;
+        return false;
       }
-      ciKlass* type = call->valid_argument_type(i);
-      if (type != NULL && !call->argument_maybe_null(i)) {
-        return type;
-      }
+      type = call->valid_argument_type(i);
+      maybe_null = call->argument_maybe_null(i);
+      return true;
     } else if (data->is_CallTypeData()) {
       assert_call_type_ok(bci);
       ciCallTypeData* call = (ciCallTypeData*)data->as_CallTypeData();
       if (i >= call->number_of_arguments()) {
-        return NULL;
+        return false;
      }
-      ciKlass* type = call->valid_argument_type(i);
-      if (type != NULL && !call->argument_maybe_null(i)) {
-        return type;
-      }
+      type = call->valid_argument_type(i);
+      maybe_null = call->argument_maybe_null(i);
+      return true;
     }
   }
 }
-  return NULL;
+  return false;
 }
 
 /**
  * Check whether profiling provides a type for the return value from
  * the call at bci bci
  *
- * @param bci bci of the call
- * @return profiled type
+ * @param [in]bci bci of the call
+ * @param [out]type profiled type of argument, NULL if none
+ * @param [out]maybe_null true if null was seen for argument
+ * @return true if profiling exists
  *
 * If the profile reports that the argument may be null, return false
 * at least for now.
 */
-ciKlass* ciMethod::return_profiled_type(int bci) {
+bool ciMethod::return_profiled_type(int bci, ciKlass*& type, bool& maybe_null) {
   if (MethodData::profile_return() && method_data() != NULL && method_data()->is_mature()) {
     ciProfileData* data = method_data()->bci_to_data(bci);
     if (data != NULL) {
       if (data->is_VirtualCallTypeData()) {
         assert_virtual_call_type_ok(bci);
         ciVirtualCallTypeData* call = (ciVirtualCallTypeData*)data->as_VirtualCallTypeData();
-        ciKlass* type = call->valid_return_type();
-        if (type != NULL && !call->return_maybe_null()) {
-          return type;
-        }
+        type = call->valid_return_type();
+        maybe_null = call->return_maybe_null();
+        return true;
       } else if (data->is_CallTypeData()) {
         assert_call_type_ok(bci);
         ciCallTypeData* call = (ciCallTypeData*)data->as_CallTypeData();
-        ciKlass* type = call->valid_return_type();
-        if (type != NULL && !call->return_maybe_null()) {
-          return type;
-        }
+        type = call->valid_return_type();
+        maybe_null = call->return_maybe_null();
+        return true;
       }
     }
   }
-  return NULL;
+  return false;
 }
 
 /**
  * Check whether profiling provides a type for the parameter i
 *
- * @param i parameter number
- * @return profiled type
+ * @param [in]i parameter number
+ * @param [out]type profiled type of parameter, NULL if none
+ * @param [out]maybe_null true if null was seen for parameter
+ * @return true if profiling exists
 *
 * If the profile reports that the argument may be null, return false
 * at least for now.
 */
-ciKlass* ciMethod::parameter_profiled_type(int i) {
+bool ciMethod::parameter_profiled_type(int i, ciKlass*& type, bool& maybe_null) {
   if (MethodData::profile_parameters() && method_data() != NULL && method_data()->is_mature()) {
     ciParametersTypeData* parameters = method_data()->parameters_type_data();
     if (parameters != NULL && i < parameters->number_of_parameters()) {
-      ciKlass* type = parameters->valid_parameter_type(i);
-      if (type != NULL && !parameters->parameter_maybe_null(i)) {
-        return type;
-      }
+      type = parameters->valid_parameter_type(i);
+      maybe_null = parameters->parameter_maybe_null(i);
+      return true;
    }
  }
-  return NULL;
+  return false;
 }
 
 
@@ -234,10 +234,10 @@ class ciMethod : public ciMetadata {
   ciCallProfile call_profile_at_bci(int bci);
   int           interpreter_call_site_count(int bci);
 
-  // Does type profiling provide a useful type at this point?
-  ciKlass* argument_profiled_type(int bci, int i);
-  ciKlass* parameter_profiled_type(int i);
-  ciKlass* return_profiled_type(int bci);
+  // Does type profiling provide any useful information at this point?
+  bool argument_profiled_type(int bci, int i, ciKlass*& type, bool& maybe_null);
+  bool parameter_profiled_type(int i, ciKlass*& type, bool& maybe_null);
+  bool return_profiled_type(int bci, ciKlass*& type, bool& maybe_null);
 
   ciField*      get_field_at_bci( int bci, bool &will_link);
   ciMethod*     get_method_at_bci(int bci, bool &will_link, ciSignature* *declared_signature);
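A standalone analogue (not HotSpot code; all names invented for illustration) of the API reshaping above: a query that used to return a pointer-or-NULL now returns whether a profile exists at all and reports the type and the null-seen bit through out-parameters, so "type known but may be null" is no longer collapsed into NULL.

#include <cassert>
#include <cstddef>

struct Klass { const char* name; };

// New-style query, shaped like bool argument_profiled_type(..., ciKlass*& type, bool& maybe_null)
static bool profiled_type(bool has_profile, Klass* seen, bool null_seen,
                          Klass*& type, bool& maybe_null) {
  if (!has_profile) return false;   // nothing recorded at this call site
  type = seen;                      // may itself be NULL ("no useful type")
  maybe_null = null_seen;
  return true;
}

int main() {
  static Klass string_klass = {"java/lang/String"};
  Klass* type = NULL;
  bool maybe_null = true;

  // The old API collapsed "type seen but sometimes null" into NULL; the new
  // shape keeps both facts, so a compiler can emit a null check and still
  // speculate on the profiled type.
  assert(profiled_type(true, &string_klass, true, type, maybe_null));
  assert(type == &string_klass && maybe_null);
  return 0;
}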
@@ -135,6 +135,14 @@ void ClassLoaderData::classes_do(void f(Klass * const)) {
   }
 }
 
+void ClassLoaderData::methods_do(void f(Method*)) {
+  for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
+    if (k->oop_is_instance()) {
+      InstanceKlass::cast(k)->methods_do(f);
+    }
+  }
+}
+
 void ClassLoaderData::loaded_classes_do(KlassClosure* klass_closure) {
   // Lock to avoid classes being modified/added/removed during iteration
   MutexLockerEx ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
@@ -624,6 +632,12 @@ void ClassLoaderDataGraph::classes_do(void f(Klass* const)) {
   }
 }
 
+void ClassLoaderDataGraph::methods_do(void f(Method*)) {
+  for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
+    cld->methods_do(f);
+  }
+}
+
 void ClassLoaderDataGraph::loaded_classes_do(KlassClosure* klass_closure) {
   for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
     cld->loaded_classes_do(klass_closure);
@@ -78,6 +78,7 @@ class ClassLoaderDataGraph : public AllStatic {
   static void keep_alive_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
   static void classes_do(KlassClosure* klass_closure);
   static void classes_do(void f(Klass* const));
+  static void methods_do(void f(Method*));
   static void loaded_classes_do(KlassClosure* klass_closure);
   static void classes_unloading_do(void f(Klass* const));
   static bool do_unloading(BoolObjectClosure* is_alive);
@@ -189,6 +190,7 @@ class ClassLoaderData : public CHeapObj<mtClass> {
   void classes_do(void f(Klass*));
   void loaded_classes_do(KlassClosure* klass_closure);
   void classes_do(void f(InstanceKlass*));
+  void methods_do(void f(Method*));
 
   // Deallocate free list during class unloading.
   void free_deallocate_list();
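A minimal model of the methods_do plumbing added in this change, with simplified stand-in types: each level forwards a plain function pointer down the graph, so a flat callback can visit every method without knowing the container layout.

#include <vector>

struct Method { const char* name; };

struct InstanceKlass {
  std::vector<Method> methods;
  void methods_do(void f(Method*)) {
    for (Method& m : methods) f(&m);
  }
};

struct ClassLoaderData {
  std::vector<InstanceKlass> klasses;
  ClassLoaderData* next = nullptr;
  void methods_do(void f(Method*)) {
    for (InstanceKlass& k : klasses) k.methods_do(f);
  }
};

static int g_count = 0;
static void count_method(Method*) { ++g_count; }

// Graph-level walk, shaped like the new ClassLoaderDataGraph::methods_do
static void graph_methods_do(ClassLoaderData* head, void f(Method*)) {
  for (ClassLoaderData* cld = head; cld != nullptr; cld = cld->next) {
    cld->methods_do(f);
  }
}

int main() {
  ClassLoaderData cld;
  cld.klasses.push_back(InstanceKlass{{{"main"}, {"toString"}}});
  graph_methods_do(&cld, count_method);
  return g_count == 2 ? 0 : 1;
}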
@@ -116,10 +116,6 @@ class MarkRefsIntoClosure: public CMSOopsInGenClosure {
   MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
-
-  Prefetch::style prefetch_style() {
-    return Prefetch::do_read;
-  }
 };
 
 class Par_MarkRefsIntoClosure: public CMSOopsInGenClosure {
@@ -132,10 +128,6 @@ class Par_MarkRefsIntoClosure: public CMSOopsInGenClosure {
   Par_MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
-
-  Prefetch::style prefetch_style() {
-    return Prefetch::do_read;
-  }
 };
 
 // A variant of the above used in certain kinds of CMS
@@ -152,10 +144,6 @@ class MarkRefsIntoVerifyClosure: public CMSOopsInGenClosure {
                            CMSBitMap* cms_bm);
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
-
-  Prefetch::style prefetch_style() {
-    return Prefetch::do_read;
-  }
 };
 
 // The non-parallel version (the parallel version appears further below).
@@ -181,10 +169,6 @@ class PushAndMarkClosure: public CMSOopClosure {
   virtual void do_oop(narrowOop* p);
   inline void do_oop_nv(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
   inline void do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
-
-  Prefetch::style prefetch_style() {
-    return Prefetch::do_read;
-  }
 };
 
 // In the parallel case, the bit map and the
@@ -211,10 +195,6 @@ class Par_PushAndMarkClosure: public CMSOopClosure {
   virtual void do_oop(narrowOop* p);
   inline void do_oop_nv(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
   inline void do_oop_nv(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
-
-  Prefetch::style prefetch_style() {
-    return Prefetch::do_read;
-  }
 };
 
 // The non-parallel version (the parallel version appears further below).
@@ -245,9 +225,6 @@ class MarkRefsIntoAndScanClosure: public CMSOopsInGenClosure {
   inline void do_oop_nv(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
   inline void do_oop_nv(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
 
-  Prefetch::style prefetch_style() {
-    return Prefetch::do_read;
-  }
   void set_freelistLock(Mutex* m) {
     _freelistLock = m;
   }
@@ -282,9 +259,6 @@ class Par_MarkRefsIntoAndScanClosure: public CMSOopsInGenClosure {
   inline void do_oop_nv(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
   inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
 
-  Prefetch::style prefetch_style() {
-    return Prefetch::do_read;
-  }
   void trim_queue(uint size);
 };
 
@@ -851,42 +851,60 @@ void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
                                                   UpwardsObjectClosure* cl) {
   assert_locked(freelistLock());
   NOT_PRODUCT(verify_objects_initialized());
-  Space::object_iterate_mem(mr, cl);
+  assert(!mr.is_empty(), "Should be non-empty");
+  // We use MemRegion(bottom(), end()) rather than used_region() below
+  // because the two are not necessarily equal for some kinds of
+  // spaces, in particular, certain kinds of free list spaces.
+  // We could use the more complicated but more precise:
+  // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
+  // but the slight imprecision seems acceptable in the assertion check.
+  assert(MemRegion(bottom(), end()).contains(mr),
+         "Should be within used space");
+  HeapWord* prev = cl->previous();   // max address from last time
+  if (prev >= mr.end()) { // nothing to do
+    return;
+  }
+  // This assert will not work when we go from cms space to perm
+  // space, and use same closure. Easy fix deferred for later. XXX YSR
+  // assert(prev == NULL || contains(prev), "Should be within space");
+
+  bool last_was_obj_array = false;
+  HeapWord *blk_start_addr, *region_start_addr;
+  if (prev > mr.start()) {
+    region_start_addr = prev;
+    blk_start_addr    = prev;
+    // The previous invocation may have pushed "prev" beyond the
+    // last allocated block yet there may be still be blocks
+    // in this region due to a particular coalescing policy.
+    // Relax the assertion so that the case where the unallocated
+    // block is maintained and "prev" is beyond the unallocated
+    // block does not cause the assertion to fire.
+    assert((BlockOffsetArrayUseUnallocatedBlock &&
+            (!is_in(prev))) ||
+           (blk_start_addr == block_start(region_start_addr)), "invariant");
+  } else {
+    region_start_addr = mr.start();
+    blk_start_addr    = block_start(region_start_addr);
+  }
+  HeapWord* region_end_addr = mr.end();
+  MemRegion derived_mr(region_start_addr, region_end_addr);
+  while (blk_start_addr < region_end_addr) {
+    const size_t size = block_size(blk_start_addr);
+    if (block_is_obj(blk_start_addr)) {
+      last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
+    } else {
+      last_was_obj_array = false;
+    }
+    blk_start_addr += size;
+  }
+  if (!last_was_obj_array) {
+    assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
+           "Should be within (closed) used space");
+    assert(blk_start_addr > prev, "Invariant");
+    cl->set_previous(blk_start_addr); // min address for next time
+  }
 }
 
-// Callers of this iterator beware: The closure application should
-// be robust in the face of uninitialized objects and should (always)
-// return a correct size so that the next addr + size below gives us a
-// valid block boundary. [See for instance,
-// ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
-// in ConcurrentMarkSweepGeneration.cpp.]
-HeapWord*
-CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
-  assert_lock_strong(freelistLock());
-  HeapWord *addr, *last;
-  size_t size;
-  for (addr = bottom(), last = end();
-       addr < last; addr += size) {
-    FreeChunk* fc = (FreeChunk*)addr;
-    if (fc->is_free()) {
-      // Since we hold the free list lock, which protects direct
-      // allocation in this generation by mutators, a free object
-      // will remain free throughout this iteration code.
-      size = fc->size();
-    } else {
-      // Note that the object need not necessarily be initialized,
-      // because (for instance) the free list lock does NOT protect
-      // object initialization. The closure application below must
-      // therefore be correct in the face of uninitialized objects.
-      size = cl->do_object_careful(oop(addr));
-      if (size == 0) {
-        // An unparsable object found. Signal early termination.
-        return addr;
-      }
-    }
-  }
-  return NULL;
-}
-
 // Callers of this iterator beware: The closure application should
 // be robust in the face of uninitialized objects and should (always)
@@ -338,10 +338,6 @@ class CompactibleFreeListSpace: public CompactibleSpace {
                    unallocated_block() : end());
   }
 
-  bool is_in(const void* p) const {
-    return used_region().contains(p);
-  }
-
   virtual bool is_free_block(const HeapWord* p) const;
 
   // Resizing support
@@ -363,6 +359,12 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   // obj_is_alive() to determine whether it is safe to iterate of
   // an object.
   void safe_object_iterate(ObjectClosure* blk);
 
+  // Iterate over all objects that intersect with mr, calling "cl->do_object"
+  // on each. There is an exception to this: if this closure has already
+  // been invoked on an object, it may skip such objects in some cases. This is
+  // Most likely to happen in an "upwards" (ascending address) iteration of
+  // MemRegions.
+  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
 
   // Requires that "mr" be entirely within the space.
@@ -371,11 +373,8 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   // terminate the iteration and return the address of the start of the
   // subregion that isn't done. Return of "NULL" indicates that the
   // iteration completed.
-  virtual HeapWord*
-    object_iterate_careful_m(MemRegion mr,
-                             ObjectClosureCareful* cl);
-  virtual HeapWord*
-    object_iterate_careful(ObjectClosureCareful* cl);
+  HeapWord* object_iterate_careful_m(MemRegion mr,
+                                     ObjectClosureCareful* cl);
 
   // Override: provides a DCTO_CL specific to this kind of space.
   DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
@@ -1498,6 +1498,19 @@ class FalseBitMapClosure: public BitMapClosure {
   }
 };
 
+// A version of ObjectClosure with "memory" (see _previous_address below)
+class UpwardsObjectClosure: public BoolObjectClosure {
+  HeapWord* _previous_address;
+ public:
+  UpwardsObjectClosure() : _previous_address(NULL) { }
+  void set_previous(HeapWord* addr) { _previous_address = addr; }
+  HeapWord* previous()              { return _previous_address; }
+  // A return value of "true" can be used by the caller to decide
+  // if this object's end should *NOT* be recorded in
+  // _previous_address above.
+  virtual bool do_object_bm(oop obj, MemRegion mr) = 0;
+};
+
 // This closure is used during the second checkpointing phase
 // to rescan the marked objects on the dirty cards in the mod
 // union table and the card table proper. It's invoked via
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "gc_implementation/g1/dirtyCardQueue.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/mutexLocker.hpp"
@@ -3529,6 +3529,29 @@ public:
   }
 };
 
+bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
+                                       const HeapRegion* hr,
+                                       const VerifyOption vo) const {
+  switch (vo) {
+  case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
+  case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
+  case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
+  default:                            ShouldNotReachHere();
+  }
+  return false; // keep some compilers happy
+}
+
+bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
+                                       const VerifyOption vo) const {
+  switch (vo) {
+  case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
+  case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
+  case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
+  default:                            ShouldNotReachHere();
+  }
+  return false; // keep some compilers happy
+}
+
 void G1CollectedHeap::print_on(outputStream* st) const {
   st->print(" %-20s", "garbage-first heap");
   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
@@ -6598,13 +6621,13 @@ public:
     if (hr->is_young()) {
       // TODO
     } else if (hr->startsHumongous()) {
-      assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->region_num()));
+      assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->hrs_index()));
       _humongous_count.increment(1u, hr->capacity());
     } else if (hr->is_empty()) {
-      assert(hr->containing_set() == _free_list, err_msg("Heap region %u is empty but not on the free list.", hr->region_num()));
+      assert(hr->containing_set() == _free_list, err_msg("Heap region %u is empty but not on the free list.", hr->hrs_index()));
       _free_count.increment(1u, hr->capacity());
     } else {
-      assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->region_num()));
+      assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrs_index()));
       _old_count.increment(1u, hr->capacity());
     }
     return false;
@@ -706,19 +706,7 @@ public:
   // This is a fast test on whether a reference points into the
   // collection set or not. Assume that the reference
   // points into the heap.
-  bool in_cset_fast_test(oop obj) {
-    assert(_in_cset_fast_test != NULL, "sanity");
-    assert(_g1_committed.contains((HeapWord*) obj), err_msg("Given reference outside of heap, is "PTR_FORMAT, (HeapWord*)obj));
-    // no need to subtract the bottom of the heap from obj,
-    // _in_cset_fast_test is biased
-    uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes;
-    bool ret = _in_cset_fast_test[index];
-    // let's make sure the result is consistent with what the slower
-    // test returns
-    assert( ret || !obj_in_cs(obj), "sanity");
-    assert(!ret || obj_in_cs(obj), "sanity");
-    return ret;
-  }
+  inline bool in_cset_fast_test(oop obj);
 
   void clear_cset_fast_test() {
     assert(_in_cset_fast_test_base != NULL, "sanity");
@@ -1250,9 +1238,7 @@ public:
     }
   }
 
-  void old_set_remove(HeapRegion* hr) {
-    _old_set.remove(hr);
-  }
+  inline void old_set_remove(HeapRegion* hr);
 
   size_t non_young_capacity_bytes() {
     return _old_set.total_capacity_bytes() + _humongous_set.total_capacity_bytes();
@@ -1343,7 +1329,7 @@ public:
   void heap_region_iterate(HeapRegionClosure* blk) const;
 
   // Return the region with the given index. It assumes the index is valid.
-  HeapRegion* region_at(uint index) const { return _hrs.at(index); }
+  inline HeapRegion* region_at(uint index) const;
 
   // Divide the heap region sequence into "chunks" of some size (the number
   // of regions divided by the number of parallel threads times some
@@ -1472,10 +1458,7 @@ public:
     return true;
   }
 
-  bool is_in_young(const oop obj) {
-    HeapRegion* hr = heap_region_containing(obj);
-    return hr != NULL && hr->is_young();
-  }
+  inline bool is_in_young(const oop obj);
 
 #ifdef ASSERT
   virtual bool is_in_partial_collection(const void* p);
@@ -1488,9 +1471,7 @@ public:
   // pre-value that needs to be remembered; for the remembered-set
   // update logging post-barrier, we don't maintain remembered set
   // information for young gen objects.
-  virtual bool can_elide_initializing_store_barrier(oop new_obj) {
-    return is_in_young(new_obj);
-  }
+  virtual inline bool can_elide_initializing_store_barrier(oop new_obj);
 
   // Returns "true" iff the given word_size is "very large".
   static bool isHumongous(size_t word_size) {
@@ -1584,23 +1565,9 @@ public:
 
   // Added if it is NULL it isn't dead.
 
-  bool is_obj_dead(const oop obj) const {
-    const HeapRegion* hr = heap_region_containing(obj);
-    if (hr == NULL) {
-      if (obj == NULL) return false;
-      else return true;
-    }
-    else return is_obj_dead(obj, hr);
-  }
+  inline bool is_obj_dead(const oop obj) const;
 
-  bool is_obj_ill(const oop obj) const {
-    const HeapRegion* hr = heap_region_containing(obj);
-    if (hr == NULL) {
-      if (obj == NULL) return false;
-      else return true;
-    }
-    else return is_obj_ill(obj, hr);
-  }
+  inline bool is_obj_ill(const oop obj) const;
 
   bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo);
   HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo);
@@ -1694,26 +1661,10 @@ public:
 
   bool is_obj_dead_cond(const oop obj,
                         const HeapRegion* hr,
-                        const VerifyOption vo) const {
-    switch (vo) {
-    case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
-    case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
-    case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
-    default:                            ShouldNotReachHere();
-    }
-    return false; // keep some compilers happy
-  }
+                        const VerifyOption vo) const;
 
   bool is_obj_dead_cond(const oop obj,
-                        const VerifyOption vo) const {
-    switch (vo) {
-    case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
-    case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
-    case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
-    default:                            ShouldNotReachHere();
-    }
-    return false; // keep some compilers happy
-  }
+                        const VerifyOption vo) const;
 
   // Printing
 
@@ -1807,11 +1758,7 @@ protected:
   DirtyCardQueue& dirty_card_queue() { return _dcq; }
   G1SATBCardTableModRefBS* ctbs() { return _ct_bs; }
 
-  template <class T> void immediate_rs_update(HeapRegion* from, T* p, int tid) {
-    if (!from->is_survivor()) {
-      _g1_rem->par_write_ref(from, p, tid);
-    }
-  }
+  template <class T> inline void immediate_rs_update(HeapRegion* from, T* p, int tid);
 
   template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
     // If the new value of the field points to the same region or
@@ -1853,13 +1800,7 @@ public:
     refs()->push(ref);
   }
 
-  template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
-    if (G1DeferredRSUpdate) {
-      deferred_rs_update(from, p, tid);
-    } else {
-      immediate_rs_update(from, p, tid);
-    }
-  }
+  template <class T> inline void update_rs(HeapRegion* from, T* p, int tid);
 
   HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
     HeapWord* obj = NULL;
@@ -1983,54 +1924,7 @@ private:
     return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
   }
 
-  void do_oop_partial_array(oop* p) {
-    assert(has_partial_array_mask(p), "invariant");
-    oop from_obj = clear_partial_array_mask(p);
-
-    assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
-    assert(from_obj->is_objArray(), "must be obj array");
-    objArrayOop from_obj_array = objArrayOop(from_obj);
-    // The from-space object contains the real length.
-    int length = from_obj_array->length();
-
-    assert(from_obj->is_forwarded(), "must be forwarded");
-    oop to_obj = from_obj->forwardee();
-    assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
-    objArrayOop to_obj_array = objArrayOop(to_obj);
-    // We keep track of the next start index in the length field of the
-    // to-space object.
-    int next_index = to_obj_array->length();
-    assert(0 <= next_index && next_index < length,
-           err_msg("invariant, next index: %d, length: %d", next_index, length));
-
-    int start = next_index;
-    int end = length;
-    int remainder = end - start;
-    // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
-    if (remainder > 2 * ParGCArrayScanChunk) {
-      end = start + ParGCArrayScanChunk;
-      to_obj_array->set_length(end);
-      // Push the remainder before we process the range in case another
-      // worker has run out of things to do and can steal it.
-      oop* from_obj_p = set_partial_array_mask(from_obj);
-      push_on_queue(from_obj_p);
-    } else {
-      assert(length == end, "sanity");
-      // We'll process the final range for this object. Restore the length
-      // so that the heap remains parsable in case of evacuation failure.
-      to_obj_array->set_length(end);
-    }
-    _scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
-    // Process indexes [start,end). It will also process the header
-    // along with the first chunk (i.e., the chunk with start == 0).
-    // Note that at this point the length field of to_obj_array is not
-    // correct given that we are using it to keep track of the next
-    // start index. oop_iterate_range() (thankfully!) ignores the length
-    // field and only relies on the start / end parameters. It does
-    // however return the size of the object which will be incorrect. So
-    // we have to ignore it even if we wanted to use it.
-    to_obj_array->oop_iterate_range(&_scanner, start, end);
-  }
+  inline void do_oop_partial_array(oop* p);
 
   // This method is applied to the fields of the objects that have just been copied.
   template <class T> void do_oop_evac(T* p, HeapRegion* from) {
@@ -2060,26 +1954,9 @@ public:
 
   oop copy_to_survivor_space(oop const obj);
 
-  template <class T> void deal_with_reference(T* ref_to_scan) {
-    if (!has_partial_array_mask(ref_to_scan)) {
-      // Note: we can use "raw" versions of "region_containing" because
-      // "obj_to_scan" is definitely in the heap, and is not in a
-      // humongous region.
-      HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
-      do_oop_evac(ref_to_scan, r);
-    } else {
-      do_oop_partial_array((oop*)ref_to_scan);
-    }
-  }
+  template <class T> inline void deal_with_reference(T* ref_to_scan);
 
-  void deal_with_reference(StarTask ref) {
-    assert(verify_task(ref), "sanity");
-    if (ref.is_narrow()) {
-      deal_with_reference((narrowOop*)ref);
-    } else {
-      deal_with_reference((oop*)ref);
-    }
-  }
+  inline void deal_with_reference(StarTask ref);
 
 public:
   void trim_queue();
@@ -29,6 +29,7 @@
 #include "gc_implementation/g1/g1CollectedHeap.hpp"
 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
+#include "gc_implementation/g1/g1RemSet.inline.hpp"
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/heapRegionSet.inline.hpp"
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
@@ -36,6 +37,9 @@
 
 // Inline functions for G1CollectedHeap
 
+// Return the region with the given index. It assumes the index is valid.
+inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrs.at(index); }
+
 template <class T>
 inline HeapRegion*
 G1CollectedHeap::heap_region_containing(const T addr) const {
@@ -55,6 +59,10 @@ G1CollectedHeap::heap_region_containing_raw(const T addr) const {
   return res;
 }
 
+inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
+  _old_set.remove(hr);
+}
+
 inline bool G1CollectedHeap::obj_in_cs(oop obj) {
   HeapRegion* r = _hrs.addr_to_region((HeapWord*) obj);
   return r != NULL && r->in_collection_set();
@@ -151,6 +159,24 @@ inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
   return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
 }
 
+
+// This is a fast test on whether a reference points into the
+// collection set or not. Assume that the reference
+// points into the heap.
+inline bool G1CollectedHeap::in_cset_fast_test(oop obj) {
+  assert(_in_cset_fast_test != NULL, "sanity");
+  assert(_g1_committed.contains((HeapWord*) obj), err_msg("Given reference outside of heap, is "PTR_FORMAT, (HeapWord*)obj));
+  // no need to subtract the bottom of the heap from obj,
+  // _in_cset_fast_test is biased
+  uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes;
+  bool ret = _in_cset_fast_test[index];
+  // let's make sure the result is consistent with what the slower
+  // test returns
+  assert( ret || !obj_in_cs(obj), "sanity");
+  assert(!ret || obj_in_cs(obj), "sanity");
+  return ret;
+}
+
 #ifndef PRODUCT
 // Support for G1EvacuationFailureALot
 
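A sketch of the "biased array" trick that in_cset_fast_test above relies on, with sizes and addresses invented for the demo: the table's base pointer is pre-offset by the heap bottom (in region units), so membership is a single shift and load with no subtraction on the hot path. The biasing step is formally out-of-range pointer arithmetic; HotSpot relies on it working on its supported platforms.

#include <cassert>
#include <cstdint>

int main() {
  const int       kLogRegionBytes = 20;                 // pretend 1 MiB regions
  const uintptr_t heap_base       = uintptr_t(1) << 30; // pretend heap bottom

  bool storage[8] = {false, true, false, false, false, false, false, false};
  // Bias the base pointer so that (addr >> kLogRegionBytes) indexes the
  // table directly, exactly like the biased _in_cset_fast_test pointer.
  bool* biased = storage - (heap_base >> kLogRegionBytes);

  uintptr_t obj = heap_base + (uintptr_t(1) << kLogRegionBytes) + 123; // region 1
  assert(biased[obj >> kLogRegionBytes] == true);
  return 0;
}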
@ -224,4 +250,121 @@ inline void G1CollectedHeap::reset_evacuation_should_fail() {
|
||||
}
|
||||
#endif // #ifndef PRODUCT
|
||||
|
||||
inline bool G1CollectedHeap::is_in_young(const oop obj) {
|
||||
HeapRegion* hr = heap_region_containing(obj);
|
||||
return hr != NULL && hr->is_young();
|
||||
}
|
||||
|
||||
// We don't need barriers for initializing stores to objects
// in the young gen: for the SATB pre-barrier, there is no
// pre-value that needs to be remembered; for the remembered-set
// update logging post-barrier, we don't maintain remembered set
// information for young gen objects.
inline bool G1CollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
  return is_in_young(new_obj);
}

inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
  const HeapRegion* hr = heap_region_containing(obj);
  if (hr == NULL) {
    if (obj == NULL) return false;
    else return true;
  }
  else return is_obj_dead(obj, hr);
}

inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
  const HeapRegion* hr = heap_region_containing(obj);
  if (hr == NULL) {
    if (obj == NULL) return false;
    else return true;
  }
  else return is_obj_ill(obj, hr);
}

template <class T> inline void G1ParScanThreadState::immediate_rs_update(HeapRegion* from, T* p, int tid) {
  if (!from->is_survivor()) {
    _g1_rem->par_write_ref(from, p, tid);
  }
}

template <class T> void G1ParScanThreadState::update_rs(HeapRegion* from, T* p, int tid) {
  if (G1DeferredRSUpdate) {
    deferred_rs_update(from, p, tid);
  } else {
    immediate_rs_update(from, p, tid);
  }
}


inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
  assert(has_partial_array_mask(p), "invariant");
  oop from_obj = clear_partial_array_mask(p);

  assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
  assert(from_obj->is_objArray(), "must be obj array");
  objArrayOop from_obj_array = objArrayOop(from_obj);
  // The from-space object contains the real length.
  int length = from_obj_array->length();

  assert(from_obj->is_forwarded(), "must be forwarded");
  oop to_obj = from_obj->forwardee();
  assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
  objArrayOop to_obj_array = objArrayOop(to_obj);
  // We keep track of the next start index in the length field of the
  // to-space object.
  int next_index = to_obj_array->length();
  assert(0 <= next_index && next_index < length,
         err_msg("invariant, next index: %d, length: %d", next_index, length));

  int start = next_index;
  int end = length;
  int remainder = end - start;
  // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
  if (remainder > 2 * ParGCArrayScanChunk) {
    end = start + ParGCArrayScanChunk;
    to_obj_array->set_length(end);
    // Push the remainder before we process the range in case another
    // worker has run out of things to do and can steal it.
    oop* from_obj_p = set_partial_array_mask(from_obj);
    push_on_queue(from_obj_p);
  } else {
    assert(length == end, "sanity");
    // We'll process the final range for this object. Restore the length
    // so that the heap remains parsable in case of evacuation failure.
    to_obj_array->set_length(end);
  }
  _scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
  // Process indexes [start,end). It will also process the header
  // along with the first chunk (i.e., the chunk with start == 0).
  // Note that at this point the length field of to_obj_array is not
  // correct given that we are using it to keep track of the next
  // start index. oop_iterate_range() (thankfully!) ignores the length
  // field and only relies on the start / end parameters. It does
  // however return the size of the object which will be incorrect. So
  // we have to ignore it even if we wanted to use it.
  to_obj_array->oop_iterate_range(&_scanner, start, end);
}

template <class T> inline void G1ParScanThreadState::deal_with_reference(T* ref_to_scan) {
  if (!has_partial_array_mask(ref_to_scan)) {
    // Note: we can use "raw" versions of "region_containing" because
    // "obj_to_scan" is definitely in the heap, and is not in a
    // humongous region.
    HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
    do_oop_evac(ref_to_scan, r);
  } else {
    do_oop_partial_array((oop*)ref_to_scan);
  }
}

inline void G1ParScanThreadState::deal_with_reference(StarTask ref) {
  assert(verify_task(ref), "sanity");
  if (ref.is_narrow()) {
    deal_with_reference((narrowOop*)ref);
  } else {
    deal_with_reference((oop*)ref);
  }
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
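The partial-array chunking in do_oop_partial_array() above is the heart of this file: a large objArray is scanned ParGCArrayScanChunk elements at a time, the unscanned tail is pushed back on the task queue so an idle worker can steal it, and the next start index is smuggled through the to-space copy's length field. As a minimal standalone sketch of the queueing idea, with a std::deque standing in for HotSpot's work-stealing task queues (kChunkSize, Range, and scan_in_chunks are illustrative names, not HotSpot APIs):

#include <cstddef>
#include <deque>
#include <utility>
#include <vector>

static const std::size_t kChunkSize = 50;        // stand-in for ParGCArrayScanChunk

// A task is a half-open index range [start, end) of the array being scanned.
typedef std::pair<std::size_t, std::size_t> Range;

static void scan_in_chunks(const std::vector<int>& array,
                           void (*scan)(int)) {
  std::deque<Range> queue;                       // stand-in for the worker's task queue
  queue.push_back(Range(0, array.size()));
  while (!queue.empty()) {
    Range r = queue.front();
    queue.pop_front();
    std::size_t end = r.second;
    if (end - r.first > 2 * kChunkSize) {
      end = r.first + kChunkSize;
      queue.push_back(Range(end, r.second));     // push the tail first, so it is stealable
    }
    for (std::size_t i = r.first; i < end; ++i) {
      scan(array[i]);                            // process the current chunk
    }
  }
}

The real code needs no Range record: the tail task is the masked from-space pointer, and the next start index rides in the to-space array's length field until the final chunk restores it.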
@ -472,9 +472,6 @@ HeapRegion::object_iterate_mem_careful(MemRegion mr,
    } else if (!g1h->is_obj_dead(obj)) {
      cl->do_object(obj);
    }
    if (cl->abort()) return cur;
    // The check above must occur before the operation below, since an
    // abort might invalidate the "size" operation.
    cur += obj->size();
  }
  return NULL;
@ -25,7 +25,7 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_SPARSEPRT_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_SPARSEPRT_HPP

#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "memory/allocation.hpp"
#include "memory/cardTableModRefBS.hpp"
@ -89,6 +89,15 @@ bool VM_GC_Operation::doit_prologue() {
  assert(((_gc_cause != GCCause::_no_gc) &&
          (_gc_cause != GCCause::_no_cause_specified)), "Illegal GCCause");

  // To be able to handle a GC the VM initialization needs to be completed.
  if (!is_init_completed()) {
    vm_exit_during_initialization(
      err_msg("GC triggered before VM initialization completed. Try increasing "
              "NewSize, current value " UINTX_FORMAT "%s.",
              byte_size_in_proper_unit(NewSize),
              proper_unit_for_byte_size(NewSize)));
  }

  acquire_pending_list_lock();
  // If the GC count has changed someone beat us to the collection
  // Get the Heap_lock after the pending_list_lock.
@ -35,8 +35,6 @@
#include "runtime/timer.hpp"


#ifndef PRODUCT

// Standard closure for BytecodeTracer: prints the current bytecode
// and its attributes using bytecode-specific information.

@ -600,4 +598,3 @@ void BytecodePrinter::bytecode_epilog(int bci, outputStream* st) {
    }
  }
}
#endif // PRODUCT
@ -34,8 +34,7 @@
// By specialising the BytecodeClosure, all kinds of bytecode traces can
// be done.

#ifndef PRODUCT
// class BytecodeTracer is only used by TraceBytecodes option
// class BytecodeTracer is used by TraceBytecodes option and PrintMethodData

class BytecodeClosure;
class BytecodeTracer: AllStatic {
@ -60,6 +59,4 @@ class BytecodeClosure {
  virtual void trace(methodHandle method, address bcp, outputStream* st) = 0;
};

#endif // !PRODUCT

#endif // SHARE_VM_INTERPRETER_BYTECODETRACER_HPP
@ -748,6 +748,12 @@ class ArrayAllocator VALUE_OBJ_CLASS_SPEC {
  bool _use_malloc;
  size_t _size;
  bool _free_in_destructor;

  static bool should_use_malloc(size_t size) {
    return size < ArrayAllocatorMallocLimit;
  }

  static char* allocate_inner(size_t& size, bool& use_malloc);
 public:
  ArrayAllocator(bool free_in_destructor = true) :
    _addr(NULL), _use_malloc(false), _size(0), _free_in_destructor(free_in_destructor) { }
@ -759,6 +765,7 @@ class ArrayAllocator VALUE_OBJ_CLASS_SPEC {
  }

  E* allocate(size_t length);
  E* reallocate(size_t new_length);
  void free();
};

@ -122,35 +122,57 @@ template <MEMFLAGS F> void CHeapObj<F>::operator delete [](void* p){
}

template <class E, MEMFLAGS F>
E* ArrayAllocator<E, F>::allocate(size_t length) {
  assert(_addr == NULL, "Already in use");
char* ArrayAllocator<E, F>::allocate_inner(size_t &size, bool &use_malloc) {
  char* addr = NULL;

  _size = sizeof(E) * length;
  _use_malloc = _size < ArrayAllocatorMallocLimit;

  if (_use_malloc) {
    _addr = AllocateHeap(_size, F);
    if (_addr == NULL && _size >= (size_t)os::vm_allocation_granularity()) {
  if (use_malloc) {
    addr = AllocateHeap(size, F);
    if (addr == NULL && size >= (size_t)os::vm_allocation_granularity()) {
      // malloc failed let's try with mmap instead
      _use_malloc = false;
      use_malloc = false;
    } else {
      return (E*)_addr;
      return addr;
    }
  }

  int alignment = os::vm_allocation_granularity();
  _size = align_size_up(_size, alignment);
  size = align_size_up(size, alignment);

  _addr = os::reserve_memory(_size, NULL, alignment, F);
  if (_addr == NULL) {
    vm_exit_out_of_memory(_size, OOM_MMAP_ERROR, "Allocator (reserve)");
  addr = os::reserve_memory(size, NULL, alignment, F);
  if (addr == NULL) {
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "Allocator (reserve)");
  }

  os::commit_memory_or_exit(_addr, _size, !ExecMem, "Allocator (commit)");
  os::commit_memory_or_exit(addr, size, !ExecMem, "Allocator (commit)");
  return addr;
}

template <class E, MEMFLAGS F>
E* ArrayAllocator<E, F>::allocate(size_t length) {
  assert(_addr == NULL, "Already in use");

  _size = sizeof(E) * length;
  _use_malloc = should_use_malloc(_size);
  _addr = allocate_inner(_size, _use_malloc);

  return (E*)_addr;
}

template <class E, MEMFLAGS F>
E* ArrayAllocator<E, F>::reallocate(size_t new_length) {
  size_t new_size = sizeof(E) * new_length;
  bool use_malloc = should_use_malloc(new_size);
  char* new_addr = allocate_inner(new_size, use_malloc);

  memcpy(new_addr, _addr, MIN2(new_size, _size));

  free();
  _size = new_size;
  _use_malloc = use_malloc;
  _addr = new_addr;
  return (E*)new_addr;
}

template<class E, MEMFLAGS F>
void ArrayAllocator<E, F>::free() {
  if (_addr != NULL) {
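The refactoring above only extracts the shared policy into allocate_inner(): requests below ArrayAllocatorMallocLimit go through malloc, larger ones through reserved-and-committed virtual memory, with a fallback to the mapped path when malloc fails. A rough standalone sketch of that policy using POSIX calls directly (kMallocLimit is an illustrative stand-in for the HotSpot flag, and the exit-on-failure behavior is reduced to returning NULL):

#include <cstddef>
#include <cstdlib>
#include <sys/mman.h>
#include <unistd.h>

static const size_t kMallocLimit = 64 * 1024;  // stand-in for ArrayAllocatorMallocLimit

// Allocate 'size' bytes; 'used_malloc' tells the caller how to free the block.
static char* allocate_inner(size_t& size, bool& used_malloc) {
  used_malloc = size < kMallocLimit;
  if (used_malloc) {
    char* addr = static_cast<char*>(malloc(size));
    if (addr != NULL) return addr;
    used_malloc = false;  // malloc failed: fall through to the mmap path
  }
  size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  size = (size + page - 1) & ~(page - 1);  // align up, like align_size_up()
  void* mapped = mmap(NULL, size, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);  // anonymous mapping (Linux/BSD)
  return mapped == MAP_FAILED ? NULL : static_cast<char*>(mapped);
}

Splitting on a threshold keeps small arrays cheap (no page-granularity rounding) while letting large ones be handed back to the OS wholesale when freed.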
@ -28,7 +28,6 @@
#include "memory/sharedHeap.hpp"

volatile jint GC_locker::_jni_lock_count = 0;
volatile jint GC_locker::_lock_count = 0;
volatile bool GC_locker::_needs_gc = false;
volatile bool GC_locker::_doing_gc = false;

@ -102,7 +101,7 @@ void GC_locker::jni_lock(JavaThread* thread) {
  // We check that at least one thread is in a critical region before
  // blocking because blocked threads are woken up by a thread exiting
  // a JNI critical region.
  while ((needs_gc() && is_jni_active()) || _doing_gc) {
  while (is_active_and_needs_gc() || _doing_gc) {
    JNICritical_lock->wait();
  }
  thread->enter_critical();
@ -116,27 +115,20 @@ void GC_locker::jni_unlock(JavaThread* thread) {
  _jni_lock_count--;
  decrement_debug_jni_lock_count();
  thread->exit_critical();
  if (needs_gc() && !is_jni_active()) {
  if (needs_gc() && !is_active_internal()) {
    // We're the last thread out. Cause a GC to occur.
    // GC will also check is_active, so this check is not
    // strictly needed. It's added here to make it clear that
    // the GC will NOT be performed if any other caller
    // of GC_locker::lock() still needs GC locked.
    if (!is_active_internal()) {
      _doing_gc = true;
      {
        // Must give up the lock while at a safepoint
        MutexUnlocker munlock(JNICritical_lock);
        if (PrintJNIGCStalls && PrintGCDetails) {
          ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
          gclog_or_tty->print_cr("%.3f: Thread \"%s\" is performing GC after exiting critical section, %d locked",
              gclog_or_tty->time_stamp().seconds(), Thread::current()->name(), _jni_lock_count);
        }
        Universe::heap()->collect(GCCause::_gc_locker);
    _doing_gc = true;
    {
      // Must give up the lock while at a safepoint
      MutexUnlocker munlock(JNICritical_lock);
      if (PrintJNIGCStalls && PrintGCDetails) {
        ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
        gclog_or_tty->print_cr("%.3f: Thread \"%s\" is performing GC after exiting critical section, %d locked",
            gclog_or_tty->time_stamp().seconds(), Thread::current()->name(), _jni_lock_count);
      }
      _doing_gc = false;
      Universe::heap()->collect(GCCause::_gc_locker);
    }

    _doing_gc = false;
    _needs_gc = false;
    JNICritical_lock->notify_all();
  }
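The jni_unlock() hunk above reorders the _doing_gc handshake so that exactly one last-thread-out performs the stalled collection, with the lock released while it runs. A condensed sketch of the pattern — a count of threads inside critical sections, a needs_gc flag, and a single closer that runs the deferred work — using std::mutex in place of JNICritical_lock (enter_critical, exit_critical, and collect are illustrative names, not HotSpot APIs):

#include <condition_variable>
#include <mutex>

static std::mutex              gc_mutex;
static std::condition_variable gc_cv;
static int  critical_count = 0;   // threads currently inside a critical section
static bool needs_gc       = false;
static bool doing_gc       = false;

void enter_critical() {
  std::unique_lock<std::mutex> lk(gc_mutex);
  // Block new entrants while a GC is pending-and-held-up or in progress.
  gc_cv.wait(lk, [] { return !(needs_gc && critical_count > 0) && !doing_gc; });
  ++critical_count;
}

void exit_critical(void (*collect)()) {
  std::unique_lock<std::mutex> lk(gc_mutex);
  if (--critical_count == 0 && needs_gc) {
    doing_gc = true;          // we are the last thread out: run the GC
    lk.unlock();
    collect();                // must not hold the lock while collecting
    lk.lock();
    doing_gc = false;
    needs_gc = false;
    gc_cv.notify_all();       // wake the threads parked in enter_critical()
  }
}

As in the real code, doing_gc keeps late arrivals parked until the flags are cleared and everyone is notified.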
@ -54,8 +54,6 @@ class GC_locker: public AllStatic {
  // safepointing and decremented during the slow path of GC_locker
  // unlocking.
  static volatile jint _jni_lock_count;  // number of jni active instances.

  static volatile jint _lock_count;      // number of other active instances
  static volatile bool _needs_gc;        // heap is filling, we need a GC
                                         // note: bool is typedef'd as jint
  static volatile bool _doing_gc;        // unlock_critical() is doing a GC
@ -66,12 +64,6 @@ class GC_locker: public AllStatic {
  static volatile jint _debug_jni_lock_count;
#endif

  // Accessors
  static bool is_jni_active() {
    assert(_needs_gc, "only valid when _needs_gc is set");
    return _jni_lock_count > 0;
  }

  // At a safepoint, visit all threads and count the number of active
  // critical sections.  This is used to ensure that all active
  // critical sections are exited before a new one is started.
@ -82,7 +74,7 @@ class GC_locker: public AllStatic {

  static bool is_active_internal() {
    verify_critical_count();
    return _lock_count > 0 || _jni_lock_count > 0;
    return _jni_lock_count > 0;
  }

 public:
@ -132,10 +124,6 @@ class GC_locker: public AllStatic {
  // not a stable predicate.
  static void stall_until_clear();

  // Non-structured GC locking: currently needed for JNI. Use with care!
  static void lock();
  static void unlock();

  // The following two methods are used for JNI critical regions.
  // If we find that we failed to perform a GC because the GC_locker
  // was active, arrange for one as soon as possible by allowing
@ -27,22 +27,6 @@

#include "memory/gcLocker.hpp"

inline void GC_locker::lock() {
  // cast away volatile
  Atomic::inc(&_lock_count);
  CHECK_UNHANDLED_OOPS_ONLY(
    if (CheckUnhandledOops) { Thread::current()->_gc_locked_out_count++; })
  assert(Universe::heap() == NULL ||
         !Universe::heap()->is_gc_active(), "locking failed");
}

inline void GC_locker::unlock() {
  // cast away volatile
  Atomic::dec(&_lock_count);
  CHECK_UNHANDLED_OOPS_ONLY(
    if (CheckUnhandledOops) { Thread::current()->_gc_locked_out_count--; })
}

inline void GC_locker::lock_critical(JavaThread* thread) {
  if (!thread->in_critical()) {
    if (needs_gc()) {
@ -115,9 +115,6 @@ class ScanClosure: public OopsInKlassOrGenClosure {
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p);
  inline void do_oop_nv(narrowOop* p);
  Prefetch::style prefetch_style() {
    return Prefetch::do_write;
  }
};

// Closure for scanning DefNewGeneration.
@ -137,9 +134,6 @@ class FastScanClosure: public OopsInKlassOrGenClosure {
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p);
  inline void do_oop_nv(narrowOop* p);
  Prefetch::style prefetch_style() {
    return Prefetch::do_write;
  }
};

class KlassScanClosure: public KlassClosure {
@ -27,11 +27,8 @@

#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "runtime/prefetch.hpp"
#include "utilities/top.hpp"

// The following classes are C++ `closures` for iterating over objects, roots and spaces

class CodeBlob;
class nmethod;
class ReferenceProcessor;
@ -39,22 +36,11 @@ class DataLayout;
class KlassClosure;
class ClassLoaderData;

// Closure provides abortability.
// The following classes are C++ `closures` for iterating over objects, roots and spaces

class Closure : public StackObj {
 protected:
  bool _abort;
  void set_abort() { _abort = true; }
 public:
  Closure() : _abort(false) {}
  // A subtype can use this mechanism to indicate to some iterator mapping
  // functions that the iteration should cease.
  bool abort() { return _abort; }
  void clear_abort() { _abort = false; }
};
class Closure : public StackObj { };

// OopClosure is used for iterating through references to Java objects.

class OopClosure : public Closure {
 public:
  virtual void do_oop(oop* o) = 0;
@ -97,11 +83,6 @@ class ExtendedOopClosure : public OopClosure {

  virtual void do_class_loader_data(ClassLoaderData* cld) { ShouldNotReachHere(); }

  // Controls how prefetching is done for invocations of this closure.
  Prefetch::style prefetch_style() { // Note that this is non-virtual.
    return Prefetch::do_none;
  }

  // True iff this closure may be safely applied more than once to an oop
  // location without an intervening "major reset" (like the end of a GC).
  virtual bool idempotent() { return false; }
@ -177,19 +158,6 @@ public:
  ObjectToOopClosure(ExtendedOopClosure* cl) : _cl(cl) {}
};

// A version of ObjectClosure with "memory" (see _previous_address below)
class UpwardsObjectClosure: public BoolObjectClosure {
  HeapWord* _previous_address;
 public:
  UpwardsObjectClosure() : _previous_address(NULL) { }
  void set_previous(HeapWord* addr) { _previous_address = addr; }
  HeapWord* previous() { return _previous_address; }
  // A return value of "true" can be used by the caller to decide
  // if this object's end should *NOT* be recorded in
  // _previous_address above.
  virtual bool do_object_bm(oop obj, MemRegion mr) = 0;
};

// A version of ObjectClosure that is expected to be robust
// in the face of possibly uninitialized objects.
class ObjectClosureCareful : public ObjectClosure {
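With the abort protocol gone, Closure is reduced to a marker base class and UpwardsObjectClosure moves out of this header. For readers new to the idiom these headers revolve around, here is a stripped-down sketch of the closure (visitor) pattern as the GC interfaces use it — a do_oop hook that an iterator applies to every reference slot (Obj, Slot, OopClosureSketch, and iterate_slots are invented placeholders, not HotSpot types):

#include <cstddef>

struct Obj;                 // placeholder for a heap object
typedef Obj* Slot;          // placeholder for a reference slot (think oop*)

// The visitor: subclasses override do_oop with the per-slot action.
class OopClosureSketch {
 public:
  virtual ~OopClosureSketch() {}
  virtual void do_oop(Slot* p) = 0;
};

// The iterator: walks every slot and applies the closure, so the traversal
// logic is written once while the action stays pluggable.
static void iterate_slots(Slot* slots, std::size_t n, OopClosureSketch* cl) {
  for (std::size_t i = 0; i < n; ++i) {
    cl->do_oop(&slots[i]);
  }
}

// Example closure: count non-null references.
class CountClosure : public OopClosureSketch {
 public:
  std::size_t count;
  CountClosure() : count(0) {}
  virtual void do_oop(Slot* p) { if (*p != NULL) ++count; }
};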
@ -645,9 +645,6 @@ void MetaspaceShared::preload_and_dump(TRAPS) {
  TraceTime timer("Dump Shared Spaces", TraceStartupTime);
  ResourceMark rm;

  // Lock out GC - is it necessary? I don't think we care.
  No_GC_Verifier no_gc;

  // Preload classes to be shared.
  // Should use some os:: method rather than fopen() here. aB.
  // Construct the path to the class list (in jre/lib)
@ -302,10 +302,6 @@ void ContiguousSpace::clear(bool mangle_space) {
  CompactibleSpace::clear(mangle_space);
}

bool ContiguousSpace::is_in(const void* p) const {
  return _bottom <= p && p < _top;
}

bool ContiguousSpace::is_free_block(const HeapWord* p) const {
  return p >= _top;
}
@ -547,115 +543,11 @@ void Space::oop_iterate(ExtendedOopClosure* blk) {
  object_iterate(&blk2);
}

HeapWord* Space::object_iterate_careful(ObjectClosureCareful* cl) {
  guarantee(false, "NYI");
  return bottom();
}

HeapWord* Space::object_iterate_careful_m(MemRegion mr,
                                          ObjectClosureCareful* cl) {
  guarantee(false, "NYI");
  return bottom();
}


void Space::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
  assert(!mr.is_empty(), "Should be non-empty");
  // We use MemRegion(bottom(), end()) rather than used_region() below
  // because the two are not necessarily equal for some kinds of
  // spaces, in particular, certain kinds of free list spaces.
  // We could use the more complicated but more precise:
  // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
  // but the slight imprecision seems acceptable in the assertion check.
  assert(MemRegion(bottom(), end()).contains(mr),
         "Should be within used space");
  HeapWord* prev = cl->previous();   // max address from last time
  if (prev >= mr.end()) { // nothing to do
    return;
  }
  // This assert will not work when we go from cms space to perm
  // space, and use same closure. Easy fix deferred for later. XXX YSR
  // assert(prev == NULL || contains(prev), "Should be within space");

  bool last_was_obj_array = false;
  HeapWord *blk_start_addr, *region_start_addr;
  if (prev > mr.start()) {
    region_start_addr = prev;
    blk_start_addr = prev;
    // The previous invocation may have pushed "prev" beyond the
    // last allocated block yet there may be still be blocks
    // in this region due to a particular coalescing policy.
    // Relax the assertion so that the case where the unallocated
    // block is maintained and "prev" is beyond the unallocated
    // block does not cause the assertion to fire.
    assert((BlockOffsetArrayUseUnallocatedBlock &&
            (!is_in(prev))) ||
           (blk_start_addr == block_start(region_start_addr)), "invariant");
  } else {
    region_start_addr = mr.start();
    blk_start_addr = block_start(region_start_addr);
  }
  HeapWord* region_end_addr = mr.end();
  MemRegion derived_mr(region_start_addr, region_end_addr);
  while (blk_start_addr < region_end_addr) {
    const size_t size = block_size(blk_start_addr);
    if (block_is_obj(blk_start_addr)) {
      last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
    } else {
      last_was_obj_array = false;
    }
    blk_start_addr += size;
  }
  if (!last_was_obj_array) {
    assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
           "Should be within (closed) used space");
    assert(blk_start_addr > prev, "Invariant");
    cl->set_previous(blk_start_addr); // min address for next time
  }
}

bool Space::obj_is_alive(const HeapWord* p) const {
  assert (block_is_obj(p), "The address should point to an object");
  return true;
}

void ContiguousSpace::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
  assert(!mr.is_empty(), "Should be non-empty");
  assert(used_region().contains(mr), "Should be within used space");
  HeapWord* prev = cl->previous();   // max address from last time
  if (prev >= mr.end()) { // nothing to do
    return;
  }
  // See comment above (in more general method above) in case you
  // happen to use this method.
  assert(prev == NULL || is_in_reserved(prev), "Should be within space");

  bool last_was_obj_array = false;
  HeapWord *obj_start_addr, *region_start_addr;
  if (prev > mr.start()) {
    region_start_addr = prev;
    obj_start_addr = prev;
    assert(obj_start_addr == block_start(region_start_addr), "invariant");
  } else {
    region_start_addr = mr.start();
    obj_start_addr = block_start(region_start_addr);
  }
  HeapWord* region_end_addr = mr.end();
  MemRegion derived_mr(region_start_addr, region_end_addr);
  while (obj_start_addr < region_end_addr) {
    oop obj = oop(obj_start_addr);
    const size_t size = obj->size();
    last_was_obj_array = cl->do_object_bm(obj, derived_mr);
    obj_start_addr += size;
  }
  if (!last_was_obj_array) {
    assert((bottom() <= obj_start_addr) && (obj_start_addr <= end()),
           "Should be within (closed) used space");
    assert(obj_start_addr > prev, "Invariant");
    cl->set_previous(obj_start_addr); // min address for next time
  }
}

#if INCLUDE_ALL_GCS
#define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)  \
                                                                     \
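Both object_iterate_mem() variants above lean on the same property: in a parsable space the next object starts exactly at the current object's start plus its size, so a region can be walked linearly. A toy sketch of that walk, with a size stored in the first word standing in for oop::size() (walk_space and HeapWordSketch are illustrative, not HotSpot types):

#include <cstddef>

// Toy object layout: the first word of every object holds its size in words.
typedef std::size_t HeapWordSketch;

static void walk_space(HeapWordSketch* bottom, HeapWordSketch* top,
                       void (*do_object)(HeapWordSketch* obj)) {
  HeapWordSketch* cur = bottom;
  while (cur < top) {
    do_object(cur);
    cur += cur[0];   // advance by the object's size, as in cur += obj->size()
  }
}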
@ -120,6 +120,12 @@ class Space: public CHeapObj<mtGC> {

  void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }

  // Returns true if this object has been allocated since a
  // generation's "save_marks" call.
  virtual bool obj_allocated_since_save_marks(const oop obj) const {
    return (HeapWord*)obj >= saved_mark_word();
  }

  MemRegionClosure* preconsumptionDirtyCardClosure() const {
    return _preconsumptionDirtyCardClosure;
  }
@ -127,9 +133,9 @@ class Space: public CHeapObj<mtGC> {
    _preconsumptionDirtyCardClosure = cl;
  }

  // Returns a subregion of the space containing all the objects in
  // Returns a subregion of the space containing only the allocated objects in
  // the space.
  virtual MemRegion used_region() const { return MemRegion(bottom(), end()); }
  virtual MemRegion used_region() const = 0;

  // Returns a region that is guaranteed to contain (at least) all objects
  // allocated at the time of the last call to "save_marks".  If the space
@ -139,7 +145,7 @@ class Space: public CHeapObj<mtGC> {
  // saved mark.  Otherwise, the "obj_allocated_since_save_marks" method of
  // the space must distinguish between objects in the region allocated before
  // and after the call to save marks.
  virtual MemRegion used_region_at_save_marks() const {
  MemRegion used_region_at_save_marks() const {
    return MemRegion(bottom(), saved_mark_word());
  }

@ -172,7 +178,9 @@ class Space: public CHeapObj<mtGC> {
  // expensive operation. To prevent performance problems
  // on account of its inadvertent use in product jvm's,
  // we restrict its use to assertion checks only.
  virtual bool is_in(const void* p) const = 0;
  bool is_in(const void* p) const {
    return used_region().contains(p);
  }

  // Returns true iff the given reserved memory of the space contains the
  // given address.
@ -204,24 +212,6 @@ class Space: public CHeapObj<mtGC> {
  // objects whose internal references point to objects in the space.
  virtual void safe_object_iterate(ObjectClosure* blk) = 0;

  // Iterate over all objects that intersect with mr, calling "cl->do_object"
  // on each.  There is an exception to this: if this closure has already
  // been invoked on an object, it may skip such objects in some cases.  This is
  // Most likely to happen in an "upwards" (ascending address) iteration of
  // MemRegions.
  virtual void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);

  // Iterate over as many initialized objects in the space as possible,
  // calling "cl.do_object_careful" on each. Return NULL if all objects
  // in the space (at the start of the iteration) were iterated over.
  // Return an address indicating the extent of the iteration in the
  // event that the iteration had to return because of finding an
  // uninitialized object in the space, or if the closure "cl"
  // signaled early termination.
  virtual HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
  virtual HeapWord* object_iterate_careful_m(MemRegion mr,
                                             ObjectClosureCareful* cl);

  // Create and return a new dirty card to oop closure. Can be
  // overridden to return the appropriate type of closure
  // depending on the type of space in which the closure will
@ -262,10 +252,6 @@ class Space: public CHeapObj<mtGC> {
  // Allocation (return NULL if full). Enforces mutual exclusion internally.
  virtual HeapWord* par_allocate(size_t word_size) = 0;

  // Returns true if this object has been allocated since a
  // generation's "save_marks" call.
  virtual bool obj_allocated_since_save_marks(const oop obj) const = 0;

  // Mark-sweep-compact support: all spaces can update pointers to objects
  // moving as a part of compaction.
  virtual void adjust_pointers();
@ -397,7 +383,7 @@ public:

  // Perform operations on the space needed after a compaction
  // has been performed.
  virtual void reset_after_compaction() {}
  virtual void reset_after_compaction() = 0;

  // Returns the next space (in the current generation) to be compacted in
  // the global compaction order.  Also is used to select the next
@ -462,7 +448,7 @@ protected:
  HeapWord* _end_of_live;

  // Minimum size of a free block.
  virtual size_t minimum_free_block_size() const = 0;
  virtual size_t minimum_free_block_size() const { return 0; }

  // This the function is invoked when an allocation of an object covering
  // "start" to "end occurs crosses the threshold; returns the next
@ -778,7 +764,7 @@ class ContiguousSpace: public CompactibleSpace {
  HeapWord* top() const            { return _top;    }
  void set_top(HeapWord* value)    { _top = value; }

  virtual void set_saved_mark()    { _saved_mark_word = top();    }
  void set_saved_mark()            { _saved_mark_word = top();    }
  void reset_saved_mark()          { _saved_mark_word = bottom(); }

  WaterMark bottom_mark()     { return WaterMark(this, bottom()); }
@ -813,35 +799,30 @@ class ContiguousSpace: public CompactibleSpace {
  size_t used() const   { return byte_size(bottom(), top()); }
  size_t free() const   { return byte_size(top(),    end()); }

  // Override from space.
  bool is_in(const void* p) const;

  virtual bool is_free_block(const HeapWord* p) const;

  // In a contiguous space we have a more obvious bound on what parts
  // contain objects.
  MemRegion used_region() const { return MemRegion(bottom(), top()); }

  MemRegion used_region_at_save_marks() const {
    return MemRegion(bottom(), saved_mark_word());
  }

  // Allocation (return NULL if full)
  virtual HeapWord* allocate(size_t word_size);
  virtual HeapWord* par_allocate(size_t word_size);

  virtual bool obj_allocated_since_save_marks(const oop obj) const {
    return (HeapWord*)obj >= saved_mark_word();
  }

  // Iteration
  void oop_iterate(ExtendedOopClosure* cl);
  void object_iterate(ObjectClosure* blk);
  // For contiguous spaces this method will iterate safely over objects
  // in the space (i.e., between bottom and top) when at a safepoint.
  void safe_object_iterate(ObjectClosure* blk);
  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
  // iterates on objects up to the safe limit

  // Iterate over as many initialized objects in the space as possible,
  // calling "cl.do_object_careful" on each. Return NULL if all objects
  // in the space (at the start of the iteration) were iterated over.
  // Return an address indicating the extent of the iteration in the
  // event that the iteration had to return because of finding an
  // uninitialized object in the space, or if the closure "cl"
  // signaled early termination.
  HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
  HeapWord* concurrent_iteration_safe_limit() {
    assert(_concurrent_iteration_safe_limit <= top(),
@ -872,7 +853,6 @@ class ContiguousSpace: public CompactibleSpace {
    // set new iteration safe limit
    set_concurrent_iteration_safe_limit(compaction_top());
  }
  virtual size_t minimum_free_block_size() const { return 0; }

  // Override.
  DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
@ -632,7 +632,6 @@ jint universe_init() {
  guarantee(sizeof(oop) % sizeof(HeapWord) == 0,
            "oop size is not not a multiple of HeapWord size");
  TraceTime timer("Genesis", TraceStartupTime);
  GC_locker::lock();  // do not allow gc during bootstrapping
  JavaClasses::compute_hard_coded_offsets();

  jint status = Universe::initialize_heap();
@ -1164,8 +1163,6 @@ bool universe_post_init() {

  MemoryService::add_metaspace_memory_pools();

  GC_locker::unlock();  // allow gc after bootstrapping

  MemoryService::set_universe_heap(Universe::_collectedHeap);
  return true;
}
@ -329,14 +329,12 @@ bool Method::was_executed_more_than(int n) {
  }
}

#ifndef PRODUCT
void Method::print_invocation_count() {
  if (is_static()) tty->print("static ");
  if (is_final()) tty->print("final ");
  if (is_synchronized()) tty->print("synchronized ");
  if (is_native()) tty->print("native ");
  method_holder()->name()->print_symbol_on(tty);
  tty->print(".");
  tty->print("%s::", method_holder()->external_name());
  name()->print_symbol_on(tty);
  signature()->print_symbol_on(tty);

@ -349,12 +347,12 @@ void Method::print_invocation_count() {
  tty->print_cr ("  interpreter_invocation_count: %8d ", interpreter_invocation_count());
  tty->print_cr ("  invocation_counter:           %8d ", invocation_count());
  tty->print_cr ("  backedge_counter:             %8d ", backedge_count());
#ifndef PRODUCT
  if (CountCompiledCalls) {
    tty->print_cr ("  compiled_invocation_count: %8d ", compiled_invocation_count());
  }

}
#endif
}

// Build a MethodData* object to hold information about this method
// collected in the interpreter.
@ -1443,10 +1441,6 @@ void Method::print_name(outputStream* st) {
#endif // !PRODUCT || INCLUDE_JVMTI


//-----------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT
void Method::print_codes_on(outputStream* st) const {
  print_codes_on(0, code_size(), st);
}
@ -1460,7 +1454,6 @@ void Method::print_codes_on(int from, int to, outputStream* st) const {
  BytecodeTracer::set_closure(BytecodeTracer::std_closure());
  while (s.next() >= 0) BytecodeTracer::trace(mh, s.bcp(), st);
}
#endif // not PRODUCT


// Simple compression of line number tables. We use a regular compressed stream, except that we compress deltas
@ -394,6 +394,9 @@ class Method : public Metadata {
#ifndef PRODUCT
  int  compiled_invocation_count() const         { return _compiled_invocation_count;  }
  void set_compiled_invocation_count(int count)  { _compiled_invocation_count = count; }
#else
  // for PrintMethodData in a product build
  int  compiled_invocation_count() const         { return 0;  }
#endif // not PRODUCT

  // Clear (non-shared space) pointers which could not be relevant
@ -462,10 +465,8 @@ class Method : public Metadata {
  // Interpreter oopmap support
  void mask_for(int bci, InterpreterOopMap* mask);

#ifndef PRODUCT
  // operations on invocation counter
  void print_invocation_count();
#endif

  // byte codes
  void set_code(address code)      { return constMethod()->set_code(code); }
@ -474,8 +475,8 @@ class Method : public Metadata {

  // prints byte codes
  void print_codes() const         { print_codes_on(tty); }
  void print_codes_on(outputStream* st) const    PRODUCT_RETURN;
  void print_codes_on(int from, int to, outputStream* st) const    PRODUCT_RETURN;
  void print_codes_on(outputStream* st) const;
  void print_codes_on(int from, int to, outputStream* st) const;

  // method parameters
  bool has_method_parameters() const
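The print_codes_on() declarations above drop PRODUCT_RETURN because they must now exist in product builds too. For context, PRODUCT_RETURN is HotSpot's device for declaring a member function whose real body is compiled only into non-product builds; to the best of my understanding it expands roughly as sketched below, so in a product build the declaration becomes an empty inline stub (the Example class is hypothetical):

// Simplified sketch of the idiom (the real macro lives in HotSpot's shared headers).
#ifdef PRODUCT
#define PRODUCT_RETURN {}        // product build: the declaration gains an empty body
#else
#define PRODUCT_RETURN           // other builds: plain declaration; body is in the .cpp
#endif

class Example {
 public:
  // With PRODUCT defined this reads 'void print_debug_state() const {}'.
  void print_debug_state() const PRODUCT_RETURN;
};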
@ -115,7 +115,6 @@ void ProfileData::print_data_on(outputStream* st, const MethodData* md) const {
  print_data_on(st, print_data_on_helper(md));
}

#ifndef PRODUCT
void ProfileData::print_shared(outputStream* st, const char* name, const char* extra) const {
  st->print("bci: %d", bci());
  st->fill_to(tab_width_one);
@ -138,7 +137,6 @@ void ProfileData::print_shared(outputStream* st, const char* name, const char* e
void ProfileData::tab(outputStream* st, bool first) const {
  st->fill_to(first ? tab_width_one : tab_width_two);
}
#endif // !PRODUCT

// ==================================================================
// BitData
@ -147,23 +145,19 @@ void ProfileData::tab(outputStream* st, bool first) const {
// whether a checkcast bytecode has seen a null value.


#ifndef PRODUCT
void BitData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "BitData", extra);
}
#endif // !PRODUCT

// ==================================================================
// CounterData
//
// A CounterData corresponds to a simple counter.

#ifndef PRODUCT
void CounterData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "CounterData", extra);
  st->print_cr("count(%u)", count());
}
#endif // !PRODUCT

// ==================================================================
// JumpData
@ -188,12 +182,10 @@ void JumpData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  set_displacement(offset);
}

#ifndef PRODUCT
void JumpData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "JumpData", extra);
  st->print_cr("taken(%u) displacement(%d)", taken(), displacement());
}
#endif // !PRODUCT

int TypeStackSlotEntries::compute_cell_count(Symbol* signature, bool include_receiver, int max) {
  // Parameter profiling include the receiver
@ -342,7 +334,6 @@ bool TypeEntriesAtCall::arguments_profiling_enabled() {
  return MethodData::profile_arguments();
}

#ifndef PRODUCT
void TypeEntries::print_klass(outputStream* st, intptr_t k) {
  if (is_type_none(k)) {
    st->print("none");
@ -398,7 +389,6 @@ void VirtualCallTypeData::print_data_on(outputStream* st, const char* extra) con
    _ret.print_data_on(st);
  }
}
#endif

// ==================================================================
// ReceiverTypeData
@ -417,7 +407,6 @@ void ReceiverTypeData::clean_weak_klass_links(BoolObjectClosure* is_alive_cl) {
  }
}

#ifndef PRODUCT
void ReceiverTypeData::print_receiver_data_on(outputStream* st) const {
  uint row;
  int entries = 0;
@ -447,7 +436,6 @@ void VirtualCallData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "VirtualCallData", extra);
  print_receiver_data_on(st);
}
#endif // !PRODUCT

// ==================================================================
// RetData
@ -499,7 +487,6 @@ DataLayout* RetData::advance(MethodData *md, int bci) {
}
#endif // CC_INTERP

#ifndef PRODUCT
void RetData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "RetData", extra);
  uint row;
@ -516,7 +503,6 @@ void RetData::print_data_on(outputStream* st, const char* extra) const {
    }
  }
}
#endif // !PRODUCT

// ==================================================================
// BranchData
@ -534,7 +520,6 @@ void BranchData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  set_displacement(offset);
}

#ifndef PRODUCT
void BranchData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "BranchData", extra);
  st->print_cr("taken(%u) displacement(%d)",
@ -542,7 +527,6 @@ void BranchData::print_data_on(outputStream* st, const char* extra) const {
  tab(st);
  st->print_cr("not taken(%u)", not_taken());
}
#endif

// ==================================================================
// MultiBranchData
@ -608,7 +592,6 @@ void MultiBranchData::post_initialize(BytecodeStream* stream,
  }
}

#ifndef PRODUCT
void MultiBranchData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "MultiBranchData", extra);
  st->print_cr("default_count(%u) displacement(%d)",
@ -620,9 +603,7 @@ void MultiBranchData::print_data_on(outputStream* st, const char* extra) const {
                 count_at(i), displacement_at(i));
  }
}
#endif

#ifndef PRODUCT
void ArgInfoData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "ArgInfoData", extra);
  int nargs = number_of_args();
@ -632,8 +613,6 @@ void ArgInfoData::print_data_on(outputStream* st, const char* extra) const {
  st->cr();
}

#endif

int ParametersTypeData::compute_cell_count(Method* m) {
  if (!MethodData::profile_parameters_for_method(m)) {
    return 0;
@ -654,7 +633,6 @@ bool ParametersTypeData::profiling_enabled() {
  return MethodData::profile_parameters();
}

#ifndef PRODUCT
void ParametersTypeData::print_data_on(outputStream* st, const char* extra) const {
  st->print("parameter types", extra);
  _parameters.print_data_on(st);
@ -666,7 +644,6 @@ void SpeculativeTrapData::print_data_on(outputStream* st, const char* extra) con
  method()->print_short_name(st);
  st->cr();
}
#endif

// ==================================================================
// MethodData*
@ -801,6 +778,8 @@ bool MethodData::is_speculative_trap_bytecode(Bytecodes::Code code) {
  case Bytecodes::_invokeinterface:
  case Bytecodes::_if_acmpeq:
  case Bytecodes::_if_acmpne:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
  case Bytecodes::_invokestatic:
#ifdef COMPILER2
    return UseTypeSpeculation;
@ -1357,8 +1336,6 @@ ArgInfoData *MethodData::arg_info() {

// Printing

#ifndef PRODUCT

void MethodData::print_on(outputStream* st) const {
  assert(is_methodData(), "should be method data");
  st->print("method data for ");
@ -1367,15 +1344,12 @@ void MethodData::print_on(outputStream* st) const {
  print_data_on(st);
}

#endif //PRODUCT

void MethodData::print_value_on(outputStream* st) const {
  assert(is_methodData(), "should be method data");
  st->print("method data for ");
  method()->print_value_on(st);
}

#ifndef PRODUCT
void MethodData::print_data_on(outputStream* st) const {
  ResourceMark rm;
  ProfileData* data = first_data();
@ -1416,7 +1390,6 @@ void MethodData::print_data_on(outputStream* st) const {
    if (dp >= end) return;
  }
}
#endif

#if INCLUDE_SERVICES
// Size Statistics
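Most of the methodData.cpp changes above just lift the print_data_on() family out of #ifndef PRODUCT so that PrintMethodData works in product builds; the records being printed are the interesting part. As a loose sketch of what a BranchData-style cell boils down to — taken/not-taken counters fed by the interpreter and read back as a probability by the compiler — with the saturation policy being my own simplification rather than HotSpot's:

#include <cstdint>
#include <limits>

// Per-branch profile record: taken / not-taken counts, in the spirit of BranchData.
struct BranchProfileSketch {
  uint32_t taken;
  uint32_t not_taken;

  BranchProfileSketch() : taken(0), not_taken(0) {}

  // Called by the interpreter each time the branch executes.
  void record(bool branch_taken) {
    uint32_t& c = branch_taken ? taken : not_taken;
    if (c != std::numeric_limits<uint32_t>::max()) ++c;  // saturate instead of wrapping
  }

  // Probability estimate the compiler can use to bias block layout.
  double taken_probability() const {
    uint64_t total = (uint64_t)taken + not_taken;
    return total == 0 ? 0.5 : (double)taken / (double)total;
  }
};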
@ -280,12 +280,10 @@ class ProfileData : public ResourceObj {
  friend class ReturnTypeEntry;
  friend class TypeStackSlotEntries;
private:
#ifndef PRODUCT
  enum {
    tab_width_one = 16,
    tab_width_two = 36
  };
#endif // !PRODUCT

  // This is a pointer to a section of profiling data.
  DataLayout* _data;
@ -521,10 +519,8 @@ public:

  void print_data_on(outputStream* st, const MethodData* md) const;

#ifndef PRODUCT
  void print_shared(outputStream* st, const char* name, const char* extra) const;
  void tab(outputStream* st, bool first = false) const;
#endif
};

// BitData
@ -583,9 +579,7 @@ public:
  }
#endif // CC_INTERP

#ifndef PRODUCT
  void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};

// CounterData
@ -646,9 +640,7 @@ public:
  }
#endif // CC_INTERP

#ifndef PRODUCT
  void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};

// JumpData
@ -733,9 +725,7 @@ public:
  // Specific initialization.
  void post_initialize(BytecodeStream* stream, MethodData* mdo);

#ifndef PRODUCT
  void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};

// Entries in a ProfileData object to record types: it can either be
@ -808,9 +798,7 @@ public:
    return with_status((intptr_t)k, in);
  }

#ifndef PRODUCT
  static void print_klass(outputStream* st, intptr_t k);
#endif

  // GC support
  static bool is_loader_alive(BoolObjectClosure* is_alive_cl, intptr_t p);
@ -919,9 +907,7 @@ public:
  // GC support
  void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);

#ifndef PRODUCT
  void print_data_on(outputStream* st) const;
#endif
};

// Type entry used for return from a call. A single cell to record the
@ -964,9 +950,7 @@ public:
  // GC support
  void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);

#ifndef PRODUCT
  void print_data_on(outputStream* st) const;
#endif
};

// Entries to collect type information at a call: contains arguments
@ -1144,9 +1128,7 @@ public:
    }
  }

#ifndef PRODUCT
  virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};

// ReceiverTypeData
@ -1288,10 +1270,8 @@ public:
  }
#endif // CC_INTERP

#ifndef PRODUCT
  void print_receiver_data_on(outputStream* st) const;
  void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};

// VirtualCallData
@ -1332,9 +1312,7 @@ public:
  }
#endif // CC_INTERP

#ifndef PRODUCT
  void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};

// VirtualCallTypeData
@ -1458,9 +1436,7 @@ public:
    }
  }

#ifndef PRODUCT
  virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};

// RetData
@ -1561,9 +1537,7 @@ public:
  // Specific initialization.
  void post_initialize(BytecodeStream* stream, MethodData* mdo);

#ifndef PRODUCT
  void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};

// BranchData
@ -1639,9 +1613,7 @@ public:
  // Specific initialization.
  void post_initialize(BytecodeStream* stream, MethodData* mdo);

#ifndef PRODUCT
  void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};

// ArrayData
@ -1832,9 +1804,7 @@ public:
  // Specific initialization.
  void post_initialize(BytecodeStream* stream, MethodData* mdo);

#ifndef PRODUCT
  void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};

class ArgInfoData : public ArrayData {
@ -1859,9 +1829,7 @@ public:
    array_set_int_at(arg, val);
  }

#ifndef PRODUCT
  void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};

// ParametersTypeData
@ -1920,9 +1888,7 @@ public:
    _parameters.clean_weak_klass_links(is_alive_closure);
  }

#ifndef PRODUCT
  virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif

  static ByteSize stack_slot_offset(int i) {
    return cell_offset(stack_slot_local_offset(i));
@ -1976,9 +1942,7 @@ public:
    set_intptr_at(method_offset, (intptr_t)m);
  }

#ifndef PRODUCT
  virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};

// MethodData*
@ -2052,7 +2016,7 @@ public:

  // Whole-method sticky bits and flags
  enum {
    _trap_hist_limit    = 19,   // decoupled from Deoptimization::Reason_LIMIT
    _trap_hist_limit    = 20,   // decoupled from Deoptimization::Reason_LIMIT
    _trap_hist_mask     = max_jubyte,
    _extra_data_count   = 4     // extra DataLayout headers, for trap history
  }; // Public flag values
@ -2457,15 +2421,11 @@ public:
  void set_size(int object_size_in_bytes) { _size = object_size_in_bytes; }

  // Printing
#ifndef PRODUCT
  void print_on      (outputStream* st) const;
#endif
  void print_value_on(outputStream* st) const;

#ifndef PRODUCT
  // printing support for method data
  void print_data_on(outputStream* st) const;
#endif

  const char* internal_name() const { return "{method data}"; }

@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/machnode.hpp"
@ -33,8 +33,8 @@
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
@ -27,6 +27,7 @@
#include "compiler/oopMap.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/escape.hpp"
#include "opto/locknode.hpp"
#include "opto/machnode.hpp"
294
hotspot/src/share/vm/opto/castnode.cpp
Normal file
@ -0,0 +1,294 @@
|
||||
/*
|
||||
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "opto/addnode.hpp"
|
||||
#include "opto/castnode.hpp"
|
||||
#include "opto/connode.hpp"
|
||||
#include "opto/matcher.hpp"
|
||||
#include "opto/phaseX.hpp"
|
||||
#include "opto/subnode.hpp"
|
||||
#include "opto/type.hpp"
|
||||
|
||||
//=============================================================================
|
||||
// If input is already higher or equal to cast type, then this is an identity.
|
||||
Node *ConstraintCastNode::Identity( PhaseTransform *phase ) {
|
||||
return phase->type(in(1))->higher_equal_speculative(_type) ? in(1) : this;
|
||||
}
|
||||
|
||||
//------------------------------Value------------------------------------------
|
||||
// Take 'join' of input and cast-up type
|
||||
const Type *ConstraintCastNode::Value( PhaseTransform *phase ) const {
|
||||
if( in(0) && phase->type(in(0)) == Type::TOP ) return Type::TOP;
|
||||
const Type* ft = phase->type(in(1))->filter_speculative(_type);
|
||||
|
||||
#ifdef ASSERT
|
||||
// Previous versions of this function had some special case logic,
|
||||
// which is no longer necessary. Make sure of the required effects.
|
||||
switch (Opcode()) {
|
||||
case Op_CastII:
|
||||
{
|
||||
const Type* t1 = phase->type(in(1));
|
||||
if( t1 == Type::TOP ) assert(ft == Type::TOP, "special case #1");
|
||||
const Type* rt = t1->join_speculative(_type);
|
||||
if (rt->empty()) assert(ft == Type::TOP, "special case #2");
|
||||
break;
|
||||
}
|
||||
case Op_CastPP:
|
||||
if (phase->type(in(1)) == TypePtr::NULL_PTR &&
|
||||
_type->isa_ptr() && _type->is_ptr()->_ptr == TypePtr::NotNull)
|
||||
assert(ft == Type::TOP, "special case #3");
|
||||
break;
|
||||
}
|
||||
#endif //ASSERT
|
||||
|
||||
return ft;
|
||||
}
|
||||
|
||||
//------------------------------Ideal------------------------------------------
|
||||
// Return a node which is more "ideal" than the current node. Strip out
|
||||
// control copies
|
||||
Node *ConstraintCastNode::Ideal(PhaseGVN *phase, bool can_reshape){
|
||||
return (in(0) && remove_dead_region(phase, can_reshape)) ? this : NULL;
|
||||
}
|
||||
|
||||
//------------------------------Ideal_DU_postCCP-------------------------------
|
||||
// Throw away cast after constant propagation
|
||||
Node *ConstraintCastNode::Ideal_DU_postCCP( PhaseCCP *ccp ) {
|
||||
const Type *t = ccp->type(in(1));
|
||||
ccp->hash_delete(this);
|
||||
set_type(t); // Turn into ID function
|
||||
ccp->hash_insert(this);
|
||||
return this;
|
||||
}
|
||||
|
||||
|
||||
//=============================================================================
|
||||
|
||||
//------------------------------Ideal_DU_postCCP-------------------------------
|
||||
// If not converting int->oop, throw away cast after constant propagation
|
||||
Node *CastPPNode::Ideal_DU_postCCP( PhaseCCP *ccp ) {
|
||||
const Type *t = ccp->type(in(1));
|
||||
if (!t->isa_oop_ptr() || ((in(1)->is_DecodeN()) && Matcher::gen_narrow_oop_implicit_null_checks())) {
|
||||
return NULL; // do not transform raw pointers or narrow oops
|
||||
}
|
||||
return ConstraintCastNode::Ideal_DU_postCCP(ccp);
|
||||
}
|
||||
|
||||
|
||||
|
||||
//=============================================================================
|
||||
//------------------------------Identity---------------------------------------
|
||||
// If input is already higher or equal to cast type, then this is an identity.
|
||||
Node *CheckCastPPNode::Identity( PhaseTransform *phase ) {
|
||||
// Toned down to rescue meeting at a Phi 3 different oops all implementing
|
||||
// the same interface. CompileTheWorld starting at 502, kd12rc1.zip.
|
||||
return (phase->type(in(1)) == phase->type(this)) ? in(1) : this;
|
||||
}
|
||||
|
||||
//------------------------------Value------------------------------------------
|
||||
// Take 'join' of input and cast-up type, unless working with an Interface
|
||||
const Type *CheckCastPPNode::Value( PhaseTransform *phase ) const {
|
||||
if( in(0) && phase->type(in(0)) == Type::TOP ) return Type::TOP;
|
||||
|
  const Type *inn = phase->type(in(1));
  if( inn == Type::TOP ) return Type::TOP;  // No information yet

  const TypePtr *in_type   = inn->isa_ptr();
  const TypePtr *my_type   = _type->isa_ptr();
  const Type *result = _type;
  if( in_type != NULL && my_type != NULL ) {
    TypePtr::PTR in_ptr = in_type->ptr();
    if( in_ptr == TypePtr::Null ) {
      result = in_type;
    } else if( in_ptr == TypePtr::Constant ) {
      // Casting a constant oop to an interface?
      // (i.e., a String to a Comparable?)
      // Then return the interface.
      const TypeOopPtr *jptr = my_type->isa_oopptr();
      assert( jptr, "" );
      result = (jptr->klass()->is_interface() || !in_type->higher_equal(_type))
        ? my_type->cast_to_ptr_type( TypePtr::NotNull )
        : in_type;
    } else {
      result = my_type->cast_to_ptr_type( my_type->join_ptr(in_ptr) );
    }
  }

  // This is the code from TypePtr::xmeet() that prevents us from
  // having 2 ways to represent the same type. We have to replicate it
  // here because we don't go through meet/join.
  if (result->remove_speculative() == result->speculative()) {
    result = result->remove_speculative();
  }

  // Same as above: because we don't go through meet/join, remove the
  // speculative type if we know we won't use it.
  return result->cleanup_speculative();

  // JOIN NOT DONE HERE BECAUSE OF INTERFACE ISSUES.
  // FIX THIS (DO THE JOIN) WHEN UNION TYPES APPEAR!

  //
  // Remove this code after overnight run indicates no performance
  // loss from not performing JOIN at CheckCastPPNode
  //
  // const TypeInstPtr *in_oop = in->isa_instptr();
  // const TypeInstPtr *my_oop = _type->isa_instptr();
  // // If either input is an 'interface', return destination type
  // assert (in_oop == NULL || in_oop->klass() != NULL, "");
  // assert (my_oop == NULL || my_oop->klass() != NULL, "");
  // if( (in_oop && in_oop->klass()->is_interface())
  //   ||(my_oop && my_oop->klass()->is_interface()) ) {
  //   TypePtr::PTR in_ptr = in->isa_ptr() ? in->is_ptr()->_ptr : TypePtr::BotPTR;
  //   // Preserve cast away nullness for interfaces
  //   if( in_ptr == TypePtr::NotNull && my_oop && my_oop->_ptr == TypePtr::BotPTR ) {
  //     return my_oop->cast_to_ptr_type(TypePtr::NotNull);
  //   }
  //   return _type;
  // }
  //
  // // Neither the input nor the destination type is an interface,
  //
  // // history: JOIN used to cause weird corner case bugs
  // //          return (in == TypeOopPtr::NULL_PTR) ? in : _type;
  // // JOIN picks up NotNull in common instance-of/check-cast idioms, both oops.
  // // JOIN does not preserve NotNull in other cases, e.g. RawPtr vs InstPtr
  // const Type *join = in->join(_type);
  // // Check if join preserved NotNull'ness for pointers
  // if( join->isa_ptr() && _type->isa_ptr() ) {
  //   TypePtr::PTR join_ptr = join->is_ptr()->_ptr;
  //   TypePtr::PTR type_ptr = _type->is_ptr()->_ptr;
  //   // If there isn't any NotNull'ness to preserve
  //   // OR if join preserved NotNull'ness then return it
  //   if( type_ptr == TypePtr::BotPTR || type_ptr == TypePtr::Null ||
  //       join_ptr == TypePtr::NotNull || join_ptr == TypePtr::Constant ) {
  //     return join;
  //   }
  //   // ELSE return same old type as before
  //   return _type;
  // }
  // // Not joining two pointers
  // return join;
}
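// Illustrative note (an assumption about intent, not text from the sources):
// the Constant case above avoids joining a constant oop with an interface
// type. C2's type lattice does not model interface subtyping, so joining,
// e.g., a constant String with the Comparable interface type could yield an
// empty type; answering the NotNull-cast interface type stays conservative.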

//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.  Strip out
// control copies
Node *CheckCastPPNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return (in(0) && remove_dead_region(phase, can_reshape)) ? this : NULL;
}

//=============================================================================
//------------------------------Value------------------------------------------
const Type *CastX2PNode::Value( PhaseTransform *phase ) const {
  const Type* t = phase->type(in(1));
  if (t == Type::TOP) return Type::TOP;
  if (t->base() == Type_X && t->singleton()) {
    uintptr_t bits = (uintptr_t) t->is_intptr_t()->get_con();
    if (bits == 0)   return TypePtr::NULL_PTR;
    return TypeRawPtr::make((address) bits);
  }
  return CastX2PNode::bottom_type();
}

//------------------------------Idealize---------------------------------------
static inline bool fits_in_int(const Type* t, bool but_not_min_int = false) {
  if (t == Type::TOP)  return false;
  const TypeX* tl = t->is_intptr_t();
  jint lo = min_jint;
  jint hi = max_jint;
  if (but_not_min_int)  ++lo;  // caller wants to negate the value w/o overflow
  return (tl->_lo >= lo) && (tl->_hi <= hi);
}

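// Illustrative example (not in the original file): an intptr_t input typed
// [-100, 100] passes fits_in_int() trivially; with but_not_min_int=true a
// range containing min_jint is rejected, because negating min_jint would
// overflow and the SubX caller below negates the displacement.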
static inline Node* addP_of_X2P(PhaseGVN *phase,
                                Node* base,
                                Node* dispX,
                                bool negate = false) {
  if (negate) {
    dispX = new (phase->C) SubXNode(phase->MakeConX(0), phase->transform(dispX));
  }
  return new (phase->C) AddPNode(phase->C->top(),
                                 phase->transform(new (phase->C) CastX2PNode(base)),
                                 phase->transform(dispX));
}

Node *CastX2PNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  // convert CastX2P(AddX(x, y)) to AddP(CastX2P(x), y) if y fits in an int
  int op = in(1)->Opcode();
  Node* x;
  Node* y;
  switch (op) {
  case Op_SubX:
    x = in(1)->in(1);
    // Avoid ideal transformations ping-pong between this and AddP for raw pointers.
    if (phase->find_intptr_t_con(x, -1) == 0)
      break;
    y = in(1)->in(2);
    if (fits_in_int(phase->type(y), true)) {
      return addP_of_X2P(phase, x, y, true);
    }
    break;
  case Op_AddX:
    x = in(1)->in(1);
    y = in(1)->in(2);
    if (fits_in_int(phase->type(y))) {
      return addP_of_X2P(phase, x, y);
    }
    if (fits_in_int(phase->type(x))) {
      return addP_of_X2P(phase, y, x);
    }
    break;
  }
  return NULL;
}
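// Illustrative rewrite (assumed example, not from the sources):
//   CastX2P(AddX(x, #16))  ==>  AddP(top, CastX2P(x), #16)
// which exposes the constant displacement to the matcher so it can fold
// into an addressing mode; the SubX arm does the same with a negated y.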

//------------------------------Identity---------------------------------------
Node *CastX2PNode::Identity( PhaseTransform *phase ) {
  if (in(1)->Opcode() == Op_CastP2X)  return in(1)->in(1);
  return this;
}

//=============================================================================
//------------------------------Value------------------------------------------
const Type *CastP2XNode::Value( PhaseTransform *phase ) const {
  const Type* t = phase->type(in(1));
  if (t == Type::TOP) return Type::TOP;
  if (t->base() == Type::RawPtr && t->singleton()) {
    uintptr_t bits = (uintptr_t) t->is_rawptr()->get_con();
    return TypeX::make(bits);
  }
  return CastP2XNode::bottom_type();
}

Node *CastP2XNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  return (in(0) && remove_dead_region(phase, can_reshape)) ? this : NULL;
}

//------------------------------Identity---------------------------------------
Node *CastP2XNode::Identity( PhaseTransform *phase ) {
  if (in(1)->Opcode() == Op_CastX2P)  return in(1)->in(1);
  return this;
}
119
hotspot/src/share/vm/opto/castnode.hpp
Normal file
@ -0,0 +1,119 @@
/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_CASTNODE_HPP
#define SHARE_VM_OPTO_CASTNODE_HPP

#include "opto/node.hpp"
#include "opto/opcodes.hpp"


//------------------------------ConstraintCastNode-----------------------------
// cast to a different range
class ConstraintCastNode: public TypeNode {
  public:
  ConstraintCastNode (Node *n, const Type *t ): TypeNode(t,2) {
    init_class_id(Class_ConstraintCast);
    init_req(1, n);
  }
  virtual Node *Identity( PhaseTransform *phase );
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual int Opcode() const;
  virtual uint ideal_reg() const = 0;
  virtual Node *Ideal_DU_postCCP( PhaseCCP * );
};

//------------------------------CastIINode-------------------------------------
// cast integer to integer (different range)
class CastIINode: public ConstraintCastNode {
  public:
  CastIINode (Node *n, const Type *t ): ConstraintCastNode(n,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
};
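// Usage sketch (illustrative; the actual emission sites live elsewhere in
// the compiler): after a range check proves 0 <= i < n, the parser can pin
// that knowledge on the index with something like
//   Node* idx = gvn.transform(new (C) CastIINode(i, TypeInt::make(0, n-1, Type::WidenMin)));
// so later passes may rely on the narrowed range.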

//------------------------------CastPPNode-------------------------------------
// cast pointer to pointer (different type)
class CastPPNode: public ConstraintCastNode {
  public:
  CastPPNode (Node *n, const Type *t ): ConstraintCastNode(n, t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual Node *Ideal_DU_postCCP( PhaseCCP * );
};

//------------------------------CheckCastPPNode--------------------------------
// for _checkcast, cast pointer to pointer (different type), without JOIN,
class CheckCastPPNode: public TypeNode {
  public:
  CheckCastPPNode( Node *c, Node *n, const Type *t ) : TypeNode(t,2) {
    init_class_id(Class_CheckCastPP);
    init_req(0, c);
    init_req(1, n);
  }

  virtual Node *Identity( PhaseTransform *phase );
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  // No longer remove CheckCast after CCP as it gives me a place to hang
  // the proper address type - which is required to compute anti-deps.
  //virtual Node *Ideal_DU_postCCP( PhaseCCP * );
};


//------------------------------CastX2PNode-------------------------------------
// convert a machine-pointer-sized integer to a raw pointer
class CastX2PNode : public Node {
  public:
  CastX2PNode( Node *n ) : Node(NULL, n) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node *Identity( PhaseTransform *phase );
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual const Type *bottom_type() const { return TypeRawPtr::BOTTOM; }
};

//------------------------------CastP2XNode-------------------------------------
// Used in both 32-bit and 64-bit land.
// Used for card-marks and unsafe pointer math.
class CastP2XNode : public Node {
  public:
  CastP2XNode( Node *ctrl, Node *n ) : Node(ctrl, n) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node *Identity( PhaseTransform *phase );
  virtual uint ideal_reg() const { return Op_RegX; }
  virtual const Type *bottom_type() const { return TypeX_X; }
  // Return false to keep node from moving away from an associated card mark.
  virtual bool depends_only_on_test() const { return false; }
};



#endif // SHARE_VM_OPTO_CASTNODE_HPP
@ -29,8 +29,11 @@
#include "opto/addnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/movenode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/regmask.hpp"

@ -37,6 +37,7 @@
#include "opto/indexSet.hpp"
#include "opto/machnode.hpp"
#include "opto/memnode.hpp"
#include "opto/movenode.hpp"
#include "opto/opcodes.hpp"
#include "opto/rootnode.hpp"

@ -25,17 +25,24 @@
#include "precompiled.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
#include "opto/countbitsnode.hpp"
#include "opto/divnode.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/locknode.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/memnode.hpp"
#include "opto/mathexactnode.hpp"
#include "opto/movenode.hpp"
#include "opto/mulnode.hpp"
#include "opto/multnode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/node.hpp"
#include "opto/opaquenode.hpp"
#include "opto/rootnode.hpp"
#include "opto/subnode.hpp"
#include "opto/vectornode.hpp"

@ -51,6 +51,7 @@
#include "opto/mathexactnode.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/output.hpp"

File diff suppressed because it is too large
@ -139,630 +139,16 @@ public:

};

//------------------------------BinaryNode-------------------------------------
// Place holder for the 2 conditional inputs to a CMove.  CMove needs 4
// inputs: the Bool (for the lt/gt/eq/ne bits), the flags (result of some
// compare), and the 2 values to select between.  The Matcher requires a
// binary tree so we break it down like this:
//     (CMove (Binary bol cmp) (Binary src1 src2))
class BinaryNode : public Node {
  public:
  BinaryNode( Node *n1, Node *n2 ) : Node(0,n1,n2) { }
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return 0; }
};

//------------------------------CMoveNode--------------------------------------
// Conditional move
class CMoveNode : public TypeNode {
  public:
  enum { Control,               // When is it safe to do this cmove?
         Condition,             // Condition controlling the cmove
         IfFalse,               // Value if condition is false
         IfTrue };              // Value if condition is true
  CMoveNode( Node *bol, Node *left, Node *right, const Type *t ) : TypeNode(t,4)
  {
    init_class_id(Class_CMove);
    // all inputs are nullified in Node::Node(int)
    // init_req(Control,NULL);
    init_req(Condition,bol);
    init_req(IfFalse,left);
    init_req(IfTrue,right);
  }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  static CMoveNode *make( Compile *C, Node *c, Node *bol, Node *left, Node *right, const Type *t );
  // Helper function to spot cmove graph shapes
  static Node *is_cmove_id( PhaseTransform *phase, Node *cmp, Node *t, Node *f, BoolNode *b );
};

//------------------------------CMoveDNode-------------------------------------
class CMoveDNode : public CMoveNode {
  public:
  CMoveDNode( Node *bol, Node *left, Node *right, const Type* t) : CMoveNode(bol,left,right,t){}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------CMoveFNode-------------------------------------
class CMoveFNode : public CMoveNode {
  public:
  CMoveFNode( Node *bol, Node *left, Node *right, const Type* t ) : CMoveNode(bol,left,right,t) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------CMoveINode-------------------------------------
class CMoveINode : public CMoveNode {
  public:
  CMoveINode( Node *bol, Node *left, Node *right, const TypeInt *ti ) : CMoveNode(bol,left,right,ti){}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------CMoveLNode-------------------------------------
class CMoveLNode : public CMoveNode {
  public:
  CMoveLNode(Node *bol, Node *left, Node *right, const TypeLong *tl ) : CMoveNode(bol,left,right,tl){}
  virtual int Opcode() const;
};

//------------------------------CMovePNode-------------------------------------
class CMovePNode : public CMoveNode {
  public:
  CMovePNode( Node *c, Node *bol, Node *left, Node *right, const TypePtr* t ) : CMoveNode(bol,left,right,t) { init_req(Control,c); }
  virtual int Opcode() const;
};

//------------------------------CMoveNNode-------------------------------------
class CMoveNNode : public CMoveNode {
  public:
  CMoveNNode( Node *c, Node *bol, Node *left, Node *right, const Type* t ) : CMoveNode(bol,left,right,t) { init_req(Control,c); }
  virtual int Opcode() const;
};

//------------------------------ConstraintCastNode-----------------------------
// cast to a different range
class ConstraintCastNode: public TypeNode {
  public:
  ConstraintCastNode (Node *n, const Type *t ): TypeNode(t,2) {
    init_class_id(Class_ConstraintCast);
    init_req(1, n);
  }
  virtual Node *Identity( PhaseTransform *phase );
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual int Opcode() const;
  virtual uint ideal_reg() const = 0;
  virtual Node *Ideal_DU_postCCP( PhaseCCP * );
};

//------------------------------CastIINode-------------------------------------
// cast integer to integer (different range)
class CastIINode: public ConstraintCastNode {
  public:
  CastIINode (Node *n, const Type *t ): ConstraintCastNode(n,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
};

//------------------------------CastPPNode-------------------------------------
// cast pointer to pointer (different type)
class CastPPNode: public ConstraintCastNode {
  public:
  CastPPNode (Node *n, const Type *t ): ConstraintCastNode(n, t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual Node *Ideal_DU_postCCP( PhaseCCP * );
};

//------------------------------CheckCastPPNode--------------------------------
// for _checkcast, cast pointer to pointer (different type), without JOIN,
class CheckCastPPNode: public TypeNode {
  public:
  CheckCastPPNode( Node *c, Node *n, const Type *t ) : TypeNode(t,2) {
    init_class_id(Class_CheckCastPP);
    init_req(0, c);
    init_req(1, n);
  }

  virtual Node *Identity( PhaseTransform *phase );
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  // No longer remove CheckCast after CCP as it gives me a place to hang
  // the proper address type - which is required to compute anti-deps.
  //virtual Node *Ideal_DU_postCCP( PhaseCCP * );
};

//------------------------------EncodeNarrowPtr--------------------------------
class EncodeNarrowPtrNode : public TypeNode {
 protected:
  EncodeNarrowPtrNode(Node* value, const Type* type):
  TypeNode(type, 2) {
    init_class_id(Class_EncodeNarrowPtr);
    init_req(0, NULL);
    init_req(1, value);
  }
 public:
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );
};

//------------------------------EncodeP--------------------------------
// Encodes an oop pointers into its compressed form
// Takes an extra argument which is the real heap base as a long which
// may be useful for code generation in the backend.
class EncodePNode : public EncodeNarrowPtrNode {
 public:
  EncodePNode(Node* value, const Type* type):
  EncodeNarrowPtrNode(value, type) {
    init_class_id(Class_EncodeP);
  }
  virtual int Opcode() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual const Type *Value( PhaseTransform *phase ) const;
};

//------------------------------EncodePKlass--------------------------------
// Encodes a klass pointer into its compressed form
// Takes an extra argument which is the real heap base as a long which
// may be useful for code generation in the backend.
class EncodePKlassNode : public EncodeNarrowPtrNode {
 public:
  EncodePKlassNode(Node* value, const Type* type):
  EncodeNarrowPtrNode(value, type) {
    init_class_id(Class_EncodePKlass);
  }
  virtual int Opcode() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual const Type *Value( PhaseTransform *phase ) const;
};

//------------------------------DecodeNarrowPtr--------------------------------
class DecodeNarrowPtrNode : public TypeNode {
 protected:
  DecodeNarrowPtrNode(Node* value, const Type* type):
  TypeNode(type, 2) {
    init_class_id(Class_DecodeNarrowPtr);
    init_req(0, NULL);
    init_req(1, value);
  }
 public:
  virtual uint ideal_reg() const { return Op_RegP; }
};

//------------------------------DecodeN--------------------------------
// Converts a narrow oop into a real oop ptr.
// Takes an extra argument which is the real heap base as a long which
// may be useful for code generation in the backend.
class DecodeNNode : public DecodeNarrowPtrNode {
 public:
  DecodeNNode(Node* value, const Type* type):
  DecodeNarrowPtrNode(value, type) {
    init_class_id(Class_DecodeN);
  }
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
};

//------------------------------DecodeNKlass--------------------------------
// Converts a narrow klass pointer into a real klass ptr.
// Takes an extra argument which is the real heap base as a long which
// may be useful for code generation in the backend.
class DecodeNKlassNode : public DecodeNarrowPtrNode {
 public:
  DecodeNKlassNode(Node* value, const Type* type):
  DecodeNarrowPtrNode(value, type) {
    init_class_id(Class_DecodeNKlass);
  }
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
};

//------------------------------Conv2BNode-------------------------------------
// Convert int/pointer to a Boolean.  Map zero to zero, all else to 1.
class Conv2BNode : public Node {
  public:
  Conv2BNode( Node *i ) : Node(0,i) {}
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return TypeInt::BOOL; }
  virtual Node *Identity( PhaseTransform *phase );
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint ideal_reg() const { return Op_RegI; }
};

// The conversions operations are all Alpha sorted.  Please keep it that way!
//------------------------------ConvD2FNode------------------------------------
// Convert double to float
class ConvD2FNode : public Node {
  public:
  ConvD2FNode( Node *in1 ) : Node(0,in1) {}
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return Type::FLOAT; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual uint ideal_reg() const { return Op_RegF; }
};

//------------------------------ConvD2INode------------------------------------
// Convert Double to Integer
class ConvD2INode : public Node {
  public:
  ConvD2INode( Node *in1 ) : Node(0,in1) {}
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return TypeInt::INT; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return Op_RegI; }
};

//------------------------------ConvD2LNode------------------------------------
// Convert Double to Long
class ConvD2LNode : public Node {
  public:
  ConvD2LNode( Node *dbl ) : Node(0,dbl) {}
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return TypeLong::LONG; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return Op_RegL; }
};

//------------------------------ConvF2DNode------------------------------------
// Convert Float to a Double.
class ConvF2DNode : public Node {
  public:
  ConvF2DNode( Node *in1 ) : Node(0,in1) {}
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return Type::DOUBLE; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint ideal_reg() const { return Op_RegD; }
};

//------------------------------ConvF2INode------------------------------------
// Convert float to integer
class ConvF2INode : public Node {
  public:
  ConvF2INode( Node *in1 ) : Node(0,in1) {}
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return TypeInt::INT; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return Op_RegI; }
};

//------------------------------ConvF2LNode------------------------------------
// Convert float to long
class ConvF2LNode : public Node {
  public:
  ConvF2LNode( Node *in1 ) : Node(0,in1) {}
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return TypeLong::LONG; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return Op_RegL; }
};

//------------------------------ConvI2DNode------------------------------------
// Convert Integer to Double
class ConvI2DNode : public Node {
  public:
  ConvI2DNode( Node *in1 ) : Node(0,in1) {}
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return Type::DOUBLE; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint ideal_reg() const { return Op_RegD; }
};

//------------------------------ConvI2FNode------------------------------------
// Convert Integer to Float
class ConvI2FNode : public Node {
  public:
  ConvI2FNode( Node *in1 ) : Node(0,in1) {}
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return Type::FLOAT; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual uint ideal_reg() const { return Op_RegF; }
};

//------------------------------ConvI2LNode------------------------------------
// Convert integer to long
class ConvI2LNode : public TypeNode {
  public:
  ConvI2LNode(Node *in1, const TypeLong* t = TypeLong::INT)
    : TypeNode(t, 2)
  { init_req(1, in1); }
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return Op_RegL; }
};

//------------------------------ConvL2DNode------------------------------------
// Convert Long to Double
class ConvL2DNode : public Node {
  public:
  ConvL2DNode( Node *in1 ) : Node(0,in1) {}
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return Type::DOUBLE; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint ideal_reg() const { return Op_RegD; }
};

//------------------------------ConvL2FNode------------------------------------
// Convert Long to Float
class ConvL2FNode : public Node {
  public:
  ConvL2FNode( Node *in1 ) : Node(0,in1) {}
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return Type::FLOAT; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint ideal_reg() const { return Op_RegF; }
};

//------------------------------ConvL2INode------------------------------------
// Convert long to integer
class ConvL2INode : public Node {
  public:
  ConvL2INode( Node *in1 ) : Node(0,in1) {}
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return TypeInt::INT; }
  virtual Node *Identity( PhaseTransform *phase );
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return Op_RegI; }
};

//------------------------------CastX2PNode-------------------------------------
// convert a machine-pointer-sized integer to a raw pointer
class CastX2PNode : public Node {
  public:
  CastX2PNode( Node *n ) : Node(NULL, n) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node *Identity( PhaseTransform *phase );
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual const Type *bottom_type() const { return TypeRawPtr::BOTTOM; }
};

//------------------------------CastP2XNode-------------------------------------
// Used in both 32-bit and 64-bit land.
// Used for card-marks and unsafe pointer math.
class CastP2XNode : public Node {
  public:
  CastP2XNode( Node *ctrl, Node *n ) : Node(ctrl, n) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node *Identity( PhaseTransform *phase );
  virtual uint ideal_reg() const { return Op_RegX; }
  virtual const Type *bottom_type() const { return TypeX_X; }
  // Return false to keep node from moving away from an associated card mark.
  virtual bool depends_only_on_test() const { return false; }
};

//------------------------------ThreadLocalNode--------------------------------
// Ideal Node which returns the base of ThreadLocalStorage.
class ThreadLocalNode : public Node {
  public:
  ThreadLocalNode( ) : Node((Node*)Compile::current()->root()) {}
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return TypeRawPtr::BOTTOM;}
  virtual uint ideal_reg() const { return Op_RegP; }
};

//------------------------------LoadReturnPCNode-------------------------------
class LoadReturnPCNode: public Node {
  public:
  LoadReturnPCNode(Node *c) : Node(c) { }
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
};


//-----------------------------RoundFloatNode----------------------------------
class RoundFloatNode: public Node {
  public:
  RoundFloatNode(Node* c, Node *in1): Node(c, in1) {}
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return Type::FLOAT; }
  virtual uint ideal_reg() const { return Op_RegF; }
  virtual Node *Identity( PhaseTransform *phase );
  virtual const Type *Value( PhaseTransform *phase ) const;
};


//-----------------------------RoundDoubleNode---------------------------------
class RoundDoubleNode: public Node {
  public:
  RoundDoubleNode(Node* c, Node *in1): Node(c, in1) {}
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return Type::DOUBLE; }
  virtual uint ideal_reg() const { return Op_RegD; }
  virtual Node *Identity( PhaseTransform *phase );
  virtual const Type *Value( PhaseTransform *phase ) const;
};

//------------------------------Opaque1Node------------------------------------
// A node to prevent unwanted optimizations.  Allows constant folding.
// Stops value-numbering, Ideal calls or Identity functions.
class Opaque1Node : public Node {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const;
  public:
  Opaque1Node( Compile* C, Node *n ) : Node(0,n) {
    // Put it on the Macro nodes list to removed during macro nodes expansion.
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  // Special version for the pre-loop to hold the original loop limit
  // which is consumed by range check elimination.
  Opaque1Node( Compile* C, Node *n, Node* orig_limit ) : Node(0,n,orig_limit) {
    // Put it on the Macro nodes list to removed during macro nodes expansion.
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  Node* original_loop_limit() { return req()==3 ? in(2) : NULL; }
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return TypeInt::INT; }
  virtual Node *Identity( PhaseTransform *phase );
};

//------------------------------Opaque2Node------------------------------------
// A node to prevent unwanted optimizations.  Allows constant folding.  Stops
// value-numbering, most Ideal calls or Identity functions.  This Node is
// specifically designed to prevent the pre-increment value of a loop trip
// counter from being live out of the bottom of the loop (hence causing the
// pre- and post-increment values both being live and thus requiring an extra
// temp register and an extra move).  If we "accidentally" optimize through
// this kind of a Node, we'll get slightly pessimal, but correct, code.  Thus
// it's OK to be slightly sloppy on optimizations here.
class Opaque2Node : public Node {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const;
  public:
  Opaque2Node( Compile* C, Node *n ) : Node(0,n) {
    // Put it on the Macro nodes list to removed during macro nodes expansion.
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return TypeInt::INT; }
};

//------------------------------Opaque3Node------------------------------------
// A node to prevent unwanted optimizations. Will be optimized only during
// macro nodes expansion.
class Opaque3Node : public Opaque2Node {
  int _opt; // what optimization it was used for
  public:
  enum { RTM_OPT };
  Opaque3Node(Compile* C, Node *n, int opt) : Opaque2Node(C, n), _opt(opt) {}
  virtual int Opcode() const;
  bool rtm_opt() const { return (_opt == RTM_OPT); }
};


//----------------------PartialSubtypeCheckNode--------------------------------
// The 2nd slow-half of a subtype check.  Scan the subklass's 2ndary superklass
// array for an instance of the superklass.  Set a hidden internal cache on a
// hit (cache is checked with exposed code in gen_subtype_check()).  Return
// not zero for a miss or zero for a hit.
class PartialSubtypeCheckNode : public Node {
  public:
  PartialSubtypeCheckNode(Node* c, Node* sub, Node* super) : Node(c,sub,super) {}
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return TypeRawPtr::BOTTOM; }
  virtual uint ideal_reg() const { return Op_RegP; }
};

//
class MoveI2FNode : public Node {
  public:
  MoveI2FNode( Node *value ) : Node(0,value) {}
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return Type::FLOAT; }
  virtual uint ideal_reg() const { return Op_RegF; }
  virtual const Type* Value( PhaseTransform *phase ) const;
};

class MoveL2DNode : public Node {
  public:
  MoveL2DNode( Node *value ) : Node(0,value) {}
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return Type::DOUBLE; }
  virtual uint ideal_reg() const { return Op_RegD; }
  virtual const Type* Value( PhaseTransform *phase ) const;
};

class MoveF2INode : public Node {
  public:
  MoveF2INode( Node *value ) : Node(0,value) {}
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return TypeInt::INT; }
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual const Type* Value( PhaseTransform *phase ) const;
};

class MoveD2LNode : public Node {
  public:
  MoveD2LNode( Node *value ) : Node(0,value) {}
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return TypeLong::LONG; }
  virtual uint ideal_reg() const { return Op_RegL; }
  virtual const Type* Value( PhaseTransform *phase ) const;
};

//---------- CountBitsNode -----------------------------------------------------
class CountBitsNode : public Node {
  public:
  CountBitsNode(Node* in1) : Node(0, in1) {}
  const Type* bottom_type() const { return TypeInt::INT; }
  virtual uint ideal_reg() const { return Op_RegI; }
};

//---------- CountLeadingZerosINode --------------------------------------------
// Count leading zeros (0-bit count starting from MSB) of an integer.
class CountLeadingZerosINode : public CountBitsNode {
  public:
  CountLeadingZerosINode(Node* in1) : CountBitsNode(in1) {}
  virtual int Opcode() const;
  virtual const Type* Value(PhaseTransform* phase) const;
};

//---------- CountLeadingZerosLNode --------------------------------------------
// Count leading zeros (0-bit count starting from MSB) of a long.
class CountLeadingZerosLNode : public CountBitsNode {
  public:
  CountLeadingZerosLNode(Node* in1) : CountBitsNode(in1) {}
  virtual int Opcode() const;
  virtual const Type* Value(PhaseTransform* phase) const;
};

//---------- CountTrailingZerosINode -------------------------------------------
// Count trailing zeros (0-bit count starting from LSB) of an integer.
class CountTrailingZerosINode : public CountBitsNode {
  public:
  CountTrailingZerosINode(Node* in1) : CountBitsNode(in1) {}
  virtual int Opcode() const;
  virtual const Type* Value(PhaseTransform* phase) const;
};

//---------- CountTrailingZerosLNode -------------------------------------------
// Count trailing zeros (0-bit count starting from LSB) of a long.
class CountTrailingZerosLNode : public CountBitsNode {
  public:
  CountTrailingZerosLNode(Node* in1) : CountBitsNode(in1) {}
  virtual int Opcode() const;
  virtual const Type* Value(PhaseTransform* phase) const;
};

//---------- PopCountINode -----------------------------------------------------
// Population count (bit count) of an integer.
class PopCountINode : public CountBitsNode {
  public:
  PopCountINode(Node* in1) : CountBitsNode(in1) {}
  virtual int Opcode() const;
};

//---------- PopCountLNode -----------------------------------------------------
// Population count (bit count) of a long.
class PopCountLNode : public CountBitsNode {
  public:
  PopCountLNode(Node* in1) : CountBitsNode(in1) {}
  virtual int Opcode() const;
};

#endif // SHARE_VM_OPTO_CONNODE_HPP
512
hotspot/src/share/vm/opto/convertnode.cpp
Normal file
@ -0,0 +1,512 @@
/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "opto/addnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/matcher.hpp"
#include "opto/phaseX.hpp"
#include "opto/subnode.hpp"

//=============================================================================
//------------------------------Identity---------------------------------------
Node *Conv2BNode::Identity( PhaseTransform *phase ) {
  const Type *t = phase->type( in(1) );
  if( t == Type::TOP ) return in(1);
  if( t == TypeInt::ZERO ) return in(1);
  if( t == TypeInt::ONE ) return in(1);
  if( t == TypeInt::BOOL ) return in(1);
  return this;
}

//------------------------------Value------------------------------------------
const Type *Conv2BNode::Value( PhaseTransform *phase ) const {
  const Type *t = phase->type( in(1) );
  if( t == Type::TOP ) return Type::TOP;
  if( t == TypeInt::ZERO ) return TypeInt::ZERO;
  if( t == TypePtr::NULL_PTR ) return TypeInt::ZERO;
  const TypePtr *tp = t->isa_ptr();
  if( tp != NULL ) {
    if( tp->ptr() == TypePtr::AnyNull ) return Type::TOP;
    if( tp->ptr() == TypePtr::Constant) return TypeInt::ONE;
    if (tp->ptr() == TypePtr::NotNull)  return TypeInt::ONE;
    return TypeInt::BOOL;
  }
  if (t->base() != Type::Int) return TypeInt::BOOL;
  const TypeInt *ti = t->is_int();
  if( ti->_hi < 0 || ti->_lo > 0 ) return TypeInt::ONE;
  return TypeInt::BOOL;
}
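// Illustrative examples (not in the original file): an input typed [3,7]
// folds to ONE because the range excludes zero, TypeInt::ZERO folds to
// ZERO, and a range straddling zero such as [-2,5] stays BOOL ({0,1}).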

// The conversions operations are all Alpha sorted.  Please keep it that way!
//=============================================================================
//------------------------------Value------------------------------------------
const Type *ConvD2FNode::Value( PhaseTransform *phase ) const {
  const Type *t = phase->type( in(1) );
  if( t == Type::TOP ) return Type::TOP;
  if( t == Type::DOUBLE ) return Type::FLOAT;
  const TypeD *td = t->is_double_constant();
  return TypeF::make( (float)td->getd() );
}

//------------------------------Identity---------------------------------------
// Float's can be converted to doubles with no loss of bits.  Hence
// converting a float to a double and back to a float is a NOP.
Node *ConvD2FNode::Identity(PhaseTransform *phase) {
  return (in(1)->Opcode() == Op_ConvF2D) ? in(1)->in(1) : this;
}
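// Illustrative note (not in the original file): every float value is
// exactly representable as a double, so (float)(double)f == f for all
// float bit patterns; the reverse direction, ConvF2D(ConvD2F(d)), is NOT
// an identity, since narrowing to float can lose bits.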

//=============================================================================
//------------------------------Value------------------------------------------
const Type *ConvD2INode::Value( PhaseTransform *phase ) const {
  const Type *t = phase->type( in(1) );
  if( t == Type::TOP ) return Type::TOP;
  if( t == Type::DOUBLE ) return TypeInt::INT;
  const TypeD *td = t->is_double_constant();
  return TypeInt::make( SharedRuntime::d2i( td->getd() ) );
}

//------------------------------Ideal------------------------------------------
// If converting to an int type, skip any rounding nodes
Node *ConvD2INode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if( in(1)->Opcode() == Op_RoundDouble )
    set_req(1,in(1)->in(1));
  return NULL;
}
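// Illustrative note (assumed rationale, not from the sources): RoundDouble
// exists to truncate x87-style extended-precision intermediates down to a
// 64-bit double; the transform above assumes the integral result of the
// conversion is unaffected by that extra precision, so the rounding node
// can be skipped when the value is only consumed as an int.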

//------------------------------Identity---------------------------------------
// Int's can be converted to doubles with no loss of bits.  Hence
// converting an integer to a double and back to an integer is a NOP.
Node *ConvD2INode::Identity(PhaseTransform *phase) {
  return (in(1)->Opcode() == Op_ConvI2D) ? in(1)->in(1) : this;
}

//=============================================================================
//------------------------------Value------------------------------------------
const Type *ConvD2LNode::Value( PhaseTransform *phase ) const {
  const Type *t = phase->type( in(1) );
  if( t == Type::TOP ) return Type::TOP;
  if( t == Type::DOUBLE ) return TypeLong::LONG;
  const TypeD *td = t->is_double_constant();
  return TypeLong::make( SharedRuntime::d2l( td->getd() ) );
}

//------------------------------Identity---------------------------------------
Node *ConvD2LNode::Identity(PhaseTransform *phase) {
  // Remove ConvD2L->ConvL2D->ConvD2L sequences.
  if( in(1)       ->Opcode() == Op_ConvL2D &&
      in(1)->in(1)->Opcode() == Op_ConvD2L )
    return in(1)->in(1);
  return this;
}

//------------------------------Ideal------------------------------------------
// If converting to an int type, skip any rounding nodes
Node *ConvD2LNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if( in(1)->Opcode() == Op_RoundDouble )
    set_req(1,in(1)->in(1));
  return NULL;
}

//=============================================================================
//------------------------------Value------------------------------------------
const Type *ConvF2DNode::Value( PhaseTransform *phase ) const {
  const Type *t = phase->type( in(1) );
  if( t == Type::TOP ) return Type::TOP;
  if( t == Type::FLOAT ) return Type::DOUBLE;
  const TypeF *tf = t->is_float_constant();
  return TypeD::make( (double)tf->getf() );
}

//=============================================================================
//------------------------------Value------------------------------------------
const Type *ConvF2INode::Value( PhaseTransform *phase ) const {
  const Type *t = phase->type( in(1) );
  if( t == Type::TOP ) return Type::TOP;
  if( t == Type::FLOAT ) return TypeInt::INT;
  const TypeF *tf = t->is_float_constant();
  return TypeInt::make( SharedRuntime::f2i( tf->getf() ) );
}

//------------------------------Identity---------------------------------------
Node *ConvF2INode::Identity(PhaseTransform *phase) {
  // Remove ConvF2I->ConvI2F->ConvF2I sequences.
  if( in(1)       ->Opcode() == Op_ConvI2F &&
      in(1)->in(1)->Opcode() == Op_ConvF2I )
    return in(1)->in(1);
  return this;
}

//------------------------------Ideal------------------------------------------
// If converting to an int type, skip any rounding nodes
Node *ConvF2INode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if( in(1)->Opcode() == Op_RoundFloat )
    set_req(1,in(1)->in(1));
  return NULL;
}

//=============================================================================
//------------------------------Value------------------------------------------
const Type *ConvF2LNode::Value( PhaseTransform *phase ) const {
  const Type *t = phase->type( in(1) );
  if( t == Type::TOP ) return Type::TOP;
  if( t == Type::FLOAT ) return TypeLong::LONG;
  const TypeF *tf = t->is_float_constant();
  return TypeLong::make( SharedRuntime::f2l( tf->getf() ) );
}

//------------------------------Identity---------------------------------------
Node *ConvF2LNode::Identity(PhaseTransform *phase) {
  // Remove ConvF2L->ConvL2F->ConvF2L sequences.
  if( in(1)       ->Opcode() == Op_ConvL2F &&
      in(1)->in(1)->Opcode() == Op_ConvF2L )
    return in(1)->in(1);
  return this;
}

//------------------------------Ideal------------------------------------------
// If converting to an int type, skip any rounding nodes
Node *ConvF2LNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if( in(1)->Opcode() == Op_RoundFloat )
    set_req(1,in(1)->in(1));
  return NULL;
}

//=============================================================================
//------------------------------Value------------------------------------------
const Type *ConvI2DNode::Value( PhaseTransform *phase ) const {
  const Type *t = phase->type( in(1) );
  if( t == Type::TOP ) return Type::TOP;
  const TypeInt *ti = t->is_int();
  if( ti->is_con() ) return TypeD::make( (double)ti->get_con() );
  return bottom_type();
}

//=============================================================================
//------------------------------Value------------------------------------------
const Type *ConvI2FNode::Value( PhaseTransform *phase ) const {
  const Type *t = phase->type( in(1) );
  if( t == Type::TOP ) return Type::TOP;
  const TypeInt *ti = t->is_int();
  if( ti->is_con() ) return TypeF::make( (float)ti->get_con() );
  return bottom_type();
}

//------------------------------Identity---------------------------------------
Node *ConvI2FNode::Identity(PhaseTransform *phase) {
  // Remove ConvI2F->ConvF2I->ConvI2F sequences.
  if( in(1)       ->Opcode() == Op_ConvF2I &&
      in(1)->in(1)->Opcode() == Op_ConvI2F )
    return in(1)->in(1);
  return this;
}

//=============================================================================
//------------------------------Value------------------------------------------
const Type *ConvI2LNode::Value( PhaseTransform *phase ) const {
  const Type *t = phase->type( in(1) );
  if( t == Type::TOP ) return Type::TOP;
  const TypeInt *ti = t->is_int();
  const Type* tl = TypeLong::make(ti->_lo, ti->_hi, ti->_widen);
  // Join my declared type against my incoming type.
  tl = tl->filter(_type);
  return tl;
}

#ifdef _LP64
static inline bool long_ranges_overlap(jlong lo1, jlong hi1,
                                       jlong lo2, jlong hi2) {
  // Two ranges overlap iff one range's low point falls in the other range.
  return (lo2 <= lo1 && lo1 <= hi2) || (lo1 <= lo2 && lo2 <= hi1);
}
#endif
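// Illustrative example (not in the original file): [0,10] and [5,20]
// overlap because 5, the low point of the second range, lies inside the
// first; [0,10] and [11,20] do not, since neither low point falls in the
// other range.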
||||
|
||||
//------------------------------Ideal------------------------------------------
|
||||
Node *ConvI2LNode::Ideal(PhaseGVN *phase, bool can_reshape) {
|
||||
const TypeLong* this_type = this->type()->is_long();
|
||||
Node* this_changed = NULL;
|
||||
|
||||
// If _major_progress, then more loop optimizations follow. Do NOT
|
||||
// remove this node's type assertion until no more loop ops can happen.
|
||||
// The progress bit is set in the major loop optimizations THEN comes the
|
||||
// call to IterGVN and any chance of hitting this code. Cf. Opaque1Node.
|
||||
if (can_reshape && !phase->C->major_progress()) {
|
||||
const TypeInt* in_type = phase->type(in(1))->isa_int();
|
||||
if (in_type != NULL && this_type != NULL &&
|
||||
(in_type->_lo != this_type->_lo ||
|
||||
in_type->_hi != this_type->_hi)) {
|
||||
// Although this WORSENS the type, it increases GVN opportunities,
|
||||
// because I2L nodes with the same input will common up, regardless
|
||||
// of slightly differing type assertions. Such slight differences
|
||||
// arise routinely as a result of loop unrolling, so this is a
|
||||
// post-unrolling graph cleanup. Choose a type which depends only
|
||||
// on my input. (Exception: Keep a range assertion of >=0 or <0.)
|
||||
jlong lo1 = this_type->_lo;
|
||||
jlong hi1 = this_type->_hi;
|
||||
int w1 = this_type->_widen;
|
||||
if (lo1 != (jint)lo1 ||
|
||||
hi1 != (jint)hi1 ||
|
||||
lo1 > hi1) {
|
||||
// Overflow leads to wraparound, wraparound leads to range saturation.
|
||||
lo1 = min_jint; hi1 = max_jint;
|
||||
} else if (lo1 >= 0) {
|
||||
// Keep a range assertion of >=0.
|
||||
lo1 = 0; hi1 = max_jint;
|
||||
} else if (hi1 < 0) {
|
||||
// Keep a range assertion of <0.
|
||||
lo1 = min_jint; hi1 = -1;
|
||||
} else {
|
||||
lo1 = min_jint; hi1 = max_jint;
|
||||
}
|
||||
const TypeLong* wtype = TypeLong::make(MAX2((jlong)in_type->_lo, lo1),
|
||||
MIN2((jlong)in_type->_hi, hi1),
|
||||
MAX2((int)in_type->_widen, w1));
|
||||
if (wtype != type()) {
|
||||
set_type(wtype);
|
||||
// Note: this_type still has old type value, for the logic below.
|
||||
        this_changed = this;
      }
    }
  }

#ifdef _LP64
  // Convert ConvI2L(AddI(x, y)) to AddL(ConvI2L(x), ConvI2L(y)),
  // but only if x and y have subranges that cannot cause 32-bit overflow,
  // under the assumption that x+y is in my own subrange this->type().

  // This assumption is based on a constraint (i.e., type assertion)
  // established in Parse::array_addressing or perhaps elsewhere.
  // This constraint has been adjoined to the "natural" type of
  // the incoming argument in(0). We know (because of runtime
  // checks) that the result value I2L(x+y) is in the joined range.
  // Hence we can restrict the incoming terms (x, y) to values such
  // that their sum also lands in that range.

  // This optimization is useful only on 64-bit systems, where we hope
  // the addition will end up subsumed in an addressing mode.
  // It is necessary to do this when optimizing an unrolled array
  // copy loop such as x[i++] = y[i++].

  // On 32-bit systems, it's better to perform as much 32-bit math as
  // possible before the I2L conversion, because 32-bit math is cheaper.
  // There's no common reason to "leak" a constant offset through the I2L.
  // Addressing arithmetic will not absorb it as part of a 64-bit AddL.

  Node* z = in(1);
  int op = z->Opcode();
  if (op == Op_AddI || op == Op_SubI) {
    Node* x = z->in(1);
    Node* y = z->in(2);
    assert (x != z && y != z, "dead loop in ConvI2LNode::Ideal");
    if (phase->type(x) == Type::TOP) return this_changed;
    if (phase->type(y) == Type::TOP) return this_changed;
    const TypeInt* tx = phase->type(x)->is_int();
    const TypeInt* ty = phase->type(y)->is_int();
    const TypeLong* tz = this_type;
    jlong xlo = tx->_lo;
    jlong xhi = tx->_hi;
    jlong ylo = ty->_lo;
    jlong yhi = ty->_hi;
    jlong zlo = tz->_lo;
    jlong zhi = tz->_hi;
    jlong vbit = CONST64(1) << BitsPerInt;
    int widen = MAX2(tx->_widen, ty->_widen);
    if (op == Op_SubI) {
      jlong ylo0 = ylo;
      ylo = -yhi;
      yhi = -ylo0;
    }
    // See if x+y can cause positive overflow into z+2**32
    if (long_ranges_overlap(xlo+ylo, xhi+yhi, zlo+vbit, zhi+vbit)) {
      return this_changed;
    }
    // See if x+y can cause negative overflow into z-2**32
    if (long_ranges_overlap(xlo+ylo, xhi+yhi, zlo-vbit, zhi-vbit)) {
      return this_changed;
    }
    // Now it's always safe to assume x+y does not overflow.
    // This is true even if some pairs x,y might cause overflow, as long
    // as that overflow value cannot fall into [zlo,zhi].

    // Confident that the arithmetic is "as if infinite precision",
    // we can now use z's range to put constraints on those of x and y.
    // The "natural" range of x [xlo,xhi] can perhaps be narrowed to a
    // more "restricted" range by intersecting [xlo,xhi] with the
    // range obtained by subtracting y's range from the asserted range
    // of the I2L conversion. Here's the interval arithmetic algebra:
    //   x == z-y == [zlo,zhi]-[ylo,yhi] == [zlo,zhi]+[-yhi,-ylo]
    //   => x in [zlo-yhi, zhi-ylo]
    //   => x in [zlo-yhi, zhi-ylo] INTERSECT [xlo,xhi]
    //   => x in [xlo MAX zlo-yhi, xhi MIN zhi-ylo]
    jlong rxlo = MAX2(xlo, zlo - yhi);
    jlong rxhi = MIN2(xhi, zhi - ylo);
    // And similarly, x changing place with y:
    jlong rylo = MAX2(ylo, zlo - xhi);
    jlong ryhi = MIN2(yhi, zhi - xlo);
    if (rxlo > rxhi || rylo > ryhi) {
      return this_changed; // x or y is dying; don't mess w/ it
    }
    if (op == Op_SubI) {
      jlong rylo0 = rylo;
      rylo = -ryhi;
      ryhi = -rylo0;
    }

    Node* cx = phase->transform( new (phase->C) ConvI2LNode(x, TypeLong::make(rxlo, rxhi, widen)) );
    Node* cy = phase->transform( new (phase->C) ConvI2LNode(y, TypeLong::make(rylo, ryhi, widen)) );
    switch (op) {
      case Op_AddI: return new (phase->C) AddLNode(cx, cy);
      case Op_SubI: return new (phase->C) SubLNode(cx, cy);
      default:      ShouldNotReachHere();
    }
  }
#endif //_LP64

  return this_changed;
}
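The transformation above leans on interval arithmetic: if neither overflow window overlaps z's asserted range, x+y behaves as if computed in infinite precision, and z's range can be pushed back into x and y. A minimal standalone C++ sketch of that check (not HotSpot code; it assumes long_ranges_overlap(lo1, hi1, lo2, hi2) means max(lo1, lo2) <= min(hi1, hi2), and the ranges are invented for illustration):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
      // Hypothetical profile: z = I2L(x+y) asserted in [0, 99] (an array
      // index), with x in [0, 1000] and y in [-5, 5].
      int64_t xlo = 0, xhi = 1000, ylo = -5, yhi = 5, zlo = 0, zhi = 99;
      const int64_t vbit = int64_t(1) << 32;  // 2**32
      auto overlap = [](int64_t lo1, int64_t hi1, int64_t lo2, int64_t hi2) {
        return std::max(lo1, lo2) <= std::min(hi1, hi2);
      };
      // Neither the +2**32 nor the -2**32 overflow window intersects z:
      if (!overlap(xlo + ylo, xhi + yhi, zlo + vbit, zhi + vbit) &&
          !overlap(xlo + ylo, xhi + yhi, zlo - vbit, zhi - vbit)) {
        int64_t rxlo = std::max(xlo, zlo - yhi);  // x in [xlo MAX zlo-yhi,
        int64_t rxhi = std::min(xhi, zhi - ylo);  //      xhi MIN zhi-ylo]
        std::printf("x narrowed to [%lld, %lld]\n",
                    (long long)rxlo, (long long)rxhi);  // prints [0, 104]
      }
      return 0;
    }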

//=============================================================================
//------------------------------Value------------------------------------------
const Type *ConvL2DNode::Value( PhaseTransform *phase ) const {
  const Type *t = phase->type( in(1) );
  if( t == Type::TOP ) return Type::TOP;
  const TypeLong *tl = t->is_long();
  if( tl->is_con() ) return TypeD::make( (double)tl->get_con() );
  return bottom_type();
}

//=============================================================================
//------------------------------Value------------------------------------------
const Type *ConvL2FNode::Value( PhaseTransform *phase ) const {
  const Type *t = phase->type( in(1) );
  if( t == Type::TOP ) return Type::TOP;
  const TypeLong *tl = t->is_long();
  if( tl->is_con() ) return TypeF::make( (float)tl->get_con() );
  return bottom_type();
}

//=============================================================================
//----------------------------Identity-----------------------------------------
Node *ConvL2INode::Identity( PhaseTransform *phase ) {
  // Convert L2I(I2L(x)) => x
  if (in(1)->Opcode() == Op_ConvI2L) return in(1)->in(1);
  return this;
}

//------------------------------Value------------------------------------------
const Type *ConvL2INode::Value( PhaseTransform *phase ) const {
  const Type *t = phase->type( in(1) );
  if( t == Type::TOP ) return Type::TOP;
  const TypeLong *tl = t->is_long();
  if (tl->is_con())
    // Easy case.
    return TypeInt::make((jint)tl->get_con());
  return bottom_type();
}

//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.
// Blow off prior masking to int
Node *ConvL2INode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node *andl = in(1);
  uint andl_op = andl->Opcode();
  if( andl_op == Op_AndL ) {
    // Blow off prior masking to int
    if( phase->type(andl->in(2)) == TypeLong::make( 0xFFFFFFFF ) ) {
      set_req(1,andl->in(1));
      return this;
    }
  }

  // Swap with a prior add: convL2I(addL(x,y)) ==> addI(convL2I(x),convL2I(y))
  // This replaces an 'AddL' with an 'AddI'.
  if( andl_op == Op_AddL ) {
    // Don't do this for nodes which have more than one user since
    // we'll end up computing the long add anyway.
    if (andl->outcnt() > 1) return NULL;

    Node* x = andl->in(1);
    Node* y = andl->in(2);
    assert( x != andl && y != andl, "dead loop in ConvL2INode::Ideal" );
    if (phase->type(x) == Type::TOP) return NULL;
    if (phase->type(y) == Type::TOP) return NULL;
    Node *add1 = phase->transform(new (phase->C) ConvL2INode(x));
    Node *add2 = phase->transform(new (phase->C) ConvL2INode(y));
    return new (phase->C) AddINode(add1,add2);
  }

  // Disable optimization: LoadL->ConvL2I ==> LoadI.
  // It causes problems (sizes of Load and Store nodes do not match)
  // in object initialization code and Escape Analysis.
  return NULL;
}
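Both rewrites in ConvL2INode::Ideal rest on the same identity: truncation to the low 32 bits commutes with masking by 0xFFFFFFFF and with addition. A small standalone check (not HotSpot code; l2i models ConvL2I as a two's-complement reinterpretation of the low word):

    #include <cassert>
    #include <cstdint>

    static int32_t l2i(int64_t x) { return (int32_t)(uint32_t)x; }

    int main() {
      int64_t x =  0x123456789abcdef0LL;
      int64_t y = -0x0fedcba987654321LL;
      // Masking to the low 32 bits before truncating changes nothing:
      assert(l2i(x & 0xFFFFFFFFLL) == l2i(x));
      // convL2I(addL(x,y)) equals addI(convL2I(x), convL2I(y)) under
      // 32-bit wraparound addition:
      assert(l2i(x + y) == (int32_t)((uint32_t)l2i(x) + (uint32_t)l2i(y)));
      return 0;
    }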

//=============================================================================
//------------------------------Identity---------------------------------------
// Remove redundant roundings
Node *RoundFloatNode::Identity( PhaseTransform *phase ) {
  assert(Matcher::strict_fp_requires_explicit_rounding, "should only generate for Intel");
  // Do not round constants
  if (phase->type(in(1))->base() == Type::FloatCon) return in(1);
  int op = in(1)->Opcode();
  // Redundant rounding
  if( op == Op_RoundFloat ) return in(1);
  // Already rounded
  if( op == Op_Parm ) return in(1);
  if( op == Op_LoadF ) return in(1);
  return this;
}

//------------------------------Value------------------------------------------
const Type *RoundFloatNode::Value( PhaseTransform *phase ) const {
  return phase->type( in(1) );
}

//=============================================================================
//------------------------------Identity---------------------------------------
// Remove redundant roundings. Incoming arguments are already rounded.
Node *RoundDoubleNode::Identity( PhaseTransform *phase ) {
  assert(Matcher::strict_fp_requires_explicit_rounding, "should only generate for Intel");
  // Do not round constants
  if (phase->type(in(1))->base() == Type::DoubleCon) return in(1);
  int op = in(1)->Opcode();
  // Redundant rounding
  if( op == Op_RoundDouble ) return in(1);
  // Already rounded
  if( op == Op_Parm ) return in(1);
  if( op == Op_LoadD ) return in(1);
  if( op == Op_ConvF2D ) return in(1);
  if( op == Op_ConvI2D ) return in(1);
  return this;
}

//------------------------------Value------------------------------------------
const Type *RoundDoubleNode::Value( PhaseTransform *phase ) const {
  return phase->type( in(1) );
}

hotspot/src/share/vm/opto/convertnode.hpp (new file, 215 lines)
@ -0,0 +1,215 @@
/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_CONVERTNODE_HPP
#define SHARE_VM_OPTO_CONVERTNODE_HPP

#include "opto/node.hpp"
#include "opto/opcodes.hpp"


//------------------------------Conv2BNode-------------------------------------
// Convert int/pointer to a Boolean. Map zero to zero, all else to 1.
class Conv2BNode : public Node {
public:
  Conv2BNode( Node *i ) : Node(0,i) {}
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return TypeInt::BOOL; }
  virtual Node *Identity( PhaseTransform *phase );
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint ideal_reg() const { return Op_RegI; }
};

// The conversion operations are all Alpha sorted. Please keep it that way!
//------------------------------ConvD2FNode------------------------------------
// Convert double to float
class ConvD2FNode : public Node {
public:
  ConvD2FNode( Node *in1 ) : Node(0,in1) {}
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return Type::FLOAT; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual uint ideal_reg() const { return Op_RegF; }
};

//------------------------------ConvD2INode------------------------------------
// Convert Double to Integer
class ConvD2INode : public Node {
public:
  ConvD2INode( Node *in1 ) : Node(0,in1) {}
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return TypeInt::INT; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return Op_RegI; }
};

//------------------------------ConvD2LNode------------------------------------
// Convert Double to Long
class ConvD2LNode : public Node {
public:
  ConvD2LNode( Node *dbl ) : Node(0,dbl) {}
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return TypeLong::LONG; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return Op_RegL; }
};

//------------------------------ConvF2DNode------------------------------------
// Convert Float to a Double.
class ConvF2DNode : public Node {
public:
  ConvF2DNode( Node *in1 ) : Node(0,in1) {}
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return Type::DOUBLE; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint ideal_reg() const { return Op_RegD; }
};

//------------------------------ConvF2INode------------------------------------
// Convert float to integer
class ConvF2INode : public Node {
public:
  ConvF2INode( Node *in1 ) : Node(0,in1) {}
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return TypeInt::INT; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return Op_RegI; }
};

//------------------------------ConvF2LNode------------------------------------
// Convert float to long
class ConvF2LNode : public Node {
public:
  ConvF2LNode( Node *in1 ) : Node(0,in1) {}
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return TypeLong::LONG; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return Op_RegL; }
};

//------------------------------ConvI2DNode------------------------------------
// Convert Integer to Double
class ConvI2DNode : public Node {
public:
  ConvI2DNode( Node *in1 ) : Node(0,in1) {}
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return Type::DOUBLE; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint ideal_reg() const { return Op_RegD; }
};

//------------------------------ConvI2FNode------------------------------------
// Convert Integer to Float
class ConvI2FNode : public Node {
public:
  ConvI2FNode( Node *in1 ) : Node(0,in1) {}
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return Type::FLOAT; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual uint ideal_reg() const { return Op_RegF; }
};

//------------------------------ConvI2LNode------------------------------------
// Convert integer to long
class ConvI2LNode : public TypeNode {
public:
  ConvI2LNode(Node *in1, const TypeLong* t = TypeLong::INT)
    : TypeNode(t, 2)
  { init_req(1, in1); }
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return Op_RegL; }
};

//------------------------------ConvL2DNode------------------------------------
// Convert Long to Double
class ConvL2DNode : public Node {
public:
  ConvL2DNode( Node *in1 ) : Node(0,in1) {}
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return Type::DOUBLE; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint ideal_reg() const { return Op_RegD; }
};

//------------------------------ConvL2FNode------------------------------------
// Convert Long to Float
class ConvL2FNode : public Node {
public:
  ConvL2FNode( Node *in1 ) : Node(0,in1) {}
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return Type::FLOAT; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint ideal_reg() const { return Op_RegF; }
};

//------------------------------ConvL2INode------------------------------------
// Convert long to integer
class ConvL2INode : public Node {
public:
  ConvL2INode( Node *in1 ) : Node(0,in1) {}
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return TypeInt::INT; }
  virtual Node *Identity( PhaseTransform *phase );
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return Op_RegI; }
};

//-----------------------------RoundFloatNode----------------------------------
class RoundFloatNode: public Node {
public:
  RoundFloatNode(Node* c, Node *in1): Node(c, in1) {}
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return Type::FLOAT; }
  virtual uint ideal_reg() const { return Op_RegF; }
  virtual Node *Identity( PhaseTransform *phase );
  virtual const Type *Value( PhaseTransform *phase ) const;
};


//-----------------------------RoundDoubleNode---------------------------------
class RoundDoubleNode: public Node {
public:
  RoundDoubleNode(Node* c, Node *in1): Node(c, in1) {}
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return Type::DOUBLE; }
  virtual uint ideal_reg() const { return Op_RegD; }
  virtual Node *Identity( PhaseTransform *phase );
  virtual const Type *Value( PhaseTransform *phase ) const;
};


#endif // SHARE_VM_OPTO_CONVERTNODE_HPP
hotspot/src/share/vm/opto/countbitsnode.cpp (new file, 119 lines)
@ -0,0 +1,119 @@
/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "opto/countbitsnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/type.hpp"

//------------------------------Value------------------------------------------
const Type* CountLeadingZerosINode::Value(PhaseTransform* phase) const {
  const Type* t = phase->type(in(1));
  if (t == Type::TOP) return Type::TOP;
  const TypeInt* ti = t->isa_int();
  if (ti && ti->is_con()) {
    jint i = ti->get_con();
    // HD, Figure 5-6
    if (i == 0)
      return TypeInt::make(BitsPerInt);
    int n = 1;
    unsigned int x = i;
    if (x >> 16 == 0) { n += 16; x <<= 16; }
    if (x >> 24 == 0) { n +=  8; x <<=  8; }
    if (x >> 28 == 0) { n +=  4; x <<=  4; }
    if (x >> 30 == 0) { n +=  2; x <<=  2; }
    n -= x >> 31;
    return TypeInt::make(n);
  }
  return TypeInt::INT;
}
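The constant-folding branch is the classic nlz routine from Hacker's Delight, figure 5-6. Transcribed as a standalone function (not HotSpot code), it can be tested directly:

    #include <cassert>
    #include <cstdint>

    int nlz(uint32_t x) {            // number of leading zeros, 0..32
      if (x == 0) return 32;
      int n = 1;
      if (x >> 16 == 0) { n += 16; x <<= 16; }
      if (x >> 24 == 0) { n +=  8; x <<=  8; }
      if (x >> 28 == 0) { n +=  4; x <<=  4; }
      if (x >> 30 == 0) { n +=  2; x <<=  2; }
      n -= x >> 31;
      return n;
    }

    int main() {
      assert(nlz(0) == 32);
      assert(nlz(1) == 31);
      assert(nlz(0x0000ffffu) == 16);
      assert(nlz(0x80000000u) == 0);
      return 0;
    }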

//------------------------------Value------------------------------------------
const Type* CountLeadingZerosLNode::Value(PhaseTransform* phase) const {
  const Type* t = phase->type(in(1));
  if (t == Type::TOP) return Type::TOP;
  const TypeLong* tl = t->isa_long();
  if (tl && tl->is_con()) {
    jlong l = tl->get_con();
    // HD, Figure 5-6
    if (l == 0)
      return TypeInt::make(BitsPerLong);
    int n = 1;
    unsigned int x = (((julong) l) >> 32);
    if (x == 0) { n += 32; x = (int) l; }
    if (x >> 16 == 0) { n += 16; x <<= 16; }
    if (x >> 24 == 0) { n +=  8; x <<=  8; }
    if (x >> 28 == 0) { n +=  4; x <<=  4; }
    if (x >> 30 == 0) { n +=  2; x <<=  2; }
    n -= x >> 31;
    return TypeInt::make(n);
  }
  return TypeInt::INT;
}

//------------------------------Value------------------------------------------
const Type* CountTrailingZerosINode::Value(PhaseTransform* phase) const {
  const Type* t = phase->type(in(1));
  if (t == Type::TOP) return Type::TOP;
  const TypeInt* ti = t->isa_int();
  if (ti && ti->is_con()) {
    jint i = ti->get_con();
    // HD, Figure 5-14
    int y;
    if (i == 0)
      return TypeInt::make(BitsPerInt);
    int n = 31;
    y = i << 16; if (y != 0) { n = n - 16; i = y; }
    y = i <<  8; if (y != 0) { n = n -  8; i = y; }
    y = i <<  4; if (y != 0) { n = n -  4; i = y; }
    y = i <<  2; if (y != 0) { n = n -  2; i = y; }
    y = i <<  1; if (y != 0) { n = n -  1; }
    return TypeInt::make(n);
  }
  return TypeInt::INT;
}
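The trailing-zeros fold is the companion routine, Hacker's Delight figure 5-14. Again as a standalone transcription (not HotSpot code):

    #include <cassert>
    #include <cstdint>

    int ntz(uint32_t i) {            // number of trailing zeros, 0..32
      if (i == 0) return 32;
      int n = 31;
      uint32_t y;
      y = i << 16; if (y != 0) { n -= 16; i = y; }
      y = i <<  8; if (y != 0) { n -=  8; i = y; }
      y = i <<  4; if (y != 0) { n -=  4; i = y; }
      y = i <<  2; if (y != 0) { n -=  2; i = y; }
      y = i <<  1; if (y != 0) { n -=  1; }
      return n;
    }

    int main() {
      assert(ntz(0) == 32);
      assert(ntz(1) == 0);
      assert(ntz(48) == 4);           // 48 == 0b110000
      assert(ntz(0x80000000u) == 31);
      return 0;
    }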

//------------------------------Value------------------------------------------
const Type* CountTrailingZerosLNode::Value(PhaseTransform* phase) const {
  const Type* t = phase->type(in(1));
  if (t == Type::TOP) return Type::TOP;
  const TypeLong* tl = t->isa_long();
  if (tl && tl->is_con()) {
    jlong l = tl->get_con();
    // HD, Figure 5-14
    int x, y;
    if (l == 0)
      return TypeInt::make(BitsPerLong);
    int n = 63;
    y = (int) l; if (y != 0) { n = n - 32; x = y; } else x = (((julong) l) >> 32);
    y = x << 16; if (y != 0) { n = n - 16; x = y; }
    y = x <<  8; if (y != 0) { n = n -  8; x = y; }
    y = x <<  4; if (y != 0) { n = n -  4; x = y; }
    y = x <<  2; if (y != 0) { n = n -  2; x = y; }
    y = x <<  1; if (y != 0) { n = n -  1; }
    return TypeInt::make(n);
  }
  return TypeInt::INT;
}
hotspot/src/share/vm/opto/countbitsnode.hpp (new file, 94 lines)
@ -0,0 +1,94 @@
/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_COUNTBITSNODE_HPP
#define SHARE_VM_OPTO_COUNTBITSNODE_HPP

#include "opto/node.hpp"
#include "opto/opcodes.hpp"

class PhaseTransform;

//---------- CountBitsNode -----------------------------------------------------
class CountBitsNode : public Node {
public:
  CountBitsNode(Node* in1) : Node(0, in1) {}
  const Type* bottom_type() const { return TypeInt::INT; }
  virtual uint ideal_reg() const { return Op_RegI; }
};

//---------- CountLeadingZerosINode --------------------------------------------
// Count leading zeros (0-bit count starting from MSB) of an integer.
class CountLeadingZerosINode : public CountBitsNode {
public:
  CountLeadingZerosINode(Node* in1) : CountBitsNode(in1) {}
  virtual int Opcode() const;
  virtual const Type* Value(PhaseTransform* phase) const;
};

//---------- CountLeadingZerosLNode --------------------------------------------
// Count leading zeros (0-bit count starting from MSB) of a long.
class CountLeadingZerosLNode : public CountBitsNode {
public:
  CountLeadingZerosLNode(Node* in1) : CountBitsNode(in1) {}
  virtual int Opcode() const;
  virtual const Type* Value(PhaseTransform* phase) const;
};

//---------- CountTrailingZerosINode -------------------------------------------
// Count trailing zeros (0-bit count starting from LSB) of an integer.
class CountTrailingZerosINode : public CountBitsNode {
public:
  CountTrailingZerosINode(Node* in1) : CountBitsNode(in1) {}
  virtual int Opcode() const;
  virtual const Type* Value(PhaseTransform* phase) const;
};

//---------- CountTrailingZerosLNode -------------------------------------------
// Count trailing zeros (0-bit count starting from LSB) of a long.
class CountTrailingZerosLNode : public CountBitsNode {
public:
  CountTrailingZerosLNode(Node* in1) : CountBitsNode(in1) {}
  virtual int Opcode() const;
  virtual const Type* Value(PhaseTransform* phase) const;
};

//---------- PopCountINode -----------------------------------------------------
// Population count (bit count) of an integer.
class PopCountINode : public CountBitsNode {
public:
  PopCountINode(Node* in1) : CountBitsNode(in1) {}
  virtual int Opcode() const;
};

//---------- PopCountLNode -----------------------------------------------------
// Population count (bit count) of a long.
class PopCountLNode : public CountBitsNode {
public:
  PopCountLNode(Node* in1) : CountBitsNode(in1) {}
  virtual int Opcode() const;
};


#endif // SHARE_VM_OPTO_COUNTBITSNODE_HPP
@ -26,8 +26,10 @@
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
#include "opto/divnode.hpp"
#include "opto/machnode.hpp"
#include "opto/movenode.hpp"
#include "opto/matcher.hpp"
#include "opto/mulnode.hpp"
#include "opto/phaseX.hpp"
|
@ -31,6 +31,7 @@
#include "interpreter/linkResolver.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/parse.hpp"
@ -249,8 +250,7 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
      }
      CallGenerator* miss_cg;
      Deoptimization::DeoptReason reason = morphism == 2 ?
                                  Deoptimization::Reason_bimorphic :
          (speculative_receiver_type == NULL ? Deoptimization::Reason_class_check : Deoptimization::Reason_speculate_class_check);
                                  Deoptimization::Reason_bimorphic : Deoptimization::reason_class_check(speculative_receiver_type != NULL);
      if ((morphism == 1 || (morphism == 2 && next_hit_cg != NULL)) &&
          !too_many_traps(jvms->method(), jvms->bci(), reason)
         ) {
@ -631,13 +631,7 @@ void Parse::do_call() {
    }
    BasicType ct = ctype->basic_type();
    if (ct == T_OBJECT || ct == T_ARRAY) {
      ciKlass* better_type = method()->return_profiled_type(bci());
      if (UseTypeSpeculation && better_type != NULL) {
        // If profiling reports a single type for the return value,
        // feed it to the type system so it can propagate it as a
        // speculative type
        record_profile_for_speculation(stack(sp()-1), better_type);
      }
      record_profiled_return_for_speculation();
    }
  }

|
@ -33,6 +33,7 @@
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/phaseX.hpp"
#include "opto/movenode.hpp"
#include "opto/rootnode.hpp"

ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
|
@ -27,7 +27,7 @@
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
#include "opto/locknode.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
|
@ -30,10 +30,14 @@
#include "memory/barrierSet.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/locknode.hpp"
#include "opto/machnode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
@ -612,10 +616,10 @@ void GraphKit::builtin_throw(Deoptimization::DeoptReason reason, Node* arg) {
  // Usual case:  Bail to interpreter.
  // Reserve the right to recompile if we haven't seen anything yet.

  assert(!Deoptimization::reason_is_speculate(reason), "unsupported");
  ciMethod* m = Deoptimization::reason_is_speculate(reason) ? C->method() : NULL;
  Deoptimization::DeoptAction action = Deoptimization::Action_maybe_recompile;
  if (treat_throw_as_hot
      && (method()->method_data()->trap_recompiled_at(bci(), NULL)
      && (method()->method_data()->trap_recompiled_at(bci(), m)
          || C->too_many_traps(reason))) {
    // We cannot afford to take more traps here.  Suffer in the interpreter.
    if (C->log() != NULL)
@ -1181,7 +1185,8 @@ extern int explicit_null_checks_inserted,
Node* GraphKit::null_check_common(Node* value, BasicType type,
                                  // optional arguments for variations:
                                  bool assert_null,
                                  Node* *null_control) {
                                  Node* *null_control,
                                  bool speculative) {
  assert(!assert_null || null_control == NULL, "not both at once");
  if (stopped()) return top();
  if (!GenerateCompilerNullChecks && !assert_null && null_control == NULL) {
@ -1291,13 +1296,13 @@ Node* GraphKit::null_check_common(Node* value, BasicType type,
    // Branch to failure if null
    float ok_prob = PROB_MAX;  // a priori estimate: nulls never happen
    Deoptimization::DeoptReason reason;
    if (assert_null)
    if (assert_null) {
      reason = Deoptimization::Reason_null_assert;
    else if (type == T_OBJECT)
      reason = Deoptimization::Reason_null_check;
    else
    } else if (type == T_OBJECT) {
      reason = Deoptimization::reason_null_check(speculative);
    } else {
      reason = Deoptimization::Reason_div0_check;

    }
    // %%% Since Reason_unhandled is not recorded on a per-bytecode basis,
    // ciMethodData::has_trap_at will return a conservative -1 if any
    // must-be-null assertion has failed.  This could cause performance
@ -2120,21 +2125,36 @@ void GraphKit::round_double_arguments(ciMethod* dest_method) {
 *
 * @param n  node that the type applies to
 * @param exact_kls  type from profiling
 * @param maybe_null did profiling see null?
 *
 * @return node with improved type
 */
Node* GraphKit::record_profile_for_speculation(Node* n, ciKlass* exact_kls) {
Node* GraphKit::record_profile_for_speculation(Node* n, ciKlass* exact_kls, bool maybe_null) {
  const Type* current_type = _gvn.type(n);
  assert(UseTypeSpeculation, "type speculation must be on");

  const TypeOopPtr* speculative = current_type->speculative();
  const TypePtr* speculative = current_type->speculative();

  // Should the klass from the profile be recorded in the speculative type?
  if (current_type->would_improve_type(exact_kls, jvms()->depth())) {
    const TypeKlassPtr* tklass = TypeKlassPtr::make(exact_kls);
    const TypeOopPtr* xtype = tklass->as_instance_type();
    assert(xtype->klass_is_exact(), "Should be exact");
    // Any reason to believe n is not null (from this profiling or a previous one)?
    const TypePtr* ptr = (maybe_null && current_type->speculative_maybe_null()) ? TypePtr::BOTTOM : TypePtr::NOTNULL;
    // record the new speculative type's depth
    speculative = xtype->with_inline_depth(jvms()->depth());
    speculative = xtype->cast_to_ptr_type(ptr->ptr())->is_ptr();
    speculative = speculative->with_inline_depth(jvms()->depth());
  } else if (current_type->would_improve_ptr(maybe_null)) {
    // Profiling reports that null was never seen so we can change the
    // speculative type to non null ptr.
    assert(!maybe_null, "nothing to improve");
    if (speculative == NULL) {
      speculative = TypePtr::NOTNULL;
    } else {
      const TypePtr* ptr = TypePtr::NOTNULL;
      speculative = speculative->cast_to_ptr_type(ptr->ptr())->is_ptr();
    }
  }

  if (speculative != current_type->speculative()) {
@ -2167,7 +2187,15 @@ Node* GraphKit::record_profiled_receiver_for_speculation(Node* n) {
    return n;
  }
  ciKlass* exact_kls = profile_has_unique_klass();
  return record_profile_for_speculation(n, exact_kls);
  bool maybe_null = true;
  if (java_bc() == Bytecodes::_checkcast ||
      java_bc() == Bytecodes::_instanceof ||
      java_bc() == Bytecodes::_aastore) {
    ciProfileData* data = method()->method_data()->bci_to_data(bci());
    bool maybe_null = data == NULL ? true : data->as_BitData()->null_seen();
  }
  return record_profile_for_speculation(n, exact_kls, maybe_null);
  return n;
}

/**
@ -2187,9 +2215,10 @@ void GraphKit::record_profiled_arguments_for_speculation(ciMethod* dest_method,
  for (int j = skip, i = 0; j < nargs && i < TypeProfileArgsLimit; j++) {
    const Type *targ = tf->_domain->field_at(j + TypeFunc::Parms);
    if (targ->basic_type() == T_OBJECT || targ->basic_type() == T_ARRAY) {
      ciKlass* better_type = method()->argument_profiled_type(bci(), i);
      if (better_type != NULL) {
        record_profile_for_speculation(argument(j), better_type);
      bool maybe_null = true;
      ciKlass* better_type = NULL;
      if (method()->argument_profiled_type(bci(), i, better_type, maybe_null)) {
        record_profile_for_speculation(argument(j), better_type, maybe_null);
      }
      i++;
    }
@ -2206,15 +2235,34 @@ void GraphKit::record_profiled_parameters_for_speculation() {
  }
  for (int i = 0, j = 0; i < method()->arg_size() ; i++) {
    if (_gvn.type(local(i))->isa_oopptr()) {
      ciKlass* better_type = method()->parameter_profiled_type(j);
      if (better_type != NULL) {
        record_profile_for_speculation(local(i), better_type);
      bool maybe_null = true;
      ciKlass* better_type = NULL;
      if (method()->parameter_profiled_type(j, better_type, maybe_null)) {
        record_profile_for_speculation(local(i), better_type, maybe_null);
      }
      j++;
    }
  }
}

/**
 * Record profiling data from return value profiling at an invoke with
 * the type system so that it can propagate it (speculation)
 */
void GraphKit::record_profiled_return_for_speculation() {
  if (!UseTypeSpeculation) {
    return;
  }
  bool maybe_null = true;
  ciKlass* better_type = NULL;
  if (method()->return_profiled_type(bci(), better_type, maybe_null)) {
    // If profiling reports a single type for the return value,
    // feed it to the type system so it can propagate it as a
    // speculative type
    record_profile_for_speculation(stack(sp()-1), better_type, maybe_null);
  }
}

void GraphKit::round_double_result(ciMethod* dest_method) {
  // A non-strict method may return a double value which has an extended
  // exponent, but this must not be visible in a caller which is 'strict'
@ -2294,10 +2342,12 @@ Node* GraphKit::dstore_rounding(Node* n) {
// Null check oop.  Set null-path control into Region in slot 3.
// Make a cast-not-nullness use the other not-null control.  Return cast.
Node* GraphKit::null_check_oop(Node* value, Node* *null_control,
                               bool never_see_null, bool safe_for_replace) {
                               bool never_see_null,
                               bool safe_for_replace,
                               bool speculative) {
  // Initial NULL check taken path
  (*null_control) = top();
  Node* cast = null_check_common(value, T_OBJECT, false, null_control);
  Node* cast = null_check_common(value, T_OBJECT, false, null_control, speculative);

  // Generate uncommon_trap:
  if (never_see_null && (*null_control) != top()) {
@ -2308,7 +2358,8 @@ Node* GraphKit::null_check_oop(Node* value, Node* *null_control,
    PreserveJVMState pjvms(this);
    set_control(*null_control);
    replace_in_map(value, null());
    uncommon_trap(Deoptimization::Reason_null_check,
    Deoptimization::DeoptReason reason = Deoptimization::reason_null_check(speculative);
    uncommon_trap(reason,
                  Deoptimization::Action_make_not_entrant);
    (*null_control) = top();    // NULL path is dead
  }
@ -2732,11 +2783,16 @@ Node* GraphKit::type_check_receiver(Node* receiver, ciKlass* klass,
// recompile; the offending check will be recompiled to handle NULLs.
// If we see several offending BCIs, then all checks in the
// method will be recompiled.
bool GraphKit::seems_never_null(Node* obj, ciProfileData* data) {
bool GraphKit::seems_never_null(Node* obj, ciProfileData* data, bool& speculating) {
  speculating = !_gvn.type(obj)->speculative_maybe_null();
  Deoptimization::DeoptReason reason = Deoptimization::reason_null_check(speculating);
  if (UncommonNullCast            // Cutout for this technique
      && obj != null()            // And not the -Xcomp stupid case?
      && !too_many_traps(Deoptimization::Reason_null_check)
      && !too_many_traps(reason)
      ) {
    if (speculating) {
      return true;
    }
    if (data == NULL)
      // Edge case:  no mature data.  Be optimistic here.
      return true;
@ -2746,6 +2802,7 @@ bool GraphKit::seems_never_null(Node* obj, ciProfileData* data) {
           java_bc() == Bytecodes::_aastore, "MDO must collect null_seen bit here");
    return !data->as_BitData()->null_seen();
  }
  speculating = false;
  return false;
}

@ -2758,7 +2815,7 @@ Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
                                             bool safe_for_replace) {
  if (!UseTypeProfile || !TypeProfileCasts) return NULL;

  Deoptimization::DeoptReason reason = spec_klass == NULL ? Deoptimization::Reason_class_check : Deoptimization::Reason_speculate_class_check;
  Deoptimization::DeoptReason reason = Deoptimization::reason_class_check(spec_klass != NULL);

  // Make sure we haven't already deoptimized from this tactic.
  if (too_many_traps(reason))
@ -2811,7 +2868,7 @@ Node* GraphKit::maybe_cast_profiled_obj(Node* obj,
  // type == NULL if profiling tells us this object is always null
  if (type != NULL) {
    Deoptimization::DeoptReason class_reason = Deoptimization::Reason_speculate_class_check;
    Deoptimization::DeoptReason null_reason = Deoptimization::Reason_null_check;
    Deoptimization::DeoptReason null_reason = Deoptimization::Reason_speculate_null_check;
    if (!too_many_traps(null_reason) &&
        !too_many_traps(class_reason)) {
      Node* not_null_obj = NULL;
@ -2819,7 +2876,7 @@ Node* GraphKit::maybe_cast_profiled_obj(Node* obj,
      // there's no need for a null check
      if (!not_null) {
        Node* null_ctl = top();
        not_null_obj = null_check_oop(obj, &null_ctl, true, true);
        not_null_obj = null_check_oop(obj, &null_ctl, true, true, true);
        assert(null_ctl->is_top(), "no null control here");
      } else {
        not_null_obj = obj;
@ -2867,12 +2924,13 @@ Node* GraphKit::gen_instanceof(Node* obj, Node* superklass, bool safe_for_replac
  if (java_bc() == Bytecodes::_instanceof) { // Only for the bytecode
    data = method()->method_data()->bci_to_data(bci());
  }
  bool speculative_not_null = false;
  bool never_see_null = (ProfileDynamicTypes  // aggressive use of profile
                         && seems_never_null(obj, data));
                         && seems_never_null(obj, data, speculative_not_null));

  // Null check; get casted pointer; set region slot 3
  Node* null_ctl = top();
  Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace);
  Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);

  // If not_null_obj is dead, only null-path is taken
  if (stopped()) {            // Doing instance-of on a NULL?
@ -2995,12 +3053,13 @@ Node* GraphKit::gen_checkcast(Node *obj, Node* superklass,
  C->set_has_split_ifs(true); // Has chance for split-if optimization

  // Use null-cast information if it is available
  bool speculative_not_null = false;
  bool never_see_null = ((failure_control == NULL)  // regular case only
                         && seems_never_null(obj, data));
                         && seems_never_null(obj, data, speculative_not_null));

  // Null check; get casted pointer; set region slot 3
  Node* null_ctl = top();
  Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace);
  Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);

  // If not_null_obj is dead, only null-path is taken
  if (stopped()) {            // Doing instance-of on a NULL?
|
@ -351,9 +351,11 @@ class GraphKit : public Phase {
  // Return the value cast to not-null.
  // Be clever about equivalent dominating null checks.
  Node* null_check_common(Node* value, BasicType type,
                          bool assert_null = false, Node* *null_control = NULL);
                          bool assert_null = false,
                          Node* *null_control = NULL,
                          bool speculative = false);
  Node* null_check(Node* value, BasicType type = T_OBJECT) {
    return null_check_common(value, type);
    return null_check_common(value, type, false, NULL, !_gvn.type(value)->speculative_maybe_null());
  }
  Node* null_check_receiver() {
    assert(argument(0)->bottom_type()->isa_ptr(), "must be");
@ -382,10 +384,12 @@ class GraphKit : public Phase {
  // If safe_for_replace, then we can replace the value with the cast
  // in the parsing map (the cast is guaranteed to dominate the map)
  Node* null_check_oop(Node* value, Node* *null_control,
                       bool never_see_null = false, bool safe_for_replace = false);
                       bool never_see_null = false,
                       bool safe_for_replace = false,
                       bool speculative = false);

  // Check the null_seen bit.
  bool seems_never_null(Node* obj, ciProfileData* data);
  bool seems_never_null(Node* obj, ciProfileData* data, bool& speculating);

  // Check for unique class for receiver at call
  ciKlass* profile_has_unique_klass() {
@ -399,10 +403,11 @@ class GraphKit : public Phase {
  }

  // record type from profiling with the type system
  Node* record_profile_for_speculation(Node* n, ciKlass* exact_kls);
  Node* record_profiled_receiver_for_speculation(Node* n);
  Node* record_profile_for_speculation(Node* n, ciKlass* exact_kls, bool maybe_null);
  void record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc);
  void record_profiled_parameters_for_speculation();
  void record_profiled_return_for_speculation();
  Node* record_profiled_receiver_for_speculation(Node* n);

  // Use the type profile to narrow an object type.
  Node* maybe_cast_profiled_receiver(Node* not_null_obj,
|
@ -27,6 +27,7 @@

#include "opto/addnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/castnode.hpp"
#include "opto/connode.hpp"
#include "opto/divnode.hpp"
#include "opto/graphKit.hpp"
|
@ -31,7 +31,6 @@
#include "opto/cfgnode.hpp"
#include "opto/chaitin.hpp"
#include "opto/coalesce.hpp"
#include "opto/connode.hpp"
#include "opto/indexSet.hpp"
#include "opto/machnode.hpp"
#include "opto/memnode.hpp"
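A pattern runs through the doCall.cpp and graphKit.cpp hunks above: each null or class check now reports whether it was reached by speculating on profile data, and picks its deoptimization reason accordingly, so a failed speculation is bookkept separately from a failed ordinary check. A schematic of that selection (not HotSpot code, though it mirrors the Deoptimization::reason_null_check / reason_class_check helpers used above):

    enum DeoptReason {
      Reason_null_check,            // ordinary check failed
      Reason_speculate_null_check,  // speculative assumption failed
      Reason_class_check,
      Reason_speculate_class_check
    };

    // Pick the trap-accounting bucket based on how we got here.
    DeoptReason reason_null_check(bool speculative) {
      return speculative ? Reason_speculate_null_check : Reason_null_check;
    }

    DeoptReason reason_class_check(bool speculative) {
      return speculative ? Reason_speculate_class_check : Reason_class_check;
    }

With separate reasons, too_many_traps() can shut off just the speculation, and keep the regular check, once speculative guesses start failing.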
hotspot/src/share/vm/opto/intrinsicnode.cpp (new file, 82 lines)
@ -0,0 +1,82 @@
/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/memnode.hpp"
#include "opto/phaseX.hpp"

//=============================================================================
// Do not match memory edge.
uint StrIntrinsicNode::match_edge(uint idx) const {
  return idx == 2 || idx == 3;
}

//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.  Strip out
// control copies
Node *StrIntrinsicNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (remove_dead_region(phase, can_reshape)) return this;
  // Don't bother trying to transform a dead node
  if (in(0) && in(0)->is_top()) return NULL;

  if (can_reshape) {
    Node* mem = phase->transform(in(MemNode::Memory));
    // If transformed to a MergeMem, get the desired slice
    uint alias_idx = phase->C->get_alias_index(adr_type());
    mem = mem->is_MergeMem() ? mem->as_MergeMem()->memory_at(alias_idx) : mem;
    if (mem != in(MemNode::Memory)) {
      set_req(MemNode::Memory, mem);
      return this;
    }
  }
  return NULL;
}

//------------------------------Value------------------------------------------
const Type *StrIntrinsicNode::Value( PhaseTransform *phase ) const {
  if (in(0) && phase->type(in(0)) == Type::TOP) return Type::TOP;
  return bottom_type();
}

//=============================================================================
//------------------------------match_edge-------------------------------------
// Do not match memory edge
uint EncodeISOArrayNode::match_edge(uint idx) const {
  return idx == 2 || idx == 3; // EncodeISOArray src (Binary dst len)
}

//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.  Strip out
// control copies
Node *EncodeISOArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  return remove_dead_region(phase, can_reshape) ? this : NULL;
}

//------------------------------Value------------------------------------------
const Type *EncodeISOArrayNode::Value(PhaseTransform *phase) const {
  if (in(0) && phase->type(in(0)) == Type::TOP) return Type::TOP;
  return bottom_type();
}
hotspot/src/share/vm/opto/intrinsicnode.hpp (new file, 127 lines)
@ -0,0 +1,127 @@
/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_INTRINSICNODE_HPP
#define SHARE_VM_OPTO_INTRINSICNODE_HPP

#include "opto/node.hpp"
#include "opto/opcodes.hpp"


//----------------------PartialSubtypeCheckNode--------------------------------
// The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
// array for an instance of the superklass. Set a hidden internal cache on a
// hit (cache is checked with exposed code in gen_subtype_check()). Return
// not zero for a miss or zero for a hit.
class PartialSubtypeCheckNode : public Node {
public:
  PartialSubtypeCheckNode(Node* c, Node* sub, Node* super) : Node(c,sub,super) {}
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return TypeRawPtr::BOTTOM; }
  virtual uint ideal_reg() const { return Op_RegP; }
};

//------------------------------StrIntrinsic-------------------------------
// Base class for Ideal nodes used in String intrinsic code.
class StrIntrinsicNode: public Node {
public:
  StrIntrinsicNode(Node* control, Node* char_array_mem,
                   Node* s1, Node* c1, Node* s2, Node* c2):
    Node(control, char_array_mem, s1, c1, s2, c2) {
  }

  StrIntrinsicNode(Node* control, Node* char_array_mem,
                   Node* s1, Node* s2, Node* c):
    Node(control, char_array_mem, s1, s2, c) {
  }

  StrIntrinsicNode(Node* control, Node* char_array_mem,
                   Node* s1, Node* s2):
    Node(control, char_array_mem, s1, s2) {
  }

  virtual bool depends_only_on_test() const { return false; }
  virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; }
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
};

//------------------------------StrComp-------------------------------------
class StrCompNode: public StrIntrinsicNode {
public:
  StrCompNode(Node* control, Node* char_array_mem,
              Node* s1, Node* c1, Node* s2, Node* c2):
    StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {};
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::INT; }
};

//------------------------------StrEquals-------------------------------------
class StrEqualsNode: public StrIntrinsicNode {
public:
  StrEqualsNode(Node* control, Node* char_array_mem,
                Node* s1, Node* s2, Node* c):
    StrIntrinsicNode(control, char_array_mem, s1, s2, c) {};
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::BOOL; }
};

//------------------------------StrIndexOf-------------------------------------
class StrIndexOfNode: public StrIntrinsicNode {
public:
  StrIndexOfNode(Node* control, Node* char_array_mem,
                 Node* s1, Node* c1, Node* s2, Node* c2):
    StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {};
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::INT; }
};

//------------------------------AryEq---------------------------------------
class AryEqNode: public StrIntrinsicNode {
public:
  AryEqNode(Node* control, Node* char_array_mem, Node* s1, Node* s2):
    StrIntrinsicNode(control, char_array_mem, s1, s2) {};
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::BOOL; }
};


//------------------------------EncodeISOArray--------------------------------
// encode char[] to byte[] in ISO_8859_1
class EncodeISOArrayNode: public Node {
public:
  EncodeISOArrayNode(Node *control, Node* arymem, Node* s1, Node* s2, Node* c): Node(control, arymem, s1, s2, c) {};
  virtual int Opcode() const;
  virtual bool depends_only_on_test() const { return false; }
  virtual const Type* bottom_type() const { return TypeInt::INT; }
  virtual const TypePtr* adr_type() const { return TypePtr::BOTTOM; }
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
};

#endif // SHARE_VM_OPTO_INTRINSICNODE_HPP
@ -30,10 +30,16 @@
|
||||
#include "oops/objArrayKlass.hpp"
|
||||
#include "opto/addnode.hpp"
|
||||
#include "opto/callGenerator.hpp"
|
||||
#include "opto/castnode.hpp"
|
||||
#include "opto/cfgnode.hpp"
|
||||
#include "opto/convertnode.hpp"
|
||||
#include "opto/countbitsnode.hpp"
|
||||
#include "opto/intrinsicnode.hpp"
|
||||
#include "opto/idealKit.hpp"
|
||||
#include "opto/mathexactnode.hpp"
|
||||
#include "opto/movenode.hpp"
|
||||
#include "opto/mulnode.hpp"
|
||||
#include "opto/narrowptrnode.hpp"
|
||||
#include "opto/parse.hpp"
|
||||
#include "opto/runtime.hpp"
|
||||
#include "opto/subnode.hpp"
|
||||
@ -4658,7 +4664,7 @@ bool LibraryCallKit::inline_arraycopy() {
|
||||
|
||||
ciKlass* src_k = NULL;
|
||||
if (!has_src) {
|
||||
src_k = src_type->speculative_type();
|
||||
src_k = src_type->speculative_type_not_null();
|
||||
if (src_k != NULL && src_k->is_array_klass()) {
|
||||
could_have_src = true;
|
||||
}
|
||||
@ -4666,7 +4672,7 @@ bool LibraryCallKit::inline_arraycopy() {
|
||||
|
||||
ciKlass* dest_k = NULL;
|
||||
if (!has_dest) {
|
||||
dest_k = dest_type->speculative_type();
|
||||
dest_k = dest_type->speculative_type_not_null();
|
||||
if (dest_k != NULL && dest_k->is_array_klass()) {
|
||||
could_have_dest = true;
|
||||
}
|
||||
@ -4738,13 +4744,13 @@ bool LibraryCallKit::inline_arraycopy() {
|
||||
ciKlass* src_k = top_src->klass();
|
||||
ciKlass* dest_k = top_dest->klass();
|
||||
if (!src_spec) {
|
||||
src_k = src_type->speculative_type();
|
||||
src_k = src_type->speculative_type_not_null();
|
||||
if (src_k != NULL && src_k->is_array_klass()) {
|
||||
could_have_src = true;
|
||||
}
|
||||
}
|
||||
if (!dest_spec) {
|
||||
dest_k = dest_type->speculative_type();
|
||||
dest_k = dest_type->speculative_type_not_null();
|
||||
if (dest_k != NULL && dest_k->is_array_klass()) {
|
||||
could_have_dest = true;
|
||||
}
|
||||
|
@ -27,8 +27,10 @@
|
||||
#include "opto/addnode.hpp"
|
||||
#include "opto/callnode.hpp"
|
||||
#include "opto/connode.hpp"
|
||||
#include "opto/convertnode.hpp"
|
||||
#include "opto/loopnode.hpp"
|
||||
#include "opto/mulnode.hpp"
|
||||
#include "opto/opaquenode.hpp"
|
||||
#include "opto/rootnode.hpp"
|
||||
#include "opto/subnode.hpp"
|
||||
|
||||
|
@ -28,9 +28,12 @@
|
||||
#include "opto/addnode.hpp"
|
||||
#include "opto/callnode.hpp"
|
||||
#include "opto/connode.hpp"
|
||||
#include "opto/convertnode.hpp"
|
||||
#include "opto/divnode.hpp"
|
||||
#include "opto/loopnode.hpp"
|
||||
#include "opto/mulnode.hpp"
|
||||
#include "opto/movenode.hpp"
|
||||
#include "opto/opaquenode.hpp"
|
||||
#include "opto/rootnode.hpp"
|
||||
#include "opto/runtime.hpp"
|
||||
#include "opto/subnode.hpp"
|
||||
|
@ -25,7 +25,9 @@
|
||||
#include "precompiled.hpp"
|
||||
#include "memory/allocation.inline.hpp"
|
||||
#include "opto/connode.hpp"
|
||||
#include "opto/convertnode.hpp"
|
||||
#include "opto/loopnode.hpp"
|
||||
#include "opto/opaquenode.hpp"
|
||||
#include "opto/rootnode.hpp"
|
||||
|
||||
//================= Loop Unswitching =====================
|
||||
|

@ -30,6 +30,7 @@
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
#include "opto/divnode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/loopnode.hpp"

@ -30,6 +30,8 @@
#include "opto/loopnode.hpp"
#include "opto/matcher.hpp"
#include "opto/mulnode.hpp"
#include "opto/movenode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/rootnode.hpp"
#include "opto/subnode.hpp"

@ -27,14 +27,17 @@
#include "libadt/vectset.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
#include "opto/locknode.hpp"
#include "opto/loopnode.hpp"
#include "opto/macro.hpp"
#include "opto/memnode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/node.hpp"
#include "opto/opaquenode.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"

@ -26,10 +26,10 @@
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/connode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/movenode.hpp"
#include "opto/opcodes.hpp"
#include "opto/regmask.hpp"
#include "opto/rootnode.hpp"

@ -31,11 +31,13 @@
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/regmask.hpp"

@ -2903,59 +2905,6 @@ Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
return mem;
}

//=============================================================================
// Do not match memory edge.
uint StrIntrinsicNode::match_edge(uint idx) const {
return idx == 2 || idx == 3;
}

//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.  Strip out
// control copies
Node *StrIntrinsicNode::Ideal(PhaseGVN *phase, bool can_reshape) {
if (remove_dead_region(phase, can_reshape)) return this;
// Don't bother trying to transform a dead node
if (in(0) && in(0)->is_top()) return NULL;

if (can_reshape) {
Node* mem = phase->transform(in(MemNode::Memory));
// If transformed to a MergeMem, get the desired slice
uint alias_idx = phase->C->get_alias_index(adr_type());
mem = mem->is_MergeMem() ? mem->as_MergeMem()->memory_at(alias_idx) : mem;
if (mem != in(MemNode::Memory)) {
set_req(MemNode::Memory, mem);
return this;
}
}
return NULL;
}

//------------------------------Value------------------------------------------
const Type *StrIntrinsicNode::Value( PhaseTransform *phase ) const {
if (in(0) && phase->type(in(0)) == Type::TOP) return Type::TOP;
return bottom_type();
}
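
Note on the Ideal transform above: when GVN turns the memory input into a MergeMem (one memory state per alias class), the node re-attaches itself to just the slice its adr_type() names, shedding false ordering against unrelated memory. A minimal sketch of the idiom with simplified stand-ins (illustrative only, not HotSpot code):

    #include <vector>

    // Stand-in for a merged memory state: one slice per alias class.
    struct MemState {};
    struct MergeMem { std::vector<MemState*> slice; };

    // Depend on the single alias slice this node actually touches when the
    // incoming state is a merge; otherwise keep the flat state as-is.
    inline MemState* narrow_memory(MergeMem* merged, MemState* flat, size_t alias_idx) {
      return merged != nullptr ? merged->slice[alias_idx] : flat;
    }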

//=============================================================================
//------------------------------match_edge-------------------------------------
// Do not match memory edge
uint EncodeISOArrayNode::match_edge(uint idx) const {
return idx == 2 || idx == 3; // EncodeISOArray src (Binary dst len)
}

//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.  Strip out
// control copies
Node *EncodeISOArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
return remove_dead_region(phase, can_reshape) ? this : NULL;
}

//------------------------------Value------------------------------------------
const Type *EncodeISOArrayNode::Value(PhaseTransform *phase) const {
if (in(0) && phase->type(in(0)) == Type::TOP) return Type::TOP;
return bottom_type();
}

//=============================================================================
MemBarNode::MemBarNode(Compile* C, int alias_idx, Node* precedent)
: MultiNode(TypeFunc::Parms + (precedent == NULL? 0: 1)),

@ -866,88 +866,6 @@ public:
static bool step_through(Node** np, uint instance_id, PhaseTransform* phase);
};

//------------------------------StrIntrinsic-------------------------------
// Base class for Ideal nodes used in String intrinsic code.
class StrIntrinsicNode: public Node {
public:
StrIntrinsicNode(Node* control, Node* char_array_mem,
Node* s1, Node* c1, Node* s2, Node* c2):
Node(control, char_array_mem, s1, c1, s2, c2) {
}

StrIntrinsicNode(Node* control, Node* char_array_mem,
Node* s1, Node* s2, Node* c):
Node(control, char_array_mem, s1, s2, c) {
}

StrIntrinsicNode(Node* control, Node* char_array_mem,
Node* s1, Node* s2):
Node(control, char_array_mem, s1, s2) {
}

virtual bool depends_only_on_test() const { return false; }
virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; }
virtual uint match_edge(uint idx) const;
virtual uint ideal_reg() const { return Op_RegI; }
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual const Type *Value(PhaseTransform *phase) const;
};

//------------------------------StrComp-------------------------------------
class StrCompNode: public StrIntrinsicNode {
public:
StrCompNode(Node* control, Node* char_array_mem,
Node* s1, Node* c1, Node* s2, Node* c2):
StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {};
virtual int Opcode() const;
virtual const Type* bottom_type() const { return TypeInt::INT; }
};

//------------------------------StrEquals-------------------------------------
class StrEqualsNode: public StrIntrinsicNode {
public:
StrEqualsNode(Node* control, Node* char_array_mem,
Node* s1, Node* s2, Node* c):
StrIntrinsicNode(control, char_array_mem, s1, s2, c) {};
virtual int Opcode() const;
virtual const Type* bottom_type() const { return TypeInt::BOOL; }
};

//------------------------------StrIndexOf-------------------------------------
class StrIndexOfNode: public StrIntrinsicNode {
public:
StrIndexOfNode(Node* control, Node* char_array_mem,
Node* s1, Node* c1, Node* s2, Node* c2):
StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {};
virtual int Opcode() const;
virtual const Type* bottom_type() const { return TypeInt::INT; }
};

//------------------------------AryEq---------------------------------------
class AryEqNode: public StrIntrinsicNode {
public:
AryEqNode(Node* control, Node* char_array_mem, Node* s1, Node* s2):
StrIntrinsicNode(control, char_array_mem, s1, s2) {};
virtual int Opcode() const;
virtual const Type* bottom_type() const { return TypeInt::BOOL; }
};


//------------------------------EncodeISOArray--------------------------------
// encode char[] to byte[] in ISO_8859_1
class EncodeISOArrayNode: public Node {
public:
EncodeISOArrayNode(Node *control, Node* arymem, Node* s1, Node* s2, Node* c): Node(control, arymem, s1, s2, c) {};
virtual int Opcode() const;
virtual bool depends_only_on_test() const { return false; }
virtual const Type* bottom_type() const { return TypeInt::INT; }
virtual const TypePtr* adr_type() const { return TypePtr::BOTTOM; }
virtual uint match_edge(uint idx) const;
virtual uint ideal_reg() const { return Op_RegI; }
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual const Type *Value(PhaseTransform *phase) const;
};

//------------------------------MemBar-----------------------------------------
// There are different flavors of Memory Barriers to match the Java Memory
// Model.  Monitor-enter and volatile-load act as Acquires: no following ref

398 hotspot/src/share/vm/opto/movenode.cpp (new file)
@ -0,0 +1,398 @@
/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "opto/addnode.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
#include "opto/movenode.hpp"
#include "opto/phaseX.hpp"
#include "opto/subnode.hpp"

//=============================================================================
/*
The major change is for CMoveP and StrComp.  They have related but slightly
different problems.  They both take in TWO oops which are both null-checked
independently before the using Node.  After CCP removes the CastPP's they need
to pick up the guarding test edge - in this case TWO control edges.  I tried
various solutions, all have problems:

(1) Do nothing.  This leads to a bug where we hoist a Load from a CMoveP or a
StrComp above a guarding null check.  I've seen both cases in normal -Xcomp
testing.

(2) Plug the control edge from 1 of the 2 oops in.  Apparent problem here is
to figure out which test post-dominates.  The real problem is that it doesn't
matter which one you pick.  After you pick up, the dominating-test elider in
IGVN can remove the test and allow you to hoist up to the dominating test on
the chosen oop bypassing the test on the not-chosen oop.  Seen in testing.
Oops.

(3) Leave the CastPP's in.  This makes the graph more accurate in some sense;
we get to keep around the knowledge that an oop is not-null after some test.
Alas, the CastPP's interfere with GVN (some values are the regular oop, some
are the CastPP of the oop, all merge at Phi's which cannot collapse, etc).
This cost us 10% on SpecJVM, even when I removed some of the more trivial
cases in the optimizer.  Removing more useless Phi's started allowing Loads to
illegally float above null checks.  I gave up on this approach.

(4) Add BOTH control edges to both tests.  Alas, too much code knows that
control edges are in slot-zero ONLY.  Many quick asserts fail; no way to do
this one.  Note that I really want to allow the CMoveP to float and add both
control edges to the dependent Load op - meaning I can select early but I
cannot Load until I pass both tests.

(5) Do not hoist CMoveP and StrComp.  To this end I added the v-call
depends_only_on_test().  No obvious performance loss on Spec, but we are
clearly conservative on CMoveP (also so on StrComp but that's unlikely to
matter ever).

*/
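
A plain C++ analogue of the hazard option (5) defends against (illustrative only; the function and types are hypothetical): both oops are null-checked independently before the select, so the loads feeding it must not be scheduled above either test, which is exactly what depends_only_on_test() returning false enforces.

    struct Str { int len; };

    // If the select below were treated as depending only on 'cond', a
    // scheduler could hoist the a->len / b->len loads above the two guards.
    inline int pick_len(const Str* a, const Str* b, bool cond) {
      if (a == nullptr) return -1;    // guarding test #1
      if (b == nullptr) return -2;    // guarding test #2
      return cond ? a->len : b->len;  // must stay below BOTH tests
    }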

//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.
// Move constants to the right.
Node *CMoveNode::Ideal(PhaseGVN *phase, bool can_reshape) {
if( in(0) && remove_dead_region(phase, can_reshape) ) return this;
// Don't bother trying to transform a dead node
if( in(0) && in(0)->is_top() ) return NULL;
assert( !phase->eqv(in(Condition), this) &&
!phase->eqv(in(IfFalse), this) &&
!phase->eqv(in(IfTrue), this), "dead loop in CMoveNode::Ideal" );
if( phase->type(in(Condition)) == Type::TOP )
return NULL; // return NULL when Condition is dead

if( in(IfFalse)->is_Con() && !in(IfTrue)->is_Con() ) {
if( in(Condition)->is_Bool() ) {
BoolNode* b = in(Condition)->as_Bool();
BoolNode* b2 = b->negate(phase);
return make( phase->C, in(Control), phase->transform(b2), in(IfTrue), in(IfFalse), _type );
}
}
return NULL;
}
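
The rewrite relies on the basic select identity: negating the condition and swapping the two value inputs preserves the result, so a constant stuck on the false side can always be canonicalized to the true side. A one-line demonstration (illustrative):

    // cmove(cond, f, t) == cmove(!cond, t, f) for all inputs, so
    // (CMove bol con x) rewrites to (CMove bol' x con) with bol' = !bol.
    inline int cmove(bool cond, int if_false, int if_true) {
      return cond ? if_true : if_false;  // e.g. cmove(c, 7, x) == cmove(!c, x, 7)
    }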

//------------------------------is_cmove_id------------------------------------
// Helper function to check for CMOVE identity.  Shared with PhiNode::Identity
Node *CMoveNode::is_cmove_id( PhaseTransform *phase, Node *cmp, Node *t, Node *f, BoolNode *b ) {
// Check for Cmp'ing and CMove'ing same values
if( (phase->eqv(cmp->in(1),f) &&
phase->eqv(cmp->in(2),t)) ||
// Swapped Cmp is OK
(phase->eqv(cmp->in(2),f) &&
phase->eqv(cmp->in(1),t)) ) {
// Give up this identity check for floating points because it may choose incorrect
// value around 0.0 and -0.0
if ( cmp->Opcode()==Op_CmpF || cmp->Opcode()==Op_CmpD )
return NULL;
// Check for "(t==f)?t:f;" and replace with "f"
if( b->_test._test == BoolTest::eq )
return f;
// Allow the inverted case as well
// Check for "(t!=f)?t:f;" and replace with "t"
if( b->_test._test == BoolTest::ne )
return t;
}
return NULL;
}

//------------------------------Identity---------------------------------------
// Conditional-move is an identity if both inputs are the same, or the test
// is true or false.
Node *CMoveNode::Identity( PhaseTransform *phase ) {
if( phase->eqv(in(IfFalse),in(IfTrue)) ) // C-moving identical inputs?
return in(IfFalse); // Then it doesn't matter
if( phase->type(in(Condition)) == TypeInt::ZERO )
return in(IfFalse); // Always pick left(false) input
if( phase->type(in(Condition)) == TypeInt::ONE )
return in(IfTrue); // Always pick right(true) input

// Check for CMove'ing a constant after comparing against the constant.
// Happens all the time now, since if we compare equality vs a constant in
// the parser, we "know" the variable is constant on one path and we force
// it.  Thus code like "if( x==0 ) {/*EMPTY*/}" ends up inserting a
// conditional move: "x = (x==0)?0:x;".  Yucko.  This fix is slightly more
// general in that we don't need constants.
if( in(Condition)->is_Bool() ) {
BoolNode *b = in(Condition)->as_Bool();
Node *cmp = b->in(1);
if( cmp->is_Cmp() ) {
Node *id = is_cmove_id( phase, cmp, in(IfTrue), in(IfFalse), b );
if( id ) return id;
}
}

return this;
}
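
Concretely, the parser shape named in the comment folds like this (sketch; the surrounding Java line is illustrative):

    // Java:  if (x == 0) { /* empty */ }  ... use x ...
    // The parser materializes x' = (x == 0) ? 0 : x; is_cmove_id sees the Cmp
    // comparing exactly the two selected values under an eq test, so the
    // whole CMove is the identity on x.
    inline int after_parse(int x) { return (x == 0) ? 0 : x; }  // == x
    inline int after_igvn(int x)  { return x; }                 // folded result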

//------------------------------Value------------------------------------------
// Result is the meet of inputs
const Type *CMoveNode::Value( PhaseTransform *phase ) const {
if( phase->type(in(Condition)) == Type::TOP )
return Type::TOP;
return phase->type(in(IfFalse))->meet_speculative(phase->type(in(IfTrue)));
}

//------------------------------make-------------------------------------------
// Make a correctly-flavored CMove.  Since _type is directly determined
// from the inputs we do not need to specify it here.
CMoveNode *CMoveNode::make( Compile *C, Node *c, Node *bol, Node *left, Node *right, const Type *t ) {
switch( t->basic_type() ) {
case T_INT: return new (C) CMoveINode( bol, left, right, t->is_int() );
case T_FLOAT: return new (C) CMoveFNode( bol, left, right, t );
case T_DOUBLE: return new (C) CMoveDNode( bol, left, right, t );
case T_LONG: return new (C) CMoveLNode( bol, left, right, t->is_long() );
case T_OBJECT: return new (C) CMovePNode( c, bol, left, right, t->is_oopptr() );
case T_ADDRESS: return new (C) CMovePNode( c, bol, left, right, t->is_ptr() );
case T_NARROWOOP: return new (C) CMoveNNode( c, bol, left, right, t );
default:
ShouldNotReachHere();
return NULL;
}
}

//=============================================================================
//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.
// Check for conversions to boolean
Node *CMoveINode::Ideal(PhaseGVN *phase, bool can_reshape) {
// Try generic ideal's first
Node *x = CMoveNode::Ideal(phase, can_reshape);
if( x ) return x;

// If zero is on the left (false-case, no-move-case) it must mean another
// constant is on the right (otherwise the shared CMove::Ideal code would
// have moved the constant to the right).  This situation is bad for Intel
// and a don't-care for Sparc.  It's bad for Intel because the zero has to
// be manifested in a register with a XOR which kills flags, which are live
// on input to the CMoveI, leading to a situation which causes excessive
// spilling on Intel.  For Sparc, if the zero is on the left the Sparc will
// zero a register via G0 and conditionally-move the other constant.  If the
// zero is on the right, the Sparc will load the first constant with a
// 13-bit set-lo and conditionally move G0.  See bug 4677505.
if( phase->type(in(IfFalse)) == TypeInt::ZERO && !(phase->type(in(IfTrue)) == TypeInt::ZERO) ) {
if( in(Condition)->is_Bool() ) {
BoolNode* b = in(Condition)->as_Bool();
BoolNode* b2 = b->negate(phase);
return make( phase->C, in(Control), phase->transform(b2), in(IfTrue), in(IfFalse), _type );
}
}

// Now check for booleans
int flip = 0;

// Check for picking from zero/one
if( phase->type(in(IfFalse)) == TypeInt::ZERO && phase->type(in(IfTrue)) == TypeInt::ONE ) {
flip = 1 - flip;
} else if( phase->type(in(IfFalse)) == TypeInt::ONE && phase->type(in(IfTrue)) == TypeInt::ZERO ) {
} else return NULL;

// Check for eq/ne test
if( !in(1)->is_Bool() ) return NULL;
BoolNode *bol = in(1)->as_Bool();
if( bol->_test._test == BoolTest::eq ) {
} else if( bol->_test._test == BoolTest::ne ) {
flip = 1-flip;
} else return NULL;

// Check for vs 0 or 1
if( !bol->in(1)->is_Cmp() ) return NULL;
const CmpNode *cmp = bol->in(1)->as_Cmp();
if( phase->type(cmp->in(2)) == TypeInt::ZERO ) {
} else if( phase->type(cmp->in(2)) == TypeInt::ONE ) {
// Allow cmp-vs-1 if the other input is bounded by 0-1
if( phase->type(cmp->in(1)) != TypeInt::BOOL )
return NULL;
flip = 1 - flip;
} else return NULL;

// Convert to a bool (flipped)
// Build int->bool conversion
#ifndef PRODUCT
if( PrintOpto ) tty->print_cr("CMOV to I2B");
#endif
Node *n = new (phase->C) Conv2BNode( cmp->in(1) );
if( flip )
n = new (phase->C) XorINode( phase->transform(n), phase->intcon(1) );

return n;
}
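
The net effect is to replace the conditional move with a Conv2B, plus an XorI with 1 when the chosen sense is flipped. Source-level equivalents of the two paths (illustrative):

    // (x != 0) ? 1 : 0   ==>  Conv2B(x)
    inline int to_bool(int x)         { return x != 0 ? 1 : 0; }
    // (x == 0) ? 1 : 0   ==>  Conv2B(x) ^ 1   (the flipped path)
    inline int to_bool_flipped(int x) { return (x != 0 ? 1 : 0) ^ 1; }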

//=============================================================================
//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.
// Check for absolute value
Node *CMoveFNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// Try generic ideal's first
Node *x = CMoveNode::Ideal(phase, can_reshape);
if( x ) return x;

int cmp_zero_idx = 0; // Index of compare input where to look for zero
int phi_x_idx = 0; // Index of phi input where to find naked x

// Find the Bool
if( !in(1)->is_Bool() ) return NULL;
BoolNode *bol = in(1)->as_Bool();
// Check bool sense
switch( bol->_test._test ) {
case BoolTest::lt: cmp_zero_idx = 1; phi_x_idx = IfTrue; break;
case BoolTest::le: cmp_zero_idx = 2; phi_x_idx = IfFalse; break;
case BoolTest::gt: cmp_zero_idx = 2; phi_x_idx = IfTrue; break;
case BoolTest::ge: cmp_zero_idx = 1; phi_x_idx = IfFalse; break;
default: return NULL; break;
}

// Find zero input of CmpF; the other input is being abs'd
Node *cmpf = bol->in(1);
if( cmpf->Opcode() != Op_CmpF ) return NULL;
Node *X = NULL;
bool flip = false;
if( phase->type(cmpf->in(cmp_zero_idx)) == TypeF::ZERO ) {
X = cmpf->in(3 - cmp_zero_idx);
} else if (phase->type(cmpf->in(3 - cmp_zero_idx)) == TypeF::ZERO) {
// The test is inverted, we should invert the result...
X = cmpf->in(cmp_zero_idx);
flip = true;
} else {
return NULL;
}

// If X is found on the appropriate phi input, find the subtract on the other
if( X != in(phi_x_idx) ) return NULL;
int phi_sub_idx = phi_x_idx == IfTrue ? IfFalse : IfTrue;
Node *sub = in(phi_sub_idx);

// Allow only SubF(0,X) and fail out for all others; NegF is not OK
if( sub->Opcode() != Op_SubF ||
sub->in(2) != X ||
phase->type(sub->in(1)) != TypeF::ZERO ) return NULL;

Node *abs = new (phase->C) AbsFNode( X );
if( flip )
abs = new (phase->C) SubFNode(sub->in(1), phase->transform(abs));

return abs;
}
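
Note how narrow the accepted pattern is: a compare against zero selecting between x and SubF(0,x). A sketch of the accepted shape (illustrative):

    // Accepted:  (x >= 0.0f) ? x : (0.0f - x)   -- rewritten to AbsF(x).
    inline float abs_shape(float x) { return x >= 0.0f ? x : 0.0f - x; }
    // Plain negation (NegF) is rejected above, presumably because 0.0f - x
    // and -x disagree at x == +0.0f (+0.0f vs -0.0f), so only the explicit
    // SubF(0,X) input form is matched.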

//=============================================================================
//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.
// Check for absolute value
Node *CMoveDNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// Try generic ideal's first
Node *x = CMoveNode::Ideal(phase, can_reshape);
if( x ) return x;

int cmp_zero_idx = 0; // Index of compare input where to look for zero
int phi_x_idx = 0; // Index of phi input where to find naked x

// Find the Bool
if( !in(1)->is_Bool() ) return NULL;
BoolNode *bol = in(1)->as_Bool();
// Check bool sense
switch( bol->_test._test ) {
case BoolTest::lt: cmp_zero_idx = 1; phi_x_idx = IfTrue; break;
case BoolTest::le: cmp_zero_idx = 2; phi_x_idx = IfFalse; break;
case BoolTest::gt: cmp_zero_idx = 2; phi_x_idx = IfTrue; break;
case BoolTest::ge: cmp_zero_idx = 1; phi_x_idx = IfFalse; break;
default: return NULL; break;
}

// Find zero input of CmpD; the other input is being abs'd
Node *cmpd = bol->in(1);
if( cmpd->Opcode() != Op_CmpD ) return NULL;
Node *X = NULL;
bool flip = false;
if( phase->type(cmpd->in(cmp_zero_idx)) == TypeD::ZERO ) {
X = cmpd->in(3 - cmp_zero_idx);
} else if (phase->type(cmpd->in(3 - cmp_zero_idx)) == TypeD::ZERO) {
// The test is inverted, we should invert the result...
X = cmpd->in(cmp_zero_idx);
flip = true;
} else {
return NULL;
}

// If X is found on the appropriate phi input, find the subtract on the other
if( X != in(phi_x_idx) ) return NULL;
int phi_sub_idx = phi_x_idx == IfTrue ? IfFalse : IfTrue;
Node *sub = in(phi_sub_idx);

// Allow only SubD(0,X) and fail out for all others; NegD is not OK
if( sub->Opcode() != Op_SubD ||
sub->in(2) != X ||
phase->type(sub->in(1)) != TypeD::ZERO ) return NULL;

Node *abs = new (phase->C) AbsDNode( X );
if( flip )
abs = new (phase->C) SubDNode(sub->in(1), phase->transform(abs));

return abs;
}

//------------------------------Value------------------------------------------
const Type *MoveL2DNode::Value( PhaseTransform *phase ) const {
const Type *t = phase->type( in(1) );
if( t == Type::TOP ) return Type::TOP;
const TypeLong *tl = t->is_long();
if( !tl->is_con() ) return bottom_type();
JavaValue v;
v.set_jlong(tl->get_con());
return TypeD::make( v.get_jdouble() );
}

//------------------------------Value------------------------------------------
const Type *MoveI2FNode::Value( PhaseTransform *phase ) const {
const Type *t = phase->type( in(1) );
if( t == Type::TOP ) return Type::TOP;
const TypeInt *ti = t->is_int();
if( !ti->is_con() ) return bottom_type();
JavaValue v;
v.set_jint(ti->get_con());
return TypeF::make( v.get_jfloat() );
}

//------------------------------Value------------------------------------------
const Type *MoveF2INode::Value( PhaseTransform *phase ) const {
const Type *t = phase->type( in(1) );
if( t == Type::TOP ) return Type::TOP;
if( t == Type::FLOAT ) return TypeInt::INT;
const TypeF *tf = t->is_float_constant();
JavaValue v;
v.set_jfloat(tf->getf());
return TypeInt::make( v.get_jint() );
}

//------------------------------Value------------------------------------------
const Type *MoveD2LNode::Value( PhaseTransform *phase ) const {
const Type *t = phase->type( in(1) );
if( t == Type::TOP ) return Type::TOP;
if( t == Type::DOUBLE ) return TypeLong::LONG;
const TypeD *td = t->is_double_constant();
JavaValue v;
v.set_jdouble(td->getd());
return TypeLong::make( v.get_jlong() );
}
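
All four Move nodes reinterpret bits rather than convert values (the C2 counterparts of Float.floatToRawIntBits and friends), which is why the constant folding above routes through a JavaValue. A stand-alone sketch of the same folding (illustrative):

    #include <cstdint>
    #include <cstring>

    // MoveF2I: the same 32 bits viewed as an int -- no numeric conversion.
    inline int32_t move_f2i(float f) {
      int32_t bits;
      std::memcpy(&bits, &f, sizeof bits);  // well-defined bit reinterpretation
      return bits;
    }

    // MoveI2F: the inverse reinterpretation.
    inline float move_i2f(int32_t bits) {
      float f;
      std::memcpy(&f, &bits, sizeof f);
      return f;
    }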

152 hotspot/src/share/vm/opto/movenode.hpp (new file)
@ -0,0 +1,152 @@
/* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * (Standard GPL v2 license header, identical to the one in movenode.cpp above.) */

#ifndef SHARE_VM_OPTO_MOVENODE_HPP
#define SHARE_VM_OPTO_MOVENODE_HPP

#include "opto/node.hpp"

//------------------------------CMoveNode--------------------------------------
// Conditional move
class CMoveNode : public TypeNode {
public:
enum { Control, // When is it safe to do this cmove?
Condition, // Condition controlling the cmove
IfFalse, // Value if condition is false
IfTrue }; // Value if condition is true
CMoveNode( Node *bol, Node *left, Node *right, const Type *t ) : TypeNode(t,4)
{
init_class_id(Class_CMove);
// all inputs are nullified in Node::Node(int)
// init_req(Control,NULL);
init_req(Condition,bol);
init_req(IfFalse,left);
init_req(IfTrue,right);
}
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual const Type *Value( PhaseTransform *phase ) const;
virtual Node *Identity( PhaseTransform *phase );
static CMoveNode *make( Compile *C, Node *c, Node *bol, Node *left, Node *right, const Type *t );
// Helper function to spot cmove graph shapes
static Node *is_cmove_id( PhaseTransform *phase, Node *cmp, Node *t, Node *f, BoolNode *b );
};

//------------------------------CMoveDNode-------------------------------------
class CMoveDNode : public CMoveNode {
public:
CMoveDNode( Node *bol, Node *left, Node *right, const Type* t) : CMoveNode(bol,left,right,t){}
virtual int Opcode() const;
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------CMoveFNode-------------------------------------
class CMoveFNode : public CMoveNode {
public:
CMoveFNode( Node *bol, Node *left, Node *right, const Type* t ) : CMoveNode(bol,left,right,t) {}
virtual int Opcode() const;
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------CMoveINode-------------------------------------
class CMoveINode : public CMoveNode {
public:
CMoveINode( Node *bol, Node *left, Node *right, const TypeInt *ti ) : CMoveNode(bol,left,right,ti){}
virtual int Opcode() const;
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------CMoveLNode-------------------------------------
class CMoveLNode : public CMoveNode {
public:
CMoveLNode(Node *bol, Node *left, Node *right, const TypeLong *tl ) : CMoveNode(bol,left,right,tl){}
virtual int Opcode() const;
};

//------------------------------CMovePNode-------------------------------------
class CMovePNode : public CMoveNode {
public:
CMovePNode( Node *c, Node *bol, Node *left, Node *right, const TypePtr* t ) : CMoveNode(bol,left,right,t) { init_req(Control,c); }
virtual int Opcode() const;
};

//------------------------------CMoveNNode-------------------------------------
class CMoveNNode : public CMoveNode {
public:
CMoveNNode( Node *c, Node *bol, Node *left, Node *right, const Type* t ) : CMoveNode(bol,left,right,t) { init_req(Control,c); }
virtual int Opcode() const;
};

//
class MoveI2FNode : public Node {
public:
MoveI2FNode( Node *value ) : Node(0,value) {}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return Type::FLOAT; }
virtual uint ideal_reg() const { return Op_RegF; }
virtual const Type* Value( PhaseTransform *phase ) const;
};

class MoveL2DNode : public Node {
public:
MoveL2DNode( Node *value ) : Node(0,value) {}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return Type::DOUBLE; }
virtual uint ideal_reg() const { return Op_RegD; }
virtual const Type* Value( PhaseTransform *phase ) const;
};

class MoveF2INode : public Node {
public:
MoveF2INode( Node *value ) : Node(0,value) {}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return TypeInt::INT; }
virtual uint ideal_reg() const { return Op_RegI; }
virtual const Type* Value( PhaseTransform *phase ) const;
};

class MoveD2LNode : public Node {
public:
MoveD2LNode( Node *value ) : Node(0,value) {}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return TypeLong::LONG; }
virtual uint ideal_reg() const { return Op_RegL; }
virtual const Type* Value( PhaseTransform *phase ) const;
};

//------------------------------BinaryNode-------------------------------------
// Placeholder for the 2 conditional inputs to a CMove.  CMove needs 4
// inputs: the Bool (for the lt/gt/eq/ne bits), the flags (result of some
// compare), and the 2 values to select between.  The Matcher requires a
// binary tree so we break it down like this:
// (CMove (Binary bol cmp) (Binary src1 src2))
class BinaryNode : public Node {
public:
BinaryNode( Node *n1, Node *n2 ) : Node(0,n1,n2) { }
virtual int Opcode() const;
virtual uint ideal_reg() const { return 0; }
};
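
For context, the regrouping turns a four-input select into a strictly binary expression tree, which is the shape the Matcher's instruction-selection rules walk. A tiny sketch of the decomposition (illustrative):

    // (CMove (Binary bol cmp) (Binary src1 src2)): after wrapping the two
    // input pairs in Binary nodes, every interior node has exactly two kids.
    struct Tree { const Tree* left; const Tree* right; };
    inline Tree binary(const Tree* a, const Tree* b) { return Tree{a, b}; }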

#endif // SHARE_VM_OPTO_MOVENODE_HPP

@ -26,6 +26,7 @@
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/phaseX.hpp"

113 hotspot/src/share/vm/opto/narrowptrnode.cpp (new file)
@ -0,0 +1,113 @@
/* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * (Standard GPL v2 license header, identical to the one in movenode.cpp above.) */

#include "precompiled.hpp"
|
||||
#include "opto/narrowptrnode.hpp"
|
||||
#include "opto/phaseX.hpp"
|
||||
|
||||
Node* DecodeNNode::Identity(PhaseTransform* phase) {
|
||||
const Type *t = phase->type( in(1) );
|
||||
if( t == Type::TOP ) return in(1);
|
||||
|
||||
if (in(1)->is_EncodeP()) {
|
||||
// (DecodeN (EncodeP p)) -> p
|
||||
return in(1)->in(1);
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
||||
const Type *DecodeNNode::Value( PhaseTransform *phase ) const {
|
||||
const Type *t = phase->type( in(1) );
|
||||
if (t == Type::TOP) return Type::TOP;
|
||||
if (t == TypeNarrowOop::NULL_PTR) return TypePtr::NULL_PTR;
|
||||
|
||||
assert(t->isa_narrowoop(), "only narrowoop here");
|
||||
return t->make_ptr();
|
||||
}
|
||||
|
||||
Node* EncodePNode::Identity(PhaseTransform* phase) {
|
||||
const Type *t = phase->type( in(1) );
|
||||
if( t == Type::TOP ) return in(1);
|
||||
|
||||
if (in(1)->is_DecodeN()) {
|
||||
// (EncodeP (DecodeN p)) -> p
|
||||
return in(1)->in(1);
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
||||
const Type *EncodePNode::Value( PhaseTransform *phase ) const {
|
||||
const Type *t = phase->type( in(1) );
|
||||
if (t == Type::TOP) return Type::TOP;
|
||||
if (t == TypePtr::NULL_PTR) return TypeNarrowOop::NULL_PTR;
|
||||
|
||||
assert(t->isa_oop_ptr(), "only oopptr here");
|
||||
return t->make_narrowoop();
|
||||
}
|
||||
|
||||
|
||||
Node *EncodeNarrowPtrNode::Ideal_DU_postCCP( PhaseCCP *ccp ) {
|
||||
return MemNode::Ideal_common_DU_postCCP(ccp, this, in(1));
|
||||
}
|
||||
|
||||
Node* DecodeNKlassNode::Identity(PhaseTransform* phase) {
|
||||
const Type *t = phase->type( in(1) );
|
||||
if( t == Type::TOP ) return in(1);
|
||||
|
||||
if (in(1)->is_EncodePKlass()) {
|
||||
// (DecodeNKlass (EncodePKlass p)) -> p
|
||||
return in(1)->in(1);
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
||||
const Type *DecodeNKlassNode::Value( PhaseTransform *phase ) const {
|
||||
const Type *t = phase->type( in(1) );
|
||||
if (t == Type::TOP) return Type::TOP;
|
||||
assert(t != TypeNarrowKlass::NULL_PTR, "null klass?");
|
||||
|
||||
assert(t->isa_narrowklass(), "only narrow klass ptr here");
|
||||
return t->make_ptr();
|
||||
}
|
||||
|
||||
Node* EncodePKlassNode::Identity(PhaseTransform* phase) {
|
||||
const Type *t = phase->type( in(1) );
|
||||
if( t == Type::TOP ) return in(1);
|
||||
|
||||
if (in(1)->is_DecodeNKlass()) {
|
||||
// (EncodePKlass (DecodeNKlass p)) -> p
|
||||
return in(1)->in(1);
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
||||
const Type *EncodePKlassNode::Value( PhaseTransform *phase ) const {
|
||||
const Type *t = phase->type( in(1) );
|
||||
if (t == Type::TOP) return Type::TOP;
|
||||
assert (t != TypePtr::NULL_PTR, "null klass?");
|
||||
|
||||
assert(UseCompressedClassPointers && t->isa_klassptr(), "only klass ptr here");
|
||||
return t->make_narrowklass();
|
||||
}
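
The Identity rules above cancel adjacent encode/decode pairs; underneath sits the usual compressed-pointer arithmetic. A simplified sketch assuming a fixed heap base and 8-byte alignment (the real parameters live elsewhere in the VM; narrow 0 maps to NULL, matching the NULL_PTR cases in Value above):

    #include <cstdint>

    static const uintptr_t kHeapBase = 0x0000000800000000ull;  // assumed base
    static const unsigned  kShift    = 3;                      // assumed 8-byte alignment

    // EncodeP: full oop pointer -> 32-bit narrow oop.
    inline uint32_t encode_p(uintptr_t p) {
      return p == 0 ? 0u : (uint32_t)((p - kHeapBase) >> kShift);
    }

    // DecodeN: narrow oop -> full pointer. decode_n(encode_p(p)) == p for any
    // properly aligned in-heap p, which is the (DecodeN (EncodeP p)) -> p rule.
    inline uintptr_t decode_n(uint32_t n) {
      return n == 0 ? 0 : kHeapBase + ((uintptr_t)n << kShift);
    }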

119 hotspot/src/share/vm/opto/narrowptrnode.hpp (new file)
@ -0,0 +1,119 @@
/* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * (Standard GPL v2 license header, identical to the one in movenode.cpp above.) */

#ifndef SHARE_VM_OPTO_NARROWPTRNODE_HPP
#define SHARE_VM_OPTO_NARROWPTRNODE_HPP

#include "opto/node.hpp"
#include "opto/opcodes.hpp"

//------------------------------EncodeNarrowPtr--------------------------------
class EncodeNarrowPtrNode : public TypeNode {
protected:
EncodeNarrowPtrNode(Node* value, const Type* type):
TypeNode(type, 2) {
init_class_id(Class_EncodeNarrowPtr);
init_req(0, NULL);
init_req(1, value);
}
public:
virtual uint ideal_reg() const { return Op_RegN; }
virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );
};

//------------------------------EncodeP--------------------------------
// Encodes an oop pointer into its compressed form
// Takes an extra argument which is the real heap base as a long which
// may be useful for code generation in the backend.
class EncodePNode : public EncodeNarrowPtrNode {
public:
EncodePNode(Node* value, const Type* type):
EncodeNarrowPtrNode(value, type) {
init_class_id(Class_EncodeP);
}
virtual int Opcode() const;
virtual Node *Identity( PhaseTransform *phase );
virtual const Type *Value( PhaseTransform *phase ) const;
};

//------------------------------EncodePKlass--------------------------------
// Encodes a klass pointer into its compressed form
// Takes an extra argument which is the real heap base as a long which
// may be useful for code generation in the backend.
class EncodePKlassNode : public EncodeNarrowPtrNode {
public:
EncodePKlassNode(Node* value, const Type* type):
EncodeNarrowPtrNode(value, type) {
init_class_id(Class_EncodePKlass);
}
virtual int Opcode() const;
virtual Node *Identity( PhaseTransform *phase );
virtual const Type *Value( PhaseTransform *phase ) const;
};

//------------------------------DecodeNarrowPtr--------------------------------
class DecodeNarrowPtrNode : public TypeNode {
protected:
DecodeNarrowPtrNode(Node* value, const Type* type):
TypeNode(type, 2) {
init_class_id(Class_DecodeNarrowPtr);
init_req(0, NULL);
init_req(1, value);
}
public:
virtual uint ideal_reg() const { return Op_RegP; }
};

//------------------------------DecodeN--------------------------------
// Converts a narrow oop into a real oop ptr.
// Takes an extra argument which is the real heap base as a long which
// may be useful for code generation in the backend.
class DecodeNNode : public DecodeNarrowPtrNode {
public:
DecodeNNode(Node* value, const Type* type):
DecodeNarrowPtrNode(value, type) {
init_class_id(Class_DecodeN);
}
virtual int Opcode() const;
virtual const Type *Value( PhaseTransform *phase ) const;
virtual Node *Identity( PhaseTransform *phase );
};

//------------------------------DecodeNKlass--------------------------------
// Converts a narrow klass pointer into a real klass ptr.
// Takes an extra argument which is the real heap base as a long which
// may be useful for code generation in the backend.
class DecodeNKlassNode : public DecodeNarrowPtrNode {
public:
DecodeNKlassNode(Node* value, const Type* type):
DecodeNarrowPtrNode(value, type) {
init_class_id(Class_DecodeNKlass);
}
virtual int Opcode() const;
virtual const Type *Value( PhaseTransform *phase ) const;
virtual Node *Identity( PhaseTransform *phase );
};

#endif // SHARE_VM_OPTO_NARROWPTRNODE_HPP

63 hotspot/src/share/vm/opto/opaquenode.cpp (new file)
@ -0,0 +1,63 @@
/* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * (Standard GPL v2 license header, identical to the one in movenode.cpp above.) */

#include "precompiled.hpp"
|
||||
#include "opto/opaquenode.hpp"
|
||||
#include "opto/phaseX.hpp"
|
||||
|
||||
//=============================================================================
|
||||
// Do not allow value-numbering
|
||||
uint Opaque1Node::hash() const { return NO_HASH; }
|
||||
uint Opaque1Node::cmp( const Node &n ) const {
|
||||
return (&n == this); // Always fail except on self
|
||||
}
|
||||
|
||||
//------------------------------Identity---------------------------------------
|
||||
// If _major_progress, then more loop optimizations follow. Do NOT remove
|
||||
// the opaque Node until no more loop ops can happen. Note the timing of
|
||||
// _major_progress; it's set in the major loop optimizations THEN comes the
|
||||
// call to IterGVN and any chance of hitting this code. Hence there's no
|
||||
// phase-ordering problem with stripping Opaque1 in IGVN followed by some
|
||||
// more loop optimizations that require it.
|
||||
Node *Opaque1Node::Identity( PhaseTransform *phase ) {
|
||||
return phase->C->major_progress() ? this : in(1);
|
||||
}
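
So the wrapper is opaque exactly while loop optimizations are still making progress, then quietly dissolves into its input. A minimal model of that phase-dependent identity (illustrative, not the real API):

    struct Ctx  { bool major_progress; };
    struct Node { Node* input; };

    // While loop opts may still run, keep the opaque wrapper; afterwards the
    // node answers Identity with its input and normal optimization resumes.
    inline Node* opaque1_identity(Ctx* c, Node* self) {
      return c->major_progress ? self : self->input;
    }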

//=============================================================================
// A node to prevent unwanted optimizations.  Allows constant folding.  Stops
// value-numbering, most Ideal calls or Identity functions.  This Node is
// specifically designed to prevent the pre-increment value of a loop trip
// counter from being live out of the bottom of the loop (hence causing the
// pre- and post-increment values both being live and thus requiring an extra
// temp register and an extra move).  If we "accidentally" optimize through
// this kind of a Node, we'll get slightly pessimal, but correct, code.  Thus
// it's OK to be slightly sloppy on optimizations here.

// Do not allow value-numbering
uint Opaque2Node::hash() const { return NO_HASH; }
uint Opaque2Node::cmp( const Node &n ) const {
return (&n == this); // Always fail except on self
}

91 hotspot/src/share/vm/opto/opaquenode.hpp (new file)
@ -0,0 +1,91 @@
/* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * (Standard GPL v2 license header, identical to the one in movenode.cpp above.) */

#ifndef SHARE_VM_OPTO_OPAQUENODE_HPP
#define SHARE_VM_OPTO_OPAQUENODE_HPP

#include "opto/node.hpp"
#include "opto/opcodes.hpp"

//------------------------------Opaque1Node------------------------------------
// A node to prevent unwanted optimizations.  Allows constant folding.
// Stops value-numbering, Ideal calls or Identity functions.
class Opaque1Node : public Node {
virtual uint hash() const ; // { return NO_HASH; }
virtual uint cmp( const Node &n ) const;
public:
Opaque1Node( Compile* C, Node *n ) : Node(0,n) {
// Put it on the Macro nodes list to be removed during macro nodes expansion.
init_flags(Flag_is_macro);
C->add_macro_node(this);
}
// Special version for the pre-loop to hold the original loop limit
// which is consumed by range check elimination.
Opaque1Node( Compile* C, Node *n, Node* orig_limit ) : Node(0,n,orig_limit) {
// Put it on the Macro nodes list to be removed during macro nodes expansion.
init_flags(Flag_is_macro);
C->add_macro_node(this);
}
Node* original_loop_limit() { return req()==3 ? in(2) : NULL; }
virtual int Opcode() const;
virtual const Type *bottom_type() const { return TypeInt::INT; }
virtual Node *Identity( PhaseTransform *phase );
};

//------------------------------Opaque2Node------------------------------------
// A node to prevent unwanted optimizations.  Allows constant folding.  Stops
// value-numbering, most Ideal calls or Identity functions.  This Node is
// specifically designed to prevent the pre-increment value of a loop trip
// counter from being live out of the bottom of the loop (hence causing the
// pre- and post-increment values both being live and thus requiring an extra
// temp register and an extra move).  If we "accidentally" optimize through
// this kind of a Node, we'll get slightly pessimal, but correct, code.  Thus
// it's OK to be slightly sloppy on optimizations here.
class Opaque2Node : public Node {
virtual uint hash() const ; // { return NO_HASH; }
virtual uint cmp( const Node &n ) const;
public:
Opaque2Node( Compile* C, Node *n ) : Node(0,n) {
// Put it on the Macro nodes list to be removed during macro nodes expansion.
init_flags(Flag_is_macro);
C->add_macro_node(this);
}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return TypeInt::INT; }
};

//------------------------------Opaque3Node------------------------------------
// A node to prevent unwanted optimizations.  Will be optimized only during
// macro nodes expansion.
class Opaque3Node : public Opaque2Node {
int _opt; // what optimization it was used for
public:
enum { RTM_OPT };
Opaque3Node(Compile* C, Node *n, int opt) : Opaque2Node(C, n), _opt(opt) {}
virtual int Opcode() const;
bool rtm_opt() const { return (_opt == RTM_OPT); }
};

#endif // SHARE_VM_OPTO_OPAQUENODE_HPP

@ -27,9 +27,11 @@
#include "interpreter/linkResolver.hpp"
#include "oops/method.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/locknode.hpp"
#include "opto/memnode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"

@ -30,6 +30,8 @@
#include "interpreter/linkResolver.hpp"
#include "memory/universe.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/divnode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/matcher.hpp"

@ -1288,7 +1290,7 @@ void Parse::sharpen_type_after_if(BoolTest::mask btest,
(jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) {
TypeNode* ccast = new (C) CheckCastPPNode(control(), obj, tboth);
const Type* tcc = ccast->as_Type()->type();
assert(tcc != obj_type && tcc->higher_equal_speculative(obj_type), "must improve");
assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve");
// Delay transform() call to allow recovery of pre-cast value
// at the control merge.
_gvn.set_type_bottom(ccast);

@ -1352,7 +1354,7 @@ void Parse::sharpen_type_after_if(BoolTest::mask btest,

if (ccast != NULL) {
const Type* tcc = ccast->as_Type()->type();
assert(tcc != tval && tcc->higher_equal_speculative(tval), "must improve");
assert(tcc != tval && tcc->higher_equal(tval), "must improve");
// Delay transform() call to allow recovery of pre-cast value
// at the control merge.
ccast->set_req(0, control());

@ -1393,7 +1395,7 @@ Node* Parse::optimize_cmp_with_klass(Node* c) {
Node* addp = load_klass->in(2);
Node* obj = addp->in(AddPNode::Address);
const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
if (obj_type->speculative_type() != NULL) {
if (obj_type->speculative_type_not_null() != NULL) {
ciKlass* k = obj_type->speculative_type();
inc_sp(2);
obj = maybe_cast_profiled_obj(obj, k);

@ -2277,6 +2279,14 @@ void Parse::do_one_bytecode() {
maybe_add_safepoint(iter().get_dest());
a = null();
b = pop();
if (!_gvn.type(b)->speculative_maybe_null() &&
!too_many_traps(Deoptimization::Reason_speculate_null_check)) {
inc_sp(1);
Node* null_ctl = top();
b = null_check_oop(b, &null_ctl, true, true, true);
assert(null_ctl->is_top(), "no null control here");
dec_sp(1);
}
c = _gvn.transform( new (C) CmpPNode(b, a) );
do_ifnull(btest, c);
break;
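
The new lines speculate from profiling that b is never null: instead of keeping both branch outcomes alive, the compiler emits a null check that deoptimizes on failure, after which the CmpP against null is constant and the ifnull branch folds. A source-level sketch of the speculation pattern (illustrative; uncommon_trap stands in for C2's deoptimization machinery):

    struct Obj { int field; };
    [[noreturn]] void uncommon_trap();  // deoptimize, recompile without the speculation

    // Profiling says 'p' was never null here, so trap instead of branching;
    // downstream null tests on 'p' become provably dead and fold away.
    inline int with_null_speculation(Obj* p) {
      if (p == nullptr) uncommon_trap();
      return p->field;
    }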

@ -28,6 +28,7 @@
#include "memory/universe.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/memnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"

@ -27,7 +27,6 @@
#include "opto/block.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"

@ -330,7 +329,7 @@ void NodeHash::check_no_speculative_types() {
Node *sentinel_node = sentinel();
for (uint i = 0; i < max; ++i) {
Node *n = at(i);
if(n != NULL && n != sentinel_node && n->is_Type()) {
if(n != NULL && n != sentinel_node && n->is_Type() && n->outcnt() > 0) {
TypeNode* tn = n->as_Type();
const Type* t = tn->type();
const Type* t_no_spec = t->remove_speculative();

@ -48,7 +48,6 @@
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/graphKit.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"

@ -25,8 +25,8 @@
#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/callnode.hpp"
#include "opto/connode.hpp"
#include "opto/loopnode.hpp"
#include "opto/movenode.hpp"

//------------------------------split_thru_region------------------------------

@ -28,9 +28,9 @@
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/loopnode.hpp"
#include "opto/matcher.hpp"
#include "opto/movenode.hpp"
#include "opto/mulnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
Some files were not shown because too many files have changed in this diff.