Alejandro Murillo 2014-04-04 10:04:44 -07:00
commit 40609dfd04
146 changed files with 5416 additions and 4253 deletions

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -162,28 +162,27 @@ open_debug_file (const char *pathname, unsigned int crc)
static struct elf_section *find_section_by_name(char *name,
int fd,
ELF_EHDR *ehdr,
ELF_SHDR *shbuf,
struct elf_section *scn_cache)
{
ELF_SHDR* cursct = NULL;
char *strtab;
int cnt;
int strtab_size;
// The section cache must already contain data for the e_shstrndx section.
// If it does not, the ELF file is broken, so just bail out.
if (scn_cache[ehdr->e_shstrndx].c_data == NULL) {
if ((scn_cache[ehdr->e_shstrndx].c_data
= read_section_data(fd, ehdr, cursct)) == NULL) {
return NULL;
}
return NULL;
}
strtab = scn_cache[ehdr->e_shstrndx].c_data;
strtab_size = scn_cache[ehdr->e_shstrndx].c_shdr->sh_size;
for (cursct = shbuf, cnt = 0;
cnt < ehdr->e_shnum;
cnt++, cursct++) {
if (strcmp(cursct->sh_name + strtab, name) == 0) {
scn_cache[cnt].c_data = read_section_data(fd, ehdr, cursct);
return &scn_cache[cnt];
for (cnt = 0; cnt < ehdr->e_shnum; ++cnt) {
if (scn_cache[cnt].c_shdr->sh_name < strtab_size) {
if (strcmp(scn_cache[cnt].c_shdr->sh_name + strtab, name) == 0) {
scn_cache[cnt].c_data = read_section_data(fd, ehdr, scn_cache[cnt].c_shdr);
return &scn_cache[cnt];
}
}
}
@ -195,12 +194,11 @@ static struct elf_section *find_section_by_name(char *name,
static int open_file_from_debug_link(const char *name,
int fd,
ELF_EHDR *ehdr,
ELF_SHDR *shbuf,
struct elf_section *scn_cache)
{
int debug_fd;
struct elf_section *debug_link = find_section_by_name(".gnu_debuglink", fd, ehdr,
shbuf, scn_cache);
scn_cache);
if (debug_link == NULL)
return -1;
char *debug_filename = debug_link->c_data;
@ -221,7 +219,6 @@ static int open_file_from_debug_link(const char *name,
/* Look in the same directory as the object. */
strcpy(last_slash+1, debug_filename);
debug_fd = open_debug_file(debug_pathname, crc);
if (debug_fd >= 0) {
free(debug_pathname);
@ -261,10 +258,9 @@ static struct symtab* build_symtab_internal(int fd, const char *filename, bool t
static struct symtab *build_symtab_from_debug_link(const char *name,
int fd,
ELF_EHDR *ehdr,
ELF_SHDR *shbuf,
struct elf_section *scn_cache)
{
fd = open_file_from_debug_link(name, fd, ehdr, shbuf, scn_cache);
fd = open_file_from_debug_link(name, fd, ehdr, scn_cache);
if (fd >= 0) {
struct symtab *symtab = build_symtab_internal(fd, NULL, /* try_debuginfo */ false);
@ -463,7 +459,7 @@ static struct symtab* build_symtab_internal(int fd, const char *filename, bool t
// Then, if that doesn't work, the debug link
if (symtab == NULL) {
symtab = build_symtab_from_debug_link(filename, fd, &ehdr, shbuf,
symtab = build_symtab_from_debug_link(filename, fd, &ehdr,
scn_cache);
}
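The fix above guards sh_name against the string-table size before handing it to strcmp, so a corrupt section header can no longer index past the end of the string table. A minimal standalone sketch of the same validation idiom (hypothetical names, not HotSpot's API):

#include <stddef.h>
#include <stdint.h>

// Sketch: validate an ELF section-name offset before dereferencing it.
// 'strtab' and 'strtab_size' come from the e_shstrndx section, as above.
static const char *section_name(const char *strtab, size_t strtab_size,
                                uint32_t sh_name) {
  // A broken ELF file can carry an sh_name past the end of the string
  // table; checking first turns an out-of-bounds read into a NULL result.
  if (sh_name >= strtab_size) return NULL;
  return strtab + sh_name;
}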

View File

@ -66,8 +66,8 @@ ifndef CC_INTERP
FORCE_TIERED=1
endif
endif
# C1 is not ported on ppc64(le), so we cannot build a tiered VM:
ifneq (,$(filter $(ARCH),ppc64 ppc64le))
# C1 is not ported on ppc64, so we cannot build a tiered VM:
ifeq ($(ARCH),ppc64)
FORCE_TIERED=0
endif

View File

@ -33,6 +33,11 @@ SLASH_JAVA ?= /java
# ARCH can be set explicitly in spec.gmk
ifndef ARCH
ARCH := $(shell uname -m)
# Fold little endian PowerPC64 into big-endian (if ARCH is set in
# hotspot-spec.gmk, this will be done by the configure script).
ifeq ($(ARCH),ppc64le)
ARCH := ppc64
endif
endif
PATH_SEP ?= :

View File

@ -337,56 +337,20 @@ endif
ifeq ($(DEBUG_BINARIES), true)
CFLAGS += -g
else
# Use the stabs format for debugging information (this is the default
# on gcc-2.91). It's good enough, has all the information about line
# numbers and local variables, and libjvm.so is only about 16M.
# Change this back to "-g" if you want the most expressive format.
# (warning: that could easily inflate libjvm.so to 150M!)
# Note: The Itanium gcc compiler crashes when using -gstabs.
DEBUG_CFLAGS/ia64 = -g
DEBUG_CFLAGS/amd64 = -g
DEBUG_CFLAGS/arm = -g
DEBUG_CFLAGS/ppc = -g
DEBUG_CFLAGS/ppc64 = -g
DEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),)
ifeq ($(USE_CLANG), true)
# Clang doesn't understand -gstabs
DEBUG_CFLAGS += -g
else
DEBUG_CFLAGS += -gstabs
endif
DEBUG_CFLAGS += -g
endif
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
FASTDEBUG_CFLAGS/ia64 = -g
FASTDEBUG_CFLAGS/amd64 = -g
FASTDEBUG_CFLAGS/arm = -g
FASTDEBUG_CFLAGS/ppc = -g
FASTDEBUG_CFLAGS/ppc64 = -g
FASTDEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
FASTDEBUG_CFLAGS += $(FASTDEBUG_CFLAGS/$(BUILDARCH))
ifeq ($(FASTDEBUG_CFLAGS/$(BUILDARCH)),)
ifeq ($(USE_CLANG), true)
# Clang doesn't understand -gstabs
FASTDEBUG_CFLAGS += -g
else
FASTDEBUG_CFLAGS += -gstabs
endif
FASTDEBUG_CFLAGS += -g
endif
OPT_CFLAGS/ia64 = -g
OPT_CFLAGS/amd64 = -g
OPT_CFLAGS/arm = -g
OPT_CFLAGS/ppc = -g
OPT_CFLAGS/ppc64 = -g
OPT_CFLAGS += $(OPT_CFLAGS/$(BUILDARCH))
ifeq ($(OPT_CFLAGS/$(BUILDARCH)),)
ifeq ($(USE_CLANG), true)
# Clang doesn't understand -gstabs
OPT_CFLAGS += -g
else
OPT_CFLAGS += -gstabs
endif
OPT_CFLAGS += -g
endif
endif
endif

View File

@ -26,14 +26,26 @@
# make c code know it is on a 64 bit platform.
CFLAGS += -D_LP64=1
# fixes `relocation truncated to fit' error for gcc 4.1.
CFLAGS += -mminimal-toc
ifeq ($(origin OPENJDK_TARGET_CPU_ENDIAN),undefined)
# This can happen during hotspot standalone build. Set endianness from
# uname. We assume build and target machines are the same.
OPENJDK_TARGET_CPU_ENDIAN:=$(if $(filter ppc64le,$(shell uname -m)),little,big)
endif
# Generate code that uses ppc64 instructions, but schedule for power5.
CFLAGS += -mcpu=powerpc64 -mtune=power5 -minsert-sched-nops=regroup_exact -mno-multiple -mno-string
ifeq ($(filter $(OPENJDK_TARGET_CPU_ENDIAN),big little),)
$(error OPENJDK_TARGET_CPU_ENDIAN value should be 'big' or 'little')
endif
# let linker find external 64 bit libs.
LFLAGS_VM += -L/lib64
ifeq ($(OPENJDK_TARGET_CPU_ENDIAN),big)
# fixes `relocation truncated to fit' error for gcc 4.1.
CFLAGS += -mminimal-toc
# specify lib format.
LFLAGS_VM += -Wl,-melf64ppc
# Generate code that uses ppc64 instructions, but schedule for power5.
CFLAGS += -mcpu=powerpc64 -mtune=power5 -minsert-sched-nops=regroup_exact -mno-multiple -mno-string
else
# Little endian machine uses ELFv2 ABI.
CFLAGS += -DVM_LITTLE_ENDIAN -DABI_ELFv2
# Use Power8, this is the first CPU to support PPC64 LE with ELFv2 ABI.
CFLAGS += -mcpu=power7 -mtune=power8 -minsert-sched-nops=regroup_exact -mno-multiple -mno-string
endif
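The makefile branches on OPENJDK_TARGET_CPU_ENDIAN (falling back to uname -m) to pick the ABI and CFLAGS. For readers who want to confirm which configuration applies on a given machine, a tiny runtime probe in plain C++, independent of the build system, is enough:

#include <cstdint>
#include <cstdio>

int main() {
  // Store a known 32-bit pattern and inspect its first byte in memory:
  // little-endian machines (ppc64le, x86) see 0x78, big-endian see 0x12.
  uint32_t probe = 0x12345678;
  unsigned char first = *reinterpret_cast<unsigned char*>(&probe);
  std::printf("%s endian\n", first == 0x78 ? "little" : "big");
  return 0;
}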

View File

@ -260,7 +260,6 @@ ifeq ($(JVM_VARIANT_SERVER),true)
EXPORT_LIST += $(EXPORT_SERVER_DIR)/jvm.map
endif
endif
EXPORT_LIST += $(EXPORT_LIB_DIR)/jvm.lib
endif
ifeq ($(JVM_VARIANT_CLIENT),true)
EXPORT_LIST += $(EXPORT_CLIENT_DIR)/Xusage.txt
@ -275,6 +274,8 @@ ifeq ($(JVM_VARIANT_CLIENT),true)
endif
endif
EXPORT_LIST += $(EXPORT_LIB_DIR)/jvm.lib
ifeq ($(BUILD_WIN_SA), 1)
EXPORT_LIST += $(EXPORT_JRE_BIN_DIR)/sawindbg.$(LIBRARY_SUFFIX)
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)

View File

@ -1025,15 +1025,14 @@ class Assembler : public AbstractAssembler {
}
static void set_imm(int* instr, short s) {
short* p = ((short *)instr) + 1;
*p = s;
// imm is always in the lower 16 bits of the instruction,
// so this is endian-neutral. Same for the get_imm below.
uint32_t w = *(uint32_t *)instr;
*instr = (int)((w & ~0x0000FFFF) | (s & 0x0000FFFF));
}
static int get_imm(address a, int instruction_number) {
short imm;
short *p =((short *)a)+2*instruction_number+1;
imm = *p;
return (int)imm;
return (short)((int *)a)[instruction_number];
}
static inline int hi16_signed( int x) { return (int)(int16_t)(x >> 16); }
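The comment in set_imm explains why the new code is endian-neutral: the immediate lives in the low 16 bits of the 32-bit instruction value, so reading and masking the whole word gives the same result on either byte order, unlike the old code that poked a short at a fixed byte offset. A self-contained sketch of that idiom, under the same assumption of a 16-bit immediate in the low half-word:

#include <cassert>
#include <cstdint>

// Write the immediate by masking the whole 32-bit word (endian-neutral).
static void set_imm_sketch(uint32_t *instr, int16_t s) {
  *instr = (*instr & ~0x0000FFFFu) | (uint16_t)s;
}
// Read it back by truncating the word, mirroring get_imm above.
static int16_t get_imm_sketch(const uint32_t *instr) {
  return (int16_t)(*instr & 0xFFFF);
}

int main() {
  uint32_t word = 0x3C600000;   // example PPC-style instruction word
  set_imm_sketch(&word, -2);
  assert(get_imm_sketch(&word) == -2);
  return 0;
}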

View File

@ -35,6 +35,126 @@ class Bytes: AllStatic {
// Can I count on address always being a pointer to an unsigned char? Yes.
#if defined(VM_LITTLE_ENDIAN)
// Returns true if the byte ordering used by Java is different from the native byte ordering
// of the underlying machine. For example, true for Intel x86, false for Solaris on Sparc.
static inline bool is_Java_byte_ordering_different() { return true; }
// Forward declarations of the compiler-dependent implementation
static inline u2 swap_u2(u2 x);
static inline u4 swap_u4(u4 x);
static inline u8 swap_u8(u8 x);
static inline u2 get_native_u2(address p) {
return (intptr_t(p) & 1) == 0
? *(u2*)p
: ( u2(p[1]) << 8 )
| ( u2(p[0]) );
}
static inline u4 get_native_u4(address p) {
switch (intptr_t(p) & 3) {
case 0: return *(u4*)p;
case 2: return ( u4( ((u2*)p)[1] ) << 16 )
| ( u4( ((u2*)p)[0] ) );
default: return ( u4(p[3]) << 24 )
| ( u4(p[2]) << 16 )
| ( u4(p[1]) << 8 )
| u4(p[0]);
}
}
static inline u8 get_native_u8(address p) {
switch (intptr_t(p) & 7) {
case 0: return *(u8*)p;
case 4: return ( u8( ((u4*)p)[1] ) << 32 )
| ( u8( ((u4*)p)[0] ) );
case 2: return ( u8( ((u2*)p)[3] ) << 48 )
| ( u8( ((u2*)p)[2] ) << 32 )
| ( u8( ((u2*)p)[1] ) << 16 )
| ( u8( ((u2*)p)[0] ) );
default: return ( u8(p[7]) << 56 )
| ( u8(p[6]) << 48 )
| ( u8(p[5]) << 40 )
| ( u8(p[4]) << 32 )
| ( u8(p[3]) << 24 )
| ( u8(p[2]) << 16 )
| ( u8(p[1]) << 8 )
| u8(p[0]);
}
}
static inline void put_native_u2(address p, u2 x) {
if ( (intptr_t(p) & 1) == 0 ) *(u2*)p = x;
else {
p[1] = x >> 8;
p[0] = x;
}
}
static inline void put_native_u4(address p, u4 x) {
switch ( intptr_t(p) & 3 ) {
case 0: *(u4*)p = x;
break;
case 2: ((u2*)p)[1] = x >> 16;
((u2*)p)[0] = x;
break;
default: ((u1*)p)[3] = x >> 24;
((u1*)p)[2] = x >> 16;
((u1*)p)[1] = x >> 8;
((u1*)p)[0] = x;
break;
}
}
static inline void put_native_u8(address p, u8 x) {
switch ( intptr_t(p) & 7 ) {
case 0: *(u8*)p = x;
break;
case 4: ((u4*)p)[1] = x >> 32;
((u4*)p)[0] = x;
break;
case 2: ((u2*)p)[3] = x >> 48;
((u2*)p)[2] = x >> 32;
((u2*)p)[1] = x >> 16;
((u2*)p)[0] = x;
break;
default: ((u1*)p)[7] = x >> 56;
((u1*)p)[6] = x >> 48;
((u1*)p)[5] = x >> 40;
((u1*)p)[4] = x >> 32;
((u1*)p)[3] = x >> 24;
((u1*)p)[2] = x >> 16;
((u1*)p)[1] = x >> 8;
((u1*)p)[0] = x;
}
}
// Efficient reading and writing of unaligned unsigned data in Java byte ordering (i.e. big-endian ordering)
// (byte-order reversal via swap_u* is needed here, since little-endian Power differs from Java's big-endian ordering).
static inline u2 get_Java_u2(address p) { return swap_u2(get_native_u2(p)); }
static inline u4 get_Java_u4(address p) { return swap_u4(get_native_u4(p)); }
static inline u8 get_Java_u8(address p) { return swap_u8(get_native_u8(p)); }
static inline void put_Java_u2(address p, u2 x) { put_native_u2(p, swap_u2(x)); }
static inline void put_Java_u4(address p, u4 x) { put_native_u4(p, swap_u4(x)); }
static inline void put_Java_u8(address p, u8 x) { put_native_u8(p, swap_u8(x)); }
#else // !defined(VM_LITTLE_ENDIAN)
// Returns true if the byte ordering used by Java is different from the native byte ordering
// of the underlying machine. For example, true for Intel x86, false for Solaris on Sparc.
static inline bool is_Java_byte_ordering_different() { return false; }
@ -150,6 +270,12 @@ class Bytes: AllStatic {
static inline void put_Java_u2(address p, u2 x) { put_native_u2(p, x); }
static inline void put_Java_u4(address p, u4 x) { put_native_u4(p, x); }
static inline void put_Java_u8(address p, u8 x) { put_native_u8(p, x); }
#endif // VM_LITTLE_ENDIAN
};
#if defined(TARGET_OS_ARCH_linux_ppc)
#include "bytes_linux_ppc.inline.hpp"
#endif
#endif // CPU_PPC_VM_BYTES_PPC_HPP
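The new little-endian accessors above dispatch on pointer alignment before touching memory. A quick round-trip check of that behavior can be written against plain memory; this hypothetical harness uses memcpy as a portable stand-in for the hand-written alignment cases:

#include <cassert>
#include <cstdint>
#include <cstring>

// Portable stand-ins for put_native_u8/get_native_u8: memcpy lets the
// compiler pick a safe unaligned access, which is what the switch on
// (intptr_t(p) & 7) above achieves by hand for each alignment case.
static void put_u8(unsigned char *p, uint64_t x) { std::memcpy(p, &x, 8); }
static uint64_t get_u8(const unsigned char *p) {
  uint64_t x;
  std::memcpy(&x, p, 8);
  return x;
}

int main() {
  unsigned char buf[16] = {0};
  for (int off = 0; off < 8; off++) {   // exercise every alignment case
    put_u8(buf + off, 0x0123456789ABCDEFull);
    assert(get_u8(buf + off) == 0x0123456789ABCDEFull);
  }
  return 0;
}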

View File

@ -1672,7 +1672,7 @@ address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
//__ flush_bundle();
address entry = __ pc();
char *bname = NULL;
const char *bname = NULL;
uint tsize = 0;
switch(state) {
case ftos:

View File

@ -799,7 +799,13 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
if (UseCompressedOops && !wide) {
__ movl(as_Address(addr), (int32_t)NULL_WORD);
} else {
#ifdef _LP64
__ xorptr(rscratch1, rscratch1);
null_check_here = code_offset();
__ movptr(as_Address(addr), rscratch1);
#else
__ movptr(as_Address(addr), NULL_WORD);
#endif
}
} else {
if (is_literal_address(addr)) {

View File

@ -59,9 +59,9 @@ static BufferBlob* stub_blob;
static const int stub_size = 600;
extern "C" {
typedef void (*getPsrInfo_stub_t)(void*);
typedef void (*get_cpu_info_stub_t)(void*);
}
static getPsrInfo_stub_t getPsrInfo_stub = NULL;
static get_cpu_info_stub_t get_cpu_info_stub = NULL;
class VM_Version_StubGenerator: public StubCodeGenerator {
@ -69,7 +69,7 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}
address generate_getPsrInfo() {
address generate_get_cpu_info() {
// Flags to test CPU type.
const uint32_t HS_EFL_AC = 0x40000;
const uint32_t HS_EFL_ID = 0x200000;
@ -81,13 +81,13 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4;
Label sef_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, ext_cpuid7, done;
StubCodeMark mark(this, "VM_Version", "getPsrInfo_stub");
StubCodeMark mark(this, "VM_Version", "get_cpu_info_stub");
# define __ _masm->
address start = __ pc();
//
// void getPsrInfo(VM_Version::CpuidInfo* cpuid_info);
// void get_cpu_info(VM_Version::CpuidInfo* cpuid_info);
//
// LP64: rcx and rdx are first and second argument registers on windows
@ -385,6 +385,14 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
};
void VM_Version::get_cpu_info_wrapper() {
get_cpu_info_stub(&_cpuid_info);
}
#ifndef CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED
#define CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(f) f()
#endif
void VM_Version::get_processor_features() {
_cpu = 4; // 486 by default
@ -395,7 +403,11 @@ void VM_Version::get_processor_features() {
if (!Use486InstrsOnly) {
// Get raw processor info
getPsrInfo_stub(&_cpuid_info);
// Some platforms (like Win*) need a wrapper around this call
// in order to properly handle a SEGV during the YMM register test.
CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(get_cpu_info_wrapper);
assert_is_initialized();
_cpu = extended_cpu_family();
_model = extended_cpu_model();
@ -986,14 +998,14 @@ void VM_Version::initialize() {
ResourceMark rm;
// Making this stub must be FIRST use of assembler
stub_blob = BufferBlob::create("getPsrInfo_stub", stub_size);
stub_blob = BufferBlob::create("get_cpu_info_stub", stub_size);
if (stub_blob == NULL) {
vm_exit_during_initialization("Unable to allocate getPsrInfo_stub");
vm_exit_during_initialization("Unable to allocate get_cpu_info_stub");
}
CodeBuffer c(stub_blob);
VM_Version_StubGenerator g(&c);
getPsrInfo_stub = CAST_TO_FN_PTR(getPsrInfo_stub_t,
g.generate_getPsrInfo());
get_cpu_info_stub = CAST_TO_FN_PTR(get_cpu_info_stub_t,
g.generate_get_cpu_info());
get_processor_features();
}
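CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED defaults to a plain call unless a platform header has already defined it, so only Windows pays for the exception wrapper. A minimal sketch of that default-override idiom (hypothetical names):

// A platform header (os_windows.inline.hpp in this changeset) defines the
// macro to route the call through an exception-handling wrapper; every
// other platform falls through to this no-op default.
#ifndef CALL_WITH_WRAPPER_IF_NEEDED
#define CALL_WITH_WRAPPER_IF_NEEDED(f) f()
#endif

static void cpu_probe() { /* run the SEGV-sensitive YMM register test */ }

void get_features() {
  CALL_WITH_WRAPPER_IF_NEEDED(cpu_probe);  // wrapped only where needed
}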

View File

@ -507,6 +507,7 @@ public:
// The value used to check the ymm registers after signal handling
static int ymm_test_value() { return 0xCAFEBABE; }
static void get_cpu_info_wrapper();
static void set_cpuinfo_segv_addr(address pc) { _cpuinfo_segv_addr = pc; }
static bool is_cpuinfo_segv_addr(address pc) { return _cpuinfo_segv_addr == pc; }
static void set_cpuinfo_cont_addr(address pc) { _cpuinfo_cont_addr = pc; }

View File

@ -60,8 +60,8 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
#include "services/attachListener.hpp"
#include "services/runtimeService.hpp"
@ -70,16 +70,6 @@
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/vmError.hpp"
#ifdef TARGET_ARCH_ppc
# include "assembler_ppc.inline.hpp"
# include "nativeInst_ppc.hpp"
#endif
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
// put OS-includes here (sorted alphabetically)
#include <errno.h>
@ -378,13 +368,14 @@ void os::Aix::query_multipage_support() {
assert(_page_size == SIZE_4K, "surprise!");
// query default data page size (default page size for C-Heap, pthread stacks and .bss).
// Query default data page size (default page size for C-Heap, pthread stacks and .bss).
// Default data page size is influenced either by linker options (-bdatapsize)
// or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
// default should be 4K.
size_t data_page_size = SIZE_4K;
{
void* p = ::malloc(SIZE_16M);
guarantee(p != NULL, "malloc failed");
data_page_size = os::Aix::query_pagesize(p);
::free(p);
}
@ -511,85 +502,76 @@ query_multipage_support_end:
} // end os::Aix::query_multipage_support()
// The code for this method was initially derived from the version in os_linux.cpp
// The code for this method was initially derived from the version in os_linux.cpp.
void os::init_system_properties_values() {
// The next few definitions allow the code to be verbatim:
#define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n), mtInternal)
#define DEFAULT_LIBPATH "/usr/lib:/lib"
#define EXTENSIONS_DIR "/lib/ext"
#define ENDORSED_DIR "/lib/endorsed"
// Buffer that fits several sprintfs.
// Note that the space for the trailing null is provided
// by the nulls included by the sizeof operator.
const size_t bufsize =
MAX3((size_t)MAXPATHLEN, // For dll_dir & friends.
(size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR), // extensions dir
(size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir
char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
// sysclasspath, java_home, dll_dir
char *home_path;
char *dll_path;
char *pslash;
char buf[MAXPATHLEN];
os::jvm_path(buf, sizeof(buf));
{
char *pslash;
os::jvm_path(buf, bufsize);
// Found the full path to libjvm.so.
// Now cut the path to <java_home>/jre if we can.
*(strrchr(buf, '/')) = '\0'; // get rid of /libjvm.so
pslash = strrchr(buf, '/');
if (pslash != NULL) {
*pslash = '\0'; // get rid of /{client|server|hotspot}
}
dll_path = malloc(strlen(buf) + 1);
strcpy(dll_path, buf);
Arguments::set_dll_dir(dll_path);
if (pslash != NULL) {
// Found the full path to libjvm.so.
// Now cut the path to <java_home>/jre if we can.
*(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
pslash = strrchr(buf, '/');
if (pslash != NULL) {
*pslash = '\0'; // get rid of /<arch>
*pslash = '\0'; // Get rid of /{client|server|hotspot}.
}
Arguments::set_dll_dir(buf);
if (pslash != NULL) {
pslash = strrchr(buf, '/');
if (pslash != NULL) {
*pslash = '\0'; // get rid of /lib
*pslash = '\0'; // Get rid of /<arch>.
pslash = strrchr(buf, '/');
if (pslash != NULL) {
*pslash = '\0'; // Get rid of /lib.
}
}
}
Arguments::set_java_home(buf);
set_boot_path('/', ':');
}
home_path = malloc(strlen(buf) + 1);
strcpy(home_path, buf);
Arguments::set_java_home(home_path);
// Where to look for native libraries.
if (!set_boot_path('/', ':')) return;
// Where to look for native libraries
// On Aix we get the user setting of LIBPATH
// On Aix we get the user setting of LIBPATH.
// Eventually, all the library path setting will be done here.
char *ld_library_path;
// Construct the invariant part of ld_library_path.
ld_library_path = (char *) malloc(sizeof(DEFAULT_LIBPATH));
sprintf(ld_library_path, DEFAULT_LIBPATH);
// Get the user setting of LIBPATH, and prepend it.
char *v = ::getenv("LIBPATH");
if (v == NULL) {
v = "";
}
char *t = ld_library_path;
// That's +1 for the colon and +1 for the trailing '\0'
ld_library_path = (char *) malloc(strlen(v) + 1 + strlen(t) + 1);
sprintf(ld_library_path, "%s:%s", v, t);
// Get the user setting of LIBPATH.
const char *v = ::getenv("LIBPATH");
const char *v_colon = ":";
if (v == NULL) { v = ""; v_colon = ""; }
// Concatenate user and invariant part of ld_library_path.
// That's +1 for the colon and +1 for the trailing '\0'.
char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal);
sprintf(ld_library_path, "%s%s" DEFAULT_LIBPATH, v, v_colon);
Arguments::set_library_path(ld_library_path);
FREE_C_HEAP_ARRAY(char, ld_library_path, mtInternal);
// Extensions directories
char* cbuf = malloc(strlen(Arguments::get_java_home()) + sizeof(EXTENSIONS_DIR));
sprintf(cbuf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
Arguments::set_ext_dirs(cbuf);
// Extensions directories.
sprintf(buf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
Arguments::set_ext_dirs(buf);
// Endorsed standards default directory.
cbuf = malloc(strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR));
sprintf(cbuf, "%s" ENDORSED_DIR, Arguments::get_java_home());
Arguments::set_endorsed_dirs(cbuf);
sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
Arguments::set_endorsed_dirs(buf);
FREE_C_HEAP_ARRAY(char, buf, mtInternal);
#undef malloc
#undef DEFAULT_LIBPATH
#undef EXTENSIONS_DIR
#undef ENDORSED_DIR
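Two idioms recur throughout the rewritten path-setup code: sizeof on a string literal already counts the trailing NUL (so that byte can pay for a joining colon), and an empty-string/empty-separator pair avoids a conditional sprintf when an environment variable is unset. A self-contained sketch, assuming plain malloc in place of NEW_C_HEAP_ARRAY:

#include <cstdio>
#include <cstdlib>
#include <cstring>

#define DEFAULT_LIBPATH "/usr/lib:/lib"

int main() {
  const char *v = std::getenv("LIBPATH");
  const char *v_colon = ":";
  if (v == NULL) { v = ""; v_colon = ""; }  // both collapse when unset
  // sizeof(DEFAULT_LIBPATH) includes its '\0'; that byte covers the colon,
  // and the extra +1 covers the final terminator.
  char *path = (char *)std::malloc(std::strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1);
  std::sprintf(path, "%s%s" DEFAULT_LIBPATH, v, v_colon);
  std::puts(path);
  std::free(path);
  return 0;
}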

View File

@ -306,9 +306,6 @@ static const char *get_home() {
#endif
void os::init_system_properties_values() {
// char arch[12];
// sysinfo(SI_ARCHITECTURE, arch, sizeof(arch));
// The next steps are taken in the product version:
//
// Obtain the JAVA_HOME value from the location of libjvm.so.
@ -335,199 +332,205 @@ void os::init_system_properties_values() {
// Important note: if the location of libjvm.so changes this
// code needs to be changed accordingly.
// The next few definitions allow the code to be verbatim:
#define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n), mtInternal)
#define getenv(n) ::getenv(n)
/*
* See ld(1):
* The linker uses the following search paths to locate required
* shared libraries:
* 1: ...
* ...
* 7: The default directories, normally /lib and /usr/lib.
*/
// See ld(1):
// The linker uses the following search paths to locate required
// shared libraries:
// 1: ...
// ...
// 7: The default directories, normally /lib and /usr/lib.
#ifndef DEFAULT_LIBPATH
#define DEFAULT_LIBPATH "/lib:/usr/lib"
#endif
// Base path of extensions installed on the system.
#define SYS_EXT_DIR "/usr/java/packages"
#define EXTENSIONS_DIR "/lib/ext"
#define ENDORSED_DIR "/lib/endorsed"
#define REG_DIR "/usr/java/packages"
#ifdef __APPLE__
#define SYS_EXTENSIONS_DIR "/Library/Java/Extensions"
#define SYS_EXTENSIONS_DIRS SYS_EXTENSIONS_DIR ":/Network" SYS_EXTENSIONS_DIR ":/System" SYS_EXTENSIONS_DIR ":/usr/lib/java"
const char *user_home_dir = get_home();
// the null in SYS_EXTENSIONS_DIRS counts for the size of the colon after user_home_dir
int system_ext_size = strlen(user_home_dir) + sizeof(SYS_EXTENSIONS_DIR) +
sizeof(SYS_EXTENSIONS_DIRS);
#endif
{
/* sysclasspath, java_home, dll_dir */
{
char *home_path;
char *dll_path;
char *pslash;
char buf[MAXPATHLEN];
os::jvm_path(buf, sizeof(buf));
// Found the full path to libjvm.so.
// Now cut the path to <java_home>/jre if we can.
*(strrchr(buf, '/')) = '\0'; /* get rid of /libjvm.so */
pslash = strrchr(buf, '/');
if (pslash != NULL)
*pslash = '\0'; /* get rid of /{client|server|hotspot} */
dll_path = malloc(strlen(buf) + 1);
if (dll_path == NULL)
return;
strcpy(dll_path, buf);
Arguments::set_dll_dir(dll_path);
if (pslash != NULL) {
pslash = strrchr(buf, '/');
if (pslash != NULL) {
*pslash = '\0'; /* get rid of /<arch> (/lib on macosx) */
#ifndef __APPLE__
pslash = strrchr(buf, '/');
if (pslash != NULL)
*pslash = '\0'; /* get rid of /lib */
#endif
}
}
home_path = malloc(strlen(buf) + 1);
if (home_path == NULL)
return;
strcpy(home_path, buf);
Arguments::set_java_home(home_path);
// Buffer that fits several sprintfs.
// Note that the space for the colon and the trailing null are provided
// by the nulls included by the sizeof operator.
const size_t bufsize =
MAX3((size_t)MAXPATHLEN, // For dll_dir & friends.
(size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR), // extensions dir
(size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir
char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
if (!set_boot_path('/', ':'))
return;
// sysclasspath, java_home, dll_dir
{
char *pslash;
os::jvm_path(buf, bufsize);
// Found the full path to libjvm.so.
// Now cut the path to <java_home>/jre if we can.
*(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
pslash = strrchr(buf, '/');
if (pslash != NULL) {
*pslash = '\0'; // Get rid of /{client|server|hotspot}.
}
Arguments::set_dll_dir(buf);
/*
* Where to look for native libraries
*
* Note: Due to a legacy implementation, most of the library path
* is set in the launcher. This was to accommodate linking restrictions
* on legacy Bsd implementations (which are no longer supported).
* Eventually, all the library path setting will be done here.
*
* However, to prevent the proliferation of improperly built native
* libraries, the new path component /usr/java/packages is added here.
* Eventually, all the library path setting will be done here.
*/
{
char *ld_library_path;
/*
* Construct the invariant part of ld_library_path. Note that the
* space for the colon and the trailing null are provided by the
* nulls included by the sizeof operator (so actually we allocate
* a byte more than necessary).
*/
#ifdef __APPLE__
ld_library_path = (char *) malloc(system_ext_size);
sprintf(ld_library_path, "%s" SYS_EXTENSIONS_DIR ":" SYS_EXTENSIONS_DIRS, user_home_dir);
#else
ld_library_path = (char *) malloc(sizeof(REG_DIR) + sizeof("/lib/") +
strlen(cpu_arch) + sizeof(DEFAULT_LIBPATH));
sprintf(ld_library_path, REG_DIR "/lib/%s:" DEFAULT_LIBPATH, cpu_arch);
#endif
/*
* Get the user setting of LD_LIBRARY_PATH, and prepend it. It
* should always exist (until the legacy problem cited above is
* addressed).
*/
#ifdef __APPLE__
// Prepend the default path with the JAVA_LIBRARY_PATH so that the app launcher code can specify a directory inside an app wrapper
char *l = getenv("JAVA_LIBRARY_PATH");
if (l != NULL) {
char *t = ld_library_path;
/* That's +1 for the colon and +1 for the trailing '\0' */
ld_library_path = (char *) malloc(strlen(l) + 1 + strlen(t) + 1);
sprintf(ld_library_path, "%s:%s", l, t);
free(t);
if (pslash != NULL) {
pslash = strrchr(buf, '/');
if (pslash != NULL) {
*pslash = '\0'; // Get rid of /<arch>.
pslash = strrchr(buf, '/');
if (pslash != NULL) {
*pslash = '\0'; // Get rid of /lib.
}
char *v = getenv("DYLD_LIBRARY_PATH");
#else
char *v = getenv("LD_LIBRARY_PATH");
#endif
if (v != NULL) {
char *t = ld_library_path;
/* That's +1 for the colon and +1 for the trailing '\0' */
ld_library_path = (char *) malloc(strlen(v) + 1 + strlen(t) + 1);
sprintf(ld_library_path, "%s:%s", v, t);
free(t);
}
#ifdef __APPLE__
// Apple's Java6 has "." at the beginning of java.library.path.
// OpenJDK on Windows has "." at the end of java.library.path.
// OpenJDK on Linux and Solaris don't have "." in java.library.path
// at all. To ease the transition from Apple's Java6 to OpenJDK7,
// "." is appended to the end of java.library.path. Yes, this
// could cause a change in behavior, but Apple's Java6 behavior
// can be achieved by putting "." at the beginning of the
// JAVA_LIBRARY_PATH environment variable.
{
char *t = ld_library_path;
// that's +3 for appending ":." and the trailing '\0'
ld_library_path = (char *) malloc(strlen(t) + 3);
sprintf(ld_library_path, "%s:%s", t, ".");
free(t);
}
#endif
Arguments::set_library_path(ld_library_path);
}
/*
* Extensions directories.
*
* Note that the space for the colon and the trailing null are provided
* by the nulls included by the sizeof operator (so actually one byte more
* than necessary is allocated).
*/
{
#ifdef __APPLE__
char *buf = malloc(strlen(Arguments::get_java_home()) +
sizeof(EXTENSIONS_DIR) + system_ext_size);
sprintf(buf, "%s" SYS_EXTENSIONS_DIR ":%s" EXTENSIONS_DIR ":"
SYS_EXTENSIONS_DIRS, user_home_dir, Arguments::get_java_home());
#else
char *buf = malloc(strlen(Arguments::get_java_home()) +
sizeof(EXTENSIONS_DIR) + sizeof(REG_DIR) + sizeof(EXTENSIONS_DIR));
sprintf(buf, "%s" EXTENSIONS_DIR ":" REG_DIR EXTENSIONS_DIR,
Arguments::get_java_home());
#endif
Arguments::set_ext_dirs(buf);
}
/* Endorsed standards default directory. */
{
char * buf;
buf = malloc(strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR));
sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
Arguments::set_endorsed_dirs(buf);
}
}
Arguments::set_java_home(buf);
set_boot_path('/', ':');
}
#ifdef __APPLE__
// Where to look for native libraries.
//
// Note: Due to a legacy implementation, most of the library path
// is set in the launcher. This was to accommodate linking restrictions
// on legacy Bsd implementations (which are no longer supported).
// Eventually, all the library path setting will be done here.
//
// However, to prevent the proliferation of improperly built native
// libraries, the new path component /usr/java/packages is added here.
// Eventually, all the library path setting will be done here.
{
// Get the user setting of LD_LIBRARY_PATH, and prepend it. It
// should always exist (until the legacy problem cited above is
// addressed).
const char *v = ::getenv("LD_LIBRARY_PATH");
const char *v_colon = ":";
if (v == NULL) { v = ""; v_colon = ""; }
// That's +1 for the colon and +1 for the trailing '\0'.
char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char,
strlen(v) + 1 +
sizeof(SYS_EXT_DIR) + sizeof("/lib/") + strlen(cpu_arch) + sizeof(DEFAULT_LIBPATH) + 1,
mtInternal);
sprintf(ld_library_path, "%s%s" SYS_EXT_DIR "/lib/%s:" DEFAULT_LIBPATH, v, v_colon, cpu_arch);
Arguments::set_library_path(ld_library_path);
FREE_C_HEAP_ARRAY(char, ld_library_path, mtInternal);
}
// Extensions directories.
sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
Arguments::set_ext_dirs(buf);
// Endorsed standards default directory.
sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
Arguments::set_endorsed_dirs(buf);
FREE_C_HEAP_ARRAY(char, buf, mtInternal);
#else // __APPLE__
#define SYS_EXTENSIONS_DIR "/Library/Java/Extensions"
#define SYS_EXTENSIONS_DIRS SYS_EXTENSIONS_DIR ":/Network" SYS_EXTENSIONS_DIR ":/System" SYS_EXTENSIONS_DIR ":/usr/lib/java"
const char *user_home_dir = get_home();
// The null in SYS_EXTENSIONS_DIRS counts for the size of the colon after user_home_dir.
size_t system_ext_size = strlen(user_home_dir) + sizeof(SYS_EXTENSIONS_DIR) +
sizeof(SYS_EXTENSIONS_DIRS);
// Buffer that fits several sprintfs.
// Note that the space for the colon and the trailing null are provided
// by the nulls included by the sizeof operator.
const size_t bufsize =
MAX3((size_t)MAXPATHLEN, // for dll_dir & friends.
(size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + system_ext_size, // extensions dir
(size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir
char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
// sysclasspath, java_home, dll_dir
{
char *pslash;
os::jvm_path(buf, bufsize);
// Found the full path to libjvm.so.
// Now cut the path to <java_home>/jre if we can.
*(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
pslash = strrchr(buf, '/');
if (pslash != NULL) {
*pslash = '\0'; // Get rid of /{client|server|hotspot}.
}
Arguments::set_dll_dir(buf);
if (pslash != NULL) {
pslash = strrchr(buf, '/');
if (pslash != NULL) {
*pslash = '\0'; // Get rid of /lib.
}
}
Arguments::set_java_home(buf);
set_boot_path('/', ':');
}
// Where to look for native libraries.
//
// Note: Due to a legacy implementation, most of the library path
// is set in the launcher. This was to accommodate linking restrictions
// on legacy Bsd implementations (which are no longer supported).
// Eventually, all the library path setting will be done here.
//
// However, to prevent the proliferation of improperly built native
// libraries, the new path component /usr/java/packages is added here.
// Eventually, all the library path setting will be done here.
{
// Get the user setting of LD_LIBRARY_PATH, and prepend it. It
// should always exist (until the legacy problem cited above is
// addressed).
// Prepend the default path with the JAVA_LIBRARY_PATH so that the app launcher code
// can specify a directory inside an app wrapper
const char *l = ::getenv("JAVA_LIBRARY_PATH");
const char *l_colon = ":";
if (l == NULL) { l = ""; l_colon = ""; }
const char *v = ::getenv("DYLD_LIBRARY_PATH");
const char *v_colon = ":";
if (v == NULL) { v = ""; v_colon = ""; }
// Apple's Java6 has "." at the beginning of java.library.path.
// OpenJDK on Windows has "." at the end of java.library.path.
// OpenJDK on Linux and Solaris don't have "." in java.library.path
// at all. To ease the transition from Apple's Java6 to OpenJDK7,
// "." is appended to the end of java.library.path. Yes, this
// could cause a change in behavior, but Apple's Java6 behavior
// can be achieved by putting "." at the beginning of the
// JAVA_LIBRARY_PATH environment variable.
char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char,
strlen(v) + 1 + strlen(l) + 1 +
system_ext_size + 3,
mtInternal);
sprintf(ld_library_path, "%s%s%s%s%s" SYS_EXTENSIONS_DIR ":" SYS_EXTENSIONS_DIRS ":.",
v, v_colon, l, l_colon, user_home_dir);
Arguments::set_library_path(ld_library_path);
FREE_C_HEAP_ARRAY(char, ld_library_path, mtInternal);
}
// Extensions directories.
//
// Note that the space for the colon and the trailing null are provided
// by the nulls included by the sizeof operator (so actually one byte more
// than necessary is allocated).
sprintf(buf, "%s" SYS_EXTENSIONS_DIR ":%s" EXTENSIONS_DIR ":" SYS_EXTENSIONS_DIRS,
user_home_dir, Arguments::get_java_home());
Arguments::set_ext_dirs(buf);
// Endorsed standards default directory.
sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
Arguments::set_endorsed_dirs(buf);
FREE_C_HEAP_ARRAY(char, buf, mtInternal);
#undef SYS_EXTENSIONS_DIR
#endif
#undef malloc
#undef getenv
#undef SYS_EXTENSIONS_DIRS
#endif // __APPLE__
#undef SYS_EXT_DIR
#undef EXTENSIONS_DIR
#undef ENDORSED_DIR
// Done
return;
}
////////////////////////////////////////////////////////////////////////////////
@ -3091,7 +3094,7 @@ void os::Bsd::set_signal_handler(int sig, bool set_installed) {
sigAct.sa_sigaction = signalHandler;
sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
}
#if __APPLE__
#ifdef __APPLE__
// Needed for main thread as XNU (Mac OS X kernel) will only deliver SIGSEGV
// (which starts as SIGBUS) on main thread with faulting address inside "stack+guard pages"
// if the signal handler declares it will handle it on alternate stack.
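The one-character change above (`#if __APPLE__` to `#ifdef __APPLE__`) matters because `#if` evaluates the macro's value while `#ifdef` only tests definedness; if a macro is defined with an empty value, `#if` even fails to compile. A sketch with a hypothetical macro:

// With FEATURE defined but empty (e.g. -DFEATURE), the two forms diverge:
//   #if FEATURE    -> "#if" with no operand: a preprocessing error
//   #ifdef FEATURE -> true, since only definedness is tested
// #ifdef is therefore the robust test when only definedness matters.
#define FEATURE
#ifdef FEATURE
static const bool feature_enabled = true;
#else
static const bool feature_enabled = false;
#endif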

View File

@ -319,9 +319,6 @@ void os::Linux::initialize_system_info() {
}
void os::init_system_properties_values() {
// char arch[12];
// sysinfo(SI_ARCHITECTURE, arch, sizeof(arch));
// The next steps are taken in the product version:
//
// Obtain the JAVA_HOME value from the location of libjvm.so.
@ -348,140 +345,101 @@ void os::init_system_properties_values() {
// Important note: if the location of libjvm.so changes this
// code needs to be changed accordingly.
// The next few definitions allow the code to be verbatim:
#define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n), mtInternal)
#define getenv(n) ::getenv(n)
/*
* See ld(1):
* The linker uses the following search paths to locate required
* shared libraries:
* 1: ...
* ...
* 7: The default directories, normally /lib and /usr/lib.
*/
// See ld(1):
// The linker uses the following search paths to locate required
// shared libraries:
// 1: ...
// ...
// 7: The default directories, normally /lib and /usr/lib.
#if defined(AMD64) || defined(_LP64) && (defined(SPARC) || defined(PPC) || defined(S390))
#define DEFAULT_LIBPATH "/usr/lib64:/lib64:/lib:/usr/lib"
#else
#define DEFAULT_LIBPATH "/lib:/usr/lib"
#endif
// Base path of extensions installed on the system.
#define SYS_EXT_DIR "/usr/java/packages"
#define EXTENSIONS_DIR "/lib/ext"
#define ENDORSED_DIR "/lib/endorsed"
#define REG_DIR "/usr/java/packages"
// Buffer that fits several sprintfs.
// Note that the space for the colon and the trailing null are provided
// by the nulls included by the sizeof operator.
const size_t bufsize =
MAX3((size_t)MAXPATHLEN, // For dll_dir & friends.
(size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR), // extensions dir
(size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir
char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
// sysclasspath, java_home, dll_dir
{
/* sysclasspath, java_home, dll_dir */
{
char *home_path;
char *dll_path;
char *pslash;
char buf[MAXPATHLEN];
os::jvm_path(buf, sizeof(buf));
char *pslash;
os::jvm_path(buf, bufsize);
// Found the full path to libjvm.so.
// Now cut the path to <java_home>/jre if we can.
*(strrchr(buf, '/')) = '\0'; /* get rid of /libjvm.so */
// Found the full path to libjvm.so.
// Now cut the path to <java_home>/jre if we can.
*(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
pslash = strrchr(buf, '/');
if (pslash != NULL) {
*pslash = '\0'; // Get rid of /{client|server|hotspot}.
}
Arguments::set_dll_dir(buf);
if (pslash != NULL) {
pslash = strrchr(buf, '/');
if (pslash != NULL) {
*pslash = '\0'; // Get rid of /<arch>.
pslash = strrchr(buf, '/');
if (pslash != NULL)
*pslash = '\0'; /* get rid of /{client|server|hotspot} */
dll_path = malloc(strlen(buf) + 1);
if (dll_path == NULL)
return;
strcpy(dll_path, buf);
Arguments::set_dll_dir(dll_path);
if (pslash != NULL) {
pslash = strrchr(buf, '/');
if (pslash != NULL) {
*pslash = '\0'; /* get rid of /<arch> */
pslash = strrchr(buf, '/');
if (pslash != NULL)
*pslash = '\0'; /* get rid of /lib */
}
*pslash = '\0'; // Get rid of /lib.
}
home_path = malloc(strlen(buf) + 1);
if (home_path == NULL)
return;
strcpy(home_path, buf);
Arguments::set_java_home(home_path);
if (!set_boot_path('/', ':'))
return;
}
/*
* Where to look for native libraries
*
* Note: Due to a legacy implementation, most of the library path
* is set in the launcher. This was to accommodate linking restrictions
* on legacy Linux implementations (which are no longer supported).
* Eventually, all the library path setting will be done here.
*
* However, to prevent the proliferation of improperly built native
* libraries, the new path component /usr/java/packages is added here.
* Eventually, all the library path setting will be done here.
*/
{
char *ld_library_path;
/*
* Construct the invariant part of ld_library_path. Note that the
* space for the colon and the trailing null are provided by the
* nulls included by the sizeof operator (so actually we allocate
* a byte more than necessary).
*/
ld_library_path = (char *) malloc(sizeof(REG_DIR) + sizeof("/lib/") +
strlen(cpu_arch) + sizeof(DEFAULT_LIBPATH));
sprintf(ld_library_path, REG_DIR "/lib/%s:" DEFAULT_LIBPATH, cpu_arch);
/*
* Get the user setting of LD_LIBRARY_PATH, and prepend it. It
* should always exist (until the legacy problem cited above is
* addressed).
*/
char *v = getenv("LD_LIBRARY_PATH");
if (v != NULL) {
char *t = ld_library_path;
/* That's +1 for the colon and +1 for the trailing '\0' */
ld_library_path = (char *) malloc(strlen(v) + 1 + strlen(t) + 1);
sprintf(ld_library_path, "%s:%s", v, t);
}
Arguments::set_library_path(ld_library_path);
}
/*
* Extensions directories.
*
* Note that the space for the colon and the trailing null are provided
* by the nulls included by the sizeof operator (so actually one byte more
* than necessary is allocated).
*/
{
char *buf = malloc(strlen(Arguments::get_java_home()) +
sizeof(EXTENSIONS_DIR) + sizeof(REG_DIR) + sizeof(EXTENSIONS_DIR));
sprintf(buf, "%s" EXTENSIONS_DIR ":" REG_DIR EXTENSIONS_DIR,
Arguments::get_java_home());
Arguments::set_ext_dirs(buf);
}
/* Endorsed standards default directory. */
{
char * buf;
buf = malloc(strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR));
sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
Arguments::set_endorsed_dirs(buf);
}
}
Arguments::set_java_home(buf);
set_boot_path('/', ':');
}
#undef malloc
#undef getenv
// Where to look for native libraries.
//
// Note: Due to a legacy implementation, most of the library path
// is set in the launcher. This was to accommodate linking restrictions
// on legacy Linux implementations (which are no longer supported).
// Eventually, all the library path setting will be done here.
//
// However, to prevent the proliferation of improperly built native
// libraries, the new path component /usr/java/packages is added here.
// Eventually, all the library path setting will be done here.
{
// Get the user setting of LD_LIBRARY_PATH, and prepend it. It
// should always exist (until the legacy problem cited above is
// addressed).
const char *v = ::getenv("LD_LIBRARY_PATH");
const char *v_colon = ":";
if (v == NULL) { v = ""; v_colon = ""; }
// That's +1 for the colon and +1 for the trailing '\0'.
char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char,
strlen(v) + 1 +
sizeof(SYS_EXT_DIR) + sizeof("/lib/") + strlen(cpu_arch) + sizeof(DEFAULT_LIBPATH) + 1,
mtInternal);
sprintf(ld_library_path, "%s%s" SYS_EXT_DIR "/lib/%s:" DEFAULT_LIBPATH, v, v_colon, cpu_arch);
Arguments::set_library_path(ld_library_path);
FREE_C_HEAP_ARRAY(char, ld_library_path, mtInternal);
}
// Extensions directories.
sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
Arguments::set_ext_dirs(buf);
// Endorsed standards default directory.
sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
Arguments::set_endorsed_dirs(buf);
FREE_C_HEAP_ARRAY(char, buf, mtInternal);
#undef DEFAULT_LIBPATH
#undef SYS_EXT_DIR
#undef EXTENSIONS_DIR
#undef ENDORSED_DIR
// Done
return;
}
////////////////////////////////////////////////////////////////////////////////
@ -1963,7 +1921,11 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
{EM_SPARC32PLUS, EM_SPARC, ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
{EM_SPARCV9, EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
{EM_PPC, EM_PPC, ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
#if defined(VM_LITTLE_ENDIAN)
{EM_PPC64, EM_PPC64, ELFCLASS64, ELFDATA2LSB, (char*)"Power PC 64"},
#else
{EM_PPC64, EM_PPC64, ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
#endif
{EM_ARM, EM_ARM, ELFCLASS32, ELFDATA2LSB, (char*)"ARM"},
{EM_S390, EM_S390, ELFCLASSNONE, ELFDATA2MSB, (char*)"IBM System/390"},
{EM_ALPHA, EM_ALPHA, ELFCLASS64, ELFDATA2LSB, (char*)"Alpha"},

View File

@ -580,9 +580,6 @@ bool os::have_special_privileges() {
void os::init_system_properties_values() {
char arch[12];
sysinfo(SI_ARCHITECTURE, arch, sizeof(arch));
// The next steps are taken in the product version:
//
// Obtain the JAVA_HOME value from the location of libjvm.so.
@ -609,218 +606,174 @@ void os::init_system_properties_values() {
// Important note: if the location of libjvm.so changes this
// code needs to be changed accordingly.
// The next few definitions allow the code to be verbatim:
#define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n), mtInternal)
#define free(p) FREE_C_HEAP_ARRAY(char, p, mtInternal)
#define getenv(n) ::getenv(n)
// Base path of extensions installed on the system.
#define SYS_EXT_DIR "/usr/jdk/packages"
#define EXTENSIONS_DIR "/lib/ext"
#define ENDORSED_DIR "/lib/endorsed"
#define COMMON_DIR "/usr/jdk/packages"
char cpu_arch[12];
// Buffer that fits several sprintfs.
// Note that the space for the colon and the trailing null are provided
// by the nulls included by the sizeof operator.
const size_t bufsize =
MAX4((size_t)MAXPATHLEN, // For dll_dir & friends.
sizeof(SYS_EXT_DIR) + sizeof("/lib/") + strlen(cpu_arch), // invariant ld_library_path
(size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR), // extensions dir
(size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir
char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
// sysclasspath, java_home, dll_dir
{
/* sysclasspath, java_home, dll_dir */
{
char *home_path;
char *dll_path;
char *pslash;
char buf[MAXPATHLEN];
os::jvm_path(buf, sizeof(buf));
char *pslash;
os::jvm_path(buf, bufsize);
// Found the full path to libjvm.so.
// Now cut the path to <java_home>/jre if we can.
*(strrchr(buf, '/')) = '\0'; /* get rid of /libjvm.so */
// Found the full path to libjvm.so.
// Now cut the path to <java_home>/jre if we can.
*(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
pslash = strrchr(buf, '/');
if (pslash != NULL) {
*pslash = '\0'; // Get rid of /{client|server|hotspot}.
}
Arguments::set_dll_dir(buf);
if (pslash != NULL) {
pslash = strrchr(buf, '/');
if (pslash != NULL) {
*pslash = '\0'; // Get rid of /<arch>.
pslash = strrchr(buf, '/');
if (pslash != NULL)
*pslash = '\0'; /* get rid of /{client|server|hotspot} */
dll_path = malloc(strlen(buf) + 1);
if (dll_path == NULL)
return;
strcpy(dll_path, buf);
Arguments::set_dll_dir(dll_path);
if (pslash != NULL) {
pslash = strrchr(buf, '/');
if (pslash != NULL) {
*pslash = '\0'; /* get rid of /<arch> */
pslash = strrchr(buf, '/');
if (pslash != NULL)
*pslash = '\0'; /* get rid of /lib */
}
*pslash = '\0'; // Get rid of /lib.
}
home_path = malloc(strlen(buf) + 1);
if (home_path == NULL)
return;
strcpy(home_path, buf);
Arguments::set_java_home(home_path);
if (!set_boot_path('/', ':'))
return;
}
/*
* Where to look for native libraries
*/
{
// Use dlinfo() to determine the correct java.library.path.
//
// If we're launched by the Java launcher, and the user
// does not set java.library.path explicitly on the commandline,
// the Java launcher sets LD_LIBRARY_PATH for us and unsets
// LD_LIBRARY_PATH_32 and LD_LIBRARY_PATH_64. In this case
// dlinfo returns LD_LIBRARY_PATH + crle settings (including
// /usr/lib), which is exactly what we want.
//
// If the user does set java.library.path, it completely
// overwrites this setting, and always has.
//
// If we're not launched by the Java launcher, we may
// get here with any/all of the LD_LIBRARY_PATH[_32|64]
// settings. Again, dlinfo does exactly what we want.
Dl_serinfo _info, *info = &_info;
Dl_serpath *path;
char* library_path;
char *common_path;
int i;
// determine search path count and required buffer size
if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) {
vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror());
}
// allocate new buffer and initialize
info = (Dl_serinfo*)malloc(_info.dls_size);
if (info == NULL) {
vm_exit_out_of_memory(_info.dls_size, OOM_MALLOC_ERROR,
"init_system_properties_values info");
}
info->dls_size = _info.dls_size;
info->dls_cnt = _info.dls_cnt;
// obtain search path information
if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) {
free(info);
vm_exit_during_initialization("dlinfo SERINFO request", dlerror());
}
path = &info->dls_serpath[0];
// Note: Due to a legacy implementation, most of the library path
// is set in the launcher. This was to accommodate linking restrictions
// on legacy Solaris implementations (which are no longer supported).
// Eventually, all the library path setting will be done here.
//
// However, to prevent the proliferation of improperly built native
// libraries, the new path component /usr/jdk/packages is added here.
// Determine the actual CPU architecture.
char cpu_arch[12];
sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
#ifdef _LP64
// If we are a 64-bit vm, perform the following translations:
// sparc -> sparcv9
// i386 -> amd64
if (strcmp(cpu_arch, "sparc") == 0)
strcat(cpu_arch, "v9");
else if (strcmp(cpu_arch, "i386") == 0)
strcpy(cpu_arch, "amd64");
#endif
// Construct the invariant part of ld_library_path. Note that the
// space for the colon and the trailing null are provided by the
// nulls included by the sizeof operator.
size_t bufsize = sizeof(COMMON_DIR) + sizeof("/lib/") + strlen(cpu_arch);
common_path = malloc(bufsize);
if (common_path == NULL) {
free(info);
vm_exit_out_of_memory(bufsize, OOM_MALLOC_ERROR,
"init_system_properties_values common_path");
}
sprintf(common_path, COMMON_DIR "/lib/%s", cpu_arch);
// struct size is more than sufficient for the path components obtained
// through the dlinfo() call, so only add additional space for the path
// components explicitly added here.
bufsize = info->dls_size + strlen(common_path);
library_path = malloc(bufsize);
if (library_path == NULL) {
free(info);
free(common_path);
vm_exit_out_of_memory(bufsize, OOM_MALLOC_ERROR,
"init_system_properties_values library_path");
}
library_path[0] = '\0';
// Construct the desired Java library path from the linker's library
// search path.
//
// For compatibility, it is optimal that we insert the additional path
// components specific to the Java VM after those components specified
// in LD_LIBRARY_PATH (if any) but before those added by the ld.so
// infrastructure.
if (info->dls_cnt == 0) { // Not sure this can happen, but allow for it
strcpy(library_path, common_path);
} else {
int inserted = 0;
for (i = 0; i < info->dls_cnt; i++, path++) {
uint_t flags = path->dls_flags & LA_SER_MASK;
if (((flags & LA_SER_LIBPATH) == 0) && !inserted) {
strcat(library_path, common_path);
strcat(library_path, os::path_separator());
inserted = 1;
}
strcat(library_path, path->dls_name);
strcat(library_path, os::path_separator());
}
// eliminate trailing path separator
library_path[strlen(library_path)-1] = '\0';
}
// happens before argument parsing - can't use a trace flag
// tty->print_raw("init_system_properties_values: native lib path: ");
// tty->print_raw_cr(library_path);
// callee copies into its own buffer
Arguments::set_library_path(library_path);
free(common_path);
free(library_path);
free(info);
}
/*
* Extensions directories.
*
* Note that the space for the colon and the trailing null are provided
* by the nulls included by the sizeof operator (so actually one byte more
* than necessary is allocated).
*/
{
char *buf = (char *) malloc(strlen(Arguments::get_java_home()) +
sizeof(EXTENSIONS_DIR) + sizeof(COMMON_DIR) +
sizeof(EXTENSIONS_DIR));
sprintf(buf, "%s" EXTENSIONS_DIR ":" COMMON_DIR EXTENSIONS_DIR,
Arguments::get_java_home());
Arguments::set_ext_dirs(buf);
}
/* Endorsed standards default directory. */
{
char * buf = malloc(strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR));
sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
Arguments::set_endorsed_dirs(buf);
}
}
Arguments::set_java_home(buf);
set_boot_path('/', ':');
}
#undef malloc
#undef free
#undef getenv
// Where to look for native libraries.
{
// Use dlinfo() to determine the correct java.library.path.
//
// If we're launched by the Java launcher, and the user
// does not set java.library.path explicitly on the commandline,
// the Java launcher sets LD_LIBRARY_PATH for us and unsets
// LD_LIBRARY_PATH_32 and LD_LIBRARY_PATH_64. In this case
// dlinfo returns LD_LIBRARY_PATH + crle settings (including
// /usr/lib), which is exactly what we want.
//
// If the user does set java.library.path, it completely
// overwrites this setting, and always has.
//
// If we're not launched by the Java launcher, we may
// get here with any/all of the LD_LIBRARY_PATH[_32|64]
// settings. Again, dlinfo does exactly what we want.
Dl_serinfo info_sz, *info = &info_sz;
Dl_serpath *path;
char *library_path;
char *common_path = buf;
// Determine search path count and required buffer size.
if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) {
FREE_C_HEAP_ARRAY(char, buf, mtInternal);
vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror());
}
// Allocate new buffer and initialize.
info = (Dl_serinfo*)NEW_C_HEAP_ARRAY(char, info_sz.dls_size, mtInternal);
info->dls_size = info_sz.dls_size;
info->dls_cnt = info_sz.dls_cnt;
// Obtain search path information.
if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) {
FREE_C_HEAP_ARRAY(char, buf, mtInternal);
FREE_C_HEAP_ARRAY(char, info, mtInternal);
vm_exit_during_initialization("dlinfo SERINFO request", dlerror());
}
path = &info->dls_serpath[0];
// Note: Due to a legacy implementation, most of the library path
// is set in the launcher. This was to accommodate linking restrictions
// on legacy Solaris implementations (which are no longer supported).
// Eventually, all the library path setting will be done here.
//
// However, to prevent the proliferation of improperly built native
// libraries, the new path component /usr/jdk/packages is added here.
// Determine the actual CPU architecture.
sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
#ifdef _LP64
// If we are a 64-bit vm, perform the following translations:
// sparc -> sparcv9
// i386 -> amd64
if (strcmp(cpu_arch, "sparc") == 0) {
strcat(cpu_arch, "v9");
} else if (strcmp(cpu_arch, "i386") == 0) {
strcpy(cpu_arch, "amd64");
}
#endif
// Construct the invariant part of ld_library_path.
sprintf(common_path, SYS_EXT_DIR "/lib/%s", cpu_arch);
// Struct size is more than sufficient for the path components obtained
// through the dlinfo() call, so only add additional space for the path
// components explicitly added here.
size_t library_path_size = info->dls_size + strlen(common_path);
library_path = (char *)NEW_C_HEAP_ARRAY(char, library_path_size, mtInternal);
library_path[0] = '\0';
// Construct the desired Java library path from the linker's library
// search path.
//
// For compatibility, it is optimal that we insert the additional path
// components specific to the Java VM after those components specified
// in LD_LIBRARY_PATH (if any) but before those added by the ld.so
// infrastructure.
if (info->dls_cnt == 0) { // Not sure this can happen, but allow for it.
strcpy(library_path, common_path);
} else {
int inserted = 0;
int i;
for (i = 0; i < info->dls_cnt; i++, path++) {
uint_t flags = path->dls_flags & LA_SER_MASK;
if (((flags & LA_SER_LIBPATH) == 0) && !inserted) {
strcat(library_path, common_path);
strcat(library_path, os::path_separator());
inserted = 1;
}
strcat(library_path, path->dls_name);
strcat(library_path, os::path_separator());
}
// Eliminate trailing path separator.
library_path[strlen(library_path)-1] = '\0';
}
// happens before argument parsing - can't use a trace flag
// tty->print_raw("init_system_properties_values: native lib path: ");
// tty->print_raw_cr(library_path);
// Callee copies into its own buffer.
Arguments::set_library_path(library_path);
FREE_C_HEAP_ARRAY(char, library_path, mtInternal);
FREE_C_HEAP_ARRAY(char, info, mtInternal);
}
// Extensions directories.
sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
Arguments::set_ext_dirs(buf);
// Endorsed standards default directory.
sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
Arguments::set_endorsed_dirs(buf);
FREE_C_HEAP_ARRAY(char, buf, mtInternal);
#undef SYS_EXT_DIR
#undef EXTENSIONS_DIR
#undef ENDORSED_DIR
#undef COMMON_DIR
}
void os::breakpoint() {

View File

@ -2702,7 +2702,6 @@ address os::win32::fast_jni_accessor_wrapper(BasicType type) {
}
#endif
#ifndef PRODUCT
void os::win32::call_test_func_with_wrapper(void (*funcPtr)(void)) {
// Install a win32 structured exception handler around the test
// function call so the VM can generate an error dump if needed.
@ -2713,7 +2712,6 @@ void os::win32::call_test_func_with_wrapper(void (*funcPtr)(void)) {
// Nothing to do.
}
}
#endif
// Virtual Memory

View File

@ -101,9 +101,7 @@ class win32 {
static address fast_jni_accessor_wrapper(BasicType);
#endif
#ifndef PRODUCT
static void call_test_func_with_wrapper(void (*funcPtr)(void));
#endif
// filter function to ignore faults on serializations page
static LONG WINAPI serialize_fault_filter(struct _EXCEPTION_POINTERS* e);

View File

@ -111,9 +111,7 @@ inline bool os::supports_monotonic_clock() {
return win32::_has_performance_count;
}
#ifndef PRODUCT
#define CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(f) \
os::win32::call_test_func_with_wrapper(f)
#endif
#define CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(f) \
os::win32::call_test_func_with_wrapper(f)
#endif // OS_WINDOWS_VM_OS_WINDOWS_INLINE_HPP
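On Windows, the macro expands to os::win32::call_test_func_with_wrapper, which the earlier hunk shows installing a structured exception handler around the test function. A hedged sketch of that shape, assuming MSVC's __try/__except and not HotSpot's actual error-dump machinery:

#ifdef _MSC_VER
#include <windows.h>

// Sketch: wrap the CPU probe in structured exception handling so a SEGV
// raised by the YMM register test can be caught and reported instead of
// terminating the process.
static void call_with_seh(void (*fn)(void)) {
  __try {
    fn();
  } __except(EXCEPTION_EXECUTE_HANDLER) {
    // A real VM would generate an error dump here.
  }
}
#endif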

View File

@ -0,0 +1,39 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2014 Google Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_LINUX_PPC_VM_BYTES_LINUX_PPC_INLINE_HPP
#define OS_CPU_LINUX_PPC_VM_BYTES_LINUX_PPC_INLINE_HPP
#if defined(VM_LITTLE_ENDIAN)
#include <byteswap.h>
// Efficient swapping of data bytes from Java byte
// ordering to native byte ordering and vice versa.
inline u2 Bytes::swap_u2(u2 x) { return bswap_16(x); }
inline u4 Bytes::swap_u4(u4 x) { return bswap_32(x); }
inline u8 Bytes::swap_u8(u8 x) { return bswap_64(x); }
#endif // VM_LITTLE_ENDIAN
#endif // OS_CPU_LINUX_PPC_VM_BYTES_LINUX_PPC_INLINE_HPP
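// A minimal illustration (assumption, not from this changeset; values are
// examples): on a VM_LITTLE_ENDIAN build the class-file stream is
// big-endian, so every multi-byte value is byte-reversed before native use.
u4 java_order   = 0x12345678;                 // value as stored in a classfile
u4 native_order = Bytes::swap_u4(java_order); // 0x78563412 on little-endian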

View File

@ -243,7 +243,6 @@ int main(int argc, char *argv[])
AD.addInclude(AD._CPP_file, "vmreg_arm.inline.hpp");
#endif
#ifdef TARGET_ARCH_ppc
AD.addInclude(AD._CPP_file, "assembler_ppc.inline.hpp");
AD.addInclude(AD._CPP_file, "nativeInst_ppc.hpp");
AD.addInclude(AD._CPP_file, "vmreg_ppc.inline.hpp");
#endif
@ -274,6 +273,7 @@ int main(int argc, char *argv[])
AD.addInclude(AD._DFA_file, "opto/cfgnode.hpp"); // Use PROB_MAX in predicate.
AD.addInclude(AD._DFA_file, "opto/matcher.hpp");
AD.addInclude(AD._DFA_file, "opto/opcodes.hpp");
AD.addInclude(AD._DFA_file, "opto/convertnode.hpp");
// Make sure each .cpp file starts with include lines:
// files declaring and defining generators for Mach* Objects (hpp,cpp)
// Generate the result files:

View File

@ -581,14 +581,14 @@ void ciMethod::assert_call_type_ok(int bci) {
* Check whether profiling provides a type for the argument i to the
* call at bci bci
*
* @param bci bci of the call
* @param i argument number
* @return profiled type
* @param [in]  bci         bci of the call
* @param [in]  i           argument number
* @param [out] type        profiled type of the argument, NULL if none
* @param [out] maybe_null  true if null was seen for the argument
* @return                  true if profiling exists for the call
*/
ciKlass* ciMethod::argument_profiled_type(int bci, int i) {
bool ciMethod::argument_profiled_type(int bci, int i, ciKlass*& type, bool& maybe_null) {
if (MethodData::profile_parameters() && method_data() != NULL && method_data()->is_mature()) {
ciProfileData* data = method_data()->bci_to_data(bci);
if (data != NULL) {
@ -596,82 +596,77 @@ ciKlass* ciMethod::argument_profiled_type(int bci, int i) {
assert_virtual_call_type_ok(bci);
ciVirtualCallTypeData* call = (ciVirtualCallTypeData*)data->as_VirtualCallTypeData();
if (i >= call->number_of_arguments()) {
return NULL;
}
ciKlass* type = call->valid_argument_type(i);
if (type != NULL && !call->argument_maybe_null(i)) {
return type;
return false;
}
type = call->valid_argument_type(i);
maybe_null = call->argument_maybe_null(i);
return true;
} else if (data->is_CallTypeData()) {
assert_call_type_ok(bci);
ciCallTypeData* call = (ciCallTypeData*)data->as_CallTypeData();
if (i >= call->number_of_arguments()) {
return NULL;
}
ciKlass* type = call->valid_argument_type(i);
if (type != NULL && !call->argument_maybe_null(i)) {
return type;
return false;
}
type = call->valid_argument_type(i);
maybe_null = call->argument_maybe_null(i);
return true;
}
}
}
return NULL;
return false;
}
/**
* Check whether profiling provides a type for the return value from
* the call at bci bci
*
* @param bci bci of the call
* @return profiled type
* @param [in]  bci         bci of the call
* @param [out] type        profiled type of the return value, NULL if none
* @param [out] maybe_null  true if null was seen for the return value
* @return                  true if profiling exists for the call
*/
ciKlass* ciMethod::return_profiled_type(int bci) {
bool ciMethod::return_profiled_type(int bci, ciKlass*& type, bool& maybe_null) {
if (MethodData::profile_return() && method_data() != NULL && method_data()->is_mature()) {
ciProfileData* data = method_data()->bci_to_data(bci);
if (data != NULL) {
if (data->is_VirtualCallTypeData()) {
assert_virtual_call_type_ok(bci);
ciVirtualCallTypeData* call = (ciVirtualCallTypeData*)data->as_VirtualCallTypeData();
ciKlass* type = call->valid_return_type();
if (type != NULL && !call->return_maybe_null()) {
return type;
}
type = call->valid_return_type();
maybe_null = call->return_maybe_null();
return true;
} else if (data->is_CallTypeData()) {
assert_call_type_ok(bci);
ciCallTypeData* call = (ciCallTypeData*)data->as_CallTypeData();
ciKlass* type = call->valid_return_type();
if (type != NULL && !call->return_maybe_null()) {
return type;
}
type = call->valid_return_type();
maybe_null = call->return_maybe_null();
return true;
}
}
}
return NULL;
return false;
}
/**
* Check whether profiling provides a type for the parameter i
*
* @param i parameter number
* @return profiled type
* @param [in]  i           parameter number
* @param [out] type        profiled type of the parameter, NULL if none
* @param [out] maybe_null  true if null was seen for the parameter
* @return                  true if profiling exists for the parameter
*/
ciKlass* ciMethod::parameter_profiled_type(int i) {
bool ciMethod::parameter_profiled_type(int i, ciKlass*& type, bool& maybe_null) {
if (MethodData::profile_parameters() && method_data() != NULL && method_data()->is_mature()) {
ciParametersTypeData* parameters = method_data()->parameters_type_data();
if (parameters != NULL && i < parameters->number_of_parameters()) {
ciKlass* type = parameters->valid_parameter_type(i);
if (type != NULL && !parameters->parameter_maybe_null(i)) {
return type;
}
type = parameters->valid_parameter_type(i);
maybe_null = parameters->parameter_maybe_null(i);
return true;
}
}
return NULL;
return false;
}

View File

@ -234,10 +234,10 @@ class ciMethod : public ciMetadata {
ciCallProfile call_profile_at_bci(int bci);
int interpreter_call_site_count(int bci);
// Does type profiling provide a useful type at this point?
ciKlass* argument_profiled_type(int bci, int i);
ciKlass* parameter_profiled_type(int i);
ciKlass* return_profiled_type(int bci);
// Does type profiling provide any useful information at this point?
bool argument_profiled_type(int bci, int i, ciKlass*& type, bool& maybe_null);
bool parameter_profiled_type(int i, ciKlass*& type, bool& maybe_null);
bool return_profiled_type(int bci, ciKlass*& type, bool& maybe_null);
ciField* get_field_at_bci( int bci, bool &will_link);
ciMethod* get_method_at_bci(int bci, bool &will_link, ciSignature* *declared_signature);
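// Hedged caller sketch -- assumed usage, not code from this commit; the
// names below are illustrative. The out-parameter style separates
// "profiling data exists" from "a useful non-null type was seen".
ciKlass* type = NULL;
bool maybe_null = true;
if (method->argument_profiled_type(bci, 0, type, maybe_null)) {
  if (type != NULL && !maybe_null) {
    // A single type was profiled and null was never seen, so the
    // compiler may speculate on 'type' without a null guard.
  }
}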

View File

@ -135,6 +135,14 @@ void ClassLoaderData::classes_do(void f(Klass * const)) {
}
}
void ClassLoaderData::methods_do(void f(Method*)) {
for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
if (k->oop_is_instance()) {
InstanceKlass::cast(k)->methods_do(f);
}
}
}
void ClassLoaderData::loaded_classes_do(KlassClosure* klass_closure) {
// Lock to avoid classes being modified/added/removed during iteration
MutexLockerEx ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
@ -624,6 +632,12 @@ void ClassLoaderDataGraph::classes_do(void f(Klass* const)) {
}
}
void ClassLoaderDataGraph::methods_do(void f(Method*)) {
for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
cld->methods_do(f);
}
}
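// Hedged usage sketch (the callback is invented for illustration): the
// function-pointer walk visits every method of every instance klass in
// every ClassLoaderData.
static void print_method_name(Method* m) {
  m->print_short_name(tty);  // assumes the usual Method printing helper
}
// ...
ClassLoaderDataGraph::methods_do(&print_method_name);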
void ClassLoaderDataGraph::loaded_classes_do(KlassClosure* klass_closure) {
for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
cld->loaded_classes_do(klass_closure);

View File

@ -78,6 +78,7 @@ class ClassLoaderDataGraph : public AllStatic {
static void keep_alive_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
static void classes_do(KlassClosure* klass_closure);
static void classes_do(void f(Klass* const));
static void methods_do(void f(Method*));
static void loaded_classes_do(KlassClosure* klass_closure);
static void classes_unloading_do(void f(Klass* const));
static bool do_unloading(BoolObjectClosure* is_alive);
@ -189,6 +190,7 @@ class ClassLoaderData : public CHeapObj<mtClass> {
void classes_do(void f(Klass*));
void loaded_classes_do(KlassClosure* klass_closure);
void classes_do(void f(InstanceKlass*));
void methods_do(void f(Method*));
// Deallocate free list during class unloading.
void free_deallocate_list();

View File

@ -116,10 +116,6 @@ class MarkRefsIntoClosure: public CMSOopsInGenClosure {
MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
Prefetch::style prefetch_style() {
return Prefetch::do_read;
}
};
class Par_MarkRefsIntoClosure: public CMSOopsInGenClosure {
@ -132,10 +128,6 @@ class Par_MarkRefsIntoClosure: public CMSOopsInGenClosure {
Par_MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
Prefetch::style prefetch_style() {
return Prefetch::do_read;
}
};
// A variant of the above used in certain kinds of CMS
@ -152,10 +144,6 @@ class MarkRefsIntoVerifyClosure: public CMSOopsInGenClosure {
CMSBitMap* cms_bm);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
Prefetch::style prefetch_style() {
return Prefetch::do_read;
}
};
// The non-parallel version (the parallel version appears further below).
@ -181,10 +169,6 @@ class PushAndMarkClosure: public CMSOopClosure {
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { PushAndMarkClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
Prefetch::style prefetch_style() {
return Prefetch::do_read;
}
};
// In the parallel case, the bit map and the
@ -211,10 +195,6 @@ class Par_PushAndMarkClosure: public CMSOopClosure {
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
Prefetch::style prefetch_style() {
return Prefetch::do_read;
}
};
// The non-parallel version (the parallel version appears further below).
@ -245,9 +225,6 @@ class MarkRefsIntoAndScanClosure: public CMSOopsInGenClosure {
inline void do_oop_nv(oop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
Prefetch::style prefetch_style() {
return Prefetch::do_read;
}
void set_freelistLock(Mutex* m) {
_freelistLock = m;
}
@ -282,9 +259,6 @@ class Par_MarkRefsIntoAndScanClosure: public CMSOopsInGenClosure {
inline void do_oop_nv(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
Prefetch::style prefetch_style() {
return Prefetch::do_read;
}
void trim_queue(uint size);
};

View File

@ -851,42 +851,60 @@ void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
UpwardsObjectClosure* cl) {
assert_locked(freelistLock());
NOT_PRODUCT(verify_objects_initialized());
Space::object_iterate_mem(mr, cl);
assert(!mr.is_empty(), "Should be non-empty");
// We use MemRegion(bottom(), end()) rather than used_region() below
// because the two are not necessarily equal for some kinds of
// spaces, in particular, certain kinds of free list spaces.
// We could use the more complicated but more precise:
// MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
// but the slight imprecision seems acceptable in the assertion check.
assert(MemRegion(bottom(), end()).contains(mr),
"Should be within used space");
HeapWord* prev = cl->previous(); // max address from last time
if (prev >= mr.end()) { // nothing to do
return;
}
// This assert will not work when we go from cms space to perm
// space, and use same closure. Easy fix deferred for later. XXX YSR
// assert(prev == NULL || contains(prev), "Should be within space");
bool last_was_obj_array = false;
HeapWord *blk_start_addr, *region_start_addr;
if (prev > mr.start()) {
region_start_addr = prev;
blk_start_addr = prev;
// The previous invocation may have pushed "prev" beyond the
// last allocated block, yet there may still be blocks
// in this region due to a particular coalescing policy.
// Relax the assertion so that the case where the unallocated
// block is maintained and "prev" is beyond the unallocated
// block does not cause the assertion to fire.
assert((BlockOffsetArrayUseUnallocatedBlock &&
(!is_in(prev))) ||
(blk_start_addr == block_start(region_start_addr)), "invariant");
} else {
region_start_addr = mr.start();
blk_start_addr = block_start(region_start_addr);
}
HeapWord* region_end_addr = mr.end();
MemRegion derived_mr(region_start_addr, region_end_addr);
while (blk_start_addr < region_end_addr) {
const size_t size = block_size(blk_start_addr);
if (block_is_obj(blk_start_addr)) {
last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
} else {
last_was_obj_array = false;
}
blk_start_addr += size;
}
if (!last_was_obj_array) {
assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
"Should be within (closed) used space");
assert(blk_start_addr > prev, "Invariant");
cl->set_previous(blk_start_addr); // min address for next time
}
}
// Callers of this iterator beware: The closure application should
// be robust in the face of uninitialized objects and should (always)
// return a correct size so that the next addr + size below gives us a
// valid block boundary. [See for instance,
// ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
// in ConcurrentMarkSweepGeneration.cpp.]
HeapWord*
CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
assert_lock_strong(freelistLock());
HeapWord *addr, *last;
size_t size;
for (addr = bottom(), last = end();
addr < last; addr += size) {
FreeChunk* fc = (FreeChunk*)addr;
if (fc->is_free()) {
// Since we hold the free list lock, which protects direct
// allocation in this generation by mutators, a free object
// will remain free throughout this iteration code.
size = fc->size();
} else {
// Note that the object need not necessarily be initialized,
// because (for instance) the free list lock does NOT protect
// object initialization. The closure application below must
// therefore be correct in the face of uninitialized objects.
size = cl->do_object_careful(oop(addr));
if (size == 0) {
// An unparsable object found. Signal early termination.
return addr;
}
}
}
return NULL;
}
// Callers of this iterator beware: The closure application should
// be robust in the face of uninitialized objects and should (always)

View File

@ -338,10 +338,6 @@ class CompactibleFreeListSpace: public CompactibleSpace {
unallocated_block() : end());
}
bool is_in(const void* p) const {
return used_region().contains(p);
}
virtual bool is_free_block(const HeapWord* p) const;
// Resizing support
@ -363,6 +359,12 @@ class CompactibleFreeListSpace: public CompactibleSpace {
// obj_is_alive() to determine whether it is safe to iterate over
// an object.
void safe_object_iterate(ObjectClosure* blk);
// Iterate over all objects that intersect with mr, calling "cl->do_object"
// on each. There is an exception to this: if this closure has already
// been invoked on an object, it may skip such objects in some cases. This is
// Most likely to happen in an "upwards" (ascending address) iteration of
// MemRegions.
void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
// Requires that "mr" be entirely within the space.
@ -371,11 +373,8 @@ class CompactibleFreeListSpace: public CompactibleSpace {
// terminate the iteration and return the address of the start of the
// subregion that isn't done. Return of "NULL" indicates that the
// iteration completed.
virtual HeapWord*
object_iterate_careful_m(MemRegion mr,
ObjectClosureCareful* cl);
virtual HeapWord*
object_iterate_careful(ObjectClosureCareful* cl);
HeapWord* object_iterate_careful_m(MemRegion mr,
ObjectClosureCareful* cl);
// Override: provides a DCTO_CL specific to this kind of space.
DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,

View File

@ -1498,6 +1498,19 @@ class FalseBitMapClosure: public BitMapClosure {
}
};
// A version of ObjectClosure with "memory" (see _previous_address below)
class UpwardsObjectClosure: public BoolObjectClosure {
HeapWord* _previous_address;
public:
UpwardsObjectClosure() : _previous_address(NULL) { }
void set_previous(HeapWord* addr) { _previous_address = addr; }
HeapWord* previous() { return _previous_address; }
// A return value of "true" can be used by the caller to decide
// if this object's end should *NOT* be recorded in
// _previous_address above.
virtual bool do_object_bm(oop obj, MemRegion mr) = 0;
};
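// Hedged sketch of a concrete subclass (an assumption, not part of this
// changeset): tallies objects and keeps the iterator from recording the
// end address after object arrays.
class CountingUpwardsClosure : public UpwardsObjectClosure {
  size_t _count;
 public:
  CountingUpwardsClosure() : _count(0) { }
  bool do_object_b(oop obj) { return true; }   // BoolObjectClosure contract
  virtual bool do_object_bm(oop obj, MemRegion mr) {
    _count++;
    return obj->is_objArray();  // true => caller skips set_previous()
  }
  size_t count() const { return _count; }
};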
// This closure is used during the second checkpointing phase
// to rescan the marked objects on the dirty cards in the mod
// union table and the card table proper. It's invoked via

View File

@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "gc_implementation/g1/dirtyCardQueue.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "runtime/atomic.hpp"
#include "runtime/mutexLocker.hpp"

View File

@ -3529,6 +3529,29 @@ public:
}
};
bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
const HeapRegion* hr,
const VerifyOption vo) const {
switch (vo) {
case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
case VerifyOption_G1UseMarkWord: return !obj->is_gc_marked();
default: ShouldNotReachHere();
}
return false; // keep some compilers happy
}
bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
const VerifyOption vo) const {
switch (vo) {
case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
case VerifyOption_G1UseMarkWord: return !obj->is_gc_marked();
default: ShouldNotReachHere();
}
return false; // keep some compilers happy
}
void G1CollectedHeap::print_on(outputStream* st) const {
st->print(" %-20s", "garbage-first heap");
st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
@ -6598,13 +6621,13 @@ public:
if (hr->is_young()) {
// TODO
} else if (hr->startsHumongous()) {
assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->region_num()));
assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->hrs_index()));
_humongous_count.increment(1u, hr->capacity());
} else if (hr->is_empty()) {
assert(hr->containing_set() == _free_list, err_msg("Heap region %u is empty but not on the free list.", hr->region_num()));
assert(hr->containing_set() == _free_list, err_msg("Heap region %u is empty but not on the free list.", hr->hrs_index()));
_free_count.increment(1u, hr->capacity());
} else {
assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->region_num()));
assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrs_index()));
_old_count.increment(1u, hr->capacity());
}
return false;

View File

@ -706,19 +706,7 @@ public:
// This is a fast test on whether a reference points into the
// collection set or not. Assume that the reference
// points into the heap.
bool in_cset_fast_test(oop obj) {
assert(_in_cset_fast_test != NULL, "sanity");
assert(_g1_committed.contains((HeapWord*) obj), err_msg("Given reference outside of heap, is "PTR_FORMAT, (HeapWord*)obj));
// no need to subtract the bottom of the heap from obj,
// _in_cset_fast_test is biased
uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes;
bool ret = _in_cset_fast_test[index];
// let's make sure the result is consistent with what the slower
// test returns
assert( ret || !obj_in_cs(obj), "sanity");
assert(!ret || obj_in_cs(obj), "sanity");
return ret;
}
inline bool in_cset_fast_test(oop obj);
void clear_cset_fast_test() {
assert(_in_cset_fast_test_base != NULL, "sanity");
@ -1250,9 +1238,7 @@ public:
}
}
void old_set_remove(HeapRegion* hr) {
_old_set.remove(hr);
}
inline void old_set_remove(HeapRegion* hr);
size_t non_young_capacity_bytes() {
return _old_set.total_capacity_bytes() + _humongous_set.total_capacity_bytes();
@ -1343,7 +1329,7 @@ public:
void heap_region_iterate(HeapRegionClosure* blk) const;
// Return the region with the given index. It assumes the index is valid.
HeapRegion* region_at(uint index) const { return _hrs.at(index); }
inline HeapRegion* region_at(uint index) const;
// Divide the heap region sequence into "chunks" of some size (the number
// of regions divided by the number of parallel threads times some
@ -1472,10 +1458,7 @@ public:
return true;
}
bool is_in_young(const oop obj) {
HeapRegion* hr = heap_region_containing(obj);
return hr != NULL && hr->is_young();
}
inline bool is_in_young(const oop obj);
#ifdef ASSERT
virtual bool is_in_partial_collection(const void* p);
@ -1488,9 +1471,7 @@ public:
// pre-value that needs to be remembered; for the remembered-set
// update logging post-barrier, we don't maintain remembered set
// information for young gen objects.
virtual bool can_elide_initializing_store_barrier(oop new_obj) {
return is_in_young(new_obj);
}
virtual inline bool can_elide_initializing_store_barrier(oop new_obj);
// Returns "true" iff the given word_size is "very large".
static bool isHumongous(size_t word_size) {
@ -1584,23 +1565,9 @@ public:
// Note: if it is NULL it isn't dead.
bool is_obj_dead(const oop obj) const {
const HeapRegion* hr = heap_region_containing(obj);
if (hr == NULL) {
if (obj == NULL) return false;
else return true;
}
else return is_obj_dead(obj, hr);
}
inline bool is_obj_dead(const oop obj) const;
bool is_obj_ill(const oop obj) const {
const HeapRegion* hr = heap_region_containing(obj);
if (hr == NULL) {
if (obj == NULL) return false;
else return true;
}
else return is_obj_ill(obj, hr);
}
inline bool is_obj_ill(const oop obj) const;
bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo);
HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo);
@ -1694,26 +1661,10 @@ public:
bool is_obj_dead_cond(const oop obj,
const HeapRegion* hr,
const VerifyOption vo) const {
switch (vo) {
case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
case VerifyOption_G1UseMarkWord: return !obj->is_gc_marked();
default: ShouldNotReachHere();
}
return false; // keep some compilers happy
}
const VerifyOption vo) const;
bool is_obj_dead_cond(const oop obj,
const VerifyOption vo) const {
switch (vo) {
case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
case VerifyOption_G1UseMarkWord: return !obj->is_gc_marked();
default: ShouldNotReachHere();
}
return false; // keep some compilers happy
}
const VerifyOption vo) const;
// Printing
@ -1807,11 +1758,7 @@ protected:
DirtyCardQueue& dirty_card_queue() { return _dcq; }
G1SATBCardTableModRefBS* ctbs() { return _ct_bs; }
template <class T> void immediate_rs_update(HeapRegion* from, T* p, int tid) {
if (!from->is_survivor()) {
_g1_rem->par_write_ref(from, p, tid);
}
}
template <class T> inline void immediate_rs_update(HeapRegion* from, T* p, int tid);
template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
// If the new value of the field points to the same region or
@ -1853,13 +1800,7 @@ public:
refs()->push(ref);
}
template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
if (G1DeferredRSUpdate) {
deferred_rs_update(from, p, tid);
} else {
immediate_rs_update(from, p, tid);
}
}
template <class T> inline void update_rs(HeapRegion* from, T* p, int tid);
HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
HeapWord* obj = NULL;
@ -1983,54 +1924,7 @@ private:
return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
}
void do_oop_partial_array(oop* p) {
assert(has_partial_array_mask(p), "invariant");
oop from_obj = clear_partial_array_mask(p);
assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
assert(from_obj->is_objArray(), "must be obj array");
objArrayOop from_obj_array = objArrayOop(from_obj);
// The from-space object contains the real length.
int length = from_obj_array->length();
assert(from_obj->is_forwarded(), "must be forwarded");
oop to_obj = from_obj->forwardee();
assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
objArrayOop to_obj_array = objArrayOop(to_obj);
// We keep track of the next start index in the length field of the
// to-space object.
int next_index = to_obj_array->length();
assert(0 <= next_index && next_index < length,
err_msg("invariant, next index: %d, length: %d", next_index, length));
int start = next_index;
int end = length;
int remainder = end - start;
// We'll try not to push a range that's smaller than ParGCArrayScanChunk.
if (remainder > 2 * ParGCArrayScanChunk) {
end = start + ParGCArrayScanChunk;
to_obj_array->set_length(end);
// Push the remainder before we process the range in case another
// worker has run out of things to do and can steal it.
oop* from_obj_p = set_partial_array_mask(from_obj);
push_on_queue(from_obj_p);
} else {
assert(length == end, "sanity");
// We'll process the final range for this object. Restore the length
// so that the heap remains parsable in case of evacuation failure.
to_obj_array->set_length(end);
}
_scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
// Process indexes [start,end). It will also process the header
// along with the first chunk (i.e., the chunk with start == 0).
// Note that at this point the length field of to_obj_array is not
// correct given that we are using it to keep track of the next
// start index. oop_iterate_range() (thankfully!) ignores the length
// field and only relies on the start / end parameters. It does
// however return the size of the object which will be incorrect. So
// we have to ignore it even if we wanted to use it.
to_obj_array->oop_iterate_range(&_scanner, start, end);
}
inline void do_oop_partial_array(oop* p);
// This method is applied to the fields of the objects that have just been copied.
template <class T> void do_oop_evac(T* p, HeapRegion* from) {
@ -2060,26 +1954,9 @@ public:
oop copy_to_survivor_space(oop const obj);
template <class T> void deal_with_reference(T* ref_to_scan) {
if (!has_partial_array_mask(ref_to_scan)) {
// Note: we can use "raw" versions of "region_containing" because
// "obj_to_scan" is definitely in the heap, and is not in a
// humongous region.
HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
do_oop_evac(ref_to_scan, r);
} else {
do_oop_partial_array((oop*)ref_to_scan);
}
}
template <class T> inline void deal_with_reference(T* ref_to_scan);
void deal_with_reference(StarTask ref) {
assert(verify_task(ref), "sanity");
if (ref.is_narrow()) {
deal_with_reference((narrowOop*)ref);
} else {
deal_with_reference((oop*)ref);
}
}
inline void deal_with_reference(StarTask ref);
public:
void trim_queue();

View File

@ -29,6 +29,7 @@
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
@ -36,6 +37,9 @@
// Inline functions for G1CollectedHeap
// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrs.at(index); }
template <class T>
inline HeapRegion*
G1CollectedHeap::heap_region_containing(const T addr) const {
@ -55,6 +59,10 @@ G1CollectedHeap::heap_region_containing_raw(const T addr) const {
return res;
}
inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
_old_set.remove(hr);
}
inline bool G1CollectedHeap::obj_in_cs(oop obj) {
HeapRegion* r = _hrs.addr_to_region((HeapWord*) obj);
return r != NULL && r->in_collection_set();
@ -151,6 +159,24 @@ inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
}
// This is a fast test on whether a reference points into the
// collection set or not. Assume that the reference
// points into the heap.
inline bool G1CollectedHeap::in_cset_fast_test(oop obj) {
assert(_in_cset_fast_test != NULL, "sanity");
assert(_g1_committed.contains((HeapWord*) obj), err_msg("Given reference outside of heap, is "PTR_FORMAT, (HeapWord*)obj));
// no need to subtract the bottom of the heap from obj,
// _in_cset_fast_test is biased
uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes;
bool ret = _in_cset_fast_test[index];
// let's make sure the result is consistent with what the slower
// test returns
assert( ret || !obj_in_cs(obj), "sanity");
assert(!ret || obj_in_cs(obj), "sanity");
return ret;
}
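// Simplified illustration of the "biased" fast-test idea (made-up names,
// not VM code): bias the base pointer once at setup,
//   biased = base - (((uintptr_t)heap_bottom) >> shift);
// so that each membership query is a single shift plus load, with no
// subtraction of the heap bottom on the hot path:
inline bool biased_fast_test(bool* biased, void* addr, int shift) {
  return biased[((uintptr_t)addr) >> shift];
}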
#ifndef PRODUCT
// Support for G1EvacuationFailureALot
@ -224,4 +250,121 @@ inline void G1CollectedHeap::reset_evacuation_should_fail() {
}
#endif // #ifndef PRODUCT
inline bool G1CollectedHeap::is_in_young(const oop obj) {
HeapRegion* hr = heap_region_containing(obj);
return hr != NULL && hr->is_young();
}
// We don't need barriers for initializing stores to objects
// in the young gen: for the SATB pre-barrier, there is no
// pre-value that needs to be remembered; for the remembered-set
// update logging post-barrier, we don't maintain remembered set
// information for young gen objects.
inline bool G1CollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
return is_in_young(new_obj);
}
inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
const HeapRegion* hr = heap_region_containing(obj);
if (hr == NULL) {
if (obj == NULL) return false;
else return true;
}
else return is_obj_dead(obj, hr);
}
inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
const HeapRegion* hr = heap_region_containing(obj);
if (hr == NULL) {
if (obj == NULL) return false;
else return true;
}
else return is_obj_ill(obj, hr);
}
template <class T> inline void G1ParScanThreadState::immediate_rs_update(HeapRegion* from, T* p, int tid) {
if (!from->is_survivor()) {
_g1_rem->par_write_ref(from, p, tid);
}
}
template <class T> void G1ParScanThreadState::update_rs(HeapRegion* from, T* p, int tid) {
if (G1DeferredRSUpdate) {
deferred_rs_update(from, p, tid);
} else {
immediate_rs_update(from, p, tid);
}
}
inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
assert(has_partial_array_mask(p), "invariant");
oop from_obj = clear_partial_array_mask(p);
assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
assert(from_obj->is_objArray(), "must be obj array");
objArrayOop from_obj_array = objArrayOop(from_obj);
// The from-space object contains the real length.
int length = from_obj_array->length();
assert(from_obj->is_forwarded(), "must be forwarded");
oop to_obj = from_obj->forwardee();
assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
objArrayOop to_obj_array = objArrayOop(to_obj);
// We keep track of the next start index in the length field of the
// to-space object.
int next_index = to_obj_array->length();
assert(0 <= next_index && next_index < length,
err_msg("invariant, next index: %d, length: %d", next_index, length));
int start = next_index;
int end = length;
int remainder = end - start;
// We'll try not to push a range that's smaller than ParGCArrayScanChunk.
if (remainder > 2 * ParGCArrayScanChunk) {
end = start + ParGCArrayScanChunk;
to_obj_array->set_length(end);
// Push the remainder before we process the range in case another
// worker has run out of things to do and can steal it.
oop* from_obj_p = set_partial_array_mask(from_obj);
push_on_queue(from_obj_p);
} else {
assert(length == end, "sanity");
// We'll process the final range for this object. Restore the length
// so that the heap remains parsable in case of evacuation failure.
to_obj_array->set_length(end);
}
_scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
// Process indexes [start,end). It will also process the header
// along with the first chunk (i.e., the chunk with start == 0).
// Note that at this point the length field of to_obj_array is not
// correct given that we are using it to keep track of the next
// start index. oop_iterate_range() (thankfully!) ignores the length
// field and only relies on the start / end parameters. It does
// however return the size of the object which will be incorrect. So
// we have to ignore it even if we wanted to use it.
to_obj_array->oop_iterate_range(&_scanner, start, end);
}
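// Worked example of the chunking arithmetic above (illustrative numbers):
// with length == 1000, next_index == 0 and ParGCArrayScanChunk == 50, the
// remainder 1000 exceeds 2 * 50, so this pass scans [0, 50), records 50 in
// the to-space length field, and re-pushes the task so it can be stolen;
// the final pass runs once the remainder drops to 2 * ParGCArrayScanChunk
// or less, and restores the true length.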
template <class T> inline void G1ParScanThreadState::deal_with_reference(T* ref_to_scan) {
if (!has_partial_array_mask(ref_to_scan)) {
// Note: we can use "raw" versions of "region_containing" because
// "obj_to_scan" is definitely in the heap, and is not in a
// humongous region.
HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
do_oop_evac(ref_to_scan, r);
} else {
do_oop_partial_array((oop*)ref_to_scan);
}
}
inline void G1ParScanThreadState::deal_with_reference(StarTask ref) {
assert(verify_task(ref), "sanity");
if (ref.is_narrow()) {
deal_with_reference((narrowOop*)ref);
} else {
deal_with_reference((oop*)ref);
}
}
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP

View File

@ -472,9 +472,6 @@ HeapRegion::object_iterate_mem_careful(MemRegion mr,
} else if (!g1h->is_obj_dead(obj)) {
cl->do_object(obj);
}
if (cl->abort()) return cur;
// The check above must occur before the operation below, since an
// abort might invalidate the "size" operation.
cur += obj->size();
}
return NULL;

View File

@ -245,7 +245,7 @@ private:
enum ParIterState { Unclaimed, Claimed, Complete };
volatile ParIterState _iter_state;
volatile jlong _iter_claimed;
volatile size_t _iter_claimed;
// Unused unless G1RecordHRRSOops is true.
@ -319,16 +319,12 @@ public:
bool iter_is_complete();
// Support for claiming blocks of cards during iteration
size_t iter_claimed() const { return (size_t)_iter_claimed; }
size_t iter_claimed() const { return _iter_claimed; }
// Claim the next block of cards
size_t iter_claimed_next(size_t step) {
size_t current, next;
do {
current = iter_claimed();
next = current + step;
} while (Atomic::cmpxchg((jlong)next, &_iter_claimed, (jlong)current) != (jlong)current);
return current;
return Atomic::add(step, &_iter_claimed) - step;
}
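// Equivalence note (a sketch with assumed names, not from the source):
// Atomic::add returns the post-add value, so subtracting the step yields
// the pre-add value -- the start of the freshly claimed block, exactly
// what the old cmpxchg loop returned, but wait-free.
static size_t claim_block(volatile size_t* cursor, size_t step) {
  return Atomic::add(step, cursor) - step;
}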
void reset_for_par_iteration();
bool verify_ready_for_par_iteration() {

View File

@ -25,7 +25,7 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_SPARSEPRT_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_SPARSEPRT_HPP
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "memory/allocation.hpp"
#include "memory/cardTableModRefBS.hpp"

View File

@ -89,6 +89,15 @@ bool VM_GC_Operation::doit_prologue() {
assert(((_gc_cause != GCCause::_no_gc) &&
(_gc_cause != GCCause::_no_cause_specified)), "Illegal GCCause");
// To be able to handle a GC the VM initialization needs to be completed.
if (!is_init_completed()) {
vm_exit_during_initialization(
err_msg("GC triggered before VM initialization completed. Try increasing "
"NewSize, current value " UINTX_FORMAT "%s.",
byte_size_in_proper_unit(NewSize),
proper_unit_for_byte_size(NewSize)));
}
acquire_pending_list_lock();
// If the GC count has changed someone beat us to the collection
// Get the Heap_lock after the pending_list_lock.

View File

@ -35,8 +35,6 @@
#include "runtime/timer.hpp"
#ifndef PRODUCT
// Standard closure for BytecodeTracer: prints the current bytecode
// and its attributes using bytecode-specific information.
@ -600,4 +598,3 @@ void BytecodePrinter::bytecode_epilog(int bci, outputStream* st) {
}
}
}
#endif // PRODUCT

View File

@ -34,8 +34,7 @@
// By specialising the BytecodeClosure, all kinds of bytecode traces can
// be done.
#ifndef PRODUCT
// class BytecodeTracer is only used by TraceBytecodes option
// class BytecodeTracer is used by TraceBytecodes option and PrintMethodData
class BytecodeClosure;
class BytecodeTracer: AllStatic {
@ -60,6 +59,4 @@ class BytecodeClosure {
virtual void trace(methodHandle method, address bcp, outputStream* st) = 0;
};
#endif // !PRODUCT
#endif // SHARE_VM_INTERPRETER_BYTECODETRACER_HPP

View File

@ -748,6 +748,12 @@ class ArrayAllocator VALUE_OBJ_CLASS_SPEC {
bool _use_malloc;
size_t _size;
bool _free_in_destructor;
static bool should_use_malloc(size_t size) {
return size < ArrayAllocatorMallocLimit;
}
static char* allocate_inner(size_t& size, bool& use_malloc);
public:
ArrayAllocator(bool free_in_destructor = true) :
_addr(NULL), _use_malloc(false), _size(0), _free_in_destructor(free_in_destructor) { }
@ -759,6 +765,7 @@ class ArrayAllocator VALUE_OBJ_CLASS_SPEC {
}
E* allocate(size_t length);
E* reallocate(size_t new_length);
void free();
};
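// Hedged usage sketch (element type and sizes are arbitrary examples):
// small requests go through malloc, large ones through mmap, and
// reallocate() copies MIN2(old, new) bytes before freeing the old block.
ArrayAllocator<jint, mtInternal> alloc;
jint* data = alloc.allocate(64);
data = alloc.reallocate(256);   // contents of the first 64 elements survive
alloc.free();                   // or rely on the destructor (the default)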

View File

@ -122,35 +122,57 @@ template <MEMFLAGS F> void CHeapObj<F>::operator delete [](void* p){
}
template <class E, MEMFLAGS F>
E* ArrayAllocator<E, F>::allocate(size_t length) {
assert(_addr == NULL, "Already in use");
char* ArrayAllocator<E, F>::allocate_inner(size_t &size, bool &use_malloc) {
char* addr = NULL;
_size = sizeof(E) * length;
_use_malloc = _size < ArrayAllocatorMallocLimit;
if (_use_malloc) {
_addr = AllocateHeap(_size, F);
if (_addr == NULL && _size >= (size_t)os::vm_allocation_granularity()) {
if (use_malloc) {
addr = AllocateHeap(size, F);
if (addr == NULL && size >= (size_t)os::vm_allocation_granularity()) {
// malloc failed let's try with mmap instead
_use_malloc = false;
use_malloc = false;
} else {
return (E*)_addr;
return addr;
}
}
int alignment = os::vm_allocation_granularity();
_size = align_size_up(_size, alignment);
size = align_size_up(size, alignment);
_addr = os::reserve_memory(_size, NULL, alignment, F);
if (_addr == NULL) {
vm_exit_out_of_memory(_size, OOM_MMAP_ERROR, "Allocator (reserve)");
addr = os::reserve_memory(size, NULL, alignment, F);
if (addr == NULL) {
vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "Allocator (reserve)");
}
os::commit_memory_or_exit(_addr, _size, !ExecMem, "Allocator (commit)");
os::commit_memory_or_exit(addr, size, !ExecMem, "Allocator (commit)");
return addr;
}
template <class E, MEMFLAGS F>
E* ArrayAllocator<E, F>::allocate(size_t length) {
assert(_addr == NULL, "Already in use");
_size = sizeof(E) * length;
_use_malloc = should_use_malloc(_size);
_addr = allocate_inner(_size, _use_malloc);
return (E*)_addr;
}
template <class E, MEMFLAGS F>
E* ArrayAllocator<E, F>::reallocate(size_t new_length) {
size_t new_size = sizeof(E) * new_length;
bool use_malloc = should_use_malloc(new_size);
char* new_addr = allocate_inner(new_size, use_malloc);
memcpy(new_addr, _addr, MIN2(new_size, _size));
free();
_size = new_size;
_use_malloc = use_malloc;
_addr = new_addr;
return (E*)new_addr;
}
template<class E, MEMFLAGS F>
void ArrayAllocator<E, F>::free() {
if (_addr != NULL) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,8 +32,8 @@
// header: dump of archive instance plus versioning info, datestamp, etc.
// [magic # = 0xF00BABA2]
// ... padding to align on page-boundary
// read-write space from CompactingPermGenGen
// read-only space from CompactingPermGenGen
// read-write space
// read-only space
// misc data (block offset table, string table, symbols, dictionary, etc.)
// tag(666)

View File

@ -28,7 +28,6 @@
#include "memory/sharedHeap.hpp"
volatile jint GC_locker::_jni_lock_count = 0;
volatile jint GC_locker::_lock_count = 0;
volatile bool GC_locker::_needs_gc = false;
volatile bool GC_locker::_doing_gc = false;
@ -102,7 +101,7 @@ void GC_locker::jni_lock(JavaThread* thread) {
// We check that at least one thread is in a critical region before
// blocking because blocked threads are woken up by a thread exiting
// a JNI critical region.
while ((needs_gc() && is_jni_active()) || _doing_gc) {
while (is_active_and_needs_gc() || _doing_gc) {
JNICritical_lock->wait();
}
thread->enter_critical();
@ -116,27 +115,20 @@ void GC_locker::jni_unlock(JavaThread* thread) {
_jni_lock_count--;
decrement_debug_jni_lock_count();
thread->exit_critical();
if (needs_gc() && !is_jni_active()) {
if (needs_gc() && !is_active_internal()) {
// We're the last thread out. Cause a GC to occur.
// GC will also check is_active, so this check is not
// strictly needed. It's added here to make it clear that
// the GC will NOT be performed if any other caller
// of GC_locker::lock() still needs GC locked.
if (!is_active_internal()) {
_doing_gc = true;
{
// Must give up the lock while at a safepoint
MutexUnlocker munlock(JNICritical_lock);
if (PrintJNIGCStalls && PrintGCDetails) {
ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
gclog_or_tty->print_cr("%.3f: Thread \"%s\" is performing GC after exiting critical section, %d locked",
gclog_or_tty->time_stamp().seconds(), Thread::current()->name(), _jni_lock_count);
}
Universe::heap()->collect(GCCause::_gc_locker);
_doing_gc = true;
{
// Must give up the lock while at a safepoint
MutexUnlocker munlock(JNICritical_lock);
if (PrintJNIGCStalls && PrintGCDetails) {
ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
gclog_or_tty->print_cr("%.3f: Thread \"%s\" is performing GC after exiting critical section, %d locked",
gclog_or_tty->time_stamp().seconds(), Thread::current()->name(), _jni_lock_count);
}
_doing_gc = false;
Universe::heap()->collect(GCCause::_gc_locker);
}
_doing_gc = false;
_needs_gc = false;
JNICritical_lock->notify_all();
}

View File

@ -54,8 +54,6 @@ class GC_locker: public AllStatic {
// safepointing and decremented during the slow path of GC_locker
// unlocking.
static volatile jint _jni_lock_count; // number of jni active instances.
static volatile jint _lock_count; // number of other active instances
static volatile bool _needs_gc; // heap is filling, we need a GC
// note: bool is typedef'd as jint
static volatile bool _doing_gc; // unlock_critical() is doing a GC
@ -66,12 +64,6 @@ class GC_locker: public AllStatic {
static volatile jint _debug_jni_lock_count;
#endif
// Accessors
static bool is_jni_active() {
assert(_needs_gc, "only valid when _needs_gc is set");
return _jni_lock_count > 0;
}
// At a safepoint, visit all threads and count the number of active
// critical sections. This is used to ensure that all active
// critical sections are exited before a new one is started.
@ -82,7 +74,7 @@ class GC_locker: public AllStatic {
static bool is_active_internal() {
verify_critical_count();
return _lock_count > 0 || _jni_lock_count > 0;
return _jni_lock_count > 0;
}
public:
@ -132,10 +124,6 @@ class GC_locker: public AllStatic {
// not a stable predicate.
static void stall_until_clear();
// Non-structured GC locking: currently needed for JNI. Use with care!
static void lock();
static void unlock();
// The following two methods are used for JNI critical regions.
// If we find that we failed to perform a GC because the GC_locker
// was active, arrange for one as soon as possible by allowing

View File

@ -27,22 +27,6 @@
#include "memory/gcLocker.hpp"
inline void GC_locker::lock() {
// cast away volatile
Atomic::inc(&_lock_count);
CHECK_UNHANDLED_OOPS_ONLY(
if (CheckUnhandledOops) { Thread::current()->_gc_locked_out_count++; })
assert(Universe::heap() == NULL ||
!Universe::heap()->is_gc_active(), "locking failed");
}
inline void GC_locker::unlock() {
// cast away volatile
Atomic::dec(&_lock_count);
CHECK_UNHANDLED_OOPS_ONLY(
if (CheckUnhandledOops) { Thread::current()->_gc_locked_out_count--; })
}
inline void GC_locker::lock_critical(JavaThread* thread) {
if (!thread->in_critical()) {
if (needs_gc()) {

View File

@ -115,9 +115,6 @@ class ScanClosure: public OopsInKlassOrGenClosure {
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);
Prefetch::style prefetch_style() {
return Prefetch::do_write;
}
};
// Closure for scanning DefNewGeneration.
@ -137,9 +134,6 @@ class FastScanClosure: public OopsInKlassOrGenClosure {
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);
Prefetch::style prefetch_style() {
return Prefetch::do_write;
}
};
class KlassScanClosure: public KlassClosure {

View File

@ -27,11 +27,8 @@
#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "runtime/prefetch.hpp"
#include "utilities/top.hpp"
// The following classes are C++ `closures` for iterating over objects, roots and spaces
class CodeBlob;
class nmethod;
class ReferenceProcessor;
@ -39,22 +36,11 @@ class DataLayout;
class KlassClosure;
class ClassLoaderData;
// Closure provides abortability.
// The following classes are C++ `closures` for iterating over objects, roots and spaces
class Closure : public StackObj {
protected:
bool _abort;
void set_abort() { _abort = true; }
public:
Closure() : _abort(false) {}
// A subtype can use this mechanism to indicate to some iterator mapping
// functions that the iteration should cease.
bool abort() { return _abort; }
void clear_abort() { _abort = false; }
};
class Closure : public StackObj { };
// OopClosure is used for iterating through references to Java objects.
class OopClosure : public Closure {
public:
virtual void do_oop(oop* o) = 0;
@ -97,11 +83,6 @@ class ExtendedOopClosure : public OopClosure {
virtual void do_class_loader_data(ClassLoaderData* cld) { ShouldNotReachHere(); }
// Controls how prefetching is done for invocations of this closure.
Prefetch::style prefetch_style() { // Note that this is non-virtual.
return Prefetch::do_none;
}
// True iff this closure may be safely applied more than once to an oop
// location without an intervening "major reset" (like the end of a GC).
virtual bool idempotent() { return false; }
@ -177,19 +158,6 @@ public:
ObjectToOopClosure(ExtendedOopClosure* cl) : _cl(cl) {}
};
// A version of ObjectClosure with "memory" (see _previous_address below)
class UpwardsObjectClosure: public BoolObjectClosure {
HeapWord* _previous_address;
public:
UpwardsObjectClosure() : _previous_address(NULL) { }
void set_previous(HeapWord* addr) { _previous_address = addr; }
HeapWord* previous() { return _previous_address; }
// A return value of "true" can be used by the caller to decide
// if this object's end should *NOT* be recorded in
// _previous_address above.
virtual bool do_object_bm(oop obj, MemRegion mr) = 0;
};
// A version of ObjectClosure that is expected to be robust
// in the face of possibly uninitialized objects.
class ObjectClosureCareful : public ObjectClosure {

View File

@ -645,9 +645,6 @@ void MetaspaceShared::preload_and_dump(TRAPS) {
TraceTime timer("Dump Shared Spaces", TraceStartupTime);
ResourceMark rm;
// Lock out GC - is it necessary? I don't think we care.
No_GC_Verifier no_gc;
// Preload classes to be shared.
// Should use some os:: method rather than fopen() here. aB.
// Construct the path to the class list (in jre/lib)

View File

@ -302,10 +302,6 @@ void ContiguousSpace::clear(bool mangle_space) {
CompactibleSpace::clear(mangle_space);
}
bool ContiguousSpace::is_in(const void* p) const {
return _bottom <= p && p < _top;
}
bool ContiguousSpace::is_free_block(const HeapWord* p) const {
return p >= _top;
}
@ -547,115 +543,11 @@ void Space::oop_iterate(ExtendedOopClosure* blk) {
object_iterate(&blk2);
}
HeapWord* Space::object_iterate_careful(ObjectClosureCareful* cl) {
guarantee(false, "NYI");
return bottom();
}
HeapWord* Space::object_iterate_careful_m(MemRegion mr,
ObjectClosureCareful* cl) {
guarantee(false, "NYI");
return bottom();
}
void Space::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
assert(!mr.is_empty(), "Should be non-empty");
// We use MemRegion(bottom(), end()) rather than used_region() below
// because the two are not necessarily equal for some kinds of
// spaces, in particular, certain kinds of free list spaces.
// We could use the more complicated but more precise:
// MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
// but the slight imprecision seems acceptable in the assertion check.
assert(MemRegion(bottom(), end()).contains(mr),
"Should be within used space");
HeapWord* prev = cl->previous(); // max address from last time
if (prev >= mr.end()) { // nothing to do
return;
}
// This assert will not work when we go from cms space to perm
// space, and use same closure. Easy fix deferred for later. XXX YSR
// assert(prev == NULL || contains(prev), "Should be within space");
bool last_was_obj_array = false;
HeapWord *blk_start_addr, *region_start_addr;
if (prev > mr.start()) {
region_start_addr = prev;
blk_start_addr = prev;
// The previous invocation may have pushed "prev" beyond the
// last allocated block, yet there may still be blocks
// in this region due to a particular coalescing policy.
// Relax the assertion so that the case where the unallocated
// block is maintained and "prev" is beyond the unallocated
// block does not cause the assertion to fire.
assert((BlockOffsetArrayUseUnallocatedBlock &&
(!is_in(prev))) ||
(blk_start_addr == block_start(region_start_addr)), "invariant");
} else {
region_start_addr = mr.start();
blk_start_addr = block_start(region_start_addr);
}
HeapWord* region_end_addr = mr.end();
MemRegion derived_mr(region_start_addr, region_end_addr);
while (blk_start_addr < region_end_addr) {
const size_t size = block_size(blk_start_addr);
if (block_is_obj(blk_start_addr)) {
last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
} else {
last_was_obj_array = false;
}
blk_start_addr += size;
}
if (!last_was_obj_array) {
assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
"Should be within (closed) used space");
assert(blk_start_addr > prev, "Invariant");
cl->set_previous(blk_start_addr); // min address for next time
}
}
bool Space::obj_is_alive(const HeapWord* p) const {
assert (block_is_obj(p), "The address should point to an object");
return true;
}
void ContiguousSpace::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
assert(!mr.is_empty(), "Should be non-empty");
assert(used_region().contains(mr), "Should be within used space");
HeapWord* prev = cl->previous(); // max address from last time
if (prev >= mr.end()) { // nothing to do
return;
}
// See comment above (in more general method above) in case you
// happen to use this method.
assert(prev == NULL || is_in_reserved(prev), "Should be within space");
bool last_was_obj_array = false;
HeapWord *obj_start_addr, *region_start_addr;
if (prev > mr.start()) {
region_start_addr = prev;
obj_start_addr = prev;
assert(obj_start_addr == block_start(region_start_addr), "invariant");
} else {
region_start_addr = mr.start();
obj_start_addr = block_start(region_start_addr);
}
HeapWord* region_end_addr = mr.end();
MemRegion derived_mr(region_start_addr, region_end_addr);
while (obj_start_addr < region_end_addr) {
oop obj = oop(obj_start_addr);
const size_t size = obj->size();
last_was_obj_array = cl->do_object_bm(obj, derived_mr);
obj_start_addr += size;
}
if (!last_was_obj_array) {
assert((bottom() <= obj_start_addr) && (obj_start_addr <= end()),
"Should be within (closed) used space");
assert(obj_start_addr > prev, "Invariant");
cl->set_previous(obj_start_addr); // min address for next time
}
}
#if INCLUDE_ALL_GCS
#define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
\

View File

@ -120,6 +120,12 @@ class Space: public CHeapObj<mtGC> {
void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }
// Returns true if this object has been allocated since a
// generation's "save_marks" call.
virtual bool obj_allocated_since_save_marks(const oop obj) const {
return (HeapWord*)obj >= saved_mark_word();
}
MemRegionClosure* preconsumptionDirtyCardClosure() const {
return _preconsumptionDirtyCardClosure;
}
@ -127,9 +133,9 @@ class Space: public CHeapObj<mtGC> {
_preconsumptionDirtyCardClosure = cl;
}
// Returns a subregion of the space containing all the objects in
// Returns a subregion of the space containing only the allocated objects in
// the space.
virtual MemRegion used_region() const { return MemRegion(bottom(), end()); }
virtual MemRegion used_region() const = 0;
// Returns a region that is guaranteed to contain (at least) all objects
// allocated at the time of the last call to "save_marks". If the space
@ -139,7 +145,7 @@ class Space: public CHeapObj<mtGC> {
// saved mark. Otherwise, the "obj_allocated_since_save_marks" method of
// the space must distinguish between objects in the region allocated before
// and after the call to save marks.
virtual MemRegion used_region_at_save_marks() const {
MemRegion used_region_at_save_marks() const {
return MemRegion(bottom(), saved_mark_word());
}
@ -172,7 +178,9 @@ class Space: public CHeapObj<mtGC> {
// expensive operation. To prevent performance problems
// on account of its inadvertent use in product jvm's,
// we restrict its use to assertion checks only.
virtual bool is_in(const void* p) const = 0;
bool is_in(const void* p) const {
return used_region().contains(p);
}
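// Note: for a ContiguousSpace, used_region() is [bottom(), top()), so this
// inherited is_in() matches the ContiguousSpace::is_in() deleted elsewhere
// in this change.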
// Returns true iff the given reserved memory of the space contains the
// given address.
@ -204,24 +212,6 @@ class Space: public CHeapObj<mtGC> {
// objects whose internal references point to objects in the space.
virtual void safe_object_iterate(ObjectClosure* blk) = 0;
// Iterate over all objects that intersect with mr, calling "cl->do_object"
// on each. There is an exception to this: if this closure has already
// been invoked on an object, it may skip such objects in some cases. This is
// most likely to happen in an "upwards" (ascending address) iteration of
// MemRegions.
virtual void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
// Iterate over as many initialized objects in the space as possible,
// calling "cl.do_object_careful" on each. Return NULL if all objects
// in the space (at the start of the iteration) were iterated over.
// Return an address indicating the extent of the iteration in the
// event that the iteration had to return because of finding an
// uninitialized object in the space, or if the closure "cl"
// signaled early termination.
virtual HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
virtual HeapWord* object_iterate_careful_m(MemRegion mr,
ObjectClosureCareful* cl);
// Create and return a new dirty card to oop closure. Can be
// overridden to return the appropriate type of closure
// depending on the type of space in which the closure will
@ -262,10 +252,6 @@ class Space: public CHeapObj<mtGC> {
// Allocation (return NULL if full). Enforces mutual exclusion internally.
virtual HeapWord* par_allocate(size_t word_size) = 0;
// Returns true if this object has been allocated since a
// generation's "save_marks" call.
virtual bool obj_allocated_since_save_marks(const oop obj) const = 0;
// Mark-sweep-compact support: all spaces can update pointers to objects
// moving as a part of compaction.
virtual void adjust_pointers();
@ -397,7 +383,7 @@ public:
// Perform operations on the space needed after a compaction
// has been performed.
virtual void reset_after_compaction() {}
virtual void reset_after_compaction() = 0;
// Returns the next space (in the current generation) to be compacted in
// the global compaction order. Also is used to select the next
@ -462,7 +448,7 @@ protected:
HeapWord* _end_of_live;
// Minimum size of a free block.
virtual size_t minimum_free_block_size() const = 0;
virtual size_t minimum_free_block_size() const { return 0; }
// This function is invoked when an allocation of an object covering
// "start" to "end" crosses the threshold; returns the next
@ -778,7 +764,7 @@ class ContiguousSpace: public CompactibleSpace {
HeapWord* top() const { return _top; }
void set_top(HeapWord* value) { _top = value; }
virtual void set_saved_mark() { _saved_mark_word = top(); }
void set_saved_mark() { _saved_mark_word = top(); }
void reset_saved_mark() { _saved_mark_word = bottom(); }
WaterMark bottom_mark() { return WaterMark(this, bottom()); }
@ -813,35 +799,30 @@ class ContiguousSpace: public CompactibleSpace {
size_t used() const { return byte_size(bottom(), top()); }
size_t free() const { return byte_size(top(), end()); }
// Override from space.
bool is_in(const void* p) const;
virtual bool is_free_block(const HeapWord* p) const;
// In a contiguous space we have a more obvious bound on what parts
// contain objects.
MemRegion used_region() const { return MemRegion(bottom(), top()); }
MemRegion used_region_at_save_marks() const {
return MemRegion(bottom(), saved_mark_word());
}
// Allocation (return NULL if full)
virtual HeapWord* allocate(size_t word_size);
virtual HeapWord* par_allocate(size_t word_size);
virtual bool obj_allocated_since_save_marks(const oop obj) const {
return (HeapWord*)obj >= saved_mark_word();
}
// Iteration
void oop_iterate(ExtendedOopClosure* cl);
void object_iterate(ObjectClosure* blk);
// For contiguous spaces this method will iterate safely over objects
// in the space (i.e., between bottom and top) when at a safepoint.
void safe_object_iterate(ObjectClosure* blk);
void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
// iterates on objects up to the safe limit
// Iterate over as many initialized objects in the space as possible,
// calling "cl.do_object_careful" on each. Return NULL if all objects
// in the space (at the start of the iteration) were iterated over.
// Return an address indicating the extent of the iteration in the
// event that the iteration had to return because of finding an
// uninitialized object in the space, or if the closure "cl"
// signaled early termination.
HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
HeapWord* concurrent_iteration_safe_limit() {
assert(_concurrent_iteration_safe_limit <= top(),
@ -872,7 +853,6 @@ class ContiguousSpace: public CompactibleSpace {
// set new iteration safe limit
set_concurrent_iteration_safe_limit(compaction_top());
}
virtual size_t minimum_free_block_size() const { return 0; }
// Override.
DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,

View File

@ -632,7 +632,6 @@ jint universe_init() {
guarantee(sizeof(oop) % sizeof(HeapWord) == 0,
"oop size is not not a multiple of HeapWord size");
TraceTime timer("Genesis", TraceStartupTime);
GC_locker::lock(); // do not allow gc during bootstrapping
JavaClasses::compute_hard_coded_offsets();
jint status = Universe::initialize_heap();
@ -1164,8 +1163,6 @@ bool universe_post_init() {
MemoryService::add_metaspace_memory_pools();
GC_locker::unlock(); // allow gc after bootstrapping
MemoryService::set_universe_heap(Universe::_collectedHeap);
return true;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,68 +27,61 @@
#include "oops/oop.hpp"
// A ConstMethod* represents portions of a Java method which
// do not vary.
// A ConstMethod represents portions of a Java method which are not written to after
// the classfile is parsed (*see below). This part of the method can be shared across
// processes in a read-only section with Class Data Sharing (CDS). It's important
// that this class doesn't have virtual functions because the vptr cannot be shared
// with CDS.
// (*) RewriteByteCodes and RewriteFrequentPairs are exceptions, but they are turned off in CDS
//
// Memory layout (each line represents a word). Note that most
// applications load thousands of methods, so keeping the size of this
// Note that most applications load thousands of methods, so keeping the size of this
// structure small has a big impact on footprint.
// The actual bytecodes are inlined after the end of the ConstMethod struct.
//
// |------------------------------------------------------|
// | header |
// | klass |
// |------------------------------------------------------|
// | fingerprint 1 |
// | fingerprint 2 |
// | constants (oop) |
// | stackmap_data (oop) |
// | constMethod_size |
// | interp_kind | flags | code_size |
// | name index | signature index |
// | method_idnum | max_stack |
// | max_locals | size_of_parameters |
// |------------------------------------------------------|
// | |
// | byte codes |
// | |
// |------------------------------------------------------|
// | compressed linenumber table |
// | (see class CompressedLineNumberReadStream) |
// | (note that length is unknown until decompressed) |
// | (access flags bit tells whether table is present) |
// | (indexed from start of ConstMethod*) |
// | (elements not necessarily sorted!) |
// |------------------------------------------------------|
// | localvariable table elements + length (length last) |
// | (length is u2, elements are 6-tuples of u2) |
// | (see class LocalVariableTableElement) |
// | (access flags bit tells whether table is present) |
// | (indexed from end of ConstMethod*) |
// |------------------------------------------------------|
// | exception table + length (length last) |
// | (length is u2, elements are 4-tuples of u2) |
// | (see class ExceptionTableElement) |
// | (access flags bit tells whether table is present) |
// | (indexed from end of ConstMethod*) |
// |------------------------------------------------------|
// | checked exceptions elements + length (length last) |
// | (length is u2, elements are u2) |
// | (see class CheckedExceptionElement) |
// | (access flags bit tells whether table is present) |
// | (indexed from end of ConstMethod*) |
// |------------------------------------------------------|
// | method parameters elements + length (length last) |
// | (length is u2, elements are u2, u4 structures) |
// | (see class MethodParametersElement) |
// | (access flags bit tells whether table is present) |
// | (indexed from end of ConstMethod*) |
// |------------------------------------------------------|
// | generic signature index (u2) |
// | (indexed from start of constMethodOop) |
// |------------------------------------------------------|
// | annotations arrays - method, parameter, type, default|
// | pointer to Array<u1> if annotation is present |
// |------------------------------------------------------|
// The line number table is compressed and inlined following the byte codes. It is
// found as the first byte following the byte codes. Note that accessing the line
// number and local variable tables is not performance critical at all.
//
// The checked exceptions table and the local variable table are inlined after the
// line number table, and indexed from the end of the method. We do not compress the
// checked exceptions table since the average length is less than 2, and it is used
// by reflection so access should be fast. We do not bother to compress the local
// variable table either since it is mostly absent.
//
//
// ConstMethod embedded field layout (after declared fields):
// [EMBEDDED byte codes]
// [EMBEDDED compressed linenumber table]
// (see class CompressedLineNumberReadStream)
// (note that length is unknown until decompressed)
// (access flags bit tells whether table is present)
// (indexed from start of ConstMethod)
// (elements not necessarily sorted!)
// [EMBEDDED localvariable table elements + length (length last)]
// (length is u2, elements are 6-tuples of u2)
// (see class LocalVariableTableElement)
// (access flags bit tells whether table is present)
// (indexed from end of ConstMethod*)
// [EMBEDDED exception table + length (length last)]
// (length is u2, elements are 4-tuples of u2)
// (see class ExceptionTableElement)
// (access flags bit tells whether table is present)
// (indexed from end of ConstMethod*)
// [EMBEDDED checked exceptions elements + length (length last)]
// (length is u2, elements are u2)
// (see class CheckedExceptionElement)
// (access flags bit tells whether table is present)
// (indexed from end of ConstMethod*)
// [EMBEDDED method parameters elements + length (length last)]
// (length is u2, elements are u2, u4 structures)
// (see class MethodParametersElement)
// (access flags bit tells whether table is present)
// (indexed from end of ConstMethod*)
// [EMBEDDED generic signature index (u2)]
// (indexed from end of constMethodOop)
// [EMBEDDED annotations arrays - method, parameter, type, default]
// pointer to Array<u1> if annotation is present
//
// IMPORTANT: If anything gets added here, there need to be changes to
// ensure that ServiceabilityAgent doesn't get broken as a result!
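// A hedged sketch of the "length last, indexed from end" convention the
// layout above describes; the helper below is illustrative, not part of
// ConstMethod's real API. Here u2 is the class-file unsigned 16-bit type and
// 'end' is assumed to point one past the last byte of the ConstMethod block.
static inline u2 last_table_length(address end) {
  // With "length last", the innermost end-indexed table stores its element
  // count in the final u2 before 'end'; the elements precede that count.
  return *((u2*)end - 1);
}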

View File

@ -180,12 +180,12 @@ int ConstantPool::cp_to_object_index(int cp_index) {
return (i < 0) ? _no_index_sentinel : i;
}
Klass* ConstantPool::klass_at_impl(constantPoolHandle this_oop, int which, TRAPS) {
Klass* ConstantPool::klass_at_impl(constantPoolHandle this_cp, int which, TRAPS) {
// A resolved constantPool entry will contain a Klass*, otherwise a Symbol*.
// It is not safe to rely on the tag bits here, since we don't have a lock, and the entry and
// tag are not updated atomically.
CPSlot entry = this_oop->slot_at(which);
CPSlot entry = this_cp->slot_at(which);
if (entry.is_resolved()) {
assert(entry.get_klass()->is_klass(), "must be");
// Already resolved - return entry.
@ -204,15 +204,15 @@ Klass* ConstantPool::klass_at_impl(constantPoolHandle this_oop, int which, TRAPS
Symbol* name = NULL;
Handle loader;
{ MonitorLockerEx ml(this_oop->lock());
{ MonitorLockerEx ml(this_cp->lock());
if (this_oop->tag_at(which).is_unresolved_klass()) {
if (this_oop->tag_at(which).is_unresolved_klass_in_error()) {
if (this_cp->tag_at(which).is_unresolved_klass()) {
if (this_cp->tag_at(which).is_unresolved_klass_in_error()) {
in_error = true;
} else {
do_resolve = true;
name = this_oop->unresolved_klass_at(which);
loader = Handle(THREAD, this_oop->pool_holder()->class_loader());
name = this_cp->unresolved_klass_at(which);
loader = Handle(THREAD, this_cp->pool_holder()->class_loader());
}
}
} // unlocking constantPool
@ -221,26 +221,26 @@ Klass* ConstantPool::klass_at_impl(constantPoolHandle this_oop, int which, TRAPS
// The original attempt to resolve this constant pool entry failed so find the
// original error and throw it again (JVMS 5.4.3).
if (in_error) {
Symbol* error = SystemDictionary::find_resolution_error(this_oop, which);
Symbol* error = SystemDictionary::find_resolution_error(this_cp, which);
guarantee(error != (Symbol*)NULL, "tag mismatch with resolution error table");
ResourceMark rm;
// exception text will be the class name
const char* className = this_oop->unresolved_klass_at(which)->as_C_string();
const char* className = this_cp->unresolved_klass_at(which)->as_C_string();
THROW_MSG_0(error, className);
}
if (do_resolve) {
// this_oop must be unlocked during resolve_or_fail
oop protection_domain = this_oop->pool_holder()->protection_domain();
// this_cp must be unlocked during resolve_or_fail
oop protection_domain = this_cp->pool_holder()->protection_domain();
Handle h_prot (THREAD, protection_domain);
Klass* k_oop = SystemDictionary::resolve_or_fail(name, loader, h_prot, true, THREAD);
Klass* kk = SystemDictionary::resolve_or_fail(name, loader, h_prot, true, THREAD);
KlassHandle k;
if (!HAS_PENDING_EXCEPTION) {
k = KlassHandle(THREAD, k_oop);
k = KlassHandle(THREAD, kk);
// preserve the resolved klass.
mirror_handle = Handle(THREAD, k_oop->java_mirror());
mirror_handle = Handle(THREAD, kk->java_mirror());
// Do access check for klasses
verify_constant_pool_resolve(this_oop, k, THREAD);
verify_constant_pool_resolve(this_cp, k, THREAD);
}
// Failed to resolve class. We must record the errors so that subsequent attempts
@ -251,12 +251,12 @@ Klass* ConstantPool::klass_at_impl(constantPoolHandle this_oop, int which, TRAPS
bool throw_orig_error = false;
{
MonitorLockerEx ml(this_oop->lock());
MonitorLockerEx ml(this_cp->lock());
// some other thread has beaten us and has resolved the class.
if (this_oop->tag_at(which).is_klass()) {
if (this_cp->tag_at(which).is_klass()) {
CLEAR_PENDING_EXCEPTION;
entry = this_oop->resolved_klass_at(which);
entry = this_cp->resolved_klass_at(which);
return entry.get_klass();
}
@ -267,12 +267,12 @@ Klass* ConstantPool::klass_at_impl(constantPoolHandle this_oop, int which, TRAPS
// and OutOfMemoryError, etc, or if the thread was hit by stop()
// Needs clarification to section 5.4.3 of the VM spec (see 6308271)
}
else if (!this_oop->tag_at(which).is_unresolved_klass_in_error()) {
SystemDictionary::add_resolution_error(this_oop, which, error);
this_oop->tag_at_put(which, JVM_CONSTANT_UnresolvedClassInError);
else if (!this_cp->tag_at(which).is_unresolved_klass_in_error()) {
SystemDictionary::add_resolution_error(this_cp, which, error);
this_cp->tag_at_put(which, JVM_CONSTANT_UnresolvedClassInError);
} else {
// some other thread has put the class in error state.
error = SystemDictionary::find_resolution_error(this_oop, which);
error = SystemDictionary::find_resolution_error(this_cp, which);
assert(error != NULL, "checking");
throw_orig_error = true;
}
@ -281,7 +281,7 @@ Klass* ConstantPool::klass_at_impl(constantPoolHandle this_oop, int which, TRAPS
if (throw_orig_error) {
CLEAR_PENDING_EXCEPTION;
ResourceMark rm;
const char* className = this_oop->unresolved_klass_at(which)->as_C_string();
const char* className = this_cp->unresolved_klass_at(which)->as_C_string();
THROW_MSG_0(error, className);
}
@ -305,32 +305,32 @@ Klass* ConstantPool::klass_at_impl(constantPoolHandle this_oop, int which, TRAPS
}
}
}
if (k() != this_oop->pool_holder()) {
if (k() != this_cp->pool_holder()) {
// only print something if the classes are different
if (source_file != NULL) {
tty->print("RESOLVE %s %s %s:%d\n",
this_oop->pool_holder()->external_name(),
this_cp->pool_holder()->external_name(),
InstanceKlass::cast(k())->external_name(), source_file, line_number);
} else {
tty->print("RESOLVE %s %s\n",
this_oop->pool_holder()->external_name(),
this_cp->pool_holder()->external_name(),
InstanceKlass::cast(k())->external_name());
}
}
return k();
} else {
MonitorLockerEx ml(this_oop->lock());
MonitorLockerEx ml(this_cp->lock());
// Only update the constant pool if the entry is still unresolved.
do_resolve = this_oop->tag_at(which).is_unresolved_klass();
do_resolve = this_cp->tag_at(which).is_unresolved_klass();
if (do_resolve) {
ClassLoaderData* this_key = this_oop->pool_holder()->class_loader_data();
ClassLoaderData* this_key = this_cp->pool_holder()->class_loader_data();
this_key->record_dependency(k(), CHECK_NULL); // Can throw OOM
this_oop->klass_at_put(which, k());
this_cp->klass_at_put(which, k());
}
}
}
entry = this_oop->resolved_klass_at(which);
entry = this_cp->resolved_klass_at(which);
assert(entry.is_resolved() && entry.get_klass()->is_klass(), "must be resolved at this point");
return entry.get_klass();
}
@ -340,8 +340,8 @@ Klass* ConstantPool::klass_at_impl(constantPoolHandle this_oop, int which, TRAPS
// by compiler and exception handling. Also used to avoid classloads for
// instanceof operations. Returns NULL if the class has not been loaded or
// if the verification of constant pool failed
Klass* ConstantPool::klass_at_if_loaded(constantPoolHandle this_oop, int which) {
CPSlot entry = this_oop->slot_at(which);
Klass* ConstantPool::klass_at_if_loaded(constantPoolHandle this_cp, int which) {
CPSlot entry = this_cp->slot_at(which);
if (entry.is_resolved()) {
assert(entry.get_klass()->is_klass(), "must be");
return entry.get_klass();
@ -349,8 +349,8 @@ Klass* ConstantPool::klass_at_if_loaded(constantPoolHandle this_oop, int which)
assert(entry.is_unresolved(), "must be either symbol or klass");
Thread *thread = Thread::current();
Symbol* name = entry.get_symbol();
oop loader = this_oop->pool_holder()->class_loader();
oop protection_domain = this_oop->pool_holder()->protection_domain();
oop loader = this_cp->pool_holder()->class_loader();
oop protection_domain = this_cp->pool_holder()->protection_domain();
Handle h_prot (thread, protection_domain);
Handle h_loader (thread, loader);
Klass* k = SystemDictionary::find(name, h_loader, h_prot, thread);
@ -360,7 +360,7 @@ Klass* ConstantPool::klass_at_if_loaded(constantPoolHandle this_oop, int which)
EXCEPTION_MARK;
KlassHandle klass(THREAD, k);
// return NULL if verification fails
verify_constant_pool_resolve(this_oop, klass, THREAD);
verify_constant_pool_resolve(this_cp, klass, THREAD);
if (HAS_PENDING_EXCEPTION) {
CLEAR_PENDING_EXCEPTION;
return NULL;
@ -373,8 +373,8 @@ Klass* ConstantPool::klass_at_if_loaded(constantPoolHandle this_oop, int which)
}
Klass* ConstantPool::klass_ref_at_if_loaded(constantPoolHandle this_oop, int which) {
return klass_at_if_loaded(this_oop, this_oop->klass_ref_index_at(which));
Klass* ConstantPool::klass_ref_at_if_loaded(constantPoolHandle this_cp, int which) {
return klass_at_if_loaded(this_cp, this_cp->klass_ref_index_at(which));
}
@ -486,11 +486,11 @@ int ConstantPool::remap_instruction_operand_from_cache(int operand) {
}
void ConstantPool::verify_constant_pool_resolve(constantPoolHandle this_oop, KlassHandle k, TRAPS) {
void ConstantPool::verify_constant_pool_resolve(constantPoolHandle this_cp, KlassHandle k, TRAPS) {
if (k->oop_is_instance() || k->oop_is_objArray()) {
instanceKlassHandle holder (THREAD, this_oop->pool_holder());
Klass* elem_oop = k->oop_is_instance() ? k() : ObjArrayKlass::cast(k())->bottom_klass();
KlassHandle element (THREAD, elem_oop);
instanceKlassHandle holder (THREAD, this_cp->pool_holder());
Klass* elem = k->oop_is_instance() ? k() : ObjArrayKlass::cast(k())->bottom_klass();
KlassHandle element (THREAD, elem);
  // The element type could be a typeArray - we only need the access check if it is
  // a reference to another class
@ -559,10 +559,10 @@ BasicType ConstantPool::basic_type_for_signature_at(int which) {
}
void ConstantPool::resolve_string_constants_impl(constantPoolHandle this_oop, TRAPS) {
for (int index = 1; index < this_oop->length(); index++) { // Index 0 is unused
if (this_oop->tag_at(index).is_string()) {
this_oop->string_at(index, CHECK);
void ConstantPool::resolve_string_constants_impl(constantPoolHandle this_cp, TRAPS) {
for (int index = 1; index < this_cp->length(); index++) { // Index 0 is unused
if (this_cp->tag_at(index).is_string()) {
this_cp->string_at(index, CHECK);
}
}
}
@ -585,11 +585,11 @@ bool ConstantPool::resolve_class_constants(TRAPS) {
// If resolution for MethodHandle or MethodType fails, save the exception
// in the resolution error table, so that the same exception is thrown again.
void ConstantPool::save_and_throw_exception(constantPoolHandle this_oop, int which,
void ConstantPool::save_and_throw_exception(constantPoolHandle this_cp, int which,
int tag, TRAPS) {
ResourceMark rm;
Symbol* error = PENDING_EXCEPTION->klass()->name();
MonitorLockerEx ml(this_oop->lock()); // lock cpool to change tag.
MonitorLockerEx ml(this_cp->lock()); // lock cpool to change tag.
int error_tag = (tag == JVM_CONSTANT_MethodHandle) ?
JVM_CONSTANT_MethodHandleInError : JVM_CONSTANT_MethodTypeInError;
@ -601,12 +601,12 @@ void ConstantPool::save_and_throw_exception(constantPoolHandle this_oop, int whi
// and OutOfMemoryError, etc, or if the thread was hit by stop()
// Needs clarification to section 5.4.3 of the VM spec (see 6308271)
} else if (this_oop->tag_at(which).value() != error_tag) {
SystemDictionary::add_resolution_error(this_oop, which, error);
this_oop->tag_at_put(which, error_tag);
} else if (this_cp->tag_at(which).value() != error_tag) {
SystemDictionary::add_resolution_error(this_cp, which, error);
this_cp->tag_at_put(which, error_tag);
} else {
// some other thread has put the class in error state.
error = SystemDictionary::find_resolution_error(this_oop, which);
error = SystemDictionary::find_resolution_error(this_cp, which);
assert(error != NULL, "checking");
CLEAR_PENDING_EXCEPTION;
THROW_MSG(error, "");
@ -617,7 +617,7 @@ void ConstantPool::save_and_throw_exception(constantPoolHandle this_oop, int whi
// Called to resolve constants in the constant pool and return an oop.
// Some constant pool entries cache their resolved oop. This is also
// called to create oops from constants to use in arguments for invokedynamic
oop ConstantPool::resolve_constant_at_impl(constantPoolHandle this_oop, int index, int cache_index, TRAPS) {
oop ConstantPool::resolve_constant_at_impl(constantPoolHandle this_cp, int index, int cache_index, TRAPS) {
oop result_oop = NULL;
Handle throw_exception;
@ -625,23 +625,23 @@ oop ConstantPool::resolve_constant_at_impl(constantPoolHandle this_oop, int inde
// It is possible that this constant is one which is cached in the objects.
// We'll do a linear search. This should be OK because this usage is rare.
assert(index > 0, "valid index");
cache_index = this_oop->cp_to_object_index(index);
cache_index = this_cp->cp_to_object_index(index);
}
assert(cache_index == _no_index_sentinel || cache_index >= 0, "");
assert(index == _no_index_sentinel || index >= 0, "");
if (cache_index >= 0) {
result_oop = this_oop->resolved_references()->obj_at(cache_index);
result_oop = this_cp->resolved_references()->obj_at(cache_index);
if (result_oop != NULL) {
return result_oop;
// That was easy...
}
index = this_oop->object_to_cp_index(cache_index);
index = this_cp->object_to_cp_index(cache_index);
}
jvalue prim_value; // temp used only in a few cases below
int tag_value = this_oop->tag_at(index).value();
int tag_value = this_cp->tag_at(index).value();
switch (tag_value) {
@ -650,7 +650,7 @@ oop ConstantPool::resolve_constant_at_impl(constantPoolHandle this_oop, int inde
case JVM_CONSTANT_Class:
{
assert(cache_index == _no_index_sentinel, "should not have been set");
Klass* resolved = klass_at_impl(this_oop, index, CHECK_NULL);
Klass* resolved = klass_at_impl(this_cp, index, CHECK_NULL);
// ldc wants the java mirror.
result_oop = resolved->java_mirror();
break;
@ -658,17 +658,17 @@ oop ConstantPool::resolve_constant_at_impl(constantPoolHandle this_oop, int inde
case JVM_CONSTANT_String:
assert(cache_index != _no_index_sentinel, "should have been set");
if (this_oop->is_pseudo_string_at(index)) {
result_oop = this_oop->pseudo_string_at(index, cache_index);
if (this_cp->is_pseudo_string_at(index)) {
result_oop = this_cp->pseudo_string_at(index, cache_index);
break;
}
result_oop = string_at_impl(this_oop, index, cache_index, CHECK_NULL);
result_oop = string_at_impl(this_cp, index, cache_index, CHECK_NULL);
break;
case JVM_CONSTANT_MethodHandleInError:
case JVM_CONSTANT_MethodTypeInError:
{
Symbol* error = SystemDictionary::find_resolution_error(this_oop, index);
Symbol* error = SystemDictionary::find_resolution_error(this_cp, index);
guarantee(error != (Symbol*)NULL, "tag mismatch with resolution error table");
ResourceMark rm;
THROW_MSG_0(error, "");
@ -677,72 +677,72 @@ oop ConstantPool::resolve_constant_at_impl(constantPoolHandle this_oop, int inde
case JVM_CONSTANT_MethodHandle:
{
int ref_kind = this_oop->method_handle_ref_kind_at(index);
int callee_index = this_oop->method_handle_klass_index_at(index);
Symbol* name = this_oop->method_handle_name_ref_at(index);
Symbol* signature = this_oop->method_handle_signature_ref_at(index);
int ref_kind = this_cp->method_handle_ref_kind_at(index);
int callee_index = this_cp->method_handle_klass_index_at(index);
Symbol* name = this_cp->method_handle_name_ref_at(index);
Symbol* signature = this_cp->method_handle_signature_ref_at(index);
if (PrintMiscellaneous)
tty->print_cr("resolve JVM_CONSTANT_MethodHandle:%d [%d/%d/%d] %s.%s",
ref_kind, index, this_oop->method_handle_index_at(index),
ref_kind, index, this_cp->method_handle_index_at(index),
callee_index, name->as_C_string(), signature->as_C_string());
KlassHandle callee;
{ Klass* k = klass_at_impl(this_oop, callee_index, CHECK_NULL);
{ Klass* k = klass_at_impl(this_cp, callee_index, CHECK_NULL);
callee = KlassHandle(THREAD, k);
}
KlassHandle klass(THREAD, this_oop->pool_holder());
KlassHandle klass(THREAD, this_cp->pool_holder());
Handle value = SystemDictionary::link_method_handle_constant(klass, ref_kind,
callee, name, signature,
THREAD);
result_oop = value();
if (HAS_PENDING_EXCEPTION) {
save_and_throw_exception(this_oop, index, tag_value, CHECK_NULL);
save_and_throw_exception(this_cp, index, tag_value, CHECK_NULL);
}
break;
}
case JVM_CONSTANT_MethodType:
{
Symbol* signature = this_oop->method_type_signature_at(index);
Symbol* signature = this_cp->method_type_signature_at(index);
if (PrintMiscellaneous)
tty->print_cr("resolve JVM_CONSTANT_MethodType [%d/%d] %s",
index, this_oop->method_type_index_at(index),
index, this_cp->method_type_index_at(index),
signature->as_C_string());
KlassHandle klass(THREAD, this_oop->pool_holder());
KlassHandle klass(THREAD, this_cp->pool_holder());
Handle value = SystemDictionary::find_method_handle_type(signature, klass, THREAD);
result_oop = value();
if (HAS_PENDING_EXCEPTION) {
save_and_throw_exception(this_oop, index, tag_value, CHECK_NULL);
save_and_throw_exception(this_cp, index, tag_value, CHECK_NULL);
}
break;
}
case JVM_CONSTANT_Integer:
assert(cache_index == _no_index_sentinel, "should not have been set");
prim_value.i = this_oop->int_at(index);
prim_value.i = this_cp->int_at(index);
result_oop = java_lang_boxing_object::create(T_INT, &prim_value, CHECK_NULL);
break;
case JVM_CONSTANT_Float:
assert(cache_index == _no_index_sentinel, "should not have been set");
prim_value.f = this_oop->float_at(index);
prim_value.f = this_cp->float_at(index);
result_oop = java_lang_boxing_object::create(T_FLOAT, &prim_value, CHECK_NULL);
break;
case JVM_CONSTANT_Long:
assert(cache_index == _no_index_sentinel, "should not have been set");
prim_value.j = this_oop->long_at(index);
prim_value.j = this_cp->long_at(index);
result_oop = java_lang_boxing_object::create(T_LONG, &prim_value, CHECK_NULL);
break;
case JVM_CONSTANT_Double:
assert(cache_index == _no_index_sentinel, "should not have been set");
prim_value.d = this_oop->double_at(index);
prim_value.d = this_cp->double_at(index);
result_oop = java_lang_boxing_object::create(T_DOUBLE, &prim_value, CHECK_NULL);
break;
default:
DEBUG_ONLY( tty->print_cr("*** %p: tag at CP[%d/%d] = %d",
this_oop(), index, cache_index, tag_value) );
this_cp(), index, cache_index, tag_value) );
assert(false, "unexpected constant tag");
break;
}
@ -750,15 +750,15 @@ oop ConstantPool::resolve_constant_at_impl(constantPoolHandle this_oop, int inde
if (cache_index >= 0) {
// Cache the oop here also.
Handle result_handle(THREAD, result_oop);
MonitorLockerEx ml(this_oop->lock()); // don't know if we really need this
oop result = this_oop->resolved_references()->obj_at(cache_index);
MonitorLockerEx ml(this_cp->lock()); // don't know if we really need this
oop result = this_cp->resolved_references()->obj_at(cache_index);
// Benign race condition: resolved_references may already be filled in while we were trying to lock.
// The important thing here is that all threads pick up the same result.
// It doesn't matter which racing thread wins, as long as only one
// result is used by all threads, and all future queries.
// That result may be either a resolved constant or a failure exception.
if (result == NULL) {
this_oop->resolved_references()->obj_at_put(cache_index, result_handle());
this_cp->resolved_references()->obj_at_put(cache_index, result_handle());
return result_handle();
} else {
// Return the winning thread's result. This can be different than
@ -778,8 +778,8 @@ oop ConstantPool::uncached_string_at(int which, TRAPS) {
}
oop ConstantPool::resolve_bootstrap_specifier_at_impl(constantPoolHandle this_oop, int index, TRAPS) {
assert(this_oop->tag_at(index).is_invoke_dynamic(), "Corrupted constant pool");
oop ConstantPool::resolve_bootstrap_specifier_at_impl(constantPoolHandle this_cp, int index, TRAPS) {
assert(this_cp->tag_at(index).is_invoke_dynamic(), "Corrupted constant pool");
Handle bsm;
int argc;
@ -787,14 +787,14 @@ oop ConstantPool::resolve_bootstrap_specifier_at_impl(constantPoolHandle this_oo
// JVM_CONSTANT_InvokeDynamic is an ordered pair of [bootm, name&type], plus optional arguments
// The bootm, being a JVM_CONSTANT_MethodHandle, has its own cache entry.
// It is accompanied by the optional arguments.
int bsm_index = this_oop->invoke_dynamic_bootstrap_method_ref_index_at(index);
oop bsm_oop = this_oop->resolve_possibly_cached_constant_at(bsm_index, CHECK_NULL);
int bsm_index = this_cp->invoke_dynamic_bootstrap_method_ref_index_at(index);
oop bsm_oop = this_cp->resolve_possibly_cached_constant_at(bsm_index, CHECK_NULL);
if (!java_lang_invoke_MethodHandle::is_instance(bsm_oop)) {
    THROW_MSG_NULL(vmSymbols::java_lang_LinkageError(), "BSM not a MethodHandle");
}
// Extract the optional static arguments.
argc = this_oop->invoke_dynamic_argument_count_at(index);
argc = this_cp->invoke_dynamic_argument_count_at(index);
if (argc == 0) return bsm_oop;
bsm = Handle(THREAD, bsm_oop);
@ -808,21 +808,21 @@ oop ConstantPool::resolve_bootstrap_specifier_at_impl(constantPoolHandle this_oo
info->obj_at_put(0, bsm());
for (int i = 0; i < argc; i++) {
int arg_index = this_oop->invoke_dynamic_argument_index_at(index, i);
oop arg_oop = this_oop->resolve_possibly_cached_constant_at(arg_index, CHECK_NULL);
int arg_index = this_cp->invoke_dynamic_argument_index_at(index, i);
oop arg_oop = this_cp->resolve_possibly_cached_constant_at(arg_index, CHECK_NULL);
info->obj_at_put(1+i, arg_oop);
}
return info();
}
oop ConstantPool::string_at_impl(constantPoolHandle this_oop, int which, int obj_index, TRAPS) {
oop ConstantPool::string_at_impl(constantPoolHandle this_cp, int which, int obj_index, TRAPS) {
// If the string has already been interned, this entry will be non-null
oop str = this_oop->resolved_references()->obj_at(obj_index);
oop str = this_cp->resolved_references()->obj_at(obj_index);
if (str != NULL) return str;
Symbol* sym = this_oop->unresolved_string_at(which);
Symbol* sym = this_cp->unresolved_string_at(which);
str = StringTable::intern(sym, CHECK_(NULL));
this_oop->string_at_put(which, obj_index, str);
this_cp->string_at_put(which, obj_index, str);
assert(java_lang_String::is_instance(str), "must be string");
return str;
}
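// The benign race documented in resolve_constant_at_impl above boils down to:
// resolve outside the lock, then publish under the lock only if the slot is
// still empty, so every racing thread adopts a single winner. A hedged
// distillation of that discipline (names illustrative, not this file's API):
static oop publish_resolved(objArrayOop cache, int i, oop resolved, Monitor* lock) {
  MonitorLockerEx ml(lock);
  oop prior = cache->obj_at(i);
  if (prior != NULL) {
    return prior;                   // another thread already published
  }
  cache->obj_at_put(i, resolved);   // first publisher wins; all threads
  return resolved;                  // observe the same result afterwards
}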

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -48,7 +48,7 @@
# include "bytes_ppc.hpp"
#endif
// A constantPool is an array containing class constants as described in the
// A ConstantPool is an array containing class constants as described in the
// class file.
//
// Most of the constant pool entries are written during class parsing, which
@ -81,9 +81,10 @@ class CPSlot VALUE_OBJ_CLASS_SPEC {
};
class KlassSizeStats;
class ConstantPool : public Metadata {
friend class VMStructs;
friend class BytecodeInterpreter; // Directly extracts an oop in the pool for fast instanceof/checkcast
friend class BytecodeInterpreter; // Directly extracts a klass in the pool for fast instanceof/checkcast
friend class Universe; // For null constructor
private:
Array<u1>* _tags; // the tag array describing the constant pool's contents
@ -747,13 +748,13 @@ class ConstantPool : public Metadata {
friend class SystemDictionary;
// Used by compiler to prevent classloading.
static Method* method_at_if_loaded (constantPoolHandle this_oop, int which);
static bool has_appendix_at_if_loaded (constantPoolHandle this_oop, int which);
static oop appendix_at_if_loaded (constantPoolHandle this_oop, int which);
static bool has_method_type_at_if_loaded (constantPoolHandle this_oop, int which);
static oop method_type_at_if_loaded (constantPoolHandle this_oop, int which);
static Klass* klass_at_if_loaded (constantPoolHandle this_oop, int which);
static Klass* klass_ref_at_if_loaded (constantPoolHandle this_oop, int which);
static Method* method_at_if_loaded (constantPoolHandle this_cp, int which);
static bool has_appendix_at_if_loaded (constantPoolHandle this_cp, int which);
static oop appendix_at_if_loaded (constantPoolHandle this_cp, int which);
static bool has_method_type_at_if_loaded (constantPoolHandle this_cp, int which);
static oop method_type_at_if_loaded (constantPoolHandle this_cp, int which);
static Klass* klass_at_if_loaded (constantPoolHandle this_cp, int which);
static Klass* klass_ref_at_if_loaded (constantPoolHandle this_cp, int which);
// Routines currently used for annotations (only called by jvm.cpp) but which might be used in the
// future by other Java code. These take constant pool indices rather than
@ -811,19 +812,19 @@ class ConstantPool : public Metadata {
}
// Performs the LinkResolver checks
static void verify_constant_pool_resolve(constantPoolHandle this_oop, KlassHandle klass, TRAPS);
static void verify_constant_pool_resolve(constantPoolHandle this_cp, KlassHandle klass, TRAPS);
// Implementation of methods that needs an exposed 'this' pointer, in order to
// handle GC while executing the method
static Klass* klass_at_impl(constantPoolHandle this_oop, int which, TRAPS);
static oop string_at_impl(constantPoolHandle this_oop, int which, int obj_index, TRAPS);
static Klass* klass_at_impl(constantPoolHandle this_cp, int which, TRAPS);
static oop string_at_impl(constantPoolHandle this_cp, int which, int obj_index, TRAPS);
// Resolve string constants (to prevent allocation during compilation)
static void resolve_string_constants_impl(constantPoolHandle this_oop, TRAPS);
static void resolve_string_constants_impl(constantPoolHandle this_cp, TRAPS);
static oop resolve_constant_at_impl(constantPoolHandle this_oop, int index, int cache_index, TRAPS);
static void save_and_throw_exception(constantPoolHandle this_oop, int which, int tag_value, TRAPS);
static oop resolve_bootstrap_specifier_at_impl(constantPoolHandle this_oop, int index, TRAPS);
static oop resolve_constant_at_impl(constantPoolHandle this_cp, int index, int cache_index, TRAPS);
static void save_and_throw_exception(constantPoolHandle this_cp, int which, int tag_value, TRAPS);
static oop resolve_bootstrap_specifier_at_impl(constantPoolHandle this_cp, int index, TRAPS);
public:
// Merging ConstantPool* support:

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -328,7 +328,7 @@ void ConstantPoolCacheEntry::set_method_handle_common(constantPoolHandle cpool,
// the f1 method has signature '(Ljl/Object;Ljl/invoke/MethodType;)Ljl/Object;',
// not '(Ljava/lang/String;)Ljava/util/List;'.
// The fact that String and List are involved is encoded in the MethodType in refs[f2].
// This allows us to create fewer method oops, while keeping type safety.
// This allows us to create fewer Methods, while keeping type safety.
//
objArrayHandle resolved_references = cpool->resolved_references();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -102,8 +102,9 @@ class PSPromotionManager;
// _f1 = Method* for non-virtual calls, unused by virtual calls.
// for interface calls, which are essentially virtual but need a klass,
// contains Klass* for the corresponding interface.
// for invokedynamic, f1 contains a site-specific CallSite object (as an appendix)
// for invokehandle, f1 contains a site-specific MethodType object (as an appendix)
// for invokedynamic and invokehandle, f1 contains the adapter method which
// manages the actual call. The appendix is stored in the ConstantPool
// resolved_references array.
// (upcoming metadata changes will move the appendix to a separate array)
// _f2 = vtable/itable index (or final Method*) for virtual calls only,
// unused by non-virtual. The is_vfinal flag indicates this is a

View File

@ -432,8 +432,8 @@ void InstanceKlass::eager_initialize(Thread *thread) {
if (!InstanceKlass::cast(super)->is_initialized()) return;
// call body to expose the this pointer
instanceKlassHandle this_oop(thread, this);
eager_initialize_impl(this_oop);
instanceKlassHandle this_k(thread, this);
eager_initialize_impl(this_k);
}
}
@ -470,16 +470,16 @@ void InstanceKlass::fence_and_clear_init_lock() {
assert(!is_not_initialized(), "class must be initialized now");
}
void InstanceKlass::eager_initialize_impl(instanceKlassHandle this_oop) {
void InstanceKlass::eager_initialize_impl(instanceKlassHandle this_k) {
EXCEPTION_MARK;
oop init_lock = this_oop->init_lock();
oop init_lock = this_k->init_lock();
ObjectLocker ol(init_lock, THREAD, init_lock != NULL);
// abort if someone beat us to the initialization
if (!this_oop->is_not_initialized()) return; // note: not equivalent to is_initialized()
if (!this_k->is_not_initialized()) return; // note: not equivalent to is_initialized()
ClassState old_state = this_oop->init_state();
link_class_impl(this_oop, true, THREAD);
ClassState old_state = this_k->init_state();
link_class_impl(this_k, true, THREAD);
if (HAS_PENDING_EXCEPTION) {
CLEAR_PENDING_EXCEPTION;
// Abort if linking the class throws an exception.
@ -487,16 +487,16 @@ void InstanceKlass::eager_initialize_impl(instanceKlassHandle this_oop) {
// Use a test to avoid redundantly resetting the state if there's
// no change. Set_init_state() asserts that state changes make
// progress, whereas here we might just be spinning in place.
if( old_state != this_oop->_init_state )
this_oop->set_init_state (old_state);
if( old_state != this_k->_init_state )
this_k->set_init_state (old_state);
} else {
    // linking successful, mark class as initialized
this_oop->set_init_state (fully_initialized);
this_oop->fence_and_clear_init_lock();
this_k->set_init_state (fully_initialized);
this_k->fence_and_clear_init_lock();
// trace
if (TraceClassInitialization) {
ResourceMark rm(THREAD);
tty->print_cr("[Initialized %s without side effects]", this_oop->external_name());
tty->print_cr("[Initialized %s without side effects]", this_k->external_name());
}
}
}
@ -508,8 +508,8 @@ void InstanceKlass::eager_initialize_impl(instanceKlassHandle this_oop) {
void InstanceKlass::initialize(TRAPS) {
if (this->should_be_initialized()) {
HandleMark hm(THREAD);
instanceKlassHandle this_oop(THREAD, this);
initialize_impl(this_oop, CHECK);
instanceKlassHandle this_k(THREAD, this);
initialize_impl(this_k, CHECK);
// Note: at this point the class may be initialized
// OR it may be in the state of being initialized
// in case of recursive initialization!
@ -520,11 +520,11 @@ void InstanceKlass::initialize(TRAPS) {
bool InstanceKlass::verify_code(
instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS) {
instanceKlassHandle this_k, bool throw_verifyerror, TRAPS) {
// 1) Verify the bytecodes
Verifier::Mode mode =
throw_verifyerror ? Verifier::ThrowException : Verifier::NoException;
return Verifier::verify(this_oop, mode, this_oop->should_verify_class(), CHECK_false);
return Verifier::verify(this_k, mode, this_k->should_verify_class(), CHECK_false);
}
@ -540,8 +540,8 @@ void InstanceKlass::link_class(TRAPS) {
assert(is_loaded(), "must be loaded");
if (!is_linked()) {
HandleMark hm(THREAD);
instanceKlassHandle this_oop(THREAD, this);
link_class_impl(this_oop, true, CHECK);
instanceKlassHandle this_k(THREAD, this);
link_class_impl(this_k, true, CHECK);
}
}
@ -551,22 +551,22 @@ bool InstanceKlass::link_class_or_fail(TRAPS) {
assert(is_loaded(), "must be loaded");
if (!is_linked()) {
HandleMark hm(THREAD);
instanceKlassHandle this_oop(THREAD, this);
link_class_impl(this_oop, false, CHECK_false);
instanceKlassHandle this_k(THREAD, this);
link_class_impl(this_k, false, CHECK_false);
}
return is_linked();
}
bool InstanceKlass::link_class_impl(
instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS) {
instanceKlassHandle this_k, bool throw_verifyerror, TRAPS) {
// check for error state
if (this_oop->is_in_error_state()) {
if (this_k->is_in_error_state()) {
ResourceMark rm(THREAD);
THROW_MSG_(vmSymbols::java_lang_NoClassDefFoundError(),
this_oop->external_name(), false);
this_k->external_name(), false);
}
// return if already verified
if (this_oop->is_linked()) {
if (this_k->is_linked()) {
return true;
}
@ -576,7 +576,7 @@ bool InstanceKlass::link_class_impl(
JavaThread* jt = (JavaThread*)THREAD;
// link super class before linking this class
instanceKlassHandle super(THREAD, this_oop->super());
instanceKlassHandle super(THREAD, this_k->super());
if (super.not_null()) {
if (super->is_interface()) { // check if super class is an interface
ResourceMark rm(THREAD);
@ -584,7 +584,7 @@ bool InstanceKlass::link_class_impl(
THREAD_AND_LOCATION,
vmSymbols::java_lang_IncompatibleClassChangeError(),
"class %s has interface %s as super class",
this_oop->external_name(),
this_k->external_name(),
super->external_name()
);
return false;
@ -594,7 +594,7 @@ bool InstanceKlass::link_class_impl(
}
// link all interfaces implemented by this class before linking this class
Array<Klass*>* interfaces = this_oop->local_interfaces();
Array<Klass*>* interfaces = this_k->local_interfaces();
int num_interfaces = interfaces->length();
for (int index = 0; index < num_interfaces; index++) {
HandleMark hm(THREAD);
@ -603,7 +603,7 @@ bool InstanceKlass::link_class_impl(
}
// in case the class is linked in the process of linking its superclasses
if (this_oop->is_linked()) {
if (this_k->is_linked()) {
return true;
}
@ -618,14 +618,14 @@ bool InstanceKlass::link_class_impl(
// verification & rewriting
{
oop init_lock = this_oop->init_lock();
oop init_lock = this_k->init_lock();
ObjectLocker ol(init_lock, THREAD, init_lock != NULL);
// rewritten will have been set if loader constraint error found
// on an earlier link attempt
// don't verify or rewrite if already rewritten
if (!this_oop->is_linked()) {
if (!this_oop->is_rewritten()) {
if (!this_k->is_linked()) {
if (!this_k->is_rewritten()) {
{
// Timer includes any side effects of class verification (resolution,
// etc), but not recursive entry into verify_code().
@ -635,7 +635,7 @@ bool InstanceKlass::link_class_impl(
jt->get_thread_stat()->perf_recursion_counts_addr(),
jt->get_thread_stat()->perf_timers_addr(),
PerfClassTraceTime::CLASS_VERIFY);
bool verify_ok = verify_code(this_oop, throw_verifyerror, THREAD);
bool verify_ok = verify_code(this_k, throw_verifyerror, THREAD);
if (!verify_ok) {
return false;
}
@ -644,39 +644,39 @@ bool InstanceKlass::link_class_impl(
// Just in case a side-effect of verify linked this class already
// (which can sometimes happen since the verifier loads classes
// using custom class loaders, which are free to initialize things)
if (this_oop->is_linked()) {
if (this_k->is_linked()) {
return true;
}
// also sets rewritten
this_oop->rewrite_class(CHECK_false);
this_k->rewrite_class(CHECK_false);
}
// relocate jsrs and link methods after they are all rewritten
this_oop->link_methods(CHECK_false);
this_k->link_methods(CHECK_false);
// Initialize the vtable and interface table after
// methods have been rewritten since rewrite may
// fabricate new Method*s.
// also does loader constraint checking
if (!this_oop()->is_shared()) {
if (!this_k()->is_shared()) {
ResourceMark rm(THREAD);
this_oop->vtable()->initialize_vtable(true, CHECK_false);
this_oop->itable()->initialize_itable(true, CHECK_false);
this_k->vtable()->initialize_vtable(true, CHECK_false);
this_k->itable()->initialize_itable(true, CHECK_false);
}
#ifdef ASSERT
else {
ResourceMark rm(THREAD);
this_oop->vtable()->verify(tty, true);
this_k->vtable()->verify(tty, true);
// In case itable verification is ever added.
// this_oop->itable()->verify(tty, true);
// this_k->itable()->verify(tty, true);
}
#endif
this_oop->set_init_state(linked);
this_k->set_init_state(linked);
if (JvmtiExport::should_post_class_prepare()) {
Thread *thread = THREAD;
assert(thread->is_Java_thread(), "thread->is_Java_thread()");
JvmtiExport::post_class_prepare((JavaThread *) thread, this_oop());
JvmtiExport::post_class_prepare((JavaThread *) thread, this_k());
}
}
}
@ -689,13 +689,13 @@ bool InstanceKlass::link_class_impl(
// verification but before the first method of the class is executed.
void InstanceKlass::rewrite_class(TRAPS) {
assert(is_loaded(), "must be loaded");
instanceKlassHandle this_oop(THREAD, this);
if (this_oop->is_rewritten()) {
assert(this_oop()->is_shared(), "rewriting an unshared class?");
instanceKlassHandle this_k(THREAD, this);
if (this_k->is_rewritten()) {
assert(this_k()->is_shared(), "rewriting an unshared class?");
return;
}
Rewriter::rewrite(this_oop, CHECK);
this_oop->set_rewritten();
Rewriter::rewrite(this_k, CHECK);
this_k->set_rewritten();
}
// Now relocate and link method entry points after class is rewritten.
@ -729,19 +729,19 @@ void InstanceKlass::link_methods(TRAPS) {
}
void InstanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) {
void InstanceKlass::initialize_impl(instanceKlassHandle this_k, TRAPS) {
// Make sure klass is linked (verified) before initialization
// A class could already be verified, since it has been reflected upon.
this_oop->link_class(CHECK);
this_k->link_class(CHECK);
DTRACE_CLASSINIT_PROBE(required, InstanceKlass::cast(this_oop()), -1);
DTRACE_CLASSINIT_PROBE(required, InstanceKlass::cast(this_k()), -1);
bool wait = false;
// refer to the JVM book page 47 for description of steps
// Step 1
{
oop init_lock = this_oop->init_lock();
oop init_lock = this_k->init_lock();
ObjectLocker ol(init_lock, THREAD, init_lock != NULL);
Thread *self = THREAD; // it's passed the current thread
@ -750,29 +750,29 @@ void InstanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) {
// If we were to use wait() instead of waitInterruptibly() then
// we might end up throwing IE from link/symbol resolution sites
// that aren't expected to throw. This would wreak havoc. See 6320309.
while(this_oop->is_being_initialized() && !this_oop->is_reentrant_initialization(self)) {
while(this_k->is_being_initialized() && !this_k->is_reentrant_initialization(self)) {
wait = true;
ol.waitUninterruptibly(CHECK);
}
// Step 3
if (this_oop->is_being_initialized() && this_oop->is_reentrant_initialization(self)) {
DTRACE_CLASSINIT_PROBE_WAIT(recursive, InstanceKlass::cast(this_oop()), -1,wait);
if (this_k->is_being_initialized() && this_k->is_reentrant_initialization(self)) {
DTRACE_CLASSINIT_PROBE_WAIT(recursive, InstanceKlass::cast(this_k()), -1,wait);
return;
}
// Step 4
if (this_oop->is_initialized()) {
DTRACE_CLASSINIT_PROBE_WAIT(concurrent, InstanceKlass::cast(this_oop()), -1,wait);
if (this_k->is_initialized()) {
DTRACE_CLASSINIT_PROBE_WAIT(concurrent, InstanceKlass::cast(this_k()), -1,wait);
return;
}
// Step 5
if (this_oop->is_in_error_state()) {
DTRACE_CLASSINIT_PROBE_WAIT(erroneous, InstanceKlass::cast(this_oop()), -1,wait);
if (this_k->is_in_error_state()) {
DTRACE_CLASSINIT_PROBE_WAIT(erroneous, InstanceKlass::cast(this_k()), -1,wait);
ResourceMark rm(THREAD);
const char* desc = "Could not initialize class ";
const char* className = this_oop->external_name();
const char* className = this_k->external_name();
size_t msglen = strlen(desc) + strlen(className) + 1;
char* message = NEW_RESOURCE_ARRAY(char, msglen);
if (NULL == message) {
@ -785,13 +785,13 @@ void InstanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) {
}
// Step 6
this_oop->set_init_state(being_initialized);
this_oop->set_init_thread(self);
this_k->set_init_state(being_initialized);
this_k->set_init_thread(self);
}
// Step 7
Klass* super_klass = this_oop->super();
if (super_klass != NULL && !this_oop->is_interface() && super_klass->should_be_initialized()) {
Klass* super_klass = this_k->super();
if (super_klass != NULL && !this_k->is_interface() && super_klass->should_be_initialized()) {
super_klass->initialize(THREAD);
if (HAS_PENDING_EXCEPTION) {
@ -799,18 +799,18 @@ void InstanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) {
CLEAR_PENDING_EXCEPTION;
{
EXCEPTION_MARK;
this_oop->set_initialization_state_and_notify(initialization_error, THREAD); // Locks object, set state, and notify all waiting threads
this_k->set_initialization_state_and_notify(initialization_error, THREAD); // Locks object, set state, and notify all waiting threads
CLEAR_PENDING_EXCEPTION; // ignore any exception thrown, superclass initialization error is thrown below
}
DTRACE_CLASSINIT_PROBE_WAIT(super__failed, InstanceKlass::cast(this_oop()), -1,wait);
DTRACE_CLASSINIT_PROBE_WAIT(super__failed, InstanceKlass::cast(this_k()), -1,wait);
THROW_OOP(e());
}
}
if (this_oop->has_default_methods()) {
if (this_k->has_default_methods()) {
// Step 7.5: initialize any interfaces which have default methods
for (int i = 0; i < this_oop->local_interfaces()->length(); ++i) {
Klass* iface = this_oop->local_interfaces()->at(i);
for (int i = 0; i < this_k->local_interfaces()->length(); ++i) {
Klass* iface = this_k->local_interfaces()->at(i);
InstanceKlass* ik = InstanceKlass::cast(iface);
if (ik->has_default_methods() && ik->should_be_initialized()) {
ik->initialize(THREAD);
@ -821,7 +821,7 @@ void InstanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) {
{
EXCEPTION_MARK;
// Locks object, set state, and notify all waiting threads
this_oop->set_initialization_state_and_notify(
this_k->set_initialization_state_and_notify(
initialization_error, THREAD);
// ignore any exception thrown, superclass initialization error is
@ -829,7 +829,7 @@ void InstanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) {
CLEAR_PENDING_EXCEPTION;
}
DTRACE_CLASSINIT_PROBE_WAIT(
super__failed, InstanceKlass::cast(this_oop()), -1, wait);
super__failed, InstanceKlass::cast(this_k()), -1, wait);
THROW_OOP(e());
}
}
@ -840,7 +840,7 @@ void InstanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) {
{
assert(THREAD->is_Java_thread(), "non-JavaThread in initialize_impl");
JavaThread* jt = (JavaThread*)THREAD;
DTRACE_CLASSINIT_PROBE_WAIT(clinit, InstanceKlass::cast(this_oop()), -1,wait);
DTRACE_CLASSINIT_PROBE_WAIT(clinit, InstanceKlass::cast(this_k()), -1,wait);
// Timer includes any side effects of class initialization (resolution,
// etc), but not recursive entry into call_class_initializer().
PerfClassTraceTime timer(ClassLoader::perf_class_init_time(),
@ -849,14 +849,14 @@ void InstanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) {
jt->get_thread_stat()->perf_recursion_counts_addr(),
jt->get_thread_stat()->perf_timers_addr(),
PerfClassTraceTime::CLASS_CLINIT);
this_oop->call_class_initializer(THREAD);
this_k->call_class_initializer(THREAD);
}
// Step 9
if (!HAS_PENDING_EXCEPTION) {
this_oop->set_initialization_state_and_notify(fully_initialized, CHECK);
this_k->set_initialization_state_and_notify(fully_initialized, CHECK);
{ ResourceMark rm(THREAD);
debug_only(this_oop->vtable()->verify(tty, true);)
debug_only(this_k->vtable()->verify(tty, true);)
}
}
else {
@ -868,13 +868,13 @@ void InstanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) {
JvmtiExport::clear_detected_exception((JavaThread*)THREAD);
{
EXCEPTION_MARK;
this_oop->set_initialization_state_and_notify(initialization_error, THREAD);
this_k->set_initialization_state_and_notify(initialization_error, THREAD);
CLEAR_PENDING_EXCEPTION; // ignore any exception thrown, class initialization error is thrown below
// JVMTI has already reported the pending exception
// JVMTI internal flag reset is needed in order to report ExceptionInInitializerError
JvmtiExport::clear_detected_exception((JavaThread*)THREAD);
}
DTRACE_CLASSINIT_PROBE_WAIT(error, InstanceKlass::cast(this_oop()), -1,wait);
DTRACE_CLASSINIT_PROBE_WAIT(error, InstanceKlass::cast(this_k()), -1,wait);
if (e->is_a(SystemDictionary::Error_klass())) {
THROW_OOP(e());
} else {
@ -884,7 +884,7 @@ void InstanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) {
&args);
}
}
DTRACE_CLASSINIT_PROBE_WAIT(end, InstanceKlass::cast(this_oop()), -1,wait);
DTRACE_CLASSINIT_PROBE_WAIT(end, InstanceKlass::cast(this_k()), -1,wait);
}
@ -894,11 +894,11 @@ void InstanceKlass::set_initialization_state_and_notify(ClassState state, TRAPS)
set_initialization_state_and_notify_impl(kh, state, CHECK);
}
void InstanceKlass::set_initialization_state_and_notify_impl(instanceKlassHandle this_oop, ClassState state, TRAPS) {
oop init_lock = this_oop->init_lock();
void InstanceKlass::set_initialization_state_and_notify_impl(instanceKlassHandle this_k, ClassState state, TRAPS) {
oop init_lock = this_k->init_lock();
ObjectLocker ol(init_lock, THREAD, init_lock != NULL);
this_oop->set_init_state(state);
this_oop->fence_and_clear_init_lock();
this_k->set_init_state(state);
this_k->fence_and_clear_init_lock();
ol.notify_all(CHECK);
}
@ -952,12 +952,11 @@ void InstanceKlass::init_implementor() {
void InstanceKlass::process_interfaces(Thread *thread) {
// link this class into the implementors list of every interface it implements
Klass* this_as_klass_oop = this;
for (int i = local_interfaces()->length() - 1; i >= 0; i--) {
assert(local_interfaces()->at(i)->is_klass(), "must be a klass");
InstanceKlass* interf = InstanceKlass::cast(local_interfaces()->at(i));
assert(interf->is_interface(), "expected interface");
interf->add_implementor(this_as_klass_oop);
interf->add_implementor(this);
}
}
@ -1083,12 +1082,12 @@ void InstanceKlass::check_valid_for_instantiation(bool throwError, TRAPS) {
}
Klass* InstanceKlass::array_klass_impl(bool or_null, int n, TRAPS) {
instanceKlassHandle this_oop(THREAD, this);
return array_klass_impl(this_oop, or_null, n, THREAD);
instanceKlassHandle this_k(THREAD, this);
return array_klass_impl(this_k, or_null, n, THREAD);
}
Klass* InstanceKlass::array_klass_impl(instanceKlassHandle this_oop, bool or_null, int n, TRAPS) {
if (this_oop->array_klasses() == NULL) {
Klass* InstanceKlass::array_klass_impl(instanceKlassHandle this_k, bool or_null, int n, TRAPS) {
if (this_k->array_klasses() == NULL) {
if (or_null) return NULL;
ResourceMark rm;
@ -1099,14 +1098,14 @@ Klass* InstanceKlass::array_klass_impl(instanceKlassHandle this_oop, bool or_nul
MutexLocker ma(MultiArray_lock, THREAD);
// Check if update has already taken place
if (this_oop->array_klasses() == NULL) {
Klass* k = ObjArrayKlass::allocate_objArray_klass(this_oop->class_loader_data(), 1, this_oop, CHECK_NULL);
this_oop->set_array_klasses(k);
if (this_k->array_klasses() == NULL) {
Klass* k = ObjArrayKlass::allocate_objArray_klass(this_k->class_loader_data(), 1, this_k, CHECK_NULL);
this_k->set_array_klasses(k);
}
}
}
// _this will always be set at this point
ObjArrayKlass* oak = (ObjArrayKlass*)this_oop->array_klasses();
ObjArrayKlass* oak = (ObjArrayKlass*)this_k->array_klasses();
if (or_null) {
return oak->array_klass_or_null(n);
}
@ -1133,20 +1132,20 @@ Method* InstanceKlass::class_initializer() {
return NULL;
}
void InstanceKlass::call_class_initializer_impl(instanceKlassHandle this_oop, TRAPS) {
void InstanceKlass::call_class_initializer_impl(instanceKlassHandle this_k, TRAPS) {
if (ReplayCompiles &&
(ReplaySuppressInitializers == 1 ||
ReplaySuppressInitializers >= 2 && this_oop->class_loader() != NULL)) {
ReplaySuppressInitializers >= 2 && this_k->class_loader() != NULL)) {
// Hide the existence of the initializer for the purpose of replaying the compile
return;
}
methodHandle h_method(THREAD, this_oop->class_initializer());
assert(!this_oop->is_initialized(), "we cannot initialize twice");
methodHandle h_method(THREAD, this_k->class_initializer());
assert(!this_k->is_initialized(), "we cannot initialize twice");
if (TraceClassInitialization) {
tty->print("%d Initializing ", call_class_initializer_impl_counter++);
this_oop->name()->print_value();
tty->print_cr("%s (" INTPTR_FORMAT ")", h_method() == NULL ? "(no method)" : "", (address)this_oop());
this_k->name()->print_value();
tty->print_cr("%s (" INTPTR_FORMAT ")", h_method() == NULL ? "(no method)" : "", (address)this_k());
}
if (h_method() != NULL) {
JavaCallArguments args; // No arguments
@ -1296,8 +1295,8 @@ void InstanceKlass::do_local_static_fields(void f(fieldDescriptor*, TRAPS), TRAP
}
void InstanceKlass::do_local_static_fields_impl(instanceKlassHandle this_oop, void f(fieldDescriptor* fd, TRAPS), TRAPS) {
for (JavaFieldStream fs(this_oop()); !fs.done(); fs.next()) {
void InstanceKlass::do_local_static_fields_impl(instanceKlassHandle this_k, void f(fieldDescriptor* fd, TRAPS), TRAPS) {
for (JavaFieldStream fs(this_k()); !fs.done(); fs.next()) {
if (fs.access_flags().is_static()) {
fieldDescriptor& fd = fs.field_descriptor();
f(&fd, CHECK);
@ -1515,14 +1514,14 @@ Method* InstanceKlass::lookup_method_in_all_interfaces(Symbol* name,
}
/* jni_id_for_impl for jfieldIds only */
JNIid* InstanceKlass::jni_id_for_impl(instanceKlassHandle this_oop, int offset) {
JNIid* InstanceKlass::jni_id_for_impl(instanceKlassHandle this_k, int offset) {
MutexLocker ml(JfieldIdCreation_lock);
// Retry lookup after we got the lock
JNIid* probe = this_oop->jni_ids() == NULL ? NULL : this_oop->jni_ids()->find(offset);
JNIid* probe = this_k->jni_ids() == NULL ? NULL : this_k->jni_ids()->find(offset);
if (probe == NULL) {
// Slow case, allocate new static field identifier
probe = new JNIid(this_oop(), offset, this_oop->jni_ids());
this_oop->set_jni_ids(probe);
probe = new JNIid(this_k(), offset, this_k->jni_ids());
this_k->set_jni_ids(probe);
}
return probe;
}
@ -3161,8 +3160,8 @@ void InstanceKlass::verify_on(outputStream* st) {
}
// Verify first subklass
if (subklass_oop() != NULL) {
guarantee(subklass_oop()->is_klass(), "should be klass");
if (subklass() != NULL) {
guarantee(subklass()->is_klass(), "should be klass");
}
// Verify siblings

View File

@ -43,35 +43,7 @@
// An InstanceKlass is the VM level representation of a Java class.
// It contains all information needed for a class at execution runtime.
// InstanceKlass layout:
// [C++ vtbl pointer ] Klass
// [subtype cache ] Klass
// [instance size ] Klass
// [java mirror ] Klass
// [super ] Klass
// [access_flags ] Klass
// [name ] Klass
// [first subklass ] Klass
// [next sibling ] Klass
// [array klasses ]
// [methods ]
// [local interfaces ]
// [transitive interfaces ]
// [fields ]
// [constants ]
// [class loader ]
// [source file name ]
// [inner classes ]
// [static field size ]
// [nonstatic field size ]
// [static oop fields size ]
// [nonstatic oop maps size ]
// [has finalize method ]
// [deoptimization mark bit ]
// [initialization state ]
// [initializing thread ]
// [Java vtable length ]
// [oop map cache (stack maps) ]
// InstanceKlass embedded field layout (after declared fields):
// [EMBEDDED Java vtable ] size in words = vtable_len
// [EMBEDDED nonstatic oop-map blocks] size in words = nonstatic_oop_map_size
// The embedded nonstatic oop-map blocks are short pairs (offset, length)
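The (offset, length) pairs are the part of the embedded layout worth spelling out: each block describes one run of consecutive reference fields in an instance, so a heap scanner visits every oop by walking the blocks. A hypothetical miniature in plain C++ (OopRun and iterate_oops are illustrative names, not the VM's actual oop-map API):

// One run of consecutive reference fields within an instance.
struct OopRun {
  int offset;  // byte offset of the first reference field in the run
  int count;   // number of consecutive reference fields
};

// Visit every reference field of a (fake) instance laid out per the runs.
void iterate_oops(char* instance, const OopRun* runs, int nruns,
                  void (*visit)(void** slot)) {
  for (int i = 0; i < nruns; i++) {
    void** p = reinterpret_cast<void**>(instance + runs[i].offset);
    for (int j = 0; j < runs[i].count; j++) {
      visit(p + j);
    }
  }
}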
@ -1031,16 +1003,16 @@ private:
// Static methods that are used to implement member methods where an exposed this pointer
// is needed due to possible GCs
static bool link_class_impl (instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS);
static bool verify_code (instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS);
static void initialize_impl (instanceKlassHandle this_oop, TRAPS);
static void eager_initialize_impl (instanceKlassHandle this_oop);
static void set_initialization_state_and_notify_impl (instanceKlassHandle this_oop, ClassState state, TRAPS);
static void call_class_initializer_impl (instanceKlassHandle this_oop, TRAPS);
static Klass* array_klass_impl (instanceKlassHandle this_oop, bool or_null, int n, TRAPS);
static void do_local_static_fields_impl (instanceKlassHandle this_oop, void f(fieldDescriptor* fd, TRAPS), TRAPS);
static bool link_class_impl (instanceKlassHandle this_k, bool throw_verifyerror, TRAPS);
static bool verify_code (instanceKlassHandle this_k, bool throw_verifyerror, TRAPS);
static void initialize_impl (instanceKlassHandle this_k, TRAPS);
static void eager_initialize_impl (instanceKlassHandle this_k);
static void set_initialization_state_and_notify_impl (instanceKlassHandle this_k, ClassState state, TRAPS);
static void call_class_initializer_impl (instanceKlassHandle this_k, TRAPS);
static Klass* array_klass_impl (instanceKlassHandle this_k, bool or_null, int n, TRAPS);
static void do_local_static_fields_impl (instanceKlassHandle this_k, void f(fieldDescriptor* fd, TRAPS), TRAPS);
/* jni_id_for_impl for jfieldID only */
static JNIid* jni_id_for_impl (instanceKlassHandle this_oop, int offset);
static JNIid* jni_id_for_impl (instanceKlassHandle this_k, int offset);
// Returns the array class for the n'th dimension
Klass* array_klass_impl(bool or_null, int n, TRAPS);

View File

@ -334,19 +334,11 @@ GrowableArray<Klass*>* Klass::compute_secondary_supers(int num_extra_slots) {
}
Klass* Klass::subklass() const {
return _subklass == NULL ? NULL : _subklass;
}
InstanceKlass* Klass::superklass() const {
assert(super() == NULL || super()->oop_is_instance(), "must be instance klass");
return _super == NULL ? NULL : InstanceKlass::cast(_super);
}
Klass* Klass::next_sibling() const {
return _next_sibling == NULL ? NULL : _next_sibling;
}
void Klass::set_subklass(Klass* s) {
assert(s != this, "sanity check");
_subklass = s;
@ -365,7 +357,7 @@ void Klass::append_to_sibling_list() {
assert((!super->is_interface() // interfaces cannot be supers
&& (super->superklass() == NULL || !is_interface())),
"an interface can only be a subklass of Object");
Klass* prev_first_subklass = super->subklass_oop();
Klass* prev_first_subklass = super->subklass();
if (prev_first_subklass != NULL) {
// set our sibling to be the superklass' previous first subklass
set_next_sibling(prev_first_subklass);
@ -405,7 +397,7 @@ void Klass::clean_weak_klass_links(BoolObjectClosure* is_alive) {
assert(current->is_loader_alive(is_alive), "just checking, this should be live");
// Find and set the first alive subklass
Klass* sub = current->subklass_oop();
Klass* sub = current->subklass();
while (sub != NULL && !sub->is_loader_alive(is_alive)) {
#ifndef PRODUCT
if (TraceClassUnloading && WizardMode) {
@ -413,7 +405,7 @@ void Klass::clean_weak_klass_links(BoolObjectClosure* is_alive) {
tty->print_cr("[Unlinking class (subclass) %s]", sub->external_name());
}
#endif
sub = sub->next_sibling_oop();
sub = sub->next_sibling();
}
current->set_subklass(sub);
if (sub != NULL) {
@ -421,13 +413,13 @@ void Klass::clean_weak_klass_links(BoolObjectClosure* is_alive) {
}
// Find and set the first alive sibling
Klass* sibling = current->next_sibling_oop();
Klass* sibling = current->next_sibling();
while (sibling != NULL && !sibling->is_loader_alive(is_alive)) {
if (TraceClassUnloading && WizardMode) {
ResourceMark rm;
tty->print_cr("[Unlinking class (sibling) %s]", sibling->external_name());
}
sibling = sibling->next_sibling_oop();
sibling = sibling->next_sibling();
}
current->set_next_sibling(sibling);
if (sibling != NULL) {
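Both unlink loops above (first subclasses, then siblings) are the same operation: advance past dead list nodes and relink to the first live one. A minimal sketch of that pruning, with a plain bool standing in for the is_loader_alive(is_alive) query and hypothetical types:

struct K {
  bool alive;
  K* subklass;
  K* next_sibling;
};

// Advance past dead entries and return the first live one (or null),
// exactly as the two while loops above do.
static K* first_alive(K* k) {
  while (k != nullptr && !k->alive) {
    k = k->next_sibling;
  }
  return k;
}

static void clean_links(K* current) {
  current->subklass     = first_alive(current->subklass);      // first live subclass
  current->next_sibling = first_alive(current->next_sibling);  // first live sibling
}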

View File

@ -56,34 +56,6 @@
// actual type. (See oop.inline.hpp for some of the forwarding code.)
// ALL FUNCTIONS IMPLEMENTING THIS DISPATCH ARE PREFIXED WITH "oop_"!
// Klass layout:
// [C++ vtbl ptr ] (contained in Metadata)
// [layout_helper ]
// [super_check_offset ] for fast subtype checks
// [name ]
// [secondary_super_cache] for fast subtype checks
// [secondary_supers ] array of 2ndary supertypes
// [primary_supers 0]
// [primary_supers 1]
// [primary_supers 2]
// ...
// [primary_supers 7]
// [java_mirror ]
// [super ]
// [subklass ] first subclass
// [next_sibling ] link to chain additional subklasses
// [next_link ]
// [class_loader_data]
// [modifier_flags]
// [access_flags ]
// [last_biased_lock_bulk_revocation_time] (64 bits)
// [prototype_header]
// [biased_lock_revocation_count]
// [_modified_oops]
// [_accumulated_modified_oops]
// [trace_id]
// Forward declarations.
template <class T> class Array;
template <class T> class GrowableArray;
@ -257,9 +229,9 @@ class Klass : public Metadata {
// Use InstanceKlass::contains_field_offset to classify field offsets.
// sub/superklass links
Klass* subklass() const { return _subklass; }
Klass* next_sibling() const { return _next_sibling; }
InstanceKlass* superklass() const;
Klass* subklass() const;
Klass* next_sibling() const;
void append_to_sibling_list(); // add newly created receiver to superklass' subklass list
void set_next_link(Klass* k) { _next_link = k; }
@ -281,8 +253,6 @@ class Klass : public Metadata {
bool has_accumulated_modified_oops() { return _accumulated_modified_oops == 1; }
protected: // internal accessors
Klass* subklass_oop() const { return _subklass; }
Klass* next_sibling_oop() const { return _next_sibling; }
void set_subklass(Klass* s);
void set_next_sibling(Klass* s);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -329,14 +329,12 @@ bool Method::was_executed_more_than(int n) {
}
}
#ifndef PRODUCT
void Method::print_invocation_count() {
if (is_static()) tty->print("static ");
if (is_final()) tty->print("final ");
if (is_synchronized()) tty->print("synchronized ");
if (is_native()) tty->print("native ");
method_holder()->name()->print_symbol_on(tty);
tty->print(".");
tty->print("%s::", method_holder()->external_name());
name()->print_symbol_on(tty);
signature()->print_symbol_on(tty);
@ -349,12 +347,12 @@ void Method::print_invocation_count() {
tty->print_cr (" interpreter_invocation_count: %8d ", interpreter_invocation_count());
tty->print_cr (" invocation_counter: %8d ", invocation_count());
tty->print_cr (" backedge_counter: %8d ", backedge_count());
#ifndef PRODUCT
if (CountCompiledCalls) {
tty->print_cr (" compiled_invocation_count: %8d ", compiled_invocation_count());
}
}
#endif
}
// Build a MethodData* object to hold information about this method
// collected in the interpreter.
@ -577,12 +575,12 @@ bool Method::is_static_initializer() const {
}
objArrayHandle Method::resolved_checked_exceptions_impl(Method* this_oop, TRAPS) {
int length = this_oop->checked_exceptions_length();
objArrayHandle Method::resolved_checked_exceptions_impl(Method* method, TRAPS) {
int length = method->checked_exceptions_length();
if (length == 0) { // common case
return objArrayHandle(THREAD, Universe::the_empty_class_klass_array());
} else {
methodHandle h_this(THREAD, this_oop);
methodHandle h_this(THREAD, method);
objArrayOop m_oop = oopFactory::new_objArray(SystemDictionary::Class_klass(), length, CHECK_(objArrayHandle()));
objArrayHandle mirrors (THREAD, m_oop);
for (int i = 0; i < length; i++) {
@ -1443,10 +1441,6 @@ void Method::print_name(outputStream* st) {
#endif // !PRODUCT || INCLUDE_JVMTI
//-----------------------------------------------------------------------------------
// Non-product code
#ifndef PRODUCT
void Method::print_codes_on(outputStream* st) const {
print_codes_on(0, code_size(), st);
}
@ -1460,7 +1454,6 @@ void Method::print_codes_on(int from, int to, outputStream* st) const {
BytecodeTracer::set_closure(BytecodeTracer::std_closure());
while (s.next() >= 0) BytecodeTracer::trace(mh, s.bcp(), st);
}
#endif // not PRODUCT
// Simple compression of line number tables. We use a regular compressed stream, except that we compress deltas

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -40,50 +40,15 @@
// A Method represents a Java method.
//
// Memory layout (each line represents a word). Note that most applications load thousands of methods,
// so keeping the size of this structure small has a big impact on footprint.
// Note that most applications load thousands of methods, so keeping the size of this
// class small has a big impact on footprint.
//
// The actual bytecodes are inlined after the end of the Method struct.
// Note that native_function and signature_handler have to be at fixed offsets
// (required by the interpreter)
//
// There are bits in the access_flags telling whether inlined tables are present.
// Note that accessing the line number and local variable tables is not performance critical at all.
// Accessing the checked exceptions table is used by reflection, so we put that last to make access
// to it fast.
//
// The line number table is compressed and inlined following the byte codes. It is found as the first
// byte following the byte codes. The checked exceptions table and the local variable table are inlined
// after the line number table, and indexed from the end of the method. We do not compress the checked
// exceptions table since the average length is less than 2, and do not bother to compress the local
// variable table either since it is mostly absent.
//
// Note that native_function and signature_handler has to be at fixed offsets (required by the interpreter)
//
// |------------------------------------------------------|
// | header |
// | klass |
// |------------------------------------------------------|
// | ConstMethod* (metadata) |
// |------------------------------------------------------|
// | MethodData* (metadata) |
// | MethodCounters |
// |------------------------------------------------------|
// | access_flags |
// | vtable_index |
// |------------------------------------------------------|
// | result_index (C++ interpreter only) |
// |------------------------------------------------------|
// | method_size | intrinsic_id | flags |
// |------------------------------------------------------|
// | code (pointer) |
// | i2i (pointer) |
// | adapter (pointer) |
// | from_compiled_entry (pointer) |
// | from_interpreted_entry (pointer) |
// |------------------------------------------------------|
// | native_function (present only if native) |
// | signature_handler (present only if native) |
// |------------------------------------------------------|
// Method embedded field layout (after declared fields):
// [EMBEDDED native_function (present only if native) ]
// [EMBEDDED signature_handler (present only if native) ]
class CheckedExceptionElement;
class LocalVariableTableElement;
@ -429,6 +394,9 @@ class Method : public Metadata {
#ifndef PRODUCT
int compiled_invocation_count() const { return _compiled_invocation_count; }
void set_compiled_invocation_count(int count) { _compiled_invocation_count = count; }
#else
// for PrintMethodData in a product build
int compiled_invocation_count() const { return 0; }
#endif // not PRODUCT
// Clear (non-shared space) pointers which could not be relevant
@ -497,10 +465,8 @@ class Method : public Metadata {
// Interpreter oopmap support
void mask_for(int bci, InterpreterOopMap* mask);
#ifndef PRODUCT
// operations on invocation counter
void print_invocation_count();
#endif
// byte codes
void set_code(address code) { return constMethod()->set_code(code); }
@ -509,8 +475,8 @@ class Method : public Metadata {
// prints byte codes
void print_codes() const { print_codes_on(tty); }
void print_codes_on(outputStream* st) const PRODUCT_RETURN;
void print_codes_on(int from, int to, outputStream* st) const PRODUCT_RETURN;
void print_codes_on(outputStream* st) const;
void print_codes_on(int from, int to, outputStream* st) const;
// method parameters
bool has_method_parameters() const
@ -661,7 +627,7 @@ class Method : public Metadata {
// Static methods that are used to implement member methods where an exposed this pointer
// is needed due to possible GCs
static objArrayHandle resolved_checked_exceptions_impl(Method* this_oop, TRAPS);
static objArrayHandle resolved_checked_exceptions_impl(Method* method, TRAPS);
// Returns the byte code index from the byte code pointer
int bci_from(address bcp) const;
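The rewritten layout comment above notes that the inline tables (line numbers, checked exceptions, local variables) are compressed after the bytecodes and indexed from the end of the method. A hypothetical miniature of that end-relative addressing, independent of the real ConstMethod code (Blob and its accessors are illustrative, and a 2-byte-aligned allocation is assumed): the last two bytes hold a count, and the table sits immediately before it.

#include <cstddef>
#include <cstdint>
#include <cstring>

struct Blob {
  char*       base;   // start of the allocation: [bytecodes][table][u2 count]
  std::size_t size;   // total size in bytes

  // Last two bytes hold the element count of the trailing table.
  std::uint16_t checked_exceptions_count() const {
    std::uint16_t n;
    std::memcpy(&n, base + size - sizeof(std::uint16_t), sizeof n);
    return n;
  }

  // The table of u2 entries sits immediately before the count,
  // so it is located by walking backward from the end.
  const std::uint16_t* checked_exceptions() const {
    std::size_t bytes = checked_exceptions_count() * sizeof(std::uint16_t);
    return reinterpret_cast<const std::uint16_t*>(
        base + size - sizeof(std::uint16_t) - bytes);
  }
};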

View File

@ -115,7 +115,6 @@ void ProfileData::print_data_on(outputStream* st, const MethodData* md) const {
print_data_on(st, print_data_on_helper(md));
}
#ifndef PRODUCT
void ProfileData::print_shared(outputStream* st, const char* name, const char* extra) const {
st->print("bci: %d", bci());
st->fill_to(tab_width_one);
@ -138,7 +137,6 @@ void ProfileData::print_shared(outputStream* st, const char* name, const char* extra) const {
void ProfileData::tab(outputStream* st, bool first) const {
st->fill_to(first ? tab_width_one : tab_width_two);
}
#endif // !PRODUCT
// ==================================================================
// BitData
@ -147,23 +145,19 @@ void ProfileData::tab(outputStream* st, bool first) const {
// whether a checkcast bytecode has seen a null value.
#ifndef PRODUCT
void BitData::print_data_on(outputStream* st, const char* extra) const {
print_shared(st, "BitData", extra);
}
#endif // !PRODUCT
// ==================================================================
// CounterData
//
// A CounterData corresponds to a simple counter.
#ifndef PRODUCT
void CounterData::print_data_on(outputStream* st, const char* extra) const {
print_shared(st, "CounterData", extra);
st->print_cr("count(%u)", count());
}
#endif // !PRODUCT
// ==================================================================
// JumpData
@ -188,12 +182,10 @@ void JumpData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
set_displacement(offset);
}
#ifndef PRODUCT
void JumpData::print_data_on(outputStream* st, const char* extra) const {
print_shared(st, "JumpData", extra);
st->print_cr("taken(%u) displacement(%d)", taken(), displacement());
}
#endif // !PRODUCT
int TypeStackSlotEntries::compute_cell_count(Symbol* signature, bool include_receiver, int max) {
// Parameter profiling includes the receiver
@ -342,7 +334,6 @@ bool TypeEntriesAtCall::arguments_profiling_enabled() {
return MethodData::profile_arguments();
}
#ifndef PRODUCT
void TypeEntries::print_klass(outputStream* st, intptr_t k) {
if (is_type_none(k)) {
st->print("none");
@ -398,7 +389,6 @@ void VirtualCallTypeData::print_data_on(outputStream* st, const char* extra) const {
_ret.print_data_on(st);
}
}
#endif
// ==================================================================
// ReceiverTypeData
@ -417,7 +407,6 @@ void ReceiverTypeData::clean_weak_klass_links(BoolObjectClosure* is_alive_cl) {
}
}
#ifndef PRODUCT
void ReceiverTypeData::print_receiver_data_on(outputStream* st) const {
uint row;
int entries = 0;
@ -447,7 +436,6 @@ void VirtualCallData::print_data_on(outputStream* st, const char* extra) const {
print_shared(st, "VirtualCallData", extra);
print_receiver_data_on(st);
}
#endif // !PRODUCT
// ==================================================================
// RetData
@ -499,7 +487,6 @@ DataLayout* RetData::advance(MethodData *md, int bci) {
}
#endif // CC_INTERP
#ifndef PRODUCT
void RetData::print_data_on(outputStream* st, const char* extra) const {
print_shared(st, "RetData", extra);
uint row;
@ -516,7 +503,6 @@ void RetData::print_data_on(outputStream* st, const char* extra) const {
}
}
}
#endif // !PRODUCT
// ==================================================================
// BranchData
@ -534,7 +520,6 @@ void BranchData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
set_displacement(offset);
}
#ifndef PRODUCT
void BranchData::print_data_on(outputStream* st, const char* extra) const {
print_shared(st, "BranchData", extra);
st->print_cr("taken(%u) displacement(%d)",
@ -542,7 +527,6 @@ void BranchData::print_data_on(outputStream* st, const char* extra) const {
tab(st);
st->print_cr("not taken(%u)", not_taken());
}
#endif
// ==================================================================
// MultiBranchData
@ -608,7 +592,6 @@ void MultiBranchData::post_initialize(BytecodeStream* stream,
}
}
#ifndef PRODUCT
void MultiBranchData::print_data_on(outputStream* st, const char* extra) const {
print_shared(st, "MultiBranchData", extra);
st->print_cr("default_count(%u) displacement(%d)",
@ -620,9 +603,7 @@ void MultiBranchData::print_data_on(outputStream* st, const char* extra) const {
count_at(i), displacement_at(i));
}
}
#endif
#ifndef PRODUCT
void ArgInfoData::print_data_on(outputStream* st, const char* extra) const {
print_shared(st, "ArgInfoData", extra);
int nargs = number_of_args();
@ -632,8 +613,6 @@ void ArgInfoData::print_data_on(outputStream* st, const char* extra) const {
st->cr();
}
#endif
int ParametersTypeData::compute_cell_count(Method* m) {
if (!MethodData::profile_parameters_for_method(m)) {
return 0;
@ -654,7 +633,6 @@ bool ParametersTypeData::profiling_enabled() {
return MethodData::profile_parameters();
}
#ifndef PRODUCT
void ParametersTypeData::print_data_on(outputStream* st, const char* extra) const {
st->print("parameter types", extra);
_parameters.print_data_on(st);
@ -666,7 +644,6 @@ void SpeculativeTrapData::print_data_on(outputStream* st, const char* extra) const {
method()->print_short_name(st);
st->cr();
}
#endif
// ==================================================================
// MethodData*
@ -801,6 +778,8 @@ bool MethodData::is_speculative_trap_bytecode(Bytecodes::Code code) {
case Bytecodes::_invokeinterface:
case Bytecodes::_if_acmpeq:
case Bytecodes::_if_acmpne:
case Bytecodes::_ifnull:
case Bytecodes::_ifnonnull:
case Bytecodes::_invokestatic:
#ifdef COMPILER2
return UseTypeSpeculation;
@ -1357,8 +1336,6 @@ ArgInfoData *MethodData::arg_info() {
// Printing
#ifndef PRODUCT
void MethodData::print_on(outputStream* st) const {
assert(is_methodData(), "should be method data");
st->print("method data for ");
@ -1367,15 +1344,12 @@ void MethodData::print_on(outputStream* st) const {
print_data_on(st);
}
#endif //PRODUCT
void MethodData::print_value_on(outputStream* st) const {
assert(is_methodData(), "should be method data");
st->print("method data for ");
method()->print_value_on(st);
}
#ifndef PRODUCT
void MethodData::print_data_on(outputStream* st) const {
ResourceMark rm;
ProfileData* data = first_data();
@ -1416,7 +1390,6 @@ void MethodData::print_data_on(outputStream* st) const {
if (dp >= end) return;
}
}
#endif
#if INCLUDE_SERVICES
// Size Statistics

View File

@ -280,12 +280,10 @@ class ProfileData : public ResourceObj {
friend class ReturnTypeEntry;
friend class TypeStackSlotEntries;
private:
#ifndef PRODUCT
enum {
tab_width_one = 16,
tab_width_two = 36
};
#endif // !PRODUCT
// This is a pointer to a section of profiling data.
DataLayout* _data;
@ -521,10 +519,8 @@ public:
void print_data_on(outputStream* st, const MethodData* md) const;
#ifndef PRODUCT
void print_shared(outputStream* st, const char* name, const char* extra) const;
void tab(outputStream* st, bool first = false) const;
#endif
};
// BitData
@ -583,9 +579,7 @@ public:
}
#endif // CC_INTERP
#ifndef PRODUCT
void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};
// CounterData
@ -646,9 +640,7 @@ public:
}
#endif // CC_INTERP
#ifndef PRODUCT
void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};
// JumpData
@ -733,9 +725,7 @@ public:
// Specific initialization.
void post_initialize(BytecodeStream* stream, MethodData* mdo);
#ifndef PRODUCT
void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};
// Entries in a ProfileData object to record types: it can either be
@ -808,9 +798,7 @@ public:
return with_status((intptr_t)k, in);
}
#ifndef PRODUCT
static void print_klass(outputStream* st, intptr_t k);
#endif
// GC support
static bool is_loader_alive(BoolObjectClosure* is_alive_cl, intptr_t p);
@ -919,9 +907,7 @@ public:
// GC support
void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);
#ifndef PRODUCT
void print_data_on(outputStream* st) const;
#endif
};
// Type entry used for return from a call. A single cell to record the
@ -964,9 +950,7 @@ public:
// GC support
void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);
#ifndef PRODUCT
void print_data_on(outputStream* st) const;
#endif
};
// Entries to collect type information at a call: contains arguments
@ -1144,9 +1128,7 @@ public:
}
}
#ifndef PRODUCT
virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};
// ReceiverTypeData
@ -1288,10 +1270,8 @@ public:
}
#endif // CC_INTERP
#ifndef PRODUCT
void print_receiver_data_on(outputStream* st) const;
void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};
// VirtualCallData
@ -1332,9 +1312,7 @@ public:
}
#endif // CC_INTERP
#ifndef PRODUCT
void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};
// VirtualCallTypeData
@ -1458,9 +1436,7 @@ public:
}
}
#ifndef PRODUCT
virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};
// RetData
@ -1561,9 +1537,7 @@ public:
// Specific initialization.
void post_initialize(BytecodeStream* stream, MethodData* mdo);
#ifndef PRODUCT
void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};
// BranchData
@ -1639,9 +1613,7 @@ public:
// Specific initialization.
void post_initialize(BytecodeStream* stream, MethodData* mdo);
#ifndef PRODUCT
void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};
// ArrayData
@ -1832,9 +1804,7 @@ public:
// Specific initialization.
void post_initialize(BytecodeStream* stream, MethodData* mdo);
#ifndef PRODUCT
void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};
class ArgInfoData : public ArrayData {
@ -1859,9 +1829,7 @@ public:
array_set_int_at(arg, val);
}
#ifndef PRODUCT
void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};
// ParametersTypeData
@ -1920,9 +1888,7 @@ public:
_parameters.clean_weak_klass_links(is_alive_closure);
}
#ifndef PRODUCT
virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
static ByteSize stack_slot_offset(int i) {
return cell_offset(stack_slot_local_offset(i));
@ -1976,9 +1942,7 @@ public:
set_intptr_at(method_offset, (intptr_t)m);
}
#ifndef PRODUCT
virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};
// MethodData*
@ -2052,7 +2016,7 @@ public:
// Whole-method sticky bits and flags
enum {
_trap_hist_limit = 19, // decoupled from Deoptimization::Reason_LIMIT
_trap_hist_limit = 20, // decoupled from Deoptimization::Reason_LIMIT
_trap_hist_mask = max_jubyte,
_extra_data_count = 4 // extra DataLayout headers, for trap history
}; // Public flag values
@ -2457,15 +2421,11 @@ public:
void set_size(int object_size_in_bytes) { _size = object_size_in_bytes; }
// Printing
#ifndef PRODUCT
void print_on (outputStream* st) const;
#endif
void print_value_on(outputStream* st) const;
#ifndef PRODUCT
// printing support for method data
void print_data_on(outputStream* st) const;
#endif
const char* internal_name() const { return "{method data}"; }

View File

@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/machnode.hpp"

View File

@ -33,8 +33,8 @@
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"

View File

@ -27,6 +27,7 @@
#include "compiler/oopMap.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/escape.hpp"
#include "opto/locknode.hpp"
#include "opto/machnode.hpp"

View File

@ -0,0 +1,294 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/connode.hpp"
#include "opto/matcher.hpp"
#include "opto/phaseX.hpp"
#include "opto/subnode.hpp"
#include "opto/type.hpp"
//=============================================================================
// If input is already higher or equal to cast type, then this is an identity.
Node *ConstraintCastNode::Identity( PhaseTransform *phase ) {
return phase->type(in(1))->higher_equal_speculative(_type) ? in(1) : this;
}
//------------------------------Value------------------------------------------
// Take 'join' of input and cast-up type
const Type *ConstraintCastNode::Value( PhaseTransform *phase ) const {
if( in(0) && phase->type(in(0)) == Type::TOP ) return Type::TOP;
const Type* ft = phase->type(in(1))->filter_speculative(_type);
#ifdef ASSERT
// Previous versions of this function had some special case logic,
// which is no longer necessary. Make sure of the required effects.
switch (Opcode()) {
case Op_CastII:
{
const Type* t1 = phase->type(in(1));
if( t1 == Type::TOP ) assert(ft == Type::TOP, "special case #1");
const Type* rt = t1->join_speculative(_type);
if (rt->empty()) assert(ft == Type::TOP, "special case #2");
break;
}
case Op_CastPP:
if (phase->type(in(1)) == TypePtr::NULL_PTR &&
_type->isa_ptr() && _type->is_ptr()->_ptr == TypePtr::NotNull)
assert(ft == Type::TOP, "special case #3");
break;
}
#endif //ASSERT
return ft;
}
//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node. Strip out
// control copies
Node *ConstraintCastNode::Ideal(PhaseGVN *phase, bool can_reshape){
return (in(0) && remove_dead_region(phase, can_reshape)) ? this : NULL;
}
//------------------------------Ideal_DU_postCCP-------------------------------
// Throw away cast after constant propagation
Node *ConstraintCastNode::Ideal_DU_postCCP( PhaseCCP *ccp ) {
const Type *t = ccp->type(in(1));
ccp->hash_delete(this);
set_type(t); // Turn into ID function
ccp->hash_insert(this);
return this;
}
//=============================================================================
//------------------------------Ideal_DU_postCCP-------------------------------
// If not converting int->oop, throw away cast after constant propagation
Node *CastPPNode::Ideal_DU_postCCP( PhaseCCP *ccp ) {
const Type *t = ccp->type(in(1));
if (!t->isa_oop_ptr() || ((in(1)->is_DecodeN()) && Matcher::gen_narrow_oop_implicit_null_checks())) {
return NULL; // do not transform raw pointers or narrow oops
}
return ConstraintCastNode::Ideal_DU_postCCP(ccp);
}
//=============================================================================
//------------------------------Identity---------------------------------------
// If input is already higher or equal to cast type, then this is an identity.
Node *CheckCastPPNode::Identity( PhaseTransform *phase ) {
// Toned down to rescue meeting at a Phi 3 different oops all implementing
// the same interface. CompileTheWorld starting at 502, kd12rc1.zip.
return (phase->type(in(1)) == phase->type(this)) ? in(1) : this;
}
//------------------------------Value------------------------------------------
// Take 'join' of input and cast-up type, unless working with an Interface
const Type *CheckCastPPNode::Value( PhaseTransform *phase ) const {
if( in(0) && phase->type(in(0)) == Type::TOP ) return Type::TOP;
const Type *inn = phase->type(in(1));
if( inn == Type::TOP ) return Type::TOP; // No information yet
const TypePtr *in_type = inn->isa_ptr();
const TypePtr *my_type = _type->isa_ptr();
const Type *result = _type;
if( in_type != NULL && my_type != NULL ) {
TypePtr::PTR in_ptr = in_type->ptr();
if( in_ptr == TypePtr::Null ) {
result = in_type;
} else if( in_ptr == TypePtr::Constant ) {
// Casting a constant oop to an interface?
// (i.e., a String to a Comparable?)
// Then return the interface.
const TypeOopPtr *jptr = my_type->isa_oopptr();
assert( jptr, "" );
result = (jptr->klass()->is_interface() || !in_type->higher_equal(_type))
? my_type->cast_to_ptr_type( TypePtr::NotNull )
: in_type;
} else {
result = my_type->cast_to_ptr_type( my_type->join_ptr(in_ptr) );
}
}
// This is the code from TypePtr::xmeet() that prevents us from
// having 2 ways to represent the same type. We have to replicate it
// here because we don't go through meet/join.
if (result->remove_speculative() == result->speculative()) {
result = result->remove_speculative();
}
// Same as above: because we don't go through meet/join, remove the
// speculative type if we know we won't use it.
return result->cleanup_speculative();
// JOIN NOT DONE HERE BECAUSE OF INTERFACE ISSUES.
// FIX THIS (DO THE JOIN) WHEN UNION TYPES APPEAR!
//
// Remove this code after overnight run indicates no performance
// loss from not performing JOIN at CheckCastPPNode
//
// const TypeInstPtr *in_oop = in->isa_instptr();
// const TypeInstPtr *my_oop = _type->isa_instptr();
// // If either input is an 'interface', return destination type
// assert (in_oop == NULL || in_oop->klass() != NULL, "");
// assert (my_oop == NULL || my_oop->klass() != NULL, "");
// if( (in_oop && in_oop->klass()->is_interface())
// ||(my_oop && my_oop->klass()->is_interface()) ) {
// TypePtr::PTR in_ptr = in->isa_ptr() ? in->is_ptr()->_ptr : TypePtr::BotPTR;
// // Preserve cast away nullness for interfaces
// if( in_ptr == TypePtr::NotNull && my_oop && my_oop->_ptr == TypePtr::BotPTR ) {
// return my_oop->cast_to_ptr_type(TypePtr::NotNull);
// }
// return _type;
// }
//
// // Neither the input nor the destination type is an interface,
//
// // history: JOIN used to cause weird corner case bugs
// // return (in == TypeOopPtr::NULL_PTR) ? in : _type;
// // JOIN picks up NotNull in common instance-of/check-cast idioms, both oops.
// // JOIN does not preserve NotNull in other cases, e.g. RawPtr vs InstPtr
// const Type *join = in->join(_type);
// // Check if join preserved NotNull'ness for pointers
// if( join->isa_ptr() && _type->isa_ptr() ) {
// TypePtr::PTR join_ptr = join->is_ptr()->_ptr;
// TypePtr::PTR type_ptr = _type->is_ptr()->_ptr;
// // If there isn't any NotNull'ness to preserve
// // OR if join preserved NotNull'ness then return it
// if( type_ptr == TypePtr::BotPTR || type_ptr == TypePtr::Null ||
// join_ptr == TypePtr::NotNull || join_ptr == TypePtr::Constant ) {
// return join;
// }
// // ELSE return same old type as before
// return _type;
// }
// // Not joining two pointers
// return join;
}
//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node. Strip out
// control copies
Node *CheckCastPPNode::Ideal(PhaseGVN *phase, bool can_reshape){
return (in(0) && remove_dead_region(phase, can_reshape)) ? this : NULL;
}
//=============================================================================
//------------------------------Value------------------------------------------
const Type *CastX2PNode::Value( PhaseTransform *phase ) const {
const Type* t = phase->type(in(1));
if (t == Type::TOP) return Type::TOP;
if (t->base() == Type_X && t->singleton()) {
uintptr_t bits = (uintptr_t) t->is_intptr_t()->get_con();
if (bits == 0) return TypePtr::NULL_PTR;
return TypeRawPtr::make((address) bits);
}
return CastX2PNode::bottom_type();
}
//------------------------------Idealize---------------------------------------
static inline bool fits_in_int(const Type* t, bool but_not_min_int = false) {
if (t == Type::TOP) return false;
const TypeX* tl = t->is_intptr_t();
jint lo = min_jint;
jint hi = max_jint;
if (but_not_min_int) ++lo; // caller wants to negate the value w/o overflow
return (tl->_lo >= lo) && (tl->_hi <= hi);
}
static inline Node* addP_of_X2P(PhaseGVN *phase,
Node* base,
Node* dispX,
bool negate = false) {
if (negate) {
dispX = new (phase->C) SubXNode(phase->MakeConX(0), phase->transform(dispX));
}
return new (phase->C) AddPNode(phase->C->top(),
phase->transform(new (phase->C) CastX2PNode(base)),
phase->transform(dispX));
}
Node *CastX2PNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// convert CastX2P(AddX(x, y)) to AddP(CastX2P(x), y) if y fits in an int
int op = in(1)->Opcode();
Node* x;
Node* y;
switch (op) {
case Op_SubX:
x = in(1)->in(1);
// Avoid ideal transformations ping-pong between this and AddP for raw pointers.
if (phase->find_intptr_t_con(x, -1) == 0)
break;
y = in(1)->in(2);
if (fits_in_int(phase->type(y), true)) {
return addP_of_X2P(phase, x, y, true);
}
break;
case Op_AddX:
x = in(1)->in(1);
y = in(1)->in(2);
if (fits_in_int(phase->type(y))) {
return addP_of_X2P(phase, x, y);
}
if (fits_in_int(phase->type(x))) {
return addP_of_X2P(phase, y, x);
}
break;
}
return NULL;
}
//------------------------------Identity---------------------------------------
Node *CastX2PNode::Identity( PhaseTransform *phase ) {
if (in(1)->Opcode() == Op_CastP2X) return in(1)->in(1);
return this;
}
//=============================================================================
//------------------------------Value------------------------------------------
const Type *CastP2XNode::Value( PhaseTransform *phase ) const {
const Type* t = phase->type(in(1));
if (t == Type::TOP) return Type::TOP;
if (t->base() == Type::RawPtr && t->singleton()) {
uintptr_t bits = (uintptr_t) t->is_rawptr()->get_con();
return TypeX::make(bits);
}
return CastP2XNode::bottom_type();
}
Node *CastP2XNode::Ideal(PhaseGVN *phase, bool can_reshape) {
return (in(0) && remove_dead_region(phase, can_reshape)) ? this : NULL;
}
//------------------------------Identity---------------------------------------
Node *CastP2XNode::Identity( PhaseTransform *phase ) {
if (in(1)->Opcode() == Op_CastX2P) return in(1)->in(1);
return this;
}
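The Ideal rule above rewrites CastX2P(AddX(x, y)) into AddP(CastX2P(x), y) when y fits in an int, and via but_not_min_int it refuses y == min_jint when the value must be negated, since -min_jint is not representable. The value identity behind the rewrite, demonstrated with ordinary C++ pointer arithmetic on a conventional flat-address platform rather than C2 nodes:

#include <cassert>
#include <cstdint>

int main() {
  std::intptr_t x = 0x10000;  // arbitrary base bits
  std::int32_t  y = 24;       // small displacement that fits in an int
  // CastX2P(AddX(x, y)): convert the integer sum to a pointer.
  char* a = reinterpret_cast<char*>(x + y);
  // AddP(CastX2P(x), y): convert first, then add a byte displacement.
  char* b = reinterpret_cast<char*>(x) + y;
  assert(a == b);  // same address either way
  return 0;
}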

View File

@ -0,0 +1,119 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_OPTO_CASTNODE_HPP
#define SHARE_VM_OPTO_CASTNODE_HPP
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
//------------------------------ConstraintCastNode-----------------------------
// cast to a different range
class ConstraintCastNode: public TypeNode {
public:
ConstraintCastNode (Node *n, const Type *t ): TypeNode(t,2) {
init_class_id(Class_ConstraintCast);
init_req(1, n);
}
virtual Node *Identity( PhaseTransform *phase );
virtual const Type *Value( PhaseTransform *phase ) const;
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual int Opcode() const;
virtual uint ideal_reg() const = 0;
virtual Node *Ideal_DU_postCCP( PhaseCCP * );
};
//------------------------------CastIINode-------------------------------------
// cast integer to integer (different range)
class CastIINode: public ConstraintCastNode {
public:
CastIINode (Node *n, const Type *t ): ConstraintCastNode(n,t) {}
virtual int Opcode() const;
virtual uint ideal_reg() const { return Op_RegI; }
};
//------------------------------CastPPNode-------------------------------------
// cast pointer to pointer (different type)
class CastPPNode: public ConstraintCastNode {
public:
CastPPNode (Node *n, const Type *t ): ConstraintCastNode(n, t) {}
virtual int Opcode() const;
virtual uint ideal_reg() const { return Op_RegP; }
virtual Node *Ideal_DU_postCCP( PhaseCCP * );
};
//------------------------------CheckCastPPNode--------------------------------
// for _checkcast, cast pointer to pointer (different type), without JOIN,
class CheckCastPPNode: public TypeNode {
public:
CheckCastPPNode( Node *c, Node *n, const Type *t ) : TypeNode(t,2) {
init_class_id(Class_CheckCastPP);
init_req(0, c);
init_req(1, n);
}
virtual Node *Identity( PhaseTransform *phase );
virtual const Type *Value( PhaseTransform *phase ) const;
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual int Opcode() const;
virtual uint ideal_reg() const { return Op_RegP; }
// No longer remove CheckCast after CCP as it gives me a place to hang
// the proper address type - which is required to compute anti-deps.
//virtual Node *Ideal_DU_postCCP( PhaseCCP * );
};
//------------------------------CastX2PNode-------------------------------------
// convert a machine-pointer-sized integer to a raw pointer
class CastX2PNode : public Node {
public:
CastX2PNode( Node *n ) : Node(NULL, n) {}
virtual int Opcode() const;
virtual const Type *Value( PhaseTransform *phase ) const;
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual Node *Identity( PhaseTransform *phase );
virtual uint ideal_reg() const { return Op_RegP; }
virtual const Type *bottom_type() const { return TypeRawPtr::BOTTOM; }
};
//------------------------------CastP2XNode-------------------------------------
// Used in both 32-bit and 64-bit land.
// Used for card-marks and unsafe pointer math.
class CastP2XNode : public Node {
public:
CastP2XNode( Node *ctrl, Node *n ) : Node(ctrl, n) {}
virtual int Opcode() const;
virtual const Type *Value( PhaseTransform *phase ) const;
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual Node *Identity( PhaseTransform *phase );
virtual uint ideal_reg() const { return Op_RegX; }
virtual const Type *bottom_type() const { return TypeX_X; }
// Return false to keep node from moving away from an associated card mark.
virtual bool depends_only_on_test() const { return false; }
};
#endif // SHARE_VM_OPTO_CASTNODE_HPP

View File

@ -29,8 +29,11 @@
#include "opto/addnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/movenode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/regmask.hpp"

View File

@ -37,6 +37,7 @@
#include "opto/indexSet.hpp"
#include "opto/machnode.hpp"
#include "opto/memnode.hpp"
#include "opto/movenode.hpp"
#include "opto/opcodes.hpp"
#include "opto/rootnode.hpp"

View File

@ -25,17 +25,24 @@
#include "precompiled.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
#include "opto/countbitsnode.hpp"
#include "opto/divnode.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/locknode.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/memnode.hpp"
#include "opto/mathexactnode.hpp"
#include "opto/movenode.hpp"
#include "opto/mulnode.hpp"
#include "opto/multnode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/node.hpp"
#include "opto/opaquenode.hpp"
#include "opto/rootnode.hpp"
#include "opto/subnode.hpp"
#include "opto/vectornode.hpp"

View File

@ -51,6 +51,7 @@
#include "opto/mathexactnode.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/output.hpp"

File diff suppressed because it is too large

View File

@ -139,630 +139,16 @@ public:
};
//------------------------------BinaryNode-------------------------------------
// Place holder for the 2 conditional inputs to a CMove. CMove needs 4
// inputs: the Bool (for the lt/gt/eq/ne bits), the flags (result of some
// compare), and the 2 values to select between. The Matcher requires a
// binary tree so we break it down like this:
// (CMove (Binary bol cmp) (Binary src1 src2))
class BinaryNode : public Node {
public:
BinaryNode( Node *n1, Node *n2 ) : Node(0,n1,n2) { }
virtual int Opcode() const;
virtual uint ideal_reg() const { return 0; }
};
//------------------------------CMoveNode--------------------------------------
// Conditional move
class CMoveNode : public TypeNode {
public:
enum { Control, // When is it safe to do this cmove?
Condition, // Condition controlling the cmove
IfFalse, // Value if condition is false
IfTrue }; // Value if condition is true
CMoveNode( Node *bol, Node *left, Node *right, const Type *t ) : TypeNode(t,4)
{
init_class_id(Class_CMove);
// all inputs are nullified in Node::Node(int)
// init_req(Control,NULL);
init_req(Condition,bol);
init_req(IfFalse,left);
init_req(IfTrue,right);
}
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual const Type *Value( PhaseTransform *phase ) const;
virtual Node *Identity( PhaseTransform *phase );
static CMoveNode *make( Compile *C, Node *c, Node *bol, Node *left, Node *right, const Type *t );
// Helper function to spot cmove graph shapes
static Node *is_cmove_id( PhaseTransform *phase, Node *cmp, Node *t, Node *f, BoolNode *b );
};
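The class comment above records the design constraint: the instruction matcher only tiles binary trees, so the four value inputs of a conditional move are handed to it as (CMove (Binary bol cmp) (Binary src1 src2)). A toy illustration of that shape with generic structs (not the C2 node classes, whose construction differs):

// Every node has at most two inputs, so a pattern matcher that walks
// binary trees can tile the whole expression.
struct N {
  N* in[2];
  N(N* a, N* b) { in[0] = a; in[1] = b; }
};
struct Binary : N { Binary(N* a, N* b) : N(a, b) {} };
struct CMove  : N { CMove(N* cond_pair, N* val_pair) : N(cond_pair, val_pair) {} };

// Four leaves, packaged as two Binary helpers under one CMove.
CMove* make_cmove(N* bol, N* cmp, N* src1, N* src2) {
  return new CMove(new Binary(bol, cmp), new Binary(src1, src2));
}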
//------------------------------CMoveDNode-------------------------------------
class CMoveDNode : public CMoveNode {
public:
CMoveDNode( Node *bol, Node *left, Node *right, const Type* t) : CMoveNode(bol,left,right,t){}
virtual int Opcode() const;
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};
//------------------------------CMoveFNode-------------------------------------
class CMoveFNode : public CMoveNode {
public:
CMoveFNode( Node *bol, Node *left, Node *right, const Type* t ) : CMoveNode(bol,left,right,t) {}
virtual int Opcode() const;
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};
//------------------------------CMoveINode-------------------------------------
class CMoveINode : public CMoveNode {
public:
CMoveINode( Node *bol, Node *left, Node *right, const TypeInt *ti ) : CMoveNode(bol,left,right,ti){}
virtual int Opcode() const;
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};
//------------------------------CMoveLNode-------------------------------------
class CMoveLNode : public CMoveNode {
public:
CMoveLNode(Node *bol, Node *left, Node *right, const TypeLong *tl ) : CMoveNode(bol,left,right,tl){}
virtual int Opcode() const;
};
//------------------------------CMovePNode-------------------------------------
class CMovePNode : public CMoveNode {
public:
CMovePNode( Node *c, Node *bol, Node *left, Node *right, const TypePtr* t ) : CMoveNode(bol,left,right,t) { init_req(Control,c); }
virtual int Opcode() const;
};
//------------------------------CMoveNNode-------------------------------------
class CMoveNNode : public CMoveNode {
public:
CMoveNNode( Node *c, Node *bol, Node *left, Node *right, const Type* t ) : CMoveNode(bol,left,right,t) { init_req(Control,c); }
virtual int Opcode() const;
};
//------------------------------ConstraintCastNode-----------------------------
// cast to a different range
class ConstraintCastNode: public TypeNode {
public:
ConstraintCastNode (Node *n, const Type *t ): TypeNode(t,2) {
init_class_id(Class_ConstraintCast);
init_req(1, n);
}
virtual Node *Identity( PhaseTransform *phase );
virtual const Type *Value( PhaseTransform *phase ) const;
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual int Opcode() const;
virtual uint ideal_reg() const = 0;
virtual Node *Ideal_DU_postCCP( PhaseCCP * );
};
//------------------------------CastIINode-------------------------------------
// cast integer to integer (different range)
class CastIINode: public ConstraintCastNode {
public:
CastIINode (Node *n, const Type *t ): ConstraintCastNode(n,t) {}
virtual int Opcode() const;
virtual uint ideal_reg() const { return Op_RegI; }
};
//------------------------------CastPPNode-------------------------------------
// cast pointer to pointer (different type)
class CastPPNode: public ConstraintCastNode {
public:
CastPPNode (Node *n, const Type *t ): ConstraintCastNode(n, t) {}
virtual int Opcode() const;
virtual uint ideal_reg() const { return Op_RegP; }
virtual Node *Ideal_DU_postCCP( PhaseCCP * );
};
//------------------------------CheckCastPPNode--------------------------------
// for _checkcast, cast pointer to pointer (different type), without JOIN,
class CheckCastPPNode: public TypeNode {
public:
CheckCastPPNode( Node *c, Node *n, const Type *t ) : TypeNode(t,2) {
init_class_id(Class_CheckCastPP);
init_req(0, c);
init_req(1, n);
}
virtual Node *Identity( PhaseTransform *phase );
virtual const Type *Value( PhaseTransform *phase ) const;
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual int Opcode() const;
virtual uint ideal_reg() const { return Op_RegP; }
// No longer remove CheckCast after CCP as it gives me a place to hang
// the proper address type - which is required to compute anti-deps.
//virtual Node *Ideal_DU_postCCP( PhaseCCP * );
};
//------------------------------EncodeNarrowPtr--------------------------------
class EncodeNarrowPtrNode : public TypeNode {
protected:
EncodeNarrowPtrNode(Node* value, const Type* type):
TypeNode(type, 2) {
init_class_id(Class_EncodeNarrowPtr);
init_req(0, NULL);
init_req(1, value);
}
public:
virtual uint ideal_reg() const { return Op_RegN; }
virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );
};
//------------------------------EncodeP--------------------------------
// Encodes an oop pointers into its compressed form
// Takes an extra argument which is the real heap base as a long which
// may be useful for code generation in the backend.
class EncodePNode : public EncodeNarrowPtrNode {
public:
EncodePNode(Node* value, const Type* type):
EncodeNarrowPtrNode(value, type) {
init_class_id(Class_EncodeP);
}
virtual int Opcode() const;
virtual Node *Identity( PhaseTransform *phase );
virtual const Type *Value( PhaseTransform *phase ) const;
};
//------------------------------EncodePKlass--------------------------------
// Encodes a klass pointer into its compressed form
// Takes an extra argument which is the real heap base as a long which
// may be useful for code generation in the backend.
class EncodePKlassNode : public EncodeNarrowPtrNode {
public:
EncodePKlassNode(Node* value, const Type* type):
EncodeNarrowPtrNode(value, type) {
init_class_id(Class_EncodePKlass);
}
virtual int Opcode() const;
virtual Node *Identity( PhaseTransform *phase );
virtual const Type *Value( PhaseTransform *phase ) const;
};
//------------------------------DecodeNarrowPtr--------------------------------
class DecodeNarrowPtrNode : public TypeNode {
protected:
DecodeNarrowPtrNode(Node* value, const Type* type):
TypeNode(type, 2) {
init_class_id(Class_DecodeNarrowPtr);
init_req(0, NULL);
init_req(1, value);
}
public:
virtual uint ideal_reg() const { return Op_RegP; }
};
//------------------------------DecodeN--------------------------------
// Converts a narrow oop into a real oop ptr.
// Takes an extra argument which is the real heap base as a long which
// may be useful for code generation in the backend.
class DecodeNNode : public DecodeNarrowPtrNode {
public:
DecodeNNode(Node* value, const Type* type):
DecodeNarrowPtrNode(value, type) {
init_class_id(Class_DecodeN);
}
virtual int Opcode() const;
virtual const Type *Value( PhaseTransform *phase ) const;
virtual Node *Identity( PhaseTransform *phase );
};
//------------------------------DecodeNKlass--------------------------------
// Converts a narrow klass pointer into a real klass ptr.
// Takes an extra argument which is the real heap base as a long which
// may be useful for code generation in the backend.
class DecodeNKlassNode : public DecodeNarrowPtrNode {
public:
DecodeNKlassNode(Node* value, const Type* type):
DecodeNarrowPtrNode(value, type) {
init_class_id(Class_DecodeNKlass);
}
virtual int Opcode() const;
virtual const Type *Value( PhaseTransform *phase ) const;
virtual Node *Identity( PhaseTransform *phase );
};
//------------------------------Conv2BNode-------------------------------------
// Convert int/pointer to a Boolean. Map zero to zero, all else to 1.
class Conv2BNode : public Node {
public:
Conv2BNode( Node *i ) : Node(0,i) {}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return TypeInt::BOOL; }
virtual Node *Identity( PhaseTransform *phase );
virtual const Type *Value( PhaseTransform *phase ) const;
virtual uint ideal_reg() const { return Op_RegI; }
};
// The conversions operations are all Alpha sorted. Please keep it that way!
//------------------------------ConvD2FNode------------------------------------
// Convert double to float
class ConvD2FNode : public Node {
public:
ConvD2FNode( Node *in1 ) : Node(0,in1) {}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return Type::FLOAT; }
virtual const Type *Value( PhaseTransform *phase ) const;
virtual Node *Identity( PhaseTransform *phase );
virtual uint ideal_reg() const { return Op_RegF; }
};
//------------------------------ConvD2INode------------------------------------
// Convert Double to Integer
class ConvD2INode : public Node {
public:
ConvD2INode( Node *in1 ) : Node(0,in1) {}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return TypeInt::INT; }
virtual const Type *Value( PhaseTransform *phase ) const;
virtual Node *Identity( PhaseTransform *phase );
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual uint ideal_reg() const { return Op_RegI; }
};
//------------------------------ConvD2LNode------------------------------------
// Convert Double to Long
class ConvD2LNode : public Node {
public:
ConvD2LNode( Node *dbl ) : Node(0,dbl) {}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return TypeLong::LONG; }
virtual const Type *Value( PhaseTransform *phase ) const;
virtual Node *Identity( PhaseTransform *phase );
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual uint ideal_reg() const { return Op_RegL; }
};
//------------------------------ConvF2DNode------------------------------------
// Convert Float to a Double.
class ConvF2DNode : public Node {
public:
ConvF2DNode( Node *in1 ) : Node(0,in1) {}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return Type::DOUBLE; }
virtual const Type *Value( PhaseTransform *phase ) const;
virtual uint ideal_reg() const { return Op_RegD; }
};
//------------------------------ConvF2INode------------------------------------
// Convert float to integer
class ConvF2INode : public Node {
public:
ConvF2INode( Node *in1 ) : Node(0,in1) {}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return TypeInt::INT; }
virtual const Type *Value( PhaseTransform *phase ) const;
virtual Node *Identity( PhaseTransform *phase );
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual uint ideal_reg() const { return Op_RegI; }
};
//------------------------------ConvF2LNode------------------------------------
// Convert float to long
class ConvF2LNode : public Node {
public:
ConvF2LNode( Node *in1 ) : Node(0,in1) {}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return TypeLong::LONG; }
virtual const Type *Value( PhaseTransform *phase ) const;
virtual Node *Identity( PhaseTransform *phase );
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual uint ideal_reg() const { return Op_RegL; }
};
//------------------------------ConvI2DNode------------------------------------
// Convert Integer to Double
class ConvI2DNode : public Node {
public:
ConvI2DNode( Node *in1 ) : Node(0,in1) {}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return Type::DOUBLE; }
virtual const Type *Value( PhaseTransform *phase ) const;
virtual uint ideal_reg() const { return Op_RegD; }
};
//------------------------------ConvI2FNode------------------------------------
// Convert Integer to Float
class ConvI2FNode : public Node {
public:
ConvI2FNode( Node *in1 ) : Node(0,in1) {}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return Type::FLOAT; }
virtual const Type *Value( PhaseTransform *phase ) const;
virtual Node *Identity( PhaseTransform *phase );
virtual uint ideal_reg() const { return Op_RegF; }
};
//------------------------------ConvI2LNode------------------------------------
// Convert integer to long
class ConvI2LNode : public TypeNode {
public:
ConvI2LNode(Node *in1, const TypeLong* t = TypeLong::INT)
: TypeNode(t, 2)
{ init_req(1, in1); }
virtual int Opcode() const;
virtual const Type *Value( PhaseTransform *phase ) const;
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual uint ideal_reg() const { return Op_RegL; }
};
//------------------------------ConvL2DNode------------------------------------
// Convert Long to Double
class ConvL2DNode : public Node {
public:
ConvL2DNode( Node *in1 ) : Node(0,in1) {}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return Type::DOUBLE; }
virtual const Type *Value( PhaseTransform *phase ) const;
virtual uint ideal_reg() const { return Op_RegD; }
};
//------------------------------ConvL2FNode------------------------------------
// Convert Long to Float
class ConvL2FNode : public Node {
public:
ConvL2FNode( Node *in1 ) : Node(0,in1) {}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return Type::FLOAT; }
virtual const Type *Value( PhaseTransform *phase ) const;
virtual uint ideal_reg() const { return Op_RegF; }
};
//------------------------------ConvL2INode------------------------------------
// Convert long to integer
class ConvL2INode : public Node {
public:
ConvL2INode( Node *in1 ) : Node(0,in1) {}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return TypeInt::INT; }
virtual Node *Identity( PhaseTransform *phase );
virtual const Type *Value( PhaseTransform *phase ) const;
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual uint ideal_reg() const { return Op_RegI; }
};
//------------------------------CastX2PNode-------------------------------------
// convert a machine-pointer-sized integer to a raw pointer
class CastX2PNode : public Node {
public:
CastX2PNode( Node *n ) : Node(NULL, n) {}
virtual int Opcode() const;
virtual const Type *Value( PhaseTransform *phase ) const;
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual Node *Identity( PhaseTransform *phase );
virtual uint ideal_reg() const { return Op_RegP; }
virtual const Type *bottom_type() const { return TypeRawPtr::BOTTOM; }
};
//------------------------------CastP2XNode-------------------------------------
// Used in both 32-bit and 64-bit land.
// Used for card-marks and unsafe pointer math.
class CastP2XNode : public Node {
public:
CastP2XNode( Node *ctrl, Node *n ) : Node(ctrl, n) {}
virtual int Opcode() const;
virtual const Type *Value( PhaseTransform *phase ) const;
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual Node *Identity( PhaseTransform *phase );
virtual uint ideal_reg() const { return Op_RegX; }
virtual const Type *bottom_type() const { return TypeX_X; }
// Return false to keep node from moving away from an associated card mark.
virtual bool depends_only_on_test() const { return false; }
};
//------------------------------ThreadLocalNode--------------------------------
// Ideal Node which returns the base of ThreadLocalStorage.
class ThreadLocalNode : public Node {
public:
ThreadLocalNode( ) : Node((Node*)Compile::current()->root()) {}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return TypeRawPtr::BOTTOM;}
virtual uint ideal_reg() const { return Op_RegP; }
};
//------------------------------LoadReturnPCNode-------------------------------
class LoadReturnPCNode: public Node {
public:
LoadReturnPCNode(Node *c) : Node(c) { }
virtual int Opcode() const;
virtual uint ideal_reg() const { return Op_RegP; }
};
//-----------------------------RoundFloatNode----------------------------------
class RoundFloatNode: public Node {
public:
RoundFloatNode(Node* c, Node *in1): Node(c, in1) {}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return Type::FLOAT; }
virtual uint ideal_reg() const { return Op_RegF; }
virtual Node *Identity( PhaseTransform *phase );
virtual const Type *Value( PhaseTransform *phase ) const;
};
//-----------------------------RoundDoubleNode---------------------------------
class RoundDoubleNode: public Node {
public:
RoundDoubleNode(Node* c, Node *in1): Node(c, in1) {}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return Type::DOUBLE; }
virtual uint ideal_reg() const { return Op_RegD; }
virtual Node *Identity( PhaseTransform *phase );
virtual const Type *Value( PhaseTransform *phase ) const;
};
//------------------------------Opaque1Node------------------------------------
// A node to prevent unwanted optimizations. Allows constant folding.
// Stops value-numbering, Ideal calls or Identity functions.
class Opaque1Node : public Node {
virtual uint hash() const ; // { return NO_HASH; }
virtual uint cmp( const Node &n ) const;
public:
Opaque1Node( Compile* C, Node *n ) : Node(0,n) {
// Put it on the Macro nodes list to be removed during macro nodes expansion.
init_flags(Flag_is_macro);
C->add_macro_node(this);
}
// Special version for the pre-loop to hold the original loop limit
// which is consumed by range check elimination.
Opaque1Node( Compile* C, Node *n, Node* orig_limit ) : Node(0,n,orig_limit) {
// Put it on the Macro nodes list to be removed during macro nodes expansion.
init_flags(Flag_is_macro);
C->add_macro_node(this);
}
Node* original_loop_limit() { return req()==3 ? in(2) : NULL; }
virtual int Opcode() const;
virtual const Type *bottom_type() const { return TypeInt::INT; }
virtual Node *Identity( PhaseTransform *phase );
};
//------------------------------Opaque2Node------------------------------------
// A node to prevent unwanted optimizations. Allows constant folding. Stops
// value-numbering, most Ideal calls or Identity functions. This Node is
// specifically designed to prevent the pre-increment value of a loop trip
// counter from being live out of the bottom of the loop (hence causing the
// pre- and post-increment values both being live and thus requiring an extra
// temp register and an extra move). If we "accidentally" optimize through
// this kind of a Node, we'll get slightly pessimal, but correct, code. Thus
// it's OK to be slightly sloppy on optimizations here.
class Opaque2Node : public Node {
virtual uint hash() const ; // { return NO_HASH; }
virtual uint cmp( const Node &n ) const;
public:
Opaque2Node( Compile* C, Node *n ) : Node(0,n) {
// Put it on the Macro nodes list to be removed during macro nodes expansion.
init_flags(Flag_is_macro);
C->add_macro_node(this);
}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return TypeInt::INT; }
};
//------------------------------Opaque3Node------------------------------------
// A node to prevent unwanted optimizations. Will be optimized only during
// macro nodes expansion.
class Opaque3Node : public Opaque2Node {
int _opt; // what optimization it was used for
public:
enum { RTM_OPT };
Opaque3Node(Compile* C, Node *n, int opt) : Opaque2Node(C, n), _opt(opt) {}
virtual int Opcode() const;
bool rtm_opt() const { return (_opt == RTM_OPT); }
};
//----------------------PartialSubtypeCheckNode--------------------------------
// The second, slow half of a subtype check. Scans the subklass's secondary
// superklass array for an instance of the superklass. Sets a hidden internal
// cache on a hit (the cache is checked with exposed code in
// gen_subtype_check()). Returns non-zero on a miss, zero on a hit.
class PartialSubtypeCheckNode : public Node {
public:
PartialSubtypeCheckNode(Node* c, Node* sub, Node* super) : Node(c,sub,super) {}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return TypeRawPtr::BOTTOM; }
virtual uint ideal_reg() const { return Op_RegP; }
};
// Move nodes: reinterpret the raw bits of a value between the integer and
// floating-point register files (a bit-for-bit move, not a value conversion).
class MoveI2FNode : public Node {
public:
MoveI2FNode( Node *value ) : Node(0,value) {}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return Type::FLOAT; }
virtual uint ideal_reg() const { return Op_RegF; }
virtual const Type* Value( PhaseTransform *phase ) const;
};
class MoveL2DNode : public Node {
public:
MoveL2DNode( Node *value ) : Node(0,value) {}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return Type::DOUBLE; }
virtual uint ideal_reg() const { return Op_RegD; }
virtual const Type* Value( PhaseTransform *phase ) const;
};
class MoveF2INode : public Node {
public:
MoveF2INode( Node *value ) : Node(0,value) {}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return TypeInt::INT; }
virtual uint ideal_reg() const { return Op_RegI; }
virtual const Type* Value( PhaseTransform *phase ) const;
};
class MoveD2LNode : public Node {
public:
MoveD2LNode( Node *value ) : Node(0,value) {}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return TypeLong::LONG; }
virtual uint ideal_reg() const { return Op_RegL; }
virtual const Type* Value( PhaseTransform *phase ) const;
};
//---------- CountBitsNode -----------------------------------------------------
class CountBitsNode : public Node {
public:
CountBitsNode(Node* in1) : Node(0, in1) {}
const Type* bottom_type() const { return TypeInt::INT; }
virtual uint ideal_reg() const { return Op_RegI; }
};
//---------- CountLeadingZerosINode --------------------------------------------
// Count leading zeros (0-bit count starting from MSB) of an integer.
class CountLeadingZerosINode : public CountBitsNode {
public:
CountLeadingZerosINode(Node* in1) : CountBitsNode(in1) {}
virtual int Opcode() const;
virtual const Type* Value(PhaseTransform* phase) const;
};
//---------- CountLeadingZerosLNode --------------------------------------------
// Count leading zeros (0-bit count starting from MSB) of a long.
class CountLeadingZerosLNode : public CountBitsNode {
public:
CountLeadingZerosLNode(Node* in1) : CountBitsNode(in1) {}
virtual int Opcode() const;
virtual const Type* Value(PhaseTransform* phase) const;
};
//---------- CountTrailingZerosINode -------------------------------------------
// Count trailing zeros (0-bit count starting from LSB) of an integer.
class CountTrailingZerosINode : public CountBitsNode {
public:
CountTrailingZerosINode(Node* in1) : CountBitsNode(in1) {}
virtual int Opcode() const;
virtual const Type* Value(PhaseTransform* phase) const;
};
//---------- CountTrailingZerosLNode -------------------------------------------
// Count trailing zeros (0-bit count starting from LSB) of a long.
class CountTrailingZerosLNode : public CountBitsNode {
public:
CountTrailingZerosLNode(Node* in1) : CountBitsNode(in1) {}
virtual int Opcode() const;
virtual const Type* Value(PhaseTransform* phase) const;
};
//---------- PopCountINode -----------------------------------------------------
// Population count (bit count) of an integer.
class PopCountINode : public CountBitsNode {
public:
PopCountINode(Node* in1) : CountBitsNode(in1) {}
virtual int Opcode() const;
};
//---------- PopCountLNode -----------------------------------------------------
// Population count (bit count) of a long.
class PopCountLNode : public CountBitsNode {
public:
PopCountLNode(Node* in1) : CountBitsNode(in1) {}
virtual int Opcode() const;
};
#endif // SHARE_VM_OPTO_CONNODE_HPP

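The Move nodes above model raw bit moves between register files rather than value conversions (MoveF2I corresponds to Java's Float.floatToRawIntBits, MoveD2L to Double.doubleToRawLongBits). A minimal standalone sketch of the same reinterpretation, with illustrative names and memcpy as the portable bit-cast; this is not HotSpot code:

#include <cstdint>
#include <cstring>
#include <cstdio>

// Reinterpret the 32 bits of a float as an int, the raw-bits move that
// MoveF2INode models (cf. Float.floatToRawIntBits). memcpy avoids the
// undefined behavior of a pointer-cast type pun.
static int32_t move_f2i(float f) {
  int32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));
  return bits;
}

// The inverse move, as modeled by MoveI2FNode.
static float move_i2f(int32_t bits) {
  float f;
  std::memcpy(&f, &bits, sizeof(f));
  return f;
}

int main() {
  int32_t bits = move_f2i(1.0f);
  std::printf("0x%08x\n", (unsigned)bits);   // 0x3f800000 under IEEE-754
  std::printf("%g\n", move_i2f(bits));       // round-trips to 1
  return 0;
}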
View File

@@ -0,0 +1,512 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "opto/addnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/matcher.hpp"
#include "opto/phaseX.hpp"
#include "opto/subnode.hpp"
//=============================================================================
//------------------------------Identity---------------------------------------
Node *Conv2BNode::Identity( PhaseTransform *phase ) {
const Type *t = phase->type( in(1) );
if( t == Type::TOP ) return in(1);
if( t == TypeInt::ZERO ) return in(1);
if( t == TypeInt::ONE ) return in(1);
if( t == TypeInt::BOOL ) return in(1);
return this;
}
//------------------------------Value------------------------------------------
const Type *Conv2BNode::Value( PhaseTransform *phase ) const {
const Type *t = phase->type( in(1) );
if( t == Type::TOP ) return Type::TOP;
if( t == TypeInt::ZERO ) return TypeInt::ZERO;
if( t == TypePtr::NULL_PTR ) return TypeInt::ZERO;
const TypePtr *tp = t->isa_ptr();
if( tp != NULL ) {
if( tp->ptr() == TypePtr::AnyNull ) return Type::TOP;
if( tp->ptr() == TypePtr::Constant) return TypeInt::ONE;
if (tp->ptr() == TypePtr::NotNull) return TypeInt::ONE;
return TypeInt::BOOL;
}
if (t->base() != Type::Int) return TypeInt::BOOL;
const TypeInt *ti = t->is_int();
if( ti->_hi < 0 || ti->_lo > 0 ) return TypeInt::ONE;
return TypeInt::BOOL;
}
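// Worked example (tracing the int case above): an input typed [3,7] has
// _lo > 0, so Value folds to TypeInt::ONE; an input typed [-2,5] straddles
// zero, so it stays TypeInt::BOOL.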
// The conversion operations are all alphabetically sorted. Please keep it that way!
//=============================================================================
//------------------------------Value------------------------------------------
const Type *ConvD2FNode::Value( PhaseTransform *phase ) const {
const Type *t = phase->type( in(1) );
if( t == Type::TOP ) return Type::TOP;
if( t == Type::DOUBLE ) return Type::FLOAT;
const TypeD *td = t->is_double_constant();
return TypeF::make( (float)td->getd() );
}
//------------------------------Identity---------------------------------------
// Floats can be converted to doubles with no loss of bits. Hence
// converting a float to a double and back to a float is a NOP.
Node *ConvD2FNode::Identity(PhaseTransform *phase) {
return (in(1)->Opcode() == Op_ConvF2D) ? in(1)->in(1) : this;
}
//=============================================================================
//------------------------------Value------------------------------------------
const Type *ConvD2INode::Value( PhaseTransform *phase ) const {
const Type *t = phase->type( in(1) );
if( t == Type::TOP ) return Type::TOP;
if( t == Type::DOUBLE ) return TypeInt::INT;
const TypeD *td = t->is_double_constant();
return TypeInt::make( SharedRuntime::d2i( td->getd() ) );
}
//------------------------------Ideal------------------------------------------
// If converting to an int type, skip any rounding nodes
Node *ConvD2INode::Ideal(PhaseGVN *phase, bool can_reshape) {
if( in(1)->Opcode() == Op_RoundDouble )
set_req(1,in(1)->in(1));
return NULL;
}
//------------------------------Identity---------------------------------------
// Ints can be converted to doubles with no loss of bits. Hence
// converting an integer to a double and back to an integer is a NOP.
Node *ConvD2INode::Identity(PhaseTransform *phase) {
return (in(1)->Opcode() == Op_ConvI2D) ? in(1)->in(1) : this;
}
//=============================================================================
//------------------------------Value------------------------------------------
const Type *ConvD2LNode::Value( PhaseTransform *phase ) const {
const Type *t = phase->type( in(1) );
if( t == Type::TOP ) return Type::TOP;
if( t == Type::DOUBLE ) return TypeLong::LONG;
const TypeD *td = t->is_double_constant();
return TypeLong::make( SharedRuntime::d2l( td->getd() ) );
}
//------------------------------Identity---------------------------------------
Node *ConvD2LNode::Identity(PhaseTransform *phase) {
// Remove ConvD2L->ConvL2D->ConvD2L sequences.
if( in(1) ->Opcode() == Op_ConvL2D &&
in(1)->in(1)->Opcode() == Op_ConvD2L )
return in(1)->in(1);
return this;
}
//------------------------------Ideal------------------------------------------
// If converting to an int type, skip any rounding nodes
Node *ConvD2LNode::Ideal(PhaseGVN *phase, bool can_reshape) {
if( in(1)->Opcode() == Op_RoundDouble )
set_req(1,in(1)->in(1));
return NULL;
}
//=============================================================================
//------------------------------Value------------------------------------------
const Type *ConvF2DNode::Value( PhaseTransform *phase ) const {
const Type *t = phase->type( in(1) );
if( t == Type::TOP ) return Type::TOP;
if( t == Type::FLOAT ) return Type::DOUBLE;
const TypeF *tf = t->is_float_constant();
return TypeD::make( (double)tf->getf() );
}
//=============================================================================
//------------------------------Value------------------------------------------
const Type *ConvF2INode::Value( PhaseTransform *phase ) const {
const Type *t = phase->type( in(1) );
if( t == Type::TOP ) return Type::TOP;
if( t == Type::FLOAT ) return TypeInt::INT;
const TypeF *tf = t->is_float_constant();
return TypeInt::make( SharedRuntime::f2i( tf->getf() ) );
}
//------------------------------Identity---------------------------------------
Node *ConvF2INode::Identity(PhaseTransform *phase) {
// Remove ConvF2I->ConvI2F->ConvF2I sequences.
if( in(1) ->Opcode() == Op_ConvI2F &&
in(1)->in(1)->Opcode() == Op_ConvF2I )
return in(1)->in(1);
return this;
}
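// Why this is sound: a value produced by the inner F2I either fits a float
// exactly (truncations below 2^24 fit the mantissa; larger float inputs are
// already integral) or is the clamped min/max int bound, which survives the
// I2F/F2I round trip. Hence the inner I2F/F2I pair cannot change the value.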
//------------------------------Ideal------------------------------------------
// If converting to an int type, skip any rounding nodes
Node *ConvF2INode::Ideal(PhaseGVN *phase, bool can_reshape) {
if( in(1)->Opcode() == Op_RoundFloat )
set_req(1,in(1)->in(1));
return NULL;
}
//=============================================================================
//------------------------------Value------------------------------------------
const Type *ConvF2LNode::Value( PhaseTransform *phase ) const {
const Type *t = phase->type( in(1) );
if( t == Type::TOP ) return Type::TOP;
if( t == Type::FLOAT ) return TypeLong::LONG;
const TypeF *tf = t->is_float_constant();
return TypeLong::make( SharedRuntime::f2l( tf->getf() ) );
}
//------------------------------Identity---------------------------------------
Node *ConvF2LNode::Identity(PhaseTransform *phase) {
// Remove ConvF2L->ConvL2F->ConvF2L sequences.
if( in(1) ->Opcode() == Op_ConvL2F &&
in(1)->in(1)->Opcode() == Op_ConvF2L )
return in(1)->in(1);
return this;
}
//------------------------------Ideal------------------------------------------
// If converting to an int type, skip any rounding nodes
Node *ConvF2LNode::Ideal(PhaseGVN *phase, bool can_reshape) {
if( in(1)->Opcode() == Op_RoundFloat )
set_req(1,in(1)->in(1));
return NULL;
}
//=============================================================================
//------------------------------Value------------------------------------------
const Type *ConvI2DNode::Value( PhaseTransform *phase ) const {
const Type *t = phase->type( in(1) );
if( t == Type::TOP ) return Type::TOP;
const TypeInt *ti = t->is_int();
if( ti->is_con() ) return TypeD::make( (double)ti->get_con() );
return bottom_type();
}
//=============================================================================
//------------------------------Value------------------------------------------
const Type *ConvI2FNode::Value( PhaseTransform *phase ) const {
const Type *t = phase->type( in(1) );
if( t == Type::TOP ) return Type::TOP;
const TypeInt *ti = t->is_int();
if( ti->is_con() ) return TypeF::make( (float)ti->get_con() );
return bottom_type();
}
//------------------------------Identity---------------------------------------
Node *ConvI2FNode::Identity(PhaseTransform *phase) {
// Remove ConvI2F->ConvF2I->ConvI2F sequences.
if( in(1) ->Opcode() == Op_ConvF2I &&
in(1)->in(1)->Opcode() == Op_ConvI2F )
return in(1)->in(1);
return this;
}
//=============================================================================
//------------------------------Value------------------------------------------
const Type *ConvI2LNode::Value( PhaseTransform *phase ) const {
const Type *t = phase->type( in(1) );
if( t == Type::TOP ) return Type::TOP;
const TypeInt *ti = t->is_int();
const Type* tl = TypeLong::make(ti->_lo, ti->_hi, ti->_widen);
// Join my declared type against my incoming type.
tl = tl->filter(_type);
return tl;
}
#ifdef _LP64
static inline bool long_ranges_overlap(jlong lo1, jlong hi1,
jlong lo2, jlong hi2) {
// Two ranges overlap iff one range's low point falls in the other range.
return (lo2 <= lo1 && lo1 <= hi2) || (lo1 <= lo2 && lo2 <= hi1);
}
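// Worked example: [0,10] and [5,20] overlap (5 falls inside [0,10]), while
// [0,4] and [5,20] do not, since neither low point falls inside the other.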
#endif
//------------------------------Ideal------------------------------------------
Node *ConvI2LNode::Ideal(PhaseGVN *phase, bool can_reshape) {
const TypeLong* this_type = this->type()->is_long();
Node* this_changed = NULL;
// If _major_progress, then more loop optimizations follow. Do NOT
// remove this node's type assertion until no more loop ops can happen.
// The progress bit is set in the major loop optimizations, and only THEN
// comes the call to IterGVN and any chance of hitting this code. Cf. Opaque1Node.
if (can_reshape && !phase->C->major_progress()) {
const TypeInt* in_type = phase->type(in(1))->isa_int();
if (in_type != NULL && this_type != NULL &&
(in_type->_lo != this_type->_lo ||
in_type->_hi != this_type->_hi)) {
// Although this WORSENS the type, it increases GVN opportunities,
// because I2L nodes with the same input will common up, regardless
// of slightly differing type assertions. Such slight differences
// arise routinely as a result of loop unrolling, so this is a
// post-unrolling graph cleanup. Choose a type which depends only
// on my input. (Exception: Keep a range assertion of >=0 or <0.)
jlong lo1 = this_type->_lo;
jlong hi1 = this_type->_hi;
int w1 = this_type->_widen;
if (lo1 != (jint)lo1 ||
hi1 != (jint)hi1 ||
lo1 > hi1) {
// Overflow leads to wraparound, wraparound leads to range saturation.
lo1 = min_jint; hi1 = max_jint;
} else if (lo1 >= 0) {
// Keep a range assertion of >=0.
lo1 = 0; hi1 = max_jint;
} else if (hi1 < 0) {
// Keep a range assertion of <0.
lo1 = min_jint; hi1 = -1;
} else {
lo1 = min_jint; hi1 = max_jint;
}
const TypeLong* wtype = TypeLong::make(MAX2((jlong)in_type->_lo, lo1),
MIN2((jlong)in_type->_hi, hi1),
MAX2((int)in_type->_widen, w1));
if (wtype != type()) {
set_type(wtype);
// Note: this_type still has old type value, for the logic below.
this_changed = this;
}
}
}
#ifdef _LP64
// Convert ConvI2L(AddI(x, y)) to AddL(ConvI2L(x), ConvI2L(y)),
// but only if x and y have subranges that cannot cause 32-bit overflow,
// under the assumption that x+y is in my own subrange this->type().
// This assumption is based on a constraint (i.e., type assertion)
// established in Parse::array_addressing or perhaps elsewhere.
// This constraint has been adjoined to the "natural" type of
// the incoming argument in(0). We know (because of runtime
// checks) that the result value I2L(x+y) is in the joined range.
// Hence we can restrict the incoming terms (x, y) to values such
// that their sum also lands in that range.
// This optimization is useful only on 64-bit systems, where we hope
// the addition will end up subsumed in an addressing mode.
// It is necessary to do this when optimizing an unrolled array
// copy loop such as x[i++] = y[i++].
// On 32-bit systems, it's better to perform as much 32-bit math as
// possible before the I2L conversion, because 32-bit math is cheaper.
// There's no common reason to "leak" a constant offset through the I2L.
// Addressing arithmetic will not absorb it as part of a 64-bit AddL.
Node* z = in(1);
int op = z->Opcode();
if (op == Op_AddI || op == Op_SubI) {
Node* x = z->in(1);
Node* y = z->in(2);
assert (x != z && y != z, "dead loop in ConvI2LNode::Ideal");
if (phase->type(x) == Type::TOP) return this_changed;
if (phase->type(y) == Type::TOP) return this_changed;
const TypeInt* tx = phase->type(x)->is_int();
const TypeInt* ty = phase->type(y)->is_int();
const TypeLong* tz = this_type;
jlong xlo = tx->_lo;
jlong xhi = tx->_hi;
jlong ylo = ty->_lo;
jlong yhi = ty->_hi;
jlong zlo = tz->_lo;
jlong zhi = tz->_hi;
jlong vbit = CONST64(1) << BitsPerInt;
int widen = MAX2(tx->_widen, ty->_widen);
if (op == Op_SubI) {
jlong ylo0 = ylo;
ylo = -yhi;
yhi = -ylo0;
}
// See if x+y can cause positive overflow into z+2**32
if (long_ranges_overlap(xlo+ylo, xhi+yhi, zlo+vbit, zhi+vbit)) {
return this_changed;
}
// See if x+y can cause negative overflow into z-2**32
if (long_ranges_overlap(xlo+ylo, xhi+yhi, zlo-vbit, zhi-vbit)) {
return this_changed;
}
// Now it's always safe to assume x+y does not overflow.
// This is true even if some pairs x,y might cause overflow, as long
// as that overflow value cannot fall into [zlo,zhi].
// Confident that the arithmetic is "as if infinite precision",
// we can now use z's range to put constraints on those of x and y.
// The "natural" range of x [xlo,xhi] can perhaps be narrowed to a
// more "restricted" range by intersecting [xlo,xhi] with the
// range obtained by subtracting y's range from the asserted range
// of the I2L conversion. Here's the interval arithmetic algebra:
// x == z-y == [zlo,zhi]-[ylo,yhi] == [zlo,zhi]+[-yhi,-ylo]
// => x in [zlo-yhi, zhi-ylo]
// => x in [zlo-yhi, zhi-ylo] INTERSECT [xlo,xhi]
// => x in [xlo MAX zlo-yhi, xhi MIN zhi-ylo]
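// Worked example: z in [0,100] and y in [0,10] give x = z-y in [-10,100],
// which is then intersected with x's natural range [xlo,xhi] below.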
jlong rxlo = MAX2(xlo, zlo - yhi);
jlong rxhi = MIN2(xhi, zhi - ylo);
// And similarly, x changing place with y:
jlong rylo = MAX2(ylo, zlo - xhi);
jlong ryhi = MIN2(yhi, zhi - xlo);
if (rxlo > rxhi || rylo > ryhi) {
return this_changed; // x or y is dying; don't mess w/ it
}
if (op == Op_SubI) {
jlong rylo0 = rylo;
rylo = -ryhi;
ryhi = -rylo0;
}
Node* cx = phase->transform( new (phase->C) ConvI2LNode(x, TypeLong::make(rxlo, rxhi, widen)) );
Node* cy = phase->transform( new (phase->C) ConvI2LNode(y, TypeLong::make(rylo, ryhi, widen)) );
switch (op) {
case Op_AddI: return new (phase->C) AddLNode(cx, cy);
case Op_SubI: return new (phase->C) SubLNode(cx, cy);
default: ShouldNotReachHere();
}
}
#endif //_LP64
return this_changed;
}
//=============================================================================
//------------------------------Value------------------------------------------
const Type *ConvL2DNode::Value( PhaseTransform *phase ) const {
const Type *t = phase->type( in(1) );
if( t == Type::TOP ) return Type::TOP;
const TypeLong *tl = t->is_long();
if( tl->is_con() ) return TypeD::make( (double)tl->get_con() );
return bottom_type();
}
//=============================================================================
//------------------------------Value------------------------------------------
const Type *ConvL2FNode::Value( PhaseTransform *phase ) const {
const Type *t = phase->type( in(1) );
if( t == Type::TOP ) return Type::TOP;
const TypeLong *tl = t->is_long();
if( tl->is_con() ) return TypeF::make( (float)tl->get_con() );
return bottom_type();
}
//=============================================================================
//----------------------------Identity-----------------------------------------
Node *ConvL2INode::Identity( PhaseTransform *phase ) {
// Convert L2I(I2L(x)) => x
if (in(1)->Opcode() == Op_ConvI2L) return in(1)->in(1);
return this;
}
//------------------------------Value------------------------------------------
const Type *ConvL2INode::Value( PhaseTransform *phase ) const {
const Type *t = phase->type( in(1) );
if( t == Type::TOP ) return Type::TOP;
const TypeLong *tl = t->is_long();
if (tl->is_con())
// Easy case.
return TypeInt::make((jint)tl->get_con());
return bottom_type();
}
//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.
// Blow off prior masking to int
Node *ConvL2INode::Ideal(PhaseGVN *phase, bool can_reshape) {
Node *andl = in(1);
uint andl_op = andl->Opcode();
if( andl_op == Op_AndL ) {
// Blow off prior masking to int
if( phase->type(andl->in(2)) == TypeLong::make( 0xFFFFFFFF ) ) {
set_req(1,andl->in(1));
return this;
}
}
// Swap with a prior add: convL2I(addL(x,y)) ==> addI(convL2I(x),convL2I(y))
// This replaces an 'AddL' with an 'AddI'.
if( andl_op == Op_AddL ) {
// Don't do this for nodes which have more than one user since
// we'll end up computing the long add anyway.
if (andl->outcnt() > 1) return NULL;
Node* x = andl->in(1);
Node* y = andl->in(2);
assert( x != andl && y != andl, "dead loop in ConvL2INode::Ideal" );
if (phase->type(x) == Type::TOP) return NULL;
if (phase->type(y) == Type::TOP) return NULL;
Node *add1 = phase->transform(new (phase->C) ConvL2INode(x));
Node *add2 = phase->transform(new (phase->C) ConvL2INode(y));
return new (phase->C) AddINode(add1,add2);
}
// Disable optimization: LoadL->ConvL2I ==> LoadI.
// It causes problems (sizes of Load and Store nodes do not match)
// in object initialization code and Escape Analysis.
return NULL;
}
//=============================================================================
//------------------------------Identity---------------------------------------
// Remove redundant roundings
Node *RoundFloatNode::Identity( PhaseTransform *phase ) {
assert(Matcher::strict_fp_requires_explicit_rounding, "should only generate for Intel");
// Do not round constants
if (phase->type(in(1))->base() == Type::FloatCon) return in(1);
int op = in(1)->Opcode();
// Redundant rounding
if( op == Op_RoundFloat ) return in(1);
// Already rounded
if( op == Op_Parm ) return in(1);
if( op == Op_LoadF ) return in(1);
return this;
}
//------------------------------Value------------------------------------------
const Type *RoundFloatNode::Value( PhaseTransform *phase ) const {
return phase->type( in(1) );
}
//=============================================================================
//------------------------------Identity---------------------------------------
// Remove redundant roundings. Incoming arguments are already rounded.
Node *RoundDoubleNode::Identity( PhaseTransform *phase ) {
assert(Matcher::strict_fp_requires_explicit_rounding, "should only generate for Intel");
// Do not round constants
if (phase->type(in(1))->base() == Type::DoubleCon) return in(1);
int op = in(1)->Opcode();
// Redundant rounding
if( op == Op_RoundDouble ) return in(1);
// Already rounded
if( op == Op_Parm ) return in(1);
if( op == Op_LoadD ) return in(1);
if( op == Op_ConvF2D ) return in(1);
if( op == Op_ConvI2D ) return in(1);
return this;
}
//------------------------------Value------------------------------------------
const Type *RoundDoubleNode::Value( PhaseTransform *phase ) const {
return phase->type( in(1) );
}
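The wraparound test at the heart of ConvI2LNode::Ideal's AddI rewrite above can be exercised standalone: the AddI may be pushed through the I2L only if no 32-bit wraparound of x+y (one full 2**32 in either direction) can land inside the asserted result range z. A minimal sketch with illustrative names, not the HotSpot types:

#include <cstdint>
#include <cstdio>

// Two inclusive ranges overlap iff one range's low point falls in the other
// (same predicate as long_ranges_overlap above).
static bool ranges_overlap(int64_t lo1, int64_t hi1, int64_t lo2, int64_t hi2) {
  return (lo2 <= lo1 && lo1 <= hi2) || (lo1 <= lo2 && lo2 <= hi1);
}

// May ConvI2L(AddI(x, y)) be rewritten as AddL(ConvI2L(x), ConvI2L(y))?
// Only if neither wraparound image of the exact sum, z+2**32 or z-2**32,
// intersects the possible sums [xlo+ylo, xhi+yhi].
static bool add_push_through_ok(int64_t xlo, int64_t xhi,
                                int64_t ylo, int64_t yhi,
                                int64_t zlo, int64_t zhi) {
  const int64_t vbit = INT64_C(1) << 32;  // one full 32-bit wrap
  return !ranges_overlap(xlo + ylo, xhi + yhi, zlo + vbit, zhi + vbit) &&
         !ranges_overlap(xlo + ylo, xhi + yhi, zlo - vbit, zhi - vbit);
}

int main() {
  // Narrow index arithmetic: x,y in [0,100], asserted z = [0,200] -> safe (1).
  std::printf("%d\n", add_push_through_ok(0, 100, 0, 100, 0, 200));
  // Ranges wide enough that a wrapped sum could fall in z -> unsafe (0).
  std::printf("%d\n", add_push_through_ok(0, INT32_MAX, 0, INT32_MAX,
                                          0, INT64_C(1) << 32));
  return 0;
}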

View File

@@ -0,0 +1,215 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_OPTO_CONVERTNODE_HPP
#define SHARE_VM_OPTO_CONVERTNODE_HPP
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
//------------------------------Conv2BNode-------------------------------------
// Convert int/pointer to a Boolean. Map zero to zero, all else to 1.
class Conv2BNode : public Node {
public:
Conv2BNode( Node *i ) : Node(0,i) {}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return TypeInt::BOOL; }
virtual Node *Identity( PhaseTransform *phase );
virtual const Type *Value( PhaseTransform *phase ) const;
virtual uint ideal_reg() const { return Op_RegI; }
};
// The conversion operations are all alphabetically sorted. Please keep it that way!
//------------------------------ConvD2FNode------------------------------------
// Convert double to float
class ConvD2FNode : public Node {
public:
ConvD2FNode( Node *in1 ) : Node(0,in1) {}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return Type::FLOAT; }
virtual const Type *Value( PhaseTransform *phase ) const;
virtual Node *Identity( PhaseTransform *phase );
virtual uint ideal_reg() const { return Op_RegF; }
};
//------------------------------ConvD2INode------------------------------------
// Convert Double to Integer
class ConvD2INode : public Node {
public:
ConvD2INode( Node *in1 ) : Node(0,in1) {}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return TypeInt::INT; }
virtual const Type *Value( PhaseTransform *phase ) const;
virtual Node *Identity( PhaseTransform *phase );
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual uint ideal_reg() const { return Op_RegI; }
};
//------------------------------ConvD2LNode------------------------------------
// Convert Double to Long
class ConvD2LNode : public Node {
public:
ConvD2LNode( Node *dbl ) : Node(0,dbl) {}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return TypeLong::LONG; }
virtual const Type *Value( PhaseTransform *phase ) const;
virtual Node *Identity( PhaseTransform *phase );
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual uint ideal_reg() const { return Op_RegL; }
};
//------------------------------ConvF2DNode------------------------------------
// Convert Float to a Double.
class ConvF2DNode : public Node {
public:
ConvF2DNode( Node *in1 ) : Node(0,in1) {}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return Type::DOUBLE; }
virtual const Type *Value( PhaseTransform *phase ) const;
virtual uint ideal_reg() const { return Op_RegD; }
};
//------------------------------ConvF2INode------------------------------------
// Convert float to integer
class ConvF2INode : public Node {
public:
ConvF2INode( Node *in1 ) : Node(0,in1) {}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return TypeInt::INT; }
virtual const Type *Value( PhaseTransform *phase ) const;
virtual Node *Identity( PhaseTransform *phase );
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual uint ideal_reg() const { return Op_RegI; }
};
//------------------------------ConvF2LNode------------------------------------
// Convert float to long
class ConvF2LNode : public Node {
public:
ConvF2LNode( Node *in1 ) : Node(0,in1) {}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return TypeLong::LONG; }
virtual const Type *Value( PhaseTransform *phase ) const;
virtual Node *Identity( PhaseTransform *phase );
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual uint ideal_reg() const { return Op_RegL; }
};
//------------------------------ConvI2DNode------------------------------------
// Convert Integer to Double
class ConvI2DNode : public Node {
public:
ConvI2DNode( Node *in1 ) : Node(0,in1) {}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return Type::DOUBLE; }
virtual const Type *Value( PhaseTransform *phase ) const;
virtual uint ideal_reg() const { return Op_RegD; }
};
//------------------------------ConvI2FNode------------------------------------
// Convert Integer to Float
class ConvI2FNode : public Node {
public:
ConvI2FNode( Node *in1 ) : Node(0,in1) {}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return Type::FLOAT; }
virtual const Type *Value( PhaseTransform *phase ) const;
virtual Node *Identity( PhaseTransform *phase );
virtual uint ideal_reg() const { return Op_RegF; }
};
//------------------------------ConvI2LNode------------------------------------
// Convert integer to long
class ConvI2LNode : public TypeNode {
public:
ConvI2LNode(Node *in1, const TypeLong* t = TypeLong::INT)
: TypeNode(t, 2)
{ init_req(1, in1); }
virtual int Opcode() const;
virtual const Type *Value( PhaseTransform *phase ) const;
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual uint ideal_reg() const { return Op_RegL; }
};
//------------------------------ConvL2DNode------------------------------------
// Convert Long to Double
class ConvL2DNode : public Node {
public:
ConvL2DNode( Node *in1 ) : Node(0,in1) {}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return Type::DOUBLE; }
virtual const Type *Value( PhaseTransform *phase ) const;
virtual uint ideal_reg() const { return Op_RegD; }
};
//------------------------------ConvL2FNode------------------------------------
// Convert Long to Float
class ConvL2FNode : public Node {
public:
ConvL2FNode( Node *in1 ) : Node(0,in1) {}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return Type::FLOAT; }
virtual const Type *Value( PhaseTransform *phase ) const;
virtual uint ideal_reg() const { return Op_RegF; }
};
//------------------------------ConvL2INode------------------------------------
// Convert long to integer
class ConvL2INode : public Node {
public:
ConvL2INode( Node *in1 ) : Node(0,in1) {}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return TypeInt::INT; }
virtual Node *Identity( PhaseTransform *phase );
virtual const Type *Value( PhaseTransform *phase ) const;
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual uint ideal_reg() const { return Op_RegI; }
};
//-----------------------------RoundFloatNode----------------------------------
class RoundFloatNode: public Node {
public:
RoundFloatNode(Node* c, Node *in1): Node(c, in1) {}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return Type::FLOAT; }
virtual uint ideal_reg() const { return Op_RegF; }
virtual Node *Identity( PhaseTransform *phase );
virtual const Type *Value( PhaseTransform *phase ) const;
};
//-----------------------------RoundDoubleNode---------------------------------
class RoundDoubleNode: public Node {
public:
RoundDoubleNode(Node* c, Node *in1): Node(c, in1) {}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return Type::DOUBLE; }
virtual uint ideal_reg() const { return Op_RegD; }
virtual Node *Identity( PhaseTransform *phase );
virtual const Type *Value( PhaseTransform *phase ) const;
};
#endif // SHARE_VM_OPTO_CONVERTNODE_HPP
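ConvD2INode::Value above folds double constants through SharedRuntime::d2i, which follows the JLS double-to-int narrowing rule: NaN maps to 0, out-of-range values saturate to the int bounds, and everything else truncates toward zero. A standalone sketch of that rule (illustrative, not the HotSpot implementation):

#include <cstdint>
#include <cmath>
#include <cstdio>
#include <limits>

// JLS 5.1.3 double-to-int narrowing: NaN -> 0, out-of-range saturates,
// otherwise truncate toward zero. Mirrors what SharedRuntime::d2i computes.
static int32_t d2i(double d) {
  if (std::isnan(d)) return 0;
  if (d >= static_cast<double>(std::numeric_limits<int32_t>::max()))
    return std::numeric_limits<int32_t>::max();
  if (d <= static_cast<double>(std::numeric_limits<int32_t>::min()))
    return std::numeric_limits<int32_t>::min();
  return static_cast<int32_t>(d);  // in-range: plain truncation
}

int main() {
  std::printf("%d\n", d2i(3.9));          // 3
  std::printf("%d\n", d2i(-3.9));         // -3
  std::printf("%d\n", d2i(1e18));         // 2147483647 (saturated)
  std::printf("%d\n", d2i(std::nan("")))  ; // 0
  return 0;
}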

View File

@@ -0,0 +1,119 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "opto/countbitsnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/type.hpp"
//------------------------------Value------------------------------------------
const Type* CountLeadingZerosINode::Value(PhaseTransform* phase) const {
const Type* t = phase->type(in(1));
if (t == Type::TOP) return Type::TOP;
const TypeInt* ti = t->isa_int();
if (ti && ti->is_con()) {
jint i = ti->get_con();
// HD, Figure 5-6
if (i == 0)
return TypeInt::make(BitsPerInt);
int n = 1;
unsigned int x = i;
if (x >> 16 == 0) { n += 16; x <<= 16; }
if (x >> 24 == 0) { n += 8; x <<= 8; }
if (x >> 28 == 0) { n += 4; x <<= 4; }
if (x >> 30 == 0) { n += 2; x <<= 2; }
n -= x >> 31;
return TypeInt::make(n);
}
return TypeInt::INT;
}
//------------------------------Value------------------------------------------
const Type* CountLeadingZerosLNode::Value(PhaseTransform* phase) const {
const Type* t = phase->type(in(1));
if (t == Type::TOP) return Type::TOP;
const TypeLong* tl = t->isa_long();
if (tl && tl->is_con()) {
jlong l = tl->get_con();
// HD, Figure 5-6
if (l == 0)
return TypeInt::make(BitsPerLong);
int n = 1;
unsigned int x = (((julong) l) >> 32);
if (x == 0) { n += 32; x = (int) l; }
if (x >> 16 == 0) { n += 16; x <<= 16; }
if (x >> 24 == 0) { n += 8; x <<= 8; }
if (x >> 28 == 0) { n += 4; x <<= 4; }
if (x >> 30 == 0) { n += 2; x <<= 2; }
n -= x >> 31;
return TypeInt::make(n);
}
return TypeInt::INT;
}
//------------------------------Value------------------------------------------
const Type* CountTrailingZerosINode::Value(PhaseTransform* phase) const {
const Type* t = phase->type(in(1));
if (t == Type::TOP) return Type::TOP;
const TypeInt* ti = t->isa_int();
if (ti && ti->is_con()) {
jint i = ti->get_con();
// HD, Figure 5-14
int y;
if (i == 0)
return TypeInt::make(BitsPerInt);
int n = 31;
y = i << 16; if (y != 0) { n = n - 16; i = y; }
y = i << 8; if (y != 0) { n = n - 8; i = y; }
y = i << 4; if (y != 0) { n = n - 4; i = y; }
y = i << 2; if (y != 0) { n = n - 2; i = y; }
y = i << 1; if (y != 0) { n = n - 1; }
return TypeInt::make(n);
}
return TypeInt::INT;
}
//------------------------------Value------------------------------------------
const Type* CountTrailingZerosLNode::Value(PhaseTransform* phase) const {
const Type* t = phase->type(in(1));
if (t == Type::TOP) return Type::TOP;
const TypeLong* tl = t->isa_long();
if (tl && tl->is_con()) {
jlong l = tl->get_con();
// HD, Figure 5-14
int x, y;
if (l == 0)
return TypeInt::make(BitsPerLong);
int n = 63;
y = (int) l; if (y != 0) { n = n - 32; x = y; } else x = (((julong) l) >> 32);
y = x << 16; if (y != 0) { n = n - 16; x = y; }
y = x << 8; if (y != 0) { n = n - 8; x = y; }
y = x << 4; if (y != 0) { n = n - 4; x = y; }
y = x << 2; if (y != 0) { n = n - 2; x = y; }
y = x << 1; if (y != 0) { n = n - 1; }
return TypeInt::make(n);
}
return TypeInt::INT;
}
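Both constant-folding paths above use the branch-free binary-search algorithms from Hacker's Delight (Figure 5-6 for leading zeros, Figure 5-14 for trailing zeros). The 32-bit leading-zeros version can be lifted out and checked against a naive bit walk; a standalone sketch with illustrative names:

#include <cstdio>

// HD, Figure 5-6: count leading zeros of a 32-bit value by binary search,
// the same algorithm CountLeadingZerosINode::Value folds for constants.
static int nlz(unsigned int x) {
  if (x == 0) return 32;
  int n = 1;
  if (x >> 16 == 0) { n += 16; x <<= 16; }
  if (x >> 24 == 0) { n += 8;  x <<= 8;  }
  if (x >> 28 == 0) { n += 4;  x <<= 4;  }
  if (x >> 30 == 0) { n += 2;  x <<= 2;  }
  n -= x >> 31;
  return n;
}

// Naive reference: walk bits from the MSB down.
static int nlz_ref(unsigned int x) {
  int n = 0;
  for (unsigned int bit = 1u << 31; bit != 0 && (x & bit) == 0; bit >>= 1) n++;
  return n;
}

int main() {
  unsigned int samples[] = { 0u, 1u, 0x80000000u, 0x00F00000u, 0xFFFFFFFFu, 12345u };
  for (unsigned int s : samples) {
    std::printf("nlz(0x%08x) = %d (ref %d)\n", s, nlz(s), nlz_ref(s));
  }
  return 0;
}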

View File

@@ -0,0 +1,94 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_OPTO_COUNTBITSNODE_HPP
#define SHARE_VM_OPTO_COUNTBITSNODE_HPP
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
class PhaseTransform;
//---------- CountBitsNode -----------------------------------------------------
class CountBitsNode : public Node {
public:
CountBitsNode(Node* in1) : Node(0, in1) {}
const Type* bottom_type() const { return TypeInt::INT; }
virtual uint ideal_reg() const { return Op_RegI; }
};
//---------- CountLeadingZerosINode --------------------------------------------
// Count leading zeros (0-bit count starting from MSB) of an integer.
class CountLeadingZerosINode : public CountBitsNode {
public:
CountLeadingZerosINode(Node* in1) : CountBitsNode(in1) {}
virtual int Opcode() const;
virtual const Type* Value(PhaseTransform* phase) const;
};
//---------- CountLeadingZerosLNode --------------------------------------------
// Count leading zeros (0-bit count starting from MSB) of a long.
class CountLeadingZerosLNode : public CountBitsNode {
public:
CountLeadingZerosLNode(Node* in1) : CountBitsNode(in1) {}
virtual int Opcode() const;
virtual const Type* Value(PhaseTransform* phase) const;
};
//---------- CountTrailingZerosINode -------------------------------------------
// Count trailing zeros (0-bit count starting from LSB) of an integer.
class CountTrailingZerosINode : public CountBitsNode {
public:
CountTrailingZerosINode(Node* in1) : CountBitsNode(in1) {}
virtual int Opcode() const;
virtual const Type* Value(PhaseTransform* phase) const;
};
//---------- CountTrailingZerosLNode -------------------------------------------
// Count trailing zeros (0-bit count starting from LSB) of a long.
class CountTrailingZerosLNode : public CountBitsNode {
public:
CountTrailingZerosLNode(Node* in1) : CountBitsNode(in1) {}
virtual int Opcode() const;
virtual const Type* Value(PhaseTransform* phase) const;
};
//---------- PopCountINode -----------------------------------------------------
// Population count (bit count) of an integer.
class PopCountINode : public CountBitsNode {
public:
PopCountINode(Node* in1) : CountBitsNode(in1) {}
virtual int Opcode() const;
};
//---------- PopCountLNode -----------------------------------------------------
// Population count (bit count) of a long.
class PopCountLNode : public CountBitsNode {
public:
PopCountLNode(Node* in1) : CountBitsNode(in1) {}
virtual int Opcode() const;
};
#endif // SHARE_VM_OPTO_COUNTBITSNODE_HPP
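PopCountINode and PopCountLNode declare no Value method here; they simply model the population-count operation (Integer.bitCount / Long.bitCount, typically a single POPCNT instruction on x86). A minimal reference for what they compute, using Kernighan's clear-lowest-set-bit loop (illustrative, not HotSpot code):

#include <cstdio>

// Population count: number of set bits, the operation PopCountINode models.
static int popcount(unsigned int x) {
  int n = 0;
  while (x != 0) {
    x &= x - 1;  // clear the lowest set bit
    n++;
  }
  return n;
}

int main() {
  std::printf("%d\n", popcount(0));           // 0
  std::printf("%d\n", popcount(0xFFu));       // 8
  std::printf("%d\n", popcount(0x80000001u)); // 2
  return 0;
}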

View File

@@ -26,8 +26,10 @@
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
#include "opto/divnode.hpp"
#include "opto/machnode.hpp"
#include "opto/movenode.hpp"
#include "opto/matcher.hpp"
#include "opto/mulnode.hpp"
#include "opto/phaseX.hpp"

View File

@@ -31,6 +31,7 @@
#include "interpreter/linkResolver.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/parse.hpp"
@@ -249,8 +250,7 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
}
CallGenerator* miss_cg;
Deoptimization::DeoptReason reason = morphism == 2 ?
Deoptimization::Reason_bimorphic :
(speculative_receiver_type == NULL ? Deoptimization::Reason_class_check : Deoptimization::Reason_speculate_class_check);
Deoptimization::Reason_bimorphic : Deoptimization::reason_class_check(speculative_receiver_type != NULL);
if ((morphism == 1 || (morphism == 2 && next_hit_cg != NULL)) &&
!too_many_traps(jvms->method(), jvms->bci(), reason)
) {
@@ -631,13 +631,7 @@ void Parse::do_call() {
}
BasicType ct = ctype->basic_type();
if (ct == T_OBJECT || ct == T_ARRAY) {
ciKlass* better_type = method()->return_profiled_type(bci());
if (UseTypeSpeculation && better_type != NULL) {
// If profiling reports a single type for the return value,
// feed it to the type system so it can propagate it as a
// speculative type
record_profile_for_speculation(stack(sp()-1), better_type);
}
record_profiled_return_for_speculation();
}
}

View File

@@ -33,6 +33,7 @@
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/phaseX.hpp"
#include "opto/movenode.hpp"
#include "opto/rootnode.hpp"
ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :

View File

@@ -27,7 +27,7 @@
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
#include "opto/locknode.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"

View File

@@ -30,10 +30,14 @@
#include "memory/barrierSet.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/locknode.hpp"
#include "opto/machnode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
@@ -612,10 +616,10 @@ void GraphKit::builtin_throw(Deoptimization::DeoptReason reason, Node* arg) {
// Usual case: Bail to interpreter.
// Reserve the right to recompile if we haven't seen anything yet.
assert(!Deoptimization::reason_is_speculate(reason), "unsupported");
ciMethod* m = Deoptimization::reason_is_speculate(reason) ? C->method() : NULL;
Deoptimization::DeoptAction action = Deoptimization::Action_maybe_recompile;
if (treat_throw_as_hot
&& (method()->method_data()->trap_recompiled_at(bci(), NULL)
&& (method()->method_data()->trap_recompiled_at(bci(), m)
|| C->too_many_traps(reason))) {
// We cannot afford to take more traps here. Suffer in the interpreter.
if (C->log() != NULL)
@@ -1181,7 +1185,8 @@ extern int explicit_null_checks_inserted,
Node* GraphKit::null_check_common(Node* value, BasicType type,
// optional arguments for variations:
bool assert_null,
Node* *null_control) {
Node* *null_control,
bool speculative) {
assert(!assert_null || null_control == NULL, "not both at once");
if (stopped()) return top();
if (!GenerateCompilerNullChecks && !assert_null && null_control == NULL) {
@@ -1291,13 +1296,13 @@ Node* GraphKit::null_check_common(Node* value, BasicType type,
// Branch to failure if null
float ok_prob = PROB_MAX; // a priori estimate: nulls never happen
Deoptimization::DeoptReason reason;
if (assert_null)
if (assert_null) {
reason = Deoptimization::Reason_null_assert;
else if (type == T_OBJECT)
reason = Deoptimization::Reason_null_check;
else
} else if (type == T_OBJECT) {
reason = Deoptimization::reason_null_check(speculative);
} else {
reason = Deoptimization::Reason_div0_check;
}
// %%% Since Reason_unhandled is not recorded on a per-bytecode basis,
// ciMethodData::has_trap_at will return a conservative -1 if any
// must-be-null assertion has failed. This could cause performance
@@ -2120,21 +2125,36 @@ void GraphKit::round_double_arguments(ciMethod* dest_method) {
*
* @param n node that the type applies to
* @param exact_kls type from profiling
* @param maybe_null did profiling see null?
*
* @return node with improved type
*/
Node* GraphKit::record_profile_for_speculation(Node* n, ciKlass* exact_kls) {
Node* GraphKit::record_profile_for_speculation(Node* n, ciKlass* exact_kls, bool maybe_null) {
const Type* current_type = _gvn.type(n);
assert(UseTypeSpeculation, "type speculation must be on");
const TypeOopPtr* speculative = current_type->speculative();
const TypePtr* speculative = current_type->speculative();
// Should the klass from the profile be recorded in the speculative type?
if (current_type->would_improve_type(exact_kls, jvms()->depth())) {
const TypeKlassPtr* tklass = TypeKlassPtr::make(exact_kls);
const TypeOopPtr* xtype = tklass->as_instance_type();
assert(xtype->klass_is_exact(), "Should be exact");
// Any reason to believe n is not null (from this profiling or a previous one)?
const TypePtr* ptr = (maybe_null && current_type->speculative_maybe_null()) ? TypePtr::BOTTOM : TypePtr::NOTNULL;
// record the new speculative type's depth
speculative = xtype->with_inline_depth(jvms()->depth());
speculative = xtype->cast_to_ptr_type(ptr->ptr())->is_ptr();
speculative = speculative->with_inline_depth(jvms()->depth());
} else if (current_type->would_improve_ptr(maybe_null)) {
// Profiling reports that null was never seen, so we can change the
// speculative type to a non-null ptr.
assert(!maybe_null, "nothing to improve");
if (speculative == NULL) {
speculative = TypePtr::NOTNULL;
} else {
const TypePtr* ptr = TypePtr::NOTNULL;
speculative = speculative->cast_to_ptr_type(ptr->ptr())->is_ptr();
}
}
if (speculative != current_type->speculative()) {
@@ -2167,7 +2187,15 @@ Node* GraphKit::record_profiled_receiver_for_speculation(Node* n) {
return n;
}
ciKlass* exact_kls = profile_has_unique_klass();
return record_profile_for_speculation(n, exact_kls);
bool maybe_null = true;
if (java_bc() == Bytecodes::_checkcast ||
java_bc() == Bytecodes::_instanceof ||
java_bc() == Bytecodes::_aastore) {
ciProfileData* data = method()->method_data()->bci_to_data(bci());
bool maybe_null = data == NULL ? true : data->as_BitData()->null_seen();
}
return record_profile_for_speculation(n, exact_kls, maybe_null);
return n;
}
/**
@@ -2187,9 +2215,10 @@ void GraphKit::record_profiled_arguments_for_speculation(ciMethod* dest_method,
for (int j = skip, i = 0; j < nargs && i < TypeProfileArgsLimit; j++) {
const Type *targ = tf->_domain->field_at(j + TypeFunc::Parms);
if (targ->basic_type() == T_OBJECT || targ->basic_type() == T_ARRAY) {
ciKlass* better_type = method()->argument_profiled_type(bci(), i);
if (better_type != NULL) {
record_profile_for_speculation(argument(j), better_type);
bool maybe_null = true;
ciKlass* better_type = NULL;
if (method()->argument_profiled_type(bci(), i, better_type, maybe_null)) {
record_profile_for_speculation(argument(j), better_type, maybe_null);
}
i++;
}
@@ -2206,15 +2235,34 @@ void GraphKit::record_profiled_parameters_for_speculation() {
}
for (int i = 0, j = 0; i < method()->arg_size() ; i++) {
if (_gvn.type(local(i))->isa_oopptr()) {
ciKlass* better_type = method()->parameter_profiled_type(j);
if (better_type != NULL) {
record_profile_for_speculation(local(i), better_type);
bool maybe_null = true;
ciKlass* better_type = NULL;
if (method()->parameter_profiled_type(j, better_type, maybe_null)) {
record_profile_for_speculation(local(i), better_type, maybe_null);
}
j++;
}
}
}
/**
* Record profiling data from return value profiling at an invoke with
* the type system so that it can propagate it (speculation)
*/
void GraphKit::record_profiled_return_for_speculation() {
if (!UseTypeSpeculation) {
return;
}
bool maybe_null = true;
ciKlass* better_type = NULL;
if (method()->return_profiled_type(bci(), better_type, maybe_null)) {
// If profiling reports a single type for the return value,
// feed it to the type system so it can propagate it as a
// speculative type
record_profile_for_speculation(stack(sp()-1), better_type, maybe_null);
}
}
void GraphKit::round_double_result(ciMethod* dest_method) {
// A non-strict method may return a double value which has an extended
// exponent, but this must not be visible in a caller which is 'strict'
@@ -2294,10 +2342,12 @@ Node* GraphKit::dstore_rounding(Node* n) {
// Null check oop. Set null-path control into Region in slot 3.
// Make a cast-not-nullness use the other not-null control. Return cast.
Node* GraphKit::null_check_oop(Node* value, Node* *null_control,
bool never_see_null, bool safe_for_replace) {
bool never_see_null,
bool safe_for_replace,
bool speculative) {
// Initial NULL check taken path
(*null_control) = top();
Node* cast = null_check_common(value, T_OBJECT, false, null_control);
Node* cast = null_check_common(value, T_OBJECT, false, null_control, speculative);
// Generate uncommon_trap:
if (never_see_null && (*null_control) != top()) {
@@ -2308,7 +2358,8 @@ Node* GraphKit::null_check_oop(Node* value, Node* *null_control,
PreserveJVMState pjvms(this);
set_control(*null_control);
replace_in_map(value, null());
uncommon_trap(Deoptimization::Reason_null_check,
Deoptimization::DeoptReason reason = Deoptimization::reason_null_check(speculative);
uncommon_trap(reason,
Deoptimization::Action_make_not_entrant);
(*null_control) = top(); // NULL path is dead
}
@@ -2732,11 +2783,16 @@ Node* GraphKit::type_check_receiver(Node* receiver, ciKlass* klass,
// recompile; the offending check will be recompiled to handle NULLs.
// If we see several offending BCIs, then all checks in the
// method will be recompiled.
bool GraphKit::seems_never_null(Node* obj, ciProfileData* data) {
bool GraphKit::seems_never_null(Node* obj, ciProfileData* data, bool& speculating) {
speculating = !_gvn.type(obj)->speculative_maybe_null();
Deoptimization::DeoptReason reason = Deoptimization::reason_null_check(speculating);
if (UncommonNullCast // Cutout for this technique
&& obj != null() // And not the -Xcomp stupid case?
&& !too_many_traps(Deoptimization::Reason_null_check)
&& !too_many_traps(reason)
) {
if (speculating) {
return true;
}
if (data == NULL)
// Edge case: no mature data. Be optimistic here.
return true;
@@ -2746,6 +2802,7 @@ bool GraphKit::seems_never_null(Node* obj, ciProfileData* data) {
java_bc() == Bytecodes::_aastore, "MDO must collect null_seen bit here");
return !data->as_BitData()->null_seen();
}
speculating = false;
return false;
}
@@ -2758,7 +2815,7 @@ Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
bool safe_for_replace) {
if (!UseTypeProfile || !TypeProfileCasts) return NULL;
Deoptimization::DeoptReason reason = spec_klass == NULL ? Deoptimization::Reason_class_check : Deoptimization::Reason_speculate_class_check;
Deoptimization::DeoptReason reason = Deoptimization::reason_class_check(spec_klass != NULL);
// Make sure we haven't already deoptimized from this tactic.
if (too_many_traps(reason))
@@ -2811,7 +2868,7 @@ Node* GraphKit::maybe_cast_profiled_obj(Node* obj,
// type == NULL if profiling tells us this object is always null
if (type != NULL) {
Deoptimization::DeoptReason class_reason = Deoptimization::Reason_speculate_class_check;
Deoptimization::DeoptReason null_reason = Deoptimization::Reason_null_check;
Deoptimization::DeoptReason null_reason = Deoptimization::Reason_speculate_null_check;
if (!too_many_traps(null_reason) &&
!too_many_traps(class_reason)) {
Node* not_null_obj = NULL;
@@ -2819,7 +2876,7 @@ Node* GraphKit::maybe_cast_profiled_obj(Node* obj,
// there's no need for a null check
if (!not_null) {
Node* null_ctl = top();
not_null_obj = null_check_oop(obj, &null_ctl, true, true);
not_null_obj = null_check_oop(obj, &null_ctl, true, true, true);
assert(null_ctl->is_top(), "no null control here");
} else {
not_null_obj = obj;
@@ -2867,12 +2924,13 @@ Node* GraphKit::gen_instanceof(Node* obj, Node* superklass, bool safe_for_replace) {
if (java_bc() == Bytecodes::_instanceof) { // Only for the bytecode
data = method()->method_data()->bci_to_data(bci());
}
bool speculative_not_null = false;
bool never_see_null = (ProfileDynamicTypes // aggressive use of profile
&& seems_never_null(obj, data));
&& seems_never_null(obj, data, speculative_not_null));
// Null check; get casted pointer; set region slot 3
Node* null_ctl = top();
Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace);
Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);
// If not_null_obj is dead, only null-path is taken
if (stopped()) { // Doing instance-of on a NULL?
@@ -2995,12 +3053,13 @@ Node* GraphKit::gen_checkcast(Node *obj, Node* superklass,
C->set_has_split_ifs(true); // Has chance for split-if optimization
// Use null-cast information if it is available
bool speculative_not_null = false;
bool never_see_null = ((failure_control == NULL) // regular case only
&& seems_never_null(obj, data));
&& seems_never_null(obj, data, speculative_not_null));
// Null check; get casted pointer; set region slot 3
Node* null_ctl = top();
Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace);
Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);
// If not_null_obj is dead, only null-path is taken
if (stopped()) { // Doing instance-of on a NULL?

View File

@@ -351,9 +351,11 @@ class GraphKit : public Phase {
// Return the value cast to not-null.
// Be clever about equivalent dominating null checks.
Node* null_check_common(Node* value, BasicType type,
bool assert_null = false, Node* *null_control = NULL);
bool assert_null = false,
Node* *null_control = NULL,
bool speculative = false);
Node* null_check(Node* value, BasicType type = T_OBJECT) {
return null_check_common(value, type);
return null_check_common(value, type, false, NULL, !_gvn.type(value)->speculative_maybe_null());
}
Node* null_check_receiver() {
assert(argument(0)->bottom_type()->isa_ptr(), "must be");
@@ -382,10 +384,12 @@ class GraphKit : public Phase {
// If safe_for_replace, then we can replace the value with the cast
// in the parsing map (the cast is guaranteed to dominate the map)
Node* null_check_oop(Node* value, Node* *null_control,
bool never_see_null = false, bool safe_for_replace = false);
bool never_see_null = false,
bool safe_for_replace = false,
bool speculative = false);
// Check the null_seen bit.
bool seems_never_null(Node* obj, ciProfileData* data);
bool seems_never_null(Node* obj, ciProfileData* data, bool& speculating);
// Check for unique class for receiver at call
ciKlass* profile_has_unique_klass() {
@@ -399,10 +403,11 @@ class GraphKit : public Phase {
}
// record type from profiling with the type system
Node* record_profile_for_speculation(Node* n, ciKlass* exact_kls);
Node* record_profiled_receiver_for_speculation(Node* n);
Node* record_profile_for_speculation(Node* n, ciKlass* exact_kls, bool maybe_null);
void record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc);
void record_profiled_parameters_for_speculation();
void record_profiled_return_for_speculation();
Node* record_profiled_receiver_for_speculation(Node* n);
// Use the type profile to narrow an object type.
Node* maybe_cast_profiled_receiver(Node* not_null_obj,

View File

@@ -27,6 +27,7 @@
#include "opto/addnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/castnode.hpp"
#include "opto/connode.hpp"
#include "opto/divnode.hpp"
#include "opto/graphKit.hpp"

View File

@@ -31,7 +31,6 @@
#include "opto/cfgnode.hpp"
#include "opto/chaitin.hpp"
#include "opto/coalesce.hpp"
#include "opto/connode.hpp"
#include "opto/indexSet.hpp"
#include "opto/machnode.hpp"
#include "opto/memnode.hpp"

View File

@@ -0,0 +1,82 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/memnode.hpp"
#include "opto/phaseX.hpp"
//=============================================================================
// Do not match memory edge.
uint StrIntrinsicNode::match_edge(uint idx) const {
return idx == 2 || idx == 3;
}
//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node. Strip out
// control copies
Node *StrIntrinsicNode::Ideal(PhaseGVN *phase, bool can_reshape) {
if (remove_dead_region(phase, can_reshape)) return this;
// Don't bother trying to transform a dead node
if (in(0) && in(0)->is_top()) return NULL;
if (can_reshape) {
Node* mem = phase->transform(in(MemNode::Memory));
// If transformed to a MergeMem, get the desired slice
uint alias_idx = phase->C->get_alias_index(adr_type());
mem = mem->is_MergeMem() ? mem->as_MergeMem()->memory_at(alias_idx) : mem;
if (mem != in(MemNode::Memory)) {
set_req(MemNode::Memory, mem);
return this;
}
}
return NULL;
}
//------------------------------Value------------------------------------------
const Type *StrIntrinsicNode::Value( PhaseTransform *phase ) const {
if (in(0) && phase->type(in(0)) == Type::TOP) return Type::TOP;
return bottom_type();
}
//=============================================================================
//------------------------------match_edge-------------------------------------
// Do not match memory edge
uint EncodeISOArrayNode::match_edge(uint idx) const {
return idx == 2 || idx == 3; // EncodeISOArray src (Binary dst len)
}
//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node. Strip out
// control copies
Node *EncodeISOArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
return remove_dead_region(phase, can_reshape) ? this : NULL;
}
//------------------------------Value------------------------------------------
const Type *EncodeISOArrayNode::Value(PhaseTransform *phase) const {
if (in(0) && phase->type(in(0)) == Type::TOP) return Type::TOP;
return bottom_type();
}

View File

@@ -0,0 +1,127 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_OPTO_INTRINSICNODE_HPP
#define SHARE_VM_OPTO_INTRINSICNODE_HPP
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
//----------------------PartialSubtypeCheckNode--------------------------------
// The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
// array for an instance of the superklass. Set a hidden internal cache on a
// hit (cache is checked with exposed code in gen_subtype_check()). Return
// non-zero for a miss or zero for a hit.
class PartialSubtypeCheckNode : public Node {
public:
PartialSubtypeCheckNode(Node* c, Node* sub, Node* super) : Node(c,sub,super) {}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return TypeRawPtr::BOTTOM; }
virtual uint ideal_reg() const { return Op_RegP; }
};
//------------------------------StrIntrinsic-------------------------------
// Base class for Ideal nodes used in String intrinsic code.
class StrIntrinsicNode: public Node {
public:
StrIntrinsicNode(Node* control, Node* char_array_mem,
Node* s1, Node* c1, Node* s2, Node* c2):
Node(control, char_array_mem, s1, c1, s2, c2) {
}
StrIntrinsicNode(Node* control, Node* char_array_mem,
Node* s1, Node* s2, Node* c):
Node(control, char_array_mem, s1, s2, c) {
}
StrIntrinsicNode(Node* control, Node* char_array_mem,
Node* s1, Node* s2):
Node(control, char_array_mem, s1, s2) {
}
virtual bool depends_only_on_test() const { return false; }
virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; }
virtual uint match_edge(uint idx) const;
virtual uint ideal_reg() const { return Op_RegI; }
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual const Type *Value(PhaseTransform *phase) const;
};
//------------------------------StrComp-------------------------------------
class StrCompNode: public StrIntrinsicNode {
public:
StrCompNode(Node* control, Node* char_array_mem,
Node* s1, Node* c1, Node* s2, Node* c2):
StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {};
virtual int Opcode() const;
virtual const Type* bottom_type() const { return TypeInt::INT; }
};
//------------------------------StrEquals-------------------------------------
class StrEqualsNode: public StrIntrinsicNode {
public:
StrEqualsNode(Node* control, Node* char_array_mem,
Node* s1, Node* s2, Node* c):
StrIntrinsicNode(control, char_array_mem, s1, s2, c) {};
virtual int Opcode() const;
virtual const Type* bottom_type() const { return TypeInt::BOOL; }
};
//------------------------------StrIndexOf-------------------------------------
class StrIndexOfNode: public StrIntrinsicNode {
public:
StrIndexOfNode(Node* control, Node* char_array_mem,
Node* s1, Node* c1, Node* s2, Node* c2):
StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {};
virtual int Opcode() const;
virtual const Type* bottom_type() const { return TypeInt::INT; }
};
//------------------------------AryEq---------------------------------------
class AryEqNode: public StrIntrinsicNode {
public:
AryEqNode(Node* control, Node* char_array_mem, Node* s1, Node* s2):
StrIntrinsicNode(control, char_array_mem, s1, s2) {};
virtual int Opcode() const;
virtual const Type* bottom_type() const { return TypeInt::BOOL; }
};
//------------------------------EncodeISOArray--------------------------------
// encode char[] to byte[] in ISO_8859_1
class EncodeISOArrayNode: public Node {
public:
EncodeISOArrayNode(Node *control, Node* arymem, Node* s1, Node* s2, Node* c): Node(control, arymem, s1, s2, c) {};
virtual int Opcode() const;
virtual bool depends_only_on_test() const { return false; }
virtual const Type* bottom_type() const { return TypeInt::INT; }
virtual const TypePtr* adr_type() const { return TypePtr::BOTTOM; }
virtual uint match_edge(uint idx) const;
virtual uint ideal_reg() const { return Op_RegI; }
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual const Type *Value(PhaseTransform *phase) const;
};
#endif // SHARE_VM_OPTO_INTRINSICNODE_HPP
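The header only declares the nodes; construction remains in LibraryCallKit. A hedged sketch of a typical construction site (the kit context and the s1_start/s2_start/cnt names are assumptions for illustration):

// Sketch: StrEquals over the char[] memory slice; bottom_type() is
// TypeInt::BOOL, so the result feeds an ordinary Bool/If.
Node* str_eq = _gvn.transform(
    new (C) StrEqualsNode(control(), memory(TypeAryPtr::CHARS),
                          s1_start, s2_start, cnt));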

View File

@@ -30,10 +30,16 @@
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/countbitsnode.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/idealKit.hpp"
#include "opto/mathexactnode.hpp"
#include "opto/movenode.hpp"
#include "opto/mulnode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/parse.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
@@ -4658,7 +4664,7 @@ bool LibraryCallKit::inline_arraycopy() {
ciKlass* src_k = NULL;
if (!has_src) {
src_k = src_type->speculative_type();
src_k = src_type->speculative_type_not_null();
if (src_k != NULL && src_k->is_array_klass()) {
could_have_src = true;
}
@@ -4666,7 +4672,7 @@ bool LibraryCallKit::inline_arraycopy() {
ciKlass* dest_k = NULL;
if (!has_dest) {
dest_k = dest_type->speculative_type();
dest_k = dest_type->speculative_type_not_null();
if (dest_k != NULL && dest_k->is_array_klass()) {
could_have_dest = true;
}
@@ -4738,13 +4744,13 @@ bool LibraryCallKit::inline_arraycopy() {
ciKlass* src_k = top_src->klass();
ciKlass* dest_k = top_dest->klass();
if (!src_spec) {
src_k = src_type->speculative_type();
src_k = src_type->speculative_type_not_null();
if (src_k != NULL && src_k->is_array_klass()) {
could_have_src = true;
}
}
if (!dest_spec) {
dest_k = dest_type->speculative_type();
dest_k = dest_type->speculative_type_not_null();
if (dest_k != NULL && dest_k->is_array_klass()) {
could_have_dest = true;
}

View File

@@ -27,8 +27,10 @@
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
#include "opto/loopnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/rootnode.hpp"
#include "opto/subnode.hpp"

View File

@@ -28,9 +28,12 @@
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
#include "opto/divnode.hpp"
#include "opto/loopnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/movenode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"

View File

@@ -25,7 +25,9 @@
#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
#include "opto/loopnode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/rootnode.hpp"
//================= Loop Unswitching =====================

View File

@@ -30,6 +30,7 @@
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
#include "opto/divnode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/loopnode.hpp"

View File

@@ -30,6 +30,8 @@
#include "opto/loopnode.hpp"
#include "opto/matcher.hpp"
#include "opto/mulnode.hpp"
#include "opto/movenode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/rootnode.hpp"
#include "opto/subnode.hpp"

View File

@@ -27,14 +27,17 @@
#include "libadt/vectset.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
#include "opto/locknode.hpp"
#include "opto/loopnode.hpp"
#include "opto/macro.hpp"
#include "opto/memnode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/node.hpp"
#include "opto/opaquenode.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"

View File

@@ -26,10 +26,10 @@
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/connode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/movenode.hpp"
#include "opto/opcodes.hpp"
#include "opto/regmask.hpp"
#include "opto/rootnode.hpp"

View File

@@ -31,11 +31,13 @@
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/regmask.hpp"
@@ -2903,59 +2905,6 @@ Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
return mem;
}
//=============================================================================
// Do not match memory edge.
uint StrIntrinsicNode::match_edge(uint idx) const {
return idx == 2 || idx == 3;
}
//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node. Strip out
// control copies
Node *StrIntrinsicNode::Ideal(PhaseGVN *phase, bool can_reshape) {
if (remove_dead_region(phase, can_reshape)) return this;
// Don't bother trying to transform a dead node
if (in(0) && in(0)->is_top()) return NULL;
if (can_reshape) {
Node* mem = phase->transform(in(MemNode::Memory));
// If transformed to a MergeMem, get the desired slice
uint alias_idx = phase->C->get_alias_index(adr_type());
mem = mem->is_MergeMem() ? mem->as_MergeMem()->memory_at(alias_idx) : mem;
if (mem != in(MemNode::Memory)) {
set_req(MemNode::Memory, mem);
return this;
}
}
return NULL;
}
//------------------------------Value------------------------------------------
const Type *StrIntrinsicNode::Value( PhaseTransform *phase ) const {
if (in(0) && phase->type(in(0)) == Type::TOP) return Type::TOP;
return bottom_type();
}
//=============================================================================
//------------------------------match_edge-------------------------------------
// Do not match memory edge
uint EncodeISOArrayNode::match_edge(uint idx) const {
return idx == 2 || idx == 3; // EncodeISOArray src (Binary dst len)
}
//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node. Strip out
// control copies
Node *EncodeISOArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
return remove_dead_region(phase, can_reshape) ? this : NULL;
}
//------------------------------Value------------------------------------------
const Type *EncodeISOArrayNode::Value(PhaseTransform *phase) const {
if (in(0) && phase->type(in(0)) == Type::TOP) return Type::TOP;
return bottom_type();
}
//=============================================================================
MemBarNode::MemBarNode(Compile* C, int alias_idx, Node* precedent)
: MultiNode(TypeFunc::Parms + (precedent == NULL? 0: 1)),

View File

@@ -866,88 +866,6 @@ public:
static bool step_through(Node** np, uint instance_id, PhaseTransform* phase);
};
//------------------------------StrIntrinsic-------------------------------
// Base class for Ideal nodes used in String intrinsic code.
class StrIntrinsicNode: public Node {
public:
StrIntrinsicNode(Node* control, Node* char_array_mem,
Node* s1, Node* c1, Node* s2, Node* c2):
Node(control, char_array_mem, s1, c1, s2, c2) {
}
StrIntrinsicNode(Node* control, Node* char_array_mem,
Node* s1, Node* s2, Node* c):
Node(control, char_array_mem, s1, s2, c) {
}
StrIntrinsicNode(Node* control, Node* char_array_mem,
Node* s1, Node* s2):
Node(control, char_array_mem, s1, s2) {
}
virtual bool depends_only_on_test() const { return false; }
virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; }
virtual uint match_edge(uint idx) const;
virtual uint ideal_reg() const { return Op_RegI; }
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual const Type *Value(PhaseTransform *phase) const;
};
//------------------------------StrComp-------------------------------------
class StrCompNode: public StrIntrinsicNode {
public:
StrCompNode(Node* control, Node* char_array_mem,
Node* s1, Node* c1, Node* s2, Node* c2):
StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {};
virtual int Opcode() const;
virtual const Type* bottom_type() const { return TypeInt::INT; }
};
//------------------------------StrEquals-------------------------------------
class StrEqualsNode: public StrIntrinsicNode {
public:
StrEqualsNode(Node* control, Node* char_array_mem,
Node* s1, Node* s2, Node* c):
StrIntrinsicNode(control, char_array_mem, s1, s2, c) {};
virtual int Opcode() const;
virtual const Type* bottom_type() const { return TypeInt::BOOL; }
};
//------------------------------StrIndexOf-------------------------------------
class StrIndexOfNode: public StrIntrinsicNode {
public:
StrIndexOfNode(Node* control, Node* char_array_mem,
Node* s1, Node* c1, Node* s2, Node* c2):
StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {};
virtual int Opcode() const;
virtual const Type* bottom_type() const { return TypeInt::INT; }
};
//------------------------------AryEq---------------------------------------
class AryEqNode: public StrIntrinsicNode {
public:
AryEqNode(Node* control, Node* char_array_mem, Node* s1, Node* s2):
StrIntrinsicNode(control, char_array_mem, s1, s2) {};
virtual int Opcode() const;
virtual const Type* bottom_type() const { return TypeInt::BOOL; }
};
//------------------------------EncodeISOArray--------------------------------
// encode char[] to byte[] in ISO_8859_1
class EncodeISOArrayNode: public Node {
public:
EncodeISOArrayNode(Node *control, Node* arymem, Node* s1, Node* s2, Node* c): Node(control, arymem, s1, s2, c) {};
virtual int Opcode() const;
virtual bool depends_only_on_test() const { return false; }
virtual const Type* bottom_type() const { return TypeInt::INT; }
virtual const TypePtr* adr_type() const { return TypePtr::BOTTOM; }
virtual uint match_edge(uint idx) const;
virtual uint ideal_reg() const { return Op_RegI; }
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual const Type *Value(PhaseTransform *phase) const;
};
//------------------------------MemBar-----------------------------------------
// There are different flavors of Memory Barriers to match the Java Memory
// Model. Monitor-enter and volatile-load act as Acquires: no following ref

View File

@@ -0,0 +1,398 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "opto/addnode.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
#include "opto/movenode.hpp"
#include "opto/phaseX.hpp"
#include "opto/subnode.hpp"
//=============================================================================
/*
The major change is for CMoveP and StrComp. They have related but slightly
different problems. They both take in TWO oops which are both null-checked
independently before the using Node. After CCP removes the CastPP's they need
to pick up the guarding test edge - in this case TWO control edges. I tried
various solutions, all of which have problems:
(1) Do nothing. This leads to a bug where we hoist a Load from a CMoveP or a
StrComp above a guarding null check. I've seen both cases in normal -Xcomp
testing.
(2) Plug the control edge from 1 of the 2 oops in. Apparent problem here is
to figure out which test post-dominates. The real problem is that it doesn't
matter which one you pick. After you pick one, the dominating-test elider in
IGVN can remove the test and allow you to hoist up to the dominating test on
the chosen oop bypassing the test on the not-chosen oop. Seen in testing.
Oops.
(3) Leave the CastPP's in. This makes the graph more accurate in some sense;
we get to keep around the knowledge that an oop is not-null after some test.
Alas, the CastPP's interfere with GVN (some values are the regular oop, some
are the CastPP of the oop, all merge at Phi's which cannot collapse, etc).
This cost us 10% on SpecJVM, even when I removed some of the more trivial
cases in the optimizer. Removing more useless Phi's started allowing Loads to
illegally float above null checks. I gave up on this approach.
(4) Add BOTH control edges to both tests. Alas, too much code knows that
control edges are in slot-zero ONLY. Many quick asserts fail; no way to do
this one. Note that I really want to allow the CMoveP to float and add both
control edges to the dependent Load op - meaning I can select early but I
cannot Load until I pass both tests.
(5) Do not hoist CMoveP and StrComp. To this end I added the v-call
depends_only_on_test(). No obvious performance loss on Spec, but we are
clearly conservative on CMoveP (also so on StrComp but that's unlikely to
matter ever).
*/
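Option (5) is the one in force, and it amounts to a one-line override; StrIntrinsicNode declares the same override in intrinsicnode.hpp earlier in this diff, and CMoveNode is assumed to carry it in movenode.hpp:

// Pinning sketch: a node answering false here is never hoisted above its
// guarding test by the dominating-test elider or the scheduler.
class PinnedBelowTestNode : public Node {   // name assumed for illustration
 public:
  virtual bool depends_only_on_test() const { return false; }
};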
//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.
// Move constants to the right.
Node *CMoveNode::Ideal(PhaseGVN *phase, bool can_reshape) {
if( in(0) && remove_dead_region(phase, can_reshape) ) return this;
// Don't bother trying to transform a dead node
if( in(0) && in(0)->is_top() ) return NULL;
assert( !phase->eqv(in(Condition), this) &&
!phase->eqv(in(IfFalse), this) &&
!phase->eqv(in(IfTrue), this), "dead loop in CMoveNode::Ideal" );
if( phase->type(in(Condition)) == Type::TOP )
return NULL; // return NULL when Condition is dead
if( in(IfFalse)->is_Con() && !in(IfTrue)->is_Con() ) {
if( in(Condition)->is_Bool() ) {
BoolNode* b = in(Condition)->as_Bool();
BoolNode* b2 = b->negate(phase);
return make( phase->C, in(Control), phase->transform(b2), in(IfTrue), in(IfFalse), _type );
}
}
return NULL;
}
//------------------------------is_cmove_id------------------------------------
// Helper function to check for CMOVE identity. Shared with PhiNode::Identity
Node *CMoveNode::is_cmove_id( PhaseTransform *phase, Node *cmp, Node *t, Node *f, BoolNode *b ) {
// Check for Cmp'ing and CMove'ing same values
if( (phase->eqv(cmp->in(1),f) &&
phase->eqv(cmp->in(2),t)) ||
// Swapped Cmp is OK
(phase->eqv(cmp->in(2),f) &&
phase->eqv(cmp->in(1),t)) ) {
// Give up this identity check for floating points because it may choose an incorrect
// value around 0.0 and -0.0
if ( cmp->Opcode()==Op_CmpF || cmp->Opcode()==Op_CmpD )
return NULL;
// Check for "(t==f)?t:f;" and replace with "f"
if( b->_test._test == BoolTest::eq )
return f;
// Allow the inverted case as well
// Check for "(t!=f)?t:f;" and replace with "t"
if( b->_test._test == BoolTest::ne )
return t;
}
return NULL;
}
//------------------------------Identity---------------------------------------
// Conditional-move is an identity if both inputs are the same, or if the
// test is constant true or false.
Node *CMoveNode::Identity( PhaseTransform *phase ) {
if( phase->eqv(in(IfFalse),in(IfTrue)) ) // C-moving identical inputs?
return in(IfFalse); // Then it doesn't matter
if( phase->type(in(Condition)) == TypeInt::ZERO )
return in(IfFalse); // Always pick left(false) input
if( phase->type(in(Condition)) == TypeInt::ONE )
return in(IfTrue); // Always pick right(true) input
// Check for CMove'ing a constant after comparing against the constant.
// Happens all the time now, since if we compare equality vs a constant in
// the parser, we "know" the variable is constant on one path and we force
// it. Thus code like "if( x==0 ) {/*EMPTY*/}" ends up inserting a
// conditional move: "x = (x==0)?0:x;". Yucko. This fix is slightly more
// general in that we don't need constants.
if( in(Condition)->is_Bool() ) {
BoolNode *b = in(Condition)->as_Bool();
Node *cmp = b->in(1);
if( cmp->is_Cmp() ) {
Node *id = is_cmove_id( phase, cmp, in(IfTrue), in(IfFalse), b );
if( id ) return id;
}
}
return this;
}
//------------------------------Value------------------------------------------
// Result is the meet of inputs
const Type *CMoveNode::Value( PhaseTransform *phase ) const {
if( phase->type(in(Condition)) == Type::TOP )
return Type::TOP;
return phase->type(in(IfFalse))->meet_speculative(phase->type(in(IfTrue)));
}
//------------------------------make-------------------------------------------
// Make a correctly-flavored CMove. Since _type is directly determined
// from the inputs we do not need to specify it here.
CMoveNode *CMoveNode::make( Compile *C, Node *c, Node *bol, Node *left, Node *right, const Type *t ) {
switch( t->basic_type() ) {
case T_INT: return new (C) CMoveINode( bol, left, right, t->is_int() );
case T_FLOAT: return new (C) CMoveFNode( bol, left, right, t );
case T_DOUBLE: return new (C) CMoveDNode( bol, left, right, t );
case T_LONG: return new (C) CMoveLNode( bol, left, right, t->is_long() );
case T_OBJECT: return new (C) CMovePNode( c, bol, left, right, t->is_oopptr() );
case T_ADDRESS: return new (C) CMovePNode( c, bol, left, right, t->is_ptr() );
case T_NARROWOOP: return new (C) CMoveNNode( c, bol, left, right, t );
default:
ShouldNotReachHere();
return NULL;
}
}
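A hedged usage sketch of the factory (caller-side names ctrl/bol/x/y are assumed): select between two transformed values under a Bool, with the type computed the same way Value() does; only the pointer flavors consume the control input c:

// Sketch, not from this diff:
const Type* t = phase->type(x)->meet_speculative(phase->type(y));
Node* sel = phase->transform(CMoveNode::make(phase->C, ctrl, bol, x, y, t));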
//=============================================================================
//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.
// Check for conversions to boolean
Node *CMoveINode::Ideal(PhaseGVN *phase, bool can_reshape) {
// Try generic ideal's first
Node *x = CMoveNode::Ideal(phase, can_reshape);
if( x ) return x;
// If zero is on the left (false-case, no-move-case) it must mean another
// constant is on the right (otherwise the shared CMove::Ideal code would
// have moved the constant to the right). This situation is bad for Intel
// and a don't-care for Sparc. It's bad for Intel because the zero has to
// be manifested in a register with a XOR which kills flags, which are live
// on input to the CMoveI, leading to a situation which causes excessive
// spilling on Intel. For Sparc, if the zero is on the left the Sparc will
// zero a register via G0 and conditionally-move the other constant. If the
// zero is on the right, the Sparc will load the first constant with a
// 13-bit set-lo and conditionally move G0. See bug 4677505.
if( phase->type(in(IfFalse)) == TypeInt::ZERO && !(phase->type(in(IfTrue)) == TypeInt::ZERO) ) {
if( in(Condition)->is_Bool() ) {
BoolNode* b = in(Condition)->as_Bool();
BoolNode* b2 = b->negate(phase);
return make( phase->C, in(Control), phase->transform(b2), in(IfTrue), in(IfFalse), _type );
}
}
// Now check for booleans
int flip = 0;
// Check for picking from zero/one
if( phase->type(in(IfFalse)) == TypeInt::ZERO && phase->type(in(IfTrue)) == TypeInt::ONE ) {
flip = 1 - flip;
} else if( phase->type(in(IfFalse)) == TypeInt::ONE && phase->type(in(IfTrue)) == TypeInt::ZERO ) {
} else return NULL;
// Check for eq/ne test
if( !in(1)->is_Bool() ) return NULL;
BoolNode *bol = in(1)->as_Bool();
if( bol->_test._test == BoolTest::eq ) {
} else if( bol->_test._test == BoolTest::ne ) {
flip = 1-flip;
} else return NULL;
// Check for vs 0 or 1
if( !bol->in(1)->is_Cmp() ) return NULL;
const CmpNode *cmp = bol->in(1)->as_Cmp();
if( phase->type(cmp->in(2)) == TypeInt::ZERO ) {
} else if( phase->type(cmp->in(2)) == TypeInt::ONE ) {
// Allow cmp-vs-1 if the other input is bounded by 0-1
if( phase->type(cmp->in(1)) != TypeInt::BOOL )
return NULL;
flip = 1 - flip;
} else return NULL;
// Convert to a bool (flipped)
// Build int->bool conversion
#ifndef PRODUCT
if( PrintOpto ) tty->print_cr("CMOV to I2B");
#endif
Node *n = new (phase->C) Conv2BNode( cmp->in(1) );
if( flip )
n = new (phase->C) XorINode( phase->transform(n), phase->intcon(1) );
return n;
}
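As a standalone cross-check of the rewrite above (a plain C++ analogy, not VM code): Conv2B is the {0,1} normalization of x != 0, and the flipped comparison costs one XOR with 1:

#include <cstdio>

static int conv2b(int x)  { return x != 0 ? 1 : 0; }    // Conv2B
static int cmov_eq(int x) { return (x == 0) ? 0 : 1; }  // == conv2b(x)
static int cmov_ne(int x) { return (x != 0) ? 0 : 1; }  // == conv2b(x) ^ 1

int main() {
  const int xs[] = { 0, 1, 42 };
  for (int i = 0; i < 3; i++) {
    int x = xs[i];
    std::printf("x=%2d  eq: %d %d   ne: %d %d\n", x,
                cmov_eq(x), conv2b(x), cmov_ne(x), conv2b(x) ^ 1);
  }
  return 0;
}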
//=============================================================================
//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.
// Check for absolute value
Node *CMoveFNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// Try generic ideal's first
Node *x = CMoveNode::Ideal(phase, can_reshape);
if( x ) return x;
int cmp_zero_idx = 0; // Index of compare input where to look for zero
int phi_x_idx = 0; // Index of phi input where to find naked x
// Find the Bool
if( !in(1)->is_Bool() ) return NULL;
BoolNode *bol = in(1)->as_Bool();
// Check bool sense
switch( bol->_test._test ) {
case BoolTest::lt: cmp_zero_idx = 1; phi_x_idx = IfTrue; break;
case BoolTest::le: cmp_zero_idx = 2; phi_x_idx = IfFalse; break;
case BoolTest::gt: cmp_zero_idx = 2; phi_x_idx = IfTrue; break;
case BoolTest::ge: cmp_zero_idx = 1; phi_x_idx = IfFalse; break;
default: return NULL; break;
}
// Find zero input of CmpF; the other input is being abs'd
Node *cmpf = bol->in(1);
if( cmpf->Opcode() != Op_CmpF ) return NULL;
Node *X = NULL;
bool flip = false;
if( phase->type(cmpf->in(cmp_zero_idx)) == TypeF::ZERO ) {
X = cmpf->in(3 - cmp_zero_idx);
} else if (phase->type(cmpf->in(3 - cmp_zero_idx)) == TypeF::ZERO) {
// The test is inverted, we should invert the result...
X = cmpf->in(cmp_zero_idx);
flip = true;
} else {
return NULL;
}
// If X is found on the appropriate phi input, find the subtract on the other
if( X != in(phi_x_idx) ) return NULL;
int phi_sub_idx = phi_x_idx == IfTrue ? IfFalse : IfTrue;
Node *sub = in(phi_sub_idx);
// Allow only SubF(0,X) and fail out for all others; NegF is not OK
if( sub->Opcode() != Op_SubF ||
sub->in(2) != X ||
phase->type(sub->in(1)) != TypeF::ZERO ) return NULL;
Node *abs = new (phase->C) AbsFNode( X );
if( flip )
abs = new (phase->C) SubFNode(sub->in(1), phase->transform(abs));
return abs;
}
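For matching this against source shapes: only an explicit SubF(0, x) on the other phi input is accepted. A hedged source-level illustration:

// What the rewrite targets (illustration, not from this diff):
static float abs_by_cmove(float x) {
  return (x > 0.0f) ? x : 0.0f - x;   // recognized, folds to AbsF(x)
}
// "(x > 0.0f) ? x : -x" uses NegF and is deliberately NOT matched.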
//=============================================================================
//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.
// Check for absolute value
Node *CMoveDNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// Try generic ideal's first
Node *x = CMoveNode::Ideal(phase, can_reshape);
if( x ) return x;
int cmp_zero_idx = 0; // Index of compare input where to look for zero
int phi_x_idx = 0; // Index of phi input where to find naked x
// Find the Bool
if( !in(1)->is_Bool() ) return NULL;
BoolNode *bol = in(1)->as_Bool();
// Check bool sense
switch( bol->_test._test ) {
case BoolTest::lt: cmp_zero_idx = 1; phi_x_idx = IfTrue; break;
case BoolTest::le: cmp_zero_idx = 2; phi_x_idx = IfFalse; break;
case BoolTest::gt: cmp_zero_idx = 2; phi_x_idx = IfTrue; break;
case BoolTest::ge: cmp_zero_idx = 1; phi_x_idx = IfFalse; break;
default: return NULL; break;
}
// Find zero input of CmpD; the other input is being abs'd
Node *cmpd = bol->in(1);
if( cmpd->Opcode() != Op_CmpD ) return NULL;
Node *X = NULL;
bool flip = false;
if( phase->type(cmpd->in(cmp_zero_idx)) == TypeD::ZERO ) {
X = cmpd->in(3 - cmp_zero_idx);
} else if (phase->type(cmpd->in(3 - cmp_zero_idx)) == TypeD::ZERO) {
// The test is inverted, we should invert the result...
X = cmpd->in(cmp_zero_idx);
flip = true;
} else {
return NULL;
}
// If X is found on the appropriate phi input, find the subtract on the other
if( X != in(phi_x_idx) ) return NULL;
int phi_sub_idx = phi_x_idx == IfTrue ? IfFalse : IfTrue;
Node *sub = in(phi_sub_idx);
// Allow only SubD(0,X) and fail out for all others; NegD is not OK
if( sub->Opcode() != Op_SubD ||
sub->in(2) != X ||
phase->type(sub->in(1)) != TypeD::ZERO ) return NULL;
Node *abs = new (phase->C) AbsDNode( X );
if( flip )
abs = new (phase->C) SubDNode(sub->in(1), phase->transform(abs));
return abs;
}
//------------------------------Value------------------------------------------
const Type *MoveL2DNode::Value( PhaseTransform *phase ) const {
const Type *t = phase->type( in(1) );
if( t == Type::TOP ) return Type::TOP;
const TypeLong *tl = t->is_long();
if( !tl->is_con() ) return bottom_type();
JavaValue v;
v.set_jlong(tl->get_con());
return TypeD::make( v.get_jdouble() );
}
//------------------------------Value------------------------------------------
const Type *MoveI2FNode::Value( PhaseTransform *phase ) const {
const Type *t = phase->type( in(1) );
if( t == Type::TOP ) return Type::TOP;
const TypeInt *ti = t->is_int();
if( !ti->is_con() ) return bottom_type();
JavaValue v;
v.set_jint(ti->get_con());
return TypeF::make( v.get_jfloat() );
}
//------------------------------Value------------------------------------------
const Type *MoveF2INode::Value( PhaseTransform *phase ) const {
const Type *t = phase->type( in(1) );
if( t == Type::TOP ) return Type::TOP;
if( t == Type::FLOAT ) return TypeInt::INT;
const TypeF *tf = t->is_float_constant();
JavaValue v;
v.set_jfloat(tf->getf());
return TypeInt::make( v.get_jint() );
}
//------------------------------Value------------------------------------------
const Type *MoveD2LNode::Value( PhaseTransform *phase ) const {
const Type *t = phase->type( in(1) );
if( t == Type::TOP ) return Type::TOP;
if( t == Type::DOUBLE ) return TypeLong::LONG;
const TypeD *td = t->is_double_constant();
JavaValue v;
v.set_jdouble(td->getd());
return TypeLong::make( v.get_jlong() );
}
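The four Value() methods above constant-fold raw bit moves, C2's view of Float.intBitsToFloat and friends. A self-contained illustration of the MoveI2F case (plain C++ analogy, not VM code):

#include <cstdint>
#include <cstring>
#include <cstdio>

// Bit-preserving int->float move: the operation MoveI2FNode::Value() folds
// when its input is a constant.
static float move_i2f(int32_t bits) {
  float f;
  std::memcpy(&f, &bits, sizeof f);   // reinterpret the bits, no conversion
  return f;
}

int main() {
  std::printf("%f\n", move_i2f(0x3f800000));   // 0x3f800000 == 1.0f
  return 0;
}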

Some files were not shown because too many files have changed in this diff.