commit e2b69dcd29
Author: J. Duke
Date:   2017-07-05 19:10:56 +02:00
140 changed files with 1960 additions and 3064 deletions

@ -227,3 +227,4 @@ d2dcb110e9dbaf9903c05b211df800e78e4b394e jdk8-b100
b7e64be81c8a7690703df5711f4fc2375da8a9cb jdk8-b103
96c1b9b7524b52c3fcefc90ffad4c767396727c8 jdk8-b104
5166118c59178b5d31001bc4058e92486ee07d9b jdk8-b105
8e7b4d9fb00fdf1334376aeac050c9bca6d1b383 jdk8-b106

@ -3818,7 +3818,7 @@ fi
#CUSTOM_AUTOCONF_INCLUDE
# Do not change or remove the following line, it is needed for consistency checks:
DATE_WHEN_GENERATED=1377850299
DATE_WHEN_GENERATED=1378914658
###############################################################################
#
@ -6775,6 +6775,18 @@ test -n "$target_alias" &&
VAR_CPU_BITS=64
VAR_CPU_ENDIAN=big
;;
s390)
VAR_CPU=s390
VAR_CPU_ARCH=s390
VAR_CPU_BITS=32
VAR_CPU_ENDIAN=big
;;
s390x)
VAR_CPU=s390x
VAR_CPU_ARCH=s390
VAR_CPU_BITS=64
VAR_CPU_ENDIAN=big
;;
sparc)
VAR_CPU=sparc
VAR_CPU_ARCH=sparc
@ -6883,6 +6895,18 @@ $as_echo "$OPENJDK_BUILD_OS-$OPENJDK_BUILD_CPU" >&6; }
VAR_CPU_BITS=64
VAR_CPU_ENDIAN=big
;;
s390)
VAR_CPU=s390
VAR_CPU_ARCH=s390
VAR_CPU_BITS=32
VAR_CPU_ENDIAN=big
;;
s390x)
VAR_CPU=s390x
VAR_CPU_ARCH=s390
VAR_CPU_BITS=64
VAR_CPU_ENDIAN=big
;;
sparc)
VAR_CPU=sparc
VAR_CPU_ARCH=sparc

@ -60,6 +60,18 @@ AC_DEFUN([PLATFORM_EXTRACT_VARS_FROM_CPU],
VAR_CPU_BITS=64
VAR_CPU_ENDIAN=big
;;
s390)
VAR_CPU=s390
VAR_CPU_ARCH=s390
VAR_CPU_BITS=32
VAR_CPU_ENDIAN=big
;;
s390x)
VAR_CPU=s390x
VAR_CPU_ARCH=s390
VAR_CPU_BITS=64
VAR_CPU_ENDIAN=big
;;
sparc)
VAR_CPU=sparc
VAR_CPU_ARCH=sparc

@ -373,3 +373,5 @@ c4697c1c448416108743b59118b4a2498b339d0c jdk8-b102
c1604d5885a6f2adc0bcea2fa142a8f6bafad2f0 hs25-b47
acac3bde66b2c22791c257a8d99611d6d08c6713 jdk8-b105
18b4798adbc42c6fa16f5ecb7d5cd3ca130754bf hs25-b48
aed585cafc0d9655726af6d1e1081d1c94cb3b5c jdk8-b106
50794d8ac11c9579b41dec4de23b808fef9f34a1 hs25-b49

@ -354,9 +354,16 @@ public class InstanceKlass extends Klass {
public boolean getIsMarkedDependent() { return isMarkedDependent.getValue(this) != 0; }
public long getVtableLen() { return vtableLen.getValue(this); }
public long getItableLen() { return itableLen.getValue(this); }
public Symbol getGenericSignature() { return getConstants().getSymbolAt(genericSignatureIndex.getValue(this)); }
public long majorVersion() { return majorVersion.getValue(this); }
public long minorVersion() { return minorVersion.getValue(this); }
public Symbol getGenericSignature() {
long index = genericSignatureIndex.getValue(this);
if (index != 0) {
return getConstants().getSymbolAt(index);
} else {
return null;
}
}
// "size helper" == instance size in words
public long getSizeHelper() {

@ -129,16 +129,21 @@ ifeq ($(USE_CLANG), true)
# We only use precompiled headers for the JVM build
CFLAGS += $(VM_PCH_FLAG)
# There are some files which don't like precompiled headers
# The following files are build with 'OPT_CFLAGS/NOOPT' (-O0) in the opt build.
# But Clang doesn't support a precompiled header which was compiled with -O3
# to be used in a compilation unit which uses '-O0'. We could also prepare an
# extra '-O0' PCH file for the opt build and use it here, but it's probably
# not worth the effort as long as only two files need this special handling.
# The following files are compiled at various optimization
# levels due to optimization issues encountered at the
# 'OPT_CFLAGS_DEFAULT' level. The Clang compiler issues a compile
# time error if there is an optimization level specification
# skew between the PCH file and the C++ file. Especially if the
# PCH file is compiled at a higher optimization level than
# the C++ file. One solution might be to prepare extra optimization
# level specific PCH files for the opt build and use them here, but
# it's probably not worth the effort as long as only a few files
# need this special handling.
PCH_FLAG/loopTransform.o = $(PCH_FLAG/NO_PCH)
PCH_FLAG/sharedRuntimeTrig.o = $(PCH_FLAG/NO_PCH)
PCH_FLAG/sharedRuntimeTrans.o = $(PCH_FLAG/NO_PCH)
PCH_FLAG/unsafe.o = $(PCH_FLAG/NO_PCH)
endif
else # ($(USE_CLANG), true)
@ -306,6 +311,7 @@ OPT_CFLAGS/NOOPT=-O0
ifeq ($(USE_CLANG), true)
ifeq ($(shell expr $(CC_VER_MAJOR) = 4 \& $(CC_VER_MINOR) = 2), 1)
OPT_CFLAGS/loopTransform.o += $(OPT_CFLAGS/NOOPT)
OPT_CFLAGS/unsafe.o += -O1
endif
else
# 6835796. Problem in GCC 4.3.0 with mulnode.o optimized compilation.

@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2013
HS_MAJOR_VER=25
HS_MINOR_VER=0
HS_BUILD_NUMBER=48
HS_BUILD_NUMBER=49
JDK_MAJOR_VER=1
JDK_MINOR_VER=8

@ -82,6 +82,7 @@ for /F %%i in ('sh %HotSpotWorkSpace%/make/windows/get_msc_ver.sh') do set %%i
echo **************************************************************
set ProjectFile=%HotSpotBuildSpace%\jvm.vcproj
echo MSC_VER = "%MSC_VER%"
if "%MSC_VER%" == "1200" (
set ProjectFile=%HotSpotBuildSpace%\jvm.dsp
echo Will generate VC6 project {unsupported}
@ -96,11 +97,17 @@ if "%MSC_VER%" == "1600" (
echo Will generate VC10 {Visual Studio 2010}
set ProjectFile=%HotSpotBuildSpace%\jvm.vcxproj
) else (
if "%MSC_VER%" == "1700" (
echo Will generate VC10 {compatible with Visual Studio 2012}
echo After opening in VS 2012, click "Update" when prompted.
set ProjectFile=%HotSpotBuildSpace%\jvm.vcxproj
) else (
echo Will generate VC7 project {Visual Studio 2003 .NET}
)
)
)
)
)
echo %ProjectFile%
echo **************************************************************

@ -69,6 +69,13 @@ VcVersion=VC9
VcVersion=VC10
ProjectFile=jvm.vcxproj
!elseif "$(MSC_VER)" == "1700"
# This is VS2012, but it loads VS10 projects just fine (and will
# upgrade them automatically to VS2012 format).
VcVersion=VC10
ProjectFile=jvm.vcxproj
!else
VcVersion=VC7

@ -307,7 +307,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
assert(a_byte == *start++, "should be the same code");
}
#endif
} else if (_id == load_mirror_id) {
} else if (_id == load_mirror_id || _id == load_appendix_id) {
// produce a copy of the load mirror instruction for use by the being initialized case
#ifdef ASSERT
address start = __ pc();
@ -384,6 +384,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
case access_field_id: target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
case load_klass_id: target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
case load_mirror_id: target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
default: ShouldNotReachHere();
}
__ bind(call_patch);
@ -397,7 +398,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
ce->add_call_info_here(_info);
__ br(Assembler::always, false, Assembler::pt, _patch_site_entry);
__ delayed()->nop();
if (_id == load_klass_id || _id == load_mirror_id) {
if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
CodeSection* cs = __ code_section();
address pc = (address)_pc_start;
RelocIterator iter(cs, pc, pc + 1);

@ -520,7 +520,7 @@ void LIR_Assembler::jobject2reg(jobject o, Register reg) {
void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
// Allocate a new index in table to hold the object once it's been patched
int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_mirror_id, oop_index);
PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);
AddressLiteral addrlit(NULL, oop_Relocation::spec(oop_index));
assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc");

@ -804,6 +804,12 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
}
break;
case load_appendix_patching_id:
{ __ set_info("load_appendix_patching", dont_gc_arguments);
oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
}
break;
case dtrace_object_alloc_id:
{ // O0: object
__ set_info("dtrace_object_alloc", dont_gc_arguments);

@ -402,6 +402,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
case access_field_id: target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
case load_klass_id: target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
case load_mirror_id: target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
default: ShouldNotReachHere();
}
__ bind(call_patch);
@ -419,7 +420,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
for (int j = __ offset() ; j < jmp_off + 5 ; j++ ) {
__ nop();
}
if (_id == load_klass_id || _id == load_mirror_id) {
if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
CodeSection* cs = __ code_section();
RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, reloc_type, relocInfo::none);

@ -362,7 +362,7 @@ int LIR_Assembler::check_icache() {
void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
jobject o = NULL;
PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_mirror_id);
PatchingStub* patch = new PatchingStub(_masm, patching_id(info));
__ movoop(reg, o);
patching_epilog(patch, lir_patch_normal, reg, info);
}

@ -1499,6 +1499,13 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
}
break;
case load_appendix_patching_id:
{ StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments);
// we should set up register map
oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
}
break;
case dtrace_object_alloc_id:
{ // rax,: object
StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);

@ -2767,7 +2767,19 @@ void os::numa_make_global(char *addr, size_t bytes) {
Linux::numa_interleave_memory(addr, bytes);
}
// Define for numa_set_bind_policy(int). Setting the argument to 0 will set the
// bind policy to MPOL_PREFERRED for the current thread.
#define USE_MPOL_PREFERRED 0
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
// To make NUMA and large pages more robust when both enabled, we need to ease
// the requirements on where the memory should be allocated. MPOL_BIND is the
// default policy and it will force memory to be allocated on the specified
// node. Changing this to MPOL_PREFERRED will prefer to allocate the memory on
// the specified node, but will not force it. Using this policy will prevent
// getting SIGBUS when trying to allocate large pages on NUMA nodes with no
// free large pages.
Linux::numa_set_bind_policy(USE_MPOL_PREFERRED);
Linux::numa_tonode_memory(addr, bytes, lgrp_hint);
}
@ -2869,6 +2881,8 @@ bool os::Linux::libnuma_init() {
libnuma_dlsym(handle, "numa_tonode_memory")));
set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t,
libnuma_dlsym(handle, "numa_interleave_memory")));
set_numa_set_bind_policy(CAST_TO_FN_PTR(numa_set_bind_policy_func_t,
libnuma_dlsym(handle, "numa_set_bind_policy")));
if (numa_available() != -1) {
@ -2935,6 +2949,7 @@ os::Linux::numa_max_node_func_t os::Linux::_numa_max_node;
os::Linux::numa_available_func_t os::Linux::_numa_available;
os::Linux::numa_tonode_memory_func_t os::Linux::_numa_tonode_memory;
os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory;
os::Linux::numa_set_bind_policy_func_t os::Linux::_numa_set_bind_policy;
unsigned long* os::Linux::_numa_all_nodes;
bool os::pd_uncommit_memory(char* addr, size_t size) {
@ -2943,6 +2958,53 @@ bool os::pd_uncommit_memory(char* addr, size_t size) {
return res != (uintptr_t) MAP_FAILED;
}
static
address get_stack_commited_bottom(address bottom, size_t size) {
address nbot = bottom;
address ntop = bottom + size;
size_t page_sz = os::vm_page_size();
unsigned pages = size / page_sz;
unsigned char vec[1];
unsigned imin = 1, imax = pages + 1, imid;
int mincore_return_value;
while (imin < imax) {
imid = (imax + imin) / 2;
nbot = ntop - (imid * page_sz);
// Use a trick with mincore to check whether the page is mapped or not.
// mincore sets vec to 1 if page resides in memory and to 0 if page
// is swapped out; but if the page we are asking for is unmapped,
// it returns -1 and sets errno to ENOMEM
mincore_return_value = mincore(nbot, page_sz, vec);
if (mincore_return_value == -1) {
// Page is not mapped go up
// to find first mapped page
if (errno != EAGAIN) {
assert(errno == ENOMEM, "Unexpected mincore errno");
imax = imid;
}
} else {
// Page is mapped go down
// to find first not mapped page
imin = imid + 1;
}
}
nbot = nbot + page_sz;
// Adjust stack bottom one page up if last checked page is not mapped
if (mincore_return_value == -1) {
nbot = nbot + page_sz;
}
return nbot;
}
// Linux uses a growable mapping for the stack, and if the mapping for
// the stack guard pages is not removed when we detach a thread the
// stack cannot grow beyond the pages where the stack guard was
@ -2957,59 +3019,37 @@ bool os::pd_uncommit_memory(char* addr, size_t size) {
// So, we need to know the extent of the stack mapping when
// create_stack_guard_pages() is called.
// Find the bounds of the stack mapping. Return true for success.
//
// We only need this for stacks that are growable: at the time of
// writing thread stacks don't use growable mappings (i.e. those
// created with MAP_GROWSDOWN), and aren't marked "[stack]", so this
// only applies to the main thread.
static
bool get_stack_bounds(uintptr_t *bottom, uintptr_t *top) {
char buf[128];
int fd, sz;
if ((fd = ::open("/proc/self/maps", O_RDONLY)) < 0) {
return false;
}
const char kw[] = "[stack]";
const int kwlen = sizeof(kw)-1;
// Address part of /proc/self/maps couldn't be more than 128 bytes
while ((sz = os::get_line_chars(fd, buf, sizeof(buf))) > 0) {
if (sz > kwlen && ::memcmp(buf+sz-kwlen, kw, kwlen) == 0) {
// Extract addresses
if (sscanf(buf, "%" SCNxPTR "-%" SCNxPTR, bottom, top) == 2) {
uintptr_t sp = (uintptr_t) __builtin_frame_address(0);
if (sp >= *bottom && sp <= *top) {
::close(fd);
return true;
}
}
}
}
::close(fd);
return false;
}
// If the (growable) stack mapping already extends beyond the point
// where we're going to put our guard pages, truncate the mapping at
// that point by munmap()ping it. This ensures that when we later
// munmap() the guard pages we don't leave a hole in the stack
// mapping. This only affects the main/initial thread, but guard
// against future OS changes
// mapping. This only affects the main/initial thread
bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
uintptr_t stack_extent, stack_base;
bool chk_bounds = NOT_DEBUG(os::Linux::is_initial_thread()) DEBUG_ONLY(true);
if (chk_bounds && get_stack_bounds(&stack_extent, &stack_base)) {
assert(os::Linux::is_initial_thread(),
"growable stack in non-initial thread");
if (stack_extent < (uintptr_t)addr)
::munmap((void*)stack_extent, (uintptr_t)addr - stack_extent);
if (os::Linux::is_initial_thread()) {
// As we manually grow stack up to bottom inside create_attached_thread(),
// it's likely that os::Linux::initial_thread_stack_bottom is mapped and
// we don't need to do anything special.
// Check it first, before calling heavy function.
uintptr_t stack_extent = (uintptr_t) os::Linux::initial_thread_stack_bottom();
unsigned char vec[1];
if (mincore((address)stack_extent, os::vm_page_size(), vec) == -1) {
// Fallback to slow path on all errors, including EAGAIN
stack_extent = (uintptr_t) get_stack_commited_bottom(
os::Linux::initial_thread_stack_bottom(),
(size_t)addr - stack_extent);
}
if (stack_extent < (uintptr_t)addr) {
::munmap((void*)stack_extent, (uintptr_t)(addr - stack_extent));
}
}
return os::commit_memory(addr, size, !ExecMem);
@ -3018,13 +3058,13 @@ bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
// If this is a growable mapping, remove the guard pages entirely by
// munmap()ping them. If not, just call uncommit_memory(). This only
// affects the main/initial thread, but guard against future OS changes
// It's safe to always unmap guard pages for initial thread because we
// always place it right after end of the mapped region
bool os::remove_stack_guard_pages(char* addr, size_t size) {
uintptr_t stack_extent, stack_base;
bool chk_bounds = NOT_DEBUG(os::Linux::is_initial_thread()) DEBUG_ONLY(true);
if (chk_bounds && get_stack_bounds(&stack_extent, &stack_base)) {
assert(os::Linux::is_initial_thread(),
"growable stack in non-initial thread");
if (os::Linux::is_initial_thread()) {
return ::munmap(addr, size) == 0;
}
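
For readers unfamiliar with the mincore() trick used by get_stack_commited_bottom() above: mincore() succeeds for any page that is mapped (resident or swapped out) and fails with errno == ENOMEM for an unmapped page. The minimal Linux-only sketch below, with an invented helper name, shows that probe in isolation; it is an illustration, not part of the patch.

#include <sys/mman.h>
#include <unistd.h>
#include <cerrno>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Hypothetical helper for illustration: returns true if the page containing
// 'addr' is currently mapped in this process.
static bool probe_page_mapped(const void* addr) {
  long page_sz = sysconf(_SC_PAGESIZE);
  // mincore() requires a page-aligned starting address.
  void* page = (void*)((uintptr_t)addr & ~(uintptr_t)(page_sz - 1));
  unsigned char vec[1];
  if (mincore(page, (size_t)page_sz, vec) == -1) {
    // ENOMEM: the range is not mapped at all (vec is not filled in).
    return false;
  }
  return true;  // mapped; vec[0] & 1 would additionally say whether it is resident
}

int main() {
  int on_stack = 0;
  std::printf("stack page mapped: %d\n", probe_page_mapped(&on_stack) ? 1 : 0);
  std::printf("page 0 mapped:     %d\n", probe_page_mapped(NULL) ? 1 : 0);
  return 0;
}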

@ -235,6 +235,7 @@ private:
typedef int (*numa_available_func_t)(void);
typedef int (*numa_tonode_memory_func_t)(void *start, size_t size, int node);
typedef void (*numa_interleave_memory_func_t)(void *start, size_t size, unsigned long *nodemask);
typedef void (*numa_set_bind_policy_func_t)(int policy);
static sched_getcpu_func_t _sched_getcpu;
static numa_node_to_cpus_func_t _numa_node_to_cpus;
@ -242,6 +243,7 @@ private:
static numa_available_func_t _numa_available;
static numa_tonode_memory_func_t _numa_tonode_memory;
static numa_interleave_memory_func_t _numa_interleave_memory;
static numa_set_bind_policy_func_t _numa_set_bind_policy;
static unsigned long* _numa_all_nodes;
static void set_sched_getcpu(sched_getcpu_func_t func) { _sched_getcpu = func; }
@ -250,6 +252,7 @@ private:
static void set_numa_available(numa_available_func_t func) { _numa_available = func; }
static void set_numa_tonode_memory(numa_tonode_memory_func_t func) { _numa_tonode_memory = func; }
static void set_numa_interleave_memory(numa_interleave_memory_func_t func) { _numa_interleave_memory = func; }
static void set_numa_set_bind_policy(numa_set_bind_policy_func_t func) { _numa_set_bind_policy = func; }
static void set_numa_all_nodes(unsigned long* ptr) { _numa_all_nodes = ptr; }
static int sched_getcpu_syscall(void);
public:
@ -267,6 +270,11 @@ public:
_numa_interleave_memory(start, size, _numa_all_nodes);
}
}
static void numa_set_bind_policy(int policy) {
if (_numa_set_bind_policy != NULL) {
_numa_set_bind_policy(policy);
}
}
static int get_node_by_cpu(int cpu_id);
};
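
As with the other libnuma entry points in this header, numa_set_bind_policy is resolved with dlsym at runtime and only called when present, so the VM still works against a libnuma that does not export it. A standalone sketch of that pattern is below (link with -ldl); everything except the numa_set_bind_policy name and signature is invented for illustration.

#include <dlfcn.h>
#include <cstdio>

// Matches libnuma's numa_set_bind_policy(int). Per the patch's comment,
// passing 0 selects the MPOL_PREFERRED behaviour ("prefer this node but do
// not force it"), which is what USE_MPOL_PREFERRED encodes.
typedef void (*numa_set_bind_policy_func_t)(int);

int main() {
  void* handle = dlopen("libnuma.so.1", RTLD_LAZY);
  numa_set_bind_policy_func_t set_bind_policy = 0;
  if (handle != 0) {
    set_bind_policy = (numa_set_bind_policy_func_t)dlsym(handle, "numa_set_bind_policy");
  }
  // NULL-guarded call, mirroring the wrapper added to os_linux.hpp.
  if (set_bind_policy != 0) {
    set_bind_policy(0);
    std::printf("numa_set_bind_policy(0) called\n");
  } else {
    std::printf("numa_set_bind_policy not available; nothing to do\n");
  }
  if (handle != 0) dlclose(handle);
  return 0;
}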

@ -30,6 +30,8 @@
#include <unistd.h>
#include <sys/resource.h>
#include <sys/utsname.h>
#include <pthread.h>
#include <signal.h>
// Check core dump limit and report possible place where core can be found
@ -320,11 +322,17 @@ os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() {
* The callback is supposed to provide the method that should be protected.
*/
bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
sigset_t saved_sig_mask;
assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread");
assert(!WatcherThread::watcher_thread()->has_crash_protection(),
"crash_protection already set?");
if (sigsetjmp(_jmpbuf, 1) == 0) {
// we cannot rely on sigsetjmp/siglongjmp to save/restore the signal mask
// since on at least some systems (OS X) siglongjmp will restore the mask
// for the process, not the thread
pthread_sigmask(0, NULL, &saved_sig_mask);
if (sigsetjmp(_jmpbuf, 0) == 0) {
// make sure we can see in the signal handler that we have crash protection
// installed
WatcherThread::watcher_thread()->set_crash_protection(this);
@ -334,6 +342,7 @@ bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
return true;
}
// this happens when we siglongjmp() back
pthread_sigmask(SIG_SETMASK, &saved_sig_mask, NULL);
WatcherThread::watcher_thread()->set_crash_protection(NULL);
return false;
}
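
The signal-mask handling above exists because sigsetjmp(buf, 1)/siglongjmp cannot be trusted to restore the mask per-thread on every platform (the comment singles out OS X), so the mask is captured with pthread_sigmask before sigsetjmp(_jmpbuf, 0) and restored by hand after the long jump. The self-contained sketch below mirrors that structure with invented names and a deliberately crashing callee; it illustrates the pattern, it is not the HotSpot code.

#include <csetjmp>
#include <csignal>
#include <cstdio>
#include <pthread.h>

static sigjmp_buf g_jmpbuf;

static void crash_handler(int) {
  siglongjmp(g_jmpbuf, 1);  // jump back; the mask is restored manually afterwards
}

// Hypothetical helper mirroring os::WatcherThreadCrashProtection::call():
// returns true if fn() ran to completion, false if we longjmp'ed out of a crash.
static bool call_protected(void (*fn)()) {
  sigset_t saved_sig_mask;
  // Query (do not change) this thread's signal mask so it can be restored later.
  pthread_sigmask(SIG_SETMASK, NULL, &saved_sig_mask);

  struct sigaction sa;
  sa.sa_handler = crash_handler;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = 0;
  sigaction(SIGSEGV, &sa, NULL);

  if (sigsetjmp(g_jmpbuf, 0) == 0) {  // 0: do not ask siglongjmp to restore the mask
    fn();
    return true;
  }
  // Reached via siglongjmp(): SIGSEGV is still blocked here, so unblock it by
  // restoring the mask saved on entry.
  pthread_sigmask(SIG_SETMASK, &saved_sig_mask, NULL);
  return false;
}

static void crashes() { *(volatile int*)0 = 42; }

int main() {
  std::printf("protected call returned %s\n", call_protected(crashes) ? "true" : "false");
  return 0;
}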

@ -44,6 +44,6 @@ define_pd_global(uintx,JVMInvokeMethodSlack, 10*K);
define_pd_global(intx, CompilerThreadStackSize, 0);
// Used on 64 bit platforms for UseCompressedOops base address
define_pd_global(uintx,HeapBaseMinAddress, 256*M);
define_pd_global(uintx,HeapBaseMinAddress, 2*G);
#endif // OS_CPU_SOLARIS_X86_VM_GLOBALS_SOLARIS_X86_HPP

@ -106,10 +106,12 @@ public class CallSite {
" (" + getMethod().getBytes() + " bytes) " + getReason());
}
}
stream.printf(" (end time: %6.4f", getTimeStamp());
if (getEndNodes() > 0) {
stream.printf(" (end time: %6.4f nodes: %d live: %d)", getTimeStamp(), getEndNodes(), getEndLiveNodes());
stream.printf(" nodes: %d live: %d", getEndNodes(), getEndLiveNodes());
}
stream.println("");
stream.println(")");
if (getReceiver() != null) {
emit(stream, indent + 4);
// stream.println("type profile " + method.holder + " -> " + receiver + " (" +

@ -207,7 +207,12 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants
}
String search(Attributes attr, String name) {
return search(attr, name, null);
String result = attr.getValue(name);
if (result != null) {
return result;
} else {
throw new InternalError("can't find " + name);
}
}
String search(Attributes attr, String name, String defaultValue) {
@ -215,13 +220,7 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants
if (result != null) {
return result;
}
if (defaultValue != null) {
return defaultValue;
}
for (int i = 0; i < attr.getLength(); i++) {
System.out.println(attr.getQName(i) + " " + attr.getValue(attr.getQName(i)));
}
throw new InternalError("can't find " + name);
return defaultValue;
}
int indent = 0;
@ -268,17 +267,18 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants
Phase p = new Phase(search(atts, "name"),
Double.parseDouble(search(atts, "stamp")),
Integer.parseInt(search(atts, "nodes", "0")),
Integer.parseInt(search(atts, "live")));
Integer.parseInt(search(atts, "live", "0")));
phaseStack.push(p);
} else if (qname.equals("phase_done")) {
Phase p = phaseStack.pop();
if (! p.getId().equals(search(atts, "name"))) {
String phaseName = search(atts, "name", null);
if (phaseName != null && !p.getId().equals(phaseName)) {
System.out.println("phase: " + p.getId());
throw new InternalError("phase name mismatch");
}
p.setEnd(Double.parseDouble(search(atts, "stamp")));
p.setEndNodes(Integer.parseInt(search(atts, "nodes", "0")));
p.setEndLiveNodes(Integer.parseInt(search(atts, "live")));
p.setEndLiveNodes(Integer.parseInt(search(atts, "live", "0")));
compile.getPhases().add(p);
} else if (qname.equals("task")) {
compile = new Compilation(Integer.parseInt(search(atts, "compile_id", "-1")));
@ -413,8 +413,8 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants
}
} else if (qname.equals("parse_done")) {
CallSite call = scopes.pop();
call.setEndNodes(Integer.parseInt(search(atts, "nodes", "1")));
call.setEndLiveNodes(Integer.parseInt(search(atts, "live", "1")));
call.setEndNodes(Integer.parseInt(search(atts, "nodes", "0")));
call.setEndLiveNodes(Integer.parseInt(search(atts, "live", "0")));
call.setTimeStamp(Double.parseDouble(search(atts, "stamp")));
scopes.push(call);
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,7 +24,7 @@
#include "adlc.hpp"
void* Chunk::operator new(size_t requested_size, size_t length) {
void* Chunk::operator new(size_t requested_size, size_t length) throw() {
return CHeapObj::operator new(requested_size + length);
}
@ -163,7 +163,7 @@ bool Arena::contains( const void *ptr ) const {
//-----------------------------------------------------------------------------
// CHeapObj
void* CHeapObj::operator new(size_t size){
void* CHeapObj::operator new(size_t size) throw() {
return (void *) malloc(size);
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -42,7 +42,7 @@
class CHeapObj {
public:
void* operator new(size_t size);
void* operator new(size_t size) throw();
void operator delete(void* p);
void* new_array(size_t size);
};
@ -53,7 +53,7 @@ class CHeapObj {
class ValueObj {
public:
void* operator new(size_t size);
void* operator new(size_t size) throw();
void operator delete(void* p);
};
@ -61,7 +61,7 @@ class ValueObj {
class AllStatic {
public:
void* operator new(size_t size);
void* operator new(size_t size) throw();
void operator delete(void* p);
};
@ -70,7 +70,7 @@ class AllStatic {
// Linked list of raw memory chunks
class Chunk: public CHeapObj {
public:
void* operator new(size_t size, size_t length);
void* operator new(size_t size, size_t length) throw();
void operator delete(void* p, size_t length);
Chunk(size_t length);
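
The rationale for adding throw() to these operator new overloads is not spelled out in the diff, but the relevant C++ rule is general: a plain operator new is assumed never to return NULL (it must throw instead), so a compiler may drop the caller's NULL check, whereas a throw() (non-throwing) allocation function may return NULL, the check is kept, and the new-expression skips the constructor on NULL. The toy example below, not taken from HotSpot, shows a throw() operator new whose NULL result is meaningful.

#include <cstddef>
#include <cstdlib>
#include <cstdio>

// Toy class (not from the patch): a malloc-backed allocator that may return
// NULL. Declaring operator new with throw() marks it as a non-throwing
// allocation function, so a NULL result is legal and observable by callers.
class CHeapLike {
 public:
  void* operator new(size_t size) throw() { return std::malloc(size); }
  void  operator delete(void* p)          { std::free(p); }
  int payload;
};

int main() {
  CHeapLike* p = new CHeapLike();  // uses the throw() overload above
  if (p == NULL) {                 // not dead code for a throw() operator new
    std::printf("allocation failed\n");
    return 1;
  }
  p->payload = 7;
  std::printf("payload = %d\n", p->payload);
  delete p;
  return 0;
}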

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -485,7 +485,7 @@ int get_legal_text(FileBuff &fbuf, char **legal_text)
// VS2005 has its own definition, identical to this one.
#if !defined(_WIN32) || defined(_WIN64) || _MSC_VER < 1400
void *operator new( size_t size, int, const char *, int ) {
void *operator new( size_t size, int, const char *, int ) throw() {
return ::operator new( size );
}
#endif

@ -1095,7 +1095,7 @@ static void check_peepmatch_instruction_sequence(FILE *fp, PeepMatch *pmatch, Pe
fprintf(fp, " // Identify previous instruction if inside this block\n");
fprintf(fp, " if( ");
print_block_index(fp, inst_position);
fprintf(fp, " > 0 ) {\n Node *n = block->_nodes.at(");
fprintf(fp, " > 0 ) {\n Node *n = block->get_node(");
print_block_index(fp, inst_position);
fprintf(fp, ");\n inst%d = (n->is_Mach()) ? ", inst_position);
fprintf(fp, "n->as_Mach() : NULL;\n }\n");

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -296,8 +296,8 @@ class CodeBuffer: public StackObj {
// CodeBuffers must be allocated on the stack except for a single
// special case during expansion which is handled internally. This
// is done to guarantee proper cleanup of resources.
void* operator new(size_t size) { return ResourceObj::operator new(size); }
void operator delete(void* p) { ShouldNotCallThis(); }
void* operator new(size_t size) throw() { return ResourceObj::operator new(size); }
void operator delete(void* p) { ShouldNotCallThis(); }
public:
typedef int csize_t; // code size type; would be size_t except for history

@ -364,7 +364,8 @@ class PatchingStub: public CodeStub {
enum PatchID {
access_field_id,
load_klass_id,
load_mirror_id
load_mirror_id,
load_appendix_id
};
enum constants {
patch_info_size = 3
@ -417,7 +418,7 @@ class PatchingStub: public CodeStub {
}
NativeMovRegMem* n_move = nativeMovRegMem_at(pc_start());
n_move->set_offset(field_offset);
} else if (_id == load_klass_id || _id == load_mirror_id) {
} else if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
assert(_obj != noreg, "must have register object for load_klass/load_mirror");
#ifdef ASSERT
// verify that we're pointing at a NativeMovConstReg

@ -74,16 +74,19 @@ class PhaseTraceTime: public TraceTime {
private:
JavaThread* _thread;
CompileLog* _log;
TimerName _timer;
public:
PhaseTraceTime(TimerName timer)
: TraceTime("", &timers[timer], CITime || CITimeEach, Verbose), _log(NULL) {
: TraceTime("", &timers[timer], CITime || CITimeEach, Verbose),
_log(NULL), _timer(timer)
{
if (Compilation::current() != NULL) {
_log = Compilation::current()->log();
}
if (_log != NULL) {
_log->begin_head("phase name='%s'", timer_name[timer]);
_log->begin_head("phase name='%s'", timer_name[_timer]);
_log->stamp();
_log->end_head();
}
@ -91,7 +94,7 @@ class PhaseTraceTime: public TraceTime {
~PhaseTraceTime() {
if (_log != NULL)
_log->done("phase");
_log->done("phase name='%s'", timer_name[_timer]);
}
};

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -279,8 +279,8 @@ class InstructionMark: public StackObj {
// Base class for objects allocated by the compiler in the compilation arena
class CompilationResourceObj ALLOCATION_SUPER_CLASS_SPEC {
public:
void* operator new(size_t size) { return Compilation::current()->arena()->Amalloc(size); }
void* operator new(size_t size, Arena* arena) {
void* operator new(size_t size) throw() { return Compilation::current()->arena()->Amalloc(size); }
void* operator new(size_t size, Arena* arena) throw() {
return arena->Amalloc(size);
}
void operator delete(void* p) {} // nothing to do

@ -1583,7 +1583,7 @@ void GraphBuilder::access_field(Bytecodes::Code code) {
ObjectType* obj_type = obj->type()->as_ObjectType();
if (obj_type->is_constant() && !PatchALot) {
ciObject* const_oop = obj_type->constant_value();
if (!const_oop->is_null_object()) {
if (!const_oop->is_null_object() && const_oop->is_loaded()) {
if (field->is_constant()) {
ciConstant field_val = field->constant_value_of(const_oop);
BasicType field_type = field_val.basic_type();
@ -1667,9 +1667,8 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
const Bytecodes::Code bc_raw = stream()->cur_bc_raw();
assert(declared_signature != NULL, "cannot be null");
// FIXME bail out for now
if (Bytecodes::has_optional_appendix(bc_raw) && !will_link) {
BAILOUT("unlinked call site (FIXME needs patching or recompile support)");
if (!C1PatchInvokeDynamic && Bytecodes::has_optional_appendix(bc_raw) && !will_link) {
BAILOUT("unlinked call site (C1PatchInvokeDynamic is off)");
}
// we have to make sure the argument size (incl. the receiver)
@ -1713,10 +1712,23 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
code = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokespecial;
break;
}
} else {
if (bc_raw == Bytecodes::_invokehandle) {
assert(!will_link, "should come here only for unlinked call");
code = Bytecodes::_invokespecial;
}
}
// Push appendix argument (MethodType, CallSite, etc.), if one.
if (stream()->has_appendix()) {
bool patch_for_appendix = false;
int patching_appendix_arg = 0;
if (C1PatchInvokeDynamic &&
(Bytecodes::has_optional_appendix(bc_raw) && (!will_link || PatchALot))) {
Value arg = append(new Constant(new ObjectConstant(compilation()->env()->unloaded_ciinstance()), copy_state_before()));
apush(arg);
patch_for_appendix = true;
patching_appendix_arg = (will_link && stream()->has_appendix()) ? 0 : 1;
} else if (stream()->has_appendix()) {
ciObject* appendix = stream()->get_appendix();
Value arg = append(new Constant(new ObjectConstant(appendix)));
apush(arg);
@ -1732,7 +1744,8 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
if (UseCHA && DeoptC1 && klass->is_loaded() && target->is_loaded() &&
!(// %%% FIXME: Are both of these relevant?
target->is_method_handle_intrinsic() ||
target->is_compiled_lambda_form())) {
target->is_compiled_lambda_form()) &&
!patch_for_appendix) {
Value receiver = NULL;
ciInstanceKlass* receiver_klass = NULL;
bool type_is_exact = false;
@ -1850,7 +1863,8 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
// check if we could do inlining
if (!PatchALot && Inline && klass->is_loaded() &&
(klass->is_initialized() || klass->is_interface() && target->holder()->is_initialized())
&& target->is_loaded()) {
&& target->is_loaded()
&& !patch_for_appendix) {
// callee is known => check if we have static binding
assert(target->is_loaded(), "callee must be known");
if (code == Bytecodes::_invokestatic ||
@ -1901,7 +1915,7 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
code == Bytecodes::_invokespecial ||
code == Bytecodes::_invokevirtual ||
code == Bytecodes::_invokeinterface;
Values* args = state()->pop_arguments(target->arg_size_no_receiver());
Values* args = state()->pop_arguments(target->arg_size_no_receiver() + patching_appendix_arg);
Value recv = has_receiver ? apop() : NULL;
int vtable_index = Method::invalid_vtable_index;

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -323,7 +323,7 @@ class Instruction: public CompilationResourceObj {
}
public:
void* operator new(size_t size) {
void* operator new(size_t size) throw() {
Compilation* c = Compilation::current();
void* res = c->arena()->Amalloc(size);
((Instruction*)res)->_id = c->get_next_id();
@ -1611,7 +1611,7 @@ LEAF(BlockBegin, StateSplit)
friend class SuxAndWeightAdjuster;
public:
void* operator new(size_t size) {
void* operator new(size_t size) throw() {
Compilation* c = Compilation::current();
void* res = c->arena()->Amalloc(size);
((BlockBegin*)res)->_id = c->get_next_id();

@ -1211,8 +1211,6 @@ class LIR_OpJavaCall: public LIR_OpCall {
bool is_invokedynamic() const { return code() == lir_dynamic_call; }
bool is_method_handle_invoke() const {
return
is_invokedynamic() // An invokedynamic is always a MethodHandle call site.
||
method()->is_compiled_lambda_form() // Java-generated adapter
||
method()->is_method_handle_intrinsic(); // JVM-generated MH intrinsic

@ -93,12 +93,23 @@ void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_cod
default:
ShouldNotReachHere();
}
} else if (patch->id() == PatchingStub::load_appendix_id) {
Bytecodes::Code bc_raw = info->scope()->method()->raw_code_at_bci(info->stack()->bci());
assert(Bytecodes::has_optional_appendix(bc_raw), "unexpected appendix resolution");
} else {
ShouldNotReachHere();
}
#endif
}
PatchingStub::PatchID LIR_Assembler::patching_id(CodeEmitInfo* info) {
IRScope* scope = info->scope();
Bytecodes::Code bc_raw = scope->method()->raw_code_at_bci(info->stack()->bci());
if (Bytecodes::has_optional_appendix(bc_raw)) {
return PatchingStub::load_appendix_id;
}
return PatchingStub::load_mirror_id;
}
//---------------------------------------------------------------

@ -119,6 +119,8 @@ class LIR_Assembler: public CompilationResourceObj {
void comp_op(LIR_Condition condition, LIR_Opr src, LIR_Opr result, LIR_Op2* op);
PatchingStub::PatchID patching_id(CodeEmitInfo* info);
public:
LIR_Assembler(Compilation* c);
~LIR_Assembler();

@ -819,6 +819,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
KlassHandle init_klass(THREAD, NULL); // klass needed by load_klass_patching code
KlassHandle load_klass(THREAD, NULL); // klass needed by load_klass_patching code
Handle mirror(THREAD, NULL); // oop needed by load_mirror_patching code
Handle appendix(THREAD, NULL); // oop needed by appendix_patching code
bool load_klass_or_mirror_patch_id =
(stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id);
@ -888,10 +889,32 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
mirror = Handle(THREAD, m);
}
break;
default: Unimplemented();
default: fatal("unexpected bytecode for load_klass_or_mirror_patch_id");
}
// convert to handle
load_klass = KlassHandle(THREAD, k);
} else if (stub_id == load_appendix_patching_id) {
Bytecode_invoke bytecode(caller_method, bci);
Bytecodes::Code bc = bytecode.invoke_code();
CallInfo info;
constantPoolHandle pool(thread, caller_method->constants());
int index = bytecode.index();
LinkResolver::resolve_invoke(info, Handle(), pool, index, bc, CHECK);
appendix = info.resolved_appendix();
switch (bc) {
case Bytecodes::_invokehandle: {
int cache_index = ConstantPool::decode_cpcache_index(index, true);
assert(cache_index >= 0 && cache_index < pool->cache()->length(), "unexpected cache index");
pool->cache()->entry_at(cache_index)->set_method_handle(pool, info);
break;
}
case Bytecodes::_invokedynamic: {
pool->invokedynamic_cp_cache_entry_at(index)->set_dynamic_call(pool, info);
break;
}
default: fatal("unexpected bytecode for load_appendix_patching_id");
}
} else {
ShouldNotReachHere();
}
@ -992,8 +1015,8 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
n_copy->data() == (intptr_t)Universe::non_oop_word(),
"illegal init value");
if (stub_id == Runtime1::load_klass_patching_id) {
assert(load_klass() != NULL, "klass not set");
n_copy->set_data((intx) (load_klass()));
assert(load_klass() != NULL, "klass not set");
n_copy->set_data((intx) (load_klass()));
} else {
assert(mirror() != NULL, "klass not set");
n_copy->set_data((intx) (mirror()));
@ -1002,43 +1025,55 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
if (TracePatching) {
Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
}
}
} else if (stub_id == Runtime1::load_appendix_patching_id) {
NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
assert(n_copy->data() == 0 ||
n_copy->data() == (intptr_t)Universe::non_oop_word(),
"illegal init value");
n_copy->set_data((intx) (appendix()));
#if defined(SPARC) || defined(PPC)
// Update the location in the nmethod with the proper
// metadata. When the code was generated, a NULL was stuffed
// in the metadata table and that table needs to be updated to
// have the right value. On intel the value is kept
// directly in the instruction instead of in the metadata
// table, so set_data above effectively updated the value.
nmethod* nm = CodeCache::find_nmethod(instr_pc);
assert(nm != NULL, "invalid nmethod_pc");
RelocIterator mds(nm, copy_buff, copy_buff + 1);
bool found = false;
while (mds.next() && !found) {
if (mds.type() == relocInfo::oop_type) {
assert(stub_id == Runtime1::load_mirror_patching_id, "wrong stub id");
oop_Relocation* r = mds.oop_reloc();
oop* oop_adr = r->oop_addr();
*oop_adr = mirror();
r->fix_oop_relocation();
found = true;
} else if (mds.type() == relocInfo::metadata_type) {
assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
metadata_Relocation* r = mds.metadata_reloc();
Metadata** metadata_adr = r->metadata_addr();
*metadata_adr = load_klass();
r->fix_metadata_relocation();
found = true;
}
}
assert(found, "the metadata must exist!");
#endif
if (TracePatching) {
Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
}
} else {
ShouldNotReachHere();
}
#if defined(SPARC) || defined(PPC)
if (load_klass_or_mirror_patch_id ||
stub_id == Runtime1::load_appendix_patching_id) {
// Update the location in the nmethod with the proper
// metadata. When the code was generated, a NULL was stuffed
// in the metadata table and that table needs to be updated to
// have the right value. On intel the value is kept
// directly in the instruction instead of in the metadata
// table, so set_data above effectively updated the value.
nmethod* nm = CodeCache::find_nmethod(instr_pc);
assert(nm != NULL, "invalid nmethod_pc");
RelocIterator mds(nm, copy_buff, copy_buff + 1);
bool found = false;
while (mds.next() && !found) {
if (mds.type() == relocInfo::oop_type) {
assert(stub_id == Runtime1::load_mirror_patching_id ||
stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");
oop_Relocation* r = mds.oop_reloc();
oop* oop_adr = r->oop_addr();
*oop_adr = stub_id == Runtime1::load_mirror_patching_id ? mirror() : appendix();
r->fix_oop_relocation();
found = true;
} else if (mds.type() == relocInfo::metadata_type) {
assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
metadata_Relocation* r = mds.metadata_reloc();
Metadata** metadata_adr = r->metadata_addr();
*metadata_adr = load_klass();
r->fix_metadata_relocation();
found = true;
}
}
assert(found, "the metadata must exist!");
}
#endif
if (do_patch) {
// replace instructions
// first replace the tail, then the call
@ -1077,7 +1112,8 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
ICache::invalidate_range(instr_pc, *byte_count);
NativeGeneralJump::replace_mt_safe(instr_pc, copy_buff);
if (load_klass_or_mirror_patch_id) {
if (load_klass_or_mirror_patch_id ||
stub_id == Runtime1::load_appendix_patching_id) {
relocInfo::relocType rtype =
(stub_id == Runtime1::load_klass_patching_id) ?
relocInfo::metadata_type :
@ -1118,7 +1154,8 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
// If we are patching in a non-perm oop, make sure the nmethod
// is on the right list.
if (ScavengeRootsInCode && mirror.not_null() && mirror()->is_scavengable()) {
if (ScavengeRootsInCode && ((mirror.not_null() && mirror()->is_scavengable()) ||
(appendix.not_null() && appendix->is_scavengable()))) {
MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
guarantee(nm != NULL, "only nmethods can contain non-perm oops");
@ -1179,6 +1216,24 @@ int Runtime1::move_mirror_patching(JavaThread* thread) {
return caller_is_deopted();
}
int Runtime1::move_appendix_patching(JavaThread* thread) {
//
// NOTE: we are still in Java
//
Thread* THREAD = thread;
debug_only(NoHandleMark nhm;)
{
// Enter VM mode
ResetNoHandleMark rnhm;
patch_code(thread, load_appendix_patching_id);
}
// Back in JAVA, use no oops DON'T safepoint
// Return true if calling code is deoptimized
return caller_is_deopted();
}
//
// Entry point for compiled code. We want to patch a nmethod.
// We don't do a normal VM transition here because we want to

@ -67,6 +67,7 @@ class StubAssembler;
stub(access_field_patching) \
stub(load_klass_patching) \
stub(load_mirror_patching) \
stub(load_appendix_patching) \
stub(g1_pre_barrier_slow) \
stub(g1_post_barrier_slow) \
stub(fpu2long_stub) \
@ -160,6 +161,7 @@ class Runtime1: public AllStatic {
static int access_field_patching(JavaThread* thread);
static int move_klass_patching(JavaThread* thread);
static int move_mirror_patching(JavaThread* thread);
static int move_appendix_patching(JavaThread* thread);
static void patch_code(JavaThread* thread, StubID stub_id);

@ -25,4 +25,4 @@
#include "precompiled.hpp"
#include "c1/c1_globals.hpp"
C1_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, MATERIALIZE_NOTPRODUCT_FLAG)
C1_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_NOTPRODUCT_FLAG)

@ -54,7 +54,7 @@
//
// Defines all global flags used by the client compiler.
//
#define C1_FLAGS(develop, develop_pd, product, product_pd, notproduct) \
#define C1_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct) \
\
/* Printing */ \
notproduct(bool, PrintC1Statistics, false, \
@ -333,15 +333,19 @@
"Use CHA and exact type results at call sites when updating MDOs")\
\
product(bool, C1UpdateMethodData, trueInTiered, \
"Update MethodData*s in Tier1-generated code") \
"Update MethodData*s in Tier1-generated code") \
\
develop(bool, PrintCFGToFile, false, \
"print control flow graph to a separate file during compilation") \
\
diagnostic(bool, C1PatchInvokeDynamic, true, \
"Patch invokedynamic appendix not known at compile time") \
\
\
// Read default values for c1 globals
C1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_NOTPRODUCT_FLAG)
C1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_NOTPRODUCT_FLAG)
#endif // SHARE_VM_C1_C1_GLOBALS_HPP
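
The C1_FLAGS edit threads a new diagnostic category through an X-macro style flag table: the same table is expanded once with DECLARE_* macros and once with MATERIALIZE_* macros (both expansion sites appear in the hunks above), so every expansion has to grow a parameter when a category is added. A toy version of the pattern, with invented names rather than the HotSpot macros, is sketched below.

#include <cstdio>

// Toy flag table in the style of c1_globals.hpp: each flag category is a macro
// parameter, so each caller decides how the category expands.
#define MY_FLAGS(product, diagnostic, notproduct)                              \
  product(bool, UpdateMethodData, true,  "update MethodData in tier 1")        \
  diagnostic(bool, PatchInvokeDynamic, true, "patch invokedynamic appendix")   \
  notproduct(bool, PrintStatistics, false, "print statistics")

// Expansion 1: declarations (analogous to DECLARE_*_FLAG).
#define DECLARE_FLAG(type, name, value, doc) extern type name;
MY_FLAGS(DECLARE_FLAG, DECLARE_FLAG, DECLARE_FLAG)
#undef DECLARE_FLAG

// Expansion 2: definitions (analogous to MATERIALIZE_*_FLAG).
#define MATERIALIZE_FLAG(type, name, value, doc) type name = value;
MY_FLAGS(MATERIALIZE_FLAG, MATERIALIZE_FLAG, MATERIALIZE_FLAG)
#undef MATERIALIZE_FLAG

int main() {
  std::printf("PatchInvokeDynamic=%d PrintStatistics=%d\n",
              (int)PatchInvokeDynamic, (int)PrintStatistics);
  return 0;
}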

@ -1150,6 +1150,10 @@ void ciEnv::record_out_of_memory_failure() {
record_method_not_compilable("out of memory");
}
ciInstance* ciEnv::unloaded_ciinstance() {
GUARDED_VM_ENTRY(return _factory->get_unloaded_object_constant();)
}
void ciEnv::dump_replay_data(outputStream* out) {
VM_ENTRY_MARK;
MutexLocker ml(Compile_lock);

@ -400,6 +400,7 @@ public:
static ciInstanceKlass* unloaded_ciinstance_klass() {
return _unloaded_ciinstance_klass;
}
ciInstance* unloaded_ciinstance();
ciKlass* find_system_klass(ciSymbol* klass_name);
// Note: To find a class from its name string, use ciSymbol::make,

@ -60,10 +60,10 @@ ciType* ciInstance::java_mirror_type() {
//
// Constant value of a field.
ciConstant ciInstance::field_value(ciField* field) {
assert(is_loaded() &&
field->holder()->is_loaded() &&
klass()->is_subclass_of(field->holder()),
"invalid access");
assert(is_loaded(), "invalid access - must be loaded");
assert(field->holder()->is_loaded(), "invalid access - holder must be loaded");
assert(klass()->is_subclass_of(field->holder()), "invalid access - must be subclass");
VM_ENTRY_MARK;
ciConstant result;
Handle obj = get_oop();

@ -177,6 +177,10 @@ class ciMethod : public ciMetadata {
address bcp = code() + bci;
return Bytecodes::java_code_at(NULL, bcp);
}
Bytecodes::Code raw_code_at_bci(int bci) {
address bcp = code() + bci;
return Bytecodes::code_at(NULL, bcp);
}
BCEscapeAnalyzer *get_bcea();
ciMethodBlocks *get_method_blocks();

@ -563,7 +563,10 @@ ciInstance* ciObjectFactory::get_unloaded_method_type_constant(ciSymbol* signatu
return get_unloaded_instance(ciEnv::_MethodType_klass->as_instance_klass());
}
ciInstance* ciObjectFactory::get_unloaded_object_constant() {
if (ciEnv::_Object_klass == NULL) return NULL;
return get_unloaded_instance(ciEnv::_Object_klass->as_instance_klass());
}
//------------------------------------------------------------------
// ciObjectFactory::get_empty_methodData

@ -131,6 +131,8 @@ public:
ciInstance* get_unloaded_method_type_constant(ciSymbol* signature);
ciInstance* get_unloaded_object_constant();
// Get the ciMethodData representing the methodData for a method
// with none.
ciMethodData* get_empty_methodData();

@ -28,7 +28,6 @@
#include "classfile/classLoaderData.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/defaultMethods.hpp"
#include "classfile/genericSignatures.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
@ -3039,35 +3038,6 @@ AnnotationArray* ClassFileParser::assemble_annotations(u1* runtime_visible_annot
return annotations;
}
#ifdef ASSERT
static void parseAndPrintGenericSignatures(
instanceKlassHandle this_klass, TRAPS) {
assert(ParseAllGenericSignatures == true, "Shouldn't call otherwise");
ResourceMark rm;
if (this_klass->generic_signature() != NULL) {
using namespace generic;
ClassDescriptor* spec = ClassDescriptor::parse_generic_signature(this_klass(), CHECK);
tty->print_cr("Parsing %s", this_klass->generic_signature()->as_C_string());
spec->print_on(tty);
for (int i = 0; i < this_klass->methods()->length(); ++i) {
Method* m = this_klass->methods()->at(i);
MethodDescriptor* method_spec = MethodDescriptor::parse_generic_signature(m, spec);
Symbol* sig = m->generic_signature();
if (sig == NULL) {
sig = m->signature();
}
tty->print_cr("Parsing %s", sig->as_C_string());
method_spec->print_on(tty);
}
}
}
#endif // def ASSERT
instanceKlassHandle ClassFileParser::parse_super_class(int super_class_index,
TRAPS) {
instanceKlassHandle super_klass;
@ -4060,12 +4030,6 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
java_lang_Class::create_mirror(this_klass, protection_domain, CHECK_(nullHandle));
#ifdef ASSERT
if (ParseAllGenericSignatures) {
parseAndPrintGenericSignatures(this_klass, CHECK_(nullHandle));
}
#endif
// Generate any default methods - default methods are interface methods
// that have a default implementation. This is new with Lambda project.
if (has_default_methods && !access_flags.is_interface() &&

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -197,7 +197,7 @@ ClassPathDirEntry::ClassPathDirEntry(char* dir) : ClassPathEntry() {
}
ClassFileStream* ClassPathDirEntry::open_stream(const char* name) {
ClassFileStream* ClassPathDirEntry::open_stream(const char* name, TRAPS) {
// construct full path name
char path[JVM_MAXPATHLEN];
if (jio_snprintf(path, sizeof(path), "%s%s%s", _dir, os::file_separator(), name) == -1) {
@ -240,7 +240,7 @@ ClassPathZipEntry::~ClassPathZipEntry() {
FREE_C_HEAP_ARRAY(char, _zip_name, mtClass);
}
ClassFileStream* ClassPathZipEntry::open_stream(const char* name) {
ClassFileStream* ClassPathZipEntry::open_stream(const char* name, TRAPS) {
// enable call to C land
JavaThread* thread = JavaThread::current();
ThreadToNativeFromVM ttn(thread);
@ -284,24 +284,24 @@ void ClassPathZipEntry::contents_do(void f(const char* name, void* context), voi
}
}
LazyClassPathEntry::LazyClassPathEntry(char* path, struct stat st) : ClassPathEntry() {
LazyClassPathEntry::LazyClassPathEntry(char* path, const struct stat* st) : ClassPathEntry() {
_path = strdup(path);
_st = st;
_st = *st;
_meta_index = NULL;
_resolved_entry = NULL;
_has_error = false;
}
bool LazyClassPathEntry::is_jar_file() {
return ((_st.st_mode & S_IFREG) == S_IFREG);
}
ClassPathEntry* LazyClassPathEntry::resolve_entry() {
ClassPathEntry* LazyClassPathEntry::resolve_entry(TRAPS) {
if (_resolved_entry != NULL) {
return (ClassPathEntry*) _resolved_entry;
}
ClassPathEntry* new_entry = NULL;
ClassLoader::create_class_path_entry(_path, _st, &new_entry, false);
assert(new_entry != NULL, "earlier code should have caught this");
new_entry = ClassLoader::create_class_path_entry(_path, &_st, false, CHECK_NULL);
{
ThreadCritical tc;
if (_resolved_entry == NULL) {
@ -314,12 +314,21 @@ ClassPathEntry* LazyClassPathEntry::resolve_entry() {
return (ClassPathEntry*) _resolved_entry;
}
ClassFileStream* LazyClassPathEntry::open_stream(const char* name) {
ClassFileStream* LazyClassPathEntry::open_stream(const char* name, TRAPS) {
if (_meta_index != NULL &&
!_meta_index->may_contain(name)) {
return NULL;
}
return resolve_entry()->open_stream(name);
if (_has_error) {
return NULL;
}
ClassPathEntry* cpe = resolve_entry(THREAD);
if (cpe == NULL) {
_has_error = true;
return NULL;
} else {
return cpe->open_stream(name, THREAD);
}
}
bool LazyClassPathEntry::is_lazy() {
@ -465,20 +474,19 @@ void ClassLoader::setup_bootstrap_search_path() {
}
}
void ClassLoader::create_class_path_entry(char *path, struct stat st, ClassPathEntry **new_entry, bool lazy) {
ClassPathEntry* ClassLoader::create_class_path_entry(char *path, const struct stat* st, bool lazy, TRAPS) {
JavaThread* thread = JavaThread::current();
if (lazy) {
*new_entry = new LazyClassPathEntry(path, st);
return;
return new LazyClassPathEntry(path, st);
}
if ((st.st_mode & S_IFREG) == S_IFREG) {
ClassPathEntry* new_entry = NULL;
if ((st->st_mode & S_IFREG) == S_IFREG) {
// Regular file, should be a zip file
// Canonicalized filename
char canonical_path[JVM_MAXPATHLEN];
if (!get_canonical_path(path, canonical_path, JVM_MAXPATHLEN)) {
// This matches the classic VM
EXCEPTION_MARK;
THROW_MSG(vmSymbols::java_io_IOException(), "Bad pathname");
THROW_MSG_(vmSymbols::java_io_IOException(), "Bad pathname", NULL);
}
char* error_msg = NULL;
jzfile* zip;
@ -489,7 +497,7 @@ void ClassLoader::create_class_path_entry(char *path, struct stat st, ClassPathE
zip = (*ZipOpen)(canonical_path, &error_msg);
}
if (zip != NULL && error_msg == NULL) {
*new_entry = new ClassPathZipEntry(zip, path);
new_entry = new ClassPathZipEntry(zip, path);
if (TraceClassLoading) {
tty->print_cr("[Opened %s]", path);
}
@ -504,16 +512,16 @@ void ClassLoader::create_class_path_entry(char *path, struct stat st, ClassPathE
msg = NEW_RESOURCE_ARRAY(char, len); ;
jio_snprintf(msg, len - 1, "error in opening JAR file <%s> %s", error_msg, path);
}
EXCEPTION_MARK;
THROW_MSG(vmSymbols::java_lang_ClassNotFoundException(), msg);
THROW_MSG_(vmSymbols::java_lang_ClassNotFoundException(), msg, NULL);
}
} else {
// Directory
*new_entry = new ClassPathDirEntry(path);
new_entry = new ClassPathDirEntry(path);
if (TraceClassLoading) {
tty->print_cr("[Path %s]", path);
}
}
return new_entry;
}
@ -572,13 +580,14 @@ void ClassLoader::add_to_list(ClassPathEntry *new_entry) {
}
}
void ClassLoader::update_class_path_entry_list(const char *path,
void ClassLoader::update_class_path_entry_list(char *path,
bool check_for_duplicates) {
struct stat st;
if (os::stat((char *)path, &st) == 0) {
if (os::stat(path, &st) == 0) {
// File or directory found
ClassPathEntry* new_entry = NULL;
create_class_path_entry((char *)path, st, &new_entry, LazyBootClassLoader);
Thread* THREAD = Thread::current();
new_entry = create_class_path_entry(path, &st, LazyBootClassLoader, CHECK);
// The kernel VM adds dynamically to the end of the classloader path and
// doesn't reorder the bootclasspath which would break java.lang.Package
// (see PackageInfo).
@ -897,7 +906,7 @@ instanceKlassHandle ClassLoader::load_classfile(Symbol* h_name, TRAPS) {
PerfClassTraceTime::CLASS_LOAD);
ClassPathEntry* e = _first_entry;
while (e != NULL) {
stream = e->open_stream(name);
stream = e->open_stream(name, CHECK_NULL);
if (stream != NULL) {
break;
}
@ -1257,11 +1266,16 @@ bool ClassPathZipEntry::is_rt_jar12() {
}
void LazyClassPathEntry::compile_the_world(Handle loader, TRAPS) {
resolve_entry()->compile_the_world(loader, CHECK);
ClassPathEntry* cpe = resolve_entry(THREAD);
if (cpe != NULL) {
cpe->compile_the_world(loader, CHECK);
}
}
bool LazyClassPathEntry::is_rt_jar() {
return resolve_entry()->is_rt_jar();
Thread* THREAD = Thread::current();
ClassPathEntry* cpe = resolve_entry(THREAD);
return (cpe != NULL) ? cpe->is_jar_file() : false;
}
void ClassLoader::compile_the_world() {

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -63,7 +63,7 @@ class ClassPathEntry: public CHeapObj<mtClass> {
ClassPathEntry();
// Attempt to locate file_name through this class path entry.
// Returns a class file parsing stream if successful.
virtual ClassFileStream* open_stream(const char* name) = 0;
virtual ClassFileStream* open_stream(const char* name, TRAPS) = 0;
// Debugging
NOT_PRODUCT(virtual void compile_the_world(Handle loader, TRAPS) = 0;)
NOT_PRODUCT(virtual bool is_rt_jar() = 0;)
@ -77,7 +77,7 @@ class ClassPathDirEntry: public ClassPathEntry {
bool is_jar_file() { return false; }
const char* name() { return _dir; }
ClassPathDirEntry(char* dir);
ClassFileStream* open_stream(const char* name);
ClassFileStream* open_stream(const char* name, TRAPS);
// Debugging
NOT_PRODUCT(void compile_the_world(Handle loader, TRAPS);)
NOT_PRODUCT(bool is_rt_jar();)
@ -107,7 +107,7 @@ class ClassPathZipEntry: public ClassPathEntry {
const char* name() { return _zip_name; }
ClassPathZipEntry(jzfile* zip, const char* zip_name);
~ClassPathZipEntry();
ClassFileStream* open_stream(const char* name);
ClassFileStream* open_stream(const char* name, TRAPS);
void contents_do(void f(const char* name, void* context), void* context);
// Debugging
NOT_PRODUCT(void compile_the_world(Handle loader, TRAPS);)
@ -125,13 +125,14 @@ class LazyClassPathEntry: public ClassPathEntry {
char* _path; // dir or file
struct stat _st;
MetaIndex* _meta_index;
bool _has_error;
volatile ClassPathEntry* _resolved_entry;
ClassPathEntry* resolve_entry();
ClassPathEntry* resolve_entry(TRAPS);
public:
bool is_jar_file();
const char* name() { return _path; }
LazyClassPathEntry(char* path, struct stat st);
ClassFileStream* open_stream(const char* name);
LazyClassPathEntry(char* path, const struct stat* st);
ClassFileStream* open_stream(const char* name, TRAPS);
void set_meta_index(MetaIndex* meta_index) { _meta_index = meta_index; }
virtual bool is_lazy();
// Debugging
@ -207,14 +208,15 @@ class ClassLoader: AllStatic {
static void setup_meta_index();
static void setup_bootstrap_search_path();
static void load_zip_library();
static void create_class_path_entry(char *path, struct stat st, ClassPathEntry **new_entry, bool lazy);
static ClassPathEntry* create_class_path_entry(char *path, const struct stat* st,
bool lazy, TRAPS);
// Canonicalizes path names, so strcmp will work properly. This is mainly
// to avoid confusing the zip library
static bool get_canonical_path(char* orig, char* out, int len);
public:
// Used by the kernel jvm.
static void update_class_path_entry_list(const char *path,
static void update_class_path_entry_list(char *path,
bool check_for_duplicates);
static void print_bootclasspath();

@ -25,7 +25,6 @@
#include "precompiled.hpp"
#include "classfile/bytecodeAssembler.hpp"
#include "classfile/defaultMethods.hpp"
#include "classfile/genericSignatures.hpp"
#include "classfile/symbolTable.hpp"
#include "memory/allocation.hpp"
#include "memory/metadataFactory.hpp"
@ -75,14 +74,6 @@ class PseudoScope : public ResourceObj {
}
};
class ContextMark : public PseudoScopeMark {
private:
generic::Context::Mark _mark;
public:
ContextMark(const generic::Context::Mark& cm) : _mark(cm) {}
virtual void destroy() { _mark.destroy(); }
};
#ifndef PRODUCT
static void print_slot(outputStream* str, Symbol* name, Symbol* signature) {
ResourceMark rm;
@ -503,38 +494,6 @@ Symbol* MethodFamily::generate_conflicts_message(GrowableArray<Method*>* methods
return SymbolTable::new_symbol(ss.base(), (int)ss.size(), CHECK_NULL);
}
// A generic method family contains a set of all methods that implement a single
// language-level method. Because of erasure, these methods may have different
// signatures. As members of the set are collected while walking over the
// hierarchy, they are tagged with a qualification state. The qualification
// state for an erased method is set to disqualified if there exists a path
// from the root of hierarchy to the method that contains an interleaving
// language-equivalent method defined in an interface.
class GenericMethodFamily : public MethodFamily {
private:
generic::MethodDescriptor* _descriptor; // language-level description
public:
GenericMethodFamily(generic::MethodDescriptor* canonical_desc)
: _descriptor(canonical_desc) {}
generic::MethodDescriptor* descriptor() const { return _descriptor; }
bool descriptor_matches(generic::MethodDescriptor* md, generic::Context* ctx) {
return descriptor()->covariant_match(md, ctx);
}
#ifndef PRODUCT
Symbol* get_generic_sig() const {
generic::Context ctx(NULL); // empty, as _descriptor already canonicalized
TempNewSymbol sig = descriptor()->reify_signature(&ctx, Thread::current());
return sig;
}
#endif // ndef PRODUCT
};
class StateRestorer;
@ -571,26 +530,6 @@ class StatefulMethodFamily : public ResourceObj {
StateRestorer* record_method_and_dq_further(Method* mo);
};
// StatefulGenericMethodFamily is a wrapper around GenericMethodFamily that maintains the
// qualification state during hierarchy visitation, and applies that state
// when adding members to the GenericMethodFamily.
class StatefulGenericMethodFamily : public StatefulMethodFamily {
public:
StatefulGenericMethodFamily(generic::MethodDescriptor* md, generic::Context* ctx)
: StatefulMethodFamily(new GenericMethodFamily(md->canonicalize(ctx))) {
}
GenericMethodFamily* get_method_family() {
return (GenericMethodFamily*)_method_family;
}
bool descriptor_matches(generic::MethodDescriptor* md, generic::Context* ctx) {
return get_method_family()->descriptor_matches(md, ctx);
}
};
class StateRestorer : public PseudoScopeMark {
private:
StatefulMethodFamily* _method;
@ -616,39 +555,6 @@ StateRestorer* StatefulMethodFamily::record_method_and_dq_further(Method* mo) {
return mark;
}
class StatefulGenericMethodFamilies : public ResourceObj {
private:
GrowableArray<StatefulGenericMethodFamily*> _methods;
public:
StatefulGenericMethodFamily* find_matching(
generic::MethodDescriptor* md, generic::Context* ctx) {
for (int i = 0; i < _methods.length(); ++i) {
StatefulGenericMethodFamily* existing = _methods.at(i);
if (existing->descriptor_matches(md, ctx)) {
return existing;
}
}
return NULL;
}
StatefulGenericMethodFamily* find_matching_or_create(
generic::MethodDescriptor* md, generic::Context* ctx) {
StatefulGenericMethodFamily* method = find_matching(md, ctx);
if (method == NULL) {
method = new StatefulGenericMethodFamily(md, ctx);
_methods.append(method);
}
return method;
}
void extract_families_into(GrowableArray<GenericMethodFamily*>* array) {
for (int i = 0; i < _methods.length(); ++i) {
array->append(_methods.at(i)->get_method_family());
}
}
};
// Represents a location corresponding to a vtable slot for methods that
// neither the class nor any of its ancestors provide an implementation.
// Default methods may be present to fill this slot.
@ -779,146 +685,11 @@ class FindMethodsByErasedSig : public HierarchyVisitor<FindMethodsByErasedSig> {
};
// Iterates over the type hierarchy looking for all methods with a specific
// method name. The result of this is a set of method families each of
// which is populated with a set of methods that implement the same
// language-level signature.
class FindMethodsByGenericSig : public HierarchyVisitor<FindMethodsByGenericSig> {
private:
// Context data
Thread* THREAD;
generic::DescriptorCache* _cache;
Symbol* _method_name;
generic::Context* _ctx;
StatefulGenericMethodFamilies _families;
public:
FindMethodsByGenericSig(generic::DescriptorCache* cache, Symbol* name,
generic::Context* ctx, Thread* thread) :
_cache(cache), _method_name(name), _ctx(ctx), THREAD(thread) {}
void get_discovered_families(GrowableArray<GenericMethodFamily*>* methods) {
_families.extract_families_into(methods);
}
void* new_node_data(InstanceKlass* cls) { return new PseudoScope(); }
void free_node_data(void* node_data) {
PseudoScope::cast(node_data)->destroy();
}
bool visit() {
PseudoScope* scope = PseudoScope::cast(current_data());
InstanceKlass* klass = current_class();
InstanceKlass* sub = current_depth() > 0 ? class_at_depth(1) : NULL;
ContextMark* cm = new ContextMark(_ctx->mark());
scope->add_mark(cm); // will restore context when scope is freed
_ctx->apply_type_arguments(sub, klass, THREAD);
int start, end = 0;
start = klass->find_method_by_name(_method_name, &end);
if (start != -1) {
for (int i = start; i < end; ++i) {
Method* m = klass->methods()->at(i);
// This gets the method's parameter list with its generic type
// parameters resolved
generic::MethodDescriptor* md = _cache->descriptor_for(m, THREAD);
// Find all methods on this hierarchy that match this method
// (name, signature). This class collects other families of this
// method name.
StatefulGenericMethodFamily* family =
_families.find_matching_or_create(md, _ctx);
if (klass->is_interface()) {
// ???
StateRestorer* restorer = family->record_method_and_dq_further(m);
scope->add_mark(restorer);
} else {
// This is the rule that methods in classes "win" (bad word) over
// methods in interfaces. This works because of single inheritance
family->set_target_if_empty(m);
}
}
}
return true;
}
};
#ifndef PRODUCT
static void print_generic_families(
GrowableArray<GenericMethodFamily*>* methods, Symbol* match) {
streamIndentor si(tty, 4);
if (methods->length() == 0) {
tty->indent();
tty->print_cr("No Logical Method found");
}
for (int i = 0; i < methods->length(); ++i) {
tty->indent();
GenericMethodFamily* lm = methods->at(i);
if (lm->contains_signature(match)) {
tty->print_cr("<Matching>");
} else {
tty->print_cr("<Non-Matching>");
}
lm->print_sig_on(tty, lm->get_generic_sig(), 1);
}
}
#endif // ndef PRODUCT
static void create_overpasses(
GrowableArray<EmptyVtableSlot*>* slots, InstanceKlass* klass, TRAPS);
static void generate_generic_defaults(
InstanceKlass* klass, GrowableArray<EmptyVtableSlot*>* empty_slots,
EmptyVtableSlot* slot, int current_slot_index, TRAPS) {
if (slot->is_bound()) {
#ifndef PRODUCT
if (TraceDefaultMethods) {
streamIndentor si(tty, 4);
tty->indent().print_cr("Already bound to logical method:");
GenericMethodFamily* lm = (GenericMethodFamily*)(slot->get_binding());
lm->print_sig_on(tty, lm->get_generic_sig(), 1);
}
#endif // ndef PRODUCT
return; // covered by previous processing
}
generic::DescriptorCache cache;
generic::Context ctx(&cache);
FindMethodsByGenericSig visitor(&cache, slot->name(), &ctx, CHECK);
visitor.run(klass);
GrowableArray<GenericMethodFamily*> discovered_families;
visitor.get_discovered_families(&discovered_families);
#ifndef PRODUCT
if (TraceDefaultMethods) {
print_generic_families(&discovered_families, slot->signature());
}
#endif // ndef PRODUCT
// Find and populate any other slots that match the discovered families
for (int j = current_slot_index; j < empty_slots->length(); ++j) {
EmptyVtableSlot* open_slot = empty_slots->at(j);
if (slot->name() == open_slot->name()) {
for (int k = 0; k < discovered_families.length(); ++k) {
GenericMethodFamily* lm = discovered_families.at(k);
if (lm->contains_signature(open_slot->signature())) {
lm->determine_target(klass, CHECK);
open_slot->bind_family(lm);
}
}
}
}
}
static void generate_erased_defaults(
InstanceKlass* klass, GrowableArray<EmptyVtableSlot*>* empty_slots,
EmptyVtableSlot* slot, TRAPS) {
@ -943,21 +714,14 @@ static void merge_in_new_methods(InstanceKlass* klass,
//
// First it finds any name/signature slots that need any implementation (either
// because they are miranda or a superclass's implementation is an overpass
// itself). For each slot, iterate over the hierarchy, using generic signature
// information to partition any methods that match the name into method families
// where each family contains methods whose signatures are equivalent at the
// language level (i.e., their reified parameters match and return values are
// covariant). Check those sets to see if they contain a signature that matches
// the slot we're looking at (if we're lucky, there might be other empty slots
// that we can fill using the same analysis).
// itself). For each slot, iterate over the hierarchy to see if it contains a
// signature that matches the slot we are looking at.
//
// For each slot filled, we generate an overpass method that either calls the
// unique default method candidate using invokespecial, or throws an exception
// (in the case of no default method candidates, or more than one valid
// candidate). These methods are then added to the class's method list. If
// the method set we're using contains methods (qualified or not) with a
// different runtime signature than the method we're creating, then we have to
// create bridges with those signatures too.
// candidate). These methods are then added to the class's method list.
// The JVM does not create bridges nor handle generic signatures here.
void DefaultMethods::generate_default_methods(
InstanceKlass* klass, GrowableArray<Method*>* mirandas, TRAPS) {
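
As a rough model of the selection step described above (plain C++, not HotSpot code), the sketch below gathers the candidates found for one empty slot and either picks the single valid one or records the message the generated overpass would throw; all types and names are invented for the example.

#include <cstdio>
#include <cstddef>
#include <string>
#include <vector>

struct Candidate {
  std::string holder;   // defining interface
  bool is_abstract;     // abstract re-declarations are not valid candidates
};

// Returns the unique valid candidate, or leaves 'error' describing why the
// generated overpass should throw instead (no candidate, or more than one).
static const Candidate* select_default(const std::vector<Candidate>& found,
                                       std::string* error) {
  const Candidate* selected = NULL;
  int valid = 0;
  for (size_t i = 0; i < found.size(); ++i) {
    if (!found[i].is_abstract) {
      selected = &found[i];
      ++valid;
    }
  }
  if (valid == 1) return selected;
  *error = (valid == 0) ? "no default method candidates"
                        : "more than one valid candidate";
  return NULL;
}

int main() {
  std::vector<Candidate> found;
  Candidate a = { "InterfaceA", false };   // concrete default method
  Candidate b = { "InterfaceB", true  };   // abstract re-declaration
  found.push_back(a);
  found.push_back(b);
  std::string error;
  const Candidate* target = select_default(found, &error);
  printf("%s\n", (target != NULL) ? target->holder.c_str() : error.c_str());
  return 0;
}
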
@ -997,11 +761,7 @@ void DefaultMethods::generate_default_methods(
}
#endif // ndef PRODUCT
if (ParseGenericDefaults) {
generate_generic_defaults(klass, empty_slots, slot, i, CHECK);
} else {
generate_erased_defaults(klass, empty_slots, slot, CHECK);
}
generate_erased_defaults(klass, empty_slots, slot, CHECK);
}
#ifndef PRODUCT
if (TraceDefaultMethods) {
@ -1019,13 +779,13 @@ void DefaultMethods::generate_default_methods(
}
/**
* Generic analysis was used upon interface '_target' and found a unique
* default method candidate with generic signature '_method_desc'. This
* Interface inheritance rules were used to find a unique default method
* candidate for the resolved class. This
* method is only viable if it would also be in the set of default method
* candidates if we ran a full analysis on the current class.
*
* The only reason that the method would not be in the set of candidates for
* the current class is if that there's another covariantly matching method
* the current class is that there's another matching method
* which is "more specific" than the found method -- i.e., one could find a
* path in the interface hierarchy in which the matching method appears
* before we get to '_target'.
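
A minimal sketch of that shadowing test, under invented types rather than the real ShadowChecker: walking one inheritance path from the current class down to the superinterface holding the candidate, the candidate is rejected if any interface strictly in between declares a method with the same name and erased signature.

#include <cstdio>
#include <string>
#include <vector>

struct Iface {
  std::string name;
  std::vector<std::string> declared;  // erased name+signature strings
};

// 'path' runs from the current class down to the interface holding the
// candidate; anything declared strictly in between shadows the candidate.
static bool path_has_shadow(const std::vector<Iface>& path,
                            const std::string& method) {
  for (size_t i = 1; i + 1 < path.size(); ++i) {
    const std::vector<std::string>& d = path[i].declared;
    for (size_t j = 0; j < d.size(); ++j) {
      if (d[j] == method) return true;
    }
  }
  return false;
}

int main() {
  Iface c = { "C", std::vector<std::string>() };            // current class
  Iface j = { "J", std::vector<std::string>(1, "m()V") };   // redeclares m()
  Iface i = { "I", std::vector<std::string>(1, "m()V") };   // holds the candidate
  std::vector<Iface> path;
  path.push_back(c);
  path.push_back(j);
  path.push_back(i);
  printf("shadowed: %s\n", path_has_shadow(path, "m()V") ? "yes" : "no");
  return 0;
}
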
@ -1110,49 +870,6 @@ class ErasedShadowChecker : public ShadowChecker {
: ShadowChecker(thread, name, holder, target) {}
};
class GenericShadowChecker : public ShadowChecker {
private:
generic::DescriptorCache* _cache;
generic::MethodDescriptor* _method_desc;
bool path_has_shadow() {
generic::Context ctx(_cache);
for (int i = current_depth() - 1; i > 0; --i) {
InstanceKlass* ik = class_at_depth(i);
InstanceKlass* sub = class_at_depth(i + 1);
ctx.apply_type_arguments(sub, ik, THREAD);
if (ik->is_interface()) {
int end;
int start = ik->find_method_by_name(_method_name, &end);
if (start != -1) {
for (int j = start; j < end; ++j) {
Method* mo = ik->methods()->at(j);
generic::MethodDescriptor* md = _cache->descriptor_for(mo, THREAD);
if (_method_desc->covariant_match(md, &ctx)) {
return true;
}
}
}
}
}
return false;
}
public:
GenericShadowChecker(generic::DescriptorCache* cache, Thread* thread,
Symbol* name, InstanceKlass* holder, generic::MethodDescriptor* desc,
InstanceKlass* target)
: ShadowChecker(thread, name, holder, target) {
_cache = cache;
_method_desc = desc;
}
};
// Find the unique qualified candidate from the perspective of the super_class
// which is the resolved_klass, which must be an immediate superinterface
// of klass
@ -1166,103 +883,48 @@ Method* find_erased_super_default(InstanceKlass* current_class, InstanceKlass* s
if (family != NULL) {
family->determine_target(current_class, CHECK_NULL); // get target from current_class
}
if (family->has_target()) {
Method* target = family->get_selected_target();
InstanceKlass* holder = InstanceKlass::cast(target->method_holder());
if (family->has_target()) {
Method* target = family->get_selected_target();
InstanceKlass* holder = InstanceKlass::cast(target->method_holder());
// Verify that the identified method is valid from the context of
// the current class, which is the caller class for invokespecial
// link resolution, i.e. ensure that it is not shadowed.
// You can use invokespecial to disambiguate interface methods, but
// you cannot use it to skip over an interface method that would shadow it.
ErasedShadowChecker checker(THREAD, target->name(), holder, super_class);
checker.run(current_class);
// Verify that the identified method is valid from the context of
// the current class, which is the caller class for invokespecial
// link resolution, i.e. ensure that it is not shadowed.
// You can use invokespecial to disambiguate interface methods, but
// you cannot use it to skip over an interface method that would shadow it.
ErasedShadowChecker checker(THREAD, target->name(), holder, super_class);
checker.run(current_class);
if (checker.found_shadow()) {
if (checker.found_shadow()) {
#ifndef PRODUCT
if (TraceDefaultMethods) {
tty->print_cr(" Only candidate found was shadowed.");
}
if (TraceDefaultMethods) {
tty->print_cr(" Only candidate found was shadowed.");
}
#endif // ndef PRODUCT
THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(),
"Accessible default method not found", NULL);
THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(),
"Accessible default method not found", NULL);
} else {
#ifndef PRODUCT
if (TraceDefaultMethods) {
family->print_sig_on(tty, target->signature(), 1);
}
#endif // ndef PRODUCT
return target;
}
} else {
#ifndef PRODUCT
if (TraceDefaultMethods) {
family->print_sig_on(tty, target->signature(), 1);
}
#endif // ndef PRODUCT
return target;
}
assert(family->throws_exception(), "must have target or throw");
THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(),
family->get_exception_message()->as_C_string(), NULL);
}
} else {
assert(family->throws_exception(), "must have target or throw");
THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(),
family->get_exception_message()->as_C_string(), NULL);
// no method found
ResourceMark rm(THREAD);
THROW_MSG_(vmSymbols::java_lang_NoSuchMethodError(),
Method::name_and_sig_as_C_string(current_class,
method_name, sig), NULL);
}
}
// super_class is assumed to be the direct super of current_class
Method* find_generic_super_default( InstanceKlass* current_class,
InstanceKlass* super_class,
Symbol* method_name, Symbol* sig, TRAPS) {
generic::DescriptorCache cache;
generic::Context ctx(&cache);
// Prime the initial generic context for current -> super_class
ctx.apply_type_arguments(current_class, super_class, CHECK_NULL);
FindMethodsByGenericSig visitor(&cache, method_name, &ctx, CHECK_NULL);
visitor.run(super_class);
GrowableArray<GenericMethodFamily*> families;
visitor.get_discovered_families(&families);
#ifndef PRODUCT
if (TraceDefaultMethods) {
print_generic_families(&families, sig);
}
#endif // ndef PRODUCT
GenericMethodFamily* selected_family = NULL;
for (int i = 0; i < families.length(); ++i) {
GenericMethodFamily* lm = families.at(i);
if (lm->contains_signature(sig)) {
lm->determine_target(current_class, CHECK_NULL);
selected_family = lm;
}
}
if (selected_family->has_target()) {
Method* target = selected_family->get_selected_target();
InstanceKlass* holder = InstanceKlass::cast(target->method_holder());
// Verify that the identified method is valid from the context of
// the current class
GenericShadowChecker checker(&cache, THREAD, target->name(),
holder, selected_family->descriptor(), super_class);
checker.run(current_class);
if (checker.found_shadow()) {
#ifndef PRODUCT
if (TraceDefaultMethods) {
tty->print_cr(" Only candidate found was shadowed.");
}
#endif // ndef PRODUCT
THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(),
"Accessible default method not found", NULL);
} else {
return target;
}
} else {
assert(selected_family->throws_exception(), "must have target or throw");
THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(),
selected_family->get_exception_message()->as_C_string(), NULL);
}
}
// This is called during linktime when we find an invokespecial call that
// refers to a direct superinterface. It indicates that we should find the
// default method in the hierarchy of that superinterface, and if that method
@ -1296,13 +958,8 @@ Method* DefaultMethods::find_super_default(
assert(super_class->is_interface(), "only call for default methods");
Method* target = NULL;
if (ParseGenericDefaults) {
target = find_generic_super_default(current_class, super_class,
method_name, sig, CHECK_NULL);
} else {
target = find_erased_super_default(current_class, super_class,
method_name, sig, CHECK_NULL);
}
target = find_erased_super_default(current_class, super_class,
method_name, sig, CHECK_NULL);
#ifndef PRODUCT
if (target != NULL) {

File diff suppressed because it is too large

@ -1,467 +0,0 @@
/*
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_CLASSFILE_GENERICSIGNATURES_HPP
#define SHARE_VM_CLASSFILE_GENERICSIGNATURES_HPP
#include "classfile/symbolTable.hpp"
#include "memory/allocation.hpp"
#include "runtime/signature.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/resourceHash.hpp"
class stringStream;
namespace generic {
class Identifier;
class ClassDescriptor;
class MethodDescriptor;
class TypeParameter; // a formal type parameter declared in generic signatures
class TypeArgument; // The "type value" passed to fill parameters in supertypes
class TypeVariable; // A usage of a type parameter as a value
/**
* Example:
*
* <T, V> class Foo extends Bar<String> { int m(V v) {} }
* ^^^^^^ ^^^^^^ ^^
* type parameters type argument type variable
*
* Note that a type variable could be passed as an argument too:
* <T, V> class Foo extends Bar<T> { int m(V v) {} }
* ^^^
* type argument's value is a type variable
*/
class Type;
class ClassType;
class ArrayType;
class PrimitiveType;
class Context;
class DescriptorCache;
class DescriptorStream;
class Identifier : public ResourceObj {
private:
Symbol* _sym;
int _begin;
int _end;
public:
Identifier(Symbol* sym, int begin, int end) :
_sym(sym), _begin(begin), _end(end) {}
bool equals(Identifier* other);
bool equals(Symbol* sym);
#ifndef PRODUCT
void print_on(outputStream* str) const;
#endif // ndef PRODUCT
};
class Descriptor : public ResourceObj {
protected:
GrowableArray<TypeParameter*> _type_parameters;
ClassDescriptor* _outer_class;
Descriptor(GrowableArray<TypeParameter*>& params,
ClassDescriptor* outer)
: _type_parameters(params), _outer_class(outer) {}
public:
ClassDescriptor* outer_class() { return _outer_class; }
void set_outer_class(ClassDescriptor* sig) { _outer_class = sig; }
virtual ClassDescriptor* as_class_signature() { return NULL; }
virtual MethodDescriptor* as_method_signature() { return NULL; }
bool is_class_signature() { return as_class_signature() != NULL; }
bool is_method_signature() { return as_method_signature() != NULL; }
GrowableArray<TypeParameter*>& type_parameters() {
return _type_parameters;
}
TypeParameter* find_type_parameter(Identifier* id, int* param_depth);
virtual void bind_variables_to_parameters() = 0;
#ifndef PRODUCT
virtual void print_on(outputStream* str) const = 0;
#endif
};
class ClassDescriptor : public Descriptor {
private:
ClassType* _super;
GrowableArray<ClassType*> _interfaces;
MethodDescriptor* _outer_method;
ClassDescriptor(GrowableArray<TypeParameter*>& ftp, ClassType* scs,
GrowableArray<ClassType*>& sis, ClassDescriptor* outer_class = NULL,
MethodDescriptor* outer_method = NULL)
: Descriptor(ftp, outer_class), _super(scs), _interfaces(sis),
_outer_method(outer_method) {}
static u2 get_outer_class_index(InstanceKlass* k, TRAPS);
static ClassDescriptor* parse_generic_signature(Klass* k, Symbol* original_name, TRAPS);
public:
virtual ClassDescriptor* as_class_signature() { return this; }
MethodDescriptor* outer_method() { return _outer_method; }
void set_outer_method(MethodDescriptor* m) { _outer_method = m; }
ClassType* super() { return _super; }
ClassType* interface_desc(Symbol* sym);
static ClassDescriptor* parse_generic_signature(Klass* k, TRAPS);
static ClassDescriptor* parse_generic_signature(Symbol* sym);
// For use in superclass chains in positions where there is no generic info
static ClassDescriptor* placeholder(InstanceKlass* klass);
#ifndef PRODUCT
void print_on(outputStream* str) const;
#endif
ClassDescriptor* canonicalize(Context* ctx);
// Linking sets the position index in any contained TypeVariable type
// to correspond to the location of that identifier in the formal type
// parameters.
void bind_variables_to_parameters();
};
class MethodDescriptor : public Descriptor {
private:
GrowableArray<Type*> _parameters;
Type* _return_type;
GrowableArray<Type*> _throws;
MethodDescriptor(GrowableArray<TypeParameter*>& ftp, ClassDescriptor* outer,
GrowableArray<Type*>& sigs, Type* rt, GrowableArray<Type*>& throws)
: Descriptor(ftp, outer), _parameters(sigs), _return_type(rt),
_throws(throws) {}
public:
static MethodDescriptor* parse_generic_signature(Method* m, ClassDescriptor* outer);
static MethodDescriptor* parse_generic_signature(Symbol* sym, ClassDescriptor* outer);
MethodDescriptor* as_method_signature() { return this; }
// Performs generic analysis on the method parameters to determine
// if both methods refer to the same argument types.
bool covariant_match(MethodDescriptor* other, Context* ctx);
// Returns a new method descriptor with all generic variables
// removed and replaced with whatever is indicated using the Context.
MethodDescriptor* canonicalize(Context* ctx);
void bind_variables_to_parameters();
#ifndef PRODUCT
TempNewSymbol reify_signature(Context* ctx, TRAPS);
void print_on(outputStream* str) const;
#endif
};
class TypeParameter : public ResourceObj {
private:
Identifier* _identifier;
ClassType* _class_bound;
GrowableArray<ClassType*> _interface_bounds;
// The position is the ordinal location of the parameter within the
// formal parameter list (excluding outer classes). It is only set for
// formal type parameters that are associated with a class -- method
// type parameters are left as -1. When resolving a generic variable to
// find the actual type, this index is used to access the generic type
// argument in the provided context object.
int _position; // Assigned during variable linking
TypeParameter(Identifier* id, ClassType* class_bound,
GrowableArray<ClassType*>& interface_bounds) :
_identifier(id), _class_bound(class_bound),
_interface_bounds(interface_bounds), _position(-1) {}
public:
static TypeParameter* parse_generic_signature(DescriptorStream* str);
ClassType* bound();
int position() { return _position; }
void bind_variables_to_parameters(Descriptor* sig, int position);
Identifier* identifier() { return _identifier; }
Type* resolve(Context* ctx, int inner_depth, int ctx_depth);
TypeParameter* canonicalize(Context* ctx, int ctx_depth);
#ifndef PRODUCT
void print_on(outputStream* str) const;
#endif
};
class Type : public ResourceObj {
public:
static Type* parse_generic_signature(DescriptorStream* str);
virtual ClassType* as_class() { return NULL; }
virtual TypeVariable* as_variable() { return NULL; }
virtual ArrayType* as_array() { return NULL; }
virtual PrimitiveType* as_primitive() { return NULL; }
virtual bool covariant_match(Type* gt, Context* ctx) = 0;
virtual Type* canonicalize(Context* ctx, int ctx_depth) = 0;
virtual void bind_variables_to_parameters(Descriptor* sig) = 0;
#ifndef PRODUCT
virtual void reify_signature(stringStream* ss, Context* ctx) = 0;
virtual void print_on(outputStream* str) const = 0;
#endif
};
class ClassType : public Type {
friend class ClassDescriptor;
protected:
Identifier* _identifier;
GrowableArray<TypeArgument*> _type_arguments;
ClassType* _outer_class;
ClassType(Identifier* identifier,
GrowableArray<TypeArgument*>& args,
ClassType* outer)
: _identifier(identifier), _type_arguments(args), _outer_class(outer) {}
// Returns true if there are inner classes to read
static Identifier* parse_generic_signature_simple(
GrowableArray<TypeArgument*>* args,
bool* has_inner, DescriptorStream* str);
static ClassType* parse_generic_signature(ClassType* outer,
DescriptorStream* str);
static ClassType* from_symbol(Symbol* sym);
public:
ClassType* as_class() { return this; }
static ClassType* parse_generic_signature(DescriptorStream* str);
static ClassType* java_lang_Object();
Identifier* identifier() { return _identifier; }
int type_arguments_length() { return _type_arguments.length(); }
TypeArgument* type_argument_at(int i);
virtual ClassType* outer_class() { return _outer_class; }
bool covariant_match(Type* gt, Context* ctx);
ClassType* canonicalize(Context* ctx, int context_depth);
void bind_variables_to_parameters(Descriptor* sig);
#ifndef PRODUCT
void reify_signature(stringStream* ss, Context* ctx);
void print_on(outputStream* str) const;
#endif
};
class TypeVariable : public Type {
private:
Identifier* _id;
TypeParameter* _parameter; // assigned during linking
// how many steps "out" from inner classes, -1 if method
int _inner_depth;
TypeVariable(Identifier* id)
: _id(id), _parameter(NULL), _inner_depth(0) {}
public:
TypeVariable* as_variable() { return this; }
static TypeVariable* parse_generic_signature(DescriptorStream* str);
Identifier* identifier() { return _id; }
TypeParameter* parameter() { return _parameter; }
int inner_depth() { return _inner_depth; }
void bind_variables_to_parameters(Descriptor* sig);
Type* resolve(Context* ctx, int ctx_depth);
bool covariant_match(Type* gt, Context* ctx);
Type* canonicalize(Context* ctx, int ctx_depth);
#ifndef PRODUCT
void reify_signature(stringStream* ss, Context* ctx);
void print_on(outputStream* str) const;
#endif
};
class ArrayType : public Type {
private:
Type* _base;
ArrayType(Type* base) : _base(base) {}
public:
ArrayType* as_array() { return this; }
static ArrayType* parse_generic_signature(DescriptorStream* str);
bool covariant_match(Type* gt, Context* ctx);
ArrayType* canonicalize(Context* ctx, int ctx_depth);
void bind_variables_to_parameters(Descriptor* sig);
#ifndef PRODUCT
void reify_signature(stringStream* ss, Context* ctx);
void print_on(outputStream* str) const;
#endif
};
class PrimitiveType : public Type {
friend class Type;
private:
char _type; // includes V for void
PrimitiveType(char& type) : _type(type) {}
public:
PrimitiveType* as_primitive() { return this; }
bool covariant_match(Type* gt, Context* ctx);
PrimitiveType* canonicalize(Context* ctx, int ctx_depth);
void bind_variables_to_parameters(Descriptor* sig);
#ifndef PRODUCT
void reify_signature(stringStream* ss, Context* ctx);
void print_on(outputStream* str) const;
#endif
};
class TypeArgument : public ResourceObj {
private:
Type* _lower_bound;
Type* _upper_bound; // may be null or == _lower_bound
TypeArgument(Type* lower_bound, Type* upper_bound)
: _lower_bound(lower_bound), _upper_bound(upper_bound) {}
public:
static TypeArgument* parse_generic_signature(DescriptorStream* str);
Type* lower_bound() { return _lower_bound; }
Type* upper_bound() { return _upper_bound; }
void bind_variables_to_parameters(Descriptor* sig);
TypeArgument* canonicalize(Context* ctx, int ctx_depth);
bool covariant_match(TypeArgument* a, Context* ctx);
#ifndef PRODUCT
void print_on(outputStream* str) const;
#endif
};
class Context : public ResourceObj {
private:
DescriptorCache* _cache;
GrowableArray<ClassType*> _type_arguments;
void reset_to_mark(int size);
public:
// When this object goes out of scope or 'destroy' is
// called, then the application of the type to the
// context is wound-back (unless it's been deactivated).
class Mark : public StackObj {
private:
mutable Context* _context;
int _marked_size;
bool is_active() const { return _context != NULL; }
void deactivate() const { _context = NULL; }
public:
Mark() : _context(NULL), _marked_size(0) {}
Mark(Context* ctx, int sz) : _context(ctx), _marked_size(sz) {}
Mark(const Mark& m) : _context(m._context), _marked_size(m._marked_size) {
m.deactivate(); // Ownership is transferred
}
Mark& operator=(const Mark& cm) {
destroy();
_context = cm._context;
_marked_size = cm._marked_size;
cm.deactivate();
return *this;
}
void destroy();
~Mark() { destroy(); }
};
Context(DescriptorCache* cache) : _cache(cache) {}
Mark mark() { return Mark(this, _type_arguments.length()); }
void apply_type_arguments(InstanceKlass* current, InstanceKlass* super,TRAPS);
ClassType* at_depth(int i) const;
#ifndef PRODUCT
void print_on(outputStream* str) const;
#endif
};
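
As a side note, the rewind-on-destroy idiom that the Mark comment above describes can be sketched in plain C++ (this is not the removed HotSpot code): remember the container's size when the mark is taken, truncate back to it when the mark is destroyed, and let copying transfer ownership so only one mark rewinds.

#include <cstdio>
#include <vector>

class RewindMark {
  mutable std::vector<int>* _target;   // NULL once ownership has been transferred
  size_t _marked_size;
 public:
  RewindMark(std::vector<int>* target)
    : _target(target), _marked_size(target->size()) {}
  RewindMark(const RewindMark& other)
    : _target(other._target), _marked_size(other._marked_size) {
    other._target = NULL;              // copying transfers ownership
  }
  ~RewindMark() {
    if (_target != NULL) {
      _target->resize(_marked_size);   // wind the container back to the mark
    }
  }
};

int main() {
  std::vector<int> ctx;
  ctx.push_back(1);
  {
    RewindMark m(&ctx);
    ctx.push_back(2);
    ctx.push_back(3);
  }                                       // mark destroyed: entries 2 and 3 removed
  printf("%u\n", (unsigned) ctx.size());  // prints 1
  return 0;
}
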
/**
* Contains a cache of descriptors for classes and methods so they can be
* looked-up instead of reparsing each time they are needed.
*/
class DescriptorCache : public ResourceObj {
private:
ResourceHashtable<InstanceKlass*, ClassDescriptor*> _class_descriptors;
ResourceHashtable<Method*, MethodDescriptor*> _method_descriptors;
public:
ClassDescriptor* descriptor_for(InstanceKlass* ikh, TRAPS);
MethodDescriptor* descriptor_for(Method* mh, ClassDescriptor* cd, TRAPS);
// Class descriptor derived from method holder
MethodDescriptor* descriptor_for(Method* mh, TRAPS);
};
} // namespace generic
#endif // SHARE_VM_CLASSFILE_GENERICSIGNATURES_HPP

@ -188,6 +188,10 @@ bool Verifier::verify(instanceKlassHandle klass, Verifier::Mode mode, bool shoul
bool Verifier::is_eligible_for_verification(instanceKlassHandle klass, bool should_verify_class) {
Symbol* name = klass->name();
Klass* refl_magic_klass = SystemDictionary::reflect_MagicAccessorImpl_klass();
Klass* lambda_magic_klass = SystemDictionary::lambda_MagicLambdaImpl_klass();
bool is_reflect = refl_magic_klass != NULL && klass->is_subtype_of(refl_magic_klass);
bool is_lambda = lambda_magic_klass != NULL && klass->is_subtype_of(lambda_magic_klass);
return (should_verify_for(klass->class_loader(), should_verify_class) &&
// return if the class is a bootstrapping class
@ -210,9 +214,9 @@ bool Verifier::is_eligible_for_verification(instanceKlassHandle klass, bool shou
// sun/reflect/SerializationConstructorAccessor.
// NOTE: this is called too early in the bootstrapping process to be
// guarded by Universe::is_gte_jdk14x_version()/UseNewReflection.
(refl_magic_klass == NULL ||
!klass->is_subtype_of(refl_magic_klass) ||
VerifyReflectionBytecodes)
// Also for lambda generated code, gte jdk8
(!is_reflect || VerifyReflectionBytecodes) &&
(!is_lambda || VerifyLambdaBytecodes)
);
}
@ -2318,9 +2322,6 @@ void ClassVerifier::verify_invoke_instructions(
types = 1 << JVM_CONSTANT_InvokeDynamic;
break;
case Bytecodes::_invokespecial:
types = (1 << JVM_CONSTANT_InterfaceMethodref) |
(1 << JVM_CONSTANT_Methodref);
break;
case Bytecodes::_invokestatic:
types = (_klass->major_version() < STATIC_METHOD_IN_INTERFACE_MAJOR_VERSION) ?
(1 << JVM_CONSTANT_Methodref) :

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -245,7 +245,7 @@ BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
}
void* BufferBlob::operator new(size_t s, unsigned size) {
void* BufferBlob::operator new(size_t s, unsigned size) throw() {
void* p = CodeCache::allocate(size);
return p;
}
@ -347,14 +347,14 @@ RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
}
void* RuntimeStub::operator new(size_t s, unsigned size) {
void* RuntimeStub::operator new(size_t s, unsigned size) throw() {
void* p = CodeCache::allocate(size, true);
if (!p) fatal("Initial size of CodeCache is too small");
return p;
}
// operator new shared by all singletons:
void* SingletonBlob::operator new(size_t s, unsigned size) {
void* SingletonBlob::operator new(size_t s, unsigned size) throw() {
void* p = CodeCache::allocate(size, true);
if (!p) fatal("Initial size of CodeCache is too small");
return p;

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -209,7 +209,7 @@ class BufferBlob: public CodeBlob {
BufferBlob(const char* name, int size);
BufferBlob(const char* name, int size, CodeBuffer* cb);
void* operator new(size_t s, unsigned size);
void* operator new(size_t s, unsigned size) throw();
public:
// Creation
@ -283,7 +283,7 @@ class RuntimeStub: public CodeBlob {
bool caller_must_gc_arguments
);
void* operator new(size_t s, unsigned size);
void* operator new(size_t s, unsigned size) throw();
public:
// Creation
@ -321,7 +321,7 @@ class SingletonBlob: public CodeBlob {
friend class VMStructs;
protected:
void* operator new(size_t s, unsigned size);
void* operator new(size_t s, unsigned size) throw();
public:
SingletonBlob(

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,7 +38,7 @@ class DIR_Chunk {
int _length; // number of bytes in the stream
int _hash; // hash of stream bytes (for quicker reuse)
void* operator new(size_t ignore, DebugInformationRecorder* dir) {
void* operator new(size_t ignore, DebugInformationRecorder* dir) throw() {
assert(ignore == sizeof(DIR_Chunk), "");
if (dir->_next_chunk >= dir->_next_chunk_limit) {
const int CHUNK = 100;

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -93,18 +93,21 @@ HS_DTRACE_PROBE_DECL6(hotspot, compiled__method__unload,
#endif
bool nmethod::is_compiled_by_c1() const {
if (compiler() == NULL || method() == NULL) return false; // can happen during debug printing
if (is_native_method()) return false;
if (compiler() == NULL) {
return false;
}
return compiler()->is_c1();
}
bool nmethod::is_compiled_by_c2() const {
if (compiler() == NULL || method() == NULL) return false; // can happen during debug printing
if (is_native_method()) return false;
if (compiler() == NULL) {
return false;
}
return compiler()->is_c2();
}
bool nmethod::is_compiled_by_shark() const {
if (is_native_method()) return false;
assert(compiler() != NULL, "must be");
if (compiler() == NULL) {
return false;
}
return compiler()->is_shark();
}
@ -800,7 +803,7 @@ nmethod::nmethod(
}
#endif // def HAVE_DTRACE_H
void* nmethod::operator new(size_t size, int nmethod_size) throw () {
void* nmethod::operator new(size_t size, int nmethod_size) throw() {
// Not critical, may return null if there is too little contiguous memory
return CodeCache::allocate(nmethod_size);
}
@ -1401,6 +1404,9 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
// nmethods aren't scanned for GC.
_oops_are_stale = true;
#endif
// the Method may be reclaimed by class unloading now that the
// nmethod is in zombie state
set_method(NULL);
} else {
assert(state == not_entrant, "other cases may need to be handled differently");
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -265,7 +265,7 @@ class nmethod : public CodeBlob {
int comp_level);
// helper methods
void* operator new(size_t size, int nmethod_size);
void* operator new(size_t size, int nmethod_size) throw();
const char* reloc_string_for(u_char* begin, u_char* end);
// Returns true if this thread changed the state of the nmethod or

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -677,7 +677,7 @@ class Relocation VALUE_OBJ_CLASS_SPEC {
}
public:
void* operator new(size_t size, const RelocationHolder& holder) {
void* operator new(size_t size, const RelocationHolder& holder) throw() {
if (size > sizeof(holder._relocbuf)) guarantee_size();
assert((void* const *)holder.reloc() == &holder._relocbuf[0], "ptrs must agree");
return holder.reloc();

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -49,7 +49,7 @@ VMReg VtableStub::_receiver_location = VMRegImpl::Bad();
static int num_vtable_chunks = 0;
void* VtableStub::operator new(size_t size, int code_size) {
void* VtableStub::operator new(size_t size, int code_size) throw() {
assert(size == sizeof(VtableStub), "mismatched size");
num_vtable_chunks++;
// compute real VtableStub size (rounded to nearest word)

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -46,7 +46,7 @@ class VtableStub {
bool _is_vtable_stub; // True if vtable stub, false if itable stub
/* code follows here */ // The vtableStub code
void* operator new(size_t size, int code_size);
void* operator new(size_t size, int code_size) throw();
VtableStub(bool is_vtable_stub, int index)
: _next(NULL), _is_vtable_stub(is_vtable_stub),

@ -1718,7 +1718,7 @@ static void codecache_print(bool detailed)
CodeCache::print_summary(&s, detailed);
}
ttyLocker ttyl;
tty->print_cr(s.as_string());
tty->print(s.as_string());
}
// ------------------------------------------------------------------

@ -2493,11 +2493,11 @@ void G1CollectedHeap::register_concurrent_cycle_start(jlong start_time) {
void G1CollectedHeap::register_concurrent_cycle_end() {
if (_concurrent_cycle_started) {
_gc_timer_cm->register_gc_end(os::elapsed_counter());
if (_cm->has_aborted()) {
_gc_tracer_cm->report_concurrent_mode_failure();
}
_gc_timer_cm->register_gc_end(os::elapsed_counter());
_gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
_concurrent_cycle_started = false;

@ -168,7 +168,15 @@ G1CollectorPolicy::G1CollectorPolicy() :
// Set up the region size and associated fields. Given that the
// policy is created before the heap, we have to set this up here,
// so it's done as soon as possible.
HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
// It would have been natural to pass initial_heap_byte_size() and
// max_heap_byte_size() to setup_heap_region_size() but those have
// not been set up at this point since they should be aligned with
// the region size. So, there is a circular dependency here. We base
// the region size on the heap size, but the heap size should be
// aligned with the region size. To get around this we use the
// unaligned values for the heap.
HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
HeapRegionRemSet::setup_remset_size();
G1ErgoVerbose::initialize();

@ -149,18 +149,11 @@ void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
// many regions in the heap (based on the min heap size).
#define TARGET_REGION_NUMBER 2048
void HeapRegion::setup_heap_region_size(uintx min_heap_size) {
// region_size in bytes
void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
uintx region_size = G1HeapRegionSize;
if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
// We base the automatic calculation on the min heap size. This
// can be problematic if the spread between min and max is quite
// wide, imagine -Xms128m -Xmx32g. But, if we decided it based on
// the max size, the region size might be way too large for the
// min size. Either way, some users might have to set the region
// size manually for some -Xms / -Xmx combos.
region_size = MAX2(min_heap_size / TARGET_REGION_NUMBER,
size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
region_size = MAX2(average_heap_size / TARGET_REGION_NUMBER,
(uintx) MIN_REGION_SIZE);
}
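
To make the new sizing concrete, here is a small worked sketch (not HotSpot code): the 2048-region target comes from this file, the 1 MB minimum region size is assumed, and the power-of-two rounding and maximum clamp that the real code also applies are omitted. For the wide spread mentioned in the old comment, -Xms128m -Xmx32g, the average-based policy yields roughly 8 MB regions instead of sizing off the 128 MB minimum.

#include <cstdio>
#include <algorithm>

typedef unsigned long long u64;

static u64 region_size_for(u64 initial_heap, u64 max_heap) {
  const u64 target_region_number = 2048;     // TARGET_REGION_NUMBER above
  const u64 min_region_size = 1024 * 1024;   // assumed MIN_REGION_SIZE (1 MB)
  u64 average_heap_size = (initial_heap + max_heap) / 2;
  return std::max(average_heap_size / target_region_number, min_region_size);
}

int main() {
  // -Xms128m -Xmx32g: the average is ~16 GB, so the raw region size is ~8 MB
  // (the real code would then round it to a power of two and clamp it).
  u64 rs = region_size_for(128ULL << 20, 32ULL << 30);
  printf("%llu bytes (~%llu MB)\n", rs, rs >> 20);
  return 0;
}
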

@ -361,7 +361,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
// CardsPerRegion). All those fields are considered constant
// throughout the JVM's execution, therefore they should only be set
// up once during initialization time.
static void setup_heap_region_size(uintx min_heap_size);
static void setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size);
enum ClaimValues {
InitialClaimValue = 0,

@ -28,6 +28,7 @@
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "gc_implementation/shared/copyFailedInfo.hpp"
#include "runtime/os.hpp"
#include "trace/tracing.hpp"
#include "trace/traceBackend.hpp"
#if INCLUDE_ALL_GCS
@ -54,11 +55,12 @@ void GCTracer::send_garbage_collection_event() const {
}
void GCTracer::send_reference_stats_event(ReferenceType type, size_t count) const {
EventGCReferenceStatistics e;
EventGCReferenceStatistics e(UNTIMED);
if (e.should_commit()) {
e.set_gcId(_shared_gc_info.id());
e.set_type((u1)type);
e.set_count(count);
e.set_endtime(os::elapsed_counter());
e.commit();
}
}
@ -105,20 +107,22 @@ static TraceStructCopyFailed to_trace_struct(const CopyFailedInfo& cf_info) {
}
void YoungGCTracer::send_promotion_failed_event(const PromotionFailedInfo& pf_info) const {
EventPromotionFailed e;
EventPromotionFailed e(UNTIMED);
if (e.should_commit()) {
e.set_gcId(_shared_gc_info.id());
e.set_data(to_trace_struct(pf_info));
e.set_thread(pf_info.thread()->thread_id());
e.set_endtime(os::elapsed_counter());
e.commit();
}
}
// Common to CMS and G1
void OldGCTracer::send_concurrent_mode_failure_event() {
EventConcurrentModeFailure e;
EventConcurrentModeFailure e(UNTIMED);
if (e.should_commit()) {
e.set_gcId(_shared_gc_info.id());
e.set_endtime(os::elapsed_counter());
e.commit();
}
}
@ -136,7 +140,7 @@ void G1NewTracer::send_g1_young_gc_event() {
}
void G1NewTracer::send_evacuation_info_event(EvacuationInfo* info) {
EventEvacuationInfo e;
EventEvacuationInfo e(UNTIMED);
if (e.should_commit()) {
e.set_gcId(_shared_gc_info.id());
e.set_cSetRegions(info->collectionset_regions());
@ -147,15 +151,17 @@ void G1NewTracer::send_evacuation_info_event(EvacuationInfo* info) {
e.set_allocRegionsUsedAfter(info->alloc_regions_used_before() + info->bytes_copied());
e.set_bytesCopied(info->bytes_copied());
e.set_regionsFreed(info->regions_freed());
e.set_endtime(os::elapsed_counter());
e.commit();
}
}
void G1NewTracer::send_evacuation_failed_event(const EvacuationFailedInfo& ef_info) const {
EventEvacuationFailed e;
EventEvacuationFailed e(UNTIMED);
if (e.should_commit()) {
e.set_gcId(_shared_gc_info.id());
e.set_data(to_trace_struct(ef_info));
e.set_endtime(os::elapsed_counter());
e.commit();
}
}
@ -189,12 +195,13 @@ class GCHeapSummaryEventSender : public GCHeapSummaryVisitor {
void visit(const GCHeapSummary* heap_summary) const {
const VirtualSpaceSummary& heap_space = heap_summary->heap();
EventGCHeapSummary e;
EventGCHeapSummary e(UNTIMED);
if (e.should_commit()) {
e.set_gcId(_id);
e.set_when((u1)_when);
e.set_heapSpace(to_trace_struct(heap_space));
e.set_heapUsed(heap_summary->used());
e.set_endtime(os::elapsed_counter());
e.commit();
}
}
@ -209,7 +216,7 @@ class GCHeapSummaryEventSender : public GCHeapSummaryVisitor {
const SpaceSummary& from_space = ps_heap_summary->from();
const SpaceSummary& to_space = ps_heap_summary->to();
EventPSHeapSummary e;
EventPSHeapSummary e(UNTIMED);
if (e.should_commit()) {
e.set_gcId(_id);
e.set_when((u1)_when);
@ -220,6 +227,7 @@ class GCHeapSummaryEventSender : public GCHeapSummaryVisitor {
e.set_edenSpace(to_trace_struct(ps_heap_summary->eden()));
e.set_fromSpace(to_trace_struct(ps_heap_summary->from()));
e.set_toSpace(to_trace_struct(ps_heap_summary->to()));
e.set_endtime(os::elapsed_counter());
e.commit();
}
}
@ -241,13 +249,14 @@ static TraceStructMetaspaceSizes to_trace_struct(const MetaspaceSizes& sizes) {
}
void GCTracer::send_meta_space_summary_event(GCWhen::Type when, const MetaspaceSummary& meta_space_summary) const {
EventMetaspaceSummary e;
EventMetaspaceSummary e(UNTIMED);
if (e.should_commit()) {
e.set_gcId(_shared_gc_info.id());
e.set_when((u1) when);
e.set_metaspace(to_trace_struct(meta_space_summary.meta_space()));
e.set_dataSpace(to_trace_struct(meta_space_summary.data_space()));
e.set_classSpace(to_trace_struct(meta_space_summary.class_space()));
e.set_endtime(os::elapsed_counter());
e.commit();
}
}
@ -282,8 +291,6 @@ class PhaseSender : public PhaseVisitor {
default: /* Ignore sending this phase */ break;
}
}
#undef send_phase
};
void GCTracer::send_phase_events(TimePartitions* time_partitions) const {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -144,9 +144,9 @@ class AdaptivePaddedAverage : public AdaptiveWeightedAverage {
_padded_avg(0.0), _deviation(0.0), _padding(padding) {}
// Placement support
void* operator new(size_t ignored, void* p) { return p; }
void* operator new(size_t ignored, void* p) throw() { return p; }
// Allocator
void* operator new(size_t size) { return CHeapObj<mtGC>::operator new(size); }
void* operator new(size_t size) throw() { return CHeapObj<mtGC>::operator new(size); }
// Accessor
float padded_average() const { return _padded_avg; }

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -163,8 +163,8 @@ extern void safe_free (const char *file, unsigned line, void *ptr);
extern void *safe_calloc (const char *file, unsigned line, unsigned nitems, unsigned size);
extern void *safe_realloc(const char *file, unsigned line, void *ptr, unsigned size);
extern char *safe_strdup (const char *file, unsigned line, const char *src);
inline void *operator new( size_t size ) { return malloc(size); }
inline void operator delete( void *ptr ) { free(ptr); }
inline void *operator new( size_t size ) throw() { return malloc(size); }
inline void operator delete( void *ptr ) { free(ptr); }
#endif
//-----------------------------------------------------------------------------

@ -49,19 +49,19 @@
# include "os_bsd.inline.hpp"
#endif
void* StackObj::operator new(size_t size) { ShouldNotCallThis(); return 0; }
void StackObj::operator delete(void* p) { ShouldNotCallThis(); }
void* StackObj::operator new [](size_t size) { ShouldNotCallThis(); return 0; }
void StackObj::operator delete [](void* p) { ShouldNotCallThis(); }
void* StackObj::operator new(size_t size) throw() { ShouldNotCallThis(); return 0; }
void StackObj::operator delete(void* p) { ShouldNotCallThis(); }
void* StackObj::operator new [](size_t size) throw() { ShouldNotCallThis(); return 0; }
void StackObj::operator delete [](void* p) { ShouldNotCallThis(); }
void* _ValueObj::operator new(size_t size) { ShouldNotCallThis(); return 0; }
void _ValueObj::operator delete(void* p) { ShouldNotCallThis(); }
void* _ValueObj::operator new [](size_t size) { ShouldNotCallThis(); return 0; }
void _ValueObj::operator delete [](void* p) { ShouldNotCallThis(); }
void* _ValueObj::operator new(size_t size) throw() { ShouldNotCallThis(); return 0; }
void _ValueObj::operator delete(void* p) { ShouldNotCallThis(); }
void* _ValueObj::operator new [](size_t size) throw() { ShouldNotCallThis(); return 0; }
void _ValueObj::operator delete [](void* p) { ShouldNotCallThis(); }
void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
size_t word_size, bool read_only,
MetaspaceObj::Type type, TRAPS) {
MetaspaceObj::Type type, TRAPS) throw() {
// Klass has its own operator new
return Metaspace::allocate(loader_data, word_size, read_only,
type, CHECK_NULL);
@ -80,7 +80,7 @@ void MetaspaceObj::print_address_on(outputStream* st) const {
st->print(" {"INTPTR_FORMAT"}", this);
}
void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) {
void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) throw() {
address res;
switch (type) {
case C_HEAP:
@ -97,12 +97,12 @@ void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flag
return res;
}
void* ResourceObj::operator new [](size_t size, allocation_type type, MEMFLAGS flags) {
void* ResourceObj::operator new [](size_t size, allocation_type type, MEMFLAGS flags) throw() {
return (address) operator new(size, type, flags);
}
void* ResourceObj::operator new(size_t size, const std::nothrow_t& nothrow_constant,
allocation_type type, MEMFLAGS flags) {
allocation_type type, MEMFLAGS flags) throw() {
// Should only call this with std::nothrow; use the other operator new() otherwise
address res;
switch (type) {
@ -121,7 +121,7 @@ void* ResourceObj::operator new(size_t size, const std::nothrow_t& nothrow_cons
}
void* ResourceObj::operator new [](size_t size, const std::nothrow_t& nothrow_constant,
allocation_type type, MEMFLAGS flags) {
allocation_type type, MEMFLAGS flags) throw() {
return (address)operator new(size, nothrow_constant, type, flags);
}
@ -370,7 +370,7 @@ class ChunkPoolCleaner : public PeriodicTask {
//--------------------------------------------------------------------------------------
// Chunk implementation
void* Chunk::operator new (size_t requested_size, AllocFailType alloc_failmode, size_t length) {
void* Chunk::operator new (size_t requested_size, AllocFailType alloc_failmode, size_t length) throw() {
// requested_size is equal to sizeof(Chunk) but in order for the arena
// allocations to come out aligned as expected the size must be aligned
// to expected arena alignment.
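
A tiny sketch of the round-up the comment describes, using an assumed alignment value rather than HotSpot's actual constant.

#include <cstdio>
#include <cstddef>

// Round 'size' up to the next multiple of 'alignment' (a power of two).
static size_t align_up(size_t size, size_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t arena_alignment = 2 * sizeof(void*);   // assumed arena alignment
  printf("%u -> %u\n", (unsigned) 41, (unsigned) align_up(41, arena_alignment));
  return 0;
}
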
@ -478,18 +478,18 @@ Arena::~Arena() {
NOT_PRODUCT(Atomic::dec(&_instance_count);)
}
void* Arena::operator new(size_t size) {
void* Arena::operator new(size_t size) throw() {
assert(false, "Use dynamic memory type binding");
return NULL;
}
void* Arena::operator new (size_t size, const std::nothrow_t& nothrow_constant) {
void* Arena::operator new (size_t size, const std::nothrow_t& nothrow_constant) throw() {
assert(false, "Use dynamic memory type binding");
return NULL;
}
// dynamic memory type binding
void* Arena::operator new(size_t size, MEMFLAGS flags) {
void* Arena::operator new(size_t size, MEMFLAGS flags) throw() {
#ifdef ASSERT
void* p = (void*)AllocateHeap(size, flags|otArena, CALLER_PC);
if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
@ -499,7 +499,7 @@ void* Arena::operator new(size_t size, MEMFLAGS flags) {
#endif
}
void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) {
void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw() {
#ifdef ASSERT
void* p = os::malloc(size, flags|otArena, CALLER_PC);
if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
@ -688,22 +688,22 @@ void* Arena::internal_malloc_4(size_t x) {
// Define ALLOW_OPERATOR_NEW_USAGE for platforms on which the global operator new is allowed.
//
#ifndef ALLOW_OPERATOR_NEW_USAGE
void* operator new(size_t size){
void* operator new(size_t size) throw() {
assert(false, "Should not call global operator new");
return 0;
}
void* operator new [](size_t size){
void* operator new [](size_t size) throw() {
assert(false, "Should not call global operator new[]");
return 0;
}
void* operator new(size_t size, const std::nothrow_t& nothrow_constant){
void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
assert(false, "Should not call global operator new");
return 0;
}
void* operator new [](size_t size, std::nothrow_t& nothrow_constant){
void* operator new [](size_t size, std::nothrow_t& nothrow_constant) throw() {
assert(false, "Should not call global operator new[]");
return 0;
}
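
For context, a standalone sketch of why the empty exception specification matters for allocation functions that may return NULL (the motivation is assumed here: newer compilers warn when such an operator new is not declared non-throwing). With throw() declared, a new-expression whose allocation function returns NULL is well defined and simply yields NULL, so the caller can test for it.

#include <cstdio>
#include <cstddef>
#include <cstdlib>

class Example {
 public:
  // Non-throwing allocation: it may legitimately return NULL, so it carries
  // the empty exception specification that this patch adds throughout HotSpot.
  void* operator new(size_t size) throw() {
    return malloc(size);   // NULL on failure instead of throwing std::bad_alloc
  }
  void operator delete(void* p) { free(p); }
};

int main() {
  Example* e = new Example();  // well defined to yield NULL if allocation fails
  if (e != NULL) {
    delete e;
  }
  return 0;
}
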

@ -204,12 +204,12 @@ const bool NMT_track_callsite = false;
template <MEMFLAGS F> class CHeapObj ALLOCATION_SUPER_CLASS_SPEC {
public:
_NOINLINE_ void* operator new(size_t size, address caller_pc = 0);
_NOINLINE_ void* operator new(size_t size, address caller_pc = 0) throw();
_NOINLINE_ void* operator new (size_t size, const std::nothrow_t& nothrow_constant,
address caller_pc = 0);
_NOINLINE_ void* operator new [](size_t size, address caller_pc = 0);
address caller_pc = 0) throw();
_NOINLINE_ void* operator new [](size_t size, address caller_pc = 0) throw();
_NOINLINE_ void* operator new [](size_t size, const std::nothrow_t& nothrow_constant,
address caller_pc = 0);
address caller_pc = 0) throw();
void operator delete(void* p);
void operator delete [] (void* p);
};
@ -219,9 +219,9 @@ template <MEMFLAGS F> class CHeapObj ALLOCATION_SUPER_CLASS_SPEC {
class StackObj ALLOCATION_SUPER_CLASS_SPEC {
private:
void* operator new(size_t size);
void* operator new(size_t size) throw();
void operator delete(void* p);
void* operator new [](size_t size);
void* operator new [](size_t size) throw();
void operator delete [](void* p);
};
@ -245,9 +245,9 @@ class StackObj ALLOCATION_SUPER_CLASS_SPEC {
//
class _ValueObj {
private:
void* operator new(size_t size);
void* operator new(size_t size) throw();
void operator delete(void* p);
void* operator new [](size_t size);
void* operator new [](size_t size) throw();
void operator delete [](void* p);
};
@ -316,7 +316,7 @@ class MetaspaceObj {
void* operator new(size_t size, ClassLoaderData* loader_data,
size_t word_size, bool read_only,
Type type, Thread* thread);
Type type, Thread* thread) throw();
// can't use TRAPS from this header file.
void operator delete(void* p) { ShouldNotCallThis(); }
};
@ -339,7 +339,7 @@ class Chunk: CHeapObj<mtChunk> {
Chunk* _next; // Next Chunk in list
const size_t _len; // Size of this Chunk
public:
void* operator new(size_t size, AllocFailType alloc_failmode, size_t length);
void* operator new(size_t size, AllocFailType alloc_failmode, size_t length) throw();
void operator delete(void* p);
Chunk(size_t length);
@ -422,12 +422,12 @@ protected:
char* hwm() const { return _hwm; }
// new operators
void* operator new (size_t size);
void* operator new (size_t size, const std::nothrow_t& nothrow_constant);
void* operator new (size_t size) throw();
void* operator new (size_t size, const std::nothrow_t& nothrow_constant) throw();
// dynamic memory type tagging
void* operator new(size_t size, MEMFLAGS flags);
void* operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags);
void* operator new(size_t size, MEMFLAGS flags) throw();
void* operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw();
void operator delete(void* p);
// Fast allocate in the arena. Common case is: pointer test + increment.
@ -583,44 +583,44 @@ class ResourceObj ALLOCATION_SUPER_CLASS_SPEC {
#endif // ASSERT
public:
void* operator new(size_t size, allocation_type type, MEMFLAGS flags);
void* operator new [](size_t size, allocation_type type, MEMFLAGS flags);
void* operator new(size_t size, allocation_type type, MEMFLAGS flags) throw();
void* operator new [](size_t size, allocation_type type, MEMFLAGS flags) throw();
void* operator new(size_t size, const std::nothrow_t& nothrow_constant,
allocation_type type, MEMFLAGS flags);
allocation_type type, MEMFLAGS flags) throw();
void* operator new [](size_t size, const std::nothrow_t& nothrow_constant,
allocation_type type, MEMFLAGS flags);
allocation_type type, MEMFLAGS flags) throw();
void* operator new(size_t size, Arena *arena) {
void* operator new(size_t size, Arena *arena) throw() {
address res = (address)arena->Amalloc(size);
DEBUG_ONLY(set_allocation_type(res, ARENA);)
return res;
}
void* operator new [](size_t size, Arena *arena) {
void* operator new [](size_t size, Arena *arena) throw() {
address res = (address)arena->Amalloc(size);
DEBUG_ONLY(set_allocation_type(res, ARENA);)
return res;
}
void* operator new(size_t size) {
void* operator new(size_t size) throw() {
address res = (address)resource_allocate_bytes(size);
DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);)
return res;
}
void* operator new(size_t size, const std::nothrow_t& nothrow_constant) {
void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
address res = (address)resource_allocate_bytes(size, AllocFailStrategy::RETURN_NULL);
DEBUG_ONLY(if (res != NULL) set_allocation_type(res, RESOURCE_AREA);)
return res;
}
void* operator new [](size_t size) {
void* operator new [](size_t size) throw() {
address res = (address)resource_allocate_bytes(size);
DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);)
return res;
}
void* operator new [](size_t size, const std::nothrow_t& nothrow_constant) {
void* operator new [](size_t size, const std::nothrow_t& nothrow_constant) throw() {
address res = (address)resource_allocate_bytes(size, AllocFailStrategy::RETURN_NULL);
DEBUG_ONLY(if (res != NULL) set_allocation_type(res, RESOURCE_AREA);)
return res;

@ -85,7 +85,7 @@ inline void FreeHeap(void* p, MEMFLAGS memflags = mtInternal) {
template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size,
address caller_pc){
address caller_pc) throw() {
void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC));
#ifdef ASSERT
if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
@ -94,7 +94,7 @@ template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size,
}
template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size,
const std::nothrow_t& nothrow_constant, address caller_pc) {
const std::nothrow_t& nothrow_constant, address caller_pc) throw() {
void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC),
AllocFailStrategy::RETURN_NULL);
#ifdef ASSERT
@ -104,12 +104,12 @@ template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size,
}
template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
address caller_pc){
address caller_pc) throw() {
return CHeapObj<F>::operator new(size, caller_pc);
}
template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
const std::nothrow_t& nothrow_constant, address caller_pc) {
const std::nothrow_t& nothrow_constant, address caller_pc) throw() {
return CHeapObj<F>::operator new(size, nothrow_constant, caller_pc);
}

@ -55,6 +55,7 @@ static void fail(const char *msg, va_list ap) {
" shared archive file.\n");
jio_vfprintf(defaultStream::error_stream(), msg, ap);
jio_fprintf(defaultStream::error_stream(), "\n");
// Do not change the text of the below message because some tests check for it.
vm_exit_during_initialization("Unable to use shared archive.", NULL);
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -102,11 +102,11 @@ MemRegion MemRegion::minus(const MemRegion mr2) const {
return MemRegion();
}
void* MemRegion::operator new(size_t size) {
void* MemRegion::operator new(size_t size) throw() {
return (address)AllocateHeap(size, mtGC, 0, AllocFailStrategy::RETURN_NULL);
}
void* MemRegion::operator new [](size_t size) {
void* MemRegion::operator new [](size_t size) throw() {
return (address)AllocateHeap(size, mtGC, 0, AllocFailStrategy::RETURN_NULL);
}
void MemRegion::operator delete(void* p) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -94,8 +94,8 @@ public:
size_t word_size() const { return _word_size; }
bool is_empty() const { return word_size() == 0; }
void* operator new(size_t size);
void* operator new [](size_t size);
void* operator new(size_t size) throw();
void* operator new [](size_t size) throw();
void operator delete(void* p);
void operator delete [](void* p);
};
@ -111,13 +111,13 @@ public:
class MemRegionClosureRO: public MemRegionClosure {
public:
void* operator new(size_t size, ResourceObj::allocation_type type, MEMFLAGS flags) {
void* operator new(size_t size, ResourceObj::allocation_type type, MEMFLAGS flags) throw() {
return ResourceObj::operator new(size, type, flags);
}
void* operator new(size_t size, Arena *arena) {
void* operator new(size_t size, Arena *arena) throw() {
return ResourceObj::operator new(size, arena);
}
void* operator new(size_t size) {
void* operator new(size_t size) throw() {
return ResourceObj::operator new(size);
}

@ -139,7 +139,7 @@ Method* Klass::uncached_lookup_method(Symbol* name, Symbol* signature) const {
return NULL;
}
void* Klass::operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS) {
void* Klass::operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS) throw() {
return Metaspace::allocate(loader_data, word_size, /*read_only*/false,
MetaspaceObj::ClassType, CHECK_NULL);
}

@ -179,7 +179,7 @@ class Klass : public Metadata {
// Constructor
Klass();
void* operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS);
void* operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS) throw();
public:
bool is_klass() const volatile { return true; }

@ -720,11 +720,22 @@ void Method::print_made_not_compilable(int comp_level, bool is_osr, bool report,
}
}
bool Method::is_always_compilable() const {
// Generated adapters must be compiled
if (is_method_handle_intrinsic() && is_synthetic()) {
assert(!is_not_c1_compilable(), "sanity check");
assert(!is_not_c2_compilable(), "sanity check");
return true;
}
return false;
}
bool Method::is_not_compilable(int comp_level) const {
if (number_of_breakpoints() > 0)
return true;
if (is_method_handle_intrinsic())
return !is_synthetic(); // the generated adapters must be compiled
if (is_always_compilable())
return false;
if (comp_level == CompLevel_any)
return is_not_c1_compilable() || is_not_c2_compilable();
if (is_c1_compile(comp_level))
@ -736,6 +747,10 @@ bool Method::is_not_compilable(int comp_level) const {
// call this when compiler finds that this method is not compilable
void Method::set_not_compilable(int comp_level, bool report, const char* reason) {
if (is_always_compilable()) {
// Don't mark a method which should be always compilable
return;
}
print_made_not_compilable(comp_level, /*is_osr*/ false, report, reason);
if (comp_level == CompLevel_all) {
set_not_c1_compilable();

@ -796,6 +796,7 @@ class Method : public Metadata {
void set_not_osr_compilable_quietly(int comp_level = CompLevel_all) {
set_not_osr_compilable(comp_level, false);
}
bool is_always_compilable() const;
private:
void print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason);

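The new is_always_compilable() helper centralizes the "generated method-handle adapters must stay compilable" rule so that both the query (is_not_compilable) and the mutator (set_not_compilable) consult the same predicate. A minimal sketch of that refactoring shape, using placeholder names rather than HotSpot's Method API:

#include <cassert>

class Task {
  bool _blacklisted;
  bool _intrinsic;
  bool _synthetic;
 public:
  Task(bool intrinsic, bool synthetic)
      : _blacklisted(false), _intrinsic(intrinsic), _synthetic(synthetic) {}

  // Single source of truth for "must never be marked non-compilable".
  bool is_always_compilable() const { return _intrinsic && _synthetic; }

  bool is_not_compilable() const {
    if (is_always_compilable()) return false;   // the query honors the rule
    return _blacklisted;
  }

  void set_not_compilable() {
    if (is_always_compilable()) return;         // the mutator refuses to violate it
    _blacklisted = true;
  }
};

int main() {
  Task adapter(true, true);
  adapter.set_not_compilable();                 // silently ignored
  assert(!adapter.is_not_compilable());

  Task plain(false, false);
  plain.set_not_compilable();
  assert(plain.is_not_compilable());
  return 0;
}
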
@ -41,19 +41,19 @@ Symbol::Symbol(const u1* name, int length, int refcount) {
}
}
void* Symbol::operator new(size_t sz, int len, TRAPS) {
void* Symbol::operator new(size_t sz, int len, TRAPS) throw() {
int alloc_size = size(len)*HeapWordSize;
address res = (address) AllocateHeap(alloc_size, mtSymbol);
return res;
}
void* Symbol::operator new(size_t sz, int len, Arena* arena, TRAPS) {
void* Symbol::operator new(size_t sz, int len, Arena* arena, TRAPS) throw() {
int alloc_size = size(len)*HeapWordSize;
address res = (address)arena->Amalloc(alloc_size);
return res;
}
void* Symbol::operator new(size_t sz, int len, ClassLoaderData* loader_data, TRAPS) {
void* Symbol::operator new(size_t sz, int len, ClassLoaderData* loader_data, TRAPS) throw() {
address res;
int alloc_size = size(len)*HeapWordSize;
res = (address) Metaspace::allocate(loader_data, size(len), true,

@ -136,9 +136,9 @@ class Symbol : private SymbolBase {
}
Symbol(const u1* name, int length, int refcount);
void* operator new(size_t size, int len, TRAPS);
void* operator new(size_t size, int len, Arena* arena, TRAPS);
void* operator new(size_t size, int len, ClassLoaderData* loader_data, TRAPS);
void* operator new(size_t size, int len, TRAPS) throw();
void* operator new(size_t size, int len, Arena* arena, TRAPS) throw();
void* operator new(size_t size, int len, ClassLoaderData* loader_data, TRAPS) throw();
void operator delete(void* p);

@ -112,9 +112,9 @@ uint Block::compute_loop_alignment() {
// exceeds OptoLoopAlignment.
uint Block::compute_first_inst_size(uint& sum_size, uint inst_cnt,
PhaseRegAlloc* ra) {
uint last_inst = _nodes.size();
uint last_inst = number_of_nodes();
for( uint j = 0; j < last_inst && inst_cnt > 0; j++ ) {
uint inst_size = _nodes[j]->size(ra);
uint inst_size = get_node(j)->size(ra);
if( inst_size > 0 ) {
inst_cnt--;
uint sz = sum_size + inst_size;
@ -131,8 +131,8 @@ uint Block::compute_first_inst_size(uint& sum_size, uint inst_cnt,
}
uint Block::find_node( const Node *n ) const {
for( uint i = 0; i < _nodes.size(); i++ ) {
if( _nodes[i] == n )
for( uint i = 0; i < number_of_nodes(); i++ ) {
if( get_node(i) == n )
return i;
}
ShouldNotReachHere();
@ -141,7 +141,7 @@ uint Block::find_node( const Node *n ) const {
// Find and remove n from block list
void Block::find_remove( const Node *n ) {
_nodes.remove(find_node(n));
remove_node(find_node(n));
}
// Return empty status of a block. Empty blocks contain only the head, other
@ -154,10 +154,10 @@ int Block::is_Empty() const {
}
int success_result = completely_empty;
int end_idx = _nodes.size()-1;
int end_idx = number_of_nodes() - 1;
// Check for ending goto
if ((end_idx > 0) && (_nodes[end_idx]->is_MachGoto())) {
if ((end_idx > 0) && (get_node(end_idx)->is_MachGoto())) {
success_result = empty_with_goto;
end_idx--;
}
@ -170,7 +170,7 @@ int Block::is_Empty() const {
// Ideal nodes are allowable in empty blocks: skip them Only MachNodes
// turn directly into code, because only MachNodes have non-trivial
// emit() functions.
while ((end_idx > 0) && !_nodes[end_idx]->is_Mach()) {
while ((end_idx > 0) && !get_node(end_idx)->is_Mach()) {
end_idx--;
}
@ -209,15 +209,15 @@ bool Block::has_uncommon_code() const {
// True if block is low enough frequency or guarded by a test which
// mostly does not go here.
bool Block::is_uncommon(PhaseCFG* cfg) const {
bool PhaseCFG::is_uncommon(const Block* block) {
// Initial blocks must never be moved, so are never uncommon.
if (head()->is_Root() || head()->is_Start()) return false;
if (block->head()->is_Root() || block->head()->is_Start()) return false;
// Check for way-low freq
if( _freq < BLOCK_FREQUENCY(0.00001f) ) return true;
if(block->_freq < BLOCK_FREQUENCY(0.00001f) ) return true;
// Look for code shape indicating uncommon_trap or slow path
if (has_uncommon_code()) return true;
if (block->has_uncommon_code()) return true;
const float epsilon = 0.05f;
const float guard_factor = PROB_UNLIKELY_MAG(4) / (1.f - epsilon);
@ -225,8 +225,8 @@ bool Block::is_uncommon(PhaseCFG* cfg) const {
uint freq_preds = 0;
uint uncommon_for_freq_preds = 0;
for( uint i=1; i<num_preds(); i++ ) {
Block* guard = cfg->get_block_for_node(pred(i));
for( uint i=1; i< block->num_preds(); i++ ) {
Block* guard = get_block_for_node(block->pred(i));
// Check to see if this block follows its guard 1 time out of 10000
// or less.
//
@ -244,14 +244,14 @@ bool Block::is_uncommon(PhaseCFG* cfg) const {
uncommon_preds++;
} else {
freq_preds++;
if( _freq < guard->_freq * guard_factor ) {
if(block->_freq < guard->_freq * guard_factor ) {
uncommon_for_freq_preds++;
}
}
}
if( num_preds() > 1 &&
if( block->num_preds() > 1 &&
// The block is uncommon if all preds are uncommon or
(uncommon_preds == (num_preds()-1) ||
(uncommon_preds == (block->num_preds()-1) ||
// it is uncommon for all frequent preds.
uncommon_for_freq_preds == freq_preds) ) {
return true;
@ -344,8 +344,8 @@ void Block::dump() const {
void Block::dump(const PhaseCFG* cfg) const {
dump_head(cfg);
for (uint i=0; i< _nodes.size(); i++) {
_nodes[i]->dump();
for (uint i=0; i< number_of_nodes(); i++) {
get_node(i)->dump();
}
tty->print("\n");
}
@ -434,7 +434,7 @@ uint PhaseCFG::build_cfg() {
map_node_to_block(p, bb);
map_node_to_block(x, bb);
if( x != p ) { // Only for root is x == p
bb->_nodes.push((Node*)x);
bb->push_node((Node*)x);
}
// Now handle predecessors
++sum; // Count 1 for self block
@ -469,11 +469,11 @@ uint PhaseCFG::build_cfg() {
assert( x != proj, "" );
// Map basic block of projection
map_node_to_block(proj, pb);
pb->_nodes.push(proj);
pb->push_node(proj);
}
// Insert self as a child of my predecessor block
pb->_succs.map(pb->_num_succs++, get_block_for_node(np));
assert( pb->_nodes[ pb->_nodes.size() - pb->_num_succs ]->is_block_proj(),
assert( pb->get_node(pb->number_of_nodes() - pb->_num_succs)->is_block_proj(),
"too many control users, not a CFG?" );
}
}
@ -495,7 +495,7 @@ void PhaseCFG::insert_goto_at(uint block_no, uint succ_no) {
// surrounding blocks.
float freq = in->_freq * in->succ_prob(succ_no);
// get ProjNode corresponding to the succ_no'th successor of the in block
ProjNode* proj = in->_nodes[in->_nodes.size() - in->_num_succs + succ_no]->as_Proj();
ProjNode* proj = in->get_node(in->number_of_nodes() - in->_num_succs + succ_no)->as_Proj();
// create region for basic block
RegionNode* region = new (C) RegionNode(2);
region->init_req(1, proj);
@ -507,7 +507,7 @@ void PhaseCFG::insert_goto_at(uint block_no, uint succ_no) {
Node* gto = _goto->clone(); // get a new goto node
gto->set_req(0, region);
// add it to the basic block
block->_nodes.push(gto);
block->push_node(gto);
map_node_to_block(gto, block);
C->regalloc()->set_bad(gto->_idx);
// hook up successor block
@ -527,9 +527,9 @@ void PhaseCFG::insert_goto_at(uint block_no, uint succ_no) {
// Does this block end in a multiway branch that cannot have the default case
// flipped for another case?
static bool no_flip_branch( Block *b ) {
int branch_idx = b->_nodes.size() - b->_num_succs-1;
int branch_idx = b->number_of_nodes() - b->_num_succs-1;
if( branch_idx < 1 ) return false;
Node *bra = b->_nodes[branch_idx];
Node *bra = b->get_node(branch_idx);
if( bra->is_Catch() )
return true;
if( bra->is_Mach() ) {
@ -550,16 +550,16 @@ static bool no_flip_branch( Block *b ) {
void PhaseCFG::convert_NeverBranch_to_Goto(Block *b) {
// Find true target
int end_idx = b->end_idx();
int idx = b->_nodes[end_idx+1]->as_Proj()->_con;
int idx = b->get_node(end_idx+1)->as_Proj()->_con;
Block *succ = b->_succs[idx];
Node* gto = _goto->clone(); // get a new goto node
gto->set_req(0, b->head());
Node *bp = b->_nodes[end_idx];
b->_nodes.map(end_idx,gto); // Slam over NeverBranch
Node *bp = b->get_node(end_idx);
b->map_node(gto, end_idx); // Slam over NeverBranch
map_node_to_block(gto, b);
C->regalloc()->set_bad(gto->_idx);
b->_nodes.pop(); // Yank projections
b->_nodes.pop(); // Yank projections
b->pop_node(); // Yank projections
b->pop_node(); // Yank projections
b->_succs.map(0,succ); // Map only successor
b->_num_succs = 1;
// remap successor's predecessors if necessary
@ -575,8 +575,8 @@ void PhaseCFG::convert_NeverBranch_to_Goto(Block *b) {
// Scan through block, yanking dead path from
// all regions and phis.
dead->head()->del_req(j);
for( int k = 1; dead->_nodes[k]->is_Phi(); k++ )
dead->_nodes[k]->del_req(j);
for( int k = 1; dead->get_node(k)->is_Phi(); k++ )
dead->get_node(k)->del_req(j);
}
// Helper function to move block bx to the slot following b_index. Return
@ -620,7 +620,7 @@ void PhaseCFG::move_to_end(Block *b, uint i) {
if (e != Block::not_empty) {
if (e == Block::empty_with_goto) {
// Remove the goto, but leave the block.
b->_nodes.pop();
b->pop_node();
}
// Mark this block as a connector block, which will cause it to be
// ignored in certain functions such as non_connector_successor().
@ -663,13 +663,13 @@ void PhaseCFG::remove_empty_blocks() {
// to give a fake exit path to infinite loops. At this late stage they
// need to turn into Goto's so that when you enter the infinite loop you
// indeed hang.
if (block->_nodes[block->end_idx()]->Opcode() == Op_NeverBranch) {
if (block->get_node(block->end_idx())->Opcode() == Op_NeverBranch) {
convert_NeverBranch_to_Goto(block);
}
// Look for uncommon blocks and move to end.
if (!C->do_freq_based_layout()) {
if (block->is_uncommon(this)) {
if (is_uncommon(block)) {
move_to_end(block, i);
last--; // No longer check for being uncommon!
if (no_flip_branch(block)) { // Fall-thru case must follow?
@ -720,9 +720,9 @@ void PhaseCFG::fixup_flow() {
// exchange the true and false targets.
if (no_flip_branch(block)) {
// Find fall through case - if must fall into its target
int branch_idx = block->_nodes.size() - block->_num_succs;
int branch_idx = block->number_of_nodes() - block->_num_succs;
for (uint j2 = 0; j2 < block->_num_succs; j2++) {
const ProjNode* p = block->_nodes[branch_idx + j2]->as_Proj();
const ProjNode* p = block->get_node(branch_idx + j2)->as_Proj();
if (p->_con == 0) {
// successor j2 is fall through case
if (block->non_connector_successor(j2) != bnext) {
@ -743,14 +743,14 @@ void PhaseCFG::fixup_flow() {
// Remove all CatchProjs
for (uint j = 0; j < block->_num_succs; j++) {
block->_nodes.pop();
block->pop_node();
}
} else if (block->_num_succs == 1) {
// Block ends in a Goto?
if (bnext == bs0) {
// We fall into next block; remove the Goto
block->_nodes.pop();
block->pop_node();
}
} else if(block->_num_succs == 2) { // Block ends in a If?
@ -759,9 +759,9 @@ void PhaseCFG::fixup_flow() {
// be projections (in any order), the 3rd last node must be
// the IfNode (we have excluded other 2-way exits such as
// CatchNodes already).
MachNode* iff = block->_nodes[block->_nodes.size() - 3]->as_Mach();
ProjNode* proj0 = block->_nodes[block->_nodes.size() - 2]->as_Proj();
ProjNode* proj1 = block->_nodes[block->_nodes.size() - 1]->as_Proj();
MachNode* iff = block->get_node(block->number_of_nodes() - 3)->as_Mach();
ProjNode* proj0 = block->get_node(block->number_of_nodes() - 2)->as_Proj();
ProjNode* proj1 = block->get_node(block->number_of_nodes() - 1)->as_Proj();
// Assert that proj0 and succs[0] match up. Similarly for proj1 and succs[1].
assert(proj0->raw_out(0) == block->_succs[0]->head(), "Mismatch successor 0");
@ -833,8 +833,8 @@ void PhaseCFG::fixup_flow() {
iff->as_MachIf()->negate();
}
block->_nodes.pop(); // Remove IfFalse & IfTrue projections
block->_nodes.pop();
block->pop_node(); // Remove IfFalse & IfTrue projections
block->pop_node();
} else {
// Multi-exit block, e.g. a switch statement
@ -895,13 +895,13 @@ void PhaseCFG::verify() const {
// Verify sane CFG
for (uint i = 0; i < number_of_blocks(); i++) {
Block* block = get_block(i);
uint cnt = block->_nodes.size();
uint cnt = block->number_of_nodes();
uint j;
for (j = 0; j < cnt; j++) {
Node *n = block->_nodes[j];
Node *n = block->get_node(j);
assert(get_block_for_node(n) == block, "");
if (j >= 1 && n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_CreateEx) {
assert(j == 1 || block->_nodes[j-1]->is_Phi(), "CreateEx must be first instruction in block");
assert(j == 1 || block->get_node(j-1)->is_Phi(), "CreateEx must be first instruction in block");
}
for (uint k = 0; k < n->req(); k++) {
Node *def = n->in(k);
@ -930,14 +930,14 @@ void PhaseCFG::verify() const {
}
j = block->end_idx();
Node* bp = (Node*)block->_nodes[block->_nodes.size() - 1]->is_block_proj();
Node* bp = (Node*)block->get_node(block->number_of_nodes() - 1)->is_block_proj();
assert(bp, "last instruction must be a block proj");
assert(bp == block->_nodes[j], "wrong number of successors for this block");
assert(bp == block->get_node(j), "wrong number of successors for this block");
if (bp->is_Catch()) {
while (block->_nodes[--j]->is_MachProj()) {
while (block->get_node(--j)->is_MachProj()) {
;
}
assert(block->_nodes[j]->is_MachCall(), "CatchProj must follow call");
assert(block->get_node(j)->is_MachCall(), "CatchProj must follow call");
} else if (bp->is_Mach() && bp->as_Mach()->ideal_Opcode() == Op_If) {
assert(block->_num_succs == 2, "Conditional branch must have two targets");
}
@ -1440,9 +1440,9 @@ void Trace::fixup_blocks(PhaseCFG &cfg) {
Block *bnext = next(b);
Block *bs0 = b->non_connector_successor(0);
MachNode *iff = b->_nodes[b->_nodes.size()-3]->as_Mach();
ProjNode *proj0 = b->_nodes[b->_nodes.size()-2]->as_Proj();
ProjNode *proj1 = b->_nodes[b->_nodes.size()-1]->as_Proj();
MachNode *iff = b->get_node(b->number_of_nodes() - 3)->as_Mach();
ProjNode *proj0 = b->get_node(b->number_of_nodes() - 2)->as_Proj();
ProjNode *proj1 = b->get_node(b->number_of_nodes() - 1)->as_Proj();
if (bnext == bs0) {
// Fall-thru case in succs[0], should be in succs[1]
@ -1454,8 +1454,8 @@ void Trace::fixup_blocks(PhaseCFG &cfg) {
b->_succs.map( 1, tbs0 );
// Flip projections to match targets
b->_nodes.map(b->_nodes.size()-2, proj1);
b->_nodes.map(b->_nodes.size()-1, proj0);
b->map_node(proj1, b->number_of_nodes() - 2);
b->map_node(proj0, b->number_of_nodes() - 1);
}
}
}

@ -105,15 +105,53 @@ class CFGElement : public ResourceObj {
// any optimization pass. They are created late in the game.
class Block : public CFGElement {
friend class VMStructs;
public:
private:
// Nodes in this block, in order
Node_List _nodes;
public:
// Get the node at index 'at_index', if 'at_index' is out of bounds return NULL
Node* get_node(uint at_index) const {
return _nodes[at_index];
}
// Get the number of nodes in this block
uint number_of_nodes() const {
return _nodes.size();
}
// Map a node 'node' to index 'to_index' in the block, if the index is out of bounds the size of the node list is increased
void map_node(Node* node, uint to_index) {
_nodes.map(to_index, node);
}
// Insert a node 'node' at index 'at_index', moving all nodes that are on a higher index one step, if 'at_index' is out of bounds we crash
void insert_node(Node* node, uint at_index) {
_nodes.insert(at_index, node);
}
// Remove a node at index 'at_index'
void remove_node(uint at_index) {
_nodes.remove(at_index);
}
// Push a node 'node' onto the node list
void push_node(Node* node) {
_nodes.push(node);
}
// Pop the last node off the node list
Node* pop_node() {
return _nodes.pop();
}
// Basic blocks have a Node which defines Control for all Nodes pinned in
// this block. This Node is a RegionNode. Exception-causing Nodes
// (division, subroutines) and Phi functions are always pinned. Later,
// every Node will get pinned to some block.
Node *head() const { return _nodes[0]; }
Node *head() const { return get_node(0); }
// CAUTION: num_preds() is ONE based, so that predecessor numbers match
// input edges to Regions and Phis.
@ -274,29 +312,12 @@ class Block : public CFGElement {
// Add an instruction to an existing block. It must go after the head
// instruction and before the end instruction.
void add_inst( Node *n ) { _nodes.insert(end_idx(),n); }
void add_inst( Node *n ) { insert_node(n, end_idx()); }
// Find node in block
uint find_node( const Node *n ) const;
// Find and remove n from block list
void find_remove( const Node *n );
// helper function that adds caller save registers to MachProjNode
void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe);
// Schedule a call next in the block
uint sched_call(Matcher &matcher, PhaseCFG* cfg, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call);
// Perform basic-block local scheduling
Node *select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot);
void set_next_call( Node *n, VectorSet &next_call, PhaseCFG* cfg);
void needed_for_next_call(Node *this_call, VectorSet &next_call, PhaseCFG* cfg);
bool schedule_local(PhaseCFG *cfg, Matcher &m, GrowableArray<int> &ready_cnt, VectorSet &next_call);
// Cleanup if any code lands between a Call and his Catch
void call_catch_cleanup(PhaseCFG* cfg, Compile *C);
// Detect implicit-null-check opportunities. Basically, find NULL checks
// with suitable memory ops nearby. Use the memory op to do the NULL check.
// I can generate a memory op if there is not one nearby.
void implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowed_reasons);
// Return the empty status of a block
enum { not_empty, empty_with_goto, completely_empty };
int is_Empty() const;
@ -328,10 +349,6 @@ class Block : public CFGElement {
// Examine block's code shape to predict if it is not commonly executed.
bool has_uncommon_code() const;
// Use frequency calculations and code shape to predict if the block
// is uncommon.
bool is_uncommon(PhaseCFG* cfg) const;
#ifndef PRODUCT
// Debugging print of basic block
void dump_bidx(const Block* orig, outputStream* st = tty) const;
@ -414,6 +431,27 @@ class PhaseCFG : public Phase {
// to late. Helper for schedule_late.
Block* hoist_to_cheaper_block(Block* LCA, Block* early, Node* self);
bool schedule_local(Block* block, GrowableArray<int>& ready_cnt, VectorSet& next_call);
void set_next_call(Block* block, Node* n, VectorSet& next_call);
void needed_for_next_call(Block* block, Node* this_call, VectorSet& next_call);
// Perform basic-block local scheduling
Node* select(Block* block, Node_List& worklist, GrowableArray<int>& ready_cnt, VectorSet& next_call, uint sched_slot);
// Schedule a call next in the block
uint sched_call(Block* block, uint node_cnt, Node_List& worklist, GrowableArray<int>& ready_cnt, MachCallNode* mcall, VectorSet& next_call);
// Cleanup if any code lands between a Call and his Catch
void call_catch_cleanup(Block* block);
Node* catch_cleanup_find_cloned_def(Block* use_blk, Node* def, Block* def_blk, int n_clone_idx);
void catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, int n_clone_idx);
// Detect implicit-null-check opportunities. Basically, find NULL checks
// with suitable memory ops nearby. Use the memory op to do the NULL check.
// I can generate a memory op if there is not one nearby.
void implicit_null_check(Block* block, Node *proj, Node *val, int allowed_reasons);
// Perform a Depth First Search (DFS).
// Setup 'vertex' as DFS to vertex mapping.
// Setup 'semi' as vertex to DFS mapping.
@ -530,6 +568,10 @@ class PhaseCFG : public Phase {
return (_node_to_block_mapping.lookup(node->_idx) != NULL);
}
// Use frequency calculations and code shape to predict if the block
// is uncommon.
bool is_uncommon(const Block* block);
#ifdef ASSERT
Unique_Node_List _raw_oops;
#endif
@ -550,7 +592,7 @@ class PhaseCFG : public Phase {
// Insert a node into a block at index and map the node to the block
void insert(Block *b, uint idx, Node *n) {
b->_nodes.insert( idx, n );
b->insert_node(n , idx);
map_node_to_block(n, b);
}

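All of the _nodes.size(), _nodes[i], _nodes.insert(), _nodes.map() and _nodes.pop() call sites above migrate to the new Block accessors declared in block.hpp. A simplified, self-contained sketch of the resulting interface and call-site pattern (std::vector stands in for Node_List, and this map_node does not grow the list the way the real one does):

#include <cassert>
#include <cstddef>
#include <vector>

struct Node { int idx; };

class Block {
  std::vector<Node*> _nodes;   // now private; callers go through accessors
 public:
  Node*  get_node(size_t at_index) const { return _nodes[at_index]; }
  size_t number_of_nodes() const         { return _nodes.size(); }
  void   push_node(Node* n)              { _nodes.push_back(n); }
  Node*  pop_node() { Node* n = _nodes.back(); _nodes.pop_back(); return n; }
  void   insert_node(Node* n, size_t at) { _nodes.insert(_nodes.begin() + at, n); }
  void   remove_node(size_t at)          { _nodes.erase(_nodes.begin() + at); }
  void   map_node(Node* n, size_t at)    { _nodes[at] = n; }
};

int main() {
  Node n0 = {0}, n1 = {1}, n2 = {2};
  Block b;
  b.push_node(&n0);
  b.push_node(&n2);
  b.insert_node(&n1, 1);                                // was b._nodes.insert(1, &n1)
  for (size_t i = 0; i < b.number_of_nodes(); i++) {    // was b._nodes.size()
    assert(b.get_node(i)->idx == (int)i);               // was b._nodes[i]
  }
  b.remove_node(0);
  assert(b.pop_node() == &n2);
  return 0;
}
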
@ -121,8 +121,8 @@ struct OopFlow : public ResourceObj {
// Given reaching-defs for this block start, compute it for this block end
void OopFlow::compute_reach( PhaseRegAlloc *regalloc, int max_reg, Dict *safehash ) {
for( uint i=0; i<_b->_nodes.size(); i++ ) {
Node *n = _b->_nodes[i];
for( uint i=0; i<_b->number_of_nodes(); i++ ) {
Node *n = _b->get_node(i);
if( n->jvms() ) { // Build an OopMap here?
JVMState *jvms = n->jvms();
@ -447,8 +447,8 @@ static void do_liveness(PhaseRegAlloc* regalloc, PhaseCFG* cfg, Block_List* work
}
// Now walk tmp_live up the block backwards, computing live
for( int k=b->_nodes.size()-1; k>=0; k-- ) {
Node *n = b->_nodes[k];
for( int k=b->number_of_nodes()-1; k>=0; k-- ) {
Node *n = b->get_node(k);
// KILL def'd bits
int first = regalloc->get_reg_first(n);
int second = regalloc->get_reg_second(n);
@ -544,12 +544,12 @@ static void do_liveness(PhaseRegAlloc* regalloc, PhaseCFG* cfg, Block_List* work
for (i = 1; i < cfg->number_of_blocks(); i++) {
Block* block = cfg->get_block(i);
uint j;
for (j = 1; j < block->_nodes.size(); j++) {
if (block->_nodes[j]->jvms() && (*safehash)[block->_nodes[j]] == NULL) {
for (j = 1; j < block->number_of_nodes(); j++) {
if (block->get_node(j)->jvms() && (*safehash)[block->get_node(j)] == NULL) {
break;
}
}
if (j < block->_nodes.size()) {
if (j < block->number_of_nodes()) {
break;
}
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -260,7 +260,7 @@ class WarmCallInfo : public ResourceObj {
// Because WarmInfo objects live over the entire lifetime of the
// Compile object, they are allocated into the comp_arena, which
// does not get resource marked or reset during the compile process
void *operator new( size_t x, Compile* C ) { return C->comp_arena()->Amalloc(x); }
void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); }
void operator delete( void * ) { } // fast deallocation
static WarmCallInfo* always_hot();

@ -458,7 +458,7 @@ void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st)
st->print("={");
uint nf = spobj->n_fields();
if (nf > 0) {
uint first_ind = spobj->first_index();
uint first_ind = spobj->first_index(mcall->jvms());
Node* fld_node = mcall->in(first_ind);
ciField* cifield;
if (iklass != NULL) {
@ -1063,7 +1063,6 @@ void SafePointNode::grow_stack(JVMState* jvms, uint grow_by) {
int scloff = jvms->scloff();
int endoff = jvms->endoff();
assert(endoff == (int)req(), "no other states or debug info after me");
assert(jvms->scl_size() == 0, "parsed code should not have scalar objects");
Node* top = Compile::current()->top();
for (uint i = 0; i < grow_by; i++) {
ins_req(monoff, top);
@ -1079,32 +1078,31 @@ void SafePointNode::push_monitor(const FastLockNode *lock) {
const int MonitorEdges = 2;
assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
assert(req() == jvms()->endoff(), "correct sizing");
assert((jvms()->scl_size() == 0), "parsed code should not have scalar objects");
int nextmon = jvms()->scloff();
if (GenerateSynchronizationCode) {
add_req(lock->box_node());
add_req(lock->obj_node());
ins_req(nextmon, lock->box_node());
ins_req(nextmon+1, lock->obj_node());
} else {
Node* top = Compile::current()->top();
add_req(top);
add_req(top);
ins_req(nextmon, top);
ins_req(nextmon, top);
}
jvms()->set_scloff(nextmon+MonitorEdges);
jvms()->set_scloff(nextmon + MonitorEdges);
jvms()->set_endoff(req());
}
void SafePointNode::pop_monitor() {
// Delete last monitor from debug info
assert((jvms()->scl_size() == 0), "parsed code should not have scalar objects");
debug_only(int num_before_pop = jvms()->nof_monitors());
const int MonitorEdges = (1<<JVMState::logMonitorEdges);
const int MonitorEdges = 2;
assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
int scloff = jvms()->scloff();
int endoff = jvms()->endoff();
int new_scloff = scloff - MonitorEdges;
int new_endoff = endoff - MonitorEdges;
jvms()->set_scloff(new_scloff);
jvms()->set_endoff(new_endoff);
while (scloff > new_scloff) del_req(--scloff);
while (scloff > new_scloff) del_req_ordered(--scloff);
assert(jvms()->nof_monitors() == num_before_pop-1, "");
}
@ -1169,13 +1167,12 @@ uint SafePointScalarObjectNode::match_edge(uint idx) const {
}
SafePointScalarObjectNode*
SafePointScalarObjectNode::clone(int jvms_adj, Dict* sosn_map) const {
SafePointScalarObjectNode::clone(Dict* sosn_map) const {
void* cached = (*sosn_map)[(void*)this];
if (cached != NULL) {
return (SafePointScalarObjectNode*)cached;
}
SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone();
res->_first_index += jvms_adj;
sosn_map->Insert((void*)this, (void*)res);
return res;
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -216,7 +216,7 @@ public:
// Because JVMState objects live over the entire lifetime of the
// Compile object, they are allocated into the comp_arena, which
// does not get resource marked or reset during the compile process
void *operator new( size_t x, Compile* C ) { return C->comp_arena()->Amalloc(x); }
void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); }
void operator delete( void * ) { } // fast deallocation
// Create a new JVMState, ready for abstract interpretation.
@ -449,14 +449,17 @@ public:
// at a safepoint.
class SafePointScalarObjectNode: public TypeNode {
uint _first_index; // First input edge index of a SafePoint node where
uint _first_index; // First input edge relative index of a SafePoint node where
// states of the scalarized object fields are collected.
// It is relative to the last (youngest) jvms->_scloff.
uint _n_fields; // Number of non-static fields of the scalarized object.
DEBUG_ONLY(AllocateNode* _alloc;)
virtual uint hash() const ; // { return NO_HASH; }
virtual uint cmp( const Node &n ) const;
uint first_index() const { return _first_index; }
public:
SafePointScalarObjectNode(const TypeOopPtr* tp,
#ifdef ASSERT
@ -469,7 +472,10 @@ public:
virtual const RegMask &out_RegMask() const;
virtual uint match_edge(uint idx) const;
uint first_index() const { return _first_index; }
uint first_index(JVMState* jvms) const {
assert(jvms != NULL, "missed JVMS");
return jvms->scloff() + _first_index;
}
uint n_fields() const { return _n_fields; }
#ifdef ASSERT
@ -485,7 +491,7 @@ public:
// corresponds appropriately to "this" in "new_call". Assumes that
// "sosn_map" is a map, specific to the translation of "s" to "new_call",
// mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
SafePointScalarObjectNode* clone(int jvms_adj, Dict* sosn_map) const;
SafePointScalarObjectNode* clone(Dict* sosn_map) const;
#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const;

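The _first_index field of SafePointScalarObjectNode above changes from an absolute input-edge index to one stored relative to the owning node's jvms->scloff(), and first_index(jvms) rebuilds the absolute value on demand; clone() therefore no longer needs a jvms_adj correction. A tiny standalone sketch of that relative-index scheme (the names here are placeholders, not the HotSpot types):

#include <cassert>

struct DebugInfoLayout {
  unsigned scloff;             // where scalar-object edges start in the owner
};

struct ScalarObjectRef {
  unsigned first_index_rel;    // stored relative to layout.scloff
  unsigned first_index(const DebugInfoLayout& layout) const {
    return layout.scloff + first_index_rel;   // absolute = base + relative
  }
};

int main() {
  DebugInfoLayout layout = { 12 };
  ScalarObjectRef ref = { 3 };
  assert(ref.first_index(layout) == 15);

  // If edges are later inserted below scloff, only the base moves; the stored
  // relative index (and any clone of it) stays valid without adjustment.
  layout.scloff = 14;
  assert(ref.first_index(layout) == 17);
  return 0;
}
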
@ -301,7 +301,7 @@ int PhaseChaitin::clone_projs(Block* b, uint idx, Node* orig, Node* copy, uint&
// Copy kill projections after the cloned node
Node* kills = proj->clone();
kills->set_req(0, copy);
b->_nodes.insert(idx++, kills);
b->insert_node(kills, idx++);
_cfg.map_node_to_block(kills, b);
new_lrg(kills, max_lrg_id++);
}
@ -682,11 +682,11 @@ void PhaseChaitin::de_ssa() {
uint lr_counter = 1;
for( uint i = 0; i < _cfg.number_of_blocks(); i++ ) {
Block* block = _cfg.get_block(i);
uint cnt = block->_nodes.size();
uint cnt = block->number_of_nodes();
// Handle all the normal Nodes in the block
for( uint j = 0; j < cnt; j++ ) {
Node *n = block->_nodes[j];
Node *n = block->get_node(j);
// Pre-color to the zero live range, or pick virtual register
const RegMask &rm = n->out_RegMask();
_lrg_map.map(n->_idx, rm.is_NotEmpty() ? lr_counter++ : 0);
@ -710,8 +710,8 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
Block* block = _cfg.get_block(i);
// For all instructions
for (uint j = 1; j < block->_nodes.size(); j++) {
Node* n = block->_nodes[j];
for (uint j = 1; j < block->number_of_nodes(); j++) {
Node* n = block->get_node(j);
uint input_edge_start =1; // Skip control most nodes
if (n->is_Mach()) {
input_edge_start = n->as_Mach()->oper_input_base();
@ -1604,7 +1604,7 @@ void PhaseChaitin::fixup_spills() {
// For all instructions in block
uint last_inst = block->end_idx();
for (uint j = 1; j <= last_inst; j++) {
Node* n = block->_nodes[j];
Node* n = block->get_node(j);
// Dead instruction???
assert( n->outcnt() != 0 ||// Nothing dead after post alloc
@ -1641,7 +1641,7 @@ void PhaseChaitin::fixup_spills() {
assert( cisc->oper_input_base() == 2, "Only adding one edge");
cisc->ins_req(1,src); // Requires a memory edge
}
block->_nodes.map(j,cisc); // Insert into basic block
block->map_node(cisc, j); // Insert into basic block
n->subsume_by(cisc, C); // Correct graph
//
++_used_cisc_instructions;
@ -1698,7 +1698,7 @@ Node *PhaseChaitin::find_base_for_derived( Node **derived_base_map, Node *derive
// (where top() node is placed).
base->init_req(0, _cfg.get_root_node());
Block *startb = _cfg.get_block_for_node(C->top());
startb->_nodes.insert(startb->find_node(C->top()), base );
startb->insert_node(base, startb->find_node(C->top()));
_cfg.map_node_to_block(base, startb);
assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet");
}
@ -1743,9 +1743,9 @@ Node *PhaseChaitin::find_base_for_derived( Node **derived_base_map, Node *derive
// Search the current block for an existing base-Phi
Block *b = _cfg.get_block_for_node(derived);
for( i = 1; i <= b->end_idx(); i++ ) {// Search for matching Phi
Node *phi = b->_nodes[i];
Node *phi = b->get_node(i);
if( !phi->is_Phi() ) { // Found end of Phis with no match?
b->_nodes.insert( i, base ); // Must insert created Phi here as base
b->insert_node(base, i); // Must insert created Phi here as base
_cfg.map_node_to_block(base, b);
new_lrg(base,maxlrg++);
break;
@ -1786,7 +1786,7 @@ bool PhaseChaitin::stretch_base_pointer_live_ranges(ResourceArea *a) {
IndexSet liveout(_live->live(block));
for (uint j = block->end_idx() + 1; j > 1; j--) {
Node* n = block->_nodes[j - 1];
Node* n = block->get_node(j - 1);
// Pre-split compares of loop-phis. Loop-phis form a cycle we would
// like to see in the same register. Compare uses the loop-phi and so
@ -1979,8 +1979,8 @@ void PhaseChaitin::dump(const Block *b) const {
b->dump_head(&_cfg);
// For all instructions
for( uint j = 0; j < b->_nodes.size(); j++ )
dump(b->_nodes[j]);
for( uint j = 0; j < b->number_of_nodes(); j++ )
dump(b->get_node(j));
// Print live-out info at end of block
if( _live ) {
tty->print("Liveout: ");
@ -2271,8 +2271,8 @@ void PhaseChaitin::dump_lrg( uint lidx, bool defs_only ) const {
int dump_once = 0;
// For all instructions
for( uint j = 0; j < block->_nodes.size(); j++ ) {
Node *n = block->_nodes[j];
for( uint j = 0; j < block->number_of_nodes(); j++ ) {
Node *n = block->get_node(j);
if (_lrg_map.find_const(n) == lidx) {
if (!dump_once++) {
tty->cr();

@ -54,9 +54,9 @@ void PhaseCoalesce::dump() const {
for( j=0; j<b->_num_succs; j++ )
tty->print("B%d ",b->_succs[j]->_pre_order);
tty->print(" IDom: B%d/#%d\n", b->_idom ? b->_idom->_pre_order : 0, b->_dom_depth);
uint cnt = b->_nodes.size();
uint cnt = b->number_of_nodes();
for( j=0; j<cnt; j++ ) {
Node *n = b->_nodes[j];
Node *n = b->get_node(j);
dump( n );
tty->print("\t%s\t",n->Name());
@ -152,7 +152,7 @@ void PhaseAggressiveCoalesce::insert_copy_with_overlap( Block *b, Node *copy, ui
// after the last use. Last use is really first-use on a backwards scan.
uint i = b->end_idx()-1;
while(1) {
Node *n = b->_nodes[i];
Node *n = b->get_node(i);
// Check for end of virtual copies; this is also the end of the
// parallel renaming effort.
if (n->_idx < _unique) {
@ -174,7 +174,7 @@ void PhaseAggressiveCoalesce::insert_copy_with_overlap( Block *b, Node *copy, ui
// the last kill. Thus it is the first kill on a backwards scan.
i = b->end_idx()-1;
while (1) {
Node *n = b->_nodes[i];
Node *n = b->get_node(i);
// Check for end of virtual copies; this is also the end of the
// parallel renaming effort.
if (n->_idx < _unique) {
@ -200,13 +200,13 @@ void PhaseAggressiveCoalesce::insert_copy_with_overlap( Block *b, Node *copy, ui
tmp ->set_req(idx,copy->in(idx));
copy->set_req(idx,tmp);
// Save source in temp early, before source is killed
b->_nodes.insert(kill_src_idx,tmp);
b->insert_node(tmp, kill_src_idx);
_phc._cfg.map_node_to_block(tmp, b);
last_use_idx++;
}
// Insert just after last use
b->_nodes.insert(last_use_idx+1,copy);
b->insert_node(copy, last_use_idx + 1);
}
void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
@ -237,8 +237,8 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
Block *b = _phc._cfg.get_block(i);
uint cnt = b->num_preds(); // Number of inputs to the Phi
for( uint l = 1; l<b->_nodes.size(); l++ ) {
Node *n = b->_nodes[l];
for( uint l = 1; l<b->number_of_nodes(); l++ ) {
Node *n = b->get_node(l);
// Do not use removed-copies, use copied value instead
uint ncnt = n->req();
@ -260,7 +260,7 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
if (_phc._lrg_map.find(n) == _phc._lrg_map.find(def)) {
n->replace_by(def);
n->set_req(cidx,NULL);
b->_nodes.remove(l);
b->remove_node(l);
l--;
continue;
}
@ -321,13 +321,13 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
m->as_Mach()->rematerialize()) {
copy = m->clone();
// Insert the copy in the basic block, just before us
b->_nodes.insert(l++, copy);
b->insert_node(copy, l++);
l += _phc.clone_projs(b, l, m, copy, _phc._lrg_map);
} else {
const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()];
copy = new (C) MachSpillCopyNode(m, *rm, *rm);
// Insert the copy in the basic block, just before us
b->_nodes.insert(l++, copy);
b->insert_node(copy, l++);
}
// Insert the copy in the use-def chain
n->set_req(idx, copy);
@ -339,7 +339,7 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
} // End of is two-adr
// Insert a copy at a debug use for a lrg which has high frequency
if (b->_freq < OPTO_DEBUG_SPLIT_FREQ || b->is_uncommon(&_phc._cfg)) {
if (b->_freq < OPTO_DEBUG_SPLIT_FREQ || _phc._cfg.is_uncommon(b)) {
// Walk the debug inputs to the node and check for lrg freq
JVMState* jvms = n->jvms();
uint debug_start = jvms ? jvms->debug_start() : 999999;
@ -376,7 +376,7 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
// Insert the copy in the use-def chain
n->set_req(inpidx, copy );
// Insert the copy in the basic block, just before us
b->_nodes.insert( l++, copy );
b->insert_node(copy, l++);
// Extend ("register allocate") the names array for the copy.
uint max_lrg_id = _phc._lrg_map.max_lrg_id();
_phc.new_lrg(copy, max_lrg_id);
@ -431,8 +431,8 @@ void PhaseAggressiveCoalesce::coalesce( Block *b ) {
}
// Visit all the Phis in successor block
for( uint k = 1; k<bs->_nodes.size(); k++ ) {
Node *n = bs->_nodes[k];
for( uint k = 1; k<bs->number_of_nodes(); k++ ) {
Node *n = bs->get_node(k);
if( !n->is_Phi() ) break;
combine_these_two( n, n->in(j) );
}
@ -442,7 +442,7 @@ void PhaseAggressiveCoalesce::coalesce( Block *b ) {
// Check _this_ block for 2-address instructions and copies.
uint cnt = b->end_idx();
for( i = 1; i<cnt; i++ ) {
Node *n = b->_nodes[i];
Node *n = b->get_node(i);
uint idx;
// 2-address instructions have a virtual Copy matching their input
// to their output
@ -490,10 +490,10 @@ void PhaseConservativeCoalesce::union_helper( Node *lr1_node, Node *lr2_node, ui
dst_copy->set_req( didx, src_def );
// Add copy to free list
// _phc.free_spillcopy(b->_nodes[bindex]);
assert( b->_nodes[bindex] == dst_copy, "" );
assert( b->get_node(bindex) == dst_copy, "" );
dst_copy->replace_by( dst_copy->in(didx) );
dst_copy->set_req( didx, NULL);
b->_nodes.remove(bindex);
b->remove_node(bindex);
if( bindex < b->_ihrp_index ) b->_ihrp_index--;
if( bindex < b->_fhrp_index ) b->_fhrp_index--;
@ -523,8 +523,8 @@ uint PhaseConservativeCoalesce::compute_separating_interferences(Node *dst_copy,
bindex2 = b2->end_idx()-1;
}
// Get prior instruction
assert(bindex2 < b2->_nodes.size(), "index out of bounds");
Node *x = b2->_nodes[bindex2];
assert(bindex2 < b2->number_of_nodes(), "index out of bounds");
Node *x = b2->get_node(bindex2);
if( x == prev_copy ) { // Previous copy in copy chain?
if( prev_copy == src_copy)// Found end of chain and all interferences
break; // So break out of loop
@ -769,14 +769,14 @@ bool PhaseConservativeCoalesce::copy_copy(Node *dst_copy, Node *src_copy, Block
// Conservative (but pessimistic) copy coalescing of a single block
void PhaseConservativeCoalesce::coalesce( Block *b ) {
// Bail out on infrequent blocks
if (b->is_uncommon(&_phc._cfg)) {
if (_phc._cfg.is_uncommon(b)) {
return;
}
// Check this block for copies.
for( uint i = 1; i<b->end_idx(); i++ ) {
// Check for actual copies on inputs. Coalesce a copy into its
// input if use and copy's input are compatible.
Node *copy1 = b->_nodes[i];
Node *copy1 = b->get_node(i);
uint idx1 = copy1->is_Copy();
if( !idx1 ) continue; // Not a copy

@ -2258,7 +2258,7 @@ void Compile::dump_asm(int *pcs, uint pc_limit) {
if (block->is_connector() && !Verbose) {
continue;
}
n = block->_nodes[0];
n = block->head();
if (pcs && n->_idx < pc_limit) {
tty->print("%3.3x ", pcs[n->_idx]);
} else {
@ -2273,12 +2273,12 @@ void Compile::dump_asm(int *pcs, uint pc_limit) {
// For all instructions
Node *delay = NULL;
for (uint j = 0; j < block->_nodes.size(); j++) {
for (uint j = 0; j < block->number_of_nodes(); j++) {
if (VMThread::should_terminate()) {
cut_short = true;
break;
}
n = block->_nodes[j];
n = block->get_node(j);
if (valid_bundle_info(n)) {
Bundle* bundle = node_bundling(n);
if (bundle->used_in_unconditional_delay()) {

@ -211,21 +211,21 @@ class Block_Stack {
uint Block_Stack::most_frequent_successor( Block *b ) {
uint freq_idx = 0;
int eidx = b->end_idx();
Node *n = b->_nodes[eidx];
Node *n = b->get_node(eidx);
int op = n->is_Mach() ? n->as_Mach()->ideal_Opcode() : n->Opcode();
switch( op ) {
case Op_CountedLoopEnd:
case Op_If: { // Split frequency amongst children
float prob = n->as_MachIf()->_prob;
// Is succ[0] the TRUE branch or the FALSE branch?
if( b->_nodes[eidx+1]->Opcode() == Op_IfFalse )
if( b->get_node(eidx+1)->Opcode() == Op_IfFalse )
prob = 1.0f - prob;
freq_idx = prob < PROB_FAIR; // freq=1 for succ[0] < 0.5 prob
break;
}
case Op_Catch: // Split frequency amongst children
for( freq_idx = 0; freq_idx < b->_num_succs; freq_idx++ )
if( b->_nodes[eidx+1+freq_idx]->as_CatchProj()->_con == CatchProjNode::fall_through_index )
if( b->get_node(eidx+1+freq_idx)->as_CatchProj()->_con == CatchProjNode::fall_through_index )
break;
// Handle case of no fall-thru (e.g., check-cast MUST throw an exception)
if( freq_idx == b->_num_succs ) freq_idx = 0;

@ -102,12 +102,12 @@ void PhaseCFG::replace_block_proj_ctrl( Node *n ) {
uint j = 0;
if (pb->_num_succs != 1) { // More then 1 successor?
// Search for successor
uint max = pb->_nodes.size();
uint max = pb->number_of_nodes();
assert( max > 1, "" );
uint start = max - pb->_num_succs;
// Find which output path belongs to projection
for (j = start; j < max; j++) {
if( pb->_nodes[j] == in0 )
if( pb->get_node(j) == in0 )
break;
}
assert( j < max, "must find" );
@ -1027,8 +1027,8 @@ Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
Block* least = LCA;
double least_freq = least->_freq;
uint target = get_latency_for_node(self);
uint start_latency = get_latency_for_node(LCA->_nodes[0]);
uint end_latency = get_latency_for_node(LCA->_nodes[LCA->end_idx()]);
uint start_latency = get_latency_for_node(LCA->head());
uint end_latency = get_latency_for_node(LCA->get_node(LCA->end_idx()));
bool in_latency = (target <= start_latency);
const Block* root_block = get_block_for_node(_root);
@ -1049,9 +1049,9 @@ Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
self->dump();
tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
LCA->_pre_order,
LCA->_nodes[0]->_idx,
LCA->head()->_idx,
start_latency,
LCA->_nodes[LCA->end_idx()]->_idx,
LCA->get_node(LCA->end_idx())->_idx,
end_latency,
least_freq);
}
@ -1074,14 +1074,14 @@ Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
if (mach && LCA == root_block)
break;
uint start_lat = get_latency_for_node(LCA->_nodes[0]);
uint start_lat = get_latency_for_node(LCA->head());
uint end_idx = LCA->end_idx();
uint end_lat = get_latency_for_node(LCA->_nodes[end_idx]);
uint end_lat = get_latency_for_node(LCA->get_node(end_idx));
double LCA_freq = LCA->_freq;
#ifndef PRODUCT
if (trace_opto_pipelining()) {
tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
LCA->_pre_order, LCA->_nodes[0]->_idx, start_lat, end_idx, end_lat, LCA_freq);
LCA->_pre_order, LCA->head()->_idx, start_lat, end_idx, end_lat, LCA_freq);
}
#endif
cand_cnt++;
@ -1342,7 +1342,7 @@ void PhaseCFG::global_code_motion() {
Node* proj = _matcher._null_check_tests[i];
Node* val = _matcher._null_check_tests[i + 1];
Block* block = get_block_for_node(proj);
block->implicit_null_check(this, proj, val, allowed_reasons);
implicit_null_check(block, proj, val, allowed_reasons);
// The implicit_null_check will only perform the transformation
// if the null branch is truly uncommon, *and* it leads to an
// uncommon trap. Combined with the too_many_traps guards
@ -1363,7 +1363,7 @@ void PhaseCFG::global_code_motion() {
visited.Clear();
for (uint i = 0; i < number_of_blocks(); i++) {
Block* block = get_block(i);
if (!block->schedule_local(this, _matcher, ready_cnt, visited)) {
if (!schedule_local(block, ready_cnt, visited)) {
if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
C->record_method_not_compilable("local schedule failed");
}
@ -1375,7 +1375,7 @@ void PhaseCFG::global_code_motion() {
// clone the instructions on all paths below the Catch.
for (uint i = 0; i < number_of_blocks(); i++) {
Block* block = get_block(i);
block->call_catch_cleanup(this, C);
call_catch_cleanup(block);
}
#ifndef PRODUCT
@ -1726,7 +1726,7 @@ void CFGLoop::compute_freq() {
// Determine the probability of reaching successor 'i' from the receiver block.
float Block::succ_prob(uint i) {
int eidx = end_idx();
Node *n = _nodes[eidx]; // Get ending Node
Node *n = get_node(eidx); // Get ending Node
int op = n->Opcode();
if (n->is_Mach()) {
@ -1761,7 +1761,7 @@ float Block::succ_prob(uint i) {
float prob = n->as_MachIf()->_prob;
assert(prob >= 0.0 && prob <= 1.0, "out of range probability");
// If succ[i] is the FALSE branch, invert path info
if( _nodes[i + eidx + 1]->Opcode() == Op_IfFalse ) {
if( get_node(i + eidx + 1)->Opcode() == Op_IfFalse ) {
return 1.0f - prob; // not taken
} else {
return prob; // taken
@ -1773,7 +1773,7 @@ float Block::succ_prob(uint i) {
return 1.0f/_num_succs;
case Op_Catch: {
const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj();
const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
if (ci->_con == CatchProjNode::fall_through_index) {
// Fall-thru path gets the lion's share.
return 1.0f - PROB_UNLIKELY_MAG(5)*_num_succs;
@ -1810,7 +1810,7 @@ float Block::succ_prob(uint i) {
// Return the number of fall-through candidates for a block
int Block::num_fall_throughs() {
int eidx = end_idx();
Node *n = _nodes[eidx]; // Get ending Node
Node *n = get_node(eidx); // Get ending Node
int op = n->Opcode();
if (n->is_Mach()) {
@ -1834,7 +1834,7 @@ int Block::num_fall_throughs() {
case Op_Catch: {
for (uint i = 0; i < _num_succs; i++) {
const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj();
const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
if (ci->_con == CatchProjNode::fall_through_index) {
return 1;
}
@ -1862,14 +1862,14 @@ int Block::num_fall_throughs() {
// Return true if a specific successor could be fall-through target.
bool Block::succ_fall_through(uint i) {
int eidx = end_idx();
Node *n = _nodes[eidx]; // Get ending Node
Node *n = get_node(eidx); // Get ending Node
int op = n->Opcode();
if (n->is_Mach()) {
if (n->is_MachNullCheck()) {
// In theory, either side can fall-thru, for simplicity sake,
// let's say only the false branch can now.
return _nodes[i + eidx + 1]->Opcode() == Op_IfFalse;
return get_node(i + eidx + 1)->Opcode() == Op_IfFalse;
}
op = n->as_Mach()->ideal_Opcode();
}
@ -1883,7 +1883,7 @@ bool Block::succ_fall_through(uint i) {
return true;
case Op_Catch: {
const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj();
const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
return ci->_con == CatchProjNode::fall_through_index;
}
@ -1907,7 +1907,7 @@ bool Block::succ_fall_through(uint i) {
// Update the probability of a two-branch to be uncommon
void Block::update_uncommon_branch(Block* ub) {
int eidx = end_idx();
Node *n = _nodes[eidx]; // Get ending Node
Node *n = get_node(eidx); // Get ending Node
int op = n->as_Mach()->ideal_Opcode();
@ -1923,7 +1923,7 @@ void Block::update_uncommon_branch(Block* ub) {
// If ub is the true path, make the proability small, else
// ub is the false path, and make the probability large
bool invert = (_nodes[s + eidx + 1]->Opcode() == Op_IfFalse);
bool invert = (get_node(s + eidx + 1)->Opcode() == Op_IfFalse);
// Get existing probability
float p = n->as_MachIf()->_prob;

@ -61,6 +61,7 @@ void GraphKit::gen_stub(address C_function,
JVMState* jvms = new (C) JVMState(0);
jvms->set_bci(InvocationEntryBci);
jvms->set_monoff(max_map);
jvms->set_scloff(max_map);
jvms->set_endoff(max_map);
{
SafePointNode *map = new (C) SafePointNode( max_map, jvms );

@ -1501,6 +1501,25 @@ void GraphKit::pre_barrier(bool do_load,
}
}
bool GraphKit::can_move_pre_barrier() const {
BarrierSet* bs = Universe::heap()->barrier_set();
switch (bs->kind()) {
case BarrierSet::G1SATBCT:
case BarrierSet::G1SATBCTLogging:
return true; // Can move it if no safepoint
case BarrierSet::CardTableModRef:
case BarrierSet::CardTableExtension:
case BarrierSet::ModRef:
return true; // There is no pre-barrier
case BarrierSet::Other:
default :
ShouldNotReachHere();
}
return false;
}
void GraphKit::post_barrier(Node* ctl,
Node* store,
Node* obj,
@ -3551,6 +3570,8 @@ void GraphKit::g1_write_barrier_pre(bool do_load,
} else {
// In this case both val_type and alias_idx are unused.
assert(pre_val != NULL, "must be loaded already");
// Nothing to be done if pre_val is null.
if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
}
assert(bt == T_OBJECT, "or we shouldn't be here");
@ -3595,7 +3616,7 @@ void GraphKit::g1_write_barrier_pre(bool do_load,
if (do_load) {
// load original value
// alias_idx correct??
pre_val = __ load(no_ctrl, adr, val_type, bt, alias_idx);
pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
}
// if (pre_val != NULL)

@ -695,6 +695,10 @@ class GraphKit : public Phase {
void write_barrier_post(Node *store, Node* obj,
Node* adr, uint adr_idx, Node* val, bool use_precise);
// Allow reordering of pre-barrier with oop store and/or post-barrier.
// Used for load_store operations which loads old value.
bool can_move_pre_barrier() const;
// G1 pre/post barriers
void g1_write_barrier_pre(bool do_load,
Node* obj,

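can_move_pre_barrier() above answers a per-collector question: may the pre-barrier be reordered past the oop store or post-barrier (useful for load_store intrinsics that already hold the old value)? A minimal sketch of that kind-dispatch shape, with placeholder enum values rather than HotSpot's BarrierSet kinds:

#include <cassert>

enum BarrierKind { kG1SATB, kCardTable, kModRefOnly };

// Movable if the collector either needs no pre-barrier at all or tolerates the
// pre-barrier being emitted after the store, provided no safepoint intervenes.
bool can_move_pre_barrier(BarrierKind kind) {
  switch (kind) {
    case kG1SATB:      return true;   // SATB pre-barrier, movable without a safepoint
    case kCardTable:
    case kModRefOnly:  return true;   // no pre-barrier exists for these
  }
  assert(false && "unknown barrier kind");
  return false;
}

int main() {
  assert(can_move_pre_barrier(kG1SATB));
  assert(can_move_pre_barrier(kCardTable));
  return 0;
}
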
@ -639,8 +639,8 @@ void IdealGraphPrinter::walk_nodes(Node *start, bool edges, VectorSet* temp_set)
// reachable but are in the CFG so add them here.
for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
Block* block = C->cfg()->get_block(i);
for (uint s = 0; s < block->_nodes.size(); s++) {
nodeStack.push(block->_nodes[s]);
for (uint s = 0; s < block->number_of_nodes(); s++) {
nodeStack.push(block->get_node(s));
}
}
}
@ -713,9 +713,9 @@ void IdealGraphPrinter::print(Compile* compile, const char *name, Node *node, in
tail(SUCCESSORS_ELEMENT);
head(NODES_ELEMENT);
for (uint s = 0; s < block->_nodes.size(); s++) {
for (uint s = 0; s < block->number_of_nodes(); s++) {
begin_elem(NODE_ELEMENT);
print_attr(NODE_ID_PROPERTY, get_node_id(block->_nodes[s]));
print_attr(NODE_ID_PROPERTY, get_node_id(block->get_node(s)));
end_elem();
}
tail(NODES_ELEMENT);

@ -319,7 +319,7 @@ void PhaseChaitin::build_ifg_virtual( ) {
// value is then removed from the live-ness set and its inputs are
// added to the live-ness set.
for (uint j = block->end_idx() + 1; j > 1; j--) {
Node* n = block->_nodes[j - 1];
Node* n = block->get_node(j - 1);
// Get value being defined
uint r = _lrg_map.live_range_id(n);
@ -456,7 +456,7 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
// Compute first nonphi node index
uint first_inst;
for (first_inst = 1; first_inst < last_inst; first_inst++) {
if (!block->_nodes[first_inst]->is_Phi()) {
if (!block->get_node(first_inst)->is_Phi()) {
break;
}
}
@ -464,15 +464,15 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
// Spills could be inserted before CreateEx node which should be
// first instruction in block after Phis. Move CreateEx up.
for (uint insidx = first_inst; insidx < last_inst; insidx++) {
Node *ex = block->_nodes[insidx];
Node *ex = block->get_node(insidx);
if (ex->is_SpillCopy()) {
continue;
}
if (insidx > first_inst && ex->is_Mach() && ex->as_Mach()->ideal_Opcode() == Op_CreateEx) {
// If the CreateEx isn't above all the MachSpillCopies
// then move it to the top.
block->_nodes.remove(insidx);
block->_nodes.insert(first_inst, ex);
block->remove_node(insidx);
block->insert_node(ex, first_inst);
}
// Stop once a CreateEx or any other node is found
break;
@ -523,7 +523,7 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
// to the live-ness set.
uint j;
for (j = last_inst + 1; j > 1; j--) {
Node* n = block->_nodes[j - 1];
Node* n = block->get_node(j - 1);
// Get value being defined
uint r = _lrg_map.live_range_id(n);
@ -541,7 +541,7 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
if( !n->is_Proj() ||
// Could also be a flags-projection of a dead ADD or such.
(_lrg_map.live_range_id(def) && !liveout.member(_lrg_map.live_range_id(def)))) {
block->_nodes.remove(j - 1);
block->remove_node(j - 1);
if (lrgs(r)._def == n) {
lrgs(r)._def = 0;
}
@ -605,7 +605,7 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
// (j - 1) is index for current instruction 'n'
Node *m = n;
for (uint i = j; i <= last_inst && m->is_SpillCopy(); ++i) {
m = block->_nodes[i];
m = block->get_node(i);
}
if (m == single_use) {
lrgs(r)._area = 0.0;
@ -772,20 +772,20 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
// Compute high pressure index; avoid landing in the middle of projnodes
j = hrp_index[0];
if (j < block->_nodes.size() && j < block->end_idx() + 1) {
Node* cur = block->_nodes[j];
if (j < block->number_of_nodes() && j < block->end_idx() + 1) {
Node* cur = block->get_node(j);
while (cur->is_Proj() || (cur->is_MachNullCheck()) || cur->is_Catch()) {
j--;
cur = block->_nodes[j];
cur = block->get_node(j);
}
}
block->_ihrp_index = j;
j = hrp_index[1];
if (j < block->_nodes.size() && j < block->end_idx() + 1) {
Node* cur = block->_nodes[j];
if (j < block->number_of_nodes() && j < block->end_idx() + 1) {
Node* cur = block->get_node(j);
while (cur->is_Proj() || (cur->is_MachNullCheck()) || cur->is_Catch()) {
j--;
cur = block->_nodes[j];
cur = block->get_node(j);
}
}
block->_fhrp_index = j;

@ -58,14 +58,14 @@
// The proj is the control projection for the not-null case.
// The val is the pointer being checked for nullness or
// decodeHeapOop_not_null node if it did not fold into address.
void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowed_reasons) {
void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allowed_reasons) {
// Assume if a null check is needed for 0 offset then it is always needed
// Intel Solaris doesn't support any null checks yet and no
// mechanism exists (yet) to set the switches at an os_cpu level
if( !ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(0)) return;
// Make sure the ptr-is-null path appears to be uncommon!
float f = end()->as_MachIf()->_prob;
float f = block->end()->as_MachIf()->_prob;
if( proj->Opcode() == Op_IfTrue ) f = 1.0f - f;
if( f > PROB_UNLIKELY_MAG(4) ) return;
@ -75,13 +75,13 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
// Get the successor block for if the test ptr is non-null
Block* not_null_block; // this one goes with the proj
Block* null_block;
if (_nodes[_nodes.size()-1] == proj) {
null_block = _succs[0];
not_null_block = _succs[1];
if (block->get_node(block->number_of_nodes()-1) == proj) {
null_block = block->_succs[0];
not_null_block = block->_succs[1];
} else {
assert(_nodes[_nodes.size()-2] == proj, "proj is one or the other");
not_null_block = _succs[0];
null_block = _succs[1];
assert(block->get_node(block->number_of_nodes()-2) == proj, "proj is one or the other");
not_null_block = block->_succs[0];
null_block = block->_succs[1];
}
while (null_block->is_Empty() == Block::empty_with_goto) {
null_block = null_block->_succs[0];
@ -93,8 +93,8 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
// detect failure of this optimization, as in 6366351.)
{
bool found_trap = false;
for (uint i1 = 0; i1 < null_block->_nodes.size(); i1++) {
Node* nn = null_block->_nodes[i1];
for (uint i1 = 0; i1 < null_block->number_of_nodes(); i1++) {
Node* nn = null_block->get_node(i1);
if (nn->is_MachCall() &&
nn->as_MachCall()->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point()) {
const Type* trtype = nn->in(TypeFunc::Parms)->bottom_type();
@ -237,20 +237,20 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
}
// Check ctrl input to see if the null-check dominates the memory op
Block *cb = cfg->get_block_for_node(mach);
Block *cb = get_block_for_node(mach);
cb = cb->_idom; // Always hoist at least 1 block
if( !was_store ) { // Stores can be hoisted only one block
while( cb->_dom_depth > (_dom_depth + 1))
while( cb->_dom_depth > (block->_dom_depth + 1))
cb = cb->_idom; // Hoist loads as far as we want
// The non-null-block should dominate the memory op, too. Live
// range spilling will insert a spill in the non-null-block if it
// needs to spill the memory op for an implicit null check.
if (cb->_dom_depth == (_dom_depth + 1)) {
if (cb->_dom_depth == (block->_dom_depth + 1)) {
if (cb != not_null_block) continue;
cb = cb->_idom;
}
}
if( cb != this ) continue;
if( cb != block ) continue;
// Found a memory user; see if it can be hoisted to check-block
uint vidx = 0; // Capture index of value into memop
@ -262,8 +262,8 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
if( is_decoden ) continue;
}
// Block of memory-op input
Block *inb = cfg->get_block_for_node(mach->in(j));
Block *b = this; // Start from null check
Block *inb = get_block_for_node(mach->in(j));
Block *b = block; // Start from null check
while( b != inb && b->_dom_depth > inb->_dom_depth )
b = b->_idom; // search upwards for input
// See if input dominates null check
@ -272,28 +272,28 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
}
if( j > 0 )
continue;
Block *mb = cfg->get_block_for_node(mach);
Block *mb = get_block_for_node(mach);
// Hoisting stores requires more checks for the anti-dependence case.
// Give up hoisting if we have to move the store past any load.
if( was_store ) {
Block *b = mb; // Start searching here for a local load
// mach use (faulting) trying to hoist
// n might be blocker to hoisting
while( b != this ) {
while( b != block ) {
uint k;
for( k = 1; k < b->_nodes.size(); k++ ) {
Node *n = b->_nodes[k];
for( k = 1; k < b->number_of_nodes(); k++ ) {
Node *n = b->get_node(k);
if( n->needs_anti_dependence_check() &&
n->in(LoadNode::Memory) == mach->in(StoreNode::Memory) )
break; // Found anti-dependent load
}
if( k < b->_nodes.size() )
if( k < b->number_of_nodes() )
break; // Found anti-dependent load
// Make sure control does not do a merge (would have to check allpaths)
if( b->num_preds() != 2 ) break;
b = cfg->get_block_for_node(b->pred(1)); // Move up to predecessor block
b = get_block_for_node(b->pred(1)); // Move up to predecessor block
}
if( b != this ) continue;
if( b != block ) continue;
}
// Make sure this memory op is not already being used for a NullCheck
@ -303,7 +303,7 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
// Found a candidate! Pick one with least dom depth - the highest
// in the dom tree should be closest to the null check.
if (best == NULL || cfg->get_block_for_node(mach)->_dom_depth < cfg->get_block_for_node(best)->_dom_depth) {
if (best == NULL || get_block_for_node(mach)->_dom_depth < get_block_for_node(best)->_dom_depth) {
best = mach;
bidx = vidx;
}
@ -319,46 +319,45 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
if( is_decoden ) {
// Check if we need to hoist decodeHeapOop_not_null first.
Block *valb = cfg->get_block_for_node(val);
if( this != valb && this->_dom_depth < valb->_dom_depth ) {
Block *valb = get_block_for_node(val);
if( block != valb && block->_dom_depth < valb->_dom_depth ) {
// Hoist it up to the end of the test block.
valb->find_remove(val);
this->add_inst(val);
cfg->map_node_to_block(val, this);
block->add_inst(val);
map_node_to_block(val, block);
// DecodeN on x86 may kill flags. Check for flag-killing projections
// that also need to be hoisted.
for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
Node* n = val->fast_out(j);
if( n->is_MachProj() ) {
cfg->get_block_for_node(n)->find_remove(n);
this->add_inst(n);
cfg->map_node_to_block(n, this);
get_block_for_node(n)->find_remove(n);
block->add_inst(n);
map_node_to_block(n, block);
}
}
}
}
// Hoist the memory candidate up to the end of the test block.
Block *old_block = cfg->get_block_for_node(best);
Block *old_block = get_block_for_node(best);
old_block->find_remove(best);
add_inst(best);
cfg->map_node_to_block(best, this);
block->add_inst(best);
map_node_to_block(best, block);
// Move the control dependence
if (best->in(0) && best->in(0) == old_block->_nodes[0])
best->set_req(0, _nodes[0]);
if (best->in(0) && best->in(0) == old_block->head())
best->set_req(0, block->head());
// Check for flag-killing projections that also need to be hoisted
// Should be DU safe because no edge updates.
for (DUIterator_Fast jmax, j = best->fast_outs(jmax); j < jmax; j++) {
Node* n = best->fast_out(j);
if( n->is_MachProj() ) {
cfg->get_block_for_node(n)->find_remove(n);
add_inst(n);
cfg->map_node_to_block(n, this);
get_block_for_node(n)->find_remove(n);
block->add_inst(n);
map_node_to_block(n, block);
}
}
Compile *C = cfg->C;
// proj==Op_True --> ne test; proj==Op_False --> eq test.
// One of two graph shapes got matched:
// (IfTrue (If (Bool NE (CmpP ptr NULL))))
@ -368,10 +367,10 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
// We need to flip the projections to keep the same semantics.
if( proj->Opcode() == Op_IfTrue ) {
// Swap order of projections in basic block to swap branch targets
Node *tmp1 = _nodes[end_idx()+1];
Node *tmp2 = _nodes[end_idx()+2];
_nodes.map(end_idx()+1, tmp2);
_nodes.map(end_idx()+2, tmp1);
Node *tmp1 = block->get_node(block->end_idx()+1);
Node *tmp2 = block->get_node(block->end_idx()+2);
block->map_node(tmp2, block->end_idx()+1);
block->map_node(tmp1, block->end_idx()+2);
Node *tmp = new (C) Node(C->top()); // Use not NULL input
tmp1->replace_by(tmp);
tmp2->replace_by(tmp1);
@ -384,8 +383,8 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
// it as well.
Node *old_tst = proj->in(0);
MachNode *nul_chk = new (C) MachNullCheckNode(old_tst->in(0),best,bidx);
_nodes.map(end_idx(),nul_chk);
cfg->map_node_to_block(nul_chk, this);
block->map_node(nul_chk, block->end_idx());
map_node_to_block(nul_chk, block);
// Redirect users of old_test to nul_chk
for (DUIterator_Last i2min, i2 = old_tst->last_outs(i2min); i2 >= i2min; --i2)
old_tst->last_out(i2)->set_req(0, nul_chk);
@ -393,8 +392,8 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
for (uint i3 = 0; i3 < old_tst->req(); i3++)
old_tst->set_req(i3, NULL);
cfg->latency_from_uses(nul_chk);
cfg->latency_from_uses(best);
latency_from_uses(nul_chk);
latency_from_uses(best);
}
@ -408,7 +407,7 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
// remaining cases (most), choose the instruction with the greatest latency
// (that is, the greatest number of pseudo-cycles required to the end of the
// routine). If there is a tie, choose the instruction with the most inputs.
Node *Block::select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot) {
Node* PhaseCFG::select(Block* block, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot) {
// If only a single entry on the stack, use it
uint cnt = worklist.size();
@ -442,7 +441,7 @@ Node *Block::select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &read
}
// Final call in a block must be adjacent to 'catch'
Node *e = end();
Node *e = block->end();
if( e->is_Catch() && e->in(0)->in(0) == n )
continue;
@ -468,7 +467,7 @@ Node *Block::select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &read
Node* use = n->fast_out(j);
// The use is a conditional branch, make them adjacent
if (use->is_MachIf() && cfg->get_block_for_node(use) == this) {
if (use->is_MachIf() && get_block_for_node(use) == block) {
found_machif = true;
break;
}
@ -501,7 +500,7 @@ Node *Block::select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &read
n_choice = 1;
}
uint n_latency = cfg->get_latency_for_node(n);
uint n_latency = get_latency_for_node(n);
uint n_score = n->req(); // Many inputs get high score to break ties
// Keep best latency found
@ -529,13 +528,13 @@ Node *Block::select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &read
//------------------------------set_next_call----------------------------------
void Block::set_next_call( Node *n, VectorSet &next_call, PhaseCFG* cfg) {
void PhaseCFG::set_next_call(Block* block, Node* n, VectorSet& next_call) {
if( next_call.test_set(n->_idx) ) return;
for( uint i=0; i<n->len(); i++ ) {
Node *m = n->in(i);
if( !m ) continue; // must see all nodes in block that precede call
if (cfg->get_block_for_node(m) == this) {
set_next_call(m, next_call, cfg);
if (get_block_for_node(m) == block) {
set_next_call(block, m, next_call);
}
}
}
@ -546,24 +545,26 @@ void Block::set_next_call( Node *n, VectorSet &next_call, PhaseCFG* cfg) {
// next subroutine call get priority - basically it moves things NOT needed
// for the next call till after the call. This prevents me from trying to
// carry lots of stuff live across a call.
void Block::needed_for_next_call(Node *this_call, VectorSet &next_call, PhaseCFG* cfg) {
void PhaseCFG::needed_for_next_call(Block* block, Node* this_call, VectorSet& next_call) {
// Find the next control-defining Node in this block
Node* call = NULL;
for (DUIterator_Fast imax, i = this_call->fast_outs(imax); i < imax; i++) {
Node* m = this_call->fast_out(i);
if(cfg->get_block_for_node(m) == this && // Local-block user
if (get_block_for_node(m) == block && // Local-block user
m != this_call && // Not self-start node
m->is_MachCall() )
m->is_MachCall()) {
call = m;
break;
}
}
if (call == NULL) return; // No next call (e.g., block end is near)
// Set next-call for all inputs to this call
set_next_call(call, next_call, cfg);
set_next_call(block, call, next_call);
}
//------------------------------add_call_kills-------------------------------------
void Block::add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe) {
// helper function that adds caller save registers to MachProjNode
static void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe) {
// Fill in the kill mask for the call
for( OptoReg::Name r = OptoReg::Name(0); r < _last_Mach_Reg; r=OptoReg::add(r,1) ) {
if( !regs.Member(r) ) { // Not already defined by the call
@ -579,7 +580,7 @@ void Block::add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_p
//------------------------------sched_call-------------------------------------
uint Block::sched_call( Matcher &matcher, PhaseCFG* cfg, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call ) {
uint PhaseCFG::sched_call(Block* block, uint node_cnt, Node_List& worklist, GrowableArray<int>& ready_cnt, MachCallNode* mcall, VectorSet& next_call) {
RegMask regs;
// Schedule all the users of the call right now. All the users are
@ -592,18 +593,18 @@ uint Block::sched_call( Matcher &matcher, PhaseCFG* cfg, uint node_cnt, Node_Lis
ready_cnt.at_put(n->_idx, n_cnt);
assert( n_cnt == 0, "" );
// Schedule next to call
_nodes.map(node_cnt++, n);
block->map_node(n, node_cnt++);
// Collect defined registers
regs.OR(n->out_RegMask());
// Check for scheduling the next control-definer
if( n->bottom_type() == Type::CONTROL )
// Warm up next pile of heuristic bits
needed_for_next_call(n, next_call, cfg);
needed_for_next_call(block, n, next_call);
// Children of projections are now all ready
for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
Node* m = n->fast_out(j); // Get user
if(cfg->get_block_for_node(m) != this) {
if(get_block_for_node(m) != block) {
continue;
}
if( m->is_Phi() ) continue;
@ -617,14 +618,14 @@ uint Block::sched_call( Matcher &matcher, PhaseCFG* cfg, uint node_cnt, Node_Lis
// Act as if the call defines the Frame Pointer.
// Certainly the FP is alive and well after the call.
regs.Insert(matcher.c_frame_pointer());
regs.Insert(_matcher.c_frame_pointer());
// Set all registers killed and not already defined by the call.
uint r_cnt = mcall->tf()->range()->cnt();
int op = mcall->ideal_Opcode();
MachProjNode *proj = new (matcher.C) MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj );
cfg->map_node_to_block(proj, this);
_nodes.insert(node_cnt++, proj);
MachProjNode *proj = new (C) MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj );
map_node_to_block(proj, block);
block->insert_node(proj, node_cnt++);
// Select the right register save policy.
const char * save_policy;
@ -633,13 +634,13 @@ uint Block::sched_call( Matcher &matcher, PhaseCFG* cfg, uint node_cnt, Node_Lis
case Op_CallLeaf:
case Op_CallLeafNoFP:
// Calling C code so use C calling convention
save_policy = matcher._c_reg_save_policy;
save_policy = _matcher._c_reg_save_policy;
break;
case Op_CallStaticJava:
case Op_CallDynamicJava:
// Calling Java code so use Java calling convention
save_policy = matcher._register_save_policy;
save_policy = _matcher._register_save_policy;
break;
default:
@ -674,44 +675,46 @@ uint Block::sched_call( Matcher &matcher, PhaseCFG* cfg, uint node_cnt, Node_Lis
//------------------------------schedule_local---------------------------------
// Topological sort within a block. Someday become a real scheduler.
bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &ready_cnt, VectorSet &next_call) {
bool PhaseCFG::schedule_local(Block* block, GrowableArray<int>& ready_cnt, VectorSet& next_call) {
// Already "sorted" are the block start Node (as the first entry), and
// the block-ending Node and any trailing control projections. We leave
// these alone. PhiNodes and ParmNodes are made to follow the block start
// Node. Everything else gets topo-sorted.
#ifndef PRODUCT
if (cfg->trace_opto_pipelining()) {
tty->print_cr("# --- schedule_local B%d, before: ---", _pre_order);
for (uint i = 0;i < _nodes.size();i++) {
if (trace_opto_pipelining()) {
tty->print_cr("# --- schedule_local B%d, before: ---", block->_pre_order);
for (uint i = 0;i < block->number_of_nodes(); i++) {
tty->print("# ");
_nodes[i]->fast_dump();
block->get_node(i)->fast_dump();
}
tty->print_cr("#");
}
#endif
// RootNode is already sorted
if( _nodes.size() == 1 ) return true;
if (block->number_of_nodes() == 1) {
return true;
}
// Move PhiNodes and ParmNodes from 1 to cnt up to the start
uint node_cnt = end_idx();
uint node_cnt = block->end_idx();
uint phi_cnt = 1;
uint i;
for( i = 1; i<node_cnt; i++ ) { // Scan for Phi
Node *n = _nodes[i];
Node *n = block->get_node(i);
if( n->is_Phi() || // Found a PhiNode or ParmNode
(n->is_Proj() && n->in(0) == head()) ) {
(n->is_Proj() && n->in(0) == block->head()) ) {
// Move guy at 'phi_cnt' to the end; makes a hole at phi_cnt
_nodes.map(i,_nodes[phi_cnt]);
_nodes.map(phi_cnt++,n); // swap Phi/Parm up front
block->map_node(block->get_node(phi_cnt), i);
block->map_node(n, phi_cnt++); // swap Phi/Parm up front
} else { // All others
// Count block-local inputs to 'n'
uint cnt = n->len(); // Input count
uint local = 0;
for( uint j=0; j<cnt; j++ ) {
Node *m = n->in(j);
if( m && cfg->get_block_for_node(m) == this && !m->is_top() )
if( m && get_block_for_node(m) == block && !m->is_top() )
local++; // One more block-local input
}
ready_cnt.at_put(n->_idx, local); // Count em up
@ -723,7 +726,7 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &
for (uint prec = n->req(); prec < n->len(); prec++) {
Node* oop_store = n->in(prec);
if (oop_store != NULL) {
assert(cfg->get_block_for_node(oop_store)->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark");
assert(get_block_for_node(oop_store)->_dom_depth <= block->_dom_depth, "oop_store must dominate card-mark");
}
}
}
@ -747,16 +750,16 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &
}
}
}
for(uint i2=i; i2<_nodes.size(); i2++ ) // Trailing guys get zapped count
ready_cnt.at_put(_nodes[i2]->_idx, 0);
for(uint i2=i; i2< block->number_of_nodes(); i2++ ) // Trailing guys get zapped count
ready_cnt.at_put(block->get_node(i2)->_idx, 0);
// All the prescheduled guys do not hold back internal nodes
uint i3;
for(i3 = 0; i3<phi_cnt; i3++ ) { // For all pre-scheduled
Node *n = _nodes[i3]; // Get pre-scheduled
Node *n = block->get_node(i3); // Get pre-scheduled
for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
Node* m = n->fast_out(j);
if (cfg->get_block_for_node(m) == this) { // Local-block user
if (get_block_for_node(m) == block) { // Local-block user
int m_cnt = ready_cnt.at(m->_idx)-1;
ready_cnt.at_put(m->_idx, m_cnt); // Fix ready count
}
@ -767,7 +770,7 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &
// Make a worklist
Node_List worklist;
for(uint i4=i3; i4<node_cnt; i4++ ) { // Put ready guys on worklist
Node *m = _nodes[i4];
Node *m = block->get_node(i4);
if( !ready_cnt.at(m->_idx) ) { // Zero ready count?
if (m->is_iteratively_computed()) {
// Push induction variable increments last to allow other uses
@ -789,15 +792,15 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &
}
// Warm up the 'next_call' heuristic bits
needed_for_next_call(_nodes[0], next_call, cfg);
needed_for_next_call(block, block->head(), next_call);
#ifndef PRODUCT
if (cfg->trace_opto_pipelining()) {
for (uint j=0; j<_nodes.size(); j++) {
Node *n = _nodes[j];
if (trace_opto_pipelining()) {
for (uint j=0; j< block->number_of_nodes(); j++) {
Node *n = block->get_node(j);
int idx = n->_idx;
tty->print("# ready cnt:%3d ", ready_cnt.at(idx));
tty->print("latency:%3d ", cfg->get_latency_for_node(n));
tty->print("latency:%3d ", get_latency_for_node(n));
tty->print("%4d: %s\n", idx, n->Name());
}
}
@ -808,7 +811,7 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &
while( worklist.size() ) { // Worklist is not ready
#ifndef PRODUCT
if (cfg->trace_opto_pipelining()) {
if (trace_opto_pipelining()) {
tty->print("# ready list:");
for( uint i=0; i<worklist.size(); i++ ) { // Inspect entire worklist
Node *n = worklist[i]; // Get Node on worklist
@ -819,13 +822,13 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &
#endif
// Select and pop a ready guy from worklist
Node* n = select(cfg, worklist, ready_cnt, next_call, phi_cnt);
_nodes.map(phi_cnt++,n); // Schedule him next
Node* n = select(block, worklist, ready_cnt, next_call, phi_cnt);
block->map_node(n, phi_cnt++); // Schedule him next
#ifndef PRODUCT
if (cfg->trace_opto_pipelining()) {
if (trace_opto_pipelining()) {
tty->print("# select %d: %s", n->_idx, n->Name());
tty->print(", latency:%d", cfg->get_latency_for_node(n));
tty->print(", latency:%d", get_latency_for_node(n));
n->dump();
if (Verbose) {
tty->print("# ready list:");
@ -840,26 +843,26 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &
#endif
if( n->is_MachCall() ) {
MachCallNode *mcall = n->as_MachCall();
phi_cnt = sched_call(matcher, cfg, phi_cnt, worklist, ready_cnt, mcall, next_call);
phi_cnt = sched_call(block, phi_cnt, worklist, ready_cnt, mcall, next_call);
continue;
}
if (n->is_Mach() && n->as_Mach()->has_call()) {
RegMask regs;
regs.Insert(matcher.c_frame_pointer());
regs.Insert(_matcher.c_frame_pointer());
regs.OR(n->out_RegMask());
MachProjNode *proj = new (matcher.C) MachProjNode( n, 1, RegMask::Empty, MachProjNode::fat_proj );
cfg->map_node_to_block(proj, this);
_nodes.insert(phi_cnt++, proj);
MachProjNode *proj = new (C) MachProjNode( n, 1, RegMask::Empty, MachProjNode::fat_proj );
map_node_to_block(proj, block);
block->insert_node(proj, phi_cnt++);
add_call_kills(proj, regs, matcher._c_reg_save_policy, false);
add_call_kills(proj, regs, _matcher._c_reg_save_policy, false);
}
// Children are now all ready
for (DUIterator_Fast i5max, i5 = n->fast_outs(i5max); i5 < i5max; i5++) {
Node* m = n->fast_out(i5); // Get user
if (cfg->get_block_for_node(m) != this) {
if (get_block_for_node(m) != block) {
continue;
}
if( m->is_Phi() ) continue;
@ -874,9 +877,8 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &
}
}
if( phi_cnt != end_idx() ) {
if( phi_cnt != block->end_idx() ) {
// did not schedule all. Retry, Bailout, or Die
Compile* C = matcher.C;
if (C->subsume_loads() == true && !C->failing()) {
// Retry with subsume_loads == false
// If this is the first failure, the sentinel string will "stick"
@ -888,12 +890,12 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &
}
#ifndef PRODUCT
if (cfg->trace_opto_pipelining()) {
if (trace_opto_pipelining()) {
tty->print_cr("#");
tty->print_cr("# after schedule_local");
for (uint i = 0;i < _nodes.size();i++) {
for (uint i = 0;i < block->number_of_nodes();i++) {
tty->print("# ");
_nodes[i]->fast_dump();
block->get_node(i)->fast_dump();
}
tty->cr();
}
@ -919,7 +921,7 @@ static void catch_cleanup_fix_all_inputs(Node *use, Node *old_def, Node *new_def
}
//------------------------------catch_cleanup_find_cloned_def------------------
static Node *catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def_blk, PhaseCFG* cfg, int n_clone_idx) {
Node* PhaseCFG::catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def_blk, int n_clone_idx) {
assert( use_blk != def_blk, "Inter-block cleanup only");
// The use is some block below the Catch. Find and return the clone of the def
@ -945,14 +947,14 @@ static Node *catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def
// PhiNode, the PhiNode uses from the def and IT's uses need fixup.
Node_Array inputs = new Node_List(Thread::current()->resource_area());
for(uint k = 1; k < use_blk->num_preds(); k++) {
Block* block = cfg->get_block_for_node(use_blk->pred(k));
inputs.map(k, catch_cleanup_find_cloned_def(block, def, def_blk, cfg, n_clone_idx));
Block* block = get_block_for_node(use_blk->pred(k));
inputs.map(k, catch_cleanup_find_cloned_def(block, def, def_blk, n_clone_idx));
}
// Check to see if the use_blk already has an identical phi inserted.
// If it exists, it will be at the first position since all uses of a
// def are processed together.
Node *phi = use_blk->_nodes[1];
Node *phi = use_blk->get_node(1);
if( phi->is_Phi() ) {
fixup = phi;
for (uint k = 1; k < use_blk->num_preds(); k++) {
@ -967,8 +969,8 @@ static Node *catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def
// If an existing PhiNode was not found, make a new one.
if (fixup == NULL) {
Node *new_phi = PhiNode::make(use_blk->head(), def);
use_blk->_nodes.insert(1, new_phi);
cfg->map_node_to_block(new_phi, use_blk);
use_blk->insert_node(new_phi, 1);
map_node_to_block(new_phi, use_blk);
for (uint k = 1; k < use_blk->num_preds(); k++) {
new_phi->set_req(k, inputs[k]);
}
@ -977,7 +979,7 @@ static Node *catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def
} else {
// Found the use just below the Catch. Make it use the clone.
fixup = use_blk->_nodes[n_clone_idx];
fixup = use_blk->get_node(n_clone_idx);
}
return fixup;
@ -997,36 +999,36 @@ static void catch_cleanup_intra_block(Node *use, Node *def, Block *blk, int beg,
for( uint k = 0; k < blk->_num_succs; k++ ) {
// Get clone in each successor block
Block *sb = blk->_succs[k];
Node *clone = sb->_nodes[offset_idx+1];
Node *clone = sb->get_node(offset_idx+1);
assert( clone->Opcode() == use->Opcode(), "" );
// Make use-clone reference the def-clone
catch_cleanup_fix_all_inputs(clone, def, sb->_nodes[n_clone_idx]);
catch_cleanup_fix_all_inputs(clone, def, sb->get_node(n_clone_idx));
}
}
//------------------------------catch_cleanup_inter_block---------------------
// Fix all input edges in use that reference "def". The use is in a different
// block than the def.
static void catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, PhaseCFG* cfg, int n_clone_idx) {
void PhaseCFG::catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, int n_clone_idx) {
if( !use_blk ) return; // Can happen if the use is a precedence edge
Node *new_def = catch_cleanup_find_cloned_def(use_blk, def, def_blk, cfg, n_clone_idx);
Node *new_def = catch_cleanup_find_cloned_def(use_blk, def, def_blk, n_clone_idx);
catch_cleanup_fix_all_inputs(use, def, new_def);
}
//------------------------------call_catch_cleanup-----------------------------
// If we inserted any instructions between a Call and his CatchNode,
// clone the instructions on all paths below the Catch.
void Block::call_catch_cleanup(PhaseCFG* cfg, Compile* C) {
void PhaseCFG::call_catch_cleanup(Block* block) {
// End of region to clone
uint end = end_idx();
if( !_nodes[end]->is_Catch() ) return;
uint end = block->end_idx();
if( !block->get_node(end)->is_Catch() ) return;
// Start of region to clone
uint beg = end;
while(!_nodes[beg-1]->is_MachProj() ||
!_nodes[beg-1]->in(0)->is_MachCall() ) {
while(!block->get_node(beg-1)->is_MachProj() ||
!block->get_node(beg-1)->in(0)->is_MachCall() ) {
beg--;
assert(beg > 0,"Catch cleanup walking beyond block boundary");
}
@ -1035,15 +1037,15 @@ void Block::call_catch_cleanup(PhaseCFG* cfg, Compile* C) {
// Clone along all Catch output paths. Clone area between the 'beg' and
// 'end' indices.
for( uint i = 0; i < _num_succs; i++ ) {
Block *sb = _succs[i];
for( uint i = 0; i < block->_num_succs; i++ ) {
Block *sb = block->_succs[i];
// Clone the entire area; ignoring the edge fixup for now.
for( uint j = end; j > beg; j-- ) {
// It is safe here to clone a node with anti_dependence
// since clones dominate on each path.
Node *clone = _nodes[j-1]->clone();
sb->_nodes.insert( 1, clone );
cfg->map_node_to_block(clone, sb);
Node *clone = block->get_node(j-1)->clone();
sb->insert_node(clone, 1);
map_node_to_block(clone, sb);
}
}
@ -1051,7 +1053,7 @@ void Block::call_catch_cleanup(PhaseCFG* cfg, Compile* C) {
// Fixup edges. Check the def-use info per cloned Node
for(uint i2 = beg; i2 < end; i2++ ) {
uint n_clone_idx = i2-beg+1; // Index of clone of n in each successor block
Node *n = _nodes[i2]; // Node that got cloned
Node *n = block->get_node(i2); // Node that got cloned
// Need DU safe iterator because of edge manipulation in calls.
Unique_Node_List *out = new Unique_Node_List(Thread::current()->resource_area());
for (DUIterator_Fast j1max, j1 = n->fast_outs(j1max); j1 < j1max; j1++) {
@ -1060,19 +1062,19 @@ void Block::call_catch_cleanup(PhaseCFG* cfg, Compile* C) {
uint max = out->size();
for (uint j = 0; j < max; j++) {// For all users
Node *use = out->pop();
Block *buse = cfg->get_block_for_node(use);
Block *buse = get_block_for_node(use);
if( use->is_Phi() ) {
for( uint k = 1; k < use->req(); k++ )
if( use->in(k) == n ) {
Block* block = cfg->get_block_for_node(buse->pred(k));
Node *fixup = catch_cleanup_find_cloned_def(block, n, this, cfg, n_clone_idx);
Block* b = get_block_for_node(buse->pred(k));
Node *fixup = catch_cleanup_find_cloned_def(b, n, block, n_clone_idx);
use->set_req(k, fixup);
}
} else {
if (this == buse) {
catch_cleanup_intra_block(use, n, this, beg, n_clone_idx);
if (block == buse) {
catch_cleanup_intra_block(use, n, block, beg, n_clone_idx);
} else {
catch_cleanup_inter_block(use, buse, n, this, cfg, n_clone_idx);
catch_cleanup_inter_block(use, buse, n, block, n_clone_idx);
}
}
} // End for all users
@ -1081,30 +1083,30 @@ void Block::call_catch_cleanup(PhaseCFG* cfg, Compile* C) {
// Remove the now-dead cloned ops
for(uint i3 = beg; i3 < end; i3++ ) {
_nodes[beg]->disconnect_inputs(NULL, C);
_nodes.remove(beg);
block->get_node(beg)->disconnect_inputs(NULL, C);
block->remove_node(beg);
}
// If the successor blocks have a CreateEx node, move it back to the top
for(uint i4 = 0; i4 < _num_succs; i4++ ) {
Block *sb = _succs[i4];
for(uint i4 = 0; i4 < block->_num_succs; i4++ ) {
Block *sb = block->_succs[i4];
uint new_cnt = end - beg;
// Remove any newly created, but dead, nodes.
for( uint j = new_cnt; j > 0; j-- ) {
Node *n = sb->_nodes[j];
Node *n = sb->get_node(j);
if (n->outcnt() == 0 &&
(!n->is_Proj() || n->as_Proj()->in(0)->outcnt() == 1) ){
n->disconnect_inputs(NULL, C);
sb->_nodes.remove(j);
sb->remove_node(j);
new_cnt--;
}
}
// If any newly created nodes remain, move the CreateEx node to the top
if (new_cnt > 0) {
Node *cex = sb->_nodes[1+new_cnt];
Node *cex = sb->get_node(1+new_cnt);
if( cex->is_Mach() && cex->as_Mach()->ideal_Opcode() == Op_CreateEx ) {
sb->_nodes.remove(1+new_cnt);
sb->_nodes.insert(1,cex);
sb->remove_node(1+new_cnt);
sb->insert_node(cex, 1);
}
}
}

@ -2756,10 +2756,28 @@ bool LibraryCallKit::inline_unsafe_load_store(BasicType type, LoadStoreKind kind
newval = _gvn.makecon(TypePtr::NULL_PTR);
// Reference stores need a store barrier.
pre_barrier(true /* do_load*/,
control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
NULL /* pre_val*/,
T_OBJECT);
if (kind == LS_xchg) {
// If the pre-barrier must execute before the oop store, the old value will require do_load here.
if (!can_move_pre_barrier()) {
pre_barrier(true /* do_load*/,
control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
NULL /* pre_val*/,
T_OBJECT);
} // Else move pre_barrier to use load_store value, see below.
} else if (kind == LS_cmpxchg) {
// Same as for newval above:
if (_gvn.type(oldval) == TypePtr::NULL_PTR) {
oldval = _gvn.makecon(TypePtr::NULL_PTR);
}
// The only known value which might get overwritten is oldval.
pre_barrier(false /* do_load */,
control(), NULL, NULL, max_juint, NULL, NULL,
oldval /* pre_val */,
T_OBJECT);
} else {
ShouldNotReachHere();
}
#ifdef _LP64
if (adr->bottom_type()->is_ptr_to_narrowoop()) {
Node *newval_enc = _gvn.transform(new (C) EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
@ -2795,16 +2813,27 @@ bool LibraryCallKit::inline_unsafe_load_store(BasicType type, LoadStoreKind kind
Node* proj = _gvn.transform(new (C) SCMemProjNode(load_store));
set_memory(proj, alias_idx);
if (type == T_OBJECT && kind == LS_xchg) {
#ifdef _LP64
if (adr->bottom_type()->is_ptr_to_narrowoop()) {
load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
}
#endif
if (can_move_pre_barrier()) {
// Don't need to load pre_val. The old value is returned by load_store.
// The pre_barrier can execute after the xchg as long as no safepoint
// gets inserted between them.
pre_barrier(false /* do_load */,
control(), NULL, NULL, max_juint, NULL, NULL,
load_store /* pre_val */,
T_OBJECT);
}
}
// Add the trailing membar surrounding the access
insert_mem_bar(Op_MemBarCPUOrder);
insert_mem_bar(Op_MemBarAcquire);
#ifdef _LP64
if (type == T_OBJECT && adr->bottom_type()->is_ptr_to_narrowoop() && kind == LS_xchg) {
load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
}
#endif
assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
set_result(load_store);
return true;

@ -85,8 +85,8 @@ void PhaseLive::compute(uint maxlrg) {
IndexSet* def = &_defs[block->_pre_order-1];
DEBUG_ONLY(IndexSet *def_outside = getfreeset();)
uint i;
for (i = block->_nodes.size(); i > 1; i--) {
Node* n = block->_nodes[i-1];
for (i = block->number_of_nodes(); i > 1; i--) {
Node* n = block->get_node(i-1);
if (n->is_Phi()) {
break;
}
@ -112,7 +112,7 @@ void PhaseLive::compute(uint maxlrg) {
#endif
// Remove anything defined by Phis and the block start instruction
for (uint k = i; k > 0; k--) {
uint r = _names[block->_nodes[k - 1]->_idx];
uint r = _names[block->get_node(k - 1)->_idx];
def->insert(r);
use->remove(r);
}
@ -124,7 +124,7 @@ void PhaseLive::compute(uint maxlrg) {
// PhiNode uses go in the live-out set of prior blocks.
for (uint k = i; k > 0; k--) {
add_liveout(p, _names[block->_nodes[k-1]->in(l)->_idx], first_pass);
add_liveout(p, _names[block->get_node(k-1)->in(l)->_idx], first_pass);
}
}
freeset(block);
@ -254,10 +254,10 @@ void PhaseLive::add_liveout( Block *p, IndexSet *lo, VectorSet &first_pass ) {
void PhaseLive::dump( const Block *b ) const {
tty->print("Block %d: ",b->_pre_order);
tty->print("LiveOut: "); _live[b->_pre_order-1].dump();
uint cnt = b->_nodes.size();
uint cnt = b->number_of_nodes();
for( uint i=0; i<cnt; i++ ) {
tty->print("L%d/", _names[b->_nodes[i]->_idx] );
b->_nodes[i]->dump();
tty->print("L%d/", _names[b->get_node(i)->_idx] );
b->get_node(i)->dump();
}
tty->print("\n");
}
@ -269,7 +269,7 @@ void PhaseChaitin::verify_base_ptrs( ResourceArea *a ) const {
for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
Block* block = _cfg.get_block(i);
for (uint j = block->end_idx() + 1; j > 1; j--) {
Node* n = block->_nodes[j-1];
Node* n = block->get_node(j-1);
if (n->is_Phi()) {
break;
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -58,7 +58,7 @@ class State;
class MachOper : public ResourceObj {
public:
// Allocate right next to the MachNodes in the same arena
void *operator new( size_t x, Compile* C ) { return C->node_arena()->Amalloc_D(x); }
void *operator new( size_t x, Compile* C ) throw() { return C->node_arena()->Amalloc_D(x); }
// Opcode
virtual uint opcode() const = 0;

@ -72,6 +72,8 @@ void PhaseMacroExpand::copy_call_debug_info(CallNode *oldcall, CallNode * newcal
int jvms_adj = new_dbg_start - old_dbg_start;
assert (new_dbg_start == newcall->req(), "argument count mismatch");
// SafePointScalarObject node could be referenced several times in debug info.
// Use Dict to record cloned nodes.
Dict* sosn_map = new Dict(cmpkey,hashkey);
for (uint i = old_dbg_start; i < oldcall->req(); i++) {
Node* old_in = oldcall->in(i);
@ -79,8 +81,8 @@ void PhaseMacroExpand::copy_call_debug_info(CallNode *oldcall, CallNode * newcal
if (old_in != NULL && old_in->is_SafePointScalarObject()) {
SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject();
uint old_unique = C->unique();
Node* new_in = old_sosn->clone(jvms_adj, sosn_map);
if (old_unique != C->unique()) {
Node* new_in = old_sosn->clone(sosn_map);
if (old_unique != C->unique()) { // New node?
new_in->set_req(0, C->root()); // reset control edge
new_in = transform_later(new_in); // Register new node.
}
@ -725,7 +727,11 @@ bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray <Sa
while (safepoints.length() > 0) {
SafePointNode* sfpt = safepoints.pop();
Node* mem = sfpt->memory();
uint first_ind = sfpt->req();
assert(sfpt->jvms() != NULL, "missed JVMS");
// Fields of scalar objs are referenced only at the end
// of regular debuginfo at the last (youngest) JVMS.
// Record relative start index.
uint first_ind = (sfpt->req() - sfpt->jvms()->scloff());
SafePointScalarObjectNode* sobj = new (C) SafePointScalarObjectNode(res_type,
#ifdef ASSERT
alloc,
@ -799,7 +805,7 @@ bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray <Sa
for (int i = start; i < end; i++) {
if (sfpt_done->in(i)->is_SafePointScalarObject()) {
SafePointScalarObjectNode* scobj = sfpt_done->in(i)->as_SafePointScalarObject();
if (scobj->first_index() == sfpt_done->req() &&
if (scobj->first_index(jvms) == sfpt_done->req() &&
scobj->n_fields() == (uint)nfields) {
assert(scobj->alloc() == alloc, "sanity");
sfpt_done->set_req(i, res);
