Merge

commit 1380ff2e93
@@ -67,9 +67,6 @@ public class ImmutableOopMapSet extends VMObject {
  }
}

public void visitValueLocation(Address valueAddr) {
}

public void visitNarrowOopLocation(Address narrowOopAddr) {
  addressVisitor.visitCompOopAddress(narrowOopAddr);
}
@@ -216,9 +213,9 @@ public class ImmutableOopMapSet extends VMObject {
  }
}

// We want narrow oop, value and oop oop_types
OopMapValue.OopTypes[] values = new OopMapValue.OopTypes[]{
  OopMapValue.OopTypes.OOP_VALUE, OopMapValue.OopTypes.VALUE_VALUE, OopMapValue.OopTypes.NARROWOOP_VALUE
// We want narrow oop and oop oop_types
OopMapValue.OopTypes[] values = new OopMapValue.OopTypes[] {
  OopMapValue.OopTypes.OOP_VALUE, OopMapValue.OopTypes.NARROWOOP_VALUE
};

{
@@ -231,8 +228,6 @@ public class ImmutableOopMapSet extends VMObject {
// to detect in the debugging system
// assert(Universe::is_heap_or_null(*loc), "found non oop pointer");
visitor.visitOopLocation(loc);
} else if (omv.getType() == OopMapValue.OopTypes.VALUE_VALUE) {
visitor.visitValueLocation(loc);
} else if (omv.getType() == OopMapValue.OopTypes.NARROWOOP_VALUE) {
visitor.visitNarrowOopLocation(loc);
}
@@ -49,7 +49,6 @@ public class OopMapValue {
// Types of OopValues
static int UNUSED_VALUE;
static int OOP_VALUE;
static int VALUE_VALUE;
static int NARROWOOP_VALUE;
static int CALLEE_SAVED_VALUE;
static int DERIVED_OOP_VALUE;
@@ -73,7 +72,6 @@ public class OopMapValue {
REGISTER_MASK_IN_PLACE = db.lookupIntConstant("OopMapValue::register_mask_in_place").intValue();
UNUSED_VALUE = db.lookupIntConstant("OopMapValue::unused_value").intValue();
OOP_VALUE = db.lookupIntConstant("OopMapValue::oop_value").intValue();
VALUE_VALUE = db.lookupIntConstant("OopMapValue::value_value").intValue();
NARROWOOP_VALUE = db.lookupIntConstant("OopMapValue::narrowoop_value").intValue();
CALLEE_SAVED_VALUE = db.lookupIntConstant("OopMapValue::callee_saved_value").intValue();
DERIVED_OOP_VALUE = db.lookupIntConstant("OopMapValue::derived_oop_value").intValue();
@@ -82,7 +80,6 @@ public class OopMapValue {
public static abstract class OopTypes {
public static final OopTypes UNUSED_VALUE = new OopTypes() { int getValue() { return OopMapValue.UNUSED_VALUE; }};
public static final OopTypes OOP_VALUE = new OopTypes() { int getValue() { return OopMapValue.OOP_VALUE; }};
public static final OopTypes VALUE_VALUE = new OopTypes() { int getValue() { return OopMapValue.VALUE_VALUE; }};
public static final OopTypes NARROWOOP_VALUE = new OopTypes() { int getValue() { return OopMapValue.NARROWOOP_VALUE; }};
public static final OopTypes CALLEE_SAVED_VALUE = new OopTypes() { int getValue() { return OopMapValue.CALLEE_SAVED_VALUE; }};
public static final OopTypes DERIVED_OOP_VALUE = new OopTypes() { int getValue() { return OopMapValue.DERIVED_OOP_VALUE; }};
@@ -105,7 +102,6 @@ public class OopMapValue {

// Querying
public boolean isOop() { return (getValue() & TYPE_MASK_IN_PLACE) == OOP_VALUE; }
public boolean isValue() { return (getValue() & TYPE_MASK_IN_PLACE) == VALUE_VALUE; }
public boolean isNarrowOop() { return (getValue() & TYPE_MASK_IN_PLACE) == NARROWOOP_VALUE; }
public boolean isCalleeSaved() { return (getValue() & TYPE_MASK_IN_PLACE) == CALLEE_SAVED_VALUE; }
public boolean isDerivedOop() { return (getValue() & TYPE_MASK_IN_PLACE) == DERIVED_OOP_VALUE; }
@@ -117,7 +113,6 @@ public class OopMapValue {
int which = (getValue() & TYPE_MASK_IN_PLACE);
if (which == UNUSED_VALUE) return OopTypes.UNUSED_VALUE;
else if (which == OOP_VALUE) return OopTypes.OOP_VALUE;
else if (which == VALUE_VALUE) return OopTypes.VALUE_VALUE;
else if (which == NARROWOOP_VALUE) return OopTypes.NARROWOOP_VALUE;
else if (which == CALLEE_SAVED_VALUE) return OopTypes.CALLEE_SAVED_VALUE;
else if (which == DERIVED_OOP_VALUE) return OopTypes.DERIVED_OOP_VALUE;

@@ -31,6 +31,5 @@ import sun.jvm.hotspot.debugger.*;

public interface OopMapVisitor {
public void visitOopLocation(Address oopAddr);
public void visitDerivedOopLocation(Address baseOopAddr, Address derivedOopAddr);
public void visitValueLocation(Address valueAddr);
public void visitNarrowOopLocation(Address narrowOopAddr);
}
@@ -536,9 +536,6 @@ public abstract class Frame implements Cloneable {
  }
}

public void visitValueLocation(Address valueAddr) {
}

public void visitNarrowOopLocation(Address compOopAddr) {
  addressVisitor.visitCompOopAddress(compOopAddr);
}

@@ -1220,9 +1220,6 @@ public class HTMLGenerator implements /* imports */ ClassConstants {
oms = new OopMapStream(map, OopMapValue.OopTypes.NARROWOOP_VALUE);
buf.append(omvIterator.iterate(oms, "NarrowOops:", false));

oms = new OopMapStream(map, OopMapValue.OopTypes.VALUE_VALUE);
buf.append(omvIterator.iterate(oms, "Values:", false));

oms = new OopMapStream(map, OopMapValue.OopTypes.CALLEE_SAVED_VALUE);
buf.append(omvIterator.iterate(oms, "Callee saved:", true));
(diff for one file suppressed because it is too large)
@@ -2311,6 +2311,10 @@ public:

#define MSG "invalid arrangement"

#define ASSERTION (T == T2S || T == T4S || T == T2D)
INSN(fsqrt, 1, 0b11111);
#undef ASSERTION

#define ASSERTION (T == T8B || T == T16B || T == T4H || T == T8H || T == T2S || T == T4S)
INSN(rev64, 0, 0b00000);
#undef ASSERTION
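For orientation, a minimal standalone C++ sketch of the guard pattern in the hunk above: each INSN expansion is compiled under whatever ASSERTION macro is currently in force, so an instruction only accepts the SIMD arrangements its block names. The enum values and the checking function here are illustrative stand-ins, not HotSpot code.

    #include <cassert>

    enum Arrangement { T8B, T16B, T4H, T8H, T2S, T4S, T2D };

    // Mirrors the ASSERTION block guarding fsqrt above.
    #define ASSERTION (T == T2S || T == T4S || T == T2D)
    void fsqrt_check(Arrangement T) { assert(ASSERTION && "invalid arrangement"); }
    #undef ASSERTION

    int main() {
      fsqrt_check(T2D);    // valid arrangement, passes
      // fsqrt_check(T8B); // would trip the assert
    }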
@@ -72,6 +72,7 @@ define_pd_global(bool, OptoPeephole, true);
define_pd_global(bool, UseCISCSpill, true);
define_pd_global(bool, OptoScheduling, false);
define_pd_global(bool, OptoBundling, false);
define_pd_global(bool, OptoRegScheduling, false);

define_pd_global(intx, ReservedCodeCacheSize, 48*M);
define_pd_global(intx, NonProfiledCodeHeapSize, 21*M);
@@ -42,6 +42,11 @@

// Implementation of InterpreterMacroAssembler

void InterpreterMacroAssembler::jump_to_entry(address entry) {
  assert(entry, "Entry must have been generated by now");
  b(entry);
}

#ifndef CC_INTERP

void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) {

@@ -66,6 +66,8 @@ class InterpreterMacroAssembler: public MacroAssembler {

void load_earlyret_value(TosState state);

void jump_to_entry(address entry);

#ifdef CC_INTERP
void save_bcp() { /* not needed in c++ interpreter and harmless */ }
void restore_bcp() { /* not needed in c++ interpreter and harmless */ }
@@ -41,13 +41,13 @@ private:
address generate_native_entry(bool synchronized);
address generate_abstract_entry(void);
address generate_math_entry(AbstractInterpreter::MethodKind kind);
address generate_jump_to_normal_entry(void);
address generate_accessor_entry(void) { return generate_jump_to_normal_entry(); }
address generate_empty_entry(void) { return generate_jump_to_normal_entry(); }
address generate_accessor_entry(void) { return NULL; }
address generate_empty_entry(void) { return NULL; }
void generate_transcendental_entry(AbstractInterpreter::MethodKind kind, int fpargs);
address generate_Reference_get_entry();
address generate_CRC32_update_entry();
address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind);
address generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
void lock_method(void);
void generate_stack_overflow_check(void);
@@ -236,17 +236,6 @@ void InterpreterGenerator::generate_transcendental_entry(AbstractInterpreter::MethodKind kind, int fpargs) {
__ blrt(rscratch1, gpargs, fpargs, rtype);
}

// Accessor and empty entries jump into the normal path via the normal entry.
// The "fast" optimization doesn't update the compilation count and can
// therefore disable inlining of functions that should be inlined.
address InterpreterGenerator::generate_jump_to_normal_entry(void) {
  address entry_point = __ pc();

  assert(Interpreter::entry_for_kind(Interpreter::zerolocals) != NULL, "should already be generated");
  __ b(Interpreter::entry_for_kind(Interpreter::zerolocals));
  return entry_point;
}

// Abstract method entry
// Attempt to execute abstract method. Throw exception
address InterpreterGenerator::generate_abstract_entry(void) {
@@ -2286,18 +2286,30 @@ void MacroAssembler::c_stub_prolog(int gp_arg_count, int fp_arg_count, int ret_type) {
}
#endif

void MacroAssembler::push_CPU_state() {
void MacroAssembler::push_CPU_state(bool save_vectors) {
  push(0x3fffffff, sp); // integer registers except lr & sp

  if (!save_vectors) {
    for (int i = 30; i >= 0; i -= 2)
      stpd(as_FloatRegister(i), as_FloatRegister(i+1),
           Address(pre(sp, -2 * wordSize)));
  } else {
    for (int i = 30; i >= 0; i -= 2)
      stpq(as_FloatRegister(i), as_FloatRegister(i+1),
           Address(pre(sp, -4 * wordSize)));
  }
}

void MacroAssembler::pop_CPU_state() {
void MacroAssembler::pop_CPU_state(bool restore_vectors) {
  if (!restore_vectors) {
    for (int i = 0; i < 32; i += 2)
      ldpd(as_FloatRegister(i), as_FloatRegister(i+1),
           Address(post(sp, 2 * wordSize)));
  } else {
    for (int i = 0; i < 32; i += 2)
      ldpq(as_FloatRegister(i), as_FloatRegister(i+1),
           Address(post(sp, 4 * wordSize)));
  }

  pop(0x3fffffff, sp); // integer registers except lr & sp
}
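As a sanity check on the stack layout above (a sketch assuming wordSize == 8, as on 64-bit HotSpot; not HotSpot code): each stpd/ldpd pair moves sp by 2 * wordSize while each stpq/ldpq pair moves it by 4 * wordSize, so saving the full 128-bit vector registers doubles the FP save area.

    #include <cstdio>

    int main() {
      const int num_fp_regs = 32;
      const int word_size   = 8;  // bytes, assumed
      // 16 register pairs, 2 or 4 words of stack per pair
      const int d_area = (num_fp_regs / 2) * 2 * word_size;
      const int q_area = (num_fp_regs / 2) * 4 * word_size;
      std::printf("d-register save area: %d bytes\n", d_area);  // 256
      std::printf("q-register save area: %d bytes\n", q_area);  // 512
    }

The 256-byte difference is the same quantity the RegisterSaver change below accounts for as vect_words = 32 * 8 / wordSize additional frame words.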
@@ -777,8 +777,8 @@ public:

DEBUG_ONLY(void verify_heapbase(const char* msg);)

void push_CPU_state();
void pop_CPU_state() ;
void push_CPU_state(bool save_vectors = false);
void pop_CPU_state(bool restore_vectors = false) ;

// Round up to a power of two
void round_to(Register reg, int modulus);
@@ -75,8 +75,8 @@ class SimpleRuntimeFrame {
// FIXME -- this is used by C1
class RegisterSaver {
public:
static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
static void restore_live_registers(MacroAssembler* masm);
static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors = false);
static void restore_live_registers(MacroAssembler* masm, bool restore_vectors = false);

// Offsets into the register save area
// Used by deoptimization when it is managing result register
@@ -108,7 +108,17 @@ class RegisterSaver {

};

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors) {
#ifdef COMPILER2
  if (save_vectors) {
    // Save upper half of vector registers
    int vect_words = 32 * 8 / wordSize;
    additional_frame_words += vect_words;
  }
#else
  assert(!save_vectors, "vectors are generated only by C2");
#endif

  int frame_size_in_bytes = round_to(additional_frame_words*wordSize +
                                     reg_save_size*BytesPerInt, 16);
// OopMap frame size is in compiler stack slots (jint's) not bytes or words
@@ -122,7 +132,7 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors) {
// Save registers, fpu state, and flags.

__ enter();
__ push_CPU_state();
__ push_CPU_state(save_vectors);

// Set an oopmap for the call site. This oopmap will map all
// oop-registers and debug-info registers as callee-saved. This
@@ -139,14 +149,14 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors) {
// register slots are 8 bytes
// wide, 32 floating-point
// registers
oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset + additional_frame_slots),
                          r->as_VMReg());
}
}

for (int i = 0; i < FloatRegisterImpl::number_of_registers; i++) {
  FloatRegister r = as_FloatRegister(i);
  int sp_offset = 2 * i;
  int sp_offset = save_vectors ? (4 * i) : (2 * i);
  oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
                            r->as_VMReg());
}
@@ -154,8 +164,11 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors) {
return oop_map;
}

void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
  __ pop_CPU_state();
void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
#ifndef COMPILER2
  assert(!restore_vectors, "vectors are generated only by C2");
#endif
  __ pop_CPU_state(restore_vectors);
  __ leave();
}

@@ -177,9 +190,9 @@ void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
}

// Is vector's size (in bytes) bigger than a size saved by default?
// 16 bytes XMM registers are saved by default using fxsave/fxrstor instructions.
// 8 bytes vector registers are saved by default on AArch64.
bool SharedRuntime::is_wide_vector(int size) {
  return size > 16;
  return size > 8;
}
// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
@@ -1146,7 +1159,7 @@ static void rt_call(MacroAssembler* masm, address dest, int gpargs, int fpargs, int type) {
assert((unsigned)gpargs < 256, "eek!");
assert((unsigned)fpargs < 32, "eek!");
__ lea(rscratch1, RuntimeAddress(dest));
__ mov(rscratch2, (gpargs << 6) | (fpargs << 2) | type);
if (UseBuiltinSim) __ mov(rscratch2, (gpargs << 6) | (fpargs << 2) | type);
__ blrt(rscratch1, rscratch2);
__ maybe_isb();
}
@@ -1521,14 +1534,13 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,

int vep_offset = ((intptr_t)__ pc()) - start;

// Generate stack overflow check

// If we have to make this method not-entrant we'll overwrite its
// first instruction with a jump. For this action to be legal we
// must ensure that this first instruction is a B, BL, NOP, BKPT,
// SVC, HVC, or SMC. Make it a NOP.
__ nop();

// Generate stack overflow check
if (UseStackBanging) {
  __ bang_stack_with_offset(StackShadowPages*os::vm_page_size());
} else {
@@ -1709,23 +1721,20 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// need to spill before we call out
int c_arg = total_c_args - total_in_args;

// Pre-load a static method's oop into r20. Used both by locking code and
// the normal JNI call code.
// Pre-load a static method's oop into c_rarg1.
if (method->is_static() && !is_critical_native) {

  // load oop into a register
  __ movoop(oop_handle_reg,
  __ movoop(c_rarg1,
            JNIHandles::make_local(method->method_holder()->java_mirror()),
            /*immediate*/true);

  // Now handlize the static class mirror it's known not-null.
  __ str(oop_handle_reg, Address(sp, klass_offset));
  __ str(c_rarg1, Address(sp, klass_offset));
  map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));

  // Now get the handle
  __ lea(oop_handle_reg, Address(sp, klass_offset));
  // store the klass handle as second argument
  __ mov(c_rarg1, oop_handle_reg);
  __ lea(c_rarg1, Address(sp, klass_offset));
  // and protect the arg if we must spill
  c_arg--;
}
@@ -1740,19 +1749,13 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,

__ set_last_Java_frame(sp, noreg, (address)the_pc, rscratch1);

// We have all of the arguments setup at this point. We must not touch any register
// argument registers at this point (what if we save/restore them and there are no oops?)

Label dtrace_method_entry, dtrace_method_entry_done;
{
  SkipIfEqual skip(masm, &DTraceMethodProbes, false);
  // protect the args we've loaded
  save_args(masm, total_c_args, c_arg, out_regs);
  __ mov_metadata(c_rarg1, method());
  __ call_VM_leaf(
    CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
    rthread, c_rarg1);
  restore_args(masm, total_c_args, c_arg, out_regs);
  unsigned long offset;
  __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
  __ ldrb(rscratch1, Address(rscratch1, offset));
  __ cbnzw(rscratch1, dtrace_method_entry);
  __ bind(dtrace_method_entry_done);
}

// RedefineClasses() tracing support for obsolete method entry
@@ -1782,7 +1785,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
if (method->is_synchronized()) {
  assert(!is_critical_native, "unhandled");

  const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();

  // Get the handle (the 2nd argument)
@@ -1838,7 +1840,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,

// Finally just about ready to make the JNI call

// get JNIEnv* which is first argument to native
if (!is_critical_native) {
  __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));
@@ -1904,14 +1905,17 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Thread A is resumed to finish this native method, but doesn't block here since it
// didn't see any synchronization in progress, and escapes.
__ mov(rscratch1, _thread_in_native_trans);
__ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
__ stlrw(rscratch1, rscratch2);

if(os::is_MP()) {
  if (UseMembar) {
    __ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset()));

    // Force this write out before the read below
    __ dmb(Assembler::SY);
  } else {
    __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
    __ stlrw(rscratch1, rscratch2);

    // Write serialization page so VM thread can do a pseudo remote membar.
    // We use the current thread pointer to calculate a thread specific
    // offset to write to within the page. This minimizes bus traffic
@@ -1920,54 +1924,23 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
  }
}

Label after_transition;

// check for safepoint operation in progress and/or pending suspend requests
Label safepoint_in_progress, safepoint_in_progress_done;
{
  Label Continue;

  { unsigned long offset;
    assert(SafepointSynchronize::_not_synchronized == 0, "fix this code");
    unsigned long offset;
    __ adrp(rscratch1,
            ExternalAddress((address)SafepointSynchronize::address_of_state()),
            offset);
    __ ldrw(rscratch1, Address(rscratch1, offset));
  }
  __ cmpw(rscratch1, SafepointSynchronize::_not_synchronized);

  Label L;
  __ br(Assembler::NE, L);
  __ cbnzw(rscratch1, safepoint_in_progress);
  __ ldrw(rscratch1, Address(rthread, JavaThread::suspend_flags_offset()));
  __ cbz(rscratch1, Continue);
  __ bind(L);

  // Don't use call_VM as it will see a possible pending exception and forward it
  // and never return here preventing us from clearing _last_native_pc down below.
  //
  save_native_result(masm, ret_type, stack_slots);
  __ mov(c_rarg0, rthread);
#ifndef PRODUCT
  assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
#endif
  if (!is_critical_native) {
    __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
  } else {
    __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition)));
  }
  __ blrt(rscratch1, 1, 0, 1);
  __ maybe_isb();
  // Restore any method result value
  restore_native_result(masm, ret_type, stack_slots);

  if (is_critical_native) {
    // The call above performed the transition to thread_in_Java so
    // skip the transition logic below.
    __ b(after_transition);
  }

  __ bind(Continue);
  __ cbnzw(rscratch1, safepoint_in_progress);
  __ bind(safepoint_in_progress_done);
}

// change thread state
Label after_transition;
__ mov(rscratch1, _thread_in_Java);
__ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
__ stlrw(rscratch1, rscratch2);
@@ -2024,16 +1997,15 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
}

__ bind(done);

}

Label dtrace_method_exit, dtrace_method_exit_done;
{
  SkipIfEqual skip(masm, &DTraceMethodProbes, false);
  save_native_result(masm, ret_type, stack_slots);
  __ mov_metadata(c_rarg1, method());
  __ call_VM_leaf(
    CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
    rthread, c_rarg1);
  restore_native_result(masm, ret_type, stack_slots);
  unsigned long offset;
  __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
  __ ldrb(rscratch1, Address(rscratch1, offset));
  __ cbnzw(rscratch1, dtrace_method_exit);
  __ bind(dtrace_method_exit_done);
}

__ reset_last_Java_frame(false, true);
@@ -2082,7 +2054,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Slow path locking & unlocking
if (method->is_synchronized()) {

  // BEGIN Slow path lock
  __ block_comment("Slow path lock {");
  __ bind(slow_path_lock);

  // has last_Java_frame setup. No exceptions so do vanilla call not call_VM
@@ -2109,9 +2081,9 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
#endif
  __ b(lock_done);

  // END Slow path lock
  __ block_comment("} Slow path lock");

  // BEGIN Slow path unlock
  __ block_comment("Slow path unlock {");
  __ bind(slow_path_unlock);

  // If we haven't already saved the native result we must save it now as xmm registers
@@ -2149,7 +2121,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
  }
  __ b(unlock_done);

  // END Slow path unlock
  __ block_comment("} Slow path unlock");

} // synchronized

@@ -2162,6 +2134,69 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// and continue
__ b(reguard_done);

// SLOW PATH safepoint
{
  __ block_comment("safepoint {");
  __ bind(safepoint_in_progress);

  // Don't use call_VM as it will see a possible pending exception and forward it
  // and never return here preventing us from clearing _last_native_pc down below.
  //
  save_native_result(masm, ret_type, stack_slots);
  __ mov(c_rarg0, rthread);
#ifndef PRODUCT
  assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
#endif
  if (!is_critical_native) {
    __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
  } else {
    __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition)));
  }
  __ blrt(rscratch1, 1, 0, 1);
  __ maybe_isb();
  // Restore any method result value
  restore_native_result(masm, ret_type, stack_slots);

  if (is_critical_native) {
    // The call above performed the transition to thread_in_Java so
    // skip the transition logic above.
    __ b(after_transition);
  }

  __ b(safepoint_in_progress_done);
  __ block_comment("} safepoint");
}

// SLOW PATH dtrace support
{
  __ block_comment("dtrace entry {");
  __ bind(dtrace_method_entry);

  // We have all of the arguments setup at this point. We must not touch any register
  // argument registers at this point (what if we save/restore them and there are no oops?)

  save_args(masm, total_c_args, c_arg, out_regs);
  __ mov_metadata(c_rarg1, method());
  __ call_VM_leaf(
    CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
    rthread, c_rarg1);
  restore_args(masm, total_c_args, c_arg, out_regs);
  __ b(dtrace_method_entry_done);
  __ block_comment("} dtrace entry");
}

{
  __ block_comment("dtrace exit {");
  __ bind(dtrace_method_exit);
  save_native_result(masm, ret_type, stack_slots);
  __ mov_metadata(c_rarg1, method());
  __ call_VM_leaf(
    CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
    rthread, c_rarg1);
  restore_native_result(masm, ret_type, stack_slots);
  __ b(dtrace_method_exit_done);
  __ block_comment("} dtrace exit");
}

__ flush();
@@ -2742,7 +2777,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);

// Save registers, fpu state, and flags
map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, save_vectors);

// The following is basically a call_VM. However, we need the precise
// address of the call in order to generate an oopmap. Hence, we do all the
@@ -2793,7 +2828,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
__ bind(noException);

// Normal exit, restore registers and exit.
RegisterSaver::restore_live_registers(masm);
RegisterSaver::restore_live_registers(masm, save_vectors);

__ ret(lr);
@@ -721,8 +721,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {

// generate a vanilla interpreter entry as the slow path
__ bind(slow_path);
(void) generate_normal_entry(false);

__ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
return entry;
}
#endif // INCLUDE_ALL_GCS
@@ -779,12 +778,10 @@ address InterpreterGenerator::generate_CRC32_update_entry() {

// generate a vanilla native entry as the slow path
__ bind(slow_path);

(void) generate_native_entry(false);

__ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
return entry;
}
return generate_native_entry(false);
return NULL;
}

/**
@@ -841,12 +838,10 @@ address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {

// generate a vanilla native entry as the slow path
__ bind(slow_path);

(void) generate_native_entry(false);

__ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
return entry;
}
return generate_native_entry(false);
return NULL;
}

void InterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
@@ -60,6 +60,7 @@ define_pd_global(intx, LoopUnrollLimit, 60);
define_pd_global(bool, OptoPeephole, false);
define_pd_global(bool, UseCISCSpill, false);
define_pd_global(bool, OptoBundling, false);
define_pd_global(bool, OptoRegScheduling, false);
// GL:
// Detected a problem with unscaled compressed oops and
// narrow_oop_use_complex_address() == false.
@@ -46,7 +46,7 @@ void InterpreterMacroAssembler::null_check_throw(Register a, int offset, Register temp_reg) {
MacroAssembler::null_check_throw(a, offset, temp_reg, exception_entry);
}

void InterpreterMacroAssembler::branch_to_entry(address entry, Register Rscratch) {
void InterpreterMacroAssembler::jump_to_entry(address entry, Register Rscratch) {
  assert(entry, "Entry must have been generated by now");
  if (is_within_range_of_b(entry, pc())) {
    b(entry);

@@ -39,7 +39,7 @@ class InterpreterMacroAssembler: public MacroAssembler {

void null_check_throw(Register a, int offset, Register temp_reg);

void branch_to_entry(address entry, Register Rscratch);
void jump_to_entry(address entry, Register Rscratch);

// Handy address generation macros.
#define thread_(field_name) in_bytes(JavaThread::field_name ## _offset()), R16_thread

@@ -31,12 +31,12 @@
private:

address generate_abstract_entry(void);
address generate_jump_to_normal_entry(void);
address generate_accessor_entry(void) { return generate_jump_to_normal_entry(); }
address generate_empty_entry(void) { return generate_jump_to_normal_entry(); }
address generate_accessor_entry(void) { return NULL; }
address generate_empty_entry(void) { return NULL; }
address generate_Reference_get_entry(void);

address generate_CRC32_update_entry();
address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind);
address generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }

#endif // CPU_PPC_VM_INTERPRETERGENERATOR_PPC_HPP
@@ -427,18 +427,6 @@ address AbstractInterpreterGenerator::generate_result_handler_for(BasicType type) {
return entry;
}

// Call an accessor method (assuming it is resolved, otherwise drop into
// vanilla (slow path) entry.
address InterpreterGenerator::generate_jump_to_normal_entry(void) {
  address entry = __ pc();
  address normal_entry = Interpreter::entry_for_kind(Interpreter::zerolocals);
  assert(normal_entry != NULL, "should already be generated.");
  __ branch_to_entry(normal_entry, R11_scratch1);
  __ flush();

  return entry;
}

// Abstract method entry.
//
address InterpreterGenerator::generate_abstract_entry(void) {
@@ -529,12 +517,12 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
// regular method entry code to generate the NPE.
//

if (UseG1GC) {
  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

if (UseG1GC) {
  Label slow_path;

  // Debugging not possible, so can't use __ skip_if_jvmti_mode(slow_path, GR31_SCRATCH);
@@ -577,13 +565,11 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {

  // Generate regular method entry.
  __ bind(slow_path);
  __ branch_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), R11_scratch1);
  __ flush();

  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), R11_scratch1);
  return entry;
} else {
  return generate_jump_to_normal_entry();
}

return NULL;
}

void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
@@ -2064,6 +2064,10 @@ const bool Matcher::match_rule_supported(int opcode) {
return true; // Per default match rules are supported.
}

const int Matcher::float_pressure(int default_pressure_threshold) {
  return default_pressure_threshold;
}

int Matcher::regnum_to_fpu_offset(int regnum) {
  // No user for this method?
  Unimplemented();
@@ -620,7 +620,7 @@ inline bool math_entry_available(AbstractInterpreter::MethodKind kind) {
address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
if (!math_entry_available(kind)) {
  NOT_PRODUCT(__ should_not_reach_here();)
  return Interpreter::entry_for_kind(Interpreter::zerolocals);
  return NULL;
}

address entry = __ pc();
@@ -1126,14 +1126,6 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {

generate_fixed_frame(false, Rsize_of_parameters, Rsize_of_locals);

#ifdef FAST_DISPATCH
__ unimplemented("Fast dispatch in generate_normal_entry");
#if 0
__ set((intptr_t)Interpreter::dispatch_table(), IdispatchTables);
// Set bytecode dispatch table base.
#endif
#endif

// --------------------------------------------------------------------------
// Zero out non-parameter locals.
// Note: *Always* zero out non-parameter locals as Sparc does. It's not
@@ -1266,9 +1258,8 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
 * int java.util.zip.CRC32.update(int crc, int b)
 */
address InterpreterGenerator::generate_CRC32_update_entry() {
address start = __ pc(); // Remember stub start address (is rtn value).

if (UseCRC32Intrinsics) {
  address start = __ pc(); // Remember stub start address (is rtn value).
  Label slow_path;

  // Safepoint check
@@ -1313,11 +1304,11 @@ address InterpreterGenerator::generate_CRC32_update_entry() {
  // Generate a vanilla native entry as the slow path.
  BLOCK_COMMENT("} CRC32_update");
  BIND(slow_path);
  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native), R11_scratch1);
  return start;
}

(void) generate_native_entry(false);

return start;
return NULL;
}

// CRC32 Intrinsics.
@@ -1327,9 +1318,8 @@ address InterpreterGenerator::generate_CRC32_update_entry() {
 * int java.util.zip.CRC32.updateByteBuffer(int crc, long* buf, int off, int len)
 */
address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
address start = __ pc(); // Remember stub start address (is rtn value).

if (UseCRC32Intrinsics) {
  address start = __ pc(); // Remember stub start address (is rtn value).
  Label slow_path;

  // Safepoint check
@@ -1406,11 +1396,11 @@ address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  // Generate a vanilla native entry as the slow path.
  BLOCK_COMMENT("} CRC32_updateBytes(Buffer)");
  BIND(slow_path);
  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native), R11_scratch1);
  return start;
}

(void) generate_native_entry(false);

return start;
return NULL;
}

// These should never be compiled since the interpreter will prefer
@@ -64,6 +64,7 @@ define_pd_global(bool, OptoPeephole, false);
define_pd_global(bool, UseCISCSpill, false);
define_pd_global(bool, OptoBundling, false);
define_pd_global(bool, OptoScheduling, true);
define_pd_global(bool, OptoRegScheduling, false);

#ifdef _LP64
// We need to make sure that all generated code is within

@@ -468,7 +468,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {

// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
return generate_jump_to_normal_entry();
return NULL;
}

//
@@ -59,6 +59,13 @@ const Address InterpreterMacroAssembler::d_tmp(FP, (frame::interpreter_frame_d_scratch_fp_offset * wordSize) + STACK_BIAS);

#endif // CC_INTERP

void InterpreterMacroAssembler::jump_to_entry(address entry) {
  assert(entry, "Entry must have been generated by now");
  AddressLiteral al(entry);
  jump_to(al, G3_scratch);
  delayed()->nop();
}

void InterpreterMacroAssembler::compute_extra_locals_size_in_bytes(Register args_size, Register locals_size, Register delta) {
// Note: this algorithm is also used by C1's OSR entry sequence.
// Any changes should also be applied to CodeEmitter::emit_osr_entry().

@@ -80,6 +80,8 @@ class InterpreterMacroAssembler: public MacroAssembler {
InterpreterMacroAssembler(CodeBuffer* c)
  : MacroAssembler(c) {}

void jump_to_entry(address entry);

#ifndef CC_INTERP
virtual void load_earlyret_value(TosState state);

@@ -34,9 +34,8 @@
address generate_abstract_entry(void);
// there are no math intrinsics on sparc
address generate_math_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
address generate_jump_to_normal_entry(void);
address generate_accessor_entry(void) { return generate_jump_to_normal_entry(); }
address generate_empty_entry(void) { return generate_jump_to_normal_entry(); }
address generate_accessor_entry(void) { return NULL; }
address generate_empty_entry(void) { return NULL; }
address generate_Reference_get_entry(void);
void lock_method(void);
void save_native_result(void);
@@ -48,4 +47,5 @@
// Not supported
address generate_CRC32_update_entry() { return NULL; }
address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
address generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
#endif // CPU_SPARC_VM_INTERPRETERGENERATOR_SPARC_HPP
@@ -241,15 +241,6 @@ void InterpreterGenerator::generate_counter_overflow(Label& Lcontinue) {

// Various method entries

address InterpreterGenerator::generate_jump_to_normal_entry(void) {
  address entry = __ pc();
  assert(Interpreter::entry_for_kind(Interpreter::zerolocals) != NULL, "should already be generated");
  AddressLiteral al(Interpreter::entry_for_kind(Interpreter::zerolocals));
  __ jump_to(al, G3_scratch);
  __ delayed()->nop();
  return entry;
}

// Abstract method entry
// Attempt to execute abstract method. Throw exception
//

@@ -1860,6 +1860,10 @@ const bool Matcher::match_rule_supported(int opcode) {
return true; // Per default match rules are supported.
}

const int Matcher::float_pressure(int default_pressure_threshold) {
  return default_pressure_threshold;
}

int Matcher::regnum_to_fpu_offset(int regnum) {
  return regnum - 32; // The FP registers are in the second chunk
}

@@ -779,14 +779,14 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {

// Generate regular method entry
__ bind(slow_path);
(void) generate_normal_entry(false);
__ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
return entry;
}
#endif // INCLUDE_ALL_GCS

// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
return generate_jump_to_normal_entry();
return NULL;
}

//
@@ -1604,6 +1604,85 @@ void Assembler::cpuid() {
emit_int8((unsigned char)0xA2);
}

// Opcode / Instruction                      Op / En  64 - Bit Mode  Compat / Leg Mode  Description                  Implemented
// F2 0F 38 F0 / r       CRC32 r32, r / m8   RM       Valid          Valid              Accumulate CRC32 on r / m8.  v
// F2 REX 0F 38 F0 / r   CRC32 r32, r / m8*  RM       Valid          N.E.               Accumulate CRC32 on r / m8.  -
// F2 REX.W 0F 38 F0 / r CRC32 r64, r / m8   RM       Valid          N.E.               Accumulate CRC32 on r / m8.  -
//
// F2 0F 38 F1 / r       CRC32 r32, r / m16  RM       Valid          Valid              Accumulate CRC32 on r / m16. v
//
// F2 0F 38 F1 / r       CRC32 r32, r / m32  RM       Valid          Valid              Accumulate CRC32 on r / m32. v
//
// F2 REX.W 0F 38 F1 / r CRC32 r64, r / m64  RM       Valid          N.E.               Accumulate CRC32 on r / m64. v
void Assembler::crc32(Register crc, Register v, int8_t sizeInBytes) {
  assert(VM_Version::supports_sse4_2(), "");
  int8_t w = 0x01;
  Prefix p = Prefix_EMPTY;

  emit_int8((int8_t)0xF2);
  switch (sizeInBytes) {
  case 1:
    w = 0;
    break;
  case 2:
  case 4:
    break;
  LP64_ONLY(case 8:)
    // This instruction is not valid in 32-bit mode
    // Note:
    // http://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf
    //
    // Page B - 72 Vol. 2C says
    // qwreg2 to qwreg 1111 0010 : 0100 1R0B : 0000 1111 : 0011 1000 : 1111 0000 : 11 qwreg1 qwreg2
    // mem64 to qwreg  1111 0010 : 0100 1R0B : 0000 1111 : 0011 1000 : 1111 0000 : mod qwreg r / m
    // F0!!!
    // while 3 - 208 Vol. 2A
    // F2 REX.W 0F 38 F1 / r CRC32 r64, r / m64 RM Valid N.E. Accumulate CRC32 on r / m64.
    //
    // the 0 on a last bit is reserved for a different flavor of this instruction :
    // F2 REX.W 0F 38 F0 / r CRC32 r64, r / m8 RM Valid N.E. Accumulate CRC32 on r / m8.
    p = REX_W;
    break;
  default:
    assert(0, "Unsupported value for a sizeInBytes argument");
    break;
  }
  LP64_ONLY(prefix(crc, v, p);)
  emit_int8((int8_t)0x0F);
  emit_int8(0x38);
  emit_int8((int8_t)(0xF0 | w));
  emit_int8(0xC0 | ((crc->encoding() & 0x7) << 3) | (v->encoding() & 7));
}

void Assembler::crc32(Register crc, Address adr, int8_t sizeInBytes) {
  assert(VM_Version::supports_sse4_2(), "");
  InstructionMark im(this);
  int8_t w = 0x01;
  Prefix p = Prefix_EMPTY;

  emit_int8((int8_t)0xF2);
  switch (sizeInBytes) {
  case 1:
    w = 0;
    break;
  case 2:
  case 4:
    break;
  LP64_ONLY(case 8:)
    // This instruction is not valid in 32-bit mode
    p = REX_W;
    break;
  default:
    assert(0, "Unsupported value for a sizeInBytes argument");
    break;
  }
  LP64_ONLY(prefix(crc, adr, p);)
  emit_int8((int8_t)0x0F);
  emit_int8(0x38);
  emit_int8((int8_t)(0xF0 | w));
  emit_operand(crc, adr);
}

void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
emit_simd_arith_nonds(0xE6, dst, src, VEX_SIMD_F3, /* no_mask_reg */ false, /* legacy_mode */ true);
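The encodings above are the SSE4.2 CRC32 instruction forms. As a host-side sanity check (illustrative only, not HotSpot code), the same three flavors are reachable through the standard compiler intrinsics:

    #include <nmmintrin.h>  // SSE4.2 intrinsics; build with -msse4.2
    #include <cstdio>

    int main() {
      unsigned crc = 0;
      crc = _mm_crc32_u8(crc, 0x42);          // F2 0F 38 F0 /r : CRC32 r32, r/m8
      crc = _mm_crc32_u32(crc, 0xDEADBEEFu);  // F2 0F 38 F1 /r : CRC32 r32, r/m32
    #ifdef __x86_64__
      // F2 REX.W 0F 38 F1 /r : CRC32 r64, r/m64 (64-bit mode only)
      crc = (unsigned)_mm_crc32_u64(crc, 0x0123456789ABCDEFull);
    #endif
      std::printf("crc32c accumulator: %08x\n", crc);
    }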
@@ -6223,6 +6302,14 @@ void Assembler::shldl(Register dst, Register src) {
emit_int8((unsigned char)(0xC0 | src->encoding() << 3 | dst->encoding()));
}

// 0F A4 / r ib
void Assembler::shldl(Register dst, Register src, int8_t imm8) {
  emit_int8(0x0F);
  emit_int8((unsigned char)0xA4);
  emit_int8((unsigned char)(0xC0 | src->encoding() << 3 | dst->encoding()));
  emit_int8(imm8);
}

void Assembler::shrdl(Register dst, Register src) {
emit_int8(0x0F);
emit_int8((unsigned char)0xAD);
@@ -6408,6 +6495,40 @@ void Assembler::prefix(Register reg) {
  }
}

void Assembler::prefix(Register dst, Register src, Prefix p) {
  if (src->encoding() >= 8) {
    p = (Prefix)(p | REX_B);
  }
  if (dst->encoding() >= 8) {
    p = (Prefix)( p | REX_R);
  }
  if (p != Prefix_EMPTY) {
    // do not generate an empty prefix
    prefix(p);
  }
}

void Assembler::prefix(Register dst, Address adr, Prefix p) {
  if (adr.base_needs_rex()) {
    if (adr.index_needs_rex()) {
      assert(false, "prefix(Register dst, Address adr, Prefix p) does not support handling of an X");
    } else {
      prefix(REX_B);
    }
  } else {
    if (adr.index_needs_rex()) {
      assert(false, "prefix(Register dst, Address adr, Prefix p) does not support handling of an X");
    }
  }
  if (dst->encoding() >= 8) {
    p = (Prefix)(p | REX_R);
  }
  if (p != Prefix_EMPTY) {
    // do not generate an empty prefix
    prefix(p);
  }
}

void Assembler::prefix(Address adr) {
  if (adr.base_needs_rex()) {
    if (adr.index_needs_rex()) {
@@ -506,7 +506,8 @@ class Assembler : public AbstractAssembler {

VEX_3bytes = 0xC4,
VEX_2bytes = 0xC5,
EVEX_4bytes = 0x62
EVEX_4bytes = 0x62,
Prefix_EMPTY = 0x0
};

enum VexPrefix {
@@ -615,6 +616,8 @@ private:
int prefixq_and_encode(int dst_enc, int src_enc);

void prefix(Register reg);
void prefix(Register dst, Register src, Prefix p);
void prefix(Register dst, Address adr, Prefix p);
void prefix(Address adr);
void prefixq(Address adr);

@@ -1177,6 +1180,10 @@ private:
// Identify processor type and features
void cpuid();

// CRC32C
void crc32(Register crc, Register v, int8_t sizeInBytes);
void crc32(Register crc, Address adr, int8_t sizeInBytes);

// Convert Scalar Double-Precision Floating-Point Value to Scalar Single-Precision Floating-Point Value
void cvtsd2ss(XMMRegister dst, XMMRegister src);
void cvtsd2ss(XMMRegister dst, Address src);
@@ -1783,6 +1790,7 @@ private:
void setb(Condition cc, Register dst);

void shldl(Register dst, Register src);
void shldl(Register dst, Register src, int8_t imm8);

void shll(Register dst, int imm8);
void shll(Register dst);

@@ -37,6 +37,8 @@ inline int Assembler::prefix_and_encode(int dst_enc, int src_enc, bool byteinst) { return dst_enc << 3 | src_enc; }
inline int Assembler::prefixq_and_encode(int dst_enc, int src_enc) { return dst_enc << 3 | src_enc; }

inline void Assembler::prefix(Register reg) {}
inline void Assembler::prefix(Register dst, Register src, Prefix p) {}
inline void Assembler::prefix(Register dst, Address adr, Prefix p) {}
inline void Assembler::prefix(Address adr) {}
inline void Assembler::prefixq(Address adr) {}
@@ -48,11 +48,11 @@ define_pd_global(intx, CompileThreshold, 10000);

define_pd_global(intx, OnStackReplacePercentage, 140);
define_pd_global(intx, ConditionalMoveLimit, 3);
define_pd_global(intx, FLOATPRESSURE, 6);
define_pd_global(intx, FreqInlineSize, 325);
define_pd_global(intx, MinJumpTableSize, 10);
#ifdef AMD64
define_pd_global(intx, INTPRESSURE, 13);
define_pd_global(intx, FLOATPRESSURE, 14);
define_pd_global(intx, InteriorEntryAlignment, 16);
define_pd_global(size_t, NewSizeThreadIncrease, ScaleForWordSize(4*K));
define_pd_global(intx, LoopUnrollLimit, 60);
@@ -64,6 +64,7 @@ define_pd_global(intx, CodeCacheExpansionSize, 64*K);
define_pd_global(uint64_t, MaxRAM, 128ULL*G);
#else
define_pd_global(intx, INTPRESSURE, 6);
define_pd_global(intx, FLOATPRESSURE, 6);
define_pd_global(intx, InteriorEntryAlignment, 4);
define_pd_global(size_t, NewSizeThreadIncrease, 4*K);
define_pd_global(intx, LoopUnrollLimit, 50); // Design center runs on 1.3.1
@@ -82,6 +83,7 @@ define_pd_global(bool, OptoPeephole, true);
define_pd_global(bool, UseCISCSpill, true);
define_pd_global(bool, OptoScheduling, false);
define_pd_global(bool, OptoBundling, false);
define_pd_global(bool, OptoRegScheduling, true);

define_pd_global(intx, ReservedCodeCacheSize, 48*M);
define_pd_global(intx, NonProfiledCodeHeapSize, 21*M);

@@ -807,7 +807,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {

// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
return generate_jump_to_normal_entry();
return NULL;
}

//
hotspot/src/cpu/x86/vm/crc32c.h (new file, 66 lines)
@@ -0,0 +1,66 @@
/*
 * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

enum {
  // S. Gueron / Information Processing Letters 112 (2012) 184
  // shows that anything above 6K and below 32K is a good choice
  // 32K does not deliver any further performance gains
  // 6K=8*256 (*3 as we compute 3 blocks together)
  //
  // Thus selecting the smallest value so it could apply to the largest number
  // of buffer sizes.
  CRC32C_HIGH = 8 * 256,

  // empirical
  // based on ubench study using methodology described in
  // V. Gopal et al. / Fast CRC Computation for iSCSI Polynomial Using CRC32 Instruction April 2011 8
  //
  // arbitrary value between 27 and 256
  CRC32C_MIDDLE = 8 * 86,

  // V. Gopal et al. / Fast CRC Computation for iSCSI Polynomial Using CRC32 Instruction April 2011 9
  // shows that 240 and 1024 are equally good choices as the 216==8*27
  //
  // Selecting the smallest value which resulted in a significant performance improvement over
  // sequential version
  CRC32C_LOW = 8 * 27,

  CRC32C_NUM_ChunkSizeInBytes = 3,

  // We need to compute powers of 64N and 128N for each "chunk" size
  CRC32C_NUM_PRECOMPUTED_CONSTANTS = ( 2 * CRC32C_NUM_ChunkSizeInBytes )
};
// Notes:
// 1. Why do we need to choose a "chunk" approach?
// The overhead of computing the powers for an arbitrary buffer of size N is significant
// (implementation approaches a library perf.)
// 2. Why only 3 "chunks"?
// Performance experiments showed that a HIGH+LOW combination alone was not delivering a stable speedup
// curve.
//
// Disclaimer:
// If you ever decide to increase/decrease number of "chunks" be sure to modify
// a) constants table generation (hotspot/src/cpu/x86/vm/stubRoutines_x86.cpp)
// b) constant fetch from that table (macroAssembler_x86.cpp)
// c) unrolled for loop (macroAssembler_x86.cpp)
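To make the chunking note concrete, here is a minimal software model of the three-way interleave (illustrative only: crc32c_step is a plain bitwise CRC32C standing in for the hardware crc32 instruction, and the carry-less-multiply recombination that the precomputed constants serve is deliberately omitted):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Bitwise CRC32C step, reflected polynomial 0x82F63B78.
    static uint32_t crc32c_step(uint32_t crc, uint8_t b) {
      crc ^= b;
      for (int k = 0; k < 8; k++)
        crc = (crc >> 1) ^ (0x82F63B78u & (0u - (crc & 1u)));
      return crc;
    }

    int main() {
      const size_t chunk = 8 * 27;  // CRC32C_LOW
      uint8_t buf[3 * 8 * 27];
      for (size_t i = 0; i < sizeof buf; i++) buf[i] = (uint8_t)i;

      uint32_t crc_a = 0xFFFFFFFFu, crc_b = 0, crc_c = 0;
      for (size_t i = 0; i < chunk; i++) {
        // Three independent streams keep the 3-cycle-latency crc32
        // instruction pipeline busy on real hardware.
        crc_a = crc32c_step(crc_a, buf[i]);
        crc_b = crc32c_step(crc_b, buf[chunk + i]);
        crc_c = crc32c_step(crc_c, buf[2 * chunk + i]);
      }
      // The real stub recombines crc_a/crc_b/crc_c into one CRC using the
      // precomputed carry-less-multiply constants; that step is omitted here.
      std::printf("partial CRCs: %08x %08x %08x\n", crc_a, crc_b, crc_c);
    }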
@@ -40,6 +40,11 @@

// Implementation of InterpreterMacroAssembler

void InterpreterMacroAssembler::jump_to_entry(address entry) {
  assert(entry, "Entry must have been generated by now");
  jump(RuntimeAddress(entry));
}

#ifndef CC_INTERP
void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
Label update, next, none;

@@ -60,6 +60,8 @@ class InterpreterMacroAssembler: public MacroAssembler {
_locals_register(LP64_ONLY(r14) NOT_LP64(rdi)),
_bcp_register(LP64_ONLY(r13) NOT_LP64(rsi)) {}

void jump_to_entry(address entry);

void load_earlyret_value(TosState state);

#ifdef CC_INTERP
@@ -31,17 +31,6 @@

#define __ _masm->

// Accessor and empty entries jump into the normal path via the normal entry.
// The "fast" optimization doesn't update the compilation count and can
// therefore disable inlining of functions that should be inlined.
address InterpreterGenerator::generate_jump_to_normal_entry(void) {
  address entry_point = __ pc();

  assert(Interpreter::entry_for_kind(Interpreter::zerolocals) != NULL, "should already be generated");
  __ jump(RuntimeAddress(Interpreter::entry_for_kind(Interpreter::zerolocals)));
  return entry_point;
}

// Abstract method entry
// Attempt to execute abstract method. Throw exception
address InterpreterGenerator::generate_abstract_entry(void) {

@@ -36,12 +36,12 @@
address generate_native_entry(bool synchronized);
address generate_abstract_entry(void);
address generate_math_entry(AbstractInterpreter::MethodKind kind);
address generate_jump_to_normal_entry(void);
address generate_accessor_entry(void) { return generate_jump_to_normal_entry(); }
address generate_empty_entry(void) { return generate_jump_to_normal_entry(); }
address generate_accessor_entry(void) { return NULL; }
address generate_empty_entry(void) { return NULL; }
address generate_Reference_get_entry();
address generate_CRC32_update_entry();
address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind);
address generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind);
#ifndef _LP64
address generate_Float_intBitsToFloat_entry();
address generate_Float_floatToRawIntBits_entry();
@@ -45,6 +45,7 @@
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS
#include "crc32c.h"

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
@ -8636,6 +8637,471 @@ void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Regi
|
||||
notl(crc); // ~c
|
||||
}
|
||||
|
||||
#ifdef _LP64
|
||||
// S. Gueron / Information Processing Letters 112 (2012) 184
|
||||
// Algorithm 4: Computing carry-less multiplication using a precomputed lookup table.
|
||||
// Input: A 32 bit value B = [byte3, byte2, byte1, byte0].
|
||||
// Output: the 64-bit carry-less product of B * CONST
|
||||
void MacroAssembler::crc32c_ipl_alg4(Register in, uint32_t n,
|
||||
Register tmp1, Register tmp2, Register tmp3) {
|
||||
lea(tmp3, ExternalAddress(StubRoutines::crc32c_table_addr()));
|
||||
if (n > 0) {
|
||||
addq(tmp3, n * 256 * 8);
|
||||
}
|
||||
// Q1 = TABLEExt[n][B & 0xFF];
|
||||
movl(tmp1, in);
|
||||
andl(tmp1, 0x000000FF);
|
||||
shll(tmp1, 3);
|
||||
addq(tmp1, tmp3);
|
||||
movq(tmp1, Address(tmp1, 0));
|
||||
|
||||
// Q2 = TABLEExt[n][B >> 8 & 0xFF];
|
||||
movl(tmp2, in);
|
||||
shrl(tmp2, 8);
|
||||
andl(tmp2, 0x000000FF);
|
||||
shll(tmp2, 3);
|
||||
addq(tmp2, tmp3);
|
||||
movq(tmp2, Address(tmp2, 0));
|
||||
|
||||
shlq(tmp2, 8);
|
||||
xorq(tmp1, tmp2);
|
||||
|
||||
// Q3 = TABLEExt[n][B >> 16 & 0xFF];
|
||||
movl(tmp2, in);
|
||||
shrl(tmp2, 16);
|
||||
andl(tmp2, 0x000000FF);
|
||||
shll(tmp2, 3);
|
||||
addq(tmp2, tmp3);
|
||||
movq(tmp2, Address(tmp2, 0));
|
||||
|
||||
shlq(tmp2, 16);
|
||||
xorq(tmp1, tmp2);
|
||||
|
||||
// Q4 = TABLEExt[n][B >> 24 & 0xFF];
|
||||
shrl(in, 24);
|
||||
andl(in, 0x000000FF);
|
||||
shll(in, 3);
|
||||
addq(in, tmp3);
|
||||
movq(in, Address(in, 0));
|
||||
|
||||
shlq(in, 24);
|
||||
xorq(in, tmp1);
|
||||
// return Q1 ^ Q2 << 8 ^ Q3 << 16 ^ Q4 << 24;
|
||||
}
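
// Editor's note: the stub above is easier to follow as plain C++. This is a
// minimal sketch, not HotSpot code; `table` stands for the lookup table that
// StubRoutines::crc32c_table_addr() points at (256 quadword entries per
// chunk, chunk n starting at table + n * 256).
//
//   #include <stdint.h>
//
//   uint64_t crc32c_clmul_by_table(const uint64_t* table, uint32_t n, uint32_t b) {
//     const uint64_t* t = table + (uint64_t)n * 256;  // select chunk n
//     uint64_t q1 = t[b & 0xFF];                      // byte0 * CONST
//     uint64_t q2 = t[(b >> 8) & 0xFF] << 8;          // byte1 * CONST, shifted
//     uint64_t q3 = t[(b >> 16) & 0xFF] << 16;        // byte2 * CONST, shifted
//     uint64_t q4 = t[(b >> 24) & 0xFF] << 24;        // byte3 * CONST, shifted
//     return q1 ^ q2 ^ q3 ^ q4;                       // carry-less sum is XOR
//   }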

void MacroAssembler::crc32c_pclmulqdq(XMMRegister w_xtmp1,
                                      Register in_out,
                                      uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
                                      XMMRegister w_xtmp2,
                                      Register tmp1,
                                      Register n_tmp2, Register n_tmp3) {
  if (is_pclmulqdq_supported) {
    movdl(w_xtmp1, in_out); // modified blindly

    movl(tmp1, const_or_pre_comp_const_index);
    movdl(w_xtmp2, tmp1);
    pclmulqdq(w_xtmp1, w_xtmp2, 0);

    movdq(in_out, w_xtmp1);
  } else {
    crc32c_ipl_alg4(in_out, const_or_pre_comp_const_index, tmp1, n_tmp2, n_tmp3);
  }
}

// Recombination Alternative 2: No bit-reflections
// T1 = (CRC_A * U1) << 1
// T2 = (CRC_B * U2) << 1
// C1 = T1 >> 32
// C2 = T2 >> 32
// T1 = T1 & 0xFFFFFFFF
// T2 = T2 & 0xFFFFFFFF
// T1 = CRC32(0, T1)
// T2 = CRC32(0, T2)
// C1 = C1 ^ T1
// C2 = C2 ^ T2
// CRC = C1 ^ C2 ^ CRC_C
void MacroAssembler::crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
                                     XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                                     Register tmp1, Register tmp2,
                                     Register n_tmp3) {
  crc32c_pclmulqdq(w_xtmp1, in_out, const_or_pre_comp_const_index_u1, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3);
  crc32c_pclmulqdq(w_xtmp2, in1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3);
  shlq(in_out, 1);
  movl(tmp1, in_out);
  shrq(in_out, 32);
  xorl(tmp2, tmp2);
  crc32(tmp2, tmp1, 4);
  xorl(in_out, tmp2); // we don't care about upper 32 bit contents here
  shlq(in1, 1);
  movl(tmp1, in1);
  shrq(in1, 32);
  xorl(tmp2, tmp2);
  crc32(tmp2, tmp1, 4);
  xorl(in1, tmp2);
  xorl(in_out, in1);
  xorl(in_out, in2);
}
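
// Editor's note: the same recombination as a C++ sketch. The two carry-less
// products (T1, T2) are assumed already computed by the helper sketched
// earlier; u1/u2 are the precomputed constants x^(64N) and x^(128N) mod P.
//
//   #include <nmmintrin.h>  // _mm_crc32_u32 (SSE4.2)
//
//   uint32_t crc32c_recombine(uint64_t t1, uint64_t t2, uint32_t crc_c) {
//     t1 <<= 1;  // T1 = (CRC_A clmul U1) << 1
//     t2 <<= 1;  // T2 = (CRC_B clmul U2) << 1
//     uint32_t c1 = (uint32_t)(t1 >> 32) ^ _mm_crc32_u32(0, (uint32_t)t1);
//     uint32_t c2 = (uint32_t)(t2 >> 32) ^ _mm_crc32_u32(0, (uint32_t)t2);
//     return c1 ^ c2 ^ crc_c;
//   }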

// Set N to predefined value
// Subtract it from the length of the buffer
// execute in a loop:
// CRC_A = 0xFFFFFFFF, CRC_B = 0, CRC_C = 0
// for i = 1 to N do
//   CRC_A = CRC32(CRC_A, A[i])
//   CRC_B = CRC32(CRC_B, B[i])
//   CRC_C = CRC32(CRC_C, C[i])
// end for
// Recombine
void MacroAssembler::crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
                                       Register in_out1, Register in_out2, Register in_out3,
                                       Register tmp1, Register tmp2, Register tmp3,
                                       XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                                       Register tmp4, Register tmp5,
                                       Register n_tmp6) {
  Label L_processPartitions;
  Label L_processPartition;
  Label L_exit;

  bind(L_processPartitions);
  cmpl(in_out1, 3 * size);
  jcc(Assembler::less, L_exit);
  xorl(tmp1, tmp1);
  xorl(tmp2, tmp2);
  movq(tmp3, in_out2);
  addq(tmp3, size);

  bind(L_processPartition);
  crc32(in_out3, Address(in_out2, 0), 8);
  crc32(tmp1, Address(in_out2, size), 8);
  crc32(tmp2, Address(in_out2, size * 2), 8);
  addq(in_out2, 8);
  cmpq(in_out2, tmp3);
  jcc(Assembler::less, L_processPartition);
  crc32c_rec_alt2(const_or_pre_comp_const_index_u1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, in_out3, tmp1, tmp2,
                  w_xtmp1, w_xtmp2, w_xtmp3,
                  tmp4, tmp5,
                  n_tmp6);
  addq(in_out2, 2 * size);
  subl(in_out1, 3 * size);
  jmp(L_processPartitions);

  bind(L_exit);
}
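
// Editor's note: the shape of that loop in C++ -- three independent CRC32
// streams over the A/B/C partitions, one recombination per chunk. A sketch
// under the same assumptions as the fragments above (the recombination call
// is elided; see crc32c_recombine earlier).
//
//   #include <nmmintrin.h>  // _mm_crc32_u64
//
//   void crc32c_process_chunks(uint32_t size /* bytes per partition */,
//                              const uint8_t*& p, uint32_t& len, uint32_t& crc) {
//     while (len >= 3 * size) {
//       uint32_t crc_a = crc, crc_b = 0, crc_c = 0;
//       for (uint32_t i = 0; i < size; i += 8) {
//         crc_a = (uint32_t)_mm_crc32_u64(crc_a, *(const uint64_t*)(p + i));
//         crc_b = (uint32_t)_mm_crc32_u64(crc_b, *(const uint64_t*)(p + size + i));
//         crc_c = (uint32_t)_mm_crc32_u64(crc_c, *(const uint64_t*)(p + 2 * size + i));
//       }
//       // crc = crc32c_recombine(t1(crc_a), t2(crc_b), crc_c);  // see above
//       p += 3 * size;
//       len -= 3 * size;
//     }
//   }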
#else
void MacroAssembler::crc32c_ipl_alg4(Register in_out, uint32_t n,
                                     Register tmp1, Register tmp2, Register tmp3,
                                     XMMRegister xtmp1, XMMRegister xtmp2) {
  lea(tmp3, ExternalAddress(StubRoutines::crc32c_table_addr()));
  if (n > 0) {
    addl(tmp3, n * 256 * 8);
  }
  // Q1 = TABLEExt[n][B & 0xFF];
  movl(tmp1, in_out);
  andl(tmp1, 0x000000FF);
  shll(tmp1, 3);
  addl(tmp1, tmp3);
  movq(xtmp1, Address(tmp1, 0));

  // Q2 = TABLEExt[n][B >> 8 & 0xFF];
  movl(tmp2, in_out);
  shrl(tmp2, 8);
  andl(tmp2, 0x000000FF);
  shll(tmp2, 3);
  addl(tmp2, tmp3);
  movq(xtmp2, Address(tmp2, 0));

  psllq(xtmp2, 8);
  pxor(xtmp1, xtmp2);

  // Q3 = TABLEExt[n][B >> 16 & 0xFF];
  movl(tmp2, in_out);
  shrl(tmp2, 16);
  andl(tmp2, 0x000000FF);
  shll(tmp2, 3);
  addl(tmp2, tmp3);
  movq(xtmp2, Address(tmp2, 0));

  psllq(xtmp2, 16);
  pxor(xtmp1, xtmp2);

  // Q4 = TABLEExt[n][B >> 24 & 0xFF];
  shrl(in_out, 24);
  andl(in_out, 0x000000FF);
  shll(in_out, 3);
  addl(in_out, tmp3);
  movq(xtmp2, Address(in_out, 0));

  psllq(xtmp2, 24);
  pxor(xtmp1, xtmp2); // Result in CXMM
  // return Q1 ^ Q2 << 8 ^ Q3 << 16 ^ Q4 << 24;
}

void MacroAssembler::crc32c_pclmulqdq(XMMRegister w_xtmp1,
                                      Register in_out,
                                      uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
                                      XMMRegister w_xtmp2,
                                      Register tmp1,
                                      Register n_tmp2, Register n_tmp3) {
  if (is_pclmulqdq_supported) {
    movdl(w_xtmp1, in_out);

    movl(tmp1, const_or_pre_comp_const_index);
    movdl(w_xtmp2, tmp1);
    pclmulqdq(w_xtmp1, w_xtmp2, 0);
    // Keep result in XMM since GPR is 32 bit in length
  } else {
    crc32c_ipl_alg4(in_out, const_or_pre_comp_const_index, tmp1, n_tmp2, n_tmp3, w_xtmp1, w_xtmp2);
  }
}

void MacroAssembler::crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
                                     XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                                     Register tmp1, Register tmp2,
                                     Register n_tmp3) {
  crc32c_pclmulqdq(w_xtmp1, in_out, const_or_pre_comp_const_index_u1, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3);
  crc32c_pclmulqdq(w_xtmp2, in1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3);

  psllq(w_xtmp1, 1);
  movdl(tmp1, w_xtmp1);
  psrlq(w_xtmp1, 32);
  movdl(in_out, w_xtmp1);

  xorl(tmp2, tmp2);
  crc32(tmp2, tmp1, 4);
  xorl(in_out, tmp2);

  psllq(w_xtmp2, 1);
  movdl(tmp1, w_xtmp2);
  psrlq(w_xtmp2, 32);
  movdl(in1, w_xtmp2);

  xorl(tmp2, tmp2);
  crc32(tmp2, tmp1, 4);
  xorl(in1, tmp2);
  xorl(in_out, in1);
  xorl(in_out, in2);
}

void MacroAssembler::crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
                                       Register in_out1, Register in_out2, Register in_out3,
                                       Register tmp1, Register tmp2, Register tmp3,
                                       XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                                       Register tmp4, Register tmp5,
                                       Register n_tmp6) {
  Label L_processPartitions;
  Label L_processPartition;
  Label L_exit;

  bind(L_processPartitions);
  cmpl(in_out1, 3 * size);
  jcc(Assembler::less, L_exit);
  xorl(tmp1, tmp1);
  xorl(tmp2, tmp2);
  movl(tmp3, in_out2);
  addl(tmp3, size);

  bind(L_processPartition);
  crc32(in_out3, Address(in_out2, 0), 4);
  crc32(tmp1, Address(in_out2, size), 4);
  crc32(tmp2, Address(in_out2, size * 2), 4);
  crc32(in_out3, Address(in_out2, 0 + 4), 4);
  crc32(tmp1, Address(in_out2, size + 4), 4);
  crc32(tmp2, Address(in_out2, size * 2 + 4), 4);
  addl(in_out2, 8);
  cmpl(in_out2, tmp3);
  jcc(Assembler::less, L_processPartition);

  push(tmp3);
  push(in_out1);
  push(in_out2);
  tmp4 = tmp3;
  tmp5 = in_out1;
  n_tmp6 = in_out2;

  crc32c_rec_alt2(const_or_pre_comp_const_index_u1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, in_out3, tmp1, tmp2,
                  w_xtmp1, w_xtmp2, w_xtmp3,
                  tmp4, tmp5,
                  n_tmp6);

  pop(in_out2);
  pop(in_out1);
  pop(tmp3);

  addl(in_out2, 2 * size);
  subl(in_out1, 3 * size);
  jmp(L_processPartitions);

  bind(L_exit);
}
#endif // LP64

#ifdef _LP64
// Algorithm 2: Pipelined usage of the CRC32 instruction.
// Input: A buffer I of L bytes.
// Output: the CRC32C value of the buffer.
// Notations:
// Write L = 24N + r, with N = floor (L/24).
// r = L mod 24 (0 <= r < 24).
// Consider I as the concatenation of A|B|C|R, where A, B, C each consist of
// N quadwords, and R consists of r bytes.
// A[j] = I [8j+7:8j], j= 0, 1, ..., N-1
// B[j] = I [N + 8j+7:N + 8j], j= 0, 1, ..., N-1
// C[j] = I [2N + 8j+7:2N + 8j], j= 0, 1, ..., N-1
// if r > 0 R[j] = I [3N +j], j= 0, 1, ...,r-1
void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
                                          Register tmp1, Register tmp2, Register tmp3,
                                          Register tmp4, Register tmp5, Register tmp6,
                                          XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                                          bool is_pclmulqdq_supported) {
  uint32_t const_or_pre_comp_const_index[CRC32C_NUM_PRECOMPUTED_CONSTANTS];
  Label L_wordByWord;
  Label L_byteByByteProlog;
  Label L_byteByByte;
  Label L_exit;

  if (is_pclmulqdq_supported) {
    const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::_crc32c_table_addr;
    const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 1);

    const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 2);
    const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 3);

    const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 4);
    const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 5);
    assert((CRC32C_NUM_PRECOMPUTED_CONSTANTS - 1) == 5, "Checking whether you declared all of the constants based on the number of \"chunks\"");
  } else {
    const_or_pre_comp_const_index[0] = 1;
    const_or_pre_comp_const_index[1] = 0;

    const_or_pre_comp_const_index[2] = 3;
    const_or_pre_comp_const_index[3] = 2;

    const_or_pre_comp_const_index[4] = 5;
    const_or_pre_comp_const_index[5] = 4;
  }
  crc32c_proc_chunk(CRC32C_HIGH, const_or_pre_comp_const_index[0], const_or_pre_comp_const_index[1], is_pclmulqdq_supported,
                    in2, in1, in_out,
                    tmp1, tmp2, tmp3,
                    w_xtmp1, w_xtmp2, w_xtmp3,
                    tmp4, tmp5,
                    tmp6);
  crc32c_proc_chunk(CRC32C_MIDDLE, const_or_pre_comp_const_index[2], const_or_pre_comp_const_index[3], is_pclmulqdq_supported,
                    in2, in1, in_out,
                    tmp1, tmp2, tmp3,
                    w_xtmp1, w_xtmp2, w_xtmp3,
                    tmp4, tmp5,
                    tmp6);
  crc32c_proc_chunk(CRC32C_LOW, const_or_pre_comp_const_index[4], const_or_pre_comp_const_index[5], is_pclmulqdq_supported,
                    in2, in1, in_out,
                    tmp1, tmp2, tmp3,
                    w_xtmp1, w_xtmp2, w_xtmp3,
                    tmp4, tmp5,
                    tmp6);
  movl(tmp1, in2);
  andl(tmp1, 0x00000007);
  negl(tmp1);
  addl(tmp1, in2);
  addq(tmp1, in1);

  BIND(L_wordByWord);
  cmpq(in1, tmp1);
  jcc(Assembler::greaterEqual, L_byteByByteProlog);
  crc32(in_out, Address(in1, 0), 4);
  addq(in1, 4);
  jmp(L_wordByWord);

  BIND(L_byteByByteProlog);
  andl(in2, 0x00000007);
  movl(tmp2, 1);

  BIND(L_byteByByte);
  cmpl(tmp2, in2);
  jccb(Assembler::greater, L_exit);
  crc32(in_out, Address(in1, 0), 1);
  incq(in1);
  incl(tmp2);
  jmp(L_byteByByte);

  BIND(L_exit);
}
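
// Editor's note: the L_wordByWord / L_byteByByte epilogue above, sketched as
// C++ -- after the 24N-byte body, consume remaining words, then single bytes.
// A sketch under the same assumptions as the earlier fragments.
//
//   #include <nmmintrin.h>
//
//   uint32_t crc32c_tail(uint32_t crc, const uint8_t* p, uint32_t len) {
//     while (len >= 4) {                  // word-by-word
//       crc = _mm_crc32_u32(crc, *(const uint32_t*)p);
//       p += 4;
//       len -= 4;
//     }
//     while (len > 0) {                   // byte-by-byte
//       crc = _mm_crc32_u8(crc, *p++);
//       --len;
//     }
//     return crc;
//   }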
#else
void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
                                          Register tmp1, Register tmp2, Register tmp3,
                                          Register tmp4, Register tmp5, Register tmp6,
                                          XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                                          bool is_pclmulqdq_supported) {
  uint32_t const_or_pre_comp_const_index[CRC32C_NUM_PRECOMPUTED_CONSTANTS];
  Label L_wordByWord;
  Label L_byteByByteProlog;
  Label L_byteByByte;
  Label L_exit;

  if (is_pclmulqdq_supported) {
    const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::_crc32c_table_addr;
    const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 1);

    const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 2);
    const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 3);

    const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 4);
    const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 5);
  } else {
    const_or_pre_comp_const_index[0] = 1;
    const_or_pre_comp_const_index[1] = 0;

    const_or_pre_comp_const_index[2] = 3;
    const_or_pre_comp_const_index[3] = 2;

    const_or_pre_comp_const_index[4] = 5;
    const_or_pre_comp_const_index[5] = 4;
  }
  crc32c_proc_chunk(CRC32C_HIGH, const_or_pre_comp_const_index[0], const_or_pre_comp_const_index[1], is_pclmulqdq_supported,
                    in2, in1, in_out,
                    tmp1, tmp2, tmp3,
                    w_xtmp1, w_xtmp2, w_xtmp3,
                    tmp4, tmp5,
                    tmp6);
  crc32c_proc_chunk(CRC32C_MIDDLE, const_or_pre_comp_const_index[2], const_or_pre_comp_const_index[3], is_pclmulqdq_supported,
                    in2, in1, in_out,
                    tmp1, tmp2, tmp3,
                    w_xtmp1, w_xtmp2, w_xtmp3,
                    tmp4, tmp5,
                    tmp6);
  crc32c_proc_chunk(CRC32C_LOW, const_or_pre_comp_const_index[4], const_or_pre_comp_const_index[5], is_pclmulqdq_supported,
                    in2, in1, in_out,
                    tmp1, tmp2, tmp3,
                    w_xtmp1, w_xtmp2, w_xtmp3,
                    tmp4, tmp5,
                    tmp6);
  movl(tmp1, in2);
  andl(tmp1, 0x00000007);
  negl(tmp1);
  addl(tmp1, in2);
  addl(tmp1, in1);

  BIND(L_wordByWord);
  cmpl(in1, tmp1);
  jcc(Assembler::greaterEqual, L_byteByByteProlog);
  crc32(in_out, Address(in1, 0), 4);
  addl(in1, 4);
  jmp(L_wordByWord);

  BIND(L_byteByByteProlog);
  andl(in2, 0x00000007);
  movl(tmp2, 1);

  BIND(L_byteByByte);
  cmpl(tmp2, in2);
  jccb(Assembler::greater, L_exit);
  movb(tmp1, Address(in1, 0));
  crc32(in_out, tmp1, 1);
  incl(in1);
  incl(tmp2);
  jmp(L_byteByByte);

  BIND(L_exit);
}
#endif // LP64
#undef BIND
#undef BLOCK_COMMENT

@ -1278,9 +1278,42 @@ public:
                       Register raxReg);
#endif

  // CRC32 code for java.util.zip.CRC32::updateBytes() instrinsic.
  // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
  void update_byte_crc32(Register crc, Register val, Register table);
  void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp);
  // CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic
  // Note on a naming convention:
  // Prefix w = register only used on a Westmere+ architecture
  // Prefix n = register only used on a Nehalem architecture
#ifdef _LP64
  void crc32c_ipl_alg4(Register in_out, uint32_t n,
                       Register tmp1, Register tmp2, Register tmp3);
#else
  void crc32c_ipl_alg4(Register in_out, uint32_t n,
                       Register tmp1, Register tmp2, Register tmp3,
                       XMMRegister xtmp1, XMMRegister xtmp2);
#endif
  void crc32c_pclmulqdq(XMMRegister w_xtmp1,
                        Register in_out,
                        uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
                        XMMRegister w_xtmp2,
                        Register tmp1,
                        Register n_tmp2, Register n_tmp3);
  void crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
                       XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                       Register tmp1, Register tmp2,
                       Register n_tmp3);
  void crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
                         Register in_out1, Register in_out2, Register in_out3,
                         Register tmp1, Register tmp2, Register tmp3,
                         XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                         Register tmp4, Register tmp5,
                         Register n_tmp6);
  void crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
                            Register tmp1, Register tmp2, Register tmp3,
                            Register tmp4, Register tmp5, Register tmp6,
                            XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                            bool is_pclmulqdq_supported);
  // Fold 128-bit data chunk
  void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
  void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf);

@ -2991,6 +2991,63 @@ class StubGenerator: public StubCodeGenerator {
    return start;
  }

  /**
   * Arguments:
   *
   * Inputs:
   *   rsp(4)  - int crc
   *   rsp(8)  - byte* buf
   *   rsp(12) - int length
   *   rsp(16) - table_start - optional (present only when doing a library_call,
   *             not used by x86 algorithm)
   *
   * Output:
   *   rax - int crc result
   */
  address generate_updateBytesCRC32C(bool is_pclmulqdq_supported) {
    assert(UseCRC32CIntrinsics, "need SSE4_2");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32C");
    address start = __ pc();
    const Register crc = rax;  // crc
    const Register buf = rcx;  // source java byte array address
    const Register len = rdx;  // length
    const Register d = rbx;
    const Register g = rsi;
    const Register h = rdi;
    const Register empty = 0;  // will never be used; kept only so the
                               // crc32c_ipl_alg2_alt2 signature can stay
                               // the same between 64- and 32-bit
    assert_different_registers(crc, buf, len, d, g, h);

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame
    Address crc_arg(rsp, 4 + 4 + 0); // ESP + 4, plus an extra 4 because
                                     // __ enter() has just pushed ebp
                                     // onto the stack
    Address buf_arg(rsp, 4 + 4 + 4);
    Address len_arg(rsp, 4 + 4 + 8);
    // Load up:
    __ movl(crc, crc_arg);
    __ movl(buf, buf_arg);
    __ movl(len, len_arg);
    __ push(d);
    __ push(g);
    __ push(h);
    __ crc32c_ipl_alg2_alt2(crc, buf, len,
                            d, g, h,
                            empty, empty, empty,
                            xmm0, xmm1, xmm2,
                            is_pclmulqdq_supported);
    __ pop(h);
    __ pop(g);
    __ pop(d);
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  // Safefetch stubs.
  void generate_safefetch(const char* name, int size, address* entry,
                          address* fault_pc, address* continuation_pc) {
@ -3204,6 +3261,13 @@ class StubGenerator: public StubCodeGenerator {
      StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table;
      StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
    }

    if (UseCRC32CIntrinsics) {
      bool supports_clmul = VM_Version::supports_clmul();
      StubRoutines::x86::generate_CRC32C_table(supports_clmul);
      StubRoutines::_crc32c_table_addr = (address)StubRoutines::x86::_crc32c_table;
      StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C(supports_clmul);
    }
  }

@ -3958,6 +3958,64 @@ class StubGenerator: public StubCodeGenerator {
    return start;
  }

  /**
   * Arguments:
   *
   * Inputs:
   *   c_rarg0 - int crc
   *   c_rarg1 - byte* buf
   *   c_rarg2 - long length
   *   c_rarg3 - table_start - optional (present only when doing a library_call,
   *             not used by x86 algorithm)
   *
   * Output:
   *   rax - int crc result
   */
  address generate_updateBytesCRC32C(bool is_pclmulqdq_supported) {
    assert(UseCRC32CIntrinsics, "need SSE4_2");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32C");
    address start = __ pc();
    // reg.arg   int#0  int#1  int#2  int#3  int#4  int#5  float regs
    // Windows   RCX    RDX    R8     R9     none   none   XMM0..XMM3
    // Lin / Sol RDI    RSI    RDX    RCX    R8     R9     XMM0..XMM7
    const Register crc = c_rarg0;  // crc
    const Register buf = c_rarg1;  // source java byte array address
    const Register len = c_rarg2;  // length
    const Register a = rax;
    const Register j = r9;
    const Register k = r10;
    const Register l = r11;
#ifdef _WIN64
    const Register y = rdi;
    const Register z = rsi;
#else
    const Register y = rcx;
    const Register z = r8;
#endif
    assert_different_registers(crc, buf, len, a, j, k, l, y, z);

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame
#ifdef _WIN64
    __ push(y);
    __ push(z);
#endif
    __ crc32c_ipl_alg2_alt2(crc, buf, len,
                            a, j, k,
                            l, y, z,
                            c_farg0, c_farg1, c_farg2,
                            is_pclmulqdq_supported);
    __ movl(rax, crc);
#ifdef _WIN64
    __ pop(z);
    __ pop(y);
#endif
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  /**
   * Arguments:
   *
@ -4302,6 +4360,13 @@ class StubGenerator: public StubCodeGenerator {
      StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table;
      StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
    }

    if (UseCRC32CIntrinsics) {
      bool supports_clmul = VM_Version::supports_clmul();
      StubRoutines::x86::generate_CRC32C_table(supports_clmul);
      StubRoutines::_crc32c_table_addr = (address)StubRoutines::x86::_crc32c_table;
      StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C(supports_clmul);
    }
  }

  void generate_all() {

@ -27,6 +27,7 @@
#include "runtime/frame.inline.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "crc32c.h"

// Implementation of the platform-specific part of StubRoutines - for
// a description of how to extend it, see the stubRoutines.hpp file.
@ -130,3 +131,107 @@ juint StubRoutines::x86::_crc_table[] =
    0x5d681b02UL, 0x2a6f2b94UL, 0xb40bbe37UL, 0xc30c8ea1UL, 0x5a05df1bUL,
    0x2d02ef8dUL
};

#define D 32
#define P 0x82F63B78  // Reflection of Castagnoli (0x11EDC6F41)

#define TILL_CYCLE 31
uint32_t _crc32c_pow_2k_table[TILL_CYCLE]; // because _crc32c_pow_2k_table[TILL_CYCLE == 31] == _crc32c_pow_2k_table[0]

// A. Kadatch and B. Jenkins / Everything we know about CRC but afraid to forget September 3, 2010 8
// Listing 1: Multiplication of normalized polynomials
// "a" and "b" occupy D least significant bits.
uint32_t crc32c_multiply(uint32_t a, uint32_t b) {
  uint32_t product = 0;
  uint32_t b_pow_x_table[D + 1]; // b_pow_x_table[k] = (b * x**k) mod P
  b_pow_x_table[0] = b;
  for (int k = 0; k < D; ++k) {
    // If "a" has a non-zero coefficient at x**k, add ((b * x**k) mod P) to the result.
    if ((a & (uint64_t)(1 << (D - 1 - k))) != 0) product ^= b_pow_x_table[k];

    // Compute b_pow_x_table[k+1] = (b * x**(k+1)) mod P.
    if (b_pow_x_table[k] & 1) {
      // If degree of (b_pow_x_table[k] * x) is D, then
      // degree of (b_pow_x_table[k] * x - P) is less than D.
      b_pow_x_table[k + 1] = (b_pow_x_table[k] >> 1) ^ P;
    }
    else {
      b_pow_x_table[k + 1] = b_pow_x_table[k] >> 1;
    }
  }
  return product;
}
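
// Editor's note: a quick sanity check for crc32c_multiply(). In this
// normalized representation the polynomial "1" is the top bit (0x80000000),
// so multiplying by it must return the other operand unchanged. Hypothetical
// test, not part of the changeset:
//
//   #include <assert.h>
//   #include <stdint.h>
//
//   void test_crc32c_multiply() {
//     const uint32_t one = 0x80000000;  // the polynomial 1
//     assert(crc32c_multiply(one, 0x12345678) == 0x12345678);
//     assert(crc32c_multiply(0x12345678, one) == 0x12345678);
//   }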
#undef D
#undef P

// A. Kadatch and B. Jenkins / Everything we know about CRC but afraid to forget September 3, 2010 9
void crc32c_init_pow_2k(void) {
  // _crc32c_pow_2k_table(0) =
  //   x^(2^k) mod P(x) = x mod P(x) = x
  // Since we are operating on reflected values,
  // x = 10b, reflect(x) = 0x40000000
  _crc32c_pow_2k_table[0] = 0x40000000;

  for (int k = 1; k < TILL_CYCLE; k++) {
    // _crc32c_pow_2k_table(k) = _crc32c_pow_2k_table(k-1)^2 mod P(x)
    uint32_t tmp = _crc32c_pow_2k_table[k - 1];
    _crc32c_pow_2k_table[k] = crc32c_multiply(tmp, tmp);
  }
}

// x^N mod P(x)
uint32_t crc32c_f_pow_n(uint32_t n) {
  // result = 1 (polynomial)
  uint32_t one, result = 0x80000000, i = 0;

  while (one = (n & 1), (n == 1 || n - one > 0)) {
    if (one) {
      result = crc32c_multiply(result, _crc32c_pow_2k_table[i]);
    }
    n >>= 1;
    i++;
  }

  return result;
}
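
// Editor's note: crc32c_f_pow_n() is binary exponentiation over GF(2).
// Writing n as a sum of powers of two, x^n is the product of x^(2^k) mod P(x)
// over the set bits of n, and each x^(2^k) comes straight out of
// _crc32c_pow_2k_table. Worked example (structure only):
//
//   n = 192 = 2^7 + 2^6
//   x^192 mod P(x) = crc32c_multiply(_crc32c_pow_2k_table[6],
//                                    _crc32c_pow_2k_table[7])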

juint *StubRoutines::x86::_crc32c_table;

void StubRoutines::x86::generate_CRC32C_table(bool is_pclmulqdq_table_supported) {

  static juint pow_n[CRC32C_NUM_PRECOMPUTED_CONSTANTS];

  crc32c_init_pow_2k();

  pow_n[0] = crc32c_f_pow_n(CRC32C_HIGH * 8);      // 8N * 8 = 64N
  pow_n[1] = crc32c_f_pow_n(CRC32C_HIGH * 8 * 2);  // 128N

  pow_n[2] = crc32c_f_pow_n(CRC32C_MIDDLE * 8);
  pow_n[3] = crc32c_f_pow_n(CRC32C_MIDDLE * 8 * 2);

  pow_n[4] = crc32c_f_pow_n(CRC32C_LOW * 8);
  pow_n[CRC32C_NUM_PRECOMPUTED_CONSTANTS - 1] =
            crc32c_f_pow_n(CRC32C_LOW * 8 * 2);

  if (is_pclmulqdq_table_supported) {
    _crc32c_table = pow_n;
  } else {
    static julong pclmulqdq_table[CRC32C_NUM_PRECOMPUTED_CONSTANTS * 256];

    for (int j = 0; j < CRC32C_NUM_PRECOMPUTED_CONSTANTS; j++) {
      static juint X_CONST = pow_n[j];
      for (int64_t i = 0; i < 256; i++) { // to force 64 bit wide computations
        // S. Gueron / Information Processing Letters 112 (2012) 184
        // Algorithm 3: Generating a carry-less multiplication lookup table.
        // Input: A 32-bit constant, X_CONST.
        // Output: A table of 256 entries, each one is a 64-bit quadword,
        // that can be used for computing "byte" * X_CONST, for a given byte.
        pclmulqdq_table[j * 256 + i] =
          ((i & 1) * X_CONST) ^ ((i & 2) * X_CONST) ^ ((i & 4) * X_CONST) ^
          ((i & 8) * X_CONST) ^ ((i & 16) * X_CONST) ^ ((i & 32) * X_CONST) ^
          ((i & 64) * X_CONST) ^ ((i & 128) * X_CONST);
      }
    }
    _crc32c_table = (juint*)pclmulqdq_table;
  }
}
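
// Editor's note: the table construction relies on distributivity of the
// carry-less product: for a byte i, (i clmul K) is the XOR over the set bits
// b of ((1 << b) clmul K), and (i & (1 << b)) * K computes exactly that
// one-bit product, since multiplying by a power of two is a plain shift (no
// carries can occur in 64 bits here). A standalone sketch of the same loop:
//
//   #include <stdint.h>
//
//   void build_clmul_byte_table(uint64_t out[256], uint32_t k_const) {
//     for (uint32_t i = 0; i < 256; i++) {
//       uint64_t e = 0;
//       for (int bit = 0; bit < 8; bit++) {
//         e ^= (uint64_t)(i & (1u << bit)) * k_const;  // one-bit clmul = shift
//       }
//       out[i] = e;
//     }
//   }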

@ -36,6 +36,8 @@
  // masks and table for CRC32
  static uint64_t _crc_by128_masks[];
  static juint _crc_table[];
  // table for CRC32C
  static juint* _crc32c_table;
  // swap mask for ghash
  static address _ghash_long_swap_mask_addr;
  static address _ghash_byte_swap_mask_addr;
@ -46,5 +48,6 @@
  static address crc_by128_masks_addr() { return (address)_crc_by128_masks; }
  static address ghash_long_swap_mask_addr() { return _ghash_long_swap_mask_addr; }
  static address ghash_byte_swap_mask_addr() { return _ghash_byte_swap_mask_addr; }
  static void generate_CRC32C_table(bool is_pclmulqdq_supported);

#endif // CPU_X86_VM_STUBROUTINES_X86_32_HPP

@ -697,15 +697,14 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
    __ jmp(rdi);

    __ bind(slow_path);
    (void) generate_normal_entry(false);

    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
    return entry;
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the accessor entry point
  // Reference.get is an accessor
  return generate_jump_to_normal_entry();
  return NULL;
}

/**
@ -753,12 +752,10 @@ address InterpreterGenerator::generate_CRC32_update_entry() {

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);

    (void) generate_native_entry(false);

    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return generate_native_entry(false);
  return NULL;
}

/**
@ -790,18 +787,25 @@ address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpret
    const Register buf = rdx;  // source java byte array address
    const Register len = rdi;  // length

    // value              x86_32
    // interp. arg ptr    ESP + 4
    // int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
    //                                         3         2      1        0
    // int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
    //                                              4      2,3      1        0

    // Arguments are reversed on java expression stack
    __ movl(len, Address(rsp, wordSize)); // Length
    __ movl(len, Address(rsp, 4 + 0)); // Length
    // Calculate address of start element
    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
      __ movptr(buf, Address(rsp, 3*wordSize)); // long buf
      __ addptr(buf, Address(rsp, 2*wordSize)); // + offset
      __ movl(crc, Address(rsp, 5*wordSize)); // Initial CRC
      __ movptr(buf, Address(rsp, 4 + 2 * wordSize)); // long buf
      __ addptr(buf, Address(rsp, 4 + 1 * wordSize)); // + offset
      __ movl(crc, Address(rsp, 4 + 4 * wordSize)); // Initial CRC
    } else {
      __ movptr(buf, Address(rsp, 3*wordSize)); // byte[] array
      __ movptr(buf, Address(rsp, 4 + 2 * wordSize)); // byte[] array
      __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
      __ addptr(buf, Address(rsp, 2*wordSize)); // + offset
      __ movl(crc, Address(rsp, 4*wordSize)); // Initial CRC
      __ addptr(buf, Address(rsp, 4 + 1 * wordSize)); // + offset
      __ movl(crc, Address(rsp, 4 + 3 * wordSize)); // Initial CRC
    }

    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len);
@ -814,12 +818,57 @@ address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpret

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return NULL;
}

  (void) generate_native_entry(false);
/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32C.updateBytes(int crc, byte[] b, int off, int end)
 *   int java.util.zip.CRC32C.updateByteBuffer(int crc, long address, int off, int end)
 */
address InterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  if (UseCRC32CIntrinsics) {
    address entry = __ pc();
    // Load parameters
    const Register crc = rax;  // crc
    const Register buf = rcx;  // source java byte array address
    const Register len = rdx;  // length
    const Register end = len;

    // value              x86_32
    // interp. arg ptr    ESP + 4
    // int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int end)
    //                                         3         2      1        0
    // int java.util.zip.CRC32.updateByteBuffer(int crc, long address, int off, int end)
    //                                              4      2,3          1        0

    // Arguments are reversed on java expression stack
    __ movl(end, Address(rsp, 4 + 0)); // end
    __ subl(len, Address(rsp, 4 + 1 * wordSize)); // end - offset == length
    // Calculate address of start element
    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
      __ movptr(buf, Address(rsp, 4 + 2 * wordSize)); // long address
      __ addptr(buf, Address(rsp, 4 + 1 * wordSize)); // + offset
      __ movl(crc, Address(rsp, 4 + 4 * wordSize)); // Initial CRC
    } else {
      __ movptr(buf, Address(rsp, 4 + 2 * wordSize)); // byte[] array
      __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
      __ addptr(buf, Address(rsp, 4 + 1 * wordSize)); // + offset
      __ movl(crc, Address(rsp, 4 + 3 * wordSize)); // Initial CRC
    }
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32C()), crc, buf, len);
    // result in rax
    // _areturn
    __ pop(rdi);      // get return address
    __ mov(rsp, rsi); // set sp to sender sp
    __ jmp(rdi);

    return entry;
  }
  return generate_native_entry(false);
  return NULL;
}
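
// Editor's note: how the "4 + k * wordSize" offsets above fall out. On x86_32
// the return address sits at [esp + 0], so the interpreter's argument area
// starts at esp + 4, and Java expression-stack slots are laid out
// last-argument-first. A hypothetical helper, not HotSpot code:
//
//   static int java_arg_slot_offset(int slot_from_top) {
//     const int wordSize = 4;  // x86_32
//     const int ret_addr = 4;  // saved return address at [esp]
//     return ret_addr + slot_from_top * wordSize;
//   }
//   // e.g. updateBytes(int crc, byte[] b, int off, int end): end is slot 0,
//   // off slot 1, b slot 2, crc slot 3 -> crc lives at [esp + 4 + 3*wordSize].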

/**
@ -827,10 +876,8 @@ address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpret
 * java.lang.Float.intBitsToFloat(int bits)
 */
address InterpreterGenerator::generate_Float_intBitsToFloat_entry() {
  address entry;

  if (UseSSE >= 1) {
    entry = __ pc();
    address entry = __ pc();

    // rsi: the sender's SP

@ -844,11 +891,10 @@ address InterpreterGenerator::generate_Float_intBitsToFloat_entry() {
    __ pop(rdi);      // get return address
    __ mov(rsp, rsi); // set rsp to the sender's SP
    __ jmp(rdi);
  } else {
    entry = generate_native_entry(false);
    return entry;
  }

  return entry;
  return NULL;
}

/**
@ -856,10 +902,8 @@ address InterpreterGenerator::generate_Float_intBitsToFloat_entry() {
 * java.lang.Float.floatToRawIntBits(float value)
 */
address InterpreterGenerator::generate_Float_floatToRawIntBits_entry() {
  address entry;

  if (UseSSE >= 1) {
    entry = __ pc();
    address entry = __ pc();

    // rsi: the sender's SP

@ -873,11 +917,10 @@ address InterpreterGenerator::generate_Float_floatToRawIntBits_entry() {
    __ pop(rdi);      // get return address
    __ mov(rsp, rsi); // set rsp to the sender's SP
    __ jmp(rdi);
  } else {
    entry = generate_native_entry(false);
    return entry;
  }

  return entry;
  return NULL;
}


@ -886,10 +929,8 @@ address InterpreterGenerator::generate_Float_floatToRawIntBits_entry() {
 * java.lang.Double.longBitsToDouble(long bits)
 */
address InterpreterGenerator::generate_Double_longBitsToDouble_entry() {
  address entry;

  if (UseSSE >= 2) {
    entry = __ pc();
    address entry = __ pc();

    // rsi: the sender's SP

@ -903,11 +944,10 @@ address InterpreterGenerator::generate_Double_longBitsToDouble_entry() {
    __ pop(rdi);      // get return address
    __ mov(rsp, rsi); // set rsp to the sender's SP
    __ jmp(rdi);
  } else {
    entry = generate_native_entry(false);
    return entry;
  }

  return entry;
  return NULL;
}

/**
@ -915,10 +955,8 @@ address InterpreterGenerator::generate_Double_longBitsToDouble_entry() {
 * java.lang.Double.doubleToRawLongBits(double value)
 */
address InterpreterGenerator::generate_Double_doubleToRawLongBits_entry() {
  address entry;

  if (UseSSE >= 2) {
    entry = __ pc();
    address entry = __ pc();

    // rsi: the sender's SP

@ -933,11 +971,10 @@ address InterpreterGenerator::generate_Double_doubleToRawLongBits_entry() {
    __ pop(rdi);      // get return address
    __ mov(rsp, rsi); // set rsp to the sender's SP
    __ jmp(rdi);
  } else {
    entry = generate_native_entry(false);
    return entry;
  }

  return entry;
  return NULL;
}

//

@ -677,15 +677,14 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {

    // generate a vanilla interpreter entry as the slow path
    __ bind(slow_path);
    (void) generate_normal_entry(false);

    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
    return entry;
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the accessor entry point
  // Reference.get is an accessor
  return generate_jump_to_normal_entry();
  return NULL;
}

/**
@ -733,12 +732,10 @@ address InterpreterGenerator::generate_CRC32_update_entry() {

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);

    (void) generate_native_entry(false);

    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return generate_native_entry(false);
  return NULL;
}

/**
@ -796,12 +793,61 @@ address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpret

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return NULL;
}

  (void) generate_native_entry(false);
/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32C.updateBytes(int crc, byte[] b, int off, int end)
 *   int java.util.zip.CRC32C.updateByteBuffer(int crc, long address, int off, int end)
 */
address InterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  if (UseCRC32CIntrinsics) {
    address entry = __ pc();
    // Load parameters
    const Register crc = c_rarg0;  // crc
    const Register buf = c_rarg1;  // source java byte array address
    const Register len = c_rarg2;
    const Register off = c_rarg3;  // offset
    const Register end = len;

    // Arguments are reversed on java expression stack
    // Calculate address of start element
    if (kind == Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer) {
      __ movptr(buf, Address(rsp, 3 * wordSize)); // long buf
      __ movl2ptr(off, Address(rsp, 2 * wordSize)); // offset
      __ addq(buf, off); // + offset
      __ movl(crc, Address(rsp, 5 * wordSize)); // Initial CRC
      // Note on 5 * wordSize vs. 4 * wordSize:
      // *   int java.util.zip.CRC32C.updateByteBuffer(int crc, long address, int off, int end)
      //                                                   4       2,3          1        0
      // end starts at SP + 8
      // The Java(R) Virtual Machine Specification Java SE 7 Edition
      // 4.10.2.3. Values of Types long and double
      //    "When calculating operand stack length, values of type long and double have length two."
    } else {
      __ movptr(buf, Address(rsp, 3 * wordSize)); // byte[] array
      __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
      __ movl2ptr(off, Address(rsp, 2 * wordSize)); // offset
      __ addq(buf, off); // + offset
      __ movl(crc, Address(rsp, 4 * wordSize)); // Initial CRC
    }
    __ movl(end, Address(rsp, wordSize)); // end
    __ subl(end, off); // end - off
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32C()), crc, buf, len);
    // result in rax
    // _areturn
    __ pop(rdi);      // get return address
    __ mov(rsp, r13); // set sp to sender sp
    __ jmp(rdi);

    return entry;
  }
  return generate_native_entry(false);

  return NULL;
}

// Interpreter stub for calling a native method. (asm interpreter)

@ -661,6 +661,18 @@ void VM_Version::get_processor_features() {
    FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
  }

  if (supports_sse4_2()) {
    if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
      UseCRC32CIntrinsics = true;
    }
  }
  else if (UseCRC32CIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
      warning("CRC32C intrinsics are not available on this CPU");
    }
    FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
  }

  // The AES intrinsic stubs require AES instruction support (of course)
  // but also require SSE3 mode for the instructions they use.
  if (UseAES && (UseSSE > 2)) {
@ -704,12 +716,6 @@ void VM_Version::get_processor_features() {
    FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
  }

  if (UseCRC32CIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32CIntrinsics))
      warning("CRC32C intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
  }

  if (UseAdler32Intrinsics) {
    warning("Adler32Intrinsics not available on this CPU.");
    FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);

@ -1712,6 +1712,18 @@ const bool Matcher::match_rule_supported(int opcode) {
  return ret_value;  // Per default match rules are supported.
}

const int Matcher::float_pressure(int default_pressure_threshold) {
  int float_pressure_threshold = default_pressure_threshold;
#ifdef _LP64
  if (UseAVX > 2) {
    // Increase pressure threshold on machines with AVX3 which have
    // 2x more XMM registers.
    float_pressure_threshold = default_pressure_threshold * 2;
  }
#endif
  return float_pressure_threshold;
}

// Max vector size in bytes. 0 if not supported.
const int Matcher::vector_width_in_bytes(BasicType bt) {
  assert(is_java_primitive(bt), "only primitive type vectors");

@ -3767,6 +3767,22 @@ operand indIndexScale(any_RegP reg, rRegL lreg, immI2 scale)
  %}
%}

operand indPosIndexScale(any_RegP reg, rRegI idx, immI2 scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(n->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
  match(AddP reg (LShiftL (ConvI2L idx) scale));

  op_cost(10);
  format %{ "[$reg + pos $idx << $scale]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($idx);
    scale($scale);
    disp(0x0);
  %}
%}

// Indirect Memory Times Scale Plus Index Register Plus Offset Operand
operand indIndexScaleOffset(any_RegP reg, immL32 off, rRegL lreg, immI2 scale)
%{
@ -4159,7 +4175,7 @@ operand cmpOpUCF2() %{
// case of this is memory operands.

opclass memory(indirect, indOffset8, indOffset32, indIndexOffset, indIndex,
               indIndexScale, indIndexScaleOffset, indPosIndexOffset, indPosIndexScaleOffset,
               indIndexScale, indPosIndexScale, indIndexScaleOffset, indPosIndexOffset, indPosIndexScaleOffset,
               indCompressedOopOffset,
               indirectNarrow, indOffset8Narrow, indOffset32Narrow,
               indIndexOffsetNarrow, indIndexNarrow, indIndexScaleNarrow,
@ -5186,6 +5202,17 @@ instruct leaPIdxScale(rRegP dst, indIndexScale mem)
  ins_pipe(ialu_reg_reg_fat);
%}

instruct leaPPosIdxScale(rRegP dst, indPosIndexScale mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq $dst, $mem\t# ptr idxscale" %}
  opcode(0x8D);
  ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
  ins_pipe(ialu_reg_reg_fat);
%}

instruct leaPIdxScaleOff(rRegP dst, indIndexScaleOffset mem)
%{
  match(Set dst mem);

@ -816,7 +816,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {

  // If G1 is not enabled then attempt to go through the normal entry point
  // Reference.get could be instrumented by jvmti
  return generate_normal_entry(false);
  return NULL;
}

address InterpreterGenerator::generate_native_entry(bool synchronized) {

@ -42,4 +42,5 @@
  // Not supported
  address generate_CRC32_update_entry() { return NULL; }
  address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
  address generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
#endif // CPU_ZERO_VM_INTERPRETERGENERATOR_ZERO_HPP

@ -3363,11 +3363,9 @@ const char* GraphBuilder::check_can_parse(ciMethod* callee) const {
  return NULL;
}


// negative filter: should callee NOT be inlined?  returns NULL, ok to inline, or rejection msg
const char* GraphBuilder::should_not_inline(ciMethod* callee) const {
  if ( callee->should_exclude())       return "excluded by CompilerOracle";
  if ( callee->should_not_inline())    return "disallowed by CompilerOracle";
  if ( callee->should_not_inline())    return "disallowed by CompileCommand";
  if ( callee->dont_inline())          return "don't inline by annotation";
  return NULL;
}
@ -3698,7 +3696,7 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, Bytecode

      const char* msg = "";
      if (callee->force_inline()) msg = "force inline by annotation";
      if (callee->should_inline()) msg = "force inline by CompileOracle";
      if (callee->should_inline()) msg = "force inline by CompileCommand";
      print_inlining(callee, msg);
    } else {
      // use heuristic controls on inlining

@ -1043,18 +1043,6 @@ MethodCounters* ciMethod::ensure_method_counters() {
  return method_counters;
}

// ------------------------------------------------------------------
// ciMethod::should_exclude
//
// Should this method be excluded from compilation?
bool ciMethod::should_exclude() {
  check_is_loaded();
  VM_ENTRY_MARK;
  methodHandle mh(THREAD, get_Method());
  bool ignore;
  return CompilerOracle::should_exclude(mh, ignore);
}

// ------------------------------------------------------------------
// ciMethod::should_inline
//

@ -266,7 +266,6 @@ class ciMethod : public ciMetadata {
  int resolve_vtable_index(ciKlass* caller, ciKlass* receiver);

  // Compilation directives
  bool should_exclude();
  bool should_inline();
  bool should_not_inline();
  bool should_print_assembly();

@ -1157,7 +1157,7 @@ bool CompileBroker::compilation_is_prohibited(methodHandle method, int osr_bci,
      method->print_short_name(tty);
      tty->cr();
    }
    method->set_not_compilable(CompLevel_all, !quietly, "excluded by CompilerOracle");
    method->set_not_compilable(CompLevel_all, !quietly, "excluded by CompileCommand");
  }

  return false;

@ -24,149 +24,17 @@

#include "precompiled.hpp"
#include "compiler/compilerOracle.hpp"
#include "compiler/methodMatcher.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/klass.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/os.hpp"

class MethodMatcher : public CHeapObj<mtCompiler> {
 public:
  enum Mode {
    Exact,
    Prefix = 1,
    Suffix = 2,
    Substring = Prefix | Suffix,
    Any,
    Unknown = -1
  };

 protected:
  Symbol*        _class_name;
  Symbol*        _method_name;
  Symbol*        _signature;
  Mode           _class_mode;
  Mode           _method_mode;
  MethodMatcher* _next;

  static bool match(Symbol* candidate, Symbol* match, Mode match_mode);

  Symbol* class_name() const { return _class_name; }
  Symbol* method_name() const { return _method_name; }
  Symbol* signature() const { return _signature; }

 public:
  MethodMatcher(Symbol* class_name, Mode class_mode,
                Symbol* method_name, Mode method_mode,
                Symbol* signature, MethodMatcher* next);
  MethodMatcher(Symbol* class_name, Symbol* method_name, MethodMatcher* next);

  // utility method
  MethodMatcher* find(methodHandle method) {
    Symbol* class_name  = method->method_holder()->name();
    Symbol* method_name = method->name();
    for (MethodMatcher* current = this; current != NULL; current = current->_next) {
      if (match(class_name, current->class_name(), current->_class_mode) &&
          match(method_name, current->method_name(), current->_method_mode) &&
          (current->signature() == NULL || current->signature() == method->signature())) {
        return current;
      }
    }
    return NULL;
  }

  bool match(methodHandle method) {
    return find(method) != NULL;
  }

  MethodMatcher* next() const { return _next; }

  static void print_symbol(Symbol* h, Mode mode) {
    ResourceMark rm;

    if (mode == Suffix || mode == Substring || mode == Any) {
      tty->print("*");
    }
    if (mode != Any) {
      h->print_symbol_on(tty);
    }
    if (mode == Prefix || mode == Substring) {
      tty->print("*");
    }
  }

  void print_base() {
    print_symbol(class_name(), _class_mode);
    tty->print(".");
    print_symbol(method_name(), _method_mode);
    if (signature() != NULL) {
      signature()->print_symbol_on(tty);
    }
  }

  virtual void print() {
    print_base();
    tty->cr();
  }
};

MethodMatcher::MethodMatcher(Symbol* class_name, Symbol* method_name, MethodMatcher* next) {
  _class_name  = class_name;
  _method_name = method_name;
  _next        = next;
  _class_mode  = MethodMatcher::Exact;
  _method_mode = MethodMatcher::Exact;
  _signature   = NULL;
}


MethodMatcher::MethodMatcher(Symbol* class_name, Mode class_mode,
                             Symbol* method_name, Mode method_mode,
                             Symbol* signature, MethodMatcher* next):
    _class_mode(class_mode)
  , _method_mode(method_mode)
  , _next(next)
  , _class_name(class_name)
  , _method_name(method_name)
  , _signature(signature) {
}

bool MethodMatcher::match(Symbol* candidate, Symbol* match, Mode match_mode) {
  if (match_mode == Any) {
    return true;
  }

  if (match_mode == Exact) {
    return candidate == match;
  }

  ResourceMark rm;
  const char * candidate_string = candidate->as_C_string();
  const char * match_string = match->as_C_string();

  switch (match_mode) {
  case Prefix:
    return strstr(candidate_string, match_string) == candidate_string;

  case Suffix: {
    size_t clen = strlen(candidate_string);
    size_t mlen = strlen(match_string);
    return clen >= mlen && strcmp(candidate_string + clen - mlen, match_string) == 0;
  }

  case Substring:
    return strstr(candidate_string, match_string) != NULL;

  default:
    return false;
  }
}
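
// Editor's note: the four fuzzy modes correspond to the method patterns users
// write with -XX:CompileCommand on the command line. Illustrative examples
// (pattern syntax is real; the method names are arbitrary):
//
//   -XX:CompileCommand=exclude,java/lang/String.indexOf    -> Exact
//   -XX:CompileCommand=exclude,java/lang/String.index*     -> Prefix
//   -XX:CompileCommand=exclude,*String.indexOf             -> Suffix
//   -XX:CompileCommand=exclude,*String.index*              -> Substring
//   -XX:CompileCommand=exclude,java/lang/String.*          -> Any (method)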

enum OptionType {
  IntxType,
  UintxType,
@ -202,114 +70,6 @@ template<> OptionType get_type_for<double>() {
  return DoubleType;
}

template<typename T>
static const T copy_value(const T value) {
  return value;
}

template<> const ccstr copy_value<ccstr>(const ccstr value) {
  return (const ccstr)os::strdup_check_oom(value);
}

template <typename T>
class TypedMethodOptionMatcher : public MethodMatcher {
  const char* _option;
  OptionType  _type;
  const T     _value;

 public:
  TypedMethodOptionMatcher(Symbol* class_name, Mode class_mode,
                           Symbol* method_name, Mode method_mode,
                           Symbol* signature, const char* opt,
                           const T value, MethodMatcher* next) :
    MethodMatcher(class_name, class_mode, method_name, method_mode, signature, next),
    _type(get_type_for<T>()), _value(copy_value<T>(value)) {
    _option = os::strdup_check_oom(opt);
  }

  ~TypedMethodOptionMatcher() {
    os::free((void*)_option);
  }

  TypedMethodOptionMatcher* match(methodHandle method, const char* opt) {
    TypedMethodOptionMatcher* current = this;
    while (current != NULL) {
      current = (TypedMethodOptionMatcher*)current->find(method);
      if (current == NULL) {
        return NULL;
      }
      if (strcmp(current->_option, opt) == 0) {
        return current;
      }
      current = current->next();
    }
    return NULL;
  }

  TypedMethodOptionMatcher* next() {
    return (TypedMethodOptionMatcher*)_next;
  }

  OptionType get_type(void) {
    return _type;
  };

  T value() { return _value; }

  void print() {
    ttyLocker ttyl;
    print_base();
    tty->print(" %s", _option);
    tty->print(" <unknown option type>");
    tty->cr();
  }
};

template<>
void TypedMethodOptionMatcher<intx>::print() {
  ttyLocker ttyl;
  print_base();
  tty->print(" intx %s", _option);
  tty->print(" = " INTX_FORMAT, _value);
  tty->cr();
};

template<>
void TypedMethodOptionMatcher<uintx>::print() {
  ttyLocker ttyl;
  print_base();
  tty->print(" uintx %s", _option);
  tty->print(" = " UINTX_FORMAT, _value);
  tty->cr();
};

template<>
void TypedMethodOptionMatcher<bool>::print() {
  ttyLocker ttyl;
  print_base();
  tty->print(" bool %s", _option);
  tty->print(" = %s", _value ? "true" : "false");
  tty->cr();
};

template<>
void TypedMethodOptionMatcher<ccstr>::print() {
  ttyLocker ttyl;
  print_base();
  tty->print(" const char* %s", _option);
  tty->print(" = '%s'", _value);
  tty->cr();
};

template<>
void TypedMethodOptionMatcher<double>::print() {
  ttyLocker ttyl;
  print_base();
  tty->print(" double %s", _option);
  tty->print(" = %f", _value);
  tty->cr();
};

// this must parallel the command_names below
enum OracleCommand {
  UnknownCommand = -1,
@ -342,8 +102,198 @@ static const char * command_names[] = {
};

class MethodMatcher;
static MethodMatcher* lists[OracleCommandCount] = { 0, };
class TypedMethodOptionMatcher;

static BasicMatcher* lists[OracleCommandCount] = { 0, };
static TypedMethodOptionMatcher* option_list = NULL;

class TypedMethodOptionMatcher : public MethodMatcher {
 private:
  TypedMethodOptionMatcher* _next;
  const char*               _option;
  OptionType                _type;
 public:

  union {
    bool   bool_value;
    intx   intx_value;
    uintx  uintx_value;
    double double_value;
    ccstr  ccstr_value;
  } _u;

  TypedMethodOptionMatcher() : MethodMatcher(),
                               _next(NULL),
                               _type(UnknownType) {
    _option = NULL;
    memset(&_u, 0, sizeof(_u));
  }

  static TypedMethodOptionMatcher* parse_method_pattern(char*& line, const char*& error_msg);
  TypedMethodOptionMatcher* match(methodHandle method, const char* opt, OptionType type);

  void init(const char* opt, OptionType type, TypedMethodOptionMatcher* next) {
    _next = next;
    _type = type;
    _option = os::strdup_check_oom(opt);
  }

  void set_next(TypedMethodOptionMatcher* next) { _next = next; }
  TypedMethodOptionMatcher* next() { return _next; }
  OptionType type() { return _type; }
  template<typename T> T value();
  template<typename T> void set_value(T value);
  void print();
  void print_all();
  TypedMethodOptionMatcher* clone();
  ~TypedMethodOptionMatcher();
};

// A few templated accessors instead of a full template class.
template<> intx TypedMethodOptionMatcher::value<intx>() {
  return _u.intx_value;
}

template<> uintx TypedMethodOptionMatcher::value<uintx>() {
  return _u.uintx_value;
}

template<> bool TypedMethodOptionMatcher::value<bool>() {
  return _u.bool_value;
}

template<> double TypedMethodOptionMatcher::value<double>() {
  return _u.double_value;
}

template<> ccstr TypedMethodOptionMatcher::value<ccstr>() {
  return _u.ccstr_value;
}

template<> void TypedMethodOptionMatcher::set_value(intx value) {
  _u.intx_value = value;
}

template<> void TypedMethodOptionMatcher::set_value(uintx value) {
  _u.uintx_value = value;
}

template<> void TypedMethodOptionMatcher::set_value(double value) {
  _u.double_value = value;
}

template<> void TypedMethodOptionMatcher::set_value(bool value) {
  _u.bool_value = value;
}

template<> void TypedMethodOptionMatcher::set_value(ccstr value) {
  _u.ccstr_value = (const ccstr)os::strdup_check_oom(value);
}
|
||||
|
||||
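For context: the get_type_for<T>() helper used by the accessors above is not part of the hunk shown. A minimal sketch of the mapping it is assumed to perform, one explicit specialization per supported OptionType (tag names taken from the print() switch below), would be:

  template<typename T> static OptionType get_type_for();
  template<> OptionType get_type_for<intx>()   { return IntxType;   }  // assumed, not shown in this hunk
  template<> OptionType get_type_for<uintx>()  { return UintxType;  }
  template<> OptionType get_type_for<bool>()   { return BoolType;   }
  template<> OptionType get_type_for<ccstr>()  { return CcstrType;  }
  template<> OptionType get_type_for<double>() { return DoubleType; }
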
void TypedMethodOptionMatcher::print() {
  ttyLocker ttyl;
  print_base(tty);
  switch (_type) {
  case IntxType:
    tty->print_cr(" intx %s = " INTX_FORMAT, _option, value<intx>());
    break;
  case UintxType:
    tty->print_cr(" uintx %s = " UINTX_FORMAT, _option, value<uintx>());
    break;
  case BoolType:
    tty->print_cr(" bool %s = %s", _option, value<bool>() ? "true" : "false");
    break;
  case DoubleType:
    tty->print_cr(" double %s = %f", _option, value<double>());
    break;
  case CcstrType:
    tty->print_cr(" const char* %s = '%s'", _option, value<ccstr>());
    break;
  default:
    ShouldNotReachHere();
  }
}

void TypedMethodOptionMatcher::print_all() {
  print();
  if (_next != NULL) {
    tty->print(" ");
    _next->print_all();
  }
}

TypedMethodOptionMatcher* TypedMethodOptionMatcher::clone() {
  TypedMethodOptionMatcher* m = new TypedMethodOptionMatcher();
  m->_class_mode = _class_mode;
  m->_class_name = _class_name;
  m->_method_mode = _method_mode;
  m->_method_name = _method_name;
  m->_signature = _signature;
  // Need to ref count the symbols
  if (_class_name != NULL) {
    _class_name->increment_refcount();
  }
  if (_method_name != NULL) {
    _method_name->increment_refcount();
  }
  if (_signature != NULL) {
    _signature->increment_refcount();
  }
  return m;
}

TypedMethodOptionMatcher::~TypedMethodOptionMatcher() {
  if (_option != NULL) {
    os::free((void*)_option);
  }
  if (_class_name != NULL) {
    _class_name->decrement_refcount();
  }
  if (_method_name != NULL) {
    _method_name->decrement_refcount();
  }
  if (_signature != NULL) {
    _signature->decrement_refcount();
  }
}

TypedMethodOptionMatcher* TypedMethodOptionMatcher::parse_method_pattern(char*& line, const char*& error_msg) {
  assert(error_msg == NULL, "Don't call here with error_msg already set");
  TypedMethodOptionMatcher* tom = new TypedMethodOptionMatcher();
  MethodMatcher::parse_method_pattern(line, error_msg, tom);
  if (error_msg != NULL) {
    delete tom;
    return NULL;
  }
  return tom;
}

TypedMethodOptionMatcher* TypedMethodOptionMatcher::match(methodHandle method, const char* opt, OptionType type) {
  TypedMethodOptionMatcher* current = this;
  while (current != NULL) {
    // Fastest compare first.
    if (current->type() == type) {
      if (strcmp(current->_option, opt) == 0) {
        if (current->matches(method)) {
          return current;
        }
      }
    }
    current = current->next();
  }
  return NULL;
}

template<typename T>
static void add_option_string(TypedMethodOptionMatcher* matcher,
                              const char* option,
                              T value) {
  assert(matcher != option_list, "No circular lists please");
  matcher->init(option, get_type_for<T>(), option_list);
  matcher->set_value<T>(value);
  option_list = matcher;
  return;
}

static bool check_predicate(OracleCommand command, methodHandle method) {
  return ((lists[command] != NULL) &&
@ -351,51 +301,27 @@ static bool check_predicate(OracleCommand command, methodHandle method) {
          lists[command]->match(method));
}


static MethodMatcher* add_predicate(OracleCommand command,
                                    Symbol* class_name, MethodMatcher::Mode c_mode,
                                    Symbol* method_name, MethodMatcher::Mode m_mode,
                                    Symbol* signature) {
static void add_predicate(OracleCommand command, BasicMatcher* bm) {
  assert(command != OptionCommand, "must use add_option_string");
  if (command == LogCommand && !LogCompilation && lists[LogCommand] == NULL)
  if (command == LogCommand && !LogCompilation && lists[LogCommand] == NULL) {
    tty->print_cr("Warning: +LogCompilation must be enabled in order for individual methods to be logged.");
  lists[command] = new MethodMatcher(class_name, c_mode, method_name, m_mode, signature, lists[command]);
  return lists[command];
  }

template<typename T>
static MethodMatcher* add_option_string(Symbol* class_name, MethodMatcher::Mode c_mode,
                                        Symbol* method_name, MethodMatcher::Mode m_mode,
                                        Symbol* signature,
                                        const char* option,
                                        T value) {
  lists[OptionCommand] = new TypedMethodOptionMatcher<T>(class_name, c_mode, method_name, m_mode,
                                                         signature, option, value, lists[OptionCommand]);
  return lists[OptionCommand];
}

template<typename T>
static bool get_option_value(methodHandle method, const char* option, T& value) {
  TypedMethodOptionMatcher<T>* m;
  if (lists[OptionCommand] != NULL
      && (m = ((TypedMethodOptionMatcher<T>*)lists[OptionCommand])->match(method, option)) != NULL
      && m->get_type() == get_type_for<T>()) {
    value = m->value();
    return true;
  } else {
    return false;
  }
}
  bm->set_next(lists[command]);
  lists[command] = bm;

bool CompilerOracle::has_option_string(methodHandle method, const char* option) {
  bool value = false;
  get_option_value(method, option, value);
  return value;
  return;
}

template<typename T>
bool CompilerOracle::has_option_value(methodHandle method, const char* option, T& value) {
  return ::get_option_value(method, option, value);
  if (option_list != NULL) {
    TypedMethodOptionMatcher* m = option_list->match(method, option, get_type_for<T>());
    if (m != NULL) {
      value = m->value<T>();
      return true;
    }
  }
  return false;
}

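A usage sketch of this lookup, assuming a per-method flag was registered via -XX:CompileCommand=option (the flag name MyIntxFlag is illustrative, not a real option):

  intx unroll;  // filled in on a hit
  if (CompilerOracle::has_option_value(method, "MyIntxFlag", unroll)) {
    // 'unroll' now holds the value parsed from the command line for this method
  }
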
// Explicit instantiation for all OptionTypes supported.
@ -405,6 +331,12 @@ template bool CompilerOracle::has_option_value<bool>(methodHandle method, const
template bool CompilerOracle::has_option_value<ccstr>(methodHandle method, const char* option, ccstr& value);
template bool CompilerOracle::has_option_value<double>(methodHandle method, const char* option, double& value);

bool CompilerOracle::has_option_string(methodHandle method, const char* option) {
  bool value = false;
  has_option_value(method, option, value);
  return value;
}

bool CompilerOracle::should_exclude(methodHandle method, bool& quietly) {
  quietly = true;
  if (lists[ExcludeCommand] != NULL) {
@ -420,19 +352,18 @@ bool CompilerOracle::should_exclude(methodHandle method, bool& quietly) {
  return false;
}


bool CompilerOracle::should_inline(methodHandle method) {
  return (check_predicate(InlineCommand, method));
}


// Check both DontInlineCommand and ExcludeCommand here
// - consistent behavior for all compilers
bool CompilerOracle::should_not_inline(methodHandle method) {
  return (check_predicate(DontInlineCommand, method));
  return check_predicate(DontInlineCommand, method) || check_predicate(ExcludeCommand, method);
}


bool CompilerOracle::should_print(methodHandle method) {
  return (check_predicate(PrintCommand, method));
  return check_predicate(PrintCommand, method);
}

bool CompilerOracle::should_print_methods() {
@ -445,12 +376,10 @@ bool CompilerOracle::should_log(methodHandle method) {
  return (check_predicate(LogCommand, method));
}


bool CompilerOracle::should_break_at(methodHandle method) {
  return check_predicate(BreakCommand, method);
}


static OracleCommand parse_command_name(const char * line, int* bytes_read) {
  assert(ARRAY_SIZE(command_names) == OracleCommandCount,
         "command_names size mismatch");
@ -516,83 +445,11 @@ static void usage() {
  tty->cr();
};

// The JVM specification defines the allowed characters.
// Tokens that are disallowed by the JVM specification can have
// a meaning to the parser so we need to include them here.
// The parser does not enforce all rules of the JVMS - a successful parse
// does not mean that it is an allowed name. Illegal names will
// be ignored since they never can match a class or method.
//
// '\0' and 0xf0-0xff are disallowed in constant string values
// 0x20 ' ', 0x09 '\t' and, 0x2c ',' are used in the matching
// 0x5b '[' and 0x5d ']' can not be used because of the matcher
// 0x28 '(' and 0x29 ')' are used for the signature
// 0x2e '.' is always replaced before the matching
// 0x2f '/' is only used in the class name as package separator

#define RANGEBASE "\x1\x2\x3\x4\x5\x6\x7\x8\xa\xb\xc\xd\xe\xf" \
"\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" \
"\x21\x22\x23\x24\x25\x26\x27\x2a\x2b\x2c\x2d" \
"\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" \
"\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" \
"\x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5a\x5c\x5e\x5f" \
"\x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" \
"\x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" \
"\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" \
"\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" \
"\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf" \
"\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" \
"\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" \
"\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" \
"\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef"

#define RANGE0 "[*" RANGEBASE "]"
#define RANGESLASH "[*" RANGEBASE "/]"

static MethodMatcher::Mode check_mode(char name[], const char*& error_msg) {
  int match = MethodMatcher::Exact;
  while (name[0] == '*') {
    match |= MethodMatcher::Suffix;
    // Copy remaining string plus NUL to the beginning
    memmove(name, name + 1, strlen(name + 1) + 1);
  }

  if (strcmp(name, "*") == 0) return MethodMatcher::Any;

  size_t len = strlen(name);
  while (len > 0 && name[len - 1] == '*') {
    match |= MethodMatcher::Prefix;
    name[--len] = '\0';
  }

  if (strstr(name, "*") != NULL) {
    error_msg = " Embedded * not allowed";
    return MethodMatcher::Unknown;
  }
  return (MethodMatcher::Mode)match;
}

static bool scan_line(const char * line,
                      char class_name[], MethodMatcher::Mode* c_mode,
                      char method_name[], MethodMatcher::Mode* m_mode,
                      int* bytes_read, const char*& error_msg) {
  *bytes_read = 0;
  error_msg = NULL;
  if (2 == sscanf(line, "%*[ \t]%255" RANGESLASH "%*[ ]" "%255" RANGE0 "%n", class_name, method_name, bytes_read)) {
    *c_mode = check_mode(class_name, error_msg);
    *m_mode = check_mode(method_name, error_msg);
    return *c_mode != MethodMatcher::Unknown && *m_mode != MethodMatcher::Unknown;
  }
  return false;
}

// Scan next flag and value in line, return MethodMatcher object on success, NULL on failure.
// On failure, error_msg contains description for the first error.
// For future extensions: set error_msg on first error.
static MethodMatcher* scan_flag_and_value(const char* type, const char* line, int& total_bytes_read,
                                          Symbol* c_name, MethodMatcher::Mode c_match,
                                          Symbol* m_name, MethodMatcher::Mode m_match,
                                          Symbol* signature,
static void scan_flag_and_value(const char* type, const char* line, int& total_bytes_read,
                                TypedMethodOptionMatcher* matcher,
                                char* errorbuf, const int buf_size) {
  total_bytes_read = 0;
  int bytes_read = 0;
@ -608,7 +465,8 @@ static MethodMatcher* scan_flag_and_value(const char* type, const char* line, in
    intx value;
    if (sscanf(line, "%*[ \t]" INTX_FORMAT "%n", &value, &bytes_read) == 1) {
      total_bytes_read += bytes_read;
      return add_option_string(c_name, c_match, m_name, m_match, signature, flag, value);
      add_option_string(matcher, flag, value);
      return;
    } else {
      jio_snprintf(errorbuf, buf_size, " Value cannot be read for flag %s of type %s ", flag, type);
    }
@ -616,7 +474,8 @@ static MethodMatcher* scan_flag_and_value(const char* type, const char* line, in
    uintx value;
    if (sscanf(line, "%*[ \t]" UINTX_FORMAT "%n", &value, &bytes_read) == 1) {
      total_bytes_read += bytes_read;
      return add_option_string(c_name, c_match, m_name, m_match, signature, flag, value);
      add_option_string(matcher, flag, value);
      return;
    } else {
      jio_snprintf(errorbuf, buf_size, " Value cannot be read for flag %s of type %s", flag, type);
    }
@ -625,7 +484,8 @@ static MethodMatcher* scan_flag_and_value(const char* type, const char* line, in
    char* value = NEW_RESOURCE_ARRAY(char, strlen(line) + 1);
    if (sscanf(line, "%*[ \t]%255[_a-zA-Z0-9]%n", value, &bytes_read) == 1) {
      total_bytes_read += bytes_read;
      return add_option_string(c_name, c_match, m_name, m_match, signature, flag, (ccstr)value);
      add_option_string(matcher, flag, (ccstr)value);
      return;
    } else {
      jio_snprintf(errorbuf, buf_size, " Value cannot be read for flag %s of type %s", flag, type);
    }
@ -646,7 +506,8 @@ static MethodMatcher* scan_flag_and_value(const char* type, const char* line, in
        next_value += bytes_read;
        end_value = next_value-1;
      }
      return add_option_string(c_name, c_match, m_name, m_match, signature, flag, (ccstr)value);
      add_option_string(matcher, flag, (ccstr)value);
      return;
    } else {
      jio_snprintf(errorbuf, buf_size, " Value cannot be read for flag %s of type %s", flag, type);
    }
@ -655,10 +516,12 @@ static MethodMatcher* scan_flag_and_value(const char* type, const char* line, in
    if (sscanf(line, "%*[ \t]%255[a-zA-Z]%n", value, &bytes_read) == 1) {
      if (strcmp(value, "true") == 0) {
        total_bytes_read += bytes_read;
        return add_option_string(c_name, c_match, m_name, m_match, signature, flag, true);
        add_option_string(matcher, flag, true);
        return;
      } else if (strcmp(value, "false") == 0) {
        total_bytes_read += bytes_read;
        return add_option_string(c_name, c_match, m_name, m_match, signature, flag, false);
        add_option_string(matcher, flag, false);
        return;
      } else {
        jio_snprintf(errorbuf, buf_size, " Value cannot be read for flag %s of type %s", flag, type);
      }
@ -673,7 +536,8 @@ static MethodMatcher* scan_flag_and_value(const char* type, const char* line, in
      char value[512] = "";
      jio_snprintf(value, sizeof(value), "%s.%s", buffer[0], buffer[1]);
      total_bytes_read += bytes_read;
      return add_option_string(c_name, c_match, m_name, m_match, signature, flag, atof(value));
      add_option_string(matcher, flag, atof(value));
      return;
    } else {
      jio_snprintf(errorbuf, buf_size, " Value cannot be read for flag %s of type %s", flag, type);
    }
@ -683,7 +547,7 @@ static MethodMatcher* scan_flag_and_value(const char* type, const char* line, in
  } else {
    jio_snprintf(errorbuf, buf_size, " Flag name for type %s should be alphanumeric ", type);
  }
  return NULL;
  return;
}

int skip_whitespace(char* line) {
@ -693,31 +557,20 @@ int skip_whitespace(char* line) {
  return whitespace_read;
}

void CompilerOracle::print_parse_error(const char*& error_msg, char* original_line) {
  assert(error_msg != NULL, "Must have error_message");

  ttyLocker ttyl;
  tty->print_cr("CompileCommand: An error occurred during parsing");
  tty->print_cr("Line: %s", original_line);
  tty->print_cr("Error: %s", error_msg);
  CompilerOracle::print_tip();
}

void CompilerOracle::parse_from_line(char* line) {
  if (line[0] == '\0') return;
  if (line[0] == '#')  return;

  bool have_colon = (strstr(line, "::") != NULL);
  for (char* lp = line; *lp != '\0'; lp++) {
    // Allow '.' to separate the class name from the method name.
    // This is the preferred spelling of methods:
    //      exclude java/lang/String.indexOf(I)I
    // Allow ',' for spaces (eases command line quoting).
    //      exclude,java/lang/String.indexOf
    // For backward compatibility, allow space as separator also.
    //      exclude java/lang/String indexOf
    //      exclude,java/lang/String,indexOf
    // For easy cut-and-paste of method names, allow VM output format
    // as produced by Method::print_short_name:
    //      exclude java.lang.String::indexOf
    // For simple implementation convenience here, convert them all to space.
    if (have_colon) {
      if (*lp == '.')  *lp = '/';  // dots build the package prefix
      if (*lp == ':')  *lp = ' ';
    }
    if (*lp == ',' || *lp == '.')  *lp = ' ';
  }

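As a worked example of the rewrites above, all of the following spellings canonicalize to the same internal form:

  //   exclude java.lang.String::indexOf   ->  exclude java/lang/String indexOf
  //   exclude,java/lang/String,indexOf    ->  exclude java/lang/String indexOf
  //   exclude java/lang/String.indexOf    ->  exclude java/lang/String indexOf
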
  char* original_line = line;
  int bytes_read;
  OracleCommand command = parse_command_name(line, &bytes_read);
@ -742,32 +595,7 @@ void CompilerOracle::parse_from_line(char* line) {
    return;
  }

  MethodMatcher::Mode c_match = MethodMatcher::Exact;
  MethodMatcher::Mode m_match = MethodMatcher::Exact;
  char class_name[256];
  char method_name[256];
  char sig[1024];
  char errorbuf[1024];
  const char* error_msg = NULL; // description of first error that appears
  MethodMatcher* match = NULL;

  if (scan_line(line, class_name, &c_match, method_name, &m_match, &bytes_read, error_msg)) {
    EXCEPTION_MARK;
    Symbol* c_name = SymbolTable::new_symbol(class_name, CHECK);
    Symbol* m_name = SymbolTable::new_symbol(method_name, CHECK);
    Symbol* signature = NULL;

    line += bytes_read;

    // there might be a signature following the method.
    // signatures always begin with ( so match that by hand
    line += skip_whitespace(line);
    if (1 == sscanf(line, "(%254[[);/" RANGEBASE "]%n", sig + 1, &bytes_read)) {
      sig[0] = '(';
      line += bytes_read;
      signature = SymbolTable::new_symbol(sig, CHECK);
    }

  const char* error_msg = NULL;
  if (command == OptionCommand) {
    // Look for trailing options.
    //
@ -783,18 +611,24 @@ void CompilerOracle::parse_from_line(char* line) {
    // the following types: intx, uintx, bool, ccstr, ccstrlist, and double.
    //
    // For future extensions: extend scan_flag_and_value()

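Concretely, the two option forms parsed below look like this on the command line (SomeFlag and SomeIntFlag are illustrative names, not real options):

  //  Type (1), untyped, treated as a bool flag:
  //    -XX:CompileCommand=option,java/lang/String.indexOf,SomeFlag
  //  Type (2), typed (intx, uintx, bool, ccstr, ccstrlist, double):
  //    -XX:CompileCommand=option,java/lang/String.indexOf,intx,SomeIntFlag,4
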
    char option[256]; // stores flag for Type (1) and type of Type (2)
    line++; // skip the ','
    TypedMethodOptionMatcher* archetype = TypedMethodOptionMatcher::parse_method_pattern(line, error_msg);
    if (archetype == NULL) {
      assert(error_msg != NULL, "Must have error_message");
      print_parse_error(error_msg, original_line);
      return;
    }

    line += skip_whitespace(line);

    // This is unnecessarily complex. Should retire multi-option lines and skip while loop
    while (sscanf(line, "%255[a-zA-Z0-9]%n", option, &bytes_read) == 1) {
      if (match != NULL && !_quiet) {
        // Print out the last match added
        ttyLocker ttyl;
        tty->print("CompileCommand: %s ", command_names[command]);
        match->print();
      }
      line += bytes_read;

      // typed_matcher is used as a blueprint for each option, deleted at the end
      TypedMethodOptionMatcher* typed_matcher = archetype->clone();
      if (strcmp(option, "intx") == 0
          || strcmp(option, "uintx") == 0
          || strcmp(option, "bool") == 0
@ -802,49 +636,45 @@ void CompilerOracle::parse_from_line(char* line) {
          || strcmp(option, "ccstrlist") == 0
          || strcmp(option, "double") == 0
          ) {

        char errorbuf[1024] = {0};
        // Type (2) option: parse flag name and value.
        match = scan_flag_and_value(option, line, bytes_read,
                                    c_name, c_match, m_name, m_match, signature,
                                    errorbuf, sizeof(errorbuf));
        if (match == NULL) {
        scan_flag_and_value(option, line, bytes_read, typed_matcher, errorbuf, sizeof(errorbuf));
        if (*errorbuf != '\0') {
          error_msg = errorbuf;
          break;
          print_parse_error(error_msg, original_line);
          return;
        }
        line += bytes_read;
      } else {
        // Type (1) option
        match = add_option_string(c_name, c_match, m_name, m_match, signature, option, true);
        add_option_string(typed_matcher, option, true);
      }
      if (typed_matcher != NULL && !_quiet) {
        // Print out the last match added
        assert(error_msg == NULL, "No error here");
        ttyLocker ttyl;
        tty->print("CompileCommand: %s ", command_names[command]);
        typed_matcher->print();
      }
      line += skip_whitespace(line);
    } // while(
  } else {
    match = add_predicate(command, c_name, c_match, m_name, m_match, signature);
  }
    delete archetype;
  } else { // not an OptionCommand)
    assert(error_msg == NULL, "Don't call here with error_msg already set");

    BasicMatcher* matcher = BasicMatcher::parse_method_pattern(line, error_msg);
    if (error_msg != NULL) {
      assert(matcher == NULL, "consistency");
      print_parse_error(error_msg, original_line);
      return;
    }

    add_predicate(command, matcher);
    if (!_quiet) {
      ttyLocker ttyl;
      if (error_msg != NULL) {
        // an error has happened
tty->print_cr("CompileCommand: An error occured during parsing");
        tty->print_cr("  \"%s\"", original_line);
        if (error_msg != NULL) {
          tty->print_cr("%s", error_msg);
        }
        CompilerOracle::print_tip();

      } else {
        // check for remaining characters
        bytes_read = 0;
        sscanf(line, "%*[ \t]%n", &bytes_read);
        if (line[bytes_read] != '\0') {
          tty->print_cr("CompileCommand: Bad pattern");
          tty->print_cr("  \"%s\"", original_line);
          tty->print_cr("  Unrecognized text %s after command ", line);
          CompilerOracle::print_tip();
        } else if (match != NULL && !_quiet) {
          tty->print("CompileCommand: %s ", command_names[command]);
          match->print();
          matcher->print(tty);
          tty->cr();
        }
      }
    }
@ -1045,10 +875,12 @@ void CompilerOracle::parse_compile_only(char * line) {
      Symbol* m_name = SymbolTable::new_symbol(methodName, CHECK);
      Symbol* signature = NULL;

      add_predicate(CompileOnlyCommand, c_name, c_match, m_name, m_match, signature);
      BasicMatcher* bm = new BasicMatcher();
      bm->init(c_name, c_match, m_name, m_match, signature);
      add_predicate(CompileOnlyCommand, bm);
      if (PrintVMOptions) {
        tty->print("CompileOnly: compileonly ");
        lists[CompileOnlyCommand]->print();
        lists[CompileOnlyCommand]->print_all(tty);
      }

      className = NULL;

@ -35,6 +35,7 @@ class CompilerOracle : AllStatic {
 private:
  static bool _quiet;
  static void print_tip();
  static void print_parse_error(const char*& error_msg, char* original_line);

 public:

hotspot/src/share/vm/compiler/methodMatcher.cpp (new file, 347 lines)
@ -0,0 +1,347 @@
/*
 * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/methodMatcher.hpp"
#include "memory/oopFactory.hpp"
#include "oops/oop.inline.hpp"

// The JVM specification defines the allowed characters.
// Tokens that are disallowed by the JVM specification can have
// a meaning to the parser so we need to include them here.
// The parser does not enforce all rules of the JVMS - a successful parse
// does not mean that it is an allowed name. Illegal names will
// be ignored since they never can match a class or method.
//
// '\0' and 0xf0-0xff are disallowed in constant string values
// 0x20 ' ', 0x09 '\t' and, 0x2c ',' are used in the matching
// 0x5b '[' and 0x5d ']' can not be used because of the matcher
// 0x28 '(' and 0x29 ')' are used for the signature
// 0x2e '.' is always replaced before the matching
// 0x2f '/' is only used in the class name as package separator

#define RANGEBASE "\x1\x2\x3\x4\x5\x6\x7\x8\xa\xb\xc\xd\xe\xf" \
"\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" \
"\x21\x22\x23\x24\x25\x26\x27\x2a\x2b\x2c\x2d" \
"\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" \
"\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" \
"\x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5a\x5c\x5e\x5f" \
"\x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" \
"\x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" \
"\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" \
"\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" \
"\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf" \
"\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" \
"\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" \
"\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" \
"\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef"

#define RANGE0 "[*" RANGEBASE "]"
#define RANGESLASH "[*" RANGEBASE "/]"

MethodMatcher::MethodMatcher():
    _class_mode(Exact)
  , _method_mode(Exact)
  , _class_name(NULL)
  , _method_name(NULL)
  , _signature(NULL) {
}

MethodMatcher::~MethodMatcher() {
  if (_class_name != NULL) {
    _class_name->decrement_refcount();
  }
  if (_method_name != NULL) {
    _method_name->decrement_refcount();
  }
  if (_signature != NULL) {
    _signature->decrement_refcount();
  }
}

void MethodMatcher::init(Symbol* class_name, Mode class_mode,
                         Symbol* method_name, Mode method_mode,
                         Symbol* signature) {
  _class_mode = class_mode;
  _method_mode = method_mode;
  _class_name = class_name;
  _method_name = method_name;
  _signature = signature;
}

bool MethodMatcher::canonicalize(char * line, const char *& error_msg) {
  char* colon = strstr(line, "::");
  bool have_colon = (colon != NULL);
  if (have_colon) {
    // Don't allow multiple '::'
    if (*(colon + 2) != '\0') {
      if (strstr(colon + 2, "::")) {
        error_msg = "Method pattern only allows one '::'";
        return false;
      }
    }

    bool in_signature = false;
    char* pos = line;
    if (pos != NULL) {
      for (char* lp = pos + 1; *lp != '\0'; lp++) {
        if (*lp == '(') {
          break;
        }

        if (*lp == '/') {
          error_msg = "Method pattern uses '/' together with '::'";
          return false;
        }
      }
    }
  } else {
    // Don't allow mixed package separators
    char* pos = strchr(line, '.');
    bool in_signature = false;
    if (pos != NULL) {
      for (char* lp = pos + 1; *lp != '\0'; lp++) {
        if (*lp == '(') {
          in_signature = true;
        }

        // After any comma the method pattern has ended
        if (*lp == ',') {
          break;
        }

        if (!in_signature && (*lp == '/')) {
          error_msg = "Method pattern uses mixed '/' and '.' package separators";
          return false;
        }

        if (*lp == '.') {
          error_msg = "Method pattern uses multiple '.' in pattern";
          return false;
        }
      }
    }
  }

  for (char* lp = line; *lp != '\0'; lp++) {
    // Allow '.' to separate the class name from the method name.
    // This is the preferred spelling of methods:
    //      exclude java/lang/String.indexOf(I)I
    // Allow ',' for spaces (eases command line quoting).
    //      exclude,java/lang/String.indexOf
    // For backward compatibility, allow space as separator also.
    //      exclude java/lang/String indexOf
    //      exclude,java/lang/String,indexOf
    // For easy cut-and-paste of method names, allow VM output format
    // as produced by Method::print_short_name:
    //      exclude java.lang.String::indexOf
    // For simple implementation convenience here, convert them all to space.

    if (have_colon) {
      if (*lp == '.')  *lp = '/';  // dots build the package prefix
      if (*lp == ':')  *lp = ' ';
    }
    if (*lp == ',' || *lp == '.')  *lp = ' ';
  }
  return true;
}

bool MethodMatcher::match(Symbol* candidate, Symbol* match, Mode match_mode) const {
  if (match_mode == Any) {
    return true;
  }

  if (match_mode == Exact) {
    return candidate == match;
  }

  ResourceMark rm;
  const char * candidate_string = candidate->as_C_string();
  const char * match_string = match->as_C_string();

  switch (match_mode) {
  case Prefix:
    return strstr(candidate_string, match_string) == candidate_string;

  case Suffix: {
    size_t clen = strlen(candidate_string);
    size_t mlen = strlen(match_string);
    return clen >= mlen && strcmp(candidate_string + clen - mlen, match_string) == 0;
  }

  case Substring:
    return strstr(candidate_string, match_string) != NULL;

  default:
    return false;
  }
}

static MethodMatcher::Mode check_mode(char name[], const char*& error_msg) {
  int match = MethodMatcher::Exact;
  if (name[0] == '*') {
    if (strlen(name) == 1) {
      return MethodMatcher::Any;
    }
    match |= MethodMatcher::Suffix;
    memmove(name, name + 1, strlen(name + 1) + 1);
  }

  size_t len = strlen(name);
  if (len > 0 && name[len - 1] == '*') {
    match |= MethodMatcher::Prefix;
    name[--len] = '\0';
  }

  if (strlen(name) == 0) {
    error_msg = "** Not a valid pattern";
    return MethodMatcher::Any;
  }

  if (strstr(name, "*") != NULL) {
    error_msg = " Embedded * not allowed";
    return MethodMatcher::Unknown;
  }
  return (MethodMatcher::Mode)match;
}

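The pattern-to-mode mapping implemented by check_mode() above, in summary:

  //   "foo"   -> Exact       "foo*"  -> Prefix
  //   "*foo"  -> Suffix      "*foo*" -> Substring
  //   "*"     -> Any         "f*o"   -> Unknown (embedded '*' is rejected)
  //   "**"    -> Any, with error_msg set ("** Not a valid pattern")
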
// Skip any leading spaces
void skip_leading_spaces(char*& line, int* total_bytes_read) {
  int bytes_read = 0;
  sscanf(line, "%*[ \t]%n", &bytes_read);
  if (bytes_read > 0) {
    line += bytes_read;
    *total_bytes_read += bytes_read;
  }
}

void MethodMatcher::parse_method_pattern(char*& line, const char*& error_msg, MethodMatcher* matcher) {
  MethodMatcher::Mode c_match;
  MethodMatcher::Mode m_match;
  char class_name[256] = {0};
  char method_name[256] = {0};
  char sig[1024] = {0};
  int bytes_read = 0;
  int total_bytes_read = 0;

  assert(error_msg == NULL, "Don't call here with error_msg already set");

  if (!MethodMatcher::canonicalize(line, error_msg)) {
    assert(error_msg != NULL, "Message must be set if parsing failed");
    return;
  }

  skip_leading_spaces(line, &total_bytes_read);

  if (2 == sscanf(line, "%255" RANGESLASH "%*[ ]" "%255" RANGE0 "%n", class_name, method_name, &bytes_read)) {
    c_match = check_mode(class_name, error_msg);
    m_match = check_mode(method_name, error_msg);

    if ((strchr(class_name, '<') != NULL) || (strchr(class_name, '>') != NULL)) {
      error_msg = "Chars '<' and '>' not allowed in class name";
      return;
    }
    if ((strchr(method_name, '<') != NULL) || (strchr(method_name, '>') != NULL)) {
      if ((strncmp("<init>", method_name, 255) != 0) && (strncmp("<clinit>", method_name, 255) != 0)) {
        error_msg = "Chars '<' and '>' only allowed in <init> and <clinit>";
        return;
      }
    }

    if (c_match == MethodMatcher::Unknown || m_match == MethodMatcher::Unknown) {
      assert(error_msg != NULL, "Must have been set by check_mode()");
      return;
    }

    EXCEPTION_MARK;
    Symbol* signature = NULL;
    line += bytes_read;
    bytes_read = 0;

    skip_leading_spaces(line, &total_bytes_read);

    // there might be a signature following the method.
    // signatures always begin with ( so match that by hand
    if (line[0] == '(') {
      line++;
      sig[0] = '(';
      // scan the rest
      if (1 == sscanf(line, "%254[[);/" RANGEBASE "]%n", sig+1, &bytes_read)) {
        if (strchr(sig, '*') != NULL) {
          error_msg = " Wildcard * not allowed in signature";
          return;
        }
        line += bytes_read;
      }
      signature = SymbolTable::new_symbol(sig, CHECK);
    }
    Symbol* c_name = SymbolTable::new_symbol(class_name, CHECK);
    Symbol* m_name = SymbolTable::new_symbol(method_name, CHECK);

    matcher->init(c_name, c_match, m_name, m_match, signature);
    return;
  } else {
    error_msg = "Could not parse method pattern";
  }
}

bool MethodMatcher::matches(methodHandle method) const {
  Symbol* class_name  = method->method_holder()->name();
  Symbol* method_name = method->name();
  Symbol* signature   = method->signature();

  if (match(class_name, this->class_name(), _class_mode) &&
      match(method_name, this->method_name(), _method_mode) &&
      ((this->signature() == NULL) || match(signature, this->signature(), Prefix))) {
    return true;
  }
  return false;
}

void MethodMatcher::print_symbol(outputStream* st, Symbol* h, Mode mode) {
  ResourceMark rm;

  if (mode == Suffix || mode == Substring || mode == Any) {
    st->print("*");
  }
  if (mode != Any) {
    h->print_symbol_on(st);
  }
  if (mode == Prefix || mode == Substring) {
    st->print("*");
  }
}

void MethodMatcher::print_base(outputStream* st) {
  print_symbol(st, class_name(), _class_mode);
  st->print(".");
  print_symbol(st, method_name(), _method_mode);
  if (signature() != NULL) {
    signature()->print_symbol_on(st);
  }
}

hotspot/src/share/vm/compiler/methodMatcher.hpp (new file, 126 lines)
@ -0,0 +1,126 @@
/*
 * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_COMPILER_METHODMATCHER_HPP
#define SHARE_VM_COMPILER_METHODMATCHER_HPP

#include "memory/allocation.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "memory/resourceArea.hpp"

class MethodMatcher : public CHeapObj<mtCompiler> {
 public:
  enum Mode {
    Exact,
    Prefix = 1,
    Suffix = 2,
    Substring = Prefix | Suffix,
    Any,
    Unknown = -1
  };

 protected:
  Symbol* _class_name;
  Symbol* _method_name;
  Symbol* _signature;
  Mode    _class_mode;
  Mode    _method_mode;

 public:
  Symbol* class_name() const  { return _class_name; }
  Mode class_mode() const     { return _class_mode; }
  Symbol* method_name() const { return _method_name; }
  Mode method_mode() const    { return _method_mode; }
  Symbol* signature() const   { return _signature; }

  MethodMatcher();
  ~MethodMatcher();

  void init(Symbol* class_name, Mode class_mode, Symbol* method_name, Mode method_mode, Symbol* signature);
  static void parse_method_pattern(char*& line, const char*& error_msg, MethodMatcher* m);
  static void print_symbol(outputStream* st, Symbol* h, Mode mode);
  bool matches(methodHandle method) const;
  void print_base(outputStream* st);

 private:
  static bool canonicalize(char * line, const char *& error_msg);
  bool match(Symbol* candidate, Symbol* match, Mode match_mode) const;
};

class BasicMatcher : public MethodMatcher {
 private:
  BasicMatcher* _next;
 public:

  BasicMatcher() : MethodMatcher(),
    _next(NULL) {
  }

  BasicMatcher(BasicMatcher* next) :
    _next(next) {
  }

  static BasicMatcher* parse_method_pattern(char* line, const char*& error_msg) {
    assert(error_msg == NULL, "Don't call here with error_msg already set");
    BasicMatcher* bm = new BasicMatcher();
    MethodMatcher::parse_method_pattern(line, error_msg, bm);
    if (error_msg != NULL) {
      delete bm;
      return NULL;
    }

    // check for bad trailing characters
    int bytes_read = 0;
    sscanf(line, "%*[ \t]%n", &bytes_read);
    if (line[bytes_read] != '\0') {
      error_msg = "Unrecognized trailing text after method pattern";
      delete bm;
      return NULL;
    }
    return bm;
  }

  bool match(methodHandle method) {
    for (BasicMatcher* current = this; current != NULL; current = current->next()) {
      if (current->matches(method)) {
        return true;
      }
    }
    return false;
  }

  void set_next(BasicMatcher* next) { _next = next; }
  BasicMatcher* next() { return _next; }

  void print(outputStream* st) { print_base(st); }
  void print_all(outputStream* st) {
    print_base(st);
    if (_next != NULL) {
      _next->print_all(st);
    }
  }
};

#endif // SHARE_VM_COMPILER_METHODMATCHER_HPP

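A minimal usage sketch of the new matcher API (the pattern buffer and the methodHandle mh are illustrative; parse_method_pattern may rewrite the buffer in place during canonicalization):

  const char* error_msg = NULL;
  char pattern[] = "java/lang/String.indexOf";
  BasicMatcher* bm = BasicMatcher::parse_method_pattern(pattern, error_msg);
  if (bm != NULL && bm->match(mh)) {
    // mh matches; bm->print(tty) would echo the canonical pattern
  }
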
@ -58,7 +58,6 @@ OopMapStream::OopMapStream(const ImmutableOopMap* oop_map, int oop_types_mask) {
  _valid_omv = false;
}


void OopMapStream::find_next() {
  while(_position++ < _size) {
    _omv.read_from(_stream);
@ -156,9 +155,7 @@ void OopMap::set_oop(VMReg reg) {


void OopMap::set_value(VMReg reg) {
  // At this time, we only need value entries in our OopMap when ZapDeadCompiledLocals is active.
  if (ZapDeadCompiledLocals)
    set_xxx(reg, OopMapValue::value_value, VMRegImpl::Bad());
  // At this time, we don't need value entries in our OopMap.
}


@ -199,7 +196,6 @@ void OopMapSet::grow_om_data() {
  set_om_data(new_data);
}


void OopMapSet::add_gc_map(int pc_offset, OopMap *map ) {
  assert(om_size() != -1,"Cannot grow a fixed OopMapSet");

@ -345,7 +341,7 @@ void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
    do {
      omv = oms.current();
      oop* loc = fr->oopmapreg_to_location(omv.reg(),reg_map);
      if ( loc != NULL ) {
      guarantee(loc != NULL, "missing saved register");
      oop *base_loc = fr->oopmapreg_to_location(omv.content_reg(), reg_map);
      oop *derived_loc = loc;
      oop val = *base_loc;
@ -355,7 +351,7 @@ void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
        // implicit null check is used in compiled code.
        // The narrow_oop_base could be NULL or be the address
        // of the page below heap depending on compressed oops mode.
      } else
      } else {
        derived_oop_fn(base_loc, derived_loc);
      }
      oms.next();
@ -363,13 +359,17 @@ void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
    }
  }

  // We want coop, value and oop oop_types
  int mask = OopMapValue::oop_value | OopMapValue::value_value | OopMapValue::narrowoop_value;
  // We want coop and oop oop_types
  int mask = OopMapValue::oop_value | OopMapValue::narrowoop_value;
  {
    for (OopMapStream oms(map,mask); !oms.is_done(); oms.next()) {
      omv = oms.current();
      oop* loc = fr->oopmapreg_to_location(omv.reg(),reg_map);
      if ( loc != NULL ) {
        // It should be an error if no location can be found for a
        // register mentioned as containing an oop of some kind. Maybe
        // this was allowed previously because value_value items might
        // be missing?
        guarantee(loc != NULL, "missing saved register");
        if ( omv.type() == OopMapValue::oop_value ) {
          oop val = *loc;
          if (val == (oop)NULL || Universe::is_narrow_oop_base(val)) {
@ -395,14 +395,12 @@ void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
          }
#endif // ASSERT
          oop_fn->do_oop(loc);
        } else if ( omv.type() == OopMapValue::value_value ) {
          assert((*loc) == (oop)NULL || !Universe::is_narrow_oop_base(*loc),
                 "found invalid value pointer");
          value_fn->do_oop(loc);
        } else if ( omv.type() == OopMapValue::narrowoop_value ) {
          narrowOop *nl = (narrowOop*)loc;
#ifndef VM_LITTLE_ENDIAN
          if (!omv.reg()->is_stack()) {
          VMReg vmReg = omv.reg();
          // Don't do this on SPARC float registers as they can be individually addressed
          if (!vmReg->is_stack() SPARC_ONLY(&& !vmReg->is_FloatRegister())) {
            // compressed oops in registers only take up 4 bytes of an
            // 8 byte register but they are in the wrong part of the
            // word so adjust loc to point at the right place.
@ -413,7 +411,6 @@ void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
          }
        }
      }
    }
  }


@ -485,9 +482,6 @@ void print_register_type(OopMapValue::oop_types x, VMReg optional,
  case OopMapValue::oop_value:
    st->print("Oop");
    break;
  case OopMapValue::value_value:
    st->print("Value");
    break;
  case OopMapValue::narrowoop_value:
    st->print("NarrowOop");
    break;

@ -33,7 +33,6 @@
// Interface for generating the frame map for compiled code. A frame map
// describes for a specific pc whether each register and frame stack slot is:
//   Oop       - A GC root for current frame
//   Value     - Live non-oop, non-float value: int, either half of double
//   Dead      - Dead; can be Zapped for debugging
//   CalleeXX  - Callee saved; also describes which caller register is saved
//   DerivedXX - A derived oop; original oop is described.
@ -54,7 +53,7 @@ private:

 public:
  // Constants
  enum { type_bits     = 5,
  enum { type_bits     = 4,
         register_bits = BitsPerShort - type_bits };

  enum { type_shift    = 0,
@ -68,10 +67,9 @@ public:
  enum oop_types {             // must fit in type_bits
         unused_value = 0,     // powers of 2, for masking OopMapStream
         oop_value = 1,
         value_value = 2,
         narrowoop_value = 4,
         callee_saved_value = 8,
         derived_oop_value = 16 };
         narrowoop_value = 2,
         callee_saved_value = 4,
         derived_oop_value = 8 };

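With value_value gone, the remaining tags still fit the one-bit-per-type masking contract that OopMapStream relies on, and type_bits shrinks from 5 to 4. An illustrative consistency check (a sketch, not part of the patch):

  int mask = OopMapValue::oop_value | OopMapValue::narrowoop_value;  // 1 | 2
  assert((OopMapValue::callee_saved_value & mask) == 0 &&
         (OopMapValue::derived_oop_value  & mask) == 0, "type tags must stay disjoint powers of 2");
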
  // Constructors
  OopMapValue () { set_value(0); set_content_reg(VMRegImpl::Bad()); }
@ -96,13 +94,11 @@ public:

  // Querying
  bool is_oop()          { return mask_bits(value(), type_mask_in_place) == oop_value; }
  bool is_value()        { return mask_bits(value(), type_mask_in_place) == value_value; }
  bool is_narrowoop()    { return mask_bits(value(), type_mask_in_place) == narrowoop_value; }
  bool is_callee_saved() { return mask_bits(value(), type_mask_in_place) == callee_saved_value; }
  bool is_derived_oop()  { return mask_bits(value(), type_mask_in_place) == derived_oop_value; }

  void set_oop()          { set_value((value() & register_mask_in_place) | oop_value); }
  void set_value()        { set_value((value() & register_mask_in_place) | value_value); }
  void set_narrowoop()    { set_value((value() & register_mask_in_place) | narrowoop_value); }
  void set_callee_saved() { set_value((value() & register_mask_in_place) | callee_saved_value); }
  void set_derived_oop()  { set_value((value() & register_mask_in_place) | derived_oop_value); }

@ -90,6 +90,8 @@ class AbstractInterpreter: AllStatic {
    java_util_zip_CRC32_update,                  // implementation of java.util.zip.CRC32.update()
    java_util_zip_CRC32_updateBytes,             // implementation of java.util.zip.CRC32.updateBytes()
    java_util_zip_CRC32_updateByteBuffer,        // implementation of java.util.zip.CRC32.updateByteBuffer()
    java_util_zip_CRC32C_updateBytes,            // implementation of java.util.zip.CRC32C.updateBytes(crc, b[], off, end)
    java_util_zip_CRC32C_updateDirectByteBuffer, // implementation of java.util.zip.CRC32C.updateDirectByteBuffer(crc, address, off, end)
    java_lang_Float_intBitsToFloat,              // implementation of java.lang.Float.intBitsToFloat()
    java_lang_Float_floatToRawIntBits,           // implementation of java.lang.Float.floatToRawIntBits()
    java_lang_Double_longBitsToDouble,           // implementation of java.lang.Double.longBitsToDouble()

@ -104,7 +104,10 @@ CodeletMark::~CodeletMark() {
  (*_masm)->flush();

  // Commit Codelet.
  AbstractInterpreter::code()->commit((*_masm)->code()->pure_insts_size(), (*_masm)->code()->strings());
  int committed_code_size = (*_masm)->code()->pure_insts_size();
  if (committed_code_size) {
    AbstractInterpreter::code()->commit(committed_code_size, (*_masm)->code()->strings());
  }
  // Make sure nobody can use _masm outside a CodeletMark lifespan.
  *_masm = NULL;
}
@ -234,6 +237,13 @@ AbstractInterpreter::MethodKind AbstractInterpreter::method_kind(methodHandle m)
      case vmIntrinsics::_updateByteBufferCRC32 : return java_util_zip_CRC32_updateByteBuffer;
    }
  }
  if (UseCRC32CIntrinsics) {
    // Use optimized stub code for CRC32C methods.
    switch (m->intrinsic_id()) {
      case vmIntrinsics::_updateBytesCRC32C            : return java_util_zip_CRC32C_updateBytes;
      case vmIntrinsics::_updateDirectByteBufferCRC32C : return java_util_zip_CRC32C_updateDirectByteBuffer;
    }
  }

  switch(m->intrinsic_id()) {
  case vmIntrinsics::_intBitsToFloat: return java_lang_Float_intBitsToFloat;
@ -349,6 +359,8 @@ void AbstractInterpreter::print_method_kind(MethodKind kind) {
    case java_util_zip_CRC32_update           : tty->print("java_util_zip_CRC32_update"); break;
    case java_util_zip_CRC32_updateBytes      : tty->print("java_util_zip_CRC32_updateBytes"); break;
    case java_util_zip_CRC32_updateByteBuffer : tty->print("java_util_zip_CRC32_updateByteBuffer"); break;
    case java_util_zip_CRC32C_updateBytes     : tty->print("java_util_zip_CRC32C_updateBytes"); break;
    case java_util_zip_CRC32C_updateDirectByteBuffer: tty->print("java_util_zip_CRC32C_updateDirectByteBuffer"); break;
    default:
      if (kind >= method_handle_invoke_FIRST &&
          kind <= method_handle_invoke_LAST) {
@ -537,14 +549,15 @@ void AbstractInterpreterGenerator::initialize_method_handle_entries() {
address InterpreterGenerator::generate_method_entry(
                                        AbstractInterpreter::MethodKind kind) {
  // determine code generation flags
  bool native = false;
  bool synchronized = false;
  address entry_point = NULL;

  switch (kind) {
  case Interpreter::zerolocals             : break;
  case Interpreter::zerolocals_synchronized: synchronized = true; break;
  case Interpreter::native                 : entry_point = generate_native_entry(false); break;
  case Interpreter::native_synchronized    : entry_point = generate_native_entry(true); break;
  case Interpreter::native                 : native = true; break;
  case Interpreter::native_synchronized    : native = true; synchronized = true; break;
  case Interpreter::empty                  : entry_point = generate_empty_entry(); break;
  case Interpreter::accessor               : entry_point = generate_accessor_entry(); break;
  case Interpreter::abstract               : entry_point = generate_abstract_entry(); break;
@ -562,28 +575,32 @@ address InterpreterGenerator::generate_method_entry(
    : entry_point = generate_Reference_get_entry(); break;
#ifndef CC_INTERP
  case Interpreter::java_util_zip_CRC32_update
    : entry_point = generate_CRC32_update_entry(); break;
    : native = true; entry_point = generate_CRC32_update_entry(); break;
  case Interpreter::java_util_zip_CRC32_updateBytes
    : // fall thru
  case Interpreter::java_util_zip_CRC32_updateByteBuffer
    : entry_point = generate_CRC32_updateBytes_entry(kind); break;
    : native = true; entry_point = generate_CRC32_updateBytes_entry(kind); break;
  case Interpreter::java_util_zip_CRC32C_updateBytes
    : // fall thru
  case Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer
    : entry_point = generate_CRC32C_updateBytes_entry(kind); break;
#if defined(TARGET_ARCH_x86) && !defined(_LP64)
  // On x86_32 platforms, a special entry is generated for the following four methods.
  // On other platforms the normal entry is used to enter these methods.
  case Interpreter::java_lang_Float_intBitsToFloat
    : entry_point = generate_Float_intBitsToFloat_entry(); break;
    : native = true; entry_point = generate_Float_intBitsToFloat_entry(); break;
  case Interpreter::java_lang_Float_floatToRawIntBits
    : entry_point = generate_Float_floatToRawIntBits_entry(); break;
    : native = true; entry_point = generate_Float_floatToRawIntBits_entry(); break;
  case Interpreter::java_lang_Double_longBitsToDouble
    : entry_point = generate_Double_longBitsToDouble_entry(); break;
    : native = true; entry_point = generate_Double_longBitsToDouble_entry(); break;
  case Interpreter::java_lang_Double_doubleToRawLongBits
    : entry_point = generate_Double_doubleToRawLongBits_entry(); break;
    : native = true; entry_point = generate_Double_doubleToRawLongBits_entry(); break;
#else
  case Interpreter::java_lang_Float_intBitsToFloat:
  case Interpreter::java_lang_Float_floatToRawIntBits:
  case Interpreter::java_lang_Double_longBitsToDouble:
  case Interpreter::java_lang_Double_doubleToRawLongBits:
    entry_point = generate_native_entry(false);
    native = true;
    break;
#endif // defined(TARGET_ARCH_x86) && !defined(_LP64)
#endif // CC_INTERP
@ -596,5 +613,18 @@ address InterpreterGenerator::generate_method_entry(
    return entry_point;
  }

  return generate_normal_entry(synchronized);
  // We expect the normal and native entry points to be generated first so we can reuse them.
  if (native) {
    entry_point = Interpreter::entry_for_kind(synchronized ? Interpreter::native_synchronized : Interpreter::native);
    if (entry_point == NULL) {
      entry_point = generate_native_entry(synchronized);
    }
  } else {
    entry_point = Interpreter::entry_for_kind(synchronized ? Interpreter::zerolocals_synchronized : Interpreter::zerolocals);
    if (entry_point == NULL) {
      entry_point = generate_normal_entry(synchronized);
    }
  }

  return entry_point;
}

@ -213,31 +213,6 @@ void InterpreterOopMap::iterate_oop(OffsetClosure* oop_closure) const {
}
}


#ifdef ENABLE_ZAP_DEAD_LOCALS

void InterpreterOopMap::iterate_all(OffsetClosure* oop_closure, OffsetClosure* value_closure, OffsetClosure* dead_closure) {
int n = number_of_entries();
int word_index = 0;
uintptr_t value = 0;
uintptr_t mask = 0;
// iterate over entries
for (int i = 0; i < n; i++, mask <<= bits_per_entry) {
// get current word
if (mask == 0) {
value = bit_mask()[word_index++];
mask = 1;
}
// test for dead values & oops, and for live values
if ((value & (mask << dead_bit_number)) != 0) dead_closure->offset_do(i); // call this for all dead values or oops
else if ((value & (mask << oop_bit_number)) != 0) oop_closure->offset_do(i); // call this for all live oops
else value_closure->offset_do(i); // call this for all live values
}
}

#endif


void InterpreterOopMap::print() const {
int n = number_of_entries();
tty->print("oop map for ");
@ -297,12 +272,6 @@ bool OopMapCacheEntry::verify_mask(CellTypeState* vars, CellTypeState* stack, in
bool v2 = vars[i].is_reference() ? true : false;
assert(v1 == v2, "locals oop mask generation error");
if (TraceOopMapGeneration && Verbose) tty->print("%d", v1 ? 1 : 0);
#ifdef ENABLE_ZAP_DEAD_LOCALS
bool v3 = is_dead(i) ? true : false;
bool v4 = !vars[i].is_live() ? true : false;
assert(v3 == v4, "locals live mask generation error");
assert(!(v1 && v3), "dead value marked as oop");
#endif
}

if (TraceOopMapGeneration && Verbose) { tty->cr(); tty->print("Stack (%d): ", stack_top); }
@ -311,12 +280,6 @@ bool OopMapCacheEntry::verify_mask(CellTypeState* vars, CellTypeState* stack, in
bool v2 = stack[j].is_reference() ? true : false;
assert(v1 == v2, "stack oop mask generation error");
if (TraceOopMapGeneration && Verbose) tty->print("%d", v1 ? 1 : 0);
#ifdef ENABLE_ZAP_DEAD_LOCALS
bool v3 = is_dead(max_locals + j) ? true : false;
bool v4 = !stack[j].is_live() ? true : false;
assert(v3 == v4, "stack live mask generation error");
assert(!(v1 && v3), "dead value marked as oop");
#endif
}
if (TraceOopMapGeneration && Verbose) tty->cr();
return true;

@ -141,9 +141,6 @@ class InterpreterOopMap: ResourceObj {

int expression_stack_size() const { return _expression_stack_size; }

#ifdef ENABLE_ZAP_DEAD_LOCALS
void iterate_all(OffsetClosure* oop_closure, OffsetClosure* value_closure, OffsetClosure* dead_closure);
#endif
};

class OopMapCache : public CHeapObj<mtClass> {

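The iterate_all() loop being deleted above walks a packed bitmask: each entry owns bits_per_entry bits inside an array of machine words, and the probe mask is shifted through the current word until it wraps to zero, which forces a reload of the next word. A standalone sketch of that walk follows; the constants and bit positions are illustrative rather than HotSpot's exact encoding.

#include <stdint.h>
#include <stdio.h>

// Illustrative layout: 2 bits per entry, bit 0 = "is oop", bit 1 = "is dead".
enum { bits_per_entry = 2, oop_bit = 0, dead_bit = 1 };

void walk(const uintptr_t* words, int n) {
  int word_index = 0;
  uintptr_t value = 0;
  uintptr_t mask = 0;                  // 0 forces a load of the first word
  for (int i = 0; i < n; i++, mask <<= bits_per_entry) {
    if (mask == 0) {                   // shifted past the end of the word
      value = words[word_index++];
      mask = 1;
    }
    if (value & (mask << dead_bit))      printf("%d: dead\n", i);
    else if (value & (mask << oop_bit))  printf("%d: oop\n", i);
    else                                 printf("%d: value\n", i);
  }
}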
@ -412,17 +412,6 @@ void TemplateInterpreterGenerator::generate_all() {
method_entry(java_lang_math_pow )
method_entry(java_lang_ref_reference_get)

if (UseCRC32Intrinsics) {
method_entry(java_util_zip_CRC32_update)
method_entry(java_util_zip_CRC32_updateBytes)
method_entry(java_util_zip_CRC32_updateByteBuffer)
}

method_entry(java_lang_Float_intBitsToFloat);
method_entry(java_lang_Float_floatToRawIntBits);
method_entry(java_lang_Double_longBitsToDouble);
method_entry(java_lang_Double_doubleToRawLongBits);

initialize_method_handle_entries();

// all native method kinds (must be one contiguous block)
@ -431,6 +420,22 @@ void TemplateInterpreterGenerator::generate_all() {
method_entry(native_synchronized)
Interpreter::_native_entry_end = Interpreter::code()->code_end();

if (UseCRC32Intrinsics) {
method_entry(java_util_zip_CRC32_update)
method_entry(java_util_zip_CRC32_updateBytes)
method_entry(java_util_zip_CRC32_updateByteBuffer)
}

if (UseCRC32CIntrinsics) {
method_entry(java_util_zip_CRC32C_updateBytes)
method_entry(java_util_zip_CRC32C_updateDirectByteBuffer)
}

method_entry(java_lang_Float_intBitsToFloat);
method_entry(java_lang_Float_floatToRawIntBits);
method_entry(java_lang_Double_longBitsToDouble);
method_entry(java_lang_Double_doubleToRawLongBits);

#undef method_entry

// Bytecodes

@ -358,6 +358,8 @@ void Block::dump(const PhaseCFG* cfg) const {
PhaseCFG::PhaseCFG(Arena* arena, RootNode* root, Matcher& matcher)
: Phase(CFG)
, _block_arena(arena)
, _regalloc(NULL)
, _scheduling_for_pressure(false)
, _root(root)
, _matcher(matcher)
, _node_to_block_mapping(arena)

@ -37,6 +37,7 @@ class MachCallNode;
class Matcher;
class RootNode;
class VectorSet;
class PhaseChaitin;
struct Tarjan;

//------------------------------Block_Array------------------------------------
@ -383,6 +384,12 @@ class PhaseCFG : public Phase {
// Arena for the blocks to be stored in
Arena* _block_arena;

// Info used for scheduling
PhaseChaitin* _regalloc;

// Register pressure heuristic used?
bool _scheduling_for_pressure;

// The matcher for this compilation
Matcher& _matcher;

@ -433,12 +440,14 @@ class PhaseCFG : public Phase {
// to late. Helper for schedule_late.
Block* hoist_to_cheaper_block(Block* LCA, Block* early, Node* self);

bool schedule_local(Block* block, GrowableArray<int>& ready_cnt, VectorSet& next_call);
bool schedule_local(Block* block, GrowableArray<int>& ready_cnt, VectorSet& next_call, intptr_t* recalc_pressure_nodes);
void set_next_call(Block* block, Node* n, VectorSet& next_call);
void needed_for_next_call(Block* block, Node* this_call, VectorSet& next_call);

// Perform basic-block local scheduling
Node* select(Block* block, Node_List& worklist, GrowableArray<int>& ready_cnt, VectorSet& next_call, uint sched_slot);
Node* select(Block* block, Node_List& worklist, GrowableArray<int>& ready_cnt, VectorSet& next_call, uint sched_slot,
intptr_t* recalc_pressure_nodes);
void adjust_register_pressure(Node* n, Block* block, intptr_t *recalc_pressure_nodes, bool finalize_mode);

// Schedule a call next in the block
uint sched_call(Block* block, uint node_cnt, Node_List& worklist, GrowableArray<int>& ready_cnt, MachCallNode* mcall, VectorSet& next_call);

@ -114,7 +114,7 @@ bool InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_method,
CompileTask::print_inline_indent(inline_level());
tty->print_cr("Inlined method is hot: ");
}
set_msg("force inline by CompilerOracle");
set_msg("force inline by CompileCommand");
_forced_inline = true;
return true;
}
@ -223,12 +223,12 @@ bool InlineTree::should_not_inline(ciMethod *callee_method,

// ignore heuristic controls on inlining
if (callee_method->should_inline()) {
set_msg("force inline by CompilerOracle");
set_msg("force inline by CompileCommand");
return false;
}

if (callee_method->should_not_inline()) {
set_msg("disallowed by CompilerOracle");
set_msg("disallowed by CompileCommand");
return true;
}

@ -470,11 +470,6 @@ bool pass_initial_checks(ciMethod* caller_method, int caller_bci, ciMethod* call
}
}
}
// We will attempt to see if a class/field/etc got properly loaded. If it
// did not, it may attempt to throw an exception during our probing. Catch
// and ignore such exceptions and do not attempt to compile the method.
if( callee_method->should_exclude() ) return false;

return true;
}

@ -69,22 +69,6 @@
develop(bool, StressGCM, false, \
"Randomize instruction scheduling in GCM") \
\
notproduct(intx, CompileZapFirst, 0, \
"If +ZapDeadCompiledLocals, " \
"skip this many before compiling in zap calls") \
\
notproduct(intx, CompileZapLast, -1, \
"If +ZapDeadCompiledLocals, " \
"compile this many after skipping (incl. skip count, -1 = all)") \
\
notproduct(intx, ZapDeadCompiledLocalsFirst, 0, \
"If +ZapDeadCompiledLocals, " \
"skip this many before really doing it") \
\
notproduct(intx, ZapDeadCompiledLocalsLast, -1, \
"If +ZapDeadCompiledLocals, " \
"do this many after skipping (incl. skip count, -1 = all)") \
\
develop(intx, OptoPrologueNops, 0, \
"Insert this many extra nop instructions " \
"in the prologue of every nmethod") \
@ -306,6 +290,9 @@
product_pd(bool, OptoScheduling, \
"Instruction Scheduling after register allocation") \
\
product_pd(bool, OptoRegScheduling, \
"Instruction Scheduling before register allocation for pressure") \
\
product(bool, PartialPeelLoop, true, \
"Partial peel (rotate) loops") \
\

@ -907,6 +907,18 @@ public:

// Convenience for initialization->maybe_set_complete(phase)
bool maybe_set_complete(PhaseGVN* phase);

// Return true if the allocation does not escape the thread; that is,
// its escape state is NoEscape or ArgEscape.
// InitializeNode._does_not_escape is true when the allocation's escape
// state is NoEscape or ArgEscape. If the allocation's InitializeNode is
// NULL, check the AllocateNode._is_non_escaping flag, which is true
// only when the escape state is NoEscape.
bool does_not_escape_thread() {
InitializeNode* init = NULL;
return _is_non_escaping || (((init = initialization()) != NULL) && init->does_not_escape());
}
};

//------------------------------AllocateArray---------------------------------

@ -191,7 +191,7 @@ uint LiveRangeMap::find_const(uint lrg) const {
return next;
}

PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher)
PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher, bool scheduling_info_generated)
: PhaseRegAlloc(unique, cfg, matcher,
#ifndef PRODUCT
print_chaitin_statistics
@ -205,6 +205,11 @@ PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher)
, _spilled_twice(Thread::current()->resource_area())
, _lo_degree(0), _lo_stk_degree(0), _hi_degree(0), _simplified(0)
, _oldphi(unique)
, _scheduling_info_generated(scheduling_info_generated)
, _sched_int_pressure(0, INTPRESSURE)
, _sched_float_pressure(0, FLOATPRESSURE)
, _scratch_int_pressure(0, INTPRESSURE)
, _scratch_float_pressure(0, FLOATPRESSURE)
#ifndef PRODUCT
, _trace_spilling(TraceSpilling || C->method_has_option("TraceSpilling"))
#endif
@ -350,7 +355,7 @@ void PhaseChaitin::Register_Allocate() {
// all copy-related live ranges low and then using the max copy-related
// live range as a cut-off for LIVE and the IFG. In other words, I can
// build a subset of LIVE and IFG just for copies.
PhaseLive live(_cfg, _lrg_map.names(), &live_arena);
PhaseLive live(_cfg, _lrg_map.names(), &live_arena, false);

// Need IFG for coalescing and coloring
PhaseIFG ifg(&live_arena);
@ -690,6 +695,29 @@ void PhaseChaitin::de_ssa() {
_lrg_map.reset_uf_map(lr_counter);
}

void PhaseChaitin::mark_ssa() {
// Use ssa names to populate the live range maps or if no mask
// is available, use the 0 entry.
uint max_idx = 0;
for ( uint i = 0; i < _cfg.number_of_blocks(); i++ ) {
Block* block = _cfg.get_block(i);
uint cnt = block->number_of_nodes();

// Handle all the normal Nodes in the block
for ( uint j = 0; j < cnt; j++ ) {
Node *n = block->get_node(j);
// Pre-color to the zero live range, or pick virtual register
const RegMask &rm = n->out_RegMask();
_lrg_map.map(n->_idx, rm.is_NotEmpty() ? n->_idx : 0);
max_idx = (n->_idx > max_idx) ? n->_idx : max_idx;
}
}
_lrg_map.set_max_lrg_id(max_idx+1);

// Reset the Union-Find mapping to be identity
_lrg_map.reset_uf_map(max_idx+1);
}


// Gather LiveRanGe information, including register masks. Modification of
// cisc spillable in_RegMasks should not be done before AggressiveCoalesce.
@ -707,7 +735,9 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
for (uint j = 1; j < block->number_of_nodes(); j++) {
Node* n = block->get_node(j);
uint input_edge_start =1; // Skip control most nodes
bool is_machine_node = false;
if (n->is_Mach()) {
is_machine_node = true;
input_edge_start = n->as_Mach()->oper_input_base();
}
uint idx = n->is_Copy();
@ -929,6 +959,7 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
// Convert operand number to edge index number
inp = n->as_Mach()->operand_index(inp);
}

// Prepare register mask for each input
for( uint k = input_edge_start; k < cnt; k++ ) {
uint vreg = _lrg_map.live_range_id(n->in(k));
@ -948,6 +979,12 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
n->as_Mach()->use_cisc_RegMask();
}

if (is_machine_node && _scheduling_info_generated) {
MachNode* cur_node = n->as_Mach();
// this is cleaned up by register allocation
if (k >= cur_node->num_opnds()) continue;
}

LRG &lrg = lrgs(vreg);
// // Testing for floating point code shape
// Node *test = n->in(k);
@ -989,7 +1026,7 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
// double can interfere with TWO aligned pairs, or effectively
// FOUR registers!
#ifdef ASSERT
if (is_vect) {
if (is_vect && !_scheduling_info_generated) {
if (lrg.num_regs() != 0) {
assert(lrgmask.is_aligned_sets(lrg.num_regs()), "vector should be aligned");
assert(!lrg._fat_proj, "sanity");

@ -399,7 +399,6 @@ class PhaseChaitin : public PhaseRegAlloc {
int _trip_cnt;
int _alternate;

LRG &lrgs(uint idx) const { return _ifg->lrgs(idx); }
PhaseLive *_live; // Liveness, used in the interference graph
PhaseIFG *_ifg; // Interference graph (for original chunk)
Node_List **_lrg_nodes; // Array of node; lists for lrgs which spill
@ -464,16 +463,28 @@ class PhaseChaitin : public PhaseRegAlloc {
#endif

public:
PhaseChaitin( uint unique, PhaseCFG &cfg, Matcher &matcher );
PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher, bool track_liveout_pressure);
~PhaseChaitin() {}

LiveRangeMap _lrg_map;

LRG &lrgs(uint idx) const { return _ifg->lrgs(idx); }

// Do all the real work of allocate
void Register_Allocate();

float high_frequency_lrg() const { return _high_frequency_lrg; }

// Used when scheduling info generated, not in general register allocation
bool _scheduling_info_generated;

void set_ifg(PhaseIFG &ifg) { _ifg = &ifg; }
void set_live(PhaseLive &live) { _live = &live; }
PhaseLive* get_live() { return _live; }

// Populate the live range maps with ssa info for scheduling
void mark_ssa();

#ifndef PRODUCT
bool trace_spilling() const { return _trace_spilling; }
#endif
@ -516,7 +527,11 @@ private:
uint _final_pressure;

// number of live ranges that constitute high register pressure
const uint _high_pressure_limit;
uint _high_pressure_limit;

// initial pressure observed
uint _start_pressure;

public:

// lower the register pressure and look for a low to high pressure
@ -537,6 +552,14 @@ private:
}
}

void init(int limit) {
_current_pressure = 0;
_high_pressure_index = 0;
_final_pressure = 0;
_high_pressure_limit = limit;
_start_pressure = 0;
}

uint high_pressure_index() const {
return _high_pressure_index;
}
@ -545,6 +568,10 @@ private:
return _final_pressure;
}

uint start_pressure() const {
return _start_pressure;
}

uint current_pressure() const {
return _current_pressure;
}
@ -561,6 +588,15 @@ private:
_high_pressure_index = 0;
}

void set_start_pressure(int value) {
_start_pressure = value;
_final_pressure = value;
}

void set_current_pressure(int value) {
_current_pressure = value;
}

void check_pressure_at_fatproj(uint fatproj_location, RegMask& fatproj_mask) {
// this pressure is only valid at this instruction, i.e. we don't need to lower
// the register pressure since the fat proj was never live before (going backwards)
@ -579,12 +615,11 @@ private:
Pressure(uint high_pressure_index, uint high_pressure_limit)
: _current_pressure(0)
, _high_pressure_index(high_pressure_index)
, _final_pressure(0)
, _high_pressure_limit(high_pressure_limit)
, _final_pressure(0) {}
, _start_pressure(0) {}
};

void lower_pressure(Block* b, uint location, LRG& lrg, IndexSet* liveout, Pressure& int_pressure, Pressure& float_pressure);
void raise_pressure(Block* b, LRG& lrg, Pressure& int_pressure, Pressure& float_pressure);
void check_for_high_pressure_transition_at_fatproj(uint& block_reg_pressure, uint location, LRG& lrg, Pressure& pressure, const int op_regtype);
void add_input_to_liveout(Block* b, Node* n, IndexSet* liveout, double cost, Pressure& int_pressure, Pressure& float_pressure);
void compute_initial_block_pressure(Block* b, IndexSet* liveout, Pressure& int_pressure, Pressure& float_pressure, double cost);
@ -600,10 +635,25 @@ private:
// acceptable register sets do not overlap, then they do not interfere.
uint build_ifg_physical( ResourceArea *a );

public:
// Gather LiveRanGe information, including register masks and base pointer/
// derived pointer relationships.
void gather_lrg_masks( bool mod_cisc_masks );

// user visible pressure variables for scheduling
Pressure _sched_int_pressure;
Pressure _sched_float_pressure;
Pressure _scratch_int_pressure;
Pressure _scratch_float_pressure;

// Pressure functions for user context
void lower_pressure(Block* b, uint location, LRG& lrg, IndexSet* liveout, Pressure& int_pressure, Pressure& float_pressure);
void raise_pressure(Block* b, LRG& lrg, Pressure& int_pressure, Pressure& float_pressure);
void compute_entry_block_pressure(Block* b);
void compute_exit_block_pressure(Block* b);
void print_pressure_info(Pressure& pressure, const char *str);

private:
// Force the bases of derived pointers to be alive at GC points.
bool stretch_base_pointer_live_ranges( ResourceArea *a );
// Helper to stretch above; recursively discover the base Node for

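The new Pressure members follow a simple lifecycle: init() resets the counters for a block, set_start_pressure() seeds both the start value and the running maximum, raising and lowering track the current count, and final_pressure() reports the high-water mark. Below is a standalone, simplified sketch of that lifecycle, not the real class (which also tracks the high-pressure index and fat-proj cases).

#include <assert.h>

// Hedged sketch of the Pressure bookkeeping shown above.
struct PressureSketch {
  unsigned current, final_max, start, limit;

  void init(unsigned l) { current = final_max = start = 0; limit = l; }

  void set_start(unsigned v) { start = v; final_max = v; current = v; }

  void raise() {                       // a live range becomes live
    if (++current > final_max) final_max = current;
  }
  void lower() { --current; }          // a live range dies

  bool over_limit() const { return current > limit; }
};

int main() {
  PressureSketch p;
  p.init(8);          // e.g. eight allocatable registers in this class
  p.set_start(3);     // three ranges live into the block
  p.raise(); p.raise();
  p.lower();
  assert(p.final_max == 5 && p.current == 4);
  return 0;
}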
@ -2336,7 +2336,7 @@ void Compile::Code_Gen() {
debug_only( cfg.verify(); )
}

PhaseChaitin regalloc(unique(), cfg, matcher);
PhaseChaitin regalloc(unique(), cfg, matcher, false);
_regalloc = &regalloc;
{
TracePhase tp("regalloc", &timers[_t_registerAllocation]);

@ -1208,12 +1208,6 @@ class Compile : public Phase {
// Compute the name of old_SP. See <arch>.ad for frame layout.
OptoReg::Name compute_old_SP();

#ifdef ENABLE_ZAP_DEAD_LOCALS
static bool is_node_getting_a_safepoint(Node*);
void Insert_zap_nodes();
Node* call_zap_node(MachSafePointNode* n, int block_no);
#endif

private:
// Phase control:
void Init(int aliaslevel); // Prepare for a single compilation

@ -34,6 +34,7 @@
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/chaitin.hpp"
#include "runtime/deoptimization.hpp"

// Portions of code courtesy of Clifford Click
@ -1363,6 +1364,44 @@ void PhaseCFG::global_code_motion() {
}
}

bool block_size_threshold_ok = false;
intptr_t *recalc_pressure_nodes = NULL;
if (OptoRegScheduling) {
for (uint i = 0; i < number_of_blocks(); i++) {
Block* block = get_block(i);
if (block->number_of_nodes() > 10) {
block_size_threshold_ok = true;
break;
}
}
}

// Register pressure scheduling kicks in only when the scheduler is enabled
// and at least one block is large enough to be worth scheduling.
PhaseChaitin regalloc(C->unique(), *this, _matcher, true);
ResourceArea live_arena; // Arena for liveness
ResourceMark rm_live(&live_arena);
PhaseLive live(*this, regalloc._lrg_map.names(), &live_arena, true);
PhaseIFG ifg(&live_arena);
if (OptoRegScheduling && block_size_threshold_ok) {
regalloc.mark_ssa();
Compile::TracePhase tp("computeLive", &timers[_t_computeLive]);
rm_live.reset_to_mark(); // Reclaim working storage
IndexSet::reset_memory(C, &live_arena);
uint node_size = regalloc._lrg_map.max_lrg_id();
ifg.init(node_size); // Empty IFG
regalloc.set_ifg(ifg);
regalloc.set_live(live);
regalloc.gather_lrg_masks(false); // Collect LRG masks
live.compute(node_size); // Compute liveness

recalc_pressure_nodes = NEW_RESOURCE_ARRAY(intptr_t, node_size);
for (uint i = 0; i < node_size; i++) {
recalc_pressure_nodes[i] = 0;
}
}
_regalloc = &regalloc;

#ifndef PRODUCT
if (trace_opto_pipelining()) {
tty->print("\n---- Start Local Scheduling ----\n");
@ -1375,13 +1414,15 @@ void PhaseCFG::global_code_motion() {
visited.Clear();
for (uint i = 0; i < number_of_blocks(); i++) {
Block* block = get_block(i);
if (!schedule_local(block, ready_cnt, visited)) {
if (!schedule_local(block, ready_cnt, visited, recalc_pressure_nodes)) {
if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
C->record_method_not_compilable("local schedule failed");
}
_regalloc = NULL;
return;
}
}
_regalloc = NULL;

// If we inserted any instructions between a Call and its CatchNode,
// clone the instructions on all paths below the Catch.

@ -439,8 +439,10 @@ void PhaseChaitin::lower_pressure(Block* b, uint location, LRG& lrg, IndexSet* l
}
}
}
if (_scheduling_info_generated == false) {
assert(int_pressure.current_pressure() == count_int_pressure(liveout), "the int pressure is incorrect");
assert(float_pressure.current_pressure() == count_float_pressure(liveout), "the float pressure is incorrect");
}
}

/* Go to the first non-phi index in a block */
@ -517,6 +519,58 @@ void PhaseChaitin::compute_initial_block_pressure(Block* b, IndexSet* liveout, P
assert(float_pressure.current_pressure() == count_float_pressure(liveout), "the float pressure is incorrect");
}

/*
* Computes the entry register pressure of a block, looking at all live
* ranges in the livein. The register pressure is computed for both float
* and int/pointer registers.
*/
void PhaseChaitin::compute_entry_block_pressure(Block* b) {
IndexSet* livein = _live->livein(b);
IndexSetIterator elements(livein);
uint lid = elements.next();
while (lid != 0) {
LRG& lrg = lrgs(lid);
raise_pressure(b, lrg, _sched_int_pressure, _sched_float_pressure);
lid = elements.next();
}
// Now check phis for locally defined inputs
for (uint j = 0; j < b->number_of_nodes(); j++) {
Node* n = b->get_node(j);
if (n->is_Phi()) {
for (uint k = 1; k < n->req(); k++) {
Node* phi_in = n->in(k);
// Because we are talking about phis, raise register pressure once for each
// instance of a phi to account for a single value
if (_cfg.get_block_for_node(phi_in) == b) {
LRG& lrg = lrgs(phi_in->_idx);
raise_pressure(b, lrg, _sched_int_pressure, _sched_float_pressure);
break;
}
}
}
}
_sched_int_pressure.set_start_pressure(_sched_int_pressure.current_pressure());
_sched_float_pressure.set_start_pressure(_sched_float_pressure.current_pressure());
}

/*
* Computes the exit register pressure of a block, looking at all live
* ranges in the liveout. The register pressure is computed for both float
* and int/pointer registers.
*/
void PhaseChaitin::compute_exit_block_pressure(Block* b) {
IndexSet* livein = _live->live(b);
IndexSetIterator elements(livein);
_sched_int_pressure.set_current_pressure(0);
_sched_float_pressure.set_current_pressure(0);
uint lid = elements.next();
while (lid != 0) {
LRG& lrg = lrgs(lid);
raise_pressure(b, lrg, _sched_int_pressure, _sched_float_pressure);
lid = elements.next();
}
}

/*
* Remove dead node if it's not used.
* We only remove projection nodes if the node "defining" the projection is
@ -737,6 +791,16 @@ void PhaseChaitin::adjust_high_pressure_index(Block* b, uint& block_hrp_index, P
block_hrp_index = i;
}

void PhaseChaitin::print_pressure_info(Pressure& pressure, const char *str) {
if (str != NULL) {
tty->print_cr("# *** %s ***", str);
}
tty->print_cr("# start pressure is = %d", pressure.start_pressure());
tty->print_cr("# max pressure is = %d", pressure.final_pressure());
tty->print_cr("# end pressure is = %d", pressure.current_pressure());
tty->print_cr("#");
}

/* Build an interference graph:
 * That is, if 2 live ranges are simultaneously alive but in their acceptable
 * register sets do not overlap, then they do not interfere. The IFG is built

@ -31,6 +31,7 @@
#include "opto/cfgnode.hpp"
#include "opto/machnode.hpp"
#include "opto/runtime.hpp"
#include "opto/chaitin.hpp"
#include "runtime/sharedRuntime.hpp"

// Optimization - Graph Style
@ -443,7 +444,13 @@ void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allo
// remaining cases (most), choose the instruction with the greatest latency
// (that is, the most number of pseudo-cycles required to the end of the
// routine). If there is a tie, choose the instruction with the most inputs.
Node* PhaseCFG::select(Block* block, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot) {
Node* PhaseCFG::select(
Block* block,
Node_List &worklist,
GrowableArray<int> &ready_cnt,
VectorSet &next_call,
uint sched_slot,
intptr_t* recalc_pressure_nodes) {

// If only a single entry on the stack, use it
uint cnt = worklist.size();
@ -458,6 +465,7 @@ Node* PhaseCFG::select(Block* block, Node_List &worklist, GrowableArray<int> &re
uint score = 0; // Bigger is better
int idx = -1; // Index in worklist
int cand_cnt = 0; // Candidate count
bool block_size_threshold_ok = (block->number_of_nodes() > 10) ? true : false;

for( uint i=0; i<cnt; i++ ) { // Inspect entire worklist
// Order in worklist is used to break ties.
@ -539,6 +547,46 @@ Node* PhaseCFG::select(Block* block, Node_List &worklist, GrowableArray<int> &re
uint n_latency = get_latency_for_node(n);
uint n_score = n->req(); // Many inputs get high score to break ties

if (OptoRegScheduling && block_size_threshold_ok) {
if (recalc_pressure_nodes[n->_idx] == 0x7fff7fff) {
_regalloc->_scratch_int_pressure.init(_regalloc->_sched_int_pressure.high_pressure_limit());
_regalloc->_scratch_float_pressure.init(_regalloc->_sched_float_pressure.high_pressure_limit());
// simulate the notion that we just picked this node to schedule
n->add_flag(Node::Flag_is_scheduled);
// now calculate its effect upon the graph if we did
adjust_register_pressure(n, block, recalc_pressure_nodes, false);
// return its state for finalize in case somebody else wins
n->remove_flag(Node::Flag_is_scheduled);
// now save the two final pressure components of register pressure, limiting pressure calcs to short size
short int_pressure = (short)_regalloc->_scratch_int_pressure.current_pressure();
short float_pressure = (short)_regalloc->_scratch_float_pressure.current_pressure();
recalc_pressure_nodes[n->_idx] = int_pressure;
recalc_pressure_nodes[n->_idx] |= (float_pressure << 16);
}

if (_scheduling_for_pressure) {
latency = n_latency;
if (n_choice != 3) {
// Now evaluate each register pressure component based on threshold in the score.
// In general the defining register type will dominate the score, ergo we will not see register pressure grow on both banks
// on a single instruction, but we might see it shrink on both banks.
// For each use of register that has a register class that is over the high pressure limit, we build n_score up for
// live ranges that terminate on this instruction.
if (_regalloc->_sched_int_pressure.current_pressure() > _regalloc->_sched_int_pressure.high_pressure_limit()) {
short int_pressure = (short)recalc_pressure_nodes[n->_idx];
n_score = (int_pressure < 0) ? ((score + n_score) - int_pressure) : (int_pressure > 0) ? 1 : n_score;
}
if (_regalloc->_sched_float_pressure.current_pressure() > _regalloc->_sched_float_pressure.high_pressure_limit()) {
short float_pressure = (short)(recalc_pressure_nodes[n->_idx] >> 16);
n_score = (float_pressure < 0) ? ((score + n_score) - float_pressure) : (float_pressure > 0) ? 1 : n_score;
}
} else {
// make sure we choose these candidates
score = 0;
}
}
}

// Keep best latency found
cand_cnt++;
if (choice < n_choice ||
@ -562,6 +610,100 @@ Node* PhaseCFG::select(Block* block, Node_List &worklist, GrowableArray<int> &re
return n;
}

//-------------------------adjust_register_pressure----------------------------
void PhaseCFG::adjust_register_pressure(Node* n, Block* block, intptr_t* recalc_pressure_nodes, bool finalize_mode) {
PhaseLive* liveinfo = _regalloc->get_live();
IndexSet* liveout = liveinfo->live(block);
// first adjust the register pressure for the sources
for (uint i = 1; i < n->req(); i++) {
bool lrg_ends = false;
Node *src_n = n->in(i);
if (src_n == NULL) continue;
if (!src_n->is_Mach()) continue;
uint src = _regalloc->_lrg_map.find(src_n);
if (src == 0) continue;
LRG& lrg_src = _regalloc->lrgs(src);
// detect if the live range ends or not
if (liveout->member(src) == false) {
lrg_ends = true;
for (DUIterator_Fast jmax, j = src_n->fast_outs(jmax); j < jmax; j++) {
Node* m = src_n->fast_out(j); // Get user
if (m == n) continue;
if (!m->is_Mach()) continue;
MachNode *mach = m->as_Mach();
bool src_matches = false;
int iop = mach->ideal_Opcode();

switch (iop) {
case Op_StoreB:
case Op_StoreC:
case Op_StoreCM:
case Op_StoreD:
case Op_StoreF:
case Op_StoreI:
case Op_StoreL:
case Op_StoreP:
case Op_StoreN:
case Op_StoreVector:
case Op_StoreNKlass:
for (uint k = 1; k < m->req(); k++) {
Node *in = m->in(k);
if (in == src_n) {
src_matches = true;
break;
}
}
break;

default:
src_matches = true;
break;
}

// If we have a store as our use, ignore the non source operands
if (src_matches == false) continue;

// Mark every unscheduled use which is not n with a recalculation
if ((get_block_for_node(m) == block) && (!m->is_scheduled())) {
if (finalize_mode && !m->is_Phi()) {
recalc_pressure_nodes[m->_idx] = 0x7fff7fff;
}
lrg_ends = false;
}
}
}
// if none, this live range ends and we can adjust register pressure
if (lrg_ends) {
if (finalize_mode) {
_regalloc->lower_pressure(block, 0, lrg_src, NULL, _regalloc->_sched_int_pressure, _regalloc->_sched_float_pressure);
} else {
_regalloc->lower_pressure(block, 0, lrg_src, NULL, _regalloc->_scratch_int_pressure, _regalloc->_scratch_float_pressure);
}
}
}

// now add the register pressure from the dest and evaluate which heuristic we should use:
// 1.) The default, latency scheduling
// 2.) Register pressure scheduling based on the high pressure limit threshold for int or float register stacks
uint dst = _regalloc->_lrg_map.find(n);
if (dst != 0) {
LRG& lrg_dst = _regalloc->lrgs(dst);
if (finalize_mode) {
_regalloc->raise_pressure(block, lrg_dst, _regalloc->_sched_int_pressure, _regalloc->_sched_float_pressure);
// check to see if we fall over the register pressure cliff here
if (_regalloc->_sched_int_pressure.current_pressure() > _regalloc->_sched_int_pressure.high_pressure_limit()) {
_scheduling_for_pressure = true;
} else if (_regalloc->_sched_float_pressure.current_pressure() > _regalloc->_sched_float_pressure.high_pressure_limit()) {
_scheduling_for_pressure = true;
} else {
// restore latency scheduling mode
_scheduling_for_pressure = false;
}
} else {
_regalloc->raise_pressure(block, lrg_dst, _regalloc->_scratch_int_pressure, _regalloc->_scratch_float_pressure);
}
}
}

//------------------------------set_next_call----------------------------------
void PhaseCFG::set_next_call(Block* block, Node* n, VectorSet& next_call) {
@ -644,7 +786,7 @@ uint PhaseCFG::sched_call(Block* block, uint node_cnt, Node_List& worklist, Grow
continue;
}
if( m->is_Phi() ) continue;
int m_cnt = ready_cnt.at(m->_idx)-1;
int m_cnt = ready_cnt.at(m->_idx) - 1;
ready_cnt.at_put(m->_idx, m_cnt);
if( m_cnt == 0 )
worklist.push(m);
@ -711,7 +853,7 @@ uint PhaseCFG::sched_call(Block* block, uint node_cnt, Node_List& worklist, Grow

//------------------------------schedule_local---------------------------------
// Topological sort within a block. Someday become a real scheduler.
bool PhaseCFG::schedule_local(Block* block, GrowableArray<int>& ready_cnt, VectorSet& next_call) {
bool PhaseCFG::schedule_local(Block* block, GrowableArray<int>& ready_cnt, VectorSet& next_call, intptr_t *recalc_pressure_nodes) {
// Already "sorted" are the block start Node (as the first entry), and
// the block-ending Node and any trailing control projections. We leave
// these alone. PhiNodes and ParmNodes are made to follow the block start
@ -733,10 +875,24 @@ bool PhaseCFG::schedule_local(Block* block, GrowableArray<int>& ready_cnt, Vecto
return true;
}

bool block_size_threshold_ok = (block->number_of_nodes() > 10) ? true : false;

// We track the uses of local definitions as input dependences so that
// we know when a given instruction is available to be scheduled.
uint i;
if (OptoRegScheduling && block_size_threshold_ok) {
for (i = 1; i < block->number_of_nodes(); i++) { // setup nodes for pressure calc
Node *n = block->get_node(i);
n->remove_flag(Node::Flag_is_scheduled);
if (!n->is_Phi()) {
recalc_pressure_nodes[n->_idx] = 0x7fff7fff;
}
}
}

// Move PhiNodes and ParmNodes from 1 to cnt up to the start
uint node_cnt = block->end_idx();
uint phi_cnt = 1;
uint i;
for( i = 1; i<node_cnt; i++ ) { // Scan for Phi
Node *n = block->get_node(i);
if( n->is_Phi() || // Found a PhiNode or ParmNode
@ -744,6 +900,10 @@ bool PhaseCFG::schedule_local(Block* block, GrowableArray<int>& ready_cnt, Vecto
// Move guy at 'phi_cnt' to the end; makes a hole at phi_cnt
block->map_node(block->get_node(phi_cnt), i);
block->map_node(n, phi_cnt++); // swap Phi/Parm up front
if (OptoRegScheduling && block_size_threshold_ok) {
// mark n as scheduled
n->add_flag(Node::Flag_is_scheduled);
}
} else { // All others
// Count block-local inputs to 'n'
uint cnt = n->len(); // Input count
@ -791,12 +951,18 @@ bool PhaseCFG::schedule_local(Block* block, GrowableArray<int>& ready_cnt, Vecto

// All the prescheduled guys do not hold back internal nodes
uint i3;
for(i3 = 0; i3<phi_cnt; i3++ ) { // For all pre-scheduled
for (i3 = 0; i3 < phi_cnt; i3++) { // For all pre-scheduled
Node *n = block->get_node(i3); // Get pre-scheduled
for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
Node* m = n->fast_out(j);
if (get_block_for_node(m) == block) { // Local-block user
int m_cnt = ready_cnt.at(m->_idx)-1;
if (OptoRegScheduling && block_size_threshold_ok) {
// mark m as scheduled
if (m_cnt < 0) {
m->add_flag(Node::Flag_is_scheduled);
}
}
ready_cnt.at_put(m->_idx, m_cnt); // Fix ready count
}
}
@ -827,6 +993,18 @@ bool PhaseCFG::schedule_local(Block* block, GrowableArray<int>& ready_cnt, Vecto
worklist.push(d);
}

if (OptoRegScheduling && block_size_threshold_ok) {
// To stage register pressure calculations we need to examine the live set variables
// breaking them up by register class to compartmentalize the calculations.
uint float_pressure = Matcher::float_pressure(FLOATPRESSURE);
_regalloc->_sched_int_pressure.init(INTPRESSURE);
_regalloc->_sched_float_pressure.init(float_pressure);
_regalloc->_scratch_int_pressure.init(INTPRESSURE);
_regalloc->_scratch_float_pressure.init(float_pressure);

_regalloc->compute_entry_block_pressure(block);
}

// Warm up the 'next_call' heuristic bits
needed_for_next_call(block, block->head(), next_call);

@ -858,9 +1036,18 @@ bool PhaseCFG::schedule_local(Block* block, GrowableArray<int>& ready_cnt, Vecto
#endif

// Select and pop a ready guy from worklist
Node* n = select(block, worklist, ready_cnt, next_call, phi_cnt);
Node* n = select(block, worklist, ready_cnt, next_call, phi_cnt, recalc_pressure_nodes);
block->map_node(n, phi_cnt++); // Schedule him next

if (OptoRegScheduling && block_size_threshold_ok) {
n->add_flag(Node::Flag_is_scheduled);

// Now adjust the register pressure with the node we selected
if (!n->is_Phi()) {
adjust_register_pressure(n, block, recalc_pressure_nodes, true);
}
}

#ifndef PRODUCT
if (trace_opto_pipelining()) {
tty->print("# select %d: %s", n->_idx, n->Name());
@ -906,7 +1093,7 @@ bool PhaseCFG::schedule_local(Block* block, GrowableArray<int>& ready_cnt, Vecto
assert(m->is_MachProj() && n->is_Mach() && n->as_Mach()->has_call(), "unexpected node types");
continue;
}
int m_cnt = ready_cnt.at(m->_idx)-1;
int m_cnt = ready_cnt.at(m->_idx) - 1;
ready_cnt.at_put(m->_idx, m_cnt);
if( m_cnt == 0 )
worklist.push(m);
@ -925,6 +1112,12 @@ bool PhaseCFG::schedule_local(Block* block, GrowableArray<int>& ready_cnt, Vecto
return false;
}

if (OptoRegScheduling && block_size_threshold_ok) {
_regalloc->compute_exit_block_pressure(block);
block->_reg_pressure = _regalloc->_sched_int_pressure.final_pressure();
block->_freg_pressure = _regalloc->_sched_float_pressure.final_pressure();
}

#ifndef PRODUCT
if (trace_opto_pipelining()) {
tty->print_cr("#");
@ -933,11 +1126,17 @@ bool PhaseCFG::schedule_local(Block* block, GrowableArray<int>& ready_cnt, Vecto
tty->print("# ");
block->get_node(i)->fast_dump();
}
tty->print_cr("# ");

if (OptoRegScheduling && block_size_threshold_ok) {
tty->print_cr("# pressure info : %d", block->_pre_order);
_regalloc->print_pressure_info(_regalloc->_sched_int_pressure, "int register info");
_regalloc->print_pressure_info(_regalloc->_sched_float_pressure, "float register info");
}
tty->cr();
}
#endif


return true;
}

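select() caches each candidate's simulated pressure as two 16-bit halves packed into one recalc_pressure_nodes slot, with 0x7fff7fff serving as the "stale, recalculate" sentinel. The standalone sketch below shows that encoding; it is illustrative only (the real code stores the low half by plain assignment and ORs in the shifted float half, as shown above).

#include <assert.h>
#include <stdint.h>

// Low 16 bits: int-register pressure; next 16 bits: float-register
// pressure; 0x7fff7fff marks an entry that must be recomputed.
const intptr_t kStale = 0x7fff7fff;

intptr_t pack(short int_pressure, short float_pressure) {
  intptr_t v = (uint16_t)int_pressure;   // mask to avoid sign-extension
  v |= ((intptr_t)(uint16_t)float_pressure) << 16;
  return v;
}

short unpack_int(intptr_t v)   { return (short)v; }
short unpack_float(intptr_t v) { return (short)(v >> 16); }

int main() {
  intptr_t v = pack(5, -2);              // int bank grows, float bank shrinks
  assert(unpack_int(v) == 5);
  assert(unpack_float(v) == -2);
  assert(v != kStale);                   // this value is not the sentinel
  return 0;
}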
@ -41,7 +41,14 @@
// block is put on the worklist.
// The locally live-in stuff is computed once and added to predecessor
// live-out sets. This separate compilation is done in the outer loop below.
PhaseLive::PhaseLive( const PhaseCFG &cfg, const LRG_List &names, Arena *arena ) : Phase(LIVE), _cfg(cfg), _names(names), _arena(arena), _live(0) {
PhaseLive::PhaseLive(const PhaseCFG &cfg, const LRG_List &names, Arena *arena, bool keep_deltas)
: Phase(LIVE),
_cfg(cfg),
_names(names),
_arena(arena),
_live(0),
_livein(0),
_keep_deltas(keep_deltas) {
}

void PhaseLive::compute(uint maxlrg) {
@ -56,6 +63,13 @@ void PhaseLive::compute(uint maxlrg) {
_live[i].initialize(_maxlrg);
}

if (_keep_deltas) {
_livein = (IndexSet*)_arena->Amalloc(sizeof(IndexSet) * _cfg.number_of_blocks());
for (i = 0; i < _cfg.number_of_blocks(); i++) {
_livein[i].initialize(_maxlrg);
}
}

// Init the sparse arrays for delta-sets.
ResourceMark rm; // Nuke temp storage on exit

@ -124,7 +138,10 @@ void PhaseLive::compute(uint maxlrg) {

// PhiNode uses go in the live-out set of prior blocks.
for (uint k = i; k > 0; k--) {
add_liveout(p, _names.at(block->get_node(k-1)->in(l)->_idx), first_pass);
Node *phi = block->get_node(k - 1);
if (l < phi->req()) {
add_liveout(p, _names.at(phi->in(l)->_idx), first_pass);
}
}
}
freeset(block);
@ -200,8 +217,11 @@ IndexSet *PhaseLive::getfreeset( ) {
}

// Free an IndexSet from a block.
void PhaseLive::freeset( const Block *p ) {
void PhaseLive::freeset( Block *p ) {
IndexSet *f = _deltas[p->_pre_order-1];
if ( _keep_deltas ) {
add_livein(p, f);
}
f->set_next(_free_IndexSet);
_free_IndexSet = f; // Drop onto free list
_deltas[p->_pre_order-1] = NULL;
@ -249,10 +269,23 @@ void PhaseLive::add_liveout( Block *p, IndexSet *lo, VectorSet &first_pass ) {
}
}

// Add a vector of live-in values to a given block's live-in set.
void PhaseLive::add_livein(Block *p, IndexSet *lo) {
IndexSet *livein = &_livein[p->_pre_order-1];
IndexSetIterator elements(lo);
uint r;
while ((r = elements.next()) != 0) {
livein->insert(r); // Then add to live-in set
}
}

#ifndef PRODUCT
// Dump the live-out set for a block
void PhaseLive::dump( const Block *b ) const {
tty->print("Block %d: ",b->_pre_order);
if ( _keep_deltas ) {
tty->print("LiveIn: "); _livein[b->_pre_order-1].dump();
}
tty->print("LiveOut: "); _live[b->_pre_order-1].dump();
uint cnt = b->number_of_nodes();
for( uint i=0; i<cnt; i++ ) {

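PhaseLive solves the classic backward dataflow equations live_in(B) = use(B) ∪ (live_out(B) − def(B)) and live_out(B) = ∪ live_in(S) over successors S, iterating until a fixed point. The sketch below is a minimal worklist version over plain integer sets; it is illustrative only, since the real pass works on arena-allocated IndexSets and propagates per-block delta sets instead of whole sets.

#include <set>
#include <vector>
#include <deque>

// Minimal backward liveness over a CFG; blocks indexed 0..n-1.
struct BlockInfo {
  std::set<int> use, def;          // locally read / locally written ranges
  std::vector<int> succs;          // successor block indices
  std::set<int> live_in, live_out;
};

void compute_liveness(std::vector<BlockInfo>& cfg) {
  std::deque<int> worklist;
  for (int i = 0; i < (int)cfg.size(); i++) worklist.push_back(i);
  while (!worklist.empty()) {
    int b = worklist.front(); worklist.pop_front();
    BlockInfo& blk = cfg[b];
    // live_out is the union of the successors' live_in sets
    std::set<int> out;
    for (size_t s = 0; s < blk.succs.size(); s++) {
      const std::set<int>& in = cfg[blk.succs[s]].live_in;
      out.insert(in.begin(), in.end());
    }
    // live_in = use ∪ (live_out − def)
    std::set<int> in(blk.use);
    for (std::set<int>::iterator it = out.begin(); it != out.end(); ++it)
      if (!blk.def.count(*it)) in.insert(*it);
    blk.live_out.swap(out);
    if (in != blk.live_in) {       // changed: revisit all predecessors
      blk.live_in.swap(in);
      for (int p = 0; p < (int)cfg.size(); p++)
        for (size_t s = 0; s < cfg[p].succs.size(); s++)
          if (cfg[p].succs[s] == b) worklist.push_back(p);
    }
  }
}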
@ -46,7 +46,8 @@ typedef GrowableArray<uint> LRG_List;
class PhaseLive : public Phase {
// Array of Sets of values live at the start of a block.
// Indexed by block pre-order number.
IndexSet *_live;
IndexSet *_live; // live out
IndexSet *_livein; // live in

// Array of Sets of values defined locally in the block
// Indexed by block pre-order number.
@ -62,15 +63,17 @@ class PhaseLive : public Phase {
const LRG_List &_names; // Mapping from Nodes to live ranges
uint _maxlrg; // Largest live-range number
Arena *_arena;
bool _keep_deltas; // Retain live in information

IndexSet *getset( Block *p );
IndexSet *getfreeset( );
void freeset( const Block *p );
void freeset( Block *p );
void add_liveout( Block *p, uint r, VectorSet &first_pass );
void add_liveout( Block *p, IndexSet *lo, VectorSet &first_pass );
void add_livein( Block *p, IndexSet *lo );

public:
PhaseLive(const PhaseCFG &cfg, const LRG_List &names, Arena *arena);
PhaseLive(const PhaseCFG &cfg, const LRG_List &names, Arena *arena, bool keep_deltas);
~PhaseLive() {}
// Compute liveness info
void compute(uint maxlrg);
@ -79,6 +82,7 @@ public:

// Return the live-out set for this block
IndexSet *live( const Block * b ) { return &_live[b->_pre_order-1]; }
IndexSet *livein( const Block * b ) { return &_livein[b->_pre_order - 1]; }

#ifndef PRODUCT
void dump( const Block *b ) const;

@ -290,6 +290,7 @@ public:
if (phi() == NULL) {
return NULL;
}
assert(phi()->is_Phi(), "should be PhiNode");
Node *ln = phi()->in(0);
if (ln->is_CountedLoop() && ln->as_CountedLoop()->loopexit() == this) {
return (CountedLoopNode*)ln;

@ -447,21 +447,21 @@ Node *PhaseIdealLoop::remix_address_expressions( Node *n ) {
}

// Replace (I1 +p (I2 + V)) with ((I1 +p I2) +p V)
if( n2_loop != n_loop && n3_loop == n_loop ) {
if( n->in(3)->Opcode() == Op_AddI ) {
if (n2_loop != n_loop && n3_loop == n_loop) {
if (n->in(3)->Opcode() == Op_AddX) {
Node *V = n->in(3)->in(1);
Node *I = n->in(3)->in(2);
if( is_member(n_loop,get_ctrl(V)) ) {
if (is_member(n_loop,get_ctrl(V))) {
} else {
Node *tmp = V; V = I; I = tmp;
}
if( !is_member(n_loop,get_ctrl(I)) ) {
Node *add1 = new AddPNode( n->in(1), n->in(2), I );
if (!is_member(n_loop,get_ctrl(I))) {
Node *add1 = new AddPNode(n->in(1), n->in(2), I);
// Stuff new AddP in the loop preheader
register_new_node( add1, n_loop->_head->in(LoopNode::EntryControl) );
Node *add2 = new AddPNode( n->in(1), add1, V );
register_new_node( add2, n_ctrl );
_igvn.replace_node( n, add2 );
register_new_node(add1, n_loop->_head->in(LoopNode::EntryControl));
Node *add2 = new AddPNode(n->in(1), add1, V);
register_new_node(add2, n_ctrl);
_igvn.replace_node(n, add2);
return add2;
}
}
@ -653,7 +653,6 @@ Node *PhaseIdealLoop::conditional_move( Node *region ) {
return iff->in(1);
}

#ifdef ASSERT
static void enqueue_cfg_uses(Node* m, Unique_Node_List& wq) {
for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
Node* u = m->fast_out(i);
@ -667,7 +666,6 @@ static void enqueue_cfg_uses(Node* m, Unique_Node_List& wq) {
}
}
}
#endif

// Try moving a store out of a loop, right before the loop
Node* PhaseIdealLoop::try_move_store_before_loop(Node* n, Node *n_ctrl) {
@ -687,11 +685,15 @@ Node* PhaseIdealLoop::try_move_store_before_loop(Node* n, Node *n_ctrl) {
// written at iteration i by the second store could be overwritten
// at iteration i+n by the first store: it's not safe to move the
// first store out of the loop
// - nothing must observe the Phi memory: it guarantees no read
// before the store and no early exit out of the loop
// With those conditions, we are also guaranteed the store post
// dominates the loop head. Otherwise there would be extra Phi
// involved between the loop's Phi and the store.
// - nothing must observe the memory Phi: it guarantees no read
// before the store, we are also guaranteed the store post
// dominates the loop head (ignoring a possible early
// exit). Otherwise there would be extra Phi involved between the
// loop's Phi and the store.
// - there must be no early exit from the loop before the Store
// (such an exit most of the time would be an extra use of the
// memory Phi but sometimes is a bottom memory Phi that takes the
// store as input).

if (!n_loop->is_member(address_loop) &&
!n_loop->is_member(value_loop) &&
@ -699,9 +701,10 @@ Node* PhaseIdealLoop::try_move_store_before_loop(Node* n, Node *n_ctrl) {
mem->outcnt() == 1 &&
mem->in(LoopNode::LoopBackControl) == n) {

#ifdef ASSERT
// Verify that store's control does post dominate loop entry and
// that there's no early exit of the loop before the store.
assert(n_loop->_tail != NULL, "need a tail");
assert(is_dominator(n_ctrl, n_loop->_tail), "store control must not be in a branch in the loop");

// Verify that there's no early exit of the loop before the store.
bool ctrl_ok = false;
{
// Follow control from loop head until n, we exit the loop or
@ -709,7 +712,7 @@ Node* PhaseIdealLoop::try_move_store_before_loop(Node* n, Node *n_ctrl) {
ResourceMark rm;
Unique_Node_List wq;
wq.push(n_loop->_head);
assert(n_loop->_tail != NULL, "need a tail");

for (uint next = 0; next < wq.size(); ++next) {
Node *m = wq.at(next);
if (m == n->in(0)) {
@ -722,11 +725,13 @@ Node* PhaseIdealLoop::try_move_store_before_loop(Node* n, Node *n_ctrl) {
break;
}
enqueue_cfg_uses(m, wq);
if (wq.size() > 10) {
ctrl_ok = false;
break;
}
}
assert(ctrl_ok, "bad control");
#endif

}
if (ctrl_ok) {
// move the Store
_igvn.replace_input_of(mem, LoopNode::LoopBackControl, mem);
_igvn.replace_input_of(n, 0, n_loop->_head->in(LoopNode::EntryControl));
@ -742,6 +747,7 @@ Node* PhaseIdealLoop::try_move_store_before_loop(Node* n, Node *n_ctrl) {
return n;
}
}
}
return NULL;
}

@ -769,13 +775,15 @@ void PhaseIdealLoop::try_move_store_after_loop(Node* n) {
}
if (u->is_Phi() && u->in(0) == n_loop->_head) {
assert(_igvn.type(u) == Type::MEMORY, "bad phi");
assert(phi == NULL, "already found");
// multiple phis on the same slice are possible
if (phi != NULL) {
return;
}
phi = u;
continue;
}
}
phi = NULL;
break;
return;
}
if (phi != NULL) {
// Nothing in the loop before the store (next iteration)

@ -1512,7 +1512,8 @@ void PhaseMacroExpand::expand_allocate_common(
// MemBarStoreStore so that stores that initialize this object
// can't be reordered with a subsequent store that makes this
// object accessible by other threads.
if (init == NULL || (!init->is_complete_with_arraycopy() && !init->does_not_escape())) {
if (!alloc->does_not_escape_thread() &&
(init == NULL || !init->is_complete_with_arraycopy())) {
if (init == NULL || init->req() < InitializeNode::RawStores) {
// No InitializeNode or no stores captured by zeroing
// elimination. Simply add the MemBarStoreStore after object

@ -2045,6 +2045,33 @@ bool Matcher::is_bmi_pattern(Node *n, Node *m) {
// and then expanded into the inline_cache_reg and a method_oop register
// defined in ad_<arch>.cpp

// Check for shift by small constant as well
static bool clone_shift(Node* shift, Matcher* matcher, MStack& mstack, VectorSet& address_visited) {
if (shift->Opcode() == Op_LShiftX && shift->in(2)->is_Con() &&
shift->in(2)->get_int() <= 3 &&
// Are there other uses besides address expressions?
!matcher->is_visited(shift)) {
address_visited.set(shift->_idx); // Flag as address_visited
mstack.push(shift->in(2), Visit);
Node *conv = shift->in(1);
#ifdef _LP64
// Allow Matcher to match the rule which bypass
// ConvI2L operation for an array index on LP64
// if the index value is positive.
if (conv->Opcode() == Op_ConvI2L &&
conv->as_Type()->type()->is_long()->_lo >= 0 &&
// Are there other uses besides address expressions?
!matcher->is_visited(conv)) {
address_visited.set(conv->_idx); // Flag as address_visited
mstack.push(conv->in(1), Pre_Visit);
} else
#endif
mstack.push(conv, Pre_Visit);
return true;
}
return false;
}


//------------------------------find_shared------------------------------------
// Set bits if Node is shared or otherwise a root
@ -2205,7 +2232,10 @@ void Matcher::find_shared( Node *n ) {
#endif

// Clone addressing expressions as they are "free" in memory access instructions
if( mem_op && i == MemNode::Address && mop == Op_AddP ) {
if (mem_op && i == MemNode::Address && mop == Op_AddP &&
// When there are other uses besides address expressions
// put it on stack and mark as shared.
!is_visited(m)) {
// Some inputs for address expression are not put on stack
// to avoid marking them as shared and forcing them into register
// if they are used only in address expressions.
@ -2213,10 +2243,7 @@ void Matcher::find_shared( Node *n ) {
// besides address expressions.

Node *off = m->in(AddPNode::Offset);
if( off->is_Con() &&
// When there are other uses besides address expressions
// put it on stack and mark as shared.
!is_visited(m) ) {
if (off->is_Con()) {
address_visited.test_set(m->_idx); // Flag as address_visited
Node *adr = m->in(AddPNode::Address);

@ -2229,28 +2256,7 @@ void Matcher::find_shared( Node *n ) {
!is_visited(adr) ) {
address_visited.set(adr->_idx); // Flag as address_visited
Node *shift = adr->in(AddPNode::Offset);
// Check for shift by small constant as well
if( shift->Opcode() == Op_LShiftX && shift->in(2)->is_Con() &&
shift->in(2)->get_int() <= 3 &&
// Are there other uses besides address expressions?
!is_visited(shift) ) {
address_visited.set(shift->_idx); // Flag as address_visited
mstack.push(shift->in(2), Visit);
Node *conv = shift->in(1);
#ifdef _LP64
// Allow Matcher to match the rule which bypass
// ConvI2L operation for an array index on LP64
// if the index value is positive.
if( conv->Opcode() == Op_ConvI2L &&
conv->as_Type()->type()->is_long()->_lo >= 0 &&
// Are there other uses besides address expressions?
!is_visited(conv) ) {
address_visited.set(conv->_idx); // Flag as address_visited
mstack.push(conv->in(1), Pre_Visit);
} else
#endif
mstack.push(conv, Pre_Visit);
} else {
if (!clone_shift(shift, this, mstack, address_visited)) {
mstack.push(shift, Pre_Visit);
}
mstack.push(adr->in(AddPNode::Address), Pre_Visit);
@ -2263,6 +2269,12 @@ void Matcher::find_shared( Node *n ) {
mstack.push(off, Visit);
mstack.push(m->in(AddPNode::Base), Pre_Visit);
continue; // for(int i = ...)
} else if (clone_shift_expressions &&
clone_shift(off, this, mstack, address_visited)) {
address_visited.test_set(m->_idx); // Flag as address_visited
mstack.push(m->in(AddPNode::Address), Pre_Visit);
mstack.push(m->in(AddPNode::Base), Pre_Visit);
continue;
} // if( off->is_Con() )
} // if( mem_op &&
mstack.push(m, Pre_Visit);

@ -269,6 +269,9 @@ public:
  // should generate this one.
  static const bool match_rule_supported(int opcode);

  // Some uarchs have different sized float register resources
  static const int float_pressure(int default_pressure_threshold);

  // Used to determine if we have fast l2f conversion
  // USII has it, USIII doesn't
  static const bool convL2FSupported(void);

@ -2945,7 +2945,7 @@ Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) {
    // Final field stores.
    Node* alloc = AllocateNode::Ideal_allocation(in(MemBarNode::Precedent), phase);
    if ((alloc != NULL) && alloc->is_Allocate() &&
        alloc->as_Allocate()->_is_non_escaping) {
        alloc->as_Allocate()->does_not_escape_thread()) {
      // The allocated object does not escape.
      eliminate = true;
    }

@ -674,7 +674,8 @@ public:
    Flag_avoid_back_to_back_after = Flag_avoid_back_to_back_before << 1,
    Flag_has_call                 = Flag_avoid_back_to_back_after << 1,
    Flag_is_reduction             = Flag_has_call << 1,
    Flag_is_expensive             = Flag_is_reduction << 1,
    Flag_is_scheduled             = Flag_is_reduction << 1,
    Flag_is_expensive             = Flag_is_scheduled << 1,
    _max_flags = (Flag_is_expensive << 1) - 1 // allow flags combination
  };

@ -861,6 +862,9 @@ public:
  // It must have the loop's phi as input and provide a def to the phi.
  bool is_reduction() const { return (_flags & Flag_is_reduction) != 0; }

  // Used in lcm to mark nodes that have been scheduled
  bool is_scheduled() const { return (_flags & Flag_is_scheduled) != 0; }

//----------------- Optimization

  // Get the worst-case Type output for this Node.

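A side note on the flag enum above: each flag is defined as the previous flag shifted left by one, so every flag owns exactly one bit and `_max_flags` ends up with all used bits set; inserting `Flag_is_scheduled` therefore only required renumbering the flags after it. A minimal standalone sketch of the pattern (illustrative subset of the names):

#include <cstdio>

enum NodeFlags : unsigned {
  Flag_has_call     = 1u << 0,
  Flag_is_reduction = Flag_has_call     << 1,
  Flag_is_scheduled = Flag_is_reduction << 1,  // without "<< 1" two flags would share a bit
  Flag_is_expensive = Flag_is_scheduled << 1,
  _max_flags        = (Flag_is_expensive << 1) - 1  // mask of all flag bits
};

int main() {
  unsigned flags = Flag_is_reduction | Flag_is_scheduled;
  printf("scheduled: %d\n", (flags & Flag_is_scheduled) != 0);  // 1
  printf("mask: 0x%x\n", (unsigned)_max_flags);                 // 0xf
  return 0;
}
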
@ -116,12 +116,6 @@ void Compile::Output() {
    }
  }

# ifdef ENABLE_ZAP_DEAD_LOCALS
  if (ZapDeadCompiledLocals) {
    Insert_zap_nodes();
  }
# endif

  uint* blk_starts = NEW_RESOURCE_ARRAY(uint, _cfg->number_of_blocks() + 1);
  blk_starts[0] = 0;

@ -184,113 +178,6 @@ bool Compile::need_register_stack_bang() const {
  return (stub_function() == NULL && has_java_calls());
}

# ifdef ENABLE_ZAP_DEAD_LOCALS


// In order to catch compiler oop-map bugs, we have implemented
// a debugging mode called ZapDeadCompiledLocals.
// This mode causes the compiler to insert a call to a runtime routine,
// "zap_dead_locals", right before each place in compiled code
// that could potentially be a gc-point (i.e., a safepoint or oop map point).
// The runtime routine checks that locations mapped as oops are really
// oops, that locations mapped as values do not look like oops,
// and that locations mapped as dead are not used later
// (by zapping them to an invalid address).

int Compile::_CompiledZap_count = 0;

void Compile::Insert_zap_nodes() {
  bool skip = false;


  // Dink with static counts because code without the extra
  // runtime calls is MUCH faster for debugging purposes

  if ( CompileZapFirst == 0 ) ; // nothing special
  else if ( CompileZapFirst > CompiledZap_count() ) skip = true;
  else if ( CompileZapFirst == CompiledZap_count() )
    warning("starting zap compilation after skipping");

  if ( CompileZapLast == -1 ) ; // nothing special
  else if ( CompileZapLast < CompiledZap_count() ) skip = true;
  else if ( CompileZapLast == CompiledZap_count() )
    warning("about to compile last zap");

  ++_CompiledZap_count; // counts skipped zaps, too

  if ( skip ) return;


  if ( _method == NULL )
    return; // no safepoints/oopmaps emitted for calls in stubs, so we don't care

  // Insert call to zap runtime stub before every node with an oop map
  for( uint i=0; i<_cfg->number_of_blocks(); i++ ) {
    Block *b = _cfg->get_block(i);
    for ( uint j = 0; j < b->number_of_nodes(); ++j ) {
      Node *n = b->get_node(j);

      // Determine whether we should insert a zap-a-lot node in output.
      // We do that for all nodes that have oopmap info, except for calls
      // to allocation. Calls to allocation pass in the old top-of-eden pointer
      // and expect the C code to reset it. Hence, there can be no safepoints between
      // the inlined-allocation and the call to new_Java, etc.
      // We also cannot zap monitor calls, as they must hold the microlock
      // during the call to Zap, which also wants to grab the microlock.
      bool insert = n->is_MachSafePoint() && (n->as_MachSafePoint()->oop_map() != NULL);
      if ( insert ) { // it is MachSafePoint
        if ( !n->is_MachCall() ) {
          insert = false;
        } else if ( n->is_MachCall() ) {
          MachCallNode* call = n->as_MachCall();
          if (call->entry_point() == OptoRuntime::new_instance_Java() ||
              call->entry_point() == OptoRuntime::new_array_Java() ||
              call->entry_point() == OptoRuntime::multianewarray2_Java() ||
              call->entry_point() == OptoRuntime::multianewarray3_Java() ||
              call->entry_point() == OptoRuntime::multianewarray4_Java() ||
              call->entry_point() == OptoRuntime::multianewarray5_Java() ||
              call->entry_point() == OptoRuntime::slow_arraycopy_Java() ||
              call->entry_point() == OptoRuntime::complete_monitor_locking_Java()
              ) {
            insert = false;
          }
        }
        if (insert) {
          Node *zap = call_zap_node(n->as_MachSafePoint(), i);
          b->insert_node(zap, j);
          _cfg->map_node_to_block(zap, b);
          ++j;
        }
      }
    }
  }
}


Node* Compile::call_zap_node(MachSafePointNode* node_to_check, int block_no) {
  const TypeFunc *tf = OptoRuntime::zap_dead_locals_Type();
  CallStaticJavaNode* ideal_node =
    new CallStaticJavaNode( tf,
                            OptoRuntime::zap_dead_locals_stub(_method->flags().is_native()),
                            "call zap dead locals stub", 0, TypePtr::BOTTOM);
  // We need to copy the OopMap from the site we're zapping at.
  // We have to make a copy, because the zap site might not be
  // a call site, and zap_dead is a call site.
  OopMap* clone = node_to_check->oop_map()->deep_copy();

  // Add the cloned OopMap to the zap node
  ideal_node->set_oop_map(clone);
  return _matcher->match_sfpt(ideal_node);
}

bool Compile::is_node_getting_a_safepoint( Node* n) {
  // This code duplicates the logic prior to the call of add_safepoint
  // below in this file.
  if( n->is_MachSafePoint() ) return true;
  return false;
}

# endif // ENABLE_ZAP_DEAD_LOCALS

// Compute the size of first NumberOfLoopInstrToAlign instructions at the top
// of a loop. When aligning a loop we need to provide enough instructions

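A side note on the CompileZapFirst/CompileZapLast logic removed above: it gates zapping to a [first, last] window of compilations counted by a global counter, so a zap-induced failure can be bisected down to a single compile. A distilled, standalone sketch of that gating (names and shapes simplified, not HotSpot's API):

#include <cstdio>

static int zap_count = 0;  // counts every compile, including skipped ones

// first == 0 and last == -1 mean "no bound", as with the original flags.
static bool should_zap(int first, int last) {
  ++zap_count;
  if (first != 0 && zap_count < first) return false;  // before the window
  if (last != -1 && zap_count > last)  return false;  // after the window
  return true;
}

int main() {
  for (int i = 1; i <= 6; ++i)
    printf("compile %d: %s\n", i, should_zap(3, 4) ? "zap" : "skip");
  return 0;
}
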
@ -834,10 +721,6 @@ void Compile::Process_OopMap_Node(MachNode *mach, int current_offset) {
  MachSafePointNode *sfn = mach->as_MachSafePoint();
  MachCallNode *mcall;

#ifdef ENABLE_ZAP_DEAD_LOCALS
  assert( is_node_getting_a_safepoint(mach), "logic does not match; false negative");
#endif

  int safepoint_pc_offset = current_offset;
  bool is_method_handle_invoke = false;
  bool return_oop = false;
@ -1294,10 +1177,6 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
      if (Pipeline::requires_bundling() && starts_bundle(n))
        cb->flush_bundle(false);

      // The following logic is duplicated in the code ifdeffed for
      // ENABLE_ZAP_DEAD_LOCALS which appears above in this file. It
      // should be factored out. Or maybe dispersed to the nodes?

      // Special handling for SafePoint/Call Nodes
      bool is_mcall = false;
      if (n->is_Mach()) {
@ -1364,9 +1243,6 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
          // !!!!! Stubs only need an oopmap right now, so bail out
          if (sfn->jvms()->method() == NULL) {
            // Write the oopmap directly to the code blob??!!
# ifdef ENABLE_ZAP_DEAD_LOCALS
            assert( !is_node_getting_a_safepoint(sfn), "logic does not match; false positive");
# endif
            continue;
          }
        } // End synchronization
@ -1554,9 +1430,6 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
          // !!!!! Stubs only need an oopmap right now, so bail out
          if (!mach->is_MachCall() && mach->as_MachSafePoint()->jvms()->method() == NULL) {
            // Write the oopmap directly to the code blob??!!
# ifdef ENABLE_ZAP_DEAD_LOCALS
            assert( !is_node_getting_a_safepoint(mach), "logic does not match; false positive");
# endif
            delay_slot = NULL;
            continue;
          }

@ -102,11 +102,6 @@ address OptoRuntime::_rethrow_Java = NULL;
address OptoRuntime::_slow_arraycopy_Java = NULL;
address OptoRuntime::_register_finalizer_Java = NULL;

# ifdef ENABLE_ZAP_DEAD_LOCALS
address OptoRuntime::_zap_dead_Java_locals_Java = NULL;
address OptoRuntime::_zap_dead_native_locals_Java = NULL;
# endif

ExceptionBlob* OptoRuntime::_exception_blob;

// This should be called in an assertion at the start of OptoRuntime routines
@ -152,10 +147,6 @@ bool OptoRuntime::generate(ciEnv* env) {
  gen(env, _slow_arraycopy_Java , slow_arraycopy_Type , SharedRuntime::slow_arraycopy_C , 0 , false, false, false);
  gen(env, _register_finalizer_Java , register_finalizer_Type , register_finalizer , 0 , false, false, false);

# ifdef ENABLE_ZAP_DEAD_LOCALS
  gen(env, _zap_dead_Java_locals_Java , zap_dead_locals_Type , zap_dead_Java_locals_C , 0 , false, true , false );
  gen(env, _zap_dead_native_locals_Java , zap_dead_locals_Type , zap_dead_native_locals_C , 0 , false, true , false );
# endif
  return true;
}

@ -604,23 +595,6 @@ const TypeFunc *OptoRuntime::uncommon_trap_Type() {
  return TypeFunc::make(domain, range);
}

# ifdef ENABLE_ZAP_DEAD_LOCALS
// Type used for stub generation for zap_dead_locals.
// No inputs or outputs
const TypeFunc *OptoRuntime::zap_dead_locals_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(0);
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms,fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms,fields);

  return TypeFunc::make(domain,range);
}
# endif


//-----------------------------------------------------------------------------
// Monitor Handling
const TypeFunc *OptoRuntime::complete_monitor_enter_Type() {
@ -1648,67 +1622,3 @@ static void trace_exception(oop exception_oop, address exception_pc, const char*

#endif // PRODUCT


# ifdef ENABLE_ZAP_DEAD_LOCALS
// Called from call sites in compiled code with oop maps (actually safepoints)
// Zaps dead locals in first java frame.
// Is entry because may need to lock to generate oop maps
// Currently, only used for compiler frames, but someday may be used
// for interpreter frames, too.

int OptoRuntime::ZapDeadCompiledLocals_count = 0;

// avoid pointers to member funcs with these helpers
static bool is_java_frame( frame* f) { return f->is_java_frame(); }
static bool is_native_frame(frame* f) { return f->is_native_frame(); }


void OptoRuntime::zap_dead_java_or_native_locals(JavaThread* thread,
                                                 bool (*is_this_the_right_frame_to_zap)(frame*)) {
  assert(JavaThread::current() == thread, "is this needed?");

  if ( !ZapDeadCompiledLocals ) return;

  bool skip = false;

  if ( ZapDeadCompiledLocalsFirst == 0 ) ; // nothing special
  else if ( ZapDeadCompiledLocalsFirst > ZapDeadCompiledLocals_count ) skip = true;
  else if ( ZapDeadCompiledLocalsFirst == ZapDeadCompiledLocals_count )
    warning("starting zapping after skipping");

  if ( ZapDeadCompiledLocalsLast == -1 ) ; // nothing special
  else if ( ZapDeadCompiledLocalsLast < ZapDeadCompiledLocals_count ) skip = true;
  else if ( ZapDeadCompiledLocalsLast == ZapDeadCompiledLocals_count )
    warning("about to zap last zap");

  ++ZapDeadCompiledLocals_count; // counts skipped zaps, too

  if ( skip ) return;

  // find java frame and zap it

  for (StackFrameStream sfs(thread); !sfs.is_done(); sfs.next()) {
    if (is_this_the_right_frame_to_zap(sfs.current()) ) {
      sfs.current()->zap_dead_locals(thread, sfs.register_map());
      return;
    }
  }
  warning("no frame found to zap in zap_dead_Java_locals_C");
}

JRT_LEAF(void, OptoRuntime::zap_dead_Java_locals_C(JavaThread* thread))
  zap_dead_java_or_native_locals(thread, is_java_frame);
JRT_END

// The following does not work because for one thing, the
// thread state is wrong; it expects java, but it is native.
// Also, the invariants in a native stub are different and
// I'm not sure it is safe to have a MachCallRuntimeDirectNode
// in there.
// So for now, we do not zap in native stubs.

JRT_LEAF(void, OptoRuntime::zap_dead_native_locals_C(JavaThread* thread))
  zap_dead_java_or_native_locals(thread, is_native_frame);
JRT_END

# endif

@ -152,12 +152,6 @@ class OptoRuntime : public AllStatic {
  static address _slow_arraycopy_Java;
  static address _register_finalizer_Java;

# ifdef ENABLE_ZAP_DEAD_LOCALS
  static address _zap_dead_Java_locals_Java;
  static address _zap_dead_native_locals_Java;
# endif


  //
  // Implementation of runtime methods
  // =================================
@ -212,19 +206,6 @@ private:

  static void register_finalizer(oopDesc* obj, JavaThread* thread);

  // zapping dead locals, either from Java frames or from native frames
# ifdef ENABLE_ZAP_DEAD_LOCALS
  static void zap_dead_Java_locals_C(   JavaThread* thread);
  static void zap_dead_native_locals_C( JavaThread* thread);

  static void zap_dead_java_or_native_locals( JavaThread*, bool (*)(frame*));

 public:
  static int ZapDeadCompiledLocals_count;

# endif


 public:

  static bool is_callee_saved_register(MachRegisterNumbers reg);
@ -256,14 +237,6 @@ private:
  static address slow_arraycopy_Java() { return _slow_arraycopy_Java; }
  static address register_finalizer_Java() { return _register_finalizer_Java; }


# ifdef ENABLE_ZAP_DEAD_LOCALS
  static address zap_dead_locals_stub(bool is_native) { return is_native
                                                        ? _zap_dead_native_locals_Java
                                                        : _zap_dead_Java_locals_Java; }
  static MachNode* node_to_call_zap_dead_locals(Node* n, int block_num, bool is_native);
# endif

  static ExceptionBlob* exception_blob() { return _exception_blob; }

  // Leaf routines helping with method data update
@ -353,10 +326,6 @@ private:
  static const TypeFunc* dtrace_method_entry_exit_Type();
  static const TypeFunc* dtrace_object_alloc_Type();

# ifdef ENABLE_ZAP_DEAD_LOCALS
  static const TypeFunc* zap_dead_locals_Type();
# endif

 private:
  static NamedCounter * volatile _named_counters;

@ -2690,15 +2690,25 @@ void SuperWord::align_initial_loop_index(MemNode* align_to_ref) {

//----------------------------get_pre_loop_end---------------------------
// Find pre loop end from main loop. Returns null if none.
CountedLoopEndNode* SuperWord::get_pre_loop_end(CountedLoopNode *cl) {
  Node *ctrl = cl->in(LoopNode::EntryControl);
CountedLoopEndNode* SuperWord::get_pre_loop_end(CountedLoopNode* cl) {
  Node* ctrl = cl->in(LoopNode::EntryControl);
  if (!ctrl->is_IfTrue() && !ctrl->is_IfFalse()) return NULL;
  Node *iffm = ctrl->in(0);
  Node* iffm = ctrl->in(0);
  if (!iffm->is_If()) return NULL;
  Node *p_f = iffm->in(0);
  Node* bolzm = iffm->in(1);
  if (!bolzm->is_Bool()) return NULL;
  Node* cmpzm = bolzm->in(1);
  if (!cmpzm->is_Cmp()) return NULL;
  Node* opqzm = cmpzm->in(2);
  // Can not optimize a loop if zero-trip Opaque1 node is optimized
  // away and then another round of loop opts attempted.
  if (opqzm->Opcode() != Op_Opaque1) {
    return NULL;
  }
  Node* p_f = iffm->in(0);
  if (!p_f->is_IfFalse()) return NULL;
  if (!p_f->in(0)->is_CountedLoopEnd()) return NULL;
  CountedLoopEndNode *pre_end = p_f->in(0)->as_CountedLoopEnd();
  CountedLoopEndNode* pre_end = p_f->in(0)->as_CountedLoopEnd();
  CountedLoopNode* loop_node = pre_end->loopnode();
  if (loop_node == NULL || !loop_node->is_pre_loop()) return NULL;
  return pre_end;
@ -3045,6 +3055,9 @@ bool SWPointer::offset_plus_k(Node* n, bool negate) {
    }
  }
  if (invariant(n)) {
    if (opc == Op_ConvI2L) {
      n = n->in(1);
    }
    _negate_invar = negate;
    _invar = n;
    NOT_PRODUCT(_tracer.offset_plus_k_10(n, _invar, _negate_invar, _offset);)

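A side note on the rewritten get_pre_loop_end(): every step of the walk re-checks the node kind and returns NULL instead of asserting, because after earlier rounds of loop opts (e.g. the zero-trip-guard Opaque1 being folded away) the expected graph shape may no longer exist. A toy standalone version of such a defensive walk (hypothetical Node type, not HotSpot's):

#include <cstddef>
#include <vector>

enum Kind { IfTrueK, IfK, BoolK, CmpK, Opaque1K, OtherK };

struct Node {
  Kind kind;
  std::vector<Node*> in;  // inputs by position
};

// Fetch input i of n and require it to have the expected kind;
// NULL propagates so the caller can chain lookups safely.
static Node* input(Node* n, size_t i, Kind want) {
  if (n == NULL || i >= n->in.size()) return NULL;
  Node* m = n->in[i];
  return (m != NULL && m->kind == want) ? m : NULL;
}

// Entry control -> IfTrue -> If -> Bool -> Cmp -> Opaque1; bail out with
// NULL the moment any link does not have the expected shape.
Node* find_zero_trip_guard(Node* ctrl) {
  if (ctrl == NULL || ctrl->kind != IfTrueK) return NULL;
  Node* iff = input(ctrl, 0, IfK);
  Node* bol = input(iff, 1, BoolK);
  Node* cmp = input(bol, 1, CmpK);
  Node* opq = input(cmp, 2, Opaque1K);
  return (opq != NULL) ? iff : NULL;
}
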
@ -29,6 +29,7 @@
#include "classfile/classLoaderData.hpp"
#include "classfile/stringTable.hpp"
#include "code/codeCache.hpp"
#include "compiler/methodMatcher.hpp"
#include "jvmtifiles/jvmtiEnv.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceShared.hpp"
@ -625,6 +626,32 @@ WB_ENTRY(jboolean, WB_EnqueueMethodForCompilation(JNIEnv* env, jobject o, jobjec
  return (mh->queued_for_compilation() || nm != NULL);
WB_END


WB_ENTRY(jint, WB_MatchesMethod(JNIEnv* env, jobject o, jobject method, jstring pattern))
  jmethodID jmid = reflected_method_to_jmid(thread, env, method);
  CHECK_JNI_EXCEPTION_(env, JNI_FALSE);

  methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));

  ResourceMark rm;
  char* method_str = java_lang_String::as_utf8_string(JNIHandles::resolve_non_null(pattern));

  const char* error_msg = NULL;

  BasicMatcher* m = BasicMatcher::parse_method_pattern(method_str, error_msg);
  if (m == NULL) {
    assert(error_msg != NULL, "Must have error_msg");
    tty->print_cr("Got error: %s", error_msg);
    return -1;
  }

  // Pattern works - now check if it matches
  int result = m->matches(mh);
  delete m;
  assert(result == 0 || result == 1, "Result out of range");
  return result;
WB_END

class AlwaysFalseClosure : public BoolObjectClosure {
 public:
  bool do_object_b(oop p) { return false; }
@ -1430,6 +1457,9 @@ static JNINativeMethod methods[] = {
      CC"(Ljava/lang/reflect/Executable;)V", (void*)&WB_ClearMethodState},
  {CC"lockCompilation", CC"()V", (void*)&WB_LockCompilation},
  {CC"unlockCompilation", CC"()V", (void*)&WB_UnlockCompilation},
  {CC"matchesMethod",
      CC"(Ljava/lang/reflect/Executable;Ljava/lang/String;)I",
      (void*)&WB_MatchesMethod},
  {CC"isConstantVMFlag", CC"(Ljava/lang/String;)Z", (void*)&WB_IsConstantVMFlag},
  {CC"isLockedVMFlag", CC"(Ljava/lang/String;)Z", (void*)&WB_IsLockedVMFlag},
  {CC"setBooleanVMFlag", CC"(Ljava/lang/String;Z)V",(void*)&WB_SetBooleanVMFlag},

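A side note on WB_MatchesMethod above: it appears to parse the same method patterns as CompileCommand, where '*' is the only wildcard, and returns -1 for an unparsable pattern and 0/1 for no-match/match. The wildcard core is the classic two-pointer match with backtracking; a standalone sketch (this is not BasicMatcher, which also splits class, method and signature parts):

#include <cstdio>

// '*' matches any (possibly empty) run of characters; everything else
// matches literally. Greedy match with backtracking to the last '*'.
static bool wildcard_match(const char* name, const char* pat) {
  const char* star   = 0;     // last '*' seen in pat
  const char* resume = name;  // position in name to retry from
  while (*name) {
    if (*pat == '*')        { star = pat++; resume = name; }
    else if (*pat == *name) { ++pat; ++name; }
    else if (star != 0)     { pat = star + 1; name = ++resume; }
    else                    return false;
  }
  while (*pat == '*') ++pat;
  return *pat == '\0';
}

int main() {
  printf("%d\n", wildcard_match("java.lang.String::indexOf", "java.lang.*::index*"));  // 1
  printf("%d\n", wildcard_match("java.util.List::size", "java.lang.*"));               // 0
  return 0;
}
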
@ -1111,104 +1111,6 @@ void frame::metadata_do(void f(Metadata*)) {
    }
  }

# ifdef ENABLE_ZAP_DEAD_LOCALS

void frame::CheckValueClosure::do_oop(oop* p) {
  if (CheckOopishValues && Universe::heap()->is_in_reserved(*p)) {
    warning("value @ " INTPTR_FORMAT " looks oopish (" INTPTR_FORMAT ") (thread = " INTPTR_FORMAT ")", p, (address)*p, Thread::current());
  }
}
frame::CheckValueClosure frame::_check_value;


void frame::CheckOopClosure::do_oop(oop* p) {
  if (*p != NULL && !(*p)->is_oop()) {
    warning("value @ " INTPTR_FORMAT " should be an oop (" INTPTR_FORMAT ") (thread = " INTPTR_FORMAT ")", p, (address)*p, Thread::current());
  }
}
frame::CheckOopClosure frame::_check_oop;

void frame::check_derived_oop(oop* base, oop* derived) {
  _check_oop.do_oop(base);
}


void frame::ZapDeadClosure::do_oop(oop* p) {
  if (TraceZapDeadLocals) tty->print_cr("zapping @ " INTPTR_FORMAT " containing " INTPTR_FORMAT, p, (address)*p);
  *p = cast_to_oop<intptr_t>(0xbabebabe);
}
frame::ZapDeadClosure frame::_zap_dead;

void frame::zap_dead_locals(JavaThread* thread, const RegisterMap* map) {
  assert(thread == Thread::current(), "need to synchronize to do this to another thread");
  // Tracing - part 1
  if (TraceZapDeadLocals) {
    ResourceMark rm(thread);
    tty->print_cr("--------------------------------------------------------------------------------");
    tty->print("Zapping dead locals in ");
    print_on(tty);
    tty->cr();
  }
  // Zapping
       if (is_entry_frame      ()) zap_dead_entry_locals      (thread, map);
  else if (is_interpreted_frame()) zap_dead_interpreted_locals(thread, map);
  else if (is_compiled_frame   ()) zap_dead_compiled_locals   (thread, map);

  else
    // could be is_runtime_frame
    // so remove error: ShouldNotReachHere();
    ;
  // Tracing - part 2
  if (TraceZapDeadLocals) {
    tty->cr();
  }
}


void frame::zap_dead_interpreted_locals(JavaThread *thread, const RegisterMap* map) {
  // get current interpreter 'pc'
  assert(is_interpreted_frame(), "Not an interpreted frame");
  Method* m = interpreter_frame_method();
  int bci = interpreter_frame_bci();

  int max_locals = m->is_native() ? m->size_of_parameters() : m->max_locals();

  // process dynamic part
  InterpreterFrameClosure value_blk(this, max_locals, m->max_stack(),
                                    &_check_value);
  InterpreterFrameClosure oop_blk(this, max_locals, m->max_stack(),
                                  &_check_oop );
  InterpreterFrameClosure dead_blk(this, max_locals, m->max_stack(),
                                   &_zap_dead );

  // get frame map
  InterpreterOopMap mask;
  m->mask_for(bci, &mask);
  mask.iterate_all( &oop_blk, &value_blk, &dead_blk);
}


void frame::zap_dead_compiled_locals(JavaThread* thread, const RegisterMap* reg_map) {

  ResourceMark rm(thread);
  assert(_cb != NULL, "sanity check");
  if (_cb->oop_maps() != NULL) {
    OopMapSet::all_do(this, reg_map, &_check_oop, check_derived_oop, &_check_value);
  }
}


void frame::zap_dead_entry_locals(JavaThread*, const RegisterMap*) {
  if (TraceZapDeadLocals) warning("frame::zap_dead_entry_locals unimplemented");
}


void frame::zap_dead_deoptimized_locals(JavaThread*, const RegisterMap*) {
  if (TraceZapDeadLocals) warning("frame::zap_dead_deoptimized_locals unimplemented");
}

# endif // ENABLE_ZAP_DEAD_LOCALS

void frame::verify(const RegisterMap* map) {
  // for now make sure receiver type is correct
  if (is_interpreted_frame()) {

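A side note on ZapDeadClosure above: a dead stack slot is overwritten with a recognizable sentinel (0xbabebabe) so that a later, erroneous use of the slot yields a value that stands out in a crash dump and faults if used as a pointer. The core of the idea in a standalone sketch (simplified; HotSpot writes through an oop* during a frame walk):

#include <cstdint>
#include <cstdio>

static const intptr_t kZapValue = 0xbabebabe;  // sentinel from the code above

// Poison a slot the moment its contents are known to be dead.
static void zap_dead_slot(intptr_t* slot) { *slot = kZapValue; }

static bool looks_zapped(intptr_t v) { return v == kZapValue; }

int main() {
  intptr_t slot = 42;   // pretend this local just went dead
  zap_dead_slot(&slot);
  printf("zapped: %d\n", looks_zapped(slot) ? 1 : 0);  // 1
  return 0;
}
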
@ -405,39 +405,6 @@ class frame VALUE_OBJ_CLASS_SPEC {
  // RedefineClasses support for finding live interpreted methods on the stack
  void metadata_do(void f(Metadata*));

# ifdef ENABLE_ZAP_DEAD_LOCALS
 private:
  class CheckValueClosure: public OopClosure {
   public:
    void do_oop(oop* p);
    void do_oop(narrowOop* p) { ShouldNotReachHere(); }
  };
  static CheckValueClosure _check_value;

  class CheckOopClosure: public OopClosure {
   public:
    void do_oop(oop* p);
    void do_oop(narrowOop* p) { ShouldNotReachHere(); }
  };
  static CheckOopClosure _check_oop;

  static void check_derived_oop(oop* base, oop* derived);

  class ZapDeadClosure: public OopClosure {
   public:
    void do_oop(oop* p);
    void do_oop(narrowOop* p) { ShouldNotReachHere(); }
  };
  static ZapDeadClosure _zap_dead;

 public:
  // Zapping
  void zap_dead_locals            (JavaThread* thread, const RegisterMap* map);
  void zap_dead_interpreted_locals(JavaThread* thread, const RegisterMap* map);
  void zap_dead_compiled_locals   (JavaThread* thread, const RegisterMap* map);
  void zap_dead_entry_locals      (JavaThread* thread, const RegisterMap* map);
  void zap_dead_deoptimized_locals(JavaThread* thread, const RegisterMap* map);
# endif
  // Verification
  void verify(const RegisterMap* map);
  static bool verify_return_pc(address x);

@ -937,16 +937,6 @@ public:
  notproduct(bool, VerifyCodeCache, false,                                  \
          "Verify code cache on memory allocation/deallocation")            \
                                                                            \
  develop(bool, ZapDeadCompiledLocals, false,                               \
          "Zap dead locals in compiler frames")                             \
                                                                            \
  notproduct(bool, ZapDeadLocalsOld, false,                                 \
          "Zap dead locals (old version, zaps all frames when "             \
          "entering the VM")                                                \
                                                                            \
  notproduct(bool, CheckOopishValues, false,                                \
          "Warn if value contains oop (requires ZapDeadLocals)")            \
                                                                            \
  develop(bool, UseMallocOnly, false,                                       \
          "Use only malloc/free for allocation (no resource area/arena)")   \
                                                                            \
@ -1489,9 +1479,6 @@ public:
  develop(bool, TraceCompiledIC, false,                                     \
          "Trace changes of compiled IC")                                   \
                                                                            \
  notproduct(bool, TraceZapDeadLocals, false,                               \
          "Trace zapping dead locals")                                      \
                                                                            \
  develop(bool, TraceStartupTime, false,                                    \
          "Trace setup time")                                               \
                                                                            \
@ -3143,7 +3130,7 @@ public:
                                                                            \
  develop(intx, MaxForceInlineLevel, 100,                                   \
          "maximum number of nested calls that are forced for inlining "    \
          "(using CompilerOracle or marked w/ @ForceInline)")               \
          "(using CompileCommand or marked w/ @ForceInline)")               \
                                                                            \
  product_pd(intx, InlineSmallCode,                                         \
          "Only inline already compiled methods if their code size is "     \

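A side note on the globals.hpp hunks above: the flag tables are X-macros, i.e. one macro that lists every flag and is expanded several times with different definitions of develop()/notproduct() to declare, define and register the flags, which is why deleting a flag is a single table edit. A minimal standalone sketch of the technique (flag names reused for illustration only):

#include <cstdio>

#define MY_FLAGS(develop, notproduct)              \
  develop(bool, ZapDeadCompiledLocals, false,      \
          "Zap dead locals in compiler frames")    \
  notproduct(bool, TraceZapDeadLocals, false,      \
          "Trace zapping dead locals")

// Expansion 1: define storage for every flag.
#define DEFINE_FLAG(type, name, value, doc) type name = value;
MY_FLAGS(DEFINE_FLAG, DEFINE_FLAG)
#undef DEFINE_FLAG

// Expansion 2: print the table.
#define PRINT_FLAG(type, name, value, doc) printf("%-24s %s\n", #name, doc);
int main() {
  MY_FLAGS(PRINT_FLAG, PRINT_FLAG)
  return 0;
}
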
@ -167,25 +167,6 @@ void InterfaceSupport::walk_stack() {
  walk_stack_from(thread->last_java_vframe(&reg_map));
}


# ifdef ENABLE_ZAP_DEAD_LOCALS

static int zap_traversals = 0;

void InterfaceSupport::zap_dead_locals_old() {
  JavaThread* thread = JavaThread::current();
  if (zap_traversals == -1) // edit constant for debugging
    warning("I am here");
  int zap_frame_count = 0; // count frames to help debugging
  for (StackFrameStream sfs(thread); !sfs.is_done(); sfs.next()) {
    sfs.current()->zap_dead_locals(thread, sfs.register_map());
    ++zap_frame_count;
  }
  ++zap_traversals;
}

# endif

// invocation counter for InterfaceSupport::deoptimizeAll/zombieAll functions
int deoptimizeAllCounter = 0;
int zombieAllCounter = 0;

@ -84,10 +84,6 @@ class InterfaceSupport: AllStatic {
  static void walk_stack_from(vframe* start_vf);
  static void walk_stack();

# ifdef ENABLE_ZAP_DEAD_LOCALS
  static void zap_dead_locals_old();
# endif

  static void zombieAll();
  static void unlinkSymbols();
  static void deoptimizeAll();
@ -357,11 +353,6 @@ class VMEntryWrapper {
    if (WalkStackALot) {
      InterfaceSupport::walk_stack();
    }
#ifdef ENABLE_ZAP_DEAD_LOCALS
    if (ZapDeadLocalsOld) {
      InterfaceSupport::zap_dead_locals_old();
    }
#endif
#ifdef COMPILER2
    // This option is not used by Compiler 1
    if (StressDerivedPointers) {