7083786: remove various dead chunks of code
Reviewed-by: iveresov, kvn
parent 9c87ea9062
commit fa7c124af1
@@ -142,11 +142,6 @@ LIR_Opr LIR_Assembler::receiverOpr() {
 }
 
 
-LIR_Opr LIR_Assembler::incomingReceiverOpr() {
-  return FrameMap::I0_oop_opr;
-}
-
-
 LIR_Opr LIR_Assembler::osrBufferPointer() {
   return FrameMap::I0_opr;
 }
@@ -782,13 +782,6 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case jvmti_exception_throw_id:
-      { // Oexception : exception
-        __ set_info("jvmti_exception_throw", dont_gc_arguments);
-        oop_maps = generate_stub_call(sasm, noreg, CAST_FROM_FN_PTR(address, Runtime1::post_jvmti_exception_throw), I0);
-      }
-      break;
-
     case dtrace_object_alloc_id:
       { // O0: object
         __ set_info("dtrace_object_alloc", dont_gc_arguments);
@@ -259,13 +259,8 @@
 };
 #endif /* CC_INTERP */
 
- // the compiler frame has many of the same fields as the interpreter frame
- // %%%%% factor out declarations of the shared fields
 enum compiler_frame_fixed_locals {
-  compiler_frame_d_scratch_fp_offset = -2,
-  compiler_frame_vm_locals_fp_offset = -2, // should be same as above
-
-  compiler_frame_vm_local_words = -compiler_frame_vm_locals_fp_offset
+  compiler_frame_vm_locals_fp_offset = -2
 };
 
 private:
@@ -283,9 +278,6 @@
 
   inline void interpreter_frame_set_tos_address(intptr_t* x);
 
-
-  // %%%%% Another idea: instead of defining 3 fns per item, just define one returning a ref
-
   // monitors:
 
   // next two fns read and write Lmonitors value,
@@ -298,22 +290,8 @@
     return ((interpreterState)sp_at(interpreter_state_ptr_offset));
   }
 
 
 #endif /* CC_INTERP */
 
-
-
-  // Compiled frames
-
- public:
-  // Tells if this register can hold 64 bits on V9 (really, V8+).
-  static bool holds_a_doubleword(Register reg) {
-#ifdef _LP64
-    //    return true;
-    return reg->is_out() || reg->is_global();
-#else
-    return reg->is_out() || reg->is_global();
-#endif
-  }
 
 #endif // CPU_SPARC_VM_FRAME_SPARC_HPP
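Note: judging by the include guard this is frame_sparc.hpp, and the removed holds_a_doubleword had no remaining callers. Its #ifdef _LP64 split was already vestigial: both branches return the same expression, and the 64-bit-only "return true;" survives only as a comment. A stand-alone sketch of that collapse; Reg is a stand-in type here, not HotSpot's Register:

    #include <cassert>

    struct Reg { bool out; bool global; };   // stand-in, not HotSpot's Register

    // Both preprocessor branches computed exactly this, on 32- and 64-bit alike:
    static bool holds_a_doubleword(const Reg& r) {
      return r.out || r.global;
    }

    int main() {
      assert(holds_a_doubleword(Reg{true, false}));
      assert(!holds_a_doubleword(Reg{false, false}));
      return 0;
    }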
@@ -129,10 +129,6 @@ LIR_Opr LIR_Assembler::receiverOpr() {
   return FrameMap::receiver_opr;
 }
 
-LIR_Opr LIR_Assembler::incomingReceiverOpr() {
-  return receiverOpr();
-}
-
 LIR_Opr LIR_Assembler::osrBufferPointer() {
   return FrameMap::as_pointer_opr(receiverOpr()->as_register());
 }
@@ -371,55 +367,6 @@ void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info)
 }
 
 
-void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register new_hdr, int monitor_no, Register exception) {
-  if (exception->is_valid()) {
-    // preserve exception
-    // note: the monitor_exit runtime call is a leaf routine
-    //       and cannot block => no GC can happen
-    // The slow case (MonitorAccessStub) uses the first two stack slots
-    // ([esp+0] and [esp+4]), therefore we store the exception at [esp+8]
-    __ movptr (Address(rsp, 2*wordSize), exception);
-  }
-
-  Register obj_reg  = obj_opr->as_register();
-  Register lock_reg = lock_opr->as_register();
-
-  // setup registers (lock_reg must be rax, for lock_object)
-  assert(obj_reg != SYNC_header && lock_reg != SYNC_header, "rax, must be available here");
-  Register hdr = lock_reg;
-  assert(new_hdr == SYNC_header, "wrong register");
-  lock_reg = new_hdr;
-  // compute pointer to BasicLock
-  Address lock_addr = frame_map()->address_for_monitor_lock(monitor_no);
-  __ lea(lock_reg, lock_addr);
-  // unlock object
-  MonitorAccessStub* slow_case = new MonitorExitStub(lock_opr, true, monitor_no);
-  // _slow_case_stubs->append(slow_case);
-  // temporary fix: must be created after exceptionhandler, therefore as call stub
-  _slow_case_stubs->append(slow_case);
-  if (UseFastLocking) {
-    // try inlined fast unlocking first, revert to slow locking if it fails
-    // note: lock_reg points to the displaced header since the displaced header offset is 0!
-    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
-    __ unlock_object(hdr, obj_reg, lock_reg, *slow_case->entry());
-  } else {
-    // always do slow unlocking
-    // note: the slow unlocking code could be inlined here, however if we use
-    //       slow unlocking, speed doesn't matter anyway and this solution is
-    //       simpler and requires less duplicated code - additionally, the
-    //       slow unlocking code is the same in either case which simplifies
-    //       debugging
-    __ jmp(*slow_case->entry());
-  }
-  // done
-  __ bind(*slow_case->continuation());
-
-  if (exception->is_valid()) {
-    // restore exception
-    __ movptr (exception, Address(rsp, 2 * wordSize));
-  }
-}
-
 // This specifies the rsp decrement needed to build the frame
 int LIR_Assembler::initial_frame_size_in_bytes() {
   // if rounding, must let FrameMap know!
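Note: this monitorexit helper appears to be an orphaned unlocking path (its own comments already call the stub registration a "temporary fix"); the matching declaration disappears in the header hunk below. The fast/slow split it used is the usual C1 shape: attempt an inline fast unlock, branch to an out-of-line slow case when that fails. A minimal sketch of that shape, with invented stand-ins rather than HotSpot's MacroAssembler API:

    #include <cstdio>

    // Invented stand-ins: a one-word lock and two unlock paths.
    static bool fast_unlock(int* lock_word) {
      if (*lock_word != 1) return false;   // e.g. displaced-header mismatch
      *lock_word = 0;                      // uncontended: release inline
      return true;
    }

    static void slow_unlock(int* lock_word) {
      *lock_word = 0;                      // stand-in for the runtime call
    }

    // Mirrors the removed control flow: UseFastLocking tries the inline path
    // first and falls through to the slow-case stub only when it fails.
    static void unlock_object(int* lock_word, bool use_fast_locking) {
      if (use_fast_locking && fast_unlock(lock_word)) return;
      slow_unlock(lock_word);
    }

    int main() {
      int lock_word = 1;
      unlock_object(&lock_word, true);
      printf("lock_word = %d\n", lock_word);   // prints 0
      return 0;
    }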
@@ -29,8 +29,6 @@
 
   Address::ScaleFactor array_element_size(BasicType type) const;
 
-  void monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register new_hdr, int monitor_no, Register exception);
-
   void arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack);
 
   // helper functions which checks for overflow and sets bailout if it
@@ -1465,19 +1465,6 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case jvmti_exception_throw_id:
-      { // rax,: exception oop
-        StubFrame f(sasm, "jvmti_exception_throw", dont_gc_arguments);
-        // Preserve all registers across this potentially blocking call
-        const int num_rt_args = 2;  // thread, exception oop
-        OopMap* map = save_live_registers(sasm, num_rt_args);
-        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, Runtime1::post_jvmti_exception_throw), rax);
-        oop_maps = new OopMapSet();
-        oop_maps->add_gc_map(call_offset, map);
-        restore_live_registers(sasm);
-      }
-      break;
-
     case dtrace_object_alloc_id:
       { // rax,: object
         StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
@@ -346,7 +346,6 @@ void Compilation::install_code(int frame_size) {
     implicit_exception_table(),
     compiler(),
     _env->comp_level(),
-    true,
     has_unsafe_access()
   );
 }
@@ -133,7 +133,6 @@ class LIR_Assembler: public CompilationResourceObj {
   static bool is_small_constant(LIR_Opr opr);
 
   static LIR_Opr receiverOpr();
-  static LIR_Opr incomingReceiverOpr();
   static LIR_Opr osrBufferPointer();
 
   // stubs
@@ -375,14 +375,6 @@ JRT_ENTRY(void, Runtime1::throw_array_store_exception(JavaThread* thread, oopDes
 JRT_END
 
 
-JRT_ENTRY(void, Runtime1::post_jvmti_exception_throw(JavaThread* thread))
-  if (JvmtiExport::can_post_on_exceptions()) {
-    vframeStream vfst(thread, true);
-    address bcp = vfst.method()->bcp_from(vfst.bci());
-    JvmtiExport::post_exception_throw(thread, vfst.method(), bcp, thread->exception_oop());
-  }
-JRT_END
-
 // counter_overflow() is called from within C1-compiled methods. The enclosing method is the method
 // associated with the top activation record. The inlinee (that is possibly included in the enclosing
 // method) method oop is passed as an argument. In order to do that it is embedded in the code as
@@ -65,7 +65,6 @@ class StubAssembler;
   stub(monitorexit_nofpu) /* optimized version that does not preserve fpu registers */ \
   stub(access_field_patching) \
   stub(load_klass_patching) \
-  stub(jvmti_exception_throw) \
   stub(g1_pre_barrier_slow) \
   stub(g1_post_barrier_slow) \
   stub(fpu2long_stub) \
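Note: this stub table is an X-macro list; in c1_Runtime1.hpp each stub(...) entry is expanded into the StubID enum value and the stub's name string, while the per-platform generate_code_for switch cases (removed in the SPARC and x86 hunks above) are maintained by hand. So the one-line deletion here retires jvmti_exception_throw_id and its name consistently. A minimal, runnable model of the pattern, with illustrative stub names only:

    #include <cstdio>

    // Illustrative names; the real table lives in c1_Runtime1.hpp.
    #define STUBS_DO(stub)      \
      stub(monitorenter)        \
      stub(monitorexit)         \
      stub(load_klass_patching)

    enum StubID {
    #define STUB_ID(name) name##_id,
      STUBS_DO(STUB_ID)
    #undef STUB_ID
      number_of_ids
    };

    static const char* stub_names[] = {
    #define STUB_NAME(name) #name,
      STUBS_DO(STUB_NAME)
    #undef STUB_NAME
    };

    int main() {
      // Deleting one stub(...) line shrinks the enum and this table together.
      for (int id = 0; id < number_of_ids; id++)
        printf("%d: %s\n", id, stub_names[id]);
      return 0;
    }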
@@ -141,7 +140,6 @@ class Runtime1: public AllStatic {
   static void unimplemented_entry (JavaThread* thread, StubID id);
 
   static address exception_handler_for_pc(JavaThread* thread);
-  static void post_jvmti_exception_throw(JavaThread* thread);
 
   static void throw_range_check_exception(JavaThread* thread, int index);
   static void throw_index_exception(JavaThread* thread, int index);
@@ -46,9 +46,6 @@ private:
     ciObject* _object;
   } _value;
 
-  // Implementation of the print method.
-  void print_impl(outputStream* st);
-
 public:
 
   ciConstant() {
@@ -949,7 +949,6 @@ void ciEnv::register_method(ciMethod* target,
                             ImplicitExceptionTable* inc_table,
                             AbstractCompiler* compiler,
                             int comp_level,
-                            bool has_debug_info,
                             bool has_unsafe_access) {
   VM_ENTRY_MARK;
   nmethod* nm = NULL;
@@ -1044,7 +1043,6 @@ void ciEnv::register_method(ciMethod* target,
         CompileBroker::handle_full_code_cache();
       }
     } else {
-      NOT_PRODUCT(nm->set_has_debug_info(has_debug_info); )
       nm->set_has_unsafe_access(has_unsafe_access);
 
       // Record successful registration.
@@ -317,8 +317,7 @@ public:
                        ImplicitExceptionTable* inc_table,
                        AbstractCompiler* compiler,
                        int comp_level,
-                       bool has_debug_info = true,
-                       bool has_unsafe_access = false);
+                       bool has_unsafe_access);
 
 
   // Access to certain well known ciObjects.
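Note: dropping the defaulted has_debug_info parameter outright, rather than leaving it unused, is what forces the C1 and C2 call sites (c1_Compilation and opto Compile hunks) to change in the same commit; defaulted arguments are exactly how a dead parameter lingers unnoticed. A tiny runnable illustration, using a hypothetical miniature of the signature rather than the real ciEnv API:

    #include <cstdio>

    namespace before {
      void register_method(int comp_level,
                           bool has_debug_info = true,
                           bool has_unsafe_access = false) {
        (void)has_debug_info;  // dead: nothing reads it, yet callers compile fine
        printf("before: level=%d unsafe=%d\n", comp_level, has_unsafe_access);
      }
    }

    namespace after {
      void register_method(int comp_level, bool has_unsafe_access) {
        printf("after: level=%d unsafe=%d\n", comp_level, has_unsafe_access);
      }
    }

    int main() {
      before::register_method(1);         // defaults hide the dead flag
      after::register_method(1, false);   // every call site must now be explicit
      return 0;
    }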
@@ -64,9 +64,6 @@ private:
   // shared constructor code
   void initialize_from(fieldDescriptor* fd);
 
-  // The implementation of the print method.
-  void print_impl(outputStream* st);
-
 public:
   ciFlags flags() { return _flags; }
 
@@ -451,7 +451,6 @@ void nmethod::init_defaults() {
   _stack_traversal_mark  = 0;
   _unload_reported       = false; // jvmti state
 
-  NOT_PRODUCT(_has_debug_info = false);
 #ifdef ASSERT
   _oops_are_stale = false;
 #endif
@@ -191,8 +191,6 @@ class nmethod : public CodeBlob {
 
   jbyte _scavenge_root_state;
 
-  NOT_PRODUCT(bool _has_debug_info; )
-
   // Nmethod Flushing lock. If non-zero, then the nmethod is not removed
   // and is not made into a zombie. However, once the nmethod is made into
   // a zombie, it will be locked one final time if CompiledMethodUnload
@@ -329,11 +327,6 @@ class nmethod : public CodeBlob {
   methodOop method() const { return _method; }
   AbstractCompiler* compiler() const { return _compiler; }
 
-#ifndef PRODUCT
-  bool has_debug_info() const { return _has_debug_info; }
-  void set_has_debug_info(bool f) { _has_debug_info = false; }
-#endif // NOT PRODUCT
-
   // type info
   bool is_nmethod() const { return true; }
   bool is_java_method() const { return !method()->is_native(); }
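Note: the removed setter assigned false regardless of its argument ("_has_debug_info = false;", not "= f;"), so the flag could never read back true. The whole non-product mechanism was inert, which is consistent with the nmethod.cpp, nmethod.hpp, and ciEnv deletions above. A runnable illustration of that latent bug, kept faithful to the removed code:

    #include <cassert>

    struct FlagHolder {
      bool _has_debug_info = false;
      // Faithful to the removed code: the argument is ignored.
      void set_has_debug_info(bool f) { (void)f; _has_debug_info = false; }
      bool has_debug_info() const { return _has_debug_info; }
    };

    int main() {
      FlagHolder nm;
      nm.set_has_debug_info(true);
      assert(!nm.has_debug_info());   // still false: the setter drops its input
      return 0;
    }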
@@ -172,11 +172,6 @@ void constMethodKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
 int constMethodKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
   assert(obj->is_constMethod(), "should be constMethod");
   constMethodOop cm_oop = constMethodOop(obj);
-#if 0
-  PSParallelCompact::adjust_pointer(cm_oop->adr_method());
-  PSParallelCompact::adjust_pointer(cm_oop->adr_exception_table());
-  PSParallelCompact::adjust_pointer(cm_oop->adr_stackmap_data());
-#endif
   oop* const beg_oop = cm_oop->oop_block_beg();
   oop* const end_oop = cm_oop->oop_block_end();
   for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
@@ -817,7 +817,6 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
                            &_handler_table, &_inc_table,
                            compiler,
                            env()->comp_level(),
-                           true, /*has_debug_info*/
                            has_unsafe_access()
                            );
 }
@@ -496,14 +496,6 @@ public:
   virtual bool depends_only_on_test() const { return false; }
 };
 
-//------------------------------MemMoveNode------------------------------------
-// Memory to memory move.  Inserted very late, after allocation.
-class MemMoveNode : public Node {
-public:
-  MemMoveNode( Node *dst, Node *src ) : Node(0,dst,src) {}
-  virtual int Opcode() const;
-};
-
 //------------------------------ThreadLocalNode--------------------------------
 // Ideal Node which returns the base of ThreadLocalStorage.
 class ThreadLocalNode : public Node {
@@ -752,20 +752,12 @@ void Parse::do_jsr() {
 // Handle ret bytecode
 void Parse::do_ret() {
   // Find to whom we return.
-#if 0 // %%%% MAKE THIS WORK
-  Node* con = local();
-  const TypePtr* tp = con->bottom_type()->isa_ptr();
-  assert(tp && tp->singleton(), "");
-  int return_bci = (int) tp->get_con();
-  merge(return_bci);
-#else
   assert(block()->num_successors() == 1, "a ret can only go one place now");
   Block* target = block()->successor_at(0);
   assert(!target->is_ready(), "our arrival must be expected");
   profile_ret(target->flow()->start());
   int pnum = target->next_path_num();
   merge_common(target, pnum);
-#endif
 }
 
 //--------------------------dynamic_branch_prediction--------------------------
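Note: this hunk, the constMethodKlass one above, and the AsyncGetCallTrace one below (forte.cpp) all delete "#if 0" regions. Such regions are discarded during preprocessing, so removing them cannot change the generated code, and identifiers inside them need not even resolve. A small stand-alone demonstration:

    #include <cstdio>

    int do_ret_path() {
    #if 0                        // skipped by the preprocessor entirely:
      return merge(return_bci);  //   undeclared names here are harmless
    #endif
      return 1;                  // the only path that was ever compiled
    }

    int main() {
      printf("%d\n", do_ret_path());
      return 0;
    }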
@@ -978,7 +978,6 @@ JRT_ENTRY_NO_ASYNC(address, OptoRuntime::handle_exception_C_helper(JavaThread* t
 
   thread->set_exception_pc(pc);
   thread->set_exception_handler_pc(handler_address);
-  thread->set_exception_stack_size(0);
 
   // Check if the exception PC is a MethodHandle call site.
   thread->set_is_method_handle_return(nm->is_method_handle_return(pc));
@@ -522,25 +522,6 @@ static void forte_fill_call_trace_given_top(JavaThread* thd,
 extern "C" {
 JNIEXPORT
 void AsyncGetCallTrace(ASGCT_CallTrace *trace, jint depth, void* ucontext) {
-
-  // This is if'd out because we no longer use thread suspension.
-  // However if someone wanted to backport this to a 5.0 jvm then this
-  // code would be important.
-#if 0
-  if (SafepointSynchronize::is_synchronizing()) {
-    // The safepoint mechanism is trying to synchronize all the threads.
-    // Since this can involve thread suspension, it is not safe for us
-    // to be here. We can reduce the deadlock risk window by quickly
-    // returning to the SIGPROF handler. However, it is still possible
-    // for VMThread to catch us here or in the SIGPROF handler. If we
-    // are suspended while holding a resource and another thread blocks
-    // on that resource in the SIGPROF handler, then we will have a
-    // three-thread deadlock (VMThread, this thread, the other thread).
-    trace->num_frames = ticks_safepoint; // -10
-    return;
-  }
-#endif
-
   JavaThread* thread;
 
   if (trace->env_id == NULL ||
@@ -1272,7 +1272,6 @@ void JavaThread::initialize() {
   _exception_oop = NULL;
   _exception_pc  = 0;
   _exception_handler_pc = 0;
-  _exception_stack_size = 0;
   _is_method_handle_return = 0;
   _jvmti_thread_state= NULL;
   _should_post_on_exceptions_flag = JNI_FALSE;
@@ -841,7 +841,6 @@ class JavaThread: public Thread {
   volatile oop     _exception_oop;           // Exception thrown in compiled code
   volatile address _exception_pc;            // PC where exception happened
   volatile address _exception_handler_pc;    // PC for handler of exception
-  volatile int     _exception_stack_size;    // Size of frame where exception happened
   volatile int     _is_method_handle_return; // true (== 1) if the current exception PC is a MethodHandle call site.
 
   // support for compilation
@@ -1182,7 +1181,6 @@ class JavaThread: public Thread {
 
   // Exception handling for compiled methods
   oop      exception_oop() const             { return _exception_oop; }
-  int      exception_stack_size() const      { return _exception_stack_size; }
   address  exception_pc() const              { return _exception_pc; }
   address  exception_handler_pc() const      { return _exception_handler_pc; }
   bool     is_method_handle_return() const   { return _is_method_handle_return == 1; }
@@ -1190,7 +1188,6 @@ class JavaThread: public Thread {
   void set_exception_oop(oop o)              { _exception_oop = o; }
   void set_exception_pc(address a)           { _exception_pc = a; }
   void set_exception_handler_pc(address a)   { _exception_handler_pc = a; }
-  void set_exception_stack_size(int size)    { _exception_stack_size = size; }
   void set_is_method_handle_return(bool value) { _is_method_handle_return = value ? 1 : 0; }
 
   // Stack overflow support
@@ -1264,7 +1261,6 @@ class JavaThread: public Thread {
   static ByteSize exception_oop_offset()          { return byte_offset_of(JavaThread, _exception_oop); }
   static ByteSize exception_pc_offset()           { return byte_offset_of(JavaThread, _exception_pc); }
   static ByteSize exception_handler_pc_offset()   { return byte_offset_of(JavaThread, _exception_handler_pc); }
-  static ByteSize exception_stack_size_offset()   { return byte_offset_of(JavaThread, _exception_stack_size); }
   static ByteSize is_method_handle_return_offset() { return byte_offset_of(JavaThread, _is_method_handle_return); }
   static ByteSize stack_guard_state_offset()      { return byte_offset_of(JavaThread, _stack_guard_state); }
   static ByteSize suspend_flags_offset()          { return byte_offset_of(JavaThread, _suspend_flags); }
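Note: _exception_stack_size was write-only state. The OptoRuntime hunk above shows the only store (always zero), and nothing consumed the value, so the commit strips all five touch points together: field declaration, initialization, getter, setter, and ByteSize offset accessor. A miniature of those touch points; JavaThreadModel is a stand-in, not the real JavaThread:

    #include <cstddef>
    #include <cstdio>

    struct JavaThreadModel {
      int _exception_stack_size;                                            // 1. field
      JavaThreadModel() : _exception_stack_size(0) {}                       // 2. init
      int  exception_stack_size() const { return _exception_stack_size; }  // 3. getter
      void set_exception_stack_size(int s) { _exception_stack_size = s; }  // 4. setter
      static size_t exception_stack_size_offset() {                        // 5. offset
        return offsetof(JavaThreadModel, _exception_stack_size);
      }
    };

    int main() {
      JavaThreadModel t;
      t.set_exception_stack_size(0);   // the only kind of store the VM ever made
      printf("offset = %zu\n", JavaThreadModel::exception_stack_size_offset());
      return 0;
    }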