8233343: Deprecate -XX:+CriticalJNINatives flag which implements JavaCritical native functions
Reviewed-by: rehn, mdoerr, zgu
parent 615b759edd
commit 56ea490f1e
src/hotspot
cpu
aarch64
ppc
s390
x86
share
test/hotspot/jtreg/gc
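For context on the feature being deprecated: when -XX:+CriticalJNINatives is on, the VM looks up an extra entry point named JavaCritical_<mangled name> alongside the usual Java_<mangled name> one. A minimal sketch for a hypothetical `static native int sum(byte[] data);` method follows (package, class, and function names are illustrative, not taken from this commit); the critical variant drops the JNIEnv* and jclass parameters, and each primitive-array argument arrives as a (length, body-pointer) pair, which is what the unpack_array_argument() stubs in the diff below arrange:

#include <jni.h>

/* Regular JNI entry point for: static native int sum(byte[] data);
 * (the "demo" package and "Sums" class are hypothetical). */
JNIEXPORT jint JNICALL
Java_demo_Sums_sum(JNIEnv *env, jclass cls, jbyteArray data) {
  jint len = (*env)->GetArrayLength(env, data);
  jbyte *body = (*env)->GetPrimitiveArrayCritical(env, data, NULL);
  jint s = 0;
  for (jint i = 0; i < len; i++) s += body[i];
  (*env)->ReleasePrimitiveArrayCritical(env, data, body, JNI_ABORT);
  return s;
}

/* Critical counterpart preferred by the VM under -XX:+CriticalJNINatives:
 * no JNIEnv* or jclass, and the array becomes a (length, body) pair. */
JNIEXPORT jint JNICALL
JavaCritical_demo_Sums_sum(jint len, jbyte *body) {
  jint s = 0;
  for (jint i = 0; i < len; i++) s += body[i];
  return s;
}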
src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp

@@ -1081,20 +1081,6 @@ static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMR
  }
}

// Check GCLocker::needs_gc and enter the runtime if it's true. This
// keeps a new JNI critical region from starting until a GC has been
// forced. Save down any oops in registers and describe them in an
// OopMap.
static void check_needs_gc_for_critical_native(MacroAssembler* masm,
                                               int stack_slots,
                                               int total_c_args,
                                               int total_in_args,
                                               int arg_save_area,
                                               OopMapSet* oop_maps,
                                               VMRegPair* in_regs,
                                               BasicType* in_sig_bt) { Unimplemented(); }

// Unpack an array argument into a pointer to the body and the length
// if the array is non-null, otherwise pass 0 for both.
static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) { Unimplemented(); }
@@ -1260,25 +1246,12 @@ static void gen_special_dispatch(MacroAssembler* masm,
// Critical native functions are a shorthand for the use of
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions. The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
// native call to ensure that the GCLocker
// lock_critical/unlock_critical semantics are followed. Some other
// parts of JNI setup are skipped like the tear down of the JNI handle
// passing them to the callee. Critical native functions leave the state _in_Java,
// since they block out GC.
// Some other parts of JNI setup are skipped like the tear down of the JNI handle
// block and the check for pending exceptions because it's impossible for them
// to be thrown.
//
// They are roughly structured like this:
//   if (GCLocker::needs_gc())
//     SharedRuntime::block_for_jni_critical();
//   transition to thread_in_native
//   unpack array arguments and call native entry point
//   check for safepoint in progress
//   check if any thread suspend flags are set
//     call into JVM and possibly unlock the JNI critical
//     if a GC was suppressed while in the critical native.
//   transition back to thread_in_Java
//   return to caller
//
nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                const methodHandle& method,
                                                int compile_id,
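The outline above can be rendered as a small self-contained C model of the old protocol (every function here is a local stub standing in for VM machinery, not a real HotSpot API):

#include <stdbool.h>

/* Local stubs, purely illustrative. */
static bool gclocker_needs_gc(void)             { return false; }
static void block_for_jni_critical(void)        { /* stall until the forced GC has run */ }
static void set_thread_state(const char *state) { (void)state; }
static bool safepoint_or_suspend_pending(void)  { return false; }
static void call_into_jvm(void)                 { /* may also unlock the JNI critical region */ }
static long native_entry_point(void)            { return 0; }

/* Shape of the wrapper generated for a critical native before this change. */
static long old_critical_native_wrapper(void) {
  if (gclocker_needs_gc())
    block_for_jni_critical();           /* keep new critical regions out of a pending GC */
  set_thread_state("_thread_in_native");
  long result = native_entry_point();   /* array arguments already unpacked */
  if (safepoint_or_suspend_pending())
    call_into_jvm();
  set_thread_state("_thread_in_Java");
  return result;
}

The hunks that follow drop the front half of this protocol (the GCLocker handshake): after the change a critical native stays _thread_in_Java for its whole duration and only performs a safepoint poll and suspend-flag check once the call returns.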
@@ -1546,11 +1519,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,

  const Register oop_handle_reg = r20;

  if (is_critical_native) {
    check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args,
                                       oop_handle_offset, oop_maps, in_regs, in_sig_bt);
  }

  //
  // We immediately shuffle the arguments so that any vm call we have to
  // make from here on out (sync slow path, jvmti, etc.) we will have
@@ -1823,12 +1791,12 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
  // get JNIEnv* which is first argument to native
  if (!is_critical_native) {
    __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));
  }

  // Now set thread in native
  __ mov(rscratch1, _thread_in_native);
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);
    // Now set thread in native
    __ mov(rscratch1, _thread_in_native);
    __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
    __ stlrw(rscratch1, rscratch2);
  }

  rt_call(masm, native_func);
@@ -1856,6 +1824,21 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
    default : ShouldNotReachHere();
  }

  Label safepoint_in_progress, safepoint_in_progress_done;
  Label after_transition;

  // If this is a critical native, check for a safepoint or suspend request after the call.
  // If a safepoint is needed, transition to native, then to native_trans to handle
  // safepoints like the native methods that are not critical natives.
  if (is_critical_native) {
    Label needs_safepoint;
    __ safepoint_poll(needs_safepoint, false /* at_return */, true /* acquire */, false /* in_nmethod */);
    __ ldrw(rscratch1, Address(rthread, JavaThread::suspend_flags_offset()));
    __ cbnzw(rscratch1, needs_safepoint);
    __ b(after_transition);
    __ bind(needs_safepoint);
  }

  // Switch thread to "native transition" state before reading the synchronization state.
  // This additional state is necessary because reading and testing the synchronization
  // state is not atomic w.r.t. GC, as this scenario demonstrates:
@@ -1876,7 +1859,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
  }

  // check for safepoint operation in progress and/or pending suspend requests
  Label safepoint_in_progress, safepoint_in_progress_done;
  {
    // We need an acquire here to ensure that any subsequent load of the
    // global SafepointSynchronize::_state flag is ordered after this load
@@ -1894,7 +1876,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
  }

  // change thread state
  Label after_transition;
  __ mov(rscratch1, _thread_in_Java);
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);
@@ -2099,22 +2080,12 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
#ifndef PRODUCT
    assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
#endif
    if (!is_critical_native) {
      __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
    } else {
      __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition)));
    }
    __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
    __ blr(rscratch1);
    __ maybe_isb();
    // Restore any method result value
    restore_native_result(masm, ret_type, stack_slots);

    if (is_critical_native) {
      // The call above performed the transition to thread_in_Java so
      // skip the transition logic above.
      __ b(after_transition);
    }

    __ b(safepoint_in_progress_done);
    __ block_comment("} safepoint");
  }
@@ -2163,12 +2134,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                            in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
                                            oop_maps);

  if (is_critical_native) {
    nm->set_lazy_critical_native(true);
  }

  return nm;

}

// this function returns the adjust size (in number of words) to a c2i adapter
src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp

@@ -1530,156 +1530,6 @@ void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_ty
  }
}

static void save_or_restore_arguments(MacroAssembler* masm,
                                      const int stack_slots,
                                      const int total_in_args,
                                      const int arg_save_area,
                                      OopMap* map,
                                      VMRegPair* in_regs,
                                      BasicType* in_sig_bt) {
  // If map is non-NULL then the code should store the values,
  // otherwise it should load them.
  int slot = arg_save_area;
  // Save down double word first.
  for (int i = 0; i < total_in_args; i++) {
    if (in_regs[i].first()->is_FloatRegister() && in_sig_bt[i] == T_DOUBLE) {
      int offset = slot * VMRegImpl::stack_slot_size;
      slot += VMRegImpl::slots_per_word;
      assert(slot <= stack_slots, "overflow (after DOUBLE stack slot)");
      if (map != NULL) {
        __ stfd(in_regs[i].first()->as_FloatRegister(), offset, R1_SP);
      } else {
        __ lfd(in_regs[i].first()->as_FloatRegister(), offset, R1_SP);
      }
    } else if (in_regs[i].first()->is_Register() &&
               (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_ARRAY)) {
      int offset = slot * VMRegImpl::stack_slot_size;
      if (map != NULL) {
        __ std(in_regs[i].first()->as_Register(), offset, R1_SP);
        if (in_sig_bt[i] == T_ARRAY) {
          map->set_oop(VMRegImpl::stack2reg(slot));
        }
      } else {
        __ ld(in_regs[i].first()->as_Register(), offset, R1_SP);
      }
      slot += VMRegImpl::slots_per_word;
      assert(slot <= stack_slots, "overflow (after LONG/ARRAY stack slot)");
    }
  }
  // Save or restore single word registers.
  for (int i = 0; i < total_in_args; i++) {
    if (in_regs[i].first()->is_Register()) {
      int offset = slot * VMRegImpl::stack_slot_size;
      // Value lives in an input register. Save it on stack.
      switch (in_sig_bt[i]) {
        case T_BOOLEAN:
        case T_CHAR:
        case T_BYTE:
        case T_SHORT:
        case T_INT:
          if (map != NULL) {
            __ stw(in_regs[i].first()->as_Register(), offset, R1_SP);
          } else {
            __ lwa(in_regs[i].first()->as_Register(), offset, R1_SP);
          }
          slot++;
          assert(slot <= stack_slots, "overflow (after INT or smaller stack slot)");
          break;
        case T_ARRAY:
        case T_LONG:
          // handled above
          break;
        case T_OBJECT:
        default: ShouldNotReachHere();
      }
    } else if (in_regs[i].first()->is_FloatRegister()) {
      if (in_sig_bt[i] == T_FLOAT) {
        int offset = slot * VMRegImpl::stack_slot_size;
        slot++;
        assert(slot <= stack_slots, "overflow (after FLOAT stack slot)");
        if (map != NULL) {
          __ stfs(in_regs[i].first()->as_FloatRegister(), offset, R1_SP);
        } else {
          __ lfs(in_regs[i].first()->as_FloatRegister(), offset, R1_SP);
        }
      }
    } else if (in_regs[i].first()->is_stack()) {
      if (in_sig_bt[i] == T_ARRAY && map != NULL) {
        int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
        map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
      }
    }
  }
}

// Check GCLocker::needs_gc and enter the runtime if it's true. This
// keeps a new JNI critical region from starting until a GC has been
// forced. Save down any oops in registers and describe them in an
// OopMap.
static void check_needs_gc_for_critical_native(MacroAssembler* masm,
                                               const int stack_slots,
                                               const int total_in_args,
                                               const int arg_save_area,
                                               OopMapSet* oop_maps,
                                               VMRegPair* in_regs,
                                               BasicType* in_sig_bt,
                                               Register tmp_reg ) {
  __ block_comment("check GCLocker::needs_gc");
  Label cont;
  __ lbz(tmp_reg, (RegisterOrConstant)(intptr_t)GCLocker::needs_gc_address());
  __ cmplwi(CCR0, tmp_reg, 0);
  __ beq(CCR0, cont);

  // Save down any values that are live in registers and call into the
  // runtime to halt for a GC.
  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, map, in_regs, in_sig_bt);

  __ mr(R3_ARG1, R16_thread);
  __ set_last_Java_frame(R1_SP, noreg);

  __ block_comment("block_for_jni_critical");
  address entry_point = CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical);
#if defined(ABI_ELFv2)
  __ call_c(entry_point, relocInfo::runtime_call_type);
#else
  __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, entry_point), relocInfo::runtime_call_type);
#endif
  address start = __ pc() - __ offset(),
          calls_return_pc = __ last_calls_return_pc();
  oop_maps->add_gc_map(calls_return_pc - start, map);

  __ reset_last_Java_frame();

  // Reload all the register arguments.
  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, NULL, in_regs, in_sig_bt);

  __ BIND(cont);

#ifdef ASSERT
  if (StressCriticalJNINatives) {
    // Stress register saving.
    OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, map, in_regs, in_sig_bt);
    // Destroy argument registers.
    for (int i = 0; i < total_in_args; i++) {
      if (in_regs[i].first()->is_Register()) {
        const Register reg = in_regs[i].first()->as_Register();
        __ neg(reg, reg);
      } else if (in_regs[i].first()->is_FloatRegister()) {
        __ fneg(in_regs[i].first()->as_FloatRegister(), in_regs[i].first()->as_FloatRegister());
      }
    }

    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, NULL, in_regs, in_sig_bt);
  }
#endif
}

static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst, Register r_caller_sp, Register r_temp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
@@ -1821,25 +1671,12 @@ static void gen_special_dispatch(MacroAssembler* masm,
// Critical native functions are a shorthand for the use of
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions. The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
// native call to ensure that the GCLocker
// lock_critical/unlock_critical semantics are followed. Some other
// parts of JNI setup are skipped like the tear down of the JNI handle
// passing them to the callee. Critical native functions leave the state _in_Java,
// since they cannot stop for GC.
// Some other parts of JNI setup are skipped like the tear down of the JNI handle
// block and the check for pending exceptions because it's impossible for them
// to be thrown.
//
// They are roughly structured like this:
//   if (GCLocker::needs_gc())
//     SharedRuntime::block_for_jni_critical();
//   transition to thread_in_native
//   unpack array arguments and call native entry point
//   check for safepoint in progress
//   check if any thread suspend flags are set
//     call into JVM and possibly unlock the JNI critical
//     if a GC was suppressed while in the critical native.
//   transition back to thread_in_Java
//   return to caller
//
nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
                                                const methodHandle& method,
                                                int compile_id,
@@ -2146,11 +1983,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
  OopMapSet *oop_maps = new OopMapSet();
  OopMap *oop_map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);

  if (is_critical_native) {
    check_needs_gc_for_critical_native(masm, stack_slots, total_in_args, oop_handle_slot_offset,
                                       oop_maps, in_regs, in_sig_bt, r_temp_1);
  }

  // Move arguments from register/stack to register/stack.
  // --------------------------------------------------------------------------
  //
@@ -2351,18 +2183,19 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
    __ bind(locked);
  }

  // Publish thread state
  // --------------------------------------------------------------------------

  // Use that pc we placed in r_return_pc a while back as the current frame anchor.
  __ set_last_Java_frame(R1_SP, r_return_pc);

  // Transition from _thread_in_Java to _thread_in_native.
  __ li(R0, _thread_in_native);
  __ release();
  // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
  __ stw(R0, thread_(thread_state));
  if (!is_critical_native) {
    // Publish thread state
    // --------------------------------------------------------------------------

    // Transition from _thread_in_Java to _thread_in_native.
    __ li(R0, _thread_in_native);
    __ release();
    // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
    __ stw(R0, thread_(thread_state));
  }

  // The JNI call
@@ -2422,6 +2255,22 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
      break;
  }

  Label after_transition;

  // If this is a critical native, check for a safepoint or suspend request after the call.
  // If a safepoint is needed, transition to native, then to native_trans to handle
  // safepoints like the native methods that are not critical natives.
  if (is_critical_native) {
    Label needs_safepoint;
    Register sync_state = r_temp_5;
    __ safepoint_poll(needs_safepoint, sync_state);

    Register suspend_flags = r_temp_6;
    __ lwz(suspend_flags, thread_(suspend_flags));
    __ cmpwi(CCR1, suspend_flags, 0);
    __ beq(CCR1, after_transition);
    __ bind(needs_safepoint);
  }

  // Publish thread state
  // --------------------------------------------------------------------------
@@ -2449,7 +2298,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,

  // Block, if necessary, before resuming in _thread_in_Java state.
  // In order for GC to work, don't clear the last_Java_sp until after blocking.
  Label after_transition;
  {
    Label no_block, sync;
@@ -2477,32 +2325,28 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
    __ bind(sync);
    __ isync();

    address entry_point = is_critical_native
                          ? CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition)
                          : CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans);
    address entry_point =
      CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans);
    save_native_result(masm, ret_type, workspace_slot_offset);
    __ call_VM_leaf(entry_point, R16_thread);
    restore_native_result(masm, ret_type, workspace_slot_offset);

    if (is_critical_native) {
      __ b(after_transition); // No thread state transition here.
    }
    __ bind(no_block);

    // Publish thread state.
    // --------------------------------------------------------------------------

    // Thread state is thread_in_native_trans. Any safepoint blocking has
    // already happened so we can now change state to _thread_in_Java.

    // Transition from _thread_in_native_trans to _thread_in_Java.
    __ li(R0, _thread_in_Java);
    __ lwsync(); // Acquire safepoint and suspend state, release thread state.
    // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
    __ stw(R0, thread_(thread_state));
    __ bind(after_transition);
  }

  // Publish thread state.
  // --------------------------------------------------------------------------

  // Thread state is thread_in_native_trans. Any safepoint blocking has
  // already happened so we can now change state to _thread_in_Java.

  // Transition from _thread_in_native_trans to _thread_in_Java.
  __ li(R0, _thread_in_Java);
  __ lwsync(); // Acquire safepoint and suspend state, release thread state.
  // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
  __ stw(R0, thread_(thread_state));
  __ bind(after_transition);

  // Reguard any pages if necessary.
  // --------------------------------------------------------------------------
@@ -2658,10 +2502,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
                                            in_ByteSize(lock_offset),
                                            oop_maps);

  if (is_critical_native) {
    nm->set_lazy_critical_native(true);
  }

  return nm;
}
src/hotspot/cpu/s390/sharedRuntime_s390.cpp

@@ -1285,163 +1285,6 @@ static void move32_64(MacroAssembler *masm,
  }
}

static void save_or_restore_arguments(MacroAssembler *masm,
                                      const int stack_slots,
                                      const int total_in_args,
                                      const int arg_save_area,
                                      OopMap *map,
                                      VMRegPair *in_regs,
                                      BasicType *in_sig_bt) {

  // If map is non-NULL then the code should store the values,
  // otherwise it should load them.
  int slot = arg_save_area;
  // Handle double words first.
  for (int i = 0; i < total_in_args; i++) {
    if (in_regs[i].first()->is_FloatRegister() && in_sig_bt[i] == T_DOUBLE) {
      int offset = slot * VMRegImpl::stack_slot_size;
      slot += VMRegImpl::slots_per_word;
      assert(slot <= stack_slots, "overflow (after DOUBLE stack slot)");
      const FloatRegister freg = in_regs[i].first()->as_FloatRegister();
      Address stackaddr(Z_SP, offset);
      if (map != NULL) {
        __ freg2mem_opt(freg, stackaddr);
      } else {
        __ mem2freg_opt(freg, stackaddr);
      }
    } else if (in_regs[i].first()->is_Register() &&
               (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_ARRAY)) {
      int offset = slot * VMRegImpl::stack_slot_size;
      const Register reg = in_regs[i].first()->as_Register();
      if (map != NULL) {
        __ z_stg(reg, offset, Z_SP);
        if (in_sig_bt[i] == T_ARRAY) {
          map->set_oop(VMRegImpl::stack2reg(slot));
        }
      } else {
        __ z_lg(reg, offset, Z_SP);
      }
      slot += VMRegImpl::slots_per_word;
      assert(slot <= stack_slots, "overflow (after LONG/ARRAY stack slot)");
    }
  }

  // Save or restore single word registers.
  for (int i = 0; i < total_in_args; i++) {
    if (in_regs[i].first()->is_Register()) {
      int offset = slot * VMRegImpl::stack_slot_size;
      // Value lives in an input register. Save it on stack.
      switch (in_sig_bt[i]) {
        case T_BOOLEAN:
        case T_CHAR:
        case T_BYTE:
        case T_SHORT:
        case T_INT: {
          const Register reg = in_regs[i].first()->as_Register();
          Address stackaddr(Z_SP, offset);
          if (map != NULL) {
            __ z_st(reg, stackaddr);
          } else {
            __ z_lgf(reg, stackaddr);
          }
          slot++;
          assert(slot <= stack_slots, "overflow (after INT or smaller stack slot)");
          break;
        }
        case T_ARRAY:
        case T_LONG:
          // handled above
          break;
        case T_OBJECT:
        default: ShouldNotReachHere();
      }
    } else if (in_regs[i].first()->is_FloatRegister()) {
      if (in_sig_bt[i] == T_FLOAT) {
        int offset = slot * VMRegImpl::stack_slot_size;
        slot++;
        assert(slot <= stack_slots, "overflow (after FLOAT stack slot)");
        const FloatRegister freg = in_regs[i].first()->as_FloatRegister();
        Address stackaddr(Z_SP, offset);
        if (map != NULL) {
          __ freg2mem_opt(freg, stackaddr, false);
        } else {
          __ mem2freg_opt(freg, stackaddr, false);
        }
      }
    } else if (in_regs[i].first()->is_stack() &&
               in_sig_bt[i] == T_ARRAY && map != NULL) {
      int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
      map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
    }
  }
}

// Check GCLocker::needs_gc and enter the runtime if it's true. This
// keeps a new JNI critical region from starting until a GC has been
// forced. Save down any oops in registers and describe them in an OopMap.
static void check_needs_gc_for_critical_native(MacroAssembler *masm,
                                               const int stack_slots,
                                               const int total_in_args,
                                               const int arg_save_area,
                                               OopMapSet *oop_maps,
                                               VMRegPair *in_regs,
                                               BasicType *in_sig_bt) {
  __ block_comment("check GCLocker::needs_gc");
  Label cont;

  // Check GCLocker::_needs_gc flag.
  __ load_const_optimized(Z_R1_scratch, (long) GCLocker::needs_gc_address());
  __ z_cli(0, Z_R1_scratch, 0);
  __ z_bre(cont);

  // Save down any values that are live in registers and call into the
  // runtime to halt for a GC.
  OopMap *map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);

  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, map, in_regs, in_sig_bt);
  address the_pc = __ pc();
  __ set_last_Java_frame(Z_SP, noreg);

  __ block_comment("block_for_jni_critical");
  __ z_lgr(Z_ARG1, Z_thread);

  address entry_point = CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical);
  __ call_c(entry_point);
  oop_maps->add_gc_map(__ offset(), map);

  __ reset_last_Java_frame();

  // Reload all the register arguments.
  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, NULL, in_regs, in_sig_bt);

  __ bind(cont);

  if (StressCriticalJNINatives) {
    // Stress register saving
    OopMap *map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, map, in_regs, in_sig_bt);

    // Destroy argument registers.
    for (int i = 0; i < total_in_args; i++) {
      if (in_regs[i].first()->is_Register()) {
        // Don't set CC.
        __ clear_reg(in_regs[i].first()->as_Register(), true, false);
      } else {
        if (in_regs[i].first()->is_FloatRegister()) {
          FloatRegister fr = in_regs[i].first()->as_FloatRegister();
          __ z_lcdbr(fr, fr);
        }
      }
    }

    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, NULL, in_regs, in_sig_bt);
  }
}

static void move_ptr(MacroAssembler *masm,
                     VMRegPair src,
                     VMRegPair dst,
@@ -1858,12 +1701,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
  OopMapSet *oop_maps = new OopMapSet();
  OopMap *map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);

  if (is_critical_native) {
    check_needs_gc_for_critical_native(masm, stack_slots, total_in_args,
                                       oop_handle_slot_offset, oop_maps, in_regs, in_sig_bt);
  }

  //////////////////////////////////////////////////////////////////////
  //
  // The Grand Shuffle
@@ -2092,9 +1929,10 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
  // Use that pc we placed in Z_R10 a while back as the current frame anchor.
  __ set_last_Java_frame(Z_SP, Z_R10);

  // Transition from _thread_in_Java to _thread_in_native.
  __ set_thread_state(_thread_in_native);

  if (!is_critical_native) {
    // Transition from _thread_in_Java to _thread_in_native.
    __ set_thread_state(_thread_in_native);
  }

  //////////////////////////////////////////////////////////////////////
  // This is the JNI call.
@@ -2140,6 +1978,19 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
      break;
  }

  Label after_transition;

  // If this is a critical native, check for a safepoint or suspend request after the call.
  // If a safepoint is needed, transition to native, then to native_trans to handle
  // safepoints like the native methods that are not critical natives.
  if (is_critical_native) {
    Label needs_safepoint;
    // Does this need to save_native_result and fences?
    __ safepoint_poll(needs_safepoint, Z_R1);
    __ load_and_test_int(Z_R0, Address(Z_thread, JavaThread::suspend_flags_offset()));
    __ z_bre(after_transition);
    __ bind(needs_safepoint);
  }

  // Switch thread to "native transition" state before reading the synchronization state.
  // This additional state is necessary because reading and testing the synchronization
@@ -2159,7 +2010,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
  // Block, if necessary, before resuming in _thread_in_Java state.
  // In order for GC to work, don't clear the last_Java_sp until after blocking.
  //--------------------------------------------------------------------
  Label after_transition;
  {
    Label no_block, sync;
@@ -2181,15 +2031,10 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
    __ bind(sync);
    __ z_acquire();

    address entry_point = is_critical_native ? CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition)
                                             : CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans);
    address entry_point = CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans);

    __ call_VM_leaf(entry_point, Z_thread);

    if (is_critical_native) {
      restore_native_result(masm, ret_type, workspace_slot_offset);
      __ z_bru(after_transition); // No thread state transition here.
    }
    __ bind(no_block);
    restore_native_result(masm, ret_type, workspace_slot_offset);
  }
@@ -2202,7 +2047,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
  __ set_thread_state(_thread_in_Java);
  __ bind(after_transition);

  //--------------------------------------------------------------------
  // Reguard any pages if necessary.
  // Protect native result from being destroyed.
@@ -2385,10 +2229,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
                                            in_ByteSize(lock_offset),
                                            oop_maps);

  if (is_critical_native) {
    nm->set_lazy_critical_native(true);
  }

  return nm;
}
src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp

@@ -1214,265 +1214,6 @@ void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_ty
  }
}

static void save_or_restore_arguments(MacroAssembler* masm,
                                      const int stack_slots,
                                      const int total_in_args,
                                      const int arg_save_area,
                                      OopMap* map,
                                      VMRegPair* in_regs,
                                      BasicType* in_sig_bt) {
  // if map is non-NULL then the code should store the values,
  // otherwise it should load them.
  int handle_index = 0;
  // Save down double word first
  for ( int i = 0; i < total_in_args; i++) {
    if (in_regs[i].first()->is_XMMRegister() && in_sig_bt[i] == T_DOUBLE) {
      int slot = handle_index * VMRegImpl::slots_per_word + arg_save_area;
      int offset = slot * VMRegImpl::stack_slot_size;
      handle_index += 2;
      assert(handle_index <= stack_slots, "overflow");
      if (map != NULL) {
        __ movdbl(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
      } else {
        __ movdbl(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
      }
    }
    if (in_regs[i].first()->is_Register() && in_sig_bt[i] == T_LONG) {
      int slot = handle_index * VMRegImpl::slots_per_word + arg_save_area;
      int offset = slot * VMRegImpl::stack_slot_size;
      handle_index += 2;
      assert(handle_index <= stack_slots, "overflow");
      if (map != NULL) {
        __ movl(Address(rsp, offset), in_regs[i].first()->as_Register());
        if (in_regs[i].second()->is_Register()) {
          __ movl(Address(rsp, offset + 4), in_regs[i].second()->as_Register());
        }
      } else {
        __ movl(in_regs[i].first()->as_Register(), Address(rsp, offset));
        if (in_regs[i].second()->is_Register()) {
          __ movl(in_regs[i].second()->as_Register(), Address(rsp, offset + 4));
        }
      }
    }
  }
  // Save or restore single word registers
  for ( int i = 0; i < total_in_args; i++) {
    if (in_regs[i].first()->is_Register()) {
      int slot = handle_index++ * VMRegImpl::slots_per_word + arg_save_area;
      int offset = slot * VMRegImpl::stack_slot_size;
      assert(handle_index <= stack_slots, "overflow");
      if (in_sig_bt[i] == T_ARRAY && map != NULL) {
        map->set_oop(VMRegImpl::stack2reg(slot));
      }

      // Value is in an input register; we must flush it to the stack.
      const Register reg = in_regs[i].first()->as_Register();
      switch (in_sig_bt[i]) {
        case T_ARRAY:
          if (map != NULL) {
            __ movptr(Address(rsp, offset), reg);
          } else {
            __ movptr(reg, Address(rsp, offset));
          }
          break;
        case T_BOOLEAN:
        case T_CHAR:
        case T_BYTE:
        case T_SHORT:
        case T_INT:
          if (map != NULL) {
            __ movl(Address(rsp, offset), reg);
          } else {
            __ movl(reg, Address(rsp, offset));
          }
          break;
        case T_OBJECT:
        default: ShouldNotReachHere();
      }
    } else if (in_regs[i].first()->is_XMMRegister()) {
      if (in_sig_bt[i] == T_FLOAT) {
        int slot = handle_index++ * VMRegImpl::slots_per_word + arg_save_area;
        int offset = slot * VMRegImpl::stack_slot_size;
        assert(handle_index <= stack_slots, "overflow");
        if (map != NULL) {
          __ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
        } else {
          __ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
        }
      }
    } else if (in_regs[i].first()->is_stack()) {
      if (in_sig_bt[i] == T_ARRAY && map != NULL) {
        int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
        map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
      }
    }
  }
}

// Registers need to be saved for runtime call
static Register caller_saved_registers[] = {
  rcx, rdx, rsi, rdi
};

// Save caller saved registers except r1 and r2
static void save_registers_except(MacroAssembler* masm, Register r1, Register r2) {
  int reg_len = (int)(sizeof(caller_saved_registers) / sizeof(Register));
  for (int index = 0; index < reg_len; index ++) {
    Register this_reg = caller_saved_registers[index];
    if (this_reg != r1 && this_reg != r2) {
      __ push(this_reg);
    }
  }
}

// Restore caller saved registers except r1 and r2
static void restore_registers_except(MacroAssembler* masm, Register r1, Register r2) {
  int reg_len = (int)(sizeof(caller_saved_registers) / sizeof(Register));
  for (int index = reg_len - 1; index >= 0; index --) {
    Register this_reg = caller_saved_registers[index];
    if (this_reg != r1 && this_reg != r2) {
      __ pop(this_reg);
    }
  }
}

// Pin object, return pinned object or null in rax
static void gen_pin_object(MacroAssembler* masm,
                           Register thread, VMRegPair reg) {
  __ block_comment("gen_pin_object {");

  Label is_null;
  Register tmp_reg = rax;
  VMRegPair tmp(tmp_reg->as_VMReg());
  if (reg.first()->is_stack()) {
    // Load the arg up from the stack
    simple_move32(masm, reg, tmp);
    reg = tmp;
  } else {
    __ movl(tmp_reg, reg.first()->as_Register());
  }
  __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
  __ jccb(Assembler::equal, is_null);

  // Save registers that may be used by runtime call
  Register arg = reg.first()->is_Register() ? reg.first()->as_Register() : noreg;
  save_registers_except(masm, arg, thread);

  __ call_VM_leaf(
    CAST_FROM_FN_PTR(address, SharedRuntime::pin_object),
    thread, reg.first()->as_Register());

  // Restore saved registers
  restore_registers_except(masm, arg, thread);

  __ bind(is_null);
  __ block_comment("} gen_pin_object");
}

// Unpin object
static void gen_unpin_object(MacroAssembler* masm,
                             Register thread, VMRegPair reg) {
  __ block_comment("gen_unpin_object {");
  Label is_null;

  // temp register
  __ push(rax);
  Register tmp_reg = rax;
  VMRegPair tmp(tmp_reg->as_VMReg());

  simple_move32(masm, reg, tmp);

  __ testptr(rax, rax);
  __ jccb(Assembler::equal, is_null);

  // Save registers that may be used by runtime call
  Register arg = reg.first()->is_Register() ? reg.first()->as_Register() : noreg;
  save_registers_except(masm, arg, thread);

  __ call_VM_leaf(
    CAST_FROM_FN_PTR(address, SharedRuntime::unpin_object),
    thread, rax);

  // Restore saved registers
  restore_registers_except(masm, arg, thread);
  __ bind(is_null);
  __ pop(rax);
  __ block_comment("} gen_unpin_object");
}

// Check GCLocker::needs_gc and enter the runtime if it's true. This
// keeps a new JNI critical region from starting until a GC has been
// forced. Save down any oops in registers and describe them in an
// OopMap.
static void check_needs_gc_for_critical_native(MacroAssembler* masm,
                                               Register thread,
                                               int stack_slots,
                                               int total_c_args,
                                               int total_in_args,
                                               int arg_save_area,
                                               OopMapSet* oop_maps,
                                               VMRegPair* in_regs,
                                               BasicType* in_sig_bt) {
  __ block_comment("check GCLocker::needs_gc");
  Label cont;
  __ cmp8(ExternalAddress((address)GCLocker::needs_gc_address()), false);
  __ jcc(Assembler::equal, cont);

  // Save down any incoming oops and call into the runtime to halt for a GC

  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);

  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, map, in_regs, in_sig_bt);

  address the_pc = __ pc();
  oop_maps->add_gc_map( __ offset(), map);
  __ set_last_Java_frame(thread, rsp, noreg, the_pc);

  __ block_comment("block_for_jni_critical");
  __ push(thread);
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical)));
  __ increment(rsp, wordSize);

  __ get_thread(thread);
  __ reset_last_Java_frame(thread, false);

  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, NULL, in_regs, in_sig_bt);

  __ bind(cont);
#ifdef ASSERT
  if (StressCriticalJNINatives) {
    // Stress register saving
    OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, map, in_regs, in_sig_bt);
    // Destroy argument registers
    for (int i = 0; i < total_in_args - 1; i++) {
      if (in_regs[i].first()->is_Register()) {
        const Register reg = in_regs[i].first()->as_Register();
        __ xorptr(reg, reg);
      } else if (in_regs[i].first()->is_XMMRegister()) {
        __ xorpd(in_regs[i].first()->as_XMMRegister(), in_regs[i].first()->as_XMMRegister());
      } else if (in_regs[i].first()->is_FloatRegister()) {
        ShouldNotReachHere();
      } else if (in_regs[i].first()->is_stack()) {
        // Nothing to do
      } else {
        ShouldNotReachHere();
      }
      if (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_DOUBLE) {
        i++;
      }
    }

    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, NULL, in_regs, in_sig_bt);
  }
#endif
}

// Unpack an array argument into a pointer to the body and the length
// if the array is non-null, otherwise pass 0 for both.
static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
@@ -1597,24 +1338,12 @@ static void gen_special_dispatch(MacroAssembler* masm,
// Critical native functions are a shorthand for the use of
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions. The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
// native call to ensure that the GCLocker
// lock_critical/unlock_critical semantics are followed. Some other
// parts of JNI setup are skipped like the tear down of the JNI handle
// passing them to the callee. Critical native functions leave the state _in_Java,
// since they cannot stop for GC.
// Some other parts of JNI setup are skipped like the tear down of the JNI handle
// block and the check for pending exceptions because it's impossible for them
// to be thrown.
//
// They are roughly structured like this:
//   if (GCLocker::needs_gc())
//     SharedRuntime::block_for_jni_critical();
//   transition to thread_in_native
//   unpack array arguments and call native entry point
//   check for safepoint in progress
//   check if any thread suspend flags are set
//     call into JVM and possibly unlock the JNI critical
//     if a GC was suppressed while in the critical native.
//   transition back to thread_in_Java
//   return to caller
//
nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                const methodHandle& method,
@@ -1926,11 +1655,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,

  __ get_thread(thread);

  if (is_critical_native && !Universe::heap()->supports_object_pinning()) {
    check_needs_gc_for_critical_native(masm, thread, stack_slots, total_c_args, total_in_args,
                                       oop_handle_offset, oop_maps, in_regs, in_sig_bt);
  }

  //
  // We immediately shuffle the arguments so that any vm call we have to
  // make from here on out (sync slow path, jvmti, etc.) we will have
@@ -1964,11 +1688,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
  //
  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);

  // Inbound arguments that need to be pinned for critical natives
  GrowableArray<int> pinned_args(total_in_args);
  // Current stack slot for storing register based array argument
  int pinned_slot = oop_handle_offset;

  // Mark location of rbp,
  // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, rbp->as_VMReg());
@@ -1981,26 +1700,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
      case T_ARRAY:
        if (is_critical_native) {
          VMRegPair in_arg = in_regs[i];
          if (Universe::heap()->supports_object_pinning()) {
            // gen_pin_object handles save and restore
            // of any clobbered registers
            gen_pin_object(masm, thread, in_arg);
            pinned_args.append(i);

            // rax has pinned array
            VMRegPair result_reg(rax->as_VMReg());
            if (!in_arg.first()->is_stack()) {
              assert(pinned_slot <= stack_slots, "overflow");
              simple_move32(masm, result_reg, VMRegImpl::stack2reg(pinned_slot));
              pinned_slot += VMRegImpl::slots_per_word;
            } else {
              // Write back pinned value, it will be used to unpin this argument
              __ movptr(Address(rbp, reg2offset_in(in_arg.first())), result_reg.first()->as_Register());
            }
            // We have the array in register, use it
            in_arg = result_reg;
          }

          unpack_array_argument(masm, in_arg, in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
          c_arg++;
          break;
@@ -2155,15 +1854,14 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,

  // Finally just about ready to make the JNI call

  // get JNIEnv* which is first argument to native
  if (!is_critical_native) {
    __ lea(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset())));
    __ movptr(Address(rsp, 0), rdx);
  }

  // Now set thread in native
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
    // Now set thread in native
    __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
  }

  __ call(RuntimeAddress(native_func));
@@ -2194,24 +1892,17 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
    default : ShouldNotReachHere();
  }

  // unpin pinned arguments
  pinned_slot = oop_handle_offset;
  if (pinned_args.length() > 0) {
    // save return value that may be overwritten otherwise.
    save_native_result(masm, ret_type, stack_slots);
    for (int index = 0; index < pinned_args.length(); index ++) {
      int i = pinned_args.at(index);
      assert(pinned_slot <= stack_slots, "overflow");
      if (!in_regs[i].first()->is_stack()) {
        int offset = pinned_slot * VMRegImpl::stack_slot_size;
        __ movl(in_regs[i].first()->as_Register(), Address(rsp, offset));
        pinned_slot += VMRegImpl::slots_per_word;
      }
      // gen_pin_object handles save and restore
      // of any other clobbered registers
      gen_unpin_object(masm, thread, in_regs[i]);
    }
    restore_native_result(masm, ret_type, stack_slots);
  }
  Label after_transition;

  // If this is a critical native, check for a safepoint or suspend request after the call.
  // If a safepoint is needed, transition to native, then to native_trans to handle
  // safepoints like the native methods that are not critical natives.
  if (is_critical_native) {
    Label needs_safepoint;
    __ safepoint_poll(needs_safepoint, thread, false /* at_return */, false /* in_nmethod */);
    __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, after_transition);
    __ bind(needs_safepoint);
  }

  // Switch thread to "native transition" state before reading the synchronization state.
@@ -2233,8 +1924,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
    __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
  }

  Label after_transition;

  // check for safepoint operation in progress and/or pending suspend requests
  { Label Continue, slow_path;
@@ -2254,23 +1943,11 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,

    save_native_result(masm, ret_type, stack_slots);
    __ push(thread);
    if (!is_critical_native) {
      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
                                            JavaThread::check_special_condition_for_native_trans)));
    } else {
      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
                                              JavaThread::check_special_condition_for_native_trans_and_transition)));
    }
    __ increment(rsp, wordSize);
    // Restore any method result value
    restore_native_result(masm, ret_type, stack_slots);

    if (is_critical_native) {
      // The call above performed the transition to thread_in_Java so
      // skip the transition logic below.
      __ jmpb(after_transition);
    }

    __ bind(Continue);
  }
@@ -2511,10 +2188,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                            in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
                                            oop_maps);

  if (is_critical_native) {
    nm->set_lazy_critical_native(true);
  }

  return nm;

}
src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp

@@ -1378,222 +1378,6 @@ static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMR
  }
}

static void save_or_restore_arguments(MacroAssembler* masm,
                                      const int stack_slots,
                                      const int total_in_args,
                                      const int arg_save_area,
                                      OopMap* map,
                                      VMRegPair* in_regs,
                                      BasicType* in_sig_bt) {
  // if map is non-NULL then the code should store the values,
  // otherwise it should load them.
  int slot = arg_save_area;
  // Save down double word first
  for ( int i = 0; i < total_in_args; i++) {
    if (in_regs[i].first()->is_XMMRegister() && in_sig_bt[i] == T_DOUBLE) {
      int offset = slot * VMRegImpl::stack_slot_size;
      slot += VMRegImpl::slots_per_word;
      assert(slot <= stack_slots, "overflow");
      if (map != NULL) {
        __ movdbl(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
      } else {
        __ movdbl(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
      }
    }
    if (in_regs[i].first()->is_Register() &&
        (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_ARRAY)) {
      int offset = slot * VMRegImpl::stack_slot_size;
      if (map != NULL) {
        __ movq(Address(rsp, offset), in_regs[i].first()->as_Register());
        if (in_sig_bt[i] == T_ARRAY) {
          map->set_oop(VMRegImpl::stack2reg(slot));
        }
      } else {
        __ movq(in_regs[i].first()->as_Register(), Address(rsp, offset));
      }
      slot += VMRegImpl::slots_per_word;
    }
  }
  // Save or restore single word registers
  for ( int i = 0; i < total_in_args; i++) {
    if (in_regs[i].first()->is_Register()) {
      int offset = slot * VMRegImpl::stack_slot_size;
      slot++;
      assert(slot <= stack_slots, "overflow");

      // Value is in an input register; we must flush it to the stack.
      const Register reg = in_regs[i].first()->as_Register();
      switch (in_sig_bt[i]) {
        case T_BOOLEAN:
        case T_CHAR:
        case T_BYTE:
        case T_SHORT:
        case T_INT:
          if (map != NULL) {
            __ movl(Address(rsp, offset), reg);
          } else {
            __ movl(reg, Address(rsp, offset));
          }
          break;
        case T_ARRAY:
        case T_LONG:
          // handled above
          break;
        case T_OBJECT:
        default: ShouldNotReachHere();
      }
    } else if (in_regs[i].first()->is_XMMRegister()) {
      if (in_sig_bt[i] == T_FLOAT) {
        int offset = slot * VMRegImpl::stack_slot_size;
        slot++;
        assert(slot <= stack_slots, "overflow");
        if (map != NULL) {
          __ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
        } else {
          __ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
        }
      }
    } else if (in_regs[i].first()->is_stack()) {
      if (in_sig_bt[i] == T_ARRAY && map != NULL) {
        int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
        map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
      }
    }
  }
}

// Pin object, return pinned object or null in rax
static void gen_pin_object(MacroAssembler* masm,
                           VMRegPair reg) {
  __ block_comment("gen_pin_object {");

  // rax always contains oop, either incoming or
  // pinned.
  Register tmp_reg = rax;

  Label is_null;
  VMRegPair tmp;
  VMRegPair in_reg = reg;

  tmp.set_ptr(tmp_reg->as_VMReg());
  if (reg.first()->is_stack()) {
    // Load the arg up from the stack
    move_ptr(masm, reg, tmp);
    reg = tmp;
  } else {
    __ movptr(rax, reg.first()->as_Register());
  }
  __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
  __ jccb(Assembler::equal, is_null);

  if (reg.first()->as_Register() != c_rarg1) {
    __ movptr(c_rarg1, reg.first()->as_Register());
  }

  __ call_VM_leaf(
    CAST_FROM_FN_PTR(address, SharedRuntime::pin_object),
    r15_thread, c_rarg1);

  __ bind(is_null);
  __ block_comment("} gen_pin_object");
}

// Unpin object
static void gen_unpin_object(MacroAssembler* masm,
                             VMRegPair reg) {
  __ block_comment("gen_unpin_object {");
  Label is_null;

  if (reg.first()->is_stack()) {
    __ movptr(c_rarg1, Address(rbp, reg2offset_in(reg.first())));
  } else if (reg.first()->as_Register() != c_rarg1) {
    __ movptr(c_rarg1, reg.first()->as_Register());
  }

  __ testptr(c_rarg1, c_rarg1);
  __ jccb(Assembler::equal, is_null);

  __ call_VM_leaf(
    CAST_FROM_FN_PTR(address, SharedRuntime::unpin_object),
    r15_thread, c_rarg1);

  __ bind(is_null);
  __ block_comment("} gen_unpin_object");
}

// Check GCLocker::needs_gc and enter the runtime if it's true. This
// keeps a new JNI critical region from starting until a GC has been
// forced. Save down any oops in registers and describe them in an
// OopMap.
static void check_needs_gc_for_critical_native(MacroAssembler* masm,
                                               int stack_slots,
                                               int total_c_args,
                                               int total_in_args,
                                               int arg_save_area,
                                               OopMapSet* oop_maps,
                                               VMRegPair* in_regs,
                                               BasicType* in_sig_bt) {
  __ block_comment("check GCLocker::needs_gc");
  Label cont;
  __ cmp8(ExternalAddress((address)GCLocker::needs_gc_address()), false);
  __ jcc(Assembler::equal, cont);

  // Save down any incoming oops and call into the runtime to halt for a GC

  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, map, in_regs, in_sig_bt);

  address the_pc = __ pc();
  oop_maps->add_gc_map( __ offset(), map);
  __ set_last_Java_frame(rsp, noreg, the_pc);

  __ block_comment("block_for_jni_critical");
  __ movptr(c_rarg0, r15_thread);
  __ mov(r12, rsp); // remember sp
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // align stack as required by ABI
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical)));
  __ mov(rsp, r12); // restore sp
  __ reinit_heapbase();

  __ reset_last_Java_frame(false);

  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, NULL, in_regs, in_sig_bt);
  __ bind(cont);
#ifdef ASSERT
  if (StressCriticalJNINatives) {
    // Stress register saving
    OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, map, in_regs, in_sig_bt);
    // Destroy argument registers
    for (int i = 0; i < total_in_args - 1; i++) {
      if (in_regs[i].first()->is_Register()) {
        const Register reg = in_regs[i].first()->as_Register();
        __ xorptr(reg, reg);
      } else if (in_regs[i].first()->is_XMMRegister()) {
        __ xorpd(in_regs[i].first()->as_XMMRegister(), in_regs[i].first()->as_XMMRegister());
      } else if (in_regs[i].first()->is_FloatRegister()) {
        ShouldNotReachHere();
      } else if (in_regs[i].first()->is_stack()) {
        // Nothing to do
      } else {
        ShouldNotReachHere();
      }
      if (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_DOUBLE) {
        i++;
      }
    }

    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, NULL, in_regs, in_sig_bt);
  }
#endif
}

// Unpack an array argument into a pointer to the body and the length
// if the array is non-null, otherwise pass 0 for both.
static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
@ -1898,25 +1682,12 @@ static void gen_special_dispatch(MacroAssembler* masm,
|
||||
// Critical native functions are a shorthand for the use of
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions. The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
// native call to ensure that the GCLocker
// lock_critical/unlock_critical semantics are followed. Some other
// parts of JNI setup are skipped like the tear down of the JNI handle
// passing them to the callee. Critical native functions leave the state _in_Java,
// since they cannot stop for GC.
// Some other parts of JNI setup are skipped like the tear down of the JNI handle
// block and the check for pending exceptions, as it's impossible for them
// to be thrown.
//
// They are roughly structured like this:
// if (GCLocker::needs_gc())
// SharedRuntime::block_for_jni_critical();
// transition to thread_in_native
// unpack array arguments and call native entry point
// check for safepoint in progress
// check if any thread suspend flags are set
// call into JVM and possibly unlock the JNI critical
// if a GC was suppressed while in the critical native.
// transition back to thread_in_Java
// return to caller
//
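An illustration may help here. Below is a minimal sketch of such a pair, assuming the JavaCritical_ naming convention this wrapper supports (package, class, and method names are made up for illustration): the critical entry point drops the JNIEnv* and jclass parameters, and each array argument arrives pre-unpacked as a (length, body) pair, with a null array passed as (0, NULL).

#include <jni.h>

// Ordinary JNI entry point; may use the full JNI API.
extern "C" JNIEXPORT jint JNICALL
Java_pkg_Klass_sum(JNIEnv* env, jclass, jintArray arr) {
  if (arr == NULL) return 0;
  jint len = env->GetArrayLength(arr);
  jint* body = env->GetIntArrayElements(arr, NULL);
  jint s = 0;
  for (jint i = 0; i < len; i++) s += body[i];
  env->ReleaseIntArrayElements(arr, body, JNI_ABORT);
  return s;
}

// Critical counterpart the VM may bind instead when CriticalJNINatives is
// enabled: no JNIEnv*, no jclass, arrays already unpacked. It must not call
// any JNI function and should not block.
extern "C" JNIEXPORT jint JNICALL
JavaCritical_pkg_Klass_sum(jint len, jint* body) {
  jint s = 0;
  for (jint i = 0; i < len; i++) s += body[i]; // body is NULL and len is 0 for a null array
  return s;
}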
nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
const methodHandle& method,
int compile_id,
@ -2217,11 +1988,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,

const Register oop_handle_reg = r14;

if (is_critical_native && !Universe::heap()->supports_object_pinning()) {
check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args,
oop_handle_offset, oop_maps, in_regs, in_sig_bt);
}

//
// We immediately shuffle the arguments so that any vm call we have to
// make from here on out (sync slow path, jvmti, etc.) we will have
@ -2274,10 +2040,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// the incoming and outgoing registers are offset upwards and for
// critical natives they are offset down.
GrowableArray<int> arg_order(2 * total_in_args);
// Inbound arguments that need to be pinned for critical natives
GrowableArray<int> pinned_args(total_in_args);
// Current stack slot for storing register based array argument
int pinned_slot = oop_handle_offset;

VMRegPair tmp_vmreg;
tmp_vmreg.set2(rbx->as_VMReg());
@ -2326,23 +2088,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
switch (in_sig_bt[i]) {
case T_ARRAY:
if (is_critical_native) {
// pin before unpack
if (Universe::heap()->supports_object_pinning()) {
save_args(masm, total_c_args, 0, out_regs);
gen_pin_object(masm, in_regs[i]);
pinned_args.append(i);
restore_args(masm, total_c_args, 0, out_regs);

// rax has pinned array
VMRegPair result_reg;
result_reg.set_ptr(rax->as_VMReg());
move_ptr(masm, result_reg, in_regs[i]);
if (!in_regs[i].first()->is_stack()) {
assert(pinned_slot <= stack_slots, "overflow");
move_ptr(masm, result_reg, VMRegImpl::stack2reg(pinned_slot));
pinned_slot += VMRegImpl::slots_per_word;
}
}
unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
c_arg++;
#ifdef ASSERT
@ -2521,17 +2266,15 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ bind(lock_done);
}

// Finally just about ready to make the JNI call

// get JNIEnv* which is first argument to native
if (!is_critical_native) {
__ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
}

// Now set thread in native
__ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
// Now set thread in native
__ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
}

__ call(RuntimeAddress(native_func));

@ -2557,22 +2300,17 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
default : ShouldNotReachHere();
}

// unpin pinned arguments
pinned_slot = oop_handle_offset;
if (pinned_args.length() > 0) {
// save return value that may be overwritten otherwise.
save_native_result(masm, ret_type, stack_slots);
for (int index = 0; index < pinned_args.length(); index ++) {
int i = pinned_args.at(index);
assert(pinned_slot <= stack_slots, "overflow");
if (!in_regs[i].first()->is_stack()) {
int offset = pinned_slot * VMRegImpl::stack_slot_size;
__ movq(in_regs[i].first()->as_Register(), Address(rsp, offset));
pinned_slot += VMRegImpl::slots_per_word;
}
gen_unpin_object(masm, in_regs[i]);
}
restore_native_result(masm, ret_type, stack_slots);
Label after_transition;

// If this is a critical native, check for a safepoint or suspend request after the call.
// If a safepoint is needed, transition to native, then to native_trans to handle
// safepoints like the native methods that are not critical natives.
if (is_critical_native) {
Label needs_safepoint;
__ safepoint_poll(needs_safepoint, r15_thread, false /* at_return */, false /* in_nmethod */);
__ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0);
__ jcc(Assembler::equal, after_transition);
__ bind(needs_safepoint);
}

// Switch thread to "native transition" state before reading the synchronization state.
@ -2589,8 +2327,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
Assembler::LoadLoad | Assembler::LoadStore |
Assembler::StoreLoad | Assembler::StoreStore));

Label after_transition;

// check for safepoint operation in progress and/or pending suspend requests
{
Label Continue;
@ -2614,22 +2350,11 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ mov(r12, rsp); // remember sp
__ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
__ andptr(rsp, -16); // align stack as required by ABI
if (!is_critical_native) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
} else {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition)));
}
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
__ mov(rsp, r12); // restore sp
__ reinit_heapbase();
// Restore any method result value
restore_native_result(masm, ret_type, stack_slots);

if (is_critical_native) {
// The call above performed the transition to thread_in_Java so
// skip the transition logic below.
__ jmpb(after_transition);
}

__ bind(Continue);
}

@ -2853,12 +2578,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
oop_maps);

if (is_critical_native) {
nm->set_lazy_critical_native(true);
}

return nm;

}

// this function returns the adjusted size (in number of words) to a c2i adapter
@ -78,7 +78,6 @@ void CompiledMethod::init_defaults() {
}
_has_unsafe_access = 0;
_has_method_handle_invokes = 0;
_lazy_critical_native = 0;
_has_wide_vectors = 0;
}

@ -157,7 +157,6 @@ protected:
// set during construction
unsigned int _has_unsafe_access:1; // May fault due to unsafe access.
unsigned int _has_method_handle_invokes:1; // Has this method MethodHandle invokes?
unsigned int _lazy_critical_native:1; // Lazy JNI critical native
unsigned int _has_wide_vectors:1; // Preserve wide vectors at safepoints

Method* _method;
@ -196,9 +195,6 @@ public:
bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }

bool is_lazy_critical_native() const { return _lazy_critical_native; }
void set_lazy_critical_native(bool z) { _lazy_critical_native = z; }

bool has_wide_vectors() const { return _has_wide_vectors; }
void set_has_wide_vectors(bool z) { _has_wide_vectors = z; }

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -149,8 +149,6 @@ class GCLocker: public AllStatic {
// is set, the slow path is always taken, till _needs_gc is cleared.
inline static void lock_critical(JavaThread* thread);
inline static void unlock_critical(JavaThread* thread);

static address needs_gc_address() { return (address) &_needs_gc; }
};

#endif // SHARE_GC_SHARED_GCLOCKER_HPP
@ -531,6 +531,7 @@ static SpecialFlag const special_jvm_flags[] = {
{ "PrintPreciseBiasedLockingStatistics", JDK_Version::jdk(15), JDK_Version::jdk(16), JDK_Version::jdk(17) },
{ "InitialBootClassLoaderMetaspaceSize", JDK_Version::jdk(15), JDK_Version::jdk(16), JDK_Version::jdk(17) },
{ "UseLargePagesInMetaspace", JDK_Version::jdk(15), JDK_Version::jdk(16), JDK_Version::jdk(17) },
{ "CriticalJNINatives", JDK_Version::jdk(16), JDK_Version::jdk(17), JDK_Version::jdk(18) },

// --- Deprecated alias flags (see also aliased_jvm_flags) - sorted by obsolete_in then expired_in:
{ "DefaultMaxRAMFraction", JDK_Version::jdk(8), JDK_Version::undefined(), JDK_Version::undefined() },
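For context, each row of special_jvm_flags names the release in which a flag is deprecated, then obsoleted, then expired. The struct behind the table looks roughly like this (a hedged paraphrase of arguments.cpp, not text from this change):

struct SpecialFlag {
  const char* name;
  JDK_Version deprecated_in; // still honored, but a warning is printed
  JDK_Version obsolete_in;   // accepted and ignored, with a warning
  JDK_Version expired_in;    // unrecognized; the VM refuses to start
};

So the new row means: CriticalJNINatives warns in JDK 16, is ignored in JDK 17, and is rejected in JDK 18.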
@ -321,11 +321,8 @@ const intx ObjectAlignmentInBytes = 8;
product(bool, InlineUnsafeOps, true, DIAGNOSTIC, \
"Inline memory ops (native methods) from Unsafe") \
\
product(bool, CriticalJNINatives, true, \
"Check for critical JNI entry points") \
\
notproduct(bool, StressCriticalJNINatives, false, \
"Exercise register saving code in critical natives") \
product(bool, CriticalJNINatives, false, \
"(Deprecated) Check for critical JNI entry points") \
\
product(bool, UseAESIntrinsics, false, DIAGNOSTIC, \
"Use intrinsics for AES versions of crypto") \
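With the default flipped to false and the entry added to special_jvm_flags above, explicitly enabling the flag still works in JDK 16 but goes through HotSpot's generic deprecated-flag handling. The expected effect, sketched from that generic handling (exact wording not verified against this build):

java -XX:+CriticalJNINatives -version
OpenJDK 64-Bit Server VM warning: Option CriticalJNINatives was deprecated in version 16.0 and will likely be removed in a future release.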
@ -673,39 +673,6 @@ bool SafepointSynchronize::handshake_safe(JavaThread *thread) {
return false;
}

// See if the thread is running inside a lazy critical native and
// update the thread critical count if so. Also set a suspend flag to
// cause the native wrapper to return into the JVM to do the unlock
// once the native finishes.
static void check_for_lazy_critical_native(JavaThread *thread, JavaThreadState state) {
if (state == _thread_in_native &&
thread->has_last_Java_frame() &&
thread->frame_anchor()->walkable()) {
// This thread might be in a critical native nmethod so look at
// the top of the stack and increment the critical count if it
// is.
frame wrapper_frame = thread->last_frame();
CodeBlob* stub_cb = wrapper_frame.cb();
if (stub_cb != NULL &&
stub_cb->is_nmethod() &&
stub_cb->as_nmethod_or_null()->is_lazy_critical_native()) {
// A thread could potentially be in a critical native across
// more than one safepoint, so only update the critical state on
// the first one. When it returns it will perform the unlock.
if (!thread->do_critical_native_unlock()) {
#ifdef ASSERT
if (!thread->in_critical()) {
GCLocker::increment_debug_jni_lock_count();
}
#endif
thread->enter_critical();
// Make sure the native wrapper calls back on return to
// perform the needed critical unlock.
thread->set_critical_native_unlock();
}
}
}
}

// -------------------------------------------------------------------------------------------------------
// Implementation of Safepoint blocking point
@ -899,7 +866,6 @@ void ThreadSafepointState::examine_state_of_thread(uint64_t safepoint_count) {
}

if (safepoint_safe_with(_thread, stable_state)) {
check_for_lazy_critical_native(_thread, stable_state);
account_safe_thread();
return;
}

@ -2938,36 +2938,6 @@ void AdapterHandlerLibrary::create_native_wrapper(const methodHandle& method) {
}
}

JRT_ENTRY_NO_ASYNC(void, SharedRuntime::block_for_jni_critical(JavaThread* thread))
assert(thread == JavaThread::current(), "must be");
// The code is about to enter a JNI lazy critical native method and
// _needs_gc is true, so if this thread is already in a critical
// section then just return, otherwise this thread should block
// until needs_gc has been cleared.
if (thread->in_critical()) {
return;
}
// Lock and unlock a critical section to give the system a chance to block
GCLocker::lock_critical(thread);
GCLocker::unlock_critical(thread);
JRT_END

JRT_LEAF(oopDesc*, SharedRuntime::pin_object(JavaThread* thread, oopDesc* obj))
assert(Universe::heap()->supports_object_pinning(), "Why we are here?");
assert(obj != NULL, "Should not be null");
oop o(obj);
o = Universe::heap()->pin_object(thread, o);
assert(o != NULL, "Should not be null");
return o;
JRT_END

JRT_LEAF(void, SharedRuntime::unpin_object(JavaThread* thread, oopDesc* obj))
assert(Universe::heap()->supports_object_pinning(), "Why we are here?");
assert(obj != NULL, "Should not be null");
oop o(obj);
Universe::heap()->unpin_object(thread, o);
JRT_END

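In outline, these two leaf entries bracket the native call for every array argument when the heap supports pinning. A hedged sketch of the sequence the generated wrapper performs (illustrative C++, not literal VM source; the real work happens in emitted assembly):

// Sketch: one array argument of a critical native, pinning-capable heap.
void call_critical_native_with_array(JavaThread* thread, oopDesc* array_oop) {
  // Pin first so the GC cannot move the array while the native runs.
  oopDesc* pinned = SharedRuntime::pin_object(thread, array_oop);
  // ... unpack (length, body pointer) from 'pinned' and invoke the
  // JavaCritical_ entry point here ...
  // Unpin afterwards; the real wrapper saves and restores the native
  // result around this call so the return value is not clobbered.
  SharedRuntime::unpin_object(thread, pinned);
}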
// -------------------------------------------------------------------------
// Java-Java calling convention
// (what you use when Java calls Java)

@ -486,13 +486,6 @@ class SharedRuntime: AllStatic {
BasicType ret_type,
address critical_entry);

// Block before entering a JNI critical method
static void block_for_jni_critical(JavaThread* thread);

// Pin/Unpin object
static oopDesc* pin_object(JavaThread* thread, oopDesc* obj);
static void unpin_object(JavaThread* thread, oopDesc* obj);

// A compiled caller has just called the interpreter, but compiled code
// exists. Patch the caller so he no longer calls into the interpreter.
static void fixup_callers_callsite(Method* moop, address ret_pc);
@ -2728,26 +2728,6 @@ void JavaThread::check_special_condition_for_native_trans(JavaThread *thread) {
}
}

// This is a variant of the normal
// check_special_condition_for_native_trans with slightly different
// semantics for use by critical native wrappers. It does all the
// normal checks but also performs the transition back into
// thread_in_Java state. This is required so that critical natives
// can potentially block and perform a GC if they are the last thread
// exiting the GCLocker.
void JavaThread::check_special_condition_for_native_trans_and_transition(JavaThread *thread) {
check_special_condition_for_native_trans(thread);

// Finish the transition
thread->set_thread_state(_thread_in_Java);

if (thread->do_critical_native_unlock()) {
ThreadInVMfromJavaNoAsyncException tiv(thread);
GCLocker::unlock_critical(thread);
thread->clear_critical_native_unlock();
}
}

// We need to guarantee the Threads_lock here, since resumes are not
// allowed during safepoint synchronization
// Can only resume from an external suspension
@ -301,7 +301,6 @@ class Thread: public ThreadShadow {
_ext_suspended = 0x40000000U, // thread has self-suspended

_has_async_exception = 0x00000001U, // there is a pending async exception
_critical_native_unlock = 0x00000002U, // Must call back to unlock JNI critical lock

_trace_flag = 0x00000004U, // call tracing backend
_obj_deopt = 0x00000008U // suspend for object reallocation and relocking for JVMTI agent
@ -543,11 +542,6 @@ class Thread: public ThreadShadow {
inline void set_has_async_exception();
inline void clear_has_async_exception();

bool do_critical_native_unlock() const { return (_suspend_flags & _critical_native_unlock) != 0; }

inline void set_critical_native_unlock();
inline void clear_critical_native_unlock();

inline void set_trace_flag();
inline void clear_trace_flag();

@ -1389,11 +1383,6 @@ class JavaThread: public Thread {
// Check for async exception in addition to safepoint and suspend request.
static void check_special_condition_for_native_trans(JavaThread *thread);

// Same as check_special_condition_for_native_trans but finishes the
// transition into thread_in_Java mode so that it can potentially
// block.
static void check_special_condition_for_native_trans_and_transition(JavaThread *thread);

bool is_ext_suspend_completed(bool called_by_wait, int delay, uint32_t *bits);
bool is_ext_suspend_completed_with_lock(uint32_t *bits) {
MutexLocker ml(SR_lock(), Mutex::_no_safepoint_check_flag);
@ -53,12 +53,6 @@ inline void Thread::set_has_async_exception() {
inline void Thread::clear_has_async_exception() {
clear_suspend_flag(_has_async_exception);
}
inline void Thread::set_critical_native_unlock() {
set_suspend_flag(_critical_native_unlock);
}
inline void Thread::clear_critical_native_unlock() {
clear_suspend_flag(_critical_native_unlock);
}
inline void Thread::set_trace_flag() {
set_suspend_flag(_trace_flag);
}
@ -1,5 +1,6 @@
/*
* Copyright (c) 2018, 2019, Red Hat, Inc. and/or its affiliates.
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -50,6 +51,15 @@ package gc;
* @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+UnlockExperimentalVMOptions -XX:ShenandoahGCMode=iu -Xcomp -Xmx512M -XX:+CriticalJNINatives gc.CriticalNativeArgs
* @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+UnlockExperimentalVMOptions -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive -Xcomp -Xmx512M -XX:+CriticalJNINatives gc.CriticalNativeArgs
*/

/*
* @test CriticalNativeArgs
* @bug 8199868 8233343
* @library /
* @requires os.arch =="x86_64" | os.arch == "amd64" | os.arch=="x86" | os.arch=="i386" | os.arch=="ppc64" | os.arch=="ppc64le" | os.arch=="s390x"
* @summary test argument unpacking nmethod wrapper of critical native method
* @run main/othervm/native -Xcomp -Xmx512M -XX:+CriticalJNINatives gc.CriticalNativeArgs
*/
public class CriticalNativeArgs {
public static void main(String[] args) {
int[] arr = new int[2];
@ -1,5 +1,6 @@
/*
* Copyright (c) 2018, 2019, Red Hat, Inc. and/or its affiliates.
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -55,6 +56,17 @@ import jdk.test.lib.Utils;
* @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -Xcomp -Xmx512M -XX:+CriticalJNINatives gc.stress.CriticalNativeStress
* @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive -Xcomp -Xmx512M -XX:+CriticalJNINatives gc.stress.CriticalNativeStress
*/

/*
* @test CriticalNativeStress
* @key randomness
* @bug 8199868 8233343
* @library / /test/lib
* @requires os.arch =="x86_64" | os.arch == "amd64" | os.arch=="x86" | os.arch=="i386" | os.arch=="ppc64" | os.arch=="ppc64le" | os.arch=="s390x"
* @summary test argument unpacking nmethod wrapper of critical native method
* @run main/othervm/native -Xcomp -Xmx512M -XX:+CriticalJNINatives gc.stress.CriticalNativeStress
*/

public class CriticalNativeStress {
// CYCLES and THREAD_PER_CASE are used to tune the tests for different GC settings,
// so that they can exercise enough GC cycles and not OOM
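The rest of the test body is cut off in this view. For orientation, a hedged sketch of the native side such a test drives (class and method names are illustrative, not the actual test's): with -XX:+CriticalJNINatives the VM may bind a call to a JavaCritical_ entry point, and a null array arrives as (0, NULL) per the unpack_array_argument contract shown earlier.

#include <jni.h>

// Hypothetical critical native; the Java side would declare something like
//   static native boolean isNull(int[] a);
extern "C" JNIEXPORT jboolean JNICALL
JavaCritical_gc_stress_CriticalNativeStress_isNull(jint len, jint* body) {
  // A null int[] is passed as length 0 with a NULL body pointer.
  return (body == NULL && len == 0) ? JNI_TRUE : JNI_FALSE;
}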