8258192: Obsolete the CriticalJNINatives flag
Reviewed-by: mdoerr, shade
Parent: 5a2452c80e
Commit: 0d2980cdd1
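For context: the CriticalJNINatives flag (introduced for JDK-7013347) let the VM bind a native method to an alternative "critical" entry point. When a JavaCritical_-prefixed symbol was found, eligible calls skipped the JNIEnv*/class hidden arguments and received primitive arrays as unpacked (length, body) pairs. A rough sketch of the two conventions, with made-up class and method names (pkg.C.sum); the standard form is what remains supported:

    // Standard JNI binding (unchanged by this commit):
    extern "C" JNIEXPORT jint JNICALL
    Java_pkg_C_sum(JNIEnv* env, jclass cls, jintArray arr) {
      jint n = env->GetArrayLength(arr);
      jint* p = env->GetIntArrayElements(arr, nullptr);
      jint s = 0;
      for (jint i = 0; i < n; i++) s += p[i];
      env->ReleaseIntArrayElements(arr, p, JNI_ABORT);
      return s;
    }

    // Critical variant that this commit stops dispatching to:
    // no JNIEnv*, no jclass, arrays arrive as (length, body).
    extern "C" JNIEXPORT jint JNICALL
    JavaCritical_pkg_C_sum(jint len, jint* body) {
      jint s = 0;
      for (jint i = 0; i < len; i++) s += body[i];
      return s;
    }

The per-architecture hunks below delete the wrapper code that implemented the critical convention.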
@@ -1112,69 +1112,6 @@ static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMR
   }
 }
 
-// Unpack an array argument into a pointer to the body and the length
-// if the array is non-null, otherwise pass 0 for both.
-static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) { Unimplemented(); }
-
-
-class ComputeMoveOrder: public StackObj {
-  class MoveOperation: public ResourceObj {
-    friend class ComputeMoveOrder;
-   private:
-    VMRegPair _src;
-    VMRegPair _dst;
-    int _src_index;
-    int _dst_index;
-    bool _processed;
-    MoveOperation* _next;
-    MoveOperation* _prev;
-
-    static int get_id(VMRegPair r) { Unimplemented(); return 0; }
-
-   public:
-    MoveOperation(int src_index, VMRegPair src, int dst_index, VMRegPair dst):
-      _src(src)
-    , _dst(dst)
-    , _src_index(src_index)
-    , _dst_index(dst_index)
-    , _processed(false)
-    , _next(NULL)
-    , _prev(NULL) { Unimplemented(); }
-
-    VMRegPair src() const { Unimplemented(); return _src; }
-    int src_id() const { Unimplemented(); return 0; }
-    int src_index() const { Unimplemented(); return 0; }
-    VMRegPair dst() const { Unimplemented(); return _src; }
-    void set_dst(int i, VMRegPair dst) { Unimplemented(); }
-    int dst_index() const { Unimplemented(); return 0; }
-    int dst_id() const { Unimplemented(); return 0; }
-    MoveOperation* next() const { Unimplemented(); return 0; }
-    MoveOperation* prev() const { Unimplemented(); return 0; }
-    void set_processed() { Unimplemented(); }
-    bool is_processed() const { Unimplemented(); return 0; }
-
-    // insert
-    void break_cycle(VMRegPair temp_register) { Unimplemented(); }
-
-    void link(GrowableArray<MoveOperation*>& killer) { Unimplemented(); }
-  };
-
- private:
-  GrowableArray<MoveOperation*> edges;
-
- public:
-  ComputeMoveOrder(int total_in_args, VMRegPair* in_regs, int total_c_args, VMRegPair* out_regs,
-                   BasicType* in_sig_bt, GrowableArray<int>& arg_order, VMRegPair tmp_vmreg) { Unimplemented(); }
-
-  // Collected all the move operations
-  void add_edge(int src_index, VMRegPair src, int dst_index, VMRegPair dst) { Unimplemented(); }
-
-  // Walk the edges breaking cycles between moves. The result list
-  // can be walked in order to produce the proper set of loads
-  GrowableArray<MoveOperation*>* get_store_order(VMRegPair temp_register) { Unimplemented(); return 0; }
-};
-
-
 static void rt_call(MacroAssembler* masm, address dest) {
   CodeBlob *cb = CodeCache::find_blob(dest);
   if (cb) {
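The ComputeMoveOrder stubs deleted above existed because a critical native could move an incoming Java argument into a C slot that still held another pending source, so moves had to be emitted in a safe order and register cycles broken through a temporary. A standalone sketch of that scheduling idea (illustrative only; plain ints stand in for VMRegPair locations):

    #include <cstdio>
    #include <vector>

    struct Move { int src, dst; };

    // Emit "dst <- src" moves so no pending source is clobbered first;
    // when only cycles remain, park one source in the temp location.
    static void schedule(std::vector<Move> pending, int temp) {
      while (!pending.empty()) {
        bool emitted = false;
        for (size_t i = 0; i < pending.size() && !emitted; i++) {
          bool blocked = false;
          for (size_t j = 0; j < pending.size(); j++) {
            if (j != i && pending[j].src == pending[i].dst) blocked = true;
          }
          if (!blocked) {
            std::printf("mov loc%d <- loc%d\n", pending[i].dst, pending[i].src);
            pending.erase(pending.begin() + i);
            emitted = true;
          }
        }
        if (!emitted) {  // every remaining move is part of a cycle
          int s = pending.front().src;
          std::printf("mov loc%d <- loc%d\n", temp, s);
          for (Move& m : pending) {
            if (m.src == s) m.src = temp;  // temp now holds that value
          }
        }
      }
    }

    int main() {
      schedule({{0, 1}, {1, 0}, {4, 5}}, 99);  // a 0/1 swap plus a plain move
      return 0;
    }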
@@ -1287,8 +1224,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                 int compile_id,
                                                 BasicType* in_sig_bt,
                                                 VMRegPair* in_regs,
-                                                BasicType ret_type,
-                                                address critical_entry) {
+                                                BasicType ret_type) {
   if (method->is_method_handle_intrinsic()) {
     vmIntrinsics::ID iid = method->intrinsic_id();
     intptr_t start = (intptr_t)__ pc();
@@ -1313,12 +1249,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                        in_ByteSize(-1),
                                        (OopMapSet*)NULL);
   }
-  bool is_critical_native = true;
-  address native_func = critical_entry;
-  if (native_func == NULL) {
-    native_func = method->native_function();
-    is_critical_native = false;
-  }
+  address native_func = method->native_function();
   assert(native_func != NULL, "must have function");
 
   // An OopMap for lock (and class if static)
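With the critical path gone, the wrapper takes its target from method->native_function() unconditionally, and the critical_entry parameter (which carried the address of a JavaCritical_* symbol when the VM's native lookup found one) disappears from the signature. Callers in shared code, not shown in this extract, presumably shrink accordingly; a hypothetical call-site sketch:

    // Before: the looked-up critical entry was threaded through.
    // nm = SharedRuntime::generate_native_wrapper(&masm, method, compile_id,
    //                                             in_sig_bt, in_regs, ret_type,
    //                                             critical_entry);
    // After: one fewer argument.
    nm = SharedRuntime::generate_native_wrapper(&masm, method, compile_id,
                                                in_sig_bt, in_regs, ret_type);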
@@ -1332,55 +1263,20 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   // the hidden arguments as arg[0] and possibly arg[1] (static method)
 
   const int total_in_args = method->size_of_parameters();
-  int total_c_args = total_in_args;
-  if (!is_critical_native) {
-    total_c_args += 1;
-    if (method->is_static()) {
-      total_c_args++;
-    }
-  } else {
-    for (int i = 0; i < total_in_args; i++) {
-      if (in_sig_bt[i] == T_ARRAY) {
-        total_c_args++;
-      }
-    }
-  }
+  int total_c_args = total_in_args + (method->is_static() ? 2 : 1);
 
   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
   VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
-  BasicType* in_elem_bt = NULL;
 
   int argc = 0;
-  if (!is_critical_native) {
-    out_sig_bt[argc++] = T_ADDRESS;
-    if (method->is_static()) {
-      out_sig_bt[argc++] = T_OBJECT;
-    }
+  out_sig_bt[argc++] = T_ADDRESS;
+  if (method->is_static()) {
+    out_sig_bt[argc++] = T_OBJECT;
+  }
 
-    for (int i = 0; i < total_in_args ; i++ ) {
-      out_sig_bt[argc++] = in_sig_bt[i];
-    }
-  } else {
-    in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
-    SignatureStream ss(method->signature());
-    for (int i = 0; i < total_in_args ; i++ ) {
-      if (in_sig_bt[i] == T_ARRAY) {
-        // Arrays are passed as int, elem* pair
-        out_sig_bt[argc++] = T_INT;
-        out_sig_bt[argc++] = T_ADDRESS;
-        ss.skip_array_prefix(1); // skip one '['
-        assert(ss.is_primitive(), "primitive type expected");
-        in_elem_bt[i] = ss.type();
-      } else {
-        out_sig_bt[argc++] = in_sig_bt[i];
-        in_elem_bt[i] = T_VOID;
-      }
-      if (in_sig_bt[i] != T_VOID) {
-        assert(in_sig_bt[i] == ss.type() ||
-               in_sig_bt[i] == T_ARRAY, "must match");
-        ss.next();
-      }
-    }
-  }
+  for (int i = 0; i < total_in_args ; i++ ) {
+    out_sig_bt[argc++] = in_sig_bt[i];
+  }
 
   // Now figure out where the args must be stored and how much stack space
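The simplified count works because Method::size_of_parameters() already includes the receiver for instance methods, so only JNIEnv* is hidden for them, while static methods gain both JNIEnv* and the class mirror. Illustration with made-up names (p.K):

    // instance: native long f(int x)  ->  size_of_parameters() == 2 (this, x)
    //           C call:  Java_p_K_f(JNIEnv*, jobject, jint)   == 2 + 1 args
    // static:   static native long g(int x)  ->  size_of_parameters() == 1 (x)
    //           C call:  Java_p_K_g(JNIEnv*, jclass, jint)    == 1 + 2 args
    extern "C" JNIEXPORT jlong JNICALL Java_p_K_f(JNIEnv* env, jobject self, jint x);
    extern "C" JNIEXPORT jlong JNICALL Java_p_K_g(JNIEnv* env, jclass cls, jint x);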
@@ -1402,34 +1298,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
 
   // Now the space for the inbound oop handle area
   int total_save_slots = 8 * VMRegImpl::slots_per_word; // 8 arguments passed in registers
-  if (is_critical_native) {
-    // Critical natives may have to call out so they need a save area
-    // for register arguments.
-    int double_slots = 0;
-    int single_slots = 0;
-    for ( int i = 0; i < total_in_args; i++) {
-      if (in_regs[i].first()->is_Register()) {
-        const Register reg = in_regs[i].first()->as_Register();
-        switch (in_sig_bt[i]) {
-          case T_BOOLEAN:
-          case T_BYTE:
-          case T_SHORT:
-          case T_CHAR:
-          case T_INT: single_slots++; break;
-          case T_ARRAY: // specific to LP64 (7145024)
-          case T_LONG: double_slots++; break;
-          default: ShouldNotReachHere();
-        }
-      } else if (in_regs[i].first()->is_FloatRegister()) {
-        ShouldNotReachHere();
-      }
-    }
-    total_save_slots = double_slots * 2 + single_slots;
-    // align the save area
-    if (double_slots != 0) {
-      stack_slots = align_up(stack_slots, 2);
-    }
-  }
 
   int oop_handle_offset = stack_slots;
   stack_slots += total_save_slots;
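The deleted block sized a save area so register arguments could survive calls made from within a critical native. The arithmetic in miniature, for a hypothetical critical signature (jint, jlong, jint[]) on LP64, where the array arrives as a pointer:

    int single_slots = 1;  // the jint (T_INT)
    int double_slots = 2;  // the jlong (T_LONG) and the array pointer (T_ARRAY)
    int total_save_slots = double_slots * 2 + single_slots;  // == 5 VMReg slots
    // plus 2-slot alignment of stack_slots whenever double_slots != 0

Regular JNI natives keep the fixed save area (here, 8 argument registers' worth), so only the critical branch goes away.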
@@ -1596,22 +1464,14 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
 
 #endif /* ASSERT */
 
-  // This may iterate in two different directions depending on the
-  // kind of native it is. The reason is that for regular JNI natives
-  // the incoming and outgoing registers are offset upwards and for
-  // critical natives they are offset down.
+  // For JNI natives the incoming and outgoing registers are offset upwards.
   GrowableArray<int> arg_order(2 * total_in_args);
   VMRegPair tmp_vmreg;
   tmp_vmreg.set2(r19->as_VMReg());
 
-  if (!is_critical_native) {
-    for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
-      arg_order.push(i);
-      arg_order.push(c_arg);
-    }
-  } else {
-    // Compute a valid move order, using tmp_vmreg to break any cycles
-    ComputeMoveOrder cmo(total_in_args, in_regs, total_c_args, out_regs, in_sig_bt, arg_order, tmp_vmreg);
-  }
+  for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
+    arg_order.push(i);
+    arg_order.push(c_arg);
+  }
 
   int temploc = -1;
@@ -1619,20 +1479,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     int i = arg_order.at(ai);
     int c_arg = arg_order.at(ai + 1);
     __ block_comment(err_msg("move %d -> %d", i, c_arg));
-    if (c_arg == -1) {
-      assert(is_critical_native, "should only be required for critical natives");
-      // This arg needs to be moved to a temporary
-      __ mov(tmp_vmreg.first()->as_Register(), in_regs[i].first()->as_Register());
-      in_regs[i] = tmp_vmreg;
-      temploc = i;
-      continue;
-    } else if (i == -1) {
-      assert(is_critical_native, "should only be required for critical natives");
-      // Read from the temporary location
-      assert(temploc != -1, "must be valid");
-      i = temploc;
-      temploc = -1;
-    }
     assert(c_arg != -1 && i != -1, "wrong order");
 #ifdef ASSERT
     if (in_regs[i].first()->is_Register()) {
       assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
@@ -1647,21 +1494,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
 #endif /* ASSERT */
     switch (in_sig_bt[i]) {
       case T_ARRAY:
-        if (is_critical_native) {
-          unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
-          c_arg++;
-#ifdef ASSERT
-          if (out_regs[c_arg].first()->is_Register()) {
-            reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
-          } else if (out_regs[c_arg].first()->is_FloatRegister()) {
-            freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
-          }
-#endif
-          int_args++;
-          break;
-        }
       case T_OBJECT:
-        assert(!is_critical_native, "no oop arguments");
         object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
                     ((i == 0) && (!is_static)),
                     &receiver_offset);
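T_ARRAY now always falls through to the T_OBJECT handle-boxing path. Code that used critical natives for cheap access to array bodies can use the standard JNI critical-region API instead; a sketch with made-up names (p.K.checksum):

    extern "C" JNIEXPORT jlong JNICALL
    Java_p_K_checksum(JNIEnv* env, jclass cls, jbyteArray arr) {
      jlong sum = 0;
      jint len = env->GetArrayLength(arr);
      void* body = env->GetPrimitiveArrayCritical(arr, nullptr);
      if (body == nullptr) return 0;  // allocation failure, exception pending
      const jbyte* p = static_cast<const jbyte*>(body);
      for (jint i = 0; i < len; i++) sum += p[i];
      env->ReleasePrimitiveArrayCritical(arr, body, JNI_ABORT);
      return sum;
    }

GetPrimitiveArrayCritical gives direct (or pinned/copied) access to the array body much as the removed unpack_array_argument did, with the documented restriction that no other JNI calls may run inside the critical region.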
@@ -1701,7 +1534,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   int c_arg = total_c_args - total_in_args;
 
   // Pre-load a static method's oop into c_rarg1.
-  if (method->is_static() && !is_critical_native) {
+  if (method->is_static()) {
 
     // load oop into a register
     __ movoop(c_rarg1,
@@ -1759,7 +1592,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   Label lock_done;
 
   if (method->is_synchronized()) {
-    assert(!is_critical_native, "unhandled");
 
     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
 
@@ -1813,14 +1645,12 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   // Finally just about ready to make the JNI call
 
   // get JNIEnv* which is first argument to native
-  if (!is_critical_native) {
-    __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));
+  __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));
 
-    // Now set thread in native
-    __ mov(rscratch1, _thread_in_native);
-    __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
-    __ stlrw(rscratch1, rscratch2);
-  }
+  // Now set thread in native
+  __ mov(rscratch1, _thread_in_native);
+  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
+  __ stlrw(rscratch1, rscratch2);
 
   rt_call(masm, native_func);
 
@@ -1851,18 +1681,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   Label safepoint_in_progress, safepoint_in_progress_done;
   Label after_transition;
 
-  // If this is a critical native, check for a safepoint or suspend request after the call.
-  // If a safepoint is needed, transition to native, then to native_trans to handle
-  // safepoints like the native methods that are not critical natives.
-  if (is_critical_native) {
-    Label needs_safepoint;
-    __ safepoint_poll(needs_safepoint, false /* at_return */, true /* acquire */, false /* in_nmethod */);
-    __ ldrw(rscratch1, Address(rthread, JavaThread::suspend_flags_offset()));
-    __ cbnzw(rscratch1, needs_safepoint);
-    __ b(after_transition);
-    __ bind(needs_safepoint);
-  }
-
   // Switch thread to "native transition" state before reading the synchronization state.
   // This additional state is necessary because reading and testing the synchronization
   // state is not atomic w.r.t. GC, as this scenario demonstrates:
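The deleted post-call poll existed because a critical native ran without the usual handshake: the thread still looked like it was running Java code, so the wrapper itself had to check for a pending safepoint or suspend request once the call returned. A simplified model of the difference (not VM code; the real transitions use release stores and fences):

    enum ThreadState { _thread_in_Java, _thread_in_native, _thread_in_native_trans };

    // Regular JNI wrapper: publish the state change around the call, so GC
    // can treat the thread as safely "out" while the native code runs.
    void regular_call_model(ThreadState* state, void (*native_func)()) {
      *state = _thread_in_native;        // before the call
      native_func();
      *state = _thread_in_native_trans;  // then re-check the safepoint state
    }

Since every native call now takes the regular path, this special case disappears on every architecture below as well.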
@@ -1971,32 +1789,26 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     __ str(zr, Address(rthread, JavaThread::pending_jni_exception_check_fn_offset()));
   }
 
-  if (!is_critical_native) {
-    // reset handle block
-    __ ldr(r2, Address(rthread, JavaThread::active_handles_offset()));
-    __ str(zr, Address(r2, JNIHandleBlock::top_offset_in_bytes()));
-  }
+  // reset handle block
+  __ ldr(r2, Address(rthread, JavaThread::active_handles_offset()));
+  __ str(zr, Address(r2, JNIHandleBlock::top_offset_in_bytes()));
 
   __ leave();
 
-  if (!is_critical_native) {
-    // Any exception pending?
-    __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
-    __ cbnz(rscratch1, exception_pending);
-  }
+  // Any exception pending?
+  __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
+  __ cbnz(rscratch1, exception_pending);
 
   // We're done
   __ ret(lr);
 
   // Unexpected paths are out of line and go here
 
-  if (!is_critical_native) {
-    // forward the exception
-    __ bind(exception_pending);
+  // forward the exception
+  __ bind(exception_pending);
 
-    // and forward the exception
-    __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
-  }
+  // and forward the exception
+  __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
 
   // Slow path locking & unlocking
   if (method->is_synchronized()) {
@@ -472,6 +472,4 @@ void VM_Version::initialize() {
 #endif
 
   _spin_wait = get_spin_wait_desc();
-
-  UNSUPPORTED_OPTION(CriticalJNINatives);
 }
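Each platform previously had to declare the flag unsupported by hand; obsoleting it moves that handling to the shared obsolete-flag table, so these per-CPU lines go away. The macro being removed behaves roughly like this (paraphrased from HotSpot's definition; details may differ by release):

    // Warn if the user set the flag on the command line, then force it off.
    #define UNSUPPORTED_OPTION(opt)                            \
    do {                                                       \
      if (opt) {                                               \
        if (FLAG_IS_CMDLINE(opt)) {                            \
          warning("-XX:+" #opt " not supported in this VM");   \
        }                                                      \
        FLAG_SET_DEFAULT(opt, false);                          \
      }                                                        \
    } while (0)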
@@ -750,8 +750,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                 int compile_id,
                                                 BasicType* in_sig_bt,
                                                 VMRegPair* in_regs,
-                                                BasicType ret_type,
-                                                address critical_entry) {
+                                                BasicType ret_type) {
   if (method->is_method_handle_intrinsic()) {
     vmIntrinsics::ID iid = method->intrinsic_id();
     intptr_t start = (intptr_t)__ pc();
@@ -777,20 +776,17 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
 
   // Usage of Rtemp should be OK since scratched by native call
 
-  bool is_static = method->is_static();
+  bool method_is_static = method->is_static();
 
   const int total_in_args = method->size_of_parameters();
-  int total_c_args = total_in_args + 1;
-  if (is_static) {
-    total_c_args++;
-  }
+  int total_c_args = total_in_args + (method_is_static ? 2 : 1);
 
   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
   VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
 
   int argc = 0;
   out_sig_bt[argc++] = T_ADDRESS;
-  if (is_static) {
+  if (method_is_static) {
     out_sig_bt[argc++] = T_OBJECT;
   }
 
@@ -881,7 +877,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
 
   OopMapSet* oop_maps = new OopMapSet();
   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
-  const int extra_args = is_static ? 2 : 1;
+  const int extra_args = method_is_static ? 2 : 1;
   int receiver_offset = -1;
   int fp_regs_in_arguments = 0;
 
@@ -904,7 +900,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
       int offset = oop_handle_offset * VMRegImpl::stack_slot_size;
       __ str(src->as_Register(), Address(SP, offset));
       map->set_oop(VMRegImpl::stack2reg(oop_handle_offset));
-      if ((i == 0) && (!is_static)) {
+      if ((i == 0) && (!method_is_static)) {
         receiver_offset = offset;
       }
       oop_handle_offset += VMRegImpl::slots_per_word;
@@ -1116,7 +1112,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
 
   // Get Klass mirror
   int klass_offset = -1;
-  if (is_static) {
+  if (method_is_static) {
     klass_offset = oop_handle_offset * VMRegImpl::stack_slot_size;
     __ mov_oop(Rtemp, JNIHandles::make_local(method->method_holder()->java_mirror()));
     __ add(c_rarg1, SP, klass_offset);
@@ -1332,7 +1328,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                      vep_offset,
                                      frame_complete,
                                      stack_slots / VMRegImpl::slots_per_word,
-                                     in_ByteSize(is_static ? klass_offset : receiver_offset),
+                                     in_ByteSize(method_is_static ? klass_offset : receiver_offset),
                                      in_ByteSize(lock_slot_offset * VMRegImpl::stack_slot_size),
                                      oop_maps);
 }
@@ -335,7 +335,6 @@ void VM_Version::initialize() {
   }
 
   UNSUPPORTED_OPTION(TypeProfileLevel);
-  UNSUPPORTED_OPTION(CriticalJNINatives);
 
   FLAG_SET_DEFAULT(TypeProfileLevel, 0); // unsupported
 
@@ -1533,57 +1533,6 @@ void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_ty
   }
 }
 
-static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst, Register r_caller_sp, Register r_temp) {
-  if (src.first()->is_stack()) {
-    if (dst.first()->is_stack()) {
-      // stack to stack
-      __ ld(r_temp, reg2offset(src.first()), r_caller_sp);
-      __ std(r_temp, reg2offset(dst.first()), R1_SP);
-    } else {
-      // stack to reg
-      __ ld(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp);
-    }
-  } else if (dst.first()->is_stack()) {
-    // reg to stack
-    __ std(src.first()->as_Register(), reg2offset(dst.first()), R1_SP);
-  } else {
-    if (dst.first() != src.first()) {
-      __ mr(dst.first()->as_Register(), src.first()->as_Register());
-    }
-  }
-}
-
-// Unpack an array argument into a pointer to the body and the length
-// if the array is non-null, otherwise pass 0 for both.
-static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type,
-                                  VMRegPair body_arg, VMRegPair length_arg, Register r_caller_sp,
-                                  Register tmp_reg, Register tmp2_reg) {
-  assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
-         "possible collision");
-  assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
-         "possible collision");
-
-  // Pass the length, ptr pair.
-  Label set_out_args;
-  VMRegPair tmp, tmp2;
-  tmp.set_ptr(tmp_reg->as_VMReg());
-  tmp2.set_ptr(tmp2_reg->as_VMReg());
-  if (reg.first()->is_stack()) {
-    // Load the arg up from the stack.
-    move_ptr(masm, reg, tmp, r_caller_sp, /*unused*/ R0);
-    reg = tmp;
-  }
-  __ li(tmp2_reg, 0); // Pass zeros if Array=null.
-  if (tmp_reg != reg.first()->as_Register()) __ li(tmp_reg, 0);
-  __ cmpdi(CCR0, reg.first()->as_Register(), 0);
-  __ beq(CCR0, set_out_args);
-  __ lwa(tmp2_reg, arrayOopDesc::length_offset_in_bytes(), reg.first()->as_Register());
-  __ addi(tmp_reg, reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type));
-  __ bind(set_out_args);
-  move_ptr(masm, tmp, body_arg, r_caller_sp, /*unused*/ R0);
-  move_ptr(masm, tmp2, length_arg, r_caller_sp, /*unused*/ R0); // Same as move32_64 on PPC64.
-}
-
 static void verify_oop_args(MacroAssembler* masm,
                             const methodHandle& method,
                             const BasicType* sig_bt,
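The deleted PPC helper read the array length at arrayOopDesc::length_offset_in_bytes() and formed the body pointer with base_offset_in_bytes(elem_type). For orientation, a simplified and deliberately approximate picture of the header those offsets address; the real layout depends on compressed oops/class pointers and the JDK version:

    #include <cstdint>

    struct ArrayOopModel {            // illustrative only, not the VM's type
      uintptr_t mark;                 // mark word
      uint32_t  klass;                // (possibly compressed) class pointer
      uint32_t  length;               // <- length_offset_in_bytes()
      // element 0 starts at base_offset_in_bytes(elem_type),
      // aligned as required for the element type
    };

A null array was signalled by passing 0 for both the length and the body pointer, which the JavaCritical_* convention required.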
@@ -1685,8 +1634,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
                                                 int compile_id,
                                                 BasicType *in_sig_bt,
                                                 VMRegPair *in_regs,
-                                                BasicType ret_type,
-                                                address critical_entry) {
+                                                BasicType ret_type) {
   if (method->is_method_handle_intrinsic()) {
     vmIntrinsics::ID iid = method->intrinsic_id();
     intptr_t start = (intptr_t)__ pc();
@@ -1709,12 +1657,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
                                        (OopMapSet*)NULL);
   }
 
-  bool is_critical_native = true;
-  address native_func = critical_entry;
-  if (native_func == NULL) {
-    native_func = method->native_function();
-    is_critical_native = false;
-  }
+  address native_func = method->native_function();
   assert(native_func != NULL, "must have function");
 
   // First, create signature for outgoing C call
@@ -1733,19 +1676,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
   // some floating-point arguments must be passed in registers _and_
   // in stack locations.
   bool method_is_static = method->is_static();
-  int total_c_args = total_in_args;
-
-  if (!is_critical_native) {
-    int n_hidden_args = method_is_static ? 2 : 1;
-    total_c_args += n_hidden_args;
-  } else {
-    // No JNIEnv*, no this*, but unpacked arrays (base+length).
-    for (int i = 0; i < total_in_args; i++) {
-      if (in_sig_bt[i] == T_ARRAY) {
-        total_c_args++;
-      }
-    }
-  }
+  int total_c_args = total_in_args + (method_is_static ? 2 : 1);
 
   BasicType *out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
   VMRegPair *out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
@@ -1759,44 +1690,13 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
   // hidden arguments).
 
   int argc = 0;
-  if (!is_critical_native) {
-    out_sig_bt[argc++] = T_ADDRESS;
-    if (method->is_static()) {
-      out_sig_bt[argc++] = T_OBJECT;
-    }
+  out_sig_bt[argc++] = T_ADDRESS;
+  if (method->is_static()) {
+    out_sig_bt[argc++] = T_OBJECT;
+  }
 
-    for (int i = 0; i < total_in_args ; i++ ) {
-      out_sig_bt[argc++] = in_sig_bt[i];
-    }
-  } else {
-    in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
-    SignatureStream ss(method->signature());
-    int o = 0;
-    for (int i = 0; i < total_in_args ; i++, o++) {
-      if (in_sig_bt[i] == T_ARRAY) {
-        // Arrays are passed as int, elem* pair
-        ss.skip_array_prefix(1); // skip one '['
-        assert(ss.is_primitive(), "primitive type expected");
-        in_elem_bt[o] = ss.type();
-      } else {
-        in_elem_bt[o] = T_VOID;
-      }
-      if (in_sig_bt[i] != T_VOID) {
-        assert(in_sig_bt[i] == ss.type() ||
-               in_sig_bt[i] == T_ARRAY, "must match");
-        ss.next();
-      }
-    }
-
-    for (int i = 0; i < total_in_args ; i++ ) {
-      if (in_sig_bt[i] == T_ARRAY) {
-        // Arrays are passed as int, elem* pair.
-        out_sig_bt[argc++] = T_INT;
-        out_sig_bt[argc++] = T_ADDRESS;
-      } else {
-        out_sig_bt[argc++] = in_sig_bt[i];
-      }
-    }
-  }
+  for (int i = 0; i < total_in_args ; i++ ) {
+    out_sig_bt[argc++] = in_sig_bt[i];
+  }
 
 
@@ -1823,7 +1723,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
   //
   // NW [ABI_REG_ARGS] <-- 1) R1_SP
   // [outgoing arguments] <-- 2) R1_SP + out_arg_slot_offset
-  // [oopHandle area] <-- 3) R1_SP + oop_handle_offset (save area for critical natives)
+  // [oopHandle area] <-- 3) R1_SP + oop_handle_offset
   // klass <-- 4) R1_SP + klass_offset
   // lock <-- 5) R1_SP + lock_offset
   // [workspace] <-- 6) R1_SP + workspace_offset
@@ -1838,42 +1738,13 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
 
   // Now the space for the inbound oop handle area.
   int total_save_slots = num_java_iarg_registers * VMRegImpl::slots_per_word;
-  if (is_critical_native) {
-    // Critical natives may have to call out so they need a save area
-    // for register arguments.
-    int double_slots = 0;
-    int single_slots = 0;
-    for (int i = 0; i < total_in_args; i++) {
-      if (in_regs[i].first()->is_Register()) {
-        const Register reg = in_regs[i].first()->as_Register();
-        switch (in_sig_bt[i]) {
-          case T_BOOLEAN:
-          case T_BYTE:
-          case T_SHORT:
-          case T_CHAR:
-          case T_INT:
-            // Fall through.
-          case T_ARRAY:
-          case T_LONG: double_slots++; break;
-          default: ShouldNotReachHere();
-        }
-      } else if (in_regs[i].first()->is_FloatRegister()) {
-        switch (in_sig_bt[i]) {
-          case T_FLOAT: single_slots++; break;
-          case T_DOUBLE: double_slots++; break;
-          default: ShouldNotReachHere();
-        }
-      }
-    }
-    total_save_slots = double_slots * 2 + align_up(single_slots, 2); // round to even
-  }
 
   int oop_handle_slot_offset = stack_slots;
   stack_slots += total_save_slots; // 3)
 
   int klass_slot_offset = 0;
   int klass_offset = -1;
-  if (method_is_static && !is_critical_native) { // 4)
+  if (method_is_static) { // 4)
     klass_slot_offset = stack_slots;
     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
     stack_slots += VMRegImpl::slots_per_word;
@@ -1919,10 +1790,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
 
-  Register r_carg1_jnienv = noreg;
-  Register r_carg2_classorobject = noreg;
-  if (!is_critical_native) {
-    r_carg1_jnienv = out_regs[0].first()->as_Register();
-    r_carg2_classorobject = out_regs[1].first()->as_Register();
-  }
+  Register r_carg1_jnienv = out_regs[0].first()->as_Register();
+  Register r_carg2_classorobject = out_regs[1].first()->as_Register();
 
 
   // Generate the Unverified Entry Point (UEP).
@@ -2058,15 +1927,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
         long_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
         break;
       case T_ARRAY:
-        if (is_critical_native) {
-          int body_arg = out;
-          out -= 1; // Point to length arg.
-          unpack_array_argument(masm, in_regs[in], in_elem_bt[in], out_regs[body_arg], out_regs[out],
-                                r_callers_sp, r_temp_1, r_temp_2);
-          break;
-        }
       case T_OBJECT:
-        assert(!is_critical_native, "no oop arguments");
         object_move(masm, stack_slots,
                     oop_map, oop_handle_slot_offset,
                     ((in == 0) && (!method_is_static)), &receiver_offset,
@@ -2098,7 +1959,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
 
   // Pre-load a static method's oop into ARG2.
   // Used both by locking code and the normal JNI call code.
-  if (method_is_static && !is_critical_native) {
+  if (method_is_static) {
     __ set_oop_constant(JNIHandles::make_local(method->method_holder()->java_mirror()),
                         r_carg2_classorobject);
 
@@ -2109,9 +1970,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
   }
 
   // Get JNIEnv* which is first argument to native.
-  if (!is_critical_native) {
-    __ addi(r_carg1_jnienv, R16_thread, in_bytes(JavaThread::jni_environment_offset()));
-  }
+  __ addi(r_carg1_jnienv, R16_thread, in_bytes(JavaThread::jni_environment_offset()));
 
   // NOTE:
   //
@@ -2140,7 +1999,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
   // --------------------------------------------------------------------------
 
   if (method->is_synchronized()) {
-    assert(!is_critical_native, "unhandled");
     ConditionRegister r_flag = CCR1;
     Register r_oop = r_temp_4;
     const Register r_box = r_temp_5;
@@ -2185,16 +2043,14 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
   // Use that pc we placed in r_return_pc a while back as the current frame anchor.
   __ set_last_Java_frame(R1_SP, r_return_pc);
 
-  if (!is_critical_native) {
-    // Publish thread state
-    // --------------------------------------------------------------------------
+  // Publish thread state
+  // --------------------------------------------------------------------------
 
-    // Transition from _thread_in_Java to _thread_in_native.
-    __ li(R0, _thread_in_native);
-    __ release();
-    // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
-    __ stw(R0, thread_(thread_state));
-  }
+  // Transition from _thread_in_Java to _thread_in_native.
+  __ li(R0, _thread_in_native);
+  __ release();
+  // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
+  __ stw(R0, thread_(thread_state));
 
 
   // The JNI call
@@ -2256,24 +2112,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
 
   Label after_transition;
 
-  // If this is a critical native, check for a safepoint or suspend request after the call.
-  // If a safepoint is needed, transition to native, then to native_trans to handle
-  // safepoints like the native methods that are not critical natives.
-  if (is_critical_native) {
-    Label needs_safepoint;
-    Register sync_state = r_temp_5;
-    // Note: We should not reach here with active stack watermark. There's no safepoint between
-    // start of the native wrapper and this check where it could have been added.
-    // We don't check the watermark in the fast path.
-    __ safepoint_poll(needs_safepoint, sync_state, false /* at_return */, false /* in_nmethod */);
-
-    Register suspend_flags = r_temp_6;
-    __ lwz(suspend_flags, thread_(suspend_flags));
-    __ cmpwi(CCR1, suspend_flags, 0);
-    __ beq(CCR1, after_transition);
-    __ bind(needs_safepoint);
-  }
-
   // Publish thread state
   // --------------------------------------------------------------------------
 
@@ -2443,7 +2281,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
 
   // Reset handle block.
   // --------------------------------------------------------------------------
-  if (!is_critical_native) {
   __ ld(r_temp_1, thread_(active_handles));
   // TODO: PPC port assert(4 == JNIHandleBlock::top_size_in_bytes(), "unexpected field size");
   __ li(r_temp_2, 0);
@@ -2455,7 +2292,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
   __ ld(r_temp_2, thread_(pending_exception));
   __ cmpdi(CCR0, r_temp_2, 0);
   __ bne(CCR0, handle_pending_exception);
-  }
 
   // Return
   // --------------------------------------------------------------------------
@@ -2470,14 +2306,12 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
   // Since this is a native call, we know the proper exception handler
   // is the empty function. We just pop this frame and then jump to
   // forward_exception_entry.
-  if (!is_critical_native) {
   __ bind(handle_pending_exception);
 
   __ pop_frame();
   __ restore_LR_CR(R11);
   __ b64_patchable((address)StubRoutines::forward_exception_entry(),
                    relocInfo::runtime_call_type);
-  }
 
   // Handler for a cache miss (out-of-line).
   // --------------------------------------------------------------------------
@@ -1286,76 +1286,6 @@ static void move32_64(MacroAssembler *masm,
   }
 }
 
-static void move_ptr(MacroAssembler *masm,
-                     VMRegPair src,
-                     VMRegPair dst,
-                     int framesize_in_slots) {
-  int frame_offset = framesize_in_slots * VMRegImpl::stack_slot_size;
-
-  if (src.first()->is_stack()) {
-    if (dst.first()->is_stack()) {
-      // stack to stack
-      __ mem2reg_opt(Z_R0_scratch, Address(Z_SP, reg2offset(src.first()) + frame_offset));
-      __ reg2mem_opt(Z_R0_scratch, Address(Z_SP, reg2offset(dst.first())));
-    } else {
-      // stack to reg
-      __ mem2reg_opt(dst.first()->as_Register(),
-                     Address(Z_SP, reg2offset(src.first()) + frame_offset));
-    }
-  } else {
-    if (dst.first()->is_stack()) {
-      // reg to stack
-      __ reg2mem_opt(src.first()->as_Register(), Address(Z_SP, reg2offset(dst.first())));
-    } else {
-      __ lgr_if_needed(dst.first()->as_Register(), src.first()->as_Register());
-    }
-  }
-}
-
-// Unpack an array argument into a pointer to the body and the length
-// if the array is non-null, otherwise pass 0 for both.
-static void unpack_array_argument(MacroAssembler *masm,
-                                  VMRegPair reg,
-                                  BasicType in_elem_type,
-                                  VMRegPair body_arg,
-                                  VMRegPair length_arg,
-                                  int framesize_in_slots) {
-  Register tmp_reg = Z_tmp_2;
-  Register tmp2_reg = Z_tmp_1;
-
-  assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
-         "possible collision");
-  assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
-         "possible collision");
-
-  // Pass the length, ptr pair.
-  NearLabel set_out_args;
-  VMRegPair tmp, tmp2;
-
-  tmp.set_ptr(tmp_reg->as_VMReg());
-  tmp2.set_ptr(tmp2_reg->as_VMReg());
-  if (reg.first()->is_stack()) {
-    // Load the arg up from the stack.
-    move_ptr(masm, reg, tmp, framesize_in_slots);
-    reg = tmp;
-  }
-
-  const Register first = reg.first()->as_Register();
-
-  // Don't set CC, indicate unused result.
-  (void) __ clear_reg(tmp2_reg, true, false);
-  if (tmp_reg != first) {
-    __ clear_reg(tmp_reg, true, false); // Don't set CC.
-  }
-  __ compare64_and_branch(first, (RegisterOrConstant)0L, Assembler::bcondEqual, set_out_args);
-  __ z_lgf(tmp2_reg, Address(first, arrayOopDesc::length_offset_in_bytes()));
-  __ add2reg(tmp_reg, arrayOopDesc::base_offset_in_bytes(in_elem_type), first);
-
-  __ bind(set_out_args);
-  move_ptr(masm, tmp, body_arg, framesize_in_slots);
-  move32_64(masm, tmp2, length_arg, framesize_in_slots);
-}
-
 //----------------------------------------------------------------------
 // Wrap a JNI call.
 //----------------------------------------------------------------------
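move_ptr covered the four source/destination combinations a VMRegPair move can hit. The dispatch shape, abstracted away from s390 instructions (illustrative, not VM code):

    enum Kind { STACK, REG };
    struct Loc { Kind kind; int index; };

    void move_ptr_model(Loc src, Loc dst) {
      if (src.kind == STACK && dst.kind == STACK) {
        // stack -> stack: bounce through a scratch register
      } else if (src.kind == STACK) {
        // stack -> reg: single load
      } else if (dst.kind == STACK) {
        // reg -> stack: single store
      } else if (src.index != dst.index) {
        // reg -> reg: copy only if the registers differ
      }
    }

The s390 variant also added the caller's frame size when reading stack arguments, since incoming slots are addressed relative to the caller's SP.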
@@ -1365,8 +1295,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
                                                 int compile_id,
                                                 BasicType *in_sig_bt,
                                                 VMRegPair *in_regs,
-                                                BasicType ret_type,
-                                                address critical_entry) {
+                                                BasicType ret_type) {
   int total_in_args = method->size_of_parameters();
   if (method->is_method_handle_intrinsic()) {
     vmIntrinsics::ID iid = method->intrinsic_id();
@@ -1400,12 +1329,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
   //
   ///////////////////////////////////////////////////////////////////////
 
-  bool is_critical_native = true;
-  address native_func = critical_entry;
-  if (native_func == NULL) {
-    native_func = method->native_function();
-    is_critical_native = false;
-  }
+  address native_func = method->native_function();
   assert(native_func != NULL, "must have function");
 
   //---------------------------------------------------------------------
@@ -1430,19 +1354,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
   // So convert the signature and registers now, and adjust the total number
   // of in-arguments accordingly.
   bool method_is_static = method->is_static();
-  int total_c_args = total_in_args;
-
-  if (!is_critical_native) {
-    int n_hidden_args = method_is_static ? 2 : 1;
-    total_c_args += n_hidden_args;
-  } else {
-    // No JNIEnv*, no this*, but unpacked arrays (base+length).
-    for (int i = 0; i < total_in_args; i++) {
-      if (in_sig_bt[i] == T_ARRAY) {
-        total_c_args ++;
-      }
-    }
-  }
+  int total_c_args = total_in_args + (method_is_static ? 2 : 1);
 
   BasicType *out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
   VMRegPair *out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
@@ -1455,45 +1367,13 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
   // hidden arguments)
 
   int argc = 0;
-  if (!is_critical_native) {
-    out_sig_bt[argc++] = T_ADDRESS;
-    if (method->is_static()) {
-      out_sig_bt[argc++] = T_OBJECT;
-    }
+  out_sig_bt[argc++] = T_ADDRESS;
+  if (method->is_static()) {
+    out_sig_bt[argc++] = T_OBJECT;
+  }
 
-    for (int i = 0; i < total_in_args; i++) {
-      out_sig_bt[argc++] = in_sig_bt[i];
-    }
-  } else {
-    in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
-    SignatureStream ss(method->signature());
-    int o = 0;
-    for (int i = 0; i < total_in_args; i++, o++) {
-      if (in_sig_bt[i] == T_ARRAY) {
-        // Arrays are passed as tuples (int, elem*).
-        ss.skip_array_prefix(1); // skip one '['
-        assert(ss.is_primitive(), "primitive type expected");
-        in_elem_bt[o] = ss.type();
-      } else {
-        in_elem_bt[o] = T_VOID;
-      }
-      if (in_sig_bt[i] != T_VOID) {
-        assert(in_sig_bt[i] == ss.type() ||
-               in_sig_bt[i] == T_ARRAY, "must match");
-        ss.next();
-      }
-    }
-    assert(total_in_args == o, "must match");
-
-    for (int i = 0; i < total_in_args; i++) {
-      if (in_sig_bt[i] == T_ARRAY) {
-        // Arrays are passed as tuples (int, elem*).
-        out_sig_bt[argc++] = T_INT;
-        out_sig_bt[argc++] = T_ADDRESS;
-      } else {
-        out_sig_bt[argc++] = in_sig_bt[i];
-      }
-    }
-  }
+  for (int i = 0; i < total_in_args; i++) {
+    out_sig_bt[argc++] = in_sig_bt[i];
+  }
 
   ///////////////////////////////////////////////////////////////////////
@@ -1550,8 +1430,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
   // 4| klass (if static) |
   // |---------------------| <- klass_slot_offset
   // 3| oopHandle area |
-  // | (save area for |
-  // | critical natives) |
   // | |
   // | |
   // |---------------------| <- oop_handle_offset
@@ -1579,44 +1457,13 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
 
   // Now the space for the inbound oop handle area.
   int total_save_slots = RegisterImpl::number_of_arg_registers * VMRegImpl::slots_per_word;
-  if (is_critical_native) {
-    // Critical natives may have to call out so they need a save area
-    // for register arguments.
-    int double_slots = 0;
-    int single_slots = 0;
-    for (int i = 0; i < total_in_args; i++) {
-      if (in_regs[i].first()->is_Register()) {
-        const Register reg = in_regs[i].first()->as_Register();
-        switch (in_sig_bt[i]) {
-          case T_BOOLEAN:
-          case T_BYTE:
-          case T_SHORT:
-          case T_CHAR:
-          case T_INT:
-            // Fall through.
-          case T_ARRAY:
-          case T_LONG: double_slots++; break;
-          default: ShouldNotReachHere();
-        }
-      } else {
-        if (in_regs[i].first()->is_FloatRegister()) {
-          switch (in_sig_bt[i]) {
-            case T_FLOAT: single_slots++; break;
-            case T_DOUBLE: double_slots++; break;
-            default: ShouldNotReachHere();
-          }
-        }
-      }
-    } // for
-    total_save_slots = double_slots * 2 + align_up(single_slots, 2); // Round to even.
-  }
 
   int oop_handle_slot_offset = stack_slots;
   stack_slots += total_save_slots; // 3)
 
   int klass_slot_offset = 0;
   int klass_offset = -1;
-  if (method_is_static && !is_critical_native) { // 4)
+  if (method_is_static) { // 4)
     klass_slot_offset = stack_slots;
     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
     stack_slots += VMRegImpl::slots_per_word;
@@ -1783,15 +1630,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
         break;
 
       case T_ARRAY:
-        if (is_critical_native) {
-          int body_arg = cix;
-          cix -= 1; // Point to length arg.
-          unpack_array_argument(masm, in_regs[jix], in_elem_bt[jix], out_regs[body_arg], out_regs[cix], stack_slots);
-          break;
-        }
-        // else fallthrough
       case T_OBJECT:
-        assert(!is_critical_native, "no oop arguments");
         object_move(masm, map, oop_handle_slot_offset, stack_slots, in_regs[jix], out_regs[cix],
                     ((jix == 0) && (!method_is_static)),
                     &receiver_offset);
@@ -1821,7 +1660,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
   // Pre-load a static method's oop into ARG2.
   // Used both by locking code and the normal JNI call code.
   //--------------------------------------------------------------------
-  if (method_is_static && !is_critical_native) {
+  if (method_is_static) {
     __ set_oop_constant(JNIHandles::make_local(method->method_holder()->java_mirror()), Z_ARG2);
 
     // Now handlize the static class mirror in ARG2. It's known not-null.
@@ -1831,9 +1670,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
   }
 
   // Get JNIEnv* which is first argument to native.
-  if (!is_critical_native) {
-    __ add2reg(Z_ARG1, in_bytes(JavaThread::jni_environment_offset()), Z_thread);
-  }
+  __ add2reg(Z_ARG1, in_bytes(JavaThread::jni_environment_offset()), Z_thread);
 
   //////////////////////////////////////////////////////////////////////
   // We have all of the arguments setup at this point.
@@ -1855,7 +1692,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
   // Lock a synchronized method.
 
   if (method->is_synchronized()) {
-    assert(!is_critical_native, "unhandled");
 
     // ATTENTION: args and Z_R10 must be preserved.
     Register r_oop = Z_R11;
@@ -1923,10 +1759,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
   // Use that pc we placed in Z_R10 a while back as the current frame anchor.
   __ set_last_Java_frame(Z_SP, Z_R10);
 
-  if (!is_critical_native) {
-    // Transition from _thread_in_Java to _thread_in_native.
-    __ set_thread_state(_thread_in_native);
-  }
+  // Transition from _thread_in_Java to _thread_in_native.
+  __ set_thread_state(_thread_in_native);
 
   //////////////////////////////////////////////////////////////////////
   // This is the JNI call.
@@ -1974,18 +1808,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
 
   Label after_transition;
 
-  // If this is a critical native, check for a safepoint or suspend request after the call.
-  // If a safepoint is needed, transition to native, then to native_trans to handle
-  // safepoints like the native methods that are not critical natives.
-  if (is_critical_native) {
-    Label needs_safepoint;
-    // Does this need to save_native_result and fences?
-    __ safepoint_poll(needs_safepoint, Z_R1);
-    __ load_and_test_int(Z_R0, Address(Z_thread, JavaThread::suspend_flags_offset()));
-    __ z_bre(after_transition);
-    __ bind(needs_safepoint);
-  }
-
   // Switch thread to "native transition" state before reading the synchronization state.
   // This additional state is necessary because reading and testing the synchronization
   // state is not atomic w.r.t. GC, as this scenario demonstrates:
@@ -2156,14 +1978,12 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
   }
 
   // Reset handle block.
-  if (!is_critical_native) {
-    __ z_lg(Z_R1_scratch, Address(Z_thread, JavaThread::active_handles_offset()));
-    __ clear_mem(Address(Z_R1_scratch, JNIHandleBlock::top_offset_in_bytes()), 4);
+  __ z_lg(Z_R1_scratch, Address(Z_thread, JavaThread::active_handles_offset()));
+  __ clear_mem(Address(Z_R1_scratch, JNIHandleBlock::top_offset_in_bytes()), 4);
 
-    // Check for pending exceptions.
-    __ load_and_test_long(Z_R0, Address(Z_thread, Thread::pending_exception_offset()));
-    __ z_brne(handle_pending_exception);
-  }
+  // Check for pending exceptions.
+  __ load_and_test_long(Z_R0, Address(Z_thread, Thread::pending_exception_offset()));
+  __ z_brne(handle_pending_exception);
 
 
   //////////////////////////////////////////////////////////////////////
@@ -2185,26 +2005,23 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
   //////////////////////////////////////////////////////////////////////
 
 
-  if (!is_critical_native) {
-    //---------------------------------------------------------------------
-    // Handler for pending exceptions (out-of-line).
-    //---------------------------------------------------------------------
-    // Since this is a native call, we know the proper exception handler
-    // is the empty function. We just pop this frame and then jump to
-    // forward_exception_entry. Z_R14 will contain the native caller's
-    // return PC.
-    __ bind(handle_pending_exception);
-    __ pop_frame();
-    __ load_const_optimized(Z_R1_scratch, StubRoutines::forward_exception_entry());
-    __ restore_return_pc();
-    __ z_br(Z_R1_scratch);
+  //---------------------------------------------------------------------
+  // Handler for pending exceptions (out-of-line).
+  //---------------------------------------------------------------------
+  // Since this is a native call, we know the proper exception handler
+  // is the empty function. We just pop this frame and then jump to
+  // forward_exception_entry. Z_R14 will contain the native caller's
+  // return PC.
+  __ bind(handle_pending_exception);
+  __ pop_frame();
+  __ load_const_optimized(Z_R1_scratch, StubRoutines::forward_exception_entry());
+  __ restore_return_pc();
+  __ z_br(Z_R1_scratch);
 
-    //---------------------------------------------------------------------
-    // Handler for a cache miss (out-of-line)
-    //---------------------------------------------------------------------
-    __ call_ic_miss_handler(ic_miss, 0x77, 0, Z_R1_scratch);
-  }
+  //---------------------------------------------------------------------
+  // Handler for a cache miss (out-of-line)
+  //---------------------------------------------------------------------
+  __ call_ic_miss_handler(ic_miss, 0x77, 0, Z_R1_scratch);
   __ flush();
 
 
@@ -1236,40 +1236,6 @@ void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_ty
   }
 }
 
-// Unpack an array argument into a pointer to the body and the length
-// if the array is non-null, otherwise pass 0 for both.
-static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
-  Register tmp_reg = rax;
-  assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
-         "possible collision");
-  assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
-         "possible collision");
-
-  // Pass the length, ptr pair
-  Label is_null, done;
-  VMRegPair tmp(tmp_reg->as_VMReg());
-  if (reg.first()->is_stack()) {
-    // Load the arg up from the stack
-    simple_move32(masm, reg, tmp);
-    reg = tmp;
-  }
-  __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
-  __ jccb(Assembler::equal, is_null);
-  __ lea(tmp_reg, Address(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type)));
-  simple_move32(masm, tmp, body_arg);
-  // load the length relative to the body.
-  __ movl(tmp_reg, Address(tmp_reg, arrayOopDesc::length_offset_in_bytes() -
-                           arrayOopDesc::base_offset_in_bytes(in_elem_type)));
-  simple_move32(masm, tmp, length_arg);
-  __ jmpb(done);
-  __ bind(is_null);
-  // Pass zeros
-  __ xorptr(tmp_reg, tmp_reg);
-  simple_move32(masm, tmp, body_arg);
-  simple_move32(masm, tmp, length_arg);
-  __ bind(done);
-}
-
 static void verify_oop_args(MacroAssembler* masm,
                             const methodHandle& method,
                             const BasicType* sig_bt,
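A small trick in the deleted 32-bit code: after forming the body pointer, the length was reloaded at a negative displacement from that pointer rather than from the original array register. The displacement arithmetic, with hypothetical offset values (the real ones come from arrayOopDesc):

    // length_addr = body + (length_offset - base_offset)
    const int length_offset = 12;                 // hypothetical
    const int base_offset   = 16;                 // hypothetical, for T_INT
    const int disp = length_offset - base_offset; // == -4
    // i.e. movl(tmp_reg, Address(tmp_reg, disp)) reads the length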
@ -1372,8 +1338,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
|
||||
int compile_id,
|
||||
BasicType* in_sig_bt,
|
||||
VMRegPair* in_regs,
|
||||
BasicType ret_type,
|
||||
address critical_entry) {
|
||||
BasicType ret_type) {
|
||||
if (method->is_method_handle_intrinsic()) {
|
||||
vmIntrinsics::ID iid = method->intrinsic_id();
|
||||
intptr_t start = (intptr_t)__ pc();
|
||||
@ -1395,12 +1360,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
|
||||
in_ByteSize(-1),
|
||||
(OopMapSet*)NULL);
|
||||
}
|
||||
bool is_critical_native = true;
|
||||
address native_func = critical_entry;
|
||||
if (native_func == NULL) {
|
||||
native_func = method->native_function();
|
||||
is_critical_native = false;
|
||||
}
|
||||
address native_func = method->native_function();
|
||||
assert(native_func != NULL, "must have function");
|
||||
|
||||
// An OopMap for lock (and class if static)
|
||||
@ -1413,55 +1373,20 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
|
||||
// the hidden arguments as arg[0] and possibly arg[1] (static method)
|
||||
|
||||
const int total_in_args = method->size_of_parameters();
|
||||
int total_c_args = total_in_args;
|
||||
if (!is_critical_native) {
|
||||
total_c_args += 1;
|
||||
if (method->is_static()) {
|
||||
total_c_args++;
|
||||
}
|
||||
} else {
|
||||
for (int i = 0; i < total_in_args; i++) {
|
||||
if (in_sig_bt[i] == T_ARRAY) {
|
||||
total_c_args++;
|
||||
}
|
||||
}
|
||||
}
|
||||
int total_c_args = total_in_args + (method->is_static() ? 2 : 1);
|
||||
|
||||
BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
|
||||
VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
|
||||
BasicType* in_elem_bt = NULL;
|
||||
|
||||
int argc = 0;
|
||||
if (!is_critical_native) {
|
||||
out_sig_bt[argc++] = T_ADDRESS;
|
||||
if (method->is_static()) {
|
||||
out_sig_bt[argc++] = T_OBJECT;
|
||||
}
|
||||
out_sig_bt[argc++] = T_ADDRESS;
|
||||
if (method->is_static()) {
|
||||
out_sig_bt[argc++] = T_OBJECT;
|
||||
}
|
||||
|
||||
for (int i = 0; i < total_in_args ; i++ ) {
|
||||
out_sig_bt[argc++] = in_sig_bt[i];
|
||||
}
|
||||
} else {
|
||||
in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
|
||||
SignatureStream ss(method->signature());
|
||||
for (int i = 0; i < total_in_args ; i++ ) {
|
||||
if (in_sig_bt[i] == T_ARRAY) {
|
||||
// Arrays are passed as int, elem* pair
|
||||
out_sig_bt[argc++] = T_INT;
|
||||
out_sig_bt[argc++] = T_ADDRESS;
|
||||
ss.skip_array_prefix(1); // skip one '['
|
||||
assert(ss.is_primitive(), "primitive type expected");
|
||||
in_elem_bt[i] = ss.type();
|
||||
} else {
|
||||
out_sig_bt[argc++] = in_sig_bt[i];
|
||||
in_elem_bt[i] = T_VOID;
|
||||
}
|
||||
if (in_sig_bt[i] != T_VOID) {
|
||||
assert(in_sig_bt[i] == ss.type() ||
|
||||
in_sig_bt[i] == T_ARRAY, "must match");
|
||||
ss.next();
|
||||
}
|
||||
}
|
||||
for (int i = 0; i < total_in_args ; i++ ) {
|
||||
out_sig_bt[argc++] = in_sig_bt[i];
|
||||
}
|
||||
|
||||
// Now figure out where the args must be stored and how much stack space
|
||||
@ -1479,40 +1404,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
|
||||
|
||||
// Now the space for the inbound oop handle area
|
||||
int total_save_slots = 2 * VMRegImpl::slots_per_word; // 2 arguments passed in registers
|
||||
if (is_critical_native) {
|
||||
// Critical natives may have to call out so they need a save area
|
||||
// for register arguments.
|
||||
int double_slots = 0;
|
||||
int single_slots = 0;
|
||||
for ( int i = 0; i < total_in_args; i++) {
|
||||
if (in_regs[i].first()->is_Register()) {
|
||||
const Register reg = in_regs[i].first()->as_Register();
|
||||
switch (in_sig_bt[i]) {
|
||||
case T_ARRAY: // critical array (uses 2 slots on LP64)
|
||||
case T_BOOLEAN:
|
||||
case T_BYTE:
|
||||
case T_SHORT:
|
||||
case T_CHAR:
|
||||
case T_INT: single_slots++; break;
|
||||
case T_LONG: double_slots++; break;
|
||||
default: ShouldNotReachHere();
|
||||
}
|
||||
} else if (in_regs[i].first()->is_XMMRegister()) {
|
||||
switch (in_sig_bt[i]) {
|
||||
case T_FLOAT: single_slots++; break;
|
||||
case T_DOUBLE: double_slots++; break;
|
||||
default: ShouldNotReachHere();
|
||||
}
|
||||
} else if (in_regs[i].first()->is_FloatRegister()) {
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
}
|
||||
total_save_slots = double_slots * 2 + single_slots;
|
||||
// align the save area
|
||||
if (double_slots != 0) {
|
||||
stack_slots = align_up(stack_slots, 2);
|
||||
}
|
||||
}
|
||||
|
||||
int oop_handle_offset = stack_slots;
|
||||
stack_slots += total_save_slots;
|
||||
@ -1691,7 +1582,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
|
||||
// vectors we have in our possession. We simply walk the java vector to
|
||||
// get the source locations and the c vector to get the destinations.
|
||||
|
||||
int c_arg = is_critical_native ? 0 : (method->is_static() ? 2 : 1 );
|
||||
int c_arg = method->is_static() ? 2 : 1;
|
||||
|
||||
// Record rsp-based slot for receiver on stack for non-static methods
|
||||
int receiver_offset = -1;
|
||||
@ -1714,14 +1605,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
|
||||
for (int i = 0; i < total_in_args ; i++, c_arg++ ) {
|
||||
switch (in_sig_bt[i]) {
|
||||
case T_ARRAY:
|
||||
if (is_critical_native) {
|
||||
VMRegPair in_arg = in_regs[i];
|
||||
unpack_array_argument(masm, in_arg, in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
|
||||
c_arg++;
|
||||
break;
|
||||
}
|
||||
case T_OBJECT:
|
||||
assert(!is_critical_native, "no oop arguments");
|
||||
object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
|
||||
((i == 0) && (!is_static)),
|
||||
&receiver_offset);
|
||||
@ -1753,7 +1637,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
|
||||
|
||||
// Pre-load a static method's oop into rsi. Used both by locking code and
|
||||
// the normal JNI call code.
|
||||
if (method->is_static() && !is_critical_native) {
|
||||
if (method->is_static()) {
|
||||
|
||||
// load opp into a register
|
||||
    __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));
@ -1808,8 +1692,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,

  // Lock a synchronized method
  if (method->is_synchronized()) {
    assert(!is_critical_native, "unhandled");


    const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();

@ -1861,13 +1743,11 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
  // Finally just about ready to make the JNI call

  // get JNIEnv* which is first argument to native
  if (!is_critical_native) {
    __ lea(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset())));
    __ movptr(Address(rsp, 0), rdx);
  __ lea(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset())));
  __ movptr(Address(rsp, 0), rdx);

    // Now set thread in native
    __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
  }
  // Now set thread in native
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);

  __ call(RuntimeAddress(native_func));

@ -1900,17 +1780,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,

  Label after_transition;

  // If this is a critical native, check for a safepoint or suspend request after the call.
  // If a safepoint is needed, transition to native, then to native_trans to handle
  // safepoints like the native methods that are not critical natives.
  if (is_critical_native) {
    Label needs_safepoint;
    __ safepoint_poll(needs_safepoint, thread, false /* at_return */, false /* in_nmethod */);
    __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, after_transition);
    __ bind(needs_safepoint);
  }
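
The deleted branch above is the special post-call protocol critical natives used: skip the JNIEnv* setup and the pre-call state transition, make the call, then poll once afterwards and only fall into the normal native_trans path when a safepoint or suspend request is actually pending. The sketch below models that fast/slow split with plain atomics; the names and states are illustrative stand-ins, not HotSpot code.

#include <atomic>
#include <cstdio>

enum ThreadState { thread_in_Java, thread_in_native, thread_in_native_trans };

std::atomic<bool> safepoint_requested{false}; // stands in for the VM's poll word
std::atomic<bool> suspend_requested{false};   // stands in for suspend_flags

struct Thread { std::atomic<ThreadState> state{thread_in_Java}; };

// What the wrapper emitted after a critical native call: poll, and only
// when something is pending take the ordinary JNI return path.
void after_critical_call(Thread& t) {
  if (!safepoint_requested.load(std::memory_order_acquire) &&
      !suspend_requested.load(std::memory_order_acquire)) {
    return; // fast path: jump straight to after_transition
  }
  // slow path: behave like a regular JNI native from here on
  t.state.store(thread_in_native, std::memory_order_release);
  t.state.store(thread_in_native_trans, std::memory_order_release);
  // ... block for the safepoint here, then resume in Java state ...
  t.state.store(thread_in_Java, std::memory_order_release);
}

int main() {
  Thread t;
  after_critical_call(t); // fast path taken
  safepoint_requested.store(true);
  after_critical_call(t); // slow path taken
  std::printf("final state=%d\n", (int)t.state.load());
  return 0;
}
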

  // Switch thread to "native transition" state before reading the synchronization state.
  // This additional state is necessary because reading and testing the synchronization
  // state is not atomic w.r.t. GC, as this scenario demonstrates:
@ -2043,15 +1912,13 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
    __ movptr(Address(thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD);
  }

  if (!is_critical_native) {
    // reset handle block
    __ movptr(rcx, Address(thread, JavaThread::active_handles_offset()));
    __ movl(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);
  // reset handle block
  __ movptr(rcx, Address(thread, JavaThread::active_handles_offset()));
  __ movl(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);

    // Any exception pending?
    __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
    __ jcc(Assembler::notEqual, exception_pending);
  }
  // Any exception pending?
  __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
  __ jcc(Assembler::notEqual, exception_pending);

  // no exception, we're almost done

@ -2165,18 +2032,16 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,

  // BEGIN EXCEPTION PROCESSING

  if (!is_critical_native) {
    // Forward the exception
    __ bind(exception_pending);
  // Forward the exception
  __ bind(exception_pending);

    // remove possible return value from FPU register stack
    __ empty_FPU_stack();
  // remove possible return value from FPU register stack
  __ empty_FPU_stack();

    // pop our frame
    __ leave();
    // and forward the exception
    __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
  }
  // pop our frame
  __ leave();
  // and forward the exception
  __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  __ flush();

@ -1235,46 +1235,6 @@ static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMR
  }
}

// Unpack an array argument into a pointer to the body and the length
// if the array is non-null, otherwise pass 0 for both.
static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
  Register tmp_reg = rax;
  assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
         "possible collision");
  assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
         "possible collision");

  __ block_comment("unpack_array_argument {");

  // Pass the length, ptr pair
  Label is_null, done;
  VMRegPair tmp;
  tmp.set_ptr(tmp_reg->as_VMReg());
  if (reg.first()->is_stack()) {
    // Load the arg up from the stack
    __ move_ptr(reg, tmp);
    reg = tmp;
  }
  __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
  __ jccb(Assembler::equal, is_null);
  __ lea(tmp_reg, Address(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type)));
  __ move_ptr(tmp, body_arg);
  // load the length relative to the body.
  __ movl(tmp_reg, Address(tmp_reg, arrayOopDesc::length_offset_in_bytes() -
                           arrayOopDesc::base_offset_in_bytes(in_elem_type)));
  __ move32_64(tmp, length_arg);
  __ jmpb(done);
  __ bind(is_null);
  // Pass zeros
  __ xorptr(tmp_reg, tmp_reg);
  __ move_ptr(tmp, body_arg);
  __ move32_64(tmp, length_arg);
  __ bind(done);

  __ block_comment("} unpack_array_argument");
}
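
The unpack_array_argument stub above implements the array calling convention critical natives relied on: each primitive array arrives as a (length, body pointer) pair, and a null array becomes (0, NULL). A minimal stand-alone sketch of that contract, assuming a simplified array header in place of HotSpot's arrayOopDesc layout:

#include <cstdint>
#include <cstdio>

struct FakeArray {      // stand-in for an array oop
  int32_t length;       // at the "length offset"
  int32_t elems[4];     // payload starts at the "base offset"
};

// Null array -> (0, nullptr); otherwise -> (length, pointer to first element).
void unpack(FakeArray* arr, int32_t* out_len, int32_t** out_body) {
  if (arr == nullptr) {
    *out_len = 0;            // "Pass zeros"
    *out_body = nullptr;
  } else {
    *out_body = arr->elems;  // body = address of the payload
    *out_len = arr->length;  // length read relative to the header
  }
}

int main() {
  FakeArray a = {3, {10, 20, 30, 0}};
  int32_t len; int32_t* body;
  unpack(&a, &len, &body);
  std::printf("len=%d first=%d\n", len, body[0]);    // len=3 first=10
  unpack(nullptr, &len, &body);
  std::printf("len=%d body=%p\n", len, (void*)body); // len=0 body=0
  return 0;
}
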


// Different signatures may require very different orders for the move
// to avoid clobbering other arguments. There's no simple way to
// order them safely. Compute a safe order for issuing stores and
@ -1550,8 +1510,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                int compile_id,
                                                BasicType* in_sig_bt,
                                                VMRegPair* in_regs,
                                                BasicType ret_type,
                                                address critical_entry) {
                                                BasicType ret_type) {
  if (method->is_method_handle_intrinsic()) {
    vmIntrinsics::ID iid = method->intrinsic_id();
    intptr_t start = (intptr_t)__ pc();
@ -1573,12 +1532,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                       in_ByteSize(-1),
                                       (OopMapSet*)NULL);
  }
  bool is_critical_native = true;
  address native_func = critical_entry;
  if (native_func == NULL) {
    native_func = method->native_function();
    is_critical_native = false;
  }
  address native_func = method->native_function();
  assert(native_func != NULL, "must have function");

  // An OopMap for lock (and class if static)
@ -1592,55 +1546,20 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
  // the hidden arguments as arg[0] and possibly arg[1] (static method)

  const int total_in_args = method->size_of_parameters();
  int total_c_args = total_in_args;
  if (!is_critical_native) {
    total_c_args += 1;
    if (method->is_static()) {
      total_c_args++;
    }
  } else {
    for (int i = 0; i < total_in_args; i++) {
      if (in_sig_bt[i] == T_ARRAY) {
        total_c_args++;
      }
    }
  }
  int total_c_args = total_in_args + (method->is_static() ? 2 : 1);

  BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
  VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
  BasicType* in_elem_bt = NULL;

  int argc = 0;
  if (!is_critical_native) {
    out_sig_bt[argc++] = T_ADDRESS;
    if (method->is_static()) {
      out_sig_bt[argc++] = T_OBJECT;
    }
  out_sig_bt[argc++] = T_ADDRESS;
  if (method->is_static()) {
    out_sig_bt[argc++] = T_OBJECT;
  }

    for (int i = 0; i < total_in_args ; i++ ) {
      out_sig_bt[argc++] = in_sig_bt[i];
    }
  } else {
    in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
    SignatureStream ss(method->signature());
    for (int i = 0; i < total_in_args ; i++ ) {
      if (in_sig_bt[i] == T_ARRAY) {
        // Arrays are passed as int, elem* pair
        out_sig_bt[argc++] = T_INT;
        out_sig_bt[argc++] = T_ADDRESS;
        ss.skip_array_prefix(1);  // skip one '['
        assert(ss.is_primitive(), "primitive type expected");
        in_elem_bt[i] = ss.type();
      } else {
        out_sig_bt[argc++] = in_sig_bt[i];
        in_elem_bt[i] = T_VOID;
      }
      if (in_sig_bt[i] != T_VOID) {
        assert(in_sig_bt[i] == ss.type() ||
               in_sig_bt[i] == T_ARRAY, "must match");
        ss.next();
      }
    }
  for (int i = 0; i < total_in_args ; i++ ) {
    out_sig_bt[argc++] = in_sig_bt[i];
  }
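
With the critical-native branch gone, the outgoing C signature is uniform: JNIEnv* as hidden argument 0, the class mirror as hidden argument 1 for static methods, then the Java parameters copied through unchanged. A tiny illustrative model of that layout, with simplified types:

#include <cstdio>
#include <vector>

enum BasicType { T_ADDRESS, T_OBJECT, T_INT, T_LONG };

// Mirrors the simplified signature build: hidden args first, then Java args.
std::vector<BasicType> build_out_sig(const std::vector<BasicType>& in_sig, bool is_static) {
  std::vector<BasicType> out;
  out.push_back(T_ADDRESS);                       // JNIEnv*
  if (is_static) out.push_back(T_OBJECT);         // class mirror
  for (BasicType bt : in_sig) out.push_back(bt);  // Java parameters, unchanged
  return out;
}

int main() {
  auto out = build_out_sig({T_INT, T_LONG}, /*is_static=*/true);
  // total_c_args = total_in_args + (is_static ? 2 : 1), here 4
  std::printf("total_c_args=%zu\n", out.size());
  return 0;
}
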

  // Now figure out where the args must be stored and how much stack space
@ -1658,40 +1577,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,

  // Now the space for the inbound oop handle area
  int total_save_slots = 6 * VMRegImpl::slots_per_word;  // 6 arguments passed in registers
  if (is_critical_native) {
    // Critical natives may have to call out so they need a save area
    // for register arguments.
    int double_slots = 0;
    int single_slots = 0;
    for ( int i = 0; i < total_in_args; i++) {
      if (in_regs[i].first()->is_Register()) {
        const Register reg = in_regs[i].first()->as_Register();
        switch (in_sig_bt[i]) {
          case T_BOOLEAN:
          case T_BYTE:
          case T_SHORT:
          case T_CHAR:
          case T_INT:  single_slots++; break;
          case T_ARRAY:  // specific to LP64 (7145024)
          case T_LONG: double_slots++; break;
          default:  ShouldNotReachHere();
        }
      } else if (in_regs[i].first()->is_XMMRegister()) {
        switch (in_sig_bt[i]) {
          case T_FLOAT:  single_slots++; break;
          case T_DOUBLE: double_slots++; break;
          default:  ShouldNotReachHere();
        }
      } else if (in_regs[i].first()->is_FloatRegister()) {
        ShouldNotReachHere();
      }
    }
    total_save_slots = double_slots * 2 + single_slots;
    // align the save area
    if (double_slots != 0) {
      stack_slots = align_up(stack_slots, 2);
    }
  }

  int oop_handle_offset = stack_slots;
  stack_slots += total_save_slots;
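
The block above, now removed, sized a register-argument save area for critical natives: one VMReg slot per 32-bit argument, two per 64-bit argument, and a doubleword-aligned base whenever a long or double must be spilled. The same arithmetic as a stand-alone sketch; types and names are illustrative:

#include <cstdio>

enum SlotKind { SINGLE, DOUBLE };  // 32-bit vs 64-bit register argument

// Returns the save-area size in slots; aligns *stack_slots up to an even
// slot count first when any doubleword argument needs to be spilled.
int save_slots(const SlotKind* args, int n, int* stack_slots) {
  int singles = 0, doubles = 0;
  for (int i = 0; i < n; i++) {
    if (args[i] == DOUBLE) doubles++; else singles++;
  }
  if (doubles != 0) {
    *stack_slots = (*stack_slots + 1) & ~1;  // align_up(stack_slots, 2)
  }
  return doubles * 2 + singles;
}

int main() {
  SlotKind args[] = {SINGLE, DOUBLE, DOUBLE};  // e.g. int, long, double
  int stack_slots = 3;
  int total = save_slots(args, 3, &stack_slots);
  std::printf("save slots=%d, aligned base=%d\n", total, stack_slots);  // 5, 4
  return 0;
}
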
@ -1886,23 +1771,15 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,

#endif /* ASSERT */

  // This may iterate in two different directions depending on the
  // kind of native it is. The reason is that for regular JNI natives
  // the incoming and outgoing registers are offset upwards and for
  // critical natives they are offset down.
  // For JNI natives the incoming and outgoing registers are offset upwards.
  GrowableArray<int> arg_order(2 * total_in_args);

  VMRegPair tmp_vmreg;
  tmp_vmreg.set2(rbx->as_VMReg());

  if (!is_critical_native) {
    for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
      arg_order.push(i);
      arg_order.push(c_arg);
    }
  } else {
    // Compute a valid move order, using tmp_vmreg to break any cycles
    ComputeMoveOrder cmo(total_in_args, in_regs, total_c_args, out_regs, in_sig_bt, arg_order, tmp_vmreg);
  for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
    arg_order.push(i);
    arg_order.push(c_arg);
  }

  int temploc = -1;
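
Why the plain reverse walk is always safe for regular JNI natives: every outgoing slot sits a fixed number of hidden-argument positions above its incoming slot, so moving the highest-numbered pair first never overwrites a source that still has to be read. The deleted ComputeMoveOrder machinery was only needed for the critical-native direction. A toy model of the invariant:

#include <cstdio>

int main() {
  // Three incoming args in slots 0..2; outgoing slots are offset up by the
  // two hidden arguments (JNIEnv* and the class mirror).
  int slots[8] = {11, 22, 33, 0, 0, 0, 0, 0};
  const int offset = 2;
  for (int i = 2; i >= 0; i--) {   // highest argument first, as arg_order does
    slots[i + offset] = slots[i];  // the destination is always above any
  }                                // source still waiting to be moved
  std::printf("%d %d %d\n", slots[2], slots[3], slots[4]);  // 11 22 33
  return 0;
}
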
@ -1910,20 +1787,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
    int i = arg_order.at(ai);
    int c_arg = arg_order.at(ai + 1);
    __ block_comment(err_msg("move %d -> %d", i, c_arg));
    if (c_arg == -1) {
      assert(is_critical_native, "should only be required for critical natives");
      // This arg needs to be moved to a temporary
      __ mov(tmp_vmreg.first()->as_Register(), in_regs[i].first()->as_Register());
      in_regs[i] = tmp_vmreg;
      temploc = i;
      continue;
    } else if (i == -1) {
      assert(is_critical_native, "should only be required for critical natives");
      // Read from the temporary location
      assert(temploc != -1, "must be valid");
      i = temploc;
      temploc = -1;
    }
#ifdef ASSERT
    if (in_regs[i].first()->is_Register()) {
      assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
@ -1938,20 +1801,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
#endif /* ASSERT */
    switch (in_sig_bt[i]) {
      case T_ARRAY:
        if (is_critical_native) {
          unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
          c_arg++;
#ifdef ASSERT
          if (out_regs[c_arg].first()->is_Register()) {
            reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
          } else if (out_regs[c_arg].first()->is_XMMRegister()) {
            freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
          }
#endif
          break;
        }
      case T_OBJECT:
        assert(!is_critical_native, "no oop arguments");
        __ object_move(map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
                       ((i == 0) && (!is_static)),
                       &receiver_offset);
@ -1985,30 +1835,25 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,

  // Pre-load a static method's oop into r14. Used both by locking code and
  // the normal JNI call code.
  if (!is_critical_native) {
    // point c_arg at the first arg that is already loaded in case we
    // need to spill before we call out
    c_arg = total_c_args - total_in_args;
  // point c_arg at the first arg that is already loaded in case we
  // need to spill before we call out
  c_arg = total_c_args - total_in_args;

    if (method->is_static()) {
  if (method->is_static()) {

      // load oop into a register
      __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));
    // load oop into a register
    __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));

      // Now handlize the static class mirror it's known not-null.
      __ movptr(Address(rsp, klass_offset), oop_handle_reg);
      map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
    // Now handlize the static class mirror it's known not-null.
    __ movptr(Address(rsp, klass_offset), oop_handle_reg);
    map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));

      // Now get the handle
      __ lea(oop_handle_reg, Address(rsp, klass_offset));
      // store the klass handle as second argument
      __ movptr(c_rarg1, oop_handle_reg);
      // and protect the arg if we must spill
      c_arg--;
    }
  } else {
    // For JNI critical methods we need to save all registers in save_args.
    c_arg = 0;
    // Now get the handle
    __ lea(oop_handle_reg, Address(rsp, klass_offset));
    // store the klass handle as second argument
    __ movptr(c_rarg1, oop_handle_reg);
    // and protect the arg if we must spill
    c_arg--;
  }

  // Change state to native (we save the return address in the thread, since it might not
@ -2060,8 +1905,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
  Label lock_done;

  if (method->is_synchronized()) {
    assert(!is_critical_native, "unhandled");


    const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();

@ -2115,12 +1958,10 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
  // Finally just about ready to make the JNI call

  // get JNIEnv* which is first argument to native
  if (!is_critical_native) {
    __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
  __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));

    // Now set thread in native
    __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
  }
  // Now set thread in native
  __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);

  __ call(RuntimeAddress(native_func));

@ -2148,17 +1989,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,

  Label after_transition;

  // If this is a critical native, check for a safepoint or suspend request after the call.
  // If a safepoint is needed, transition to native, then to native_trans to handle
  // safepoints like the native methods that are not critical natives.
  if (is_critical_native) {
    Label needs_safepoint;
    __ safepoint_poll(needs_safepoint, r15_thread, false /* at_return */, false /* in_nmethod */);
    __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, after_transition);
    __ bind(needs_safepoint);
  }

  // Switch thread to "native transition" state before reading the synchronization state.
  // This additional state is necessary because reading and testing the synchronization
  // state is not atomic w.r.t. GC, as this scenario demonstrates:
@ -2279,21 +2109,17 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
    __ movptr(Address(r15_thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD);
  }

  if (!is_critical_native) {
    // reset handle block
    __ movptr(rcx, Address(r15_thread, JavaThread::active_handles_offset()));
    __ movl(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
  }
  // reset handle block
  __ movptr(rcx, Address(r15_thread, JavaThread::active_handles_offset()));
  __ movl(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);

  // pop our frame

  __ leave();

  if (!is_critical_native) {
    // Any exception pending?
    __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
    __ jcc(Assembler::notEqual, exception_pending);
  }
  // Any exception pending?
  __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
  __ jcc(Assembler::notEqual, exception_pending);

  // Return

@ -2301,13 +2127,11 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,

  // Unexpected paths are out of line and go here

  if (!is_critical_native) {
    // forward the exception
    __ bind(exception_pending);
  // forward the exception
  __ bind(exception_pending);

    // and forward the exception
    __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
  }
  // and forward the exception
  __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  // Slow path locking & unlocking
  if (method->is_synchronized()) {
@ -73,8 +73,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
                                                int compile_id,
                                                BasicType *sig_bt,
                                                VMRegPair *regs,
                                                BasicType ret_type,
                                                address critical_entry) {
                                                BasicType ret_type) {
  ShouldNotCallThis();
  return NULL;
}
@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2009 Red Hat, Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@ -116,7 +116,6 @@ void VM_Version::initialize() {
  }

  // Not implemented
  UNSUPPORTED_OPTION(CriticalJNINatives);
  UNSUPPORTED_OPTION(UseCompiler);
#ifdef ASSERT
  UNSUPPORTED_OPTION(CountCompiledCalls);
@ -182,24 +182,6 @@ char* NativeLookup::pure_jni_name(const methodHandle& method) {
  return st.as_string();
}


char* NativeLookup::critical_jni_name(const methodHandle& method) {
  stringStream st;
  // Prefix
  st.print("JavaCritical_");
  // Klass name
  if (!map_escaped_name_on(&st, method->klass_name())) {
    return NULL;
  }
  st.print("_");
  // Method name
  if (!map_escaped_name_on(&st, method->name())) {
    return NULL;
  }
  return st.as_string();
}
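
critical_jni_name, deleted above, produced the symbol the VM searched for: "JavaCritical_" plus the escaped class name, an underscore, and the escaped method name. The sketch below applies only the '/' and '_' escapes of the JNI mangling rules (no unicode escapes or overload suffixes), so treat it as illustrative rather than a faithful reimplementation of map_escaped_name_on:

#include <cstdio>
#include <string>

// Reduced JNI escaping: '/' separates packages and becomes '_', while a
// literal '_' in a name is escaped as "_1".
std::string mangle(const std::string& s) {
  std::string out;
  for (char c : s) {
    if (c == '/')      out += '_';
    else if (c == '_') out += "_1";
    else               out += c;
  }
  return out;
}

std::string critical_jni_name(const std::string& klass, const std::string& method) {
  return "JavaCritical_" + mangle(klass) + "_" + mangle(method);
}

int main() {
  // Matches the entry points in the deleted test libraries further down,
  // e.g. JavaCritical_gc_CriticalNative_sum1.
  std::printf("%s\n", critical_jni_name("gc/CriticalNative", "sum1").c_str());
  return 0;
}
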


char* NativeLookup::long_jni_name(const methodHandle& method) {
  // Signatures ignore the wrapping parentheses and the trailing return type
  stringStream st;
@ -332,12 +314,6 @@ const char* NativeLookup::compute_complete_jni_name(const char* pure_name, const
  return st.as_string();
}

address NativeLookup::lookup_critical_style(void* dll, const char* pure_name, const char* long_name, int args_size, bool os_style) {
  const char* jni_name = compute_complete_jni_name(pure_name, long_name, args_size, os_style);
  assert(dll != NULL, "dll must be loaded");
  return (address)os::dll_lookup(dll, jni_name);
}

// Check all the formats of native implementation name to see if there is one
// for the specified method.
address NativeLookup::lookup_entry(const methodHandle& method, TRAPS) {
@ -381,53 +357,6 @@ address NativeLookup::lookup_entry(const methodHandle& method, TRAPS) {
  return entry; // NULL indicates not found
}

// Check all the formats of native implementation name to see if there is one
// for the specified method.
address NativeLookup::lookup_critical_entry(const methodHandle& method) {
  assert(CriticalJNINatives, "or should not be here");

  if (method->is_synchronized() ||
      !method->is_static()) {
    // Only static non-synchronized methods are allowed
    return NULL;
  }

  ResourceMark rm;

  Symbol* signature = method->signature();
  for (int end = 0; end < signature->utf8_length(); end++) {
    if (signature->char_at(end) == 'L') {
      // Don't allow object types
      return NULL;
    }
  }

  // Compute argument size
  int args_size = method->size_of_parameters();
  for (SignatureStream ss(signature); !ss.at_return_type(); ss.next()) {
    if (ss.is_array()) {
      args_size += T_INT_size; // array length parameter
    }
  }

  // dll handling requires I/O. Don't do that while in _thread_in_vm (safepoint may get requested).
  ThreadToNativeFromVM thread_in_native(JavaThread::current());

  void* dll = dll_load(method);
  address entry = NULL;

  if (dll != NULL) {
    entry = lookup_critical_style(dll, method, args_size);
    // Close the handle to avoid keeping the library alive if the native method holder is unloaded.
    // This is fine because the library is still kept alive by JNI (see JVM_LoadLibrary). As soon
    // as the holder class and the library are unloaded (see JVM_UnloadLibrary), the native wrapper
    // that calls 'critical_entry' becomes unreachable and is unloaded as well.
    os::dll_unload(dll);
  }

  return entry; // NULL indicates not found
}
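
lookup_critical_entry, deleted above, gated the whole feature: only static, non-synchronized methods with no object-typed parameters qualified, and each array parameter added an extra jint length argument to the expected entry point. A stand-alone sketch of that filter over plain descriptor strings; it handles only single-dimension primitive arrays and assumes simplified slot sizes, so it is illustrative only:

#include <cstdio>

// e.g. "(J[I[I)J": long, int[], int[] -> long
bool critical_eligible(const char* sig, bool is_static, bool is_synchronized,
                       int* args_size) {
  if (!is_static || is_synchronized) return false;        // static, unsynchronized only
  *args_size = 0;
  for (const char* p = sig; *p && *p != ')'; p++) {
    switch (*p) {
      case '(': break;
      case 'L': return false;                 // object types are rejected
      case '[': *args_size += 2; p++; break;  // array: pointer + extra length arg
      case 'J': case 'D': *args_size += 2; break;  // long/double take two slots
      default: *args_size += 1; break;        // remaining primitives take one
    }
  }
  return true;
}

int main() {
  int n;
  if (critical_eligible("(J[I[I)J", true, false, &n)) {
    std::printf("eligible, args_size=%d\n", n);  // eligible, args_size=6
  }
  if (!critical_eligible("(Ljava/lang/String;)V", true, false, &n)) {
    std::printf("rejected: object parameter\n");
  }
  return 0;
}
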

void* NativeLookup::dll_load(const methodHandle& method) {
  if (method->has_native_function()) {

@ -446,44 +375,6 @@ void* NativeLookup::dll_load(const methodHandle& method) {
  return NULL;
}

address NativeLookup::lookup_critical_style(void* dll, const methodHandle& method, int args_size) {
  address entry = NULL;
  const char* critical_name = critical_jni_name(method);
  if (critical_name == NULL) {
    // JNI name mapping rejected this method so return
    // NULL to indicate UnsatisfiedLinkError should be thrown.
    return NULL;
  }

  // 1) Try JNI short style
  entry = lookup_critical_style(dll, critical_name, "", args_size, true);
  if (entry != NULL) {
    return entry;
  }

  const char* long_name = long_jni_name(method);
  if (long_name == NULL) {
    // JNI name mapping rejected this method so return
    // NULL to indicate UnsatisfiedLinkError should be thrown.
    return NULL;
  }

  // 2) Try JNI long style
  entry = lookup_critical_style(dll, critical_name, long_name, args_size, true);
  if (entry != NULL) {
    return entry;
  }

  // 3) Try JNI short style without os prefix/suffix
  entry = lookup_critical_style(dll, critical_name, "", args_size, false);
  if (entry != NULL) {
    return entry;
  }

  // 4) Try JNI long style without os prefix/suffix
  return lookup_critical_style(dll, critical_name, long_name, args_size, false);
}

// Check if there are any JVM TI prefixes which have been applied to the native method name.
// If any are found, remove them before attempting the look up of the
// native implementation again.
@ -35,8 +35,6 @@ class NativeLookup : AllStatic {
 private:
  // Style specific lookup
  static address lookup_style(const methodHandle& method, char* pure_name, const char* long_name, int args_size, bool os_style, TRAPS);
  static address lookup_critical_style(void* dll, const char* pure_name, const char* long_name, int args_size, bool os_style);
  static address lookup_critical_style(void* dll, const methodHandle& method, int args_size);
  static address lookup_base (const methodHandle& method, TRAPS);
  static address lookup_entry(const methodHandle& method, TRAPS);
  static address lookup_entry_prefixed(const methodHandle& method, TRAPS);
@ -47,11 +45,9 @@ class NativeLookup : AllStatic {
  // JNI name computation
  static char* pure_jni_name(const methodHandle& method);
  static char* long_jni_name(const methodHandle& method);
  static char* critical_jni_name(const methodHandle& method);

  // Lookup native function. May throw UnsatisfiedLinkError.
  static address lookup(const methodHandle& method, TRAPS);
  static address lookup_critical_entry(const methodHandle& method);
};

#endif // SHARE_PRIMS_NATIVELOOKUP_HPP
@ -314,9 +314,6 @@ const intx ObjectAlignmentInBytes = 8;
  product(bool, InlineUnsafeOps, true, DIAGNOSTIC,                          \
          "Inline memory ops (native methods) from Unsafe")                 \
                                                                            \
  product(bool, CriticalJNINatives, false,                                  \
          "(Deprecated) Check for critical JNI entry points")               \
                                                                            \
  product(bool, UseAESIntrinsics, false, DIAGNOSTIC,                        \
          "Use intrinsics for AES versions of crypto")                      \
                                                                            \
@ -3008,17 +3008,11 @@ bool AdapterHandlerEntry::compare_code(AdapterHandlerEntry* other) {
void AdapterHandlerLibrary::create_native_wrapper(const methodHandle& method) {
  ResourceMark rm;
  nmethod* nm = NULL;
  address critical_entry = NULL;

  assert(method->is_native(), "must be native");
  assert(method->is_method_handle_intrinsic() ||
         method->has_native_function(), "must have something valid to call!");

  if (CriticalJNINatives && !method->is_method_handle_intrinsic()) {
    // We perform the I/O with transition to native before acquiring AdapterHandlerLibrary_lock.
    critical_entry = NativeLookup::lookup_critical_entry(method);
  }

  {
    // Perform the work while holding the lock, but perform any printing outside the lock
    MutexLocker mu(AdapterHandlerLibrary_lock);
@ -3061,7 +3055,7 @@ void AdapterHandlerLibrary::create_native_wrapper(const methodHandle& method) {
    int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);

    // Generate the compiled-to-native wrapper code
    nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type, critical_entry);
    nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type);

    if (nm != NULL) {
      {
@ -474,15 +474,13 @@ class SharedRuntime: AllStatic {
  // returns.
  //
  // The wrapper may contain special-case code if the given method
  // is a JNI critical method, or a compiled method handle adapter,
  // such as _invokeBasic, _linkToVirtual, etc.
  // is a compiled method handle adapter, such as _invokeBasic, _linkToVirtual, etc.
  static nmethod* generate_native_wrapper(MacroAssembler* masm,
                                          const methodHandle& method,
                                          int compile_id,
                                          BasicType* sig_bt,
                                          VMRegPair* regs,
                                          BasicType ret_type,
                                          address critical_entry);
                                          BasicType ret_type);

  // A compiled caller has just called the interpreter, but compiled code
  // exists. Patch the caller so he no longer calls into the interpreter.
@ -211,7 +211,6 @@ tier1_gc_1 = \

tier1_gc_2 = \
  gc/ \
  -gc/CriticalNativeArgs.java \
  -gc/g1/ \
  -gc/logging/TestUnifiedLoggingSwitchStress.java \
  -gc/stress \
@ -226,9 +225,7 @@ hotspot_gc_epsilon = \
tier1_gc_epsilon = \
  gc/epsilon/

tier2_gc_epsilon = \
  gc/CriticalNativeArgs.java \
  gc/stress/CriticalNativeStress.java
tier2_gc_epsilon =

tier1_gc_gcold = \
  gc/stress/gcold/TestGCOldWithG1.java \
@ -267,8 +264,6 @@ tier2_gc_shenandoah = \
  gc/logging/TestUnifiedLoggingSwitchStress.java \
  runtime/Metaspace/DefineClass.java \
  gc/shenandoah/ \
  gc/CriticalNativeArgs.java \
  gc/stress/CriticalNativeStress.java \
  serviceability/sa/TestHeapDumpForInvokeDynamic.java \
  -gc/shenandoah/TestStringDedupStress.java \
  -:tier1_gc_shenandoah
@ -1,61 +0,0 @@
/*
 * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */


/* @test
 * @bug 8167409
 * @requires (os.arch != "aarch64") & (os.arch != "arm") & (vm.flavor != "zero")
 * @run main/othervm/native -Xcomp -XX:+CriticalJNINatives compiler.runtime.criticalnatives.argumentcorruption.CheckLongArgs
 */
package compiler.runtime.criticalnatives.argumentcorruption;
public class CheckLongArgs {
  static {
    System.loadLibrary("CNCheckLongArgs");
  }
  static native void m1(long a1, long a2, long a3, long a4, long a5, long a6, long a7, long a8, byte[] result);
  static native void m2(long a1, int[] a2, long a3, int[] a4, long a5, int[] a6, long a7, int[] a8, long a9, byte[] result);
  public static void main(String args[]) throws Exception {
    test();
  }
  private static void test() throws Exception {
    int[] l1 = { 1111, 2222, 3333 };
    int[] l2 = { 4444, 5555, 6666 };
    int[] l3 = { 7777, 8888, 9999 };
    int[] l4 = { 1010, 2020, 3030 };
    byte[] result = { -1 };
    m1(1111111122222222L, 3333333344444444L, 5555555566666666L, 7777777788888888L, 9999999900000000L, 1212121234343434L,
       5656565678787878L, 9090909012121212L, result);
    check(result[0]);
    result[0] = -1;
    m2(1111111122222222L, l1, 3333333344444444L, l2, 5555555566666666L, l3, 7777777788888888L, l4, 9999999900000000L, result);
    check(result[0]);
  }
  private static void check(byte result) throws Exception {
    if (result != 2) {
      if (result == 1) {
        throw new Exception("critical native arguments mismatch");
      }
      throw new Exception("critical native lookup failed");
    }
  }
}
@ -1,30 +0,0 @@
#include "jni.h"
JNIEXPORT void JNICALL JavaCritical_compiler_runtime_criticalnatives_argumentcorruption_CheckLongArgs_m1
  (jlong a1, jlong a2, jlong a3, jlong a4, jlong a5, jlong a6, jlong a7, jlong a8, jint result_length, jbyte* result) {

  if (a1 != 1111111122222222LL || a2 != 3333333344444444LL || a3 != 5555555566666666LL || a4 != 7777777788888888LL ||
      a5 != 9999999900000000LL || a6 != 1212121234343434LL || a7 != 5656565678787878LL || a8 != 9090909012121212LL ||
      result_length != 1 || result[0] != -1) {
    result[0] = 1;
  } else {
    result[0] = 2;
  }
}

JNIEXPORT void JNICALL JavaCritical_compiler_runtime_criticalnatives_argumentcorruption_CheckLongArgs_m2
  (jlong a1, jint a2_length, jint* a2, jlong a3, jint a4_length, jint* a4, jlong a5, jint a6_length, jint* a6, jlong a7,
   jint a8_length, jint* a8, jlong a9, jint result_length, jbyte* result) {
  if (a1 != 1111111122222222LL || a2_length != 3 || a2[0] != 1111 || a3 != 3333333344444444LL || a4_length != 3 || a4[0] != 4444 ||
      a5 != 5555555566666666LL || a6_length != 3 || a6[0] != 7777 || a7 != 7777777788888888LL || a8_length != 3 || a8[0] != 1010 || a9 != 9999999900000000LL ||
      result_length != 1 || result[0] != -1) {
    result[0] = 1;
  } else {
    result[0] = 2;
  }
}

JNIEXPORT void JNICALL Java_compiler_runtime_criticalnatives_argumentcorruption_CheckLongArgs_m1
  (JNIEnv * env, jclass jclazz, jlong a3, jlong a4, jlong a5, jlong a6, jlong a7, jlong a8, jlong a9, jlong a10, jbyteArray result) {}

JNIEXPORT void JNICALL Java_compiler_runtime_criticalnatives_argumentcorruption_CheckLongArgs_m2
  (JNIEnv * env, jclass jclazz, jlong a3, jintArray a4, jlong a5, jintArray a6, jlong a7, jintArray a8, jlong a9, jintArray a10, jlong a11, jbyteArray result) {}
@ -1,60 +0,0 @@
/*
 * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */


/* @test
 * @bug 8167408
 * @requires (os.arch != "aarch64") & (os.arch != "arm") & (vm.flavor != "zero")
 * @run main/othervm/native -Xcomp -XX:+CriticalJNINatives compiler.runtime.criticalnatives.lookup.LookUp
 */
package compiler.runtime.criticalnatives.lookup;
public class LookUp {
  static {
    System.loadLibrary("CNLookUp");
  }
  static native void m1(byte a1, long a2, char a3, int a4, float a5, double a6, byte[] result);
  static native void m2(int a1, int[] a2, long a3, long[] a4, float a5, float[] a6, double a7, double[] a8, byte result[]);
  public static void main(String args[]) throws Exception {
    test();
  }
  private static void test() throws Exception {
    int[] l1 = { 1111, 2222, 3333 };
    long[] l2 = { 4444L, 5555L, 6666L };
    float[] l3 = { 7777.0F, 8888.0F, 9999.0F };
    double[] l4 = { 4545.0D, 5656.0D, 6767.0D };
    byte[] result = { -1 };
    m1((byte)0xA, 4444444455555555L, 'A', 12345678, 343434.0F, 6666666677777777.0D, result);
    check(result[0]);
    result[0] = -1;
    m2(12345678, l1, 4444444455555555L, l2, 343434.0F, l3, 6666666677777777.0D, l4, result);
    check(result[0]);
  }
  private static void check(byte result) throws Exception {
    if (result != 2) {
      if (result == 1) {
        throw new Exception("critical native arguments mismatch");
      }
      throw new Exception("critical native lookup failed");
    }
  }
}
@ -1,35 +0,0 @@
#include "jni.h"
JNIEXPORT void JNICALL JavaCritical_compiler_runtime_criticalnatives_lookup_LookUp_m1
  (jbyte a1, jlong a2, jchar a3, jint a4, jfloat a5, jdouble a6, jint result_length, jbyte* result) {
  jint l1 = (jint) a5;
  jlong l2 = (jlong) a6;

  if (a1 != 0xA || a2 != 4444444455555555LL || a3 != 0x41 || a4 != 12345678 || l1 != 343434 || l2 != 6666666677777777LL ||
      result_length != 1 || result[0] != -1) {
    result[0] = 1;
  } else {
    result[0] = 2;
  }
}

JNIEXPORT void JNICALL JavaCritical_compiler_runtime_criticalnatives_lookup_LookUp_m2
  (jint a1, jint a2_length, jint* a2, jlong a3, jint a4_length, jlong* a4, jfloat a5, jint a6_length, jfloat* a6, jdouble a7,
   jint a8_length, jdouble* a8, jint result_length, jbyte* result) {
  jint l1 = (jint) a5;
  jlong l2 = (jlong) a7;

  if (a1 != 12345678 || a2_length != 3 || a2[0] != 1111 || a3 != 4444444455555555LL || a4_length != 3 || a4[0] != 4444 ||
      l1 != 343434 || a6_length != 3 || 7777 != (jint)a6[0] || l2 != 6666666677777777LL || a8_length != 3 || 4545 != (jlong)a8[0] ||
      result_length != 1 || result[0] != -1) {
    result[0] = 1;
  } else {
    result[0] = 2;
  }
}

JNIEXPORT void JNICALL Java_compiler_runtime_criticalnatives_lookup_LookUp_m1
  (JNIEnv * env, jclass jclazz, jbyte a3, jlong a4, jchar a5, jint a6, jfloat a7, jdouble a8, jbyteArray result) {}

JNIEXPORT void JNICALL Java_compiler_runtime_criticalnatives_lookup_LookUp_m2
  (JNIEnv * env, jclass jclazz, jint a3, jintArray a4, jlong a5, jlongArray a6, jfloat a7, jfloatArray a8, jdouble a9, jdoubleArray a10, jbyteArray result) {}
@ -1,36 +0,0 @@
/*
 * Copyright (c) 2019, Red Hat, Inc. and/or its affiliates. All rights reserved.
 * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

package gc;

public class CriticalNative {
  static {
    System.loadLibrary("CriticalNative");
  }

  public static native boolean isNull(int[] a);
  public static native long sum1(long[] a);
  // More than 6 parameters
  public static native long sum2(long a1, int[] a2, int[] a3, long[] a4, int[] a5);
}
@ -1,109 +0,0 @@
/*
 * Copyright (c) 2018, 2019, Red Hat, Inc. and/or its affiliates.
 * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

package gc;

/*
 * @test CriticalNativeStressEpsilon
 * @bug 8199868
 * @library /
 * @requires os.arch =="x86_64" | os.arch == "amd64" | os.arch=="x86" | os.arch=="i386"
 * @requires vm.gc.Epsilon
 * @summary test argument unpacking nmethod wrapper of critical native method
 * @run main/othervm/native -XX:+UnlockExperimentalVMOptions -XX:+UseEpsilonGC -Xcomp -Xmx256M
 *      -XX:-CriticalJNINatives
 *      gc.CriticalNativeArgs
 * @run main/othervm/native -XX:+UnlockExperimentalVMOptions -XX:+UseEpsilonGC -Xcomp -Xmx256M
 *      -XX:+CriticalJNINatives
 *      gc.CriticalNativeArgs
 */

/*
 * @test CriticalNativeStressShenandoah
 * @bug 8199868
 * @library /
 * @requires os.arch =="x86_64" | os.arch == "amd64" | os.arch=="x86" | os.arch=="i386"
 * @requires vm.gc.Shenandoah
 * @summary test argument unpacking nmethod wrapper of critical native method
 *
 * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xcomp -Xmx512M
 *      -XX:+UseShenandoahGC
 *      -XX:-CriticalJNINatives
 *      gc.CriticalNativeArgs
 * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xcomp -Xmx512M
 *      -XX:+UseShenandoahGC
 *      -XX:+CriticalJNINatives
 *      gc.CriticalNativeArgs
 *
 * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xcomp -Xmx512M
 *      -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive -XX:+ShenandoahDegeneratedGC
 *      -XX:+CriticalJNINatives
 *      gc.CriticalNativeArgs
 * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xcomp -Xmx512M
 *      -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive -XX:-ShenandoahDegeneratedGC
 *      -XX:+CriticalJNINatives
 *      gc.CriticalNativeArgs
 *
 * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xcomp -Xmx512M
 *      -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive
 *      -XX:+CriticalJNINatives
 *      gc.CriticalNativeArgs
 *
 * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xcomp -Xmx512M
 *      -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu
 *      -XX:+CriticalJNINatives
 *      gc.CriticalNativeArgs
 * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xcomp -Xmx512M
 *      -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive
 *      -XX:+CriticalJNINatives
 *      gc.CriticalNativeArgs
 */

/*
 * @test CriticalNativeStress
 * @bug 8199868 8233343
 * @library /
 * @requires os.arch =="x86_64" | os.arch == "amd64" | os.arch=="x86" | os.arch=="i386" | os.arch=="ppc64" | os.arch=="ppc64le" | os.arch=="s390x"
 * @summary test argument unpacking nmethod wrapper of critical native method
 * @run main/othervm/native -Xcomp -Xmx512M
 *      -XX:-CriticalJNINatives
 *      gc.CriticalNativeArgs
 * @run main/othervm/native -Xcomp -Xmx512M
 *      -XX:+CriticalJNINatives
 *      gc.CriticalNativeArgs
 */
public class CriticalNativeArgs {
  public static void main(String[] args) {
    int[] arr = new int[2];

    if (CriticalNative.isNull(arr)) {
      throw new RuntimeException("Should not be null");
    }

    if (!CriticalNative.isNull(null)) {
      throw new RuntimeException("Should be null");
    }
  }
}
@ -1,130 +0,0 @@
/*
 * Copyright (c) 2018, Red Hat, Inc. and/or its affiliates.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "jni.h"

JNIEXPORT jlong JNICALL JavaCritical_gc_CriticalNative_sum1
  (jint length, jlong* a) {
  jlong sum = 0;
  jint index;
  for (index = 0; index < length; index ++) {
    sum += a[index];
  }

  return sum;
}

JNIEXPORT jlong JNICALL JavaCritical_gc_CriticalNative_sum2
  (jlong a1, jint a2_length, jint* a2, jint a4_length, jint* a4, jint a6_length, jlong* a6, jint a8_length, jint* a8) {
  jlong sum = a1;
  jint index;
  for (index = 0; index < a2_length; index ++) {
    sum += a2[index];
  }

  for (index = 0; index < a4_length; index ++) {
    sum += a4[index];
  }

  for (index = 0; index < a6_length; index ++) {
    sum += a6[index];
  }

  for (index = 0; index < a8_length; index ++) {
    sum += a8[index];
  }
  return sum;
}

JNIEXPORT jlong JNICALL Java_gc_CriticalNative_sum1
  (JNIEnv *env, jclass jclazz, jlongArray a) {
  jlong sum = 0;
  jsize len = (*env)->GetArrayLength(env, a);
  jsize index;
  jlong* arr = (jlong*)(*env)->GetPrimitiveArrayCritical(env, a, 0);
  for (index = 0; index < len; index ++) {
    sum += arr[index];
  }

  (*env)->ReleasePrimitiveArrayCritical(env, a, arr, 0);
  return sum;
}

JNIEXPORT jlong JNICALL Java_gc_CriticalNative_sum2
  (JNIEnv *env, jclass jclazz, jlong a1, jintArray a2, jintArray a3, jlongArray a4, jintArray a5) {
  jlong sum = a1;
  jsize index;
  jsize len;
  jint* a2_arr;
  jint* a3_arr;
  jlong* a4_arr;
  jint* a5_arr;

  len = (*env)->GetArrayLength(env, a2);
  a2_arr = (jint*)(*env)->GetPrimitiveArrayCritical(env, a2, 0);
  for (index = 0; index < len; index ++) {
    sum += a2_arr[index];
  }
  (*env)->ReleasePrimitiveArrayCritical(env, a2, a2_arr, 0);

  len = (*env)->GetArrayLength(env, a3);
  a3_arr = (jint*)(*env)->GetPrimitiveArrayCritical(env, a3, 0);
  for (index = 0; index < len; index ++) {
    sum += a3_arr[index];
  }
  (*env)->ReleasePrimitiveArrayCritical(env, a3, a3_arr, 0);

  len = (*env)->GetArrayLength(env, a4);
  a4_arr = (jlong*)(*env)->GetPrimitiveArrayCritical(env, a4, 0);
  for (index = 0; index < len; index ++) {
    sum += a4_arr[index];
  }
  (*env)->ReleasePrimitiveArrayCritical(env, a4, a4_arr, 0);

  len = (*env)->GetArrayLength(env, a5);
  a5_arr = (jint*)(*env)->GetPrimitiveArrayCritical(env, a5, 0);
  for (index = 0; index < len; index ++) {
    sum += a5_arr[index];
  }
  (*env)->ReleasePrimitiveArrayCritical(env, a5, a5_arr, 0);

  return sum;
}


JNIEXPORT jboolean JNICALL JavaCritical_gc_CriticalNative_isNull
  (jint length, jint* a) {
  return (a == NULL) && (length == 0);
}

JNIEXPORT jboolean JNICALL Java_gc_CriticalNative_isNull
  (JNIEnv *env, jclass jclazz, jintArray a) {
  if (a == NULL) return JNI_TRUE;
  jsize len = (*env)->GetArrayLength(env, a);
  jint* arr = (jint*)(*env)->GetPrimitiveArrayCritical(env, a, 0);
  jboolean is_null = (arr == NULL) && (len == 0);
  (*env)->ReleasePrimitiveArrayCritical(env, a, arr, 0);
  return is_null;
}
@ -1,226 +0,0 @@
/*
 * Copyright (c) 2018, 2019, Red Hat, Inc. and/or its affiliates.
 * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
package gc.stress;

import java.util.Random;

import gc.CriticalNative;
import jdk.test.lib.Utils;

/*
 * @test CriticalNativeStressEpsilon
 * @key randomness
 * @bug 8199868
 * @library / /test/lib
 * @requires os.arch =="x86_64" | os.arch == "amd64" | os.arch=="x86" | os.arch=="i386"
 * @requires vm.gc.Epsilon
 * @summary test argument pinning by nmethod wrapper of critical native method
 * @run main/othervm/native -XX:+UnlockExperimentalVMOptions -XX:+UseEpsilonGC -Xcomp -Xmx1G -XX:+CriticalJNINatives gc.stress.CriticalNativeStress
 */

/*
 * @test CriticalNativeStressShenandoah
 * @key randomness
 * @bug 8199868
 * @library / /test/lib
 * @requires os.arch =="x86_64" | os.arch == "amd64" | os.arch=="x86" | os.arch=="i386"
 * @requires vm.gc.Shenandoah
 * @summary test argument pinning by nmethod wrapper of critical native method
 * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive -XX:-ShenandoahDegeneratedGC -Xcomp -Xmx512M -XX:+CriticalJNINatives gc.stress.CriticalNativeStress
 * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive -XX:+ShenandoahDegeneratedGC -Xcomp -Xmx512M -XX:+CriticalJNINatives gc.stress.CriticalNativeStress
 *
 * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive -Xcomp -Xmx512M -XX:+CriticalJNINatives gc.stress.CriticalNativeStress
 * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xcomp -Xmx256M -XX:+CriticalJNINatives gc.stress.CriticalNativeStress
 * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -Xcomp -Xmx512M -XX:+CriticalJNINatives gc.stress.CriticalNativeStress
 * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive -Xcomp -Xmx512M -XX:+CriticalJNINatives gc.stress.CriticalNativeStress
 */

/*
 * @test CriticalNativeStress
 * @key randomness
 * @bug 8199868 8233343
 * @library / /test/lib
 * @requires os.arch =="x86_64" | os.arch == "amd64" | os.arch=="x86" | os.arch=="i386" | os.arch=="ppc64" | os.arch=="ppc64le" | os.arch=="s390x"
 * @summary test argument unpacking nmethod wrapper of critical native method
 * @run main/othervm/native -Xcomp -Xmx512M -XX:+CriticalJNINatives gc.stress.CriticalNativeStress
 */

public class CriticalNativeStress {
  // CYCLES and THREAD_PER_CASE are used to tune the tests for different GC settings,
  // so that they can exercise enough GC cycles and not OOM
  private static int CYCLES = Integer.getInteger("cycles", 3);
  private static int THREAD_PER_CASE = Integer.getInteger("threadPerCase", 1);

  static long sum(long[] a) {
    long sum = 0;
    for (int index = 0; index < a.length; index ++) {
      sum += a[index];
    }
    return sum;
  }

  static long sum(int[] a) {
    long sum = 0;
    for (int index = 0; index < a.length; index ++) {
      sum += a[index];
    }
    return sum;
  }

  private static volatile String garbage_array[];

  // GC potentially moves arrays passed to critical native methods
  // if they are not pinned correctly.
  // Create enough garbage to exercise GC cycles, and verify that
  // the arrays are pinned correctly.
  static void create_garbage(int len) {
    len = Math.max(len, 1024);
    String array[] = new String[len];
    for (int index = 0; index < len; index ++) {
      array[index] = "String " + index;
    }
    garbage_array = array;
  }

  // Two test cases with different method signatures:
  // the tests generate arrays of arbitrary length with
  // arbitrary values, then calculate the sum of the array
  // elements with critical native JNI methods and java
  // methods, and compare the results for correctness.
  static void run_test_case1(Random rand) {
    // Create testing array with arbitrary length and
    // values
    int length = rand.nextInt(50) + 1;
    long[] arr = new long[length];
    for (int index = 0; index < length; index ++) {
      arr[index] = rand.nextLong() % 1002;
    }

    // Generate garbage to trigger GCs
    for (int index = 0; index < length; index ++) {
      create_garbage(index);
    }

    // Compare results for correctness.
    long native_sum = CriticalNative.sum1(arr);
    long java_sum = sum(arr);
    if (native_sum != java_sum) {
      StringBuffer sb = new StringBuffer("Sums do not match: native = ")
        .append(native_sum).append(" java = ").append(java_sum);

      throw new RuntimeException(sb.toString());
    }
  }

  static void run_test_case2(Random rand) {
    // Create testing arrays with arbitrary lengths and
    // values
    int index;
    long a1 = rand.nextLong() % 1025;

    int a2_length = rand.nextInt(50) + 1;
    int[] a2 = new int[a2_length];
    for (index = 0; index < a2_length; index ++) {
      a2[index] = rand.nextInt(106);
    }

    int a3_length = rand.nextInt(150) + 1;
    int[] a3 = new int[a3_length];
    for (index = 0; index < a3_length; index ++) {
      a3[index] = rand.nextInt(3333);
    }

    int a4_length = rand.nextInt(200) + 1;
    long[] a4 = new long[a4_length];
    for (index = 0; index < a4_length; index ++) {
      a4[index] = rand.nextLong() % 122;
    }

    int a5_length = rand.nextInt(350) + 1;
    int[] a5 = new int[a5_length];
    for (index = 0; index < a5_length; index ++) {
      a5[index] = rand.nextInt(333);
    }

    // Generate garbage to trigger GCs
    for (index = 0; index < a1; index ++) {
      create_garbage(index);
    }

    // Compare results for correctness.
    long native_sum = CriticalNative.sum2(a1, a2, a3, a4, a5);
    long java_sum = a1 + sum(a2) + sum(a3) + sum(a4) + sum(a5);
    if (native_sum != java_sum) {
      StringBuffer sb = new StringBuffer("Sums do not match: native = ")
        .append(native_sum).append(" java = ").append(java_sum);

      throw new RuntimeException(sb.toString());
    }
  }

  static class Case1Runner extends Thread {
    private final Random rand;
    public Case1Runner() {
      rand = new Random(Utils.getRandomInstance().nextLong());
      start();
    }

    public void run() {
      for (int index = 0; index < CYCLES; index ++) {
        run_test_case1(rand);
      }
    }
  }

  static class Case2Runner extends Thread {
    private final Random rand;
    public Case2Runner() {
      rand = new Random(Utils.getRandomInstance().nextLong());
      start();
    }

    public void run() {
      for (int index = 0; index < CYCLES; index ++) {
        run_test_case2(rand);
      }
    }
  }

  public static void main(String[] args) {
    Thread[] thrs = new Thread[THREAD_PER_CASE * 2];
    for (int index = 0; index < thrs.length; index = index + 2) {
      thrs[index] = new Case1Runner();
      thrs[index + 1] = new Case2Runner();
    }

    for (int index = 0; index < thrs.length; index ++) {
      try {
        thrs[index].join();
      } catch (Exception e) {
        e.printStackTrace();
      }
    }
  }
}