8292584: assert(cb != __null) failed: must be with -XX:-Inline

Reviewed-by: kvn, rpressler
Author: Dean Long
Date:   2022-09-01 20:19:37 +00:00
Parent: 04d8069bac
Commit: fa68371bb8
48 changed files with 246 additions and 392 deletions

@@ -339,18 +339,6 @@ void LIRGenerator::do_MonitorExit(MonitorExit* x) {
monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
}
void LIRGenerator::do_continuation_doYield(Intrinsic* x) {
BasicTypeList signature(0);
CallingConvention* cc = frame_map()->java_calling_convention(&signature, true);
const LIR_Opr result_reg = result_register_for(x->type());
address entry = StubRoutines::cont_doYield();
LIR_Opr result = rlock_result(x);
CodeEmitInfo* info = state_for(x, x->state());
__ call_runtime(entry, LIR_OprFact::illegalOpr, result_reg, cc->args(), info);
__ move(result_reg, result);
}
void LIRGenerator::do_NegateOp(NegateOp* x) {
LIRItem from(x->x(), this);

@@ -1123,6 +1123,62 @@ static void gen_continuation_enter(MacroAssembler* masm,
CompiledStaticCall::emit_to_interp_stub(*cbuf, mark);
}
static void gen_continuation_yield(MacroAssembler* masm,
const methodHandle& method,
const BasicType* sig_bt,
const VMRegPair* regs,
int& exception_offset,
OopMapSet* oop_maps,
int& frame_complete,
int& stack_slots,
int& interpreted_entry_offset,
int& compiled_entry_offset) {
enum layout {
rfp_off1,
rfp_off2,
lr_off,
lr_off2,
framesize // inclusive of return address
};
// assert(is_even(framesize/2), "sp not 16-byte aligned");
stack_slots = framesize / VMRegImpl::slots_per_word;
assert(stack_slots == 2, "recheck layout");
address start = __ pc();
compiled_entry_offset = __ pc() - start;
__ enter();
__ mov(c_rarg1, sp);
frame_complete = __ pc() - start;
address the_pc = __ pc();
__ post_call_nop(); // This nop must be exactly at the PC we push into the frame info; we use it for fast CodeBlob lookup
__ mov(c_rarg0, rthread);
__ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
__ call_VM_leaf(Continuation::freeze_entry(), 2);
__ reset_last_Java_frame(true);
Label pinned;
__ cbnz(r0, pinned);
// We've succeeded, set sp to the ContinuationEntry
__ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
__ mov(sp, rscratch1);
continuation_enter_cleanup(masm);
__ bind(pinned); // pinned -- return to caller
__ leave();
__ ret(lr);
OopMap* map = new OopMap(framesize, 1);
oop_maps->add_gc_map(the_pc - start, map);
}
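
A note on the control flow above: the success path falls through into the pinned label, so both outcomes share the final leave()/ret(lr) epilogue and differ only in whether sp was first reset to the ContinuationEntry. A condensed trace, assuming (as the cbnz test implies) that freeze() returns 0 on success and a nonzero freeze_result when the continuation is pinned:

    // r0 == 0 (frozen): sp = rthread->cont_entry; continuation_enter_cleanup(); leave(); ret(lr)
    // r0 != 0 (pinned): branch straight to the epilogue;                        leave(); ret(lr)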
static void gen_special_dispatch(MacroAssembler* masm,
const methodHandle& method,
const BasicType* sig_bt,
@@ -1207,25 +1263,38 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
BasicType* in_sig_bt,
VMRegPair* in_regs,
BasicType ret_type) {
if (method->is_continuation_enter_intrinsic()) {
vmIntrinsics::ID iid = method->intrinsic_id();
intptr_t start = (intptr_t)__ pc();
if (method->is_continuation_native_intrinsic()) {
int vep_offset = 0;
int exception_offset = 0;
int frame_complete = 0;
int stack_slots = 0;
OopMapSet* oop_maps = new OopMapSet();
int interpreted_entry_offset = -1;
gen_continuation_enter(masm,
method,
in_sig_bt,
in_regs,
exception_offset,
oop_maps,
frame_complete,
stack_slots,
interpreted_entry_offset,
vep_offset);
if (method->is_continuation_enter_intrinsic()) {
gen_continuation_enter(masm,
method,
in_sig_bt,
in_regs,
exception_offset,
oop_maps,
frame_complete,
stack_slots,
interpreted_entry_offset,
vep_offset);
} else if (method->is_continuation_yield_intrinsic()) {
gen_continuation_yield(masm,
method,
in_sig_bt,
in_regs,
exception_offset,
oop_maps,
frame_complete,
stack_slots,
interpreted_entry_offset,
vep_offset);
} else {
guarantee(false, "Unknown Continuation native intrinsic");
}
__ flush();
nmethod* nm = nmethod::new_native_nmethod(method,
compile_id,
@@ -1237,7 +1306,13 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
in_ByteSize(-1),
oop_maps,
exception_offset);
ContinuationEntry::set_enter_code(nm, interpreted_entry_offset);
if (method->is_continuation_enter_intrinsic()) {
ContinuationEntry::set_enter_code(nm, interpreted_entry_offset);
} else if (method->is_continuation_yield_intrinsic()) {
_cont_doYield_stub = nm;
} else {
guarantee(false, "Unknown Continuation native intrinsic");
}
return nm;
}
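
Taken together, the two hunks above are the heart of the change on aarch64: doYield is no longer compiled as an intrinsic backed by a hand-built RuntimeStub; it goes through the same generate_native_wrapper path as enterSpecial, and the resulting nmethod is cached in SharedRuntime::_cont_doYield_stub. A condensed sketch of the registration step, with names taken from the hunk above:

    nmethod* nm = nmethod::new_native_nmethod(method, compile_id, /* ... */ oop_maps, exception_offset);
    if (method->is_continuation_enter_intrinsic()) {
      ContinuationEntry::set_enter_code(nm, interpreted_entry_offset);
    } else if (method->is_continuation_yield_intrinsic()) {
      _cont_doYield_stub = nm;   // consumed via SharedRuntime::cont_doYield_stub()
    }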

@@ -6620,69 +6620,6 @@ class StubGenerator: public StubCodeGenerator {
}
#endif // LINUX
RuntimeStub* generate_cont_doYield() {
if (!Continuations::enabled()) return nullptr;
const char *name = "cont_doYield";
enum layout {
rfp_off1,
rfp_off2,
lr_off,
lr_off2,
framesize // inclusive of return address
};
// assert(is_even(framesize/2), "sp not 16-byte aligned");
int insts_size = 512;
int locs_size = 64;
CodeBuffer code(name, insts_size, locs_size);
OopMapSet* oop_maps = new OopMapSet();
MacroAssembler* masm = new MacroAssembler(&code);
MacroAssembler* _masm = masm;
address start = __ pc();
__ enter();
__ mov(c_rarg1, sp);
int frame_complete = __ pc() - start;
address the_pc = __ pc();
__ post_call_nop(); // This nop must be exactly at the PC we push into the frame info; we use it for fast CodeBlob lookup
__ mov(c_rarg0, rthread);
__ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
__ call_VM_leaf(Continuation::freeze_entry(), 2);
__ reset_last_Java_frame(true);
Label pinned;
__ cbnz(r0, pinned);
// We've succeeded, set sp to the ContinuationEntry
__ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
__ mov(sp, rscratch1);
continuation_enter_cleanup(masm);
__ bind(pinned); // pinned -- return to caller
__ leave();
__ ret(lr);
OopMap* map = new OopMap(framesize, 1);
oop_maps->add_gc_map(the_pc - start, map);
RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
RuntimeStub::new_runtime_stub(name,
&code,
frame_complete,
(framesize >> (LogBytesPerWord - LogBytesPerInt)),
oop_maps, false);
return stub;
}
address generate_cont_thaw(Continuation::thaw_kind kind) {
bool return_barrier = Continuation::is_thaw_return_barrier(kind);
bool return_barrier_exception = Continuation::is_thaw_return_barrier_exception(kind);
@@ -7857,9 +7794,6 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_cont_thaw = generate_cont_thaw();
StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
StubRoutines::_cont_doYield_stub = generate_cont_doYield();
StubRoutines::_cont_doYield = StubRoutines::_cont_doYield_stub == nullptr ? nullptr
: StubRoutines::_cont_doYield_stub->entry_point();
JFR_ONLY(StubRoutines::_jfr_write_checkpoint_stub = generate_jfr_write_checkpoint();)
JFR_ONLY(StubRoutines::_jfr_write_checkpoint = StubRoutines::_jfr_write_checkpoint_stub->entry_point();)

@@ -843,18 +843,6 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
// End of helpers
address TemplateInterpreterGenerator::generate_Continuation_doYield_entry(void) {
if (!Continuations::enabled()) return nullptr;
address entry = __ pc();
assert(StubRoutines::cont_doYield() != NULL, "stub not yet generated");
__ push_cont_fastpath(rthread);
__ far_jump(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::cont_doYield())));
return entry;
}
// Various method entries
//------------------------------------------------------------------------------------------------------------------------
//

@@ -1328,7 +1328,3 @@ void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
}
__ load(address, result, info, lir_patch_none);
}
void LIRGenerator::do_continuation_doYield(Intrinsic* x) {
fatal("Continuation.doYield intrinsic is not implemented on this platform");
}

@@ -2959,12 +2959,6 @@ class StubGenerator: public StubCodeGenerator {
return stub->entry_point();
}
RuntimeStub* generate_cont_doYield() {
if (!Continuations::enabled()) return nullptr;
Unimplemented();
return nullptr;
}
address generate_cont_thaw(const char* label, Continuation::thaw_kind kind) {
if (!Continuations::enabled()) return nullptr;
Unimplemented();
@@ -3075,9 +3069,6 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_cont_thaw = generate_cont_thaw();
StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
StubRoutines::_cont_doYield_stub = generate_cont_doYield();
StubRoutines::_cont_doYield = StubRoutines::_cont_doYield_stub == nullptr ? nullptr
: StubRoutines::_cont_doYield_stub->entry_point();
JFR_ONLY(StubRoutines::_jfr_write_checkpoint_stub = generate_jfr_write_checkpoint();)
JFR_ONLY(StubRoutines::_jfr_write_checkpoint = StubRoutines::_jfr_write_checkpoint_stub->entry_point();)

@@ -728,12 +728,6 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
// [ parameter 1 ] <--- Rlocals
//
address TemplateInterpreterGenerator::generate_Continuation_doYield_entry(void) {
if (!Continuations::enabled()) return nullptr;
Unimplemented();
return NULL;
}
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
// Code: _aload_0, _getfield, _areturn
// parameter size = 1

@@ -1381,7 +1381,3 @@ void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
fatal("vectorizedMismatch intrinsic is not implemented on this platform");
}
void LIRGenerator::do_continuation_doYield(Intrinsic* x) {
fatal("Continuation.doYield intrinsic is not implemented on this platform");
}

@@ -4501,12 +4501,6 @@ class StubGenerator: public StubCodeGenerator {
#endif // VM_LITTLE_ENDIAN
RuntimeStub* generate_cont_doYield() {
if (!Continuations::enabled()) return nullptr;
Unimplemented();
return nullptr;
}
address generate_cont_thaw() {
if (!Continuations::enabled()) return nullptr;
Unimplemented();
@@ -4616,9 +4610,6 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_cont_thaw = generate_cont_thaw();
StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
StubRoutines::_cont_doYield_stub = generate_cont_doYield();
StubRoutines::_cont_doYield = StubRoutines::_cont_doYield_stub == nullptr ? nullptr
: StubRoutines::_cont_doYield_stub->entry_point();
JFR_ONLY(StubRoutines::_jfr_write_checkpoint_stub = generate_jfr_write_checkpoint();)
JFR_ONLY(StubRoutines::_jfr_write_checkpoint = StubRoutines::_jfr_write_checkpoint_stub->entry_point();)

@@ -476,12 +476,6 @@ address TemplateInterpreterGenerator::generate_abstract_entry(void) {
return entry;
}
address TemplateInterpreterGenerator::generate_Continuation_doYield_entry(void) {
if (!Continuations::enabled()) return nullptr;
Unimplemented();
return NULL;
}
// Interpreter intrinsic for WeakReference.get().
// 1. Don't push a full blown frame and go on dispatching, but fetch the value
// into R8 and return quickly

@@ -301,10 +301,6 @@ void LIRGenerator::do_MonitorExit(MonitorExit* x) {
monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
}
void LIRGenerator::do_continuation_doYield(Intrinsic* x) {
fatal("Continuation.doYield intrinsic is not implemented on this platform");
}
// neg
void LIRGenerator::do_NegateOp(NegateOp* x) {
LIRItem from(x->x(), this);

@@ -3730,12 +3730,6 @@ class StubGenerator: public StubCodeGenerator {
return nullptr;
}
RuntimeStub* generate_cont_doYield() {
if (!Continuations::enabled()) return nullptr;
Unimplemented();
return nullptr;
}
#if INCLUDE_JFR
#undef __
@@ -3832,9 +3826,6 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_cont_thaw = generate_cont_thaw();
StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
StubRoutines::_cont_doYield_stub = generate_cont_doYield();
StubRoutines::_cont_doYield = StubRoutines::_cont_doYield_stub == nullptr ? nullptr
: StubRoutines::_cont_doYield_stub->entry_point();
JFR_ONLY(StubRoutines::_jfr_write_checkpoint_stub = generate_jfr_write_checkpoint();)
JFR_ONLY(StubRoutines::_jfr_write_checkpoint = StubRoutines::_jfr_write_checkpoint_stub == nullptr ? nullptr

@@ -787,12 +787,6 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
// End of helpers
address TemplateInterpreterGenerator::generate_Continuation_doYield_entry(void) {
if (!Continuations::enabled()) return nullptr;
Unimplemented();
return NULL;
}
// Various method entries
//------------------------------------------------------------------------------------------------------------------------
//

@@ -1183,7 +1183,3 @@ void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
fatal("vectorizedMismatch intrinsic is not implemented on this platform");
}
void LIRGenerator::do_continuation_doYield(Intrinsic* x) {
fatal("Continuation.doYield intrinsic is not implemented on this platform");
}

@@ -2857,12 +2857,6 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
RuntimeStub* generate_cont_doYield() {
if (!Continuations::enabled()) return nullptr;
Unimplemented();
return nullptr;
}
address generate_cont_thaw(bool return_barrier, bool exception) {
if (!Continuations::enabled()) return nullptr;
Unimplemented();
@@ -2940,9 +2934,6 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_cont_thaw = generate_cont_thaw();
StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
StubRoutines::_cont_doYield_stub = generate_cont_doYield();
StubRoutines::_cont_doYield = StubRoutines::_cont_doYield_stub == nullptr ? nullptr
: StubRoutines::_cont_doYield_stub->entry_point();
JFR_ONLY(StubRoutines::_jfr_write_checkpoint_stub = generate_jfr_write_checkpoint();)
JFR_ONLY(StubRoutines::_jfr_write_checkpoint = StubRoutines::_jfr_write_checkpoint_stub->entry_point();)

@@ -482,12 +482,6 @@ address TemplateInterpreterGenerator::generate_abstract_entry(void) {
return __ addr_at(entry_offset);
}
address TemplateInterpreterGenerator::generate_Continuation_doYield_entry(void) {
if (!Continuations::enabled()) return nullptr;
Unimplemented();
return NULL;
}
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
// Inputs:
// Z_ARG1 - receiver

@@ -336,18 +336,6 @@ void LIRGenerator::do_MonitorExit(MonitorExit* x) {
monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
}
void LIRGenerator::do_continuation_doYield(Intrinsic* x) {
BasicTypeList signature(0);
CallingConvention* cc = frame_map()->java_calling_convention(&signature, true);
const LIR_Opr result_reg = result_register_for(x->type());
address entry = StubRoutines::cont_doYield();
LIR_Opr result = rlock_result(x);
CodeEmitInfo* info = state_for(x, x->state());
__ call_runtime(entry, LIR_OprFact::illegalOpr, result_reg, cc->args(), info);
__ move(result_reg, result);
}
// _ineg, _lneg, _fneg, _dneg
void LIRGenerator::do_NegateOp(NegateOp* x) {
LIRItem value(x->x(), this);

@@ -1271,13 +1271,13 @@ static void check_continuation_enter_argument(VMReg actual_vmreg,
}
static void gen_continuation_enter(MacroAssembler* masm,
const VMRegPair* regs,
int& exception_offset,
OopMapSet* oop_maps,
int& frame_complete,
int& stack_slots,
int& interpreted_entry_offset,
int& compiled_entry_offset) {
const VMRegPair* regs,
int& exception_offset,
OopMapSet* oop_maps,
int& frame_complete,
int& stack_slots,
int& interpreted_entry_offset,
int& compiled_entry_offset) {
// enterSpecial(Continuation c, boolean isContinue, boolean isVirtualThread)
int pos_cont_obj = 0;
@@ -1444,6 +1444,61 @@ static void gen_continuation_enter(MacroAssembler* masm,
__ jmp(rbx);
}
static void gen_continuation_yield(MacroAssembler* masm,
const VMRegPair* regs,
int& exception_offset,
OopMapSet* oop_maps,
int& frame_complete,
int& stack_slots,
int& interpreted_entry_offset,
int& compiled_entry_offset) {
enum layout {
rbp_off,
rbpH_off,
return_off,
return_off2,
framesize // inclusive of return address
};
stack_slots = framesize / VMRegImpl::slots_per_word;
assert(stack_slots == 2, "recheck layout");
address start = __ pc();
compiled_entry_offset = __ pc() - start;
__ enter();
address the_pc = __ pc();
frame_complete = the_pc - start;
// This nop must be exactly at the PC we push into the frame info.
// We use this nop for fast CodeBlob lookup, associate the OopMap
// with it right away.
__ post_call_nop();
OopMap* map = new OopMap(framesize, 1);
oop_maps->add_gc_map(frame_complete, map);
__ set_last_Java_frame(rsp, rbp, the_pc, rscratch1);
__ movptr(c_rarg0, r15_thread);
__ movptr(c_rarg1, rsp);
__ call_VM_leaf(Continuation::freeze_entry(), 2);
__ reset_last_Java_frame(true);
Label L_pinned;
__ testptr(rax, rax);
__ jcc(Assembler::notZero, L_pinned);
__ movptr(rsp, Address(r15_thread, JavaThread::cont_entry_offset()));
__ continuation_enter_cleanup();
__ pop(rbp);
__ ret(0);
__ bind(L_pinned);
// Pinned, return to caller
__ leave();
__ ret(0);
}
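
The ordering around post_call_nop() is load-bearing: frame_complete is the offset of the_pc, and the OopMap is keyed at that same offset, so a stack walk over a frozen thread can resolve this frame's CodeBlob and oop map from the PC recorded in the frame anchor. Condensed from the body above:

    address the_pc = __ pc();
    frame_complete = the_pc - start;
    __ post_call_nop();                         // nop sits exactly at the_pc
    oop_maps->add_gc_map(frame_complete, map);  // map keyed at the same offset
    __ set_last_Java_frame(rsp, rbp, the_pc, rscratch1);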
static void gen_special_dispatch(MacroAssembler* masm,
const methodHandle& method,
const BasicType* sig_bt,
@@ -1528,23 +1583,34 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
BasicType* in_sig_bt,
VMRegPair* in_regs,
BasicType ret_type) {
if (method->is_continuation_enter_intrinsic()) {
vmIntrinsics::ID iid = method->intrinsic_id();
intptr_t start = (intptr_t)__ pc();
if (method->is_continuation_native_intrinsic()) {
int vep_offset = 0;
int exception_offset = 0;
int frame_complete = 0;
int stack_slots = 0;
OopMapSet* oop_maps = new OopMapSet();
OopMapSet* oop_maps = new OopMapSet();
int interpreted_entry_offset = -1;
gen_continuation_enter(masm,
in_regs,
exception_offset,
oop_maps,
frame_complete,
stack_slots,
interpreted_entry_offset,
vep_offset);
if (method->is_continuation_enter_intrinsic()) {
gen_continuation_enter(masm,
in_regs,
exception_offset,
oop_maps,
frame_complete,
stack_slots,
interpreted_entry_offset,
vep_offset);
} else if (method->is_continuation_yield_intrinsic()) {
gen_continuation_yield(masm,
in_regs,
exception_offset,
oop_maps,
frame_complete,
stack_slots,
interpreted_entry_offset,
vep_offset);
} else {
guarantee(false, "Unknown Continuation native intrinsic");
}
__ flush();
nmethod* nm = nmethod::new_native_nmethod(method,
compile_id,
@@ -1556,7 +1622,11 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
in_ByteSize(-1),
oop_maps,
exception_offset);
ContinuationEntry::set_enter_code(nm, interpreted_entry_offset);
if (method->is_continuation_enter_intrinsic()) {
ContinuationEntry::set_enter_code(nm, interpreted_entry_offset);
} else if (method->is_continuation_yield_intrinsic()) {
_cont_doYield_stub = nm;
}
return nm;
}

@@ -3901,12 +3901,6 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::x86::_fpu_subnormal_bias2[2]= 0x7bff;
}
RuntimeStub* generate_cont_doYield() {
if (!Continuations::enabled()) return nullptr;
Unimplemented();
return nullptr;
}
address generate_cont_thaw() {
if (!Continuations::enabled()) return nullptr;
Unimplemented();
@@ -4076,9 +4070,6 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_cont_thaw = generate_cont_thaw();
StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
StubRoutines::_cont_doYield_stub = generate_cont_doYield();
StubRoutines::_cont_doYield = StubRoutines::_cont_doYield_stub == nullptr ? nullptr
: StubRoutines::_cont_doYield_stub->entry_point();
JFR_ONLY(StubRoutines::_jfr_write_checkpoint_stub = generate_jfr_write_checkpoint();)
JFR_ONLY(StubRoutines::_jfr_write_checkpoint = StubRoutines::_jfr_write_checkpoint_stub->entry_point();)

@@ -7401,66 +7401,6 @@ address generate_avx_ghash_processBlocks() {
}
RuntimeStub* generate_cont_doYield() {
if (!Continuations::enabled()) return nullptr;
enum layout {
rbp_off,
rbpH_off,
return_off,
return_off2,
framesize // inclusive of return address
};
CodeBuffer code("cont_doYield", 512, 64);
MacroAssembler* _masm = new MacroAssembler(&code);
address start = __ pc();
__ enter();
address the_pc = __ pc();
int frame_complete = the_pc - start;
// This nop must be exactly at the PC we push into the frame info.
// We use this nop for fast CodeBlob lookup, associate the OopMap
// with it right away.
__ post_call_nop();
OopMapSet* oop_maps = new OopMapSet();
OopMap* map = new OopMap(framesize, 1);
oop_maps->add_gc_map(frame_complete, map);
__ set_last_Java_frame(rsp, rbp, the_pc, rscratch1);
__ movptr(c_rarg0, r15_thread);
__ movptr(c_rarg1, rsp);
__ call_VM_leaf(Continuation::freeze_entry(), 2);
__ reset_last_Java_frame(true);
Label L_pinned;
__ testptr(rax, rax);
__ jcc(Assembler::notZero, L_pinned);
__ movptr(rsp, Address(r15_thread, JavaThread::cont_entry_offset()));
__ continuation_enter_cleanup();
__ pop(rbp);
__ ret(0);
__ bind(L_pinned);
// Pinned, return to caller
__ leave();
__ ret(0);
RuntimeStub* stub =
RuntimeStub::new_runtime_stub(code.name(),
&code,
frame_complete,
(framesize >> (LogBytesPerWord - LogBytesPerInt)),
oop_maps,
false);
return stub;
}
address generate_cont_thaw(const char* label, Continuation::thaw_kind kind) {
if (!Continuations::enabled()) return nullptr;
@@ -7861,9 +7801,6 @@ address generate_avx_ghash_processBlocks() {
StubRoutines::_cont_thaw = generate_cont_thaw();
StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
StubRoutines::_cont_doYield_stub = generate_cont_doYield();
StubRoutines::_cont_doYield = StubRoutines::_cont_doYield_stub == nullptr ? nullptr
: StubRoutines::_cont_doYield_stub->entry_point();
JFR_ONLY(StubRoutines::_jfr_write_checkpoint_stub = generate_jfr_write_checkpoint();)
JFR_ONLY(StubRoutines::_jfr_write_checkpoint = StubRoutines::_jfr_write_checkpoint_stub->entry_point();)

@@ -655,20 +655,6 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
// End of helpers
address TemplateInterpreterGenerator::generate_Continuation_doYield_entry(void) {
if (!Continuations::enabled()) return nullptr;
address entry = __ pc();
assert(StubRoutines::cont_doYield() != NULL, "stub not yet generated");
__ push_cont_fastpath();
__ jump(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::cont_doYield())));
// return value is in rax
return entry;
}
// Method entry for java.lang.ref.Reference.get.
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
// Code: _aload_0, _getfield, _areturn

@@ -225,7 +225,6 @@ bool Compiler::is_intrinsic_supported(const methodHandle& method) {
case vmIntrinsics::_compareAndSetReference:
case vmIntrinsics::_getCharStringU:
case vmIntrinsics::_putCharStringU:
case vmIntrinsics::_Continuation_doYield:
#ifdef JFR_HAVE_INTRINSICS
case vmIntrinsics::_counterTime:
#endif

@@ -3016,10 +3016,6 @@ void LIRGenerator::do_Intrinsic(Intrinsic* x) {
do_vectorizedMismatch(x);
break;
case vmIntrinsics::_Continuation_doYield:
do_continuation_doYield(x);
break;
case vmIntrinsics::_blackhole:
do_blackhole(x);
break;

@@ -272,7 +272,6 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
void do_update_CRC32C(Intrinsic* x);
void do_vectorizedMismatch(Intrinsic* x);
void do_blackhole(Intrinsic* x);
void do_continuation_doYield(Intrinsic* x);
public:
LIR_Opr call_runtime(BasicTypeArray* signature, LIRItemList* args, address entry, ValueType* result_type, CodeEmitInfo* info);

@@ -341,7 +341,6 @@ const char* Runtime1::name_for_address(address entry) {
FUNCTION_CASE(entry, StubRoutines::dsin());
FUNCTION_CASE(entry, StubRoutines::dcos());
FUNCTION_CASE(entry, StubRoutines::dtan());
FUNCTION_CASE(entry, StubRoutines::cont_doYield());
#undef FUNCTION_CASE

@@ -238,6 +238,7 @@ bool vmIntrinsics::disabled_by_jvm_flags(vmIntrinsics::ID id) {
case vmIntrinsics::_countPositives:
case vmIntrinsics::_Reference_get:
case vmIntrinsics::_Continuation_doYield:
case vmIntrinsics::_Continuation_enterSpecial:
break;
default:
return true;

@@ -550,7 +550,7 @@ class methodHandle;
do_signature(continuationEnterSpecial_signature, "(Ljdk/internal/vm/Continuation;ZZ)V") \
do_signature(continuationGetStacks_signature, "(III)V") \
do_alias(continuationOnPinned_signature, int_void_signature) \
do_intrinsic(_Continuation_doYield, jdk_internal_vm_Continuation, doYield_name, continuationDoYield_signature, F_S) \
do_intrinsic(_Continuation_doYield, jdk_internal_vm_Continuation, doYield_name, continuationDoYield_signature, F_SN) \
do_alias( continuationDoYield_signature, void_int_signature) \
\
/* support for UnsafeConstants */ \
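
The F_S to F_SN flag change above mirrors the Java-side change at the end of this commit: doYield stops being a plain static method with a throwing fallback body and becomes a static native method, so its vmIntrinsics entry must carry the native flag. Assuming the conventional flag meanings in vmIntrinsics.hpp (F_S: static; F_SN: static, native):

    // before (F_S):  private static int doYield() { throw new Error("Intrinsic not installed"); }
    // after  (F_SN): private native static int doYield();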

@@ -430,7 +430,7 @@ int nmethod::total_size() const {
const char* nmethod::compile_kind() const {
if (is_osr_method()) return "osr";
if (method() != NULL && is_native_method()) {
if (method()->is_continuation_enter_intrinsic()) {
if (method()->is_continuation_native_intrinsic()) {
return "cnt";
}
return "c2n";

@@ -53,6 +53,15 @@ bool BarrierSetNMethod::supports_entry_barrier(nmethod* nm) {
return false;
}
if (nm->method()->is_continuation_yield_intrinsic()) {
return false;
}
if (nm->method()->is_continuation_native_intrinsic()) {
guarantee(false, "Unknown Continuation native intrinsic");
return false;
}
if (!nm->is_native_method() && !nm->is_compiled_by_c2() && !nm->is_compiled_by_c1()) {
return false;
}

@@ -150,11 +150,6 @@ AbstractInterpreter::MethodKind AbstractInterpreter::method_kind(const methodHan
case vmIntrinsics::_dsqrt: return java_lang_math_sqrt;
case vmIntrinsics::_dsqrt_strict: return native;
case vmIntrinsics::_Reference_get: return java_lang_ref_reference_get;
case vmIntrinsics::_Continuation_doYield:
if (VMContinuations) {
return java_lang_continuation_doYield;
}
break;
case vmIntrinsics::_Object_init:
if (RegisterFinalizersAtInit && m->code_size() == 1) {
// We need to execute the special return bytecode to check for
@@ -168,8 +163,9 @@ AbstractInterpreter::MethodKind AbstractInterpreter::method_kind(const methodHan
// Native method?
if (m->is_native()) {
if (m->is_continuation_enter_intrinsic()) {
return zerolocals;
if (m->is_continuation_native_intrinsic()) {
// This entry will never be called. The real entry gets generated later, like for MH intrinsics.
return abstract;
}
assert(!m->is_method_handle_intrinsic(), "overlapping bits here, watch out");
return m->is_synchronized() ? native_synchronized : native;

@@ -81,7 +81,6 @@ class AbstractInterpreter: AllStatic {
java_lang_math_fmaF, // implementation of java.lang.Math.fma (x, y, z)
java_lang_math_fmaD, // implementation of java.lang.Math.fma (x, y, z)
java_lang_ref_reference_get, // implementation of java.lang.ref.Reference.get()
java_lang_continuation_doYield, // implementation of jdk.internal.vm.Continuation.doYield()
java_util_zip_CRC32_update, // implementation of java.util.zip.CRC32.update()
java_util_zip_CRC32_updateBytes, // implementation of java.util.zip.CRC32.updateBytes()
java_util_zip_CRC32_updateByteBuffer, // implementation of java.util.zip.CRC32.updateByteBuffer()

@@ -1085,7 +1085,7 @@ void LinkResolver::resolve_static_call(CallInfo& result,
resolved_method = linktime_resolve_static_method(new_info, CHECK);
}
if (resolved_method->is_continuation_enter_intrinsic()
if (resolved_method->is_continuation_native_intrinsic()
&& resolved_method->from_interpreted_entry() == NULL) { // does a load_acquire
methodHandle mh(THREAD, resolved_method);
// Generate a compiled form of the enterSpecial intrinsic.

@@ -223,8 +223,6 @@ void TemplateInterpreterGenerator::generate_all() {
method_entry(java_lang_Double_longBitsToDouble);
method_entry(java_lang_Double_doubleToRawLongBits);
method_entry(java_lang_continuation_doYield)
#undef method_entry
// Bytecodes
@@ -425,8 +423,6 @@ address TemplateInterpreterGenerator::generate_method_entry(
case Interpreter::java_lang_math_fmaF : entry_point = generate_math_entry(kind); break;
case Interpreter::java_lang_ref_reference_get
: entry_point = generate_Reference_get_entry(); break;
case Interpreter::java_lang_continuation_doYield
: entry_point = generate_Continuation_doYield_entry(); break;
case Interpreter::java_util_zip_CRC32_update
: native = true; entry_point = generate_CRC32_update_entry(); break;
case Interpreter::java_util_zip_CRC32_updateBytes

@@ -91,7 +91,6 @@ class TemplateInterpreterGenerator: public AbstractInterpreterGenerator {
address generate_abstract_entry(void);
address generate_math_entry(AbstractInterpreter::MethodKind kind);
address generate_Reference_get_entry();
address generate_Continuation_doYield_entry();
address generate_CRC32_update_entry();
address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind);
address generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind);

@@ -336,7 +336,6 @@
static_field(StubRoutines, _vectorizedMismatch, address) \
static_field(StubRoutines, _bigIntegerRightShiftWorker, address) \
static_field(StubRoutines, _bigIntegerLeftShiftWorker, address) \
static_field(StubRoutines, _cont_doYield, address) \
static_field(StubRoutines, _cont_thaw, address) \
\
nonstatic_field(Thread, _tlab, ThreadLocalAllocBuffer) \

@@ -1237,10 +1237,11 @@ void Method::link_method(const methodHandle& h_method, TRAPS) {
// ONLY USE the h_method now as make_adapter may have blocked
if (h_method->is_continuation_enter_intrinsic()) {
if (h_method->is_continuation_native_intrinsic()) {
// the entry points to this method will be set in set_code, called when first resolving this method
_from_interpreted_entry = NULL;
_from_compiled_entry = NULL;
_i2i_entry = NULL;
}
}
@@ -1320,11 +1321,17 @@ void Method::set_code(const methodHandle& mh, CompiledMethod *code) {
mh->_from_compiled_entry = code->verified_entry_point();
OrderAccess::storestore();
if (mh->is_continuation_enter_intrinsic()) {
if (mh->is_continuation_native_intrinsic()) {
assert(mh->_from_interpreted_entry == NULL, "initialized incorrectly"); // see link_method
// This is the entry used when we're in interpreter-only mode; see InterpreterMacroAssembler::jump_from_interpreted
mh->_i2i_entry = ContinuationEntry::interpreted_entry();
if (mh->is_continuation_enter_intrinsic()) {
// This is the entry used when we're in interpreter-only mode; see InterpreterMacroAssembler::jump_from_interpreted
mh->_i2i_entry = ContinuationEntry::interpreted_entry();
} else if (mh->is_continuation_yield_intrinsic()) {
mh->_i2i_entry = mh->get_i2c_entry();
} else {
guarantee(false, "Unknown Continuation native intrinsic");
}
// This must come last, as it is what's tested in LinkResolver::resolve_static_call
Atomic::release_store(&mh->_from_interpreted_entry , mh->get_i2c_entry());
} else if (!mh->is_method_handle_intrinsic()) {

@@ -727,6 +727,8 @@ public:
// Continuation
inline bool is_continuation_enter_intrinsic() const;
inline bool is_continuation_yield_intrinsic() const;
inline bool is_continuation_native_intrinsic() const;
inline bool is_special_native_intrinsic() const;
static Klass* check_non_bcp_klass(Klass* klass);

@@ -93,8 +93,18 @@ inline bool Method::is_empty_method() const {
inline bool Method::is_continuation_enter_intrinsic() const {
return intrinsic_id() == vmIntrinsics::_Continuation_enterSpecial;
}
inline bool Method::is_continuation_yield_intrinsic() const {
return intrinsic_id() == vmIntrinsics::_Continuation_doYield;
}
inline bool Method::is_continuation_native_intrinsic() const {
return intrinsic_id() == vmIntrinsics::_Continuation_enterSpecial ||
intrinsic_id() == vmIntrinsics::_Continuation_doYield;
}
inline bool Method::is_special_native_intrinsic() const {
return is_method_handle_intrinsic() || is_continuation_enter_intrinsic();
return is_method_handle_intrinsic() || is_continuation_native_intrinsic();
}
#endif // SHARE_OOPS_METHOD_INLINE_HPP
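
The predicates above are arranged so that is_continuation_native_intrinsic() is exactly the union of the two specific checks, letting call sites test the union first and then branch on the kind. The dispatch pattern used by generate_native_wrapper and Method::set_code elsewhere in this commit, condensed:

    if (m->is_continuation_native_intrinsic()) {
      if (m->is_continuation_enter_intrinsic()) {
        // enterSpecial: gen_continuation_enter + ContinuationEntry::set_enter_code
      } else if (m->is_continuation_yield_intrinsic()) {
        // doYield: gen_continuation_yield + SharedRuntime::_cont_doYield_stub
      } else {
        guarantee(false, "Unknown Continuation native intrinsic");
      }
    }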

@@ -739,7 +739,6 @@ bool C2Compiler::is_intrinsic_supported(const methodHandle& method, bool is_virt
case vmIntrinsics::_Preconditions_checkIndex:
case vmIntrinsics::_Preconditions_checkLongIndex:
case vmIntrinsics::_getObjectSize:
case vmIntrinsics::_Continuation_doYield:
break;
case vmIntrinsics::_VectorCompressExpand:

@ -644,9 +644,6 @@ bool LibraryCallKit::try_to_inline(int predicate) {
case vmIntrinsics::_fmaF:
return inline_fma(intrinsic_id());
case vmIntrinsics::_Continuation_doYield:
return inline_continuation_do_yield();
case vmIntrinsics::_isDigit:
case vmIntrinsics::_isLowerCase:
case vmIntrinsics::_isUpperCase:
@@ -7410,15 +7407,6 @@ Node* LibraryCallKit::inline_digestBase_implCompressMB_predicate(int predicate)
return instof_false; // even if it is NULL
}
bool LibraryCallKit::inline_continuation_do_yield() {
address call_addr = StubRoutines::cont_doYield();
const TypeFunc* tf = OptoRuntime::continuation_doYield_Type();
Node* call = make_runtime_call(RC_NO_LEAF, tf, call_addr, "doYield", TypeRawPtr::BOTTOM);
Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
set_result(result);
return true;
}
//-------------inline_fma-----------------------------------
bool LibraryCallKit::inline_fma(vmIntrinsics::ID id) {
Node *a = NULL;

@@ -730,37 +730,6 @@ const TypeFunc* OptoRuntime::void_void_Type() {
return TypeFunc::make(domain, range);
}
const TypeFunc* OptoRuntime::continuation_doYield_Type() {
// create input type (domain)
const Type **fields = TypeTuple::fields(0);
const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+0, fields);
// create result type (range)
fields = TypeTuple::fields(1);
fields[TypeFunc::Parms+0] = TypeInt::INT;
const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
return TypeFunc::make(domain, range);
}
const TypeFunc* OptoRuntime::continuation_jump_Type() {
// create input type (domain)
const Type **fields = TypeTuple::fields(6);
fields[TypeFunc::Parms+0] = TypeLong::LONG;
fields[TypeFunc::Parms+1] = Type::HALF;
fields[TypeFunc::Parms+2] = TypeLong::LONG;
fields[TypeFunc::Parms+3] = Type::HALF;
fields[TypeFunc::Parms+4] = TypeLong::LONG;
fields[TypeFunc::Parms+5] = Type::HALF;
const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+6, fields);
// create result type (range)
fields = TypeTuple::fields(0);
const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
return TypeFunc::make(domain, range);
}
const TypeFunc* OptoRuntime::jfr_write_checkpoint_Type() {
// create input type (domain)
const Type **fields = TypeTuple::fields(0);

@@ -243,8 +243,6 @@ private:
static const TypeFunc* l2f_Type();
static const TypeFunc* void_long_Type();
static const TypeFunc* void_void_Type();
static const TypeFunc* continuation_doYield_Type();
static const TypeFunc* continuation_jump_Type();
static const TypeFunc* jfr_write_checkpoint_Type();

@@ -59,6 +59,7 @@
#include "runtime/orderAccess.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/smallRegisterMap.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stackChunkFrameStream.inline.hpp"
#include "runtime/stackFrameStream.inline.hpp"
#include "runtime/stackOverflow.hpp"
@@ -220,7 +221,7 @@ template<typename ConfigT> static inline intptr_t* thaw_internal(JavaThread* thr
// Entry point to freeze. Transitions are handled manually
// Called from generate_cont_doYield() in stubGenerator_<cpu>.cpp through Continuation::freeze_entry();
// Called from gen_continuation_yield() in sharedRuntime_<cpu>.cpp through Continuation::freeze_entry();
template<typename ConfigT>
static JRT_BLOCK_ENTRY(int, freeze(JavaThread* current, intptr_t* sp))
assert(sp == current->frame_anchor()->last_Java_sp(), "");
@@ -381,6 +382,10 @@ public:
inline int size_if_fast_freeze_available();
#ifdef ASSERT
bool interpreted_native_or_deoptimized_on_stack();
#endif
protected:
inline void init_rest();
void freeze_fast_init_cont_data(intptr_t* frame_sp);
@@ -480,7 +485,7 @@ FreezeBase::FreezeBase(JavaThread* thread, ContinuationWrapper& cont, intptr_t*
assert(_cont.chunk_invariant(), "");
assert(!Interpreter::contains(_cont.entryPC()), "");
static const int doYield_stub_frame_size = frame::metadata_words;
assert(StubRoutines::cont_doYield_stub()->frame_size() == doYield_stub_frame_size, "");
assert(SharedRuntime::cont_doYield_stub()->frame_size() == doYield_stub_frame_size, "");
// properties of the continuation on the stack; all sizes are in words
_cont_stack_top = frame_sp + doYield_stub_frame_size; // we don't freeze the doYield stub frame
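
This assert ties the shared freeze code to the per-CPU generators: the doYield frame contains nothing but frame metadata. Assuming frame::metadata_words == 2 on these platforms (saved fp plus return pc), the arithmetic lines up with the stack_slots == 2 assert in gen_continuation_yield:

    // framesize   = 4 VMReg slots (two 64-bit words: saved fp/rbp + return pc)
    // stack_slots = framesize / VMRegImpl::slots_per_word = 4 / 2 = 2 words
    // so cont_doYield_stub()->frame_size() == 2 == frame::metadata_words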
@@ -554,7 +559,7 @@ int FreezeBase::size_if_fast_freeze_available() {
return 0;
}
assert(StubRoutines::cont_doYield_stub()->frame_size() == frame::metadata_words, "");
assert(SharedRuntime::cont_doYield_stub()->frame_size() == frame::metadata_words, "");
int total_size_needed = cont_size();
@@ -745,7 +750,6 @@ NOINLINE freeze_result FreezeBase::freeze_slow() {
frame FreezeBase::freeze_start_frame() {
frame f = _thread->last_frame();
if (LIKELY(!_preempt)) {
assert(StubRoutines::cont_doYield_stub()->contains(f.pc()), "");
return freeze_start_frame_yield_stub(f);
} else {
return freeze_start_frame_safepoint_stub(f);
@@ -753,8 +757,9 @@ frame FreezeBase::freeze_start_frame() {
}
frame FreezeBase::freeze_start_frame_yield_stub(frame f) {
assert(StubRoutines::cont_doYield_stub()->contains(f.pc()), "must be");
f = sender<ContinuationHelper::StubFrame>(f);
assert(SharedRuntime::cont_doYield_stub()->contains(f.pc()), "must be");
f = sender<ContinuationHelper::NonInterpretedUnknownFrame>(f);
assert(Continuation::is_frame_in_continuation(_thread->last_continuation(), f), "");
return f;
}
@@ -772,6 +777,7 @@ frame FreezeBase::freeze_start_frame_safepoint_stub(frame f) {
f = sender<ContinuationHelper::StubFrame>(f); // Safepoint stub in interpreter
}
}
assert(Continuation::is_frame_in_continuation(_thread->last_continuation(), f), "");
return f;
}
@@ -1356,14 +1362,14 @@ static bool monitors_on_stack(JavaThread* thread) {
return false;
}
static bool interpreted_native_or_deoptimized_on_stack(JavaThread* thread) {
ContinuationEntry* ce = thread->last_continuation();
RegisterMap map(thread,
bool FreezeBase::interpreted_native_or_deoptimized_on_stack() {
ContinuationEntry* ce = _thread->last_continuation();
RegisterMap map(_thread,
RegisterMap::UpdateMap::skip,
RegisterMap::ProcessFrames::skip,
RegisterMap::WalkContinuation::skip);
map.set_include_argument_oops(false);
for (frame f = thread->last_frame(); Continuation::is_frame_in_continuation(ce, f); f = f.sender(&map)) {
for (frame f = freeze_start_frame(); Continuation::is_frame_in_continuation(ce, f); f = f.sender(&map)) {
if (f.is_interpreted_frame() || f.is_native_frame() || f.is_deoptimized_frame()) {
return true;
}
@@ -1434,7 +1440,7 @@ static inline int freeze_internal(JavaThread* current, intptr_t* const sp) {
// adapter or called Deoptimization::unpack_frames. Calls from native frames also go through the interpreter
// (see JavaCalls::call_helper).
assert(!current->cont_fastpath()
|| (current->cont_fastpath_thread_state() && !interpreted_native_or_deoptimized_on_stack(current)), "");
|| (current->cont_fastpath_thread_state() && !freeze.interpreted_native_or_deoptimized_on_stack()), "");
bool fast = UseContinuationFastPath && current->cont_fastpath();
if (fast && freeze.size_if_fast_freeze_available() > 0) {
freeze.freeze_fast_existing_chunk();
@@ -2311,7 +2317,7 @@ static void do_deopt_after_thaw(JavaThread* thread) {
for (; !fst.is_done(); fst.next()) {
if (fst.current()->cb()->is_compiled()) {
CompiledMethod* cm = fst.current()->cb()->as_compiled_method();
if (!cm->method()->is_continuation_enter_intrinsic()) {
if (!cm->method()->is_continuation_native_intrinsic()) {
cm->make_deoptimized();
}
}

@@ -97,6 +97,7 @@ SafepointBlob* SharedRuntime::_polling_page_return_handler_blob;
UncommonTrapBlob* SharedRuntime::_uncommon_trap_blob;
#endif // COMPILER2
nmethod* SharedRuntime::_cont_doYield_stub;
//----------------------------generate_stubs-----------------------------------
void SharedRuntime::generate_stubs() {

@@ -72,6 +72,8 @@ class SharedRuntime: AllStatic {
static UncommonTrapBlob* _uncommon_trap_blob;
#endif // COMPILER2
static nmethod* _cont_doYield_stub;
#ifndef PRODUCT
// Counters
static int64_t _nof_megamorphic_calls; // total # of megamorphic calls (through vtable)
@@ -249,6 +251,11 @@ class SharedRuntime: AllStatic {
static SafepointBlob* polling_page_safepoint_handler_blob() { return _polling_page_safepoint_handler_blob; }
static SafepointBlob* polling_page_vectors_safepoint_handler_blob() { return _polling_page_vectors_safepoint_handler_blob; }
static nmethod* cont_doYield_stub() {
assert(_cont_doYield_stub != nullptr, "oops");
return _cont_doYield_stub;
}
// Counters
#ifndef PRODUCT
static address nof_megamorphic_calls_addr() { return (address)&_nof_megamorphic_calls; }

@@ -173,8 +173,6 @@ address StubRoutines::_dtan = NULL;
address StubRoutines::_vector_f_math[VectorSupport::NUM_VEC_SIZES][VectorSupport::NUM_SVML_OP] = {{NULL}, {NULL}};
address StubRoutines::_vector_d_math[VectorSupport::NUM_VEC_SIZES][VectorSupport::NUM_SVML_OP] = {{NULL}, {NULL}};
RuntimeStub* StubRoutines::_cont_doYield_stub = NULL;
address StubRoutines::_cont_doYield = NULL;
address StubRoutines::_cont_thaw = NULL;
address StubRoutines::_cont_returnBarrier = NULL;
address StubRoutines::_cont_returnBarrierExc = NULL;

@@ -251,8 +251,6 @@ class StubRoutines: AllStatic {
static address _dlibm_tan_cot_huge;
static address _dtan;
static RuntimeStub* _cont_doYield_stub;
static address _cont_doYield;
static address _cont_thaw;
static address _cont_returnBarrier;
static address _cont_returnBarrierExc;
@@ -430,8 +428,6 @@ class StubRoutines: AllStatic {
static address dlibm_tan_cot_huge() { return _dlibm_tan_cot_huge; }
static address dtan() { return _dtan; }
static RuntimeStub* cont_doYield_stub() { return _cont_doYield_stub; }
static address cont_doYield() { return _cont_doYield; }
static address cont_thaw() { return _cont_thaw; }
static address cont_returnBarrier() { return _cont_returnBarrier; }
static address cont_returnBarrierExc(){return _cont_returnBarrierExc; }

@@ -305,7 +305,7 @@ public class Continuation {
}
@IntrinsicCandidate
private static int doYield() { throw new Error("Intrinsic not installed"); }
private native static int doYield();
@IntrinsicCandidate
private native static void enterSpecial(Continuation c, boolean isContinue, boolean isVirtualThread);