8338383: Implement JEP 491: Synchronize Virtual Threads without Pinning
Co-authored-by: Patricio Chilano Mateo <pchilanomate@openjdk.org>
Co-authored-by: Alan Bateman <alanb@openjdk.org>
Co-authored-by: Andrew Haley <aph@openjdk.org>
Co-authored-by: Fei Yang <fyang@openjdk.org>
Co-authored-by: Coleen Phillimore <coleenp@openjdk.org>
Co-authored-by: Richard Reingruber <rrich@openjdk.org>
Co-authored-by: Martin Doerr <mdoerr@openjdk.org>
Reviewed-by: aboldtch, dholmes, coleenp, fbredberg, dlong, sspitsyn
commit 78b80150e0 (parent 8a2a75e56d)
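
JEP 491 removes the long-standing limitation that a virtual thread performing monitor operations pins its carrier thread. A minimal Java sketch of the user-visible scenario this patch targets (illustrative only, not part of the patch; class and field names are made up):

    import java.util.concurrent.CountDownLatch;

    // Before JEP 491, a virtual thread blocking on entry to a contended
    // synchronized block pinned its carrier thread. With this change the
    // virtual thread can unmount instead, freeing the carrier to run other
    // virtual threads.
    public class SynchronizedNoPinning {
        private static final Object LOCK = new Object();

        public static void main(String[] args) throws InterruptedException {
            CountDownLatch started = new CountDownLatch(1);

            Thread holder = Thread.ofVirtual().start(() -> {
                synchronized (LOCK) {
                    started.countDown();
                    sleepQuietly(100); // hold the monitor for a while
                }
            });

            started.await();

            // Blocks on monitor entry; with JEP 491 it no longer pins its
            // carrier while waiting for LOCK.
            Thread contender = Thread.ofVirtual().start(() -> {
                synchronized (LOCK) {
                    System.out.println("acquired the monitor without pinning");
                }
            });

            holder.join();
            contender.join();
        }

        private static void sleepQuietly(long millis) {
            try {
                Thread.sleep(millis);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }
    }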
@@ -1648,8 +1648,8 @@ int MachCallRuntimeNode::ret_addr_offset() {
   // for real runtime callouts it will be six instructions
   //   see aarch64_enc_java_to_runtime
   //   adr(rscratch2, retaddr)
+  //   str(rscratch2, Address(rthread, JavaThread::last_Java_pc_offset()));
   //   lea(rscratch1, RuntimeAddress(addr)
-  //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
   //   blr(rscratch1)
   CodeBlob *cb = CodeCache::find_blob(_entry_point);
   if (cb) {

@@ -3696,14 +3696,13 @@ encode %{
       __ post_call_nop();
     } else {
       Label retaddr;
+      // Make the anchor frame walkable
       __ adr(rscratch2, retaddr);
+      __ str(rscratch2, Address(rthread, JavaThread::last_Java_pc_offset()));
       __ lea(rscratch1, RuntimeAddress(entry));
-      // Leave a breadcrumb for JavaFrameAnchor::capture_last_Java_pc()
-      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
       __ blr(rscratch1);
       __ bind(retaddr);
       __ post_call_nop();
-      __ add(sp, sp, 2 * wordSize);
     }
     if (Compile::current()->max_vector_size() > 0) {
       __ reinitialize_ptrue();
@@ -119,8 +119,8 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
     cbnz(hdr, slow_case);
     // done
     bind(done);
+    inc_held_monitor_count(rscratch1);
   }
-  increment(Address(rthread, JavaThread::held_monitor_count_offset()));
   return null_check_offset;
 }

@@ -159,8 +159,8 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_
     }
     // done
     bind(done);
+    dec_held_monitor_count(rscratch1);
   }
-  decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
 }
@@ -160,16 +160,15 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre
 }

 enum return_state_t {
-  does_not_return, requires_return
+  does_not_return, requires_return, requires_pop_epilogue_return
 };

 // Implementation of StubFrame

 class StubFrame: public StackObj {
  private:
   StubAssembler* _sasm;
-  bool _return_state;
+  return_state_t _return_state;

  public:
   StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments, return_state_t return_state=requires_return);

@@ -183,8 +182,17 @@ void StubAssembler::prologue(const char* name, bool must_gc_arguments) {
   enter();
 }

-void StubAssembler::epilogue() {
+void StubAssembler::epilogue(bool use_pop) {
+  // Avoid using a leave instruction when this frame may
+  // have been frozen, since the current value of rfp
+  // restored from the stub would be invalid. We still
+  // must restore the rfp value saved on enter though.
+  if (use_pop) {
+    ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
+    authenticate_return_address();
+  } else {
     leave();
+  }
   ret(lr);
 }

@@ -203,10 +211,10 @@ void StubFrame::load_argument(int offset_in_words, Register reg) {
 }

 StubFrame::~StubFrame() {
-  if (_return_state == requires_return) {
-    __ epilogue();
-  } else {
+  if (_return_state == does_not_return) {
     __ should_not_reach_here();
+  } else {
+    __ epilogue(_return_state == requires_pop_epilogue_return);
   }
 }

@@ -252,7 +260,7 @@ static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) {

   for (int i = 0; i < FrameMap::nof_cpu_regs; i++) {
     Register r = as_Register(i);
-    if (i <= 18 && i != rscratch1->encoding() && i != rscratch2->encoding()) {
+    if (r == rthread || (i <= 18 && i != rscratch1->encoding() && i != rscratch2->encoding())) {
       int sp_offset = cpu_reg_save_offsets[i];
       oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
                                 r->as_VMReg());

@@ -337,6 +345,15 @@ void Runtime1::initialize_pd() {
   }
 }

+// return: offset in 64-bit words.
+uint Runtime1::runtime_blob_current_thread_offset(frame f) {
+  CodeBlob* cb = f.cb();
+  assert(cb == Runtime1::blob_for(C1StubId::monitorenter_id) ||
+         cb == Runtime1::blob_for(C1StubId::monitorenter_nofpu_id), "must be");
+  assert(cb != nullptr && cb->is_runtime_stub(), "invalid frame");
+  int offset = cpu_reg_save_offsets[rthread->encoding()];
+  return offset / 2;   // SP offsets are in halfwords
+}

 // target: the entry point of the method that creates and posts the exception oop
 // has_argument: true if the exception needs arguments (passed in rscratch1 and rscratch2)

@@ -868,7 +885,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
       // fall through
     case C1StubId::monitorenter_id:
       {
-        StubFrame f(sasm, "monitorenter", dont_gc_arguments);
+        StubFrame f(sasm, "monitorenter", dont_gc_arguments, requires_pop_epilogue_return);
        OopMap* map = save_live_registers(sasm, save_fpu_registers);

        // Called with store_parameter and not C abi
@@ -153,7 +153,7 @@ void C2_MacroAssembler::fast_lock(Register objectReg, Register boxReg, Register
   Label count, no_count;

   assert(LockingMode != LM_LIGHTWEIGHT, "lightweight locking should use fast_lock_lightweight");
-  assert_different_registers(oop, box, tmp, disp_hdr);
+  assert_different_registers(oop, box, tmp, disp_hdr, rscratch2);

   // Load markWord from object into displaced_header.
   ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

@@ -206,12 +206,10 @@ void C2_MacroAssembler::fast_lock(Register objectReg, Register boxReg, Register
   // Handle existing monitor.
   bind(object_has_monitor);

-  // The object's monitor m is unlocked iff m->owner == nullptr,
-  // otherwise m->owner may contain a thread or a stack address.
-  //
-  // Try to CAS m->owner from null to current thread.
+  // Try to CAS owner (no owner => current thread's _lock_id).
+  ldr(rscratch2, Address(rthread, JavaThread::lock_id_offset()));
   add(tmp, disp_hdr, (in_bytes(ObjectMonitor::owner_offset())-markWord::monitor_value));
-  cmpxchg(tmp, zr, rthread, Assembler::xword, /*acquire*/ true,
+  cmpxchg(tmp, zr, rscratch2, Assembler::xword, /*acquire*/ true,
           /*release*/ true, /*weak*/ false, tmp3Reg); // Sets flags for result

   // Store a non-null value into the box to avoid looking like a re-entrant

@@ -223,7 +221,7 @@ void C2_MacroAssembler::fast_lock(Register objectReg, Register boxReg, Register

   br(Assembler::EQ, cont); // CAS success means locking succeeded

-  cmp(tmp3Reg, rthread);
+  cmp(tmp3Reg, rscratch2);
   br(Assembler::NE, cont); // Check for recursive locking

   // Recursive lock case

@@ -236,7 +234,9 @@ void C2_MacroAssembler::fast_lock(Register objectReg, Register boxReg, Register
   br(Assembler::NE, no_count);

   bind(count);
-  increment(Address(rthread, JavaThread::held_monitor_count_offset()));
+  if (LockingMode == LM_LEGACY) {
+    inc_held_monitor_count(rscratch1);
+  }

   bind(no_count);
 }

@@ -343,7 +343,9 @@ void C2_MacroAssembler::fast_unlock(Register objectReg, Register boxReg, Registe
   br(Assembler::NE, no_count);

   bind(count);
-  decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
+  if (LockingMode == LM_LEGACY) {
+    dec_held_monitor_count(rscratch1);
+  }

   bind(no_count);
 }

@@ -351,7 +353,7 @@ void C2_MacroAssembler::fast_unlock(Register objectReg, Register boxReg, Registe
 void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Register t1,
                                               Register t2, Register t3) {
   assert(LockingMode == LM_LIGHTWEIGHT, "must be");
-  assert_different_registers(obj, box, t1, t2, t3);
+  assert_different_registers(obj, box, t1, t2, t3, rscratch2);

   // Handle inflated monitor.
   Label inflated;

@@ -467,13 +469,14 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Regist
     // Compute owner address.
     lea(t2_owner_addr, owner_address);

-    // CAS owner (null => current thread).
-    cmpxchg(t2_owner_addr, zr, rthread, Assembler::xword, /*acquire*/ true,
+    // Try to CAS owner (no owner => current thread's _lock_id).
+    ldr(rscratch2, Address(rthread, JavaThread::lock_id_offset()));
+    cmpxchg(t2_owner_addr, zr, rscratch2, Assembler::xword, /*acquire*/ true,
             /*release*/ false, /*weak*/ false, t3_owner);
     br(Assembler::EQ, monitor_locked);

     // Check if recursive.
-    cmp(t3_owner, rthread);
+    cmp(t3_owner, rscratch2);
     br(Assembler::NE, slow_path);

     // Recursive.

@@ -486,7 +489,6 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Regist
   }

   bind(locked);
-  increment(Address(rthread, JavaThread::held_monitor_count_offset()));

 #ifdef ASSERT
   // Check that locked label is reached with Flags == EQ.

@@ -655,7 +657,6 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register box, Regi
   }

   bind(unlocked);
-  decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
   cmp(zr, zr); // Set Flags to EQ => fast path

 #ifdef ASSERT
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -129,6 +129,11 @@ void FreezeBase::adjust_interpreted_frame_unextended_sp(frame& f) {
   }
 }

+inline void FreezeBase::prepare_freeze_interpreted_top_frame(frame& f) {
+  assert(f.interpreter_frame_last_sp() == nullptr, "should be null for top frame");
+  f.interpreter_frame_set_last_sp(f.unextended_sp());
+}
+
 inline void FreezeBase::relativize_interpreted_frame_metadata(const frame& f, const frame& hf) {
   assert(hf.fp() == hf.unextended_sp() + (f.fp() - f.unextended_sp()), "");
   assert((f.at(frame::interpreter_frame_last_sp_offset) != 0)

@@ -149,10 +154,16 @@ inline void FreezeBase::relativize_interpreted_frame_metadata(const frame& f, co
   // extended_sp is already relativized by TemplateInterpreterGenerator::generate_normal_entry or
   // AbstractInterpreter::layout_activation

+  // The interpreter native wrapper code adds space in the stack equal to size_of_parameters()
+  // after the fixed part of the frame. For wait0 this is equal to 3 words (this + long parameter).
+  // We adjust by this size since otherwise the saved last sp will be less than the extended_sp.
+  DEBUG_ONLY(Method* m = hf.interpreter_frame_method();)
+  DEBUG_ONLY(int extra_space = m->is_object_wait0() ? m->size_of_parameters() : 0;)
+
   assert((hf.fp() - hf.unextended_sp()) == (f.fp() - f.unextended_sp()), "");
   assert(hf.unextended_sp() == (intptr_t*)hf.at(frame::interpreter_frame_last_sp_offset), "");
   assert(hf.unextended_sp() <= (intptr_t*)hf.at(frame::interpreter_frame_initial_sp_offset), "");
-  assert(hf.unextended_sp() > (intptr_t*)hf.at(frame::interpreter_frame_extended_sp_offset), "");
+  assert(hf.unextended_sp() + extra_space > (intptr_t*)hf.at(frame::interpreter_frame_extended_sp_offset), "");
   assert(hf.fp() > (intptr_t*)hf.at(frame::interpreter_frame_initial_sp_offset), "");
   assert(hf.fp() <= (intptr_t*)hf.at(frame::interpreter_frame_locals_offset), "");
 }

@@ -213,7 +224,6 @@ template<typename FKind> frame ThawBase::new_stack_frame(const frame& hf, frame&
   // If caller is interpreted it already made room for the callee arguments
   int overlap = caller.is_interpreted_frame() ? ContinuationHelper::InterpretedFrame::stack_argsize(hf) : 0;
   const int fsize = (int)(ContinuationHelper::InterpretedFrame::frame_bottom(hf) - hf.unextended_sp() - overlap);
-  const int locals = hf.interpreter_frame_method()->max_locals();
   intptr_t* frame_sp = caller.unextended_sp() - fsize;
   intptr_t* fp = frame_sp + (hf.fp() - heap_sp);
   if ((intptr_t)fp % frame::frame_alignment != 0) {

@@ -235,7 +245,7 @@ template<typename FKind> frame ThawBase::new_stack_frame(const frame& hf, frame&
   int fsize = FKind::size(hf);
   intptr_t* frame_sp = caller.unextended_sp() - fsize;
   if (bottom || caller.is_interpreted_frame()) {
-    int argsize = hf.compiled_frame_stack_argsize();
+    int argsize = FKind::stack_argsize(hf);

     fsize += argsize;
     frame_sp -= argsize;

@@ -252,8 +262,8 @@ template<typename FKind> frame ThawBase::new_stack_frame(const frame& hf, frame&
     // we need to recreate a "real" frame pointer, pointing into the stack
     fp = frame_sp + FKind::size(hf) - frame::sender_sp_offset;
   } else {
-    fp = FKind::stub
-      ? frame_sp + fsize - frame::sender_sp_offset // on AArch64, this value is used for the safepoint stub
+    fp = FKind::stub || FKind::native
+      ? frame_sp + fsize - frame::sender_sp_offset // fp always points to the address below the pushed return pc. We need correct address.
      : *(intptr_t**)(hf.sp() - frame::sender_sp_offset); // we need to re-read fp because it may be an oop and we might have fixed the frame.
   }
   return frame(frame_sp, frame_sp, fp, hf.pc(), hf.cb(), hf.oop_map(), false); // TODO PERF : this computes deopt state; is it necessary?

@@ -277,6 +287,22 @@ inline void ThawBase::patch_pd(frame& f, const frame& caller) {
   patch_callee_link(caller, caller.fp());
 }

+inline void ThawBase::patch_pd(frame& f, intptr_t* caller_sp) {
+  intptr_t* fp = caller_sp - frame::sender_sp_offset;
+  patch_callee_link(f, fp);
+}
+
+inline intptr_t* ThawBase::push_cleanup_continuation() {
+  frame enterSpecial = new_entry_frame();
+  intptr_t* sp = enterSpecial.sp();
+
+  sp[-1] = (intptr_t)ContinuationEntry::cleanup_pc();
+  sp[-2] = (intptr_t)enterSpecial.fp();
+
+  log_develop_trace(continuations, preempt)("push_cleanup_continuation initial sp: " INTPTR_FORMAT " final sp: " INTPTR_FORMAT, p2i(sp + 2 * frame::metadata_words), p2i(sp));
+  return sp;
+}
+
 inline void ThawBase::derelativize_interpreted_frame_metadata(const frame& hf, const frame& f) {
   // Make sure that last_sp is kept relativized.
   assert((intptr_t*)f.at_relative(frame::interpreter_frame_last_sp_offset) == f.unextended_sp(), "");

@@ -285,7 +311,9 @@ inline void ThawBase::derelativize_interpreted_frame_metadata(const frame& hf, c
   assert(f.at_absolute(frame::interpreter_frame_monitor_block_top_offset) <= frame::interpreter_frame_initial_sp_offset, "");

   // Make sure that extended_sp is kept relativized.
-  assert((intptr_t*)f.at_relative(frame::interpreter_frame_extended_sp_offset) < f.unextended_sp(), "");
+  DEBUG_ONLY(Method* m = hf.interpreter_frame_method();)
+  DEBUG_ONLY(int extra_space = m->is_object_wait0() ? m->size_of_parameters() : 0;) // see comment in relativize_interpreted_frame_metadata()
+  assert((intptr_t*)f.at_relative(frame::interpreter_frame_extended_sp_offset) < f.unextended_sp() + extra_space, "");
 }

 #endif // CPU_AARCH64_CONTINUATIONFREEZETHAW_AARCH64_INLINE_HPP
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -40,6 +40,22 @@ static inline intptr_t** link_address(const frame& f) {
                     : (intptr_t**)(f.unextended_sp() + f.cb()->frame_size() - frame::sender_sp_offset);
 }

+static inline void patch_return_pc_with_preempt_stub(frame& f) {
+  if (f.is_runtime_frame()) {
+    // Unlike x86 we don't know where in the callee frame the return pc is
+    // saved so we can't patch the return from the VM call back to Java.
+    // Instead, we will patch the return from the runtime stub back to the
+    // compiled method so that the target returns to the preempt cleanup stub.
+    intptr_t* caller_sp = f.sp() + f.cb()->frame_size();
+    caller_sp[-1] = (intptr_t)StubRoutines::cont_preempt_stub();
+  } else {
+    // The target will check for preemption once it returns to the interpreter
+    // or the native wrapper code and will manually jump to the preempt stub.
+    JavaThread *thread = JavaThread::current();
+    thread->set_preempt_alternate_return(StubRoutines::cont_preempt_stub());
+  }
+}
+
 inline int ContinuationHelper::frame_align_words(int size) {
 #ifdef _LP64
   return size & 1;

@@ -83,12 +99,12 @@ inline void ContinuationHelper::set_anchor_to_entry_pd(JavaFrameAnchor* anchor,
   anchor->set_last_Java_fp(entry->entry_fp());
 }

-#ifdef ASSERT
 inline void ContinuationHelper::set_anchor_pd(JavaFrameAnchor* anchor, intptr_t* sp) {
   intptr_t* fp = *(intptr_t**)(sp - frame::sender_sp_offset);
   anchor->set_last_Java_fp(fp);
 }

+#ifdef ASSERT
 inline bool ContinuationHelper::Frame::assert_frame_laid_out(frame f) {
   intptr_t* sp = f.sp();
   address pc = ContinuationHelper::return_address_at(
@@ -420,6 +420,36 @@ frame frame::sender_for_upcall_stub_frame(RegisterMap* map) const {
   return fr;
 }

+#if defined(ASSERT)
+static address get_register_address_in_stub(const frame& stub_fr, VMReg reg) {
+  RegisterMap map(nullptr,
+                  RegisterMap::UpdateMap::include,
+                  RegisterMap::ProcessFrames::skip,
+                  RegisterMap::WalkContinuation::skip);
+  stub_fr.oop_map()->update_register_map(&stub_fr, &map);
+  return map.location(reg, stub_fr.sp());
+}
+#endif
+
+JavaThread** frame::saved_thread_address(const frame& f) {
+  CodeBlob* cb = f.cb();
+  assert(cb != nullptr && cb->is_runtime_stub(), "invalid frame");
+
+  JavaThread** thread_addr;
+#ifdef COMPILER1
+  if (cb == Runtime1::blob_for(C1StubId::monitorenter_id) ||
+      cb == Runtime1::blob_for(C1StubId::monitorenter_nofpu_id)) {
+    thread_addr = (JavaThread**)(f.sp() + Runtime1::runtime_blob_current_thread_offset(f));
+  } else
+#endif
+  {
+    // c2 only saves rbp in the stub frame so nothing to do.
+    thread_addr = nullptr;
+  }
+  assert(get_register_address_in_stub(f, SharedRuntime::thread_register()) == (address)thread_addr, "wrong thread address");
+  return thread_addr;
+}
+
 //------------------------------------------------------------------------------
 // frame::verify_deopt_original_pc
 //
@@ -73,7 +73,8 @@
   sender_sp_offset = 2,

   // Interpreter frames
-  interpreter_frame_oop_temp_offset = 3, // for native calls only
+  interpreter_frame_result_handler_offset = 3, // for native calls only
+  interpreter_frame_oop_temp_offset = 2, // for native calls only

   interpreter_frame_sender_sp_offset = -1,
   // outgoing sp before a call to an invoked method
@@ -666,7 +666,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
 {
   assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");
   if (LockingMode == LM_MONITOR) {
-    call_VM(noreg,
+    call_VM_preemptable(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
             lock_reg);
   } else {

@@ -697,7 +697,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)

     if (LockingMode == LM_LIGHTWEIGHT) {
       lightweight_lock(lock_reg, obj_reg, tmp, tmp2, tmp3, slow_case);
-      b(count);
+      b(done);
     } else if (LockingMode == LM_LEGACY) {
       // Load (object->mark() | 1) into swap_reg
       ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));

@@ -747,18 +747,18 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)

       // Save the test result, for recursive case, the result is zero
       str(swap_reg, Address(lock_reg, mark_offset));
-      br(Assembler::EQ, count);
+      br(Assembler::NE, slow_case);
+
+      bind(count);
+      inc_held_monitor_count(rscratch1);
+      b(done);
     }
     bind(slow_case);

     // Call the runtime routine for slow case
-    call_VM(noreg,
+    call_VM_preemptable(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
             lock_reg);
-    b(done);
-
-    bind(count);
-    increment(Address(rthread, JavaThread::held_monitor_count_offset()));

     bind(done);
   }

@@ -804,11 +804,10 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
     // Free entry
     str(zr, Address(lock_reg, BasicObjectLock::obj_offset()));

-    if (LockingMode == LM_LIGHTWEIGHT) {
-      Label slow_case;
+    Label slow_case;
+    if (LockingMode == LM_LIGHTWEIGHT) {
       lightweight_unlock(obj_reg, header_reg, swap_reg, tmp_reg, slow_case);
-      b(count);
-      bind(slow_case);
+      b(done);
     } else if (LockingMode == LM_LEGACY) {
       // Load the old header from BasicLock structure
       ldr(header_reg, Address(swap_reg,

@@ -818,16 +817,17 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
       cbz(header_reg, count);

       // Atomic swap back the old header
-      cmpxchg_obj_header(swap_reg, header_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
+      cmpxchg_obj_header(swap_reg, header_reg, obj_reg, rscratch1, count, &slow_case);
+
+      bind(count);
+      dec_held_monitor_count(rscratch1);
+      b(done);
     }
+
+    bind(slow_case);
     // Call the runtime routine for slow case.
     str(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset())); // restore obj
     call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
-    b(done);
-
-    bind(count);
-    decrement(Address(rthread, JavaThread::held_monitor_count_offset()));

     bind(done);
     restore_bcp();
   }
@@ -1531,6 +1531,55 @@ void InterpreterMacroAssembler::call_VM_base(Register oop_result,
   restore_locals();
 }

+void InterpreterMacroAssembler::call_VM_preemptable(Register oop_result,
+                                                    address entry_point,
+                                                    Register arg_1) {
+  assert(arg_1 == c_rarg1, "");
+  Label resume_pc, not_preempted;
+
+#ifdef ASSERT
+  {
+    Label L;
+    ldr(rscratch1, Address(rthread, JavaThread::preempt_alternate_return_offset()));
+    cbz(rscratch1, L);
+    stop("Should not have alternate return address set");
+    bind(L);
+  }
+#endif /* ASSERT */
+
+  // Force freeze slow path.
+  push_cont_fastpath();
+
+  // Make VM call. In case of preemption set last_pc to the one we want to resume to.
+  adr(rscratch1, resume_pc);
+  str(rscratch1, Address(rthread, JavaThread::last_Java_pc_offset()));
+  call_VM_base(oop_result, noreg, noreg, entry_point, 1, false /*check_exceptions*/);
+
+  pop_cont_fastpath();
+
+  // Check if preempted.
+  ldr(rscratch1, Address(rthread, JavaThread::preempt_alternate_return_offset()));
+  cbz(rscratch1, not_preempted);
+  str(zr, Address(rthread, JavaThread::preempt_alternate_return_offset()));
+  br(rscratch1);
+
+  // In case of preemption, this is where we will resume once we finally acquire the monitor.
+  bind(resume_pc);
+  restore_after_resume(false /* is_native */);
+
+  bind(not_preempted);
+}
+
+void InterpreterMacroAssembler::restore_after_resume(bool is_native) {
+  lea(rscratch1, ExternalAddress(Interpreter::cont_resume_interpreter_adapter()));
+  blr(rscratch1);
+  if (is_native) {
+    // On resume we need to set up stack as expected
+    push(dtos);
+    push(ltos);
+  }
+}
+
 void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
   assert_different_registers(obj, rscratch1, mdo_addr.base(), mdo_addr.index());
   Label update, next, none;

@@ -58,6 +58,11 @@ class InterpreterMacroAssembler: public MacroAssembler {

   void load_earlyret_value(TosState state);

+  void call_VM_preemptable(Register oop_result,
+                           address entry_point,
+                           Register arg_1);
+  void restore_after_resume(bool is_native);
+
   void jump_to_entry(address entry);

   virtual void check_and_handle_popframe(Register java_thread);
@@ -39,6 +39,7 @@
 #include "gc/shared/tlab_globals.hpp"
 #include "interpreter/bytecodeHistogram.hpp"
 #include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterRuntime.hpp"
 #include "jvm.h"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"

@@ -775,6 +776,10 @@ static void pass_arg3(MacroAssembler* masm, Register arg) {
   }
 }

+static bool is_preemptable(address entry_point) {
+  return entry_point == CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter);
+}
+
 void MacroAssembler::call_VM_base(Register oop_result,
                                   Register java_thread,
                                   Register last_java_sp,

@@ -810,7 +815,12 @@ void MacroAssembler::call_VM_base(Register oop_result,
   assert(last_java_sp != rfp, "can't use rfp");

   Label l;
+  if (is_preemptable(entry_point)) {
+    // skip setting last_pc since we already set it to desired value.
+    set_last_Java_frame(last_java_sp, rfp, noreg, rscratch1);
+  } else {
     set_last_Java_frame(last_java_sp, rfp, l, rscratch1);
+  }

   // do the call, remove parameters
   MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments, &l);

@@ -5536,6 +5546,38 @@ void MacroAssembler::tlab_allocate(Register obj,
   bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
 }

+void MacroAssembler::inc_held_monitor_count(Register tmp) {
+  Address dst(rthread, JavaThread::held_monitor_count_offset());
+#ifdef ASSERT
+  ldr(tmp, dst);
+  increment(tmp);
+  str(tmp, dst);
+  Label ok;
+  tbz(tmp, 63, ok);
+  STOP("assert(held monitor count underflow)");
+  should_not_reach_here();
+  bind(ok);
+#else
+  increment(dst);
+#endif
+}
+
+void MacroAssembler::dec_held_monitor_count(Register tmp) {
+  Address dst(rthread, JavaThread::held_monitor_count_offset());
+#ifdef ASSERT
+  ldr(tmp, dst);
+  decrement(tmp);
+  str(tmp, dst);
+  Label ok;
+  tbz(tmp, 63, ok);
+  STOP("assert(held monitor count underflow)");
+  should_not_reach_here();
+  bind(ok);
+#else
+  decrement(dst);
+#endif
+}
+
 void MacroAssembler::verify_tlab() {
 #ifdef ASSERT
   if (UseTLAB && VerifyOops) {
@@ -940,8 +940,11 @@ public:
   void pop_CPU_state(bool restore_vectors = false, bool use_sve = false,
                      int sve_vector_size_in_bytes = 0, int total_predicate_in_bytes = 0);

-  void push_cont_fastpath(Register java_thread);
-  void pop_cont_fastpath(Register java_thread);
+  void push_cont_fastpath(Register java_thread = rthread);
+  void pop_cont_fastpath(Register java_thread = rthread);
+
+  void inc_held_monitor_count(Register tmp);
+  void dec_held_monitor_count(Register tmp);

   // Round up to a power of two
   void round_to(Register reg, int modulus);
@@ -1179,12 +1179,14 @@ static void gen_continuation_enter(MacroAssembler* masm,

   __ bind(call_thaw);

+  ContinuationEntry::_thaw_call_pc_offset = __ pc() - start;
   __ rt_call(CAST_FROM_FN_PTR(address, StubRoutines::cont_thaw()));
   oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
   ContinuationEntry::_return_pc_offset = __ pc() - start;
   __ post_call_nop();

   __ bind(exit);
+  ContinuationEntry::_cleanup_offset = __ pc() - start;
   continuation_enter_cleanup(masm);
   __ leave();
   __ ret(lr);

@@ -1281,6 +1283,10 @@ static void gen_continuation_yield(MacroAssembler* masm,
   oop_maps->add_gc_map(the_pc - start, map);
 }

+void SharedRuntime::continuation_enter_cleanup(MacroAssembler* masm) {
+  ::continuation_enter_cleanup(masm);
+}
+
 static void gen_special_dispatch(MacroAssembler* masm,
                                  const methodHandle& method,
                                  const BasicType* sig_bt,

@@ -1747,11 +1753,20 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   }

   // Change state to native (we save the return address in the thread, since it might not
-  // be pushed on the stack when we do a stack traversal).
-  // We use the same pc/oopMap repeatedly when we call out
+  // be pushed on the stack when we do a stack traversal). It is enough that the pc()
+  // points into the right code segment. It does not have to be the correct return pc.
+  // We use the same pc/oopMap repeatedly when we call out.

   Label native_return;
+  if (LockingMode != LM_LEGACY && method->is_object_wait0()) {
+    // For convenience we use the pc we want to resume to in case of preemption on Object.wait.
     __ set_last_Java_frame(sp, noreg, native_return, rscratch1);
+  } else {
+    intptr_t the_pc = (intptr_t) __ pc();
+    oop_maps->add_gc_map(the_pc - start, map);
+
+    __ set_last_Java_frame(sp, noreg, __ pc(), rscratch1);
+  }

   Label dtrace_method_entry, dtrace_method_entry_done;
   if (DTraceMethodProbes) {

@@ -1829,12 +1844,13 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
       // Save the test result, for recursive case, the result is zero
       __ str(swap_reg, Address(lock_reg, mark_word_offset));
       __ br(Assembler::NE, slow_path_lock);
+
+      __ bind(count);
+      __ inc_held_monitor_count(rscratch1);
     } else {
       assert(LockingMode == LM_LIGHTWEIGHT, "must be");
       __ lightweight_lock(lock_reg, obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
     }
-    __ bind(count);
-    __ increment(Address(rthread, JavaThread::held_monitor_count_offset()));

     // Slow path will re-enter here
     __ bind(lock_done);

@@ -1853,11 +1869,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,

   __ rt_call(native_func);

-  __ bind(native_return);
-
-  intptr_t return_pc = (intptr_t) __ pc();
-  oop_maps->add_gc_map(return_pc - start, map);
-
   // Verify or restore cpu control state after JNI call
   __ restore_cpu_control_state_after_jni(rscratch1, rscratch2);

@@ -1916,6 +1927,18 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   __ stlrw(rscratch1, rscratch2);
   __ bind(after_transition);

+  if (LockingMode != LM_LEGACY && method->is_object_wait0()) {
+    // Check preemption for Object.wait()
+    __ ldr(rscratch1, Address(rthread, JavaThread::preempt_alternate_return_offset()));
+    __ cbz(rscratch1, native_return);
+    __ str(zr, Address(rthread, JavaThread::preempt_alternate_return_offset()));
+    __ br(rscratch1);
+    __ bind(native_return);
+
+    intptr_t the_pc = (intptr_t) __ pc();
+    oop_maps->add_gc_map(the_pc - start, map);
+  }
+
   Label reguard;
   Label reguard_done;
   __ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));

@@ -1939,7 +1962,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
       // Simple recursive lock?
       __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
       __ cbnz(rscratch1, not_recursive);
-      __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
+      __ dec_held_monitor_count(rscratch1);
       __ b(done);
     }

@@ -1962,11 +1985,10 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
       Label count;
       __ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, count, &slow_path_unlock);
       __ bind(count);
-      __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
+      __ dec_held_monitor_count(rscratch1);
     } else {
       assert(LockingMode == LM_LIGHTWEIGHT, "");
       __ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
-      __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
     }

     // slow path re-enters here

@@ -2033,8 +2055,14 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     __ mov(c_rarg1, lock_reg);
     __ mov(c_rarg2, rthread);

-    // Not a leaf but we have last_Java_frame setup as we want
+    // Not a leaf but we have last_Java_frame setup as we want.
+    // We don't want to unmount in case of contention since that would complicate preserving
+    // the arguments that had already been marshalled into the native convention. So we force
+    // the freeze slow path to find this native wrapper frame (see recurse_freeze_native_frame())
+    // and pin the vthread. Otherwise the fast path won't find it since we don't walk the stack.
+    __ push_cont_fastpath();
     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
+    __ pop_cont_fastpath();
     restore_args(masm, total_c_args, c_arg, out_regs);

 #ifdef ASSERT

@@ -2575,6 +2603,10 @@ uint SharedRuntime::out_preserve_stack_slots() {
 }


+VMReg SharedRuntime::thread_register() {
+  return rthread->as_VMReg();
+}
+
 //------------------------------generate_handler_blob------
 //
 // Generate a special Compile2Runtime blob that saves all registers,
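
The Object.wait() handling in the native wrapper above (together with the matching template-interpreter changes further down) lets a virtual thread parked in wait() release its carrier and resume through the preempt/resume paths added in this patch. A short Java illustration of that scenario (example code only, not from the patch):

    // With JEP 491, a virtual thread parked in Object.wait() no longer pins
    // its carrier thread while it waits to be notified and to reacquire the
    // monitor.
    public class WaitNotifyExample {
        private static final Object LOCK = new Object();
        private static boolean ready = false;

        public static void main(String[] args) throws InterruptedException {
            Thread waiter = Thread.ofVirtual().start(() -> {
                synchronized (LOCK) {
                    while (!ready) {
                        try {
                            LOCK.wait(); // does not pin the carrier thread
                        } catch (InterruptedException e) {
                            Thread.currentThread().interrupt();
                            return;
                        }
                    }
                }
            });

            Thread.sleep(50); // let the waiter block in wait()
            synchronized (LOCK) {
                ready = true;
                LOCK.notifyAll();
            }
            waiter.join();
        }
    }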
@@ -116,6 +116,7 @@ inline int StackChunkFrameStream<frame_kind>::interpreter_frame_num_oops() const
   f.interpreted_frame_oop_map(&mask);
   return mask.num_oops()
          + 1 // for the mirror oop
+         + (f.interpreter_frame_method()->is_native() ? 1 : 0) // temp oop slot
          + pointer_delta_as_int((intptr_t*)f.interpreter_frame_monitor_begin(),
                                 (intptr_t*)f.interpreter_frame_monitor_end())/BasicObjectLock::size();
 }
@@ -7466,6 +7466,37 @@ class StubGenerator: public StubCodeGenerator {
     return start;
   }

+  address generate_cont_preempt_stub() {
+    if (!Continuations::enabled()) return nullptr;
+    StubCodeMark mark(this, "StubRoutines","Continuation preempt stub");
+    address start = __ pc();
+
+    __ reset_last_Java_frame(true);
+
+    // Set sp to enterSpecial frame, i.e. remove all frames copied into the heap.
+    __ ldr(rscratch2, Address(rthread, JavaThread::cont_entry_offset()));
+    __ mov(sp, rscratch2);
+
+    Label preemption_cancelled;
+    __ ldrb(rscratch1, Address(rthread, JavaThread::preemption_cancelled_offset()));
+    __ cbnz(rscratch1, preemption_cancelled);
+
+    // Remove enterSpecial frame from the stack and return to Continuation.run() to unmount.
+    SharedRuntime::continuation_enter_cleanup(_masm);
+    __ leave();
+    __ ret(lr);
+
+    // We acquired the monitor after freezing the frames so call thaw to continue execution.
+    __ bind(preemption_cancelled);
+    __ strb(zr, Address(rthread, JavaThread::preemption_cancelled_offset()));
+    __ lea(rfp, Address(sp, checked_cast<int32_t>(ContinuationEntry::size())));
+    __ lea(rscratch1, ExternalAddress(ContinuationEntry::thaw_call_pc_address()));
+    __ ldr(rscratch1, Address(rscratch1));
+    __ br(rscratch1);
+
+    return start;
+  }
+
   // In sun.security.util.math.intpoly.IntegerPolynomial1305, integers
   // are represented as long[5], with BITS_PER_LIMB = 26.
   // Pack five 26-bit limbs into three 64-bit registers.

@@ -8620,6 +8651,7 @@ class StubGenerator: public StubCodeGenerator {
     StubRoutines::_cont_thaw = generate_cont_thaw();
     StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
     StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
+    StubRoutines::_cont_preempt_stub = generate_cont_preempt_stub();
   }

   void generate_final_stubs() {
@ -607,6 +607,40 @@ address TemplateInterpreterGenerator::generate_safept_entry_for(
|
|||||||
return entry;
|
return entry;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
address TemplateInterpreterGenerator::generate_cont_resume_interpreter_adapter() {
|
||||||
|
if (!Continuations::enabled()) return nullptr;
|
||||||
|
address start = __ pc();
|
||||||
|
|
||||||
|
__ restore_bcp();
|
||||||
|
__ restore_locals();
|
||||||
|
|
||||||
|
// Restore constant pool cache
|
||||||
|
__ ldr(rcpool, Address(rfp, frame::interpreter_frame_cache_offset * wordSize));
|
||||||
|
|
||||||
|
// Restore Java expression stack pointer
|
||||||
|
__ ldr(rscratch1, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
+ __ lea(esp, Address(rfp, rscratch1, Address::lsl(Interpreter::logStackElementSize)));
+ // and NULL it as marker that esp is now tos until next java call
+ __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
+ // Restore machine SP
+ __ ldr(rscratch1, Address(rfp, frame::interpreter_frame_extended_sp_offset * wordSize));
+ __ lea(sp, Address(rfp, rscratch1, Address::lsl(LogBytesPerWord)));
+ // Restore method
+ __ ldr(rmethod, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
+ // Restore dispatch
+ uint64_t offset;
+ __ adrp(rdispatch, ExternalAddress((address)Interpreter::dispatch_table()), offset);
+ __ add(rdispatch, rdispatch, offset);
+ __ ret(lr);
+ return start;
+ }

// Helpers for commoning out cases in the various type of method entries.
//

@ -1314,6 +1348,9 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
// result handler is in r0
// set result handler
__ mov(result_handler, r0);
+ // Save it in the frame in case of preemption; we cannot rely on callee saved registers.
+ __ str(r0, Address(rfp, frame::interpreter_frame_result_handler_offset * wordSize));

// pass mirror handle if static call
{
Label L;

@ -1349,9 +1386,10 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
// pass JNIEnv
__ add(c_rarg0, rthread, in_bytes(JavaThread::jni_environment_offset()));

- // Set the last Java PC in the frame anchor to be the return address from
- // the call to the native method: this will allow the debugger to
- // generate an accurate stack trace.
+ // It is enough that the pc() points into the right code
+ // segment. It does not have to be the correct return pc.
+ // For convenience we use the pc we want to resume to in
+ // case of preemption on Object.wait.
Label native_return;
__ set_last_Java_frame(esp, rfp, native_return, rscratch1);

@ -1372,9 +1410,13 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
__ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
__ stlrw(rscratch1, rscratch2);

+ __ push_cont_fastpath();

// Call the native method.
__ blr(r10);
- __ bind(native_return);
+ __ pop_cont_fastpath();

__ get_method(rmethod);
// result potentially in r0 or v0

@ -1432,6 +1474,23 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
__ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
__ stlrw(rscratch1, rscratch2);

+ if (LockingMode != LM_LEGACY) {
+   // Check preemption for Object.wait()
+   Label not_preempted;
+   __ ldr(rscratch1, Address(rthread, JavaThread::preempt_alternate_return_offset()));
+   __ cbz(rscratch1, not_preempted);
+   __ str(zr, Address(rthread, JavaThread::preempt_alternate_return_offset()));
+   __ br(rscratch1);
+   __ bind(native_return);
+   __ restore_after_resume(true /* is_native */);
+   // reload result_handler
+   __ ldr(result_handler, Address(rfp, frame::interpreter_frame_result_handler_offset*wordSize));
+   __ bind(not_preempted);
+ } else {
+   // any pc will do so just use this one for LM_LEGACY to keep code together.
+   __ bind(native_return);
+ }

// reset_last_Java_frame
__ reset_last_Java_frame(true);
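(Illustrative note, not part of the patch: a minimal C++ sketch of the preemption check that the LockingMode != LM_LEGACY path above emits after the native call returns. The per-thread slot is the one read via JavaThread::preempt_alternate_return_offset(); resume() stands in for the generated "__ br(rscratch1)".)

  // If the virtual thread was preempted inside Object.wait(), the VM stashed the address of the
  // preempt/cleanup stub in the per-thread slot; clear it and continue there instead of running
  // the normal native-method epilogue.
  typedef void (*resume_fn)();
  void check_preempt_after_native_call(void** preempt_alternate_return_slot) {
    resume_fn resume = (resume_fn)*preempt_alternate_return_slot;
    if (resume != nullptr) {
      *preempt_alternate_return_slot = nullptr;  // mirrors "__ str(zr, ...)" above
      resume();                                  // mirrors "__ br(rscratch1)" above
    }
    // not preempted: fall through
  }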
@ -251,6 +251,10 @@ void StubAssembler::restore_live_registers_without_return() {
void Runtime1::initialize_pd() {
}

+ uint Runtime1::runtime_blob_current_thread_offset(frame f) {
+   Unimplemented();
+   return 0;
+ }

OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
OopMap* oop_map = save_live_registers(sasm);

@ -48,6 +48,10 @@ void FreezeBase::adjust_interpreted_frame_unextended_sp(frame& f) {
Unimplemented();
}

+ inline void FreezeBase::prepare_freeze_interpreted_top_frame(frame& f) {
+   Unimplemented();
+ }

inline void FreezeBase::relativize_interpreted_frame_metadata(const frame& f, const frame& hf) {
Unimplemented();
}

@ -83,6 +87,15 @@ inline void ThawBase::patch_pd(frame& f, const frame& caller) {
Unimplemented();
}

+ inline void ThawBase::patch_pd(frame& f, intptr_t* caller_sp) {
+   Unimplemented();
+ }
+
+ inline intptr_t* ThawBase::push_cleanup_continuation() {
+   Unimplemented();
+   return nullptr;
+ }

template <typename ConfigT>
inline void Thaw<ConfigT>::patch_caller_links(intptr_t* sp, intptr_t* bottom) {
Unimplemented();

@ -35,6 +35,10 @@ static inline intptr_t** link_address(const frame& f) {
return nullptr;
}

+ static inline void patch_return_pc_with_preempt_stub(frame& f) {
+   Unimplemented();
+ }

inline int ContinuationHelper::frame_align_words(int size) {
Unimplemented();
return 0;

@ -62,11 +66,11 @@ inline void ContinuationHelper::set_anchor_to_entry_pd(JavaFrameAnchor* anchor,
Unimplemented();
}

- #ifdef ASSERT
inline void ContinuationHelper::set_anchor_pd(JavaFrameAnchor* anchor, intptr_t* sp) {
Unimplemented();
}

+ #ifdef ASSERT
inline bool ContinuationHelper::Frame::assert_frame_laid_out(frame f) {
Unimplemented();
return false;

@ -325,6 +325,11 @@ bool frame::upcall_stub_frame_is_first() const {
return false;
}

+ JavaThread** frame::saved_thread_address(const frame& f) {
+   Unimplemented();
+   return nullptr;
+ }

//------------------------------------------------------------------------------
// frame::verify_deopt_original_pc
//

@ -1358,6 +1358,11 @@ uint SharedRuntime::out_preserve_stack_slots() {
return 0;
}

+ VMReg SharedRuntime::thread_register() {
+   Unimplemented();
+   return nullptr;
+ }

//------------------------------generate_deopt_blob----------------------------
void SharedRuntime::generate_deopt_blob() {
ResourceMark rm;

@ -459,6 +459,10 @@ address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state,
return entry;
}

+ address TemplateInterpreterGenerator::generate_cont_resume_interpreter_adapter() {
+   return nullptr;
+ }

// Helpers for commoning out cases in the various type of method entries.
//

@ -1816,6 +1816,7 @@ class Assembler : public AbstractAssembler {
relocInfo::relocType rt = relocInfo::none);

// helper function for b, bcxx
+ inline bool is_branch(address a);
inline bool is_within_range_of_b(address a, address pc);
inline bool is_within_range_of_bcxx(address a, address pc);

@ -484,6 +484,12 @@ inline void Assembler::bclrl( int boint, int biint, int bhint, relocInfo::relocT
inline void Assembler::bcctr( int boint, int biint, int bhint, relocInfo::relocType rt) { emit_data(BCCTR_OPCODE| bo(boint) | bi(biint) | bh(bhint) | aa(0) | lk(0), rt); }
inline void Assembler::bcctrl(int boint, int biint, int bhint, relocInfo::relocType rt) { emit_data(BCCTR_OPCODE| bo(boint) | bi(biint) | bh(bhint) | aa(0) | lk(1), rt); }

+ inline bool Assembler::is_branch(address a) {
+   int32_t instr = *(int32_t*) a;
+   int op = inv_op_ppc(instr);
+   return op == b_op || op == bc_op;
+ }

// helper function for b
inline bool Assembler::is_within_range_of_b(address a, address pc) {
// Guard against illegal branch targets, e.g. -1 (see CompiledDirectCall and ad-file).

@ -133,8 +133,10 @@ void C1_MacroAssembler::lock_object(Register Rmark, Register Roop, Register Rbox
}

bind(done);
+ if (LockingMode == LM_LEGACY) {
inc_held_monitor_count(Rmark /*tmp*/);
+ }
}

void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rbox, Label& slow_case) {

@ -179,8 +181,10 @@ void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rb

// Done
bind(done);
+ if (LockingMode == LM_LEGACY) {
dec_held_monitor_count(Rmark /*tmp*/);
+ }
}

void C1_MacroAssembler::try_allocate(

@ -64,7 +64,8 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result,

address return_pc = call_c(entry_point);

- reset_last_Java_frame();
+ // Last java sp can be null when the RT call was preempted
+ reset_last_Java_frame(false /* check_last_java_sp */);

// Check for pending exceptions.
{

@ -257,6 +258,11 @@ void Runtime1::initialize_pd() {
frame_size_in_bytes = align_up(sp_offset, frame::alignment_in_bytes);
}

+ uint Runtime1::runtime_blob_current_thread_offset(frame f) {
+   // On PPC virtual threads don't save the JavaThread* in their context (e.g. C1 stub frames).
+   ShouldNotCallThis();
+   return 0;
+ }

OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
// Make a frame and preserve the caller's caller-save registers.

@ -71,6 +71,12 @@ void FreezeBase::adjust_interpreted_frame_unextended_sp(frame& f) {
// nothing to do
}

+ inline void FreezeBase::prepare_freeze_interpreted_top_frame(frame& f) {
+   // nothing to do
+   DEBUG_ONLY( intptr_t* lspp = (intptr_t*) &(f.get_ijava_state()->top_frame_sp); )
+   assert(*lspp == f.unextended_sp() - f.fp(), "should be " INTPTR_FORMAT " usp:" INTPTR_FORMAT " fp:" INTPTR_FORMAT, *lspp, p2i(f.unextended_sp()), p2i(f.fp()));
+ }

inline void FreezeBase::relativize_interpreted_frame_metadata(const frame& f, const frame& hf) {
intptr_t* vfp = f.fp();
intptr_t* hfp = hf.fp();

@ -350,6 +356,7 @@ inline void Thaw<ConfigT>::patch_caller_links(intptr_t* sp, intptr_t* bottom) {
if (is_entry_frame) {
callers_sp = _cont.entryFP();
} else {
+   assert(!Interpreter::contains(pc), "sp:" PTR_FORMAT " pc:" PTR_FORMAT, p2i(sp), p2i(pc));
CodeBlob* cb = CodeCache::find_blob_fast(pc);
callers_sp = sp + cb->frame_size();
}

@ -480,8 +487,8 @@ inline frame ThawBase::new_entry_frame() {
template<typename FKind> frame ThawBase::new_stack_frame(const frame& hf, frame& caller, bool bottom) {
assert(FKind::is_instance(hf), "");

- assert(is_aligned(caller.fp(), frame::frame_alignment), "");
- assert(is_aligned(caller.sp(), frame::frame_alignment), "");
+ assert(is_aligned(caller.fp(), frame::frame_alignment), PTR_FORMAT, p2i(caller.fp()));
+ // caller.sp() can be unaligned. This is fixed below.
if (FKind::interpreted) {
// Note: we have to overlap with the caller, at least if it is interpreted, to match the
// max_thawing_size calculation during freeze. See also comment above.

@ -510,7 +517,7 @@ template<typename FKind> frame ThawBase::new_stack_frame(const frame& hf, frame&
return f;
} else {
int fsize = FKind::size(hf);
- int argsize = hf.compiled_frame_stack_argsize();
+ int argsize = FKind::stack_argsize(hf);
intptr_t* frame_sp = caller.sp() - fsize;

if ((bottom && argsize > 0) || caller.is_interpreted_frame()) {

@ -543,12 +550,29 @@ inline void ThawBase::derelativize_interpreted_frame_metadata(const frame& hf, c
// Keep top_frame_sp relativized.
}

+ inline intptr_t* ThawBase::push_cleanup_continuation() {
+   frame enterSpecial = new_entry_frame();
+   frame::common_abi* enterSpecial_abi = (frame::common_abi*)enterSpecial.sp();
+
+   enterSpecial_abi->lr = (intptr_t)ContinuationEntry::cleanup_pc();
+
+   log_develop_trace(continuations, preempt)("push_cleanup_continuation enterSpecial sp: " INTPTR_FORMAT " cleanup pc: " INTPTR_FORMAT,
+                                             p2i(enterSpecial_abi),
+                                             p2i(ContinuationEntry::cleanup_pc()));
+
+   return enterSpecial.sp();
+ }

inline void ThawBase::patch_pd(frame& f, const frame& caller) {
patch_callee_link(caller, caller.fp());
// Prevent assertion if f gets deoptimized right away before it's fully initialized
f.mark_not_fully_initialized();
}

+ inline void ThawBase::patch_pd(frame& f, intptr_t* caller_sp) {
+   assert(f.own_abi()->callers_sp == (uint64_t)caller_sp, "should have been fixed by patch_caller_links");
+ }

//
// Interpreter Calling Procedure on PPC
//

@ -27,10 +27,18 @@

#include "runtime/continuationHelper.hpp"

- template<typename FKind>
- static inline intptr_t** link_address(const frame& f) {
-   Unimplemented();
-   return nullptr;
+ static inline void patch_return_pc_with_preempt_stub(frame& f) {
+   if (f.is_runtime_frame()) {
+     // Patch the pc of the now old last Java frame (we already set the anchor to enterSpecial)
+     // so that when target goes back to Java it will actually return to the preempt cleanup stub.
+     frame::common_abi* abi = (frame::common_abi*)f.sp();
+     abi->lr = (uint64_t)StubRoutines::cont_preempt_stub();
+   } else {
+     // The target will check for preemption once it returns to the interpreter
+     // or the native wrapper code and will manually jump to the preempt stub.
+     JavaThread *thread = JavaThread::current();
+     thread->set_preempt_alternate_return(StubRoutines::cont_preempt_stub());
+   }
}

inline int ContinuationHelper::frame_align_words(int size) {

@ -59,11 +67,11 @@ inline void ContinuationHelper::set_anchor_to_entry_pd(JavaFrameAnchor* anchor,
// nothing to do
}

- #ifdef ASSERT
inline void ContinuationHelper::set_anchor_pd(JavaFrameAnchor* anchor, intptr_t* sp) {
// nothing to do
}

+ #ifdef ASSERT
inline bool ContinuationHelper::Frame::assert_frame_laid_out(frame f) {
intptr_t* sp = f.sp();
address pc = *(address*)(sp - frame::sender_sp_ret_address_offset());

@ -243,6 +243,11 @@ frame frame::sender_for_upcall_stub_frame(RegisterMap* map) const {
return fr;
}

+ JavaThread** frame::saved_thread_address(const frame& f) {
+   // The current thread (JavaThread*) is never stored on the stack
+   return nullptr;
+ }

frame frame::sender_for_interpreter_frame(RegisterMap *map) const {
// This is the sp before any possible extension (adapter/locals).
intptr_t* unextended_sp = interpreter_frame_sender_sp();

@ -53,7 +53,10 @@ inline void frame::setup(kind knd) {
// The back link for compiled frames on the heap is not valid
if (is_heap_frame()) {
// fp for interpreted frames should have been derelativized and passed to the constructor
- assert(is_compiled_frame(), "");
+ assert(is_compiled_frame()
+        || is_native_frame() // native wrapper (nmethod) for j.l.Object::wait0
+        || is_runtime_frame(), // e.g. Runtime1::monitorenter, SharedRuntime::complete_monitor_locking_C
+        "sp:" PTR_FORMAT " fp:" PTR_FORMAT " name:%s", p2i(_sp), p2i(_unextended_sp + _cb->frame_size()), _cb->name());
// The back link for compiled frames on the heap is invalid.
_fp = _unextended_sp + _cb->frame_size();
} else {

@ -49,6 +49,14 @@ class InterpreterMacroAssembler: public MacroAssembler {
virtual void check_and_handle_popframe(Register scratch_reg);
virtual void check_and_handle_earlyret(Register scratch_reg);

+ void call_VM_preemptable(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
+ void restore_after_resume(Register fp);
+ // R22 and R31 are preserved when a vthread gets preempted in the interpreter.
+ // The interpreter already assumes that these registers are nonvolatile across native calls.
+ bool nonvolatile_accross_vthread_preemtion(Register r) const {
+   return r->is_nonvolatile() && ((r == R22) || (r == R31));
+ }

// Base routine for all dispatches.
void dispatch_base(TosState state, address* table);

@ -182,7 +190,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
// Special call VM versions that check for exceptions and forward exception
// via short cut (not via expensive forward exception stub).
void check_and_forward_exception(Register Rscratch1, Register Rscratch2);
- void call_VM(Register oop_result, address entry_point, bool check_exceptions = true);
+ void call_VM(Register oop_result, address entry_point, bool check_exceptions = true, Label* last_java_pc = nullptr);
void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);

@ -932,7 +932,7 @@ void InterpreterMacroAssembler::remove_activation(TosState state,
//
void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
if (LockingMode == LM_MONITOR) {
- call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), monitor);
+ call_VM_preemptable(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), monitor);
} else {
// template code (for LM_LEGACY):
//

@ -953,8 +953,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
const Register current_header = R9_ARG7;
const Register tmp = R10_ARG8;

- Label count_locking, done;
- Label cas_failed, slow_case;
+ Label count_locking, done, slow_case, cas_failed;

assert_different_registers(header, object_mark_addr, current_header, tmp);

@ -969,7 +968,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {

if (LockingMode == LM_LIGHTWEIGHT) {
lightweight_lock(monitor, object, header, tmp, slow_case);
- b(count_locking);
+ b(done);
} else if (LockingMode == LM_LEGACY) {
// Load markWord from object into header.
ld(header, oopDesc::mark_offset_in_bytes(), object);

@ -1035,12 +1034,15 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
// None of the above fast optimizations worked so we have to get into the
// slow case of monitor enter.
bind(slow_case);
- call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), monitor);
- b(done);
+ call_VM_preemptable(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), monitor);
// }

+ if (LockingMode == LM_LEGACY) {
+   b(done);
align(32, 12);
bind(count_locking);
inc_held_monitor_count(current_header /*tmp*/);
+ }
bind(done);
}
}

@ -1137,7 +1139,9 @@ void InterpreterMacroAssembler::unlock_object(Register monitor) {
bind(free_slot);
li(R0, 0);
std(R0, in_bytes(BasicObjectLock::obj_offset()), monitor);
+ if (LockingMode == LM_LEGACY) {
dec_held_monitor_count(current_header /*tmp*/);
+ }
bind(done);
}
}

@ -2133,10 +2137,10 @@ void InterpreterMacroAssembler::check_and_forward_exception(Register Rscratch1,
bind(Ldone);
}

- void InterpreterMacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions) {
+ void InterpreterMacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions, Label* last_java_pc) {
save_interpreter_state(R11_scratch1);

- MacroAssembler::call_VM(oop_result, entry_point, false);
+ MacroAssembler::call_VM(oop_result, entry_point, false /*check_exceptions*/, last_java_pc);

restore_interpreter_state(R11_scratch1, /*bcp_and_mdx_only*/ true);

@ -2155,6 +2159,74 @@ void InterpreterMacroAssembler::call_VM(Register oop_result, address entry_point
call_VM(oop_result, entry_point, check_exceptions);
}

+ void InterpreterMacroAssembler::call_VM_preemptable(Register oop_result, address entry_point,
+                                                     Register arg_1, bool check_exceptions) {
+   if (!Continuations::enabled()) {
+     call_VM(oop_result, entry_point, arg_1, check_exceptions);
+     return;
+   }
+
+   Label resume_pc, not_preempted;
+
+   DEBUG_ONLY(ld(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread));
+   DEBUG_ONLY(cmpdi(CCR0, R0, 0));
+   asm_assert_eq("Should not have alternate return address set");
+
+   // Preserve 2 registers
+   assert(nonvolatile_accross_vthread_preemtion(R31) && nonvolatile_accross_vthread_preemtion(R22), "");
+   ld(R3_ARG1, _abi0(callers_sp), R1_SP); // load FP
+   std(R31, _ijava_state_neg(lresult), R3_ARG1);
+   std(R22, _ijava_state_neg(fresult), R3_ARG1);
+
+   // We set resume_pc as last java pc. It will be saved if the vthread gets preempted.
+   // Later execution will continue right there.
+   mr_if_needed(R4_ARG2, arg_1);
+   push_cont_fastpath();
+   call_VM(oop_result, entry_point, false /*check_exceptions*/, &resume_pc /* last_java_pc */);
+   pop_cont_fastpath();
+
+   // Jump to handler if the call was preempted
+   ld(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread);
+   cmpdi(CCR0, R0, 0);
+   beq(CCR0, not_preempted);
+   mtlr(R0);
+   li(R0, 0);
+   std(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread);
+   blr();
+
+   bind(resume_pc); // Location to resume execution
+   restore_after_resume(noreg /* fp */);
+   bind(not_preempted);
+ }
+
+ void InterpreterMacroAssembler::restore_after_resume(Register fp) {
+   if (!Continuations::enabled()) return;
+
+   const address resume_adapter = TemplateInterpreter::cont_resume_interpreter_adapter();
+   add_const_optimized(R31, R29_TOC, MacroAssembler::offset_to_global_toc(resume_adapter));
+   mtctr(R31);
+   bctrl();
+   // Restore registers that are preserved across vthread preemption
+   assert(nonvolatile_accross_vthread_preemtion(R31) && nonvolatile_accross_vthread_preemtion(R22), "");
+   ld(R3_ARG1, _abi0(callers_sp), R1_SP); // load FP
+   ld(R31, _ijava_state_neg(lresult), R3_ARG1);
+   ld(R22, _ijava_state_neg(fresult), R3_ARG1);
+ #ifdef ASSERT
+   // Assert FP is in R11_scratch1 (see generate_cont_resume_interpreter_adapter())
+   {
+     Label ok;
+     ld(R12_scratch2, 0, R1_SP); // load fp
+     cmpd(CCR0, R12_scratch2, R11_scratch1);
+     beq(CCR0, ok);
+     stop(FILE_AND_LINE ": FP is expected in R11_scratch1");
+     bind(ok);
+   }
+ #endif
+   if (fp != noreg && fp != R11_scratch1) {
+     mr(fp, R11_scratch1);
+   }
+ }

void InterpreterMacroAssembler::call_VM(Register oop_result, address entry_point,
                                        Register arg_1, Register arg_2,
                                        bool check_exceptions) {
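(Illustrative note, not part of the patch: the call_VM_preemptable() sequence above follows a small protocol; restated here as commented pseudocode, using only the HotSpot names that appear in the code itself.)

  // 1. Save R31/R22 into the caller's interpreter frame (lresult/fresult slots); a preempted
  //    vthread does not keep register state across unmount/remount.
  // 2. Pass the local label 'resume_pc' as last_Java_pc, so a freeze during the VM call records
  //    exactly where interpretation must continue.
  // 3. Do the VM call (e.g. InterpreterRuntime::monitorenter) bracketed by push/pop_cont_fastpath().
  // 4. After the call, if JavaThread::preempt_alternate_return is set, the vthread was preempted:
  //    clear the field and branch to it (the cont_preempt_stub); otherwise fall through.
  // 5. When the vthread is remounted, execution resumes at 'resume_pc', where restore_after_resume()
  //    calls the cont_resume_interpreter_adapter and reloads the two saved registers.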
@ -31,6 +31,7 @@
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
+ #include "interpreter/interpreterRuntime.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_ppc.hpp"
#include "oops/compressedKlass.inline.hpp"

@ -115,7 +116,8 @@ void MacroAssembler::align_prefix() {

// Issue instructions that calculate given TOC from global TOC.
void MacroAssembler::calculate_address_from_global_toc(Register dst, address addr, bool hi16, bool lo16,
-                                                       bool add_relocation, bool emit_dummy_addr) {
+                                                       bool add_relocation, bool emit_dummy_addr,
+                                                       bool add_addr_to_reloc) {
int offset = -1;
if (emit_dummy_addr) {
offset = -128; // dummy address

@ -129,7 +131,10 @@ void MacroAssembler::calculate_address_from_global_toc(Register dst, address add
if (lo16) {
if (add_relocation) {
// Relocate at the addi to avoid confusion with a load from the method's TOC.
- relocate(internal_word_Relocation::spec(addr));
+ RelocationHolder rh = add_addr_to_reloc ?
+   internal_word_Relocation::spec(addr) :
+   internal_word_Relocation::spec_for_immediate();
+ relocate(rh);
}
addi(dst, dst, MacroAssembler::largeoffset_si16_si16_lo(offset));
}

@ -714,6 +719,7 @@ address MacroAssembler::get_dest_of_bxx64_patchable_at(address instruction_addr,
}
}

+ #ifdef ASSERT
void MacroAssembler::clobber_volatile_gprs(Register excluded_register) {
const int magic_number = 0x42;

@ -729,6 +735,37 @@ void MacroAssembler::clobber_volatile_gprs(Register excluded_register) {
}
}

+ void MacroAssembler::clobber_nonvolatile_registers() {
+   BLOCK_COMMENT("clobber nonvolatile registers {");
+   static const Register regs[] = {
+     R14,
+     R15,
+     // don't zap R16_thread
+     R17,
+     R18,
+     R19,
+     R20,
+     R21,
+     R22,
+     R23,
+     R24,
+     R25,
+     R26,
+     R27,
+     R28,
+     // don't zap R29_TOC
+     R30,
+     R31
+   };
+   Register bad = regs[0];
+   load_const_optimized(bad, 0xbad0101babe11111);
+   for (uint32_t i = 1; i < (sizeof(regs) / sizeof(Register)); i++) {
+     mr(regs[i], bad);
+   }
+   BLOCK_COMMENT("} clobber nonvolatile registers");
+ }
+ #endif // ASSERT

void MacroAssembler::clobber_carg_stack_slots(Register tmp) {
const int magic_number = 0x43;

@ -1288,13 +1325,14 @@ int MacroAssembler::ic_check(int end_alignment) {
void MacroAssembler::call_VM_base(Register oop_result,
                                  Register last_java_sp,
                                  address entry_point,
-                                  bool check_exceptions) {
+                                  bool check_exceptions,
+                                  Label* last_java_pc) {
BLOCK_COMMENT("call_VM {");
// Determine last_java_sp register.
if (!last_java_sp->is_valid()) {
last_java_sp = R1_SP;
}
- set_top_ijava_frame_at_SP_as_last_Java_frame(last_java_sp, R11_scratch1);
+ set_top_ijava_frame_at_SP_as_last_Java_frame(last_java_sp, R11_scratch1, last_java_pc);

// ARG1 must hold thread address.
mr(R3_ARG1, R16_thread);

@ -1323,8 +1361,8 @@ void MacroAssembler::call_VM_leaf_base(address entry_point) {
BLOCK_COMMENT("} call_VM_leaf");
}

- void MacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions) {
-   call_VM_base(oop_result, noreg, entry_point, check_exceptions);
+ void MacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions, Label* last_java_pc) {
+   call_VM_base(oop_result, noreg, entry_point, check_exceptions, last_java_pc);
}

void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1,

@ -2625,15 +2663,15 @@ void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register

// Handle existing monitor.
bind(object_has_monitor);
- // The object's monitor m is unlocked iff m->owner is null,
- // otherwise m->owner may contain a thread or a stack address.

- // Try to CAS m->owner from null to current thread.
+ // Try to CAS owner (no owner => current thread's _lock_id).
addi(temp, displaced_header, in_bytes(ObjectMonitor::owner_offset()) - markWord::monitor_value);
+ Register thread_id = displaced_header;
+ ld(thread_id, in_bytes(JavaThread::lock_id_offset()), R16_thread);
cmpxchgd(/*flag=*/flag,
/*current_value=*/current_header,
/*compare_value=*/(intptr_t)0,
- /*exchange_value=*/R16_thread,
+ /*exchange_value=*/thread_id,
/*where=*/temp,
MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
MacroAssembler::cmpxchgx_hint_acquire_lock());

@ -2643,7 +2681,7 @@ void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register
beq(flag, success);

// Check for recursive locking.
- cmpd(flag, current_header, R16_thread);
+ cmpd(flag, current_header, thread_id);
bne(flag, failure);

// Current thread already owns the lock. Just increment recursions.

@ -2652,10 +2690,12 @@ void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register
addi(recursions, recursions, 1);
std(recursions, in_bytes(ObjectMonitor::recursions_offset() - ObjectMonitor::owner_offset()), temp);

- // flag == EQ indicates success, increment held monitor count
+ // flag == EQ indicates success, increment held monitor count if LM_LEGACY is enabled
// flag == NE indicates failure
bind(success);
+ if (LockingMode == LM_LEGACY) {
inc_held_monitor_count(temp);
+ }
#ifdef ASSERT
// Check that unlocked label is reached with flag == EQ.
Label flag_correct;

@ -2675,7 +2715,7 @@ void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Registe
Register temp, Register displaced_header, Register current_header) {
assert(LockingMode != LM_LIGHTWEIGHT, "uses fast_unlock_lightweight");
assert_different_registers(oop, box, temp, displaced_header, current_header);
- Label success, failure, object_has_monitor, notRecursive;
+ Label success, failure, object_has_monitor, not_recursive;

if (LockingMode == LM_LEGACY) {
// Find the lock address and load the displaced header from the stack.

@ -2721,7 +2761,7 @@ void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Registe

ld(displaced_header, in_bytes(ObjectMonitor::recursions_offset()), current_header);
addic_(displaced_header, displaced_header, -1);
- blt(CCR0, notRecursive); // Not recursive if negative after decrement.
+ blt(CCR0, not_recursive); // Not recursive if negative after decrement.

// Recursive unlock
std(displaced_header, in_bytes(ObjectMonitor::recursions_offset()), current_header);

@ -2730,7 +2770,7 @@ void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Registe
}
b(success);

- bind(notRecursive);
+ bind(not_recursive);

// Set owner to null.
// Release to satisfy the JMM

@ -2760,10 +2800,12 @@ void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Registe
std(current_header, in_bytes(JavaThread::unlocked_inflated_monitor_offset()), R16_thread);
b(failure); // flag == NE

- // flag == EQ indicates success, decrement held monitor count
+ // flag == EQ indicates success, decrement held monitor count if LM_LEGACY is enabled
// flag == NE indicates failure
bind(success);
+ if (LockingMode == LM_LEGACY) {
dec_held_monitor_count(temp);
+ }
#ifdef ASSERT
// Check that unlocked label is reached with flag == EQ.
Label flag_correct;

@ -2782,6 +2824,7 @@ void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Registe
void MacroAssembler::compiler_fast_lock_lightweight_object(ConditionRegister flag, Register obj, Register box,
Register tmp1, Register tmp2, Register tmp3) {
assert_different_registers(obj, box, tmp1, tmp2, tmp3);
+ assert(UseObjectMonitorTable || tmp3 == noreg, "tmp3 not needed");
assert(flag == CCR0, "bad condition register");

// Handle inflated monitor.

@ -2804,8 +2847,7 @@ void MacroAssembler::compiler_fast_lock_lightweight_object(ConditionRegister fla
bne(CCR0, slow_path);
}

- const Register mark = tmp1;
- const Register t = tmp3; // Usage of R0 allowed!
+ Register mark = tmp1;

{ // Lightweight locking

@ -2823,15 +2865,15 @@ void MacroAssembler::compiler_fast_lock_lightweight_object(ConditionRegister fla
// when the lock stack is empty because of the _bad_oop_sentinel field.

// Check if recursive.
- subi(t, top, oopSize);
- ldx(t, R16_thread, t);
- cmpd(CCR0, obj, t);
+ subi(R0, top, oopSize);
+ ldx(R0, R16_thread, R0);
+ cmpd(CCR0, obj, R0);
beq(CCR0, push);

// Check for monitor (0b10) or locked (0b00).
ld(mark, oopDesc::mark_offset_in_bytes(), obj);
- andi_(t, mark, markWord::lock_mask_in_place);
- cmpldi(CCR0, t, markWord::unlocked_value);
+ andi_(R0, mark, markWord::lock_mask_in_place);
+ cmpldi(CCR0, R0, markWord::unlocked_value);
bgt(CCR0, inflated);
bne(CCR0, slow_path);

@ -2854,13 +2896,15 @@ void MacroAssembler::compiler_fast_lock_lightweight_object(ConditionRegister fla

// mark contains the tagged ObjectMonitor*.
const uintptr_t monitor_tag = markWord::monitor_value;
- const Register monitor = mark;
+ const Register monitor = UseObjectMonitorTable ? tmp1 : noreg;
const Register owner_addr = tmp2;
+ const Register thread_id = UseObjectMonitorTable ? tmp3 : tmp1;
Label monitor_locked;

if (!UseObjectMonitorTable) {
// Compute owner address.
addi(owner_addr, mark, in_bytes(ObjectMonitor::owner_offset()) - monitor_tag);
+ mark = noreg;
} else {
Label monitor_found;
Register cache_addr = tmp2;

@ -2870,8 +2914,8 @@ void MacroAssembler::compiler_fast_lock_lightweight_object(ConditionRegister fla

const int num_unrolled = 2;
for (int i = 0; i < num_unrolled; i++) {
- ld(tmp3, 0, cache_addr);
- cmpd(CCR0, tmp3, obj);
+ ld(R0, 0, cache_addr);
+ cmpd(CCR0, R0, obj);
beq(CCR0, monitor_found);
addi(cache_addr, cache_addr, in_bytes(OMCache::oop_to_oop_difference()));
}

@ -2882,13 +2926,13 @@ void MacroAssembler::compiler_fast_lock_lightweight_object(ConditionRegister fla
bind(loop);

// Check for match.
- ld(tmp3, 0, cache_addr);
- cmpd(CCR0, tmp3, obj);
+ ld(R0, 0, cache_addr);
+ cmpd(CCR0, R0, obj);
beq(CCR0, monitor_found);

// Search until null encountered, guaranteed _null_sentinel at end.
addi(cache_addr, cache_addr, in_bytes(OMCache::oop_to_oop_difference()));
- cmpdi(CCR1, tmp3, 0);
+ cmpdi(CCR1, R0, 0);
bne(CCR1, loop);
// Cache Miss, CCR0.NE set from cmp above
b(slow_path);

@ -2900,18 +2944,20 @@ void MacroAssembler::compiler_fast_lock_lightweight_object(ConditionRegister fla
addi(owner_addr, monitor, in_bytes(ObjectMonitor::owner_offset()));
}

- // CAS owner (null => current thread).
+ // Try to CAS owner (no owner => current thread's _lock_id).
+ assert_different_registers(thread_id, monitor, owner_addr, box, R0);
+ ld(thread_id, in_bytes(JavaThread::lock_id_offset()), R16_thread);
cmpxchgd(/*flag=*/CCR0,
- /*current_value=*/t,
+ /*current_value=*/R0,
/*compare_value=*/(intptr_t)0,
- /*exchange_value=*/R16_thread,
+ /*exchange_value=*/thread_id,
/*where=*/owner_addr,
MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
MacroAssembler::cmpxchgx_hint_acquire_lock());
beq(CCR0, monitor_locked);

// Check if recursive.
- cmpd(CCR0, t, R16_thread);
+ cmpd(CCR0, R0, thread_id);
bne(CCR0, slow_path);

// Recursive.

@ -2934,7 +2980,6 @@ void MacroAssembler::compiler_fast_lock_lightweight_object(ConditionRegister fla
}

bind(locked);
- inc_held_monitor_count(tmp1);

#ifdef ASSERT
// Check that locked label is reached with flag == EQ.

@ -3109,7 +3154,6 @@ void MacroAssembler::compiler_fast_unlock_lightweight_object(ConditionRegister f
}

bind(unlocked);
- dec_held_monitor_count(t);

#ifdef ASSERT
// Check that unlocked label is reached with flag == EQ.
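(Illustrative note, not part of the patch: the inflated-monitor fast paths above now CAS the owner field from "no owner" to the current thread's _lock_id, read via JavaThread::lock_id_offset(), instead of the JavaThread*, so monitor ownership stays meaningful while a virtual thread is unmounted. A minimal stand-alone sketch, assuming a 64-bit owner word:)

  #include <atomic>
  #include <cstdint>

  struct Monitor { std::atomic<int64_t> owner{0}; };   // 0 == no owner

  // Returns true if lock_id now owns the monitor, or already owned it (recursive case;
  // the real code then bumps the recursions field as shown above).
  bool try_lock_fast_path(Monitor* m, int64_t lock_id) {
    int64_t expected = 0;
    if (m->owner.compare_exchange_strong(expected, lock_id)) return true;
    return expected == lock_id;
  }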
@ -3191,9 +3235,11 @@ void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Ja
std(last_Java_sp, in_bytes(JavaThread::last_Java_sp_offset()), R16_thread);
}

- void MacroAssembler::reset_last_Java_frame(void) {
+ void MacroAssembler::reset_last_Java_frame(bool check_last_java_sp) {
+ if (check_last_java_sp) {
asm_assert_mem8_isnot_zero(in_bytes(JavaThread::last_Java_sp_offset()),
R16_thread, "SP was not set, still zero");
+ }

BLOCK_COMMENT("reset_last_Java_frame {");
li(R0, 0);

@ -3206,14 +3252,14 @@ void MacroAssembler::reset_last_Java_frame(void) {
BLOCK_COMMENT("} reset_last_Java_frame");
}

- void MacroAssembler::set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1) {
+ void MacroAssembler::set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1, Label* jpc) {
assert_different_registers(sp, tmp1);

- // sp points to a TOP_IJAVA_FRAME, retrieve frame's PC via
- // TOP_IJAVA_FRAME_ABI.
- // FIXME: assert that we really have a TOP_IJAVA_FRAME here!
- address entry = pc();
- load_const_optimized(tmp1, entry);
+ if (jpc == nullptr || jpc->is_bound()) {
+   load_const_optimized(tmp1, jpc == nullptr ? pc() : target(*jpc));
+ } else {
+   load_const(tmp1, *jpc, R12_scratch2);
+ }

set_last_Java_frame(/*sp=*/sp, /*pc=*/tmp1);
}

@ -4530,9 +4576,9 @@ void MacroAssembler::asm_assert(bool check_equal, const char *msg) {
#endif
}

+ #ifdef ASSERT
void MacroAssembler::asm_assert_mems_zero(bool check_equal, int size, int mem_offset,
Register mem_base, const char* msg) {
- #ifdef ASSERT
switch (size) {
case 4:
lwz(R0, mem_offset, mem_base);

@ -4546,8 +4592,8 @@ void MacroAssembler::asm_assert_mems_zero(bool check_equal, int size, int mem_of
ShouldNotReachHere();
}
asm_assert(check_equal, msg);
- #endif // ASSERT
}
+ #endif // ASSERT

void MacroAssembler::verify_coop(Register coop, const char* msg) {
if (!VerifyOops) { return; }

@ -4691,6 +4737,8 @@ void MacroAssembler::cache_wbsync(bool is_presync) {
}

void MacroAssembler::push_cont_fastpath() {
+ if (!Continuations::enabled()) return;
+
Label done;
ld_ptr(R0, JavaThread::cont_fastpath_offset(), R16_thread);
cmpld(CCR0, R1_SP, R0);

@ -4700,6 +4748,8 @@ void MacroAssembler::push_cont_fastpath() {
}

void MacroAssembler::pop_cont_fastpath() {
+ if (!Continuations::enabled()) return;
+
Label done;
ld_ptr(R0, JavaThread::cont_fastpath_offset(), R16_thread);
cmpld(CCR0, R1_SP, R0);

@ -4711,6 +4761,7 @@ void MacroAssembler::pop_cont_fastpath() {

// Note: Must preserve CCR0 EQ (invariant).
void MacroAssembler::inc_held_monitor_count(Register tmp) {
+ assert(LockingMode == LM_LEGACY, "");
ld(tmp, in_bytes(JavaThread::held_monitor_count_offset()), R16_thread);
#ifdef ASSERT
Label ok;

@ -4726,6 +4777,7 @@ void MacroAssembler::inc_held_monitor_count(Register tmp) {

// Note: Must preserve CCR0 EQ (invariant).
void MacroAssembler::dec_held_monitor_count(Register tmp) {
+ assert(LockingMode == LM_LEGACY, "");
ld(tmp, in_bytes(JavaThread::held_monitor_count_offset()), R16_thread);
#ifdef ASSERT
Label ok;

@ -115,7 +115,13 @@ class MacroAssembler: public Assembler {
// Global TOC.
void calculate_address_from_global_toc(Register dst, address addr,
bool hi16 = true, bool lo16 = true,
- bool add_relocation = true, bool emit_dummy_addr = false);
+ bool add_relocation = true, bool emit_dummy_addr = false,
+ bool add_addr_to_reloc = true);
+ void calculate_address_from_global_toc(Register dst, Label& addr,
+ bool hi16 = true, bool lo16 = true,
+ bool add_relocation = true, bool emit_dummy_addr = false) {
+   calculate_address_from_global_toc(dst, target(addr), hi16, lo16, add_relocation, emit_dummy_addr, false);
+ }
inline void calculate_address_from_global_toc_hi16only(Register dst, address addr) {
calculate_address_from_global_toc(dst, addr, true, false);
};

@ -284,7 +290,10 @@ class MacroAssembler: public Assembler {
// Clobbers all volatile, (non-floating-point) general-purpose registers for debugging purposes.
// This is especially useful for making calls to the JRT in places in which this hasn't been done before;
// e.g. with the introduction of LRBs (load reference barriers) for concurrent garbage collection.
- void clobber_volatile_gprs(Register excluded_register = noreg);
+ void clobber_volatile_gprs(Register excluded_register = noreg) NOT_DEBUG_RETURN;
+ // Load bad values into registers that are nonvolatile according to the ABI except R16_thread and R29_TOC.
+ // This is done after vthread preemption and before vthread resume.
+ void clobber_nonvolatile_registers() NOT_DEBUG_RETURN;
void clobber_carg_stack_slots(Register tmp);

void save_nonvolatile_gprs( Register dst_base, int offset);

@ -398,7 +407,8 @@ class MacroAssembler: public Assembler {
// the entry point
address entry_point,
// flag which indicates if exception should be checked
- bool check_exception = true
+ bool check_exception = true,
+ Label* last_java_pc = nullptr
);

// Support for VM calls. This is the base routine called by the

@ -411,7 +421,7 @@ class MacroAssembler: public Assembler {
// Call into the VM.
// Passes the thread pointer (in R3_ARG1) as a prepended argument.
// Makes sure oop return values are visible to the GC.
- void call_VM(Register oop_result, address entry_point, bool check_exceptions = true);
+ void call_VM(Register oop_result, address entry_point, bool check_exceptions = true, Label* last_java_pc = nullptr);
void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg3, bool check_exceptions = true);

@ -695,8 +705,8 @@ class MacroAssembler: public Assembler {
// Support for last Java frame (but use call_VM instead where possible):
// access R16_thread->last_Java_sp.
void set_last_Java_frame(Register last_java_sp, Register last_Java_pc);
- void reset_last_Java_frame(void);
- void set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1);
+ void reset_last_Java_frame(bool check_last_java_sp = true);
+ void set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1, Label* jpc = nullptr);

// Read vm result from thread: oop_result = R16_thread->result;
void get_vm_result (Register oop_result);

@ -912,7 +922,7 @@ class MacroAssembler: public Assembler {

private:
void asm_assert_mems_zero(bool check_equal, int size, int mem_offset, Register mem_base,
- const char* msg);
+ const char* msg) NOT_DEBUG_RETURN;

public:

@ -191,8 +191,18 @@ inline void MacroAssembler::set_oop(AddressLiteral obj_addr, Register d) {
}

inline void MacroAssembler::pd_patch_instruction(address branch, address target, const char* file, int line) {
+ if (is_branch(branch)) {
jint& stub_inst = *(jint*) branch;
stub_inst = patched_branch(target - branch, stub_inst, 0);
+ } else if (is_calculate_address_from_global_toc_at(branch + BytesPerInstWord, branch)) {
+ const address inst1_addr = branch;
|
||||||
|
const address inst2_addr = branch + BytesPerInstWord;
|
||||||
|
patch_calculate_address_from_global_toc_at(inst2_addr, inst1_addr, target);
|
||||||
|
} else if (is_load_const_at(branch)) {
|
||||||
|
patch_const(branch, (long)target);
|
||||||
|
} else {
|
||||||
|
assert(false, "instruction at " PTR_FORMAT " not recognized", p2i(branch));
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Relocation of conditional far branches.
|
// Relocation of conditional far branches.
|
||||||
|
@ -205,12 +205,14 @@ intptr_t NativeMovConstReg::data() const {
|
|||||||
// Therefore we use raw decoding.
|
// Therefore we use raw decoding.
|
||||||
if (CompressedOops::is_null(no)) return 0;
|
if (CompressedOops::is_null(no)) return 0;
|
||||||
return cast_from_oop<intptr_t>(CompressedOops::decode_raw(no));
|
return cast_from_oop<intptr_t>(CompressedOops::decode_raw(no));
|
||||||
} else {
|
} else if (MacroAssembler::is_load_const_from_method_toc_at(addr)) {
|
||||||
assert(MacroAssembler::is_load_const_from_method_toc_at(addr), "must be load_const_from_pool");
|
|
||||||
|
|
||||||
address ctable = cb->content_begin();
|
address ctable = cb->content_begin();
|
||||||
int offset = MacroAssembler::get_offset_of_load_const_from_method_toc_at(addr);
|
int offset = MacroAssembler::get_offset_of_load_const_from_method_toc_at(addr);
|
||||||
return *(intptr_t *)(ctable + offset);
|
return *(intptr_t *)(ctable + offset);
|
||||||
|
} else {
|
||||||
|
assert(MacroAssembler::is_calculate_address_from_global_toc_at(addr, addr - BytesPerInstWord),
|
||||||
|
"must be calculate_address_from_global_toc");
|
||||||
|
return (intptr_t) MacroAssembler::get_address_of_calculate_address_from_global_toc_at(addr, addr - BytesPerInstWord);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -12108,15 +12108,31 @@ instruct cmpFastUnlock(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRegPdst tmp
|
|||||||
ins_pipe(pipe_class_compare);
|
ins_pipe(pipe_class_compare);
|
||||||
%}
|
%}
|
||||||
|
|
||||||
instruct cmpFastLockLightweight(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2, flagsRegCR1 cr1) %{
|
instruct cmpFastLockLightweight(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2) %{
|
||||||
predicate(LockingMode == LM_LIGHTWEIGHT);
|
predicate(LockingMode == LM_LIGHTWEIGHT && !UseObjectMonitorTable);
|
||||||
match(Set crx (FastLock oop box));
|
match(Set crx (FastLock oop box));
|
||||||
effect(TEMP tmp1, TEMP tmp2, KILL cr1);
|
effect(TEMP tmp1, TEMP tmp2);
|
||||||
|
|
||||||
format %{ "FASTLOCK $oop, $box, $tmp1, $tmp2" %}
|
format %{ "FASTLOCK $oop, $box, $tmp1, $tmp2" %}
|
||||||
ins_encode %{
|
ins_encode %{
|
||||||
__ fast_lock_lightweight($crx$$CondRegister, $oop$$Register, $box$$Register,
|
__ fast_lock_lightweight($crx$$CondRegister, $oop$$Register, $box$$Register,
|
||||||
$tmp1$$Register, $tmp2$$Register, /*tmp3*/ R0);
|
$tmp1$$Register, $tmp2$$Register, noreg /*tmp3*/);
|
||||||
|
// If locking was successful, crx should indicate 'EQ'.
|
||||||
|
// The compiler generates a branch to the runtime call to
|
||||||
|
// _complete_monitor_locking_Java for the case where crx is 'NE'.
|
||||||
|
%}
|
||||||
|
ins_pipe(pipe_class_compare);
|
||||||
|
%}
|
||||||
|
|
||||||
|
instruct cmpFastLockMonitorTable(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2, iRegPdst tmp3, flagsRegCR1 cr1) %{
|
||||||
|
predicate(LockingMode == LM_LIGHTWEIGHT && UseObjectMonitorTable);
|
||||||
|
match(Set crx (FastLock oop box));
|
||||||
|
effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr1);
|
||||||
|
|
||||||
|
format %{ "FASTLOCK $oop, $box, $tmp1, $tmp2, $tmp3" %}
|
||||||
|
ins_encode %{
|
||||||
|
__ fast_lock_lightweight($crx$$CondRegister, $oop$$Register, $box$$Register,
|
||||||
|
$tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
|
||||||
// If locking was successful, crx should indicate 'EQ'.
|
// If locking was successful, crx should indicate 'EQ'.
|
||||||
// The compiler generates a branch to the runtime call to
|
// The compiler generates a branch to the runtime call to
|
||||||
// _complete_monitor_locking_Java for the case where crx is 'NE'.
|
// _complete_monitor_locking_Java for the case where crx is 'NE'.
|
||||||
|
@ -1602,6 +1602,7 @@ static void fill_continuation_entry(MacroAssembler* masm, Register reg_cont_obj,
|
|||||||
#ifdef ASSERT
|
#ifdef ASSERT
|
||||||
__ load_const_optimized(tmp2, ContinuationEntry::cookie_value());
|
__ load_const_optimized(tmp2, ContinuationEntry::cookie_value());
|
||||||
__ stw(tmp2, in_bytes(ContinuationEntry::cookie_offset()), R1_SP);
|
__ stw(tmp2, in_bytes(ContinuationEntry::cookie_offset()), R1_SP);
|
||||||
|
__ std(tmp2, _abi0(cr), R1_SP);
|
||||||
#endif //ASSERT
|
#endif //ASSERT
|
||||||
|
|
||||||
__ li(zero, 0);
|
__ li(zero, 0);
|
||||||
@ -1645,6 +1646,10 @@ static void continuation_enter_cleanup(MacroAssembler* masm) {
|
|||||||
__ ld_ptr(tmp1, JavaThread::cont_entry_offset(), R16_thread);
|
__ ld_ptr(tmp1, JavaThread::cont_entry_offset(), R16_thread);
|
||||||
__ cmpd(CCR0, R1_SP, tmp1);
|
__ cmpd(CCR0, R1_SP, tmp1);
|
||||||
__ asm_assert_eq(FILE_AND_LINE ": incorrect R1_SP");
|
__ asm_assert_eq(FILE_AND_LINE ": incorrect R1_SP");
|
||||||
|
__ load_const_optimized(tmp1, ContinuationEntry::cookie_value());
|
||||||
|
__ ld(tmp2, _abi0(cr), R1_SP);
|
||||||
|
__ cmpd(CCR0, tmp1, tmp2);
|
||||||
|
__ asm_assert_eq(FILE_AND_LINE ": cookie not found");
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
__ ld_ptr(tmp1, ContinuationEntry::parent_cont_fastpath_offset(), R1_SP);
|
__ ld_ptr(tmp1, ContinuationEntry::parent_cont_fastpath_offset(), R1_SP);
|
||||||
@ -1853,6 +1858,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
|
|||||||
// --- Thawing path
|
// --- Thawing path
|
||||||
|
|
||||||
__ bind(L_thaw);
|
__ bind(L_thaw);
|
||||||
|
ContinuationEntry::_thaw_call_pc_offset = __ pc() - start;
|
||||||
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(StubRoutines::cont_thaw()));
|
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(StubRoutines::cont_thaw()));
|
||||||
__ mtctr(R0);
|
__ mtctr(R0);
|
||||||
__ bctrl();
|
__ bctrl();
|
||||||
@ -1863,6 +1869,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
|
|||||||
// --- Normal exit (resolve/thawing)
|
// --- Normal exit (resolve/thawing)
|
||||||
|
|
||||||
__ bind(L_exit);
|
__ bind(L_exit);
|
||||||
|
ContinuationEntry::_cleanup_offset = __ pc() - start;
|
||||||
continuation_enter_cleanup(masm);
|
continuation_enter_cleanup(masm);
|
||||||
|
|
||||||
// Pop frame and return
|
// Pop frame and return
|
||||||
@ -1970,6 +1977,10 @@ static void gen_continuation_yield(MacroAssembler* masm,
|
|||||||
__ bctr();
|
__ bctr();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void SharedRuntime::continuation_enter_cleanup(MacroAssembler* masm) {
|
||||||
|
::continuation_enter_cleanup(masm);
|
||||||
|
}
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
// ---------------------------------------------------------------------------
|
||||||
// Generate a native wrapper for a given method. The method takes arguments
|
// Generate a native wrapper for a given method. The method takes arguments
|
||||||
// in the Java compiled code convention, marshals them to the native
|
// in the Java compiled code convention, marshals them to the native
|
||||||
@ -2190,9 +2201,9 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
|
|||||||
intptr_t start_pc = (intptr_t)__ pc();
|
intptr_t start_pc = (intptr_t)__ pc();
|
||||||
intptr_t vep_start_pc;
|
intptr_t vep_start_pc;
|
||||||
intptr_t frame_done_pc;
|
intptr_t frame_done_pc;
|
||||||
intptr_t oopmap_pc;
|
|
||||||
|
|
||||||
Label handle_pending_exception;
|
Label handle_pending_exception;
|
||||||
|
Label last_java_pc;
|
||||||
|
|
||||||
Register r_callers_sp = R21;
|
Register r_callers_sp = R21;
|
||||||
Register r_temp_1 = R22;
|
Register r_temp_1 = R22;
|
||||||
@ -2201,7 +2212,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
|
|||||||
Register r_temp_4 = R25;
|
Register r_temp_4 = R25;
|
||||||
Register r_temp_5 = R26;
|
Register r_temp_5 = R26;
|
||||||
Register r_temp_6 = R27;
|
Register r_temp_6 = R27;
|
||||||
Register r_return_pc = R28;
|
Register r_last_java_pc = R28;
|
||||||
|
|
||||||
Register r_carg1_jnienv = noreg;
|
Register r_carg1_jnienv = noreg;
|
||||||
Register r_carg2_classorobject = noreg;
|
Register r_carg2_classorobject = noreg;
|
||||||
@ -2363,15 +2374,9 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
|
|||||||
// We MUST NOT touch any outgoing regs from this point on.
|
// We MUST NOT touch any outgoing regs from this point on.
|
||||||
// So if we must call out we must push a new frame.
|
// So if we must call out we must push a new frame.
|
||||||
|
|
||||||
// Get current pc for oopmap, and load it patchable relative to global toc.
|
// The last java pc will also be used as resume pc if this is the wrapper for wait0.
|
||||||
oopmap_pc = (intptr_t) __ pc();
|
// For this purpose the precise location matters but not for oopmap lookup.
|
||||||
__ calculate_address_from_global_toc(r_return_pc, (address)oopmap_pc, true, true, true, true);
|
__ calculate_address_from_global_toc(r_last_java_pc, last_java_pc, true, true, true, true);
|
||||||
|
|
||||||
// We use the same pc/oopMap repeatedly when we call out.
|
|
||||||
oop_maps->add_gc_map(oopmap_pc - start_pc, oop_map);
|
|
||||||
|
|
||||||
// r_return_pc now has the pc loaded that we will use when we finally call
|
|
||||||
// to native.
|
|
||||||
|
|
||||||
// Make sure that thread is non-volatile; it crosses a bunch of VM calls below.
|
// Make sure that thread is non-volatile; it crosses a bunch of VM calls below.
|
||||||
assert(R16_thread->is_nonvolatile(), "thread must be in non-volatile register");
|
assert(R16_thread->is_nonvolatile(), "thread must be in non-volatile register");
|
||||||
@ -2399,7 +2404,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
|
|||||||
// Try fastpath for locking.
|
// Try fastpath for locking.
|
||||||
if (LockingMode == LM_LIGHTWEIGHT) {
|
if (LockingMode == LM_LIGHTWEIGHT) {
|
||||||
// fast_lock kills r_temp_1, r_temp_2, r_temp_3.
|
// fast_lock kills r_temp_1, r_temp_2, r_temp_3.
|
||||||
__ compiler_fast_lock_lightweight_object(CCR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
|
Register r_temp_3_or_noreg = UseObjectMonitorTable ? r_temp_3 : noreg;
|
||||||
|
__ compiler_fast_lock_lightweight_object(CCR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3_or_noreg);
|
||||||
} else {
|
} else {
|
||||||
// fast_lock kills r_temp_1, r_temp_2, r_temp_3.
|
// fast_lock kills r_temp_1, r_temp_2, r_temp_3.
|
||||||
__ compiler_fast_lock_object(CCR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
|
__ compiler_fast_lock_object(CCR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
|
||||||
@ -2416,9 +2422,14 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
|
|||||||
RegisterSaver::push_frame_and_save_argument_registers(masm, R12_scratch2, frame_size, total_c_args, out_regs);
|
RegisterSaver::push_frame_and_save_argument_registers(masm, R12_scratch2, frame_size, total_c_args, out_regs);
|
||||||
|
|
||||||
// Do the call.
|
// Do the call.
|
||||||
__ set_last_Java_frame(R11_scratch1, r_return_pc);
|
__ set_last_Java_frame(R11_scratch1, r_last_java_pc);
|
||||||
assert(r_return_pc->is_nonvolatile(), "expecting return pc to be in non-volatile register");
|
assert(r_last_java_pc->is_nonvolatile(), "r_last_java_pc needs to be preserved accross complete_monitor_locking_C call");
|
||||||
|
// The following call will not be preempted.
|
||||||
|
// push_cont_fastpath forces freeze slow path in case we try to preempt where we will pin the
|
||||||
|
// vthread to the carrier (see FreezeBase::recurse_freeze_native_frame()).
|
||||||
|
__ push_cont_fastpath();
|
||||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), r_oop, r_box, R16_thread);
|
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), r_oop, r_box, R16_thread);
|
||||||
|
__ pop_cont_fastpath();
|
||||||
__ reset_last_Java_frame();
|
__ reset_last_Java_frame();
|
||||||
|
|
||||||
RegisterSaver::restore_argument_registers_and_pop_frame(masm, frame_size, total_c_args, out_regs);
|
RegisterSaver::restore_argument_registers_and_pop_frame(masm, frame_size, total_c_args, out_regs);
|
||||||
@ -2429,8 +2440,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
|
|||||||
__ bind(locked);
|
__ bind(locked);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Use that pc we placed in r_return_pc a while back as the current frame anchor.
|
__ set_last_Java_frame(R1_SP, r_last_java_pc);
|
||||||
__ set_last_Java_frame(R1_SP, r_return_pc);
|
|
||||||
|
|
||||||
// Publish thread state
|
// Publish thread state
|
||||||
// --------------------------------------------------------------------------
|
// --------------------------------------------------------------------------
|
||||||
@ -2490,8 +2500,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
Label after_transition;
|
|
||||||
|
|
||||||
// Publish thread state
|
// Publish thread state
|
||||||
// --------------------------------------------------------------------------
|
// --------------------------------------------------------------------------
|
||||||
|
|
||||||
@ -2566,7 +2574,23 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
|
|||||||
__ lwsync(); // Acquire safepoint and suspend state, release thread state.
|
__ lwsync(); // Acquire safepoint and suspend state, release thread state.
|
||||||
// TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
|
// TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
|
||||||
__ stw(R0, thread_(thread_state));
|
__ stw(R0, thread_(thread_state));
|
||||||
__ bind(after_transition);
|
|
||||||
|
// Check preemption for Object.wait()
|
||||||
|
if (LockingMode != LM_LEGACY && method->is_object_wait0()) {
|
||||||
|
Label not_preempted;
|
||||||
|
__ ld(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread);
|
||||||
|
__ cmpdi(CCR0, R0, 0);
|
||||||
|
__ beq(CCR0, not_preempted);
|
||||||
|
__ mtlr(R0);
|
||||||
|
__ li(R0, 0);
|
||||||
|
__ std(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread);
|
||||||
|
__ blr();
|
||||||
|
__ bind(not_preempted);
|
||||||
|
}
|
||||||
|
__ bind(last_java_pc);
|
||||||
|
// We use the same pc/oopMap repeatedly when we call out above.
|
||||||
|
intptr_t oopmap_pc = (intptr_t) __ pc();
|
||||||
|
oop_maps->add_gc_map(oopmap_pc - start_pc, oop_map);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Reguard any pages if necessary.
|
// Reguard any pages if necessary.
|
||||||
@ -2648,7 +2672,9 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
|
|||||||
// Clear "last Java frame" SP and PC.
|
// Clear "last Java frame" SP and PC.
|
||||||
// --------------------------------------------------------------------------
|
// --------------------------------------------------------------------------
|
||||||
|
|
||||||
__ reset_last_Java_frame();
|
// Last java frame won't be set if we're resuming after preemption
|
||||||
|
bool maybe_preempted = LockingMode != LM_LEGACY && method->is_object_wait0();
|
||||||
|
__ reset_last_Java_frame(!maybe_preempted /* check_last_java_sp */);
|
||||||
|
|
||||||
// Unbox oop result, e.g. JNIHandles::resolve value.
|
// Unbox oop result, e.g. JNIHandles::resolve value.
|
||||||
// --------------------------------------------------------------------------
|
// --------------------------------------------------------------------------
|
||||||
@ -2733,6 +2759,12 @@ uint SharedRuntime::out_preserve_stack_slots() {
|
|||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
|
VMReg SharedRuntime::thread_register() {
|
||||||
|
// On PPC virtual threads don't save the JavaThread* in their context (e.g. C1 stub frames).
|
||||||
|
ShouldNotCallThis();
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
#if defined(COMPILER1) || defined(COMPILER2)
|
#if defined(COMPILER1) || defined(COMPILER2)
|
||||||
// Frame generation for deopt and uncommon trap blobs.
|
// Frame generation for deopt and uncommon trap blobs.
|
||||||
static void push_skeleton_frame(MacroAssembler* masm, bool deopt,
|
static void push_skeleton_frame(MacroAssembler* masm, bool deopt,
|
||||||
|
@ -184,8 +184,9 @@ inline int StackChunkFrameStream<frame_kind>::interpreter_frame_num_oops() const
|
|||||||
f.interpreted_frame_oop_map(&mask);
|
f.interpreted_frame_oop_map(&mask);
|
||||||
return mask.num_oops()
|
return mask.num_oops()
|
||||||
+ 1 // for the mirror oop
|
+ 1 // for the mirror oop
|
||||||
+ ((intptr_t*)f.interpreter_frame_monitor_begin()
|
+ (f.interpreter_frame_method()->is_native() ? 1 : 0) // temp oop slot
|
||||||
- (intptr_t*)f.interpreter_frame_monitor_end())/BasicObjectLock::size();
|
+ pointer_delta_as_int((intptr_t*)f.interpreter_frame_monitor_begin(),
|
||||||
|
(intptr_t*)f.interpreter_frame_monitor_end())/BasicObjectLock::size();
|
||||||
}
|
}
|
||||||
|
|
||||||
template<>
|
template<>
|
||||||
|
@ -4483,6 +4483,10 @@ address generate_lookup_secondary_supers_table_stub(u1 super_klass_index) {
|
|||||||
|
|
||||||
address start = __ pc();
|
address start = __ pc();
|
||||||
|
|
||||||
|
if (kind == Continuation::thaw_top) {
|
||||||
|
__ clobber_nonvolatile_registers(); // Except R16_thread and R29_TOC
|
||||||
|
}
|
||||||
|
|
||||||
if (return_barrier) {
|
if (return_barrier) {
|
||||||
__ mr(nvtmp, R3_RET); __ fmr(nvftmp, F1_RET); // preserve possible return value from a method returning to the return barrier
|
__ mr(nvtmp, R3_RET); __ fmr(nvftmp, F1_RET); // preserve possible return value from a method returning to the return barrier
|
||||||
DEBUG_ONLY(__ ld_ptr(tmp1, _abi0(callers_sp), R1_SP);)
|
DEBUG_ONLY(__ ld_ptr(tmp1, _abi0(callers_sp), R1_SP);)
|
||||||
@ -4571,6 +4575,41 @@ address generate_lookup_secondary_supers_table_stub(u1 super_klass_index) {
|
|||||||
return generate_cont_thaw("Cont thaw return barrier exception", Continuation::thaw_return_barrier_exception);
|
return generate_cont_thaw("Cont thaw return barrier exception", Continuation::thaw_return_barrier_exception);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
address generate_cont_preempt_stub() {
|
||||||
|
if (!Continuations::enabled()) return nullptr;
|
||||||
|
StubCodeMark mark(this, "StubRoutines","Continuation preempt stub");
|
||||||
|
address start = __ pc();
|
||||||
|
|
||||||
|
__ clobber_nonvolatile_registers(); // Except R16_thread and R29_TOC
|
||||||
|
|
||||||
|
__ reset_last_Java_frame(false /*check_last_java_sp*/);
|
||||||
|
|
||||||
|
// Set sp to enterSpecial frame, i.e. remove all frames copied into the heap.
|
||||||
|
__ ld_ptr(R1_SP, JavaThread::cont_entry_offset(), R16_thread);
|
||||||
|
|
||||||
|
Label preemption_cancelled;
|
||||||
|
__ lbz(R11_scratch1, in_bytes(JavaThread::preemption_cancelled_offset()), R16_thread);
|
||||||
|
__ cmpwi(CCR0, R11_scratch1, 0);
|
||||||
|
__ bne(CCR0, preemption_cancelled);
|
||||||
|
|
||||||
|
// Remove enterSpecial frame from the stack and return to Continuation.run() to unmount.
|
||||||
|
SharedRuntime::continuation_enter_cleanup(_masm);
|
||||||
|
__ pop_frame();
|
||||||
|
__ restore_LR(R11_scratch1);
|
||||||
|
__ blr();
|
||||||
|
|
||||||
|
// We acquired the monitor after freezing the frames so call thaw to continue execution.
|
||||||
|
__ bind(preemption_cancelled);
|
||||||
|
__ li(R11_scratch1, 0); // false
|
||||||
|
__ stb(R11_scratch1, in_bytes(JavaThread::preemption_cancelled_offset()), R16_thread);
|
||||||
|
int simm16_offs = __ load_const_optimized(R11_scratch1, ContinuationEntry::thaw_call_pc_address(), R0, true);
|
||||||
|
__ ld(R11_scratch1, simm16_offs, R11_scratch1);
|
||||||
|
__ mtctr(R11_scratch1);
|
||||||
|
__ bctr();
|
||||||
|
|
||||||
|
return start;
|
||||||
|
}
|
||||||
|
|
||||||
// exception handler for upcall stubs
|
// exception handler for upcall stubs
|
||||||
address generate_upcall_stub_exception_handler() {
|
address generate_upcall_stub_exception_handler() {
|
||||||
StubCodeMark mark(this, "StubRoutines", "upcall stub exception handler");
|
StubCodeMark mark(this, "StubRoutines", "upcall stub exception handler");
|
||||||
@ -4646,6 +4685,7 @@ address generate_lookup_secondary_supers_table_stub(u1 super_klass_index) {
|
|||||||
StubRoutines::_cont_thaw = generate_cont_thaw();
|
StubRoutines::_cont_thaw = generate_cont_thaw();
|
||||||
StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
|
StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
|
||||||
StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
|
StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
|
||||||
|
StubRoutines::_cont_preempt_stub = generate_cont_preempt_stub();
|
||||||
}
|
}
|
||||||
|
|
||||||
void generate_final_stubs() {
|
void generate_final_stubs() {
|
||||||
|
@ -696,6 +696,17 @@ address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state,
|
|||||||
return entry;
|
return entry;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
address TemplateInterpreterGenerator::generate_cont_resume_interpreter_adapter() {
|
||||||
|
if (!Continuations::enabled()) return nullptr;
|
||||||
|
address start = __ pc();
|
||||||
|
|
||||||
|
__ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R12_scratch2);
|
||||||
|
__ restore_interpreter_state(R11_scratch1, false, true /*restore_top_frame_sp*/);
|
||||||
|
__ blr();
|
||||||
|
|
||||||
|
return start;
|
||||||
|
}
|
||||||
|
|
||||||
// Helpers for commoning out cases in the various type of method entries.
|
// Helpers for commoning out cases in the various type of method entries.
|
||||||
|
|
||||||
// Increment invocation count & check for overflow.
|
// Increment invocation count & check for overflow.
|
||||||
@ -1197,7 +1208,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
|||||||
const Register signature_handler_fd = R11_scratch1;
|
const Register signature_handler_fd = R11_scratch1;
|
||||||
const Register pending_exception = R0;
|
const Register pending_exception = R0;
|
||||||
const Register result_handler_addr = R31;
|
const Register result_handler_addr = R31;
|
||||||
const Register native_method_fd = R11_scratch1;
|
const Register native_method_fd = R12_scratch2; // preferred in MacroAssembler::branch_to
|
||||||
const Register access_flags = R22_tmp2;
|
const Register access_flags = R22_tmp2;
|
||||||
const Register active_handles = R11_scratch1; // R26_monitor saved to state.
|
const Register active_handles = R11_scratch1; // R26_monitor saved to state.
|
||||||
const Register sync_state = R12_scratch2;
|
const Register sync_state = R12_scratch2;
|
||||||
@ -1211,10 +1222,6 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
|||||||
Label exception_return_sync_check;
|
Label exception_return_sync_check;
|
||||||
Label stack_overflow_return;
|
Label stack_overflow_return;
|
||||||
|
|
||||||
// Generate new interpreter state and jump to stack_overflow_return in case of
|
|
||||||
// a stack overflow.
|
|
||||||
//generate_compute_interpreter_state(stack_overflow_return);
|
|
||||||
|
|
||||||
Register size_of_parameters = R22_tmp2;
|
Register size_of_parameters = R22_tmp2;
|
||||||
|
|
||||||
generate_fixed_frame(true, size_of_parameters, noreg /* unused */);
|
generate_fixed_frame(true, size_of_parameters, noreg /* unused */);
|
||||||
@ -1253,8 +1260,8 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
|||||||
|
|
||||||
// access_flags = method->access_flags();
|
// access_flags = method->access_flags();
|
||||||
// Load access flags.
|
// Load access flags.
|
||||||
assert(access_flags->is_nonvolatile(),
|
assert(__ nonvolatile_accross_vthread_preemtion(access_flags),
|
||||||
"access_flags must be in a non-volatile register");
|
"access_flags not preserved");
|
||||||
// Type check.
|
// Type check.
|
||||||
assert(4 == sizeof(AccessFlags), "unexpected field size");
|
assert(4 == sizeof(AccessFlags), "unexpected field size");
|
||||||
__ lwz(access_flags, method_(access_flags));
|
__ lwz(access_flags, method_(access_flags));
|
||||||
@ -1315,8 +1322,12 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
|||||||
// convenient and the slow signature handler can use this same frame
|
// convenient and the slow signature handler can use this same frame
|
||||||
// anchor.
|
// anchor.
|
||||||
|
|
||||||
|
bool support_vthread_preemption = Continuations::enabled() && LockingMode != LM_LEGACY;
|
||||||
|
|
||||||
// We have a TOP_IJAVA_FRAME here, which belongs to us.
|
// We have a TOP_IJAVA_FRAME here, which belongs to us.
|
||||||
__ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R12_scratch2/*tmp*/);
|
Label last_java_pc;
|
||||||
|
Label *resume_pc = support_vthread_preemption ? &last_java_pc : nullptr;
|
||||||
|
__ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R3_ARG1/*tmp*/, resume_pc);
|
||||||
|
|
||||||
// Now the interpreter frame (and its call chain) have been
|
// Now the interpreter frame (and its call chain) have been
|
||||||
// invalidated and flushed. We are now protected against eager
|
// invalidated and flushed. We are now protected against eager
|
||||||
@ -1335,16 +1346,11 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
|||||||
|
|
||||||
__ call_stub(signature_handler_fd);
|
__ call_stub(signature_handler_fd);
|
||||||
|
|
||||||
// Remove the register parameter varargs slots we allocated in
|
assert(__ nonvolatile_accross_vthread_preemtion(result_handler_addr),
|
||||||
// compute_interpreter_state. SP+16 ends up pointing to the ABI
|
"result_handler_addr not preserved");
|
||||||
// outgoing argument area.
|
|
||||||
//
|
|
||||||
// Not needed on PPC64.
|
|
||||||
//__ add(SP, SP, Argument::n_int_register_parameters_c*BytesPerWord);
|
|
||||||
|
|
||||||
assert(result_handler_addr->is_nonvolatile(), "result_handler_addr must be in a non-volatile register");
|
|
||||||
// Save across call to native method.
|
// Save across call to native method.
|
||||||
__ mr(result_handler_addr, R3_RET);
|
__ mr(result_handler_addr, R3_RET);
|
||||||
|
__ ld(R11_scratch1, _abi0(callers_sp), R1_SP); // load FP
|
||||||
|
|
||||||
__ isync(); // Acquire signature handler before trying to fetch the native entry point and klass mirror.
|
__ isync(); // Acquire signature handler before trying to fetch the native entry point and klass mirror.
|
||||||
|
|
||||||
@ -1358,12 +1364,11 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
|||||||
__ testbitdi(CCR0, R0, access_flags, JVM_ACC_STATIC_BIT);
|
__ testbitdi(CCR0, R0, access_flags, JVM_ACC_STATIC_BIT);
|
||||||
__ bfalse(CCR0, method_is_not_static);
|
__ bfalse(CCR0, method_is_not_static);
|
||||||
|
|
||||||
__ ld(R11_scratch1, _abi0(callers_sp), R1_SP);
|
// Load mirror from interpreter frame (FP in R11_scratch1)
|
||||||
// Load mirror from interpreter frame.
|
__ ld(R21_tmp1, _ijava_state_neg(mirror), R11_scratch1);
|
||||||
__ ld(R12_scratch2, _ijava_state_neg(mirror), R11_scratch1);
|
|
||||||
// R4_ARG2 = &state->_oop_temp;
|
// R4_ARG2 = &state->_oop_temp;
|
||||||
__ addi(R4_ARG2, R11_scratch1, _ijava_state_neg(oop_tmp));
|
__ addi(R4_ARG2, R11_scratch1, _ijava_state_neg(oop_tmp));
|
||||||
__ std(R12_scratch2/*mirror*/, _ijava_state_neg(oop_tmp), R11_scratch1);
|
__ std(R21_tmp1/*mirror*/, _ijava_state_neg(oop_tmp), R11_scratch1);
|
||||||
BIND(method_is_not_static);
|
BIND(method_is_not_static);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1397,7 +1402,18 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
|||||||
// Call the native method. Argument registers must not have been
|
// Call the native method. Argument registers must not have been
|
||||||
// overwritten since "__ call_stub(signature_handler);" (except for
|
// overwritten since "__ call_stub(signature_handler);" (except for
|
||||||
// ARG1 and ARG2 for static methods).
|
// ARG1 and ARG2 for static methods).
|
||||||
|
|
||||||
|
if (support_vthread_preemption) {
|
||||||
|
// result_handler_addr is a nonvolatile register. Its value will be preserved across
|
||||||
|
// the native call but only if the call isn't preempted. To preserve its value even
|
||||||
|
// in the case of preemption we save it in the lresult slot. It is restored at
|
||||||
|
// resume_pc if, and only if the call was preempted. This works because only
|
||||||
|
// j.l.Object::wait calls are preempted which don't return a result.
|
||||||
|
__ std(result_handler_addr, _ijava_state_neg(lresult), R11_scratch1);
|
||||||
|
}
|
||||||
|
__ push_cont_fastpath();
|
||||||
__ call_c(native_method_fd);
|
__ call_c(native_method_fd);
|
||||||
|
__ pop_cont_fastpath();
|
||||||
|
|
||||||
__ li(R0, 0);
|
__ li(R0, 0);
|
||||||
__ ld(R11_scratch1, 0, R1_SP);
|
__ ld(R11_scratch1, 0, R1_SP);
|
||||||
@ -1495,6 +1511,35 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
|||||||
__ lwsync(); // Acquire safepoint and suspend state, release thread state.
|
__ lwsync(); // Acquire safepoint and suspend state, release thread state.
|
||||||
__ stw(R0/*thread_state*/, thread_(thread_state));
|
__ stw(R0/*thread_state*/, thread_(thread_state));
|
||||||
|
|
||||||
|
if (support_vthread_preemption) {
|
||||||
|
// Check preemption for Object.wait()
|
||||||
|
Label not_preempted;
|
||||||
|
__ ld(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread);
|
||||||
|
__ cmpdi(CCR0, R0, 0);
|
||||||
|
__ beq(CCR0, not_preempted);
|
||||||
|
__ mtlr(R0);
|
||||||
|
__ li(R0, 0);
|
||||||
|
__ std(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread);
|
||||||
|
__ blr();
|
||||||
|
|
||||||
|
// Execution will be resumed here when the vthread becomes runnable again.
|
||||||
|
__ bind(*resume_pc);
|
||||||
|
__ restore_after_resume(R11_scratch1 /* fp */);
|
||||||
|
// We saved the result handler before the call
|
||||||
|
__ ld(result_handler_addr, _ijava_state_neg(lresult), R11_scratch1);
|
||||||
|
#ifdef ASSERT
|
||||||
|
// Clobber result slots. Only native methods returning void can be preemted currently.
|
||||||
|
__ load_const(R3_RET, UCONST64(0xbad01001));
|
||||||
|
__ std(R3_RET, _ijava_state_neg(lresult), R11_scratch1);
|
||||||
|
__ std(R3_RET, _ijava_state_neg(fresult), R11_scratch1);
|
||||||
|
// reset_last_Java_frame() below asserts that a last java sp is set
|
||||||
|
__ asm_assert_mem8_is_zero(in_bytes(JavaThread::last_Java_sp_offset()),
|
||||||
|
R16_thread, FILE_AND_LINE ": Last java sp should not be set when resuming");
|
||||||
|
__ std(R3_RET, in_bytes(JavaThread::last_Java_sp_offset()), R16_thread);
|
||||||
|
#endif
|
||||||
|
__ bind(not_preempted);
|
||||||
|
}
|
||||||
|
|
||||||
if (CheckJNICalls) {
|
if (CheckJNICalls) {
|
||||||
// clear_pending_jni_exception_check
|
// clear_pending_jni_exception_check
|
||||||
__ load_const_optimized(R0, 0L);
|
__ load_const_optimized(R0, 0L);
|
||||||
|
@ -83,8 +83,8 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
|
|||||||
// displaced header address in the object header - if it is not the same, get the
|
// displaced header address in the object header - if it is not the same, get the
|
||||||
// object header instead
|
// object header instead
|
||||||
la(temp, Address(obj, hdr_offset));
|
la(temp, Address(obj, hdr_offset));
|
||||||
cmpxchgptr(hdr, disp_hdr, temp, t1, done, /*fallthough*/nullptr);
|
|
||||||
// if the object header was the same, we're done
|
// if the object header was the same, we're done
|
||||||
|
cmpxchgptr(hdr, disp_hdr, temp, t1, done, /*fallthough*/nullptr);
|
||||||
// if the object header was not the same, it is now in the hdr register
|
// if the object header was not the same, it is now in the hdr register
|
||||||
// => test if it is a stack pointer into the same stack (recursive locking), i.e.:
|
// => test if it is a stack pointer into the same stack (recursive locking), i.e.:
|
||||||
//
|
//
|
||||||
@ -106,11 +106,12 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
|
|||||||
sd(hdr, Address(disp_hdr, 0));
|
sd(hdr, Address(disp_hdr, 0));
|
||||||
// otherwise we don't care about the result and handle locking via runtime call
|
// otherwise we don't care about the result and handle locking via runtime call
|
||||||
bnez(hdr, slow_case, /* is_far */ true);
|
bnez(hdr, slow_case, /* is_far */ true);
|
||||||
|
|
||||||
// done
|
// done
|
||||||
bind(done);
|
bind(done);
|
||||||
|
inc_held_monitor_count(t0);
|
||||||
}
|
}
|
||||||
|
|
||||||
increment(Address(xthread, JavaThread::held_monitor_count_offset()));
|
|
||||||
return null_check_offset;
|
return null_check_offset;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -146,11 +147,11 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_
|
|||||||
} else {
|
} else {
|
||||||
cmpxchgptr(disp_hdr, hdr, obj, t1, done, &slow_case);
|
cmpxchgptr(disp_hdr, hdr, obj, t1, done, &slow_case);
|
||||||
}
|
}
|
||||||
|
|
||||||
// done
|
// done
|
||||||
bind(done);
|
bind(done);
|
||||||
|
dec_held_monitor_count(t0);
|
||||||
}
|
}
|
||||||
|
|
||||||
decrement(Address(xthread, JavaThread::held_monitor_count_offset()));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Defines obj, preserves var_size_in_bytes
|
// Defines obj, preserves var_size_in_bytes
|
||||||
|
@ -165,7 +165,7 @@ int StubAssembler::call_RT(Register oop_result, Register metadata_result, addres
|
|||||||
}
|
}
|
||||||
|
|
||||||
enum return_state_t {
|
enum return_state_t {
|
||||||
does_not_return, requires_return
|
does_not_return, requires_return, requires_pop_epilogue_return
|
||||||
};
|
};
|
||||||
|
|
||||||
// Implementation of StubFrame
|
// Implementation of StubFrame
|
||||||
@ -173,7 +173,7 @@ enum return_state_t {
|
|||||||
class StubFrame: public StackObj {
|
class StubFrame: public StackObj {
|
||||||
private:
|
private:
|
||||||
StubAssembler* _sasm;
|
StubAssembler* _sasm;
|
||||||
bool _return_state;
|
return_state_t _return_state;
|
||||||
|
|
||||||
public:
|
public:
|
||||||
StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments, return_state_t return_state=requires_return);
|
StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments, return_state_t return_state=requires_return);
|
||||||
@ -187,8 +187,18 @@ void StubAssembler::prologue(const char* name, bool must_gc_arguments) {
|
|||||||
enter();
|
enter();
|
||||||
}
|
}
|
||||||
|
|
||||||
void StubAssembler::epilogue() {
|
void StubAssembler::epilogue(bool use_pop) {
|
||||||
|
// Avoid using a leave instruction when this frame may
|
||||||
|
// have been frozen, since the current value of fp
|
||||||
|
// restored from the stub would be invalid. We still
|
||||||
|
// must restore the fp value saved on enter though.
|
||||||
|
if (use_pop) {
|
||||||
|
ld(fp, Address(sp));
|
||||||
|
ld(ra, Address(sp, wordSize));
|
||||||
|
addi(sp, sp, 2 * wordSize);
|
||||||
|
} else {
|
||||||
leave();
|
leave();
|
||||||
|
}
|
||||||
ret();
|
ret();
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -208,10 +218,10 @@ void StubFrame::load_argument(int offset_in_words, Register reg) {
|
|||||||
|
|
||||||
|
|
||||||
StubFrame::~StubFrame() {
|
StubFrame::~StubFrame() {
|
||||||
if (_return_state == requires_return) {
|
if (_return_state == does_not_return) {
|
||||||
__ epilogue();
|
|
||||||
} else {
|
|
||||||
__ should_not_reach_here();
|
__ should_not_reach_here();
|
||||||
|
} else {
|
||||||
|
__ epilogue(_return_state == requires_pop_epilogue_return);
|
||||||
}
|
}
|
||||||
_sasm = nullptr;
|
_sasm = nullptr;
|
||||||
}
|
}
|
||||||
@ -266,6 +276,10 @@ static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) {
|
|||||||
r->as_VMReg());
|
r->as_VMReg());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int sp_offset = cpu_reg_save_offsets[xthread->encoding()];
|
||||||
|
oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
|
||||||
|
xthread->as_VMReg());
|
||||||
|
|
||||||
// fpu_regs
|
// fpu_regs
|
||||||
if (save_fpu_registers) {
|
if (save_fpu_registers) {
|
||||||
for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
|
for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
|
||||||
@ -354,6 +368,16 @@ void Runtime1::initialize_pd() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// return: offset in 64-bit words.
|
||||||
|
uint Runtime1::runtime_blob_current_thread_offset(frame f) {
|
||||||
|
CodeBlob* cb = f.cb();
|
||||||
|
assert(cb == Runtime1::blob_for(C1StubId::monitorenter_id) ||
|
||||||
|
cb == Runtime1::blob_for(C1StubId::monitorenter_nofpu_id), "must be");
|
||||||
|
assert(cb != nullptr && cb->is_runtime_stub(), "invalid frame");
|
||||||
|
int offset = cpu_reg_save_offsets[xthread->encoding()];
|
||||||
|
return offset / 2; // SP offsets are in halfwords
|
||||||
|
}
|
||||||
|
|
||||||
// target: the entry point of the method that creates and posts the exception oop
|
// target: the entry point of the method that creates and posts the exception oop
|
||||||
// has_argument: true if the exception needs arguments (passed in t0 and t1)
|
// has_argument: true if the exception needs arguments (passed in t0 and t1)
|
||||||
|
|
||||||
@ -879,7 +903,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
|
|||||||
// fall through
|
// fall through
|
||||||
case C1StubId::monitorenter_id:
|
case C1StubId::monitorenter_id:
|
||||||
{
|
{
|
||||||
StubFrame f(sasm, "monitorenter", dont_gc_arguments);
|
StubFrame f(sasm, "monitorenter", dont_gc_arguments, requires_pop_epilogue_return);
|
||||||
OopMap* map = save_live_registers(sasm, save_fpu_registers);
|
OopMap* map = save_live_registers(sasm, save_fpu_registers);
|
||||||
assert_cond(map != nullptr);
|
assert_cond(map != nullptr);
|
||||||
|
|
||||||
|
@ -45,7 +45,7 @@
|
|||||||
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
|
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
|
||||||
|
|
||||||
void C2_MacroAssembler::fast_lock(Register objectReg, Register boxReg,
|
void C2_MacroAssembler::fast_lock(Register objectReg, Register boxReg,
|
||||||
Register tmp1Reg, Register tmp2Reg, Register tmp3Reg) {
|
Register tmp1Reg, Register tmp2Reg, Register tmp3Reg, Register tmp4Reg) {
|
||||||
// Use cr register to indicate the fast_lock result: zero for success; non-zero for failure.
|
// Use cr register to indicate the fast_lock result: zero for success; non-zero for failure.
|
||||||
Register flag = t1;
|
Register flag = t1;
|
||||||
Register oop = objectReg;
|
Register oop = objectReg;
|
||||||
@ -104,9 +104,9 @@ void C2_MacroAssembler::fast_lock(Register objectReg, Register boxReg,
|
|||||||
// markWord of object (disp_hdr) with the stack pointer.
|
// markWord of object (disp_hdr) with the stack pointer.
|
||||||
sub(disp_hdr, disp_hdr, sp);
|
sub(disp_hdr, disp_hdr, sp);
|
||||||
mv(tmp, (intptr_t) (~(os::vm_page_size()-1) | (uintptr_t)markWord::lock_mask_in_place));
|
mv(tmp, (intptr_t) (~(os::vm_page_size()-1) | (uintptr_t)markWord::lock_mask_in_place));
|
||||||
// If (mark & lock_mask) == 0 and mark - sp < page_size, we are stack-locking and goto label locked,
|
// If (mark & lock_mask) == 0 and mark - sp < page_size, we are stack-locking and goto label
|
||||||
// hence we can store 0 as the displaced header in the box, which indicates that it is a
|
// locked, hence we can store 0 as the displaced header in the box, which indicates that it
|
||||||
// recursive lock.
|
// is a recursive lock.
|
||||||
andr(tmp/*==0?*/, disp_hdr, tmp);
|
andr(tmp/*==0?*/, disp_hdr, tmp);
|
||||||
sd(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
|
sd(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
|
||||||
beqz(tmp, locked);
|
beqz(tmp, locked);
|
||||||
@ -115,12 +115,12 @@ void C2_MacroAssembler::fast_lock(Register objectReg, Register boxReg,
|
|||||||
|
|
||||||
// Handle existing monitor.
|
// Handle existing monitor.
|
||||||
bind(object_has_monitor);
|
bind(object_has_monitor);
|
||||||
// The object's monitor m is unlocked iff m->owner == nullptr,
|
|
||||||
// otherwise m->owner may contain a thread or a stack address.
|
// Try to CAS owner (no owner => current thread's _lock_id).
|
||||||
//
|
|
||||||
// Try to CAS m->owner from null to current thread.
|
|
||||||
add(tmp, disp_hdr, (in_bytes(ObjectMonitor::owner_offset()) - markWord::monitor_value));
|
add(tmp, disp_hdr, (in_bytes(ObjectMonitor::owner_offset()) - markWord::monitor_value));
|
||||||
cmpxchg(/*memory address*/tmp, /*expected value*/zr, /*new value*/xthread, Assembler::int64,
|
Register tid = tmp4Reg;
|
||||||
|
ld(tid, Address(xthread, JavaThread::lock_id_offset()));
|
||||||
|
cmpxchg(/*memory address*/tmp, /*expected value*/zr, /*new value*/tid, Assembler::int64,
|
||||||
Assembler::aq, Assembler::rl, /*result*/tmp3Reg); // cas succeeds if tmp3Reg == zr(expected)
|
Assembler::aq, Assembler::rl, /*result*/tmp3Reg); // cas succeeds if tmp3Reg == zr(expected)
|
||||||
|
|
||||||
// Store a non-null value into the box to avoid looking like a re-entrant
|
// Store a non-null value into the box to avoid looking like a re-entrant
|
||||||
@ -132,14 +132,16 @@ void C2_MacroAssembler::fast_lock(Register objectReg, Register boxReg,
|
|||||||
|
|
||||||
beqz(tmp3Reg, locked); // CAS success means locking succeeded
|
beqz(tmp3Reg, locked); // CAS success means locking succeeded
|
||||||
|
|
||||||
bne(tmp3Reg, xthread, slow_path); // Check for recursive locking
|
bne(tmp3Reg, tid, slow_path); // Check for recursive locking
|
||||||
|
|
||||||
// Recursive lock case
|
// Recursive lock case
|
||||||
increment(Address(disp_hdr, in_bytes(ObjectMonitor::recursions_offset()) - markWord::monitor_value), 1, tmp2Reg, tmp3Reg);
|
increment(Address(disp_hdr, in_bytes(ObjectMonitor::recursions_offset()) - markWord::monitor_value), 1, tmp2Reg, tmp3Reg);
|
||||||
|
|
||||||
bind(locked);
|
bind(locked);
|
||||||
mv(flag, zr);
|
mv(flag, zr);
|
||||||
increment(Address(xthread, JavaThread::held_monitor_count_offset()), 1, tmp2Reg, tmp3Reg);
|
if (LockingMode == LM_LEGACY) {
|
||||||
|
inc_held_monitor_count(t0);
|
||||||
|
}
|
||||||
|
|
||||||
#ifdef ASSERT
|
#ifdef ASSERT
|
||||||
// Check that locked label is reached with flag == 0.
|
// Check that locked label is reached with flag == 0.
|
||||||
@ -253,7 +255,9 @@ void C2_MacroAssembler::fast_unlock(Register objectReg, Register boxReg,
|
|||||||
|
|
||||||
bind(unlocked);
|
bind(unlocked);
|
||||||
mv(flag, zr);
|
mv(flag, zr);
|
||||||
decrement(Address(xthread, JavaThread::held_monitor_count_offset()), 1, tmp1Reg, tmp2Reg);
|
if (LockingMode == LM_LEGACY) {
|
||||||
|
dec_held_monitor_count(t0);
|
||||||
|
}
|
||||||
|
|
||||||
#ifdef ASSERT
|
#ifdef ASSERT
|
||||||
// Check that unlocked label is reached with flag == 0.
|
// Check that unlocked label is reached with flag == 0.
|
||||||
@ -273,12 +277,12 @@ void C2_MacroAssembler::fast_unlock(Register objectReg, Register boxReg,
|
|||||||
}
|
}
|
||||||
|
|
||||||
void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box,
|
void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box,
|
||||||
Register tmp1, Register tmp2, Register tmp3) {
|
Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
|
||||||
// Flag register, zero for success; non-zero for failure.
|
// Flag register, zero for success; non-zero for failure.
|
||||||
Register flag = t1;
|
Register flag = t1;
|
||||||
|
|
||||||
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
|
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
|
||||||
assert_different_registers(obj, box, tmp1, tmp2, tmp3, flag, t0);
|
assert_different_registers(obj, box, tmp1, tmp2, tmp3, tmp4, flag, t0);
|
||||||
|
|
||||||
mv(flag, 1);
|
mv(flag, 1);
|
||||||
|
|
||||||
@ -349,6 +353,7 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box,
|
|||||||
bind(inflated);
|
bind(inflated);
|
||||||
|
|
||||||
const Register tmp1_monitor = tmp1;
|
const Register tmp1_monitor = tmp1;
|
||||||
|
|
||||||
if (!UseObjectMonitorTable) {
|
if (!UseObjectMonitorTable) {
|
||||||
assert(tmp1_monitor == tmp1_mark, "should be the same here");
|
assert(tmp1_monitor == tmp1_mark, "should be the same here");
|
||||||
} else {
|
} else {
|
||||||
@ -395,13 +400,15 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box,
|
|||||||
// Compute owner address.
|
// Compute owner address.
|
||||||
la(tmp2_owner_addr, owner_address);
|
la(tmp2_owner_addr, owner_address);
|
||||||
|
|
||||||
// CAS owner (null => current thread).
|
// Try to CAS owner (no owner => current thread's _lock_id).
|
||||||
cmpxchg(/*addr*/ tmp2_owner_addr, /*expected*/ zr, /*new*/ xthread, Assembler::int64,
|
Register tid = tmp4;
|
||||||
|
ld(tid, Address(xthread, JavaThread::lock_id_offset()));
|
||||||
|
cmpxchg(/*addr*/ tmp2_owner_addr, /*expected*/ zr, /*new*/ tid, Assembler::int64,
|
||||||
/*acquire*/ Assembler::aq, /*release*/ Assembler::relaxed, /*result*/ tmp3_owner);
|
/*acquire*/ Assembler::aq, /*release*/ Assembler::relaxed, /*result*/ tmp3_owner);
|
||||||
beqz(tmp3_owner, monitor_locked);
|
beqz(tmp3_owner, monitor_locked);
|
||||||
|
|
||||||
// Check if recursive.
|
// Check if recursive.
|
||||||
bne(tmp3_owner, xthread, slow_path);
|
bne(tmp3_owner, tid, slow_path);
|
||||||
|
|
||||||
// Recursive.
|
// Recursive.
|
||||||
increment(recursions_address, 1, tmp2, tmp3);
|
increment(recursions_address, 1, tmp2, tmp3);
|
||||||
@ -414,7 +421,6 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box,
|
|||||||
|
|
||||||
bind(locked);
|
bind(locked);
|
||||||
mv(flag, zr);
|
mv(flag, zr);
|
||||||
increment(Address(xthread, JavaThread::held_monitor_count_offset()), 1, tmp2, tmp3);
|
|
||||||
|
|
||||||
#ifdef ASSERT
|
#ifdef ASSERT
|
||||||
// Check that locked label is reached with flag == 0.
|
// Check that locked label is reached with flag == 0.
|
||||||
@ -586,7 +592,6 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register box,
|
|||||||
|
|
||||||
bind(unlocked);
|
bind(unlocked);
|
||||||
mv(flag, zr);
|
mv(flag, zr);
|
||||||
decrement(Address(xthread, JavaThread::held_monitor_count_offset()), 1, tmp2, tmp3);
|
|
||||||
|
|
||||||
#ifdef ASSERT
|
#ifdef ASSERT
|
||||||
// Check that unlocked label is reached with flag == 0.
|
// Check that unlocked label is reached with flag == 0.
|
||||||
|
@ -44,11 +44,15 @@
|
|||||||
|
|
||||||
public:
|
public:
|
||||||
// Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
|
// Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
|
||||||
void fast_lock(Register object, Register box, Register tmp1, Register tmp2, Register tmp3);
|
void fast_lock(Register object, Register box,
|
||||||
|
Register tmp1, Register tmp2, Register tmp3, Register tmp4);
|
||||||
void fast_unlock(Register object, Register box, Register tmp1, Register tmp2);
|
void fast_unlock(Register object, Register box, Register tmp1, Register tmp2);
|
||||||
|
|
||||||
// Code used by cmpFastLockLightweight and cmpFastUnlockLightweight mach instructions in .ad file.
|
// Code used by cmpFastLockLightweight and cmpFastUnlockLightweight mach instructions in .ad file.
|
||||||
void fast_lock_lightweight(Register object, Register box, Register tmp1, Register tmp2, Register tmp3);
|
void fast_lock_lightweight(Register object, Register box,
|
||||||
void fast_unlock_lightweight(Register object, Register box, Register tmp1, Register tmp2, Register tmp3);
|
Register tmp1, Register tmp2, Register tmp3, Register tmp4);
|
||||||
|
void fast_unlock_lightweight(Register object, Register box,
|
||||||
|
Register tmp1, Register tmp2, Register tmp3);
|
||||||
|
|
||||||
void string_compare(Register str1, Register str2,
|
void string_compare(Register str1, Register str2,
|
||||||
Register cnt1, Register cnt2, Register result,
|
Register cnt1, Register cnt2, Register result,
|
||||||
|
@ -127,6 +127,11 @@ void FreezeBase::adjust_interpreted_frame_unextended_sp(frame& f) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
inline void FreezeBase::prepare_freeze_interpreted_top_frame(frame& f) {
|
||||||
|
assert(f.interpreter_frame_last_sp() == nullptr, "should be null for top frame");
|
||||||
|
f.interpreter_frame_set_last_sp(f.unextended_sp());
|
||||||
|
}
|
||||||
|
|
||||||
inline void FreezeBase::relativize_interpreted_frame_metadata(const frame& f, const frame& hf) {
|
inline void FreezeBase::relativize_interpreted_frame_metadata(const frame& f, const frame& hf) {
|
||||||
assert(hf.fp() == hf.unextended_sp() + (f.fp() - f.unextended_sp()), "");
|
assert(hf.fp() == hf.unextended_sp() + (f.fp() - f.unextended_sp()), "");
|
||||||
assert((f.at(frame::interpreter_frame_last_sp_offset) != 0)
|
assert((f.at(frame::interpreter_frame_last_sp_offset) != 0)
|
||||||
@ -147,10 +152,16 @@ inline void FreezeBase::relativize_interpreted_frame_metadata(const frame& f, co
|
|||||||
// extended_sp is already relativized by TemplateInterpreterGenerator::generate_normal_entry or
|
   // extended_sp is already relativized by TemplateInterpreterGenerator::generate_normal_entry or
   // AbstractInterpreter::layout_activation

+  // The interpreter native wrapper code adds space in the stack equal to size_of_parameters()
+  // after the fixed part of the frame. For wait0 this is equal to 3 words (this + long parameter).
+  // We adjust by this size since otherwise the saved last sp will be less than the extended_sp.
+  DEBUG_ONLY(Method* m = hf.interpreter_frame_method();)
+  DEBUG_ONLY(int extra_space = m->is_object_wait0() ? m->size_of_parameters() : 0;)

   assert((hf.fp() - hf.unextended_sp()) == (f.fp() - f.unextended_sp()), "");
   assert(hf.unextended_sp() == (intptr_t*)hf.at(frame::interpreter_frame_last_sp_offset), "");
   assert(hf.unextended_sp() <= (intptr_t*)hf.at(frame::interpreter_frame_initial_sp_offset), "");
-  assert(hf.unextended_sp() > (intptr_t*)hf.at(frame::interpreter_frame_extended_sp_offset), "");
+  assert(hf.unextended_sp() + extra_space > (intptr_t*)hf.at(frame::interpreter_frame_extended_sp_offset), "");
   assert(hf.fp() > (intptr_t*)hf.at(frame::interpreter_frame_initial_sp_offset), "");
 #ifdef ASSERT
   if (f.interpreter_frame_method()->max_locals() > 0) {
@ -203,7 +214,8 @@ inline void Thaw<ConfigT>::patch_caller_links(intptr_t* sp, intptr_t* bottom) {

 inline frame ThawBase::new_entry_frame() {
   intptr_t* sp = _cont.entrySP();
-  return frame(sp, sp, _cont.entryFP(), _cont.entryPC()); // TODO PERF: This finds code blob and computes deopt state
+  // TODO PERF: This finds code blob and computes deopt state
+  return frame(sp, sp, _cont.entryFP(), _cont.entryPC());
 }

 template<typename FKind> frame ThawBase::new_stack_frame(const frame& hf, frame& caller, bool bottom) {
@ -215,7 +227,6 @@ template<typename FKind> frame ThawBase::new_stack_frame(const frame& hf, frame&
   // If caller is interpreted it already made room for the callee arguments
   int overlap = caller.is_interpreted_frame() ? ContinuationHelper::InterpretedFrame::stack_argsize(hf) : 0;
   const int fsize = (int)(ContinuationHelper::InterpretedFrame::frame_bottom(hf) - hf.unextended_sp() - overlap);
-  const int locals = hf.interpreter_frame_method()->max_locals();
   intptr_t* frame_sp = caller.unextended_sp() - fsize;
   intptr_t* fp = frame_sp + (hf.fp() - heap_sp);
   if ((intptr_t)fp % frame::frame_alignment != 0) {
@ -237,7 +248,7 @@ template<typename FKind> frame ThawBase::new_stack_frame(const frame& hf, frame&
     int fsize = FKind::size(hf);
     intptr_t* frame_sp = caller.unextended_sp() - fsize;
     if (bottom || caller.is_interpreted_frame()) {
-      int argsize = hf.compiled_frame_stack_argsize();
+      int argsize = FKind::stack_argsize(hf);

       fsize += argsize;
       frame_sp -= argsize;
@ -252,13 +263,16 @@ template<typename FKind> frame ThawBase::new_stack_frame(const frame& hf, frame&
     intptr_t* fp;
     if (PreserveFramePointer) {
       // we need to recreate a "real" frame pointer, pointing into the stack
-      fp = frame_sp + FKind::size(hf) - 2;
+      fp = frame_sp + FKind::size(hf) - frame::sender_sp_offset;
     } else {
-      fp = FKind::stub
-        ? frame_sp + fsize - 2 // On RISCV, this value is used for the safepoint stub
-        : *(intptr_t**)(hf.sp() - 2); // we need to re-read fp because it may be an oop and we might have fixed the frame.
+      fp = FKind::stub || FKind::native
+        // fp always points to the address above the pushed return pc. We need correct address.
+        ? frame_sp + fsize - frame::sender_sp_offset
+        // we need to re-read fp because it may be an oop and we might have fixed the frame.
+        : *(intptr_t**)(hf.sp() - 2);
     }
-    return frame(frame_sp, frame_sp, fp, hf.pc(), hf.cb(), hf.oop_map(), false); // TODO PERF : this computes deopt state; is it necessary?
+    // TODO PERF : this computes deopt state; is it necessary?
+    return frame(frame_sp, frame_sp, fp, hf.pc(), hf.cb(), hf.oop_map(), false);
   }
 }

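The fp relocation above, fp = frame_sp + (hf.fp() - heap_sp), preserves the frame pointer's offset within the frame while rebasing it from the heap copy onto the thread stack. A minimal standalone sketch of that arithmetic, with made-up sizes and no HotSpot types:

// Minimal standalone model (not HotSpot code): relocating a frozen frame from
// its heap copy back onto the stack keeps the fp at the same offset from the
// frame's sp, which is exactly what new_stack_frame() computes above.
#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  std::vector<intptr_t> heap_copy(6, 0);          // pretend heap copy of a 6-slot frame
  intptr_t* heap_sp = heap_copy.data();
  intptr_t* hf_fp   = heap_sp + 4;                // fp sits 4 slots above the heap sp

  std::vector<intptr_t> stack(64, 0);             // simulated thread stack
  intptr_t* caller_unextended_sp = stack.data() + 32;
  int fsize = 6;
  intptr_t* frame_sp = caller_unextended_sp - fsize;

  intptr_t* fp = frame_sp + (hf_fp - heap_sp);    // same offset, new base
  assert(fp - frame_sp == hf_fp - heap_sp);
  return 0;
}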
@ -279,6 +293,22 @@ inline void ThawBase::patch_pd(frame& f, const frame& caller) {
   patch_callee_link(caller, caller.fp());
 }

+inline void ThawBase::patch_pd(frame& f, intptr_t* caller_sp) {
+  intptr_t* fp = caller_sp - frame::sender_sp_offset;
+  patch_callee_link(f, fp);
+}
+
+inline intptr_t* ThawBase::push_cleanup_continuation() {
+  frame enterSpecial = new_entry_frame();
+  intptr_t* sp = enterSpecial.sp();
+
+  sp[-1] = (intptr_t)ContinuationEntry::cleanup_pc();
+  sp[-2] = (intptr_t)enterSpecial.fp();
+
+  log_develop_trace(continuations, preempt)("push_cleanup_continuation initial sp: " INTPTR_FORMAT " final sp: " INTPTR_FORMAT, p2i(sp + 2 * frame::metadata_words), p2i(sp));
+  return sp;
+}
+
 inline void ThawBase::derelativize_interpreted_frame_metadata(const frame& hf, const frame& f) {
   // Make sure that last_sp is kept relativized.
   assert((intptr_t*)f.at_relative(frame::interpreter_frame_last_sp_offset) == f.unextended_sp(), "");
@ -286,8 +316,11 @@ inline void ThawBase::derelativize_interpreted_frame_metadata(const frame& hf, c
   // Make sure that monitor_block_top is still relativized.
   assert(f.at_absolute(frame::interpreter_frame_monitor_block_top_offset) <= frame::interpreter_frame_initial_sp_offset, "");

+  DEBUG_ONLY(Method* m = hf.interpreter_frame_method();)
+  DEBUG_ONLY(int extra_space = m->is_object_wait0() ? m->size_of_parameters() : 0;) // see comment in relativize_interpreted_frame_metadata()
+
   // Make sure that extended_sp is kept relativized.
-  assert((intptr_t*)f.at_relative(frame::interpreter_frame_extended_sp_offset) < f.unextended_sp(), "");
+  assert((intptr_t*)f.at_relative(frame::interpreter_frame_extended_sp_offset) < f.unextended_sp() + extra_space, "");
 }

 #endif // CPU_RISCV_CONTINUATIONFREEZETHAW_RISCV_INLINE_HPP
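push_cleanup_continuation() seeds two words below the enterSpecial frame so that a later return lands on the cleanup pc with the entry fp restored. A toy model of that two-word layout on a downward-growing stack (addresses and names here are illustrative, not HotSpot's):

// Standalone sketch (illustrative only): a return pc and a saved fp are written
// just below the current sp, so a subsequent epilogue that pops fp and returns
// consumes exactly these two metadata words and lands in the cleanup routine.
#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  std::vector<intptr_t> stack(32, 0);
  intptr_t* sp = stack.data() + 16;      // sp of the simulated enterSpecial frame
  intptr_t cleanup_pc = 0x1000;          // hypothetical cleanup stub address
  intptr_t enter_fp   = 0x2000;          // hypothetical saved fp of enterSpecial

  sp[-1] = cleanup_pc;                   // return address slot
  sp[-2] = enter_fp;                     // saved frame pointer slot

  assert(sp[-1] == cleanup_pc && sp[-2] == enter_fp);
  return 0;
}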
@ -40,6 +40,22 @@ static inline intptr_t** link_address(const frame& f) {
     : (intptr_t**)(f.unextended_sp() + f.cb()->frame_size() - 2);
 }

+static inline void patch_return_pc_with_preempt_stub(frame& f) {
+  if (f.is_runtime_frame()) {
+    // Unlike x86 we don't know where in the callee frame the return pc is
+    // saved so we can't patch the return from the VM call back to Java.
+    // Instead, we will patch the return from the runtime stub back to the
+    // compiled method so that the target returns to the preempt cleanup stub.
+    intptr_t* caller_sp = f.sp() + f.cb()->frame_size();
+    caller_sp[-1] = (intptr_t)StubRoutines::cont_preempt_stub();
+  } else {
+    // The target will check for preemption once it returns to the interpreter
+    // or the native wrapper code and will manually jump to the preempt stub.
+    JavaThread *thread = JavaThread::current();
+    thread->set_preempt_alternate_return(StubRoutines::cont_preempt_stub());
+  }
+}
+
 inline int ContinuationHelper::frame_align_words(int size) {
 #ifdef _LP64
   return size & 1;
@ -72,12 +88,12 @@ inline void ContinuationHelper::set_anchor_to_entry_pd(JavaFrameAnchor* anchor,
   anchor->set_last_Java_fp(entry->entry_fp());
 }

-#ifdef ASSERT
 inline void ContinuationHelper::set_anchor_pd(JavaFrameAnchor* anchor, intptr_t* sp) {
   intptr_t* fp = *(intptr_t**)(sp - 2);
   anchor->set_last_Java_fp(fp);
 }

+#ifdef ASSERT
 inline bool ContinuationHelper::Frame::assert_frame_laid_out(frame f) {
   intptr_t* sp = f.sp();
   address pc = *(address*)(sp - frame::sender_sp_ret_address_offset());
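For non-runtime frames the preemption redirect is not patched into the stack at all; it is parked in a per-thread slot that the caller checks after the VM call returns. A hedged, standalone model of that protocol (the struct and function names are invented for illustration, not HotSpot APIs):

// Illustrative model only: instead of rewriting a return pc in the frame, the
// runtime stores a stub address in a per-thread slot; the code that issued the
// call checks the slot after returning, clears it, and diverts to the stub.
#include <cassert>
#include <cstdio>

struct ThreadModel {
  void (*preempt_alternate_return)() = nullptr;   // normally cleared
};

static bool diverted = false;
static void preempt_stub() { diverted = true; }   // stands in for cont_preempt_stub()

static void vm_call(ThreadModel* t, bool preempted) {
  if (preempted) t->preempt_alternate_return = preempt_stub;
}

int main() {
  ThreadModel t;
  vm_call(&t, /*preempted=*/true);
  if (auto target = t.preempt_alternate_return) {  // check the slot
    t.preempt_alternate_return = nullptr;          // clear it
    target();                                      // "jump" to the preempt stub
  }
  assert(diverted);
  std::puts("diverted to preempt stub");
  return 0;
}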
@ -393,6 +393,36 @@ frame frame::sender_for_upcall_stub_frame(RegisterMap* map) const {
|
|||||||
return fr;
|
return fr;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#if defined(ASSERT)
|
||||||
|
static address get_register_address_in_stub(const frame& stub_fr, VMReg reg) {
|
||||||
|
RegisterMap map(nullptr,
|
||||||
|
RegisterMap::UpdateMap::include,
|
||||||
|
RegisterMap::ProcessFrames::skip,
|
||||||
|
RegisterMap::WalkContinuation::skip);
|
||||||
|
stub_fr.oop_map()->update_register_map(&stub_fr, &map);
|
||||||
|
return map.location(reg, stub_fr.sp());
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
JavaThread** frame::saved_thread_address(const frame& f) {
|
||||||
|
CodeBlob* cb = f.cb();
|
||||||
|
assert(cb != nullptr && cb->is_runtime_stub(), "invalid frame");
|
||||||
|
|
||||||
|
JavaThread** thread_addr;
|
||||||
|
#ifdef COMPILER1
|
||||||
|
if (cb == Runtime1::blob_for(C1StubId::monitorenter_id) ||
|
||||||
|
cb == Runtime1::blob_for(C1StubId::monitorenter_nofpu_id)) {
|
||||||
|
thread_addr = (JavaThread**)(f.sp() + Runtime1::runtime_blob_current_thread_offset(f));
|
||||||
|
} else
|
||||||
|
#endif
|
||||||
|
{
|
||||||
|
// c2 only saves rbp in the stub frame so nothing to do.
|
||||||
|
thread_addr = nullptr;
|
||||||
|
}
|
||||||
|
assert(get_register_address_in_stub(f, SharedRuntime::thread_register()) == (address)thread_addr, "wrong thread address");
|
||||||
|
return thread_addr;
|
||||||
|
}
|
||||||
|
|
||||||
//------------------------------------------------------------------------------
|
//------------------------------------------------------------------------------
|
||||||
// frame::verify_deopt_original_pc
|
// frame::verify_deopt_original_pc
|
||||||
//
|
//
|
||||||
|
@ -111,7 +111,8 @@
   sender_sp_offset = 0,

   // Interpreter frames
-  interpreter_frame_oop_temp_offset = 1, // for native calls only
+  interpreter_frame_result_handler_offset = 1, // for native calls only
+  interpreter_frame_oop_temp_offset = 0, // for native calls only

   interpreter_frame_sender_sp_offset = -3,
   // outgoing sp before a call to an invoked method
@ -721,7 +721,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
 {
   assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");
   if (LockingMode == LM_MONITOR) {
-    call_VM(noreg,
+    call_VM_preemptable(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
             lock_reg);
   } else {
@ -752,7 +752,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)

     if (LockingMode == LM_LIGHTWEIGHT) {
       lightweight_lock(lock_reg, obj_reg, tmp, tmp2, tmp3, slow_case);
-      j(count);
+      j(done);
     } else if (LockingMode == LM_LEGACY) {
       // Load (object->mark() | 1) into swap_reg
       ld(t0, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
@ -781,19 +781,19 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)

       // Save the test result, for recursive case, the result is zero
       sd(swap_reg, Address(lock_reg, mark_offset));
-      beqz(swap_reg, count);
+      bnez(swap_reg, slow_case);

+      bind(count);
+      inc_held_monitor_count(t0);
+      j(done);
     }

     bind(slow_case);

     // Call the runtime routine for slow case
-    call_VM(noreg,
+    call_VM_preemptable(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
             lock_reg);
-    j(done);

-    bind(count);
-    increment(Address(xthread, JavaThread::held_monitor_count_offset()));

     bind(done);
   }
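The reshaped fast path above does a single CAS and, only on failure, falls into the slow call, which under JEP 491 may now freeze and unmount the virtual thread rather than pin it. A rough sketch of that fast/slow split using std::atomic (not the real HotSpot locking code; the header word and lock-record values are placeholders):

// Hedged sketch: try one compare-and-swap on the header word; if it fails,
// fall through to a slow path that may block (and, with this change, may
// unmount the carrier instead of pinning the virtual thread).
#include <atomic>
#include <cassert>
#include <cstdint>

static std::atomic<intptr_t> header{0};        // 0 == unlocked, models the mark word

static bool try_fast_lock(intptr_t lock_rec) {
  intptr_t expected = 0;
  return header.compare_exchange_strong(expected, lock_rec);   // cmpxchg fast path
}

static void slow_lock(intptr_t lock_rec) {
  // Placeholder for the monitorenter runtime call; a real slow path can block.
  intptr_t expected = 0;
  while (!header.compare_exchange_weak(expected, lock_rec)) expected = 0;
}

int main() {
  if (!try_fast_lock(0x42)) slow_lock(0x42);   // j(done) vs. bind(slow_case)
  assert(header.load() == 0x42);
  return 0;
}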
@ -839,12 +839,10 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
     // Free entry
     sd(zr, Address(lock_reg, BasicObjectLock::obj_offset()));

-    if (LockingMode == LM_LIGHTWEIGHT) {
     Label slow_case;
+    if (LockingMode == LM_LIGHTWEIGHT) {
       lightweight_unlock(obj_reg, header_reg, swap_reg, tmp_reg, slow_case);
-      j(count);
+      j(done);

-      bind(slow_case);
     } else if (LockingMode == LM_LEGACY) {
       // Load the old header from BasicLock structure
       ld(header_reg, Address(swap_reg,
@ -854,20 +852,19 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
       beqz(header_reg, count);

       // Atomic swap back the old header
-      cmpxchg_obj_header(swap_reg, header_reg, obj_reg, tmp_reg, count, /*fallthrough*/nullptr);
+      cmpxchg_obj_header(swap_reg, header_reg, obj_reg, tmp_reg, count, &slow_case);
+
+      bind(count);
+      dec_held_monitor_count(t0);
+      j(done);
     }

+    bind(slow_case);
     // Call the runtime routine for slow case.
     sd(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset())); // restore obj
     call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);

-    j(done);

-    bind(count);
-    decrement(Address(xthread, JavaThread::held_monitor_count_offset()));

     bind(done);

     restore_bcp();
   }
 }
@ -1575,6 +1572,55 @@ void InterpreterMacroAssembler::call_VM_base(Register oop_result,
   restore_locals();
 }

+void InterpreterMacroAssembler::call_VM_preemptable(Register oop_result,
+                                                    address entry_point,
+                                                    Register arg_1) {
+  assert(arg_1 == c_rarg1, "");
+  Label resume_pc, not_preempted;
+
+#ifdef ASSERT
+  {
+    Label L;
+    ld(t0, Address(xthread, JavaThread::preempt_alternate_return_offset()));
+    beqz(t0, L);
+    stop("Should not have alternate return address set");
+    bind(L);
+  }
+#endif /* ASSERT */
+
+  // Force freeze slow path.
+  push_cont_fastpath();
+
+  // Make VM call. In case of preemption set last_pc to the one we want to resume to.
+  la(t0, resume_pc);
+  sd(t0, Address(xthread, JavaThread::last_Java_pc_offset()));
+  call_VM_base(oop_result, noreg, noreg, entry_point, 1, false /*check_exceptions*/);
+
+  pop_cont_fastpath();
+
+  // Check if preempted.
+  ld(t1, Address(xthread, JavaThread::preempt_alternate_return_offset()));
+  beqz(t1, not_preempted);
+  sd(zr, Address(xthread, JavaThread::preempt_alternate_return_offset()));
+  jr(t1);
+
+  // In case of preemption, this is where we will resume once we finally acquire the monitor.
+  bind(resume_pc);
+  restore_after_resume(false /* is_native */);
+
+  bind(not_preempted);
+}
+
+void InterpreterMacroAssembler::restore_after_resume(bool is_native) {
+  la(t1, ExternalAddress(Interpreter::cont_resume_interpreter_adapter()));
+  jalr(t1);
+  if (is_native) {
+    // On resume we need to set up stack as expected
+    push(dtos);
+    push(ltos);
+  }
+}
+
 void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr, Register tmp) {
   assert_different_registers(obj, tmp, t0, mdo_addr.base());
   Label update, next, none;
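call_VM_preemptable() records resume_pc as the last Java pc before the call, so that if the thread is preempted while blocked on the monitor it can later continue from exactly that point. As a loose analogy only (HotSpot does this by freezing and thawing frames, not with setjmp), the control flow looks like:

// Rough analogy, not how HotSpot implements it: record a resume point before
// a call that may preempt; if preempted, execution later continues at that
// recorded point once the monitor has been acquired.
#include <csetjmp>
#include <cstdio>

static std::jmp_buf resume_pc;

static void monitorenter_that_preempts() {
  // The monitor is contended: pretend the thread was unmounted and later
  // resumed at the recorded point once the monitor was acquired.
  std::longjmp(resume_pc, 1);
}

int main() {
  if (setjmp(resume_pc) == 0) {        // la(t0, resume_pc); sd(t0, last_Java_pc)
    monitorenter_that_preempts();      // call_VM_base(...)
    std::puts("not preempted");
  } else {
    std::puts("resumed after acquiring the monitor");  // bind(resume_pc)
  }
  return 0;
}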
@ -59,6 +59,11 @@ class InterpreterMacroAssembler: public MacroAssembler {

   void load_earlyret_value(TosState state);

+  void call_VM_preemptable(Register oop_result,
+                           address entry_point,
+                           Register arg_1);
+  void restore_after_resume(bool is_native);
+
   void jump_to_entry(address entry);

   virtual void check_and_handle_popframe(Register java_thread);
@ -36,6 +36,7 @@
 #include "gc/shared/collectedHeap.hpp"
 #include "interpreter/bytecodeHistogram.hpp"
 #include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterRuntime.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
 #include "oops/accessDecorators.hpp"
@ -226,6 +227,36 @@ void MacroAssembler::pop_cont_fastpath(Register java_thread) {
   bind(done);
 }

+void MacroAssembler::inc_held_monitor_count(Register tmp) {
+  Address dst(xthread, JavaThread::held_monitor_count_offset());
+  ld(tmp, dst);
+  addi(tmp, tmp, 1);
+  sd(tmp, dst);
+#ifdef ASSERT
+  Label ok;
+  test_bit(tmp, tmp, 63);
+  beqz(tmp, ok);
+  STOP("assert(held monitor count overflow)");
+  should_not_reach_here();
+  bind(ok);
+#endif
+}
+
+void MacroAssembler::dec_held_monitor_count(Register tmp) {
+  Address dst(xthread, JavaThread::held_monitor_count_offset());
+  ld(tmp, dst);
+  addi(tmp, tmp, -1);
+  sd(tmp, dst);
+#ifdef ASSERT
+  Label ok;
+  test_bit(tmp, tmp, 63);
+  beqz(tmp, ok);
+  STOP("assert(held monitor count underflow)");
+  should_not_reach_here();
+  bind(ok);
+#endif
+}
+
 int MacroAssembler::align(int modulus, int extra_offset) {
   CompressibleRegion cr(this);
   intptr_t before = offset();
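inc_held_monitor_count and dec_held_monitor_count keep a per-thread tally of monitors held; the debug-only test of bit 63 is simply a became-negative check on the 64-bit counter. A minimal model of the invariant being asserted (illustrative, not HotSpot code):

// Minimal model: a per-thread counter of held monitors that must never go
// negative. Checking bit 63 of the new 64-bit value is equivalent to this
// "count >= 0" assertion.
#include <cassert>
#include <cstdint>

struct ThreadModel {
  int64_t held_monitor_count = 0;
  void inc() { ++held_monitor_count; assert(held_monitor_count >= 0 && "overflow"); }
  void dec() { --held_monitor_count; assert(held_monitor_count >= 0 && "underflow"); }
};

int main() {
  ThreadModel t;
  t.inc();          // monitorenter fast path
  t.inc();          // nested lock
  t.dec();          // monitorexit
  t.dec();
  assert(t.held_monitor_count == 0);   // a thread holding no monitors can unmount freely
  return 0;
}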
@ -407,6 +438,10 @@ void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
   sd(zr, Address(xthread, JavaThread::last_Java_pc_offset()));
 }

+static bool is_preemptable(address entry_point) {
+  return entry_point == CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter);
+}
+
 void MacroAssembler::call_VM_base(Register oop_result,
                                   Register java_thread,
                                   Register last_java_sp,
@ -436,7 +471,12 @@ void MacroAssembler::call_VM_base(Register oop_result,
   assert(last_java_sp != fp, "can't use fp");

   Label l;
+  if (is_preemptable(entry_point)) {
+    // skip setting last_pc since we already set it to desired value.
+    set_last_Java_frame(last_java_sp, fp, noreg);
+  } else {
     set_last_Java_frame(last_java_sp, fp, l, t0);
+  }

   // do the call, remove parameters
   MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments, &l);
@ -812,8 +812,11 @@ public:
   void push_CPU_state(bool save_vectors = false, int vector_size_in_bytes = 0);
   void pop_CPU_state(bool restore_vectors = false, int vector_size_in_bytes = 0);

-  void push_cont_fastpath(Register java_thread);
-  void pop_cont_fastpath(Register java_thread);
+  void push_cont_fastpath(Register java_thread = xthread);
+  void pop_cont_fastpath(Register java_thread = xthread);

+  void inc_held_monitor_count(Register tmp);
+  void dec_held_monitor_count(Register tmp);

   // if heap base register is used - reinit it with the correct value
   void reinit_heapbase();
@ -1259,11 +1259,10 @@ int MachCallRuntimeNode::ret_addr_offset() {
|
|||||||
// jal(addr)
|
// jal(addr)
|
||||||
// or with far branches
|
// or with far branches
|
||||||
// jal(trampoline_stub)
|
// jal(trampoline_stub)
|
||||||
// for real runtime callouts it will be 9 instructions
|
// for real runtime callouts it will be 8 instructions
|
||||||
// see riscv_enc_java_to_runtime
|
// see riscv_enc_java_to_runtime
|
||||||
// la(t0, retaddr) -> auipc + addi
|
// la(t0, retaddr) -> auipc + addi
|
||||||
// addi(sp, sp, -2 * wordSize) -> addi
|
// sd(t0, Address(xthread, JavaThread::last_Java_pc_offset())) -> sd
|
||||||
// sd(t0, Address(sp, wordSize)) -> sd
|
|
||||||
// movptr(t1, addr, offset, t0) -> lui + lui + slli + add
|
// movptr(t1, addr, offset, t0) -> lui + lui + slli + add
|
||||||
// jalr(t1, offset) -> jalr
|
// jalr(t1, offset) -> jalr
|
||||||
if (CodeCache::contains(_entry_point)) {
|
if (CodeCache::contains(_entry_point)) {
|
||||||
@ -1272,7 +1271,7 @@ int MachCallRuntimeNode::ret_addr_offset() {
|
|||||||
}
|
}
|
||||||
return 3 * NativeInstruction::instruction_size;
|
return 3 * NativeInstruction::instruction_size;
|
||||||
} else {
|
} else {
|
||||||
return 9 * NativeInstruction::instruction_size;
|
return 8 * NativeInstruction::instruction_size;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2502,17 +2501,15 @@ encode %{
|
|||||||
__ post_call_nop();
|
__ post_call_nop();
|
||||||
} else {
|
} else {
|
||||||
Label retaddr;
|
Label retaddr;
|
||||||
|
// Make the anchor frame walkable
|
||||||
__ la(t0, retaddr);
|
__ la(t0, retaddr);
|
||||||
// Leave a breadcrumb for JavaFrameAnchor::capture_last_Java_pc()
|
__ sd(t0, Address(xthread, JavaThread::last_Java_pc_offset()));
|
||||||
__ addi(sp, sp, -2 * wordSize);
|
|
||||||
__ sd(t0, Address(sp, wordSize));
|
|
||||||
int32_t offset = 0;
|
int32_t offset = 0;
|
||||||
// No relocation needed
|
// No relocation needed
|
||||||
__ movptr(t1, entry, offset, t0); // lui + lui + slli + add
|
__ movptr(t1, entry, offset, t0); // lui + lui + slli + add
|
||||||
__ jalr(t1, offset);
|
__ jalr(t1, offset);
|
||||||
__ bind(retaddr);
|
__ bind(retaddr);
|
||||||
__ post_call_nop();
|
__ post_call_nop();
|
||||||
__ addi(sp, sp, 2 * wordSize);
|
|
||||||
}
|
}
|
||||||
%}
|
%}
|
||||||
|
|
||||||
@ -10539,17 +10536,19 @@ instruct tlsLoadP(javaThread_RegP dst)
|
|||||||
|
|
||||||
// inlined locking and unlocking
|
// inlined locking and unlocking
|
||||||
// using t1 as the 'flag' register to bridge the BoolNode producers and consumers
|
// using t1 as the 'flag' register to bridge the BoolNode producers and consumers
|
||||||
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3)
|
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box,
|
||||||
|
iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegPNoSp tmp4)
|
||||||
%{
|
%{
|
||||||
predicate(LockingMode != LM_LIGHTWEIGHT);
|
predicate(LockingMode != LM_LIGHTWEIGHT);
|
||||||
match(Set cr (FastLock object box));
|
match(Set cr (FastLock object box));
|
||||||
effect(TEMP tmp1, TEMP tmp2, TEMP tmp3);
|
effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4);
|
||||||
|
|
||||||
ins_cost(10 * DEFAULT_COST);
|
ins_cost(10 * DEFAULT_COST);
|
||||||
format %{ "fastlock $object,$box\t! kills $tmp1,$tmp2,$tmp3, #@cmpFastLock" %}
|
format %{ "fastlock $object,$box\t! kills $tmp1,$tmp2,$tmp3,$tmp4 #@cmpFastLock" %}
|
||||||
|
|
||||||
ins_encode %{
|
ins_encode %{
|
||||||
__ fast_lock($object$$Register, $box$$Register, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
|
__ fast_lock($object$$Register, $box$$Register,
|
||||||
|
$tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register);
|
||||||
%}
|
%}
|
||||||
|
|
||||||
ins_pipe(pipe_serial);
|
ins_pipe(pipe_serial);
|
||||||
@ -10572,23 +10571,26 @@ instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp1, iR
|
|||||||
ins_pipe(pipe_serial);
|
ins_pipe(pipe_serial);
|
||||||
%}
|
%}
|
||||||
|
|
||||||
instruct cmpFastLockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3)
|
instruct cmpFastLockLightweight(rFlagsReg cr, iRegP object, iRegP box,
|
||||||
|
iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegPNoSp tmp4)
|
||||||
%{
|
%{
|
||||||
predicate(LockingMode == LM_LIGHTWEIGHT);
|
predicate(LockingMode == LM_LIGHTWEIGHT);
|
||||||
match(Set cr (FastLock object box));
|
match(Set cr (FastLock object box));
|
||||||
effect(TEMP tmp1, TEMP tmp2, TEMP tmp3);
|
effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4);
|
||||||
|
|
||||||
ins_cost(10 * DEFAULT_COST);
|
ins_cost(10 * DEFAULT_COST);
|
||||||
format %{ "fastlock $object,$box\t! kills $tmp1,$tmp2,$tmp3 #@cmpFastLockLightweight" %}
|
format %{ "fastlock $object,$box\t! kills $tmp1,$tmp2,$tmp3,$tmp4 #@cmpFastLockLightweight" %}
|
||||||
|
|
||||||
ins_encode %{
|
ins_encode %{
|
||||||
__ fast_lock_lightweight($object$$Register, $box$$Register, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
|
__ fast_lock_lightweight($object$$Register, $box$$Register,
|
||||||
|
$tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register);
|
||||||
%}
|
%}
|
||||||
|
|
||||||
ins_pipe(pipe_serial);
|
ins_pipe(pipe_serial);
|
||||||
%}
|
%}
|
||||||
|
|
||||||
instruct cmpFastUnlockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3)
|
instruct cmpFastUnlockLightweight(rFlagsReg cr, iRegP object, iRegP box,
|
||||||
|
iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3)
|
||||||
%{
|
%{
|
||||||
predicate(LockingMode == LM_LIGHTWEIGHT);
|
predicate(LockingMode == LM_LIGHTWEIGHT);
|
||||||
match(Set cr (FastUnlock object box));
|
match(Set cr (FastUnlock object box));
|
||||||
@ -10598,7 +10600,8 @@ instruct cmpFastUnlockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNo
|
|||||||
format %{ "fastunlock $object,$box\t! kills $tmp1,$tmp2,$tmp3 #@cmpFastUnlockLightweight" %}
|
format %{ "fastunlock $object,$box\t! kills $tmp1,$tmp2,$tmp3 #@cmpFastUnlockLightweight" %}
|
||||||
|
|
||||||
ins_encode %{
|
ins_encode %{
|
||||||
__ fast_unlock_lightweight($object$$Register, $box$$Register, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
|
__ fast_unlock_lightweight($object$$Register, $box$$Register,
|
||||||
|
$tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
|
||||||
%}
|
%}
|
||||||
|
|
||||||
ins_pipe(pipe_serial);
|
ins_pipe(pipe_serial);
|
||||||
|
@ -1050,12 +1050,14 @@ static void gen_continuation_enter(MacroAssembler* masm,
|
|||||||
|
|
||||||
__ bind(call_thaw);
|
__ bind(call_thaw);
|
||||||
|
|
||||||
|
ContinuationEntry::_thaw_call_pc_offset = __ pc() - start;
|
||||||
__ rt_call(CAST_FROM_FN_PTR(address, StubRoutines::cont_thaw()));
|
__ rt_call(CAST_FROM_FN_PTR(address, StubRoutines::cont_thaw()));
|
||||||
oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
|
oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
|
||||||
ContinuationEntry::_return_pc_offset = __ pc() - start;
|
ContinuationEntry::_return_pc_offset = __ pc() - start;
|
||||||
__ post_call_nop();
|
__ post_call_nop();
|
||||||
|
|
||||||
__ bind(exit);
|
__ bind(exit);
|
||||||
|
ContinuationEntry::_cleanup_offset = __ pc() - start;
|
||||||
continuation_enter_cleanup(masm);
|
continuation_enter_cleanup(masm);
|
||||||
__ leave();
|
__ leave();
|
||||||
__ ret();
|
__ ret();
|
||||||
@ -1149,6 +1151,10 @@ static void gen_continuation_yield(MacroAssembler* masm,
|
|||||||
oop_maps->add_gc_map(the_pc - start, map);
|
oop_maps->add_gc_map(the_pc - start, map);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void SharedRuntime::continuation_enter_cleanup(MacroAssembler* masm) {
|
||||||
|
::continuation_enter_cleanup(masm);
|
||||||
|
}
|
||||||
|
|
||||||
static void gen_special_dispatch(MacroAssembler* masm,
|
static void gen_special_dispatch(MacroAssembler* masm,
|
||||||
const methodHandle& method,
|
const methodHandle& method,
|
||||||
const BasicType* sig_bt,
|
const BasicType* sig_bt,
|
||||||
@ -1629,11 +1635,20 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Change state to native (we save the return address in the thread, since it might not
|
// Change state to native (we save the return address in the thread, since it might not
|
||||||
// be pushed on the stack when we do a stack traversal).
|
// be pushed on the stack when we do a stack traversal). It is enough that the pc()
|
||||||
// We use the same pc/oopMap repeatedly when we call out
|
// points into the right code segment. It does not have to be the correct return pc.
|
||||||
|
// We use the same pc/oopMap repeatedly when we call out.
|
||||||
|
|
||||||
Label native_return;
|
Label native_return;
|
||||||
|
if (LockingMode != LM_LEGACY && method->is_object_wait0()) {
|
||||||
|
// For convenience we use the pc we want to resume to in case of preemption on Object.wait.
|
||||||
__ set_last_Java_frame(sp, noreg, native_return, t0);
|
__ set_last_Java_frame(sp, noreg, native_return, t0);
|
||||||
|
} else {
|
||||||
|
intptr_t the_pc = (intptr_t) __ pc();
|
||||||
|
oop_maps->add_gc_map(the_pc - start, map);
|
||||||
|
|
||||||
|
__ set_last_Java_frame(sp, noreg, __ pc(), t0);
|
||||||
|
}
|
||||||
|
|
||||||
Label dtrace_method_entry, dtrace_method_entry_done;
|
Label dtrace_method_entry, dtrace_method_entry_done;
|
||||||
if (DTraceMethodProbes) {
|
if (DTraceMethodProbes) {
|
||||||
@ -1709,14 +1724,14 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
|
|||||||
// Save the test result, for recursive case, the result is zero
|
// Save the test result, for recursive case, the result is zero
|
||||||
__ sd(swap_reg, Address(lock_reg, mark_word_offset));
|
__ sd(swap_reg, Address(lock_reg, mark_word_offset));
|
||||||
__ bnez(swap_reg, slow_path_lock);
|
__ bnez(swap_reg, slow_path_lock);
|
||||||
|
|
||||||
|
__ bind(count);
|
||||||
|
__ inc_held_monitor_count(t0);
|
||||||
} else {
|
} else {
|
||||||
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
|
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
|
||||||
__ lightweight_lock(lock_reg, obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
|
__ lightweight_lock(lock_reg, obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
__ bind(count);
|
|
||||||
__ increment(Address(xthread, JavaThread::held_monitor_count_offset()));
|
|
||||||
|
|
||||||
// Slow path will re-enter here
|
// Slow path will re-enter here
|
||||||
__ bind(lock_done);
|
__ bind(lock_done);
|
||||||
}
|
}
|
||||||
@ -1736,11 +1751,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
|
|||||||
// Clobbers t1
|
// Clobbers t1
|
||||||
__ rt_call(native_func);
|
__ rt_call(native_func);
|
||||||
|
|
||||||
__ bind(native_return);
|
|
||||||
|
|
||||||
intptr_t return_pc = (intptr_t) __ pc();
|
|
||||||
oop_maps->add_gc_map(return_pc - start, map);
|
|
||||||
|
|
||||||
// Verify or restore cpu control state after JNI call
|
// Verify or restore cpu control state after JNI call
|
||||||
__ restore_cpu_control_state_after_jni(t0);
|
__ restore_cpu_control_state_after_jni(t0);
|
||||||
|
|
||||||
@ -1791,6 +1801,18 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
|
|||||||
__ sw(t0, Address(t1));
|
__ sw(t0, Address(t1));
|
||||||
__ bind(after_transition);
|
__ bind(after_transition);
|
||||||
|
|
||||||
|
if (LockingMode != LM_LEGACY && method->is_object_wait0()) {
|
||||||
|
// Check preemption for Object.wait()
|
||||||
|
__ ld(t1, Address(xthread, JavaThread::preempt_alternate_return_offset()));
|
||||||
|
__ beqz(t1, native_return);
|
||||||
|
__ sd(zr, Address(xthread, JavaThread::preempt_alternate_return_offset()));
|
||||||
|
__ jr(t1);
|
||||||
|
__ bind(native_return);
|
||||||
|
|
||||||
|
intptr_t the_pc = (intptr_t) __ pc();
|
||||||
|
oop_maps->add_gc_map(the_pc - start, map);
|
||||||
|
}
|
||||||
|
|
||||||
Label reguard;
|
Label reguard;
|
||||||
Label reguard_done;
|
Label reguard_done;
|
||||||
__ lbu(t0, Address(xthread, JavaThread::stack_guard_state_offset()));
|
__ lbu(t0, Address(xthread, JavaThread::stack_guard_state_offset()));
|
||||||
@ -1814,7 +1836,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
|
|||||||
// Simple recursive lock?
|
// Simple recursive lock?
|
||||||
__ ld(t0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
|
__ ld(t0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
|
||||||
__ bnez(t0, not_recursive);
|
__ bnez(t0, not_recursive);
|
||||||
__ decrement(Address(xthread, JavaThread::held_monitor_count_offset()));
|
__ dec_held_monitor_count(t0);
|
||||||
__ j(done);
|
__ j(done);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1837,11 +1859,10 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
|
|||||||
Label count;
|
Label count;
|
||||||
__ cmpxchg_obj_header(x10, old_hdr, obj_reg, lock_tmp, count, &slow_path_unlock);
|
__ cmpxchg_obj_header(x10, old_hdr, obj_reg, lock_tmp, count, &slow_path_unlock);
|
||||||
__ bind(count);
|
__ bind(count);
|
||||||
__ decrement(Address(xthread, JavaThread::held_monitor_count_offset()));
|
__ dec_held_monitor_count(t0);
|
||||||
} else {
|
} else {
|
||||||
assert(LockingMode == LM_LIGHTWEIGHT, "");
|
assert(LockingMode == LM_LIGHTWEIGHT, "");
|
||||||
__ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
|
__ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
|
||||||
__ decrement(Address(xthread, JavaThread::held_monitor_count_offset()));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// slow path re-enters here
|
// slow path re-enters here
|
||||||
@ -1909,8 +1930,14 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
|
|||||||
__ mv(c_rarg1, lock_reg);
|
__ mv(c_rarg1, lock_reg);
|
||||||
__ mv(c_rarg2, xthread);
|
__ mv(c_rarg2, xthread);
|
||||||
|
|
||||||
// Not a leaf but we have last_Java_frame setup as we want
|
// Not a leaf but we have last_Java_frame setup as we want.
|
||||||
|
// We don't want to unmount in case of contention since that would complicate preserving
|
||||||
|
// the arguments that had already been marshalled into the native convention. So we force
|
||||||
|
// the freeze slow path to find this native wrapper frame (see recurse_freeze_native_frame())
|
||||||
|
// and pin the vthread. Otherwise the fast path won't find it since we don't walk the stack.
|
||||||
|
__ push_cont_fastpath();
|
||||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
|
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
|
||||||
|
__ pop_cont_fastpath();
|
||||||
restore_args(masm, total_c_args, c_arg, out_regs);
|
restore_args(masm, total_c_args, c_arg, out_regs);
|
||||||
|
|
||||||
#ifdef ASSERT
|
#ifdef ASSERT
|
||||||
@ -2441,6 +2468,10 @@ uint SharedRuntime::out_preserve_stack_slots() {
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
VMReg SharedRuntime::thread_register() {
|
||||||
|
return xthread->as_VMReg();
|
||||||
|
}
|
||||||
|
|
||||||
//------------------------------generate_handler_blob------
|
//------------------------------generate_handler_blob------
|
||||||
//
|
//
|
||||||
// Generate a special Compile2Runtime blob that saves all registers,
|
// Generate a special Compile2Runtime blob that saves all registers,
|
||||||
|
@ -114,6 +114,7 @@ inline int StackChunkFrameStream<frame_kind>::interpreter_frame_num_oops() const
|
|||||||
f.interpreted_frame_oop_map(&mask);
|
f.interpreted_frame_oop_map(&mask);
|
||||||
return mask.num_oops()
|
return mask.num_oops()
|
||||||
+ 1 // for the mirror oop
|
+ 1 // for the mirror oop
|
||||||
|
+ (f.interpreter_frame_method()->is_native() ? 1 : 0) // temp oop slot
|
||||||
+ pointer_delta_as_int((intptr_t*)f.interpreter_frame_monitor_begin(),
|
+ pointer_delta_as_int((intptr_t*)f.interpreter_frame_monitor_begin(),
|
||||||
(intptr_t*)f.interpreter_frame_monitor_end()) / BasicObjectLock::size();
|
(intptr_t*)f.interpreter_frame_monitor_end()) / BasicObjectLock::size();
|
||||||
}
|
}
|
||||||
|
@ -4040,6 +4040,36 @@ class StubGenerator: public StubCodeGenerator {
     return start;
   }

+  address generate_cont_preempt_stub() {
+    if (!Continuations::enabled()) return nullptr;
+    StubCodeMark mark(this, "StubRoutines","Continuation preempt stub");
+    address start = __ pc();
+
+    __ reset_last_Java_frame(true);
+
+    // Set sp to enterSpecial frame, i.e. remove all frames copied into the heap.
+    __ ld(sp, Address(xthread, JavaThread::cont_entry_offset()));
+
+    Label preemption_cancelled;
+    __ lbu(t0, Address(xthread, JavaThread::preemption_cancelled_offset()));
+    __ bnez(t0, preemption_cancelled);
+
+    // Remove enterSpecial frame from the stack and return to Continuation.run() to unmount.
+    SharedRuntime::continuation_enter_cleanup(_masm);
+    __ leave();
+    __ ret();
+
+    // We acquired the monitor after freezing the frames so call thaw to continue execution.
+    __ bind(preemption_cancelled);
+    __ sb(zr, Address(xthread, JavaThread::preemption_cancelled_offset()));
+    __ la(fp, Address(sp, checked_cast<int32_t>(ContinuationEntry::size() + 2 * wordSize)));
+    __ la(t1, ExternalAddress(ContinuationEntry::thaw_call_pc_address()));
+    __ ld(t1, Address(t1));
+    __ jr(t1);
+
+    return start;
+  }
+
 #if COMPILER2_OR_JVMCI

 #undef __
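The preempt stub runs after the frames have already been frozen, so it either unwinds to unmount or, when the preemption was cancelled because the monitor was acquired in the meantime, clears the flag and thaws. A small sketch of that decision (the names are placeholders, not HotSpot APIs):

// Illustrative sketch of the choice the stub above makes: unmount, or cancel
// the preemption, clear the flag, and thaw to keep executing.
#include <cstdio>

struct ThreadModel {
  bool preemption_cancelled = false;
};

static void unmount() { std::puts("return to Continuation.run() and unmount"); }
static void thaw_and_continue() { std::puts("call thaw and keep executing"); }

static void preempt_stub(ThreadModel* t) {
  if (!t->preemption_cancelled) {
    unmount();
  } else {
    t->preemption_cancelled = false;   // sb(zr, preemption_cancelled_offset)
    thaw_and_continue();               // jump through thaw_call_pc
  }
}

int main() {
  ThreadModel t;
  preempt_stub(&t);                    // normal case: unmount
  t.preemption_cancelled = true;
  preempt_stub(&t);                    // cancelled case: thaw
  return 0;
}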
@ -6402,6 +6432,7 @@ static const int64_t right_3_bits = right_n_bits(3);
|
|||||||
StubRoutines::_cont_thaw = generate_cont_thaw();
|
StubRoutines::_cont_thaw = generate_cont_thaw();
|
||||||
StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
|
StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
|
||||||
StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
|
StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
|
||||||
|
StubRoutines::_cont_preempt_stub = generate_cont_preempt_stub();
|
||||||
}
|
}
|
||||||
|
|
||||||
void generate_final_stubs() {
|
void generate_final_stubs() {
|
||||||
|
@ -539,6 +539,38 @@ address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state,
   return entry;
 }

+address TemplateInterpreterGenerator::generate_cont_resume_interpreter_adapter() {
+  if (!Continuations::enabled()) return nullptr;
+  address start = __ pc();
+
+  __ restore_bcp();
+  __ restore_locals();
+
+  // Restore constant pool cache
+  __ ld(xcpool, Address(fp, frame::interpreter_frame_cache_offset * wordSize));
+
+  // Restore Java expression stack pointer
+  __ ld(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
+  __ shadd(esp, t0, fp, t0, Interpreter::logStackElementSize);
+  // and NULL it as marker that esp is now tos until next java call
+  __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
+
+  // Restore machine SP
+  __ ld(t0, Address(fp, frame::interpreter_frame_extended_sp_offset * wordSize));
+  __ shadd(sp, t0, fp, t0, LogBytesPerWord);
+
+  // Restore method
+  __ ld(xmethod, Address(fp, frame::interpreter_frame_method_offset * wordSize));
+
+  // Restore dispatch
+  __ la(xdispatch, ExternalAddress((address)Interpreter::dispatch_table()));
+
+  __ ret();
+
+  return start;
+}
+
+
 // Helpers for commoning out cases in the various type of method entries.
 //
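The resume adapter reloads every register that caches interpreter state from its slot in the interpreter frame, since those registers hold arbitrary values after a thaw. A standalone model of that restore step, with invented field names standing in for the frame offsets:

// Standalone model (names illustrative, not HotSpot code): rebuild the register
// cache of interpreter state purely from the canonical slots in the frame.
#include <cassert>
#include <cstdint>

struct InterpFrame {          // the frame slots the adapter reads
  intptr_t bcp, locals, cp_cache, last_sp, extended_sp, method;
};

struct InterpState {          // the register cache being rebuilt
  intptr_t bcp, locals, cp_cache, esp, sp, method;
};

static InterpState restore_after_thaw(const InterpFrame& f) {
  InterpState s;
  s.bcp = f.bcp;  s.locals = f.locals;  s.cp_cache = f.cp_cache;
  s.esp = f.last_sp;                 // Java expression stack pointer
  s.sp = f.extended_sp;              // machine stack pointer
  s.method = f.method;
  return s;
}

int main() {
  InterpFrame f{1, 2, 3, 4, 5, 6};
  InterpState s = restore_after_thaw(f);
  assert(s.esp == f.last_sp && s.sp == f.extended_sp);
  return 0;
}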
@ -1092,6 +1124,9 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
|||||||
// result handler is in x10
|
// result handler is in x10
|
||||||
// set result handler
|
// set result handler
|
||||||
__ mv(result_handler, x10);
|
__ mv(result_handler, x10);
|
||||||
|
// Save it in the frame in case of preemption; we cannot rely on callee saved registers.
|
||||||
|
__ sd(x10, Address(fp, frame::interpreter_frame_result_handler_offset * wordSize));
|
||||||
|
|
||||||
// pass mirror handle if static call
|
// pass mirror handle if static call
|
||||||
{
|
{
|
||||||
Label L;
|
Label L;
|
||||||
@ -1130,6 +1165,8 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
|||||||
|
|
||||||
// It is enough that the pc() points into the right code
|
// It is enough that the pc() points into the right code
|
||||||
// segment. It does not have to be the correct return pc.
|
// segment. It does not have to be the correct return pc.
|
||||||
|
// For convenience we use the pc we want to resume to in
|
||||||
|
// case of preemption on Object.wait.
|
||||||
Label native_return;
|
Label native_return;
|
||||||
__ set_last_Java_frame(esp, fp, native_return, x30);
|
__ set_last_Java_frame(esp, fp, native_return, x30);
|
||||||
|
|
||||||
@ -1151,9 +1188,13 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
|||||||
__ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
|
__ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
|
||||||
__ sw(t0, Address(t1));
|
__ sw(t0, Address(t1));
|
||||||
|
|
||||||
|
__ push_cont_fastpath();
|
||||||
|
|
||||||
// Call the native method.
|
// Call the native method.
|
||||||
__ jalr(x28);
|
__ jalr(x28);
|
||||||
__ bind(native_return);
|
|
||||||
|
__ pop_cont_fastpath();
|
||||||
|
|
||||||
__ get_method(xmethod);
|
__ get_method(xmethod);
|
||||||
// result potentially in x10 or f10
|
// result potentially in x10 or f10
|
||||||
|
|
||||||
@ -1219,6 +1260,23 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
|||||||
__ mv(t0, _thread_in_Java);
|
__ mv(t0, _thread_in_Java);
|
||||||
__ sw(t0, Address(xthread, JavaThread::thread_state_offset()));
|
__ sw(t0, Address(xthread, JavaThread::thread_state_offset()));
|
||||||
|
|
||||||
|
if (LockingMode != LM_LEGACY) {
|
||||||
|
// Check preemption for Object.wait()
|
||||||
|
Label not_preempted;
|
||||||
|
__ ld(t1, Address(xthread, JavaThread::preempt_alternate_return_offset()));
|
||||||
|
__ beqz(t1, not_preempted);
|
||||||
|
__ sd(zr, Address(xthread, JavaThread::preempt_alternate_return_offset()));
|
||||||
|
__ jr(t1);
|
||||||
|
__ bind(native_return);
|
||||||
|
__ restore_after_resume(true /* is_native */);
|
||||||
|
// reload result_handler
|
||||||
|
__ ld(result_handler, Address(fp, frame::interpreter_frame_result_handler_offset * wordSize));
|
||||||
|
__ bind(not_preempted);
|
||||||
|
} else {
|
||||||
|
// any pc will do so just use this one for LM_LEGACY to keep code together.
|
||||||
|
__ bind(native_return);
|
||||||
|
}
|
||||||
|
|
||||||
// reset_last_Java_frame
|
// reset_last_Java_frame
|
||||||
__ reset_last_Java_frame(true);
|
__ reset_last_Java_frame(true);
|
||||||
|
|
||||||
|
@ -208,6 +208,11 @@ void Runtime1::initialize_pd() {
|
|||||||
// Nothing to do.
|
// Nothing to do.
|
||||||
}
|
}
|
||||||
|
|
||||||
|
uint Runtime1::runtime_blob_current_thread_offset(frame f) {
|
||||||
|
Unimplemented();
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
|
OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
|
||||||
// Make a frame and preserve the caller's caller-save registers.
|
// Make a frame and preserve the caller's caller-save registers.
|
||||||
OopMap* oop_map = save_live_registers(sasm);
|
OopMap* oop_map = save_live_registers(sasm);
|
||||||
|
@ -48,6 +48,10 @@ void FreezeBase::adjust_interpreted_frame_unextended_sp(frame& f) {
|
|||||||
Unimplemented();
|
Unimplemented();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
inline void FreezeBase::prepare_freeze_interpreted_top_frame(frame& f) {
|
||||||
|
Unimplemented();
|
||||||
|
}
|
||||||
|
|
||||||
inline void FreezeBase::relativize_interpreted_frame_metadata(const frame& f, const frame& hf) {
|
inline void FreezeBase::relativize_interpreted_frame_metadata(const frame& f, const frame& hf) {
|
||||||
Unimplemented();
|
Unimplemented();
|
||||||
}
|
}
|
||||||
@ -83,6 +87,15 @@ inline void ThawBase::patch_pd(frame& f, const frame& caller) {
|
|||||||
Unimplemented();
|
Unimplemented();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
inline void ThawBase::patch_pd(frame& f, intptr_t* caller_sp) {
|
||||||
|
Unimplemented();
|
||||||
|
}
|
||||||
|
|
||||||
|
inline intptr_t* ThawBase::push_cleanup_continuation() {
|
||||||
|
Unimplemented();
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
template <typename ConfigT>
|
template <typename ConfigT>
|
||||||
inline void Thaw<ConfigT>::patch_caller_links(intptr_t* sp, intptr_t* bottom) {
|
inline void Thaw<ConfigT>::patch_caller_links(intptr_t* sp, intptr_t* bottom) {
|
||||||
Unimplemented();
|
Unimplemented();
|
||||||
|
@ -35,6 +35,10 @@ static inline intptr_t** link_address(const frame& f) {
|
|||||||
return nullptr;
|
return nullptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline void patch_return_pc_with_preempt_stub(frame& f) {
|
||||||
|
Unimplemented();
|
||||||
|
}
|
||||||
|
|
||||||
inline int ContinuationHelper::frame_align_words(int size) {
|
inline int ContinuationHelper::frame_align_words(int size) {
|
||||||
Unimplemented();
|
Unimplemented();
|
||||||
return 0;
|
return 0;
|
||||||
@ -62,11 +66,11 @@ inline void ContinuationHelper::set_anchor_to_entry_pd(JavaFrameAnchor* anchor,
|
|||||||
Unimplemented();
|
Unimplemented();
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef ASSERT
|
|
||||||
inline void ContinuationHelper::set_anchor_pd(JavaFrameAnchor* anchor, intptr_t* sp) {
|
inline void ContinuationHelper::set_anchor_pd(JavaFrameAnchor* anchor, intptr_t* sp) {
|
||||||
Unimplemented();
|
Unimplemented();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#ifdef ASSERT
|
||||||
inline bool ContinuationHelper::Frame::assert_frame_laid_out(frame f) {
|
inline bool ContinuationHelper::Frame::assert_frame_laid_out(frame f) {
|
||||||
Unimplemented();
|
Unimplemented();
|
||||||
return false;
|
return false;
|
||||||
|
@ -246,6 +246,11 @@ frame frame::sender_for_upcall_stub_frame(RegisterMap* map) const {
|
|||||||
return fr;
|
return fr;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
JavaThread** frame::saved_thread_address(const frame& f) {
|
||||||
|
Unimplemented();
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
frame frame::sender_for_interpreter_frame(RegisterMap *map) const {
|
frame frame::sender_for_interpreter_frame(RegisterMap *map) const {
|
||||||
// Pass callers sender_sp as unextended_sp.
|
// Pass callers sender_sp as unextended_sp.
|
||||||
return frame(sender_sp(), sender_pc(), (intptr_t*)(ijava_state()->sender_sp));
|
return frame(sender_sp(), sender_pc(), (intptr_t*)(ijava_state()->sender_sp));
|
||||||
|
@ -3503,7 +3503,7 @@ void MacroAssembler::increment_counter_eq(address counter_address, Register tmp1
|
|||||||
void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Register temp1, Register temp2) {
|
void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Register temp1, Register temp2) {
|
||||||
|
|
||||||
assert(LockingMode != LM_LIGHTWEIGHT, "uses fast_lock_lightweight");
|
assert(LockingMode != LM_LIGHTWEIGHT, "uses fast_lock_lightweight");
|
||||||
assert_different_registers(oop, box, temp1, temp2);
|
assert_different_registers(oop, box, temp1, temp2, Z_R0_scratch);
|
||||||
|
|
||||||
Register displacedHeader = temp1;
|
Register displacedHeader = temp1;
|
||||||
Register currentHeader = temp1;
|
Register currentHeader = temp1;
|
||||||
@ -3572,14 +3572,13 @@ void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Regis
|
|||||||
|
|
||||||
Register zero = temp;
|
Register zero = temp;
|
||||||
Register monitor_tagged = displacedHeader; // Tagged with markWord::monitor_value.
|
Register monitor_tagged = displacedHeader; // Tagged with markWord::monitor_value.
|
||||||
// The object's monitor m is unlocked iff m->owner is null,
|
|
||||||
// otherwise m->owner may contain a thread or a stack address.
|
|
||||||
|
|
||||||
// Try to CAS m->owner from null to current thread.
|
// Try to CAS owner (no owner => current thread's _lock_id).
|
||||||
// If m->owner is null, then csg succeeds and sets m->owner=THREAD and CR=EQ.
|
// If csg succeeds then CR=EQ, otherwise, register zero is filled
|
||||||
// Otherwise, register zero is filled with the current owner.
|
// with the current owner.
|
||||||
z_lghi(zero, 0);
|
z_lghi(zero, 0);
|
||||||
z_csg(zero, Z_thread, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), monitor_tagged);
|
z_lg(Z_R0_scratch, Address(Z_thread, JavaThread::lock_id_offset()));
|
||||||
|
z_csg(zero, Z_R0_scratch, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), monitor_tagged);
|
||||||
|
|
||||||
// Store a non-null value into the box.
|
// Store a non-null value into the box.
|
||||||
z_stg(box, BasicLock::displaced_header_offset_in_bytes(), box);
|
z_stg(box, BasicLock::displaced_header_offset_in_bytes(), box);
|
||||||
@ -3588,7 +3587,7 @@ void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Regis
|
|||||||
|
|
||||||
BLOCK_COMMENT("fast_path_recursive_lock {");
|
BLOCK_COMMENT("fast_path_recursive_lock {");
|
||||||
// Check if we are already the owner (recursive lock)
|
// Check if we are already the owner (recursive lock)
|
||||||
z_cgr(Z_thread, zero); // owner is stored in zero by "z_csg" above
|
z_cgr(Z_R0_scratch, zero); // owner is stored in zero by "z_csg" above
|
||||||
z_brne(done); // not a recursive lock
|
z_brne(done); // not a recursive lock
|
||||||
|
|
||||||
// Current thread already owns the lock. Just increment recursion count.
|
// Current thread already owns the lock. Just increment recursion count.
|
||||||
@ -3606,7 +3605,7 @@ void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Regis
|
|||||||
void MacroAssembler::compiler_fast_unlock_object(Register oop, Register box, Register temp1, Register temp2) {
|
void MacroAssembler::compiler_fast_unlock_object(Register oop, Register box, Register temp1, Register temp2) {
|
||||||
|
|
||||||
assert(LockingMode != LM_LIGHTWEIGHT, "uses fast_unlock_lightweight");
|
assert(LockingMode != LM_LIGHTWEIGHT, "uses fast_unlock_lightweight");
|
||||||
assert_different_registers(oop, box, temp1, temp2);
|
assert_different_registers(oop, box, temp1, temp2, Z_R0_scratch);
|
||||||
|
|
||||||
Register displacedHeader = temp1;
|
Register displacedHeader = temp1;
|
||||||
Register currentHeader = temp2;
|
Register currentHeader = temp2;
|
||||||
@ -3653,7 +3652,8 @@ void MacroAssembler::compiler_fast_unlock_object(Register oop, Register box, Reg
|
|||||||
// Handle existing monitor.
|
// Handle existing monitor.
|
||||||
bind(object_has_monitor);
|
bind(object_has_monitor);
|
||||||
|
|
||||||
z_cg(Z_thread, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
|
z_lg(Z_R0_scratch, Address(Z_thread, JavaThread::lock_id_offset()));
|
||||||
|
z_cg(Z_R0_scratch, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
|
||||||
z_brne(done);
|
z_brne(done);
|
||||||
|
|
||||||
BLOCK_COMMENT("fast_path_recursive_unlock {");
|
BLOCK_COMMENT("fast_path_recursive_unlock {");
|
||||||
@ -6228,7 +6228,7 @@ void MacroAssembler::lightweight_unlock(Register obj, Register temp1, Register t
|
|||||||
}
|
}
|
||||||
|
|
||||||
void MacroAssembler::compiler_fast_lock_lightweight_object(Register obj, Register box, Register tmp1, Register tmp2) {
|
void MacroAssembler::compiler_fast_lock_lightweight_object(Register obj, Register box, Register tmp1, Register tmp2) {
|
||||||
assert_different_registers(obj, box, tmp1, tmp2);
|
assert_different_registers(obj, box, tmp1, tmp2, Z_R0_scratch);
|
||||||
|
|
||||||
// Handle inflated monitor.
|
// Handle inflated monitor.
|
||||||
NearLabel inflated;
|
NearLabel inflated;
|
||||||
@ -6356,15 +6356,16 @@ void MacroAssembler::compiler_fast_lock_lightweight_object(Register obj, Registe
|
|||||||
const Address recursions_address(tmp1_monitor, ObjectMonitor::recursions_offset() - monitor_tag);
|
const Address recursions_address(tmp1_monitor, ObjectMonitor::recursions_offset() - monitor_tag);
|
||||||
|
|
||||||
|
|
||||||
// Try to CAS m->owner from null to current thread.
|
// Try to CAS owner (no owner => current thread's _lock_id).
|
||||||
// If m->owner is null, then csg succeeds and sets m->owner=THREAD and CR=EQ.
|
// If csg succeeds then CR=EQ, otherwise, register zero is filled
|
||||||
// Otherwise, register zero is filled with the current owner.
|
// with the current owner.
|
||||||
z_lghi(zero, 0);
|
z_lghi(zero, 0);
|
||||||
z_csg(zero, Z_thread, owner_address);
|
z_lg(Z_R0_scratch, Address(Z_thread, JavaThread::lock_id_offset()));
|
||||||
|
z_csg(zero, Z_R0_scratch, owner_address);
|
||||||
z_bre(monitor_locked);
|
z_bre(monitor_locked);
|
||||||
|
|
||||||
// Check if recursive.
|
// Check if recursive.
|
||||||
z_cgr(Z_thread, zero); // zero contains the owner from z_csg instruction
|
z_cgr(Z_R0_scratch, zero); // zero contains the owner from z_csg instruction
|
||||||
z_brne(slow_path);
|
z_brne(slow_path);
|
||||||
|
|
||||||
// Recursive
|
// Recursive
|
||||||
|
@ -2387,6 +2387,11 @@ uint SharedRuntime::out_preserve_stack_slots() {
|
|||||||
return frame::z_jit_out_preserve_size/VMRegImpl::stack_slot_size;
|
return frame::z_jit_out_preserve_size/VMRegImpl::stack_slot_size;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
VMReg SharedRuntime::thread_register() {
|
||||||
|
Unimplemented();
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
//
|
//
|
||||||
// Frame generation for deopt and uncommon trap blobs.
|
// Frame generation for deopt and uncommon trap blobs.
|
||||||
//
|
//
|
||||||
|
@ -721,6 +721,11 @@ address TemplateInterpreterGenerator::generate_safept_entry_for (TosState state,
|
|||||||
return entry;
|
return entry;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
address TemplateInterpreterGenerator::generate_cont_resume_interpreter_adapter() {
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
//
|
//
|
||||||
// Helpers for commoning out cases in the various type of method entries.
|
// Helpers for commoning out cases in the various type of method entries.
|
||||||
//
|
//
|
||||||
|
@@ -2854,6 +2854,26 @@ void Assembler::leal(Register dst, Address src) {
   emit_operand(dst, src, 0);
 }

+#ifdef _LP64
+void Assembler::lea(Register dst, Label& L) {
+  emit_prefix_and_int8(get_prefixq(Address(), dst), (unsigned char)0x8D);
+  if (!L.is_bound()) {
+    // Patch @0x8D opcode
+    L.add_patch_at(code(), CodeBuffer::locator(offset() - 1, sect()));
+    // Register and [rip+disp] operand
+    emit_modrm(0b00, raw_encode(dst), 0b101);
+    emit_int32(0);
+  } else {
+    // Register and [rip+disp] operand
+    emit_modrm(0b00, raw_encode(dst), 0b101);
+    // Adjust displacement by sizeof lea instruction
+    int32_t disp = checked_cast<int32_t>(target(L) - (pc() + sizeof(int32_t)));
+    assert(is_simm32(disp), "must be 32bit offset [rip+offset]");
+    emit_int32(disp);
+  }
+}
+#endif
+
 void Assembler::lfence() {
   emit_int24(0x0F, (unsigned char)0xAE, (unsigned char)0xE8);
 }
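The bound-label branch of the new lea above computes a disp32 that is relative to the end of the instruction, which is why sizeof(int32_t) is added to pc() before subtracting. A minimal standalone sketch of that arithmetic, under a hypothetical name (rip_relative_disp is not a HotSpot API):

#include <cassert>
#include <cstdint>

// Hypothetical helper mirroring the bound-label arithmetic above: the disp32 is
// relative to the address immediately after the 4-byte field, so sizeof(int32_t)
// is added before subtracting from the target.
int32_t rip_relative_disp(intptr_t target, intptr_t pc_at_disp_field) {
  intptr_t disp = target - (pc_at_disp_field + (intptr_t)sizeof(int32_t));
  assert(disp == (int32_t)disp && "must be 32bit offset [rip+offset]");
  return (int32_t)disp;
}

int main() {
  // A target 0x100 bytes past the displacement field encodes as 0xFC,
  // because the CPU adds disp32 to the address of the next instruction.
  return rip_relative_disp(0x1100, 0x1000) == 0xFC ? 0 : 1;
}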
@@ -1625,6 +1625,10 @@ private:

   void leaq(Register dst, Address src);

+#ifdef _LP64
+  void lea(Register dst, Label& L);
+#endif
+
   void lfence();

   void lock();
@@ -109,9 +109,8 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
     jcc(Assembler::notZero, slow_case);
     // done
     bind(done);
-  }
-
   inc_held_monitor_count();
+  }

   return null_check_offset;
 }

@@ -153,10 +152,10 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_
     // we do unlocking via runtime call
     jcc(Assembler::notEqual, slow_case);
     // done
-  }
     bind(done);
     dec_held_monitor_count();
   }
+}


 // Defines obj, preserves var_size_in_bytes
@@ -206,9 +206,10 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre
 class StubFrame: public StackObj {
  private:
   StubAssembler* _sasm;
+  bool _use_pop_on_epilog;

  public:
-  StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments);
+  StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments, bool use_pop_on_epilog = false);
   void load_argument(int offset_in_words, Register reg);

   ~StubFrame();

@@ -219,15 +220,20 @@ void StubAssembler::prologue(const char* name, bool must_gc_arguments) {
   enter();
 }

-void StubAssembler::epilogue() {
-  leave();
+void StubAssembler::epilogue(bool use_pop) {
+  // Avoid using a leave instruction when this frame may
+  // have been frozen, since the current value of rbp
+  // restored from the stub would be invalid. We still
+  // must restore the rbp value saved on enter though.
+  use_pop ? pop(rbp) : leave();
   ret(0);
 }

 #define __ _sasm->

-StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) {
+StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments, bool use_pop_on_epilog) {
   _sasm = sasm;
+  _use_pop_on_epilog = use_pop_on_epilog;
   __ prologue(name, must_gc_arguments);
 }

@@ -239,7 +245,7 @@ void StubFrame::load_argument(int offset_in_words, Register reg) {

 StubFrame::~StubFrame() {
-  __ epilogue();
+  __ epilogue(_use_pop_on_epilog);
 }

 #undef __

@@ -632,6 +638,15 @@ void Runtime1::initialize_pd() {
   // nothing to do
 }

+// return: offset in 64-bit words.
+uint Runtime1::runtime_blob_current_thread_offset(frame f) {
+#ifdef _LP64
+  return r15_off / 2; // rsp offsets are in halfwords
+#else
+  Unimplemented();
+  return 0;
+#endif
+}

 // Target: the entry point of the method that creates and posts the exception oop.
 // has_argument: true if the exception needs arguments (passed on the stack because

@@ -1308,7 +1323,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
       // fall through
     case C1StubId::monitorenter_id:
       {
-        StubFrame f(sasm, "monitorenter", dont_gc_arguments);
+        StubFrame f(sasm, "monitorenter", dont_gc_arguments, true /* use_pop_on_epilog */);
         OopMap* map = save_live_registers(sasm, 3, save_fpu_registers);

         // Called with store_parameter and not C abi
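The epilogue change above exists because leave reloads rsp from rbp before popping; if the monitorenter stub frame was frozen and the stack has been cut back, the saved rbp no longer describes the live stack, so only the plain pop of rbp is safe. A toy model of why pop(rbp) and leave() agree only when rsp already sits at the frame base, using made-up register structs rather than real HotSpot code:

#include <cassert>
#include <cstdint>

// Toy machine: a downward-growing stack plus rsp/rbp "registers".
struct Cpu { intptr_t* rsp; intptr_t* rbp; };

void enter(Cpu& c)   { *--c.rsp = (intptr_t)c.rbp; c.rbp = c.rsp; }   // push rbp; mov rbp, rsp
void leave(Cpu& c)   { c.rsp = c.rbp; c.rbp = (intptr_t*)*c.rsp++; }  // mov rsp, rbp; pop rbp
void pop_rbp(Cpu& c) { c.rbp = (intptr_t*)*c.rsp++; }                 // pop rbp only

int main() {
  intptr_t stack[16];
  Cpu c{stack + 16, stack + 16};
  enter(c);
  // While rsp is still where enter() left it, both epilogues are equivalent;
  // after a freeze the frame may have moved, so the stub trusts rsp, not rbp.
  Cpu a = c, b = c;
  leave(a);
  pop_rbp(b);
  assert(a.rsp == b.rsp && a.rbp == b.rbp);
  return 0;
}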
@@ -311,61 +311,24 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmp
   // The object is inflated. tmpReg contains pointer to ObjectMonitor* + markWord::monitor_value

 #ifndef _LP64
-  // The object is inflated.
-  // boxReg refers to the on-stack BasicLock in the current frame.
-  // We'd like to write:
-  //   set box->_displaced_header = markWord::unused_mark(). Any non-0 value suffices.
-  // This is convenient but results a ST-before-CAS penalty. The following CAS suffers
-  // additional latency as we have another ST in the store buffer that must drain.
-
-  // avoid ST-before-CAS
-  // register juggle because we need tmpReg for cmpxchgptr below
-  movptr(scrReg, boxReg);
-  movptr(boxReg, tmpReg); // consider: LEA box, [tmp-2]
-
-  // Optimistic form: consider XORL tmpReg,tmpReg
-  movptr(tmpReg, NULL_WORD);
-
-  // Appears unlocked - try to swing _owner from null to non-null.
-  // Ideally, I'd manifest "Self" with get_thread and then attempt
-  // to CAS the register containing Self into m->Owner.
-  // But we don't have enough registers, so instead we can either try to CAS
-  // rsp or the address of the box (in scr) into &m->owner. If the CAS succeeds
-  // we later store "Self" into m->Owner. Transiently storing a stack address
-  // (rsp or the address of the box) into m->owner is harmless.
-  // Invariant: tmpReg == 0. tmpReg is EAX which is the implicit cmpxchg comparand.
-  lock();
-  cmpxchgptr(scrReg, Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
-  movptr(Address(scrReg, 0), 3); // box->_displaced_header = 3
-  // If we weren't able to swing _owner from null to the BasicLock
-  // then take the slow path.
-  jccb (Assembler::notZero, NO_COUNT);
-  // update _owner from BasicLock to thread
-  get_thread (scrReg); // beware: clobbers ICCs
-  movptr(Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), scrReg);
-  xorptr(boxReg, boxReg); // set icc.ZFlag = 1 to indicate success
-
-  // If the CAS fails we can either retry or pass control to the slow path.
-  // We use the latter tactic.
-  // Pass the CAS result in the icc.ZFlag into DONE_LABEL
-  // If the CAS was successful ...
-  //   Self has acquired the lock
-  //   Invariant: m->_recursions should already be 0, so we don't need to explicitly set it.
-  // Intentional fall-through into DONE_LABEL ...
-#else // _LP64
-  // It's inflated and we use scrReg for ObjectMonitor* in this section.
-  movq(scrReg, tmpReg);
-  xorq(tmpReg, tmpReg);
-  lock();
-  cmpxchgptr(thread, Address(scrReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
+  // Just take slow path to avoid dealing with 64 bit atomic instructions here.
+  orl(boxReg, 1); // set ICC.ZF=0 to indicate failure
+#else
   // Unconditionally set box->_displaced_header = markWord::unused_mark().
   // Without cast to int32_t this style of movptr will destroy r10 which is typically obj.
   movptr(Address(boxReg, 0), checked_cast<int32_t>(markWord::unused_mark().value()));

+  // It's inflated and we use scrReg for ObjectMonitor* in this section.
+  movptr(boxReg, Address(r15_thread, JavaThread::lock_id_offset()));
+  movq(scrReg, tmpReg);
+  xorq(tmpReg, tmpReg);
+  lock();
+  cmpxchgptr(boxReg, Address(scrReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
+
   // Propagate ICC.ZF from CAS above into DONE_LABEL.
   jccb(Assembler::equal, COUNT);          // CAS above succeeded; propagate ZF = 1 (success)

-  cmpptr(thread, rax);                    // Check if we are already the owner (recursive lock)
+  cmpptr(boxReg, rax);                    // Check if we are already the owner (recursive lock)
   jccb(Assembler::notEqual, NO_COUNT);    // If not recursive, ZF = 0 at this point (fail)
   incq(Address(scrReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
   xorq(rax, rax); // Set ZF = 1 (success) for recursive lock, denoting locking success

@@ -377,9 +340,12 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmp
   jccb(Assembler::notZero, NO_COUNT); // jump if ZFlag == 0

   bind(COUNT);
+  if (LockingMode == LM_LEGACY) {
+#ifdef _LP64
     // Count monitors in fast path
     increment(Address(thread, JavaThread::held_monitor_count_offset()));
+#endif
+  }
   xorl(tmpReg, tmpReg); // Set ZF == 1

   bind(NO_COUNT);

@@ -441,6 +407,11 @@ void C2_MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register t

   // It's inflated.

+#ifndef _LP64
+  // Just take slow path to avoid dealing with 64 bit atomic instructions here.
+  orl(boxReg, 1); // set ICC.ZF=0 to indicate failure
+  jmpb(DONE_LABEL);
+#else
   // Despite our balanced locking property we still check that m->_owner == Self
   // as java routines or native JNI code called by this thread might
   // have released the lock.

@@ -489,12 +460,7 @@ void C2_MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register t
   // Save the monitor pointer in the current thread, so we can try to
   // reacquire the lock in SharedRuntime::monitor_exit_helper().
   andptr(tmpReg, ~(int32_t)markWord::monitor_value);
-#ifndef _LP64
-  get_thread(boxReg);
-  movptr(Address(boxReg, JavaThread::unlocked_inflated_monitor_offset()), tmpReg);
-#else // _LP64
   movptr(Address(r15_thread, JavaThread::unlocked_inflated_monitor_offset()), tmpReg);
-#endif

   orl (boxReg, 1); // set ICC.ZF=0 to indicate failure
   jmpb (DONE_LABEL);

@@ -502,6 +468,7 @@ void C2_MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register t
   bind (LSuccess);
   testl (boxReg, 0); // set ICC.ZF=1 to indicate success
   jmpb (DONE_LABEL);
+#endif // _LP64

   if (LockingMode == LM_LEGACY) {
     bind (Stacked);

@@ -518,13 +485,13 @@ void C2_MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register t
   jccb(Assembler::notZero, NO_COUNT);

   bind(COUNT);

+  if (LockingMode == LM_LEGACY) {
     // Count monitors in fast path
-#ifndef _LP64
-    get_thread(tmpReg);
-    decrementl(Address(tmpReg, JavaThread::held_monitor_count_offset()));
-#else // _LP64
+#ifdef _LP64
     decrementq(Address(r15_thread, JavaThread::held_monitor_count_offset()));
 #endif
+  }

   xorl(tmpReg, tmpReg); // Set ZF == 1

@@ -602,6 +569,11 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Regist
   { // Handle inflated monitor.
     bind(inflated);

+#ifndef _LP64
+    // Just take slow path to avoid dealing with 64 bit atomic instructions here.
+    orl(box, 1); // set ICC.ZF=0 to indicate failure
+    jmpb(slow_path);
+#else
     const Register monitor = t;

     if (!UseObjectMonitorTable) {

@@ -647,27 +619,30 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Regist
     Label monitor_locked;
     // Lock the monitor.

-    // CAS owner (null => current thread).
+    if (UseObjectMonitorTable) {
+      // Cache the monitor for unlock before trashing box. On failure to acquire
+      // the lock, the slow path will reset the entry accordingly (see CacheSetter).
+      movptr(Address(box, BasicLock::object_monitor_cache_offset_in_bytes()), monitor);
+    }
+
+    // Try to CAS owner (no owner => current thread's _lock_id).
     xorptr(rax_reg, rax_reg);
-    lock(); cmpxchgptr(thread, owner_address);
+    movptr(box, Address(thread, JavaThread::lock_id_offset()));
+    lock(); cmpxchgptr(box, owner_address);
     jccb(Assembler::equal, monitor_locked);

     // Check if recursive.
-    cmpptr(thread, rax_reg);
+    cmpptr(box, rax_reg);
     jccb(Assembler::notEqual, slow_path);

     // Recursive.
     increment(recursions_address);

     bind(monitor_locked);
-    if (UseObjectMonitorTable) {
-      // Cache the monitor for unlock
-      movptr(Address(box, BasicLock::object_monitor_cache_offset_in_bytes()), monitor);
-    }
+#endif // _LP64
   }

   bind(locked);
-  increment(Address(thread, JavaThread::held_monitor_count_offset()));
   // Set ZF = 1
   xorl(rax_reg, rax_reg);

@@ -777,6 +752,11 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register reg_rax,

     bind(inflated);

+#ifndef _LP64
+    // Just take slow path to avoid dealing with 64 bit atomic instructions here.
+    orl(t, 1); // set ICC.ZF=0 to indicate failure
+    jmpb(slow_path);
+#else
     if (!UseObjectMonitorTable) {
       assert(mark == monitor, "should be the same here");
     } else {

@@ -828,10 +808,10 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register reg_rax,
     // Recursive unlock.
     bind(recursive);
     decrement(recursions_address);
+#endif // _LP64
   }

   bind(unlocked);
-  decrement(Address(thread, JavaThread::held_monitor_count_offset()));
   xorl(t, t); // Fast Unlock ZF = 1

 #ifdef ASSERT
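The fast_lock and fast_lock_lightweight hunks above switch the ObjectMonitor owner CAS from installing the JavaThread* to installing the thread's _lock_id, so ownership is expressed as a value that stays meaningful while a virtual thread migrates between carrier threads. A standalone sketch of that protocol, with hypothetical names (ToyMonitor is not the HotSpot ObjectMonitor):

#include <atomic>
#include <cassert>
#include <cstdint>

// Hypothetical model: owner holds a lock_id (0 == unowned), not a thread pointer.
struct ToyMonitor {
  std::atomic<int64_t> owner{0};
  int64_t recursions = 0;
};

// Mirrors the fast path above: CAS 0 -> lock_id, otherwise check for recursion.
bool fast_lock(ToyMonitor& m, int64_t lock_id) {
  int64_t expected = 0;
  if (m.owner.compare_exchange_strong(expected, lock_id)) return true;  // acquired
  if (expected == lock_id) { m.recursions++; return true; }             // recursive acquire
  return false;                                                         // contended: take the slow path
}

int main() {
  ToyMonitor m;
  assert(fast_lock(m, 42));   // first acquire
  assert(fast_lock(m, 42));   // recursive acquire
  assert(!fast_lock(m, 7));   // a different owner must go to the slow path
  return 0;
}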
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -126,6 +126,11 @@ void FreezeBase::adjust_interpreted_frame_unextended_sp(frame& f) {
   }
 }

+inline void FreezeBase::prepare_freeze_interpreted_top_frame(frame& f) {
+  assert(f.interpreter_frame_last_sp() == nullptr, "should be null for top frame");
+  f.interpreter_frame_set_last_sp(f.unextended_sp());
+}
+
 inline void FreezeBase::relativize_interpreted_frame_metadata(const frame& f, const frame& hf) {
   assert(hf.fp() == hf.unextended_sp() + (f.fp() - f.unextended_sp()), "");
   assert((f.at(frame::interpreter_frame_last_sp_offset) != 0)

@@ -136,7 +141,10 @@ inline void FreezeBase::relativize_interpreted_frame_metadata(const frame& f, co
   assert((intptr_t*)hf.at_relative(frame::interpreter_frame_last_sp_offset) == hf.unextended_sp(), "");

   // Make sure that locals is already relativized.
-  assert((*hf.addr_at(frame::interpreter_frame_locals_offset) == frame::sender_sp_offset + f.interpreter_frame_method()->max_locals() - 1), "");
+  DEBUG_ONLY(Method* m = f.interpreter_frame_method();)
+  // Frames for native methods have 2 extra words (temp oop/result handler) before fixed part of frame.
+  DEBUG_ONLY(int max_locals = !m->is_native() ? m->max_locals() : m->size_of_parameters() + 2;)
+  assert((*hf.addr_at(frame::interpreter_frame_locals_offset) == frame::sender_sp_offset + max_locals - 1), "");

   // Make sure that monitor_block_top is already relativized.
   assert(hf.at_absolute(frame::interpreter_frame_monitor_block_top_offset) <= frame::interpreter_frame_initial_sp_offset, "");

@@ -207,7 +215,6 @@ template<typename FKind> frame ThawBase::new_stack_frame(const frame& hf, frame&
   // If caller is interpreted it already made room for the callee arguments
   int overlap = caller.is_interpreted_frame() ? ContinuationHelper::InterpretedFrame::stack_argsize(hf) : 0;
   const int fsize = (int)(ContinuationHelper::InterpretedFrame::frame_bottom(hf) - hf.unextended_sp() - overlap);
-  const int locals = hf.interpreter_frame_method()->max_locals();
   intptr_t* frame_sp = caller.unextended_sp() - fsize;
   intptr_t* fp = frame_sp + (hf.fp() - heap_sp);
   DEBUG_ONLY(intptr_t* unextended_sp = fp + *hf.addr_at(frame::interpreter_frame_last_sp_offset);)

@@ -217,7 +224,10 @@ template<typename FKind> frame ThawBase::new_stack_frame(const frame& hf, frame&
   // we need to set the locals so that the caller of new_stack_frame() can call
   // ContinuationHelper::InterpretedFrame::frame_bottom
   intptr_t locals_offset = *hf.addr_at(frame::interpreter_frame_locals_offset);
-  assert((int)locals_offset == frame::sender_sp_offset + locals - 1, "");
+  DEBUG_ONLY(Method* m = hf.interpreter_frame_method();)
+  // Frames for native methods have 2 extra words (temp oop/result handler) before fixed part of frame.
+  DEBUG_ONLY(const int max_locals = !m->is_native() ? m->max_locals() : m->size_of_parameters() + 2;)
+  assert((int)locals_offset == frame::sender_sp_offset + max_locals - 1, "");
   // copy relativized locals from the heap frame
   *f.addr_at(frame::interpreter_frame_locals_offset) = locals_offset;
   return f;

@@ -225,7 +235,7 @@ template<typename FKind> frame ThawBase::new_stack_frame(const frame& hf, frame&
   int fsize = FKind::size(hf);
   intptr_t* frame_sp = caller.unextended_sp() - fsize;
   if (bottom || caller.is_interpreted_frame()) {
-    int argsize = hf.compiled_frame_stack_argsize();
+    int argsize = FKind::stack_argsize(hf);

     fsize += argsize;
     frame_sp -= argsize;

@@ -242,8 +252,9 @@ template<typename FKind> frame ThawBase::new_stack_frame(const frame& hf, frame&
     // we need to recreate a "real" frame pointer, pointing into the stack
     fp = frame_sp + FKind::size(hf) - frame::sender_sp_offset;
   } else {
-    // we need to re-read fp because it may be an oop and we might have fixed the frame.
-    fp = *(intptr_t**)(hf.sp() - frame::sender_sp_offset);
+    fp = FKind::stub || FKind::native
+      ? frame_sp + fsize - frame::sender_sp_offset // fp always points to the address below the pushed return pc. We need correct address.
+      : *(intptr_t**)(hf.sp() - frame::sender_sp_offset); // we need to re-read fp because it may be an oop and we might have fixed the frame.
   }
   return frame(frame_sp, frame_sp, fp, hf.pc(), hf.cb(), hf.oop_map(), false); // TODO PERF : this computes deopt state; is it necessary?
 }

@@ -266,6 +277,22 @@ inline void ThawBase::patch_pd(frame& f, const frame& caller) {
   patch_callee_link(caller, caller.fp());
 }

+inline void ThawBase::patch_pd(frame& f, intptr_t* caller_sp) {
+  intptr_t* fp = caller_sp - frame::sender_sp_offset;
+  patch_callee_link(f, fp);
+}
+
+inline intptr_t* ThawBase::push_cleanup_continuation() {
+  frame enterSpecial = new_entry_frame();
+  intptr_t* sp = enterSpecial.sp();
+
+  sp[-1] = (intptr_t)ContinuationEntry::cleanup_pc();
+  sp[-2] = (intptr_t)enterSpecial.fp();
+
+  log_develop_trace(continuations, preempt)("push_cleanup_continuation initial sp: " INTPTR_FORMAT " final sp: " INTPTR_FORMAT, p2i(sp + 2 * frame::metadata_words), p2i(sp));
+  return sp;
+}
+
 inline void ThawBase::derelativize_interpreted_frame_metadata(const frame& hf, const frame& f) {
   // Make sure that last_sp is kept relativized.
   assert((intptr_t*)f.at_relative(frame::interpreter_frame_last_sp_offset) == f.unextended_sp(), "");
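push_cleanup_continuation above plants a return pc and a saved rbp in the two metadata slots just below the enterSpecial frame, so that a later return through them lands in the cleanup code. A standalone toy of that slot layout on a downward-growing stack (the names and the exact sp bookkeeping here are illustrative, not the HotSpot code):

#include <cassert>
#include <cstdint>

// Writing a fake call frame means storing the return pc at sp[-1] and the saved
// frame pointer at sp[-2], then lowering sp past those two metadata words.
intptr_t* push_fake_return(intptr_t* sp, intptr_t cleanup_pc, intptr_t saved_fp) {
  sp[-1] = cleanup_pc;
  sp[-2] = saved_fp;
  return sp - 2;
}

int main() {
  intptr_t stack[8];
  intptr_t* sp = stack + 8;
  intptr_t* new_sp = push_fake_return(sp, /*cleanup_pc=*/0x1234, /*saved_fp=*/0x5678);
  assert(new_sp == sp - 2);
  assert(new_sp[1] == 0x1234 && new_sp[0] == 0x5678);
  return 0;
}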
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -40,6 +40,20 @@ static inline intptr_t** link_address(const frame& f) {
     : (intptr_t**)(f.unextended_sp() + f.cb()->frame_size() - frame::sender_sp_offset);
 }

+static inline void patch_return_pc_with_preempt_stub(frame& f) {
+  if (f.is_runtime_frame()) {
+    // Patch the pc of the now old last Java frame (we already set the anchor to enterSpecial)
+    // so that when target goes back to Java it will actually return to the preempt cleanup stub.
+    intptr_t* sp = f.sp();
+    sp[-1] = (intptr_t)StubRoutines::cont_preempt_stub();
+  } else {
+    // The target will check for preemption once it returns to the interpreter
+    // or the native wrapper code and will manually jump to the preempt stub.
+    JavaThread *thread = JavaThread::current();
+    thread->set_preempt_alternate_return(StubRoutines::cont_preempt_stub());
+  }
+}
+
 inline int ContinuationHelper::frame_align_words(int size) {
 #ifdef _LP64
   return size & 1;

@@ -72,12 +86,12 @@ inline void ContinuationHelper::set_anchor_to_entry_pd(JavaFrameAnchor* anchor,
   anchor->set_last_Java_fp(entry->entry_fp());
 }

-#ifdef ASSERT
 inline void ContinuationHelper::set_anchor_pd(JavaFrameAnchor* anchor, intptr_t* sp) {
   intptr_t* fp = *(intptr_t**)(sp - frame::sender_sp_offset);
   anchor->set_last_Java_fp(fp);
 }

+#ifdef ASSERT
 inline bool ContinuationHelper::Frame::assert_frame_laid_out(frame f) {
   intptr_t* sp = f.sp();
   address pc = *(address*)(sp - frame::sender_sp_ret_address_offset());
@@ -409,6 +409,36 @@ frame frame::sender_for_upcall_stub_frame(RegisterMap* map) const {
   return fr;
 }

+#if defined(ASSERT)
+static address get_register_address_in_stub(const frame& stub_fr, VMReg reg) {
+  RegisterMap map(nullptr,
+                  RegisterMap::UpdateMap::include,
+                  RegisterMap::ProcessFrames::skip,
+                  RegisterMap::WalkContinuation::skip);
+  stub_fr.oop_map()->update_register_map(&stub_fr, &map);
+  return map.location(reg, stub_fr.sp());
+}
+#endif
+
+JavaThread** frame::saved_thread_address(const frame& f) {
+  CodeBlob* cb = f.cb();
+  assert(cb != nullptr && cb->is_runtime_stub(), "invalid frame");
+
+  JavaThread** thread_addr;
+#ifdef COMPILER1
+  if (cb == Runtime1::blob_for(C1StubId::monitorenter_id) ||
+      cb == Runtime1::blob_for(C1StubId::monitorenter_nofpu_id)) {
+    thread_addr = (JavaThread**)(f.sp() + Runtime1::runtime_blob_current_thread_offset(f));
+  } else
+#endif
+  {
+    // c2 only saves rbp in the stub frame so nothing to do.
+    thread_addr = nullptr;
+  }
+  assert(get_register_address_in_stub(f, SharedRuntime::thread_register()) == (address)thread_addr, "wrong thread address");
+  return thread_addr;
+}
+
 //------------------------------------------------------------------------------
 // frame::verify_deopt_original_pc
 //
@@ -34,7 +34,9 @@ const bool CCallingConventionRequiresIntsAsLongs = false;

 #define SUPPORTS_NATIVE_CX8

+#ifdef _LP64
 #define SUPPORT_MONITOR_COUNT
+#endif

 #define CPU_MULTI_COPY_ATOMIC

@@ -336,6 +336,66 @@ void InterpreterMacroAssembler::call_VM_base(Register oop_result,
   restore_locals();
 }

+#ifdef _LP64
+void InterpreterMacroAssembler::call_VM_preemptable(Register oop_result,
+                                                    address entry_point,
+                                                    Register arg_1) {
+  assert(arg_1 == c_rarg1, "");
+  Label resume_pc, not_preempted;
+
+#ifdef ASSERT
+  {
+    Label L;
+    cmpptr(Address(r15_thread, JavaThread::preempt_alternate_return_offset()), NULL_WORD);
+    jcc(Assembler::equal, L);
+    stop("Should not have alternate return address set");
+    bind(L);
+  }
+#endif /* ASSERT */
+
+  // Force freeze slow path.
+  push_cont_fastpath();
+
+  // Make VM call. In case of preemption set last_pc to the one we want to resume to.
+  // Note: call_VM_helper requires last_Java_pc for anchor to be at the top of the stack.
+  lea(rscratch1, resume_pc);
+  push(rscratch1);
+  MacroAssembler::call_VM_helper(oop_result, entry_point, 1, false /*check_exceptions*/);
+  pop(rscratch1);
+
+  pop_cont_fastpath();
+
+  // Check if preempted.
+  movptr(rscratch1, Address(r15_thread, JavaThread::preempt_alternate_return_offset()));
+  cmpptr(rscratch1, NULL_WORD);
+  jccb(Assembler::zero, not_preempted);
+  movptr(Address(r15_thread, JavaThread::preempt_alternate_return_offset()), NULL_WORD);
+  jmp(rscratch1);
+
+  // In case of preemption, this is where we will resume once we finally acquire the monitor.
+  bind(resume_pc);
+  restore_after_resume(false /* is_native */);
+
+  bind(not_preempted);
+}
+
+void InterpreterMacroAssembler::restore_after_resume(bool is_native) {
+  lea(rscratch1, ExternalAddress(Interpreter::cont_resume_interpreter_adapter()));
+  call(rscratch1);
+  if (is_native) {
+    // On resume we need to set up stack as expected.
+    push(dtos);
+    push(ltos);
+  }
+}
+#else
+void InterpreterMacroAssembler::call_VM_preemptable(Register oop_result,
+                                                    address entry_point,
+                                                    Register arg_1) {
+  MacroAssembler::call_VM(oop_result, entry_point, arg_1);
+}
+#endif // _LP64
+
 void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) {
   if (JvmtiExport::can_pop_frame()) {
     Label L;

@@ -1154,7 +1214,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
          "The argument is only for looks. It must be c_rarg1");

   if (LockingMode == LM_MONITOR) {
-    call_VM(noreg,
+    call_VM_preemptable(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
             lock_reg);
   } else {

@@ -1241,14 +1301,14 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
       jcc(Assembler::notZero, slow_case);

       bind(count_locking);
-    }
     inc_held_monitor_count();
+    }
     jmp(done);

     bind(slow_case);

     // Call the runtime routine for slow case
-    call_VM(noreg,
+    call_VM_preemptable(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
             lock_reg);
     bind(done);

@@ -1321,8 +1381,8 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
       jcc(Assembler::notZero, slow_case);

       bind(count_locking);
-    }
     dec_held_monitor_count();
+    }
     jmp(done);

     bind(slow_case);

@@ -63,6 +63,11 @@ class InterpreterMacroAssembler: public MacroAssembler {

   void load_earlyret_value(TosState state);

+  void call_VM_preemptable(Register oop_result,
+                           address entry_point,
+                           Register arg_1);
+  void restore_after_resume(bool is_native);
+
   // Interpreter-specific registers
   void save_bcp() {
     movptr(Address(rbp, frame::interpreter_frame_bcp_offset * wordSize), _bcp_register);
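call_VM_preemptable above pushes the address of resume_pc before the monitorenter call and, on return, consumes preempt_alternate_return exactly once: if it is set, it is cleared and jumped to. A standalone sketch of that check-clear-jump handshake using a function pointer in place of a code address (hypothetical names, not HotSpot APIs):

#include <cassert>

// Toy model of the preemption handshake used above: the freeze path records an
// alternate return address; the resuming code consumes it exactly once.
struct ToyThread {
  void (*preempt_alternate_return)() = nullptr;
};

static bool resumed_at_alternate = false;
static void alternate_return_stub() { resumed_at_alternate = true; }

void after_vm_call(ToyThread& t) {
  // Mirrors: load, compare with NULL_WORD, clear, then jump.
  if (t.preempt_alternate_return != nullptr) {
    void (*target)() = t.preempt_alternate_return;
    t.preempt_alternate_return = nullptr;   // consume it so a later call is not redirected
    target();
    return;
  }
  // not preempted: fall through to the normal path
}

int main() {
  ToyThread t;
  after_vm_call(t);                         // nothing pending: nothing happens
  assert(!resumed_at_alternate);
  t.preempt_alternate_return = alternate_return_stub;
  after_vm_call(t);                         // pending: jump once, then cleared
  assert(resumed_at_alternate && t.preempt_alternate_return == nullptr);
  return 0;
}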
@@ -35,6 +35,7 @@
 #include "gc/shared/tlab_globals.hpp"
 #include "interpreter/bytecodeHistogram.hpp"
 #include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterRuntime.hpp"
 #include "jvm.h"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"

@@ -528,7 +529,6 @@ void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {
   // restore stack pointer
   addq(rsp, frame::arg_reg_save_area_bytes);
 #endif
-
 }

 void MacroAssembler::cmp64(Register src1, AddressLiteral src2, Register rscratch) {

@@ -3040,25 +3040,13 @@ void MacroAssembler::pop_cont_fastpath() {
 }

 void MacroAssembler::inc_held_monitor_count() {
-#ifndef _LP64
-  Register thread = rax;
-  push(thread);
-  get_thread(thread);
-  incrementl(Address(thread, JavaThread::held_monitor_count_offset()));
-  pop(thread);
-#else // LP64
+#ifdef _LP64
   incrementq(Address(r15_thread, JavaThread::held_monitor_count_offset()));
 #endif
 }

 void MacroAssembler::dec_held_monitor_count() {
-#ifndef _LP64
-  Register thread = rax;
-  push(thread);
-  get_thread(thread);
-  decrementl(Address(thread, JavaThread::held_monitor_count_offset()));
-  pop(thread);
-#else // LP64
+#ifdef _LP64
   decrementq(Address(r15_thread, JavaThread::held_monitor_count_offset()));
 #endif
 }

@@ -3155,6 +3143,17 @@ void MacroAssembler::set_last_Java_frame(Register java_thread,
   movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
 }

+#ifdef _LP64
+void MacroAssembler::set_last_Java_frame(Register last_java_sp,
+                                         Register last_java_fp,
+                                         Label &L,
+                                         Register scratch) {
+  lea(scratch, L);
+  movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), scratch);
+  set_last_Java_frame(r15_thread, last_java_sp, last_java_fp, nullptr, scratch);
+}
+#endif
+
 void MacroAssembler::shlptr(Register dst, int imm8) {
   LP64_ONLY(shlq(dst, imm8)) NOT_LP64(shll(dst, imm8));
 }
@@ -111,7 +111,8 @@ class MacroAssembler: public Assembler {
            op == 0xEB /* short jmp */ ||
            (op & 0xF0) == 0x70 /* short jcc */ ||
            (op == 0x0F && (branch[1] & 0xF0) == 0x80) /* jcc */ ||
-           (op == 0xC7 && branch[1] == 0xF8) /* xbegin */,
+           (op == 0xC7 && branch[1] == 0xF8) /* xbegin */ ||
+           (op == 0x8D) /* lea */,
            "Invalid opcode at patch point");

     if (op == 0xEB || (op & 0xF0) == 0x70) {

@@ -122,7 +123,7 @@ class MacroAssembler: public Assembler {
                 file == nullptr ? "<null>" : file, line);
       *disp = (char)imm8;
     } else {
-      int* disp = (int*) &branch[(op == 0x0F || op == 0xC7)? 2: 1];
+      int* disp = (int*) &branch[(op == 0x0F || op == 0xC7 || op == 0x8D) ? 2 : 1];
       int imm32 = checked_cast<int>(target - (address) &disp[1]);
       *disp = imm32;
     }

@@ -335,6 +336,13 @@ class MacroAssembler: public Assembler {
                            address last_java_pc,
                            Register rscratch);

+#ifdef _LP64
+  void set_last_Java_frame(Register last_java_sp,
+                           Register last_java_fp,
+                           Label &last_java_pc,
+                           Register scratch);
+#endif
+
   void reset_last_Java_frame(Register thread, bool clear_fp);

   // thread in the default location (r15_thread on 64bit)

@@ -954,7 +962,7 @@ public:
   void atomic_incptr(AddressLiteral counter_addr, Register rscratch = noreg) { LP64_ONLY(atomic_incq(counter_addr, rscratch)) NOT_LP64(atomic_incl(counter_addr, rscratch)) ; }
   void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)) ; }

-  void lea(Register dst, Address adr) { Assembler::lea(dst, adr); }
+  using Assembler::lea;
   void lea(Register dst, AddressLiteral adr);
   void lea(Address dst, AddressLiteral adr, Register rscratch);

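Replacing the forwarding lea overload with using Assembler::lea matters because an overload declared in MacroAssembler hides every Assembler::lea, including the new lea(Register, Label&); the using-declaration makes the base overloads visible again next to the AddressLiteral ones. A small self-contained illustration of that hiding rule (generic types, not the assembler classes):

#include <cassert>

struct Base {
  int f(int x)    { return x; }
  int f(double x) { return (int)x + 100; }   // stands in for the newly added overload
};

struct DerivedHiding : Base {
  int f(char c) { return c; }                // hides every Base::f overload
};

struct DerivedUsing : Base {
  using Base::f;                             // re-exposes the Base overloads
  int f(char c) { return c; }
};

int main() {
  // DerivedHiding d1; d1.f(1.5);            // would silently pick f(char) via conversion
  DerivedUsing d2;
  assert(d2.f(1.5) == 101);                  // Base::f(double) is visible again
  assert(d2.f('A') == 'A');
  return 0;
}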
@@ -2056,6 +2056,11 @@ uint SharedRuntime::out_preserve_stack_slots() {
   return 0;
 }

+VMReg SharedRuntime::thread_register() {
+  Unimplemented();
+  return nullptr;
+}
+
 //------------------------------generate_deopt_blob----------------------------
 void SharedRuntime::generate_deopt_blob() {
   // allocate space for the code
@@ -171,6 +171,7 @@ class RegisterSaver {
   static int rax_offset_in_bytes(void) { return BytesPerInt * rax_off; }
   static int rdx_offset_in_bytes(void) { return BytesPerInt * rdx_off; }
   static int rbx_offset_in_bytes(void) { return BytesPerInt * rbx_off; }
+  static int r15_offset_in_bytes(void) { return BytesPerInt * r15_off; }
   static int xmm0_offset_in_bytes(void) { return BytesPerInt * xmm0_off; }
   static int return_offset_in_bytes(void) { return BytesPerInt * return_off; }

@@ -1420,7 +1421,7 @@ static void fill_continuation_entry(MacroAssembler* masm, Register reg_cont_obj,
 // Kills:
 //   rbx
 //
-void static continuation_enter_cleanup(MacroAssembler* masm) {
+static void continuation_enter_cleanup(MacroAssembler* masm) {
 #ifdef ASSERT
   Label L_good_sp;
   __ cmpptr(rsp, Address(r15_thread, JavaThread::cont_entry_offset()));

@@ -1610,6 +1611,7 @@ static void gen_continuation_enter(MacroAssembler* masm,

   __ bind(L_thaw);

+  ContinuationEntry::_thaw_call_pc_offset = __ pc() - start;
   __ call(RuntimeAddress(StubRoutines::cont_thaw()));

   ContinuationEntry::_return_pc_offset = __ pc() - start;

@@ -1619,7 +1621,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
   // --- Normal exit (resolve/thawing)

   __ bind(L_exit);
+  ContinuationEntry::_cleanup_offset = __ pc() - start;
   continuation_enter_cleanup(masm);
   __ pop(rbp);
   __ ret(0);

@@ -1712,6 +1714,10 @@ static void gen_continuation_yield(MacroAssembler* masm,
   __ ret(0);
 }

+void SharedRuntime::continuation_enter_cleanup(MacroAssembler* masm) {
+  ::continuation_enter_cleanup(masm);
+}
+
 static void gen_special_dispatch(MacroAssembler* masm,
                                  const methodHandle& method,
                                  const BasicType* sig_bt,

@@ -2180,11 +2186,16 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   // points into the right code segment. It does not have to be the correct return pc.
   // We use the same pc/oopMap repeatedly when we call out

+  Label native_return;
+  if (LockingMode != LM_LEGACY && method->is_object_wait0()) {
+    // For convenience we use the pc we want to resume to in case of preemption on Object.wait.
+    __ set_last_Java_frame(rsp, noreg, native_return, rscratch1);
+  } else {
   intptr_t the_pc = (intptr_t) __ pc();
   oop_maps->add_gc_map(the_pc - start, map);

-  __ set_last_Java_frame(rsp, noreg, (address)the_pc, rscratch1);
+  __ set_last_Java_frame(rsp, noreg, __ pc(), rscratch1);
+  }

   // We have all of the arguments setup at this point. We must not touch any register
   // argument registers at this point (what if we save/restore them there are no oop?

@@ -2271,12 +2282,13 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     // Save the test result, for recursive case, the result is zero
     __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
     __ jcc(Assembler::notEqual, slow_path_lock);

+    __ bind(count_mon);
+    __ inc_held_monitor_count();
   } else {
     assert(LockingMode == LM_LIGHTWEIGHT, "must be");
     __ lightweight_lock(lock_reg, obj_reg, swap_reg, r15_thread, rscratch1, slow_path_lock);
   }
-  __ bind(count_mon);
-  __ inc_held_monitor_count();

   // Slow path will re-enter here
   __ bind(lock_done);

@@ -2367,6 +2379,20 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);
   __ bind(after_transition);

+  if (LockingMode != LM_LEGACY && method->is_object_wait0()) {
+    // Check preemption for Object.wait()
+    __ movptr(rscratch1, Address(r15_thread, JavaThread::preempt_alternate_return_offset()));
+    __ cmpptr(rscratch1, NULL_WORD);
+    __ jccb(Assembler::equal, native_return);
+    __ movptr(Address(r15_thread, JavaThread::preempt_alternate_return_offset()), NULL_WORD);
+    __ jmp(rscratch1);
+    __ bind(native_return);
+
+    intptr_t the_pc = (intptr_t) __ pc();
+    oop_maps->add_gc_map(the_pc - start, map);
+  }
+
   Label reguard;
   Label reguard_done;
   __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_yellow_reserved_disabled);

@@ -2416,7 +2442,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   } else {
     assert(LockingMode == LM_LIGHTWEIGHT, "must be");
     __ lightweight_unlock(obj_reg, swap_reg, r15_thread, lock_reg, slow_path_unlock);
-    __ dec_held_monitor_count();
   }

   // slow path re-enters here

@@ -2490,8 +2515,14 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   __ mov(c_rarg1, lock_reg);
   __ mov(c_rarg2, r15_thread);

-  // Not a leaf but we have last_Java_frame setup as we want
+  // Not a leaf but we have last_Java_frame setup as we want.
+  // We don't want to unmount in case of contention since that would complicate preserving
+  // the arguments that had already been marshalled into the native convention. So we force
+  // the freeze slow path to find this native wrapper frame (see recurse_freeze_native_frame())
+  // and pin the vthread. Otherwise the fast path won't find it since we don't walk the stack.
+  __ push_cont_fastpath();
   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
+  __ pop_cont_fastpath();
   restore_args(masm, total_c_args, c_arg, out_regs);

 #ifdef ASSERT

@@ -2606,6 +2637,10 @@ uint SharedRuntime::in_preserve_stack_slots() {
   return 4 + 2 * VerifyStackAtCalls;
 }

+VMReg SharedRuntime::thread_register() {
+  return r15_thread->as_VMReg();
+}
+
 //------------------------------generate_deopt_blob----------------------------
 void SharedRuntime::generate_deopt_blob() {
   // Allocate space for the code
@@ -114,6 +114,7 @@ inline int StackChunkFrameStream<frame_kind>::interpreter_frame_num_oops() const
   f.interpreted_frame_oop_map(&mask);
   return mask.num_oops()
          + 1 // for the mirror oop
+         + (f.interpreter_frame_method()->is_native() ? 1 : 0) // temp oop slot
          + pointer_delta_as_int((intptr_t*)f.interpreter_frame_monitor_begin(),
                                 (intptr_t*)f.interpreter_frame_monitor_end())/BasicObjectLock::size();
 }
@ -35,6 +35,7 @@
|
|||||||
#include "prims/jvmtiExport.hpp"
|
#include "prims/jvmtiExport.hpp"
|
||||||
#include "prims/upcallLinker.hpp"
|
#include "prims/upcallLinker.hpp"
|
||||||
#include "runtime/arguments.hpp"
|
#include "runtime/arguments.hpp"
|
||||||
|
#include "runtime/continuationEntry.hpp"
|
||||||
#include "runtime/javaThread.hpp"
|
#include "runtime/javaThread.hpp"
|
||||||
#include "runtime/sharedRuntime.hpp"
|
#include "runtime/sharedRuntime.hpp"
|
||||||
#include "runtime/stubRoutines.hpp"
|
#include "runtime/stubRoutines.hpp"
|
||||||
@ -3781,6 +3782,36 @@ address StubGenerator::generate_cont_returnBarrier_exception() {
|
|||||||
return generate_cont_thaw("Cont thaw return barrier exception", Continuation::thaw_return_barrier_exception);
|
return generate_cont_thaw("Cont thaw return barrier exception", Continuation::thaw_return_barrier_exception);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
address StubGenerator::generate_cont_preempt_stub() {
|
||||||
|
if (!Continuations::enabled()) return nullptr;
|
||||||
|
StubCodeMark mark(this, "StubRoutines","Continuation preempt stub");
|
||||||
|
address start = __ pc();
|
||||||
|
|
||||||
|
__ reset_last_Java_frame(true);
|
||||||
|
|
||||||
|
// Set rsp to enterSpecial frame, i.e. remove all frames copied into the heap.
|
||||||
|
__ movptr(rsp, Address(r15_thread, JavaThread::cont_entry_offset()));
|
||||||
|
|
||||||
|
Label preemption_cancelled;
|
||||||
|
__ movbool(rscratch1, Address(r15_thread, JavaThread::preemption_cancelled_offset()));
|
||||||
|
__ testbool(rscratch1);
|
||||||
|
__ jcc(Assembler::notZero, preemption_cancelled);
|
||||||
|
|
||||||
|
// Remove enterSpecial frame from the stack and return to Continuation.run() to unmount.
|
||||||
|
SharedRuntime::continuation_enter_cleanup(_masm);
|
||||||
|
__ pop(rbp);
|
||||||
|
__ ret(0);
|
||||||
|
|
||||||
|
// We acquired the monitor after freezing the frames so call thaw to continue execution.
|
||||||
|
__ bind(preemption_cancelled);
|
||||||
|
__ movbool(Address(r15_thread, JavaThread::preemption_cancelled_offset()), false);
|
||||||
|
__ lea(rbp, Address(rsp, checked_cast<int32_t>(ContinuationEntry::size())));
|
||||||
|
__ movptr(rscratch1, ExternalAddress(ContinuationEntry::thaw_call_pc_address()));
|
||||||
|
__ jmp(rscratch1);
|
||||||
|
|
||||||
|
return start;
|
||||||
|
}
|
||||||
|
|
||||||
// exception handler for upcall stubs
|
// exception handler for upcall stubs
|
||||||
address StubGenerator::generate_upcall_stub_exception_handler() {
|
address StubGenerator::generate_upcall_stub_exception_handler() {
|
||||||
StubCodeMark mark(this, "StubRoutines", "upcall stub exception handler");
|
StubCodeMark mark(this, "StubRoutines", "upcall stub exception handler");
|
||||||
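The preempt stub added above has two exits. If the preemption went through, the frames have already been copied to the heap, so the stub unwinds to Continuation.run() and the carrier thread unmounts the virtual thread. If the thread managed to acquire the monitor after its frames were frozen, the preemption is cancelled: the flag is cleared and the stub jumps to the thaw call so execution resumes where it left off. A minimal plain-C++ sketch of that two-way decision (the FakeThread type is invented for illustration, not HotSpot's JavaThread):

#include <cstdio>

// Invented stand-in for the per-thread preemption_cancelled flag.
struct FakeThread {
  bool preemption_cancelled = false;
};

enum class PreemptExit { UnmountViaContinuationRun, ResumeViaThaw };

PreemptExit preempt_stub_exit(FakeThread& t) {
  if (!t.preemption_cancelled) {
    // Frames are already frozen to the heap: return to Continuation.run()
    // so the carrier thread can unmount the virtual thread.
    return PreemptExit::UnmountViaContinuationRun;
  }
  // The monitor was acquired after freezing: clear the flag and continue by
  // thawing the just-frozen frames.
  t.preemption_cancelled = false;
  return PreemptExit::ResumeViaThaw;
}

int main() {
  FakeThread t;
  std::printf("exit=%d\n", static_cast<int>(preempt_stub_exit(t))); // exit=0 (unmount)
}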
@@ -3953,6 +3984,7 @@ void StubGenerator::generate_continuation_stubs() {
   StubRoutines::_cont_thaw = generate_cont_thaw();
   StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
   StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
+  StubRoutines::_cont_preempt_stub = generate_cont_preempt_stub();
 }
 
 void StubGenerator::generate_final_stubs() {

@@ -602,6 +602,8 @@ class StubGenerator: public StubCodeGenerator {
   address generate_cont_returnBarrier();
   address generate_cont_returnBarrier_exception();
 
+  address generate_cont_preempt_stub();
+
   // Continuation point for throwing of implicit exceptions that are
   // not handled in the current activation. Fabricates an exception
   // oop and initiates normal exception dispatching in this

@@ -34,7 +34,7 @@ static bool returns_to_call_stub(address return_pc) { return return_pc == _call_
 enum platform_dependent_constants {
   // simply increase sizes if too small (assembler will crash if too small)
   _initial_stubs_code_size = 20000 WINDOWS_ONLY(+1000),
-  _continuation_stubs_code_size = 1000 LP64_ONLY(+1000),
+  _continuation_stubs_code_size = 1000 LP64_ONLY(+2000),
   // AVX512 intrinsics add more code in 64-bit VM,
   // Windows have more code to save/restore registers
   _compiler_stubs_code_size = 20000 LP64_ONLY(+47000) WINDOWS_ONLY(+2000),

@@ -387,6 +387,26 @@ address TemplateInterpreterGenerator::generate_safept_entry_for(
   return entry;
 }
 
+address TemplateInterpreterGenerator::generate_cont_resume_interpreter_adapter() {
+  if (!Continuations::enabled()) return nullptr;
+  address start = __ pc();
+
+  __ restore_bcp();
+  __ restore_locals();
+
+  // Get return address before adjusting rsp
+  __ movptr(rax, Address(rsp, 0));
+
+  // Restore stack bottom
+  __ movptr(rcx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
+  __ lea(rsp, Address(rbp, rcx, Address::times_ptr));
+  // and NULL it as marker that esp is now tos until next java call
+  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
+
+  __ jmp(rax);
+
+  return start;
+}
+
 
 // Helpers for commoning out cases in the various type of method entries.
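The resume adapter above restores the interpreter's bcp and locals registers, rebuilds rsp from the frame's saved last_sp slot (stored as a word offset relative to rbp), nulls that slot as the usual "sp is tos" marker, and then jumps to the return address it grabbed from the top of the stack. A small stand-alone sketch of the slot arithmetic, over an invented frame layout rather than HotSpot's real frame class:

#include <cstdint>
#include <cstdio>

// Invented layout: fp[last_sp_slot] holds a word offset relative to fp, mirroring
// what the interpreter_frame_last_sp_offset slot stores in the code above.
struct FakeInterpFrame {
  intptr_t* fp;            // frame pointer (rbp)
  ptrdiff_t last_sp_slot;  // index of the saved last_sp slot, relative to fp
};

intptr_t* restore_stack_bottom(FakeInterpFrame& f) {
  intptr_t offset_in_words = f.fp[f.last_sp_slot];
  intptr_t* new_sp = f.fp + offset_in_words;  // like lea(rsp, Address(rbp, rcx, times_ptr))
  f.fp[f.last_sp_slot] = 0;                   // null the slot: sp is tos until the next Java call
  return new_sp;
}

int main() {
  intptr_t words[8] = {0};
  FakeInterpFrame f{&words[4], -1};
  words[3] = -4;                               // pretend last_sp sits four words below fp
  std::printf("%d\n", restore_stack_bottom(f) == &words[0]);  // prints 1
}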
@@ -1029,7 +1049,10 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
 
   // It is enough that the pc() points into the right code
   // segment. It does not have to be the correct return pc.
-  __ set_last_Java_frame(rsp, rbp, (address) __ pc(), rscratch1);
+  // For convenience we use the pc we want to resume to in
+  // case of preemption on Object.wait.
+  Label native_return;
+  __ set_last_Java_frame(rsp, rbp, native_return, rscratch1);
 #endif // _LP64
 
   // change thread state

@@ -1049,11 +1072,15 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
   __ movl(Address(thread, JavaThread::thread_state_offset()),
           _thread_in_native);
 
+  __ push_cont_fastpath();
+
   // Call the native method.
   __ call(rax);
   // 32: result potentially in rdx:rax or ST0
   // 64: result potentially in rax or xmm0
 
+  __ pop_cont_fastpath();
+
   // Verify or restore cpu control state after JNI call
   __ restore_cpu_control_state_after_jni(rscratch1);
 

@@ -1077,10 +1104,10 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
   Label push_double;
   ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
   ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
-  __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
+  __ cmpptr(Address(rbp, (frame::interpreter_frame_result_handler_offset)*wordSize),
             float_handler.addr(), noreg);
   __ jcc(Assembler::equal, push_double);
-  __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
+  __ cmpptr(Address(rbp, (frame::interpreter_frame_result_handler_offset)*wordSize),
             double_handler.addr(), noreg);
   __ jcc(Assembler::notEqual, L);
   __ bind(push_double);

@@ -1150,6 +1177,24 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
   // change thread state
   __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);
 
+#ifdef _LP64
+  if (LockingMode != LM_LEGACY) {
+    // Check preemption for Object.wait()
+    Label not_preempted;
+    __ movptr(rscratch1, Address(r15_thread, JavaThread::preempt_alternate_return_offset()));
+    __ cmpptr(rscratch1, NULL_WORD);
+    __ jccb(Assembler::equal, not_preempted);
+    __ movptr(Address(r15_thread, JavaThread::preempt_alternate_return_offset()), NULL_WORD);
+    __ jmp(rscratch1);
+    __ bind(native_return);
+    __ restore_after_resume(true /* is_native */);
+    __ bind(not_preempted);
+  } else {
+    // any pc will do so just use this one for LM_LEGACY to keep code together.
+    __ bind(native_return);
+  }
+#endif // _LP64
+
   // reset_last_Java_frame
   __ reset_last_Java_frame(thread, true);
 
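The post-call check above relies on the runtime installing an alternate return pc in the thread while the virtual thread was preempted inside Object.wait(): if the field is set, the generated code clears it and jumps there (landing at native_return, where restore_after_resume(true /* is_native */) is run before falling through); otherwise it takes the normal path. A tiny sketch of that consume-once pattern, using a made-up thread struct rather than JavaThread:

#include <cstdio>

// Made-up stand-in for the preempt_alternate_return field checked above.
struct FakeThread {
  const void* preempt_alternate_return = nullptr;
};

// Returns the resume pc (and clears it) if one was installed, else nullptr.
const void* take_alternate_return(FakeThread& t) {
  const void* resume = t.preempt_alternate_return;
  if (resume != nullptr) {
    t.preempt_alternate_return = nullptr;  // consumed exactly once
  }
  return resume;
}

int main() {
  FakeThread t;
  std::printf("%p\n", take_alternate_return(t));  // null: not preempted, fall through
}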
@@ -48,6 +48,10 @@ void FreezeBase::adjust_interpreted_frame_unextended_sp(frame& f) {
   Unimplemented();
 }
 
+inline void FreezeBase::prepare_freeze_interpreted_top_frame(frame& f) {
+  Unimplemented();
+}
+
 inline void FreezeBase::relativize_interpreted_frame_metadata(const frame& f, const frame& hf) {
   Unimplemented();
 }

@@ -83,6 +87,15 @@ inline void ThawBase::patch_pd(frame& f, const frame& caller) {
   Unimplemented();
 }
 
+inline void ThawBase::patch_pd(frame& f, intptr_t* caller_sp) {
+  Unimplemented();
+}
+
+inline intptr_t* ThawBase::push_cleanup_continuation() {
+  Unimplemented();
+  return nullptr;
+}
+
 template <typename ConfigT>
 inline void Thaw<ConfigT>::patch_caller_links(intptr_t* sp, intptr_t* bottom) {
   Unimplemented();

@@ -33,6 +33,10 @@ static inline intptr_t** link_address(const frame& f) {
   return nullptr;
 }
 
+static inline void patch_return_pc_with_preempt_stub(frame& f) {
+  Unimplemented();
+}
+
 inline int ContinuationHelper::frame_align_words(int size) {
   Unimplemented();
   return 0;

@@ -60,11 +64,11 @@ inline void ContinuationHelper::set_anchor_to_entry_pd(JavaFrameAnchor* anchor,
   Unimplemented();
 }
 
-#ifdef ASSERT
 inline void ContinuationHelper::set_anchor_pd(JavaFrameAnchor* anchor, intptr_t* sp) {
   Unimplemented();
 }
 
+#ifdef ASSERT
 inline bool ContinuationHelper::Frame::assert_frame_laid_out(frame f) {
   Unimplemented();
   return false;

@@ -72,6 +72,11 @@ bool frame::upcall_stub_frame_is_first() const {
   return false;
 }
 
+JavaThread** frame::saved_thread_address(const frame& f) {
+  Unimplemented();
+  return nullptr;
+}
+
 frame frame::sender_for_nonentry_frame(RegisterMap *map) const {
   assert(zeroframe()->is_interpreter_frame() ||
          zeroframe()->is_fake_stub_frame(), "wrong type of frame");

@@ -36,8 +36,6 @@
 // The default padding size for data structures to avoid false sharing.
 #define DEFAULT_PADDING_SIZE DEFAULT_CACHE_LINE_SIZE
 
-#define SUPPORT_MONITOR_COUNT
-
 #include <ffi.h>
 
 // Indicates whether the C calling conventions require that

@@ -85,6 +85,11 @@ uint SharedRuntime::out_preserve_stack_slots() {
   return 0;
 }
 
+VMReg SharedRuntime::thread_register() {
+  Unimplemented();
+  return nullptr;
+}
+
 JRT_LEAF(void, zero_stub())
   ShouldNotCallThis();
 JRT_END

@@ -346,9 +346,6 @@ int ZeroInterpreter::native_entry(Method* method, intptr_t UNUSED, TRAPS) {
         success = false;
       }
     }
-    if (success) {
-      THREAD->inc_held_monitor_count();
-    }
   }
   if (!success) {
     CALL_VM_NOCHECK(InterpreterRuntime::monitorenter(thread, monitor));

@@ -499,9 +496,6 @@ int ZeroInterpreter::native_entry(Method* method, intptr_t UNUSED, TRAPS) {
         success = false;
       }
     }
-    if (success) {
-      THREAD->dec_held_monitor_count();
-    }
   }
   if (!success) {
     InterpreterRuntime::monitorexit(monitor);

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -89,7 +89,7 @@ class StubAssembler: public C1_MacroAssembler {
   int call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3);
 
   void prologue(const char* name, bool must_gc_arguments);
-  void epilogue();
+  void epilogue(bool use_pop = false);
 };
 
 #endif // SHARE_C1_C1_MACROASSEMBLER_HPP

@@ -135,6 +135,9 @@ public:
   static void initialize(BufferBlob* blob);
   static void initialize_pd();
 
+  // return offset in words
+  static uint runtime_blob_current_thread_offset(frame f);
+
   // stubs
   static CodeBlob* blob_for (C1StubId id);
   static address entry_for(C1StubId id) { return blob_for(id)->code_begin(); }

@@ -1686,6 +1686,7 @@ bool java_lang_Thread::is_in_VTMS_transition(oop java_thread) {
 }
 
 void java_lang_Thread::set_is_in_VTMS_transition(oop java_thread, bool val) {
+  assert(is_in_VTMS_transition(java_thread) != val, "already %s transition", val ? "inside" : "outside");
   java_thread->bool_field_put_volatile(_jvmti_is_in_VTMS_transition_offset, val);
 }
 

@@ -2021,12 +2022,20 @@ int java_lang_VirtualThread::static_vthread_scope_offset;
 int java_lang_VirtualThread::_carrierThread_offset;
 int java_lang_VirtualThread::_continuation_offset;
 int java_lang_VirtualThread::_state_offset;
+int java_lang_VirtualThread::_next_offset;
+int java_lang_VirtualThread::_onWaitingList_offset;
+int java_lang_VirtualThread::_notified_offset;
+int java_lang_VirtualThread::_timeout_offset;
 
 #define VTHREAD_FIELDS_DO(macro) \
   macro(static_vthread_scope_offset, k, "VTHREAD_SCOPE", continuationscope_signature, true); \
   macro(_carrierThread_offset, k, "carrierThread", thread_signature, false); \
   macro(_continuation_offset, k, "cont", continuation_signature, false); \
-  macro(_state_offset, k, "state", int_signature, false)
+  macro(_state_offset, k, "state", int_signature, false); \
+  macro(_next_offset, k, "next", vthread_signature, false); \
+  macro(_onWaitingList_offset, k, "onWaitingList", bool_signature, false); \
+  macro(_notified_offset, k, "notified", bool_signature, false); \
+  macro(_timeout_offset, k, "timeout", long_signature, false);
 
 
 void java_lang_VirtualThread::compute_offsets() {

@@ -2052,6 +2061,56 @@ int java_lang_VirtualThread::state(oop vthread) {
   return vthread->int_field_acquire(_state_offset);
 }
 
+void java_lang_VirtualThread::set_state(oop vthread, int state) {
+  vthread->release_int_field_put(_state_offset, state);
+}
+
+int java_lang_VirtualThread::cmpxchg_state(oop vthread, int old_state, int new_state) {
+  jint* addr = vthread->field_addr<jint>(_state_offset);
+  int res = Atomic::cmpxchg(addr, old_state, new_state);
+  return res;
+}
+
+oop java_lang_VirtualThread::next(oop vthread) {
+  return vthread->obj_field(_next_offset);
+}
+
+void java_lang_VirtualThread::set_next(oop vthread, oop next_vthread) {
+  vthread->obj_field_put(_next_offset, next_vthread);
+}
+
+// Add vthread to the waiting list if it's not already in it. Multiple threads
+// could be trying to add vthread to the list at the same time, so we control
+// access with a cmpxchg on onWaitingList. The winner adds vthread to the list.
+// Method returns true if we added vthread to the list, false otherwise.
+bool java_lang_VirtualThread::set_onWaitingList(oop vthread, OopHandle& list_head) {
+  jboolean* addr = vthread->field_addr<jboolean>(_onWaitingList_offset);
+  jboolean vthread_on_list = Atomic::load(addr);
+  if (!vthread_on_list) {
+    vthread_on_list = Atomic::cmpxchg(addr, (jboolean)JNI_FALSE, (jboolean)JNI_TRUE);
+    if (!vthread_on_list) {
+      for (;;) {
+        oop head = list_head.resolve();
+        java_lang_VirtualThread::set_next(vthread, head);
+        if (list_head.cmpxchg(head, vthread) == head) return true;
+      }
+    }
+  }
+  return false; // already on waiting list
+}
+
+void java_lang_VirtualThread::set_notified(oop vthread, jboolean value) {
+  vthread->bool_field_put_volatile(_notified_offset, value);
+}
+
+jlong java_lang_VirtualThread::timeout(oop vthread) {
+  return vthread->long_field(_timeout_offset);
+}
+
+void java_lang_VirtualThread::set_timeout(oop vthread, jlong value) {
+  vthread->long_field_put(_timeout_offset, value);
+}
+
 JavaThreadStatus java_lang_VirtualThread::map_state_to_thread_status(int state) {
   JavaThreadStatus status = JavaThreadStatus::NEW;
   switch (state & ~SUSPENDED) {
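set_onWaitingList() above layers two compare-and-exchange steps: a cmpxchg on the per-vthread onWaitingList flag decides which thread gets to enqueue it at all, and only the winner then CAS-pushes the vthread onto the shared list head, retrying on contention. The same claim-then-push protocol as a self-contained std::atomic sketch (the node type and list here are illustrative, not HotSpot's oop/OopHandle machinery):

#include <atomic>
#include <cassert>

struct VThreadNode {
  std::atomic<bool> on_waiting_list{false};
  VThreadNode* next = nullptr;
};

// Returns true if this call enqueued t; false if it was already claimed/queued.
bool push_if_not_queued(std::atomic<VThreadNode*>& list_head, VThreadNode* t) {
  bool expected = false;
  if (!t->on_waiting_list.compare_exchange_strong(expected, true)) {
    return false;                          // another thread already claimed this vthread
  }
  VThreadNode* head = list_head.load();
  do {
    t->next = head;                        // link in front of the current head
  } while (!list_head.compare_exchange_weak(head, t));
  return true;
}

int main() {
  std::atomic<VThreadNode*> head{nullptr};
  VThreadNode a;
  assert(push_if_not_queued(head, &a));    // first attempt enqueues
  assert(!push_if_not_queued(head, &a));   // second attempt sees the flag and bails out
}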
@@ -2065,6 +2124,9 @@ JavaThreadStatus java_lang_VirtualThread::map_state_to_thread_status(int state)
     case UNPARKED:
     case YIELDING:
     case YIELDED:
+    case UNBLOCKED:
+    case WAITING:
+    case TIMED_WAITING:
       status = JavaThreadStatus::RUNNABLE;
       break;
     case PARKED:

@@ -2075,6 +2137,16 @@ JavaThreadStatus java_lang_VirtualThread::map_state_to_thread_status(int state)
     case TIMED_PINNED:
       status = JavaThreadStatus::PARKED_TIMED;
       break;
+    case BLOCKING:
+    case BLOCKED:
+      status = JavaThreadStatus::BLOCKED_ON_MONITOR_ENTER;
+      break;
+    case WAIT:
+      status = JavaThreadStatus::IN_OBJECT_WAIT;
+      break;
+    case TIMED_WAIT:
+      status = JavaThreadStatus::IN_OBJECT_WAIT_TIMED;
+      break;
     case TERMINATED:
       status = JavaThreadStatus::TERMINATED;
       break;

@@ -2084,6 +2156,13 @@ JavaThreadStatus java_lang_VirtualThread::map_state_to_thread_status(int state)
   return status;
 }
 
+bool java_lang_VirtualThread::is_preempted(oop vthread) {
+  oop continuation = java_lang_VirtualThread::continuation(vthread);
+  assert(continuation != nullptr, "vthread with no continuation");
+  stackChunkOop chunk = jdk_internal_vm_Continuation::tail(continuation);
+  return chunk != nullptr && chunk->preempted();
+}
+
 #if INCLUDE_CDS
 void java_lang_VirtualThread::serialize_offsets(SerializeClosure* f) {
   VTHREAD_FIELDS_DO(FIELD_SERIALIZE_OFFSET);

@@ -530,6 +530,11 @@ class java_lang_VirtualThread : AllStatic {
   static int _carrierThread_offset;
   static int _continuation_offset;
   static int _state_offset;
+  static int _next_offset;
+  static int _onWaitingList_offset;
+  static int _notified_offset;
+  static int _recheckInterval_offset;
+  static int _timeout_offset;
   JFR_ONLY(static int _jfr_epoch_offset;)
  public:
   enum {

@@ -545,6 +550,13 @@ class java_lang_VirtualThread : AllStatic {
     UNPARKED = 9,
     YIELDING = 10,
     YIELDED = 11,
+    BLOCKING = 12,
+    BLOCKED = 13,
+    UNBLOCKED = 14,
+    WAITING = 15,
+    WAIT = 16, // waiting in Object.wait
+    TIMED_WAITING = 17,
+    TIMED_WAIT = 18, // waiting in timed-Object.wait
     TERMINATED = 99,
 
     // additional state bits

@@ -564,6 +576,15 @@ class java_lang_VirtualThread : AllStatic {
   static oop carrier_thread(oop vthread);
   static oop continuation(oop vthread);
   static int state(oop vthread);
+  static void set_state(oop vthread, int state);
+  static int cmpxchg_state(oop vthread, int old_state, int new_state);
+  static oop next(oop vthread);
+  static void set_next(oop vthread, oop next_vthread);
+  static bool set_onWaitingList(oop vthread, OopHandle& list_head);
+  static jlong timeout(oop vthread);
+  static void set_timeout(oop vthread, jlong value);
+  static void set_notified(oop vthread, jboolean value);
+  static bool is_preempted(oop vthread);
   static JavaThreadStatus map_state_to_thread_status(int state);
 };
 

@@ -516,6 +516,8 @@ class SerializeClosure;
   template(checkIndex_name, "checkIndex") \
   template(jfr_epoch_name, "jfr_epoch") \
   template(maxThawingSize_name, "maxThawingSize") \
+  template(lockStackSize_name, "lockStackSize") \
+  template(objectWaiter_name, "objectWaiter") \
   \
   /* name symbols needed by intrinsics */ \
   VM_INTRINSICS_DO(VM_INTRINSIC_IGNORE, VM_SYMBOL_IGNORE, template, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE) \

@@ -564,6 +566,7 @@ class SerializeClosure;
   template(continuation_signature, "Ljdk/internal/vm/Continuation;") \
   template(continuationscope_signature, "Ljdk/internal/vm/ContinuationScope;") \
   template(stackchunk_signature, "Ljdk/internal/vm/StackChunk;") \
+  template(vthread_signature, "Ljava/lang/VirtualThread;") \
   template(object_void_signature, "(Ljava/lang/Object;)V") \
   template(object_int_signature, "(Ljava/lang/Object;)I") \
   template(long_object_long_signature, "(JLjava/lang/Object;)J") \

@@ -708,7 +708,8 @@ void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map
 
   // handle the case of an anchor explicitly set in continuation code that doesn't have a callee
   JavaThread* thread = reg_map->thread();
-  if (thread->has_last_Java_frame() && fr.sp() == thread->last_Java_sp()) {
+  if ((thread->has_last_Java_frame() && fr.sp() == thread->last_Java_sp())
+      JVMTI_ONLY(|| (method()->is_continuation_enter_intrinsic() && thread->on_monitor_waited_event()))) {
     return;
   }
 

@@ -1298,7 +1299,7 @@ nmethod::nmethod(
     _comp_level = CompLevel_none;
     _compiler_type = type;
     _orig_pc_offset = 0;
-    _num_stack_arg_slots = _method->constMethod()->num_stack_arg_slots();
+    _num_stack_arg_slots = 0;
 
     if (offsets->value(CodeOffsets::Exceptions) != -1) {
       // Continuation enter intrinsic

@@ -1145,6 +1145,12 @@ JVM_VirtualThreadUnmount(JNIEnv* env, jobject vthread, jboolean hide);
 JNIEXPORT void JNICALL
 JVM_VirtualThreadDisableSuspend(JNIEnv* env, jclass clazz, jboolean enter);
 
+JNIEXPORT void JNICALL
+JVM_VirtualThreadPinnedEvent(JNIEnv* env, jclass clazz, jstring op);
+
+JNIEXPORT jobject JNICALL
+JVM_TakeVirtualThreadListToUnblock(JNIEnv* env, jclass ignored);
+
 /*
  * Core reflection support.
  */

@@ -735,7 +735,7 @@ JRT_ENTRY_NO_ASYNC(void, InterpreterRuntime::monitorenter(JavaThread* current, B
   assert(Universe::heap()->is_in_or_null(elem->obj()),
          "must be null or an object");
 #ifdef ASSERT
-  current->last_frame().interpreter_frame_verify_monitor(elem);
+  if (!current->preempting()) current->last_frame().interpreter_frame_verify_monitor(elem);
 #endif
 JRT_END
 

@@ -234,8 +234,10 @@ class MaskFillerForNative: public NativeSignatureIterator {
  private:
   uintptr_t * _mask; // the bit mask to be filled
   int _size; // the mask size in bits
+  int _num_oops;
 
   void set_one(int i) {
+    _num_oops++;
     i *= InterpreterOopMap::bits_per_entry;
     assert(0 <= i && i < _size, "offset out of bounds");
     _mask[i / BitsPerWord] |= (((uintptr_t) 1 << InterpreterOopMap::oop_bit_number) << (i % BitsPerWord));

@@ -253,6 +255,7 @@ class MaskFillerForNative: public NativeSignatureIterator {
   MaskFillerForNative(const methodHandle& method, uintptr_t* mask, int size) : NativeSignatureIterator(method) {
     _mask = mask;
     _size = size;
+    _num_oops = 0;
     // initialize with 0
     int i = (size + BitsPerWord - 1) / BitsPerWord;
     while (i-- > 0) _mask[i] = 0;

@@ -261,6 +264,8 @@ class MaskFillerForNative: public NativeSignatureIterator {
   void generate() {
     iterate();
   }
+
+  int num_oops() { return _num_oops; }
 };
 
 bool OopMapCacheEntry::verify_mask(CellTypeState* vars, CellTypeState* stack, int max_locals, int stack_top) {

@@ -319,6 +324,7 @@ void OopMapCacheEntry::fill_for_native(const methodHandle& mh) {
   // fill mask for parameters
   MaskFillerForNative mf(mh, bit_mask(), mask_size());
   mf.generate();
+  _num_oops = mf.num_oops();
 }
 
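The last three hunks teach the native-signature mask filler to count the oop slots it marks, so fill_for_native() can record a per-method oop count alongside the bit mask. A compact sketch of counting while filling a bit mask, with invented types rather than the NativeSignatureIterator API:

#include <cstdint>
#include <cstdio>
#include <vector>

// Invented mask filler: marks oop slots in a bit mask and counts them as it goes.
class OopMaskFiller {
  std::vector<uint64_t> _mask;
  int _num_oops = 0;
 public:
  explicit OopMaskFiller(int slots) : _mask((slots + 63) / 64, 0) {}
  void set_oop_at(int slot) {
    _mask[slot / 64] |= uint64_t(1) << (slot % 64);
    _num_oops++;                         // same idea as the _num_oops++ added to set_one()
  }
  int num_oops() const { return _num_oops; }
};

int main() {
  OopMaskFiller mf(8);
  mf.set_oop_at(0);                      // e.g. the receiver slot
  mf.set_oop_at(3);                      // e.g. an object parameter
  std::printf("oops = %d\n", mf.num_oops());  // oops = 2
}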