8316179: Use consistent naming for lightweight locking in MacroAssembler

Reviewed-by: rkennke, coleenp, dholmes
Author: Stefan Karlsson 2023-09-14 07:02:29 +00:00
parent 11d431b2c4
commit 639ba13c4b
29 changed files with 78 additions and 78 deletions


@ -3875,7 +3875,7 @@ encode %{
__ b(cont);
} else {
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
__ fast_lock(oop, disp_hdr, tmp, rscratch1, no_count);
__ lightweight_lock(oop, disp_hdr, tmp, rscratch1, no_count);
__ b(count);
}
@ -3956,7 +3956,7 @@ encode %{
__ b(cont);
} else {
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
__ fast_unlock(oop, tmp, box, disp_hdr, no_count);
__ lightweight_unlock(oop, tmp, box, disp_hdr, no_count);
__ b(count);
}


@ -83,7 +83,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
// Load object header
ldr(hdr, Address(obj, hdr_offset));
if (LockingMode == LM_LIGHTWEIGHT) {
fast_lock(obj, hdr, rscratch1, rscratch2, slow_case);
lightweight_lock(obj, hdr, rscratch1, rscratch2, slow_case);
} else if (LockingMode == LM_LEGACY) {
Label done;
// and mark it as unlocked
@ -149,7 +149,7 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_
// be encoded.
tst(hdr, markWord::monitor_value);
br(Assembler::NE, slow_case);
fast_unlock(obj, hdr, rscratch1, rscratch2, slow_case);
lightweight_unlock(obj, hdr, rscratch1, rscratch2, slow_case);
} else if (LockingMode == LM_LEGACY) {
// test if object header is pointing to the displaced header, and if so, restore
// the displaced header in the object - if the object header is not pointing to


@ -767,7 +767,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
if (LockingMode == LM_LIGHTWEIGHT) {
ldr(tmp, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
fast_lock(obj_reg, tmp, rscratch1, rscratch2, slow_case);
lightweight_lock(obj_reg, tmp, rscratch1, rscratch2, slow_case);
b(count);
} else if (LockingMode == LM_LEGACY) {
// Load (object->mark() | 1) into swap_reg
@ -898,7 +898,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
ldr(header_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
tbnz(header_reg, exact_log2(markWord::monitor_value), slow_case);
fast_unlock(obj_reg, header_reg, swap_reg, rscratch1, slow_case);
lightweight_unlock(obj_reg, header_reg, swap_reg, rscratch1, slow_case);
b(count);
bind(slow_case);
} else if (LockingMode == LM_LEGACY) {


@ -6314,14 +6314,14 @@ void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp) {
}
}
// Implements fast-locking.
// Implements lightweight-locking.
// Branches to slow upon failure to lock the object, with ZF cleared.
// Falls through upon success with ZF set.
//
// - obj: the object to be locked
// - hdr: the header, already loaded from obj, will be destroyed
// - t1, t2: temporary registers, will be destroyed
void MacroAssembler::fast_lock(Register obj, Register hdr, Register t1, Register t2, Label& slow) {
void MacroAssembler::lightweight_lock(Register obj, Register hdr, Register t1, Register t2, Label& slow) {
assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
assert_different_registers(obj, hdr, t1, t2);
@ -6346,14 +6346,14 @@ void MacroAssembler::fast_lock(Register obj, Register hdr, Register t1, Register
strw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
}
// Implements fast-unlocking.
// Implements lightweight-unlocking.
// Branches to slow upon failure, with ZF cleared.
// Falls through upon success, with ZF set.
//
// - obj: the object to be unlocked
// - hdr: the (pre-loaded) header of the object
// - t1, t2: temporary registers
void MacroAssembler::fast_unlock(Register obj, Register hdr, Register t1, Register t2, Label& slow) {
void MacroAssembler::lightweight_unlock(Register obj, Register hdr, Register t1, Register t2, Label& slow) {
assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
assert_different_registers(obj, hdr, t1, t2);
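
For readers new to the renamed helpers: the comments above give the register and flag contract, and the sketch below gives a rough, simplified C++ picture of the kind of fast path lightweight_lock/lightweight_unlock implement (check the per-thread lock stack, CAS the mark word, push or pop the oop). It is illustrative only, not HotSpot code; the constants, field names and lock-stack size are simplified assumptions.

#include <atomic>
#include <cstddef>
#include <cstdint>

// Simplified stand-ins for the mark-word lock bits (assumptions for this sketch).
constexpr uintptr_t lock_mask_in_place = 0x3;
constexpr uintptr_t unlocked_value     = 0x1;
constexpr uintptr_t monitor_value      = 0x2;

struct Obj { std::atomic<uintptr_t> mark{unlocked_value}; };

struct ThreadSketch {
  static constexpr size_t lock_stack_capacity = 8;   // illustrative size only
  Obj*   lock_stack[lock_stack_capacity] = {};
  size_t lock_stack_top = 0;
};

// Fast path of a lightweight lock: returns false where the assembly would
// branch to the 'slow' label.
bool lightweight_lock_sketch(ThreadSketch* self, Obj* obj) {
  if (self->lock_stack_top == ThreadSketch::lock_stack_capacity)
    return false;                                     // lock stack full -> slow
  uintptr_t hdr = obj->mark.load(std::memory_order_relaxed);
  if (hdr & monitor_value)
    return false;                                     // inflated monitor -> slow
  uintptr_t expected = hdr | unlocked_value;          // must look unlocked
  uintptr_t locked   = hdr & ~lock_mask_in_place;     // clear the lock bits
  if (!obj->mark.compare_exchange_strong(expected, locked,
                                         std::memory_order_acquire))
    return false;                                     // CAS lost -> slow
  self->lock_stack[self->lock_stack_top++] = obj;     // push onto the lock stack
  return true;                                        // fall through: locked
}

// Fast path of a lightweight unlock: the mirror image, popping the lock stack.
bool lightweight_unlock_sketch(ThreadSketch* self, Obj* obj) {
  uintptr_t locked   = obj->mark.load(std::memory_order_relaxed) & ~lock_mask_in_place;
  uintptr_t unlocked = locked | unlocked_value;
  if (!obj->mark.compare_exchange_strong(locked, unlocked,
                                         std::memory_order_release))
    return false;                                     // e.g. inflated meanwhile -> slow
  self->lock_stack[--self->lock_stack_top] = nullptr; // pop
  return true;
}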


@ -1592,8 +1592,8 @@ public:
// Code for java.lang.Thread::onSpinWait() intrinsic.
void spin_wait();
void fast_lock(Register obj, Register hdr, Register t1, Register t2, Label& slow);
void fast_unlock(Register obj, Register hdr, Register t1, Register t2, Label& slow);
void lightweight_lock(Register obj, Register hdr, Register t1, Register t2, Label& slow);
void lightweight_unlock(Register obj, Register hdr, Register t1, Register t2, Label& slow);
private:
// Check the current thread doesn't need a cross modify fence.


@ -1812,7 +1812,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
} else {
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
__ ldr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
__ fast_lock(obj_reg, swap_reg, tmp, rscratch1, slow_path_lock);
__ lightweight_lock(obj_reg, swap_reg, tmp, rscratch1, slow_path_lock);
}
__ bind(count);
__ increment(Address(rthread, JavaThread::held_monitor_count_offset()));
@ -1953,7 +1953,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
assert(LockingMode == LM_LIGHTWEIGHT, "");
__ ldr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
__ tbnz(old_hdr, exact_log2(markWord::monitor_value), slow_path_unlock);
__ fast_unlock(obj_reg, old_hdr, swap_reg, rscratch1, slow_path_unlock);
__ lightweight_unlock(obj_reg, old_hdr, swap_reg, rscratch1, slow_path_unlock);
__ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
}


@ -219,7 +219,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
Register t2 = hdr; // blow
Register t3 = Rtemp; // blow
fast_lock_2(obj /* obj */, t1, t2, t3, 1 /* savemask - save t1 */, slow_case);
lightweight_lock(obj /* obj */, t1, t2, t3, 1 /* savemask - save t1 */, slow_case);
// Success: fall through
} else if (LockingMode == LM_LEGACY) {
@ -282,8 +282,8 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_
Register t2 = hdr; // blow
Register t3 = Rtemp; // blow
fast_unlock_2(obj /* object */, t1, t2, t3, 1 /* savemask (save t1) */,
slow_case);
lightweight_unlock(obj /* object */, t1, t2, t3, 1 /* savemask (save t1) */,
slow_case);
// Success: Fall through
} else if (LockingMode == LM_LEGACY) {


@ -93,8 +93,8 @@ void C2_MacroAssembler::fast_lock(Register Roop, Register Rbox, Register Rscratc
if (LockingMode == LM_LIGHTWEIGHT) {
fast_lock_2(Roop /* obj */, Rbox /* t1 */, Rscratch /* t2 */, Rscratch2 /* t3 */,
1 /* savemask (save t1) */, done);
lightweight_lock(Roop /* obj */, Rbox /* t1 */, Rscratch /* t2 */, Rscratch2 /* t3 */,
1 /* savemask (save t1) */, done);
// Success: set Z
cmp(Roop, Roop);
@ -143,8 +143,8 @@ void C2_MacroAssembler::fast_unlock(Register Roop, Register Rbox, Register Rscra
if (LockingMode == LM_LIGHTWEIGHT) {
fast_unlock_2(Roop /* obj */, Rbox /* t1 */, Rscratch /* t2 */, Rscratch2 /* t3 */,
1 /* savemask (save t1) */, done);
lightweight_unlock(Roop /* obj */, Rbox /* t1 */, Rscratch /* t2 */, Rscratch2 /* t3 */,
1 /* savemask (save t1) */, done);
cmp(Roop, Roop); // Success: Set Z
// Fall through


@ -912,7 +912,7 @@ void InterpreterMacroAssembler::lock_object(Register Rlock) {
}
if (LockingMode == LM_LIGHTWEIGHT) {
fast_lock_2(Robj, R0 /* t1 */, Rmark /* t2 */, Rtemp /* t3 */, 0 /* savemask */, slow_case);
lightweight_lock(Robj, R0 /* t1 */, Rmark /* t2 */, Rtemp /* t3 */, 0 /* savemask */, slow_case);
b(done);
} else if (LockingMode == LM_LEGACY) {
// On MP platforms the next load could return a 'stale' value if the memory location has been modified by another thread.
@ -1034,8 +1034,8 @@ void InterpreterMacroAssembler::unlock_object(Register Rlock) {
cmpoop(Rtemp, Robj);
b(slow_case, ne);
fast_unlock_2(Robj /* obj */, Rlock /* t1 */, Rmark /* t2 */, Rtemp /* t3 */,
1 /* savemask (save t1) */, slow_case);
lightweight_unlock(Robj /* obj */, Rlock /* t1 */, Rmark /* t2 */, Rtemp /* t3 */,
1 /* savemask (save t1) */, slow_case);
b(done);


@ -1748,14 +1748,14 @@ void MacroAssembler::read_polling_page(Register dest, relocInfo::relocType rtype
POISON_REG(mask, 1, R2, poison) \
POISON_REG(mask, 2, R3, poison)
// Attempt to fast-lock an object
// Attempt to lightweight-lock an object
// Registers:
// - obj: the object to be locked
// - t1, t2, t3: temp registers. If corresponding bit in savemask is set, they get saved, otherwise blown.
// Result:
// - Success: fallthrough
// - Error: break to slow, Z cleared.
void MacroAssembler::fast_lock_2(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow) {
void MacroAssembler::lightweight_lock(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow) {
assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
assert_different_registers(obj, t1, t2, t3);
@ -1806,14 +1806,14 @@ void MacroAssembler::fast_lock_2(Register obj, Register t1, Register t2, Registe
// Success: fall through
}
// Attempt to fast-unlock an object
// Attempt to lightweight-unlock an object
// Registers:
// - obj: the object to be unlocked
// - t1, t2, t3: temp registers. If corresponding bit in savemask is set, they get saved, otherwise blown.
// Result:
// - Success: fallthrough
// - Error: break to slow, Z cleared.
void MacroAssembler::fast_unlock_2(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow) {
void MacroAssembler::lightweight_unlock(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow) {
assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
assert_different_registers(obj, t1, t2, t3);
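
The ARM flavour takes an extra savemask argument. Judging from the call sites in this change (1 /* savemask - save t1 */ and 0x7 /* savemask */), the convention appears to be that bit 0 corresponds to t1, bit 1 to t2 and bit 2 to t3, and a set bit means that temp is preserved rather than blown. A tiny sketch of that assumed mapping:

// Illustrative only: how a savemask value is assumed to select surviving temps.
constexpr bool preserves_t1(unsigned savemask) { return (savemask & 0x1u) != 0; }
constexpr bool preserves_t2(unsigned savemask) { return (savemask & 0x2u) != 0; }
constexpr bool preserves_t3(unsigned savemask) { return (savemask & 0x4u) != 0; }

static_assert(preserves_t1(1) && !preserves_t2(1) && !preserves_t3(1),
              "savemask == 1: only t1 survives");
static_assert(preserves_t1(0x7) && preserves_t2(0x7) && preserves_t3(0x7),
              "savemask == 0x7: all three temps survive");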


@ -1009,23 +1009,23 @@ public:
void cas_for_lock_acquire(Register oldval, Register newval, Register base, Register tmp, Label &slow_case, bool allow_fallthrough_on_failure = false, bool one_shot = false);
void cas_for_lock_release(Register oldval, Register newval, Register base, Register tmp, Label &slow_case, bool allow_fallthrough_on_failure = false, bool one_shot = false);
// Attempt to fast-lock an object
// Attempt to lightweight-lock an object
// Registers:
// - obj: the object to be locked
// - t1, t2, t3: temp registers. If corresponding bit in savemask is set, they get saved, otherwise blown.
// Result:
// - Success: fallthrough
// - Error: break to slow, Z cleared.
void fast_lock_2(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow);
void lightweight_lock(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow);
// Attempt to fast-unlock an object
// Attempt to lightweight-unlock an object
// Registers:
// - obj: the object to be unlocked
// - t1, t2, t3: temp registers. If corresponding bit in savemask is set, they get saved, otherwise blown.
// Result:
// - Success: fallthrough
// - Error: break to slow, Z cleared.
void fast_unlock_2(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow);
void lightweight_unlock(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow);
#ifndef PRODUCT
// Preserves flags and all registers.


@ -1155,8 +1155,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
if (LockingMode == LM_LIGHTWEIGHT) {
log_trace(fastlock)("SharedRuntime lock fast");
__ fast_lock_2(sync_obj /* object */, disp_hdr /* t1 */, tmp /* t2 */, Rtemp /* t3 */,
0x7 /* savemask */, slow_lock);
__ lightweight_lock(sync_obj /* object */, disp_hdr /* t1 */, tmp /* t2 */, Rtemp /* t3 */,
0x7 /* savemask */, slow_lock);
// Fall through to lock_done
} else if (LockingMode == LM_LEGACY) {
const Register mark = tmp;
@ -1242,8 +1242,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
if (method->is_synchronized()) {
if (LockingMode == LM_LIGHTWEIGHT) {
log_trace(fastlock)("SharedRuntime unlock fast");
__ fast_unlock_2(sync_obj, R2 /* t1 */, tmp /* t2 */, Rtemp /* t3 */,
7 /* savemask */, slow_unlock);
__ lightweight_unlock(sync_obj, R2 /* t1 */, tmp /* t2 */, Rtemp /* t3 */,
7 /* savemask */, slow_unlock);
// Fall through
} else if (LockingMode == LM_LEGACY) {
// See C1_MacroAssembler::unlock_object() for more comments


@ -115,7 +115,7 @@ void C1_MacroAssembler::lock_object(Register Rmark, Register Roop, Register Rbox
}
if (LockingMode == LM_LIGHTWEIGHT) {
fast_lock(Roop, Rmark, Rscratch, slow_int);
lightweight_lock(Roop, Rmark, Rscratch, slow_int);
} else if (LockingMode == LM_LEGACY) {
// ... and mark it unlocked.
ori(Rmark, Rmark, markWord::unlocked_value);
@ -181,7 +181,7 @@ void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rb
ld(Rmark, oopDesc::mark_offset_in_bytes(), Roop);
andi_(R0, Rmark, markWord::monitor_value);
bne(CCR0, slow_int);
fast_unlock(Roop, Rmark, slow_int);
lightweight_unlock(Roop, Rmark, slow_int);
} else if (LockingMode == LM_LEGACY) {
// Check if it is still a light weight lock, this is true if we see
// the stack address of the basicLock in the markWord of the object.


@ -987,7 +987,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
}
if (LockingMode == LM_LIGHTWEIGHT) {
fast_lock(object, /* mark word */ header, tmp, slow_case);
lightweight_lock(object, /* mark word */ header, tmp, slow_case);
b(count_locking);
} else if (LockingMode == LM_LEGACY) {
@ -1137,7 +1137,7 @@ void InterpreterMacroAssembler::unlock_object(Register monitor) {
ld(header, oopDesc::mark_offset_in_bytes(), object);
andi_(R0, header, markWord::monitor_value);
bne(CCR0, slow_case);
fast_unlock(object, header, slow_case);
lightweight_unlock(object, header, slow_case);
} else {
addi(object_mark_addr, object, oopDesc::mark_offset_in_bytes());


@ -2250,7 +2250,7 @@ void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register
b(failure);
} else {
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
fast_lock(oop, displaced_header, temp, failure);
lightweight_lock(oop, displaced_header, temp, failure);
b(success);
}
@ -2334,7 +2334,7 @@ void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Registe
b(success);
} else {
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
fast_unlock(oop, current_header, failure);
lightweight_unlock(oop, current_header, failure);
b(success);
}
@ -3993,14 +3993,14 @@ void MacroAssembler::atomically_flip_locked_state(bool is_unlock, Register obj,
}
}
// Implements fast-locking.
// Implements lightweight-locking.
// Branches to slow upon failure to lock the object, with CCR0 NE.
// Falls through upon success with CCR0 EQ.
//
// - obj: the object to be locked
// - hdr: the header, already loaded from obj, will be destroyed
// - t1: temporary register
void MacroAssembler::fast_lock(Register obj, Register hdr, Register t1, Label& slow) {
void MacroAssembler::lightweight_lock(Register obj, Register hdr, Register t1, Label& slow) {
assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
assert_different_registers(obj, hdr, t1);
@ -4026,13 +4026,13 @@ void MacroAssembler::fast_lock(Register obj, Register hdr, Register t1, Label& s
stw(t1, in_bytes(JavaThread::lock_stack_top_offset()), R16_thread);
}
// Implements fast-unlocking.
// Implements lightweight-unlocking.
// Branches to slow upon failure, with CCR0 NE.
// Falls through upon success, with CCR0 EQ.
//
// - obj: the object to be unlocked
// - hdr: the (pre-loaded) header of the object, will be destroyed
void MacroAssembler::fast_unlock(Register obj, Register hdr, Label& slow) {
void MacroAssembler::lightweight_unlock(Register obj, Register hdr, Label& slow) {
assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
assert_different_registers(obj, hdr);


@ -606,8 +606,8 @@ class MacroAssembler: public Assembler {
void inc_held_monitor_count(Register tmp);
void dec_held_monitor_count(Register tmp);
void atomically_flip_locked_state(bool is_unlock, Register obj, Register tmp, Label& failed, int semantics);
void fast_lock(Register obj, Register hdr, Register t1, Label& slow);
void fast_unlock(Register obj, Register hdr, Label& slow);
void lightweight_lock(Register obj, Register hdr, Register t1, Label& slow);
void lightweight_unlock(Register obj, Register hdr, Label& slow);
// allocation (for C1)
void tlab_allocate(


@ -73,7 +73,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
ld(hdr, Address(obj, hdr_offset));
if (LockingMode == LM_LIGHTWEIGHT) {
fast_lock(obj, hdr, t0, t1, slow_case);
lightweight_lock(obj, hdr, t0, t1, slow_case);
} else if (LockingMode == LM_LEGACY) {
Label done;
// and mark it as unlocked
@ -137,7 +137,7 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_
ld(hdr, Address(obj, oopDesc::mark_offset_in_bytes()));
test_bit(t0, hdr, exact_log2(markWord::monitor_value));
bnez(t0, slow_case, /* is_far */ true);
fast_unlock(obj, hdr, t0, t1, slow_case);
lightweight_unlock(obj, hdr, t0, t1, slow_case);
} else if (LockingMode == LM_LEGACY) {
// test if object header is pointing to the displaced header, and if so, restore
// the displaced header in the object - if the object header is not pointing to


@ -836,7 +836,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
if (LockingMode == LM_LIGHTWEIGHT) {
ld(tmp, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
fast_lock(obj_reg, tmp, t0, t1, slow_case);
lightweight_lock(obj_reg, tmp, t0, t1, slow_case);
j(count);
} else if (LockingMode == LM_LEGACY) {
// Load (object->mark() | 1) into swap_reg
@ -949,7 +949,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
ld(header_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
test_bit(t0, header_reg, exact_log2(markWord::monitor_value));
bnez(t0, slow_case);
fast_unlock(obj_reg, header_reg, swap_reg, t0, slow_case);
lightweight_unlock(obj_reg, header_reg, swap_reg, t0, slow_case);
j(count);
bind(slow_case);


@ -4647,14 +4647,14 @@ void MacroAssembler::test_bit(Register Rd, Register Rs, uint32_t bit_pos, Regist
andi(Rd, Rs, 1UL << bit_pos, tmp);
}
// Implements fast-locking.
// Implements lightweight-locking.
// Branches to slow upon failure to lock the object.
// Falls through upon success.
//
// - obj: the object to be locked
// - hdr: the header, already loaded from obj, will be destroyed
// - tmp1, tmp2: temporary registers, will be destroyed
void MacroAssembler::fast_lock(Register obj, Register hdr, Register tmp1, Register tmp2, Label& slow) {
void MacroAssembler::lightweight_lock(Register obj, Register hdr, Register tmp1, Register tmp2, Label& slow) {
assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
assert_different_registers(obj, hdr, tmp1, tmp2);
@ -4681,14 +4681,14 @@ void MacroAssembler::fast_lock(Register obj, Register hdr, Register tmp1, Regist
sw(tmp1, Address(xthread, JavaThread::lock_stack_top_offset()));
}
// Implements fast-unlocking.
// Implements lightweight-unlocking.
// Branches to slow upon failure.
// Falls through upon success.
//
// - obj: the object to be unlocked
// - hdr: the (pre-loaded) header of the object
// - tmp1, tmp2: temporary registers
void MacroAssembler::fast_unlock(Register obj, Register hdr, Register tmp1, Register tmp2, Label& slow) {
void MacroAssembler::lightweight_unlock(Register obj, Register hdr, Register tmp1, Register tmp2, Label& slow) {
assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
assert_different_registers(obj, hdr, tmp1, tmp2);


@ -1457,8 +1457,8 @@ private:
void store_conditional(Register dst, Register new_val, Register addr, enum operand_size size, Assembler::Aqrl release);
public:
void fast_lock(Register obj, Register hdr, Register tmp1, Register tmp2, Label& slow);
void fast_unlock(Register obj, Register hdr, Register tmp1, Register tmp2, Label& slow);
void lightweight_lock(Register obj, Register hdr, Register tmp1, Register tmp2, Label& slow);
void lightweight_unlock(Register obj, Register hdr, Register tmp1, Register tmp2, Label& slow);
};
#ifdef ASSERT


@ -2502,7 +2502,7 @@ encode %{
} else {
assert(LockingMode == LM_LIGHTWEIGHT, "");
Label slow;
__ fast_lock(oop, disp_hdr, tmp, t0, slow);
__ lightweight_lock(oop, disp_hdr, tmp, t0, slow);
// Indicate success on completion.
__ mv(flag, zr);
@ -2593,7 +2593,7 @@ encode %{
} else {
assert(LockingMode == LM_LIGHTWEIGHT, "");
Label slow;
__ fast_unlock(oop, tmp, box, disp_hdr, slow);
__ lightweight_unlock(oop, tmp, box, disp_hdr, slow);
// Indicate success on completion.
__ mv(flag, zr);


@ -1701,7 +1701,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
} else {
assert(LockingMode == LM_LIGHTWEIGHT, "");
__ ld(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
__ fast_lock(obj_reg, swap_reg, tmp, t0, slow_path_lock);
__ lightweight_lock(obj_reg, swap_reg, tmp, t0, slow_path_lock);
}
__ bind(count);
@ -1829,7 +1829,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ ld(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
__ test_bit(t0, old_hdr, exact_log2(markWord::monitor_value));
__ bnez(t0, slow_path_unlock);
__ fast_unlock(obj_reg, old_hdr, swap_reg, t0, slow_path_unlock);
__ lightweight_unlock(obj_reg, old_hdr, swap_reg, t0, slow_path_unlock);
__ decrement(Address(xthread, JavaThread::held_monitor_count_offset()));
}


@ -70,7 +70,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
const Register thread = disp_hdr;
get_thread(thread);
#endif
fast_lock_impl(obj, hdr, thread, tmp, slow_case);
lightweight_lock(obj, hdr, thread, tmp, slow_case);
} else if (LockingMode == LM_LEGACY) {
Label done;
// and mark it as unlocked
@ -136,7 +136,7 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_
if (LockingMode == LM_LIGHTWEIGHT) {
movptr(disp_hdr, Address(obj, hdr_offset));
andptr(disp_hdr, ~(int32_t)markWord::lock_mask_in_place);
fast_unlock_impl(obj, disp_hdr, hdr, slow_case);
lightweight_unlock(obj, disp_hdr, hdr, slow_case);
} else if (LockingMode == LM_LEGACY) {
// test if object header is pointing to the displaced header, and if so, restore
// the displaced header in the object - if the object header is not pointing to


@ -622,7 +622,7 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmp
movptr(Address(boxReg, 0), tmpReg);
} else {
assert(LockingMode == LM_LIGHTWEIGHT, "");
fast_lock_impl(objReg, tmpReg, thread, scrReg, NO_COUNT);
lightweight_lock(objReg, tmpReg, thread, scrReg, NO_COUNT);
jmp(COUNT);
}
jmp(DONE_LABEL);
@ -926,7 +926,7 @@ void C2_MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register t
bind (Stacked);
if (LockingMode == LM_LIGHTWEIGHT) {
mov(boxReg, tmpReg);
fast_unlock_impl(objReg, boxReg, tmpReg, NO_COUNT);
lightweight_unlock(objReg, boxReg, tmpReg, NO_COUNT);
jmp(COUNT);
} else if (LockingMode == LM_LEGACY) {
movptr(tmpReg, Address (boxReg, 0)); // re-fetch


@ -1236,7 +1236,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
#endif
// Load object header, prepare for CAS from unlocked to locked.
movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
fast_lock_impl(obj_reg, swap_reg, thread, tmp_reg, slow_case);
lightweight_lock(obj_reg, swap_reg, thread, tmp_reg, slow_case);
} else if (LockingMode == LM_LEGACY) {
// Load immediate 1 into swap_reg %rax
movl(swap_reg, 1);
@ -1366,7 +1366,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
// Try to swing header from locked to unlocked.
movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
andptr(swap_reg, ~(int32_t)markWord::lock_mask_in_place);
fast_unlock_impl(obj_reg, swap_reg, header_reg, slow_case);
lightweight_unlock(obj_reg, swap_reg, header_reg, slow_case);
} else if (LockingMode == LM_LEGACY) {
// Load the old header from BasicLock structure
movptr(header_reg, Address(swap_reg,


@ -9789,7 +9789,7 @@ void MacroAssembler::check_stack_alignment(Register sp, const char* msg, unsigne
bind(L_stack_ok);
}
// Implements fast-locking.
// Implements lightweight-locking.
// Branches to slow upon failure to lock the object, with ZF cleared.
// Falls through upon success with unspecified ZF.
//
@ -9797,7 +9797,7 @@ void MacroAssembler::check_stack_alignment(Register sp, const char* msg, unsigne
// hdr: the (pre-loaded) header of the object, must be rax
// thread: the thread which attempts to lock obj
// tmp: a temporary register
void MacroAssembler::fast_lock_impl(Register obj, Register hdr, Register thread, Register tmp, Label& slow) {
void MacroAssembler::lightweight_lock(Register obj, Register hdr, Register thread, Register tmp, Label& slow) {
assert(hdr == rax, "header must be in rax for cmpxchg");
assert_different_registers(obj, hdr, thread, tmp);
@ -9825,14 +9825,14 @@ void MacroAssembler::fast_lock_impl(Register obj, Register hdr, Register thread,
movl(Address(thread, JavaThread::lock_stack_top_offset()), tmp);
}
// Implements fast-unlocking.
// Implements lightweight-unlocking.
// Branches to slow upon failure, with ZF cleared.
// Falls through upon success, with unspecified ZF.
//
// obj: the object to be unlocked
// hdr: the (pre-loaded) header of the object, must be rax
// tmp: a temporary register
void MacroAssembler::fast_unlock_impl(Register obj, Register hdr, Register tmp, Label& slow) {
void MacroAssembler::lightweight_unlock(Register obj, Register hdr, Register tmp, Label& slow) {
assert(hdr == rax, "header must be in rax for cmpxchg");
assert_different_registers(obj, hdr, tmp);
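
The x86 variants additionally assert that hdr lives in rax ("header must be in rax for cmpxchg"). That requirement comes from the CMPXCHG instruction itself, which implicitly compares against and writes back the accumulator. A minimal GCC/Clang inline-asm sketch for x86-64, illustrative only and not HotSpot code:

#include <cstdint>

// CMPXCHG compares *mark_addr with RAX (here: 'expected'); on a match it
// stores 'desired' and sets ZF, otherwise it clears ZF and loads the current
// value into RAX. This is why the hdr argument must be rax.
inline bool cas_mark(uint64_t* mark_addr, uint64_t expected, uint64_t desired) {
  unsigned char ok;
  __asm__ __volatile__("lock cmpxchgq %[des], %[mem]\n\t"
                       "sete %[ok]"
                       : [ok] "=q"(ok), [mem] "+m"(*mark_addr), "+a"(expected)
                       : [des] "r"(desired)
                       : "memory", "cc");
  return ok != 0;
}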


@ -2023,8 +2023,8 @@ public:
void check_stack_alignment(Register sp, const char* msg, unsigned bias = 0, Register tmp = noreg);
void fast_lock_impl(Register obj, Register hdr, Register thread, Register tmp, Label& slow);
void fast_unlock_impl(Register obj, Register hdr, Register tmp, Label& slow);
void lightweight_lock(Register obj, Register hdr, Register thread, Register tmp, Label& slow);
void lightweight_unlock(Register obj, Register hdr, Register tmp, Label& slow);
};
/**


@ -1717,7 +1717,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
// Load object header
__ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
__ fast_lock_impl(obj_reg, swap_reg, thread, lock_reg, slow_path_lock);
__ lightweight_lock(obj_reg, swap_reg, thread, lock_reg, slow_path_lock);
}
__ bind(count_mon);
__ inc_held_monitor_count();
@ -1876,7 +1876,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
__ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
__ andptr(swap_reg, ~(int32_t)markWord::lock_mask_in_place);
__ fast_unlock_impl(obj_reg, swap_reg, lock_reg, slow_path_unlock);
__ lightweight_unlock(obj_reg, swap_reg, lock_reg, slow_path_unlock);
__ dec_held_monitor_count();
}


@ -2188,7 +2188,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
// Load object header
__ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
__ fast_lock_impl(obj_reg, swap_reg, r15_thread, rscratch1, slow_path_lock);
__ lightweight_lock(obj_reg, swap_reg, r15_thread, rscratch1, slow_path_lock);
}
__ bind(count_mon);
__ inc_held_monitor_count();
@ -2332,7 +2332,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
__ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
__ andptr(swap_reg, ~(int32_t)markWord::lock_mask_in_place);
__ fast_unlock_impl(obj_reg, swap_reg, lock_reg, slow_path_unlock);
__ lightweight_unlock(obj_reg, swap_reg, lock_reg, slow_path_unlock);
__ dec_held_monitor_count();
}