8316179: Use consistent naming for lightweight locking in MacroAssembler
Reviewed-by: rkennke, coleenp, dholmes
commit 639ba13c4b
parent 11d431b2c4
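Since the hunks below are a mechanical rename (aarch64/ppc/riscv `fast_lock`/`fast_unlock`, arm `fast_lock_2`/`fast_unlock_2`, and x86 `fast_lock_impl`/`fast_unlock_impl` all become `lightweight_lock`/`lightweight_unlock`, with the per-port signatures unchanged), a brief orientation may help. Each renamed routine emits the same lightweight-locking fast path: try to CAS the object's mark word from the unlocked to the locked encoding, record the object on the current thread's lock stack, and branch to a slow path on any failure. The sketch below is a simplified, illustrative C++ model of that fast path, not HotSpot code; `Object`, `LockStack`, and the constants are hypothetical stand-ins for the mark word bits and the JavaThread lock stack referenced in the diff.

// Simplified, illustrative model of the lightweight-locking fast path that the
// renamed assembler routines emit (assumption-laden sketch; not HotSpot code).
#include <atomic>
#include <cstdint>

struct Object {
  std::atomic<uintptr_t> mark;               // object header ("mark word")
};

constexpr uintptr_t kLockMaskInPlace = 0x3;  // cf. markWord::lock_mask_in_place
constexpr uintptr_t kUnlockedValue   = 0x1;  // cf. markWord::unlocked_value
constexpr uintptr_t kMonitorValue    = 0x2;  // cf. markWord::monitor_value

struct LockStack {                           // cf. JavaThread::lock_stack_top_offset()
  static constexpr int kCapacity = 8;        // capacity is an arbitrary choice here
  Object* elems[kCapacity];
  int top = 0;
};

// Returns true on success (the assembler version falls through);
// false means "branch to the slow path".
bool lightweight_lock(Object* obj, LockStack& ls) {
  if (ls.top == LockStack::kCapacity) return false;              // lock stack full -> slow
  uintptr_t mark = obj->mark.load(std::memory_order_relaxed);
  if ((mark & kLockMaskInPlace) != kUnlockedValue) return false; // not unlocked -> slow
  uintptr_t locked = mark & ~kLockMaskInPlace;                   // lock bits 00 = fast-locked
  if (!obj->mark.compare_exchange_strong(mark, locked)) return false;
  ls.elems[ls.top++] = obj;                                      // remember the owned object
  return true;
}

bool lightweight_unlock(Object* obj, LockStack& ls) {
  uintptr_t mark = obj->mark.load(std::memory_order_relaxed);
  if (mark & kMonitorValue) return false;                        // inflated monitor -> slow
  uintptr_t unlocked = mark | kUnlockedValue;                    // restore unlocked encoding
  if (!obj->mark.compare_exchange_strong(mark, unlocked)) return false;
  --ls.top;                                                      // pop the lock stack
  return true;
}

The real routines differ per port in register usage and condition-flag conventions (ZF on aarch64/x86, CCR0 on ppc), as the comments in the hunks below describe.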
@@ -3875,7 +3875,7 @@ encode %{
 __ b(cont);
 } else {
 assert(LockingMode == LM_LIGHTWEIGHT, "must be");
-__ fast_lock(oop, disp_hdr, tmp, rscratch1, no_count);
+__ lightweight_lock(oop, disp_hdr, tmp, rscratch1, no_count);
 __ b(count);
 }

@@ -3956,7 +3956,7 @@ encode %{
 __ b(cont);
 } else {
 assert(LockingMode == LM_LIGHTWEIGHT, "must be");
-__ fast_unlock(oop, tmp, box, disp_hdr, no_count);
+__ lightweight_unlock(oop, tmp, box, disp_hdr, no_count);
 __ b(count);
 }

@@ -83,7 +83,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
 // Load object header
 ldr(hdr, Address(obj, hdr_offset));
 if (LockingMode == LM_LIGHTWEIGHT) {
-fast_lock(obj, hdr, rscratch1, rscratch2, slow_case);
+lightweight_lock(obj, hdr, rscratch1, rscratch2, slow_case);
 } else if (LockingMode == LM_LEGACY) {
 Label done;
 // and mark it as unlocked
@@ -149,7 +149,7 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_
 // be encoded.
 tst(hdr, markWord::monitor_value);
 br(Assembler::NE, slow_case);
-fast_unlock(obj, hdr, rscratch1, rscratch2, slow_case);
+lightweight_unlock(obj, hdr, rscratch1, rscratch2, slow_case);
 } else if (LockingMode == LM_LEGACY) {
 // test if object header is pointing to the displaced header, and if so, restore
 // the displaced header in the object - if the object header is not pointing to
@@ -767,7 +767,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)

 if (LockingMode == LM_LIGHTWEIGHT) {
 ldr(tmp, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
-fast_lock(obj_reg, tmp, rscratch1, rscratch2, slow_case);
+lightweight_lock(obj_reg, tmp, rscratch1, rscratch2, slow_case);
 b(count);
 } else if (LockingMode == LM_LEGACY) {
 // Load (object->mark() | 1) into swap_reg
@@ -898,7 +898,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)

 ldr(header_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 tbnz(header_reg, exact_log2(markWord::monitor_value), slow_case);
-fast_unlock(obj_reg, header_reg, swap_reg, rscratch1, slow_case);
+lightweight_unlock(obj_reg, header_reg, swap_reg, rscratch1, slow_case);
 b(count);
 bind(slow_case);
 } else if (LockingMode == LM_LEGACY) {
@@ -6314,14 +6314,14 @@ void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp) {
 }
 }

-// Implements fast-locking.
+// Implements lightweight-locking.
 // Branches to slow upon failure to lock the object, with ZF cleared.
 // Falls through upon success with ZF set.
 //
 // - obj: the object to be locked
 // - hdr: the header, already loaded from obj, will be destroyed
 // - t1, t2: temporary registers, will be destroyed
-void MacroAssembler::fast_lock(Register obj, Register hdr, Register t1, Register t2, Label& slow) {
+void MacroAssembler::lightweight_lock(Register obj, Register hdr, Register t1, Register t2, Label& slow) {
 assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
 assert_different_registers(obj, hdr, t1, t2);

@@ -6346,14 +6346,14 @@ void MacroAssembler::fast_lock(Register obj, Register hdr, Register t1, Register
 strw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
 }

-// Implements fast-unlocking.
+// Implements lightweight-unlocking.
 // Branches to slow upon failure, with ZF cleared.
 // Falls through upon success, with ZF set.
 //
 // - obj: the object to be unlocked
 // - hdr: the (pre-loaded) header of the object
 // - t1, t2: temporary registers
-void MacroAssembler::fast_unlock(Register obj, Register hdr, Register t1, Register t2, Label& slow) {
+void MacroAssembler::lightweight_unlock(Register obj, Register hdr, Register t1, Register t2, Label& slow) {
 assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
 assert_different_registers(obj, hdr, t1, t2);

@@ -1592,8 +1592,8 @@ public:
 // Code for java.lang.Thread::onSpinWait() intrinsic.
 void spin_wait();

-void fast_lock(Register obj, Register hdr, Register t1, Register t2, Label& slow);
+void lightweight_lock(Register obj, Register hdr, Register t1, Register t2, Label& slow);
-void fast_unlock(Register obj, Register hdr, Register t1, Register t2, Label& slow);
+void lightweight_unlock(Register obj, Register hdr, Register t1, Register t2, Label& slow);

 private:
 // Check the current thread doesn't need a cross modify fence.
@@ -1812,7 +1812,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
 } else {
 assert(LockingMode == LM_LIGHTWEIGHT, "must be");
 __ ldr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
-__ fast_lock(obj_reg, swap_reg, tmp, rscratch1, slow_path_lock);
+__ lightweight_lock(obj_reg, swap_reg, tmp, rscratch1, slow_path_lock);
 }
 __ bind(count);
 __ increment(Address(rthread, JavaThread::held_monitor_count_offset()));
@@ -1953,7 +1953,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
 assert(LockingMode == LM_LIGHTWEIGHT, "");
 __ ldr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 __ tbnz(old_hdr, exact_log2(markWord::monitor_value), slow_path_unlock);
-__ fast_unlock(obj_reg, old_hdr, swap_reg, rscratch1, slow_path_unlock);
+__ lightweight_unlock(obj_reg, old_hdr, swap_reg, rscratch1, slow_path_unlock);
 __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
 }

@@ -219,7 +219,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
 Register t2 = hdr; // blow
 Register t3 = Rtemp; // blow

-fast_lock_2(obj /* obj */, t1, t2, t3, 1 /* savemask - save t1 */, slow_case);
+lightweight_lock(obj /* obj */, t1, t2, t3, 1 /* savemask - save t1 */, slow_case);
 // Success: fall through

 } else if (LockingMode == LM_LEGACY) {
@@ -282,7 +282,7 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_
 Register t2 = hdr; // blow
 Register t3 = Rtemp; // blow

-fast_unlock_2(obj /* object */, t1, t2, t3, 1 /* savemask (save t1) */,
+lightweight_unlock(obj /* object */, t1, t2, t3, 1 /* savemask (save t1) */,
 slow_case);
 // Success: Fall through

@@ -93,7 +93,7 @@ void C2_MacroAssembler::fast_lock(Register Roop, Register Rbox, Register Rscratc

 if (LockingMode == LM_LIGHTWEIGHT) {

-fast_lock_2(Roop /* obj */, Rbox /* t1 */, Rscratch /* t2 */, Rscratch2 /* t3 */,
+lightweight_lock(Roop /* obj */, Rbox /* t1 */, Rscratch /* t2 */, Rscratch2 /* t3 */,
 1 /* savemask (save t1) */, done);

 // Success: set Z
@@ -143,7 +143,7 @@ void C2_MacroAssembler::fast_unlock(Register Roop, Register Rbox, Register Rscra

 if (LockingMode == LM_LIGHTWEIGHT) {

-fast_unlock_2(Roop /* obj */, Rbox /* t1 */, Rscratch /* t2 */, Rscratch2 /* t3 */,
+lightweight_unlock(Roop /* obj */, Rbox /* t1 */, Rscratch /* t2 */, Rscratch2 /* t3 */,
 1 /* savemask (save t1) */, done);

 cmp(Roop, Roop); // Success: Set Z
@@ -912,7 +912,7 @@ void InterpreterMacroAssembler::lock_object(Register Rlock) {
 }

 if (LockingMode == LM_LIGHTWEIGHT) {
-fast_lock_2(Robj, R0 /* t1 */, Rmark /* t2 */, Rtemp /* t3 */, 0 /* savemask */, slow_case);
+lightweight_lock(Robj, R0 /* t1 */, Rmark /* t2 */, Rtemp /* t3 */, 0 /* savemask */, slow_case);
 b(done);
 } else if (LockingMode == LM_LEGACY) {
 // On MP platforms the next load could return a 'stale' value if the memory location has been modified by another thread.
@@ -1034,7 +1034,7 @@ void InterpreterMacroAssembler::unlock_object(Register Rlock) {
 cmpoop(Rtemp, Robj);
 b(slow_case, ne);

-fast_unlock_2(Robj /* obj */, Rlock /* t1 */, Rmark /* t2 */, Rtemp /* t3 */,
+lightweight_unlock(Robj /* obj */, Rlock /* t1 */, Rmark /* t2 */, Rtemp /* t3 */,
 1 /* savemask (save t1) */, slow_case);

 b(done);
@@ -1748,14 +1748,14 @@ void MacroAssembler::read_polling_page(Register dest, relocInfo::relocType rtype
 POISON_REG(mask, 1, R2, poison) \
 POISON_REG(mask, 2, R3, poison)

-// Attempt to fast-lock an object
+// Attempt to lightweight-lock an object
 // Registers:
 // - obj: the object to be locked
 // - t1, t2, t3: temp registers. If corresponding bit in savemask is set, they get saved, otherwise blown.
 // Result:
 // - Success: fallthrough
 // - Error: break to slow, Z cleared.
-void MacroAssembler::fast_lock_2(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow) {
+void MacroAssembler::lightweight_lock(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow) {
 assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
 assert_different_registers(obj, t1, t2, t3);

@@ -1806,14 +1806,14 @@ void MacroAssembler::fast_lock_2(Register obj, Register t1, Register t2, Registe
 // Success: fall through
 }

-// Attempt to fast-unlock an object
+// Attempt to lightweight-unlock an object
 // Registers:
 // - obj: the object to be unlocked
 // - t1, t2, t3: temp registers. If corresponding bit in savemask is set, they get saved, otherwise blown.
 // Result:
 // - Success: fallthrough
 // - Error: break to slow, Z cleared.
-void MacroAssembler::fast_unlock_2(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow) {
+void MacroAssembler::lightweight_unlock(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow) {
 assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
 assert_different_registers(obj, t1, t2, t3);

@@ -1009,23 +1009,23 @@ public:
 void cas_for_lock_acquire(Register oldval, Register newval, Register base, Register tmp, Label &slow_case, bool allow_fallthrough_on_failure = false, bool one_shot = false);
 void cas_for_lock_release(Register oldval, Register newval, Register base, Register tmp, Label &slow_case, bool allow_fallthrough_on_failure = false, bool one_shot = false);

-// Attempt to fast-lock an object
+// Attempt to lightweight-lock an object
 // Registers:
 // - obj: the object to be locked
 // - t1, t2, t3: temp registers. If corresponding bit in savemask is set, they get saved, otherwise blown.
 // Result:
 // - Success: fallthrough
 // - Error: break to slow, Z cleared.
-void fast_lock_2(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow);
+void lightweight_lock(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow);

-// Attempt to fast-unlock an object
+// Attempt to lightweight-unlock an object
 // Registers:
 // - obj: the object to be unlocked
 // - t1, t2, t3: temp registers. If corresponding bit in savemask is set, they get saved, otherwise blown.
 // Result:
 // - Success: fallthrough
 // - Error: break to slow, Z cleared.
-void fast_unlock_2(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow);
+void lightweight_unlock(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow);

 #ifndef PRODUCT
 // Preserves flags and all registers.
@@ -1155,7 +1155,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,

 if (LockingMode == LM_LIGHTWEIGHT) {
 log_trace(fastlock)("SharedRuntime lock fast");
-__ fast_lock_2(sync_obj /* object */, disp_hdr /* t1 */, tmp /* t2 */, Rtemp /* t3 */,
+__ lightweight_lock(sync_obj /* object */, disp_hdr /* t1 */, tmp /* t2 */, Rtemp /* t3 */,
 0x7 /* savemask */, slow_lock);
 // Fall through to lock_done
 } else if (LockingMode == LM_LEGACY) {
@@ -1242,7 +1242,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
 if (method->is_synchronized()) {
 if (LockingMode == LM_LIGHTWEIGHT) {
 log_trace(fastlock)("SharedRuntime unlock fast");
-__ fast_unlock_2(sync_obj, R2 /* t1 */, tmp /* t2 */, Rtemp /* t3 */,
+__ lightweight_unlock(sync_obj, R2 /* t1 */, tmp /* t2 */, Rtemp /* t3 */,
 7 /* savemask */, slow_unlock);
 // Fall through
 } else if (LockingMode == LM_LEGACY) {
@@ -115,7 +115,7 @@ void C1_MacroAssembler::lock_object(Register Rmark, Register Roop, Register Rbox
 }

 if (LockingMode == LM_LIGHTWEIGHT) {
-fast_lock(Roop, Rmark, Rscratch, slow_int);
+lightweight_lock(Roop, Rmark, Rscratch, slow_int);
 } else if (LockingMode == LM_LEGACY) {
 // ... and mark it unlocked.
 ori(Rmark, Rmark, markWord::unlocked_value);
@@ -181,7 +181,7 @@ void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rb
 ld(Rmark, oopDesc::mark_offset_in_bytes(), Roop);
 andi_(R0, Rmark, markWord::monitor_value);
 bne(CCR0, slow_int);
-fast_unlock(Roop, Rmark, slow_int);
+lightweight_unlock(Roop, Rmark, slow_int);
 } else if (LockingMode == LM_LEGACY) {
 // Check if it is still a light weight lock, this is is true if we see
 // the stack address of the basicLock in the markWord of the object.
@@ -987,7 +987,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
 }

 if (LockingMode == LM_LIGHTWEIGHT) {
-fast_lock(object, /* mark word */ header, tmp, slow_case);
+lightweight_lock(object, /* mark word */ header, tmp, slow_case);
 b(count_locking);
 } else if (LockingMode == LM_LEGACY) {

@@ -1137,7 +1137,7 @@ void InterpreterMacroAssembler::unlock_object(Register monitor) {
 ld(header, oopDesc::mark_offset_in_bytes(), object);
 andi_(R0, header, markWord::monitor_value);
 bne(CCR0, slow_case);
-fast_unlock(object, header, slow_case);
+lightweight_unlock(object, header, slow_case);
 } else {
 addi(object_mark_addr, object, oopDesc::mark_offset_in_bytes());

@@ -2250,7 +2250,7 @@ void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register
 b(failure);
 } else {
 assert(LockingMode == LM_LIGHTWEIGHT, "must be");
-fast_lock(oop, displaced_header, temp, failure);
+lightweight_lock(oop, displaced_header, temp, failure);
 b(success);
 }

@@ -2334,7 +2334,7 @@ void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Registe
 b(success);
 } else {
 assert(LockingMode == LM_LIGHTWEIGHT, "must be");
-fast_unlock(oop, current_header, failure);
+lightweight_unlock(oop, current_header, failure);
 b(success);
 }

@@ -3993,14 +3993,14 @@ void MacroAssembler::atomically_flip_locked_state(bool is_unlock, Register obj,
 }
 }

-// Implements fast-locking.
+// Implements lightweight-locking.
 // Branches to slow upon failure to lock the object, with CCR0 NE.
 // Falls through upon success with CCR0 EQ.
 //
 // - obj: the object to be locked
 // - hdr: the header, already loaded from obj, will be destroyed
 // - t1: temporary register
-void MacroAssembler::fast_lock(Register obj, Register hdr, Register t1, Label& slow) {
+void MacroAssembler::lightweight_lock(Register obj, Register hdr, Register t1, Label& slow) {
 assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
 assert_different_registers(obj, hdr, t1);

@@ -4026,13 +4026,13 @@ void MacroAssembler::fast_lock(Register obj, Register hdr, Register t1, Label& s
 stw(t1, in_bytes(JavaThread::lock_stack_top_offset()), R16_thread);
 }

-// Implements fast-unlocking.
+// Implements lightweight-unlocking.
 // Branches to slow upon failure, with CCR0 NE.
 // Falls through upon success, with CCR0 EQ.
 //
 // - obj: the object to be unlocked
 // - hdr: the (pre-loaded) header of the object, will be destroyed
-void MacroAssembler::fast_unlock(Register obj, Register hdr, Label& slow) {
+void MacroAssembler::lightweight_unlock(Register obj, Register hdr, Label& slow) {
 assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
 assert_different_registers(obj, hdr);

@@ -606,8 +606,8 @@ class MacroAssembler: public Assembler {
 void inc_held_monitor_count(Register tmp);
 void dec_held_monitor_count(Register tmp);
 void atomically_flip_locked_state(bool is_unlock, Register obj, Register tmp, Label& failed, int semantics);
-void fast_lock(Register obj, Register hdr, Register t1, Label& slow);
+void lightweight_lock(Register obj, Register hdr, Register t1, Label& slow);
-void fast_unlock(Register obj, Register hdr, Label& slow);
+void lightweight_unlock(Register obj, Register hdr, Label& slow);

 // allocation (for C1)
 void tlab_allocate(
@@ -73,7 +73,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
 ld(hdr, Address(obj, hdr_offset));

 if (LockingMode == LM_LIGHTWEIGHT) {
-fast_lock(obj, hdr, t0, t1, slow_case);
+lightweight_lock(obj, hdr, t0, t1, slow_case);
 } else if (LockingMode == LM_LEGACY) {
 Label done;
 // and mark it as unlocked
@@ -137,7 +137,7 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_
 ld(hdr, Address(obj, oopDesc::mark_offset_in_bytes()));
 test_bit(t0, hdr, exact_log2(markWord::monitor_value));
 bnez(t0, slow_case, /* is_far */ true);
-fast_unlock(obj, hdr, t0, t1, slow_case);
+lightweight_unlock(obj, hdr, t0, t1, slow_case);
 } else if (LockingMode == LM_LEGACY) {
 // test if object header is pointing to the displaced header, and if so, restore
 // the displaced header in the object - if the object header is not pointing to
@@ -836,7 +836,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)

 if (LockingMode == LM_LIGHTWEIGHT) {
 ld(tmp, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
-fast_lock(obj_reg, tmp, t0, t1, slow_case);
+lightweight_lock(obj_reg, tmp, t0, t1, slow_case);
 j(count);
 } else if (LockingMode == LM_LEGACY) {
 // Load (object->mark() | 1) into swap_reg
@@ -949,7 +949,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
 ld(header_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 test_bit(t0, header_reg, exact_log2(markWord::monitor_value));
 bnez(t0, slow_case);
-fast_unlock(obj_reg, header_reg, swap_reg, t0, slow_case);
+lightweight_unlock(obj_reg, header_reg, swap_reg, t0, slow_case);
 j(count);

 bind(slow_case);
@@ -4647,14 +4647,14 @@ void MacroAssembler::test_bit(Register Rd, Register Rs, uint32_t bit_pos, Regist
 andi(Rd, Rs, 1UL << bit_pos, tmp);
 }

-// Implements fast-locking.
+// Implements lightweight-locking.
 // Branches to slow upon failure to lock the object.
 // Falls through upon success.
 //
 // - obj: the object to be locked
 // - hdr: the header, already loaded from obj, will be destroyed
 // - tmp1, tmp2: temporary registers, will be destroyed
-void MacroAssembler::fast_lock(Register obj, Register hdr, Register tmp1, Register tmp2, Label& slow) {
+void MacroAssembler::lightweight_lock(Register obj, Register hdr, Register tmp1, Register tmp2, Label& slow) {
 assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
 assert_different_registers(obj, hdr, tmp1, tmp2);

@@ -4681,14 +4681,14 @@ void MacroAssembler::fast_lock(Register obj, Register hdr, Register tmp1, Regist
 sw(tmp1, Address(xthread, JavaThread::lock_stack_top_offset()));
 }

-// Implements fast-unlocking.
+// Implements ligthweight-unlocking.
 // Branches to slow upon failure.
 // Falls through upon success.
 //
 // - obj: the object to be unlocked
 // - hdr: the (pre-loaded) header of the object
 // - tmp1, tmp2: temporary registers
-void MacroAssembler::fast_unlock(Register obj, Register hdr, Register tmp1, Register tmp2, Label& slow) {
+void MacroAssembler::lightweight_unlock(Register obj, Register hdr, Register tmp1, Register tmp2, Label& slow) {
 assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
 assert_different_registers(obj, hdr, tmp1, tmp2);

@@ -1457,8 +1457,8 @@ private:
 void store_conditional(Register dst, Register new_val, Register addr, enum operand_size size, Assembler::Aqrl release);

 public:
-void fast_lock(Register obj, Register hdr, Register tmp1, Register tmp2, Label& slow);
+void lightweight_lock(Register obj, Register hdr, Register tmp1, Register tmp2, Label& slow);
-void fast_unlock(Register obj, Register hdr, Register tmp1, Register tmp2, Label& slow);
+void lightweight_unlock(Register obj, Register hdr, Register tmp1, Register tmp2, Label& slow);
 };

 #ifdef ASSERT
@@ -2502,7 +2502,7 @@ encode %{
 } else {
 assert(LockingMode == LM_LIGHTWEIGHT, "");
 Label slow;
-__ fast_lock(oop, disp_hdr, tmp, t0, slow);
+__ lightweight_lock(oop, disp_hdr, tmp, t0, slow);

 // Indicate success on completion.
 __ mv(flag, zr);
@@ -2593,7 +2593,7 @@ encode %{
 } else {
 assert(LockingMode == LM_LIGHTWEIGHT, "");
 Label slow;
-__ fast_unlock(oop, tmp, box, disp_hdr, slow);
+__ lightweight_unlock(oop, tmp, box, disp_hdr, slow);

 // Indicate success on completion.
 __ mv(flag, zr);
@@ -1701,7 +1701,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
 } else {
 assert(LockingMode == LM_LIGHTWEIGHT, "");
 __ ld(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
-__ fast_lock(obj_reg, swap_reg, tmp, t0, slow_path_lock);
+__ lightweight_lock(obj_reg, swap_reg, tmp, t0, slow_path_lock);
 }

 __ bind(count);
@@ -1829,7 +1829,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
 __ ld(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 __ test_bit(t0, old_hdr, exact_log2(markWord::monitor_value));
 __ bnez(t0, slow_path_unlock);
-__ fast_unlock(obj_reg, old_hdr, swap_reg, t0, slow_path_unlock);
+__ lightweight_unlock(obj_reg, old_hdr, swap_reg, t0, slow_path_unlock);
 __ decrement(Address(xthread, JavaThread::held_monitor_count_offset()));
 }

@@ -70,7 +70,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
 const Register thread = disp_hdr;
 get_thread(thread);
 #endif
-fast_lock_impl(obj, hdr, thread, tmp, slow_case);
+lightweight_lock(obj, hdr, thread, tmp, slow_case);
 } else if (LockingMode == LM_LEGACY) {
 Label done;
 // and mark it as unlocked
@@ -136,7 +136,7 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_
 if (LockingMode == LM_LIGHTWEIGHT) {
 movptr(disp_hdr, Address(obj, hdr_offset));
 andptr(disp_hdr, ~(int32_t)markWord::lock_mask_in_place);
-fast_unlock_impl(obj, disp_hdr, hdr, slow_case);
+lightweight_unlock(obj, disp_hdr, hdr, slow_case);
 } else if (LockingMode == LM_LEGACY) {
 // test if object header is pointing to the displaced header, and if so, restore
 // the displaced header in the object - if the object header is not pointing to
@@ -622,7 +622,7 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmp
 movptr(Address(boxReg, 0), tmpReg);
 } else {
 assert(LockingMode == LM_LIGHTWEIGHT, "");
-fast_lock_impl(objReg, tmpReg, thread, scrReg, NO_COUNT);
+lightweight_lock(objReg, tmpReg, thread, scrReg, NO_COUNT);
 jmp(COUNT);
 }
 jmp(DONE_LABEL);
@@ -926,7 +926,7 @@ void C2_MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register t
 bind (Stacked);
 if (LockingMode == LM_LIGHTWEIGHT) {
 mov(boxReg, tmpReg);
-fast_unlock_impl(objReg, boxReg, tmpReg, NO_COUNT);
+lightweight_unlock(objReg, boxReg, tmpReg, NO_COUNT);
 jmp(COUNT);
 } else if (LockingMode == LM_LEGACY) {
 movptr(tmpReg, Address (boxReg, 0)); // re-fetch
@@ -1236,7 +1236,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
 #endif
 // Load object header, prepare for CAS from unlocked to locked.
 movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
-fast_lock_impl(obj_reg, swap_reg, thread, tmp_reg, slow_case);
+lightweight_lock(obj_reg, swap_reg, thread, tmp_reg, slow_case);
 } else if (LockingMode == LM_LEGACY) {
 // Load immediate 1 into swap_reg %rax
 movl(swap_reg, 1);
@@ -1366,7 +1366,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
 // Try to swing header from locked to unlocked.
 movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 andptr(swap_reg, ~(int32_t)markWord::lock_mask_in_place);
-fast_unlock_impl(obj_reg, swap_reg, header_reg, slow_case);
+lightweight_unlock(obj_reg, swap_reg, header_reg, slow_case);
 } else if (LockingMode == LM_LEGACY) {
 // Load the old header from BasicLock structure
 movptr(header_reg, Address(swap_reg,
@@ -9789,7 +9789,7 @@ void MacroAssembler::check_stack_alignment(Register sp, const char* msg, unsigne
 bind(L_stack_ok);
 }

-// Implements fast-locking.
+// Implements lightweight-locking.
 // Branches to slow upon failure to lock the object, with ZF cleared.
 // Falls through upon success with unspecified ZF.
 //
@@ -9797,7 +9797,7 @@ void MacroAssembler::check_stack_alignment(Register sp, const char* msg, unsigne
 // hdr: the (pre-loaded) header of the object, must be rax
 // thread: the thread which attempts to lock obj
 // tmp: a temporary register
-void MacroAssembler::fast_lock_impl(Register obj, Register hdr, Register thread, Register tmp, Label& slow) {
+void MacroAssembler::lightweight_lock(Register obj, Register hdr, Register thread, Register tmp, Label& slow) {
 assert(hdr == rax, "header must be in rax for cmpxchg");
 assert_different_registers(obj, hdr, thread, tmp);

@@ -9825,14 +9825,14 @@ void MacroAssembler::fast_lock_impl(Register obj, Register hdr, Register thread,
 movl(Address(thread, JavaThread::lock_stack_top_offset()), tmp);
 }

-// Implements fast-unlocking.
+// Implements lightweight-unlocking.
 // Branches to slow upon failure, with ZF cleared.
 // Falls through upon success, with unspecified ZF.
 //
 // obj: the object to be unlocked
 // hdr: the (pre-loaded) header of the object, must be rax
 // tmp: a temporary register
-void MacroAssembler::fast_unlock_impl(Register obj, Register hdr, Register tmp, Label& slow) {
+void MacroAssembler::lightweight_unlock(Register obj, Register hdr, Register tmp, Label& slow) {
 assert(hdr == rax, "header must be in rax for cmpxchg");
 assert_different_registers(obj, hdr, tmp);

@@ -2023,8 +2023,8 @@ public:

 void check_stack_alignment(Register sp, const char* msg, unsigned bias = 0, Register tmp = noreg);

-void fast_lock_impl(Register obj, Register hdr, Register thread, Register tmp, Label& slow);
+void lightweight_lock(Register obj, Register hdr, Register thread, Register tmp, Label& slow);
-void fast_unlock_impl(Register obj, Register hdr, Register tmp, Label& slow);
+void lightweight_unlock(Register obj, Register hdr, Register tmp, Label& slow);
 };

 /**
@@ -1717,7 +1717,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
 assert(LockingMode == LM_LIGHTWEIGHT, "must be");
 // Load object header
 __ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
-__ fast_lock_impl(obj_reg, swap_reg, thread, lock_reg, slow_path_lock);
+__ lightweight_lock(obj_reg, swap_reg, thread, lock_reg, slow_path_lock);
 }
 __ bind(count_mon);
 __ inc_held_monitor_count();
@@ -1876,7 +1876,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
 assert(LockingMode == LM_LIGHTWEIGHT, "must be");
 __ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 __ andptr(swap_reg, ~(int32_t)markWord::lock_mask_in_place);
-__ fast_unlock_impl(obj_reg, swap_reg, lock_reg, slow_path_unlock);
+__ lightweight_unlock(obj_reg, swap_reg, lock_reg, slow_path_unlock);
 __ dec_held_monitor_count();
 }

@@ -2188,7 +2188,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
 assert(LockingMode == LM_LIGHTWEIGHT, "must be");
 // Load object header
 __ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
-__ fast_lock_impl(obj_reg, swap_reg, r15_thread, rscratch1, slow_path_lock);
+__ lightweight_lock(obj_reg, swap_reg, r15_thread, rscratch1, slow_path_lock);
 }
 __ bind(count_mon);
 __ inc_held_monitor_count();
@@ -2332,7 +2332,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
 assert(LockingMode == LM_LIGHTWEIGHT, "must be");
 __ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 __ andptr(swap_reg, ~(int32_t)markWord::lock_mask_in_place);
-__ fast_unlock_impl(obj_reg, swap_reg, lock_reg, slow_path_unlock);
+__ lightweight_unlock(obj_reg, swap_reg, lock_reg, slow_path_unlock);
 __ dec_held_monitor_count();
 }
