8276901: Implement UseHeavyMonitors consistently
Reviewed-by: coleenp, mdoerr, dcubed
parent 69d8669fb3
commit 5b81d5eeb4
Changed paths:
  src/hotspot/cpu/aarch64
  src/hotspot/cpu/arm
  src/hotspot/cpu/ppc
  src/hotspot/cpu/s390
  src/hotspot/cpu/x86
  src/hotspot/share
  test/hotspot/jtreg/runtime/CommandLine
  test/jdk/java/util/concurrent/ConcurrentHashMap
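Reading the hunks together: every fast-path locking site (the C2 fast_lock/fast_unlock encodings, the C1 back ends and unwind handlers, the JNI native wrappers, and ObjectSynchronizer::enter/exit) now tests UseHeavyMonitors and, when the flag is set, skips stack locking and branches straight to the slow path, which inflates to a full ObjectMonitor. Along the way, C1's develop-only UseFastLocking flag is removed in favor of !UseHeavyMonitors, a develop-only VerifyHeavyMonitors flag is added to check that no stack locking happens, and UseHeavyMonitors itself is deprecated in product builds (deprecated in 18, obsolete in 19, expired in 20, per the special_jvm_flags entry below).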
@@ -3906,37 +3906,40 @@ encode %{
     // Check for existing monitor
     __ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);

-    // Set tmp to be (markWord of object | UNLOCK_VALUE).
-    __ orr(tmp, disp_hdr, markWord::unlocked_value);
+    if (!UseHeavyMonitors) {
+      // Set tmp to be (markWord of object | UNLOCK_VALUE).
+      __ orr(tmp, disp_hdr, markWord::unlocked_value);

-    // Initialize the box. (Must happen before we update the object mark!)
-    __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
+      // Initialize the box. (Must happen before we update the object mark!)
+      __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));

-    // Compare object markWord with an unlocked value (tmp) and if
-    // equal exchange the stack address of our box with object markWord.
-    // On failure disp_hdr contains the possibly locked markWord.
-    __ cmpxchg(oop, tmp, box, Assembler::xword, /*acquire*/ true,
-               /*release*/ true, /*weak*/ false, disp_hdr);
-    __ br(Assembler::EQ, cont);
+      // Compare object markWord with an unlocked value (tmp) and if
+      // equal exchange the stack address of our box with object markWord.
+      // On failure disp_hdr contains the possibly locked markWord.
+      __ cmpxchg(oop, tmp, box, Assembler::xword, /*acquire*/ true,
+                 /*release*/ true, /*weak*/ false, disp_hdr);
+      __ br(Assembler::EQ, cont);

-    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
+      assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

-    // If the compare-and-exchange succeeded, then we found an unlocked
-    // object, will have now locked it will continue at label cont
+      // If the compare-and-exchange succeeded, then we found an unlocked
+      // object, will have now locked it will continue at label cont

-    __ bind(cas_failed);
-    // We did not see an unlocked object so try the fast recursive case.
+      __ bind(cas_failed);
+      // We did not see an unlocked object so try the fast recursive case.

-    // Check if the owner is self by comparing the value in the
-    // markWord of object (disp_hdr) with the stack pointer.
-    __ mov(rscratch1, sp);
-    __ sub(disp_hdr, disp_hdr, rscratch1);
-    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
-    // If condition is true we are cont and hence we can store 0 as the
-    // displaced header in the box, which indicates that it is a recursive lock.
-    __ ands(tmp/*==0?*/, disp_hdr, tmp);   // Sets flags for result
-    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
+      // Check if the owner is self by comparing the value in the
+      // markWord of object (disp_hdr) with the stack pointer.
+      __ mov(rscratch1, sp);
+      __ sub(disp_hdr, disp_hdr, rscratch1);
+      __ mov(tmp, (address) (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
+      // If condition is true we are cont and hence we can store 0 as the
+      // displaced header in the box, which indicates that it is a recursive lock.
+      __ ands(tmp/*==0?*/, disp_hdr, tmp);   // Sets flags for result
+      __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
+    } else {
+      __ tst(oop, oop); // Set NE to indicate 'failure' -> take slow-path. We know that oop != 0.
+    }
     __ b(cont);

     // Handle existing monitor.
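For orientation, here is a minimal C++ sketch of the locking protocol this AArch64 encoding emits, with the new UseHeavyMonitors bail-out in place. The helper names (load_mark, cas_mark, current_sp, enter_inflated) are hypothetical stand-ins, not HotSpot APIs:

// Sketch only: what aarch64_enc_fast_lock computes, expressed in C++.
bool fast_lock(oop obj, BasicLock* box) {
  markWord mark = load_mark(obj);                    // object header word
  if (mark.has_monitor()) return enter_inflated(obj, box);
  if (UseHeavyMonitors) return false;                // new: force the slow path
  markWord unlocked = mark.set_unlocked();           // mark | unlocked_value
  box->set_displaced_header(unlocked);               // init box before the CAS
  if (cas_mark(obj, /*expected*/ unlocked,
               /*new*/ markWord::from_pointer(box)) == unlocked) {
    return true;                                     // stack-locked by us
  }
  // CAS failed: recursive iff the mark points into our own stack page.
  intptr_t dist = load_mark(obj).value() - current_sp();
  if ((dist & (~(os::vm_page_size() - 1) | markWord::lock_mask_in_place)) == 0) {
    box->set_displaced_header(markWord::from_pointer(NULL)); // recursion marker
    return true;
  }
  return false;                                      // contended, take slow path
}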
@@ -3982,23 +3985,29 @@ encode %{

     assert_different_registers(oop, box, tmp, disp_hdr);

-    // Find the lock address and load the displaced header from the stack.
-    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));
+    if (!UseHeavyMonitors) {
+      // Find the lock address and load the displaced header from the stack.
+      __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

-    // If the displaced header is 0, we have a recursive unlock.
-    __ cmp(disp_hdr, zr);
-    __ br(Assembler::EQ, cont);
+      // If the displaced header is 0, we have a recursive unlock.
+      __ cmp(disp_hdr, zr);
+      __ br(Assembler::EQ, cont);
+    }

     // Handle existing monitor.
     __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
     __ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);

-    // Check if it is still a light weight lock, this is is true if we
-    // see the stack address of the basicLock in the markWord of the
-    // object.
-
-    __ cmpxchg(oop, box, disp_hdr, Assembler::xword, /*acquire*/ false,
-               /*release*/ true, /*weak*/ false, tmp);
+    if (!UseHeavyMonitors) {
+      // Check if it is still a light weight lock, this is is true if we
+      // see the stack address of the basicLock in the markWord of the
+      // object.
+      __ cmpxchg(oop, box, disp_hdr, Assembler::xword, /*acquire*/ false,
+                 /*release*/ true, /*weak*/ false, tmp);
+    } else {
+      __ tst(oop, oop); // Set NE to indicate 'failure' -> take slow-path. We know that oop != 0.
+    }
     __ b(cont);

     assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
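The unlock side mirrors it. A correspondingly hedged sketch, using the same hypothetical helpers as above:

// Sketch only: what aarch64_enc_fast_unlock computes, expressed in C++.
bool fast_unlock(oop obj, BasicLock* box) {
  if (!UseHeavyMonitors && box->displaced_header().value() == 0) {
    return true;                                  // recursive unlock: nothing to do
  }
  if (load_mark(obj).has_monitor()) return exit_inflated(obj, box);
  if (UseHeavyMonitors) return false;             // new: always take the slow path
  // Still stack-locked iff the mark is the stack address of our box;
  // swing the displaced header back into the object in that case.
  return cas_mark(obj, /*expected*/ markWord::from_pointer(box),
                  /*new*/ box->displaced_header())
         == markWord::from_pointer(box);
}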
@@ -438,7 +438,11 @@ int LIR_Assembler::emit_unwind_handler() {
   if (method()->is_synchronized()) {
     monitor_address(0, FrameMap::r0_opr);
     stub = new MonitorExitStub(FrameMap::r0_opr, true, 0);
-    __ unlock_object(r5, r4, r0, *stub->entry());
+    if (UseHeavyMonitors) {
+      __ b(*stub->entry());
+    } else {
+      __ unlock_object(r5, r4, r0, *stub->entry());
+    }
     __ bind(*stub->continuation());
   }
@@ -2562,7 +2566,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
   Register obj = op->obj_opr()->as_register();  // may not be an oop
   Register hdr = op->hdr_opr()->as_register();
   Register lock = op->lock_opr()->as_register();
-  if (!UseFastLocking) {
+  if (UseHeavyMonitors) {
     __ b(*op->stub()->entry());
   } else if (op->code() == lir_lock) {
     assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
@@ -1642,39 +1642,42 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     // Load the oop from the handle
     __ ldr(obj_reg, Address(oop_handle_reg, 0));

-    // Load (object->mark() | 1) into swap_reg %r0
-    __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
-    __ orr(swap_reg, rscratch1, 1);
+    if (!UseHeavyMonitors) {
+      // Load (object->mark() | 1) into swap_reg %r0
+      __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
+      __ orr(swap_reg, rscratch1, 1);

-    // Save (object->mark() | 1) into BasicLock's displaced header
-    __ str(swap_reg, Address(lock_reg, mark_word_offset));
+      // Save (object->mark() | 1) into BasicLock's displaced header
+      __ str(swap_reg, Address(lock_reg, mark_word_offset));

-    // src -> dest iff dest == r0 else r0 <- dest
-    { Label here;
-      __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, lock_done, /*fallthrough*/NULL);
-    }
+      // src -> dest iff dest == r0 else r0 <- dest
+      { Label here;
+        __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, lock_done, /*fallthrough*/NULL);
+      }

-    // Hmm should this move to the slow path code area???
+      // Hmm should this move to the slow path code area???

-    // Test if the oopMark is an obvious stack pointer, i.e.,
-    //  1) (mark & 3) == 0, and
-    //  2) sp <= mark < mark + os::pagesize()
-    // These 3 tests can be done by evaluating the following
-    // expression: ((mark - sp) & (3 - os::vm_page_size())),
-    // assuming both stack pointer and pagesize have their
-    // least significant 2 bits clear.
-    // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
-
-    __ sub(swap_reg, sp, swap_reg);
-    __ neg(swap_reg, swap_reg);
-    __ ands(swap_reg, swap_reg, 3 - os::vm_page_size());
-
-    // Save the test result, for recursive case, the result is zero
-    __ str(swap_reg, Address(lock_reg, mark_word_offset));
-    __ br(Assembler::NE, slow_path_lock);
+      // Test if the oopMark is an obvious stack pointer, i.e.,
+      //  1) (mark & 3) == 0, and
+      //  2) sp <= mark < mark + os::pagesize()
+      // These 3 tests can be done by evaluating the following
+      // expression: ((mark - sp) & (3 - os::vm_page_size())),
+      // assuming both stack pointer and pagesize have their
+      // least significant 2 bits clear.
+      // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
+
+      __ sub(swap_reg, sp, swap_reg);
+      __ neg(swap_reg, swap_reg);
+      __ ands(swap_reg, swap_reg, 3 - os::vm_page_size());
+
+      // Save the test result, for recursive case, the result is zero
+      __ str(swap_reg, Address(lock_reg, mark_word_offset));
+      __ br(Assembler::NE, slow_path_lock);
+    } else {
+      __ b(slow_path_lock);
+    }

     // Slow path will re-enter here
     __ bind(lock_done);
   }
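The comment block above compresses three conditions into a single AND. Since the same trick recurs in every port touched by this commit, a standalone check may help; the following is illustrative only and assumes 4 KB pages and made-up addresses:

#include <cassert>
#include <cstdint>

int main() {
  const intptr_t page = 4096;                    // assumed page size
  // (3 - page) is the two's-complement spelling of ~(page - 1) | 3:
  assert((3 - page) == (intptr_t)(~(page - 1) | 3));

  intptr_t sp   = 0x7fff0000;                    // pretend stack pointer
  intptr_t mark = sp + 128;                      // BasicLock in our own frame
  // Zero iff the low two lock bits are clear AND mark lies within one
  // page above sp, i.e. a recursive lock held by this very thread.
  assert(((mark - sp) & (3 - page)) == 0);

  intptr_t other = sp + 2 * page;                // somebody else's stack lock
  assert(((other - sp) & (3 - page)) != 0);      // not ours: slow path
  return 0;
}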
@@ -1775,27 +1778,31 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     __ ldr(obj_reg, Address(oop_handle_reg, 0));

     Label done;
-    // Simple recursive lock?

-    __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
-    __ cbz(rscratch1, done);
+    if (!UseHeavyMonitors) {
+      // Simple recursive lock?
+      __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
+      __ cbz(rscratch1, done);

-    // Must save r0 if if it is live now because cmpxchg must use it
-    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
-      save_native_result(masm, ret_type, stack_slots);
-    }
+      // Must save r0 if if it is live now because cmpxchg must use it
+      if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
+        save_native_result(masm, ret_type, stack_slots);
+      }

-    // get address of the stack lock
-    __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
-    // get old displaced header
-    __ ldr(old_hdr, Address(r0, 0));
+      // get address of the stack lock
+      __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
+      // get old displaced header
+      __ ldr(old_hdr, Address(r0, 0));

-    // Atomic swap old header if oop still contains the stack lock
-    Label succeed;
-    __ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, succeed, &slow_path_unlock);
-    __ bind(succeed);
+      // Atomic swap old header if oop still contains the stack lock
+      Label succeed;
+      __ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, succeed, &slow_path_unlock);
+      __ bind(succeed);
+    } else {
+      __ b(slow_path_unlock);
+    }

     // slow path re-enters here
     __ bind(unlock_done);
     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
@@ -2425,7 +2425,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
   Register hdr = op->hdr_opr()->as_pointer_register();
   Register lock = op->lock_opr()->as_pointer_register();

-  if (!UseFastLocking) {
+  if (UseHeavyMonitors) {
     __ b(*op->stub()->entry());
   } else if (op->code() == lir_lock) {
     assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
@@ -2689,7 +2689,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
   // Obj may not be an oop.
   if (op->code() == lir_lock) {
     MonitorEnterStub* stub = (MonitorEnterStub*)op->stub();
-    if (UseFastLocking) {
+    if (!UseHeavyMonitors) {
       assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
       // Add debug info for NullPointerException only if one is possible.
       if (op->info() != NULL) {
@@ -2711,7 +2711,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
     }
   } else {
     assert (op->code() == lir_unlock, "Invalid code, expected lir_unlock");
-    if (UseFastLocking) {
+    if (!UseHeavyMonitors) {
       assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
       __ unlock_object(hdr, obj, lock, *op->stub()->entry());
     } else {
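The C1 hunks (this one and its AArch64, ARM, s390 and x86 counterparts) all apply the same mechanical mapping, since the develop-only flag is deleted outright in the c1_globals.hpp hunk further down:

// if (!UseFastLocking) { slow path }  ==>  if (UseHeavyMonitors)  { slow path }
// if (UseFastLocking)  { fast path }  ==>  if (!UseHeavyMonitors) { fast path }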
@@ -2660,27 +2660,32 @@ void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register
   andi_(temp, displaced_header, markWord::monitor_value);
   bne(CCR0, object_has_monitor);

-  // Set displaced_header to be (markWord of object | UNLOCK_VALUE).
-  ori(displaced_header, displaced_header, markWord::unlocked_value);
+  if (!UseHeavyMonitors) {
+    // Set displaced_header to be (markWord of object | UNLOCK_VALUE).
+    ori(displaced_header, displaced_header, markWord::unlocked_value);

-  // Load Compare Value application register.
+    // Load Compare Value application register.

-  // Initialize the box. (Must happen before we update the object mark!)
-  std(displaced_header, BasicLock::displaced_header_offset_in_bytes(), box);
+    // Initialize the box. (Must happen before we update the object mark!)
+    std(displaced_header, BasicLock::displaced_header_offset_in_bytes(), box);

-  // Must fence, otherwise, preceding store(s) may float below cmpxchg.
-  // Compare object markWord with mark and if equal exchange scratch1 with object markWord.
-  cmpxchgd(/*flag=*/flag,
-           /*current_value=*/current_header,
-           /*compare_value=*/displaced_header,
-           /*exchange_value=*/box,
-           /*where=*/oop,
-           MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
-           MacroAssembler::cmpxchgx_hint_acquire_lock(),
-           noreg,
-           &cas_failed,
-           /*check without membar and ldarx first*/true);
-  assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
+    // Must fence, otherwise, preceding store(s) may float below cmpxchg.
+    // Compare object markWord with mark and if equal exchange scratch1 with object markWord.
+    cmpxchgd(/*flag=*/flag,
+             /*current_value=*/current_header,
+             /*compare_value=*/displaced_header,
+             /*exchange_value=*/box,
+             /*where=*/oop,
+             MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
+             MacroAssembler::cmpxchgx_hint_acquire_lock(),
+             noreg,
+             &cas_failed,
+             /*check without membar and ldarx first*/true);
+    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
+  } else {
+    // Set NE to indicate 'failure' -> take slow-path.
+    crandc(flag, Assembler::equal, flag, Assembler::equal);
+  }

   // If the compare-and-exchange succeeded, then we found an unlocked
   // object and we have now locked it.
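Unlike AArch64 (tst(oop, oop)) and x86 (testptr(objReg, objReg)), PPC has to force the "failure" outcome into a particular condition-register field, hence the crandc idiom in the else branch. A note on the trick, assuming standard Power semantics for crandc (condition register AND with complement):

// crandc BT, BA, BB computes CR[BT] = CR[BA] & ~CR[BB]. With all three
// operands naming the same EQ bit this is eq & ~eq == 0, so the EQ bit is
// cleared unconditionally, the flag reads "not equal", and the following
// branch-on-equal falls through into the slow path.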
@@ -2768,12 +2773,14 @@ void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Registe
   }
 #endif

-  // Find the lock address and load the displaced header from the stack.
-  ld(displaced_header, BasicLock::displaced_header_offset_in_bytes(), box);
+  if (!UseHeavyMonitors) {
+    // Find the lock address and load the displaced header from the stack.
+    ld(displaced_header, BasicLock::displaced_header_offset_in_bytes(), box);

-  // If the displaced header is 0, we have a recursive unlock.
-  cmpdi(flag, displaced_header, 0);
-  beq(flag, cont);
+    // If the displaced header is 0, we have a recursive unlock.
+    cmpdi(flag, displaced_header, 0);
+    beq(flag, cont);
+  }

   // Handle existing monitor.
   // The object has an existing monitor iff (mark & monitor_value) != 0.
@@ -2782,20 +2789,24 @@ void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Registe
   andi_(R0, current_header, markWord::monitor_value);
   bne(CCR0, object_has_monitor);

-  // Check if it is still a light weight lock, this is is true if we see
-  // the stack address of the basicLock in the markWord of the object.
-  // Cmpxchg sets flag to cmpd(current_header, box).
-  cmpxchgd(/*flag=*/flag,
-           /*current_value=*/current_header,
-           /*compare_value=*/box,
-           /*exchange_value=*/displaced_header,
-           /*where=*/oop,
-           MacroAssembler::MemBarRel,
-           MacroAssembler::cmpxchgx_hint_release_lock(),
-           noreg,
-           &cont);
-
-  assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
+  if (!UseHeavyMonitors) {
+    // Check if it is still a light weight lock, this is is true if we see
+    // the stack address of the basicLock in the markWord of the object.
+    // Cmpxchg sets flag to cmpd(current_header, box).
+    cmpxchgd(/*flag=*/flag,
+             /*current_value=*/current_header,
+             /*compare_value=*/box,
+             /*exchange_value=*/displaced_header,
+             /*where=*/oop,
+             MacroAssembler::MemBarRel,
+             MacroAssembler::cmpxchgx_hint_release_lock(),
+             noreg,
+             &cont);
+    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
+  } else {
+    // Set NE to indicate 'failure' -> take slow-path.
+    crandc(flag, Assembler::equal, flag, Assembler::equal);
+  }

   // Handle existing monitor.
   b(cont);
@@ -2730,7 +2730,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
   Register obj = op->obj_opr()->as_register();  // May not be an oop.
   Register hdr = op->hdr_opr()->as_register();
   Register lock = op->lock_opr()->as_register();
-  if (!UseFastLocking) {
+  if (UseHeavyMonitors) {
     __ branch_optimized(Assembler::bcondAlways, *op->stub()->entry());
   } else if (op->code() == lir_lock) {
     assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
@@ -461,7 +461,11 @@ int LIR_Assembler::emit_unwind_handler() {
   if (method()->is_synchronized()) {
     monitor_address(0, FrameMap::rax_opr);
     stub = new MonitorExitStub(FrameMap::rax_opr, true, 0);
-    __ unlock_object(rdi, rsi, rax, *stub->entry());
+    if (UseHeavyMonitors) {
+      __ jmp(*stub->entry());
+    } else {
+      __ unlock_object(rdi, rsi, rax, *stub->entry());
+    }
     __ bind(*stub->continuation());
   }
@@ -3498,7 +3502,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
   Register obj = op->obj_opr()->as_register();  // may not be an oop
   Register hdr = op->hdr_opr()->as_register();
   Register lock = op->lock_opr()->as_register();
-  if (!UseFastLocking) {
+  if (UseHeavyMonitors) {
     __ jmp(*op->stub()->entry());
   } else if (op->code() == lir_lock) {
     assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
@@ -485,6 +485,7 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmp

 #if INCLUDE_RTM_OPT
   if (UseRTMForStackLocks && use_rtm) {
+    assert(!UseHeavyMonitors, "+UseHeavyMonitors and +UseRTMForStackLocks are mutually exclusive");
     rtm_stack_locking(objReg, tmpReg, scrReg, cx2Reg,
                       stack_rtm_counters, method_data, profile_rtm,
                       DONE_LABEL, IsInflated);
@@ -495,20 +496,25 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmp
   testptr(tmpReg, markWord::monitor_value); // inflated vs stack-locked|neutral
   jccb(Assembler::notZero, IsInflated);

-  // Attempt stack-locking ...
-  orptr (tmpReg, markWord::unlocked_value);
-  movptr(Address(boxReg, 0), tmpReg);          // Anticipate successful CAS
-  lock();
-  cmpxchgptr(boxReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Updates tmpReg
-  jcc(Assembler::equal, DONE_LABEL);           // Success
-
-  // Recursive locking.
-  // The object is stack-locked: markword contains stack pointer to BasicLock.
-  // Locked by current thread if difference with current SP is less than one page.
-  subptr(tmpReg, rsp);
-  // Next instruction set ZFlag == 1 (Success) if difference is less then one page.
-  andptr(tmpReg, (int32_t) (NOT_LP64(0xFFFFF003) LP64_ONLY(7 - os::vm_page_size())) );
-  movptr(Address(boxReg, 0), tmpReg);
+  if (!UseHeavyMonitors) {
+    // Attempt stack-locking ...
+    orptr (tmpReg, markWord::unlocked_value);
+    movptr(Address(boxReg, 0), tmpReg);          // Anticipate successful CAS
+    lock();
+    cmpxchgptr(boxReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Updates tmpReg
+    jcc(Assembler::equal, DONE_LABEL);           // Success
+
+    // Recursive locking.
+    // The object is stack-locked: markword contains stack pointer to BasicLock.
+    // Locked by current thread if difference with current SP is less than one page.
+    subptr(tmpReg, rsp);
+    // Next instruction set ZFlag == 1 (Success) if difference is less then one page.
+    andptr(tmpReg, (int32_t) (NOT_LP64(0xFFFFF003) LP64_ONLY(7 - os::vm_page_size())) );
+    movptr(Address(boxReg, 0), tmpReg);
+  } else {
+    // Clear ZF so that we take the slow path at the DONE label. objReg is known to be not 0.
+    testptr(objReg, objReg);
+  }
   jmp(DONE_LABEL);

   bind(IsInflated);
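This is the x86 flavor of the page-distance test sketched after the AArch64 native-wrapper hunk above. One port-specific detail, stated here as an assumption rather than from the sources:

// On LP64 the BasicLock address stored in the mark is taken to be at least
// 8-byte aligned, so the mask is (7 - os::vm_page_size()) == ~(page-1) | 7;
// the 32-bit build hard-codes the 4 KB-page equivalent 0xFFFFF003 (two low
// lock bits, 4096-byte pages). A zero result means "our own stack, same
// page"; that zero doubles as the recursive displaced header stored into
// the box, and ZF == 1 already signals success.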
@@ -638,6 +644,7 @@ void C2_MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register t

 #if INCLUDE_RTM_OPT
   if (UseRTMForStackLocks && use_rtm) {
+    assert(!UseHeavyMonitors, "+UseHeavyMonitors and +UseRTMForStackLocks are mutually exclusive");
     Label L_regular_unlock;
     movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // fetch markword
     andptr(tmpReg, markWord::lock_mask_in_place);                     // look at 2 lock bits
@@ -649,11 +656,15 @@ void C2_MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register t
   }
 #endif

-  cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD);                   // Examine the displaced header
-  jcc   (Assembler::zero, DONE_LABEL);                              // 0 indicates recursive stack-lock
+  if (!UseHeavyMonitors) {
+    cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD);                 // Examine the displaced header
+    jcc   (Assembler::zero, DONE_LABEL);                            // 0 indicates recursive stack-lock
+  }
   movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Examine the object's markword
-  testptr(tmpReg, markWord::monitor_value);                         // Inflated?
-  jccb  (Assembler::zero, Stacked);
+  if (!UseHeavyMonitors) {
+    testptr(tmpReg, markWord::monitor_value);                       // Inflated?
+    jccb  (Assembler::zero, Stacked);
+  }

   // It's inflated.
 #if INCLUDE_RTM_OPT
@@ -795,11 +806,12 @@ void C2_MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register t
   testl (boxReg, 0);                      // set ICC.ZF=1 to indicate success
   jmpb  (DONE_LABEL);

-  bind  (Stacked);
-  movptr(tmpReg, Address (boxReg, 0));      // re-fetch
-  lock();
-  cmpxchgptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Uses RAX which is box
-
+  if (!UseHeavyMonitors) {
+    bind  (Stacked);
+    movptr(tmpReg, Address (boxReg, 0));    // re-fetch
+    lock();
+    cmpxchgptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Uses RAX which is box
+  }
 #endif
   bind(DONE_LABEL);
 }
@@ -1705,36 +1705,41 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     // Load the oop from the handle
     __ movptr(obj_reg, Address(oop_handle_reg, 0));

-    // Load immediate 1 into swap_reg %rax,
-    __ movptr(swap_reg, 1);
+    if (!UseHeavyMonitors) {
+      // Load immediate 1 into swap_reg %rax,
+      __ movptr(swap_reg, 1);

-    // Load (object->mark() | 1) into swap_reg %rax,
-    __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
+      // Load (object->mark() | 1) into swap_reg %rax,
+      __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));

-    // Save (object->mark() | 1) into BasicLock's displaced header
-    __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
+      // Save (object->mark() | 1) into BasicLock's displaced header
+      __ movptr(Address(lock_reg, mark_word_offset), swap_reg);

-    // src -> dest iff dest == rax, else rax, <- dest
-    // *obj_reg = lock_reg iff *obj_reg == rax, else rax, = *(obj_reg)
-    __ lock();
-    __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
-    __ jcc(Assembler::equal, lock_done);
+      // src -> dest iff dest == rax, else rax, <- dest
+      // *obj_reg = lock_reg iff *obj_reg == rax, else rax, = *(obj_reg)
+      __ lock();
+      __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
+      __ jcc(Assembler::equal, lock_done);

-    // Test if the oopMark is an obvious stack pointer, i.e.,
-    //  1) (mark & 3) == 0, and
-    //  2) rsp <= mark < mark + os::pagesize()
-    // These 3 tests can be done by evaluating the following
-    // expression: ((mark - rsp) & (3 - os::vm_page_size())),
-    // assuming both stack pointer and pagesize have their
-    // least significant 2 bits clear.
-    // NOTE: the oopMark is in swap_reg %rax, as the result of cmpxchg
+      // Test if the oopMark is an obvious stack pointer, i.e.,
+      //  1) (mark & 3) == 0, and
+      //  2) rsp <= mark < mark + os::pagesize()
+      // These 3 tests can be done by evaluating the following
+      // expression: ((mark - rsp) & (3 - os::vm_page_size())),
+      // assuming both stack pointer and pagesize have their
+      // least significant 2 bits clear.
+      // NOTE: the oopMark is in swap_reg %rax, as the result of cmpxchg

-    __ subptr(swap_reg, rsp);
-    __ andptr(swap_reg, 3 - os::vm_page_size());
+      __ subptr(swap_reg, rsp);
+      __ andptr(swap_reg, 3 - os::vm_page_size());

-    // Save the test result, for recursive case, the result is zero
-    __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
-    __ jcc(Assembler::notEqual, slow_path_lock);
+      // Save the test result, for recursive case, the result is zero
+      __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
+      __ jcc(Assembler::notEqual, slow_path_lock);
+    } else {
+      __ jmp(slow_path_lock);
+    }

     // Slow path will re-enter here
     __ bind(lock_done);
   }
@@ -1852,29 +1857,33 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     // Get locked oop from the handle we passed to jni
     __ movptr(obj_reg, Address(oop_handle_reg, 0));

-    // Simple recursive lock?
+    if (!UseHeavyMonitors) {
+      // Simple recursive lock?
+      __ cmpptr(Address(rbp, lock_slot_rbp_offset), (int32_t)NULL_WORD);
+      __ jcc(Assembler::equal, done);

-    __ cmpptr(Address(rbp, lock_slot_rbp_offset), (int32_t)NULL_WORD);
-    __ jcc(Assembler::equal, done);
+      // Must save rax, if if it is live now because cmpxchg must use it
+      if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
+        save_native_result(masm, ret_type, stack_slots);
+      }

-    // Must save rax, if if it is live now because cmpxchg must use it
-    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
-      save_native_result(masm, ret_type, stack_slots);
-    }
+      // get old displaced header
+      __ movptr(rbx, Address(rbp, lock_slot_rbp_offset));

-    // get old displaced header
-    __ movptr(rbx, Address(rbp, lock_slot_rbp_offset));
+      // get address of the stack lock
+      __ lea(rax, Address(rbp, lock_slot_rbp_offset));

-    // get address of the stack lock
-    __ lea(rax, Address(rbp, lock_slot_rbp_offset));
-
-    // Atomic swap old header if oop still contains the stack lock
-    // src -> dest iff dest == rax, else rax, <- dest
-    // *obj_reg = rbx, iff *obj_reg == rax, else rax, = *(obj_reg)
-    __ lock();
-    __ cmpxchgptr(rbx, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
-    __ jcc(Assembler::notEqual, slow_path_unlock);
+      // Atomic swap old header if oop still contains the stack lock
+      // src -> dest iff dest == rax, else rax, <- dest
+      // *obj_reg = rbx, iff *obj_reg == rax, else rax, = *(obj_reg)
+      __ lock();
+      __ cmpxchgptr(rbx, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
+      __ jcc(Assembler::notEqual, slow_path_unlock);
+    } else {
+      __ jmp(slow_path_unlock);
+    }

     // slow path re-enters here
     __ bind(unlock_done);
     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
@@ -1918,37 +1918,41 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     // Load the oop from the handle
     __ movptr(obj_reg, Address(oop_handle_reg, 0));

-    // Load immediate 1 into swap_reg %rax
-    __ movl(swap_reg, 1);
+    if (!UseHeavyMonitors) {
+      // Load immediate 1 into swap_reg %rax
+      __ movl(swap_reg, 1);

-    // Load (object->mark() | 1) into swap_reg %rax
-    __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
+      // Load (object->mark() | 1) into swap_reg %rax
+      __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));

-    // Save (object->mark() | 1) into BasicLock's displaced header
-    __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
+      // Save (object->mark() | 1) into BasicLock's displaced header
+      __ movptr(Address(lock_reg, mark_word_offset), swap_reg);

-    // src -> dest iff dest == rax else rax <- dest
-    __ lock();
-    __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
-    __ jcc(Assembler::equal, lock_done);
+      // src -> dest iff dest == rax else rax <- dest
+      __ lock();
+      __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
+      __ jcc(Assembler::equal, lock_done);

-    // Hmm should this move to the slow path code area???
+      // Hmm should this move to the slow path code area???

-    // Test if the oopMark is an obvious stack pointer, i.e.,
-    //  1) (mark & 3) == 0, and
-    //  2) rsp <= mark < mark + os::pagesize()
-    // These 3 tests can be done by evaluating the following
-    // expression: ((mark - rsp) & (3 - os::vm_page_size())),
-    // assuming both stack pointer and pagesize have their
-    // least significant 2 bits clear.
-    // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
+      // Test if the oopMark is an obvious stack pointer, i.e.,
+      //  1) (mark & 3) == 0, and
+      //  2) rsp <= mark < mark + os::pagesize()
+      // These 3 tests can be done by evaluating the following
+      // expression: ((mark - rsp) & (3 - os::vm_page_size())),
+      // assuming both stack pointer and pagesize have their
+      // least significant 2 bits clear.
+      // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg

-    __ subptr(swap_reg, rsp);
-    __ andptr(swap_reg, 3 - os::vm_page_size());
+      __ subptr(swap_reg, rsp);
+      __ andptr(swap_reg, 3 - os::vm_page_size());

-    // Save the test result, for recursive case, the result is zero
-    __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
-    __ jcc(Assembler::notEqual, slow_path_lock);
+      // Save the test result, for recursive case, the result is zero
+      __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
+      __ jcc(Assembler::notEqual, slow_path_lock);
+    } else {
+      __ jmp(slow_path_lock);
+    }

     // Slow path will re-enter here
@@ -2055,27 +2059,31 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     __ movptr(obj_reg, Address(oop_handle_reg, 0));

     Label done;
-    // Simple recursive lock?

-    __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), (int32_t)NULL_WORD);
-    __ jcc(Assembler::equal, done);
+    if (!UseHeavyMonitors) {
+      // Simple recursive lock?
+      __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), (int32_t)NULL_WORD);
+      __ jcc(Assembler::equal, done);

-    // Must save rax if if it is live now because cmpxchg must use it
-    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
-      save_native_result(masm, ret_type, stack_slots);
-    }
+      // Must save rax if if it is live now because cmpxchg must use it
+      if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
+        save_native_result(masm, ret_type, stack_slots);
+      }

-    // get address of the stack lock
-    __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
-    // get old displaced header
-    __ movptr(old_hdr, Address(rax, 0));
+      // get address of the stack lock
+      __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
+      // get old displaced header
+      __ movptr(old_hdr, Address(rax, 0));

-    // Atomic swap old header if oop still contains the stack lock
-    __ lock();
-    __ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
-    __ jcc(Assembler::notEqual, slow_path_unlock);
+      // Atomic swap old header if oop still contains the stack lock
+      __ lock();
+      __ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
+      __ jcc(Assembler::notEqual, slow_path_unlock);
+    } else {
+      __ jmp(slow_path_unlock);
+    }

     // slow path re-enters here
     __ bind(unlock_done);
     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
@@ -620,7 +620,7 @@ void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, L
   // setup registers
   LIR_Opr hdr = lock;
   lock = new_hdr;
-  CodeStub* slow_path = new MonitorExitStub(lock, UseFastLocking, monitor_no);
+  CodeStub* slow_path = new MonitorExitStub(lock, !UseHeavyMonitors, monitor_no);
   __ load_stack_address_monitor(monitor_no, lock);
   __ unlock_object(hdr, object, lock, scratch, slow_path);
 }
@@ -736,7 +736,7 @@ JRT_BLOCK_ENTRY(void, Runtime1::monitorenter(JavaThread* current, oopDesc* obj,
     _monitorenter_slowcase_cnt++;
   }
 #endif
-  if (!UseFastLocking) {
+  if (UseHeavyMonitors) {
     lock->set_obj(obj);
   }
   assert(obj == lock->obj(), "must match");
@@ -242,9 +242,6 @@
   develop(bool, UseFastNewObjectArray, true,                                 \
           "Use fast inlined object array allocation")                        \
                                                                              \
-  develop(bool, UseFastLocking, true,                                        \
-          "Use fast inlined locking code")                                   \
-                                                                             \
   develop(bool, UseSlowPath, false,                                          \
           "For debugging: test slow cases by always using them")             \
                                                                              \
@@ -532,6 +532,9 @@ static SpecialFlag const special_jvm_flags[] = {
   { "DynamicDumpSharedSpaces", JDK_Version::jdk(18), JDK_Version::jdk(19), JDK_Version::undefined() },
   { "RequireSharedSpaces",     JDK_Version::jdk(18), JDK_Version::jdk(19), JDK_Version::undefined() },
   { "UseSharedSpaces",         JDK_Version::jdk(18), JDK_Version::jdk(19), JDK_Version::undefined() },
+#ifdef PRODUCT
+  { "UseHeavyMonitors",        JDK_Version::jdk(18), JDK_Version::jdk(19), JDK_Version::jdk(20) },
+#endif

   // --- Deprecated alias flags (see also aliased_jvm_flags) - sorted by obsolete_in then expired_in:
   { "DefaultMaxRAMFraction",   JDK_Version::jdk(8),  JDK_Version::undefined(), JDK_Version::undefined() },
|
||||
}
|
||||
#endif
|
||||
|
||||
#if !defined(X86) && !defined(AARCH64) && !defined(PPC64)
|
||||
if (UseHeavyMonitors) {
|
||||
warning("UseHeavyMonitors is not fully implemented on this architecture");
|
||||
}
|
||||
#endif
|
||||
#if defined(X86) || defined(PPC64)
|
||||
if (UseHeavyMonitors && UseRTMForStackLocks) {
|
||||
fatal("-XX:+UseHeavyMonitors and -XX:+UseRTMForStackLocks are mutually exclusive");
|
||||
}
|
||||
#endif
|
||||
if (VerifyHeavyMonitors && !UseHeavyMonitors) {
|
||||
fatal("-XX:+VerifyHeavyMonitors requires -XX:+UseHeavyMonitors");
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
|
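Net effect of the new checks: -XX:+VerifyHeavyMonitors without -XX:+UseHeavyMonitors is a hard startup error; -XX:+UseHeavyMonitors together with -XX:+UseRTMForStackLocks is a hard error on x86 and PPC64 (matching the new asserts in the C2 fast paths above); and on architectures other than x86, AArch64 and PPC64 the flag merely prints a warning, with the useHeavyMonitors() helper below ignoring it.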
@@ -1066,7 +1066,12 @@ const intx ObjectAlignmentInBytes = 8;
           "If true, error data is printed to stdout instead of a file")     \
                                                                             \
   product(bool, UseHeavyMonitors, false,                                    \
-          "use heavyweight instead of lightweight Java monitors")           \
+          "(Deprecated) Use heavyweight instead of lightweight Java "       \
+          "monitors")                                                       \
+                                                                            \
+  develop(bool, VerifyHeavyMonitors, false,                                 \
+          "Checks that no stack locking happens when using "                \
+          "+UseHeavyMonitors")                                              \
                                                                             \
   product(bool, PrintStringTableStatistics, false,                          \
           "print statistics about the StringTable and SymbolTable")         \
@@ -417,6 +417,14 @@ void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread
   }
 }

+static bool useHeavyMonitors() {
+#if defined(X86) || defined(AARCH64) || defined(PPC64)
+  return UseHeavyMonitors;
+#else
+  return false;
+#endif
+}
+
 // -----------------------------------------------------------------------------
 // Monitor Enter/Exit
 // The interpreter and compiler assembly code tries to lock using the fast path
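The helper returns the flag only on the three architectures whose generated fast paths now honor it, and a compile-time false everywhere else, so the if (!useHeavyMonitors()) guards below fold away on other platforms and the runtime keeps stack-locking exactly where compiled code does.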
@@ -428,28 +436,33 @@ void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current)
     handle_sync_on_value_based_class(obj, current);
   }

-  markWord mark = obj->mark();
-  if (mark.is_neutral()) {
-    // Anticipate successful CAS -- the ST of the displaced mark must
-    // be visible <= the ST performed by the CAS.
-    lock->set_displaced_header(mark);
-    if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
+  if (!useHeavyMonitors()) {
+    markWord mark = obj->mark();
+    if (mark.is_neutral()) {
+      // Anticipate successful CAS -- the ST of the displaced mark must
+      // be visible <= the ST performed by the CAS.
+      lock->set_displaced_header(mark);
+      if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
+        return;
+      }
+      // Fall through to inflate() ...
+    } else if (mark.has_locker() &&
+               current->is_lock_owned((address)mark.locker())) {
+      assert(lock != mark.locker(), "must not re-lock the same lock");
+      assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");
+      lock->set_displaced_header(markWord::from_pointer(NULL));
       return;
     }
-    // Fall through to inflate() ...
-  } else if (mark.has_locker() &&
-             current->is_lock_owned((address)mark.locker())) {
-    assert(lock != mark.locker(), "must not re-lock the same lock");
-    assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");
-    lock->set_displaced_header(markWord::from_pointer(NULL));
-    return;
-  }

-  // The object header will never be displaced to this lock,
-  // so it does not matter what the value is, except that it
-  // must be non-zero to avoid looking like a re-entrant lock,
-  // and must not look locked either.
-  lock->set_displaced_header(markWord::unused_mark());
+    // The object header will never be displaced to this lock,
+    // so it does not matter what the value is, except that it
+    // must be non-zero to avoid looking like a re-entrant lock,
+    // and must not look locked either.
+    lock->set_displaced_header(markWord::unused_mark());
+  } else if (VerifyHeavyMonitors) {
+    guarantee(!obj->mark().has_locker(), "must not be stack-locked");
+  }

   // An async deflation can race after the inflate() call and before
   // enter() can make the ObjectMonitor busy. enter() returns false if
   // we have lost the race to async deflation and we simply try again.
|
||||
}
|
||||
|
||||
void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
|
||||
markWord mark = object->mark();
|
||||
if (!useHeavyMonitors()) {
|
||||
markWord mark = object->mark();
|
||||
|
||||
markWord dhw = lock->displaced_header();
|
||||
if (dhw.value() == 0) {
|
||||
// If the displaced header is NULL, then this exit matches up with
|
||||
// a recursive enter. No real work to do here except for diagnostics.
|
||||
markWord dhw = lock->displaced_header();
|
||||
if (dhw.value() == 0) {
|
||||
// If the displaced header is NULL, then this exit matches up with
|
||||
// a recursive enter. No real work to do here except for diagnostics.
|
||||
#ifndef PRODUCT
|
||||
if (mark != markWord::INFLATING()) {
|
||||
// Only do diagnostics if we are not racing an inflation. Simply
|
||||
// exiting a recursive enter of a Java Monitor that is being
|
||||
// inflated is safe; see the has_monitor() comment below.
|
||||
assert(!mark.is_neutral(), "invariant");
|
||||
assert(!mark.has_locker() ||
|
||||
current->is_lock_owned((address)mark.locker()), "invariant");
|
||||
if (mark.has_monitor()) {
|
||||
// The BasicLock's displaced_header is marked as a recursive
|
||||
// enter and we have an inflated Java Monitor (ObjectMonitor).
|
||||
// This is a special case where the Java Monitor was inflated
|
||||
// after this thread entered the stack-lock recursively. When a
|
||||
// Java Monitor is inflated, we cannot safely walk the Java
|
||||
// Monitor owner's stack and update the BasicLocks because a
|
||||
// Java Monitor can be asynchronously inflated by a thread that
|
||||
// does not own the Java Monitor.
|
||||
ObjectMonitor* m = mark.monitor();
|
||||
assert(m->object()->mark() == mark, "invariant");
|
||||
assert(m->is_entered(current), "invariant");
|
||||
if (mark != markWord::INFLATING()) {
|
||||
// Only do diagnostics if we are not racing an inflation. Simply
|
||||
// exiting a recursive enter of a Java Monitor that is being
|
||||
// inflated is safe; see the has_monitor() comment below.
|
||||
assert(!mark.is_neutral(), "invariant");
|
||||
assert(!mark.has_locker() ||
|
||||
current->is_lock_owned((address)mark.locker()), "invariant");
|
||||
if (mark.has_monitor()) {
|
||||
// The BasicLock's displaced_header is marked as a recursive
|
||||
// enter and we have an inflated Java Monitor (ObjectMonitor).
|
||||
// This is a special case where the Java Monitor was inflated
|
||||
// after this thread entered the stack-lock recursively. When a
|
||||
// Java Monitor is inflated, we cannot safely walk the Java
|
||||
// Monitor owner's stack and update the BasicLocks because a
|
||||
// Java Monitor can be asynchronously inflated by a thread that
|
||||
// does not own the Java Monitor.
|
||||
ObjectMonitor* m = mark.monitor();
|
||||
assert(m->object()->mark() == mark, "invariant");
|
||||
assert(m->is_entered(current), "invariant");
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
return;
|
||||
}
|
||||
|
||||
if (mark == markWord::from_pointer(lock)) {
|
||||
// If the object is stack-locked by the current thread, try to
|
||||
// swing the displaced header from the BasicLock back to the mark.
|
||||
assert(dhw.is_neutral(), "invariant");
|
||||
if (object->cas_set_mark(dhw, mark) == mark) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (mark == markWord::from_pointer(lock)) {
|
||||
// If the object is stack-locked by the current thread, try to
|
||||
// swing the displaced header from the BasicLock back to the mark.
|
||||
assert(dhw.is_neutral(), "invariant");
|
||||
if (object->cas_set_mark(dhw, mark) == mark) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
} else if (VerifyHeavyMonitors) {
|
||||
guarantee(!object->mark().has_locker(), "must not be stack-locked");
|
||||
}
|
||||
|
||||
// We have to take the slow-path of possible inflation and then exit.
|
||||
@@ -804,7 +821,10 @@ intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
   markWord temp, test;
   intptr_t hash;
   markWord mark = read_stable_mark(obj);
-
+  if (VerifyHeavyMonitors) {
+    assert(UseHeavyMonitors, "+VerifyHeavyMonitors requires +UseHeavyMonitors");
+    guarantee(!mark.has_locker(), "must not be stack locked");
+  }
   if (mark.is_neutral()) {               // if this is a normal header
     hash = mark.hash();
     if (hash != 0) {                     // if it has a hash, just return it
@@ -21,6 +21,10 @@
  * questions.
  */

+import java.util.Arrays;
+import java.util.ArrayList;
+
+import jdk.test.lib.Platform;
 import jdk.test.lib.process.ProcessTools;
 import jdk.test.lib.process.OutputAnalyzer;
 import jdk.test.lib.cli.*;
@@ -39,22 +43,31 @@ public class VMDeprecatedOptions {
      * each entry is {[0]: option name, [1]: value to set
      * (true/false/n/string)}.
      */
-    public static final String[][] DEPRECATED_OPTIONS = {
-        // deprecated non-alias flags:
-        {"MaxGCMinorPauseMillis", "1032"},
-        {"MaxRAMFraction", "8"},
-        {"MinRAMFraction", "2"},
-        {"InitialRAMFraction", "64"},
-        {"TLABStats", "false"},
-        {"AllowRedefinitionToAddDeleteMethods", "true"},
-        {"UseSharedSpaces", "false"},
-        {"RequireSharedSpaces", "false"},
-        {"DumpSharedSpaces", "false"},
-        {"DynamicDumpSharedSpaces", "false"},
+    public static final String[][] DEPRECATED_OPTIONS;
+    static {
+        ArrayList<String[]> deprecated = new ArrayList(
+          Arrays.asList(new String[][]{
+            // deprecated non-alias flags:
+            {"MaxGCMinorPauseMillis", "1032"},
+            {"MaxRAMFraction", "8"},
+            {"MinRAMFraction", "2"},
+            {"InitialRAMFraction", "64"},
+            {"TLABStats", "false"},
+            {"AllowRedefinitionToAddDeleteMethods", "true"},
+            {"UseSharedSpaces", "false"},
+            {"RequireSharedSpaces", "false"},
+            {"DumpSharedSpaces", "false"},
+            {"DynamicDumpSharedSpaces", "false"},

-        // deprecated alias flags (see also aliased_jvm_flags):
-        {"DefaultMaxRAMFraction", "4"},
-        {"CreateMinidumpOnCrash", "false"}
+            // deprecated alias flags (see also aliased_jvm_flags):
+            {"DefaultMaxRAMFraction", "4"},
+            {"CreateMinidumpOnCrash", "false"}
+          }
+        ));
+        if (!Platform.isDebugBuild()) {
+            deprecated.add(new String[]{"UseHeavyMonitors", "false"});
+        }
+        DEPRECATED_OPTIONS = deprecated.toArray(new String[][]{});
     };

     static String getDeprecationString(String optionName) {
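The Platform.isDebugBuild() guard lines up with the #ifdef PRODUCT around the special_jvm_flags entry above: the deprecation warning for UseHeavyMonitors only exists in product builds (presumably so the flag stays freely usable for testing alongside the develop-only VerifyHeavyMonitors), so the test adds it to the expected-deprecated list only when the VM under test is not a debug build.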
@@ -45,6 +45,14 @@
  * @run main/timeout=1600 MapLoops
  */

+/*
+ * @test
+ * @summary Exercise multithreaded maps, using only heavy monitors.
+ * @requires os.arch=="x86" | os.arch=="i386" | os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch == "ppc64" | os.arch == "ppc64le"
+ * @library /test/lib
+ * @run main/othervm/timeout=1600 -XX:+IgnoreUnrecognizedVMOptions -XX:+UseHeavyMonitors -XX:+VerifyHeavyMonitors MapLoops
+ */
+
 import static java.util.concurrent.TimeUnit.MILLISECONDS;

 import java.util.List;