8229258: Rework markOop and markOopDesc into a simpler mark word value carrier

Reviewed-by: rkennke, coleenp, kbarrett, dcubed
Stefan Karlsson 2019-08-06 10:48:21 +02:00
parent f075a3278b
commit ae5615c614
132 changed files with 1215 additions and 1236 deletions
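The change is mechanical across all ports: the markOop pointer type and the markOopDesc "fake oop" class it pointed at are replaced by markWord, a value type that carries the raw header bits, and call sites that need the bits as an immediate now unwrap them with value(). A minimal sketch of the shape this implies — illustrative, not the verbatim declaration from markWord.hpp; the lock-bit constants are the existing HotSpot values:

    #include <cstdint>

    class markWord {
      uintptr_t _value;                        // raw header bits, held by value
     public:
      explicit markWord(uintptr_t v) : _value(v) {}
      uintptr_t value() const { return _value; }

      // Low two "lock" bits, unchanged by this commit:
      static const uintptr_t locked_value   = 0;  // 0b00  stack-locked
      static const uintptr_t unlocked_value = 1;  // 0b01  neutral
      static const uintptr_t monitor_value  = 2;  // 0b10  inflated monitor
      static const uintptr_t marked_value   = 3;  // 0b11  GC mark / forwarded

      static markWord prototype()   { return markWord(unlocked_value); } // neutral, no hash
      static markWord unused_mark() { return markWord(marked_value);   }
    };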

@@ -1771,7 +1771,7 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
__ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
__ bind(L_skip_barrier);
}
int bangsize = C->bang_size_in_bytes();
if (C->need_stack_bang(bangsize) && UseStackBanging)
__ generate_stack_overflow_check(bangsize);
@@ -3508,7 +3508,7 @@ encode %{
assert_different_registers(oop, box, tmp, disp_hdr);
- // Load markOop from object into displaced_header.
+ // Load markWord from object into displaced_header.
__ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));
if (UseBiasedLocking && !UseOptoBiasInlining) {
@@ -3516,17 +3516,17 @@ encode %{
}
// Check for existing monitor
- __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
+ __ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);
- // Set tmp to be (markOop of object | UNLOCK_VALUE).
- __ orr(tmp, disp_hdr, markOopDesc::unlocked_value);
+ // Set tmp to be (markWord of object | UNLOCK_VALUE).
+ __ orr(tmp, disp_hdr, markWord::unlocked_value);
// Initialize the box. (Must happen before we update the object mark!)
__ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
- // Compare object markOop with an unlocked value (tmp) and if
- // equal exchange the stack address of our box with object markOop.
- // On failure disp_hdr contains the possibly locked markOop.
+ // Compare object markWord with an unlocked value (tmp) and if
+ // equal exchange the stack address of our box with object markWord.
+ // On failure disp_hdr contains the possibly locked markWord.
__ cmpxchg(oop, tmp, box, Assembler::xword, /*acquire*/ true,
/*release*/ true, /*weak*/ false, disp_hdr);
__ br(Assembler::EQ, cont);
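What this fast path implements, as a hedged C++ sketch (the helper names and the cas() call are illustrative of the shape, not exact VM API): publish an "unlocked" copy of the mark in the on-stack BasicLock, then try to swing the object's mark word to the box address.

    markWord unlocked(obj_mark_bits | markWord::unlocked_value); // force 0b01
    box->displaced_header = unlocked.value();                    // init box first
    uintptr_t witness = cas(obj_mark_addr,
                            /*expected=*/unlocked.value(),
                            /*desired=*/(uintptr_t)box);
    bool locked = (witness == unlocked.value()); // on failure, witness holds the
                                                 // possibly-locked current mark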
@@ -3540,10 +3540,10 @@ encode %{
// We did not see an unlocked object so try the fast recursive case.
// Check if the owner is self by comparing the value in the
- // markOop of object (disp_hdr) with the stack pointer.
+ // markWord of object (disp_hdr) with the stack pointer.
__ mov(rscratch1, sp);
__ sub(disp_hdr, disp_hdr, rscratch1);
- __ mov(tmp, (address) (~(os::vm_page_size()-1) | (uintptr_t)markOopDesc::lock_mask_in_place));
+ __ mov(tmp, (address) (~(os::vm_page_size()-1) | (uintptr_t)markWord::lock_mask_in_place));
// If condition is true we are cont and hence we can store 0 as the
// displaced header in the box, which indicates that it is a recursive lock.
__ ands(tmp/*==0?*/, disp_hdr, tmp); // Sets flags for result
@@ -3558,15 +3558,15 @@ encode %{
// otherwise m->owner may contain a thread or a stack address.
//
// Try to CAS m->owner from NULL to current thread.
- __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
+ __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markWord::monitor_value));
__ cmpxchg(tmp, zr, rthread, Assembler::xword, /*acquire*/ true,
/*release*/ true, /*weak*/ false, noreg); // Sets flags for result
// Store a non-null value into the box to avoid looking like a re-entrant
// lock. The fast-path monitor unlock code checks for
- // markOopDesc::monitor_value so use markOopDesc::unused_mark which has the
+ // markWord::monitor_value so use markWord::unused_mark which has the
// relevant bit set, and also matches ObjectSynchronizer::slow_enter.
- __ mov(tmp, (address)markOopDesc::unused_mark());
+ __ mov(tmp, (address)markWord::unused_mark().value());
__ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
__ bind(cont);
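The last pair above is the one place in this hunk where the diff is more than a rename: markOopDesc::unused_mark() returned a pointer-typed markOop that could be cast straight to an address, while markWord::unused_mark() returns a value object, so the raw bits are unwrapped with .value(). Illustrative:

    uintptr_t bits = markWord::unused_mark().value(); // 0b11: both low bits set,
                                                      // so the monitor_value bit
                                                      // checks above still trip
    __ mov(tmp, (address)bits);                       // usable as an immediate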
@@ -3598,10 +3598,10 @@ encode %{
// Handle existing monitor.
__ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
- __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
+ __ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);
// Check if it is still a light weight lock, this is true if we
- // see the stack address of the basicLock in the markOop of the
+ // see the stack address of the basicLock in the markWord of the
// object.
__ cmpxchg(oop, box, disp_hdr, Assembler::xword, /*acquire*/ false,
@@ -3612,7 +3612,7 @@ encode %{
// Handle existing monitor.
__ bind(object_has_monitor);
- __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
+ __ add(tmp, tmp, -markWord::monitor_value); // monitor
__ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
__ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
__ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.

@@ -82,7 +82,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
// Load object header
ldr(hdr, Address(obj, hdr_offset));
// and mark it as unlocked
- orr(hdr, hdr, markOopDesc::unlocked_value);
+ orr(hdr, hdr, markWord::unlocked_value);
// save unlocked object header into the displaced header location on the stack
str(hdr, Address(disp_hdr, 0));
// test if object header is still the same (i.e. unlocked), and if so, store the
@@ -176,7 +176,7 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
ldr(t1, Address(klass, Klass::prototype_header_offset()));
} else {
// This assumes that all prototype bits fit in an int32_t
- mov(t1, (int32_t)(intptr_t)markOopDesc::prototype());
+ mov(t1, (int32_t)(intptr_t)markWord::prototype().value());
}
str(t1, Address(obj, oopDesc::mark_offset_in_bytes()));

@@ -242,9 +242,9 @@ void ShenandoahBarrierSetAssembler::resolve_forward_pointer_not_null(MacroAssemb
Label done;
__ ldr(tmp, Address(dst, oopDesc::mark_offset_in_bytes()));
__ eon(tmp, tmp, zr);
- __ ands(zr, tmp, markOopDesc::lock_mask_in_place);
+ __ ands(zr, tmp, markWord::lock_mask_in_place);
__ br(Assembler::NE, done);
- __ orr(tmp, tmp, markOopDesc::marked_value);
+ __ orr(tmp, tmp, markWord::marked_value);
__ eon(dst, tmp, zr);
__ bind(done);
@@ -548,11 +548,11 @@ void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assemble
Label slow_path;
__ ldr(tmp1, Address(res, oopDesc::mark_offset_in_bytes()));
__ eon(tmp1, tmp1, zr);
- __ ands(zr, tmp1, markOopDesc::lock_mask_in_place);
+ __ ands(zr, tmp1, markWord::lock_mask_in_place);
__ br(Assembler::NE, slow_path);
// Decode forwarded object.
- __ orr(tmp1, tmp1, markOopDesc::marked_value);
+ __ orr(tmp1, tmp1, markWord::marked_value);
__ eon(res, tmp1, zr);
__ b(*stub->continuation());
@@ -665,11 +665,11 @@ address ShenandoahBarrierSetAssembler::generate_shenandoah_lrb(StubCodeGenerator
Label slow_path;
__ ldr(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes()));
__ eon(rscratch1, rscratch1, zr);
- __ ands(zr, rscratch1, markOopDesc::lock_mask_in_place);
+ __ ands(zr, rscratch1, markWord::lock_mask_in_place);
__ br(Assembler::NE, slow_path);
// Decode forwarded object.
- __ orr(rscratch1, rscratch1, markOopDesc::marked_value);
+ __ orr(rscratch1, rscratch1, markWord::marked_value);
__ eon(r0, rscratch1, zr);
__ ret(lr);
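All three stubs above use the same invert-test-invert trick. In C++ terms (illustrative; Shenandoah encodes "forwarded" as both low mark bits set, i.e. markWord::marked_value):

    uintptr_t mark = load_mark(obj);        // load_mark is a stand-in helper
    uintptr_t inv  = ~mark;
    if (inv & markWord::lock_mask_in_place) {
      // at least one low bit clear => not forwarded, keep the original oop
    } else {
      uintptr_t fwd = ~(inv | markWord::marked_value); // == mark & ~0b11,
                                                       // the forwardee address
    }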

@@ -472,7 +472,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
counters = BiasedLocking::counters();
assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg, rscratch1, rscratch2, noreg);
- assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
+ assert(markWord::age_shift == markWord::lock_bits + markWord::biased_lock_bits, "biased locking makes assumptions about bit layout");
Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes());
Address klass_addr (obj_reg, oopDesc::klass_offset_in_bytes());
Address saved_mark_addr(lock_reg, 0);
@@ -489,15 +489,15 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
null_check_offset = offset();
ldr(swap_reg, mark_addr);
}
- andr(tmp_reg, swap_reg, markOopDesc::biased_lock_mask_in_place);
- cmp(tmp_reg, (u1)markOopDesc::biased_lock_pattern);
+ andr(tmp_reg, swap_reg, markWord::biased_lock_mask_in_place);
+ cmp(tmp_reg, (u1)markWord::biased_lock_pattern);
br(Assembler::NE, cas_label);
// The bias pattern is present in the object's header. Need to check
// whether the bias owner and the epoch are both still current.
load_prototype_header(tmp_reg, obj_reg);
orr(tmp_reg, tmp_reg, rthread);
eor(tmp_reg, swap_reg, tmp_reg);
- andr(tmp_reg, tmp_reg, ~((int) markOopDesc::age_mask_in_place));
+ andr(tmp_reg, tmp_reg, ~((int) markWord::age_mask_in_place));
if (counters != NULL) {
Label around;
cbnz(tmp_reg, around);
@@ -520,7 +520,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
// If the low three bits in the xor result aren't clear, that means
// the prototype header is no longer biased and we have to revoke
// the bias on this object.
- andr(rscratch1, tmp_reg, markOopDesc::biased_lock_mask_in_place);
+ andr(rscratch1, tmp_reg, markWord::biased_lock_mask_in_place);
cbnz(rscratch1, try_revoke_bias);
// Biasing is still enabled for this data type. See whether the
@@ -532,7 +532,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
// that the current epoch is invalid in order to do this because
// otherwise the manipulations it performs on the mark word are
// illegal.
- andr(rscratch1, tmp_reg, markOopDesc::epoch_mask_in_place);
+ andr(rscratch1, tmp_reg, markWord::epoch_mask_in_place);
cbnz(rscratch1, try_rebias);
// The epoch of the current bias is still valid but we know nothing
@@ -543,7 +543,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
// don't accidentally blow away another thread's valid bias.
{
Label here;
- mov(rscratch1, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
+ mov(rscratch1, markWord::biased_lock_mask_in_place | markWord::age_mask_in_place | markWord::epoch_mask_in_place);
andr(swap_reg, swap_reg, rscratch1);
orr(tmp_reg, swap_reg, rthread);
cmpxchg_obj_header(swap_reg, tmp_reg, obj_reg, rscratch1, here, slow_case);
@@ -628,8 +628,8 @@ void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, La
// lock, the object could not be rebiased toward another thread, so
// the bias bit would be clear.
ldr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
- andr(temp_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);
- cmp(temp_reg, (u1)markOopDesc::biased_lock_pattern);
+ andr(temp_reg, temp_reg, markWord::biased_lock_mask_in_place);
+ cmp(temp_reg, (u1)markWord::biased_lock_pattern);
br(Assembler::EQ, done);
}
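The assert above (age_shift == lock_bits + biased_lock_bits) pins down the mark word layout, which this commit keeps bit-for-bit. For reference, the 64-bit layout from the mark word header (normal and biased forms), with the derived constants the hunks below keep relying on:

    //  [unused:25 | hash:31 | unused_gap:1 | age:4 | biased_lock:1 | lock:2]   normal
    //  [JavaThread* (54)          | epoch:2 | age:4 | biased_lock:1 | lock:2]   biased
    //  lock_bits = 2, biased_lock_bits = 1   =>  age_shift == 3 (the assert)
    //  biased_lock_pattern = 0b101 (biased_lock bit set, lock bits 01)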

@@ -3615,7 +3615,7 @@ void TemplateTable::_new() {
if (UseBiasedLocking) {
__ ldr(rscratch1, Address(r4, Klass::prototype_header_offset()));
} else {
- __ mov(rscratch1, (intptr_t)markOopDesc::prototype());
+ __ mov(rscratch1, (intptr_t)markWord::prototype().value());
}
__ str(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes()));
__ store_klass_gap(r0, zr); // zero klass gap for compressed oops
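Both branches of _new() store a neutral header into the freshly allocated object; the biased-locking branch loads the klass's prototype header (which may carry a bias pattern), the plain branch materializes the constant. A hedged C++ equivalent (accessor names as in the post-change VM, used illustratively):

    obj->set_mark(UseBiasedLocking ? klass->prototype_header() // possibly biased
                                   : markWord::prototype());   // plain 0b01 mark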

@@ -92,7 +92,7 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
if(UseBiasedLocking && !len->is_valid()) {
ldr(tmp, Address(klass, Klass::prototype_header_offset()));
} else {
- mov(tmp, (intptr_t)markOopDesc::prototype());
+ mov(tmp, (intptr_t)markWord::prototype().value());
}
str(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
@@ -219,7 +219,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj,
ldr(hdr, Address(obj, oopDesc::mark_offset_in_bytes()));
str(obj, Address(disp_hdr, obj_offset));
- tst(hdr, markOopDesc::unlocked_value);
+ tst(hdr, markWord::unlocked_value);
b(fast_lock, ne);
// Check for recursive locking

@@ -878,7 +878,7 @@ void InterpreterMacroAssembler::lock_object(Register Rlock) {
ldr(Rmark, Address(Robj, oopDesc::mark_offset_in_bytes()));
// Test if object is already locked
- tst(Rmark, markOopDesc::unlocked_value);
+ tst(Rmark, markWord::unlocked_value);
b(already_locked, eq);
// Save old object->mark() into BasicLock's displaced header

@@ -1345,7 +1345,7 @@ int MacroAssembler::biased_locking_enter(Register obj_reg, Register swap_reg, Re
}
#endif
- assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
+ assert(markWord::age_shift == markWord::lock_bits + markWord::biased_lock_bits, "biased locking makes assumptions about bit layout");
Address mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());
// Biased locking
@@ -1367,8 +1367,8 @@ int MacroAssembler::biased_locking_enter(Register obj_reg, Register swap_reg, Re
// On MP platform loads could return 'stale' values in some cases.
// That is acceptable since either CAS or slow case path is taken in the worst case.
- andr(tmp_reg, swap_reg, (uintx)markOopDesc::biased_lock_mask_in_place);
- cmp(tmp_reg, markOopDesc::biased_lock_pattern);
+ andr(tmp_reg, swap_reg, (uintx)markWord::biased_lock_mask_in_place);
+ cmp(tmp_reg, markWord::biased_lock_pattern);
b(cas_label, ne);
@@ -1379,7 +1379,7 @@ int MacroAssembler::biased_locking_enter(Register obj_reg, Register swap_reg, Re
orr(tmp_reg, tmp_reg, Rthread);
eor(tmp_reg, tmp_reg, swap_reg);
- bics(tmp_reg, tmp_reg, ((int) markOopDesc::age_mask_in_place));
+ bics(tmp_reg, tmp_reg, ((int) markWord::age_mask_in_place));
#ifndef PRODUCT
if (counters != NULL) {
@@ -1401,7 +1401,7 @@ int MacroAssembler::biased_locking_enter(Register obj_reg, Register swap_reg, Re
// If the low three bits in the xor result aren't clear, that means
// the prototype header is no longer biased and we have to revoke
// the bias on this object.
- tst(tmp_reg, (uintx)markOopDesc::biased_lock_mask_in_place);
+ tst(tmp_reg, (uintx)markWord::biased_lock_mask_in_place);
b(try_revoke_bias, ne);
// Biasing is still enabled for this data type. See whether the
@@ -1413,7 +1413,7 @@ int MacroAssembler::biased_locking_enter(Register obj_reg, Register swap_reg, Re
// that the current epoch is invalid in order to do this because
// otherwise the manipulations it performs on the mark word are
// illegal.
- tst(tmp_reg, (uintx)markOopDesc::epoch_mask_in_place);
+ tst(tmp_reg, (uintx)markWord::epoch_mask_in_place);
b(try_rebias, ne);
// tmp_reg has the age, epoch and pattern bits cleared
@@ -1431,10 +1431,10 @@ int MacroAssembler::biased_locking_enter(Register obj_reg, Register swap_reg, Re
// until the assembler can be made smarter, we need to make some assumptions about the values
// so we can optimize this:
- assert((markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place) == 0x1ff, "biased bitmasks changed");
+ assert((markWord::biased_lock_mask_in_place | markWord::age_mask_in_place | markWord::epoch_mask_in_place) == 0x1ff, "biased bitmasks changed");
mov(swap_reg, AsmOperand(swap_reg, lsl, 23));
- mov(swap_reg, AsmOperand(swap_reg, lsr, 23)); // markOop with thread bits cleared (for CAS)
+ mov(swap_reg, AsmOperand(swap_reg, lsr, 23)); // markWord with thread bits cleared (for CAS)
orr(tmp_reg, swap_reg, Rthread); // new mark
@@ -1519,8 +1519,8 @@ void MacroAssembler::biased_locking_exit(Register obj_reg, Register tmp_reg, Lab
// the bias bit would be clear.
ldr(tmp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
- andr(tmp_reg, tmp_reg, (uintx)markOopDesc::biased_lock_mask_in_place);
- cmp(tmp_reg, markOopDesc::biased_lock_pattern);
+ andr(tmp_reg, tmp_reg, (uintx)markWord::biased_lock_mask_in_place);
+ cmp(tmp_reg, markWord::biased_lock_pattern);
b(done, eq);
}
@@ -1993,7 +1993,7 @@ void MacroAssembler::fast_lock(Register Roop, Register Rbox, Register Rscratch,
// Invariant: Rmark loaded below does not contain biased lock pattern
ldr(Rmark, Address(Roop, oopDesc::mark_offset_in_bytes()));
- tst(Rmark, markOopDesc::unlocked_value);
+ tst(Rmark, markWord::unlocked_value);
b(fast_lock, ne);
// Check for recursive lock

@@ -861,16 +861,16 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ ldr(Rtemp, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
- assert(markOopDesc::unlocked_value == 1, "adjust this code");
- __ tbz(Rtemp, exact_log2(markOopDesc::unlocked_value), slow_case);
+ assert(markWord::unlocked_value == 1, "adjust this code");
+ __ tbz(Rtemp, exact_log2(markWord::unlocked_value), slow_case);
if (UseBiasedLocking) {
- assert(is_power_of_2(markOopDesc::biased_lock_bit_in_place), "adjust this code");
- __ tbnz(Rtemp, exact_log2(markOopDesc::biased_lock_bit_in_place), slow_case);
+ assert(is_power_of_2(markWord::biased_lock_bit_in_place), "adjust this code");
+ __ tbnz(Rtemp, exact_log2(markWord::biased_lock_bit_in_place), slow_case);
}
- __ bics(Rtemp, Rtemp, ~markOopDesc::hash_mask_in_place);
- __ mov(R0, AsmOperand(Rtemp, lsr, markOopDesc::hash_shift), ne);
+ __ bics(Rtemp, Rtemp, ~markWord::hash_mask_in_place);
+ __ mov(R0, AsmOperand(Rtemp, lsr, markWord::hash_shift), ne);
__ bx(LR, ne);
__ bind(slow_case);
@@ -1172,7 +1172,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ ldr(mark, Address(sync_obj, oopDesc::mark_offset_in_bytes()));
__ sub(disp_hdr, FP, lock_slot_fp_offset);
- __ tst(mark, markOopDesc::unlocked_value);
+ __ tst(mark, markWord::unlocked_value);
__ b(fast_lock, ne);
// Check for recursive lock

@@ -4045,7 +4045,7 @@ void TemplateTable::_new() {
if (UseBiasedLocking) {
__ ldr(Rtemp, Address(Rklass, Klass::prototype_header_offset()));
} else {
- __ mov_slow(Rtemp, (intptr_t)markOopDesc::prototype());
+ __ mov_slow(Rtemp, (intptr_t)markWord::prototype().value());
}
// mark
__ str(Rtemp, Address(Robj, oopDesc::mark_offset_in_bytes()));

@@ -110,12 +110,12 @@ void C1_MacroAssembler::lock_object(Register Rmark, Register Roop, Register Rbox
}
// ... and mark it unlocked.
- ori(Rmark, Rmark, markOopDesc::unlocked_value);
+ ori(Rmark, Rmark, markWord::unlocked_value);
// Save unlocked object header into the displaced header location on the stack.
std(Rmark, BasicLock::displaced_header_offset_in_bytes(), Rbox);
- // Compare object markOop with Rmark and if equal exchange Rscratch with object markOop.
+ // Compare object markWord with Rmark and if equal exchange Rscratch with object markWord.
assert(oopDesc::mark_offset_in_bytes() == 0, "cas must take a zero displacement");
cmpxchgd(/*flag=*/CCR0,
/*current_value=*/Rscratch,
@@ -137,7 +137,7 @@ void C1_MacroAssembler::lock_object(Register Rmark, Register Roop, Register Rbox
bind(cas_failed);
// We did not find an unlocked object so see if this is a recursive case.
sub(Rscratch, Rscratch, R1_SP);
- load_const_optimized(R0, (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
+ load_const_optimized(R0, (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
and_(R0/*==0?*/, Rscratch, R0);
std(R0/*==0, perhaps*/, BasicLock::displaced_header_offset_in_bytes(), Rbox);
bne(CCR0, slow_int);
@@ -171,7 +171,7 @@ void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rb
}
// Check if it is still a light weight lock, this is true if we see
- // the stack address of the basicLock in the markOop of the object.
+ // the stack address of the basicLock in the markWord of the object.
cmpxchgd(/*flag=*/CCR0,
/*current_value=*/R0,
/*compare_value=*/Rbox,
@@ -215,7 +215,7 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
if (UseBiasedLocking && !len->is_valid()) {
ld(t1, in_bytes(Klass::prototype_header_offset()), klass);
} else {
- load_const_optimized(t1, (intx)markOopDesc::prototype());
+ load_const_optimized(t1, (intx)markWord::prototype().value());
}
std(t1, oopDesc::mark_offset_in_bytes(), obj);
store_klass(obj, klass);

@@ -881,7 +881,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
} else {
// template code:
//
- // markOop displaced_header = obj->mark().set_unlocked();
+ // markWord displaced_header = obj->mark().set_unlocked();
// monitor->lock()->set_displaced_header(displaced_header);
// if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
// // We stored the monitor address into the object's mark word.
@@ -903,17 +903,17 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
assert_different_registers(displaced_header, object_mark_addr, current_header, tmp);
- // markOop displaced_header = obj->mark().set_unlocked();
+ // markWord displaced_header = obj->mark().set_unlocked();
- // Load markOop from object into displaced_header.
+ // Load markWord from object into displaced_header.
ld(displaced_header, oopDesc::mark_offset_in_bytes(), object);
if (UseBiasedLocking) {
biased_locking_enter(CCR0, object, displaced_header, tmp, current_header, done, &slow_case);
}
- // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
- ori(displaced_header, displaced_header, markOopDesc::unlocked_value);
+ // Set displaced_header to be (markWord of object | UNLOCK_VALUE).
+ ori(displaced_header, displaced_header, markWord::unlocked_value);
// monitor->lock()->set_displaced_header(displaced_header);
@@ -949,12 +949,12 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
// We did not see an unlocked object so try the fast recursive case.
- // Check if owner is self by comparing the value in the markOop of object
+ // Check if owner is self by comparing the value in the markWord of object
// (current_header) with the stack pointer.
sub(current_header, current_header, R1_SP);
assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
- load_const_optimized(tmp, ~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place);
+ load_const_optimized(tmp, ~(os::vm_page_size()-1) | markWord::lock_mask_in_place);
and_(R0/*==0?*/, current_header, tmp);
// If condition is true we are done and hence we can store 0 in the displaced

@@ -2078,7 +2078,7 @@ void MacroAssembler::biased_locking_enter(ConditionRegister cr_reg, Register obj
// whether the epoch is still valid
// Note that the runtime guarantees sufficient alignment of JavaThread
// pointers to allow age to be placed into low bits
- assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits,
+ assert(markWord::age_shift == markWord::lock_bits + markWord::biased_lock_bits,
"biased locking makes assumptions about bit layout");
if (PrintBiasedLockingStatistics) {
@@ -2088,13 +2088,13 @@ void MacroAssembler::biased_locking_enter(ConditionRegister cr_reg, Register obj
stwx(temp_reg, temp2_reg);
}
- andi(temp_reg, mark_reg, markOopDesc::biased_lock_mask_in_place);
- cmpwi(cr_reg, temp_reg, markOopDesc::biased_lock_pattern);
+ andi(temp_reg, mark_reg, markWord::biased_lock_mask_in_place);
+ cmpwi(cr_reg, temp_reg, markWord::biased_lock_pattern);
bne(cr_reg, cas_label);
load_klass(temp_reg, obj_reg);
- load_const_optimized(temp2_reg, ~((int) markOopDesc::age_mask_in_place));
+ load_const_optimized(temp2_reg, ~((int) markWord::age_mask_in_place));
ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg);
orr(temp_reg, R16_thread, temp_reg);
xorr(temp_reg, mark_reg, temp_reg);
@@ -2125,7 +2125,7 @@ void MacroAssembler::biased_locking_enter(ConditionRegister cr_reg, Register obj
// If the low three bits in the xor result aren't clear, that means
// the prototype header is no longer biased and we have to revoke
// the bias on this object.
- andi(temp2_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);
+ andi(temp2_reg, temp_reg, markWord::biased_lock_mask_in_place);
cmpwi(cr_reg, temp2_reg, 0);
bne(cr_reg, try_revoke_bias);
@@ -2139,10 +2139,10 @@ void MacroAssembler::biased_locking_enter(ConditionRegister cr_reg, Register obj
// otherwise the manipulations it performs on the mark word are
// illegal.
- int shift_amount = 64 - markOopDesc::epoch_shift;
+ int shift_amount = 64 - markWord::epoch_shift;
// rotate epoch bits to right (little) end and set other bits to 0
// [ big part | epoch | little part ] -> [ 0..0 | epoch ]
- rldicl_(temp2_reg, temp_reg, shift_amount, 64 - markOopDesc::epoch_bits);
+ rldicl_(temp2_reg, temp_reg, shift_amount, 64 - markWord::epoch_bits);
// branch if epoch bits are != 0, i.e. they differ, because the epoch has been incremented
bne(CCR0, try_rebias);
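The rotate-then-mask idiom above isolates the epoch field of the xor result. Equivalent C++ (illustrative):

    // rldicl_(dst, src, 64 - epoch_shift, 64 - epoch_bits) computes:
    uintptr_t epoch_diff = (xor_result >> markWord::epoch_shift) &
                           (((uintptr_t)1 << markWord::epoch_bits) - 1);
    // nonzero => the stored bias epoch differs from the klass's current
    // epoch, so the bias is stale and try_rebias is taken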
@@ -2152,9 +2152,9 @@ void MacroAssembler::biased_locking_enter(ConditionRegister cr_reg, Register obj
// fails we will go in to the runtime to revoke the object's bias.
// Note that we first construct the presumed unbiased header so we
// don't accidentally blow away another thread's valid bias.
- andi(mark_reg, mark_reg, (markOopDesc::biased_lock_mask_in_place |
- markOopDesc::age_mask_in_place |
- markOopDesc::epoch_mask_in_place));
+ andi(mark_reg, mark_reg, (markWord::biased_lock_mask_in_place |
+ markWord::age_mask_in_place |
+ markWord::epoch_mask_in_place));
orr(temp_reg, R16_thread, mark_reg);
assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
@@ -2187,7 +2187,7 @@ void MacroAssembler::biased_locking_enter(ConditionRegister cr_reg, Register obj
// bias in the current epoch. In other words, we allow transfer of
// the bias from one thread to another directly in this situation.
load_klass(temp_reg, obj_reg);
- andi(temp2_reg, mark_reg, markOopDesc::age_mask_in_place);
+ andi(temp2_reg, mark_reg, markWord::age_mask_in_place);
orr(temp2_reg, R16_thread, temp2_reg);
ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg);
orr(temp_reg, temp2_reg, temp_reg);
@@ -2224,7 +2224,7 @@ void MacroAssembler::biased_locking_enter(ConditionRegister cr_reg, Register obj
// normal locking code.
load_klass(temp_reg, obj_reg);
ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg);
- andi(temp2_reg, mark_reg, markOopDesc::age_mask_in_place);
+ andi(temp2_reg, mark_reg, markWord::age_mask_in_place);
orr(temp_reg, temp_reg, temp2_reg);
assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
@@ -2236,7 +2236,7 @@ void MacroAssembler::biased_locking_enter(ConditionRegister cr_reg, Register obj
MacroAssembler::MemBarAcq,
MacroAssembler::cmpxchgx_hint_acquire_lock());
- // reload markOop in mark_reg before continuing with lightweight locking
+ // reload markWord in mark_reg before continuing with lightweight locking
ld(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg);
// Fall through to the normal CAS-based lock, because no matter what
@@ -2264,9 +2264,9 @@ void MacroAssembler::biased_locking_exit (ConditionRegister cr_reg, Register mar
// the bias bit would be clear.
ld(temp_reg, 0, mark_addr);
- andi(temp_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);
+ andi(temp_reg, temp_reg, markWord::biased_lock_mask_in_place);
- cmpwi(cr_reg, temp_reg, markOopDesc::biased_lock_pattern);
+ cmpwi(cr_reg, temp_reg, markWord::biased_lock_pattern);
beq(cr_reg, done);
}
@@ -2687,7 +2687,7 @@ void MacroAssembler::rtm_stack_locking(ConditionRegister flag,
load_const_optimized(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort
bind(L_rtm_retry);
}
- andi_(R0, mark_word, markOopDesc::monitor_value); // inflated vs stack-locked|neutral|biased
+ andi_(R0, mark_word, markWord::monitor_value); // inflated vs stack-locked|neutral|biased
bne(CCR0, IsInflated);
if (PrintPreciseRTMLockingStatistics || profile_rtm) {
@@ -2705,10 +2705,10 @@ void MacroAssembler::rtm_stack_locking(ConditionRegister flag,
}
tbegin_();
beq(CCR0, L_on_abort);
- ld(mark_word, oopDesc::mark_offset_in_bytes(), obj); // Reload in transaction, conflicts need to be tracked.
- andi(R0, mark_word, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
- cmpwi(flag, R0, markOopDesc::unlocked_value); // bits = 001 unlocked
- beq(flag, DONE_LABEL); // all done if unlocked
+ ld(mark_word, oopDesc::mark_offset_in_bytes(), obj); // Reload in transaction, conflicts need to be tracked.
+ andi(R0, mark_word, markWord::biased_lock_mask_in_place); // look at 3 lock bits
+ cmpwi(flag, R0, markWord::unlocked_value); // bits = 001 unlocked
+ beq(flag, DONE_LABEL); // all done if unlocked
if (UseRTMXendForLockBusy) {
tend_();
@@ -2744,9 +2744,9 @@ void MacroAssembler::rtm_inflated_locking(ConditionRegister flag,
assert(UseRTMLocking, "why call this otherwise?");
Label L_rtm_retry, L_decrement_retry, L_on_abort;
// Clean monitor_value bit to get valid pointer.
- int owner_offset = ObjectMonitor::owner_offset_in_bytes() - markOopDesc::monitor_value;
+ int owner_offset = ObjectMonitor::owner_offset_in_bytes() - markWord::monitor_value;
- // Store non-null, using boxReg instead of (intptr_t)markOopDesc::unused_mark().
+ // Store non-null, using boxReg instead of (intptr_t)markWord::unused_mark().
std(boxReg, BasicLock::displaced_header_offset_in_bytes(), boxReg);
const Register tmpReg = boxReg;
const Register owner_addr_Reg = mark_word;
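The owner_offset arithmetic works because an inflated object's mark word is the ObjectMonitor address tagged with markWord::monitor_value (0b10): subtracting the tag and adding the field offset in a single displacement addresses m->owner directly. Illustrative:

    ObjectMonitor* m = (ObjectMonitor*)(mark_bits - markWord::monitor_value);
    // equivalently, the owner field lives at mark_bits
    //   + (ObjectMonitor::owner_offset_in_bytes() - markWord::monitor_value)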
@@ -2791,7 +2791,7 @@ void MacroAssembler::rtm_inflated_locking(ConditionRegister flag,
// Restore owner_addr_Reg
ld(mark_word, oopDesc::mark_offset_in_bytes(), obj);
#ifdef ASSERT
- andi_(R0, mark_word, markOopDesc::monitor_value);
+ andi_(R0, mark_word, markWord::monitor_value);
asm_assert_ne("must be inflated", 0xa754); // Deflating only allowed at safepoint.
#endif
addi(owner_addr_Reg, mark_word, owner_offset);
@@ -2833,7 +2833,7 @@ void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register
Label object_has_monitor;
Label cas_failed;
- // Load markOop from object into displaced_header.
+ // Load markWord from object into displaced_header.
ld(displaced_header, oopDesc::mark_offset_in_bytes(), oop);
@@ -2851,11 +2851,11 @@ void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register
// Handle existing monitor.
// The object has an existing monitor iff (mark & monitor_value) != 0.
- andi_(temp, displaced_header, markOopDesc::monitor_value);
+ andi_(temp, displaced_header, markWord::monitor_value);
bne(CCR0, object_has_monitor);
- // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
- ori(displaced_header, displaced_header, markOopDesc::unlocked_value);
+ // Set displaced_header to be (markWord of object | UNLOCK_VALUE).
+ ori(displaced_header, displaced_header, markWord::unlocked_value);
// Load Compare Value application register.
@@ -2863,7 +2863,7 @@ void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register
std(displaced_header, BasicLock::displaced_header_offset_in_bytes(), box);
// Must fence, otherwise, preceding store(s) may float below cmpxchg.
- // Compare object markOop with mark and if equal exchange scratch1 with object markOop.
+ // Compare object markWord with mark and if equal exchange scratch1 with object markWord.
cmpxchgd(/*flag=*/flag,
/*current_value=*/current_header,
/*compare_value=*/displaced_header,
@@ -2883,10 +2883,10 @@ void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register
bind(cas_failed);
// We did not see an unlocked object so try the fast recursive case.
- // Check if the owner is self by comparing the value in the markOop of object
+ // Check if the owner is self by comparing the value in the markWord of object
// (current_header) with the stack pointer.
sub(current_header, current_header, R1_SP);
- load_const_optimized(temp, ~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place);
+ load_const_optimized(temp, ~(os::vm_page_size()-1) | markWord::lock_mask_in_place);
and_(R0/*==0?*/, current_header, temp);
// If condition is true we are cont and hence we can store 0 as the
@@ -2910,7 +2910,7 @@ void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register
#endif // INCLUDE_RTM_OPT
// Try to CAS m->owner from NULL to current thread.
- addi(temp, displaced_header, ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value);
+ addi(temp, displaced_header, ObjectMonitor::owner_offset_in_bytes()-markWord::monitor_value);
cmpxchgd(/*flag=*/flag,
/*current_value=*/current_header,
/*compare_value=*/(intptr_t)0,
@@ -2957,12 +2957,12 @@ void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Registe
if (UseRTMForStackLocks && use_rtm) {
assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking");
Label L_regular_unlock;
- ld(current_header, oopDesc::mark_offset_in_bytes(), oop); // fetch markword
- andi(R0, current_header, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
- cmpwi(flag, R0, markOopDesc::unlocked_value); // bits = 001 unlocked
- bne(flag, L_regular_unlock); // else RegularLock
- tend_(); // otherwise end...
- b(cont); // ... and we're done
+ ld(current_header, oopDesc::mark_offset_in_bytes(), oop); // fetch markword
+ andi(R0, current_header, markWord::biased_lock_mask_in_place); // look at 3 lock bits
+ cmpwi(flag, R0, markWord::unlocked_value); // bits = 001 unlocked
+ bne(flag, L_regular_unlock); // else RegularLock
+ tend_(); // otherwise end...
+ b(cont); // ... and we're done
bind(L_regular_unlock);
}
#endif
@@ -2978,11 +2978,11 @@ void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Registe
// The object has an existing monitor iff (mark & monitor_value) != 0.
RTM_OPT_ONLY( if (!(UseRTMForStackLocks && use_rtm)) ) // skip load if already done
ld(current_header, oopDesc::mark_offset_in_bytes(), oop);
- andi_(R0, current_header, markOopDesc::monitor_value);
+ andi_(R0, current_header, markWord::monitor_value);
bne(CCR0, object_has_monitor);
// Check if it is still a light weight lock, this is true if we see
- // the stack address of the basicLock in the markOop of the object.
+ // the stack address of the basicLock in the markWord of the object.
// Cmpxchg sets flag to cmpd(current_header, box).
cmpxchgd(/*flag=*/flag,
/*current_value=*/current_header,
@@ -3000,7 +3000,7 @@ void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Registe
b(cont);
bind(object_has_monitor);
- addi(current_header, current_header, -markOopDesc::monitor_value); // monitor
+ addi(current_header, current_header, -markWord::monitor_value); // monitor
ld(temp, ObjectMonitor::owner_offset_in_bytes(), current_header);
// It's inflated.

@@ -3820,7 +3820,7 @@ void TemplateTable::_new() {
if (UseBiasedLocking) {
__ ld(Rscratch, in_bytes(Klass::prototype_header_offset()), RinstanceKlass);
} else {
- __ load_const_optimized(Rscratch, markOopDesc::prototype(), R0);
+ __ load_const_optimized(Rscratch, markWord::prototype().value(), R0);
}
__ std(Rscratch, oopDesc::mark_offset_in_bytes(), RallocatedObject);

@@ -96,7 +96,7 @@ void C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hd
}
// and mark it as unlocked.
- z_oill(hdr, markOopDesc::unlocked_value);
+ z_oill(hdr, markWord::unlocked_value);
// Save unlocked object header into the displaced header location on the stack.
z_stg(hdr, Address(disp_hdr, (intptr_t)0));
// Test if object header is still the same (i.e. unlocked), and if so, store the
@@ -115,19 +115,19 @@ void C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hd
// If the object header was not the same, it is now in the hdr register.
// => Test if it is a stack pointer into the same stack (recursive locking), i.e.:
//
- // 1) (hdr & markOopDesc::lock_mask_in_place) == 0
+ // 1) (hdr & markWord::lock_mask_in_place) == 0
// 2) rsp <= hdr
// 3) hdr <= rsp + page_size
//
// These 3 tests can be done by evaluating the following expression:
//
- // (hdr - Z_SP) & (~(page_size-1) | markOopDesc::lock_mask_in_place)
+ // (hdr - Z_SP) & (~(page_size-1) | markWord::lock_mask_in_place)
//
// assuming both the stack pointer and page_size have their least
// significant 2 bits cleared and page_size is a power of 2
z_sgr(hdr, Z_SP);
- load_const_optimized(Z_R0_scratch, (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
+ load_const_optimized(Z_R0_scratch, (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
z_ngr(hdr, Z_R0_scratch); // AND sets CC (result eq/ne 0).
// For recursive locking, the result is zero. => Save it in the displaced header
// location (NULL in the displaced hdr location indicates recursive locking).
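A worked instance of the expression above, assuming the usual 4 KiB logical page (mask 0xfff) on a 64-bit VM:

    uintptr_t diff = hdr - sp;
    bool recursive =
        (diff & (~(uintptr_t)0xfff | markWord::lock_mask_in_place)) == 0;
    // zero iff the low lock bits are clear (test 1) and 0 <= diff < 4096
    // (tests 2 and 3): hdr is a stack-lock slot in our own frame window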
@@ -192,7 +192,7 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
z_lg(t1, Address(klass, Klass::prototype_header_offset()));
} else {
// This assumes that all prototype bits fit in an int32_t.
- load_const_optimized(t1, (intx)markOopDesc::prototype());
+ load_const_optimized(t1, (intx)markWord::prototype().value());
}
z_stg(t1, Address(obj, oopDesc::mark_offset_in_bytes()));

@@ -41,14 +41,14 @@
void initialize_body(Register objectFields, Register len_in_bytes, Register Rzero);
// locking
- // hdr : Used to hold locked markOop to be CASed into obj, contents destroyed.
+ // hdr : Used to hold locked markWord to be CASed into obj, contents destroyed.
// obj : Must point to the object to lock, contents preserved.
// disp_hdr: Must point to the displaced header location, contents preserved.
// Returns code offset at which to add null check debug information.
void lock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case);
// unlocking
- // hdr : Used to hold original markOop to be CASed back into obj, contents destroyed.
+ // hdr : Used to hold original markWord to be CASed back into obj, contents destroyed.
// obj : Must point to the object to lock, contents preserved.
// disp_hdr: Must point to the displaced header location, contents destroyed.
void unlock_object(Register hdr, Register obj, Register lock, Label& slow_case);

@@ -974,7 +974,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
// template code:
//
- // markOop displaced_header = obj->mark().set_unlocked();
+ // markWord displaced_header = obj->mark().set_unlocked();
// monitor->lock()->set_displaced_header(displaced_header);
// if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
// // We stored the monitor address into the object's mark word.
@@ -993,17 +993,17 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
NearLabel done;
NearLabel slow_case;
- // markOop displaced_header = obj->mark().set_unlocked();
+ // markWord displaced_header = obj->mark().set_unlocked();
- // Load markOop from object into displaced_header.
+ // Load markWord from object into displaced_header.
z_lg(displaced_header, oopDesc::mark_offset_in_bytes(), object);
if (UseBiasedLocking) {
biased_locking_enter(object, displaced_header, Z_R1, Z_R0, done, &slow_case);
}
- // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
- z_oill(displaced_header, markOopDesc::unlocked_value);
+ // Set displaced_header to be (markWord of object | UNLOCK_VALUE).
+ z_oill(displaced_header, markWord::unlocked_value);
// monitor->lock()->set_displaced_header(displaced_header);
@@ -1027,7 +1027,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
// We did not see an unlocked object so try the fast recursive case.
- // Check if owner is self by comparing the value in the markOop of object
+ // Check if owner is self by comparing the value in the markWord of object
// (current_header) with the stack pointer.
z_sgr(current_header, Z_SP);
@@ -1035,7 +1035,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
// The prior sequence "LGR, NGR, LTGR" can be done better
// (Z_R1 is temp and not used after here).
- load_const_optimized(Z_R0, (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
+ load_const_optimized(Z_R0, (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
z_ngr(Z_R0, current_header); // AND sets CC (result eq/ne 0)
// If condition is true we are done and hence we can store 0 in the displaced

@@ -3198,15 +3198,15 @@ void MacroAssembler::biased_locking_enter(Register obj_reg,
// whether the epoch is still valid.
// Note that the runtime guarantees sufficient alignment of JavaThread
// pointers to allow age to be placed into low bits.
- assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits,
+ assert(markWord::age_shift == markWord::lock_bits + markWord::biased_lock_bits,
"biased locking makes assumptions about bit layout");
z_lr(temp_reg, mark_reg);
- z_nilf(temp_reg, markOopDesc::biased_lock_mask_in_place);
- z_chi(temp_reg, markOopDesc::biased_lock_pattern);
+ z_nilf(temp_reg, markWord::biased_lock_mask_in_place);
+ z_chi(temp_reg, markWord::biased_lock_pattern);
z_brne(cas_label); // Try cas if object is not biased, i.e. cannot be biased locked.
load_prototype_header(temp_reg, obj_reg);
- load_const_optimized(temp2_reg, ~((int) markOopDesc::age_mask_in_place));
+ load_const_optimized(temp2_reg, ~((int) markWord::age_mask_in_place));
z_ogr(temp_reg, Z_thread);
z_xgr(temp_reg, mark_reg);
@@ -3232,7 +3232,7 @@ void MacroAssembler::biased_locking_enter(Register obj_reg,
// If the low three bits in the xor result aren't clear, that means
// the prototype header is no longer biased and we have to revoke
// the bias on this object.
- z_tmll(temp_reg, markOopDesc::biased_lock_mask_in_place);
+ z_tmll(temp_reg, markWord::biased_lock_mask_in_place);
z_brnaz(try_revoke_bias);
// Biasing is still enabled for this data type. See whether the
@@ -3244,7 +3244,7 @@ void MacroAssembler::biased_locking_enter(Register obj_reg,
// that the current epoch is invalid in order to do this because
// otherwise the manipulations it performs on the mark word are
// illegal.
- z_tmll(temp_reg, markOopDesc::epoch_mask_in_place);
+ z_tmll(temp_reg, markWord::epoch_mask_in_place);
z_brnaz(try_rebias);
//----------------------------------------------------------------------------
@@ -3254,8 +3254,8 @@ void MacroAssembler::biased_locking_enter(Register obj_reg,
// fails we will go in to the runtime to revoke the object's bias.
// Note that we first construct the presumed unbiased header so we
// don't accidentally blow away another thread's valid bias.
- z_nilf(mark_reg, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place |
- markOopDesc::epoch_mask_in_place);
+ z_nilf(mark_reg, markWord::biased_lock_mask_in_place | markWord::age_mask_in_place |
+ markWord::epoch_mask_in_place);
z_lgr(temp_reg, Z_thread);
z_llgfr(mark_reg, mark_reg);
z_ogr(temp_reg, mark_reg);
@@ -3287,7 +3287,7 @@ void MacroAssembler::biased_locking_enter(Register obj_reg,
// bias in the current epoch. In other words, we allow transfer of
// the bias from one thread to another directly in this situation.
- z_nilf(mark_reg, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
+ z_nilf(mark_reg, markWord::biased_lock_mask_in_place | markWord::age_mask_in_place | markWord::epoch_mask_in_place);
load_prototype_header(temp_reg, obj_reg);
z_llgfr(mark_reg, mark_reg);
@@ -3348,9 +3348,9 @@ void MacroAssembler::biased_locking_exit(Register mark_addr, Register temp_reg,
BLOCK_COMMENT("biased_locking_exit {");
z_lg(temp_reg, 0, mark_addr);
- z_nilf(temp_reg, markOopDesc::biased_lock_mask_in_place);
+ z_nilf(temp_reg, markWord::biased_lock_mask_in_place);
- z_chi(temp_reg, markOopDesc::biased_lock_pattern);
+ z_chi(temp_reg, markWord::biased_lock_pattern);
z_bre(done);
BLOCK_COMMENT("} biased_locking_exit");
}
@@ -3363,7 +3363,7 @@ void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Regis
BLOCK_COMMENT("compiler_fast_lock_object {");
- // Load markOop from oop into mark.
+ // Load markWord from oop into mark.
z_lg(displacedHeader, 0, oop);
if (try_bias) {
@@ -3372,13 +3372,13 @@ void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Regis
// Handle existing monitor.
// The object has an existing monitor iff (mark & monitor_value) != 0.
- guarantee(Immediate::is_uimm16(markOopDesc::monitor_value), "must be half-word");
+ guarantee(Immediate::is_uimm16(markWord::monitor_value), "must be half-word");
z_lr(temp, displacedHeader);
- z_nill(temp, markOopDesc::monitor_value);
+ z_nill(temp, markWord::monitor_value);
z_brne(object_has_monitor);
- // Set mark to markOop | markOopDesc::unlocked_value.
- z_oill(displacedHeader, markOopDesc::unlocked_value);
+ // Set mark to markWord | markWord::unlocked_value.
+ z_oill(displacedHeader, markWord::unlocked_value);
// Load Compare Value application register.
@@ -3386,7 +3386,7 @@ void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Regis
z_stg(displacedHeader, BasicLock::displaced_header_offset_in_bytes(), box);
// Memory Fence (in cmpxchgd)
- // Compare object markOop with mark and if equal exchange scratch1 with object markOop.
+ // Compare object markWord with mark and if equal exchange scratch1 with object markWord.
// If the compare-and-swap succeeded, then we found an unlocked object and we
// have now locked it.
@@ -3397,7 +3397,7 @@ void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Regis
// We did not see an unlocked object so try the fast recursive case.
z_sgr(currentHeader, Z_SP);
- load_const_optimized(temp, (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
+ load_const_optimized(temp, (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
z_ngr(currentHeader, temp);
// z_brne(done);
@@ -3407,7 +3407,7 @@ void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Regis
z_bru(done);
Register zero = temp;
- Register monitor_tagged = displacedHeader; // Tagged with markOopDesc::monitor_value.
+ Register monitor_tagged = displacedHeader; // Tagged with markWord::monitor_value.
bind(object_has_monitor);
// The object's monitor m is unlocked iff m->owner == NULL,
// otherwise m->owner may contain a thread or a stack address.
@@ -3456,12 +3456,12 @@ void MacroAssembler::compiler_fast_unlock_object(Register oop, Register box, Reg
// Handle existing monitor.
// The object has an existing monitor iff (mark & monitor_value) != 0.
z_lg(currentHeader, oopDesc::mark_offset_in_bytes(), oop);
- guarantee(Immediate::is_uimm16(markOopDesc::monitor_value), "must be half-word");
- z_nill(currentHeader, markOopDesc::monitor_value);
+ guarantee(Immediate::is_uimm16(markWord::monitor_value), "must be half-word");
+ z_nill(currentHeader, markWord::monitor_value);
z_brne(object_has_monitor);
// Check if it is still a light weight lock, this is true if we see
- // the stack address of the basicLock in the markOop of the object
+ // the stack address of the basicLock in the markWord of the object
// copy box to currentHeader such that csg does not kill it.
z_lgr(currentHeader, box);
z_csg(currentHeader, displacedHeader, 0, oop);

@@ -3880,7 +3880,7 @@ void TemplateTable::_new() {
__ z_stg(prototype, Address(RallocatedObject, oopDesc::mark_offset_in_bytes()));
} else {
__ store_const(Address(RallocatedObject, oopDesc::mark_offset_in_bytes()),
- (long)markOopDesc::prototype());
+ (long)markWord::prototype().value());
}
__ store_klass_gap(Rzero, RallocatedObject); // Zero klass gap for compressed oops.

@@ -97,12 +97,12 @@ void C1_MacroAssembler::lock_object(Register Rmark, Register Roop, Register Rbox
mov(Rbox, Rscratch);
// and mark it unlocked
- or3(Rmark, markOopDesc::unlocked_value, Rmark);
+ or3(Rmark, markWord::unlocked_value, Rmark);
// save unlocked object header into the displaced header location on the stack
st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
- // compare object markOop with Rmark and if equal exchange Rscratch with object markOop
+ // compare object markWord with Rmark and if equal exchange Rscratch with object markWord
assert(mark_addr.disp() == 0, "cas must take a zero displacement");
cas_ptr(mark_addr.base(), Rmark, Rscratch);
// if compare/exchange succeeded we found an unlocked object and we now have locked it
@@ -144,7 +144,7 @@ void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rb
}
// Check if it is still a light weight lock, this is true if we see
- // the stack address of the basicLock in the markOop of the object
+ // the stack address of the basicLock in the markWord of the object
cas_ptr(mark_addr.base(), Rbox, Rmark);
cmp(Rbox, Rmark);
@@ -179,7 +179,7 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
if (UseBiasedLocking && !len->is_valid()) {
ld_ptr(klass, in_bytes(Klass::prototype_header_offset()), t1);
} else {
- set((intx)markOopDesc::prototype(), t1);
+ set((intx)markWord::prototype().value(), t1);
}
st_ptr(t1, obj, oopDesc::mark_offset_in_bytes());
if (UseCompressedClassPointers) {

@@ -1200,7 +1200,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg, Register Object)
assert_different_registers(lock_reg, obj_reg, mark_reg, temp_reg);
- // load markOop from object into mark_reg
+ // load markWord from object into mark_reg
ld_ptr(mark_addr, mark_reg);
if (UseBiasedLocking) {
@@ -1211,11 +1211,11 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg, Register Object)
// we need a temporary register here as we do not want to clobber lock_reg
// (cas clobbers the destination register)
mov(lock_reg, temp_reg);
- // set mark reg to be (markOop of object | UNLOCK_VALUE)
- or3(mark_reg, markOopDesc::unlocked_value, mark_reg);
+ // set mark reg to be (markWord of object | UNLOCK_VALUE)
+ or3(mark_reg, markWord::unlocked_value, mark_reg);
// initialize the box (Must happen before we update the object mark!)
st_ptr(mark_reg, lock_addr, BasicLock::displaced_header_offset_in_bytes());
- // compare and exchange object_addr, markOop | 1, stack address of basicLock
+ // compare and exchange object_addr, markWord | 1, stack address of basicLock
assert(mark_addr.disp() == 0, "cas must take a zero displacement");
cas_ptr(mark_addr.base(), mark_reg, temp_reg);
@@ -1224,7 +1224,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg, Register Object)
// We did not see an unlocked object so try the fast recursive case
- // Check if owner is self by comparing the value in the markOop of object
+ // Check if owner is self by comparing the value in the markWord of object
// with the stack pointer
sub(temp_reg, SP, temp_reg);
sub(temp_reg, STACK_BIAS, temp_reg);
@@ -1234,7 +1234,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg, Register Object)
// (a) %sp -vs- markword proximity check, and,
// (b) verify mark word LSBs == 0 (Stack-locked).
//
- // FFFFF003/FFFFFFFFFFFF003 is (markOopDesc::lock_mask_in_place | -os::vm_page_size())
+ // FFFFF003/FFFFFFFFFFFF003 is (markWord::lock_mask_in_place | -os::vm_page_size())
// Note that the page size used for %sp proximity testing is arbitrary and is
// unrelated to the actual MMU page size. We use a 'logical' page size of
// 4096 bytes. F..FFF003 is designed to fit conveniently in the SIMM13 immediate

@@ -2452,15 +2452,15 @@ void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
// whether the epoch is still valid
// Note that the runtime guarantees sufficient alignment of JavaThread
// pointers to allow age to be placed into low bits
- assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
- and3(mark_reg, markOopDesc::biased_lock_mask_in_place, temp_reg);
- cmp_and_brx_short(temp_reg, markOopDesc::biased_lock_pattern, Assembler::notEqual, Assembler::pn, cas_label);
+ assert(markWord::age_shift == markWord::lock_bits + markWord::biased_lock_bits, "biased locking makes assumptions about bit layout");
+ and3(mark_reg, markWord::biased_lock_mask_in_place, temp_reg);
+ cmp_and_brx_short(temp_reg, markWord::biased_lock_pattern, Assembler::notEqual, Assembler::pn, cas_label);
load_klass(obj_reg, temp_reg);
ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
or3(G2_thread, temp_reg, temp_reg);
xor3(mark_reg, temp_reg, temp_reg);
- andcc(temp_reg, ~((int) markOopDesc::age_mask_in_place), temp_reg);
+ andcc(temp_reg, ~((int) markWord::age_mask_in_place), temp_reg);
if (counters != NULL) {
cond_inc(Assembler::equal, (address) counters->biased_lock_entry_count_addr(), mark_reg, temp_reg);
// Reload mark_reg as we may need it later
@@ -2483,7 +2483,7 @@ void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
// If the low three bits in the xor result aren't clear, that means
// the prototype header is no longer biased and we have to revoke
// the bias on this object.
- btst(markOopDesc::biased_lock_mask_in_place, temp_reg);
+ btst(markWord::biased_lock_mask_in_place, temp_reg);
brx(Assembler::notZero, false, Assembler::pn, try_revoke_bias);
// Biasing is still enabled for this data type. See whether the
@@ -2495,7 +2495,7 @@ void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
// that the current epoch is invalid in order to do this because
// otherwise the manipulations it performs on the mark word are
// illegal.
- delayed()->btst(markOopDesc::epoch_mask_in_place, temp_reg);
+ delayed()->btst(markWord::epoch_mask_in_place, temp_reg);
brx(Assembler::notZero, false, Assembler::pn, try_rebias);
// The epoch of the current bias is still valid but we know nothing
@@ -2505,7 +2505,7 @@ void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
// Note that we first construct the presumed unbiased header so we
// don't accidentally blow away another thread's valid bias.
delayed()->and3(mark_reg,
- markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place,
+ markWord::biased_lock_mask_in_place | markWord::age_mask_in_place | markWord::epoch_mask_in_place,
mark_reg);
or3(G2_thread, mark_reg, temp_reg);
cas_ptr(mark_addr.base(), mark_reg, temp_reg);
@@ -2586,8 +2586,8 @@ void MacroAssembler::biased_locking_exit (Address mark_addr, Register temp_reg,
// lock, the object could not be rebiased toward another thread, so
// the bias bit would be clear.
ld_ptr(mark_addr, temp_reg);
- and3(temp_reg, markOopDesc::biased_lock_mask_in_place, temp_reg);
- cmp(temp_reg, markOopDesc::biased_lock_pattern);
+ and3(temp_reg, markWord::biased_lock_mask_in_place, temp_reg);
+ cmp(temp_reg, markWord::biased_lock_pattern);
brx(Assembler::equal, allow_delay_slot_filling, Assembler::pt, done);
delayed();
if (!allow_delay_slot_filling) {
@@ -2603,12 +2603,12 @@ void MacroAssembler::biased_locking_exit (Address mark_addr, Register temp_reg,
// box->dhw disposition - post-conditions at DONE_LABEL.
// - Successful inflated lock: box->dhw != 0.
// Any non-zero value suffices.
- // Consider G2_thread, rsp, boxReg, or markOopDesc::unused_mark()
+ // Consider G2_thread, rsp, boxReg, or markWord::unused_mark()
// - Successful Stack-lock: box->dhw == mark.
// box->dhw must contain the displaced mark word value
// - Failure -- icc.ZFlag == 0 and box->dhw is undefined.
// The slow-path fast_enter() and slow_enter() operators
- // are responsible for setting box->dhw = NonZero (typically markOopDesc::unused_mark()).
+ // are responsible for setting box->dhw = NonZero (typically markWord::unused_mark()).
// - Biased: box->dhw is undefined
//
// SPARC refworkload performance - specifically jetstream and scimark - are
@@ -2658,7 +2658,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
// This presumes TSO, of course.
mov(0, Rscratch);
- or3(Rmark, markOopDesc::unlocked_value, Rmark);
+ or3(Rmark, markWord::unlocked_value, Rmark);
assert(mark_addr.disp() == 0, "cas must take a zero displacement");
cas_ptr(mark_addr.base(), Rmark, Rscratch);
// prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads);
@@ -2712,7 +2712,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
// set icc.zf : 1=success 0=failure
// ST box->displaced_header = NonZero.
// Any non-zero value suffices:
- // markOopDesc::unused_mark(), G2_thread, RBox, RScratch, rsp, etc.
+ // markWord::unused_mark(), G2_thread, RBox, RScratch, rsp, etc.
st_ptr(Rbox, Rbox, BasicLock::displaced_header_offset_in_bytes());
// Intentional fall-through into done

@@ -1835,19 +1835,19 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// hash_mask_in_place because it could be larger than 32 bits in a 64-bit
// vm: see markOop.hpp.
__ ld_ptr(obj_reg, oopDesc::mark_offset_in_bytes(), header);
- __ sethi(markOopDesc::hash_mask, mask);
- __ btst(markOopDesc::unlocked_value, header);
+ __ sethi(markWord::hash_mask, mask);
+ __ btst(markWord::unlocked_value, header);
__ br(Assembler::zero, false, Assembler::pn, slowCase);
if (UseBiasedLocking) {
// Check if biased and fall through to runtime if so
__ delayed()->nop();
- __ btst(markOopDesc::biased_lock_bit_in_place, header);
+ __ btst(markWord::biased_lock_bit_in_place, header);
__ br(Assembler::notZero, false, Assembler::pn, slowCase);
}
- __ delayed()->or3(mask, markOopDesc::hash_mask & 0x3ff, mask);
+ __ delayed()->or3(mask, markWord::hash_mask & 0x3ff, mask);
// Check for a valid (non-zero) hash code and get its value.
- __ srlx(header, markOopDesc::hash_shift, hash);
+ __ srlx(header, markWord::hash_shift, hash);
__ andcc(hash, mask, hash);
__ br(Assembler::equal, false, Assembler::pn, slowCase);
__ delayed()->nop();
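The sequence above is the lockless identity-hash fast path. A hedged C++ rendering (the sethi/or3 pair merely builds the wide hash_mask constant; load_mark and slow_case are stand-in names):

    uintptr_t mark = load_mark(obj);
    if ((mark & markWord::unlocked_value) == 0) return slow_case(); // locked/inflated
    if (UseBiasedLocking &&
        (mark & markWord::biased_lock_bit_in_place)) return slow_case();
    intptr_t hash = (mark >> markWord::hash_shift) & markWord::hash_mask;
    if (hash == 0) return slow_case();                              // not installed yet
    return hash;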

@@ -3517,7 +3517,7 @@ void TemplateTable::_new() {
if (UseBiasedLocking) {
__ ld_ptr(RinstanceKlass, in_bytes(Klass::prototype_header_offset()), G4_scratch);
} else {
- __ set((intptr_t)markOopDesc::prototype(), G4_scratch);
+ __ set((intptr_t)markWord::prototype().value(), G4_scratch);
}
__ st_ptr(G4_scratch, RallocatedObject, oopDesc::mark_offset_in_bytes()); // mark
__ store_klass_gap(G0, RallocatedObject); // klass gap if compressed

@@ -61,7 +61,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
// Load object header
movptr(hdr, Address(obj, hdr_offset));
// and mark it as unlocked
orptr(hdr, markOopDesc::unlocked_value);
orptr(hdr, markWord::unlocked_value);
// save unlocked object header into the displaced header location on the stack
movptr(Address(disp_hdr, 0), hdr);
// test if object header is still the same (i.e. unlocked), and if so, store the
@ -156,7 +156,7 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
movptr(Address(obj, oopDesc::mark_offset_in_bytes()), t1);
} else {
// This assumes that all prototype bits fit in an int32_t
movptr(Address(obj, oopDesc::mark_offset_in_bytes ()), (int32_t)(intptr_t)markOopDesc::prototype());
movptr(Address(obj, oopDesc::mark_offset_in_bytes ()), (int32_t)(intptr_t)markWord::prototype().value());
}
#ifdef _LP64
if (UseCompressedClassPointers) { // Take care not to kill klass

@ -350,9 +350,9 @@ void ShenandoahBarrierSetAssembler::resolve_forward_pointer_not_null(MacroAssemb
Label done;
__ movptr(tmp, Address(dst, oopDesc::mark_offset_in_bytes()));
__ notptr(tmp);
__ testb(tmp, markOopDesc::marked_value);
__ testb(tmp, markWord::marked_value);
__ jccb(Assembler::notZero, done);
__ orptr(tmp, markOopDesc::marked_value);
__ orptr(tmp, markWord::marked_value);
__ notptr(tmp);
__ mov(dst, tmp);
__ bind(done);
@ -824,15 +824,15 @@ void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assemble
// and then testing for both bits clear.
__ notptr(tmp1);
#ifdef _LP64
__ testb(tmp1, markOopDesc::marked_value);
__ testb(tmp1, markWord::marked_value);
#else
// On x86_32, C1 register allocator can give us the register without 8-bit support.
// Do the full-register access and test to avoid compilation failures.
__ testptr(tmp1, markOopDesc::marked_value);
__ testptr(tmp1, markWord::marked_value);
#endif
__ jccb(Assembler::notZero, slow_path);
// Clear both lower bits. It's still inverted, so set them, and then invert back.
__ orptr(tmp1, markOopDesc::marked_value);
__ orptr(tmp1, markWord::marked_value);
__ notptr(tmp1);
// At this point, tmp1 contains the decoded forwarding pointer.
__ mov(res, tmp1);
@ -963,10 +963,10 @@ address ShenandoahBarrierSetAssembler::generate_shenandoah_lrb(StubCodeGenerator
// Test if both lowest bits are set. We trick it by negating the bits
// and then testing for both bits clear.
__ notptr(tmp2);
__ testb(tmp2, markOopDesc::marked_value);
__ testb(tmp2, markWord::marked_value);
__ jccb(Assembler::notZero, slow_path);
// Clear both lower bits. It's still inverted, so set them, and then invert back.
__ orptr(tmp2, markOopDesc::marked_value);
__ orptr(tmp2, markWord::marked_value);
__ notptr(tmp2);
// At this point, tmp2 contains the decoded forwarding pointer.
__ mov(rax, tmp2);

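The notptr/testb/orptr/notptr dance is a compact way of asking "are both low bits set?" and then clearing them: in the complement, "both bits set" becomes "both bits clear", which one testb can check, and clearing bits in the original is setting them in the complement. The same trick in plain C++ (marked_value = 3, the encoding of a forwarded header in these hunks):

```c++
#include <cstdint>

constexpr uintptr_t marked_value = 3;   // low lock bits 11: header is a forwardee

// Returns the forwarding pointer if 'mark' encodes one, else 0.
// Mirrors: notptr; testb marked_value; orptr marked_value; notptr.
uintptr_t decode_forwardee(uintptr_t mark) {
  uintptr_t inv = ~mark;
  if (inv & marked_value) return 0;     // a low bit was clear: not forwarded
  // ~(~mark | 3) == mark & ~3: strip the tag bits, recover the pointer.
  return ~(inv | marked_value);
}
```

A single testb can only express "any masked bit set", so testing the inverted value is what turns the two-bits-set question into one instruction.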
@ -1115,7 +1115,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
assert(swap_reg == rax, "swap_reg must be rax for cmpxchgq");
assert(tmp_reg != noreg, "tmp_reg must be supplied");
assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
assert(markWord::age_shift == markWord::lock_bits + markWord::biased_lock_bits, "biased locking makes assumptions about bit layout");
Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes());
NOT_LP64( Address saved_mark_addr(lock_reg, 0); )
@ -1135,8 +1135,8 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
movptr(swap_reg, mark_addr);
}
movptr(tmp_reg, swap_reg);
andptr(tmp_reg, markOopDesc::biased_lock_mask_in_place);
cmpptr(tmp_reg, markOopDesc::biased_lock_pattern);
andptr(tmp_reg, markWord::biased_lock_mask_in_place);
cmpptr(tmp_reg, markWord::biased_lock_pattern);
jcc(Assembler::notEqual, cas_label);
// The bias pattern is present in the object's header. Need to check
// whether the bias owner and the epoch are both still current.
@ -1162,7 +1162,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
xorptr(swap_reg, tmp_reg);
Register header_reg = swap_reg;
#endif
andptr(header_reg, ~((int) markOopDesc::age_mask_in_place));
andptr(header_reg, ~((int) markWord::age_mask_in_place));
if (counters != NULL) {
cond_inc32(Assembler::zero,
ExternalAddress((address) counters->biased_lock_entry_count_addr()));
@ -1181,7 +1181,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
// If the low three bits in the xor result aren't clear, that means
// the prototype header is no longer biased and we have to revoke
// the bias on this object.
testptr(header_reg, markOopDesc::biased_lock_mask_in_place);
testptr(header_reg, markWord::biased_lock_mask_in_place);
jccb(Assembler::notZero, try_revoke_bias);
// Biasing is still enabled for this data type. See whether the
@ -1193,7 +1193,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
// that the current epoch is invalid in order to do this because
// otherwise the manipulations it performs on the mark word are
// illegal.
testptr(header_reg, markOopDesc::epoch_mask_in_place);
testptr(header_reg, markWord::epoch_mask_in_place);
jccb(Assembler::notZero, try_rebias);
// The epoch of the current bias is still valid but we know nothing
@ -1204,7 +1204,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
// don't accidentally blow away another thread's valid bias.
NOT_LP64( movptr(swap_reg, saved_mark_addr); )
andptr(swap_reg,
markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
markWord::biased_lock_mask_in_place | markWord::age_mask_in_place | markWord::epoch_mask_in_place);
#ifdef _LP64
movptr(tmp_reg, swap_reg);
orptr(tmp_reg, r15_thread);
@ -1298,8 +1298,8 @@ void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, La
// lock, the object could not be rebiased toward another thread, so
// the bias bit would be clear.
movptr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
andptr(temp_reg, markOopDesc::biased_lock_mask_in_place);
cmpptr(temp_reg, markOopDesc::biased_lock_pattern);
andptr(temp_reg, markWord::biased_lock_mask_in_place);
cmpptr(temp_reg, markWord::biased_lock_pattern);
jcc(Assembler::equal, done);
}
@ -1486,7 +1486,7 @@ void MacroAssembler::rtm_stack_locking(Register objReg, Register tmpReg, Registe
bind(L_rtm_retry);
}
movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes()));
testptr(tmpReg, markOopDesc::monitor_value); // inflated vs stack-locked|neutral|biased
testptr(tmpReg, markWord::monitor_value); // inflated vs stack-locked|neutral|biased
jcc(Assembler::notZero, IsInflated);
if (PrintPreciseRTMLockingStatistics || profile_rtm) {
@ -1501,8 +1501,8 @@ void MacroAssembler::rtm_stack_locking(Register objReg, Register tmpReg, Registe
}
xbegin(L_on_abort);
movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // fetch markword
andptr(tmpReg, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
cmpptr(tmpReg, markOopDesc::unlocked_value); // bits = 001 unlocked
andptr(tmpReg, markWord::biased_lock_mask_in_place); // look at 3 lock bits
cmpptr(tmpReg, markWord::unlocked_value); // bits = 001 unlocked
jcc(Assembler::equal, DONE_LABEL); // all done if unlocked
Register abort_status_Reg = tmpReg; // status of abort is stored in RAX
@ -1528,7 +1528,7 @@ void MacroAssembler::rtm_stack_locking(Register objReg, Register tmpReg, Registe
// Use RTM for inflating locks
// inputs: objReg (object to lock)
// boxReg (on-stack box address (displaced header location) - KILLED)
// tmpReg (ObjectMonitor address + markOopDesc::monitor_value)
// tmpReg (ObjectMonitor address + markWord::monitor_value)
void MacroAssembler::rtm_inflated_locking(Register objReg, Register boxReg, Register tmpReg,
Register scrReg, Register retry_on_busy_count_Reg,
Register retry_on_abort_count_Reg,
@ -1542,7 +1542,7 @@ void MacroAssembler::rtm_inflated_locking(Register objReg, Register boxReg, Regi
int owner_offset = OM_OFFSET_NO_MONITOR_VALUE_TAG(owner);
// Without cast to int32_t a movptr will destroy r10 which is typically obj
movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
movptr(Address(boxReg, 0), (int32_t)intptr_t(markWord::unused_mark().value()));
movptr(boxReg, tmpReg); // Save ObjectMonitor address
if (RTMRetryCount > 0) {
@ -1748,11 +1748,11 @@ void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg
#endif // INCLUDE_RTM_OPT
movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // [FETCH]
testptr(tmpReg, markOopDesc::monitor_value); // inflated vs stack-locked|neutral|biased
testptr(tmpReg, markWord::monitor_value); // inflated vs stack-locked|neutral|biased
jccb(Assembler::notZero, IsInflated);
// Attempt stack-locking ...
orptr (tmpReg, markOopDesc::unlocked_value);
orptr (tmpReg, markWord::unlocked_value);
movptr(Address(boxReg, 0), tmpReg); // Anticipate successful CAS
lock();
cmpxchgptr(boxReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Updates tmpReg
@ -1776,7 +1776,7 @@ void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg
jmp(DONE_LABEL);
bind(IsInflated);
// The object is inflated. tmpReg contains pointer to ObjectMonitor* + markOopDesc::monitor_value
// The object is inflated. tmpReg contains pointer to ObjectMonitor* + markWord::monitor_value
#if INCLUDE_RTM_OPT
// Use the same RTM locking code in 32- and 64-bit VM.
@ -1791,7 +1791,7 @@ void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg
// boxReg refers to the on-stack BasicLock in the current frame.
// We'd like to write:
// set box->_displaced_header = markOopDesc::unused_mark(). Any non-0 value suffices.
// set box->_displaced_header = markWord::unused_mark(). Any non-0 value suffices.
// This is convenient but results in a ST-before-CAS penalty. The following CAS suffers
// additional latency as we have another ST in the store buffer that must drain.
@ -1836,9 +1836,9 @@ void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg
lock();
cmpxchgptr(r15_thread, Address(scrReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
// Unconditionally set box->_displaced_header = markOopDesc::unused_mark().
// Unconditionally set box->_displaced_header = markWord::unused_mark().
// Without cast to int32_t movptr will destroy r10 which is typically obj.
movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
movptr(Address(boxReg, 0), (int32_t)intptr_t(markWord::unused_mark().value()));
// Intentional fall-through into DONE_LABEL ...
// Propagate ICC.ZF from CAS above into DONE_LABEL.
#endif // _LP64
@ -1906,20 +1906,20 @@ void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpR
if (UseRTMForStackLocks && use_rtm) {
assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking");
Label L_regular_unlock;
movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // fetch markword
andptr(tmpReg, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
cmpptr(tmpReg, markOopDesc::unlocked_value); // bits = 001 unlocked
jccb(Assembler::notEqual, L_regular_unlock); // if !HLE RegularLock
xend(); // otherwise end...
jmp(DONE_LABEL); // ... and we're done
movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // fetch markword
andptr(tmpReg, markWord::biased_lock_mask_in_place); // look at 3 lock bits
cmpptr(tmpReg, markWord::unlocked_value); // bits = 001 unlocked
jccb(Assembler::notEqual, L_regular_unlock); // if !HLE RegularLock
xend(); // otherwise end...
jmp(DONE_LABEL); // ... and we're done
bind(L_regular_unlock);
}
#endif
cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD); // Examine the displaced header
jcc (Assembler::zero, DONE_LABEL); // 0 indicates recursive stack-lock
movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Examine the object's markword
testptr(tmpReg, markOopDesc::monitor_value); // Inflated?
cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD); // Examine the displaced header
jcc (Assembler::zero, DONE_LABEL); // 0 indicates recursive stack-lock
movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Examine the object's markword
testptr(tmpReg, markWord::monitor_value); // Inflated?
jccb (Assembler::zero, Stacked);
// It's inflated.

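Every branch in these hunks discriminates on the same low header bits: 01 unlocked, 00 stack-locked, 10 inflated monitor, 11 marked/forwarded, and 101 for the biased pattern, which is why the code masks with biased_lock_mask_in_place (three bits) before comparing against unlocked_value. A small C++ classifier over those standard constants, offered as a reading aid rather than VM code:

```c++
#include <cstdint>
#include <cstdio>

enum class LockState { Stack, Unlocked, Monitor, Marked, Biased };

constexpr uintptr_t lock_mask_in_place        = 0x3;
constexpr uintptr_t biased_lock_mask_in_place = 0x7;
constexpr uintptr_t biased_lock_pattern       = 0x5;  // 101
constexpr uintptr_t unlocked_value            = 0x1;  // 001
constexpr uintptr_t monitor_value             = 0x2;  //  10
constexpr uintptr_t marked_value              = 0x3;  //  11

LockState classify(uintptr_t mark) {
  // Biased must be tested first: 101 also matches 01 in the low two bits.
  if ((mark & biased_lock_mask_in_place) == biased_lock_pattern) return LockState::Biased;
  switch (mark & lock_mask_in_place) {
    case unlocked_value: return LockState::Unlocked;
    case monitor_value:  return LockState::Monitor;
    case marked_value:   return LockState::Marked;
    default:             return LockState::Stack;  // 00: displaced to a stack box
  }
}

int main() { std::printf("%d\n", static_cast<int>(classify(0x5))); }  // prints 4 (Biased)
```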
@ -59,12 +59,12 @@ void SharedRuntime::inline_check_hashcode_from_object_header(MacroAssembler* mas
__ movptr(result, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
// check if locked
__ testptr(result, markOopDesc::unlocked_value);
__ testptr(result, markWord::unlocked_value);
__ jcc(Assembler::zero, slowCase);
if (UseBiasedLocking) {
// Check if biased and fall through to runtime if so
__ testptr(result, markOopDesc::biased_lock_bit_in_place);
__ testptr(result, markWord::biased_lock_bit_in_place);
__ jcc(Assembler::notZero, slowCase);
}
@ -73,16 +73,16 @@ void SharedRuntime::inline_check_hashcode_from_object_header(MacroAssembler* mas
// Read the header and build a mask to get its hash field.
// Depend on hash_mask being at most 32 bits and avoid the use of hash_mask_in_place
// because it could be larger than 32 bits in a 64-bit vm. See markWord.hpp.
__ shrptr(result, markOopDesc::hash_shift);
__ andptr(result, markOopDesc::hash_mask);
__ shrptr(result, markWord::hash_shift);
__ andptr(result, markWord::hash_mask);
#else
__ andptr(result, markOopDesc::hash_mask_in_place);
__ andptr(result, markWord::hash_mask_in_place);
#endif //_LP64
// test if hashCode exists
__ jcc(Assembler::zero, slowCase);
#ifndef _LP64
__ shrptr(result, markOopDesc::hash_shift);
__ shrptr(result, markWord::hash_shift);
#endif
__ ret(0);
__ bind(slowCase);

@ -4108,7 +4108,7 @@ void TemplateTable::_new() {
__ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()), rbx);
} else {
__ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()),
(intptr_t)markOopDesc::prototype()); // header
(intptr_t)markWord::prototype().value()); // header
__ pop(rcx); // get saved klass back in the register.
}
#ifdef _LP64

@ -273,12 +273,12 @@ int CppInterpreter::native_entry(Method* method, intptr_t UNUSED, TRAPS) {
if (method->is_synchronized()) {
monitor = (BasicObjectLock*) istate->stack_base();
oop lockee = monitor->obj();
markOop disp = lockee->mark()->set_unlocked();
markWord disp = lockee->mark().set_unlocked();
monitor->lock()->set_displaced_header(disp);
if (lockee->cas_set_mark((markOop)monitor, disp) != disp) {
if (thread->is_lock_owned((address) disp->clear_lock_bits())) {
monitor->lock()->set_displaced_header(NULL);
if (lockee->cas_set_mark(markWord::from_pointer(monitor), disp) != disp) {
if (thread->is_lock_owned((address) disp.clear_lock_bits().to_pointer())) {
monitor->lock()->set_displaced_header(markWord::from_pointer(NULL));
}
else {
CALL_VM_NOCHECK(InterpreterRuntime::monitorenter(thread, monitor));
@ -413,12 +413,12 @@ int CppInterpreter::native_entry(Method* method, intptr_t UNUSED, TRAPS) {
// Unlock if necessary
if (monitor) {
BasicLock *lock = monitor->lock();
markOop header = lock->displaced_header();
markWord header = lock->displaced_header();
oop rcvr = monitor->obj();
monitor->set_obj(NULL);
if (header != NULL) {
markOop old_header = markOopDesc::encode(lock);
if (header.to_pointer() != NULL) {
markWord old_header = markWord::encode(lock);
if (rcvr->cas_set_mark(header, old_header) != old_header) {
monitor->set_obj(rcvr); {
HandleMark hm(thread);

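With a value carrier, the old implicit casts, `(markOop)monitor` one way and `(address) disp->clear_lock_bits()` the other, become the explicit conversions this hunk introduces: `markWord::from_pointer` and `to_pointer`. A sketch of the pair under the same wrap-an-integer representation; note how `to_pointer()` is what turns the old `header != NULL` test into `header.to_pointer() != NULL`:

```c++
#include <cassert>
#include <cstdint>

class MarkValue {
  uintptr_t _value;
 public:
  explicit MarkValue(uintptr_t v) : _value(v) {}
  static MarkValue from_pointer(void* p) {
    return MarkValue(reinterpret_cast<uintptr_t>(p));
  }
  void* to_pointer() const { return reinterpret_cast<void*>(_value); }
};

int main() {
  int dummy_lock;  // stands in for a BasicObjectLock
  MarkValue m = MarkValue::from_pointer(&dummy_lock);
  assert(m.to_pointer() == &dummy_lock);                          // lossless round trip
  assert(MarkValue::from_pointer(nullptr).to_pointer() == nullptr);
}
```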
@ -35,8 +35,8 @@
// objects. We don't want to call the synchronizer hash code to install
// this value because it may safepoint.
static intptr_t object_hash(Klass* k) {
intptr_t hc = k->java_mirror()->mark()->hash();
return hc != markOopDesc::no_hash ? hc : os::random();
intptr_t hc = k->java_mirror()->mark().hash();
return hc != markWord::no_hash ? hc : os::random();
}
// Seed value used for each alternative hash calculated.

@ -2154,7 +2154,7 @@ void SystemDictionary::update_dictionary(unsigned int d_hash,
// NOTE that we must only do this when the class is initially
// defined, not each time it is referenced from a new class loader
if (oopDesc::equals(k->class_loader(), class_loader())) {
k->set_prototype_header(markOopDesc::biased_locking_prototype());
k->set_prototype_header(markWord::biased_locking_prototype());
}
}

@ -736,7 +736,7 @@ public:
size_t PromotionInfo::refillSize() const {
const size_t CMSSpoolBlockSize = 256;
const size_t sz = heap_word_size(sizeof(SpoolBlock) + sizeof(markOop)
const size_t sz = heap_word_size(sizeof(SpoolBlock) + sizeof(markWord)
* CMSSpoolBlockSize);
return CompactibleFreeListSpace::adjustObjectSize(sz);
}

@ -1010,7 +1010,7 @@ oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
// Things to support parallel young-gen collection.
oop
ConcurrentMarkSweepGeneration::par_promote(int thread_num,
oop old, markOop m,
oop old, markWord m,
size_t word_sz) {
#ifndef PRODUCT
if (CMSHeap::heap()->promotion_should_fail()) {
@ -7776,10 +7776,10 @@ bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
assert(stack->capacity() > num, "Shouldn't bite more than can chew");
size_t i = num;
oop cur = _overflow_list;
const markOop proto = markOopDesc::prototype();
const markWord proto = markWord::prototype();
NOT_PRODUCT(ssize_t n = 0;)
for (oop next; i > 0 && cur != NULL; cur = next, i--) {
next = oop(cur->mark_raw());
next = oop(cur->mark_raw().to_pointer());
cur->set_mark_raw(proto); // until proven otherwise
assert(oopDesc::is_oop(cur), "Should be an oop");
bool res = stack->push(cur);
@ -7863,8 +7863,8 @@ bool CMSCollector::par_take_from_overflow_list(size_t num,
size_t i = num;
oop cur = prefix;
// Walk down the first "num" objects, unless we reach the end.
for (; i > 1 && cur->mark_raw() != NULL; cur = oop(cur->mark_raw()), i--);
if (cur->mark_raw() == NULL) {
for (; i > 1 && cur->mark_raw().to_pointer() != NULL; cur = oop(cur->mark_raw().to_pointer()), i--);
if (cur->mark_raw().to_pointer() == NULL) {
// We have "num" or fewer elements in the list, so there
// is nothing to return to the global list.
// Write back the NULL in lieu of the BUSY we wrote
@ -7874,9 +7874,9 @@ bool CMSCollector::par_take_from_overflow_list(size_t num,
}
} else {
// Chop off the suffix and return it to the global list.
assert(cur->mark_raw() != BUSY, "Error");
oop suffix_head = cur->mark_raw(); // suffix will be put back on global list
cur->set_mark_raw(NULL); // break off suffix
assert(cur->mark_raw().to_pointer() != (void*)BUSY, "Error");
oop suffix_head = oop(cur->mark_raw().to_pointer()); // suffix will be put back on global list
cur->set_mark_raw(markWord::from_pointer(NULL)); // break off suffix
// It's possible that the list is still in the empty(busy) state
// we left it in a short while ago; in that case we may be
// able to place back the suffix without incurring the cost
@ -7896,18 +7896,18 @@ bool CMSCollector::par_take_from_overflow_list(size_t num,
// Too bad, someone else sneaked in (at least) an element; we'll need
// to do a splice. Find tail of suffix so we can prepend suffix to global
// list.
for (cur = suffix_head; cur->mark_raw() != NULL; cur = (oop)(cur->mark_raw()));
for (cur = suffix_head; cur->mark_raw().to_pointer() != NULL; cur = (oop)(cur->mark_raw().to_pointer()));
oop suffix_tail = cur;
assert(suffix_tail != NULL && suffix_tail->mark_raw() == NULL,
assert(suffix_tail != NULL && suffix_tail->mark_raw().to_pointer() == NULL,
"Tautology");
observed_overflow_list = _overflow_list;
do {
cur_overflow_list = observed_overflow_list;
if (cur_overflow_list != BUSY) {
// Do the splice ...
suffix_tail->set_mark_raw(markOop(cur_overflow_list));
suffix_tail->set_mark_raw(markWord::from_pointer((void*)cur_overflow_list));
} else { // cur_overflow_list == BUSY
suffix_tail->set_mark_raw(NULL);
suffix_tail->set_mark_raw(markWord::from_pointer(NULL));
}
// ... and try to place spliced list back on overflow_list ...
observed_overflow_list =
@ -7919,11 +7919,11 @@ bool CMSCollector::par_take_from_overflow_list(size_t num,
// Push the prefix elements on work_q
assert(prefix != NULL, "control point invariant");
const markOop proto = markOopDesc::prototype();
const markWord proto = markWord::prototype();
oop next;
NOT_PRODUCT(ssize_t n = 0;)
for (cur = prefix; cur != NULL; cur = next) {
next = oop(cur->mark_raw());
next = oop(cur->mark_raw().to_pointer());
cur->set_mark_raw(proto); // until proven otherwise
assert(oopDesc::is_oop(cur), "Should be an oop");
bool res = work_q->push(cur);
@ -7942,7 +7942,7 @@ void CMSCollector::push_on_overflow_list(oop p) {
NOT_PRODUCT(_num_par_pushes++;)
assert(oopDesc::is_oop(p), "Not an oop");
preserve_mark_if_necessary(p);
p->set_mark_raw((markOop)_overflow_list);
p->set_mark_raw(markWord::from_pointer(_overflow_list));
_overflow_list = p;
}
@ -7956,9 +7956,9 @@ void CMSCollector::par_push_on_overflow_list(oop p) {
do {
cur_overflow_list = observed_overflow_list;
if (cur_overflow_list != BUSY) {
p->set_mark_raw(markOop(cur_overflow_list));
p->set_mark_raw(markWord::from_pointer((void*)cur_overflow_list));
} else {
p->set_mark_raw(NULL);
p->set_mark_raw(markWord::from_pointer(NULL));
}
observed_overflow_list =
Atomic::cmpxchg((oopDesc*)p, &_overflow_list, (oopDesc*)cur_overflow_list);
@ -7980,7 +7980,7 @@ void CMSCollector::par_push_on_overflow_list(oop p) {
// the VM can then be changed, incrementally, to deal with such
// failures where possible, thus, incrementally hardening the VM
// in such low resource situations.
void CMSCollector::preserve_mark_work(oop p, markOop m) {
void CMSCollector::preserve_mark_work(oop p, markWord m) {
_preserved_oop_stack.push(p);
_preserved_mark_stack.push(m);
assert(m == p->mark_raw(), "Mark word changed");
@ -7990,15 +7990,15 @@ void CMSCollector::preserve_mark_work(oop p, markOop m) {
// Single threaded
void CMSCollector::preserve_mark_if_necessary(oop p) {
markOop m = p->mark_raw();
if (m->must_be_preserved(p)) {
markWord m = p->mark_raw();
if (m.must_be_preserved(p)) {
preserve_mark_work(p, m);
}
}
void CMSCollector::par_preserve_mark_if_necessary(oop p) {
markOop m = p->mark_raw();
if (m->must_be_preserved(p)) {
markWord m = p->mark_raw();
if (m.must_be_preserved(p)) {
MutexLocker x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
// Even though we read the mark word without holding
// the lock, we are assured that it will not change
@ -8038,9 +8038,9 @@ void CMSCollector::restore_preserved_marks_if_any() {
oop p = _preserved_oop_stack.pop();
assert(oopDesc::is_oop(p), "Should be an oop");
assert(_span.contains(p), "oop should be in _span");
assert(p->mark_raw() == markOopDesc::prototype(),
assert(p->mark_raw() == markWord::prototype(),
"Set when taken from overflow list");
markOop m = _preserved_mark_stack.pop();
markWord m = _preserved_mark_stack.pop();
p->set_mark_raw(m);
}
assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),

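The CMS overflow list in the hunks above needs no separate link field: each chained object's mark word holds the address of the next object, and popping re-installs the prototype mark ("until proven otherwise"). A single-threaded C++ model of push/take; the BUSY marker and the suffix-splicing of the parallel variant are deliberately omitted, and all names are invented:

```c++
#include <cstddef>
#include <cstdint>

constexpr uintptr_t prototype_mark = 1;  // neutral header, as markWord::prototype()

struct Obj { uintptr_t mark; };          // mark doubles as the intrusive next link

// Push: the real code first saves a non-trivial mark elsewhere
// (preserve_mark_if_necessary), then threads the object onto the list.
void push_overflow(Obj*& head, Obj* p) {
  p->mark = reinterpret_cast<uintptr_t>(head);
  head = p;
}

// Pop up to n objects, restoring the prototype mark on each.
size_t take_overflow(Obj*& head, Obj** out, size_t n) {
  size_t taken = 0;
  while (taken < n && head != nullptr) {
    Obj* cur = head;
    head = reinterpret_cast<Obj*>(cur->mark);  // next = mark.to_pointer()
    cur->mark = prototype_mark;                // until proven otherwise
    out[taken++] = cur;
  }
  return taken;
}
```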
@ -541,8 +541,8 @@ class CMSCollector: public CHeapObj<mtGC> {
// The following array-pair keeps track of mark words
// displaced for accommodating overflow list above.
// This code will likely be revisited under RFE#4922830.
Stack<oop, mtGC> _preserved_oop_stack;
Stack<markOop, mtGC> _preserved_mark_stack;
Stack<oop, mtGC> _preserved_oop_stack;
Stack<markWord, mtGC> _preserved_mark_stack;
// In support of multi-threaded concurrent phases
YieldingFlexibleWorkGang* _conc_workers;
@ -742,7 +742,7 @@ class CMSCollector: public CHeapObj<mtGC> {
void preserve_mark_if_necessary(oop p);
void par_preserve_mark_if_necessary(oop p);
void preserve_mark_work(oop p, markOop m);
void preserve_mark_work(oop p, markWord m);
void restore_preserved_marks_if_any();
NOT_PRODUCT(bool no_preserved_marks() const;)
// In support of testing overflow code
@ -1136,7 +1136,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
// Overrides for parallel promotion.
virtual oop par_promote(int thread_num,
oop obj, markOop m, size_t word_sz);
oop obj, markWord m, size_t word_sz);
virtual void par_promote_alloc_done(int thread_num);
virtual void par_oop_since_save_marks_iterate_done(int thread_num);

@ -56,14 +56,14 @@
class FreeChunk {
friend class VMStructs;
// For 64 bit compressed oops, the markOop encodes both the size and the
// For 64 bit compressed oops, the markWord encodes both the size and the
// indication that this is a FreeChunk and not an object.
volatile size_t _size;
FreeChunk* _prev;
FreeChunk* _next;
markOop mark() const volatile { return (markOop)_size; }
void set_mark(markOop m) { _size = (size_t)m; }
markWord mark() const volatile { return markWord((uintptr_t)_size); }
void set_mark(markWord m) { _size = (size_t)m.value(); }
public:
NOT_PRODUCT(static const size_t header_size();)
@ -79,7 +79,7 @@ class FreeChunk {
}
bool is_free() const volatile {
LP64_ONLY(if (UseCompressedOops) return mark()->is_cms_free_chunk(); else)
LP64_ONLY(if (UseCompressedOops) return mark().is_cms_free_chunk(); else)
return (((intptr_t)_prev) & 0x1) == 0x1;
}
bool cantCoalesce() const {
@ -100,11 +100,11 @@ class FreeChunk {
debug_only(void* size_addr() const { return (void*)&_size; })
size_t size() const volatile {
LP64_ONLY(if (UseCompressedOops) return mark()->get_size(); else )
LP64_ONLY(if (UseCompressedOops) return mark().get_size(); else )
return _size;
}
void set_size(size_t sz) {
LP64_ONLY(if (UseCompressedOops) set_mark(markOopDesc::set_size_and_free(sz)); else )
LP64_ONLY(if (UseCompressedOops) set_mark(markWord::set_size_and_free(sz)); else )
_size = sz;
}
@ -126,7 +126,7 @@ class FreeChunk {
#ifdef _LP64
if (UseCompressedOops) {
OrderAccess::storestore();
set_mark(markOopDesc::prototype());
set_mark(markWord::prototype());
}
#endif
assert(!is_free(), "Error");

@ -1078,7 +1078,7 @@ oop ParNewGeneration::real_forwardee_slow(oop obj) {
oop ParNewGeneration::copy_to_survivor_space(ParScanThreadState* par_scan_state,
oop old,
size_t sz,
markOop m) {
markWord m) {
// In the sequential version, this assert also says that the object is
// not forwarded. That might not be the case here. It is the case that
// the caller observed it to be not forwarded at some time in the past.

@ -381,7 +381,7 @@ class ParNewGeneration: public DefNewGeneration {
// that must not contain a forwarding pointer (though one might be
// inserted in "obj"s mark word by a parallel thread).
oop copy_to_survivor_space(ParScanThreadState* par_scan_state,
oop obj, size_t obj_sz, markOop m);
oop obj, size_t obj_sz, markWord m);
// in support of testing overflow code
NOT_PRODUCT(int _overflow_counter;)

@ -44,9 +44,9 @@ template <class T> inline void ParScanWeakRefClosure::do_oop_work(T* p) {
// we need to ensure that it is copied (see comment in
// ParScanClosure::do_oop_work).
Klass* objK = obj->klass();
markOop m = obj->mark_raw();
markWord m = obj->mark_raw();
oop new_obj;
if (m->is_marked()) { // Contains forwarding pointer.
if (m.is_marked()) { // Contains forwarding pointer.
new_obj = ParNewGeneration::real_forwardee(obj);
} else {
size_t obj_sz = obj->size_given_klass(objK);
@ -108,9 +108,9 @@ inline void ParScanClosure::do_oop_work(T* p,
// overwritten with an overflow next pointer after the object is
// forwarded.
Klass* objK = obj->klass();
markOop m = obj->mark_raw();
markWord m = obj->mark_raw();
oop new_obj;
if (m->is_marked()) { // Contains forwarding pointer.
if (m.is_marked()) { // Contains forwarding pointer.
new_obj = ParNewGeneration::real_forwardee(obj);
RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
log_develop_trace(gc, scavenge)("{%s %s ( " PTR_FORMAT " ) " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",

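Both closures above make the same two-way decision: a marked header is a forwarding pointer to the already-copied object, anything else means this thread must try the copy itself (and may still lose the race inside it). A compact C++ sketch of that decision with a stubbed copy routine:

```c++
#include <cstdint>

constexpr uintptr_t lock_mask    = 0x3;
constexpr uintptr_t marked_value = 0x3;  // header encodes a forwarding pointer

struct Obj { uintptr_t mark; };

// Stub: the real routine allocates, copies, and CASes the forwardee in.
Obj* copy_to_survivor_space(Obj* o) { return o; }

Obj* resolve(Obj* obj) {
  uintptr_t m = obj->mark;
  if ((m & lock_mask) == marked_value) {              // m.is_marked()
    return reinterpret_cast<Obj*>(m & ~lock_mask);    // m.decode_pointer()
  }
  return copy_to_survivor_space(obj);                 // copy, or find the winner's copy
}
```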
@ -62,12 +62,12 @@ inline void PromotedObject::setNext(PromotedObject* x) {
// Return the next displaced header, incrementing the pointer and
// recycling spool area as necessary.
markOop PromotionInfo::nextDisplacedHeader() {
markWord PromotionInfo::nextDisplacedHeader() {
assert(_spoolHead != NULL, "promotionInfo inconsistency");
assert(_spoolHead != _spoolTail || _firstIndex < _nextIndex,
"Empty spool space: no displaced header can be fetched");
assert(_spoolHead->bufferSize > _firstIndex, "Off by one error at head?");
markOop hdr = _spoolHead->displacedHdr[_firstIndex];
markWord hdr = _spoolHead->displacedHdr[_firstIndex];
// Spool forward
if (++_firstIndex == _spoolHead->bufferSize) { // last location in this block
// forward to next block, recycling this block into spare spool buffer
@ -93,15 +93,15 @@ void PromotionInfo::track(PromotedObject* trackOop) {
void PromotionInfo::track(PromotedObject* trackOop, Klass* klassOfOop) {
// make a copy of header as it may need to be spooled
markOop mark = oop(trackOop)->mark_raw();
markWord mark = oop(trackOop)->mark_raw();
trackOop->clear_next();
if (mark->must_be_preserved_for_cms_scavenge(klassOfOop)) {
if (mark.must_be_preserved_for_cms_scavenge(klassOfOop)) {
// save non-prototypical header, and mark oop
saveDisplacedHeader(mark);
trackOop->setDisplacedMark();
} else {
// we'd like to assert something like the following:
// assert(mark == markOopDesc::prototype(), "consistency check");
// assert(mark == markWord::prototype(), "consistency check");
// ... but the above won't work because the age bits have not (yet) been
// cleared. The remainder of the check would be identical to the
// condition checked in must_be_preserved() above, so we don't really
@ -123,7 +123,7 @@ void PromotionInfo::track(PromotedObject* trackOop, Klass* klassOfOop) {
// Save the given displaced header, incrementing the pointer and
// obtaining more spool area as necessary.
void PromotionInfo::saveDisplacedHeader(markOop hdr) {
void PromotionInfo::saveDisplacedHeader(markWord hdr) {
assert(_spoolHead != NULL && _spoolTail != NULL,
"promotionInfo inconsistency");
assert(_spoolTail->bufferSize > _nextIndex, "Off by one error at tail?");

@ -93,19 +93,19 @@ class SpoolBlock: public FreeChunk {
protected:
SpoolBlock* nextSpoolBlock;
size_t bufferSize; // number of usable words in this block
markOop* displacedHdr; // the displaced headers start here
markWord* displacedHdr; // the displaced headers start here
// Note about bufferSize: it denotes the number of entries available plus 1;
// legal indices range from 1 through BufferSize - 1. See the verification
// code verify() that counts the number of displaced headers spooled.
size_t computeBufferSize() {
return (size() * sizeof(HeapWord) - sizeof(*this)) / sizeof(markOop);
return (size() * sizeof(HeapWord) - sizeof(*this)) / sizeof(markWord);
}
public:
void init() {
bufferSize = computeBufferSize();
displacedHdr = (markOop*)&displacedHdr;
displacedHdr = (markWord*)&displacedHdr;
nextSpoolBlock = NULL;
}
@ -151,8 +151,8 @@ class PromotionInfo {
void track(PromotedObject* trackOop, Klass* klassOfOop); // keep track of a promoted oop
void setSpace(CompactibleFreeListSpace* sp) { _space = sp; }
CompactibleFreeListSpace* space() const { return _space; }
markOop nextDisplacedHeader(); // get next header & forward spool pointer
void saveDisplacedHeader(markOop hdr);
markWord nextDisplacedHeader(); // get next header & forward spool pointer
void saveDisplacedHeader(markWord hdr);
// save header and forward spool
inline size_t refillSize() const;

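A SpoolBlock is a chunk whose tail is an array of saved headers: saveDisplacedHeader appends at (_spoolTail, _nextIndex) and nextDisplacedHeader consumes from (_spoolHead, _firstIndex). A simplified one-block C++ model of the two operations; the real code chains blocks, recycles them, and asserts exactly the invariants quoted above:

```c++
#include <cassert>
#include <cstddef>
#include <cstdint>

using Mark = uintptr_t;

// One spool buffer; PromotionInfo links many of these together.
struct Spool {
  static constexpr size_t buffer_size = 256;
  size_t first_index = 0;                 // next header to hand back out
  size_t next_index  = 0;                 // next free save slot
  Mark   displaced_hdr[buffer_size];

  void save_displaced_header(Mark hdr) {
    assert(next_index < buffer_size && "real code spools to the next block here");
    displaced_hdr[next_index++] = hdr;
  }

  Mark next_displaced_header() {
    assert(first_index < next_index && "no displaced header can be fetched");
    return displaced_hdr[first_index++];
  }
};
```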
@ -3137,7 +3137,7 @@ void G1CollectedHeap::restore_after_evac_failure() {
phase_times()->record_evac_fail_remove_self_forwards((os::elapsedTime() - remove_self_forwards_start) * 1000.0);
}
void G1CollectedHeap::preserve_mark_during_evac_failure(uint worker_id, oop obj, markOop m) {
void G1CollectedHeap::preserve_mark_during_evac_failure(uint worker_id, oop obj, markWord m) {
if (!_evacuation_failed) {
_evacuation_failed = true;
}

@ -813,7 +813,7 @@ public:
// Preserve the mark of "obj", if necessary, in preparation for its mark
// word being overwritten with a self-forwarding-pointer.
void preserve_mark_during_evac_failure(uint worker_id, oop obj, markOop m);
void preserve_mark_during_evac_failure(uint worker_id, oop obj, markWord m);
#ifndef PRODUCT
// Support for forcing evacuation failures. Analogous to

@ -282,7 +282,7 @@ void G1FullCollector::verify_after_marking() {
// Note: we can verify only the heap here. When an object is
// marked, the previous value of the mark word (including
// identity hash values, ages, etc) is preserved, and the mark
// word is set to markOop::marked_value - effectively removing
// word is set to markWord::marked_value - effectively removing
// any hash values from the mark word. These hash values are
// used when verifying the dictionaries and so removing them
// from the mark word can make verification of the dictionaries

@ -116,11 +116,11 @@ void G1FullGCCompactionPoint::forward(oop object, size_t size) {
} else {
// Make sure object has the correct mark-word set or that it will be
// fixed when restoring the preserved marks.
assert(object->mark_raw() == markOopDesc::prototype_for_object(object) || // Correct mark
object->mark_raw()->must_be_preserved(object) || // Will be restored by PreservedMarksSet
assert(object->mark_raw() == markWord::prototype_for_object(object) || // Correct mark
object->mark_raw().must_be_preserved(object) || // Will be restored by PreservedMarksSet
(UseBiasedLocking && object->has_bias_pattern_raw()), // Will be restored by BiasedLocking
"should have correct prototype obj: " PTR_FORMAT " mark: " PTR_FORMAT " prototype: " PTR_FORMAT,
p2i(object), p2i(object->mark_raw()), p2i(markOopDesc::prototype_for_object(object)));
p2i(object), object->mark_raw().value(), markWord::prototype_for_object(object).value());
}
assert(object->forwardee() == NULL, "should be forwarded to NULL");
}

@ -50,8 +50,8 @@ inline bool G1FullGCMarker::mark_object(oop obj) {
}
// Marked by us, preserve if needed.
markOop mark = obj->mark_raw();
if (mark->must_be_preserved(obj) &&
markWord mark = obj->mark_raw();
if (mark.must_be_preserved(obj) &&
!G1ArchiveAllocator::is_open_archive_object(obj)) {
preserved_stack()->push(obj, mark);
}

@ -77,11 +77,11 @@ template <class T> inline void G1AdjustClosure::adjust_pointer(T* p) {
oop forwardee = obj->forwardee();
if (forwardee == NULL) {
// Not forwarded, return current reference.
assert(obj->mark_raw() == markOopDesc::prototype_for_object(obj) || // Correct mark
obj->mark_raw()->must_be_preserved(obj) || // Will be restored by PreservedMarksSet
assert(obj->mark_raw() == markWord::prototype_for_object(obj) || // Correct mark
obj->mark_raw().must_be_preserved(obj) || // Will be restored by PreservedMarksSet
(UseBiasedLocking && obj->has_bias_pattern_raw()), // Will be restored by BiasedLocking
"Must have correct prototype or be preserved, obj: " PTR_FORMAT ", mark: " PTR_FORMAT ", prototype: " PTR_FORMAT,
p2i(obj), p2i(obj->mark_raw()), p2i(markOopDesc::prototype_for_object(obj)));
p2i(obj), obj->mark_raw().value(), markWord::prototype_for_object(obj).value());
return;
}

@ -230,9 +230,9 @@ void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
const G1HeapRegionAttr state = _g1h->region_attr(obj);
if (state.is_in_cset()) {
oop forwardee;
markOop m = obj->mark_raw();
if (m->is_marked()) {
forwardee = (oop) m->decode_pointer();
markWord m = obj->mark_raw();
if (m.is_marked()) {
forwardee = (oop) m.decode_pointer();
} else {
forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);
}

@ -196,10 +196,10 @@ HeapWord* G1ParScanThreadState::allocate_in_next_plab(G1HeapRegionAttr const reg
}
}
G1HeapRegionAttr G1ParScanThreadState::next_region_attr(G1HeapRegionAttr const region_attr, markOop const m, uint& age) {
G1HeapRegionAttr G1ParScanThreadState::next_region_attr(G1HeapRegionAttr const region_attr, markWord const m, uint& age) {
if (region_attr.is_young()) {
age = !m->has_displaced_mark_helper() ? m->age()
: m->displaced_mark_helper()->age();
age = !m.has_displaced_mark_helper() ? m.age()
: m.displaced_mark_helper().age();
if (age < _tenuring_threshold) {
return region_attr;
}
@ -223,7 +223,7 @@ void G1ParScanThreadState::report_promotion_event(G1HeapRegionAttr const dest_at
oop G1ParScanThreadState::copy_to_survivor_space(G1HeapRegionAttr const region_attr,
oop const old,
markOop const old_mark) {
markWord const old_mark) {
const size_t word_sz = old->size();
HeapRegion* const from_region = _g1h->heap_region_containing(old);
// +1 to make the -1 indexes valid...
@ -281,18 +281,18 @@ oop G1ParScanThreadState::copy_to_survivor_space(G1HeapRegionAttr const region_a
Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
if (dest_attr.is_young()) {
if (age < markOopDesc::max_age) {
if (age < markWord::max_age) {
age++;
}
if (old_mark->has_displaced_mark_helper()) {
if (old_mark.has_displaced_mark_helper()) {
// In this case, we have to install the mark word first,
// otherwise obj looks to be forwarded (the old mark word,
// which contains the forward pointer, was copied)
obj->set_mark_raw(old_mark);
markOop new_mark = old_mark->displaced_mark_helper()->set_age(age);
old_mark->set_displaced_mark_helper(new_mark);
markWord new_mark = old_mark.displaced_mark_helper().set_age(age);
old_mark.set_displaced_mark_helper(new_mark);
} else {
obj->set_mark_raw(old_mark->set_age(age));
obj->set_mark_raw(old_mark.set_age(age));
}
_age_table.add(age, word_sz);
} else {
@ -376,7 +376,7 @@ void G1ParScanThreadStateSet::record_unused_optional_region(HeapRegion* hr) {
}
}
oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markOop m) {
oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m) {
assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old));
oop forward_ptr = old->forward_to_atomic(old, m, memory_order_relaxed);

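The age handling above reads through one level of indirection: if the object is locked, its header is displaced and the age lives in the saved copy, hence the displaced_mark_helper() detour. Once fetched, aging is a saturating increment on a four-bit field. A C++ sketch with the usual constants (age at shift 3, max_age 15); the displaced-header case is reduced to a comment:

```c++
#include <cstdint>

constexpr unsigned  age_shift = 3;    // just above the lock and biased-lock bits
constexpr uintptr_t age_mask  = 0xf;  // four age bits
constexpr unsigned  max_age   = 15;

unsigned age_of(uintptr_t mark) {
  // The real code first checks has_displaced_mark_helper() and, if set,
  // reads the age out of the displaced header instead of 'mark'.
  return (mark >> age_shift) & age_mask;
}

uintptr_t set_age(uintptr_t mark, unsigned age) {
  return (mark & ~(age_mask << age_shift)) |
         (uintptr_t(age & age_mask) << age_shift);
}

uintptr_t incr_age(uintptr_t mark) {  // saturates, as the copy loop above does
  unsigned a = age_of(mark);
  return a < max_age ? set_age(mark, a + 1) : mark;
}
```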
@ -203,7 +203,7 @@ private:
size_t word_sz,
bool previous_plab_refill_failed);
inline G1HeapRegionAttr next_region_attr(G1HeapRegionAttr const region_attr, markOop const m, uint& age);
inline G1HeapRegionAttr next_region_attr(G1HeapRegionAttr const region_attr, markWord const m, uint& age);
void report_promotion_event(G1HeapRegionAttr const dest_attr,
oop const old, size_t word_sz, uint age,
@ -214,7 +214,7 @@ private:
inline void trim_queue_to_threshold(uint threshold);
public:
oop copy_to_survivor_space(G1HeapRegionAttr const region_attr, oop const obj, markOop const old_mark);
oop copy_to_survivor_space(G1HeapRegionAttr const region_attr, oop const obj, markWord const old_mark);
void trim_queue();
void trim_queue_partially();
@ -225,7 +225,7 @@ public:
inline void steal_and_trim_queue(RefToScanQueueSet *task_queues);
// An attempt to evacuate "obj" has failed; take necessary steps.
oop handle_evacuation_failure_par(oop obj, markOop m);
oop handle_evacuation_failure_par(oop obj, markWord m);
template <typename T>
inline void remember_root_into_optional_region(T* p);

@ -53,9 +53,9 @@ template <class T> void G1ParScanThreadState::do_oop_evac(T* p) {
return;
}
markOop m = obj->mark_raw();
if (m->is_marked()) {
obj = (oop) m->decode_pointer();
markWord m = obj->mark_raw();
if (m.is_marked()) {
obj = (oop) m.decode_pointer();
} else {
obj = copy_to_survivor_space(region_attr, obj, m);
}

@ -75,7 +75,7 @@ PSMarkSweepDecorator* PSMarkSweepDecorator::destination_decorator() {
// The object forwarding code is duplicated. Factor this out!!!!!
//
// This method "precompacts" objects inside its space to dest. It places forwarding
// pointers into markOops for use by adjust_pointers. If "dest" should overflow, we
// pointers into markWords for use by adjust_pointers. If "dest" should overflow, we
// finish by compacting into our own space.
void PSMarkSweepDecorator::precompact() {
@ -113,8 +113,8 @@ void PSMarkSweepDecorator::precompact() {
const intx interval = PrefetchScanIntervalInBytes;
while (q < t) {
assert(oop(q)->mark_raw()->is_marked() || oop(q)->mark_raw()->is_unlocked() ||
oop(q)->mark_raw()->has_bias_pattern(),
assert(oop(q)->mark_raw().is_marked() || oop(q)->mark_raw().is_unlocked() ||
oop(q)->mark_raw().has_bias_pattern(),
"these are the only valid states during a mark sweep");
if (oop(q)->is_gc_marked()) {
/* prefetch beyond q */
@ -259,7 +259,7 @@ bool PSMarkSweepDecorator::insert_deadspace(size_t& allowed_deadspace_words,
if (allowed_deadspace_words >= deadlength) {
allowed_deadspace_words -= deadlength;
CollectedHeap::fill_with_object(q, deadlength);
oop(q)->set_mark_raw(oop(q)->mark_raw()->set_marked());
oop(q)->set_mark_raw(oop(q)->mark_raw().set_marked());
assert((int) deadlength == oop(q)->size(), "bad filler object size");
// Recall that we required "q == compaction_top".
return true;
@ -350,7 +350,7 @@ void PSMarkSweepDecorator::compact(bool mangle_free_space ) {
q = t;
} else {
// $$$ Funky
q = (HeapWord*) oop(_first_dead)->mark_raw()->decode_pointer();
q = (HeapWord*) oop(_first_dead)->mark_raw().decode_pointer();
}
}
@ -361,7 +361,7 @@ void PSMarkSweepDecorator::compact(bool mangle_free_space ) {
if (!oop(q)->is_gc_marked()) {
// mark is pointer to next marked oop
debug_only(prev_q = q);
q = (HeapWord*) oop(q)->mark_raw()->decode_pointer();
q = (HeapWord*) oop(q)->mark_raw().decode_pointer();
assert(q > prev_q, "we should be moving forward through memory");
} else {
// prefetch beyond q

@ -83,7 +83,7 @@ void PSPromotionLAB::flush() {
// so they can always fill with an array.
HeapWord* tlab_end = end() + filler_header_size;
typeArrayOop filler_oop = (typeArrayOop) top();
filler_oop->set_mark_raw(markOopDesc::prototype());
filler_oop->set_mark_raw(markWord::prototype());
filler_oop->set_klass(Universe::intArrayKlassObj());
const size_t array_length =
pointer_delta(tlab_end, top()) - typeArrayOopDesc::header_size(T_INT);

@ -345,7 +345,7 @@ void PSPromotionManager::process_array_chunk(oop old) {
}
}
oop PSPromotionManager::oop_promotion_failed(oop obj, markOop obj_mark) {
oop PSPromotionManager::oop_promotion_failed(oop obj, markWord obj_mark) {
assert(_old_gen_is_full || PromotionFailureALot, "Sanity");
// Attempt to CAS in the header.

@ -178,7 +178,7 @@ class PSPromotionManager {
// Promotion methods
template<bool promote_immediately> oop copy_to_survivor_space(oop o);
oop oop_promotion_failed(oop obj, markOop obj_mark);
oop oop_promotion_failed(oop obj, markWord obj_mark);
void reset();
void register_preserved_marks(PreservedMarks* preserved_marks);

@ -157,16 +157,16 @@ inline oop PSPromotionManager::copy_to_survivor_space(oop o) {
// NOTE! We must be very careful with any methods that access the mark
// in o. There may be multiple threads racing on it, and it may be forwarded
// at any time. Do not use oop methods for accessing the mark!
markOop test_mark = o->mark_raw();
markWord test_mark = o->mark_raw();
// The same test as "o->is_forwarded()"
if (!test_mark->is_marked()) {
if (!test_mark.is_marked()) {
bool new_obj_is_tenured = false;
size_t new_obj_size = o->size();
// Find the object's age, MT safe.
uint age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
test_mark->displaced_mark_helper()->age() : test_mark->age();
uint age = (test_mark.has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
test_mark.displaced_mark_helper().age() : test_mark.age();
if (!promote_immediately) {
// Try allocating obj in to-space (unless too old)
@ -260,7 +260,7 @@ inline oop PSPromotionManager::copy_to_survivor_space(oop o) {
assert(new_obj == o->forwardee(), "Sanity");
// Increment age if obj still in new generation. Now that
// we're dealing with a markOop that cannot change, it is
// we're dealing with a markWord that cannot change, it is
// okay to use the non mt safe oop methods.
if (!new_obj_is_tenured) {
new_obj->incr_age();

@ -431,7 +431,7 @@ bool PSScavenge::invoke_no_policy() {
heap->print_heap_before_gc();
heap->trace_heap_before_gc(&_gc_tracer);
assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
assert(!NeverTenure || _tenuring_threshold == markWord::max_age + 1, "Sanity");
assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");
// Fill in TLABs
@ -823,8 +823,8 @@ void PSScavenge::initialize() {
// Arguments must have been parsed
if (AlwaysTenure || NeverTenure) {
assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markOopDesc::max_age + 1,
"MaxTenuringThreshold should be 0 or markOopDesc::max_age + 1, but is %d", (int) MaxTenuringThreshold);
assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markWord::max_age + 1,
"MaxTenuringThreshold should be 0 or markWord::max_age + 1, but is %d", (int) MaxTenuringThreshold);
_tenuring_threshold = MaxTenuringThreshold;
} else {
// We want to smooth out our startup times for the AdaptiveSizePolicy

@ -749,7 +749,7 @@ void PSYoungGen::adjust_pointers() {
void PSYoungGen::compact() {
eden_mark_sweep()->compact(ZapUnusedHeapArea);
from_mark_sweep()->compact(ZapUnusedHeapArea);
// Mark sweep stores preserved markOops in to space, don't disturb!
// Mark sweep stores preserved markWords in to space, don't disturb!
to_mark_sweep()->compact(false);
}

@ -49,7 +49,7 @@ Stack<oop, mtGC> MarkSweep::_marking_stack;
Stack<ObjArrayTask, mtGC> MarkSweep::_objarray_stack;
Stack<oop, mtGC> MarkSweep::_preserved_oop_stack;
Stack<markOop, mtGC> MarkSweep::_preserved_mark_stack;
Stack<markWord, mtGC> MarkSweep::_preserved_mark_stack;
size_t MarkSweep::_preserved_count = 0;
size_t MarkSweep::_preserved_count_max = 0;
PreservedMark* MarkSweep::_preserved_marks = NULL;
@ -132,7 +132,7 @@ template <class T> inline void MarkSweep::follow_root(T* p) {
T heap_oop = RawAccess<>::oop_load(p);
if (!CompressedOops::is_null(heap_oop)) {
oop obj = CompressedOops::decode_not_null(heap_oop);
if (!obj->mark_raw()->is_marked()) {
if (!obj->mark_raw().is_marked()) {
mark_object(obj);
follow_object(obj);
}
@ -152,9 +152,9 @@ void PreservedMark::restore() {
}
// We preserve the mark which should be replaced at the end and the location
// that it will go. Note that the object that this markOop belongs to isn't
// that it will go. Note that the object that this markWord belongs to isn't
// currently at that address but it will be after phase4
void MarkSweep::preserve_mark(oop obj, markOop mark) {
void MarkSweep::preserve_mark(oop obj, markWord mark) {
// We try to store preserved marks in the to space of the new generation since
// this is storage which should be available. Most of the time this should be
// sufficient space for the marks we need to preserve but if it isn't we fall
@ -204,7 +204,7 @@ void MarkSweep::restore_marks() {
// deal with the overflow
while (!_preserved_oop_stack.is_empty()) {
oop obj = _preserved_oop_stack.pop();
markOop mark = _preserved_mark_stack.pop();
markWord mark = _preserved_mark_stack.pop();
obj->set_mark_raw(mark);
}
}

@ -100,7 +100,7 @@ class MarkSweep : AllStatic {
static Stack<ObjArrayTask, mtGC> _objarray_stack;
// Space for storing/restoring mark word
static Stack<markOop, mtGC> _preserved_mark_stack;
static Stack<markWord, mtGC> _preserved_mark_stack;
static Stack<oop, mtGC> _preserved_oop_stack;
static size_t _preserved_count;
static size_t _preserved_count_max;
@ -137,7 +137,7 @@ class MarkSweep : AllStatic {
static STWGCTimer* gc_timer() { return _gc_timer; }
static SerialOldTracer* gc_tracer() { return _gc_tracer; }
static void preserve_mark(oop p, markOop mark);
static void preserve_mark(oop p, markWord mark);
// Save the mark word so it can be restored later
static void adjust_marks(); // Adjust the pointers in the preserved marks table
static void restore_marks(); // Restore the marks that we saved in preserve_mark
@ -199,10 +199,10 @@ class AdjustPointerClosure: public BasicOopsInGenClosure {
class PreservedMark {
private:
oop _obj;
markOop _mark;
markWord _mark;
public:
void init(oop obj, markOop mark) {
void init(oop obj, markWord mark) {
_obj = obj;
_mark = mark;
}

@ -37,10 +37,10 @@
inline void MarkSweep::mark_object(oop obj) {
// some marks may contain information we need to preserve so we store them away
// and overwrite the mark. We'll restore it at the end of markSweep.
markOop mark = obj->mark_raw();
obj->set_mark_raw(markOopDesc::prototype()->set_marked());
markWord mark = obj->mark_raw();
obj->set_mark_raw(markWord::prototype().set_marked());
if (mark->must_be_preserved(obj)) {
if (mark.must_be_preserved(obj)) {
preserve_mark(obj, mark);
}
}
@ -49,7 +49,7 @@ template <class T> inline void MarkSweep::mark_and_push(T* p) {
T heap_oop = RawAccess<>::oop_load(p);
if (!CompressedOops::is_null(heap_oop)) {
oop obj = CompressedOops::decode_not_null(heap_oop);
if (!obj->mark_raw()->is_marked()) {
if (!obj->mark_raw().is_marked()) {
mark_object(obj);
_marking_stack.push(obj);
}
@ -78,11 +78,11 @@ template <class T> inline void MarkSweep::adjust_pointer(T* p) {
oop obj = CompressedOops::decode_not_null(heap_oop);
assert(Universe::heap()->is_in(obj), "should be in heap");
oop new_obj = oop(obj->mark_raw()->decode_pointer());
oop new_obj = oop(obj->mark_raw().decode_pointer());
assert(new_obj != NULL || // is forwarding ptr?
obj->mark_raw() == markOopDesc::prototype() || // not gc marked?
(UseBiasedLocking && obj->mark_raw()->has_bias_pattern()),
assert(new_obj != NULL || // is forwarding ptr?
obj->mark_raw() == markWord::prototype() || // not gc marked?
(UseBiasedLocking && obj->mark_raw().has_bias_pattern()),
// not gc marked?
"should be forwarded");

@ -78,8 +78,8 @@ uint AgeTable::compute_tenuring_threshold(size_t desired_survivor_size) {
uint result;
if (AlwaysTenure || NeverTenure) {
assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markOopDesc::max_age + 1,
"MaxTenuringThreshold should be 0 or markOopDesc::max_age + 1, but is " UINTX_FORMAT, MaxTenuringThreshold);
assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markWord::max_age + 1,
"MaxTenuringThreshold should be 0 or markWord::max_age + 1, but is " UINTX_FORMAT, MaxTenuringThreshold);
result = MaxTenuringThreshold;
} else {
size_t total = 0;

@ -41,7 +41,7 @@ class AgeTable {
public:
// constants
enum { table_size = markOopDesc::max_age + 1 };
enum { table_size = markWord::max_age + 1 };
// instance variables
size_t sizes[table_size];

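compute_tenuring_threshold, whose assertions appear above, walks the per-age size table from youngest upward, accumulating survivor volume, and returns the first age at which the running total would exceed the desired survivor size. A C++ sketch of that loop over a hypothetical table; the AlwaysTenure/NeverTenure cases short-circuit it entirely:

```c++
#include <cstddef>

constexpr unsigned table_size = 16;  // max_age + 1

// Objects at or above the returned age will be tenured.
unsigned compute_tenuring_threshold(const size_t sizes[table_size],
                                    size_t desired_survivor_size) {
  size_t   total = 0;
  unsigned age   = 1;
  while (age < table_size) {
    total += sizes[age];
    if (total > desired_survivor_size) break;
    age++;
  }
  return age;  // the real code additionally clamps with MaxTenuringThreshold
}
```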
@ -811,12 +811,12 @@
\
product(uintx, MaxTenuringThreshold, 15, \
"Maximum value for tenuring threshold") \
range(0, markOopDesc::max_age + 1) \
range(0, markWord::max_age + 1) \
constraint(MaxTenuringThresholdConstraintFunc,AfterErgo) \
\
product(uintx, InitialTenuringThreshold, 7, \
"Initial value for tenuring threshold") \
range(0, markOopDesc::max_age + 1) \
range(0, markWord::max_age + 1) \
constraint(InitialTenuringThresholdConstraintFunc,AfterErgo) \
\
product(uintx, TargetSurvivorRatio, 50, \

@ -178,7 +178,7 @@ oop Generation::promote(oop obj, size_t obj_size) {
}
oop Generation::par_promote(int thread_num,
oop obj, markOop m, size_t word_sz) {
oop obj, markWord m, size_t word_sz) {
// Could do a bad general impl here that gets a lock. But no.
ShouldNotCallThis();
return NULL;

@ -300,7 +300,7 @@ class Generation: public CHeapObj<mtGC> {
// word of "obj" may have been overwritten with a forwarding pointer, and
// also taking care to copy the klass pointer *last*. Returns the new
// object if successful, or else NULL.
virtual oop par_promote(int thread_num, oop obj, markOop m, size_t word_sz);
virtual oop par_promote(int thread_num, oop obj, markWord m, size_t word_sz);
// Informs the current generation that all par_promote_alloc's in the
// collection have been completed; any supporting data structures can be

@ -387,7 +387,7 @@ oop MemAllocator::finish(HeapWord* mem) const {
oopDesc::set_mark_raw(mem, _klass->prototype_header());
} else {
// May be bootstrapping
oopDesc::set_mark_raw(mem, markOopDesc::prototype());
oopDesc::set_mark_raw(mem, markWord::prototype());
}
// Need a release store to ensure array/class length, mark word, and
// object zeroing are visible before setting the klass non-NULL, for

@ -32,16 +32,16 @@
void PreservedMarks::restore() {
while (!_stack.is_empty()) {
const OopAndMarkOop elem = _stack.pop();
const OopAndMarkWord elem = _stack.pop();
elem.set_mark();
}
assert_empty();
}
void PreservedMarks::adjust_during_full_gc() {
StackIterator<OopAndMarkOop, mtGC> iter(_stack);
StackIterator<OopAndMarkWord, mtGC> iter(_stack);
while (!iter.is_empty()) {
OopAndMarkOop* elem = iter.next_addr();
OopAndMarkWord* elem = iter.next_addr();
oop obj = elem->get_oop();
if (obj->is_forwarded()) {

@ -35,28 +35,28 @@ class WorkGang;
class PreservedMarks {
private:
class OopAndMarkOop {
class OopAndMarkWord {
private:
oop _o;
markOop _m;
markWord _m;
public:
OopAndMarkOop(oop obj, markOop m) : _o(obj), _m(m) { }
OopAndMarkWord(oop obj, markWord m) : _o(obj), _m(m) { }
oop get_oop() { return _o; }
inline void set_mark() const;
void set_oop(oop obj) { _o = obj; }
};
typedef Stack<OopAndMarkOop, mtGC> OopAndMarkOopStack;
typedef Stack<OopAndMarkWord, mtGC> OopAndMarkWordStack;
OopAndMarkOopStack _stack;
OopAndMarkWordStack _stack;
inline bool should_preserve_mark(oop obj, markOop m) const;
inline bool should_preserve_mark(oop obj, markWord m) const;
public:
size_t size() const { return _stack.size(); }
inline void push(oop obj, markOop m);
inline void push_if_necessary(oop obj, markOop m);
inline void push(oop obj, markWord m);
inline void push_if_necessary(oop obj, markWord m);
// Iterate over the stack, restore all preserved marks, and
// reclaim the memory taken up by the stack segments.
void restore();

@ -30,17 +30,17 @@
#include "oops/oop.inline.hpp"
#include "utilities/stack.inline.hpp"
inline bool PreservedMarks::should_preserve_mark(oop obj, markOop m) const {
return m->must_be_preserved_for_promotion_failure(obj);
inline bool PreservedMarks::should_preserve_mark(oop obj, markWord m) const {
return m.must_be_preserved_for_promotion_failure(obj);
}
inline void PreservedMarks::push(oop obj, markOop m) {
inline void PreservedMarks::push(oop obj, markWord m) {
assert(should_preserve_mark(obj, m), "pre-condition");
OopAndMarkOop elem(obj, m);
OopAndMarkWord elem(obj, m);
_stack.push(elem);
}
inline void PreservedMarks::push_if_necessary(oop obj, markOop m) {
inline void PreservedMarks::push_if_necessary(oop obj, markWord m) {
if (should_preserve_mark(obj, m)) {
push(obj, m);
}
@ -72,14 +72,14 @@ inline void PreservedMarksSet::restore(RestorePreservedMarksTaskExecutor* execut
}
inline PreservedMarks::PreservedMarks()
: _stack(OopAndMarkOopStack::default_segment_size(),
: _stack(OopAndMarkWordStack::default_segment_size(),
// This stack should be used very infrequently so there's
// no point in caching stack segments (there will be a
// waste of space most of the time). So we set the max
// cache size to 0.
0 /* max_cache_size */) { }
void PreservedMarks::OopAndMarkOop::set_mark() const {
void PreservedMarks::OopAndMarkWord::set_mark() const {
_o->set_mark_raw(_m);
}

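The renamed OopAndMarkWord pairs spell out the contract: push only headers whose contents must be preserved (hash, lock state, age), let the GC overwrite the installed ones, and write every saved value back in restore(). A C++ model with a vector of pairs and an invented must_be_preserved predicate:

```c++
#include <cstdint>
#include <utility>
#include <vector>

constexpr uintptr_t prototype_mark = 1;

struct Obj { uintptr_t mark; };

// Hypothetical predicate: anything that is not the neutral prototype
// (a hash code, lock bits, a non-zero age) carries information to keep.
bool must_be_preserved(uintptr_t m) { return m != prototype_mark; }

class PreservedMarksModel {
  std::vector<std::pair<Obj*, uintptr_t>> _stack;  // OopAndMarkWord pairs
 public:
  void push_if_necessary(Obj* o, uintptr_t m) {
    if (must_be_preserved(m)) _stack.emplace_back(o, m);
  }
  void restore() {                                 // set_mark() per element
    for (auto& [o, m] : _stack) o->mark = m;
    _stack.clear();
  }
};
```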
@ -651,14 +651,14 @@ void ContiguousSpace::allocate_temporary_filler(int factor) {
// allocate uninitialized int array
typeArrayOop t = (typeArrayOop) allocate(size);
assert(t != NULL, "allocation should succeed");
t->set_mark_raw(markOopDesc::prototype());
t->set_mark_raw(markWord::prototype());
t->set_klass(Universe::intArrayKlassObj());
t->set_length((int)length);
} else {
assert(size == CollectedHeap::min_fill_size(),
"size for smallest fake object doesn't match");
instanceOop obj = (instanceOop) allocate(size);
obj->set_mark_raw(markOopDesc::prototype());
obj->set_mark_raw(markWord::prototype());
obj->set_klass_gap(0);
obj->set_klass(SystemDictionary::Object_klass());
}

@ -117,7 +117,7 @@ public:
_allowed_deadspace_words -= dead_length;
CollectedHeap::fill_with_object(dead_start, dead_length);
oop obj = oop(dead_start);
obj->set_mark_raw(obj->mark_raw()->set_marked());
obj->set_mark_raw(obj->mark_raw().set_marked());
assert(dead_length == (size_t)obj->size(), "bad filler object size");
log_develop_trace(gc, compaction)("Inserting object to dead space: " PTR_FORMAT ", " PTR_FORMAT ", " SIZE_FORMAT "b",
@ -164,8 +164,8 @@ inline void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* c
while (cur_obj < scan_limit) {
assert(!space->scanned_block_is_obj(cur_obj) ||
oop(cur_obj)->mark_raw()->is_marked() || oop(cur_obj)->mark_raw()->is_unlocked() ||
oop(cur_obj)->mark_raw()->has_bias_pattern(),
oop(cur_obj)->mark_raw().is_marked() || oop(cur_obj)->mark_raw().is_unlocked() ||
oop(cur_obj)->mark_raw().has_bias_pattern(),
"these are the only valid states during a mark sweep");
if (space->scanned_block_is_obj(cur_obj) && oop(cur_obj)->is_gc_marked()) {
// prefetch beyond cur_obj

@ -1458,9 +1458,9 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
phase->register_new_node(markword, ctrl);
// Test if object is forwarded. This is the case if lowest two bits are set.
Node* masked = new AndXNode(markword, phase->igvn().MakeConX(markOopDesc::lock_mask_in_place));
Node* masked = new AndXNode(markword, phase->igvn().MakeConX(markWord::lock_mask_in_place));
phase->register_new_node(masked, ctrl);
Node* cmp = new CmpXNode(masked, phase->igvn().MakeConX(markOopDesc::marked_value));
Node* cmp = new CmpXNode(masked, phase->igvn().MakeConX(markWord::marked_value));
phase->register_new_node(cmp, ctrl);
// Only branch to LRB stub if object is not forwarded; otherwise reply with fwd ptr

@ -61,7 +61,7 @@ void ShenandoahAsserts::print_obj(ShenandoahMessageBuffer& msg, oop obj) {
r->print_on(&ss);
stringStream mw_ss;
obj->mark()->print_on(&mw_ss);
obj->mark().print_on(&mw_ss);
ShenandoahMarkingContext* const ctx = heap->marking_context();

@ -35,9 +35,9 @@ inline HeapWord* ShenandoahForwarding::get_forwardee_raw(oop obj) {
}
inline HeapWord* ShenandoahForwarding::get_forwardee_raw_unchecked(oop obj) {
markOop mark = obj->mark_raw();
if (mark->is_marked()) {
return (HeapWord*) mark->clear_lock_bits();
markWord mark = obj->mark_raw();
if (mark.is_marked()) {
return (HeapWord*) mark.clear_lock_bits().to_pointer();
} else {
return (HeapWord*) obj;
}
@ -49,21 +49,21 @@ inline oop ShenandoahForwarding::get_forwardee(oop obj) {
}
inline bool ShenandoahForwarding::is_forwarded(oop obj) {
return obj->mark_raw()->is_marked();
return obj->mark_raw().is_marked();
}
inline oop ShenandoahForwarding::try_update_forwardee(oop obj, oop update) {
markOop old_mark = obj->mark_raw();
if (old_mark->is_marked()) {
return (oop) old_mark->clear_lock_bits();
markWord old_mark = obj->mark_raw();
if (old_mark.is_marked()) {
return oop(old_mark.clear_lock_bits().to_pointer());
}
markOop new_mark = markOopDesc::encode_pointer_as_mark(update);
markOop prev_mark = obj->cas_set_mark_raw(new_mark, old_mark);
markWord new_mark = markWord::encode_pointer_as_mark(update);
markWord prev_mark = obj->cas_set_mark_raw(new_mark, old_mark);
if (prev_mark == old_mark) {
return update;
} else {
return (oop) prev_mark->clear_lock_bits();
return oop(prev_mark.clear_lock_bits().to_pointer());
}
}
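
try_update_forwardee is a classic install-or-adopt CAS: either this thread publishes its copy as the forwardee, or it adopts whichever forwardee won the race. A hedged sketch with std::atomic standing in for cas_set_mark_raw (the constants and encoding are assumptions of this sketch):

#include <atomic>
#include <cassert>
#include <cstdint>

// Assumed lock-bit constants: two low bits, both set == forwarded.
constexpr uintptr_t lock_mask = 3;
constexpr uintptr_t marked    = 3;

// Encode a forwardee the way encode_pointer_as_mark does in spirit:
// the pointer value with the "marked" lock bits set.
uintptr_t encode(void* p)     { return reinterpret_cast<uintptr_t>(p) | marked; }
void*     decode(uintptr_t m) { return reinterpret_cast<void*>(m & ~lock_mask); }

// Returns the winning forwardee: ours on success, the racer's otherwise.
void* try_update_forwardee(std::atomic<uintptr_t>& mark, void* update) {
  uintptr_t old_mark = mark.load();
  if ((old_mark & lock_mask) == marked) {
    return decode(old_mark);               // already forwarded by someone else
  }
  uintptr_t new_mark = encode(update);
  if (mark.compare_exchange_strong(old_mark, new_mark)) {
    return update;                         // we installed the forwarding
  }
  return decode(old_mark);                 // lost the race; adopt the winner
}

int main() {
  alignas(8) static int copy;
  std::atomic<uintptr_t> mark{0x1};        // neutral header
  assert(try_update_forwardee(mark, &copy) == &copy);
  assert(try_update_forwardee(mark, nullptr) == &copy);  // second caller adopts
  return 0;
}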

@ -47,17 +47,17 @@ void ShenandoahStringDedup::enqueue_candidate(oop java_string) {
"Only from a GC worker thread");
if (java_string->age() <= StringDeduplicationAgeThreshold) {
const markOop mark = java_string->mark();
const markWord mark = java_string->mark();
// Has, or has had, a displaced header; too risky to deal with, so skip
if (mark == markOopDesc::INFLATING() || mark->has_displaced_mark_helper()) {
if (mark == markWord::INFLATING() || mark.has_displaced_mark_helper()) {
return;
}
// Increase string age and enqueue it when it reaches the age threshold
markOop new_mark = mark->incr_age();
markWord new_mark = mark.incr_age();
if (mark == java_string->cas_set_mark(new_mark, mark)) {
if (mark->age() == StringDeduplicationAgeThreshold) {
if (mark.age() == StringDeduplicationAgeThreshold) {
StringDedupQueue::push(ShenandoahWorkerSession::worker_id(), java_string);
}
}
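
The enqueue path bumps the 4-bit age field in the header with a CAS and only enqueues on the transition where the pre-CAS age equals the threshold, so exactly one thread enqueues a given string. A standalone model (age_shift == 3 and a 4-bit age field are assumed from the mark word layout):

#include <atomic>
#include <cassert>
#include <cstdint>

// Assumed layout: a 4-bit age field at shift 3, as in the mark word.
constexpr uintptr_t age_shift = 3;
constexpr uintptr_t age_mask  = 0xF;

uintptr_t age(uintptr_t m) { return (m >> age_shift) & age_mask; }

uintptr_t incr_age(uintptr_t m) {
  uintptr_t a = age(m);
  if (a == age_mask) return m;                     // saturate at max_age
  return (m & ~(age_mask << age_shift)) | ((a + 1) << age_shift);
}

// Bump the age with a CAS; report true only when this thread's successful
// CAS saw the pre-CAS age at the threshold (mirroring the check above).
bool bump_and_check(std::atomic<uintptr_t>& mark, uintptr_t threshold) {
  uintptr_t old_mark = mark.load();
  uintptr_t new_mark = incr_age(old_mark);
  return mark.compare_exchange_strong(old_mark, new_mark) &&
         age(old_mark) == threshold;
}

int main() {
  std::atomic<uintptr_t> mark{0x1};
  for (int i = 0; i < 3; i++) assert(!bump_and_check(mark, 3)); // ages 0..2
  assert(bump_and_check(mark, 3));                 // pre-CAS age is now 3
  return 0;
}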

@ -666,17 +666,17 @@ BytecodeInterpreter::run(interpreterState istate) {
BasicObjectLock* mon = &istate->monitor_base()[-1];
mon->set_obj(rcvr);
bool success = false;
uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
markOop mark = rcvr->mark();
intptr_t hash = (intptr_t) markOopDesc::no_hash;
uintptr_t epoch_mask_in_place = (uintptr_t)markWord::epoch_mask_in_place;
markWord mark = rcvr->mark();
intptr_t hash = (intptr_t) markWord::no_hash;
// Implies UseBiasedLocking.
if (mark->has_bias_pattern()) {
if (mark.has_bias_pattern()) {
uintptr_t thread_ident;
uintptr_t anticipated_bias_locking_value;
thread_ident = (uintptr_t)istate->thread();
anticipated_bias_locking_value =
(((uintptr_t)rcvr->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
~((uintptr_t) markOopDesc::age_mask_in_place);
(((uintptr_t)rcvr->klass()->prototype_header().value() | thread_ident) ^ mark.value()) &
~((uintptr_t) markWord::age_mask_in_place);
if (anticipated_bias_locking_value == 0) {
// Already biased towards this thread, nothing to do.
@ -684,11 +684,11 @@ BytecodeInterpreter::run(interpreterState istate) {
(* BiasedLocking::biased_lock_entry_count_addr())++;
}
success = true;
} else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
} else if ((anticipated_bias_locking_value & markWord::biased_lock_mask_in_place) != 0) {
// Try to revoke bias.
markOop header = rcvr->klass()->prototype_header();
if (hash != markOopDesc::no_hash) {
header = header->copy_set_hash(hash);
markWord header = rcvr->klass()->prototype_header();
if (hash != markWord::no_hash) {
header = header.copy_set_hash(hash);
}
if (rcvr->cas_set_mark(header, mark) == mark) {
if (PrintBiasedLockingStatistics)
@ -696,9 +696,9 @@ BytecodeInterpreter::run(interpreterState istate) {
}
} else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) {
// Try to rebias.
markOop new_header = (markOop) ( (intptr_t) rcvr->klass()->prototype_header() | thread_ident);
if (hash != markOopDesc::no_hash) {
new_header = new_header->copy_set_hash(hash);
markWord new_header( (intptr_t) rcvr->klass()->prototype_header().value() | thread_ident);
if (hash != markWord::no_hash) {
new_header = new_header.copy_set_hash(hash);
}
if (rcvr->cas_set_mark(new_header, mark) == mark) {
if (PrintBiasedLockingStatistics) {
@ -710,15 +710,15 @@ BytecodeInterpreter::run(interpreterState istate) {
success = true;
} else {
// Try to bias towards thread in case object is anonymously biased.
markOop header = (markOop) ((uintptr_t) mark &
((uintptr_t)markOopDesc::biased_lock_mask_in_place |
(uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
if (hash != markOopDesc::no_hash) {
header = header->copy_set_hash(hash);
markWord header(mark.value() &
((uintptr_t)markWord::biased_lock_mask_in_place |
(uintptr_t)markWord::age_mask_in_place | epoch_mask_in_place));
if (hash != markWord::no_hash) {
header = header.copy_set_hash(hash);
}
markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
markWord new_header(header.value() | thread_ident);
// Debugging hint.
DEBUG_ONLY(mon->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
DEBUG_ONLY(mon->lock()->set_displaced_header(markWord((uintptr_t) 0xdeaddead));)
if (rcvr->cas_set_mark(new_header, header) == header) {
if (PrintBiasedLockingStatistics) {
(* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
@ -732,13 +732,13 @@ BytecodeInterpreter::run(interpreterState istate) {
// Traditional lightweight locking.
if (!success) {
markOop displaced = rcvr->mark()->set_unlocked();
markWord displaced = rcvr->mark().set_unlocked();
mon->lock()->set_displaced_header(displaced);
bool call_vm = UseHeavyMonitors;
if (call_vm || rcvr->cas_set_mark((markOop)mon, displaced) != displaced) {
if (call_vm || rcvr->cas_set_mark(markWord::from_pointer(mon), displaced) != displaced) {
// Is it the simple recursive case?
if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
mon->lock()->set_displaced_header(NULL);
if (!call_vm && THREAD->is_lock_owned((address) displaced.clear_lock_bits().to_pointer())) {
mon->lock()->set_displaced_header(markWord::from_pointer(NULL));
} else {
CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
}
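
The anticipated_bias_locking_value trick above compresses four biased-locking decisions into one XOR: OR the klass prototype with the current thread, XOR with the object's mark, and mask out the age bits; zero means the object is already biased to this thread, and the surviving bit groups select revoke, rebias, or anonymous bias. A sketch with assumed constant values (matching the usual mark word layout):

#include <cassert>
#include <cstdint>

// Assumed mark word constants.
constexpr uintptr_t biased_lock_mask_in_place = 0x7;      // low 3 bits
constexpr uintptr_t biased_lock_pattern       = 0x5;      // "101": biased
constexpr uintptr_t age_mask_in_place         = 0xF << 3;
constexpr uintptr_t epoch_mask_in_place       = 0x3 << 7;

// Classify a mark against a thread + klass prototype in one XOR, the way
// the interpreter's anticipated_bias_locking_value does.
enum class BiasAction { OwnedByUs, RevokeBias, Rebias, BiasAnonymously };

BiasAction classify(uintptr_t prototype, uintptr_t thread, uintptr_t mark) {
  uintptr_t v = ((prototype | thread) ^ mark) & ~age_mask_in_place;
  if (v == 0)                                return BiasAction::OwnedByUs;
  if ((v & biased_lock_mask_in_place) != 0)  return BiasAction::RevokeBias;
  if ((v & epoch_mask_in_place) != 0)        return BiasAction::Rebias;
  return BiasAction::BiasAnonymously;
}

int main() {
  uintptr_t proto  = biased_lock_pattern;        // klass allows biasing
  uintptr_t thread = uintptr_t(1) << 12;         // fake, aligned thread address
  uintptr_t owned  = proto | thread;             // mark biased to this thread
  assert(classify(proto, thread, owned) == BiasAction::OwnedByUs);
  uintptr_t anon   = proto;                      // biased, but owner bits zero
  assert(classify(proto, thread, anon) == BiasAction::BiasAnonymously);
  return 0;
}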
@ -851,18 +851,18 @@ BytecodeInterpreter::run(interpreterState istate) {
assert(entry->obj() == NULL, "Frame manager didn't allocate the monitor");
entry->set_obj(lockee);
bool success = false;
uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
uintptr_t epoch_mask_in_place = (uintptr_t)markWord::epoch_mask_in_place;
markOop mark = lockee->mark();
intptr_t hash = (intptr_t) markOopDesc::no_hash;
markWord mark = lockee->mark();
intptr_t hash = (intptr_t) markWord::no_hash;
// implies UseBiasedLocking
if (mark->has_bias_pattern()) {
if (mark.has_bias_pattern()) {
uintptr_t thread_ident;
uintptr_t anticipated_bias_locking_value;
thread_ident = (uintptr_t)istate->thread();
anticipated_bias_locking_value =
(((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
~((uintptr_t) markOopDesc::age_mask_in_place);
(((uintptr_t)lockee->klass()->prototype_header().value() | thread_ident) ^ mark.value()) &
~((uintptr_t) markWord::age_mask_in_place);
if (anticipated_bias_locking_value == 0) {
// already biased towards this thread, nothing to do
@ -870,11 +870,11 @@ BytecodeInterpreter::run(interpreterState istate) {
(* BiasedLocking::biased_lock_entry_count_addr())++;
}
success = true;
} else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
} else if ((anticipated_bias_locking_value & markWord::biased_lock_mask_in_place) != 0) {
// try revoke bias
markOop header = lockee->klass()->prototype_header();
if (hash != markOopDesc::no_hash) {
header = header->copy_set_hash(hash);
markWord header = lockee->klass()->prototype_header();
if (hash != markWord::no_hash) {
header = header.copy_set_hash(hash);
}
if (lockee->cas_set_mark(header, mark) == mark) {
if (PrintBiasedLockingStatistics) {
@ -883,9 +883,9 @@ BytecodeInterpreter::run(interpreterState istate) {
}
} else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) {
// try rebias
markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
if (hash != markOopDesc::no_hash) {
new_header = new_header->copy_set_hash(hash);
markWord new_header( (intptr_t) lockee->klass()->prototype_header().value() | thread_ident);
if (hash != markWord::no_hash) {
new_header = new_header.copy_set_hash(hash);
}
if (lockee->cas_set_mark(new_header, mark) == mark) {
if (PrintBiasedLockingStatistics) {
@ -897,14 +897,14 @@ BytecodeInterpreter::run(interpreterState istate) {
success = true;
} else {
// try to bias towards thread in case object is anonymously biased
markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
(uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
if (hash != markOopDesc::no_hash) {
header = header->copy_set_hash(hash);
markWord header(mark.value() & ((uintptr_t)markWord::biased_lock_mask_in_place |
(uintptr_t)markWord::age_mask_in_place | epoch_mask_in_place));
if (hash != markWord::no_hash) {
header = header.copy_set_hash(hash);
}
markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
markWord new_header(header.value() | thread_ident);
// debugging hint
DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
DEBUG_ONLY(entry->lock()->set_displaced_header(markWord((uintptr_t) 0xdeaddead));)
if (lockee->cas_set_mark(new_header, header) == header) {
if (PrintBiasedLockingStatistics) {
(* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
@ -918,13 +918,13 @@ BytecodeInterpreter::run(interpreterState istate) {
// traditional lightweight locking
if (!success) {
markOop displaced = lockee->mark()->set_unlocked();
markWord displaced = lockee->mark().set_unlocked();
entry->lock()->set_displaced_header(displaced);
bool call_vm = UseHeavyMonitors;
if (call_vm || lockee->cas_set_mark((markOop)entry, displaced) != displaced) {
if (call_vm || lockee->cas_set_mark(markWord::from_pointer(entry), displaced) != displaced) {
// Is it the simple recursive case?
if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
entry->lock()->set_displaced_header(NULL);
if (!call_vm && THREAD->is_lock_owned((address) displaced.clear_lock_bits().to_pointer())) {
entry->lock()->set_displaced_header(markWord::from_pointer(NULL));
} else {
CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
}
@ -1791,18 +1791,18 @@ run:
if (entry != NULL) {
entry->set_obj(lockee);
int success = false;
uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
uintptr_t epoch_mask_in_place = (uintptr_t)markWord::epoch_mask_in_place;
markOop mark = lockee->mark();
intptr_t hash = (intptr_t) markOopDesc::no_hash;
markWord mark = lockee->mark();
intptr_t hash = (intptr_t) markWord::no_hash;
// implies UseBiasedLocking
if (mark->has_bias_pattern()) {
if (mark.has_bias_pattern()) {
uintptr_t thread_ident;
uintptr_t anticipated_bias_locking_value;
thread_ident = (uintptr_t)istate->thread();
anticipated_bias_locking_value =
(((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
~((uintptr_t) markOopDesc::age_mask_in_place);
(((uintptr_t)lockee->klass()->prototype_header().value() | thread_ident) ^ mark.value()) &
~((uintptr_t) markWord::age_mask_in_place);
if (anticipated_bias_locking_value == 0) {
// already biased towards this thread, nothing to do
@ -1811,11 +1811,11 @@ run:
}
success = true;
}
else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
else if ((anticipated_bias_locking_value & markWord::biased_lock_mask_in_place) != 0) {
// try revoke bias
markOop header = lockee->klass()->prototype_header();
if (hash != markOopDesc::no_hash) {
header = header->copy_set_hash(hash);
markWord header = lockee->klass()->prototype_header();
if (hash != markWord::no_hash) {
header = header.copy_set_hash(hash);
}
if (lockee->cas_set_mark(header, mark) == mark) {
if (PrintBiasedLockingStatistics)
@ -1824,9 +1824,9 @@ run:
}
else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) {
// try rebias
markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
if (hash != markOopDesc::no_hash) {
new_header = new_header->copy_set_hash(hash);
markWord new_header( (intptr_t) lockee->klass()->prototype_header().value() | thread_ident);
if (hash != markWord::no_hash) {
new_header = new_header.copy_set_hash(hash);
}
if (lockee->cas_set_mark(new_header, mark) == mark) {
if (PrintBiasedLockingStatistics)
@ -1839,15 +1839,15 @@ run:
}
else {
// try to bias towards thread in case object is anonymously biased
markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
(uintptr_t)markOopDesc::age_mask_in_place |
epoch_mask_in_place));
if (hash != markOopDesc::no_hash) {
header = header->copy_set_hash(hash);
markWord header(mark.value() & ((uintptr_t)markWord::biased_lock_mask_in_place |
(uintptr_t)markWord::age_mask_in_place |
epoch_mask_in_place));
if (hash != markWord::no_hash) {
header = header.copy_set_hash(hash);
}
markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
markWord new_header(header.value() | thread_ident);
// debugging hint
DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
DEBUG_ONLY(entry->lock()->set_displaced_header(markWord((uintptr_t) 0xdeaddead));)
if (lockee->cas_set_mark(new_header, header) == header) {
if (PrintBiasedLockingStatistics)
(* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
@ -1861,13 +1861,13 @@ run:
// traditional lightweight locking
if (!success) {
markOop displaced = lockee->mark()->set_unlocked();
markWord displaced = lockee->mark().set_unlocked();
entry->lock()->set_displaced_header(displaced);
bool call_vm = UseHeavyMonitors;
if (call_vm || lockee->cas_set_mark((markOop)entry, displaced) != displaced) {
if (call_vm || lockee->cas_set_mark(markWord::from_pointer(entry), displaced) != displaced) {
// Is it the simple recursive case?
if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
entry->lock()->set_displaced_header(NULL);
if (!call_vm && THREAD->is_lock_owned((address) displaced.clear_lock_bits().to_pointer())) {
entry->lock()->set_displaced_header(markWord::from_pointer(NULL));
} else {
CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
}
@ -1890,13 +1890,13 @@ run:
while (most_recent != limit ) {
if ((most_recent)->obj() == lockee) {
BasicLock* lock = most_recent->lock();
markOop header = lock->displaced_header();
markWord header = lock->displaced_header();
most_recent->set_obj(NULL);
if (!lockee->mark()->has_bias_pattern()) {
if (!lockee->mark().has_bias_pattern()) {
bool call_vm = UseHeavyMonitors;
// If it isn't recursive we either must swap old header or call the runtime
if (header != NULL || call_vm) {
markOop old_header = markOopDesc::encode(lock);
if (header.to_pointer() != NULL || call_vm) {
markWord old_header = markWord::encode(lock);
if (call_vm || lockee->cas_set_mark(header, old_header) != old_header) {
// restore object for the slow case
most_recent->set_obj(lockee);
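
The unlock fast path in this hunk hinges on the displaced header: zero (NULL as a pointer) marks a recursive entry, so there is nothing to restore; otherwise the saved header is CASed back, and a failed CAS means the mark changed underneath (e.g. the monitor was inflated) and the runtime must finish the job. In isolation, with assumed stand-in types:

#include <atomic>
#include <cassert>
#include <cstdint>

struct BasicLockSketch { uintptr_t displaced; };   // stand-in for BasicLock

// Zero displaced header == recursive entry, nothing to undo; otherwise
// swing the saved header back and bail to the runtime if the CAS fails.
bool fast_unlock(std::atomic<uintptr_t>& obj_mark, BasicLockSketch* lock) {
  if (lock->displaced == 0) return true;           // recursive case
  uintptr_t expected = reinterpret_cast<uintptr_t>(lock);  // mark encodes lock
  return obj_mark.compare_exchange_strong(expected, lock->displaced);
}

int main() {
  BasicLockSketch lock = { 0x1 };                  // saved unlocked header
  std::atomic<uintptr_t> mark{reinterpret_cast<uintptr_t>(&lock)};
  assert(fast_unlock(mark, &lock));                // header restored
  assert(mark.load() == 0x1);
  return 0;
}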
@ -2182,7 +2182,7 @@ run:
if (UseBiasedLocking) {
result->set_mark(ik->prototype_header());
} else {
result->set_mark(markOopDesc::prototype());
result->set_mark(markWord::prototype());
}
result->set_klass_gap(0);
result->set_klass(ik);
@ -3035,13 +3035,13 @@ run:
oop lockee = end->obj();
if (lockee != NULL) {
BasicLock* lock = end->lock();
markOop header = lock->displaced_header();
markWord header = lock->displaced_header();
end->set_obj(NULL);
if (!lockee->mark()->has_bias_pattern()) {
if (!lockee->mark().has_bias_pattern()) {
// If it isn't recursive we either must swap old header or call the runtime
if (header != NULL) {
markOop old_header = markOopDesc::encode(lock);
if (header.to_pointer() != NULL) {
markWord old_header = markWord::encode(lock);
if (lockee->cas_set_mark(header, old_header) != old_header) {
// restore object for the slow case
end->set_obj(lockee);
@ -3110,14 +3110,14 @@ run:
}
} else {
BasicLock* lock = base->lock();
markOop header = lock->displaced_header();
markWord header = lock->displaced_header();
base->set_obj(NULL);
if (!rcvr->mark()->has_bias_pattern()) {
if (!rcvr->mark().has_bias_pattern()) {
base->set_obj(NULL);
// If it isn't recursive we either must swap old header or call the runtime
if (header != NULL) {
markOop old_header = markOopDesc::encode(lock);
if (header.to_pointer() != NULL) {
markWord old_header = markWord::encode(lock);
if (rcvr->cas_set_mark(header, old_header) != old_header) {
// restore object for the slow case
base->set_obj(rcvr);

@ -131,7 +131,7 @@ void BFSClosure::closure_impl(const oop* reference, const oop pointee) {
if (!_mark_bits->is_marked(pointee)) {
_mark_bits->mark_obj(pointee);
// is the pointee a sample object?
if (NULL == pointee->mark()) {
if (NULL == pointee->mark().to_pointer()) {
add_chain(reference, pointee);
}
@ -148,7 +148,7 @@ void BFSClosure::closure_impl(const oop* reference, const oop pointee) {
void BFSClosure::add_chain(const oop* reference, const oop pointee) {
assert(pointee != NULL, "invariant");
assert(NULL == pointee->mark(), "invariant");
assert(NULL == pointee->mark().to_pointer(), "invariant");
Edge leak_edge(_current_parent, reference);
_edge_store->put_chain(&leak_edge, _current_parent == NULL ? 1 : _current_frontier_level + 2);
}

@ -121,7 +121,7 @@ void DFSClosure::closure_impl(const oop* reference, const oop pointee) {
assert(_mark_bits->is_marked(pointee), "invariant");
// is the pointee a sample object?
if (NULL == pointee->mark()) {
if (NULL == pointee->mark().to_pointer()) {
add_chain();
}

@ -233,8 +233,8 @@ StoredEdge* EdgeStore::associate_leak_context_with_candidate(const Edge* edge) {
StoredEdge* const leak_context_edge = put(edge->reference());
oop sample_object = edge->pointee();
assert(sample_object != NULL, "invariant");
assert(NULL == sample_object->mark(), "invariant");
sample_object->set_mark(markOop(leak_context_edge));
assert(NULL == sample_object->mark().to_pointer(), "invariant");
sample_object->set_mark(markWord::from_pointer(leak_context_edge));
return leak_context_edge;
}
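
The leak profiler stores a raw StoredEdge* in the sample object's header, which is exactly what the new from_pointer/to_pointer pair expresses: the mark is a word that can carry a pointer without being one. A cut-down model of that round trip (Word is an illustrative subset of markWord):

#include <cassert>
#include <cstdint>

// An illustrative subset of the markWord pointer conversions.
class Word {
  uintptr_t _v;
public:
  explicit Word(uintptr_t v) : _v(v) {}
  static Word from_pointer(void* p) { return Word(reinterpret_cast<uintptr_t>(p)); }
  void* to_pointer() const { return reinterpret_cast<void*>(_v); }
};

int main() {
  int edge = 0;                            // stands in for a StoredEdge
  Word mark = Word::from_pointer(&edge);
  assert(mark.to_pointer() == &edge);      // the round trip is exact
  assert(Word(0).to_pointer() == nullptr); // zero mark <=> "no sample edge"
  return 0;
}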

@ -35,7 +35,7 @@
#include "runtime/handles.inline.hpp"
bool EdgeUtils::is_leak_edge(const Edge& edge) {
return (const Edge*)edge.pointee()->mark() == &edge;
return (const Edge*)edge.pointee()->mark().to_pointer() == &edge;
}
static int field_offset(const StoredEdge& edge) {

@ -36,45 +36,45 @@
//
class ObjectSampleMarker : public StackObj {
private:
class ObjectSampleMarkOop : public ResourceObj {
class ObjectSampleMarkWord : public ResourceObj {
friend class ObjectSampleMarker;
private:
oop _obj;
markOop _mark_oop;
ObjectSampleMarkOop(const oop obj,
const markOop mark_oop) : _obj(obj),
_mark_oop(mark_oop) {}
markWord _mark_word;
ObjectSampleMarkWord(const oop obj,
const markWord mark_word) : _obj(obj),
_mark_word(mark_word) {}
public:
ObjectSampleMarkOop() : _obj(NULL), _mark_oop(NULL) {}
ObjectSampleMarkWord() : _obj(NULL), _mark_word(markWord::zero()) {}
};
GrowableArray<ObjectSampleMarkOop>* _store;
GrowableArray<ObjectSampleMarkWord>* _store;
public:
ObjectSampleMarker() :
_store(new GrowableArray<ObjectSampleMarkOop>(16)) {}
_store(new GrowableArray<ObjectSampleMarkWord>(16)) {}
~ObjectSampleMarker() {
assert(_store != NULL, "invariant");
// restore the saved, original, markOop for sample objects
// restore the saved, original, markWord for sample objects
while (_store->is_nonempty()) {
ObjectSampleMarkOop sample_oop = _store->pop();
sample_oop._obj->set_mark(sample_oop._mark_oop);
assert(sample_oop._obj->mark() == sample_oop._mark_oop, "invariant");
ObjectSampleMarkWord sample_oop = _store->pop();
sample_oop._obj->set_mark(sample_oop._mark_word);
assert(sample_oop._obj->mark() == sample_oop._mark_word, "invariant");
}
}
void mark(oop obj) {
assert(obj != NULL, "invariant");
// save the original markOop
_store->push(ObjectSampleMarkOop(obj, obj->mark()));
// save the original markWord
_store->push(ObjectSampleMarkWord(obj, obj->mark()));
// now we will "poison" the mark word of the sample object
// to the intermediate monitor INFLATING state.
// This is an "impossible" state during a safepoint,
// hence we will use it to quickly identify sample objects
// during the reachability search from gc roots.
assert(NULL == markOopDesc::INFLATING(), "invariant");
obj->set_mark(markOopDesc::INFLATING());
assert(NULL == obj->mark(), "invariant");
assert(NULL == markWord::INFLATING().to_pointer(), "invariant");
obj->set_mark(markWord::INFLATING());
assert(NULL == obj->mark().to_pointer(), "invariant");
}
};
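
ObjectSampleMarker's save/poison/restore shape is easy to show in isolation: remember each header, overwrite it with the "impossible" zero (INFLATING) value so samples can be recognized by a null check during the root scan, and put everything back when the marker goes out of scope. A hedged standalone sketch:

#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

struct Obj { uintptr_t mark; };            // illustrative stand-in for oop

// Save each sample's header, poison it with the otherwise-impossible zero
// ("INFLATING") value, and restore every saved header on destruction.
class SampleMarker {
  std::vector<std::pair<Obj*, uintptr_t> > _saved;
public:
  void mark(Obj* o) {
    _saved.push_back(std::make_pair(o, o->mark));
    o->mark = 0;                           // zero cannot occur at a safepoint
  }
  ~SampleMarker() {
    for (size_t i = 0; i < _saved.size(); i++) {
      _saved[i].first->mark = _saved[i].second;
    }
  }
};

int main() {
  Obj o = { 0x2a };
  {
    SampleMarker marker;
    marker.mark(&o);
    assert(o.mark == 0);                   // "is this a sample?" is a null check
  }
  assert(o.mark == 0x2a);                  // original header restored
  return 0;
}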

@ -111,7 +111,7 @@ void EventEmitter::write_event(const ObjectSample* sample, EdgeStore* edge_store
traceid gc_root_id = 0;
const Edge* edge = NULL;
if (SafepointSynchronize::is_at_safepoint()) {
edge = (const Edge*)(*object_addr)->mark();
edge = (const Edge*)(*object_addr)->mark().to_pointer();
}
if (edge == NULL) {
// In order to dump out a representation of the event

@ -27,42 +27,42 @@
#include "jfr/leakprofiler/utilities/saveRestore.hpp"
#include "oops/oop.inline.hpp"
MarkOopContext::MarkOopContext() : _obj(NULL), _mark_oop(NULL) {}
MarkWordContext::MarkWordContext() : _obj(NULL), _mark_word(markWord::zero()) {}
MarkOopContext::MarkOopContext(const oop obj) : _obj(obj), _mark_oop(obj->mark()) {
assert(_obj->mark() == _mark_oop, "invariant");
MarkWordContext::MarkWordContext(const oop obj) : _obj(obj), _mark_word(obj->mark()) {
assert(_obj->mark() == _mark_word, "invariant");
// now we will "poison" the mark word of the object
// to the intermediate monitor INFLATING state.
// This is an "impossible" state during a safepoint,
// hence we will use it to quickly identify objects
// during the reachability search from gc roots.
assert(NULL == markOopDesc::INFLATING(), "invariant");
_obj->set_mark(markOopDesc::INFLATING());
assert(NULL == obj->mark(), "invariant");
assert(markWord::zero() == markWord::INFLATING(), "invariant");
_obj->set_mark(markWord::INFLATING());
assert(markWord::zero() == obj->mark(), "invariant");
}
MarkOopContext::~MarkOopContext() {
MarkWordContext::~MarkWordContext() {
if (_obj != NULL) {
_obj->set_mark(_mark_oop);
assert(_obj->mark() == _mark_oop, "invariant");
_obj->set_mark(_mark_word);
assert(_obj->mark() == _mark_word, "invariant");
}
}
MarkOopContext::MarkOopContext(const MarkOopContext& rhs) : _obj(NULL), _mark_oop(NULL) {
swap(const_cast<MarkOopContext&>(rhs));
MarkWordContext::MarkWordContext(const MarkWordContext& rhs) : _obj(NULL), _mark_word(markWord::zero()) {
swap(const_cast<MarkWordContext&>(rhs));
}
void MarkOopContext::operator=(MarkOopContext rhs) {
void MarkWordContext::operator=(MarkWordContext rhs) {
swap(rhs);
}
void MarkOopContext::swap(MarkOopContext& rhs) {
void MarkWordContext::swap(MarkWordContext& rhs) {
oop temp_obj = rhs._obj;
markOop temp_mark_oop = rhs._mark_oop;
markWord temp_mark_word = rhs._mark_word;
rhs._obj = _obj;
rhs._mark_oop = _mark_oop;
rhs._mark_word = _mark_word;
_obj = temp_obj;
_mark_oop = temp_mark_oop;
_mark_word = temp_mark_word;
}
CLDClaimContext::CLDClaimContext() : _cld(NULL) {}

@ -78,20 +78,20 @@ public:
* The destructor will restore the original mark word.
*/
class MarkOopContext {
class MarkWordContext {
private:
oop _obj;
markOop _mark_oop;
void swap(MarkOopContext& rhs);
markWord _mark_word;
void swap(MarkWordContext& rhs);
public:
MarkOopContext();
MarkOopContext(const oop obj);
MarkOopContext(const MarkOopContext& rhs);
void operator=(MarkOopContext rhs);
~MarkOopContext();
MarkWordContext();
MarkWordContext(const oop obj);
MarkWordContext(const MarkWordContext& rhs);
void operator=(MarkWordContext rhs);
~MarkWordContext();
};
typedef SaveRestore<oop, ContextStore<oop, MarkOopContext> > SaveRestoreMarkOops;
typedef SaveRestore<oop, ContextStore<oop, MarkWordContext> > SaveRestoreMarkWords;
class ClassLoaderData;

@ -385,8 +385,8 @@ JRT_ENTRY_NO_ASYNC(void, JVMCIRuntime::monitorenter(JavaThread* thread, oopDesc*
IF_TRACE_jvmci_3 {
char type[O_BUFLEN];
obj->klass()->name()->as_C_string(type, O_BUFLEN);
markOop mark = obj->mark();
TRACE_jvmci_3("%s: entered locking slow case with obj=" INTPTR_FORMAT ", type=%s, mark=" INTPTR_FORMAT ", lock=" INTPTR_FORMAT, thread->name(), p2i(obj), type, p2i(mark), p2i(lock));
markWord mark = obj->mark();
TRACE_jvmci_3("%s: entered locking slow case with obj=" INTPTR_FORMAT ", type=%s, mark=" INTPTR_FORMAT ", lock=" INTPTR_FORMAT, thread->name(), p2i(obj), type, mark.value(), p2i(lock));
tty->flush();
}
if (PrintBiasedLockingStatistics) {
@ -435,7 +435,7 @@ JRT_LEAF(void, JVMCIRuntime::monitorexit(JavaThread* thread, oopDesc* obj, Basic
IF_TRACE_jvmci_3 {
char type[O_BUFLEN];
obj->klass()->name()->as_C_string(type, O_BUFLEN);
TRACE_jvmci_3("%s: exited locking slow case with obj=" INTPTR_FORMAT ", type=%s, mark=" INTPTR_FORMAT ", lock=" INTPTR_FORMAT, thread->name(), p2i(obj), type, p2i(obj->mark()), p2i(lock));
TRACE_jvmci_3("%s: exited locking slow case with obj=" INTPTR_FORMAT ", type=%s, mark=" INTPTR_FORMAT ", lock=" INTPTR_FORMAT, thread->name(), p2i(obj), type, obj->mark().value(), p2i(lock));
tty->flush();
}
JRT_END

@ -103,7 +103,7 @@
nonstatic_field(Array<Klass*>, _length, int) \
nonstatic_field(Array<Klass*>, _data[0], Klass*) \
\
volatile_nonstatic_field(BasicLock, _displaced_header, markOop) \
volatile_nonstatic_field(BasicLock, _displaced_header, markWord) \
\
static_field(CodeCache, _low_bound, address) \
static_field(CodeCache, _high_bound, address) \
@ -194,7 +194,7 @@
nonstatic_field(Klass, _subklass, Klass*) \
nonstatic_field(Klass, _layout_helper, jint) \
nonstatic_field(Klass, _name, Symbol*) \
nonstatic_field(Klass, _prototype_header, markOop) \
nonstatic_field(Klass, _prototype_header, markWord) \
nonstatic_field(Klass, _next_sibling, Klass*) \
nonstatic_field(Klass, _java_mirror, OopHandle) \
nonstatic_field(Klass, _modifier_flags, jint) \
@ -257,7 +257,7 @@
volatile_nonstatic_field(ObjectMonitor, _EntryList, ObjectWaiter*) \
volatile_nonstatic_field(ObjectMonitor, _succ, Thread*) \
\
volatile_nonstatic_field(oopDesc, _mark, markOop) \
volatile_nonstatic_field(oopDesc, _mark, markWord) \
volatile_nonstatic_field(oopDesc, _metadata._klass, Klass*) \
\
static_field(os, _polling_page, address) \
@ -563,7 +563,7 @@
declare_constant(Klass::_lh_array_tag_type_value) \
declare_constant(Klass::_lh_array_tag_obj_value) \
\
declare_constant(markOopDesc::no_hash) \
declare_constant(markWord::no_hash) \
\
declare_constant(Method::_caller_sensitive) \
declare_constant(Method::_force_inline) \
@ -595,19 +595,19 @@
declare_constant(InvocationCounter::count_increment) \
declare_constant(InvocationCounter::count_shift) \
\
declare_constant(markOopDesc::hash_shift) \
declare_constant(markWord::hash_shift) \
\
declare_constant(markOopDesc::biased_lock_mask_in_place) \
declare_constant(markOopDesc::age_mask_in_place) \
declare_constant(markOopDesc::epoch_mask_in_place) \
declare_constant(markOopDesc::hash_mask) \
declare_constant(markOopDesc::hash_mask_in_place) \
declare_constant(markWord::biased_lock_mask_in_place) \
declare_constant(markWord::age_mask_in_place) \
declare_constant(markWord::epoch_mask_in_place) \
declare_constant(markWord::hash_mask) \
declare_constant(markWord::hash_mask_in_place) \
\
declare_constant(markOopDesc::unlocked_value) \
declare_constant(markOopDesc::biased_lock_pattern) \
declare_constant(markWord::unlocked_value) \
declare_constant(markWord::biased_lock_pattern) \
\
declare_constant(markOopDesc::no_hash_in_place) \
declare_constant(markOopDesc::no_lock_in_place) \
declare_constant(markWord::no_hash_in_place) \
declare_constant(markWord::no_lock_in_place) \
#define VM_ADDRESSES(declare_address, declare_preprocessor_address, declare_function) \
declare_function(SharedRuntime::register_finalizer) \

@ -98,7 +98,7 @@ void HeapShared::fixup_mapped_heap_regions() {
}
unsigned HeapShared::oop_hash(oop const& p) {
assert(!p->mark()->has_bias_pattern(),
assert(!p->mark().has_bias_pattern(),
"this object should never have been locked"); // so identity_hash won't safepoin
unsigned hash = (unsigned)p->identity_hash();
return hash;

@ -1185,12 +1185,12 @@ uintptr_t Universe::verify_oop_bits() {
}
uintptr_t Universe::verify_mark_mask() {
return markOopDesc::lock_mask_in_place;
return markWord::lock_mask_in_place;
}
uintptr_t Universe::verify_mark_bits() {
intptr_t mask = verify_mark_mask();
intptr_t bits = (intptr_t)markOopDesc::prototype();
intptr_t bits = (intptr_t)markWord::prototype().value();
assert((bits & ~mask) == 0, "no stray header bits");
return bits;
}
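
In isolation, the verification amounts to: a prototype header has no bits outside the lock mask. A tiny sketch with the assumed constant values (no_hash_in_place == 0, no_lock_in_place == unlocked_value == 1):

#include <cassert>
#include <cstdint>

constexpr uintptr_t lock_mask_in_place = 3;
constexpr uintptr_t no_hash_in_place   = 0;   // assumed: empty hash field
constexpr uintptr_t no_lock_in_place   = 1;   // assumed: "unlocked" pattern

int main() {
  uintptr_t bits = no_hash_in_place | no_lock_in_place;  // prototype().value()
  assert((bits & ~lock_mask_in_place) == 0);             // no stray header bits
  return 0;
}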

@ -623,9 +623,9 @@ ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large,
initialize(size, alignment, large, NULL, false);
}
assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
assert(markWord::encode_pointer_as_mark(_base).decode_pointer() == _base,
"area must be distinguishable from marks for mark-sweep");
assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
assert(markWord::encode_pointer_as_mark(&_base[size]).decode_pointer() == &_base[size],
"area must be distinguishable from marks for mark-sweep");
if (base() != NULL) {

@ -34,7 +34,7 @@
// The layout of array Oops is:
//
// markOop
// markWord
// Klass* // 32 bits if compressed but declared 64 in LP64.
// length // shares klass memory or allocated after declared fields.

@ -195,7 +195,7 @@ void* Klass::operator new(size_t size, ClassLoaderData* loader_data, size_t word
// should be NULL before setting it.
Klass::Klass(KlassID id) : _id(id),
_java_mirror(NULL),
_prototype_header(markOopDesc::prototype()),
_prototype_header(markWord::prototype()),
_shared_class_path_index(-1) {
CDS_ONLY(_shared_class_flags = 0;)
CDS_JAVA_HEAP_ONLY(_archived_mirror = 0;)
@ -744,9 +744,9 @@ void Klass::oop_print_on(oop obj, outputStream* st) {
if (WizardMode) {
// print header
obj->mark()->print_on(st);
obj->mark().print_on(st);
st->cr();
st->print(BULLET"prototype_header: " INTPTR_FORMAT, p2i(_prototype_header));
st->print(BULLET"prototype_header: " INTPTR_FORMAT, _prototype_header.value());
st->cr();
}

@ -28,6 +28,7 @@
#include "classfile/classLoaderData.hpp"
#include "memory/iterator.hpp"
#include "memory/memRegion.hpp"
#include "oops/markOop.hpp"
#include "oops/metadata.hpp"
#include "oops/oop.hpp"
#include "oops/oopHandle.hpp"
@ -159,7 +160,7 @@ class Klass : public Metadata {
// Biased locking implementation and statistics
// (the 64-bit chunk goes first, to avoid some fragmentation)
jlong _last_biased_lock_bulk_revocation_time;
markOop _prototype_header; // Used when biased locking is both enabled and disabled for this type
markWord _prototype_header; // Used when biased locking is both enabled and disabled for this type
jint _biased_lock_revocation_count;
// vtable length
@ -619,9 +620,9 @@ protected:
// Biased locking support
// Note: the prototype header is always set up to be at least the
// prototype markOop. If biased locking is enabled it may further be
// prototype markWord. If biased locking is enabled it may further be
// biasable and have an epoch.
markOop prototype_header() const { return _prototype_header; }
markWord prototype_header() const { return _prototype_header; }
// NOTE: once instances of this klass are floating around in the
// system, this header must only be updated at a safepoint.
// NOTE 2: currently we only ever set the prototype header to the
@ -630,7 +631,7 @@ protected:
// wanting to reduce the initial scope of this optimization. There
// are potential problems in setting the bias pattern for
// JVM-internal oops.
inline void set_prototype_header(markOop header);
inline void set_prototype_header(markWord header);
static ByteSize prototype_header_offset() { return in_ByteSize(offset_of(Klass, _prototype_header)); }
int biased_lock_revocation_count() const { return (int) _biased_lock_revocation_count; }

@ -29,8 +29,8 @@
#include "oops/klass.hpp"
#include "oops/markOop.hpp"
inline void Klass::set_prototype_header(markOop header) {
assert(!header->has_bias_pattern() || is_instance_klass(), "biased locking currently only supported for Java instances");
inline void Klass::set_prototype_header(markWord header) {
assert(!header.has_bias_pattern() || is_instance_klass(), "biased locking currently only supported for Java instances");
_prototype_header = header;
}

@ -27,7 +27,7 @@
#include "runtime/thread.inline.hpp"
#include "runtime/objectMonitor.hpp"
void markOopDesc::print_on(outputStream* st) const {
void markWord::print_on(outputStream* st) const {
if (is_marked()) { // last bits = 11
st->print(" marked(" INTPTR_FORMAT ")", value());
} else if (has_monitor()) { // last bits = 10

@ -25,12 +25,11 @@
#ifndef SHARE_OOPS_MARKOOP_HPP
#define SHARE_OOPS_MARKOOP_HPP
#include "oops/oop.hpp"
#include "metaprogramming/integralConstant.hpp"
#include "metaprogramming/primitiveConversions.hpp"
#include "oops/oopsHierarchy.hpp"
// The markOop describes the header of an object.
//
// Note that the mark is not a real oop but just a word.
// It is placed in the oop hierarchy for historical reasons.
// The markWord describes the header of an object.
//
// Bit-format of an object header (most significant first, big endian layout below):
//
@ -101,12 +100,35 @@ class BasicLock;
class ObjectMonitor;
class JavaThread;
class markOopDesc: public oopDesc {
class markWord {
private:
// Conversion
uintptr_t value() const { return (uintptr_t) this; }
uintptr_t _value;
public:
explicit markWord(uintptr_t value) : _value(value) {}
markWord() { /* uninitialized */}
// It is critical for performance that this class be trivially
// destructible, copyable, and assignable.
static markWord from_pointer(void* ptr) {
return markWord((uintptr_t)ptr);
}
void* to_pointer() const {
return (void*)_value;
}
bool operator==(const markWord& other) const {
return _value == other._value;
}
bool operator!=(const markWord& other) const {
return !operator==(other);
}
// Conversion
uintptr_t value() const { return _value; }
// Constants
enum { age_bits = 4,
lock_bits = 2,
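
This is the heart of the change: the mark is no longer a fake oopDesc* but a value class wrapping one uintptr_t. A cut-down model showing the properties the comment above demands, which are what make pass-by-value and word-sized atomic access safe (MarkWordModel is illustrative, not the real class):

#include <cstdint>
#include <type_traits>

// A cut-down model of the new value carrier (the real markWord adds the
// full bit-field API on top of this shape).
class MarkWordModel {
  uintptr_t _value;
public:
  explicit MarkWordModel(uintptr_t value) : _value(value) {}
  MarkWordModel() { /* uninitialized, like the real class */ }
  uintptr_t value() const { return _value; }
  bool operator==(const MarkWordModel& other) const { return _value == other._value; }
  bool operator!=(const MarkWordModel& other) const { return !operator==(other); }
};

// The properties the comment demands: trivially destructible and copyable,
// and exactly one word wide.
static_assert(std::is_trivially_copyable<MarkWordModel>::value, "copyable");
static_assert(std::is_trivially_destructible<MarkWordModel>::value, "destructible");
static_assert(sizeof(MarkWordModel) == sizeof(uintptr_t), "one word");

int main() {
  return MarkWordModel(0x1) == MarkWordModel(0x1) ? 0 : 1;
}

The old markOopDesc obtained the same word by abusing its own `this` as the value, as the removed value() accessor above shows; the wrapper makes the word an honest data member instead.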
@ -164,6 +186,9 @@ class markOopDesc: public oopDesc {
enum { max_bias_epoch = epoch_mask };
// Creates a markWord with all bits set to zero.
static markWord zero() { return markWord(uintptr_t(0)); }
// Biased Locking accessors.
// These must be checked by all code which calls into the
// ObjectSynchronizer and other code. The biasing is not understood
@ -189,17 +214,17 @@ class markOopDesc: public oopDesc {
assert(has_bias_pattern(), "should not call this otherwise");
return (mask_bits(value(), epoch_mask_in_place) >> epoch_shift);
}
markOop set_bias_epoch(int epoch) {
markWord set_bias_epoch(int epoch) {
assert(has_bias_pattern(), "should not call this otherwise");
assert((epoch & (~epoch_mask)) == 0, "epoch overflow");
return markOop(mask_bits(value(), ~epoch_mask_in_place) | (epoch << epoch_shift));
return markWord(mask_bits(value(), ~epoch_mask_in_place) | (epoch << epoch_shift));
}
markOop incr_bias_epoch() {
markWord incr_bias_epoch() {
return set_bias_epoch((1 + bias_epoch()) & epoch_mask);
}
// Prototype mark for initialization
static markOop biased_locking_prototype() {
return markOop( biased_lock_pattern );
static markWord biased_locking_prototype() {
return markWord( biased_lock_pattern );
}
// lock accessors (note that these assume lock_shift == 0)
@ -214,7 +239,7 @@ class markOopDesc: public oopDesc {
}
bool is_neutral() const { return (mask_bits(value(), biased_lock_mask_in_place) == unlocked_value); }
// Special temporary state of the markOop while being inflated.
// Special temporary state of the markWord while being inflated.
// Code that looks at the mark outside a lock needs to take this into account.
bool is_being_inflated() const { return (value() == 0); }
@ -224,7 +249,7 @@ class markOopDesc: public oopDesc {
// check for and avoid overwriting a 0 value installed by some
// other thread. (They should spin or block instead. The 0 value
// is transient and *should* be short-lived).
static markOop INFLATING() { return (markOop) 0; } // inflate-in-progress
static markWord INFLATING() { return zero(); } // inflate-in-progress
// Should this header be preserved during GC?
inline bool must_be_preserved(oop obj_containing_mark) const;
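
Because zero now means "monitor inflation in progress", readers that hit a zero mark must not install anything over it; they re-read until the inflating thread publishes the real monitor mark. A minimal reader loop under that assumption (std::atomic stands in for the runtime's header access):

#include <atomic>
#include <cstdint>

// Any reader that sees a zero mark must wait: zero is the transient
// INFLATING state and must never be overwritten by other threads.
uintptr_t read_stable_mark(const std::atomic<uintptr_t>& mark) {
  for (;;) {
    uintptr_t m = mark.load(std::memory_order_acquire);
    if (m != 0) return m;          // 0 == INFLATING(): keep re-reading
    // a real runtime would spin with backoff or block here
  }
}

int main() {
  std::atomic<uintptr_t> mark{0x1};
  return read_stable_mark(mark) == 0x1 ? 0 : 1;
}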
@ -259,9 +284,9 @@ class markOopDesc: public oopDesc {
// WARNING: The following routines are used EXCLUSIVELY by
// synchronization functions. They are not really gc safe.
// They must get updated if markOop layout get changed.
markOop set_unlocked() const {
return markOop(value() | unlocked_value);
// They must get updated if the markWord layout gets changed.
markWord set_unlocked() const {
return markWord(value() | unlocked_value);
}
bool has_locker() const {
return ((value() & lock_mask_in_place) == locked_value);
@ -281,56 +306,56 @@ class markOopDesc: public oopDesc {
bool has_displaced_mark_helper() const {
return ((value() & unlocked_value) == 0);
}
markOop displaced_mark_helper() const {
markWord displaced_mark_helper() const {
assert(has_displaced_mark_helper(), "check");
intptr_t ptr = (value() & ~monitor_value);
return *(markOop*)ptr;
return *(markWord*)ptr;
}
void set_displaced_mark_helper(markOop m) const {
void set_displaced_mark_helper(markWord m) const {
assert(has_displaced_mark_helper(), "check");
intptr_t ptr = (value() & ~monitor_value);
*(markOop*)ptr = m;
((markWord*)ptr)->_value = m._value;
}
markOop copy_set_hash(intptr_t hash) const {
markWord copy_set_hash(intptr_t hash) const {
intptr_t tmp = value() & (~hash_mask_in_place);
tmp |= ((hash & hash_mask) << hash_shift);
return (markOop)tmp;
return markWord(tmp);
}
// it is only ever stored into a BasicLock, as the
// indicator that the lock is using a heavyweight monitor
static markOop unused_mark() {
return (markOop) marked_value;
static markWord unused_mark() {
return markWord(marked_value);
}
// the following two functions create the markOop to be
// the following two functions create the markWord to be
// stored into the object header; it encodes monitor info
static markOop encode(BasicLock* lock) {
return (markOop) lock;
static markWord encode(BasicLock* lock) {
return from_pointer(lock);
}
static markOop encode(ObjectMonitor* monitor) {
static markWord encode(ObjectMonitor* monitor) {
intptr_t tmp = (intptr_t) monitor;
return (markOop) (tmp | monitor_value);
return markWord(tmp | monitor_value);
}
static markOop encode(JavaThread* thread, uint age, int bias_epoch) {
static markWord encode(JavaThread* thread, uint age, int bias_epoch) {
intptr_t tmp = (intptr_t) thread;
assert(UseBiasedLocking && ((tmp & (epoch_mask_in_place | age_mask_in_place | biased_lock_mask_in_place)) == 0), "misaligned JavaThread pointer");
assert(age <= max_age, "age too large");
assert(bias_epoch <= max_bias_epoch, "bias epoch too large");
return (markOop) (tmp | (bias_epoch << epoch_shift) | (age << age_shift) | biased_lock_pattern);
return markWord(tmp | (bias_epoch << epoch_shift) | (age << age_shift) | biased_lock_pattern);
}
// used to encode pointers during GC
markOop clear_lock_bits() { return markOop(value() & ~lock_mask_in_place); }
markWord clear_lock_bits() { return markWord(value() & ~lock_mask_in_place); }
// age operations
markOop set_marked() { return markOop((value() & ~lock_mask_in_place) | marked_value); }
markOop set_unmarked() { return markOop((value() & ~lock_mask_in_place) | unlocked_value); }
markWord set_marked() { return markWord((value() & ~lock_mask_in_place) | marked_value); }
markWord set_unmarked() { return markWord((value() & ~lock_mask_in_place) | unlocked_value); }
uint age() const { return mask_bits(value() >> age_shift, age_mask); }
markOop set_age(uint v) const {
uint age() const { return mask_bits(value() >> age_shift, age_mask); }
markWord set_age(uint v) const {
assert((v & ~age_mask) == 0, "shouldn't overflow age field");
return markOop((value() & ~age_mask_in_place) | (((uintptr_t)v & age_mask) << age_shift));
return markWord((value() & ~age_mask_in_place) | (((uintptr_t)v & age_mask) << age_shift));
}
markOop incr_age() const { return age() == max_age ? markOop(this) : set_age(age() + 1); }
markWord incr_age() const { return age() == max_age ? markWord(_value) : set_age(age() + 1); }
// hash operations
intptr_t hash() const {
@ -342,24 +367,24 @@ class markOopDesc: public oopDesc {
}
// Prototype mark for initialization
static markOop prototype() {
return markOop( no_hash_in_place | no_lock_in_place );
static markWord prototype() {
return markWord( no_hash_in_place | no_lock_in_place );
}
// Helper function for restoration of unmarked mark words during GC
static inline markOop prototype_for_object(oop obj);
static inline markWord prototype_for_object(oop obj);
// Debugging
void print_on(outputStream* st) const;
// Prepare address of oop for placement into mark
inline static markOop encode_pointer_as_mark(void* p) { return markOop(p)->set_marked(); }
inline static markWord encode_pointer_as_mark(void* p) { return from_pointer(p).set_marked(); }
// Recover address of oop from encoded form used in mark
inline void* decode_pointer() { if (UseBiasedLocking && has_bias_pattern()) return NULL; return clear_lock_bits(); }
inline void* decode_pointer() { if (UseBiasedLocking && has_bias_pattern()) return NULL; return (void*)clear_lock_bits().value(); }
// These markOops indicate cms free chunk blocks and not objects.
// In 64 bit, the markOop is set to distinguish them from oops.
// These markWords indicate cms free chunk blocks and not objects.
// In 64 bit, the markWord is set to distinguish them from oops.
// These are defined in 32 bit mode for vmStructs.
const static uintptr_t cms_free_chunk_pattern = 0x1;
@ -374,9 +399,9 @@ class markOopDesc: public oopDesc {
(address_word)size_mask << size_shift;
#ifdef _LP64
static markOop cms_free_prototype() {
return markOop(((intptr_t)prototype() & ~cms_mask_in_place) |
((cms_free_chunk_pattern & cms_mask) << cms_shift));
static markWord cms_free_prototype() {
return markWord(((intptr_t)prototype().value() & ~cms_mask_in_place) |
((cms_free_chunk_pattern & cms_mask) << cms_shift));
}
uintptr_t cms_encoding() const {
return mask_bits(value() >> cms_shift, cms_mask);
@ -387,12 +412,22 @@ class markOopDesc: public oopDesc {
}
size_t get_size() const { return (size_t)(value() >> size_shift); }
static markOop set_size_and_free(size_t size) {
static markWord set_size_and_free(size_t size) {
assert((size & ~size_mask) == 0, "shouldn't overflow size field");
return markOop(((intptr_t)cms_free_prototype() & ~size_mask_in_place) |
(((intptr_t)size & size_mask) << size_shift));
return markWord(((intptr_t)cms_free_prototype().value() & ~size_mask_in_place) |
(((intptr_t)size & size_mask) << size_shift));
}
#endif // _LP64
};
// Support atomic operations.
template<>
struct PrimitiveConversions::Translate<markWord> : public TrueType {
typedef markWord Value;
typedef uintptr_t Decayed;
static Decayed decay(const Value& x) { return x.value(); }
static Value recover(Decayed x) { return Value(x); }
};
#endif // SHARE_OOPS_MARKOOP_HPP
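
The Translate specialization is what lets HotSpot's Atomic layer treat markWord as its underlying word: decay to uintptr_t, run the primitive operation, recover a markWord from the result. A sketch of that shape using std::atomic in place of HotSpot's Atomic (an assumption of this sketch):

#include <atomic>
#include <cassert>
#include <cstdint>

// Decay the value type to its word, do the primitive op, recover the type.
struct Mark { uintptr_t v; };
uintptr_t decay(Mark m)        { return m.v; }
Mark      recover(uintptr_t v) { return Mark{v}; }

Mark cmpxchg(std::atomic<uintptr_t>& where, Mark expected, Mark desired) {
  uintptr_t e = decay(expected);
  where.compare_exchange_strong(e, decay(desired));
  return recover(e);                       // old value, mark-typed again
}

int main() {
  std::atomic<uintptr_t> header{0x1};
  Mark old_mark = cmpxchg(header, Mark{0x1}, Mark{0x3});
  assert(old_mark.v == 0x1 && header.load() == 0x3);
  return 0;
}

This is also why the vmStructs hunks above can declare oopDesc::_mark and BasicLock::_displaced_header as plain volatile markWord fields.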

@ -31,15 +31,15 @@
#include "runtime/globals.hpp"
// Should this header be preserved during GC (when biased locking is enabled)?
inline bool markOopDesc::must_be_preserved_with_bias(oop obj_containing_mark) const {
inline bool markWord::must_be_preserved_with_bias(oop obj_containing_mark) const {
assert(UseBiasedLocking, "unexpected");
if (has_bias_pattern()) {
// Will reset bias at end of collection
// Mark words of biased and currently locked objects are preserved separately
return false;
}
markOop prototype_header = prototype_for_object(obj_containing_mark);
if (prototype_header->has_bias_pattern()) {
markWord prototype_header = prototype_for_object(obj_containing_mark);
if (prototype_header.has_bias_pattern()) {
// Individual instance which has its bias revoked; must return
// true for correctness
return true;
@ -48,7 +48,7 @@ inline bool markOopDesc::must_be_preserved_with_bias(oop obj_containing_mark) co
}
// Should this header be preserved during GC?
inline bool markOopDesc::must_be_preserved(oop obj_containing_mark) const {
inline bool markWord::must_be_preserved(oop obj_containing_mark) const {
if (!UseBiasedLocking)
return (!is_unlocked() || !has_no_hash());
return must_be_preserved_with_bias(obj_containing_mark);
@ -56,7 +56,7 @@ inline bool markOopDesc::must_be_preserved(oop obj_containing_mark) const {
// Should this header be preserved in the case of a promotion failure
// during scavenge (when biased locking is enabled)?
inline bool markOopDesc::must_be_preserved_with_bias_for_promotion_failure(oop obj_containing_mark) const {
inline bool markWord::must_be_preserved_with_bias_for_promotion_failure(oop obj_containing_mark) const {
assert(UseBiasedLocking, "unexpected");
// We don't explicitly save off the mark words of biased and
// currently-locked objects during scavenges, so if during a
@ -68,7 +68,7 @@ inline bool markOopDesc::must_be_preserved_with_bias_for_promotion_failure(oop o
// BiasedLocking::preserve_marks() / restore_marks() in the middle
// of a scavenge when a promotion failure has first been detected.
if (has_bias_pattern() ||
prototype_for_object(obj_containing_mark)->has_bias_pattern()) {
prototype_for_object(obj_containing_mark).has_bias_pattern()) {
return true;
}
return (!is_unlocked() || !has_no_hash());
@ -76,7 +76,7 @@ inline bool markOopDesc::must_be_preserved_with_bias_for_promotion_failure(oop o
// Should this header be preserved in the case of a promotion failure
// during scavenge?
inline bool markOopDesc::must_be_preserved_for_promotion_failure(oop obj_containing_mark) const {
inline bool markWord::must_be_preserved_for_promotion_failure(oop obj_containing_mark) const {
if (!UseBiasedLocking)
return (!is_unlocked() || !has_no_hash());
return must_be_preserved_with_bias_for_promotion_failure(obj_containing_mark);
@ -85,11 +85,11 @@ inline bool markOopDesc::must_be_preserved_for_promotion_failure(oop obj_contain
// Same as must_be_preserved_with_bias_for_promotion_failure() except that
// it takes a Klass* argument, instead of the object of which this is the mark word.
inline bool markOopDesc::must_be_preserved_with_bias_for_cms_scavenge(Klass* klass_of_obj_containing_mark) const {
inline bool markWord::must_be_preserved_with_bias_for_cms_scavenge(Klass* klass_of_obj_containing_mark) const {
assert(UseBiasedLocking, "unexpected");
// CMS scavenges preserve mark words in similar fashion to promotion failures; see above
if (has_bias_pattern() ||
klass_of_obj_containing_mark->prototype_header()->has_bias_pattern()) {
klass_of_obj_containing_mark->prototype_header().has_bias_pattern()) {
return true;
}
return (!is_unlocked() || !has_no_hash());
@ -97,16 +97,16 @@ inline bool markOopDesc::must_be_preserved_with_bias_for_cms_scavenge(Klass* kla
// Same as must_be_preserved_for_promotion_failure() except that
// it takes a Klass* argument, instead of the object of which this is the mark word.
inline bool markOopDesc::must_be_preserved_for_cms_scavenge(Klass* klass_of_obj_containing_mark) const {
inline bool markWord::must_be_preserved_for_cms_scavenge(Klass* klass_of_obj_containing_mark) const {
if (!UseBiasedLocking)
return (!is_unlocked() || !has_no_hash());
return must_be_preserved_with_bias_for_cms_scavenge(klass_of_obj_containing_mark);
}
inline markOop markOopDesc::prototype_for_object(oop obj) {
inline markWord markWord::prototype_for_object(oop obj) {
#ifdef ASSERT
markOop prototype_header = obj->klass()->prototype_header();
assert(prototype_header == prototype() || prototype_header->has_bias_pattern(), "corrupt prototype header");
markWord prototype_header = obj->klass()->prototype_header();
assert(prototype_header == prototype() || prototype_header.has_bias_pattern(), "corrupt prototype header");
#endif
return obj->klass()->prototype_header();
}
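
Stripped of the biased-locking cases, the preservation rule in this file is one predicate: a header must be saved across GC iff it is not a plain unlocked, unhashed prototype. A standalone rendering (hash_shift and hash_mask are assumed from the 64-bit layout):

#include <cassert>
#include <cstdint>

// Assumed 64-bit layout: a 31-bit hash field at shift 8; two lock bits.
constexpr uintptr_t lock_mask  = 3;
constexpr uintptr_t unlocked   = 1;
constexpr uintptr_t hash_shift = 8;
constexpr uintptr_t hash_mask  = 0x7FFFFFFF;

bool is_unlocked(uintptr_t m) { return (m & lock_mask) == unlocked; }
bool has_no_hash(uintptr_t m) { return ((m >> hash_shift) & hash_mask) == 0; }

// Save the header iff it carries information a fresh prototype would lose.
bool must_be_preserved(uintptr_t m) { return !is_unlocked(m) || !has_no_hash(m); }

int main() {
  assert(!must_be_preserved(0x1));                          // neutral prototype
  assert(must_be_preserved(0x0));                           // locked/INFLATING
  assert(must_be_preserved((uintptr_t(0x2a) << hash_shift) | unlocked)); // hashed
  return 0;
}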

@ -2067,7 +2067,7 @@ class JNIMethodBlock : public CHeapObj<mtClass> {
#endif // PRODUCT
};
// Something that can't be mistaken for an address or a markOop
// Something that can't be mistaken for an address or a markWord
Method* const JNIMethodBlock::_free_method = (Method*)55;
JNIMethodBlockNode::JNIMethodBlockNode(int num_methods) : _top(0), _next(NULL) {

@ -37,7 +37,7 @@ class objArrayOopDesc : public arrayOopDesc {
friend class ObjArrayKlass;
friend class Runtime1;
friend class psPromotionManager;
friend class CSetMarkOopClosure;
friend class CSetMarkWordClosure;
friend class G1ParScanPartialArrayClosure;
template <class T> T* obj_at_addr(int index) const;

@ -105,14 +105,14 @@ bool oopDesc::is_oop(oop obj, bool ignore_mark_word) {
return false;
}
// Header verification: the mark is typically non-NULL. If we're
// at a safepoint, it must not be null.
// Header verification: the mark is typically non-zero. If we're
// at a safepoint, it must not be zero.
// Outside of a safepoint, the header could be changing (for example,
// another thread could be inflating a lock on this object).
if (ignore_mark_word) {
return true;
}
if (obj->mark_raw() != NULL) {
if (obj->mark_raw().value() != 0) {
return true;
}
return !SafepointSynchronize::is_at_safepoint();
