8188764: Obsolete AssumeMP and then remove all support for non-MP builds
Reviewed-by: mikael, mdoerr, bulasevich, eosterlund
commit a3cd6a1a70
parent 5cfc3bbdd4
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -195,9 +195,7 @@ void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_
 void CompiledDirectStaticCall::verify() {
   // Verify call.
   _call->verify();
-  if (os::is_MP()) {
-    _call->verify_alignment();
-  }
+  _call->verify_alignment();
 
   // Verify stub.
   address stub = find_stub(false /* is_aot */);
@@ -1950,24 +1950,20 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   // didn't see any synchronization is progress, and escapes.
   __ mov(rscratch1, _thread_in_native_trans);
 
-  if(os::is_MP()) {
-    if (UseMembar) {
-      __ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset()));
-
-      // Force this write out before the read below
-      __ dmb(Assembler::ISH);
-    } else {
-      __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
-      __ stlrw(rscratch1, rscratch2);
-
-      // Write serialization page so VM thread can do a pseudo remote membar.
-      // We use the current thread pointer to calculate a thread specific
-      // offset to write to within the page. This minimizes bus traffic
-      // due to cache line collision.
-      __ serialize_memory(rthread, r2);
-    }
-  } else {
+  if (UseMembar) {
     __ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset()));
+
+    // Force this write out before the read below
+    __ dmb(Assembler::ISH);
+  } else {
+    __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
+    __ stlrw(rscratch1, rscratch2);
+
+    // Write serialization page so VM thread can do a pseudo remote membar.
+    // We use the current thread pointer to calculate a thread specific
+    // offset to write to within the page. This minimizes bus traffic
+    // due to cache line collision.
+    __ serialize_memory(rthread, r2);
   }
 
   // check for safepoint operation in progress and/or pending suspend requests
@@ -1394,17 +1394,15 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
   __ stlrw(rscratch1, rscratch2);
 
-  if (os::is_MP()) {
-    if (UseMembar) {
-      // Force this write out before the read below
-      __ dmb(Assembler::ISH);
-    } else {
-      // Write serialization page so VM thread can do a pseudo remote membar.
-      // We use the current thread pointer to calculate a thread specific
-      // offset to write to within the page. This minimizes bus traffic
-      // due to cache line collision.
-      __ serialize_memory(rthread, rscratch2);
-    }
+  if (UseMembar) {
+    // Force this write out before the read below
+    __ dmb(Assembler::ISH);
+  } else {
+    // Write serialization page so VM thread can do a pseudo remote membar.
+    // We use the current thread pointer to calculate a thread specific
+    // offset to write to within the page. This minimizes bus traffic
+    // due to cache line collision.
+    __ serialize_memory(rthread, rscratch2);
   }
 
   // check for safepoint operation in progress and/or pending suspend requests
@@ -5316,8 +5316,7 @@ instruct loadConD(regD dst, immD src, iRegP tmp) %{
 // Prefetch instructions.
 // Must be safe to execute with invalid address (cannot fault).
 
-instruct prefetchAlloc_mp( memoryP mem ) %{
-  predicate(os::is_MP());
+instruct prefetchAlloc( memoryP mem ) %{
   match( PrefetchAllocation mem );
   ins_cost(MEMORY_REF_COST);
   size(4);
@@ -5333,23 +5332,6 @@ instruct prefetchAlloc_mp( memoryP mem ) %{
   ins_pipe(iload_mem);
 %}
 
-instruct prefetchAlloc_sp( memoryP mem ) %{
-  predicate(!os::is_MP());
-  match( PrefetchAllocation mem );
-  ins_cost(MEMORY_REF_COST);
-  size(4);
-
-  format %{ "PLD $mem\t! Prefetch allocation" %}
-  ins_encode %{
-#ifdef AARCH64
-    __ prfm(pstl1keep, $mem$$Address);
-#else
-    __ pld($mem$$Address);
-#endif
-  %}
-  ins_pipe(iload_mem);
-%}
-
 //----------Store Instructions-------------------------------------------------
 // Store Byte
 instruct storeB(memoryB mem, store_RegI src) %{
@@ -155,9 +155,7 @@ void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_
 void CompiledDirectStaticCall::verify() {
   // Verify call.
   _call->verify();
-  if (os::is_MP()) {
-    _call->verify_alignment();
-  }
+  _call->verify_alignment();
 
   // Verify stub.
   address stub = find_stub(/*is_aot*/ false);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -127,13 +127,9 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
   __ bic(R1, R1, JNIHandles::weak_tag_mask);
 #endif
 
-  if (os::is_MP()) {
-    // Address dependency restricts memory access ordering. It's cheaper than explicit LoadLoad barrier
-    __ andr(Rtmp1, Rsafept_cnt, (unsigned)1);
-    __ ldr(Robj, Address(R1, Rtmp1));
-  } else {
-    __ ldr(Robj, Address(R1));
-  }
+  // Address dependency restricts memory access ordering. It's cheaper than explicit LoadLoad barrier
+  __ andr(Rtmp1, Rsafept_cnt, (unsigned)1);
+  __ ldr(Robj, Address(R1, Rtmp1));
 
 #ifdef AARCH64
   __ add(Robj, Robj, AsmOperand(R2, lsr, 2));
@@ -198,25 +194,21 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
     ShouldNotReachHere();
   }
 
-  if(os::is_MP()) {
-    // Address dependency restricts memory access ordering. It's cheaper than explicit LoadLoad barrier
+  // Address dependency restricts memory access ordering. It's cheaper than explicit LoadLoad barrier
 #if defined(__ABI_HARD__) && !defined(AARCH64)
-    if (type == T_FLOAT || type == T_DOUBLE) {
-      __ ldr_literal(Rsafepoint_counter_addr, safepoint_counter_addr);
-      __ fmrrd(Rres, Rres_hi, D0);
-      __ eor(Rtmp2, Rres, Rres);
-      __ ldr_s32(Rsafept_cnt2, Address(Rsafepoint_counter_addr, Rtmp2));
-    } else
+  if (type == T_FLOAT || type == T_DOUBLE) {
+    __ ldr_literal(Rsafepoint_counter_addr, safepoint_counter_addr);
+    __ fmrrd(Rres, Rres_hi, D0);
+    __ eor(Rtmp2, Rres, Rres);
+    __ ldr_s32(Rsafept_cnt2, Address(Rsafepoint_counter_addr, Rtmp2));
+  } else
 #endif // __ABI_HARD__ && !AARCH64
-    {
+  {
 #ifndef AARCH64
-      __ ldr_literal(Rsafepoint_counter_addr, safepoint_counter_addr);
+    __ ldr_literal(Rsafepoint_counter_addr, safepoint_counter_addr);
 #endif // !AARCH64
-      __ eor(Rtmp2, Rres, Rres);
-      __ ldr_s32(Rsafept_cnt2, Address(Rsafepoint_counter_addr, Rtmp2));
-    }
-  } else {
-    __ ldr_s32(Rsafept_cnt2, Address(Rsafepoint_counter_addr));
+    __ eor(Rtmp2, Rres, Rres);
+    __ ldr_s32(Rsafept_cnt2, Address(Rsafepoint_counter_addr, Rtmp2));
   }
   __ cmp(Rsafept_cnt2, Rsafept_cnt);
 #ifdef AARCH64
@@ -1563,8 +1563,6 @@ FixedSizeCodeBlock::~FixedSizeCodeBlock() {
 // Serializes memory.
 // tmp register is not used on AArch64, this parameter is provided solely for better compatibility with 32-bit ARM
 void MacroAssembler::membar(Membar_mask_bits order_constraint, Register tmp) {
-  if (!os::is_MP()) return;
-
   // TODO-AARCH64 investigate dsb vs dmb effects
   if (order_constraint == StoreStore) {
     dmb(DMB_st);
@@ -1585,7 +1583,6 @@ void MacroAssembler::membar(Membar_mask_bits order_constraint,
                             Register tmp,
                             bool preserve_flags,
                             Register load_tgt) {
-  if (!os::is_MP()) return;
 
   if (order_constraint == StoreStore) {
     dmb(DMB_st, tmp);
@@ -3146,15 +3146,11 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
   const Register Rindex = R5_tmp;
   const Register Rflags = R5_tmp;
 
-  const bool gen_volatile_check = os::is_MP();
-
   resolve_cache_and_index(byte_no, Rcache, Rindex, sizeof(u2));
   jvmti_post_field_access(Rcache, Rindex, is_static, false);
   load_field_cp_cache_entry(Rcache, Rindex, Roffset, Rflags, Robj, is_static);
 
-  if (gen_volatile_check) {
-    __ mov(Rflagsav, Rflags);
-  }
+  __ mov(Rflagsav, Rflags);
 
   if (!is_static) pop_and_check_object(Robj);
 
@@ -3391,16 +3387,13 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
 
   __ bind(Done);
 
-  if (gen_volatile_check) {
-    // Check for volatile field
-    Label notVolatile;
-    __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
+  // Check for volatile field
+  Label notVolatile;
+  __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
 
-    volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
-
-    __ bind(notVolatile);
-  }
+  volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
+
+  __ bind(notVolatile);
 }
 
 void TemplateTable::getfield(int byte_no) {
@@ -3492,22 +3485,18 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
   const Register Rindex = R5_tmp;
   const Register Rflags = R5_tmp;
 
-  const bool gen_volatile_check = os::is_MP();
-
   resolve_cache_and_index(byte_no, Rcache, Rindex, sizeof(u2));
   jvmti_post_field_mod(Rcache, Rindex, is_static);
   load_field_cp_cache_entry(Rcache, Rindex, Roffset, Rflags, Robj, is_static);
 
-  if (gen_volatile_check) {
-    // Check for volatile field
-    Label notVolatile;
-    __ mov(Rflagsav, Rflags);
-    __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
+  // Check for volatile field
+  Label notVolatile;
+  __ mov(Rflagsav, Rflags);
+  __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
 
-    volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
+  volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
 
-    __ bind(notVolatile);
-  }
+  __ bind(notVolatile);
 
   Label Done, Lint, shouldNotReachHere;
   Label Ltable, Lbtos, Lztos, Lctos, Lstos, Litos, Lltos, Lftos, Ldtos, Latos;
@@ -3733,36 +3722,33 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
 
   __ bind(Done);
 
-  if (gen_volatile_check) {
-    Label notVolatile;
-    if (is_static) {
-      // Just check for volatile. Memory barrier for static final field
-      // is handled by class initialization.
-      __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
-      volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
-      __ bind(notVolatile);
-    } else {
-      // Check for volatile field and final field
-      Label skipMembar;
+  Label notVolatile2;
+  if (is_static) {
+    // Just check for volatile. Memory barrier for static final field
+    // is handled by class initialization.
+    __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile2);
+    volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
+    __ bind(notVolatile2);
+  } else {
+    // Check for volatile field and final field
+    Label skipMembar;
 
-      __ tst(Rflagsav, 1 << ConstantPoolCacheEntry::is_volatile_shift |
-             1 << ConstantPoolCacheEntry::is_final_shift);
-      __ b(skipMembar, eq);
+    __ tst(Rflagsav, 1 << ConstantPoolCacheEntry::is_volatile_shift |
+           1 << ConstantPoolCacheEntry::is_final_shift);
+    __ b(skipMembar, eq);
 
-      __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
+    __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile2);
 
-      // StoreLoad barrier after volatile field write
-      volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
-      __ b(skipMembar);
+    // StoreLoad barrier after volatile field write
+    volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
+    __ b(skipMembar);
 
-      // StoreStore barrier after final field write
-      __ bind(notVolatile);
-      volatile_barrier(MacroAssembler::StoreStore, Rtemp);
+    // StoreStore barrier after final field write
+    __ bind(notVolatile2);
+    volatile_barrier(MacroAssembler::StoreStore, Rtemp);
 
-      __ bind(skipMembar);
-    }
+    __ bind(skipMembar);
   }
 
 }
 
 void TemplateTable::putfield(int byte_no) {
@@ -3832,31 +3818,25 @@ void TemplateTable::fast_storefield(TosState state) {
   const Register Rflags = Rtmp_save0; // R4/R19
   const Register Robj = R5_tmp;
 
-  const bool gen_volatile_check = os::is_MP();
-
   // access constant pool cache
   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
 
   __ add(Rcache, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
 
-  if (gen_volatile_check) {
-    // load flags to test volatile
-    __ ldr_u32(Rflags, Address(Rcache, base + ConstantPoolCacheEntry::flags_offset()));
-  }
+  // load flags to test volatile
+  __ ldr_u32(Rflags, Address(Rcache, base + ConstantPoolCacheEntry::flags_offset()));
 
   // replace index with field offset from cache entry
   __ ldr(Roffset, Address(Rcache, base + ConstantPoolCacheEntry::f2_offset()));
 
-  if (gen_volatile_check) {
-    // Check for volatile store
-    Label notVolatile;
-    __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
+  // Check for volatile store
+  Label notVolatile;
+  __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
 
-    // TODO-AARCH64 on AArch64, store-release instructions can be used to get rid of this explict barrier
-    volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
+  // TODO-AARCH64 on AArch64, store-release instructions can be used to get rid of this explict barrier
+  volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
 
-    __ bind(notVolatile);
-  }
+  __ bind(notVolatile);
 
   // Get object from stack
   pop_and_check_object(Robj);
@@ -3903,28 +3883,25 @@ void TemplateTable::fast_storefield(TosState state) {
     ShouldNotReachHere();
   }
 
-  if (gen_volatile_check) {
-    Label notVolatile;
-    Label skipMembar;
-    __ tst(Rflags, 1 << ConstantPoolCacheEntry::is_volatile_shift |
-           1 << ConstantPoolCacheEntry::is_final_shift);
-    __ b(skipMembar, eq);
+  Label notVolatile2;
+  Label skipMembar;
+  __ tst(Rflags, 1 << ConstantPoolCacheEntry::is_volatile_shift |
+         1 << ConstantPoolCacheEntry::is_final_shift);
+  __ b(skipMembar, eq);
 
-    __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
+  __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile2);
 
-    // StoreLoad barrier after volatile field write
-    volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
-    __ b(skipMembar);
+  // StoreLoad barrier after volatile field write
+  volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
+  __ b(skipMembar);
 
-    // StoreStore barrier after final field write
-    __ bind(notVolatile);
-    volatile_barrier(MacroAssembler::StoreStore, Rtemp);
+  // StoreStore barrier after final field write
+  __ bind(notVolatile2);
+  volatile_barrier(MacroAssembler::StoreStore, Rtemp);
 
-    __ bind(skipMembar);
-  }
+  __ bind(skipMembar);
 }
 
 
 void TemplateTable::fast_accessfield(TosState state) {
   transition(atos, state);
@@ -3954,18 +3931,14 @@ void TemplateTable::fast_accessfield(TosState state) {
   const Register Rindex = R3_tmp;
   const Register Roffset = R3_tmp;
 
-  const bool gen_volatile_check = os::is_MP();
-
   // access constant pool cache
   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
   // replace index with field offset from cache entry
   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
   __ ldr(Roffset, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
 
-  if (gen_volatile_check) {
-    // load flags to test volatile
-    __ ldr_u32(Rflags, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
-  }
+  // load flags to test volatile
+  __ ldr_u32(Rflags, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
 
   __ verify_oop(Robj);
   __ null_check(Robj, Rtemp);
@@ -4008,16 +3981,14 @@ void TemplateTable::fast_accessfield(TosState state) {
     ShouldNotReachHere();
   }
 
-  if (gen_volatile_check) {
-    // Check for volatile load
-    Label notVolatile;
-    __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
+  // Check for volatile load
+  Label notVolatile;
+  __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
 
-    // TODO-AARCH64 on AArch64, load-acquire instructions can be used to get rid of this explict barrier
-    volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
+  // TODO-AARCH64 on AArch64, load-acquire instructions can be used to get rid of this explict barrier
+  volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
 
-    __ bind(notVolatile);
-  }
+  __ bind(notVolatile);
 }
 
 
@@ -4039,12 +4010,8 @@ void TemplateTable::fast_xaccess(TosState state) {
   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
   __ ldr(Roffset, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
 
-  const bool gen_volatile_check = os::is_MP();
-
-  if (gen_volatile_check) {
-    // load flags to test volatile
-    __ ldr_u32(Rflags, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
-  }
+  // load flags to test volatile
+  __ ldr_u32(Rflags, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
 
   // make sure exception is reported in correct bcp range (getfield is next instruction)
   __ add(Rbcp, Rbcp, 1);
@@ -4052,32 +4019,30 @@ void TemplateTable::fast_xaccess(TosState state) {
   __ sub(Rbcp, Rbcp, 1);
 
 #ifdef AARCH64
-  if (gen_volatile_check) {
-    Label notVolatile;
-    __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
+  Label notVolatile;
+  __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
 
-    __ add(Rtemp, Robj, Roffset);
+  __ add(Rtemp, Robj, Roffset);
 
-    if (state == itos) {
+  if (state == itos) {
+    __ ldar_w(R0_tos, Rtemp);
+  } else if (state == atos) {
+    if (UseCompressedOops) {
       __ ldar_w(R0_tos, Rtemp);
-    } else if (state == atos) {
-      if (UseCompressedOops) {
-        __ ldar_w(R0_tos, Rtemp);
-        __ decode_heap_oop(R0_tos);
-      } else {
-        __ ldar(R0_tos, Rtemp);
-      }
-      __ verify_oop(R0_tos);
-    } else if (state == ftos) {
-      __ ldar_w(R0_tos, Rtemp);
-      __ fmov_sw(S0_tos, R0_tos);
+      __ decode_heap_oop(R0_tos);
     } else {
-      ShouldNotReachHere();
+      __ ldar(R0_tos, Rtemp);
     }
-    __ b(done);
-
-    __ bind(notVolatile);
+    __ verify_oop(R0_tos);
+  } else if (state == ftos) {
+    __ ldar_w(R0_tos, Rtemp);
+    __ fmov_sw(S0_tos, R0_tos);
+  } else {
+    ShouldNotReachHere();
   }
+  __ b(done);
+
+  __ bind(notVolatile);
 #endif // AARCH64
 
   if (state == itos) {
@@ -4100,15 +4065,13 @@ void TemplateTable::fast_xaccess(TosState state) {
   }
 
 #ifndef AARCH64
-  if (gen_volatile_check) {
-    // Check for volatile load
-    Label notVolatile;
-    __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
+  // Check for volatile load
+  Label notVolatile;
+  __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
 
-    volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
+  volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
 
-    __ bind(notVolatile);
-  }
+  __ bind(notVolatile);
 #endif // !AARCH64
 
   __ bind(done);
@@ -215,9 +215,7 @@ void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_
 void CompiledDirectStaticCall::verify() {
   // Verify call.
   _call->verify();
-  if (os::is_MP()) {
-    _call->verify_alignment();
-  }
+  _call->verify_alignment();
 
   // Verify stub.
   address stub = find_stub(/*is_aot*/ false);
@@ -2430,17 +2430,15 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
   {
     Label no_block, sync;
 
-    if (os::is_MP()) {
-      if (UseMembar) {
-        // Force this write out before the read below.
-        __ fence();
-      } else {
-        // Write serialization page so VM thread can do a pseudo remote membar.
-        // We use the current thread pointer to calculate a thread specific
-        // offset to write to within the page. This minimizes bus traffic
-        // due to cache line collision.
-        __ serialize_memory(R16_thread, r_temp_4, r_temp_5);
-      }
+    if (UseMembar) {
+      // Force this write out before the read below.
+      __ fence();
+    } else {
+      // Write serialization page so VM thread can do a pseudo remote membar.
+      // We use the current thread pointer to calculate a thread specific
+      // offset to write to within the page. This minimizes bus traffic
+      // due to cache line collision.
+      __ serialize_memory(R16_thread, r_temp_4, r_temp_5);
     }
 
     Register sync_state_addr = r_temp_4;
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -145,9 +145,7 @@ void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_
 void CompiledDirectStaticCall::verify() {
   // Verify call.
   _call->verify();
-  if (os::is_MP()) {
-    _call->verify_alignment();
-  }
+  _call->verify_alignment();
 
   // Verify stub.
   address stub = find_stub(/*is_aot*/ false);
@@ -593,7 +593,6 @@ class MacroAssembler: public Assembler {
   static int call_far_patchable_ret_addr_offset() { return call_far_patchable_size(); }
 
   static bool call_far_patchable_requires_alignment_nop(address pc) {
-    if (!os::is_MP()) return false;
     int size = call_far_patchable_size();
     return ((intptr_t)(pc + size) & 0x03L) != 0;
   }
@@ -2161,18 +2161,17 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
 
   save_native_result(masm, ret_type, workspace_slot_offset); // Make Z_R2 available as work reg.
 
-  if (os::is_MP()) {
-    if (UseMembar) {
-      // Force this write out before the read below.
-      __ z_fence();
-    } else {
-      // Write serialization page so VM thread can do a pseudo remote membar.
-      // We use the current thread pointer to calculate a thread specific
-      // offset to write to within the page. This minimizes bus traffic
-      // due to cache line collision.
-      __ serialize_memory(Z_thread, Z_R1, Z_R2);
-    }
-  }
+  if (UseMembar) {
+    // Force this write out before the read below.
+    __ z_fence();
+  } else {
+    // Write serialization page so VM thread can do a pseudo remote membar.
+    // We use the current thread pointer to calculate a thread specific
+    // offset to write to within the page. This minimizes bus traffic
+    // due to cache line collision.
+    __ serialize_memory(Z_thread, Z_R1, Z_R2);
+  }
 
   __ safepoint_poll(sync, Z_R1);
 
   __ load_and_test_int(Z_R0, Address(Z_thread, JavaThread::suspend_flags_offset()));
@@ -142,9 +142,7 @@ void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_
 void CompiledDirectStaticCall::verify() {
   // Verify call.
   _call->verify();
-  if (os::is_MP()) {
-    _call->verify_alignment();
-  }
+  _call->verify_alignment();
 
   // Verify stub.
   address stub = find_stub(/*is_aot*/ false);
@@ -2786,7 +2786,7 @@ void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
    delayed()->
    st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
 
-   if (os::is_MP()) { membar(StoreLoad); }
+   membar(StoreLoad);
    // Check that _succ is (or remains) non-zero
    ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), Rscratch);
    andcc(Rscratch, Rscratch, G0);
@@ -614,17 +614,12 @@ inline void MacroAssembler::ldfl(FloatRegisterImpl::Width w, Register s1, Regist
 // returns if membar generates anything, obviously this code should mirror
 // membar below.
 inline bool MacroAssembler::membar_has_effect( Membar_mask_bits const7a ) {
-  if (!os::is_MP())
-    return false; // Not needed on single CPU
   const Membar_mask_bits effective_mask =
       Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
   return (effective_mask != 0);
 }
 
 inline void MacroAssembler::membar( Membar_mask_bits const7a ) {
-  // Uniprocessors do not need memory barriers
-  if (!os::is_MP())
-    return;
   // Weakened for current Sparcs and TSO. See the v9 manual, sections 8.4.3,
   // 8.4.4.3, a.31 and a.50.
   // Under TSO, setting bit 3, 2, or 0 is redundant, so the only value
@@ -2371,17 +2371,16 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   // didn't see any synchronization is progress, and escapes.
   __ set(_thread_in_native_trans, G3_scratch);
   __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
-  if(os::is_MP()) {
-    if (UseMembar) {
-      // Force this write out before the read below
-      __ membar(Assembler::StoreLoad);
-    } else {
-      // Write serialization page so VM thread can do a pseudo remote membar.
-      // We use the current thread pointer to calculate a thread specific
-      // offset to write to within the page. This minimizes bus traffic
-      // due to cache line collision.
-      __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
-    }
+
+  if (UseMembar) {
+    // Force this write out before the read below
+    __ membar(Assembler::StoreLoad);
+  } else {
+    // Write serialization page so VM thread can do a pseudo remote membar.
+    // We use the current thread pointer to calculate a thread specific
+    // offset to write to within the page. This minimizes bus traffic
+    // due to cache line collision.
+    __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
   }
 
   Label L;
@@ -1373,17 +1373,16 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
   // didn't see any synchronization is progress, and escapes.
   __ set(_thread_in_native_trans, G3_scratch);
   __ st(G3_scratch, thread_state);
-  if (os::is_MP()) {
-    if (UseMembar) {
-      // Force this write out before the read below
-      __ membar(Assembler::StoreLoad);
-    } else {
-      // Write serialization page so VM thread can do a pseudo remote membar.
-      // We use the current thread pointer to calculate a thread specific
-      // offset to write to within the page. This minimizes bus traffic
-      // due to cache line collision.
-      __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
-    }
+
+  if (UseMembar) {
+    // Force this write out before the read below
+    __ membar(Assembler::StoreLoad);
+  } else {
+    // Write serialization page so VM thread can do a pseudo remote membar.
+    // We use the current thread pointer to calculate a thread specific
+    // offset to write to within the page. This minimizes bus traffic
+    // due to cache line collision.
+    __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
   }
 
   Label L;
@@ -972,7 +972,6 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
       return ip;
 
     case 0xF0: // Lock
-      assert(os::is_MP(), "only on MP");
       goto again_after_prefix;
 
     case 0xF3: // For SSE
@@ -1345,40 +1345,38 @@ private:
 
   // Serializes memory and blows flags
   void membar(Membar_mask_bits order_constraint) {
-    if (os::is_MP()) {
-      // We only have to handle StoreLoad
-      if (order_constraint & StoreLoad) {
-        // All usable chips support "locked" instructions which suffice
-        // as barriers, and are much faster than the alternative of
-        // using cpuid instruction. We use here a locked add [esp-C],0.
-        // This is conveniently otherwise a no-op except for blowing
-        // flags, and introducing a false dependency on target memory
-        // location. We can't do anything with flags, but we can avoid
-        // memory dependencies in the current method by locked-adding
-        // somewhere else on the stack. Doing [esp+C] will collide with
-        // something on stack in current method, hence we go for [esp-C].
-        // It is convenient since it is almost always in data cache, for
-        // any small C. We need to step back from SP to avoid data
-        // dependencies with other things on below SP (callee-saves, for
-        // example). Without a clear way to figure out the minimal safe
-        // distance from SP, it makes sense to step back the complete
-        // cache line, as this will also avoid possible second-order effects
-        // with locked ops against the cache line. Our choice of offset
-        // is bounded by x86 operand encoding, which should stay within
-        // [-128; +127] to have the 8-byte displacement encoding.
-        //
-        // Any change to this code may need to revisit other places in
-        // the code where this idiom is used, in particular the
-        // orderAccess code.
+    // We only have to handle StoreLoad
+    if (order_constraint & StoreLoad) {
+      // All usable chips support "locked" instructions which suffice
+      // as barriers, and are much faster than the alternative of
+      // using cpuid instruction. We use here a locked add [esp-C],0.
+      // This is conveniently otherwise a no-op except for blowing
+      // flags, and introducing a false dependency on target memory
+      // location. We can't do anything with flags, but we can avoid
+      // memory dependencies in the current method by locked-adding
+      // somewhere else on the stack. Doing [esp+C] will collide with
+      // something on stack in current method, hence we go for [esp-C].
+      // It is convenient since it is almost always in data cache, for
+      // any small C. We need to step back from SP to avoid data
+      // dependencies with other things on below SP (callee-saves, for
+      // example). Without a clear way to figure out the minimal safe
+      // distance from SP, it makes sense to step back the complete
+      // cache line, as this will also avoid possible second-order effects
+      // with locked ops against the cache line. Our choice of offset
+      // is bounded by x86 operand encoding, which should stay within
+      // [-128; +127] to have the 8-byte displacement encoding.
+      //
+      // Any change to this code may need to revisit other places in
+      // the code where this idiom is used, in particular the
+      // orderAccess code.
 
-        int offset = -VM_Version::L1_line_size();
-        if (offset < -128) {
-          offset = -128;
-        }
-
-        lock();
-        addl(Address(rsp, offset), 0);// Assert the lock# signal here
-      }
+      int offset = -VM_Version::L1_line_size();
+      if (offset < -128) {
+        offset = -128;
+      }
+
+      lock();
+      addl(Address(rsp, offset), 0);// Assert the lock# signal here
     }
   }
 
@@ -1906,9 +1906,7 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
     assert(op->new_value()->as_register_lo() == rbx, "wrong register");
     assert(op->new_value()->as_register_hi() == rcx, "wrong register");
     Register addr = op->addr()->as_register();
-    if (os::is_MP()) {
-      __ lock();
-    }
+    __ lock();
     NOT_LP64(__ cmpxchg8(Address(addr, 0)));
 
   } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) {
@@ -1928,24 +1926,18 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
       __ encode_heap_oop(cmpval);
       __ mov(rscratch1, newval);
       __ encode_heap_oop(rscratch1);
-      if (os::is_MP()) {
-        __ lock();
-      }
+      __ lock();
       // cmpval (rax) is implicitly used by this instruction
       __ cmpxchgl(rscratch1, Address(addr, 0));
     } else
 #endif
     {
-      if (os::is_MP()) {
-        __ lock();
-      }
+      __ lock();
       __ cmpxchgptr(newval, Address(addr, 0));
     }
   } else {
     assert(op->code() == lir_cas_int, "lir_cas_int expected");
-    if (os::is_MP()) {
-      __ lock();
-    }
+    __ lock();
     __ cmpxchgl(newval, Address(addr, 0));
   }
 #ifdef _LP64
@@ -1958,9 +1950,7 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
     assert(cmpval != newval, "cmp and new values must be in different registers");
     assert(cmpval != addr, "cmp and addr must be in different registers");
    assert(newval != addr, "new value and addr must be in different registers");
-    if (os::is_MP()) {
-      __ lock();
-    }
+    __ lock();
     __ cmpxchgq(newval, Address(addr, 0));
 #endif // _LP64
   } else {
@@ -2804,28 +2794,26 @@ void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Op
 
 
 void LIR_Assembler::align_call(LIR_Code code) {
-  if (os::is_MP()) {
-    // make sure that the displacement word of the call ends up word aligned
-    int offset = __ offset();
-    switch (code) {
-      case lir_static_call:
-      case lir_optvirtual_call:
-      case lir_dynamic_call:
-        offset += NativeCall::displacement_offset;
-        break;
-      case lir_icvirtual_call:
-        offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
-        break;
-      case lir_virtual_call:  // currently, sparc-specific for niagara
-      default: ShouldNotReachHere();
-    }
-    __ align(BytesPerWord, offset);
-  }
+  // make sure that the displacement word of the call ends up word aligned
+  int offset = __ offset();
+  switch (code) {
+    case lir_static_call:
+    case lir_optvirtual_call:
+    case lir_dynamic_call:
+      offset += NativeCall::displacement_offset;
+      break;
+    case lir_icvirtual_call:
+      offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
+      break;
+    case lir_virtual_call:  // currently, sparc-specific for niagara
+    default: ShouldNotReachHere();
+  }
+  __ align(BytesPerWord, offset);
 }
 
 
 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
-  assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
+  assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
          "must be aligned");
   __ call(AddressLiteral(op->addr(), rtype));
   add_call_info(code_offset(), op->info());
@@ -2835,8 +2823,7 @@ void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
   __ ic_call(op->addr());
   add_call_info(code_offset(), op->info());
-  assert(!os::is_MP() ||
-         (__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
+  assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
          "must be aligned");
 }
 
@@ -2856,14 +2843,13 @@ void LIR_Assembler::emit_static_call_stub() {
   }
 
   int start = __ offset();
-  if (os::is_MP()) {
-    // make sure that the displacement word of the call ends up word aligned
-    __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset);
-  }
+
+  // make sure that the displacement word of the call ends up word aligned
+  __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset);
   __ relocate(static_stub_Relocation::spec(call_pc, false /* is_aot */));
   __ mov_metadata(rbx, (Metadata*)NULL);
   // must be set to -1 at code generation time
-  assert(!os::is_MP() || ((__ offset() + 1) % BytesPerWord) == 0, "must be aligned on MP");
+  assert(((__ offset() + 1) % BytesPerWord) == 0, "must be aligned");
   // On 64bit this will die since it will take a movq & jmp, must be only a jmp
   __ jump(RuntimeAddress(__ pc()));
 
@@ -3992,9 +3978,7 @@ void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr
 
   if (data->type() == T_INT) {
     if (code == lir_xadd) {
-      if (os::is_MP()) {
-        __ lock();
-      }
+      __ lock();
       __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
     } else {
       __ xchgl(data->as_register(), as_Address(src->as_address_ptr()));
@@ -4017,9 +4001,7 @@ void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr
 #ifdef _LP64
     assert(data->as_register_lo() == data->as_register_hi(), "should be a single register");
     if (code == lir_xadd) {
-      if (os::is_MP()) {
-        __ lock();
-      }
+      __ lock();
       __ xaddq(as_Address(src->as_address_ptr()), data->as_register_lo());
     } else {
       __ xchgq(data->as_register_lo(), as_Address(src->as_address_ptr()));
@@ -65,7 +65,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
   // test if object header is still the same (i.e. unlocked), and if so, store the
   // displaced header address in the object header - if it is not the same, get the
   // object header instead
-  if (os::is_MP()) MacroAssembler::lock(); // must be immediately before cmpxchg!
+  MacroAssembler::lock(); // must be immediately before cmpxchg!
   cmpxchgptr(disp_hdr, Address(obj, hdr_offset));
   // if the object header was the same, we're done
   if (PrintBiasedLockingStatistics) {
@@ -126,7 +126,7 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_
   // test if object header is pointing to the displaced header, and if so, restore
   // the displaced header in the object - if the object header is not pointing to
   // the displaced header, get the object header instead
-  if (os::is_MP()) MacroAssembler::lock(); // must be immediately before cmpxchg!
+  MacroAssembler::lock(); // must be immediately before cmpxchg!
   cmpxchgptr(hdr, Address(obj, hdr_offset));
   // if the object header was not pointing to the displaced header,
   // we do unlocking via runtime call
@@ -198,9 +198,7 @@ void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_
 void CompiledDirectStaticCall::verify() {
   // Verify call.
   _call->verify();
-  if (os::is_MP()) {
-    _call->verify_alignment();
-  }
+  _call->verify_alignment();
 
 #ifdef ASSERT
   CodeBlob *cb = CodeCache::find_blob_unsafe((address) _call);
@@ -1191,7 +1191,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
     assert(lock_offset == 0,
            "displaced header must be first word in BasicObjectLock");
 
-    if (os::is_MP()) lock();
+    lock();
     cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
     if (PrintBiasedLockingStatistics) {
       cond_inc32(Assembler::zero,
@@ -1288,7 +1288,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
     jcc(Assembler::zero, done);
 
     // Atomic swap back the old header
-    if (os::is_MP()) lock();
+    lock();
     cmpxchgptr(header_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 
     // zero for simple unlock of a stack-lock case
@@ -75,15 +75,11 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
   __ mov32 (rcx, counter);
   __ testb (rcx, 1);
   __ jcc (Assembler::notZero, slow);
-  if (os::is_MP()) {
-    __ mov(rax, rcx);
-    __ andptr(rax, 1); // rax, must end up 0
-    __ movptr(rdx, Address(rsp, rax, Address::times_1, 2*wordSize));
-    // obj, notice rax, is 0.
-    // rdx is data dependent on rcx.
-  } else {
-    __ movptr (rdx, Address(rsp, 2*wordSize)); // obj
-  }
+  __ mov(rax, rcx);
+  __ andptr(rax, 1); // rax, must end up 0
+  __ movptr(rdx, Address(rsp, rax, Address::times_1, 2*wordSize));
+  // obj, notice rax, is 0.
+  // rdx is data dependent on rcx.
   __ movptr(rax, Address(rsp, 3*wordSize)); // jfieldID
 
   __ clear_jweak_tag(rdx);
@@ -103,17 +99,13 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
   }
 
   Address ca1;
-  if (os::is_MP()) {
-    __ lea(rdx, counter);
-    __ xorptr(rdx, rax);
-    __ xorptr(rdx, rax);
-    __ cmp32(rcx, Address(rdx, 0));
-    // ca1 is the same as ca because
-    // rax, ^ counter_addr ^ rax, = address
-    // ca1 is data dependent on rax,.
-  } else {
-    __ cmp32(rcx, counter);
-  }
+  __ lea(rdx, counter);
+  __ xorptr(rdx, rax);
+  __ xorptr(rdx, rax);
+  __ cmp32(rcx, Address(rdx, 0));
+  // ca1 is the same as ca because
+  // rax, ^ counter_addr ^ rax, = address
+  // ca1 is data dependent on rax,.
   __ jcc (Assembler::notEqual, slow);
 
 #ifndef _WINDOWS
@@ -196,15 +188,11 @@ address JNI_FastGetField::generate_fast_get_long_field() {
   __ mov32 (rcx, counter);
   __ testb (rcx, 1);
   __ jcc (Assembler::notZero, slow);
-  if (os::is_MP()) {
-    __ mov(rax, rcx);
-    __ andptr(rax, 1); // rax, must end up 0
-    __ movptr(rdx, Address(rsp, rax, Address::times_1, 3*wordSize));
-    // obj, notice rax, is 0.
-    // rdx is data dependent on rcx.
-  } else {
-    __ movptr(rdx, Address(rsp, 3*wordSize)); // obj
-  }
+  __ mov(rax, rcx);
+  __ andptr(rax, 1); // rax, must end up 0
+  __ movptr(rdx, Address(rsp, rax, Address::times_1, 3*wordSize));
+  // obj, notice rax, is 0.
+  // rdx is data dependent on rcx.
   __ movptr(rsi, Address(rsp, 4*wordSize)); // jfieldID
 
   __ clear_jweak_tag(rdx);
@@ -220,19 +208,15 @@ address JNI_FastGetField::generate_fast_get_long_field() {
   __ movl(rdx, Address(rdx, rsi, Address::times_1, 4));
 #endif // _LP64
 
-  if (os::is_MP()) {
-    __ lea(rsi, counter);
-    __ xorptr(rsi, rdx);
-    __ xorptr(rsi, rax);
-    __ xorptr(rsi, rdx);
-    __ xorptr(rsi, rax);
-    __ cmp32(rcx, Address(rsi, 0));
-    // ca1 is the same as ca because
-    // rax, ^ rdx ^ counter_addr ^ rax, ^ rdx = address
-    // ca1 is data dependent on both rax, and rdx.
-  } else {
-    __ cmp32(rcx, counter);
-  }
+  __ lea(rsi, counter);
+  __ xorptr(rsi, rdx);
+  __ xorptr(rsi, rax);
+  __ xorptr(rsi, rdx);
+  __ xorptr(rsi, rax);
+  __ cmp32(rcx, Address(rsi, 0));
+  // ca1 is the same as ca because
+  // rax, ^ rdx ^ counter_addr ^ rax, ^ rdx = address
+  // ca1 is data dependent on both rax, and rdx.
   __ jcc (Assembler::notEqual, slow);
 
   __ pop (rsi);
@@ -288,15 +272,11 @@ address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
   __ mov32 (rcx, counter);
   __ testb (rcx, 1);
   __ jcc (Assembler::notZero, slow);
-  if (os::is_MP()) {
-    __ mov(rax, rcx);
-    __ andptr(rax, 1); // rax, must end up 0
-    __ movptr(rdx, Address(rsp, rax, Address::times_1, 2*wordSize));
-    // obj, notice rax, is 0.
-    // rdx is data dependent on rcx.
-  } else {
-    __ movptr(rdx, Address(rsp, 2*wordSize)); // obj
-  }
+  __ mov(rax, rcx);
+  __ andptr(rax, 1); // rax, must end up 0
+  __ movptr(rdx, Address(rsp, rax, Address::times_1, 2*wordSize));
+  // obj, notice rax, is 0.
+  // rdx is data dependent on rcx.
   __ movptr(rax, Address(rsp, 3*wordSize)); // jfieldID
 
   __ clear_jweak_tag(rdx);
@@ -318,20 +298,16 @@ address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
   }
 
   Address ca1;
-  if (os::is_MP()) {
-    __ fst_s (Address(rsp, -4));
-    __ lea(rdx, counter);
-    __ movl (rax, Address(rsp, -4));
-    // garbage hi-order bits on 64bit are harmless.
-    __ xorptr(rdx, rax);
-    __ xorptr(rdx, rax);
-    __ cmp32(rcx, Address(rdx, 0));
-    // rax, ^ counter_addr ^ rax, = address
-    // ca1 is data dependent on the field
-    // access.
-  } else {
-    __ cmp32(rcx, counter);
-  }
+  __ fst_s (Address(rsp, -4));
+  __ lea(rdx, counter);
+  __ movl (rax, Address(rsp, -4));
+  // garbage hi-order bits on 64bit are harmless.
+  __ xorptr(rdx, rax);
+  __ xorptr(rdx, rax);
+  __ cmp32(rcx, Address(rdx, 0));
+  // rax, ^ counter_addr ^ rax, = address
+  // ca1 is data dependent on the field
+  // access.
   __ jcc (Assembler::notEqual, slow_with_pop);
 
 #ifndef _WINDOWS
@@ -77,12 +77,11 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
   __ mov (robj, c_rarg1);
   __ testb (rcounter, 1);
   __ jcc (Assembler::notZero, slow);
-  if (os::is_MP()) {
-    __ xorptr(robj, rcounter);
-    __ xorptr(robj, rcounter); // obj, since
-    // robj ^ rcounter ^ rcounter == robj
-    // robj is data dependent on rcounter.
-  }
+
+  __ xorptr(robj, rcounter);
+  __ xorptr(robj, rcounter); // obj, since
+  // robj ^ rcounter ^ rcounter == robj
+  // robj is data dependent on rcounter.
 
   __ mov (roffset, c_rarg2);
   __ shrptr(roffset, 2); // offset
@@ -104,15 +103,12 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
     default: ShouldNotReachHere();
   }
 
-  if (os::is_MP()) {
-    __ lea(rcounter_addr, counter);
-    // ca is data dependent on rax.
-    __ xorptr(rcounter_addr, rax);
-    __ xorptr(rcounter_addr, rax);
-    __ cmpl (rcounter, Address(rcounter_addr, 0));
-  } else {
-    __ cmp32 (rcounter, counter);
-  }
+  // create data dependency on rax
+  __ lea(rcounter_addr, counter);
+  __ xorptr(rcounter_addr, rax);
+  __ xorptr(rcounter_addr, rax);
+  __ cmpl (rcounter, Address(rcounter_addr, 0));
 
   __ jcc (Assembler::notEqual, slow);
 
   __ ret (0);
@@ -181,12 +177,11 @@ address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
   __ mov (robj, c_rarg1);
   __ testb (rcounter, 1);
   __ jcc (Assembler::notZero, slow);
-  if (os::is_MP()) {
-    __ xorptr(robj, rcounter);
-    __ xorptr(robj, rcounter); // obj, since
-    // robj ^ rcounter ^ rcounter == robj
-    // robj is data dependent on rcounter.
-  }
+
+  __ xorptr(robj, rcounter);
+  __ xorptr(robj, rcounter); // obj, since
+  // robj ^ rcounter ^ rcounter == robj
+  // robj is data dependent on rcounter.
 
   // Both robj and rtmp are clobbered by try_resolve_jobject_in_native.
   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
@@ -204,16 +199,12 @@ address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
     default: ShouldNotReachHere();
   }
 
-  if (os::is_MP()) {
-    __ lea(rcounter_addr, counter);
-    __ movdq (rax, xmm0);
-    // counter address is data dependent on xmm0.
-    __ xorptr(rcounter_addr, rax);
-    __ xorptr(rcounter_addr, rax);
-    __ cmpl (rcounter, Address(rcounter_addr, 0));
-  } else {
-    __ cmp32 (rcounter, counter);
-  }
+  __ lea(rcounter_addr, counter);
+  __ movdq (rax, xmm0);
+  // counter address is data dependent on xmm0.
+  __ xorptr(rcounter_addr, rax);
+  __ xorptr(rcounter_addr, rax);
+  __ cmpl (rcounter, Address(rcounter_addr, 0));
   __ jcc (Assembler::notEqual, slow);
 
   __ ret (0);
@@ -1030,8 +1030,7 @@ void MacroAssembler::andptr(Register dst, int32_t imm32) {
 }
 
 void MacroAssembler::atomic_incl(Address counter_addr) {
-  if (os::is_MP())
-    lock();
+  lock();
   incrementl(counter_addr);
 }
 
@@ -1046,8 +1045,7 @@ void MacroAssembler::atomic_incl(AddressLiteral counter_addr, Register scr) {
 
 #ifdef _LP64
 void MacroAssembler::atomic_incq(Address counter_addr) {
-  if (os::is_MP())
-    lock();
+  lock();
   incrementq(counter_addr);
 }
 
@@ -1213,9 +1211,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
   get_thread(tmp_reg);
   orptr(tmp_reg, swap_reg);
 #endif
-  if (os::is_MP()) {
-    lock();
-  }
+  lock();
   cmpxchgptr(tmp_reg, mark_addr); // compare tmp_reg and swap_reg
   // If the biasing toward our thread failed, this means that
   // another thread succeeded in biasing it toward itself and we
@@ -1248,9 +1244,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
   orptr(tmp_reg, swap_reg);
   movptr(swap_reg, saved_mark_addr);
 #endif
-  if (os::is_MP()) {
-    lock();
-  }
+  lock();
   cmpxchgptr(tmp_reg, mark_addr); // compare tmp_reg and swap_reg
   // If the biasing toward our thread failed, then another thread
   // succeeded in biasing it toward itself and we need to revoke that
@@ -1278,9 +1272,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
   // bits in this situation. Should attempt to preserve them.
   NOT_LP64( movptr(swap_reg, saved_mark_addr); )
   load_prototype_header(tmp_reg, obj_reg);
-  if (os::is_MP()) {
-    lock();
-  }
+  lock();
   cmpxchgptr(tmp_reg, mark_addr); // compare tmp_reg and swap_reg
   // Fall through to the normal CAS-based lock, because no matter what
   // the result of the above CAS, some thread must have succeeded in
@@ -1376,9 +1368,7 @@ void MacroAssembler::rtm_abort_ratio_calculation(Register tmpReg,
     if (method_data != NULL) {
       // set rtm_state to "no rtm" in MDO
       mov_metadata(tmpReg, method_data);
-      if (os::is_MP()) {
-        lock();
-      }
+      lock();
       orl(Address(tmpReg, MethodData::rtm_state_offset_in_bytes()), NoRTM);
     }
     jmpb(L_done);
@@ -1392,9 +1382,7 @@ void MacroAssembler::rtm_abort_ratio_calculation(Register tmpReg,
     if (method_data != NULL) {
      // set rtm_state to "always rtm" in MDO
       mov_metadata(tmpReg, method_data);
-      if (os::is_MP()) {
-        lock();
-      }
+      lock();
       orl(Address(tmpReg, MethodData::rtm_state_offset_in_bytes()), UseRTM);
     }
     bind(L_done);
@@ -1605,9 +1593,7 @@ void MacroAssembler::rtm_inflated_locking(Register objReg, Register boxReg, Regi
   get_thread(scrReg);
   Register threadReg = scrReg;
 #endif
-  if (os::is_MP()) {
-    lock();
-  }
+  lock();
   cmpxchgptr(threadReg, Address(boxReg, owner_offset)); // Updates tmpReg
 
   if (RTMRetryCount > 0) {
@@ -1767,9 +1753,7 @@ void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg
   // Attempt stack-locking ...
   orptr (tmpReg, markOopDesc::unlocked_value);
   movptr(Address(boxReg, 0), tmpReg);          // Anticipate successful CAS
-  if (os::is_MP()) {
-    lock();
-  }
+  lock();
   cmpxchgptr(boxReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Updates tmpReg
   if (counters != NULL) {
     cond_inc32(Assembler::equal,
@@ -1826,9 +1810,7 @@ void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg
   // we later store "Self" into m->Owner. Transiently storing a stack address
   // (rsp or the address of the box) into m->owner is harmless.
   // Invariant: tmpReg == 0. tmpReg is EAX which is the implicit cmpxchg comparand.
-  if (os::is_MP()) {
-    lock();
-  }
+  lock();
   cmpxchgptr(scrReg, Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
   movptr(Address(scrReg, 0), 3);          // box->_displaced_header = 3
   // If we weren't able to swing _owner from NULL to the BasicLock
@@ -1851,9 +1833,7 @@ void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg
   movq(scrReg, tmpReg);
   xorq(tmpReg, tmpReg);
 
-  if (os::is_MP()) {
-    lock();
-  }
+  lock();
   cmpxchgptr(r15_thread, Address(scrReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
   // Unconditionally set box->_displaced_header = markOopDesc::unused_mark().
   // Without cast to int32_t movptr will destroy r10 which is typically obj.
@@ -2000,9 +1980,7 @@ void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpR
   // The "box" value on the stack is stable, so we can reload
   // and be assured we observe the same value as above.
   movptr(tmpReg, Address(boxReg, 0));
-  if (os::is_MP()) {
-    lock();
-  }
+  lock();
   cmpxchgptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Uses RAX which is box
   // Intention fall-thru into DONE_LABEL
 
@@ -2036,16 +2014,16 @@ void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpR
 
       xorptr(boxReg, boxReg);
       movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), (int32_t)NULL_WORD);
-      if (os::is_MP()) {
-        // Memory barrier/fence
-        // Dekker pivot point -- fulcrum : ST Owner; MEMBAR; LD Succ
-        // Instead of MFENCE we use a dummy locked add of 0 to the top-of-stack.
-        // This is faster on Nehalem and AMD Shanghai/Barcelona.
-        // See https://blogs.oracle.com/dave/entry/instruction_selection_for_volatile_fences
-        // We might also restructure (ST Owner=0;barrier;LD _Succ) to
-        // (mov box,0; xchgq box, &m->Owner; LD _succ) .
-        lock(); addl(Address(rsp, 0), 0);
-      }
+
+      // Memory barrier/fence
+      // Dekker pivot point -- fulcrum : ST Owner; MEMBAR; LD Succ
+      // Instead of MFENCE we use a dummy locked add of 0 to the top-of-stack.
+      // This is faster on Nehalem and AMD Shanghai/Barcelona.
+      // See https://blogs.oracle.com/dave/entry/instruction_selection_for_volatile_fences
+      // We might also restructure (ST Owner=0;barrier;LD _Succ) to
+      // (mov box,0; xchgq box, &m->Owner; LD _succ) .
+      lock(); addl(Address(rsp, 0), 0);
 
       cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), (int32_t)NULL_WORD);
       jccb (Assembler::notZero, LSuccess);
 
@ -2063,7 +2041,7 @@ void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpR
|
||||
|
||||
// box is really RAX -- the following CMPXCHG depends on that binding
|
||||
// cmpxchg R,[M] is equivalent to rax = CAS(M,rax,R)
|
||||
if (os::is_MP()) { lock(); }
|
||||
lock();
|
||||
cmpxchgptr(r15_thread, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
|
||||
// There's no successor so we tried to regrab the lock.
|
||||
// If that didn't work, then another thread grabbed the
|
||||
@ -2081,7 +2059,7 @@ void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpR
|
||||
|
||||
bind (Stacked);
|
||||
movptr(tmpReg, Address (boxReg, 0)); // re-fetch
|
||||
if (os::is_MP()) { lock(); }
|
||||
lock();
|
||||
cmpxchgptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Uses RAX which is box
|
||||
|
||||
#endif
|
||||
@ -2633,13 +2611,11 @@ void MacroAssembler::cmpoop(Register src1, jobject src2) {
|
||||
|
||||
void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr) {
|
||||
if (reachable(adr)) {
|
||||
if (os::is_MP())
|
||||
lock();
|
||||
lock();
|
||||
cmpxchgptr(reg, as_Address(adr));
|
||||
} else {
|
||||
lea(rscratch1, adr);
|
||||
if (os::is_MP())
|
||||
lock();
|
||||
lock();
|
||||
cmpxchgptr(reg, Address(rscratch1, 0));
|
||||
}
|
||||
}
|
||||
|
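The change in this file is mechanical: every `if (os::is_MP()) { lock(); }` guard around the assembler's lock() call is dropped and the LOCK prefix is emitted unconditionally. A minimal standalone sketch of the before/after shapes, using a hypothetical Emitter type rather than HotSpot's Assembler:

#include <cstdint>
#include <vector>

// Hypothetical stand-in for the assembler: it only records opcode bytes.
struct Emitter {
  std::vector<uint8_t> code;
  void lock()    { code.push_back(0xF0); }                        // x86 LOCK prefix
  void cmpxchg() { code.push_back(0x0F); code.push_back(0xB1); }  // CMPXCHG r/m32, r32 (opcode bytes only)

  // Old shape: the prefix depended on the processor count.
  void cas_old(bool is_mp) { if (is_mp) lock(); cmpxchg(); }

  // New shape after this change: always emit the prefix.
  void cas_new()           { lock(); cmpxchg(); }
};

int main() {
  Emitter e;
  e.cas_new();                       // always 0xF0 0x0F 0xB1 now
  return e.code.size() == 3 ? 0 : 1;
}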
@ -202,9 +202,7 @@ void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
assert (instr_addr != NULL, "illegal address for code patching");

NativeCall* n_call = nativeCall_at (instr_addr); // checking that it is a call
if (os::is_MP()) {
guarantee((intptr_t)instr_addr % BytesPerWord == 0, "must be aligned");
}
guarantee((intptr_t)instr_addr % BytesPerWord == 0, "must be aligned");

// First patch dummy jmp in place
unsigned char patch[4];
@ -262,67 +260,14 @@ void NativeCall::set_destination_mt_safe(address dest) {
assert(Patching_lock->is_locked() ||
SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
// Both C1 and C2 should now be generating code which aligns the patched address
// to be within a single cache line except that C1 does not do the alignment on
// uniprocessor systems.
// to be within a single cache line.
bool is_aligned = ((uintptr_t)displacement_address() + 0) / cache_line_size ==
((uintptr_t)displacement_address() + 3) / cache_line_size;

guarantee(!os::is_MP() || is_aligned, "destination must be aligned");
guarantee(is_aligned, "destination must be aligned");

if (is_aligned) {
// Simple case: The destination lies within a single cache line.
set_destination(dest);
} else if ((uintptr_t)instruction_address() / cache_line_size ==
((uintptr_t)instruction_address()+1) / cache_line_size) {
// Tricky case: The instruction prefix lies within a single cache line.
intptr_t disp = dest - return_address();
#ifdef AMD64
guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
#endif // AMD64

int call_opcode = instruction_address()[0];

// First patch dummy jump in place:
{
u_char patch_jump[2];
patch_jump[0] = 0xEB; // jmp rel8
patch_jump[1] = 0xFE; // jmp to self

assert(sizeof(patch_jump)==sizeof(short), "sanity check");
*(short*)instruction_address() = *(short*)patch_jump;
}
// Invalidate. Opteron requires a flush after every write.
wrote(0);

// (Note: We assume any reader which has already started to read
// the unpatched call will completely read the whole unpatched call
// without seeing the next writes we are about to make.)

// Next, patch the last three bytes:
u_char patch_disp[5];
patch_disp[0] = call_opcode;
*(int32_t*)&patch_disp[1] = (int32_t)disp;
assert(sizeof(patch_disp)==instruction_size, "sanity check");
for (int i = sizeof(short); i < instruction_size; i++)
instruction_address()[i] = patch_disp[i];

// Invalidate. Opteron requires a flush after every write.
wrote(sizeof(short));

// (Note: We assume that any reader which reads the opcode we are
// about to repatch will also read the writes we just made.)

// Finally, overwrite the jump:
*(short*)instruction_address() = *(short*)patch_disp;
// Invalidate. Opteron requires a flush after every write.
wrote(0);

debug_only(verify());
guarantee(destination() == dest, "patch succeeded");
} else {
// Impossible: One or the other must be atomically writable.
ShouldNotReachHere();
}
// The destination lies within a single cache line.
set_destination(dest);
}

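With non-MP builds gone, MT-safe call patching keeps only the simple path: the compilers always align patch sites, so the 4-byte displacement is guaranteed to sit inside one cache line and the multi-step jmp-to-self fallback is deleted. A small standalone sketch of that alignment test, mirroring the is_aligned expression above (the 64-byte cache line and the function name are assumptions for illustration, not HotSpot constants):

#include <cstdint>
#include <cassert>

// A 4-byte displacement can be patched atomically only if its first and
// last byte share a cache line (assumed 64 bytes here).
static bool displacement_is_aligned(uintptr_t displacement_address,
                                    uintptr_t cache_line_size = 64) {
  return (displacement_address + 0) / cache_line_size ==
         (displacement_address + 3) / cache_line_size;
}

int main() {
  assert(displacement_is_aligned(64));   // starts a cache line: patchable
  assert(!displacement_is_aligned(62));  // straddles two lines: would now trip the guarantee
  return 0;
}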
@ -2007,12 +2007,9 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Save (object->mark() | 1) into BasicLock's displaced header
__ movptr(Address(lock_reg, mark_word_offset), swap_reg);

if (os::is_MP()) {
__ lock();
}

// src -> dest iff dest == rax, else rax, <- dest
// *obj_reg = lock_reg iff *obj_reg == rax, else rax, = *(obj_reg)
__ lock();
__ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
__ jcc(Assembler::equal, lock_done);

@ -2091,19 +2088,17 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// didn't see any synchronization is progress, and escapes.
__ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);

if(os::is_MP()) {
if (UseMembar) {
// Force this write out before the read below
__ membar(Assembler::Membar_mask_bits(
Assembler::LoadLoad | Assembler::LoadStore |
Assembler::StoreLoad | Assembler::StoreStore));
} else {
// Write serialization page so VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
__ serialize_memory(thread, rcx);
}
if (UseMembar) {
// Force this write out before the read below
__ membar(Assembler::Membar_mask_bits(
Assembler::LoadLoad | Assembler::LoadStore |
Assembler::StoreLoad | Assembler::StoreStore));
} else {
// Write serialization page so VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
__ serialize_memory(thread, rcx);
}

if (AlwaysRestoreFPU) {
@ -2199,12 +2194,9 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ lea(rax, Address(rbp, lock_slot_rbp_offset));

// Atomic swap old header if oop still contains the stack lock
if (os::is_MP()) {
__ lock();
}

// src -> dest iff dest == rax, else rax, <- dest
// *obj_reg = rbx, iff *obj_reg == rax, else rax, = *(obj_reg)
__ lock();
__ cmpxchgptr(rbx, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
__ jcc(Assembler::notEqual, slow_path_unlock);

@ -2464,11 +2464,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Save (object->mark() | 1) into BasicLock's displaced header
__ movptr(Address(lock_reg, mark_word_offset), swap_reg);

if (os::is_MP()) {
__ lock();
}

// src -> dest iff dest == rax else rax <- dest
__ lock();
__ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
__ jcc(Assembler::equal, lock_done);

@ -2558,19 +2555,17 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// didn't see any synchronization is progress, and escapes.
__ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans);

if(os::is_MP()) {
if (UseMembar) {
// Force this write out before the read below
__ membar(Assembler::Membar_mask_bits(
Assembler::LoadLoad | Assembler::LoadStore |
Assembler::StoreLoad | Assembler::StoreStore));
} else {
// Write serialization page so VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
__ serialize_memory(r15_thread, rcx);
}
if (UseMembar) {
// Force this write out before the read below
__ membar(Assembler::Membar_mask_bits(
Assembler::LoadLoad | Assembler::LoadStore |
Assembler::StoreLoad | Assembler::StoreStore));
} else {
// Write serialization page so VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
__ serialize_memory(r15_thread, rcx);
}

Label after_transition;
@ -2661,9 +2656,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ movptr(old_hdr, Address(rax, 0));

// Atomic swap old header if oop still contains the stack lock
if (os::is_MP()) {
__ lock();
}
__ lock();
__ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
__ jcc(Assembler::notEqual, slow_path_unlock);

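In both native wrappers the thread-state publication now always pays for visibility: either a full membar (UseMembar) or a write to the serialization page, with no single-CPU shortcut. A hypothetical C++ model of that transition, not the generated assembly; ThreadModel, serialize_slot and use_membar are stand-ins for illustration:

#include <atomic>

// The _thread_in_native_trans store must become globally visible before the
// safepoint / suspend check that follows it.
enum ThreadState { in_native, in_native_trans };

static volatile int serialize_slot = 0;  // stand-in for the serialization page word

struct ThreadModel {
  std::atomic<ThreadState> state{in_native};
  bool use_membar = true;                // stand-in for the UseMembar flag

  void transition_from_native() {
    state.store(in_native_trans, std::memory_order_relaxed);
    if (use_membar) {
      // "Force this write out before the read below"
      std::atomic_thread_fence(std::memory_order_seq_cst);
    } else {
      // HotSpot instead writes a thread-specific offset within a
      // serialization page the VM thread can protect; modelled here by a
      // plain volatile store.
      serialize_slot = 1;
    }
    // ... check for safepoint operation in progress / pending suspends ...
  }
};

int main() {
  ThreadModel t;
  t.transition_from_native();
  return 0;
}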
@ -607,7 +607,7 @@ class StubGenerator: public StubCodeGenerator {
address start = __ pc();

__ movl(rax, c_rarg2);
if ( os::is_MP() ) __ lock();
__ lock();
__ cmpxchgl(c_rarg0, Address(c_rarg1, 0));
__ ret(0);

@ -633,7 +633,7 @@ class StubGenerator: public StubCodeGenerator {
address start = __ pc();

__ movsbq(rax, c_rarg2);
if ( os::is_MP() ) __ lock();
__ lock();
__ cmpxchgb(c_rarg0, Address(c_rarg1, 0));
__ ret(0);

@ -659,7 +659,7 @@ class StubGenerator: public StubCodeGenerator {
address start = __ pc();

__ movq(rax, c_rarg2);
if ( os::is_MP() ) __ lock();
__ lock();
__ cmpxchgq(c_rarg0, Address(c_rarg1, 0));
__ ret(0);

@ -680,7 +680,7 @@ class StubGenerator: public StubCodeGenerator {
address start = __ pc();

__ movl(rax, c_rarg0);
if ( os::is_MP() ) __ lock();
__ lock();
__ xaddl(Address(c_rarg1, 0), c_rarg0);
__ addl(rax, c_rarg0);
__ ret(0);
@ -702,7 +702,7 @@ class StubGenerator: public StubCodeGenerator {
address start = __ pc();

__ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow
if ( os::is_MP() ) __ lock();
__ lock();
__ xaddptr(Address(c_rarg1, 0), c_rarg0);
__ addptr(rax, c_rarg0);
__ ret(0);

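These runtime stubs implement compare-and-swap and fetch-and-add entry points, and they now always carry the LOCK prefix. A portable sketch of the semantics they provide, written with std::atomic rather than the generated machine code (the function names here are illustrative, not the stub names):

#include <atomic>
#include <cstdint>
#include <cassert>

// Compare-and-swap returning the old value in the same way the stub returns
// it in rax.
static int32_t cmpxchg32(int32_t exchange, std::atomic<int32_t>* dest, int32_t compare) {
  dest->compare_exchange_strong(compare, exchange);  // lock cmpxchg
  return compare;                                    // old value on success or failure
}

// Add-and-fetch, matching "lock xadd" followed by the addl into rax.
static int32_t add_and_fetch32(int32_t add, std::atomic<int32_t>* dest) {
  return dest->fetch_add(add) + add;
}

int main() {
  std::atomic<int32_t> v{5};
  assert(cmpxchg32(7, &v, 5) == 5 && v.load() == 7);
  assert(add_and_fetch32(3, &v) == 10);
  return 0;
}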
@ -1090,19 +1090,17 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
__ movl(Address(thread, JavaThread::thread_state_offset()),
_thread_in_native_trans);

if (os::is_MP()) {
if (UseMembar) {
// Force this write out before the read below
__ membar(Assembler::Membar_mask_bits(
Assembler::LoadLoad | Assembler::LoadStore |
Assembler::StoreLoad | Assembler::StoreStore));
} else {
// Write serialization page so VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
__ serialize_memory(thread, rcx);
}
if (UseMembar) {
// Force this write out before the read below
__ membar(Assembler::Membar_mask_bits(
Assembler::LoadLoad | Assembler::LoadStore |
Assembler::StoreLoad | Assembler::StoreStore));
} else {
// Write serialization page so VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
__ serialize_memory(thread, rcx);
}

#ifndef _LP64

@ -2714,7 +2714,6 @@ void TemplateTable::_return(TosState state) {

void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint ) {
// Helper function to insert a is-volatile test and memory barrier
if(!os::is_MP()) return; // Not needed on single CPU
__ membar(order_constraint);
}

@ -3493,13 +3492,12 @@ void TemplateTable::fast_accessfield(TosState state) {
__ get_cache_and_index_at_bcp(rcx, rbx, 1);
// replace index with field offset from cache entry
// [jk] not needed currently
// if (os::is_MP()) {
// __ movl(rdx, Address(rcx, rbx, Address::times_8,
// in_bytes(ConstantPoolCache::base_offset() +
// ConstantPoolCacheEntry::flags_offset())));
// __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
// __ andl(rdx, 0x1);
// }
// __ movl(rdx, Address(rcx, rbx, Address::times_8,
// in_bytes(ConstantPoolCache::base_offset() +
// ConstantPoolCacheEntry::flags_offset())));
// __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
// __ andl(rdx, 0x1);
//
__ movptr(rbx, Address(rcx, rbx, Address::times_ptr,
in_bytes(ConstantPoolCache::base_offset() +
ConstantPoolCacheEntry::f2_offset())));
@ -3544,13 +3542,11 @@ void TemplateTable::fast_accessfield(TosState state) {
ShouldNotReachHere();
}
// [jk] not needed currently
// if (os::is_MP()) {
// Label notVolatile;
// __ testl(rdx, rdx);
// __ jcc(Assembler::zero, notVolatile);
// __ membar(Assembler::LoadLoad);
// __ bind(notVolatile);
//};
}

void TemplateTable::fast_xaccess(TosState state) {
@ -3585,17 +3581,15 @@ void TemplateTable::fast_xaccess(TosState state) {
}

// [jk] not needed currently
// if (os::is_MP()) {
// Label notVolatile;
// __ movl(rdx, Address(rcx, rdx, Address::times_8,
// in_bytes(ConstantPoolCache::base_offset() +
// ConstantPoolCacheEntry::flags_offset())));
// __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
// __ testl(rdx, 0x1);
// __ jcc(Assembler::zero, notVolatile);
// __ membar(Assembler::LoadLoad);
// __ bind(notVolatile);
// }
// Label notVolatile;
// __ movl(rdx, Address(rcx, rdx, Address::times_8,
// in_bytes(ConstantPoolCache::base_offset() +
// ConstantPoolCacheEntry::flags_offset())));
// __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
// __ testl(rdx, 0x1);
// __ jcc(Assembler::zero, notVolatile);
// __ membar(Assembler::LoadLoad);
// __ bind(notVolatile);

__ decrement(rbcp);
}

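The interpreter's volatile_barrier() helper used to return early on a single CPU; it now always emits the requested membar, so a volatile bytecode access always pays for its barrier. A minimal C++ sketch of the simplified helper shape (the enum and fence mapping are illustrative assumptions, not the interpreter's encoding):

#include <atomic>

enum BarrierKind { load_load, store_load };

// The single-CPU early return is gone: every call emits a barrier.
static void volatile_barrier(BarrierKind kind) {
  if (kind == store_load) {
    std::atomic_thread_fence(std::memory_order_seq_cst);
  } else {
    std::atomic_thread_fence(std::memory_order_acquire);
  }
}

int main() { volatile_barrier(store_load); return 0; }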
@ -2804,11 +2804,7 @@ instruct onspinwait() %{

format %{
$$template
if (os::is_MP()) {
$$emit$$"pause\t! membar_onspinwait"
} else {
$$emit$$"MEMBAR-onspinwait ! (empty encoding)"
}
$$emit$$"pause\t! membar_onspinwait"
%}
ins_encode %{
__ pause();

@ -2087,8 +2087,7 @@ encode %{
%}

enc_class lock_prefix( ) %{
if( os::is_MP() )
emit_opcode(cbuf,0xF0); // [Lock]
emit_opcode(cbuf,0xF0); // [Lock]
%}

// Cmp-xchg long value.
@ -2102,8 +2101,7 @@ encode %{
emit_opcode(cbuf,0x87);
emit_opcode(cbuf,0xD9);
// [Lock]
if( os::is_MP() )
emit_opcode(cbuf,0xF0);
emit_opcode(cbuf,0xF0);
// CMPXCHG8 [Eptr]
emit_opcode(cbuf,0x0F);
emit_opcode(cbuf,0xC7);
@ -2115,8 +2113,7 @@ encode %{

enc_class enc_cmpxchg(eSIRegP mem_ptr) %{
// [Lock]
if( os::is_MP() )
emit_opcode(cbuf,0xF0);
emit_opcode(cbuf,0xF0);

// CMPXCHG [Eptr]
emit_opcode(cbuf,0x0F);
@ -2126,8 +2123,7 @@ encode %{

enc_class enc_cmpxchgb(eSIRegP mem_ptr) %{
// [Lock]
if( os::is_MP() )
emit_opcode(cbuf,0xF0);
emit_opcode(cbuf,0xF0);

// CMPXCHGB [Eptr]
emit_opcode(cbuf,0x0F);
@ -2137,8 +2133,7 @@ encode %{

enc_class enc_cmpxchgw(eSIRegP mem_ptr) %{
// [Lock]
if( os::is_MP() )
emit_opcode(cbuf,0xF0);
emit_opcode(cbuf,0xF0);

// 16-bit mode
emit_opcode(cbuf, 0x66);
@ -6764,11 +6759,7 @@ instruct membar_volatile(eFlagsReg cr) %{

format %{
$$template
if (os::is_MP()) {
$$emit$$"LOCK ADDL [ESP + #0], 0\t! membar_volatile"
} else {
$$emit$$"MEMBAR-volatile ! (empty encoding)"
}
$$emit$$"LOCK ADDL [ESP + #0], 0\t! membar_volatile"
%}
ins_encode %{
__ membar(Assembler::StoreLoad);
@ -7373,8 +7364,7 @@ instruct storeLConditional( memory mem, eADXRegL oldval, eBCXRegL newval, eFlags
// rcx as the high order word of the new value to store but
// our register encoding uses rbx.
__ xchgl(as_Register(EBX_enc), as_Register(ECX_enc));
if( os::is_MP() )
__ lock();
__ lock();
__ cmpxchg8($mem$$Address);
__ xchgl(as_Register(EBX_enc), as_Register(ECX_enc));
%}
@ -7499,7 +7489,7 @@ instruct xaddB_no_res( memory mem, Universe dummy, immI add, eFlagsReg cr) %{
effect(KILL cr);
format %{ "ADDB [$mem],$add" %}
ins_encode %{
if (os::is_MP()) { __ lock(); }
__ lock();
__ addb($mem$$Address, $add$$constant);
%}
ins_pipe( pipe_cmpxchg );
@ -7511,7 +7501,7 @@ instruct xaddB( memory mem, xRegI newval, eFlagsReg cr) %{
effect(KILL cr);
format %{ "XADDB [$mem],$newval" %}
ins_encode %{
if (os::is_MP()) { __ lock(); }
__ lock();
__ xaddb($mem$$Address, $newval$$Register);
%}
ins_pipe( pipe_cmpxchg );
@ -7523,7 +7513,7 @@ instruct xaddS_no_res( memory mem, Universe dummy, immI add, eFlagsReg cr) %{
effect(KILL cr);
format %{ "ADDS [$mem],$add" %}
ins_encode %{
if (os::is_MP()) { __ lock(); }
__ lock();
__ addw($mem$$Address, $add$$constant);
%}
ins_pipe( pipe_cmpxchg );
@ -7534,7 +7524,7 @@ instruct xaddS( memory mem, rRegI newval, eFlagsReg cr) %{
effect(KILL cr);
format %{ "XADDS [$mem],$newval" %}
ins_encode %{
if (os::is_MP()) { __ lock(); }
__ lock();
__ xaddw($mem$$Address, $newval$$Register);
%}
ins_pipe( pipe_cmpxchg );
@ -7546,7 +7536,7 @@ instruct xaddI_no_res( memory mem, Universe dummy, immI add, eFlagsReg cr) %{
effect(KILL cr);
format %{ "ADDL [$mem],$add" %}
ins_encode %{
if (os::is_MP()) { __ lock(); }
__ lock();
__ addl($mem$$Address, $add$$constant);
%}
ins_pipe( pipe_cmpxchg );
@ -7557,7 +7547,7 @@ instruct xaddI( memory mem, rRegI newval, eFlagsReg cr) %{
effect(KILL cr);
format %{ "XADDL [$mem],$newval" %}
ins_encode %{
if (os::is_MP()) { __ lock(); }
__ lock();
__ xaddl($mem$$Address, $newval$$Register);
%}
ins_pipe( pipe_cmpxchg );

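Every locked encoding in the AD file (lock_prefix, the enc_cmpxchg* classes, and the xadd* instructs) now writes the 0xF0 LOCK byte unconditionally instead of consulting os::is_MP() at code-emission time. A sketch of that encoder change with a hypothetical byte buffer standing in for C2's CodeBuffer:

#include <cstdint>
#include <vector>

// Hypothetical code buffer; the real enc_class writes into cbuf.
struct Buf { std::vector<uint8_t> bytes; void emit(uint8_t b) { bytes.push_back(b); } };

// Old encoder: the LOCK byte depended on the processor count.
static void lock_prefix_old(Buf& cbuf, bool is_mp) { if (is_mp) cbuf.emit(0xF0); }

// New encoder: a locked instruction is always encoded with its prefix.
static void lock_prefix_new(Buf& cbuf) { cbuf.emit(0xF0); }

int main() {
  Buf b;
  lock_prefix_new(b);
  b.emit(0x0F); b.emit(0xC7);   // CMPXCHG8B opcode bytes follow, as in enc_cmpxchg8
  return b.bytes.front() == 0xF0 ? 0 : 1;
}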
@ -1,5 +1,5 @@
//
// Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@ -2341,9 +2341,7 @@ encode %{

enc_class lock_prefix()
%{
if (os::is_MP()) {
emit_opcode(cbuf, 0xF0); // lock
}
emit_opcode(cbuf, 0xF0); // lock
%}

enc_class REX_mem(memory mem)
@ -6601,11 +6599,7 @@ instruct membar_volatile(rFlagsReg cr) %{

format %{
$$template
if (os::is_MP()) {
$$emit$$"lock addl [rsp + #0], 0\t! membar_volatile"
} else {
$$emit$$"MEMBAR-volatile ! (empty encoding)"
}
$$emit$$"lock addl [rsp + #0], 0\t! membar_volatile"
%}
ins_encode %{
__ membar(Assembler::StoreLoad);
@ -7801,7 +7795,7 @@ instruct xaddB_no_res( memory mem, Universe dummy, immI add, rFlagsReg cr) %{
effect(KILL cr);
format %{ "ADDB [$mem],$add" %}
ins_encode %{
if (os::is_MP()) { __ lock(); }
__ lock();
__ addb($mem$$Address, $add$$constant);
%}
ins_pipe( pipe_cmpxchg );
@ -7812,7 +7806,7 @@ instruct xaddB( memory mem, rRegI newval, rFlagsReg cr) %{
effect(KILL cr);
format %{ "XADDB [$mem],$newval" %}
ins_encode %{
if (os::is_MP()) { __ lock(); }
__ lock();
__ xaddb($mem$$Address, $newval$$Register);
%}
ins_pipe( pipe_cmpxchg );
@ -7824,7 +7818,7 @@ instruct xaddS_no_res( memory mem, Universe dummy, immI add, rFlagsReg cr) %{
effect(KILL cr);
format %{ "ADDW [$mem],$add" %}
ins_encode %{
if (os::is_MP()) { __ lock(); }
__ lock();
__ addw($mem$$Address, $add$$constant);
%}
ins_pipe( pipe_cmpxchg );
@ -7835,7 +7829,7 @@ instruct xaddS( memory mem, rRegI newval, rFlagsReg cr) %{
effect(KILL cr);
format %{ "XADDW [$mem],$newval" %}
ins_encode %{
if (os::is_MP()) { __ lock(); }
__ lock();
__ xaddw($mem$$Address, $newval$$Register);
%}
ins_pipe( pipe_cmpxchg );
@ -7847,7 +7841,7 @@ instruct xaddI_no_res( memory mem, Universe dummy, immI add, rFlagsReg cr) %{
effect(KILL cr);
format %{ "ADDL [$mem],$add" %}
ins_encode %{
if (os::is_MP()) { __ lock(); }
__ lock();
__ addl($mem$$Address, $add$$constant);
%}
ins_pipe( pipe_cmpxchg );
@ -7858,7 +7852,7 @@ instruct xaddI( memory mem, rRegI newval, rFlagsReg cr) %{
effect(KILL cr);
format %{ "XADDL [$mem],$newval" %}
ins_encode %{
if (os::is_MP()) { __ lock(); }
__ lock();
__ xaddl($mem$$Address, $newval$$Register);
%}
ins_pipe( pipe_cmpxchg );
@ -7870,7 +7864,7 @@ instruct xaddL_no_res( memory mem, Universe dummy, immL32 add, rFlagsReg cr) %{
effect(KILL cr);
format %{ "ADDQ [$mem],$add" %}
ins_encode %{
if (os::is_MP()) { __ lock(); }
__ lock();
__ addq($mem$$Address, $add$$constant);
%}
ins_pipe( pipe_cmpxchg );
@ -7881,7 +7875,7 @@ instruct xaddL( memory mem, rRegL newval, rFlagsReg cr) %{
effect(KILL cr);
format %{ "XADDQ [$mem],$newval" %}
ins_encode %{
if (os::is_MP()) { __ lock(); }
__ lock();
__ xaddq($mem$$Address, $newval$$Register);
%}
ins_pipe( pipe_cmpxchg );
@ -10898,7 +10892,7 @@ instruct rep_stos(rcx_RegL cnt, rdi_RegP base, regD tmp, rax_RegI zero,
ins_pipe(pipe_slow);
%}

instruct rep_stos_large(rcx_RegL cnt, rdi_RegP base, regD tmp, rax_RegI zero,
instruct rep_stos_large(rcx_RegL cnt, rdi_RegP base, regD tmp, rax_RegI zero,
Universe dummy, rFlagsReg cr)
%{
predicate(((ClearArrayNode*)n)->is_large());
@ -10942,7 +10936,7 @@ instruct rep_stos_large(rcx_RegL cnt, rdi_RegP base, regD tmp, rax_RegI zero,
}
%}
ins_encode %{
__ clear_mem($base$$Register, $cnt$$Register, $zero$$Register,
__ clear_mem($base$$Register, $cnt$$Register, $zero$$Register,
$tmp$$XMMRegister, true);
%}
ins_pipe(pipe_slow);

@ -267,8 +267,7 @@ pid_t os::Linux::gettid() {

// Most versions of linux have a bug where the number of processors are
// determined by looking at the /proc file system. In a chroot environment,
// the system call returns 1. This causes the VM to act as if it is
// a single processor and elide locking (see is_MP() call).
// the system call returns 1.
static bool unsafe_chroot_detected = false;
static const char *unstable_chroot_error = "/proc file system not found.\n"
"Java may be unstable running multithreaded in a chroot "

@ -136,7 +136,7 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,

extern "C" {
// defined in bsd_x86.s
int64_t _Atomic_cmpxchg_long(int64_t, volatile int64_t*, int64_t, bool);
int64_t _Atomic_cmpxchg_long(int64_t, volatile int64_t*, int64_t);
void _Atomic_move_long(const volatile int64_t* src, volatile int64_t* dst);
}

@ -635,8 +635,7 @@ mmx_acs_CopyLeft:

# Support for int64_t Atomic::cmpxchg(int64_t exchange_value,
# volatile int64_t* dest,
# int64_t compare_value,
# bool is_MP)
# int64_t compare_value)
#
.p2align 4,,15
ELF_TYPE(_Atomic_cmpxchg_long,@function)
@ -649,10 +648,8 @@ SYMBOL(_Atomic_cmpxchg_long):
movl 24(%esp), %eax # 24(%esp) : compare_value (low)
movl 28(%esp), %edx # 28(%esp) : compare_value (high)
movl 20(%esp), %edi # 20(%esp) : dest
cmpl $0, 32(%esp) # 32(%esp) : is_MP
je 1f
lock
1: cmpxchg8b (%edi)
cmpxchg8b (%edi)
popl %edi
popl %ebx
ret

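The hand-written 32-bit BSD helper loses its trailing bool is_MP argument together with the cmpl/je that skipped the lock prefix, so it now always executes lock cmpxchg8b. A sketch of the matching C declaration and a stand-in definition so the snippet links on its own; the cmpxchg64 caller and the __atomic-based body are illustrative assumptions, the real body is the assembly above:

#include <cstdint>

// New declaration, matching atomic_bsd_x86.hpp: no is_MP parameter.
extern "C" int64_t _Atomic_cmpxchg_long(int64_t exchange_value,
                                        volatile int64_t* dest,
                                        int64_t compare_value) {
  // Stand-in body for this sketch only (GCC/Clang builtin).
  __atomic_compare_exchange_n(dest, &compare_value, exchange_value,
                              false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
  return compare_value;   // old value, as cmpxchg8b leaves it in edx:eax
}

// Hypothetical caller shape: nothing threads os::is_MP() through anymore.
static int64_t cmpxchg64(volatile int64_t* dest, int64_t compare, int64_t exchange) {
  return _Atomic_cmpxchg_long(exchange, dest, compare);
}

int main() {
  volatile int64_t v = 1;
  return (cmpxchg64(&v, 1, 2) == 1 && v == 2) ? 0 : 1;
}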
@ -50,17 +50,12 @@
//
// inline void _OrderAccess_dsb() {
// volatile intptr_t dummy = 0;
// if (os::is_MP()) {
// __asm__ volatile (
// "mcr p15, 0, %0, c7, c10, 4"
// : : "r" (dummy) : "memory");
// }
// __asm__ volatile (
// "mcr p15, 0, %0, c7, c10, 4"
// : : "r" (dummy) : "memory");
// }

inline static void dmb_sy() {
if (!os::is_MP()) {
return;
}
#ifdef AARCH64
__asm__ __volatile__ ("dmb sy" : : : "memory");
#else
@ -82,9 +77,6 @@ inline static void dmb_sy() {
}

inline static void dmb_st() {
if (!os::is_MP()) {
return;
}
#ifdef AARCH64
__asm__ __volatile__ ("dmb st" : : : "memory");
#else
@ -108,9 +100,6 @@ inline static void dmb_st() {
// Load-Load/Store barrier
inline static void dmb_ld() {
#ifdef AARCH64
if (!os::is_MP()) {
return;
}
__asm__ __volatile__ ("dmb ld" : : : "memory");
#else
dmb_sy();

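On ARM the dmb_* helpers no longer bail out early when os::is_MP() is false, so every ordering request actually issues a dmb. A standalone sketch of the simplified shape; the _sketch suffix and the non-ARM fallback fences are assumptions added so the example compiles anywhere:

#include <atomic>

// The uniprocessor early return is gone: a barrier is always issued.
inline void dmb_sy_sketch() {
#if defined(__aarch64__)
  __asm__ __volatile__ ("dmb sy" : : : "memory");
#else
  std::atomic_thread_fence(std::memory_order_seq_cst);  // portable stand-in
#endif
}

inline void dmb_st_sketch() {
#if defined(__aarch64__)
  __asm__ __volatile__ ("dmb st" : : : "memory");
#else
  std::atomic_thread_fence(std::memory_order_release);  // store-ordering stand-in
#endif
}

int main() { dmb_sy_sketch(); dmb_st_sketch(); return 0; }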
@ -394,11 +394,9 @@ class PatchingStub: public CodeStub {
_id(id)
, _info(NULL)
, _index(index) {
if (os::is_MP()) {
// force alignment of patch sites on MP hardware so we
// can guarantee atomic writes to the patch site.
align_patch_site(masm);
}
// force alignment of patch sites so we
// can guarantee atomic writes to the patch site.
align_patch_site(masm);
_pc_start = masm->pc();
masm->bind(_patch_site_entry);
}

@ -918,18 +918,16 @@ void InstructionPrinter::do_RuntimeCall(RuntimeCall* x) {
}

void InstructionPrinter::do_MemBar(MemBar* x) {
if (os::is_MP()) {
LIR_Code code = x->code();
switch (code) {
case lir_membar_acquire : output()->print("membar_acquire"); break;
case lir_membar_release : output()->print("membar_release"); break;
case lir_membar : output()->print("membar"); break;
case lir_membar_loadload : output()->print("membar_loadload"); break;
case lir_membar_storestore: output()->print("membar_storestore"); break;
case lir_membar_loadstore : output()->print("membar_loadstore"); break;
case lir_membar_storeload : output()->print("membar_storeload"); break;
default : ShouldNotReachHere(); break;
}
LIR_Code code = x->code();
switch (code) {
case lir_membar_acquire : output()->print("membar_acquire"); break;
case lir_membar_release : output()->print("membar_release"); break;
case lir_membar : output()->print("membar"); break;
case lir_membar_loadload : output()->print("membar_loadload"); break;
case lir_membar_storestore: output()->print("membar_storestore"); break;
case lir_membar_loadstore : output()->print("membar_loadstore"); break;
case lir_membar_storeload : output()->print("membar_storeload"); break;
default : ShouldNotReachHere(); break;
}
}

@ -446,10 +446,8 @@ void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
verify_oop_map(op->info());

if (os::is_MP()) {
// must align calls sites, otherwise they can't be updated atomically on MP hardware
align_call(op->code());
}
// must align calls sites, otherwise they can't be updated atomically
align_call(op->code());

// emit the static call stub stuff out of line
emit_static_call_stub();

@ -3068,13 +3068,13 @@ void LIRGenerator::do_Intrinsic(Intrinsic* x) {
break;

case vmIntrinsics::_loadFence :
if (os::is_MP()) __ membar_acquire();
__ membar_acquire();
break;
case vmIntrinsics::_storeFence:
if (os::is_MP()) __ membar_release();
__ membar_release();
break;
case vmIntrinsics::_fullFence :
if (os::is_MP()) __ membar();
__ membar();
break;
case vmIntrinsics::_onSpinWait:
__ on_spin_wait();
@ -3623,18 +3623,16 @@ LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIRItemList* args,
}

void LIRGenerator::do_MemBar(MemBar* x) {
if (os::is_MP()) {
LIR_Code code = x->code();
switch(code) {
case lir_membar_acquire : __ membar_acquire(); break;
case lir_membar_release : __ membar_release(); break;
case lir_membar : __ membar(); break;
case lir_membar_loadload : __ membar_loadload(); break;
case lir_membar_storestore: __ membar_storestore(); break;
case lir_membar_loadstore : __ membar_loadstore(); break;
case lir_membar_storeload : __ membar_storeload(); break;
default : ShouldNotReachHere(); break;
}
LIR_Code code = x->code();
switch(code) {
case lir_membar_acquire : __ membar_acquire(); break;
case lir_membar_release : __ membar_release(); break;
case lir_membar : __ membar(); break;
case lir_membar_loadload : __ membar_loadload(); break;
case lir_membar_storestore: __ membar_storestore(); break;
case lir_membar_loadstore : __ membar_loadstore(); break;
case lir_membar_storeload : __ membar_storeload(); break;
default : ShouldNotReachHere(); break;
}
}

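C1 now emits the fence intrinsics and do_MemBar barriers unconditionally instead of guarding them with os::is_MP(). A hypothetical reduction of that dispatch with a three-value enum and C++ fences standing in for C1's LIR_List operations:

#include <atomic>

enum MemBarKind { membar_acquire, membar_release, membar_full };

// One switch, no is_MP() guard.
static void emit_membar(MemBarKind kind) {
  switch (kind) {
    case membar_acquire: std::atomic_thread_fence(std::memory_order_acquire); break;
    case membar_release: std::atomic_thread_fence(std::memory_order_release); break;
    case membar_full:    std::atomic_thread_fence(std::memory_order_seq_cst); break;
  }
}

int main() {
  emit_membar(membar_full);   // e.g. vmIntrinsics::_fullFence now always reaches here
  return 0;
}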
@ -2748,9 +2748,7 @@ public:
virtual void verify() const {
// make sure code pattern is actually a call imm32 instruction
_call->verify();
if (os::is_MP()) {
_call->verify_alignment();
}
_call->verify_alignment();
}

virtual void verify_resolve_call(address dest) const {

@ -135,7 +135,7 @@ LIR_Opr BarrierSetC1::atomic_add_at(LIRAccess& access, LIRItem& value) {

void BarrierSetC1::store_at_resolved(LIRAccess& access, LIR_Opr value) {
DecoratorSet decorators = access.decorators();
bool is_volatile = (((decorators & MO_SEQ_CST) != 0) || AlwaysAtomicAccesses) && os::is_MP();
bool is_volatile = (((decorators & MO_SEQ_CST) != 0) || AlwaysAtomicAccesses);
bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;
bool mask_boolean = (decorators & C1_MASK_BOOLEAN) != 0;
LIRGenerator* gen = access.gen();
@ -144,7 +144,7 @@ void BarrierSetC1::store_at_resolved(LIRAccess& access, LIR_Opr value) {
value = gen->mask_boolean(access.base().opr(), value, access.access_emit_info());
}

if (is_volatile && os::is_MP()) {
if (is_volatile) {
__ membar_release();
}

@ -163,7 +163,7 @@ void BarrierSetC1::store_at_resolved(LIRAccess& access, LIR_Opr value) {
void BarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
LIRGenerator *gen = access.gen();
DecoratorSet decorators = access.decorators();
bool is_volatile = (((decorators & MO_SEQ_CST) != 0) || AlwaysAtomicAccesses) && os::is_MP();
bool is_volatile = (((decorators & MO_SEQ_CST) != 0) || AlwaysAtomicAccesses);
bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;
bool mask_boolean = (decorators & C1_MASK_BOOLEAN) != 0;
bool in_native = (decorators & IN_NATIVE) != 0;
@ -181,7 +181,7 @@ void BarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
__ load(access.resolved_addr()->as_address_ptr(), result, access.access_emit_info(), patch_code);
}

if (is_volatile && os::is_MP()) {
if (is_volatile) {
__ membar_acquire();
}

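In BarrierSetC1 the volatile treatment of a load or store is now decided purely from the access decorators; os::is_MP() no longer factors into it. A sketch of the old and new predicate with hypothetical decorator bits standing in for HotSpot's DecoratorSet:

#include <cstdint>

using DecoratorSet = uint64_t;
constexpr DecoratorSet MO_SEQ_CST = 1u << 0;   // illustrative bit, not HotSpot's value

// Old predicate: volatile treatment also required an MP system.
static bool is_volatile_old(DecoratorSet d, bool always_atomic, bool is_mp) {
  return (((d & MO_SEQ_CST) != 0) || always_atomic) && is_mp;
}

// New predicate, as in store_at_resolved / load_at_resolved above.
static bool is_volatile_new(DecoratorSet d, bool always_atomic) {
  return ((d & MO_SEQ_CST) != 0) || always_atomic;
}

int main() {
  // A seq-cst access now gets its release/acquire membars even where the old
  // code would have skipped them on a single CPU.
  return is_volatile_new(MO_SEQ_CST, false) ? 0 : 1;
}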
@ -3779,13 +3779,7 @@ void copy_jni_function_table(const struct JNINativeInterface_ *new_jni_NativeInt
void quicken_jni_functions() {
// Replace Get<Primitive>Field with fast versions
if (UseFastJNIAccessors && !JvmtiExport::can_post_field_access()
&& !VerifyJNIFields && !CountJNICalls && !CheckJNICalls
#if defined(_WINDOWS) && defined(IA32) && defined(COMPILER2)
// windows x86 currently needs SEH wrapper and the gain of the fast
// versions currently isn't certain for server vm on uniprocessor.
&& os::is_MP()
#endif
) {
&& !VerifyJNIFields && !CountJNICalls && !CheckJNICalls) {
address func;
func = JNI_FastGetField::generate_fast_get_boolean_field();
if (func != (address)-1) {
@ -3918,9 +3912,7 @@ static jint JNI_CreateJavaVM_inner(JavaVM **vm, void **penv, void *args) {

// We use Atomic::xchg rather than Atomic::add/dec since on some platforms
// the add/dec implementations are dependent on whether we are running
// on a multiprocessor, and at this stage of initialization the os::is_MP
// function used to determine this will always return false. Atomic::xchg
// does not have this problem.
// on a multiprocessor Atomic::xchg does not have this problem.
if (Atomic::xchg(1, &vm_created) == 1) {
return JNI_EEXIST; // already created, or create attempt in progress
}

@ -526,7 +526,6 @@ static SpecialFlag const special_jvm_flags[] = {
// --- Non-alias flags - sorted by obsolete_in then expired_in:
{ "MaxGCMinorPauseMillis", JDK_Version::jdk(8), JDK_Version::undefined(), JDK_Version::undefined() },
{ "UseConcMarkSweepGC", JDK_Version::jdk(9), JDK_Version::undefined(), JDK_Version::undefined() },
{ "AssumeMP", JDK_Version::jdk(10),JDK_Version::undefined(), JDK_Version::undefined() },
{ "MonitorInUseLists", JDK_Version::jdk(10),JDK_Version::undefined(), JDK_Version::undefined() },
{ "MaxRAMFraction", JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() },
{ "MinRAMFraction", JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() },
@ -549,6 +548,7 @@ static SpecialFlag const special_jvm_flags[] = {
{ "SharedReadOnlySize", JDK_Version::undefined(), JDK_Version::jdk(10), JDK_Version::undefined() },
{ "SharedMiscDataSize", JDK_Version::undefined(), JDK_Version::jdk(10), JDK_Version::undefined() },
{ "SharedMiscCodeSize", JDK_Version::undefined(), JDK_Version::jdk(10), JDK_Version::undefined() },
{ "AssumeMP", JDK_Version::jdk(10), JDK_Version::jdk(12), JDK_Version::jdk(13) },
{ "UnlinkSymbolsALot", JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) },
{ "AllowNonVirtualCalls", JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) },
{ "PrintSafepointStatistics", JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) },

@ -247,9 +247,6 @@ define_pd_global(uint64_t,MaxRAM, 1ULL*G);
range(8, 256) \
constraint(ObjectAlignmentInBytesConstraintFunc,AtParse) \
\
product(bool, AssumeMP, true, \
"(Deprecated) Instruct the VM to assume multiple processors are available")\
\
/* UseMembar is theoretically a temp flag used for memory barrier */ \
/* removal testing. It was supposed to be removed before FCS but has */ \
/* been re-added (see 6401008) */ \

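The flag table entry moves AssumeMP from the deprecated-only list into the obsolete/expired list (deprecated in 10, obsolete in 12, expired in 13), and the product flag definition itself is removed from globals.hpp. A rough, hypothetical mirror of how such a four-column entry is read; the struct, the version numbers being plain ints, and the describe() wording are illustrative assumptions, not HotSpot's argument-processing code:

#include <cstdio>

// name, deprecated_in, obsolete_in, expired_in (0 = undefined).
struct SpecialFlagSketch {
  const char* name;
  int deprecated_in;
  int obsolete_in;
  int expired_in;
};

static const SpecialFlagSketch kAssumeMP = { "AssumeMP", 10, 12, 13 };

// Rough reading of the lifecycle: warn from the deprecation release, accept
// and ignore once obsolete, reject once expired.
static const char* describe(const SpecialFlagSketch& f, int jdk) {
  if (f.expired_in    && jdk >= f.expired_in)    return "unrecognized (expired)";
  if (f.obsolete_in   && jdk >= f.obsolete_in)   return "accepted but ignored (obsolete)";
  if (f.deprecated_in && jdk >= f.deprecated_in) return "works, with a deprecation warning";
  return "fully supported";
}

int main() {
  std::printf("-XX:-%s on JDK 12: %s\n", kAssumeMP.name, describe(kAssumeMP, 12));
  return 0;
}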
@ -79,17 +79,15 @@ class InterfaceSupport: AllStatic {
private:
static void serialize_thread_state_internal(JavaThread* thread, bool needs_exception_handler) {
// Make sure new state is seen by VM thread
if (os::is_MP()) {
if (UseMembar) {
// Force a fence between the write above and read below
OrderAccess::fence();
if (UseMembar) {
// Force a fence between the write above and read below
OrderAccess::fence();
} else {
// store to serialize page so VM thread can do pseudo remote membar
if (needs_exception_handler) {
os::write_memory_serialize_page_with_handler(thread);
} else {
// store to serialize page so VM thread can do pseudo remote membar
if (needs_exception_handler) {
os::write_memory_serialize_page_with_handler(thread);
} else {
os::write_memory_serialize_page(thread);
}
os::write_memory_serialize_page(thread);
}
}
}

@ -226,8 +226,9 @@ class os: AllStatic {
// the bootstrap routine for the stub generator needs to check
// the processor count directly and leave the bootstrap routine
// in place until called after initialization has ocurred.
return AssumeMP || (_processor_count != 1);
return (_processor_count != 1);
}

static julong available_memory();
static julong physical_memory();
static bool has_allocatable_memory_limit(julong* limit);

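With the AssumeMP flag gone, os::is_MP() reduces to a test of the recorded processor count; a count of 0 (not yet initialized during bootstrap) still reports MP, which is the safe answer. A sketch with a hypothetical OsSketch class standing in for HotSpot's os class:

struct OsSketch {
  static int _processor_count;               // 0 until initialization records it
  static bool is_MP() { return _processor_count != 1; }
};

int OsSketch::_processor_count = 0;

int main() {
  OsSketch::_processor_count = 8;            // typical multiprocessor machine
  return OsSketch::is_MP() ? 0 : 1;
}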
@ -44,7 +44,6 @@ public class VMDeprecatedOptions {
{"MaxRAMFraction", "8"},
{"MinRAMFraction", "2"},
{"InitialRAMFraction", "64"},
{"AssumeMP", "false"},
{"UseMembar", "true"},
{"TLABStats", "false"},
