Merge
commit 3b786f8edc
@@ -817,21 +817,6 @@ class StubGenerator: public StubCodeGenerator {
   Label _atomic_add_stub;  // called from other stubs
-
-
-  // Support for void OrderAccess::fence().
-  //
-  address generate_fence() {
-    StubCodeMark mark(this, "StubRoutines", "fence");
-    address start = __ pc();
-
-    __ membar(Assembler::Membar_mask_bits(Assembler::LoadLoad  | Assembler::LoadStore |
-                                          Assembler::StoreLoad | Assembler::StoreStore));
-    __ retl(false);
-    __ delayed()->nop();
-
-    return start;
-  }


   //------------------------------------------------------------------------------------------------------------------------
   // The following routine generates a subroutine to throw an asynchronous
   // UnknownError when an unsafe access gets a fault that could not be
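Note on the deleted SPARC stub: Membar_mask_bits is a four-bit mask, and requesting all four orderings (as generate_fence() did) amounts to a full two-way fence. A minimal standalone C++ sketch of the mask scheme follows; only LoadLoad = 1 << 0 is taken from a later hunk, the other bit positions are assumed, and this is an illustration rather than HotSpot's actual header:

    #include <cstdio>

    // Illustrative stand-in for Assembler::Membar_mask_bits: one bit per
    // ordering the barrier must enforce.  Only LoadLoad's value appears
    // in this diff; the other bit positions are assumed.
    enum Membar_mask_bits {
      LoadLoad   = 1 << 0,
      StoreLoad  = 1 << 1,
      LoadStore  = 1 << 2,
      StoreStore = 1 << 3
    };

    int main() {
      // The deleted generate_fence() stub requested every ordering,
      // i.e. a full two-way fence.
      int full = LoadLoad | LoadStore | StoreLoad | StoreStore;
      std::printf("full fence mask = 0x%x; includes StoreLoad: %s\n",
                  full, (full & StoreLoad) ? "yes" : "no");
      return 0;
    }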
@@ -2861,7 +2846,6 @@ class StubGenerator: public StubCodeGenerator {
     StubRoutines::_atomic_cmpxchg_ptr_entry  = StubRoutines::_atomic_cmpxchg_entry;
     StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
     StubRoutines::_atomic_add_ptr_entry      = StubRoutines::_atomic_add_entry;
-    StubRoutines::_fence_entry               = generate_fence();
 #endif  // COMPILER2 !=> _LP64
   }

@@ -1438,26 +1438,12 @@ void Assembler::lock() {
   }
 }

-// Serializes memory.
+// Emit mfence instruction
 void Assembler::mfence() {
-  // Memory barriers are only needed on multiprocessors
-  if (os::is_MP()) {
-    if( LP64_ONLY(true ||) VM_Version::supports_sse2() ) {
-      emit_byte( 0x0F );                // MFENCE; faster blows no regs
-      emit_byte( 0xAE );
-      emit_byte( 0xF0 );
-    } else {
-      // All usable chips support "locked" instructions which suffice
-      // as barriers, and are much faster than the alternative of
-      // using cpuid instruction. We use here a locked add [esp],0.
-      // This is conveniently otherwise a no-op except for blowing
-      // flags (which we save and restore.)
-      pushf();                // Save eflags register
-      lock();
-      addl(Address(rsp, 0), 0);// Assert the lock# signal here
-      popf();                 // Restore eflags register
-    }
-  }
+  NOT_LP64(assert(VM_Version::supports_sse2(), "unsupported");)
+  emit_byte( 0x0F );
+  emit_byte( 0xAE );
+  emit_byte( 0xF0 );
 }

 void Assembler::mov(Register dst, Register src) {
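Note: the new mfence() body emits MFENCE unconditionally as the three bytes 0F AE F0 (opcode 0F AE with /6 and a register-form ModRM byte). A self-contained sketch of that encoding, using a toy emit_byte buffer in place of HotSpot's code buffer:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Toy code buffer standing in for HotSpot's assembler back end.
    static std::vector<uint8_t> code;
    static void emit_byte(uint8_t b) { code.push_back(b); }

    // Mirrors the new Assembler::mfence(): unconditionally emit MFENCE.
    // Encoding: 0F AE /6, ModRM = 11 110 000b = 0xF0.
    static void mfence() {
      emit_byte(0x0F);
      emit_byte(0xAE);
      emit_byte(0xF0);
    }

    int main() {
      mfence();
      for (uint8_t b : code) std::printf("%02X ", b);
      std::printf("\n");  // prints: 0F AE F0
      return 0;
    }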
@@ -1068,15 +1068,23 @@ private:
     LoadLoad   = 1 << 0
   };

-  // Serializes memory.
+  // Serializes memory and blows flags
   void membar(Membar_mask_bits order_constraint) {
-    // We only have to handle StoreLoad and LoadLoad
-    if (order_constraint & StoreLoad) {
-      // MFENCE subsumes LFENCE
-      mfence();
-    } /* [jk] not needed currently: else if (order_constraint & LoadLoad) {
-      lfence();
-    } */
+    if (os::is_MP()) {
+      // We only have to handle StoreLoad
+      if (order_constraint & StoreLoad) {
+        // All usable chips support "locked" instructions which suffice
+        // as barriers, and are much faster than the alternative of
+        // using cpuid instruction. We use here a locked add [esp],0.
+        // This is conveniently otherwise a no-op except for blowing
+        // flags.
+        // Any change to this code may need to revisit other places in
+        // the code where this idiom is used, in particular the
+        // orderAccess code.
+        lock();
+        addl(Address(rsp, 0), 0);// Assert the lock# signal here
+      }
+    }
   }

   void mfence();
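Note: the locked add is a full barrier on x86 but clobbers EFLAGS, which is why the .ad hunks below add effect(KILL cr) to membar_volatile. A minimal sketch of the same idiom in GCC/Clang inline assembly, assuming an x86-64 target:

    #include <cstdio>

    // StoreLoad barrier via `lock addl $0, 0(%rsp)`: an otherwise no-op
    // read-modify-write of a stack slot whose LOCK prefix drains the
    // store buffer.  It clobbers the flags ("cc") and tells the compiler
    // not to reorder memory accesses across it ("memory").
    static inline void storeload_fence() {
      __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
    }

    int main() {
      storeload_fence();
      std::printf("fence executed\n");
      return 0;
    }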
@@ -637,7 +637,7 @@ class StubGenerator: public StubCodeGenerator {
   address generate_orderaccess_fence() {
     StubCodeMark mark(this, "StubRoutines", "orderaccess_fence");
     address start = __ pc();
-    __ mfence();
+    __ membar(Assembler::StoreLoad);
     __ ret(0);

     return start;
@@ -4288,24 +4288,6 @@ encode %{
     emit_opcode(cbuf, 0xC8 + $src2$$reg);
   %}

-  enc_class enc_membar_acquire %{
-    // Doug Lea believes this is not needed with current Sparcs and TSO.
-    // MacroAssembler masm(&cbuf);
-    // masm.membar();
-  %}
-
-  enc_class enc_membar_release %{
-    // Doug Lea believes this is not needed with current Sparcs and TSO.
-    // MacroAssembler masm(&cbuf);
-    // masm.membar();
-  %}
-
-  enc_class enc_membar_volatile %{
-    MacroAssembler masm(&cbuf);
-    masm.membar(Assembler::Membar_mask_bits(Assembler::StoreLoad |
-                                            Assembler::StoreStore));
-  %}
-
   // Atomically load the volatile long
   enc_class enc_loadL_volatile( memory mem, stackSlotL dst ) %{
     emit_opcode(cbuf,0xDF);
@@ -7498,9 +7480,9 @@ instruct membar_acquire() %{
   ins_cost(400);

   size(0);
-  format %{ "MEMBAR-acquire" %}
-  ins_encode( enc_membar_acquire );
-  ins_pipe(pipe_slow);
+  format %{ "MEMBAR-acquire ! (empty encoding)" %}
+  ins_encode();
+  ins_pipe(empty);
 %}

 instruct membar_acquire_lock() %{
@@ -7519,9 +7501,9 @@ instruct membar_release() %{
   ins_cost(400);

   size(0);
-  format %{ "MEMBAR-release" %}
-  ins_encode( enc_membar_release );
-  ins_pipe(pipe_slow);
+  format %{ "MEMBAR-release ! (empty encoding)" %}
+  ins_encode( );
+  ins_pipe(empty);
 %}

 instruct membar_release_lock() %{
@@ -7535,12 +7517,22 @@ instruct membar_release_lock() %{
   ins_pipe(empty);
 %}

-instruct membar_volatile() %{
+instruct membar_volatile(eFlagsReg cr) %{
   match(MemBarVolatile);
+  effect(KILL cr);
   ins_cost(400);

-  format %{ "MEMBAR-volatile" %}
-  ins_encode( enc_membar_volatile );
+  format %{
+    $$template
+    if (os::is_MP()) {
+      $$emit$$"LOCK ADDL [ESP + #0], 0\t! membar_volatile"
+    } else {
+      $$emit$$"MEMBAR-volatile ! (empty encoding)"
+    }
+  %}
+  ins_encode %{
+    __ membar(Assembler::StoreLoad);
+  %}
   ins_pipe(pipe_slow);
 %}

@@ -4162,33 +4162,6 @@ encode %{
     // done:
   %}

-  enc_class enc_membar_acquire
-  %{
-    // [jk] not needed currently, if you enable this and it really
-    // emits code don't forget to the remove the "size(0)" line in
-    // membar_acquire()
-    // MacroAssembler masm(&cbuf);
-    // masm.membar(Assembler::Membar_mask_bits(Assembler::LoadStore |
-    //                                         Assembler::LoadLoad));
-  %}
-
-  enc_class enc_membar_release
-  %{
-    // [jk] not needed currently, if you enable this and it really
-    // emits code don't forget to the remove the "size(0)" line in
-    // membar_release()
-    // MacroAssembler masm(&cbuf);
-    // masm.membar(Assembler::Membar_mask_bits(Assembler::LoadStore |
-    //                                         Assembler::StoreStore));
-  %}
-
-  enc_class enc_membar_volatile
-  %{
-    MacroAssembler masm(&cbuf);
-    masm.membar(Assembler::Membar_mask_bits(Assembler::StoreLoad |
-                                            Assembler::StoreStore));
-  %}
-
   // Safepoint Poll.  This polls the safepoint page, and causes an
   // exception if it is not readable.  Unfortunately, it kills
   // RFLAGS in the process.
@@ -7458,7 +7431,7 @@ instruct membar_acquire()
   ins_cost(0);

   size(0);
-  format %{ "MEMBAR-acquire" %}
+  format %{ "MEMBAR-acquire ! (empty encoding)" %}
   ins_encode();
   ins_pipe(empty);
 %}
@@ -7481,7 +7454,7 @@ instruct membar_release()
   ins_cost(0);

   size(0);
-  format %{ "MEMBAR-release" %}
+  format %{ "MEMBAR-release ! (empty encoding)" %}
   ins_encode();
   ins_pipe(empty);
 %}
@@ -7498,13 +7471,22 @@ instruct membar_release_lock()
   ins_pipe(empty);
 %}

-instruct membar_volatile()
-%{
+instruct membar_volatile(rFlagsReg cr) %{
   match(MemBarVolatile);
+  effect(KILL cr);
   ins_cost(400);

-  format %{ "MEMBAR-volatile" %}
-  ins_encode(enc_membar_volatile);
+  format %{
+    $$template
+    if (os::is_MP()) {
+      $$emit$$"lock addl [rsp + #0], 0\t! membar_volatile"
+    } else {
+      $$emit$$"MEMBAR-volatile ! (empty encoding)"
+    }
+  %}
+  ins_encode %{
+    __ membar(Assembler::StoreLoad);
+  %}
   ins_pipe(pipe_slow);
 %}

@@ -29,13 +29,11 @@
   static jint  (*atomic_cmpxchg_func)     (jint, volatile jint*, jint);
   static jlong (*atomic_cmpxchg_long_func)(jlong, volatile jlong*, jlong);
   static jint  (*atomic_add_func)         (jint, volatile jint*);
-  static void  (*fence_func)              ();

   static jint  atomic_xchg_bootstrap        (jint, volatile jint*);
   static jint  atomic_cmpxchg_bootstrap     (jint, volatile jint*, jint);
   static jlong atomic_cmpxchg_long_bootstrap(jlong, volatile jlong*, jlong);
   static jint  atomic_add_bootstrap         (jint, volatile jint*);
-  static void  fence_bootstrap              ();

   static void setup_fpu() {}

@@ -44,11 +44,12 @@ inline void OrderAccess::release() {

 inline void OrderAccess::fence() {
   if (os::is_MP()) {
+    // always use locked addl since mfence is sometimes expensive
 #ifdef AMD64
-    __asm__ __volatile__ ("mfence":::"memory");
+    __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
 #else
     __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
-#endif // AMD64
+#endif
   }
 }
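Note: the StoreLoad barrier is what keeps the classic Dekker-style pattern from reading stale values out of each core's store buffer. A hedged litmus-test sketch using the same locked-add fence; std::atomic with relaxed ordering stands in for HotSpot's raw volatile accesses, and an x86-64 GCC/Clang target is assumed:

    #include <atomic>
    #include <cstdio>
    #include <thread>

    std::atomic<int> x{0}, y{0};
    int r1, r2;

    // Same locked-add idiom the commit installs in OrderAccess::fence().
    static inline void fence() {
      __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
    }

    int main() {
      int both_zero = 0;
      for (int i = 0; i < 100000; ++i) {
        x = 0; y = 0;
        std::thread a([] { x.store(1, std::memory_order_relaxed); fence();
                           r1 = y.load(std::memory_order_relaxed); });
        std::thread b([] { y.store(1, std::memory_order_relaxed); fence();
                           r2 = x.load(std::memory_order_relaxed); });
        a.join(); b.join();
        if (r1 == 0 && r2 == 0) both_zero++;  // never occurs with the fences
      }
      std::printf("r1 == r2 == 0 observed %d times\n", both_zero);
      return 0;
    }

Without the two fence() calls, x86 may hoist each load above the other thread's still-buffered store and the counter climbs above zero.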
@@ -60,22 +60,10 @@ inline void OrderAccess::release() {
   dummy = 0;
 }

-#if defined(COMPILER2) || defined(_LP64)
-
 inline void OrderAccess::fence() {
   _OrderAccess_fence();
 }

-#else // defined(COMPILER2) || defined(_LP64)
-
-inline void OrderAccess::fence() {
-  if (os::is_MP()) {
-    (*os::fence_func)();
-  }
-}
-
-#endif // defined(COMPILER2) || defined(_LP64)
-
 #endif // _GNU_SOURCE

 inline jbyte OrderAccess::load_acquire(volatile jbyte* p) { return *p; }
@@ -619,7 +619,6 @@ typedef jint xchg_func_t (jint, volatile jint*);
 typedef jint  cmpxchg_func_t     (jint, volatile jint*, jint);
 typedef jlong cmpxchg_long_func_t(jlong, volatile jlong*, jlong);
 typedef jint  add_func_t         (jint, volatile jint*);
-typedef void  fence_func_t       ();

 jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) {
   // try to use the stub:
@@ -681,25 +680,10 @@ jint os::atomic_add_bootstrap(jint add_value, volatile jint* dest) {
   return (*dest) += add_value;
 }

-void os::fence_bootstrap() {
-  // try to use the stub:
-  fence_func_t* func = CAST_TO_FN_PTR(fence_func_t*, StubRoutines::fence_entry());
-
-  if (func != NULL) {
-    os::fence_func = func;
-    (*func)();
-    return;
-  }
-  assert(Threads::number_of_threads() == 0, "for bootstrap only");
-
-  // don't have to do anything for a single thread
-}
-
 xchg_func_t*         os::atomic_xchg_func         = os::atomic_xchg_bootstrap;
 cmpxchg_func_t*      os::atomic_cmpxchg_func      = os::atomic_cmpxchg_bootstrap;
 cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;
 add_func_t*          os::atomic_add_func          = os::atomic_add_bootstrap;
-fence_func_t*        os::fence_func               = os::fence_bootstrap;

 #endif // !_LP64 && !COMPILER2

@@ -29,13 +29,11 @@
   static jint  (*atomic_cmpxchg_func)     (jint, volatile jint*, jint);
   static jlong (*atomic_cmpxchg_long_func)(jlong, volatile jlong*, jlong);
   static jint  (*atomic_add_func)         (jint, volatile jint*);
-  static void  (*fence_func)              ();

   static jint  atomic_xchg_bootstrap        (jint, volatile jint*);
   static jint  atomic_cmpxchg_bootstrap     (jint, volatile jint*, jint);
   static jlong atomic_cmpxchg_long_bootstrap(jlong, volatile jlong*, jlong);
   static jint  atomic_add_bootstrap         (jint, volatile jint*);
-  static void  fence_bootstrap              ();

   static void setup_fpu() {}

@@ -61,11 +61,8 @@ extern "C" {
 #endif // AMD64
   }
   inline void _OrderAccess_fence() {
-#ifdef AMD64
-    __asm__ __volatile__ ("mfence":::"memory");
-#else
+    // Always use locked addl since mfence is sometimes expensive
     __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
-#endif // AMD64
   }

 }
@@ -794,7 +794,6 @@ typedef jint xchg_func_t (jint, volatile jint*);
 typedef jint  cmpxchg_func_t     (jint, volatile jint*, jint);
 typedef jlong cmpxchg_long_func_t(jlong, volatile jlong*, jlong);
 typedef jint  add_func_t         (jint, volatile jint*);
-typedef void  fence_func_t       ();

 jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) {
   // try to use the stub:
@@ -856,25 +855,10 @@ jint os::atomic_add_bootstrap(jint add_value, volatile jint* dest) {
   return (*dest) += add_value;
 }

-void os::fence_bootstrap() {
-  // try to use the stub:
-  fence_func_t* func = CAST_TO_FN_PTR(fence_func_t*, StubRoutines::fence_entry());
-
-  if (func != NULL) {
-    os::fence_func = func;
-    (*func)();
-    return;
-  }
-  assert(Threads::number_of_threads() == 0, "for bootstrap only");
-
-  // don't have to do anything for a single thread
-}
-
 xchg_func_t*         os::atomic_xchg_func         = os::atomic_xchg_bootstrap;
 cmpxchg_func_t*      os::atomic_cmpxchg_func      = os::atomic_cmpxchg_bootstrap;
 cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;
 add_func_t*          os::atomic_add_func          = os::atomic_add_bootstrap;
-fence_func_t*        os::fence_func               = os::fence_bootstrap;

 extern "C" _solaris_raw_setup_fpu(address ptr);
 void os::setup_fpu() {
@@ -32,13 +32,11 @@
   static jint  (*atomic_cmpxchg_func)     (jint, volatile jint*, jint);
   static jlong (*atomic_cmpxchg_long_func)(jlong, volatile jlong*, jlong);
   static jint  (*atomic_add_func)         (jint, volatile jint*);
-  static void  (*fence_func)              ();

   static jint  atomic_xchg_bootstrap        (jint, volatile jint*);
   static jint  atomic_cmpxchg_bootstrap     (jint, volatile jint*, jint);
   static jlong atomic_cmpxchg_long_bootstrap(jlong, volatile jlong*, jlong);
   static jint  atomic_add_bootstrap         (jint, volatile jint*);
-  static void  fence_bootstrap              ();

   static void setup_fpu();
 #endif // AMD64
@@ -46,7 +46,7 @@ inline void OrderAccess::release() {

 inline void OrderAccess::fence() {
 #ifdef AMD64
-  (*os::fence_func)();
+  StubRoutines_fence();
 #else
   if (os::is_MP()) {
     __asm {
@@ -196,7 +196,6 @@ typedef jint cmpxchg_func_t (jint, volatile jint*, jint);
 typedef jlong    cmpxchg_long_func_t (jlong, volatile jlong*, jlong);
 typedef jint     add_func_t          (jint, volatile jint*);
 typedef intptr_t add_ptr_func_t      (intptr_t, volatile intptr_t*);
-typedef void     fence_func_t        ();

 #ifdef AMD64

@@ -292,27 +291,11 @@ intptr_t os::atomic_add_ptr_bootstrap(intptr_t add_value, volatile intptr_t* dest) {
   return (*dest) += add_value;
 }

-void os::fence_bootstrap() {
-  // try to use the stub:
-  fence_func_t* func = CAST_TO_FN_PTR(fence_func_t*, StubRoutines::fence_entry());
-
-  if (func != NULL) {
-    os::fence_func = func;
-    (*func)();
-    return;
-  }
-  assert(Threads::number_of_threads() == 0, "for bootstrap only");
-
-  // don't have to do anything for a single thread
-}
-
-
 xchg_func_t*         os::atomic_xchg_func         = os::atomic_xchg_bootstrap;
 xchg_ptr_func_t*     os::atomic_xchg_ptr_func     = os::atomic_xchg_ptr_bootstrap;
 cmpxchg_func_t*      os::atomic_cmpxchg_func      = os::atomic_cmpxchg_bootstrap;
 add_func_t*          os::atomic_add_func          = os::atomic_add_bootstrap;
 add_ptr_func_t*      os::atomic_add_ptr_func      = os::atomic_add_ptr_bootstrap;
-fence_func_t*        os::fence_func               = os::fence_bootstrap;

 #endif // AMD64

@@ -35,9 +35,6 @@
   static jint     (*atomic_add_func)     (jint, volatile jint*);
   static intptr_t (*atomic_add_ptr_func) (intptr_t, volatile intptr_t*);

-  static void     (*fence_func)          ();
-
-
   static jint     atomic_xchg_bootstrap     (jint, volatile jint*);
   static intptr_t atomic_xchg_ptr_bootstrap (intptr_t, volatile intptr_t*);

@@ -53,8 +50,6 @@
 #ifdef AMD64
   static jint     atomic_add_bootstrap     (jint, volatile jint*);
   static intptr_t atomic_add_ptr_bootstrap (intptr_t, volatile intptr_t*);
-
-  static void     fence_bootstrap          ();
 #endif // AMD64

 static void setup_fpu();
@@ -3154,6 +3154,8 @@ oopsHierarchy.cpp thread.hpp
 oopsHierarchy.cpp                       thread_<os_family>.inline.hpp

 orderAccess.cpp                         orderAccess.hpp
+orderAccess.cpp                         stubRoutines.hpp
+orderAccess.cpp                         thread.hpp

 orderAccess.hpp                         allocation.hpp
 orderAccess.hpp                         os.hpp
@@ -26,3 +26,15 @@
 # include "incls/_orderAccess.cpp.incl"

 volatile intptr_t OrderAccess::dummy = 0;
+
+void OrderAccess::StubRoutines_fence() {
+  // Use a stub if it exists.  It may not exist during bootstrap so do
+  // nothing in that case but assert if no fence code exists after threads have been created
+  void (*func)() = CAST_TO_FN_PTR(void (*)(), StubRoutines::fence_entry());
+
+  if (func != NULL) {
+    (*func)();
+    return;
+  }
+  assert(Threads::number_of_threads() == 0, "for bootstrap only");
+}
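Note: StubRoutines_fence() is a lazy-dispatch pattern: call the generated stub once it exists, and fall back to doing nothing only while the VM is still single-threaded. A standalone C++ sketch of that shape, with fence_entry_ and number_of_threads_ as stand-ins for StubRoutines::fence_entry() and Threads::number_of_threads():

    #include <cassert>
    #include <cstdio>

    // Stand-ins for StubRoutines::fence_entry() and Threads::number_of_threads().
    static void  generated_fence() { std::puts("stub fence"); }
    static void* fence_entry_ = nullptr;   // filled in once stubs are generated
    static int   number_of_threads_ = 0;

    static void StubRoutines_fence() {
      // Use the stub if it exists; during bootstrap it may not, which is
      // safe only while execution is still single-threaded.
      void (*func)() = reinterpret_cast<void (*)()>(fence_entry_);
      if (func != nullptr) {
        (*func)();
        return;
      }
      assert(number_of_threads_ == 0 && "for bootstrap only");
    }

    int main() {
      StubRoutines_fence();                                    // bootstrap: no-op
      fence_entry_ = reinterpret_cast<void*>(&generated_fence);
      StubRoutines_fence();                                    // dispatches to the stub
      return 0;
    }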
@@ -300,4 +300,10 @@ class OrderAccess : AllStatic {
   // In order to force a memory access, implementations may
   // need a volatile externally visible dummy variable.
   static volatile intptr_t dummy;
+
+ private:
+  // This is a helper that invokes the StubRoutines::fence_entry()
+  // routine if it exists.  It should only be used by platforms that
+  // don't have another way to do the inline assembly.
+  static void StubRoutines_fence();
 };