8187977: Generalize Atomic::xchg to use templates

Reviewed-by: kbarrett, coleenp
Erik Österlund 2017-09-26 21:37:01 +02:00
parent 5b3ed372a9
commit aa72ba3a64
16 changed files with 320 additions and 236 deletions


@ -148,13 +148,15 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) co
return result;
}
inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(4 == sizeof(T));
// Note that xchg_ptr doesn't necessarily do an acquire
// (see synchronizer.cpp).
unsigned int old_value;
T old_value;
const uint64_t zero = 0;
__asm__ __volatile__ (
@ -182,15 +184,18 @@ inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
"memory"
);
return (jint) old_value;
return old_value;
}
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
// Note that xchg_ptr doesn't necessarily do an acquire
// (see synchronizer.cpp).
long old_value;
T old_value;
const uint64_t zero = 0;
__asm__ __volatile__ (
@ -218,11 +223,7 @@ inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* des
"memory"
);
return (intptr_t) old_value;
}
inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
return old_value;
}
inline void cmpxchg_pre_membar(cmpxchg_memory_order order) {


@ -61,7 +61,11 @@ inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest) co
return old_value;
}
inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(4 == sizeof(T));
__asm__ volatile ( "xchgl (%2),%0"
: "=r" (exchange_value)
: "0" (exchange_value), "r" (dest)
@ -69,10 +73,6 @@ inline jint Atomic::xchg (jint exchange_value, volatile jint* des
return exchange_value;
}
inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
}
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
@ -118,7 +118,11 @@ inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest) co
return old_value;
}
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
__asm__ __volatile__ ("xchgq (%2),%0"
: "=r" (exchange_value)
: "0" (exchange_value), "r" (dest)
@ -144,10 +148,6 @@ inline jlong Atomic::load(const volatile jlong* src) { return *src; }
#else // !AMD64
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
}
extern "C" {
// defined in bsd_x86.s
jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong, bool);


@ -87,7 +87,7 @@ static inline int m68k_add_and_fetch(int add_value, volatile int *ptr) {
/* Atomically write VALUE into `*PTR' and return the previous
contents of `*PTR'. */
static inline int m68k_lock_test_and_set(volatile int *ptr, int newval) {
static inline int m68k_lock_test_and_set(int newval, volatile int *ptr) {
for (;;) {
// Loop until success.
int prev = *ptr;
@ -148,7 +148,7 @@ static inline int arm_add_and_fetch(int add_value, volatile int *ptr) {
/* Atomically write VALUE into `*PTR' and return the previous
contents of `*PTR'. */
static inline int arm_lock_test_and_set(volatile int *ptr, int newval) {
static inline int arm_lock_test_and_set(int newval, volatile int *ptr) {
for (;;) {
// Loop until a __kernel_cmpxchg succeeds.
int prev = *ptr;
@ -207,18 +207,22 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) co
return __sync_add_and_fetch(dest, add_value);
}
inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(4 == sizeof(T));
#ifdef ARM
return arm_lock_test_and_set(dest, exchange_value);
return xchg_using_helper<int>(arm_lock_test_and_set, exchange_value, dest);
#else
#ifdef M68K
return m68k_lock_test_and_set(dest, exchange_value);
return xchg_using_helper<int>(m68k_lock_test_and_set, exchange_value, dest);
#else
// __sync_lock_test_and_set is a bizarrely named atomic exchange
// operation. Note that some platforms only support this with the
// limitation that the only valid value to store is the immediate
// constant 1. There is a test for this in JNI_CreateJavaVM().
jint result = __sync_lock_test_and_set (dest, exchange_value);
T result = __sync_lock_test_and_set (dest, exchange_value);
// All atomic operations are expected to be full memory barriers
// (see atomic.hpp). However, __sync_lock_test_and_set is not
// a full memory barrier, but an acquire barrier. Hence, this added
@ -229,24 +233,14 @@ inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
#endif // ARM
}
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value,
volatile intptr_t* dest) {
#ifdef ARM
return arm_lock_test_and_set(dest, exchange_value);
#else
#ifdef M68K
return m68k_lock_test_and_set(dest, exchange_value);
#else
intptr_t result = __sync_lock_test_and_set (dest, exchange_value);
template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
T result = __sync_lock_test_and_set (dest, exchange_value);
__sync_synchronize();
return result;
#endif // M68K
#endif // ARM
}
inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
return (void *) xchg_ptr((intptr_t) exchange_value,
(volatile intptr_t*) dest);
}
// No direct support for cmpxchg of bytes; emulate using int.
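The comments in the hunk above call out a GCC quirk worth restating: despite its name, __sync_lock_test_and_set is an atomic exchange, and by itself it is only an acquire barrier, so the code pairs it with __sync_synchronize to meet the full-barrier contract of atomic.hpp. A minimal standalone sketch of that pairing, using the builtins directly (illustrative, not part of this commit):

static inline int full_barrier_xchg(int new_value, volatile int* dest) {
  // Acquire-only by itself, despite the "test_and_set" name.
  int old_value = __sync_lock_test_and_set(dest, new_value);
  // Promote to the full fence that Atomic::xchg promises.
  __sync_synchronize();
  return old_value;
}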


@ -57,19 +57,16 @@ struct Atomic::PlatformAdd
}
};
inline jint Atomic::xchg (jint exchange_value, volatile jint* dest)
{
jint res = __sync_lock_test_and_set (dest, exchange_value);
template<size_t byte_size>
template<typename T>
inline T Atomic::PlatformXchg<byte_size>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(byte_size == sizeof(T));
T res = __sync_lock_test_and_set(dest, exchange_value);
FULL_MEM_BARRIER;
return res;
}
inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest)
{
return (void *) xchg_ptr((intptr_t) exchange_value,
(volatile intptr_t*) dest);
}
template<size_t byte_size>
template<typename T>
inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T exchange_value,
@ -90,13 +87,6 @@ inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T exchange_value,
inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest)
{
intptr_t res = __sync_lock_test_and_set (dest, exchange_value);
FULL_MEM_BARRIER;
return res;
}
inline jlong Atomic::load(const volatile jlong* src) { return *src; }
#endif // OS_CPU_LINUX_AARCH64_VM_ATOMIC_LINUX_AARCH64_HPP


@ -141,11 +141,15 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) co
: "memory");
return val;
}
#endif // AARCH64
#endif
inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(4 == sizeof(T));
#ifdef AARCH64
jint old_val;
T old_val;
int tmp;
__asm__ volatile(
"1:\n\t"
@ -157,13 +161,17 @@ inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
: "memory");
return old_val;
#else
return (*os::atomic_xchg_func)(exchange_value, dest);
return xchg_using_helper<jint>(os::atomic_xchg_func, exchange_value, dest);
#endif
}
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
#ifdef AARCH64
intptr_t old_val;
template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
T old_val;
int tmp;
__asm__ volatile(
"1:\n\t"
@ -174,14 +182,8 @@ inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* des
: [new_val] "r" (exchange_value), [dest] "r" (dest)
: "memory");
return old_val;
#else
return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
#endif
}
inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
}
#endif // AARCH64
// The memory_order parameter is ignored - we always provide the strongest/most-conservative ordering


@ -146,12 +146,14 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) co
return result;
}
inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
// Note that xchg_ptr doesn't necessarily do an acquire
// (see synchronizer.cpp).
unsigned int old_value;
T old_value;
const uint64_t zero = 0;
__asm__ __volatile__ (
@ -179,15 +181,18 @@ inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
"memory"
);
return (jint) old_value;
return old_value;
}
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
// Note that xchg_ptr doesn't necessarily do an acquire
// (see synchronizer.cpp).
long old_value;
T old_value;
const uint64_t zero = 0;
__asm__ __volatile__ (
@ -215,11 +220,7 @@ inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* des
"memory"
);
return (intptr_t) old_value;
}
inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
return old_value;
}
inline void cmpxchg_pre_membar(cmpxchg_memory_order order) {


@ -208,8 +208,12 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I inc, D volatile* dest) const {
//
// The return value is the (unchanged) value from memory as it was when the
// replacement succeeded.
inline jint Atomic::xchg (jint xchg_val, volatile jint* dest) {
unsigned int old;
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(4 == sizeof(T));
T old;
__asm__ __volatile__ (
" LLGF %[old],%[mem] \n\t" // get old value
@ -219,16 +223,20 @@ inline jint Atomic::xchg (jint xchg_val, volatile jint* dest) {
: [old] "=&d" (old) // write-only, prev value irrelevant
, [mem] "+Q" (*dest) // read/write, memory to be updated atomically
//---< inputs >---
: [upd] "d" (xchg_val) // read-only, value to be written to memory
: [upd] "d" (exchange_value) // read-only, value to be written to memory
//---< clobbered >---
: "cc", "memory"
);
return (jint)old;
return old;
}
inline intptr_t Atomic::xchg_ptr(intptr_t xchg_val, volatile intptr_t* dest) {
unsigned long old;
template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
T old;
__asm__ __volatile__ (
" LG %[old],%[mem] \n\t" // get old value
@ -238,16 +246,12 @@ inline intptr_t Atomic::xchg_ptr(intptr_t xchg_val, volatile intptr_t* dest) {
: [old] "=&d" (old) // write-only, init from memory
, [mem] "+Q" (*dest) // read/write, memory to be updated atomically
//---< inputs >---
: [upd] "d" (xchg_val) // read-only, value to be written to memory
: [upd] "d" (exchange_value) // read-only, value to be written to memory
//---< clobbered >---
: "cc", "memory"
);
return (intptr_t)old;
}
inline void *Atomic::xchg_ptr(void *exchange_value, volatile void *dest) {
return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
return old;
}
//----------------


@ -95,9 +95,12 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) co
return rv;
}
inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
intptr_t rv = exchange_value;
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(4 == sizeof(T));
T rv = exchange_value;
__asm__ volatile(
" swap [%2],%1\n\t"
: "=r" (rv)
@ -106,8 +109,12 @@ inline jint Atomic::xchg (jint exchange_value, volatile jint* des
return rv;
}
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
intptr_t rv = exchange_value;
template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
T rv = exchange_value;
__asm__ volatile(
"1:\n\t"
" mov %1, %%o3\n\t"
@ -123,10 +130,6 @@ inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* des
return rv;
}
inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
}
// No direct support for cmpxchg of bytes; emulate using int.
template<>
struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};


@ -61,7 +61,11 @@ inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest) co
return old_value;
}
inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(4 == sizeof(T));
__asm__ volatile ( "xchgl (%2),%0"
: "=r" (exchange_value)
: "0" (exchange_value), "r" (dest)
@ -69,10 +73,6 @@ inline jint Atomic::xchg (jint exchange_value, volatile jint* des
return exchange_value;
}
inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
}
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
@ -118,7 +118,11 @@ inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest) co
return old_value;
}
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
__asm__ __volatile__ ("xchgq (%2),%0"
: "=r" (exchange_value)
: "0" (exchange_value), "r" (dest)
@ -144,10 +148,6 @@ inline jlong Atomic::load(const volatile jlong* src) { return *src; }
#else // !AMD64
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
}
extern "C" {
// defined in linux_x86.s
jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong);


@ -87,7 +87,7 @@ static inline int m68k_add_and_fetch(int add_value, volatile int *ptr) {
/* Atomically write VALUE into `*PTR' and return the previous
contents of `*PTR'. */
static inline int m68k_lock_test_and_set(volatile int *ptr, int newval) {
static inline int m68k_lock_test_and_set(int newval, volatile int *ptr) {
for (;;) {
// Loop until success.
int prev = *ptr;
@ -148,7 +148,7 @@ static inline int arm_add_and_fetch(int add_value, volatile int *ptr) {
/* Atomically write VALUE into `*PTR' and return the previous
contents of `*PTR'. */
static inline int arm_lock_test_and_set(volatile int *ptr, int newval) {
static inline int arm_lock_test_and_set(int newval, volatile int *ptr) {
for (;;) {
// Loop until a __kernel_cmpxchg succeeds.
int prev = *ptr;
@ -201,18 +201,22 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) co
return __sync_add_and_fetch(dest, add_value);
}
inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(4 == sizeof(T));
#ifdef ARM
return arm_lock_test_and_set(dest, exchange_value);
return xchg_using_helper<int>(arm_lock_test_and_set, exchange_value, dest);
#else
#ifdef M68K
return m68k_lock_test_and_set(dest, exchange_value);
return xchg_using_helper<int>(m68k_lock_test_and_set, exchange_value, dest);
#else
// __sync_lock_test_and_set is a bizarrely named atomic exchange
// operation. Note that some platforms only support this with the
// limitation that the only valid value to store is the immediate
// constant 1. There is a test for this in JNI_CreateJavaVM().
jint result = __sync_lock_test_and_set (dest, exchange_value);
T result = __sync_lock_test_and_set (dest, exchange_value);
// All atomic operations are expected to be full memory barriers
// (see atomic.hpp). However, __sync_lock_test_and_set is not
// a full memory barrier, but an acquire barrier. Hence, this added
@ -223,24 +227,14 @@ inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
#endif // ARM
}
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value,
volatile intptr_t* dest) {
#ifdef ARM
return arm_lock_test_and_set(dest, exchange_value);
#else
#ifdef M68K
return m68k_lock_test_and_set(dest, exchange_value);
#else
intptr_t result = __sync_lock_test_and_set (dest, exchange_value);
template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
T result = __sync_lock_test_and_set (dest, exchange_value);
__sync_synchronize();
return result;
#endif // M68K
#endif // ARM
}
inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
return (void *) xchg_ptr((intptr_t) exchange_value,
(volatile intptr_t*) dest);
}
// No direct support for cmpxchg of bytes; emulate using int.


@ -43,16 +43,6 @@ inline void Atomic::store(jlong store_value, jlong* dest) { *dest = store_value;
inline void Atomic::store(jlong store_value, volatile jlong* dest) { *dest = store_value; }
inline jlong Atomic::load(const volatile jlong* src) { return *src; }
// This is the interface to the atomic instructions in solaris_sparc.il.
// It's very messy because we need to support v8 and these instructions
// are illegal there. When sparc v8 is dropped, we can drop out lots of
// this code. Also compiler2 does not support v8 so the conditional code
// omits the instruction set check.
extern "C" jint _Atomic_swap32(jint exchange_value, volatile jint* dest);
extern "C" intptr_t _Atomic_swap64(intptr_t exchange_value, volatile intptr_t* dest);
// Implement ADD using a CAS loop.
template<size_t byte_size>
struct Atomic::PlatformAdd VALUE_OBJ_CLASS_SPEC {
@ -69,16 +59,30 @@ struct Atomic::PlatformAdd VALUE_OBJ_CLASS_SPEC {
}
};
inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
return _Atomic_swap32(exchange_value, dest);
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(4 == sizeof(T));
__asm__ volatile ( "swap [%2],%0"
: "=r" (exchange_value)
: "0" (exchange_value), "r" (dest)
: "memory");
return exchange_value;
}
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
return _Atomic_swap64(exchange_value, dest);
}
inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
T old_value = *dest;
while (true) {
T result = cmpxchg(exchange_value, dest, old_value);
if (result == old_value) break;
old_value = result;
}
return old_value;
}
// No direct support for cmpxchg of bytes; emulate using int.


@ -32,47 +32,6 @@
.end
// Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
//
// Arguments:
// exchange_value: O0
// dest: O1
//
// Results:
// O0: the value previously stored in dest
.inline _Atomic_swap32, 2
.volatile
swap [%o1],%o0
.nonvolatile
.end
// Support for intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t * dest).
//
// 64-bit
//
// Arguments:
// exchange_value: O0
// dest: O1
//
// Results:
// O0: the value previously stored in dest
.inline _Atomic_swap64, 2
.volatile
1:
mov %o0, %o3
ldx [%o1], %o2
casx [%o1], %o2, %o3
cmp %o2, %o3
bne %xcc, 1b
nop
mov %o2, %o0
.nonvolatile
.end
// Support for jlong Atomic::load and Atomic::store on v9.
//
// void _Atomic_move_long_v9(volatile jlong* src, volatile jlong* dst)


@ -84,8 +84,26 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) co
reinterpret_cast<jlong volatile*>(dest)));
}
inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
return _Atomic_xchg(exchange_value, dest);
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(4 == sizeof(T));
return PrimitiveConversions::cast<T>(
_Atomic_xchg(PrimitiveConversions::cast<jint>(exchange_value),
reinterpret_cast<jint volatile*>(dest)));
}
extern "C" jlong _Atomic_xchg_long(jlong exchange_value, volatile jlong* dest);
template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
return PrimitiveConversions::cast<T>(
_Atomic_xchg_long(PrimitiveConversions::cast<jlong>(exchange_value),
reinterpret_cast<jlong volatile*>(dest)));
}
// Not using cmpxchg_using_helper here, because some configurations of
@ -135,16 +153,6 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }
extern "C" jlong _Atomic_xchg_long(jlong exchange_value, volatile jlong* dest);
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
return (intptr_t)_Atomic_xchg_long((jlong)exchange_value, (volatile jlong*)dest);
}
inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
return (void*)_Atomic_xchg_long((jlong)exchange_value, (volatile jlong*)dest);
}
inline jlong Atomic::load(const volatile jlong* src) { return *src; }
#endif // OS_CPU_SOLARIS_X86_VM_ATOMIC_SOLARIS_X86_HPP


@ -81,17 +81,19 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) co
return add_using_helper<intptr_t>(os::atomic_add_ptr_func, add_value, dest);
}
inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
return (jint)(*os::atomic_xchg_func)(exchange_value, dest);
}
#define DEFINE_STUB_XCHG(ByteSize, StubType, StubName) \
template<> \
template<typename T> \
inline T Atomic::PlatformXchg<ByteSize>::operator()(T exchange_value, \
T volatile* dest) const { \
STATIC_ASSERT(ByteSize == sizeof(T)); \
return xchg_using_helper<StubType>(StubName, exchange_value, dest); \
}
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
return (intptr_t)(os::atomic_xchg_ptr_func)(exchange_value, dest);
}
DEFINE_STUB_XCHG(4, jint, os::atomic_xchg_func)
DEFINE_STUB_XCHG(8, jlong, os::atomic_xchg_ptr_func)
inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
return (void *)(os::atomic_xchg_ptr_func)((intptr_t)exchange_value, (volatile intptr_t*)dest);
}
#undef DEFINE_STUB_XCHG
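For readability, here is what DEFINE_STUB_XCHG(4, jint, os::atomic_xchg_func) expands to, hand-expanded:

template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
                                             T volatile* dest) const {
  STATIC_ASSERT(4 == sizeof(T));
  // Route the generic T through the jint-typed out-of-line stub.
  return xchg_using_helper<jint>(os::atomic_xchg_func, exchange_value, dest);
}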
#define DEFINE_STUB_CMPXCHG(ByteSize, StubType, StubName) \
template<> \
@ -128,7 +130,11 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) co
}
}
inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(4 == sizeof(T));
// alternative for InterlockedExchange
__asm {
mov eax, exchange_value;
@ -137,14 +143,6 @@ inline jint Atomic::xchg (jint exchange_value, volatile jint* des
}
}
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
}
inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
return (void*)xchg((jint)exchange_value, (volatile jint*)dest);
}
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,


@ -332,7 +332,7 @@ public:
static void disable_compilation_forever() {
UseCompiler = false;
AlwaysCompileLoopMethods = false;
Atomic::xchg(shutdown_compilation, &_should_compile_new_jobs);
Atomic::xchg(jint(shutdown_compilation), &_should_compile_new_jobs);
}
static bool is_compilation_disabled_forever() {
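The cast added above is forced by the new dispatch rules: the templated xchg accepts an integral or enum exchange value only when its type is identical to the destination's, whereas the old jint overload accepted the enum constant through an implicit conversion. Roughly the shapes involved (a simplified sketch; the enum definition here is illustrative):

enum { shutdown_compilation = 2 };            // unnamed enum type, not jint
static volatile jint _should_compile_new_jobs;

// Old:  jint Atomic::xchg(jint, volatile jint*) -- implicit enum-to-jint
//       conversion at the call, no cast needed.
// New:  D Atomic::xchg(T, volatile D*) -- no XchgImpl specialization
//       matches when T is an enum type and D is jint, so the conversion
//       must be explicit:
//       Atomic::xchg(jint(shutdown_compilation), &_should_compile_new_jobs);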


@ -116,10 +116,19 @@ class Atomic : AllStatic {
// Performs atomic exchange of *dest with exchange_value. Returns the
// prior value of *dest. xchg*() provide:
// <fence> exchange-value-with-dest <membar StoreLoad|StoreStore>
inline static jint xchg (jint exchange_value, volatile jint* dest);
inline static unsigned int xchg (unsigned int exchange_value, volatile unsigned int* dest);
inline static intptr_t xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest);
inline static void* xchg_ptr(void* exchange_value, volatile void* dest);
// The type T must be either a pointer type convertible to or equal
// to D, an integral/enum type equal to D, or a type equal to D that
// is primitive convertible using PrimitiveConversions.
template<typename T, typename D>
inline static D xchg(T exchange_value, volatile D* dest);
inline static intptr_t xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
return xchg(exchange_value, dest);
}
inline static void* xchg_ptr(void* exchange_value, volatile void* dest) {
return xchg(exchange_value, reinterpret_cast<void* volatile*>(dest));
}
// Performs atomic compare of *dest and compare_value, and exchanges
// *dest with exchange_value if the comparison succeeded. Returns prior
@ -280,6 +289,45 @@ private:
public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11.
struct CmpxchgByteUsingInt;
private:
// Dispatch handler for xchg. Provides type-based validity
// checking and limited conversions around calls to the
// platform-specific implementation layer provided by
// PlatformXchg.
template<typename T, typename D, typename Enable = void>
struct XchgImpl;
// Platform-specific implementation of xchg. Support for sizes
// of 4 and sizeof(intptr_t) is required. The class is a function
// object that must be default constructible, with these requirements:
//
// - dest is of type T*.
// - exchange_value is of type T.
// - platform_xchg is an object of type PlatformXchg<sizeof(T)>.
//
// Then
// platform_xchg(exchange_value, dest)
// must be a valid expression, returning a result convertible to T.
//
// A default definition is provided, which declares a function template
// T operator()(T, T volatile*) const
//
// For each required size, a platform must either provide an
// appropriate definition of that function, or must entirely
// specialize the class template for that size.
template<size_t byte_size> struct PlatformXchg;
// Support for platforms that implement some variants of xchg
// using a (typically out of line) non-template helper function.
// The generic arguments passed to PlatformXchg need to be
// translated to the appropriate type for the helper function, the
// helper invoked on the translated arguments, and the result
// translated back. Type is the parameter / return type of the
// helper function.
template<typename Type, typename Fn, typename T>
static T xchg_using_helper(Fn fn,
T exchange_value,
T volatile* dest);
};
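The comment block above specifies the contract a port must meet: specialize PlatformXchg<N> and define its call operator. A minimal sketch of a conforming specialization for a hypothetical GCC-based port, using the __atomic_exchange_n builtin (no real port in this commit is written this way):

template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
                                             T volatile* dest) const {
  STATIC_ASSERT(4 == sizeof(T));
  // Sequentially consistent exchange; returns the previous value of *dest.
  return __atomic_exchange_n(dest, exchange_value, __ATOMIC_SEQ_CST);
}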
template<typename From, typename To>
@ -353,6 +401,18 @@ struct Atomic::CmpxchgByteUsingInt VALUE_OBJ_CLASS_SPEC {
cmpxchg_memory_order order) const;
};
// Define the class before including platform file, which may specialize
// the operator definition. No generic definition of the specializations
// of the operator template is provided, nor are there any generic
// specializations of the class. The platform file is responsible for
// providing those.
template<size_t byte_size>
struct Atomic::PlatformXchg VALUE_OBJ_CLASS_SPEC {
template<typename T>
T operator()(T exchange_value,
T volatile* dest) const;
};
// platform specific in-line definitions - must come before shared definitions
#include OS_CPU_HEADER(atomic)
@ -594,9 +654,75 @@ inline T Atomic::CmpxchgByteUsingInt::operator()(T exchange_value,
return PrimitiveConversions::cast<T>(cur_as_bytes[offset]);
}
inline unsigned Atomic::xchg(unsigned int exchange_value, volatile unsigned int* dest) {
assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
return (unsigned int)Atomic::xchg((jint)exchange_value, (volatile jint*)dest);
// Handle xchg for integral and enum types.
//
// All the involved types must be identical.
template<typename T>
struct Atomic::XchgImpl<
T, T,
typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
VALUE_OBJ_CLASS_SPEC
{
T operator()(T exchange_value, T volatile* dest) const {
// Forward to the platform handler for the size of T.
return PlatformXchg<sizeof(T)>()(exchange_value, dest);
}
};
// Handle xchg for pointer types.
//
// The exchange_value must be implicitly convertible to the
// destination's type; it must be type-correct to store the
// exchange_value in the destination.
template<typename T, typename D>
struct Atomic::XchgImpl<
T*, D*,
typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type>
VALUE_OBJ_CLASS_SPEC
{
D* operator()(T* exchange_value, D* volatile* dest) const {
// Allow derived to base conversion, and adding cv-qualifiers.
D* new_value = exchange_value;
return PlatformXchg<sizeof(D*)>()(new_value, dest);
}
};
// Handle xchg for types that have a translator.
//
// All the involved types must be identical.
//
// This translates the original call into a call on the decayed
// arguments, and returns the recovered result of that translated
// call.
template<typename T>
struct Atomic::XchgImpl<
T, T,
typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
VALUE_OBJ_CLASS_SPEC
{
T operator()(T exchange_value, T volatile* dest) const {
typedef PrimitiveConversions::Translate<T> Translator;
typedef typename Translator::Decayed Decayed;
STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
return Translator::recover(
xchg(Translator::decay(exchange_value),
reinterpret_cast<Decayed volatile*>(dest)));
}
};
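The translator case above exists for value types that are neither integrals nor pointers but have a registered primitive representation. A sketch of such a registration for a hypothetical 4-byte wrapper type (the Translate protocol -- Value, Decayed, decay, recover -- follows primitiveConversions.hpp; the wrapper type itself is made up):

class TaggedWord {
  jint _bits;
public:
  explicit TaggedWord(jint bits) : _bits(bits) {}
  jint bits() const { return _bits; }
};

template<>
struct PrimitiveConversions::Translate<TaggedWord> : public TrueType {
  typedef TaggedWord Value;
  typedef jint Decayed;
  static Decayed decay(Value x) { return x.bits(); }
  static Value recover(Decayed d) { return TaggedWord(d); }
};

// Atomic::xchg on a volatile TaggedWord now decays to jint, runs through
// PlatformXchg<4>, and recovers a TaggedWord result.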
template<typename Type, typename Fn, typename T>
inline T Atomic::xchg_using_helper(Fn fn,
T exchange_value,
T volatile* dest) {
STATIC_ASSERT(sizeof(Type) == sizeof(T));
return PrimitiveConversions::cast<T>(
fn(PrimitiveConversions::cast<Type>(exchange_value),
reinterpret_cast<Type volatile*>(dest)));
}
template<typename T, typename D>
inline D Atomic::xchg(T exchange_value, volatile D* dest) {
return XchgImpl<T, D>()(exchange_value, dest);
}
#endif // SHARE_VM_RUNTIME_ATOMIC_HPP
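Taken together, call sites no longer pick a width-specific overload by hand; the template deduces it from the destination. A small usage sketch of the generalized entry points (names are illustrative):

static volatile jint   _state        = 0;
static volatile void*  _handler      = NULL;
static Klass* volatile _cached_klass = NULL;

void sketch() {
  // Integral: exchange value and destination must have identical types.
  jint old_state = Atomic::xchg(jint(1), &_state);
  // Pointer: the exchange value need only be convertible to the
  // destination's element type.
  Klass* old_klass = Atomic::xchg((Klass*)NULL, &_cached_klass);
  // Legacy wrapper, kept for existing callers; now forwards to xchg().
  void* old_handler = Atomic::xchg_ptr((void*)NULL, &_handler);
  (void)old_state; (void)old_klass; (void)old_handler;
}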