8316961: Fallback implementations for 64-bit Atomic::{add,xchg} on 32-bit platforms

Reviewed-by: eosterlund, dholmes, kbarrett, simonis
This commit is contained in:
Aleksey Shipilev 2023-10-25 08:29:58 +00:00
parent d7205e690f
commit ba7d08b819
5 changed files with 100 additions and 11 deletions

View File

@@ -153,6 +153,14 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, dest, compare_value, exchange_value);
}
// No direct support for 8-byte xchg; emulate using cmpxchg.
// XchgUsingCmpxchg supplies operator() as a load/cmpxchg retry loop
// (defined in the shared atomic.hpp).
template<>
struct Atomic::PlatformXchg<8> : Atomic::XchgUsingCmpxchg<8> {};
// No direct support for 8-byte add; emulate using cmpxchg.
// AddUsingCmpxchg supplies add_then_fetch/fetch_then_add.
template<>
struct Atomic::PlatformAdd<8> : Atomic::AddUsingCmpxchg<8> {};
template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {

View File

@@ -128,6 +128,13 @@ inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
return xchg_using_helper<int32_t>(ARMAtomicFuncs::_xchg_func, dest, exchange_value);
}
// No direct support for 8-byte xchg; emulate using cmpxchg.
// XchgUsingCmpxchg supplies operator() as a load/cmpxchg retry loop
// (defined in the shared atomic.hpp).
template<>
struct Atomic::PlatformXchg<8> : Atomic::XchgUsingCmpxchg<8> {};
// No direct support for 8-byte add; emulate using cmpxchg.
// AddUsingCmpxchg supplies add_then_fetch/fetch_then_add.
template<>
struct Atomic::PlatformAdd<8> : Atomic::AddUsingCmpxchg<8> {};
// The memory_order parameter is ignored - we always provide the strongest/most-conservative ordering

View File

@@ -153,6 +153,14 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, dest, compare_value, exchange_value);
}
// No direct support for 8-byte xchg; emulate using cmpxchg.
// XchgUsingCmpxchg supplies operator() as a load/cmpxchg retry loop
// (defined in the shared atomic.hpp).
template<>
struct Atomic::PlatformXchg<8> : Atomic::XchgUsingCmpxchg<8> {};
// No direct support for 8-byte add; emulate using cmpxchg.
// AddUsingCmpxchg supplies add_then_fetch/fetch_then_add.
template<>
struct Atomic::PlatformAdd<8> : Atomic::AddUsingCmpxchg<8> {};
template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {

View File

@@ -398,11 +398,15 @@ private:
T compare_value,
T exchange_value);
// Support platforms that do not provide Read-Modify-Write
// byte-level atomic access. To use, derive PlatformCmpxchg<1> from
// this class.
// Support platforms that do not provide Read-Modify-Write atomic
// accesses for 1-byte and 8-byte widths. To use, derive PlatformCmpxchg<1>,
// PlatformAdd<S>, PlatformXchg<S> from these classes.
public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11.
struct CmpxchgByteUsingInt;
template<size_t byte_size>
struct XchgUsingCmpxchg;
template<size_t byte_size>
class AddUsingCmpxchg;
private:
// Dispatch handler for xchg. Provides type-based validity
@@ -677,6 +681,47 @@ struct Atomic::CmpxchgByteUsingInt {
atomic_memory_order order) const;
};
// Define the class before including platform file, which may use this
// as a base class, requiring it be complete. The definition is later
// in this file, near the other definitions related to xchg.
template<size_t byte_size>
struct Atomic::XchgUsingCmpxchg {
  // Atomically store exchange_value into *dest and return the previous
  // value. Emulated with a load/cmpxchg retry loop (defined later in
  // this file) for platforms lacking a native xchg of this byte_size.
  template<typename T>
  T operator()(T volatile* dest,
               T exchange_value,
               atomic_memory_order order) const;
};
// Define the class before including platform file, which may use this
// as a base class, requiring it be complete.
template<size_t byte_size>
class Atomic::AddUsingCmpxchg {
public:
  // Atomically add add_value to *dest and return the resulting (new) value.
  // Built on fetch_then_add; the new value is recomputed locally from the
  // atomically observed old value.
  template<typename D, typename I>
  static inline D add_then_fetch(D volatile* dest,
                                 I add_value,
                                 atomic_memory_order order) {
    return fetch_then_add(dest, add_value, order) + add_value;
  }

  // Atomically add add_value to *dest and return the previous (old) value.
  // Emulated with a load/cmpxchg retry loop for platforms lacking a native
  // atomic add of this byte_size.
  template<typename D, typename I>
  static inline D fetch_then_add(D volatile* dest,
                                 I add_value,
                                 atomic_memory_order order) {
    STATIC_ASSERT(byte_size == sizeof(I));
    STATIC_ASSERT(byte_size == sizeof(D));

    D old_value;
    D new_value;
    do {
      old_value = Atomic::load(dest);
      new_value = old_value + add_value;
    } while (old_value != Atomic::cmpxchg(dest, old_value, new_value, order));
    return old_value;
  }
};
// Define the class before including platform file, which may specialize
// the operator definition. No generic definition of specializations
// of the operator template are provided, nor are there any generic
@@ -1170,4 +1215,18 @@ inline D Atomic::xchg(volatile D* dest, T exchange_value, atomic_memory_order or
return XchgImpl<D, T>()(dest, exchange_value, order);
}
// Generic xchg emulation: retry a compare-and-exchange until we succeed
// in swapping exchange_value into *dest; returns the replaced value.
template<size_t byte_size>
template<typename T>
inline T Atomic::XchgUsingCmpxchg<byte_size>::operator()(T volatile* dest,
                                                         T exchange_value,
                                                         atomic_memory_order order) const {
  STATIC_ASSERT(byte_size == sizeof(T));
  for (;;) {
    T prev = Atomic::load(dest);
    // cmpxchg returns the witnessed value; equality means our swap landed.
    if (prev == Atomic::cmpxchg(dest, prev, exchange_value, order)) {
      return prev;
    }
  }
}
#endif // SHARE_RUNTIME_ATOMIC_HPP

View File

@@ -59,14 +59,14 @@ TEST_VM(AtomicAddTest, int32) {
Support().test_fetch_add();
}
// 64bit Atomic::add is only supported on 64bit platforms.
#ifdef _LP64
TEST_VM(AtomicAddTest, int64) {
  // Skip on machines without 8-byte compare-and-exchange support.
  if (!VM_Version::supports_cx8()) return;
  // Exercise both flavors of 64-bit atomic add, each on a fresh fixture.
  AtomicAddTestSupport<int64_t>().test_add();
  AtomicAddTestSupport<int64_t>().test_fetch_add();
}
#endif // _LP64
TEST_VM(AtomicAddTest, ptr) {
uint _test_values[10] = {};
@@ -108,13 +108,13 @@ TEST_VM(AtomicXchgTest, int32) {
Support().test();
}
// 64bit Atomic::xchg is only supported on 64bit platforms.
#ifdef _LP64
TEST_VM(AtomicXchgTest, int64) {
  // Skip on machines without 8-byte compare-and-exchange support.
  if (!VM_Version::supports_cx8()) return;
  AtomicXchgTestSupport<int64_t>().test();
}
#endif // _LP64
template<typename T>
struct AtomicCmpxchgTestSupport {
@@ -142,6 +142,9 @@ TEST_VM(AtomicCmpxchgTest, int32) {
}
TEST_VM(AtomicCmpxchgTest, int64) {
  // Skip on machines without 8-byte compare-and-exchange support.
  if (!VM_Version::supports_cx8()) return;
  AtomicCmpxchgTestSupport<int64_t>().test();
}
@@ -345,12 +348,16 @@ TEST_VM(AtomicBitopsTest, uint32) {
AtomicBitopsTestSupport<uint32_t>()();
}
#ifdef _LP64
TEST_VM(AtomicBitopsTest, int64) {
  // Skip on machines without 8-byte compare-and-exchange support.
  if (!VM_Version::supports_cx8()) return;
  AtomicBitopsTestSupport<int64_t> support;
  support();
}
TEST_VM(AtomicBitopsTest, uint64) {
  // Skip on machines without 8-byte compare-and-exchange support.
  if (!VM_Version::supports_cx8()) return;
  AtomicBitopsTestSupport<uint64_t> support;
  support();
}
#endif // _LP64