From ba7d08b8199172058bd369d880d2d6a9f9649319 Mon Sep 17 00:00:00 2001
From: Aleksey Shipilev <shipilev@openjdk.org>
Date: Wed, 25 Oct 2023 08:29:58 +0000
Subject: [PATCH] 8316961: Fallback implementations for 64-bit Atomic::{add,xchg} on 32-bit platforms

Reviewed-by: eosterlund, dholmes, kbarrett, simonis
---
 src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp |  8 +++
 .../os_cpu/linux_arm/atomic_linux_arm.hpp     |  7 ++
 .../os_cpu/linux_x86/atomic_linux_x86.hpp     |  8 +++
 src/hotspot/share/runtime/atomic.hpp          | 65 ++++++++++++++++++-
 test/hotspot/gtest/runtime/test_atomic.cpp    | 23 ++++---
 5 files changed, 100 insertions(+), 11 deletions(-)

diff --git a/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp b/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp
index 77104194b0b..9ba246f553d 100644
--- a/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp
+++ b/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp
@@ -153,6 +153,14 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
   return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, dest, compare_value, exchange_value);
 }
 
+// No direct support for 8-byte xchg; emulate using cmpxchg.
+template<>
+struct Atomic::PlatformXchg<8> : Atomic::XchgUsingCmpxchg<8> {};
+
+// No direct support for 8-byte add; emulate using cmpxchg.
+template<>
+struct Atomic::PlatformAdd<8> : Atomic::AddUsingCmpxchg<8> {};
+
 template<>
 template<typename T>
 inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
diff --git a/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp b/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp
index 814dbd9aab5..513217649e6 100644
--- a/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp
+++ b/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp
@@ -128,6 +128,13 @@ inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
   return xchg_using_helper<int32_t>(ARMAtomicFuncs::_xchg_func, dest, exchange_value);
 }
 
+// No direct support for 8-byte xchg; emulate using cmpxchg.
+template<>
+struct Atomic::PlatformXchg<8> : Atomic::XchgUsingCmpxchg<8> {};
+
+// No direct support for 8-byte add; emulate using cmpxchg.
+template<>
+struct Atomic::PlatformAdd<8> : Atomic::AddUsingCmpxchg<8> {};
 
 // The memory_order parameter is ignored - we always provide the strongest/most-conservative ordering
diff --git a/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp b/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp
index 2e472a02068..0156546ba9b 100644
--- a/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp
+++ b/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp
@@ -153,6 +153,14 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
   return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, dest, compare_value, exchange_value);
 }
 
+// No direct support for 8-byte xchg; emulate using cmpxchg.
+template<>
+struct Atomic::PlatformXchg<8> : Atomic::XchgUsingCmpxchg<8> {};
+
+// No direct support for 8-byte add; emulate using cmpxchg.
+template<>
+struct Atomic::PlatformAdd<8> : Atomic::AddUsingCmpxchg<8> {};
+
 template<>
 template<typename T>
 inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
diff --git a/src/hotspot/share/runtime/atomic.hpp b/src/hotspot/share/runtime/atomic.hpp
index c85bf9055ab..ac0ce49d26e 100644
--- a/src/hotspot/share/runtime/atomic.hpp
+++ b/src/hotspot/share/runtime/atomic.hpp
@@ -398,11 +398,15 @@ private:
                                T compare_value,
                                T exchange_value);
 
-  // Support platforms that do not provide Read-Modify-Write
-  // byte-level atomic access. To use, derive PlatformCmpxchg<1> from
-  // this class.
+  // Support platforms that do not provide Read-Modify-Write atomic
+  // accesses for 1-byte and 8-byte widths. To use, derive PlatformCmpxchg<1>,
+  // PlatformAdd, PlatformXchg from these classes.
 public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11.
   struct CmpxchgByteUsingInt;
+  template<size_t byte_size>
+  struct XchgUsingCmpxchg;
+  template<size_t byte_size>
+  class AddUsingCmpxchg;
 private:
 
   // Dispatch handler for xchg. Provides type-based validity
@@ -677,6 +681,47 @@ struct Atomic::CmpxchgByteUsingInt {
                atomic_memory_order order) const;
 };
 
+// Define the class before including platform file, which may use this
+// as a base class, requiring it be complete. The definition is later
+// in this file, near the other definitions related to xchg.
+template<size_t byte_size>
+struct Atomic::XchgUsingCmpxchg {
+  template<typename T>
+  T operator()(T volatile* dest,
+               T exchange_value,
+               atomic_memory_order order) const;
+};
+
+// Define the class before including platform file, which may use this
+// as a base class, requiring it be complete.
+template<size_t byte_size>
+class Atomic::AddUsingCmpxchg {
+public:
+  template<typename D, typename I>
+  static inline D add_then_fetch(D volatile* dest,
+                                 I add_value,
+                                 atomic_memory_order order) {
+    D addend = add_value;
+    return fetch_then_add(dest, addend, order) + addend;
+  }
+
+  template<typename D, typename I>
+  static inline D fetch_then_add(D volatile* dest,
+                                 I add_value,
+                                 atomic_memory_order order) {
+    STATIC_ASSERT(byte_size == sizeof(I));
+    STATIC_ASSERT(byte_size == sizeof(D));
+
+    D old_value;
+    D new_value;
+    do {
+      old_value = Atomic::load(dest);
+      new_value = old_value + add_value;
+    } while (old_value != Atomic::cmpxchg(dest, old_value, new_value, order));
+    return old_value;
+  }
+};
+
 // Define the class before including platform file, which may specialize
 // the operator definition. No generic definition of specializations
 // of the operator template are provided, nor are there any generic
@@ -1170,4 +1215,18 @@ inline D Atomic::xchg(volatile D* dest, T exchange_value, atomic_memory_order or
   return XchgImpl<D, T>()(dest, exchange_value, order);
 }
 
+template<size_t byte_size>
+template<typename T>
+inline T Atomic::XchgUsingCmpxchg<byte_size>::operator()(T volatile* dest,
+                                                         T exchange_value,
+                                                         atomic_memory_order order) const {
+  STATIC_ASSERT(byte_size == sizeof(T));
+
+  T old_value;
+  do {
+    old_value = Atomic::load(dest);
+  } while (old_value != Atomic::cmpxchg(dest, old_value, exchange_value, order));
+  return old_value;
+}
+
 #endif // SHARE_RUNTIME_ATOMIC_HPP
diff --git a/test/hotspot/gtest/runtime/test_atomic.cpp b/test/hotspot/gtest/runtime/test_atomic.cpp
index e7c6f9e3f2b..744714c6f7f 100644
--- a/test/hotspot/gtest/runtime/test_atomic.cpp
+++ b/test/hotspot/gtest/runtime/test_atomic.cpp
@@ -59,14 +59,14 @@ TEST_VM(AtomicAddTest, int32) {
   Support().test_fetch_add();
 }
 
-// 64bit Atomic::add is only supported on 64bit platforms.
-#ifdef _LP64
 TEST_VM(AtomicAddTest, int64) {
+  // Check if 64-bit atomics are available on the machine.
+  if (!VM_Version::supports_cx8()) return;
+
   using Support = AtomicAddTestSupport<int64_t>;
   Support().test_add();
   Support().test_fetch_add();
 }
-#endif // _LP64
 
 TEST_VM(AtomicAddTest, ptr) {
   uint _test_values[10] = {};
@@ -108,13 +108,13 @@ TEST_VM(AtomicXchgTest, int32) {
   Support().test();
 }
 
-// 64bit Atomic::xchg is only supported on 64bit platforms.
-#ifdef _LP64
 TEST_VM(AtomicXchgTest, int64) {
+  // Check if 64-bit atomics are available on the machine.
+  if (!VM_Version::supports_cx8()) return;
+
   using Support = AtomicXchgTestSupport<int64_t>;
   Support().test();
 }
-#endif // _LP64
 
 template<typename T>
 struct AtomicCmpxchgTestSupport {
@@ -142,6 +142,9 @@ TEST_VM(AtomicCmpxchgTest, int32) {
 }
 
 TEST_VM(AtomicCmpxchgTest, int64) {
+  // Check if 64-bit atomics are available on the machine.
+  if (!VM_Version::supports_cx8()) return;
+
   using Support = AtomicCmpxchgTestSupport<int64_t>;
   Support().test();
 }
@@ -345,12 +348,16 @@ TEST_VM(AtomicBitopsTest, uint32) {
   AtomicBitopsTestSupport<uint32_t>()();
 }
 
-#ifdef _LP64
 TEST_VM(AtomicBitopsTest, int64) {
+  // Check if 64-bit atomics are available on the machine.
+  if (!VM_Version::supports_cx8()) return;
+
   AtomicBitopsTestSupport<int64_t>()();
 }
 
 TEST_VM(AtomicBitopsTest, uint64) {
+  // Check if 64-bit atomics are available on the machine.
+  if (!VM_Version::supports_cx8()) return;
+
   AtomicBitopsTestSupport<uint64_t>()();
 }
-#endif // _LP64
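
Side note, not part of the patch: a minimal standalone sketch of the cmpxchg retry loop that the new XchgUsingCmpxchg and AddUsingCmpxchg helpers implement, written against std::atomic<int64_t> purely for illustration. The function names xchg_via_cas and fetch_then_add_via_cas are made up here, and HotSpot's Atomic::cmpxchg plus its memory-order plumbing are assumed away; the point is only the shape of the loop: re-read the old value, compute the new one, and retry until the compare-and-swap observes the value it read.

// Illustration only: the same compare-and-swap retry loop, using std::atomic
// instead of HotSpot's Atomic:: layer.
#include <atomic>
#include <cstdint>
#include <iostream>

// Emulate a 64-bit exchange: retry until the CAS installs exchange_value
// over the old value we observed; return that old value.
int64_t xchg_via_cas(std::atomic<int64_t>& dest, int64_t exchange_value) {
  int64_t old_value = dest.load();
  while (!dest.compare_exchange_weak(old_value, exchange_value)) {
    // On failure, compare_exchange_weak refreshes old_value; just retry.
  }
  return old_value;
}

// Emulate a 64-bit fetch-then-add the same way.
int64_t fetch_then_add_via_cas(std::atomic<int64_t>& dest, int64_t add_value) {
  int64_t old_value = dest.load();
  while (!dest.compare_exchange_weak(old_value, old_value + add_value)) {
    // Retry with the refreshed old_value.
  }
  return old_value;
}

int main() {
  std::atomic<int64_t> v{40};
  std::cout << fetch_then_add_via_cas(v, 2) << "\n"; // prints 40, v becomes 42
  std::cout << xchg_via_cas(v, 7) << "\n";           // prints 42, v becomes 7
  std::cout << v.load() << "\n";                     // prints 7
}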