diff --git a/src/hotspot/os_cpu/linux_riscv/atomic_linux_riscv.hpp b/src/hotspot/os_cpu/linux_riscv/atomic_linux_riscv.hpp
index e85bd60b226..6546adb6ff3 100644
--- a/src/hotspot/os_cpu/linux_riscv/atomic_linux_riscv.hpp
+++ b/src/hotspot/os_cpu/linux_riscv/atomic_linux_riscv.hpp
@@ -39,6 +39,12 @@
 #define FULL_COMPILER_ATOMIC_SUPPORT
 #endif
 
+#if defined(__clang_major__)
+#define CORRECT_COMPILER_ATOMIC_SUPPORT
+#elif defined(__GNUC__) && (__riscv_xlen <= 32 || __GNUC__ > 13)
+#define CORRECT_COMPILER_ATOMIC_SUPPORT
+#endif
+
 template<size_t byte_size>
 struct Atomic::PlatformAdd {
   template<typename D, typename I>
@@ -114,6 +120,44 @@ inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest __attribute__((
 }
 #endif
 
+#ifndef CORRECT_COMPILER_ATOMIC_SUPPORT
+// The implementation of `__atomic_compare_exchange` lacks sign extension
+// in GCC 13 and lower when used with 32-bit unsigned integers on RV64,
+// so we should implement it manually.
+// GCC bug: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=114130.
+// See also JDK-8326936.
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest __attribute__((unused)),
+                                                T compare_value,
+                                                T exchange_value,
+                                                atomic_memory_order order) const {
+  STATIC_ASSERT(4 == sizeof(T));
+
+  int32_t old_value;
+  uint64_t rc_temp;
+
+  if (order != memory_order_relaxed) {
+    FULL_MEM_BARRIER;
+  }
+
+  __asm__ __volatile__ (
+    "1: lr.w      %0, %2    \n\t"
+    "   bne       %0, %3, 2f\n\t"
+    "   sc.w      %1, %4, %2\n\t"
+    "   bnez      %1, 1b    \n\t"
+    "2:                     \n\t"
+    : /*%0*/"=&r" (old_value), /*%1*/"=&r" (rc_temp), /*%2*/"+A" (*dest)
+    : /*%3*/"r" ((int64_t)(int32_t)compare_value), /*%4*/"r" (exchange_value)
+    : "memory" );
+
+  if (order != memory_order_relaxed) {
+    FULL_MEM_BARRIER;
+  }
+  return (T)old_value;
+}
+#endif
+
 template<size_t byte_size>
 template<typename T>
 inline T Atomic::PlatformXchg<byte_size>::operator()(T volatile* dest,
@@ -151,6 +195,10 @@ inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T volatile* dest __attri
   STATIC_ASSERT(byte_size >= 4);
 #endif
 
+#ifndef CORRECT_COMPILER_ATOMIC_SUPPORT
+  STATIC_ASSERT(byte_size != 4);
+#endif
+
   STATIC_ASSERT(byte_size == sizeof(T));
   if (order != memory_order_relaxed) {
     FULL_MEM_BARRIER;
@@ -187,5 +235,6 @@ struct Atomic::PlatformOrderedStore
 };
 
 #undef FULL_COMPILER_ATOMIC_SUPPORT
+#undef CORRECT_COMPILER_ATOMIC_SUPPORT
 
 #endif // OS_CPU_LINUX_RISCV_ATOMIC_LINUX_RISCV_HPP
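
For readers outside the JVM tree, below is a minimal standalone sketch of the same `lr.w`/`sc.w` compare-and-swap loop. It is illustrative only: the function name `cmpxchg_u32` is not from the patch, and the `FULL_MEM_BARRIER` fences the patch issues around the loop for non-relaxed orders are omitted. The detail to notice is the `(int64_t)(int32_t)` cast: on RV64, `lr.w` sign-extends the loaded 32-bit word into a 64-bit register, so the expected value must be sign-extended the same way before the `bne`, or values with bit 31 set (such as 0x80000000) would never compare equal. That missing sign extension in the compiler's own expansion of `__atomic_compare_exchange` is what GCC bug 114130 describes.

```cpp
// Standalone sketch of the patched 4-byte cmpxchg (RV64 with GCC/Clang only).
// Barriers omitted; names are illustrative, not taken from the patch.
#include <cstdint>
#include <cstdio>

static inline uint32_t cmpxchg_u32(volatile uint32_t* dest,
                                   uint32_t compare_value,
                                   uint32_t exchange_value) {
  int32_t old_value;   // lr.w writes a sign-extended 32-bit value here
  uint64_t rc_temp;    // sc.w writes 0 on success, non-zero on failure
  __asm__ __volatile__ (
    "1: lr.w      %0, %2    \n\t"  // old_value = *dest (sign-extended)
    "   bne       %0, %3, 2f\n\t"  // mismatch: exit, returning old_value
    "   sc.w      %1, %4, %2\n\t"  // try to store exchange_value
    "   bnez      %1, 1b    \n\t"  // store failed: retry the loop
    "2:                     \n\t"
    : "=&r" (old_value), "=&r" (rc_temp), "+A" (*dest)
    // Sign-extend compare_value so it matches what lr.w produced.
    : "r" ((int64_t)(int32_t)compare_value), "r" (exchange_value)
    : "memory" );
  return (uint32_t)old_value;
}

int main() {
  volatile uint32_t v = 0x80000000u;  // bit 31 set: the case the cast protects
  uint32_t old = cmpxchg_u32(&v, 0x80000000u, 42u);
  printf("old=0x%08x new=0x%08x\n", old, (uint32_t)v);  // old=0x80000000 new=0x0000002a
  return 0;
}
```

The `STATIC_ASSERT(byte_size != 4)` added to the generic `PlatformCmpxchg` path acts as a safety net: once the 4-byte specialization exists, the generic template must never be instantiated at that size on an affected compiler.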