8253464: ARM32 Zero: atomic_copy64 is incorrect, breaking volatile stores

Reviewed-by: aph
This commit is contained in:
Aleksey Shipilev 2020-09-23 16:53:10 +00:00
parent 0bc01da702
commit c21690b518

@@ -68,15 +68,23 @@
: "=&f"(tmp), "=Q"(*(volatile double*)dst)
: "Q"(*(volatile double*)src));
#elif defined(__ARM_ARCH_7A__)
// Note that a ldrexd + clrex combination is only needed for
// correctness on the OS level (context-switches). In this
// case, clrex *may* be beneficial for performance. For now
// don't bother with clrex as this is Zero.
jlong tmp;
asm volatile ("ldrexd %0, [%1]\n"
: "=r"(tmp)
: "r"(src), "m"(src));
*(jlong *) dst = tmp;
// The only way to perform the atomic 64-bit load/store
// is to use ldrexd/strexd for both reads and writes.
// For store, we need to have the matching (fake) load first.
// Put clrex between exclusive ops on src and dst for clarity.
uint64_t tmp_r, tmp_w;
uint32_t flag_w;
asm volatile ("ldrexd %[tmp_r], [%[src]]\n"
"clrex\n"
"1:\n"
"ldrexd %[tmp_w], [%[dst]]\n"
"strexd %[flag_w], %[tmp_r], [%[dst]]\n"
"cmp %[flag_w], 0\n"
"bne 1b\n"
: [tmp_r] "=&r" (tmp_r), [tmp_w] "=&r" (tmp_w),
[flag_w] "=&r" (flag_w)
: [src] "r" (src), [dst] "r" (dst)
: "cc", "memory");
#else
*(jlong *) dst = *(const jlong *) src;
#endif