8202080: Introduce ordering semantics for Atomic::add and other RMW atomics
Reviewed-by: lucy, rehn, dholmes
This commit is contained in:
parent 6869c08e6a
commit 71a38a4720

Changed files, all under src/hotspot:
  os_cpu/aix_ppc, os_cpu/bsd_x86, os_cpu/bsd_zero, os_cpu/linux_aarch64,
  os_cpu/linux_arm, os_cpu/linux_ppc, os_cpu/linux_s390, os_cpu/linux_sparc,
  os_cpu/linux_x86, os_cpu/linux_zero, os_cpu/solaris_sparc, os_cpu/solaris_x86,
  os_cpu/windows_x86, and share
src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp

@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2014 SAP SE. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,8 +26,8 @@
 #ifndef OS_CPU_AIX_OJDKPPC_VM_ATOMIC_AIX_PPC_HPP
 #define OS_CPU_AIX_OJDKPPC_VM_ATOMIC_AIX_PPC_HPP
 
-#ifndef _LP64
-#error "Atomic currently only impleneted for PPC64"
+#ifndef PPC64
+#error "Atomic currently only implemented for PPC64"
 #endif
 
 #include "utilities/debug.hpp"
@@ -35,39 +35,39 @@
 // Implementation of class atomic
 
 //
 // machine barrier instructions:
 //
-// - ppc_sync            two-way memory barrier, aka fence
-// - ppc_lwsync          orders  Store|Store,
-//                                Load|Store,
-//                                Load|Load,
-//                               but not Store|Load
-// - ppc_eieio           orders memory accesses for device memory (only)
-// - ppc_isync           invalidates speculatively executed instructions
-//                       From the POWER ISA 2.06 documentation:
-//                        "[...] an isync instruction prevents the execution of
-//                        instructions following the isync until instructions
-//                        preceding the isync have completed, [...]"
-//                       From IBM's AIX assembler reference:
-//                        "The isync [...] instructions causes the processor to
-//                        refetch any instructions that might have been fetched
-//                        prior to the isync instruction. The instruction isync
-//                        causes the processor to wait for all previous instructions
-//                        to complete. Then any instructions already fetched are
-//                        discarded and instruction processing continues in the
-//                        environment established by the previous instructions."
+// - sync                two-way memory barrier, aka fence
+// - lwsync              orders  Store|Store,
+//                                Load|Store,
+//                                Load|Load,
+//                               but not Store|Load
+// - eieio               orders memory accesses for device memory (only)
+// - isync               invalidates speculatively executed instructions
+//                       From the POWER ISA 2.06 documentation:
+//                        "[...] an isync instruction prevents the execution of
+//                        instructions following the isync until instructions
+//                        preceding the isync have completed, [...]"
+//                       From IBM's AIX assembler reference:
+//                        "The isync [...] instructions causes the processor to
+//                        refetch any instructions that might have been fetched
+//                        prior to the isync instruction. The instruction isync
+//                        causes the processor to wait for all previous instructions
+//                        to complete. Then any instructions already fetched are
+//                        discarded and instruction processing continues in the
+//                        environment established by the previous instructions."
 //
 // semantic barrier instructions:
 // (as defined in orderAccess.hpp)
 //
-// - ppc_release         orders Store|Store,       (maps to ppc_lwsync)
-//                               Load|Store
-// - ppc_acquire         orders  Load|Store,       (maps to ppc_lwsync)
-//                               Load|Load
-// - ppc_fence           orders Store|Store,       (maps to ppc_sync)
-//                               Load|Store,
-//                               Load|Load,
-//                              Store|Load
+// - release             orders Store|Store,       (maps to lwsync)
+//                               Load|Store
+// - acquire             orders  Load|Store,       (maps to lwsync)
+//                               Load|Load
+// - fence               orders Store|Store,       (maps to sync)
+//                               Load|Store,
+//                               Load|Load,
+//                              Store|Load
 //
 
 #define strasm_sync "\n sync \n"
@@ -79,80 +79,105 @@
 #define strasm_nobarrier ""
 #define strasm_nobarrier_clobber_memory ""
 
+inline void pre_membar(atomic_memory_order order) {
+  switch (order) {
+    case memory_order_relaxed:
+    case memory_order_acquire: break;
+    case memory_order_release:
+    case memory_order_acq_rel: __asm__ __volatile__ (strasm_lwsync); break;
+    default /*conservative*/ : __asm__ __volatile__ (strasm_sync); break;
+  }
+}
+
+inline void post_membar(atomic_memory_order order) {
+  switch (order) {
+    case memory_order_relaxed:
+    case memory_order_release: break;
+    case memory_order_acquire:
+    case memory_order_acq_rel: __asm__ __volatile__ (strasm_isync); break;
+    default /*conservative*/ : __asm__ __volatile__ (strasm_sync); break;
+  }
+}
+
 
 template<size_t byte_size>
 struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
   template<typename I, typename D>
-  D add_and_fetch(I add_value, D volatile* dest) const;
+  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
 };
 
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+                                               atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
 
   D result;
 
+  pre_membar(order);
+
   __asm__ __volatile__ (
-    strasm_lwsync
     "1: lwarx %0, 0, %2 \n"
    "   add   %0, %0, %1 \n"
    "   stwcx. %0, 0, %2 \n"
    "   bne-  1b \n"
-    strasm_isync
    : /*%0*/"=&r" (result)
    : /*%1*/"r" (add_value), /*%2*/"r" (dest)
    : "cc", "memory" );
 
+  post_membar(order);
+
   return result;
 }
 
 
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
+                                               atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
 
   D result;
 
+  pre_membar(order);
+
   __asm__ __volatile__ (
-    strasm_lwsync
    "1: ldarx %0, 0, %2 \n"
    "   add   %0, %0, %1 \n"
    "   stdcx. %0, 0, %2 \n"
    "   bne-  1b \n"
-    strasm_isync
    : /*%0*/"=&r" (result)
    : /*%1*/"r" (add_value), /*%2*/"r" (dest)
    : "cc", "memory" );
 
+  post_membar(order);
+
   return result;
 }
 
 template<>
 template<typename T>
 inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-                                             T volatile* dest) const {
+                                             T volatile* dest,
+                                             atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
   // Note that xchg doesn't necessarily do an acquire
   // (see synchronizer.cpp).
 
   T old_value;
   const uint64_t zero = 0;
 
+  pre_membar(order);
+
   __asm__ __volatile__ (
-    /* lwsync */
-    strasm_lwsync
    /* atomic loop */
    "1: \n"
    " lwarx  %[old_value], %[dest], %[zero] \n"
    " stwcx. %[exchange_value], %[dest], %[zero] \n"
    " bne-   1b \n"
-    /* isync */
-    strasm_sync
    /* exit */
    "2: \n"
    /* out */
@@ -168,13 +193,16 @@ inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
    "memory"
    );
 
+  post_membar(order);
+
   return old_value;
 }
 
 template<>
 template<typename T>
 inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-                                             T volatile* dest) const {
+                                             T volatile* dest,
+                                             atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   // Note that xchg doesn't necessarily do an acquire
   // (see synchronizer.cpp).
@@ -182,16 +210,14 @@ inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
   T old_value;
   const uint64_t zero = 0;
 
+  pre_membar(order);
+
   __asm__ __volatile__ (
-    /* lwsync */
-    strasm_lwsync
    /* atomic loop */
    "1: \n"
    " ldarx  %[old_value], %[dest], %[zero] \n"
    " stdcx. %[exchange_value], %[dest], %[zero] \n"
    " bne-   1b \n"
-    /* isync */
-    strasm_sync
    /* exit */
    "2: \n"
    /* out */
@@ -207,33 +233,17 @@ inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
    "memory"
    );
 
+  post_membar(order);
+
   return old_value;
 }
 
-inline void cmpxchg_pre_membar(cmpxchg_memory_order order) {
-  if (order != memory_order_relaxed) {
-    __asm__ __volatile__ (
-      /* fence */
-      strasm_sync
-      );
-  }
-}
-
-inline void cmpxchg_post_membar(cmpxchg_memory_order order) {
-  if (order != memory_order_relaxed) {
-    __asm__ __volatile__ (
-      /* fence */
-      strasm_sync
-      );
-  }
-}
-
 template<>
 template<typename T>
 inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order order) const {
+                                                atomic_memory_order order) const {
   STATIC_ASSERT(1 == sizeof(T));
 
   // Note that cmpxchg guarantees a two-way memory barrier across
@@ -254,7 +264,7 @@ inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
 
   unsigned int old_value, value32;
 
-  cmpxchg_pre_membar(order);
+  pre_membar(order);
 
   __asm__ __volatile__ (
    /* simple guard */
@@ -293,7 +303,7 @@ inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
    "memory"
    );
 
-  cmpxchg_post_membar(order);
+  post_membar(order);
 
   return PrimitiveConversions::cast<T>((unsigned char)old_value);
 }
@@ -303,7 +313,7 @@ template<typename T>
 inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order order) const {
+                                                atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
 
   // Note that cmpxchg guarantees a two-way memory barrier across
@@ -313,7 +323,7 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
   T old_value;
   const uint64_t zero = 0;
 
-  cmpxchg_pre_membar(order);
+  pre_membar(order);
 
   __asm__ __volatile__ (
    /* simple guard */
@@ -343,7 +353,7 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
    "memory"
    );
 
-  cmpxchg_post_membar(order);
+  post_membar(order);
 
   return old_value;
 }
@@ -353,7 +363,7 @@ template<typename T>
 inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order order) const {
+                                                atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
 
   // Note that cmpxchg guarantees a two-way memory barrier across
@@ -363,7 +373,7 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
   T old_value;
   const uint64_t zero = 0;
 
-  cmpxchg_pre_membar(order);
+  pre_membar(order);
 
   __asm__ __volatile__ (
    /* simple guard */
@@ -393,7 +403,7 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
    "memory"
    );
 
-  cmpxchg_post_membar(order);
+  post_membar(order);
 
   return old_value;
 }
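The pre_membar/post_membar pair above is the heart of the change on PPC64: the barrier work moves out of each inline-assembly block and is chosen per call from the requested ordering. A minimal standalone sketch of that same mapping (not HotSpot code; names and output are illustrative only):

#include <cstdio>

enum atomic_memory_order {
  memory_order_relaxed = 0,
  memory_order_acquire = 2,
  memory_order_release = 3,
  memory_order_acq_rel = 4,
  memory_order_conservative = 8
};

// Barrier before the ll/sc loop: release-type orders need lwsync,
// conservative needs a full sync, relaxed/acquire need nothing in front.
const char* pre_barrier(atomic_memory_order order) {
  switch (order) {
    case memory_order_relaxed:
    case memory_order_acquire: return "(none)";
    case memory_order_release:
    case memory_order_acq_rel: return "lwsync";
    default:                   return "sync";
  }
}

// Barrier after the loop: acquire-type orders need isync,
// conservative needs sync, relaxed/release need nothing behind.
const char* post_barrier(atomic_memory_order order) {
  switch (order) {
    case memory_order_relaxed:
    case memory_order_release: return "(none)";
    case memory_order_acquire:
    case memory_order_acq_rel: return "isync";
    default:                   return "sync";
  }
}

int main() {
  const atomic_memory_order modes[] = {
    memory_order_relaxed, memory_order_acquire, memory_order_release,
    memory_order_acq_rel, memory_order_conservative
  };
  for (atomic_memory_order m : modes) {
    printf("order %d: %s | lwarx/stwcx. loop | %s\n",
           (int)m, pre_barrier(m), post_barrier(m));
  }
  return 0;
}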
src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,12 +32,13 @@ struct Atomic::PlatformAdd
   : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
 {
   template<typename I, typename D>
-  D fetch_and_add(I add_value, D volatile* dest) const;
+  D fetch_and_add(I add_value, D volatile* dest, atomic_memory_order /* order */) const;
 };
 
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest) const {
+inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest,
+                                               atomic_memory_order /* order */) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
   D old_value;
@@ -51,7 +52,8 @@ inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest) co
 template<>
 template<typename T>
 inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-                                             T volatile* dest) const {
+                                             T volatile* dest,
+                                             atomic_memory_order /* order */) const {
   STATIC_ASSERT(4 == sizeof(T));
   __asm__ volatile ( "xchgl (%2),%0"
                     : "=r" (exchange_value)
@@ -65,7 +67,7 @@ template<typename T>
 inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order /* order */) const {
+                                                atomic_memory_order /* order */) const {
   STATIC_ASSERT(1 == sizeof(T));
   __asm__ volatile ( "lock cmpxchgb %1,(%3)"
                     : "=a" (exchange_value)
@@ -79,7 +81,7 @@ template<typename T>
 inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order /* order */) const {
+                                                atomic_memory_order /* order */) const {
   STATIC_ASSERT(4 == sizeof(T));
   __asm__ volatile ( "lock cmpxchgl %1,(%3)"
                     : "=a" (exchange_value)
@@ -91,7 +93,8 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
 #ifdef AMD64
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest) const {
+inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest,
+                                               atomic_memory_order /* order */) const {
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
   D old_value;
@@ -105,7 +108,8 @@ inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest) co
 template<>
 template<typename T>
 inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-                                             T volatile* dest) const {
+                                             T volatile* dest,
+                                             atomic_memory_order /* order */) const {
   STATIC_ASSERT(8 == sizeof(T));
   __asm__ __volatile__ ("xchgq (%2),%0"
                         : "=r" (exchange_value)
@@ -119,7 +123,7 @@ template<typename T>
 inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order /* order */) const {
+                                                atomic_memory_order /* order */) const {
   STATIC_ASSERT(8 == sizeof(T));
   __asm__ __volatile__ ( "lock cmpxchgq %1,(%3)"
                         : "=a" (exchange_value)
@@ -141,7 +145,7 @@ template<typename T>
 inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order order) const {
+                                                atomic_memory_order /* order */) const {
   STATIC_ASSERT(8 == sizeof(T));
   return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
 }
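On x86 every lock-prefixed RMW instruction is already a full two-way fence, which is why this port can accept the new order parameter and ignore it. A short standalone illustration using standard C++ atomics (an analogy, not the HotSpot API): on x86-64 both of these functions typically compile to the same lock xadd, so a relaxed order costs nothing and changes nothing here.

#include <atomic>
#include <cstdio>

std::atomic<int> counter{0};

int add_relaxed() { return counter.fetch_add(1, std::memory_order_relaxed); }
int add_seq_cst() { return counter.fetch_add(1, std::memory_order_seq_cst); }

int main() {
  add_relaxed();
  add_seq_cst();
  printf("counter = %d\n", counter.load());  // prints 2
  return 0;
}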
src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2011, 2015, Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -164,12 +164,13 @@ struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
   template<typename I, typename D>
-  D add_and_fetch(I add_value, D volatile* dest) const;
+  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
 };
 
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+                                               atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
 
@@ -186,7 +187,8 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) co
 
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
+                                               atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
 
@@ -196,7 +198,8 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) co
 template<>
 template<typename T>
 inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-                                             T volatile* dest) const {
+                                             T volatile* dest,
+                                             atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
 #ifdef ARM
   return xchg_using_helper<int>(arm_lock_test_and_set, exchange_value, dest);
@@ -222,7 +225,8 @@ inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
 template<>
 template<typename T>
 inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-                                             T volatile* dest) const {
+                                             T volatile* dest,
+                                             atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   T result = __sync_lock_test_and_set (dest, exchange_value);
   __sync_synchronize();
@@ -238,7 +242,7 @@ template<typename T>
 inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order order) const {
+                                                atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
 #ifdef ARM
   return cmpxchg_using_helper<int>(arm_compare_and_swap, exchange_value, dest, compare_value);
@@ -256,7 +260,7 @@ template<typename T>
 inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order order) const {
+                                                atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
 }
src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -39,7 +39,7 @@ struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
   template<typename I, typename D>
-  D add_and_fetch(I add_value, D volatile* dest) const {
+  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const {
     return __sync_add_and_fetch(dest, add_value);
   }
 };
@@ -47,7 +47,8 @@ struct Atomic::PlatformAdd
 template<size_t byte_size>
 template<typename T>
 inline T Atomic::PlatformXchg<byte_size>::operator()(T exchange_value,
-                                                     T volatile* dest) const {
+                                                     T volatile* dest,
+                                                     atomic_memory_order order) const {
   STATIC_ASSERT(byte_size == sizeof(T));
   T res = __sync_lock_test_and_set(dest, exchange_value);
   FULL_MEM_BARRIER;
@@ -59,7 +60,7 @@ template<typename T>
 inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T exchange_value,
                                                         T volatile* dest,
                                                         T compare_value,
-                                                        cmpxchg_memory_order order) const {
+                                                        atomic_memory_order order) const {
   STATIC_ASSERT(byte_size == sizeof(T));
   if (order == memory_order_relaxed) {
     T value = compare_value;
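The interesting case in this port is the cmpxchg specialization: when the caller asks for memory_order_relaxed, the CAS is performed without the full barriers of the conservative path. The hunk above elides the actual call, so the following is only a sketch of what such a relaxed path can look like, assuming the GCC __atomic builtin shown here:

#include <cstdio>

long relaxed_cas(volatile long* dest, long compare_value, long exchange_value) {
  long expected = compare_value;
  // Strong CAS, relaxed ordering on both the success and failure paths.
  __atomic_compare_exchange_n(dest, &expected, exchange_value,
                              /*weak=*/false, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  return expected;  // holds the prior value, matching cmpxchg's convention
}

int main() {
  volatile long v = 5;
  long old = relaxed_cas(&v, 5, 9);
  printf("old=%ld new=%ld\n", old, (long)v);  // old=5 new=9
  return 0;
}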
src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -81,12 +81,13 @@ struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
   template<typename I, typename D>
-  D add_and_fetch(I add_value, D volatile* dest) const;
+  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
 };
 
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+                                               atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
 #ifdef AARCH64
@@ -110,7 +111,8 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) co
 #ifdef AARCH64
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
+                                               atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
   D val;
@@ -131,7 +133,8 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) co
 template<>
 template<typename T>
 inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-                                             T volatile* dest) const {
+                                             T volatile* dest,
+                                             atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
 #ifdef AARCH64
   T old_val;
@@ -154,7 +157,8 @@ inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
 template<>
 template<typename T>
 inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-                                             T volatile* dest) const {
+                                             T volatile* dest,
+                                             atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   T old_val;
   int tmp;
@@ -200,7 +204,7 @@ template<typename T>
 inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order order) const {
+                                                atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
 #ifdef AARCH64
   T rv;
@@ -230,7 +234,7 @@ template<typename T>
 inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order order) const {
+                                                atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
 #ifdef AARCH64
   T rv;
src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp

@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2014 SAP SE. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -77,79 +77,105 @@
 #define strasm_nobarrier ""
 #define strasm_nobarrier_clobber_memory ""
 
+inline void pre_membar(atomic_memory_order order) {
+  switch (order) {
+    case memory_order_relaxed:
+    case memory_order_acquire: break;
+    case memory_order_release:
+    case memory_order_acq_rel: __asm__ __volatile__ (strasm_lwsync); break;
+    default /*conservative*/ : __asm__ __volatile__ (strasm_sync); break;
+  }
+}
+
+inline void post_membar(atomic_memory_order order) {
+  switch (order) {
+    case memory_order_relaxed:
+    case memory_order_release: break;
+    case memory_order_acquire:
+    case memory_order_acq_rel: __asm__ __volatile__ (strasm_isync); break;
+    default /*conservative*/ : __asm__ __volatile__ (strasm_sync); break;
+  }
+}
+
 
 template<size_t byte_size>
 struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
   template<typename I, typename D>
-  D add_and_fetch(I add_value, D volatile* dest) const;
+  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
 };
 
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+                                               atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
 
   D result;
 
+  pre_membar(order);
+
   __asm__ __volatile__ (
-    strasm_lwsync
    "1: lwarx %0, 0, %2 \n"
    "   add   %0, %0, %1 \n"
    "   stwcx. %0, 0, %2 \n"
    "   bne-  1b \n"
-    strasm_isync
    : /*%0*/"=&r" (result)
    : /*%1*/"r" (add_value), /*%2*/"r" (dest)
    : "cc", "memory" );
 
+  post_membar(order);
+
   return result;
 }
 
 
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
+                                               atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
 
   D result;
 
+  pre_membar(order);
+
   __asm__ __volatile__ (
-    strasm_lwsync
    "1: ldarx %0, 0, %2 \n"
    "   add   %0, %0, %1 \n"
    "   stdcx. %0, 0, %2 \n"
    "   bne-  1b \n"
-    strasm_isync
    : /*%0*/"=&r" (result)
    : /*%1*/"r" (add_value), /*%2*/"r" (dest)
    : "cc", "memory" );
 
+  post_membar(order);
+
   return result;
 }
 
 template<>
 template<typename T>
 inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-                                             T volatile* dest) const {
+                                             T volatile* dest,
+                                             atomic_memory_order order) const {
   // Note that xchg doesn't necessarily do an acquire
   // (see synchronizer.cpp).
 
   T old_value;
   const uint64_t zero = 0;
 
+  pre_membar(order);
+
   __asm__ __volatile__ (
-    /* lwsync */
-    strasm_lwsync
    /* atomic loop */
    "1: \n"
    " lwarx  %[old_value], %[dest], %[zero] \n"
    " stwcx. %[exchange_value], %[dest], %[zero] \n"
    " bne-   1b \n"
-    /* isync */
-    strasm_sync
    /* exit */
    "2: \n"
    /* out */
@@ -165,13 +191,16 @@ inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
    "memory"
    );
 
+  post_membar(order);
+
   return old_value;
 }
 
 template<>
 template<typename T>
 inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-                                             T volatile* dest) const {
+                                             T volatile* dest,
+                                             atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   // Note that xchg doesn't necessarily do an acquire
   // (see synchronizer.cpp).
@@ -179,16 +208,14 @@ inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
   T old_value;
   const uint64_t zero = 0;
 
+  pre_membar(order);
+
   __asm__ __volatile__ (
-    /* lwsync */
-    strasm_lwsync
    /* atomic loop */
    "1: \n"
    " ldarx  %[old_value], %[dest], %[zero] \n"
    " stdcx. %[exchange_value], %[dest], %[zero] \n"
    " bne-   1b \n"
-    /* isync */
-    strasm_sync
    /* exit */
    "2: \n"
    /* out */
@@ -204,33 +231,17 @@ inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
    "memory"
    );
 
+  post_membar(order);
+
   return old_value;
 }
 
-inline void cmpxchg_pre_membar(cmpxchg_memory_order order) {
-  if (order != memory_order_relaxed) {
-    __asm__ __volatile__ (
-      /* fence */
-      strasm_sync
-      );
-  }
-}
-
-inline void cmpxchg_post_membar(cmpxchg_memory_order order) {
-  if (order != memory_order_relaxed) {
-    __asm__ __volatile__ (
-      /* fence */
-      strasm_sync
-      );
-  }
-}
-
 template<>
 template<typename T>
 inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order order) const {
+                                                atomic_memory_order order) const {
   STATIC_ASSERT(1 == sizeof(T));
 
   // Note that cmpxchg guarantees a two-way memory barrier across
@@ -251,7 +262,7 @@ inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
 
   unsigned int old_value, value32;
 
-  cmpxchg_pre_membar(order);
+  pre_membar(order);
 
   __asm__ __volatile__ (
    /* simple guard */
@@ -290,7 +301,7 @@ inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
    "memory"
    );
 
-  cmpxchg_post_membar(order);
+  post_membar(order);
 
   return PrimitiveConversions::cast<T>((unsigned char)old_value);
 }
@@ -300,7 +311,7 @@ template<typename T>
 inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order order) const {
+                                                atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
 
   // Note that cmpxchg guarantees a two-way memory barrier across
@@ -310,7 +321,7 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
   T old_value;
   const uint64_t zero = 0;
 
-  cmpxchg_pre_membar(order);
+  pre_membar(order);
 
   __asm__ __volatile__ (
    /* simple guard */
@@ -340,7 +351,7 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
    "memory"
    );
 
-  cmpxchg_post_membar(order);
+  post_membar(order);
 
   return old_value;
 }
@@ -350,7 +361,7 @@ template<typename T>
 inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order order) const {
+                                                atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
 
   // Note that cmpxchg guarantees a two-way memory barrier across
@@ -360,7 +371,7 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
   T old_value;
   const uint64_t zero = 0;
 
-  cmpxchg_pre_membar(order);
+  pre_membar(order);
 
   __asm__ __volatile__ (
    /* simple guard */
@@ -390,7 +401,7 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
    "memory"
    );
 
-  cmpxchg_post_membar(order);
+  post_membar(order);
 
   return old_value;
 }
src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp

@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -68,23 +68,31 @@
 // The return value of the method is the value that was successfully stored. At the
 // time the caller receives back control, the value in memory may have changed already.
 
+// New atomic operations only include specific-operand-serialization, not full
+// memory barriers. We can use the Fast-BCR-Serialization Facility for them.
+inline void z196_fast_sync() {
+  __asm__ __volatile__ ("bcr 14, 0" : : : "memory");
+}
+
 template<size_t byte_size>
 struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
   template<typename I, typename D>
-  D add_and_fetch(I add_value, D volatile* dest) const;
+  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
 };
 
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I inc, D volatile* dest) const {
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I inc, D volatile* dest,
+                                               atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
 
   D old, upd;
 
   if (VM_Version::has_LoadAndALUAtomicV1()) {
+    if (order == memory_order_conservative) { z196_fast_sync(); }
     __asm__ __volatile__ (
      " LGFR 0,%[inc] \n\t" // save increment
      " LA   3,%[mem] \n\t" // force data address into ARG2
@@ -106,6 +114,7 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(I inc, D volatile* dest) const {
      //---< clobbered >---
      : "cc", "r0", "r2", "r3", "memory"
    );
+    if (order == memory_order_conservative) { z196_fast_sync(); }
   } else {
     __asm__ __volatile__ (
      " LLGF %[old],%[mem] \n\t" // get old value
@@ -129,13 +138,15 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(I inc, D volatile* dest) const {
 
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I inc, D volatile* dest) const {
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I inc, D volatile* dest,
+                                               atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
 
   D old, upd;
 
   if (VM_Version::has_LoadAndALUAtomicV1()) {
+    if (order == memory_order_conservative) { z196_fast_sync(); }
     __asm__ __volatile__ (
      " LGR 0,%[inc] \n\t" // save increment
      " LA  3,%[mem] \n\t" // force data address into ARG2
@@ -157,6 +168,7 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I inc, D volatile* dest) const {
      //---< clobbered >---
      : "cc", "r0", "r2", "r3", "memory"
    );
+    if (order == memory_order_conservative) { z196_fast_sync(); }
   } else {
     __asm__ __volatile__ (
      " LG %[old],%[mem] \n\t" // get old value
@@ -197,7 +209,8 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I inc, D volatile* dest) const {
 template<>
 template<typename T>
 inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-                                             T volatile* dest) const {
+                                             T volatile* dest,
+                                             atomic_memory_order unused) const {
   STATIC_ASSERT(4 == sizeof(T));
   T old;
 
@@ -220,7 +233,8 @@ inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
 template<>
 template<typename T>
 inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-                                             T volatile* dest) const {
+                                             T volatile* dest,
+                                             atomic_memory_order unused) const {
   STATIC_ASSERT(8 == sizeof(T));
   T old;
 
@@ -278,7 +292,7 @@ template<typename T>
 inline T Atomic::PlatformCmpxchg<4>::operator()(T xchg_val,
                                                 T volatile* dest,
                                                 T cmp_val,
-                                                cmpxchg_memory_order unused) const {
+                                                atomic_memory_order unused) const {
   STATIC_ASSERT(4 == sizeof(T));
   T old;
 
@@ -302,7 +316,7 @@ template<typename T>
 inline T Atomic::PlatformCmpxchg<8>::operator()(T xchg_val,
                                                 T volatile* dest,
                                                 T cmp_val,
-                                                cmpxchg_memory_order unused) const {
+                                                atomic_memory_order unused) const {
   STATIC_ASSERT(8 == sizeof(T));
   T old;
 
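z196_fast_sync() is the cheap full barrier this port wraps around an RMW only when conservative ordering is requested: "bcr 14, 0" is a branch-never that the z196 Fast-BCR-Serialization Facility defines to serialize like a memory barrier. A standalone sketch of that bracketing pattern (illustrative only; the s390x asm compiles as shown only on that target, with a portable stand-in otherwise):

#include <cstdio>

static inline void fast_sync() {
#if defined(__s390x__)
  __asm__ __volatile__ ("bcr 14, 0" : : : "memory");  // fast BCR serialization
#else
  __sync_synchronize();  // portable stand-in for other targets
#endif
}

int conservative_add(volatile int* dest, int inc) {
  fast_sync();                                   // leading fence
  int result = __sync_add_and_fetch(dest, inc);  // the RMW itself
  fast_sync();                                   // trailing fence
  return result;
}

int main() {
  volatile int counter = 0;
  printf("%d\n", conservative_add(&counter, 1));  // prints 1
  return 0;
}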
src/hotspot/os_cpu/linux_sparc/atomic_linux_sparc.hpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,12 +32,13 @@ struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
   template<typename I, typename D>
-  D add_and_fetch(I add_value, D volatile* dest) const;
+  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
 };
 
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+                                               atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
 
@@ -59,7 +60,8 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) co
 
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
+                                               atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
 
@@ -82,7 +84,8 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) co
 template<>
 template<typename T>
 inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-                                             T volatile* dest) const {
+                                             T volatile* dest,
+                                             atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
   T rv = exchange_value;
   __asm__ volatile(
@@ -96,7 +99,8 @@ inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
 template<>
 template<typename T>
 inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-                                             T volatile* dest) const {
+                                             T volatile* dest,
+                                             atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   T rv = exchange_value;
   __asm__ volatile(
@@ -123,7 +127,7 @@ template<typename T>
 inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order order) const {
+                                                atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
   T rv;
   __asm__ volatile(
@@ -139,7 +143,7 @@ template<typename T>
 inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order order) const {
+                                                atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   T rv;
   __asm__ volatile(
src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,12 +32,13 @@ struct Atomic::PlatformAdd
   : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
 {
   template<typename I, typename D>
-  D fetch_and_add(I add_value, D volatile* dest) const;
+  D fetch_and_add(I add_value, D volatile* dest, atomic_memory_order order) const;
 };
 
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest) const {
+inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest,
+                                               atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
   D old_value;
@@ -51,7 +52,8 @@ inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest) co
 template<>
 template<typename T>
 inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-                                             T volatile* dest) const {
+                                             T volatile* dest,
+                                             atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
   __asm__ volatile ( "xchgl (%2),%0"
                     : "=r" (exchange_value)
@@ -65,7 +67,7 @@ template<typename T>
 inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order /* order */) const {
+                                                atomic_memory_order /* order */) const {
   STATIC_ASSERT(1 == sizeof(T));
   __asm__ volatile ("lock cmpxchgb %1,(%3)"
                     : "=a" (exchange_value)
@@ -79,7 +81,7 @@ template<typename T>
 inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order /* order */) const {
+                                                atomic_memory_order /* order */) const {
   STATIC_ASSERT(4 == sizeof(T));
   __asm__ volatile ("lock cmpxchgl %1,(%3)"
                     : "=a" (exchange_value)
@@ -92,7 +94,8 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
 
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest) const {
+inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest,
+                                               atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
   D old_value;
@@ -105,8 +108,8 @@ inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest) co
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-                                             T volatile* dest) const {
+inline T Atomic::PlatformXchg<8>::operator()(T exchange_value, T volatile* dest,
+                                             atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   __asm__ __volatile__ ("xchgq (%2),%0"
                         : "=r" (exchange_value)
@@ -120,7 +123,7 @@ template<typename T>
 inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order /* order */) const {
+                                                atomic_memory_order /* order */) const {
   STATIC_ASSERT(8 == sizeof(T));
   __asm__ __volatile__ ("lock cmpxchgq %1,(%3)"
                         : "=a" (exchange_value)
@@ -142,7 +145,7 @@ template<typename T>
 inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order order) const {
+                                                atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
 }
src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2011, 2015, Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -35,12 +35,13 @@ struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
   template<typename I, typename D>
-  D add_and_fetch(I add_value, D volatile* dest) const;
+  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
 };
 
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+                                               atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
 
@@ -49,7 +50,8 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) co
 
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
+                                               atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
   return __sync_add_and_fetch(dest, add_value);
@@ -58,7 +60,8 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) co
 template<>
 template<typename T>
 inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-                                             T volatile* dest) const {
+                                             T volatile* dest,
+                                             atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
   // __sync_lock_test_and_set is a bizarrely named atomic exchange
   // operation.  Note that some platforms only support this with the
@@ -76,7 +79,8 @@ inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
 template<>
 template<typename T>
 inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-                                             T volatile* dest) const {
+                                             T volatile* dest,
+                                             atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   T result = __sync_lock_test_and_set (dest, exchange_value);
   __sync_synchronize();
@@ -92,7 +96,7 @@ template<typename T>
 inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order order) const {
+                                                atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
 }
@@ -102,7 +106,7 @@ template<typename T>
 inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order order) const {
+                                                atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
 }
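As the comment in the hunk notes, __sync_lock_test_and_set is an atomic exchange with only acquire semantics, so the zero port follows it with __sync_synchronize() to get the full two-way barrier that HotSpot's conservative default promises. A small self-contained illustration of that pairing (not the port itself):

#include <cstdio>

long full_xchg(volatile long* dest, long exchange_value) {
  long result = __sync_lock_test_and_set(dest, exchange_value);  // acquire only
  __sync_synchronize();  // upgrade to a full two-way barrier
  return result;
}

int main() {
  volatile long v = 1;
  printf("old=%ld new=%ld\n", full_xchg(&v, 2), (long)v);  // old=1 new=2
  return 0;
}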
src/hotspot/os_cpu/solaris_sparc/atomic_solaris_sparc.hpp

@@ -31,7 +31,7 @@
 template<size_t byte_size>
 struct Atomic::PlatformAdd {
   template<typename I, typename D>
-  inline D operator()(I add_value, D volatile* dest) const {
+  inline D operator()(I add_value, D volatile* dest, atomic_memory_order order) const {
     D old_value = *dest;
     while (true) {
       D new_value = old_value + add_value;
@@ -46,7 +46,8 @@ struct Atomic::PlatformAdd {
 template<>
 template<typename T>
 inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-                                             T volatile* dest) const {
+                                             T volatile* dest,
+                                             atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
   __asm__ volatile ( "swap [%2],%0"
                     : "=r" (exchange_value)
@@ -58,7 +59,8 @@ inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
 template<>
 template<typename T>
 inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-                                             T volatile* dest) const {
+                                             T volatile* dest,
+                                             atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   T old_value = *dest;
   while (true) {
@@ -78,7 +80,7 @@ template<typename T>
 inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order order) const {
+                                                atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
   T rv;
   __asm__ volatile(
@@ -94,7 +96,7 @@ template<typename T>
 inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order order) const {
+                                                atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   T rv;
   __asm__ volatile(
src/hotspot/os_cpu/solaris_x86/atomic_solaris_x86.hpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,13 +45,14 @@ struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
   template<typename I, typename D>
-  D add_and_fetch(I add_value, D volatile* dest) const;
+  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
 };
 
 // Not using add_using_helper; see comment for cmpxchg.
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+                                               atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
   return PrimitiveConversions::cast<D>(
@@ -62,7 +63,8 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) co
 // Not using add_using_helper; see comment for cmpxchg.
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
+                                               atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
   return PrimitiveConversions::cast<D>(
@@ -73,7 +75,8 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) co
 template<>
 template<typename T>
 inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-                                             T volatile* dest) const {
+                                             T volatile* dest,
+                                             atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
   return PrimitiveConversions::cast<T>(
     _Atomic_xchg(PrimitiveConversions::cast<int32_t>(exchange_value),
@@ -85,7 +88,8 @@ extern "C" int64_t _Atomic_xchg_long(int64_t exchange_value, volatile int64_t* d
 template<>
 template<typename T>
 inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-                                             T volatile* dest) const {
+                                             T volatile* dest,
+                                             atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   return PrimitiveConversions::cast<T>(
     _Atomic_xchg_long(PrimitiveConversions::cast<int64_t>(exchange_value),
@@ -103,7 +107,7 @@ template<typename T>
 inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order order) const {
+                                                atomic_memory_order order) const {
   STATIC_ASSERT(1 == sizeof(T));
   return PrimitiveConversions::cast<T>(
     _Atomic_cmpxchg_byte(PrimitiveConversions::cast<int8_t>(exchange_value),
@@ -116,7 +120,7 @@ template<typename T>
 inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order order) const {
+                                                atomic_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
   return PrimitiveConversions::cast<T>(
     _Atomic_cmpxchg(PrimitiveConversions::cast<int32_t>(exchange_value),
@@ -129,7 +133,7 @@ template<typename T>
 inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
-                                                cmpxchg_memory_order order) const {
+                                                atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   return PrimitiveConversions::cast<T>(
     _Atomic_cmpxchg_long(PrimitiveConversions::cast<int64_t>(exchange_value),
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -47,19 +47,21 @@ struct Atomic::PlatformAdd
|
||||
: Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
|
||||
{
|
||||
template<typename I, typename D>
|
||||
D add_and_fetch(I add_value, D volatile* dest) const;
|
||||
D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
|
||||
};
|
||||
|
||||
#ifdef AMD64
|
||||
template<>
|
||||
template<typename I, typename D>
|
||||
inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
|
||||
inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
|
||||
atomic_memory_order order) const {
|
||||
return add_using_helper<int32_t>(os::atomic_add_func, add_value, dest);
|
||||
}
|
||||
|
||||
template<>
|
||||
template<typename I, typename D>
|
||||
inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
|
||||
inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
|
||||
atomic_memory_order order) const {
|
||||
return add_using_helper<int64_t>(os::atomic_add_long_func, add_value, dest);
|
||||
}
|
||||
|
||||
@ -67,7 +69,8 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) co
  template<>                                                          \
  template<typename T>                                                \
  inline T Atomic::PlatformXchg<ByteSize>::operator()(T exchange_value, \
                                                      T volatile* dest) const { \
                                                      T volatile* dest, \
                                                      atomic_memory_order order) const { \
    STATIC_ASSERT(ByteSize == sizeof(T));                             \
    return xchg_using_helper<StubType>(StubName, exchange_value, dest); \
  }
@ -83,7 +86,7 @@ DEFINE_STUB_XCHG(8, int64_t, os::atomic_xchg_long_func)
  inline T Atomic::PlatformCmpxchg<ByteSize>::operator()(T exchange_value, \
                                                         T volatile* dest, \
                                                         T compare_value, \
                                                         cmpxchg_memory_order order) const { \
                                                         atomic_memory_order order) const { \
    STATIC_ASSERT(ByteSize == sizeof(T));                             \
    return cmpxchg_using_helper<StubType>(StubName, exchange_value, dest, compare_value); \
  }
@ -98,7 +101,8 @@ DEFINE_STUB_CMPXCHG(8, int64_t, os::atomic_cmpxchg_long_func)

template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
                                               atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(I));
  STATIC_ASSERT(4 == sizeof(D));
  __asm {
@ -113,7 +117,8 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) co
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
                                             T volatile* dest) const {
                                             T volatile* dest,
                                             atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
  // alternative for InterlockedExchange
  __asm {
@ -128,7 +133,7 @@ template<typename T>
inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
                                                atomic_memory_order order) const {
  STATIC_ASSERT(1 == sizeof(T));
  // alternative for InterlockedCompareExchange
  __asm {
@ -144,7 +149,7 @@ template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
                                                atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
  // alternative for InterlockedCompareExchange
  __asm {
@ -160,7 +165,7 @@ template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
                                                atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  int32_t ex_lo = (int32_t)exchange_value;
  int32_t ex_hi = *( ((int32_t*)&exchange_value) + 1 );
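For the 8-byte case on 32-bit x86 the operands do not fit in one register, so the code splits them into 32-bit halves to feed `lock cmpxchg8b`, which compares EDX:EAX against the 8-byte operand and, on a match, stores ECX:EBX into it. A sketch of the full split (the hunk above is truncated after `ex_hi`; the register comments reflect the IA-32 convention, not necessarily the exact committed text):

int32_t ex_lo  = (int32_t)exchange_value;               // -> EBX
int32_t ex_hi  = *( ((int32_t*)&exchange_value) + 1 );  // -> ECX
int32_t cmp_lo = (int32_t)compare_value;                // -> EAX
int32_t cmp_hi = *( ((int32_t*)&compare_value) + 1 );   // -> EDX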
@ -37,9 +37,14 @@
#include "utilities/align.hpp"
#include "utilities/macros.hpp"

enum cmpxchg_memory_order {
  memory_order_relaxed,
  // Use value which doesn't interfere with C++2011. We need to be more conservative.
enum atomic_memory_order {
  // The modes that align with C++11 are intended to
  // follow the same semantics.
  memory_order_relaxed = 0,
  memory_order_acquire = 2,
  memory_order_release = 3,
  memory_order_acq_rel = 4,
  // Strong two-way memory barrier.
  memory_order_conservative = 8
};
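The gaps at 1 and 5 match the usual C++11 numbering of memory_order_consume and memory_order_seq_cst, which HotSpot does not expose; memory_order_conservative deliberately sits outside that range. As a hedged sketch of how a port can act on the new argument, modeled on the PPC style of bracketing the atomic operation with barriers (not the committed platform code):

inline void pre_membar(atomic_memory_order order) {
  switch (order) {
    case memory_order_relaxed:
    case memory_order_acquire: break;                                       // nothing before
    case memory_order_release:
    case memory_order_acq_rel: __asm__ __volatile__("lwsync" ::: "memory"); break;
    default:                   __asm__ __volatile__("sync"   ::: "memory"); break;
  }
}

inline void post_membar(atomic_memory_order order) {
  switch (order) {
    case memory_order_relaxed:
    case memory_order_release: break;                                       // nothing after
    case memory_order_acquire:
    case memory_order_acq_rel: __asm__ __volatile__("isync"  ::: "memory"); break;
    default:                   __asm__ __volatile__("sync"   ::: "memory"); break;
  }
}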
@ -80,10 +85,12 @@ public:
  // <fence> add-value-to-dest <membar StoreLoad|StoreStore>

  template<typename I, typename D>
  inline static D add(I add_value, D volatile* dest);
  inline static D add(I add_value, D volatile* dest,
                      atomic_memory_order order = memory_order_conservative);

  template<typename I, typename D>
  inline static D sub(I sub_value, D volatile* dest);
  inline static D sub(I sub_value, D volatile* dest,
                      atomic_memory_order order = memory_order_conservative);

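Since `order` defaults to memory_order_conservative, every existing call site keeps its full-fence semantics; only callers that opt in get the weaker, cheaper orderings. For example (`_hits` is a hypothetical statistics counter):

static volatile size_t _hits = 0;                      // hypothetical
Atomic::add((size_t)1, &_hits, memory_order_relaxed);  // plain counter, no fences needed
Atomic::sub((size_t)1, &_hits);                        // unchanged callers stay fully fenced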
  // Atomically increment location. inc() provides:
  // <fence> increment-dest <membar StoreLoad|StoreStore>
@ -91,7 +98,8 @@ public:
  // type. If it is a pointer type, then the increment is
  // scaled to the size of the type pointed to by the pointer.
  template<typename D>
  inline static void inc(D volatile* dest);
  inline static void inc(D volatile* dest,
                         atomic_memory_order order = memory_order_conservative);

  // Atomically decrement a location. dec() provides:
  // <fence> decrement-dest <membar StoreLoad|StoreStore>
@ -99,7 +107,8 @@ public:
  // type. If it is a pointer type, then the decrement is
  // scaled to the size of the type pointed to by the pointer.
  template<typename D>
  inline static void dec(D volatile* dest);
  inline static void dec(D volatile* dest,
                         atomic_memory_order order = memory_order_conservative);

  // Performs atomic exchange of *dest with exchange_value. Returns the
  // prior value of *dest. xchg*() provides:
@ -108,7 +117,8 @@ public:
  // to D, an integral/enum type equal to D, or a type equal to D that
  // is primitive convertible using PrimitiveConversions.
  template<typename T, typename D>
  inline static D xchg(T exchange_value, volatile D* dest);
  inline static D xchg(T exchange_value, volatile D* dest,
                       atomic_memory_order order = memory_order_conservative);

  // Performs atomic compare of *dest and compare_value, and exchanges
  // *dest with exchange_value if the comparison succeeded. Returns prior
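A minimal usage sketch for the new xchg signature (Task and _pending are hypothetical):

struct Task;                            // hypothetical payload type
static Task* volatile _pending = NULL;

inline Task* publish(Task* t) {
  return Atomic::xchg(t, &_pending);    // install t, return the prior task (or NULL)
}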
@ -119,7 +129,7 @@ public:
  inline static D cmpxchg(T exchange_value,
                          D volatile* dest,
                          U compare_value,
                          cmpxchg_memory_order order = memory_order_conservative);
                          atomic_memory_order order = memory_order_conservative);

  // Performs atomic compare of *dest and NULL, and replaces *dest
  // with exchange_value if the comparison succeeded. Returns true if
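The usual pattern built on cmpxchg is a retry loop; a hedged sketch of a bounded increment (all names are illustrative):

static volatile int _users = 0;         // hypothetical

inline bool try_claim_slot(int max_users) {
  int cur = _users;
  while (cur < max_users) {
    int witnessed = Atomic::cmpxchg(cur + 1, &_users, cur);
    if (witnessed == cur) return true;  // we installed cur + 1
    cur = witnessed;                    // lost the race; retry from the observed value
  }
  return false;
}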
@ -128,7 +138,7 @@ public:
  // alternative to the Double-Checked Locking Pattern.
  template<typename T, typename D>
  inline static bool replace_if_null(T* value, D* volatile* dest,
                                     cmpxchg_memory_order order = memory_order_conservative);
                                     atomic_memory_order order = memory_order_conservative);

private:
  WINDOWS_ONLY(public:) // VS2017 warns (C2027) use of undefined type if IsPointerConvertible is declared private
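A sketch of the lazy-publication idiom replace_if_null supports (MyData is hypothetical; load_acquire pairs with the release semantics subsumed by the CAS's conservative default):

static MyData* volatile _cache = NULL;  // MyData is hypothetical

inline MyData* get_cache() {
  MyData* d = OrderAccess::load_acquire(&_cache);
  if (d == NULL) {
    d = new MyData();
    if (!Atomic::replace_if_null(d, &_cache)) {
      delete d;                                   // another thread won the race
      d = OrderAccess::load_acquire(&_cache);
    }
  }
  return d;
}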
@ -272,7 +282,7 @@ private:
  //
  // - dest is of type T*.
  // - exchange_value and compare_value are of type T.
  // - order is of type cmpxchg_memory_order.
  // - order is of type atomic_memory_order.
  // - platform_cmpxchg is an object of type PlatformCmpxchg<sizeof(T)>.
  //
  // Then
@ -280,7 +290,7 @@ private:
  // must be a valid expression, returning a result convertible to T.
  //
  // A default definition is provided, which declares a function template
  //   T operator()(T, T volatile*, T, cmpxchg_memory_order) const
  //   T operator()(T, T volatile*, T, atomic_memory_order) const
  //
  // For each required size, a platform must either provide an
  // appropriate definition of that function, or must entirely
@ -327,7 +337,7 @@ private:
  // must be a valid expression, returning a result convertible to T.
  //
  // A default definition is provided, which declares a function template
  //   T operator()(T, T volatile*, T, cmpxchg_memory_order) const
  //   T operator()(T, T volatile*, T, atomic_memory_order) const
  //
  // For each required size, a platform must either provide an
  // appropriate definition of that function, or must entirely
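Concretely, a platform satisfies this contract with a two-level specialization of the function call operator. A shape-only sketch using a gcc intrinsic as a stand-in (the __sync_ builtins are full barriers, so they satisfy any `order` conservatively; this is not a specific port's committed code):

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
}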
@ -488,33 +498,33 @@ struct Atomic::PlatformStore {
template<typename Derived>
struct Atomic::FetchAndAdd {
  template<typename I, typename D>
  D operator()(I add_value, D volatile* dest) const;
  D operator()(I add_value, D volatile* dest, atomic_memory_order order) const;
};

template<typename Derived>
struct Atomic::AddAndFetch {
  template<typename I, typename D>
  D operator()(I add_value, D volatile* dest) const;
  D operator()(I add_value, D volatile* dest, atomic_memory_order order) const;
};

template<typename D>
inline void Atomic::inc(D volatile* dest) {
inline void Atomic::inc(D volatile* dest, atomic_memory_order order) {
  STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
  typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
  Atomic::add(I(1), dest);
  Atomic::add(I(1), dest, order);
}

template<typename D>
inline void Atomic::dec(D volatile* dest) {
inline void Atomic::dec(D volatile* dest, atomic_memory_order order) {
  STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
  typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
  // Assumes two's complement integer representation.
  #pragma warning(suppress: 4146)
  Atomic::add(I(-1), dest);
  Atomic::add(I(-1), dest, order);
}

template<typename I, typename D>
inline D Atomic::sub(I sub_value, D volatile* dest) {
inline D Atomic::sub(I sub_value, D volatile* dest, atomic_memory_order order) {
  STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
  STATIC_ASSERT(IsIntegral<I>::value);
  // If D is a pointer type, use [u]intptr_t as the addend type,
@ -527,7 +537,7 @@ inline D Atomic::sub(I sub_value, D volatile* dest) {
  AddendType addend = sub_value;
  // Assumes two's complement integer representation.
  #pragma warning(suppress: 4146) // In case AddendType is not signed.
  return Atomic::add(-addend, dest);
  return Atomic::add(-addend, dest, order);
}

// Define the class before including platform file, which may specialize
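The negation trick relies on unsigned wrap-around; MSVC's warning C4146 (unary minus applied to an unsigned type) is suppressed because that wrap-around is exactly the intent. Illustrative arithmetic for a 64-bit target:

uintptr_t addend = 1;
uintptr_t neg = (uintptr_t)0 - addend;   // 0xFFFFFFFFFFFFFFFF, i.e. -1 mod 2^64
// Atomic::add(neg, dest, order) therefore decrements *dest by one.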
@ -541,7 +551,7 @@ struct Atomic::PlatformCmpxchg {
  T operator()(T exchange_value,
               T volatile* dest,
               T compare_value,
               cmpxchg_memory_order order) const;
               atomic_memory_order order) const;
};

// Define the class before including platform file, which may use this
@ -552,7 +562,7 @@ struct Atomic::CmpxchgByteUsingInt {
  T operator()(T exchange_value,
               T volatile* dest,
               T compare_value,
               cmpxchg_memory_order order) const;
               atomic_memory_order order) const;
};

// Define the class before including platform file, which may specialize
@ -564,7 +574,8 @@ template<size_t byte_size>
struct Atomic::PlatformXchg {
  template<typename T>
  T operator()(T exchange_value,
               T volatile* dest) const;
               T volatile* dest,
               atomic_memory_order order) const;
};

// platform specific in-line definitions - must come before shared definitions
@ -589,8 +600,9 @@ inline void Atomic::store(T store_value, volatile D* dest) {
}

template<typename I, typename D>
inline D Atomic::add(I add_value, D volatile* dest) {
  return AddImpl<I, D>()(add_value, dest);
inline D Atomic::add(I add_value, D volatile* dest,
                     atomic_memory_order order) {
  return AddImpl<I, D>()(add_value, dest, order);
}

template<typename I, typename D>
@ -601,9 +613,9 @@ struct Atomic::AddImpl<
                      (sizeof(I) <= sizeof(D)) &&
                      (IsSigned<I>::value == IsSigned<D>::value)>::type>
{
  D operator()(I add_value, D volatile* dest) const {
  D operator()(I add_value, D volatile* dest, atomic_memory_order order) const {
    D addend = add_value;
    return PlatformAdd<sizeof(D)>()(addend, dest);
    return PlatformAdd<sizeof(D)>()(addend, dest, order);
  }
};

@ -612,14 +624,14 @@ struct Atomic::AddImpl<
  I, P*,
  typename EnableIf<IsIntegral<I>::value && (sizeof(I) <= sizeof(P*))>::type>
{
  P* operator()(I add_value, P* volatile* dest) const {
  P* operator()(I add_value, P* volatile* dest, atomic_memory_order order) const {
    STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
    STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
    typedef typename Conditional<IsSigned<I>::value,
                                 intptr_t,
                                 uintptr_t>::type CI;
    CI addend = add_value;
    return PlatformAdd<sizeof(P*)>()(addend, dest);
    return PlatformAdd<sizeof(P*)>()(addend, dest, order);
  }
};

@ -634,13 +646,13 @@ struct Atomic::AddImpl<
// Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
template<>
struct Atomic::AddImpl<short, short> {
  short operator()(short add_value, short volatile* dest) const {
  short operator()(short add_value, short volatile* dest, atomic_memory_order order) const {
#ifdef VM_LITTLE_ENDIAN
    assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
    int new_value = Atomic::add(add_value << 16, (volatile int*)(dest-1));
    int new_value = Atomic::add(add_value << 16, (volatile int*)(dest-1), order);
#else
    assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
    int new_value = Atomic::add(add_value << 16, (volatile int*)(dest));
    int new_value = Atomic::add(add_value << 16, (volatile int*)(dest), order);
#endif
    return (short)(new_value >> 16); // preserves sign
  }
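The short is updated through the 32-bit word that contains it: the addend is shifted into the half occupied by the short, so the other half cannot be disturbed (the addend's low 16 bits are zero, so no carry can propagate into the low half, and carries out of bit 31 are discarded). Worked example:

int word = 0x0005CAFE;               // short 0x0005 packed above payload 0xCAFE
word += 3 << 16;                     // -> 0x0008CAFE; only the upper half changes
short result = (short)(word >> 16);  // == 8; arithmetic shift preserves the sign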
@ -648,24 +660,26 @@ struct Atomic::AddImpl<short, short> {

template<typename Derived>
template<typename I, typename D>
inline D Atomic::FetchAndAdd<Derived>::operator()(I add_value, D volatile* dest) const {
inline D Atomic::FetchAndAdd<Derived>::operator()(I add_value, D volatile* dest,
                                                  atomic_memory_order order) const {
  I addend = add_value;
  // If D is a pointer type P*, scale by sizeof(P).
  if (IsPointer<D>::value) {
    addend *= sizeof(typename RemovePointer<D>::type);
  }
  D old = static_cast<const Derived*>(this)->fetch_and_add(addend, dest);
  D old = static_cast<const Derived*>(this)->fetch_and_add(addend, dest, order);
  return old + add_value;
}

template<typename Derived>
template<typename I, typename D>
inline D Atomic::AddAndFetch<Derived>::operator()(I add_value, D volatile* dest) const {
inline D Atomic::AddAndFetch<Derived>::operator()(I add_value, D volatile* dest,
                                                  atomic_memory_order order) const {
  // If D is a pointer type P*, scale by sizeof(P).
  if (IsPointer<D>::value) {
    add_value *= sizeof(typename RemovePointer<D>::type);
  }
  return static_cast<const Derived*>(this)->add_and_fetch(add_value, dest);
  return static_cast<const Derived*>(this)->add_and_fetch(add_value, dest, order);
}

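Both mixins present add-and-fetch semantics to callers; FetchAndAdd adapts platforms whose primitive returns the old value by adding the unscaled add_value back on top. Note that when D is a pointer type the final `old + add_value` rescales automatically through pointer arithmetic. For example:

int buf[4] = {0, 0, 0, 0};
int* volatile cursor = buf;
int* next = Atomic::add(2, &cursor);   // the platform primitive sees a byte addend of
                                       // 2 * sizeof(int); next == buf + 2 either way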
template<typename Type, typename Fn, typename I, typename D>
@ -679,13 +693,13 @@ template<typename T, typename D, typename U>
inline D Atomic::cmpxchg(T exchange_value,
                         D volatile* dest,
                         U compare_value,
                         cmpxchg_memory_order order) {
                         atomic_memory_order order) {
  return CmpxchgImpl<T, D, U>()(exchange_value, dest, compare_value, order);
}

template<typename T, typename D>
inline bool Atomic::replace_if_null(T* value, D* volatile* dest,
                                    cmpxchg_memory_order order) {
                                    atomic_memory_order order) {
  // Presently using a trivial implementation in terms of cmpxchg.
  // Consider adding platform support, to permit the use of compiler
  // intrinsics like gcc's __sync_bool_compare_and_swap.
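The body is elided by this hunk; a plausible completion of that trivial implementation (a sketch, not necessarily the committed text):

  D* expected_null = NULL;
  return expected_null == cmpxchg(value, dest, expected_null, order);
}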
@ -702,7 +716,7 @@ struct Atomic::CmpxchgImpl<
  typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
{
  T operator()(T exchange_value, T volatile* dest, T compare_value,
               cmpxchg_memory_order order) const {
               atomic_memory_order order) const {
    // Forward to the platform handler for the size of T.
    return PlatformCmpxchg<sizeof(T)>()(exchange_value,
                                        dest,
@ -728,7 +742,7 @@ struct Atomic::CmpxchgImpl<
                    typename RemoveCV<U>::type>::value>::type>
{
  D* operator()(T* exchange_value, D* volatile* dest, U* compare_value,
                cmpxchg_memory_order order) const {
                atomic_memory_order order) const {
    // Allow derived to base conversion, and adding cv-qualifiers.
    D* new_value = exchange_value;
    // Don't care what the CV qualifiers for compare_value are,
@ -751,7 +765,7 @@ struct Atomic::CmpxchgImpl<
  typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
{
  T operator()(T exchange_value, T volatile* dest, T compare_value,
               cmpxchg_memory_order order) const {
               atomic_memory_order order) const {
    typedef PrimitiveConversions::Translate<T> Translator;
    typedef typename Translator::Decayed Decayed;
    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
@ -779,7 +793,7 @@ template<typename T>
inline T Atomic::CmpxchgByteUsingInt::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
                                                 cmpxchg_memory_order order) const {
                                                 atomic_memory_order order) const {
  STATIC_ASSERT(sizeof(T) == sizeof(uint8_t));
  uint8_t canon_exchange_value = exchange_value;
  uint8_t canon_compare_value = compare_value;
@ -821,9 +835,9 @@ struct Atomic::XchgImpl<
  T, T,
  typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
{
  T operator()(T exchange_value, T volatile* dest) const {
  T operator()(T exchange_value, T volatile* dest, atomic_memory_order order) const {
    // Forward to the platform handler for the size of T.
    return PlatformXchg<sizeof(T)>()(exchange_value, dest);
    return PlatformXchg<sizeof(T)>()(exchange_value, dest, order);
  }
};

@ -837,10 +851,10 @@ struct Atomic::XchgImpl<
  T*, D*,
  typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type>
{
  D* operator()(T* exchange_value, D* volatile* dest) const {
  D* operator()(T* exchange_value, D* volatile* dest, atomic_memory_order order) const {
    // Allow derived to base conversion, and adding cv-qualifiers.
    D* new_value = exchange_value;
    return PlatformXchg<sizeof(D*)>()(new_value, dest);
    return PlatformXchg<sizeof(D*)>()(new_value, dest, order);
  }
};

@ -856,13 +870,14 @@ struct Atomic::XchgImpl<
  T, T,
  typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
{
  T operator()(T exchange_value, T volatile* dest) const {
  T operator()(T exchange_value, T volatile* dest, atomic_memory_order order) const {
    typedef PrimitiveConversions::Translate<T> Translator;
    typedef typename Translator::Decayed Decayed;
    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
    return Translator::recover(
      xchg(Translator::decay(exchange_value),
           reinterpret_cast<Decayed volatile*>(dest)));
           reinterpret_cast<Decayed volatile*>(dest),
           order));
  }
};

@ -877,8 +892,8 @@ inline T Atomic::xchg_using_helper(Fn fn,
}

template<typename T, typename D>
inline D Atomic::xchg(T exchange_value, volatile D* dest) {
  return XchgImpl<T, D>()(exchange_value, dest);
inline D Atomic::xchg(T exchange_value, volatile D* dest, atomic_memory_order order) {
  return XchgImpl<T, D>()(exchange_value, dest, order);
}

#endif // SHARE_VM_RUNTIME_ATOMIC_HPP
@ -59,7 +59,8 @@ class GlobalCounter::CounterThreadCheck : public ThreadClosure {
void GlobalCounter::write_synchronize() {
  assert((*Thread::current()->get_rcu_counter() & COUNTER_ACTIVE) == 0x0, "must be outside a critical section");
  // Atomic::add must provide a fence since we have a storeload dependency.
  volatile uintx gbl_cnt = Atomic::add((uintx)COUNTER_INCREMENT, &_global_counter._counter);
  volatile uintx gbl_cnt = Atomic::add((uintx)COUNTER_INCREMENT, &_global_counter._counter,
                                       memory_order_conservative);
  // Do all RCU threads.
  CounterThreadCheck ctc(gbl_cnt);
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
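This call site illustrates why memory_order_conservative is worth spelling out: the increment's store must be ordered before the subsequent loads of each reader thread's counter (a store-load ordering), which acquire or release semantics alone would not provide. A hedged sketch of the constraint, not committed code:

// uintx gbl_cnt = Atomic::add(COUNTER_INCREMENT, &_counter, memory_order_conservative);
// ... full fence here, so the loop below cannot observe pre-increment reader state:
// uintx seen = thread->get_rcu_counter();   // must not be reordered above the add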