8234736: Harmonize parameter order in Atomic - store
Reviewed-by: rehn, dholmes
commit 8db2c1158e
parent e527ce4b57
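In short: Atomic::store now takes its destination pointer first and the value second, matching Atomic::load and reading like a plain "dest = value" assignment; release_store and release_store_fence already took the destination first, so this commit brings store and the platform functors in line and flips the template parameter order to match. A minimal caller-side sketch (the _flag field is hypothetical, for illustration only; the calls assume HotSpot's runtime/atomic.hpp as changed here):

    volatile bool _flag;                          // hypothetical field
    Atomic::store(true, &_flag);                  // old order: value first
    Atomic::store(&_flag, true);                  // new order: destination first
    Atomic::release_store(&_flag, true);          // ordered variants already had
    Atomic::release_store_fence(&_flag, false);   // this destination-first shape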
@@ -3264,7 +3264,7 @@ uint os::processor_id() {
   while (processor_id < 0) {
     if (Atomic::cmpxchg(-2, &mapping[apic_id], -1) == -1) {
-      Atomic::store(Atomic::add(1, &next_processor_id) - 1, &mapping[apic_id]);
+      Atomic::store(&mapping[apic_id], Atomic::add(1, &next_processor_id) - 1);
     }
     processor_id = Atomic::load(&mapping[apic_id]);
   }
@@ -161,8 +161,8 @@ inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {

 template<>
 template<typename T>
-inline void Atomic::PlatformStore<8>::operator()(T store_value,
-                                                 T volatile* dest) const {
+inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
+                                                 T store_value) const {
   STATIC_ASSERT(8 == sizeof(T));
   _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
 }
@@ -173,7 +173,7 @@ template<>
 struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm__ volatile ( "xchgb (%2),%0"
                        : "=q" (v)
                        : "0" (v), "r" (p)
@@ -185,7 +185,7 @@ template<>
 struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm__ volatile ( "xchgw (%2),%0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
@@ -197,7 +197,7 @@ template<>
 struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm__ volatile ( "xchgl (%2),%0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
@@ -210,7 +210,7 @@ template<>
 struct Atomic::PlatformOrderedStore<8, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm__ volatile ( "xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
@@ -276,8 +276,8 @@ inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {

 template<>
 template<typename T>
-inline void Atomic::PlatformStore<8>::operator()(T store_value,
-                                                 T volatile* dest) const {
+inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
+                                                 T store_value) const {
   STATIC_ASSERT(8 == sizeof(T));
   os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
 }
@@ -88,14 +88,14 @@ template<size_t byte_size>
 struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const { __atomic_store(const_cast<T*>(p), &v, __ATOMIC_RELEASE); }
+  void operator()(volatile T* p, T v) const { __atomic_store(const_cast<T*>(p), &v, __ATOMIC_RELEASE); }
 };

 template<size_t byte_size>
 struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const { release_store(p, v); OrderAccess::fence(); }
+  void operator()(volatile T* p, T v) const { release_store(p, v); OrderAccess::fence(); }
 };

 #endif // OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP
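For readers unfamiliar with the builtin used above: __atomic_store is the GCC/Clang generic atomic builtin, and the RELEASE_X specialization stores through the volatile-stripped destination with release ordering. A standalone sketch, assuming any GCC/Clang-compatible compiler (g_word and release_store_word are illustrative names, not HotSpot code):

    #include <cstdint>

    volatile int64_t g_word;  // illustrative global, not a HotSpot field

    void release_store_word(int64_t v) {
      // Same shape as PlatformOrderedStore<byte_size, RELEASE_X> above:
      // cast away volatile for the builtin, store v with release ordering.
      __atomic_store(const_cast<int64_t*>(&g_word), &v, __ATOMIC_RELEASE);
    }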
@@ -54,8 +54,8 @@ inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {

 template<>
 template<typename T>
-inline void Atomic::PlatformStore<8>::operator()(T store_value,
-                                                 T volatile* dest) const {
+inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
+                                                 T store_value) const {
   STATIC_ASSERT(8 == sizeof(T));
   (*os::atomic_store_long_func)(
     PrimitiveConversions::cast<int64_t>(store_value), reinterpret_cast<volatile int64_t*>(dest));
@@ -161,8 +161,8 @@ inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {

 template<>
 template<typename T>
-inline void Atomic::PlatformStore<8>::operator()(T store_value,
-                                                 T volatile* dest) const {
+inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
+                                                 T store_value) const {
   STATIC_ASSERT(8 == sizeof(T));
   _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
 }
@@ -173,7 +173,7 @@ template<>
 struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm__ volatile ( "xchgb (%2),%0"
                        : "=q" (v)
                        : "0" (v), "r" (p)
@@ -185,7 +185,7 @@ template<>
 struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm__ volatile ( "xchgw (%2),%0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
@@ -197,7 +197,7 @@ template<>
 struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm__ volatile ( "xchgl (%2),%0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
@@ -210,7 +210,7 @@ template<>
 struct Atomic::PlatformOrderedStore<8, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm__ volatile ( "xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
@@ -122,8 +122,8 @@ inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {

 template<>
 template<typename T>
-inline void Atomic::PlatformStore<8>::operator()(T store_value,
-                                                 T volatile* dest) const {
+inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
+                                                 T store_value) const {
   STATIC_ASSERT(8 == sizeof(T));
   os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
 }
@@ -213,8 +213,8 @@ inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {

 template<>
 template<typename T>
-inline void Atomic::PlatformStore<8>::operator()(T store_value,
-                                                 T volatile* dest) const {
+inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
+                                                 T store_value) const {
   STATIC_ASSERT(8 == sizeof(T));
   volatile T* src = &store_value;
   __asm {
@@ -234,7 +234,7 @@ template<>
 struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm {
       mov edx, p;
       mov al, v;
@@ -247,7 +247,7 @@ template<>
 struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm {
       mov edx, p;
       mov ax, v;
@@ -260,7 +260,7 @@ template<>
 struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE>
 {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     __asm {
       mov edx, p;
       mov eax, v;
@@ -189,8 +189,8 @@ void SymbolTable::delete_symbol(Symbol* sym) {
   }
 }

-void SymbolTable::reset_has_items_to_clean() { Atomic::store(false, &_has_items_to_clean); }
-void SymbolTable::mark_has_items_to_clean()  { Atomic::store(true, &_has_items_to_clean); }
+void SymbolTable::reset_has_items_to_clean() { Atomic::store(&_has_items_to_clean, false); }
+void SymbolTable::mark_has_items_to_clean()  { Atomic::store(&_has_items_to_clean, true); }
 bool SymbolTable::has_items_to_clean() { return Atomic::load(&_has_items_to_clean); }

 void SymbolTable::item_added() {
@@ -615,7 +615,7 @@ bool CompiledMethod::cleanup_inline_caches_impl(bool unloading_occurred, bool cl
     if (md != NULL && md->is_method()) {
       Method* method = static_cast<Method*>(md);
       if (!method->method_holder()->is_loader_alive()) {
-        Atomic::store((Method*)NULL, r->metadata_addr());
+        Atomic::store(r->metadata_addr(), (Method*)NULL);

         if (!r->metadata_is_immediate()) {
           r->fix_metadata_relocation();
@@ -300,7 +300,7 @@ nmethodBucket* DependencyContext::dependencies_not_unloading() {

 // Relaxed accessors
 void DependencyContext::set_dependencies(nmethodBucket* b) {
-  Atomic::store(b, _dependency_context_addr);
+  Atomic::store(_dependency_context_addr, b);
 }

 nmethodBucket* DependencyContext::dependencies() {
@@ -313,7 +313,7 @@ nmethodBucket* DependencyContext::dependencies() {
 void DependencyContext::cleaning_start() {
   assert(SafepointSynchronize::is_at_safepoint(), "must be");
   uint64_t epoch = ++_cleaning_epoch_monotonic;
-  Atomic::store(epoch, &_cleaning_epoch);
+  Atomic::store(&_cleaning_epoch, epoch);
 }

 // The epilogue marks the end of dependency context cleanup by the GC,
@@ -323,7 +323,7 @@ void DependencyContext::cleaning_start() {
 // was called. That allows dependency contexts to be cleaned concurrently.
 void DependencyContext::cleaning_end() {
   uint64_t epoch = 0;
-  Atomic::store(epoch, &_cleaning_epoch);
+  Atomic::store(&_cleaning_epoch, epoch);
 }

 // This function skips over nmethodBuckets in the list corresponding to
@@ -358,7 +358,7 @@ nmethodBucket* nmethodBucket::next() {
 }

 void nmethodBucket::set_next(nmethodBucket* b) {
-  Atomic::store(b, &_next);
+  Atomic::store(&_next, b);
 }

 nmethodBucket* nmethodBucket::purge_list_next() {
@@ -366,5 +366,5 @@ nmethodBucket* nmethodBucket::purge_list_next() {
 }

 void nmethodBucket::set_purge_list_next(nmethodBucket* b) {
-  Atomic::store(b, &_purge_list_next);
+  Atomic::store(&_purge_list_next, b);
 }
@@ -315,7 +315,7 @@ ExceptionCache* ExceptionCache::next() {
 }

 void ExceptionCache::set_next(ExceptionCache *ec) {
-  Atomic::store(ec, &_next);
+  Atomic::store(&_next, ec);
 }

 //-----------------------------------------------------------------------------
@@ -55,7 +55,7 @@ u_char G1BlockOffsetTable::offset_array(size_t index) const {
 }

 void G1BlockOffsetTable::set_offset_array_raw(size_t index, u_char offset) {
-  Atomic::store(offset, &_offset_array[index]);
+  Atomic::store(&_offset_array[index], offset);
 }

 void G1BlockOffsetTable::set_offset_array(size_t index, u_char offset) {
@@ -329,7 +329,7 @@ void SATBMarkQueueSet::print_all(const char* msg) {
 #endif // PRODUCT

 void SATBMarkQueueSet::abandon_completed_buffers() {
-  Atomic::store(size_t(0), &_count_and_process_flag);
+  Atomic::store(&_count_and_process_flag, size_t(0));
   BufferNode* buffers_to_delete = _list.pop_all();
   while (buffers_to_delete != NULL) {
     BufferNode* bn = buffers_to_delete;
@@ -305,7 +305,7 @@ void ShenandoahHeapRegion::make_committed_bypass() {
 }

 void ShenandoahHeapRegion::clear_live_data() {
-  Atomic::release_store_fence<size_t>(&_live_data, 0);
+  Atomic::release_store_fence(&_live_data, (size_t)0);
 }

 void ShenandoahHeapRegion::reset_alloc_metadata() {
|
@ -178,12 +178,12 @@ void ShenandoahPacer::setup_for_idle() {
|
||||
size_t ShenandoahPacer::update_and_get_progress_history() {
|
||||
if (_progress == -1) {
|
||||
// First initialization, report some prior
|
||||
Atomic::store((intptr_t)PACING_PROGRESS_ZERO, &_progress);
|
||||
Atomic::store(&_progress, (intptr_t)PACING_PROGRESS_ZERO);
|
||||
return (size_t) (_heap->max_capacity() * 0.1);
|
||||
} else {
|
||||
// Record history, and reply historical data
|
||||
_progress_history->add(_progress);
|
||||
Atomic::store((intptr_t)PACING_PROGRESS_ZERO, &_progress);
|
||||
Atomic::store(&_progress, (intptr_t)PACING_PROGRESS_ZERO);
|
||||
return (size_t) (_progress_history->avg() * HeapWordSize);
|
||||
}
|
||||
}
|
||||
@@ -192,7 +192,7 @@ void ShenandoahPacer::restart_with(size_t non_taxable_bytes, double tax_rate) {
   size_t initial = (size_t)(non_taxable_bytes * tax_rate) >> LogHeapWordSize;
   STATIC_ASSERT(sizeof(size_t) <= sizeof(intptr_t));
   Atomic::xchg((intptr_t)initial, &_budget);
-  Atomic::store(tax_rate, &_tax_rate);
+  Atomic::store(&_tax_rate, tax_rate);
   Atomic::inc(&_epoch);
 }
@@ -54,7 +54,7 @@ inline bool ZForwarding::is_pinned() const {
 }

 inline void ZForwarding::set_pinned() {
-  Atomic::store(true, &_pinned);
+  Atomic::store(&_pinned, true);
 }

 inline bool ZForwarding::inc_refcount() {
@@ -53,7 +53,7 @@ inline void ZReentrantLock::lock() {

   if (owner != thread) {
     _lock.lock();
-    Atomic::store(thread, &_owner);
+    Atomic::store(&_owner, thread);
   }

   _count++;
@@ -66,7 +66,7 @@ inline void ZReentrantLock::unlock() {
   _count--;

   if (_count == 0) {
-    Atomic::store((Thread*)NULL, &_owner);
+    Atomic::store(&_owner, (Thread*)NULL);
     _lock.unlock();
   }
 }
@@ -487,7 +487,7 @@ bool ZMark::try_terminate() {
       // Flush before termination
      if (!try_flush(&_work_nterminateflush)) {
        // No more work available, skip further flush attempts
-        Atomic::store(false, &_work_terminateflush);
+        Atomic::store(&_work_terminateflush, false);
      }

      // Don't terminate, regardless of whether we successfully
@@ -258,7 +258,7 @@ private:
   volatile bool _failed;

   void set_failed() {
-    Atomic::store(true, &_failed);
+    Atomic::store(&_failed, true);
   }

   void unlink(nmethod* nm) {
@@ -41,7 +41,7 @@ inline void inc_stat_counter(volatile julong* dest, julong add_value) {
   *dest += add_value;
 #else
   julong value = Atomic::load(dest);
-  Atomic::store(value + add_value, dest);
+  Atomic::store(dest, value + add_value);
 #endif
 }
 #endif
@@ -174,7 +174,7 @@ template <DecoratorSet ds, typename T>
 inline typename EnableIf<
   HasDecorator<ds, MO_RELAXED>::value>::type
 RawAccessBarrier<decorators>::store_internal(void* addr, T value) {
-  Atomic::store(value, reinterpret_cast<volatile T*>(addr));
+  Atomic::store(reinterpret_cast<volatile T*>(addr), value);
 }

 template <DecoratorSet decorators>
@@ -410,7 +410,7 @@ void Klass::set_next_sibling(Klass* s) {
   // Does not need release semantics. If used by cleanup, it will link to
   // already safely published data, and if used by inserts, will be published
   // safely using cmpxchg.
-  Atomic::store(s, &_next_sibling);
+  Atomic::store(&_next_sibling, s);
 }

 void Klass::append_to_sibling_list() {
@@ -2244,7 +2244,7 @@ public:
     _rtm_state = (int)rstate;
   }
   void atomic_set_rtm_state(RTMState rstate) {
-    Atomic::store((int)rstate, &_rtm_state);
+    Atomic::store(&_rtm_state, (int)rstate);
   }

   static int rtm_state_offset_in_bytes() {
@@ -61,7 +61,7 @@ void oopDesc::set_mark(markWord m) {
 }

 void oopDesc::set_mark_raw(markWord m) {
-  Atomic::store(m, &_mark);
+  Atomic::store(&_mark, m);
 }

 void oopDesc::set_mark_raw(HeapWord* mem, markWord m) {
@@ -3689,7 +3689,7 @@ void copy_jni_function_table(const struct JNINativeInterface_ *new_jni_NativeInt
   intptr_t *a = (intptr_t *) jni_functions();
   intptr_t *b = (intptr_t *) new_jni_NativeInterface;
   for (uint i=0; i < sizeof(struct JNINativeInterface_)/sizeof(void *); i++) {
-    Atomic::store(*b++, a++);
+    Atomic::store(a++, *b++);
   }
 }
@@ -79,13 +79,13 @@ public:
   // The type T must be either a pointer type convertible to or equal
   // to D, an integral/enum type equal to D, or a type equal to D that
   // is primitive convertible using PrimitiveConversions.
-  template<typename T, typename D>
-  inline static void store(T store_value, volatile D* dest);
+  template<typename D, typename T>
+  inline static void store(volatile D* dest, T store_value);

-  template <typename T, typename D>
+  template <typename D, typename T>
   inline static void release_store(volatile D* dest, T store_value);

-  template <typename T, typename D>
+  template <typename D, typename T>
   inline static void release_store_fence(volatile D* dest, T store_value);

   // Atomically load from a location
@@ -168,7 +168,7 @@ protected:
   // Dispatch handler for store. Provides type-based validity
   // checking and limited conversions around calls to the platform-
   // specific implementation layer provided by PlatformOp.
-  template<typename T, typename D, typename PlatformOp, typename Enable = void>
+  template<typename D, typename T, typename PlatformOp, typename Enable = void>
   struct StoreImpl;

   // Platform-specific implementation of store. Support for sizes
@@ -450,9 +450,9 @@ struct Atomic::StoreImpl<
   PlatformOp,
   typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
 {
-  void operator()(T new_value, T volatile* dest) const {
+  void operator()(T volatile* dest, T new_value) const {
     // Forward to the platform handler for the size of T.
-    PlatformOp()(new_value, dest);
+    PlatformOp()(dest, new_value);
   }
 };
@@ -461,16 +461,16 @@ struct Atomic::StoreImpl<
 // The new_value must be implicitly convertible to the
 // destination's type; it must be type-correct to store the
 // new_value in the destination.
-template<typename T, typename D, typename PlatformOp>
+template<typename D, typename T, typename PlatformOp>
 struct Atomic::StoreImpl<
-  T*, D*,
+  D*, T*,
   PlatformOp,
   typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type>
 {
-  void operator()(T* new_value, D* volatile* dest) const {
+  void operator()(D* volatile* dest, T* new_value) const {
     // Allow derived to base conversion, and adding cv-qualifiers.
     D* value = new_value;
-    PlatformOp()(value, dest);
+    PlatformOp()(dest, value);
   }
 };
@@ -486,12 +486,12 @@ struct Atomic::StoreImpl<
   PlatformOp,
   typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
 {
-  void operator()(T new_value, T volatile* dest) const {
+  void operator()(T volatile* dest, T new_value) const {
     typedef PrimitiveConversions::Translate<T> Translator;
     typedef typename Translator::Decayed Decayed;
     STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
-    PlatformOp()(Translator::decay(new_value),
-                 reinterpret_cast<Decayed volatile*>(dest));
+    PlatformOp()(reinterpret_cast<Decayed volatile*>(dest),
+                 Translator::decay(new_value));
   }
 };
@@ -504,8 +504,8 @@ struct Atomic::StoreImpl<
 template<size_t byte_size>
 struct Atomic::PlatformStore {
   template<typename T>
-  void operator()(T new_value,
-                  T volatile* dest) const {
+  void operator()(T volatile* dest,
+                  T new_value) const {
     STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization
     (void)const_cast<T&>(*dest = new_value);
   }
@@ -654,28 +654,28 @@ inline T Atomic::load_acquire(const volatile T* p) {
   return LoadImpl<T, PlatformOrderedLoad<sizeof(T), X_ACQUIRE> >()(p);
 }

-template<typename T, typename D>
-inline void Atomic::store(T store_value, volatile D* dest) {
-  StoreImpl<T, D, PlatformStore<sizeof(D)> >()(store_value, dest);
+template<typename D, typename T>
+inline void Atomic::store(volatile D* dest, T store_value) {
+  StoreImpl<D, T, PlatformStore<sizeof(D)> >()(dest, store_value);
 }

 template<size_t byte_size, ScopedFenceType type>
 struct Atomic::PlatformOrderedStore {
   template <typename T>
-  void operator()(T v, volatile T* p) const {
+  void operator()(volatile T* p, T v) const {
     ScopedFence<type> f((void*)p);
-    Atomic::store(v, p);
+    Atomic::store(p, v);
   }
 };

-template <typename T, typename D>
+template <typename D, typename T>
 inline void Atomic::release_store(volatile D* p, T v) {
-  StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X> >()(v, p);
+  StoreImpl<D, T, PlatformOrderedStore<sizeof(D), RELEASE_X> >()(p, v);
 }

-template <typename T, typename D>
+template <typename D, typename T>
 inline void Atomic::release_store_fence(volatile D* p, T v) {
-  StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X_FENCE> >()(v, p);
+  StoreImpl<D, T, PlatformOrderedStore<sizeof(D), RELEASE_X_FENCE> >()(p, v);
 }

 template<typename I, typename D>
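To see why the StoreImpl and PlatformStore template parameters flip together with the call sites, here is a compilable toy model of the dispatch chain (the Model names are invented for illustration; HotSpot's real dispatch adds the conversion and validity checks shown above):

    #include <cstddef>

    // Toy stand-in for Atomic::PlatformStore<byte_size>.
    template<size_t byte_size>
    struct PlatformStoreModel {
      template<typename T>
      void operator()(T volatile* dest, T new_value) const {
        *dest = new_value;  // real platforms may need special wide-store code
      }
    };

    // Toy stand-in for Atomic::store: destination first, value second,
    // dispatched on sizeof(D) exactly like the code above.
    template<typename D, typename T>
    inline void store_model(volatile D* dest, T store_value) {
      PlatformStoreModel<sizeof(D)>()(dest, static_cast<D>(store_value));
    }

    int main() {
      volatile int x = 0;
      store_model(&x, 42);  // reads like "x = 42"
      return 0;
    }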
|
@ -40,7 +40,7 @@ class BasicLock {
|
||||
}
|
||||
|
||||
void set_displaced_header(markWord header) {
|
||||
Atomic::store(header, &_displaced_header);
|
||||
Atomic::store(&_displaced_header, header);
|
||||
}
|
||||
|
||||
void print_on(outputStream* st) const;
|
||||
|
@@ -44,7 +44,7 @@ inline volatile markWord* ObjectMonitor::header_addr() {
 }

 inline void ObjectMonitor::set_header(markWord hdr) {
-  Atomic::store(hdr, &_header);
+  Atomic::store(&_header, hdr);
 }

 inline jint ObjectMonitor::waiters() const {
@@ -63,7 +63,7 @@ inline void ObjectMonitor::clear() {
   assert(_object != NULL, "must be non-NULL");
   assert(_owner == NULL, "must be NULL: owner=" INTPTR_FORMAT, p2i(_owner));

-  Atomic::store(markWord::zero(), &_header);
+  Atomic::store(&_header, markWord::zero());
   _object = NULL;
 }
@@ -86,7 +86,7 @@ class AttachListener: AllStatic {

 public:
   static void set_state(AttachListenerState new_state) {
-    Atomic::store(new_state, &_state);
+    Atomic::store(&_state, new_state);
   }

   static AttachListenerState get_state() {
@@ -103,7 +103,7 @@ class AttachListener: AllStatic {
   }

   static void set_initialized() {
-    Atomic::store(AL_INITIALIZED, &_state);
+    Atomic::store(&_state, AL_INITIALIZED);
   }

   // indicates if this VM supports attach-on-demand
@@ -170,7 +170,7 @@ public:
   // if value is in an instance of this specialization of LockFreeStack,
   // there must be no concurrent push or pop operations on that stack.
   static void set_next(T& value, T* new_next) {
-    Atomic::store(new_next, next_ptr(value));
+    Atomic::store(next_ptr(value), new_next);
   }
 };
@@ -399,7 +399,7 @@ jlong VMError::get_current_timestamp() {

 void VMError::record_reporting_start_time() {
   const jlong now = get_current_timestamp();
-  Atomic::store(now, &_reporting_start_time);
+  Atomic::store(&_reporting_start_time, now);
 }

 jlong VMError::get_reporting_start_time() {
@@ -408,7 +408,7 @@ jlong VMError::get_reporting_start_time() {

 void VMError::record_step_start_time() {
   const jlong now = get_current_timestamp();
-  Atomic::store(now, &_step_start_time);
+  Atomic::store(&_step_start_time, now);
 }

 jlong VMError::get_step_start_time() {
@@ -416,7 +416,7 @@ jlong VMError::get_step_start_time() {
 }

 void VMError::clear_step_start_time() {
-  return Atomic::store((jlong)0, &_step_start_time);
+  return Atomic::store(&_step_start_time, (jlong)0);
 }

 void VMError::report(outputStream* st, bool _verbose) {