8188813: Generalize OrderAccess to use templates

Reviewed-by: dholmes, coleenp
Erik Österlund 2017-10-09 14:39:59 +02:00
parent 21ee7f4b2a
commit bf5816a2c5
21 changed files with 376 additions and 433 deletions

orderAccess_aix_ppc.inline.hpp

@@ -78,16 +78,17 @@ inline void OrderAccess::acquire() { inlasm_lwsync(); }
inline void OrderAccess::release() { inlasm_lwsync(); }
inline void OrderAccess::fence() { inlasm_sync(); }
template<> inline jbyte OrderAccess::specialized_load_acquire<jbyte> (const volatile jbyte* p) { register jbyte t = load(p); inlasm_acquire_reg(t); return t; }
template<> inline jshort OrderAccess::specialized_load_acquire<jshort>(const volatile jshort* p) { register jshort t = load(p); inlasm_acquire_reg(t); return t; }
template<> inline jint OrderAccess::specialized_load_acquire<jint> (const volatile jint* p) { register jint t = load(p); inlasm_acquire_reg(t); return t; }
template<> inline jlong OrderAccess::specialized_load_acquire<jlong> (const volatile jlong* p) { register jlong t = load(p); inlasm_acquire_reg(t); return t; }
template<size_t byte_size>
struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
VALUE_OBJ_CLASS_SPEC
{
template <typename T>
T operator()(const volatile T* p) const { register T t = Atomic::load(p); inlasm_acquire_reg(t); return t; }
};
#undef inlasm_sync
#undef inlasm_lwsync
#undef inlasm_eieio
#undef inlasm_isync
#define VM_HAS_GENERALIZED_ORDER_ACCESS 1
#endif // OS_CPU_AIX_OJDKPPC_VM_ORDERACCESS_AIX_PPC_INLINE_HPP
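The pattern above recurs in every port below, so it is worth spelling out once: specialization is now keyed on access size and fence kind, while the member operator() template preserves the concrete type, so one specialization covers jbyte and jubyte, or jint, juint and jfloat, without per-type overloads. A minimal standalone sketch of the idiom (illustrative only, not HotSpot code; the inline asm is a compiler-barrier placeholder for a real fence):

#include <cstddef>
#include <cstdio>

enum ScopedFenceType { X_ACQUIRE, RELEASE_X, RELEASE_X_FENCE };

// Primary template: a generic fallback for sizes the platform does not
// specialize.
template<std::size_t byte_size, ScopedFenceType type>
struct PlatformOrderedLoad {
  template<typename T>
  T operator()(const volatile T* p) const {
    T t = *p;
    __asm__ volatile("" ::: "memory"); // placeholder barrier
    return t;
  }
};

// A port overrides one (size, fence) pair; the member template keeps T.
template<>
struct PlatformOrderedLoad<4, X_ACQUIRE> {
  template<typename T>
  T operator()(const volatile T* p) const {
    T t = *p;
    __asm__ volatile("" ::: "memory"); // a real port would emit e.g. lwsync
    return t;
  }
};

int main() {
  volatile int x = 42;
  int v = PlatformOrderedLoad<sizeof(int), X_ACQUIRE>()(&x);
  std::printf("%d\n", v);
  return 0;
}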

orderAccess_bsd_x86.inline.hpp

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -64,46 +64,57 @@ inline void OrderAccess::fence() {
}
template<>
inline void OrderAccess::specialized_release_store_fence<jbyte> (volatile jbyte* p, jbyte v) {
__asm__ volatile ( "xchgb (%2),%0"
: "=q" (v)
: "0" (v), "r" (p)
: "memory");
}
struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
VALUE_OBJ_CLASS_SPEC
{
template <typename T>
void operator()(T v, volatile T* p) const {
__asm__ volatile ( "xchgb (%2),%0"
: "=q" (v)
: "0" (v), "r" (p)
: "memory");
}
};
template<>
inline void OrderAccess::specialized_release_store_fence<jshort>(volatile jshort* p, jshort v) {
__asm__ volatile ( "xchgw (%2),%0"
: "=r" (v)
: "0" (v), "r" (p)
: "memory");
}
struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
VALUE_OBJ_CLASS_SPEC
{
template <typename T>
void operator()(T v, volatile T* p) const {
__asm__ volatile ( "xchgw (%2),%0"
: "=r" (v)
: "0" (v), "r" (p)
: "memory");
}
};
template<>
inline void OrderAccess::specialized_release_store_fence<jint> (volatile jint* p, jint v) {
__asm__ volatile ( "xchgl (%2),%0"
: "=r" (v)
: "0" (v), "r" (p)
: "memory");
}
struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
VALUE_OBJ_CLASS_SPEC
{
template <typename T>
void operator()(T v, volatile T* p) const {
__asm__ volatile ( "xchgl (%2),%0"
: "=r" (v)
: "0" (v), "r" (p)
: "memory");
}
};
#ifdef AMD64
template<>
inline void OrderAccess::specialized_release_store_fence<jlong> (volatile jlong* p, jlong v) {
__asm__ volatile ( "xchgq (%2), %0"
: "=r" (v)
: "0" (v), "r" (p)
: "memory");
}
struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
VALUE_OBJ_CLASS_SPEC
{
template <typename T>
void operator()(T v, volatile T* p) const {
__asm__ volatile ( "xchgq (%2), %0"
: "=r" (v)
: "0" (v), "r" (p)
: "memory");
}
};
#endif // AMD64
template<>
inline void OrderAccess::specialized_release_store_fence<jfloat> (volatile jfloat* p, jfloat v) {
release_store_fence((volatile jint*)p, jint_cast(v));
}
template<>
inline void OrderAccess::specialized_release_store_fence<jdouble>(volatile jdouble* p, jdouble v) {
release_store_fence((volatile jlong*)p, jlong_cast(v));
}
#define VM_HAS_GENERALIZED_ORDER_ACCESS 1
#endif // OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_INLINE_HPP
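Note that the jfloat and jdouble specializations above are deleted without x86 replacements: floating-point values now reach the size-keyed functors after being bit-cast to same-size integers via the PrimitiveConversions::Translate specializations added later in this commit. A hedged sketch of that bit-cast, using memcpy in place of HotSpot's cast machinery:

#include <cstdint>
#include <cstring>

// Illustrative stand-ins for the decay/recover pair; memcpy is the portable
// way to reinterpret an object representation.
static inline int32_t float_to_bits(float x) {
  int32_t bits;
  std::memcpy(&bits, &x, sizeof bits);
  return bits;
}

static inline float bits_to_float(int32_t bits) {
  float x;
  std::memcpy(&x, &bits, sizeof x);
  return x;
}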

orderAccess_bsd_zero.inline.hpp

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -74,6 +74,4 @@ inline void OrderAccess::acquire() { LIGHT_MEM_BARRIER; }
inline void OrderAccess::release() { LIGHT_MEM_BARRIER; }
inline void OrderAccess::fence() { FULL_MEM_BARRIER; }
#define VM_HAS_GENERALIZED_ORDER_ACCESS 1
#endif // OS_CPU_BSD_ZERO_VM_ORDERACCESS_BSD_ZERO_INLINE_HPP

orderAccess_linux_aarch64.inline.hpp

@@ -50,93 +50,28 @@ inline void OrderAccess::fence() {
FULL_MEM_BARRIER;
}
inline jbyte OrderAccess::load_acquire(const volatile jbyte* p)
{ jbyte data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
inline jshort OrderAccess::load_acquire(const volatile jshort* p)
{ jshort data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
inline jint OrderAccess::load_acquire(const volatile jint* p)
{ jint data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
inline jlong OrderAccess::load_acquire(const volatile jlong* p)
{ jlong data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
inline jubyte OrderAccess::load_acquire(const volatile jubyte* p)
{ jubyte data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
inline jushort OrderAccess::load_acquire(const volatile jushort* p)
{ jushort data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
inline juint OrderAccess::load_acquire(const volatile juint* p)
{ juint data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
inline julong OrderAccess::load_acquire(const volatile julong* p)
{ julong data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
inline jfloat OrderAccess::load_acquire(const volatile jfloat* p)
{ jfloat data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
inline jdouble OrderAccess::load_acquire(const volatile jdouble* p)
{ jdouble data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
inline intptr_t OrderAccess::load_ptr_acquire(const volatile intptr_t* p)
{ intptr_t data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
inline void* OrderAccess::load_ptr_acquire(const volatile void* p)
{ void* data; __atomic_load((void* const volatile *)p, &data, __ATOMIC_ACQUIRE); return data; }
template<size_t byte_size>
struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
VALUE_OBJ_CLASS_SPEC
{
template <typename T>
T operator()(const volatile T* p) const { T data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
};
inline void OrderAccess::release_store(volatile jbyte* p, jbyte v)
{ __atomic_store(p, &v, __ATOMIC_RELEASE); }
inline void OrderAccess::release_store(volatile jshort* p, jshort v)
{ __atomic_store(p, &v, __ATOMIC_RELEASE); }
inline void OrderAccess::release_store(volatile jint* p, jint v)
{ __atomic_store(p, &v, __ATOMIC_RELEASE); }
inline void OrderAccess::release_store(volatile jlong* p, jlong v)
{ __atomic_store(p, &v, __ATOMIC_RELEASE); }
inline void OrderAccess::release_store(volatile jubyte* p, jubyte v)
{ __atomic_store(p, &v, __ATOMIC_RELEASE); }
inline void OrderAccess::release_store(volatile jushort* p, jushort v)
{ __atomic_store(p, &v, __ATOMIC_RELEASE); }
inline void OrderAccess::release_store(volatile juint* p, juint v)
{ __atomic_store(p, &v, __ATOMIC_RELEASE); }
inline void OrderAccess::release_store(volatile julong* p, julong v)
{ __atomic_store(p, &v, __ATOMIC_RELEASE); }
inline void OrderAccess::release_store(volatile jfloat* p, jfloat v)
{ __atomic_store(p, &v, __ATOMIC_RELEASE); }
inline void OrderAccess::release_store(volatile jdouble* p, jdouble v)
{ __atomic_store(p, &v, __ATOMIC_RELEASE); }
inline void OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v)
{ __atomic_store(p, &v, __ATOMIC_RELEASE); }
inline void OrderAccess::release_store_ptr(volatile void* p, void* v)
{ __atomic_store((void* volatile *)p, &v, __ATOMIC_RELEASE); }
template<size_t byte_size>
struct OrderAccess::PlatformOrderedStore<byte_size, RELEASE_X>
VALUE_OBJ_CLASS_SPEC
{
template <typename T>
void operator()(T v, volatile T* p) const { __atomic_store(p, &v, __ATOMIC_RELEASE); }
};
inline void OrderAccess::store_fence(jbyte* p, jbyte v)
{ __atomic_store(p, &v, __ATOMIC_RELAXED); fence(); }
inline void OrderAccess::store_fence(jshort* p, jshort v)
{ __atomic_store(p, &v, __ATOMIC_RELAXED); fence(); }
inline void OrderAccess::store_fence(jint* p, jint v)
{ __atomic_store(p, &v, __ATOMIC_RELAXED); fence(); }
inline void OrderAccess::store_fence(jlong* p, jlong v)
{ __atomic_store(p, &v, __ATOMIC_RELAXED); fence(); }
inline void OrderAccess::store_fence(jubyte* p, jubyte v)
{ __atomic_store(p, &v, __ATOMIC_RELAXED); fence(); }
inline void OrderAccess::store_fence(jushort* p, jushort v)
{ __atomic_store(p, &v, __ATOMIC_RELAXED); fence(); }
inline void OrderAccess::store_fence(juint* p, juint v)
{ __atomic_store(p, &v, __ATOMIC_RELAXED); fence(); }
inline void OrderAccess::store_fence(julong* p, julong v)
{ __atomic_store(p, &v, __ATOMIC_RELAXED); fence(); }
inline void OrderAccess::store_fence(jfloat* p, jfloat v)
{ __atomic_store(p, &v, __ATOMIC_RELAXED); fence(); }
inline void OrderAccess::store_fence(jdouble* p, jdouble v)
{ __atomic_store(p, &v, __ATOMIC_RELAXED); fence(); }
inline void OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v)
{ __atomic_store(p, &v, __ATOMIC_RELAXED); fence(); }
inline void OrderAccess::store_ptr_fence(void** p, void* v)
{ __atomic_store(p, &v, __ATOMIC_RELAXED); fence(); }
inline void OrderAccess::release_store_fence(volatile jbyte* p, jbyte v) { release_store(p, v); fence(); }
inline void OrderAccess::release_store_fence(volatile jshort* p, jshort v) { release_store(p, v); fence(); }
inline void OrderAccess::release_store_fence(volatile jint* p, jint v) { release_store(p, v); fence(); }
inline void OrderAccess::release_store_fence(volatile jlong* p, jlong v) { release_store(p, v); fence(); }
inline void OrderAccess::release_store_fence(volatile jubyte* p, jubyte v) { release_store(p, v); fence(); }
inline void OrderAccess::release_store_fence(volatile jushort* p, jushort v) { release_store(p, v); fence(); }
inline void OrderAccess::release_store_fence(volatile juint* p, juint v) { release_store(p, v); fence(); }
inline void OrderAccess::release_store_fence(volatile julong* p, julong v) { release_store(p, v); fence(); }
inline void OrderAccess::release_store_fence(volatile jfloat* p, jfloat v) { release_store(p, v); fence(); }
inline void OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { release_store(p, v); fence(); }
inline void OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) { release_store_ptr(p, v); fence(); }
inline void OrderAccess::release_store_ptr_fence(volatile void* p, void* v) { release_store_ptr(p, v); fence(); }
template<size_t byte_size>
struct OrderAccess::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
VALUE_OBJ_CLASS_SPEC
{
template <typename T>
void operator()(T v, volatile T* p) const { release_store(p, v); fence(); }
};
#endif // OS_CPU_LINUX_AARCH64_VM_ORDERACCESS_LINUX_AARCH64_INLINE_HPP
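The aarch64 port shrinks the most because the GCC __atomic builtins it relies on are already type-generic: the ten deleted overloads per operation differ only in their declared type. A sketch of the single template each group collapses into (assumes a GCC-compatible compiler; these builtins accept volatile-qualified pointers and deduce the access size from T):

template<typename T>
T acquire_load(const volatile T* p) {
  T data;
  __atomic_load(p, &data, __ATOMIC_ACQUIRE); // acquire-ordered load
  return data;
}

template<typename T>
void release_store(volatile T* p, T v) {
  __atomic_store(p, &v, __ATOMIC_RELEASE);   // release-ordered store
}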

orderAccess_linux_arm.inline.hpp

@@ -33,7 +33,6 @@
// - we define the high level barriers below and use the general
// implementation in orderAccess.inline.hpp, with customizations
// on AARCH64 via the specialized_* template functions
#define VM_HAS_GENERALIZED_ORDER_ACCESS 1
// Memory Ordering on ARM is weak.
//
@@ -131,91 +130,126 @@ inline void OrderAccess::fence() { dmb_sy(); }
#ifdef AARCH64
template<> inline jbyte OrderAccess::specialized_load_acquire<jbyte>(const volatile jbyte* p) {
volatile jbyte result;
__asm__ volatile(
"ldarb %w[res], [%[ptr]]"
: [res] "=&r" (result)
: [ptr] "r" (p)
: "memory");
return result;
}
template<>
struct OrderAccess::PlatformOrderedLoad<1, X_ACQUIRE>
VALUE_OBJ_CLASS_SPEC
{
template <typename T>
T operator()(const volatile T* p) const {
volatile T result;
__asm__ volatile(
"ldarb %w[res], [%[ptr]]"
: [res] "=&r" (result)
: [ptr] "r" (p)
: "memory");
return result;
}
};
template<> inline jshort OrderAccess::specialized_load_acquire<jshort>(const volatile jshort* p) {
volatile jshort result;
__asm__ volatile(
"ldarh %w[res], [%[ptr]]"
: [res] "=&r" (result)
: [ptr] "r" (p)
: "memory");
return result;
}
template<>
struct OrderAccess::PlatformOrderedLoad<2, X_ACQUIRE>
VALUE_OBJ_CLASS_SPEC
{
template <typename T>
T operator()(const volatile T* p) const {
volatile T result;
__asm__ volatile(
"ldarh %w[res], [%[ptr]]"
: [res] "=&r" (result)
: [ptr] "r" (p)
: "memory");
return result;
}
};
template<> inline jint OrderAccess::specialized_load_acquire<jint>(const volatile jint* p) {
volatile jint result;
__asm__ volatile(
"ldar %w[res], [%[ptr]]"
: [res] "=&r" (result)
: [ptr] "r" (p)
: "memory");
return result;
}
template<>
struct OrderAccess::PlatformOrderedLoad<4, X_ACQUIRE>
VALUE_OBJ_CLASS_SPEC
{
template <typename T>
T operator()(const volatile T* p) const {
volatile T result;
__asm__ volatile(
"ldar %w[res], [%[ptr]]"
: [res] "=&r" (result)
: [ptr] "r" (p)
: "memory");
return result;
}
};
template<> inline jfloat OrderAccess::specialized_load_acquire<jfloat>(const volatile jfloat* p) {
return jfloat_cast(specialized_load_acquire((const volatile jint*)p));
}
template<>
struct OrderAccess::PlatformOrderedLoad<8, X_ACQUIRE>
VALUE_OBJ_CLASS_SPEC
{
template <typename T>
T operator()(const volatile T* p) const {
volatile T result;
__asm__ volatile(
"ldar %[res], [%[ptr]]"
: [res] "=&r" (result)
: [ptr] "r" (p)
: "memory");
return result;
}
};
// This is implicit as jlong and intptr_t are both "long int"
//template<> inline jlong OrderAccess::specialized_load_acquire(const volatile jlong* p) {
// return (volatile jlong)specialized_load_acquire((const volatile intptr_t*)p);
//}
template<>
struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
VALUE_OBJ_CLASS_SPEC
{
template <typename T>
void operator()(T v, volatile T* p) const {
__asm__ volatile(
"stlrb %w[val], [%[ptr]]"
:
: [ptr] "r" (p), [val] "r" (v)
: "memory");
}
};
template<> inline intptr_t OrderAccess::specialized_load_acquire<intptr_t>(const volatile intptr_t* p) {
volatile intptr_t result;
__asm__ volatile(
"ldar %[res], [%[ptr]]"
: [res] "=&r" (result)
: [ptr] "r" (p)
: "memory");
return result;
}
template<>
struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
VALUE_OBJ_CLASS_SPEC
{
template <typename T>
void operator()(T v, volatile T* p) const {
__asm__ volatile(
"stlrh %w[val], [%[ptr]]"
:
: [ptr] "r" (p), [val] "r" (v)
: "memory");
}
};
template<> inline jdouble OrderAccess::specialized_load_acquire<jdouble>(const volatile jdouble* p) {
return jdouble_cast(specialized_load_acquire((const volatile intptr_t*)p));
}
template<>
struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
VALUE_OBJ_CLASS_SPEC
{
template <typename T>
void operator()(T v, volatile T* p) const {
__asm__ volatile(
"stlr %w[val], [%[ptr]]"
:
: [ptr] "r" (p), [val] "r" (v)
: "memory");
}
};
template<>
struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
VALUE_OBJ_CLASS_SPEC
{
template <typename T>
void operator()(T v, volatile T* p) const {
__asm__ volatile(
"stlr %[val], [%[ptr]]"
:
: [ptr] "r" (p), [val] "r" (v)
: "memory");
}
};
template<> inline void OrderAccess::specialized_release_store<jbyte>(volatile jbyte* p, jbyte v) {
__asm__ volatile(
"stlrb %w[val], [%[ptr]]"
:
: [ptr] "r" (p), [val] "r" (v)
: "memory");
}
template<> inline void OrderAccess::specialized_release_store<jshort>(volatile jshort* p, jshort v) {
__asm__ volatile(
"stlrh %w[val], [%[ptr]]"
:
: [ptr] "r" (p), [val] "r" (v)
: "memory");
}
template<> inline void OrderAccess::specialized_release_store<jint>(volatile jint* p, jint v) {
__asm__ volatile(
"stlr %w[val], [%[ptr]]"
:
: [ptr] "r" (p), [val] "r" (v)
: "memory");
}
template<> inline void OrderAccess::specialized_release_store<jlong>(volatile jlong* p, jlong v) {
__asm__ volatile(
"stlr %[val], [%[ptr]]"
:
: [ptr] "r" (p), [val] "r" (v)
: "memory");
}
#endif // AARCH64
#endif // OS_CPU_LINUX_ARM_VM_ORDERACCESS_LINUX_ARM_INLINE_HPP
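For reference, the AArch64 mnemonics above map directly onto C++11 memory orders: ldarb/ldarh/ldar are acquire loads of 1, 2, and 4 or 8 bytes (the %w operand prefix selects the 32-bit register view), and stlrb/stlrh/stlr are the matching release stores. A portable reference sketch, shown only for orientation, not as what HotSpot compiles:

#include <atomic>

int acquire_load(const std::atomic<int>* p) {
  return p->load(std::memory_order_acquire); // typically lowered to ldar
}

void release_store(std::atomic<int>* p, int v) {
  p->store(v, std::memory_order_release);    // typically lowered to stlr
}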

orderAccess_linux_ppc.inline.hpp

@@ -80,10 +80,14 @@ inline void OrderAccess::acquire() { inlasm_lwsync(); }
inline void OrderAccess::release() { inlasm_lwsync(); }
inline void OrderAccess::fence() { inlasm_sync(); }
template<> inline jbyte OrderAccess::specialized_load_acquire<jbyte> (const volatile jbyte* p) { register jbyte t = load(p); inlasm_acquire_reg(t); return t; }
template<> inline jshort OrderAccess::specialized_load_acquire<jshort>(const volatile jshort* p) { register jshort t = load(p); inlasm_acquire_reg(t); return t; }
template<> inline jint OrderAccess::specialized_load_acquire<jint> (const volatile jint* p) { register jint t = load(p); inlasm_acquire_reg(t); return t; }
template<> inline jlong OrderAccess::specialized_load_acquire<jlong> (const volatile jlong* p) { register jlong t = load(p); inlasm_acquire_reg(t); return t; }
template<size_t byte_size>
struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
VALUE_OBJ_CLASS_SPEC
{
template <typename T>
T operator()(const volatile T* p) const { register T t = Atomic::load(p); inlasm_acquire_reg(t); return t; }
};
#undef inlasm_sync
#undef inlasm_lwsync
@@ -91,6 +95,4 @@ template<> inline jlong OrderAccess::specialized_load_acquire<jlong> (const vol
#undef inlasm_isync
#undef inlasm_acquire_reg
#define VM_HAS_GENERALIZED_ORDER_ACCESS 1
#endif // OS_CPU_LINUX_PPC_VM_ORDERACCESS_LINUX_PPC_INLINE_HPP
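PPC (and s390 below) takes the other route to acquire semantics: an ordinary load followed by a barrier, rather than a special load instruction. A sketch of the idiom, with a portable fence standing in for inlasm_lwsync():

// Sketch only; the real port emits lwsync via inline assembly.
template<typename T>
T load_then_acquire(const volatile T* p) {
  T t = *p;                                // plain volatile load
  __atomic_thread_fence(__ATOMIC_ACQUIRE); // later accesses cannot float above
  return t;
}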

orderAccess_linux_s390.inline.hpp

@@ -74,10 +74,13 @@ inline void OrderAccess::acquire() { inlasm_zarch_acquire(); }
inline void OrderAccess::release() { inlasm_zarch_release(); }
inline void OrderAccess::fence() { inlasm_zarch_sync(); }
template<> inline jbyte OrderAccess::specialized_load_acquire<jbyte> (const volatile jbyte* p) { register jbyte t = *p; inlasm_zarch_acquire(); return t; }
template<> inline jshort OrderAccess::specialized_load_acquire<jshort>(const volatile jshort* p) { register jshort t = *p; inlasm_zarch_acquire(); return t; }
template<> inline jint OrderAccess::specialized_load_acquire<jint> (const volatile jint* p) { register jint t = *p; inlasm_zarch_acquire(); return t; }
template<> inline jlong OrderAccess::specialized_load_acquire<jlong> (const volatile jlong* p) { register jlong t = *p; inlasm_zarch_acquire(); return t; }
template<size_t byte_size>
struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
VALUE_OBJ_CLASS_SPEC
{
template <typename T>
T operator()(const volatile T* p) const { register T t = *p; inlasm_zarch_acquire(); return t; }
};
#undef inlasm_compiler_barrier
#undef inlasm_zarch_sync
@@ -85,8 +88,4 @@ template<> inline jlong OrderAccess::specialized_load_acquire<jlong> (const vol
#undef inlasm_zarch_acquire
#undef inlasm_zarch_fence
#define VM_HAS_GENERALIZED_ORDER_ACCESS 1
#endif // OS_CPU_LINUX_S390_VM_ORDERACCESS_LINUX_S390_INLINE_HPP

orderAccess_linux_sparc.inline.hpp

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -48,6 +48,4 @@ inline void OrderAccess::fence() {
__asm__ volatile ("membar #StoreLoad" : : : "memory");
}
#define VM_HAS_GENERALIZED_ORDER_ACCESS 1
#endif // OS_CPU_LINUX_SPARC_VM_ORDERACCESS_LINUX_SPARC_INLINE_HPP

orderAccess_linux_x86.inline.hpp

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -60,46 +60,57 @@ inline void OrderAccess::fence() {
}
template<>
inline void OrderAccess::specialized_release_store_fence<jbyte> (volatile jbyte* p, jbyte v) {
__asm__ volatile ( "xchgb (%2),%0"
: "=q" (v)
: "0" (v), "r" (p)
: "memory");
}
struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
VALUE_OBJ_CLASS_SPEC
{
template <typename T>
void operator()(T v, volatile T* p) const {
__asm__ volatile ( "xchgb (%2),%0"
: "=q" (v)
: "0" (v), "r" (p)
: "memory");
}
};
template<>
inline void OrderAccess::specialized_release_store_fence<jshort>(volatile jshort* p, jshort v) {
__asm__ volatile ( "xchgw (%2),%0"
: "=r" (v)
: "0" (v), "r" (p)
: "memory");
}
struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
VALUE_OBJ_CLASS_SPEC
{
template <typename T>
void operator()(T v, volatile T* p) const {
__asm__ volatile ( "xchgw (%2),%0"
: "=r" (v)
: "0" (v), "r" (p)
: "memory");
}
};
template<>
inline void OrderAccess::specialized_release_store_fence<jint> (volatile jint* p, jint v) {
__asm__ volatile ( "xchgl (%2),%0"
: "=r" (v)
: "0" (v), "r" (p)
: "memory");
}
struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
VALUE_OBJ_CLASS_SPEC
{
template <typename T>
void operator()(T v, volatile T* p) const {
__asm__ volatile ( "xchgl (%2),%0"
: "=r" (v)
: "0" (v), "r" (p)
: "memory");
}
};
#ifdef AMD64
template<>
inline void OrderAccess::specialized_release_store_fence<jlong> (volatile jlong* p, jlong v) {
__asm__ volatile ( "xchgq (%2), %0"
: "=r" (v)
: "0" (v), "r" (p)
: "memory");
}
struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
VALUE_OBJ_CLASS_SPEC
{
template <typename T>
void operator()(T v, volatile T* p) const {
__asm__ volatile ( "xchgq (%2), %0"
: "=r" (v)
: "0" (v), "r" (p)
: "memory");
}
};
#endif // AMD64
template<>
inline void OrderAccess::specialized_release_store_fence<jfloat> (volatile jfloat* p, jfloat v) {
release_store_fence((volatile jint*)p, jint_cast(v));
}
template<>
inline void OrderAccess::specialized_release_store_fence<jdouble>(volatile jdouble* p, jdouble v) {
release_store_fence((volatile jlong*)p, jlong_cast(v));
}
#define VM_HAS_GENERALIZED_ORDER_ACCESS 1
#endif // OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP
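The x86 ports keep xchg for release_store_fence because an exchange with a memory operand is implicitly locked on x86, so it performs the store and acts as a full fence in one instruction, cheaper than a store followed by mfence. The equivalent in portable C++11, as a reference sketch:

#include <atomic>

void release_store_fence(std::atomic<int>* p, int v) {
  // Compilers typically lower this to a single xchg on x86, which is
  // implicitly locked and therefore a full memory fence.
  (void)p->exchange(v, std::memory_order_seq_cst);
}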

orderAccess_linux_zero.inline.hpp

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -83,6 +83,4 @@ inline void OrderAccess::release() { LIGHT_MEM_BARRIER; }
inline void OrderAccess::fence() { FULL_MEM_BARRIER; }
#define VM_HAS_GENERALIZED_ORDER_ACCESS 1
#endif // OS_CPU_LINUX_ZERO_VM_ORDERACCESS_LINUX_ZERO_INLINE_HPP

orderAccess_solaris_sparc.inline.hpp

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -52,6 +52,4 @@ inline void OrderAccess::fence() {
__asm__ volatile ("membar #StoreLoad" : : : "memory");
}
#define VM_HAS_GENERALIZED_ORDER_ACCESS 1
#endif // OS_CPU_SOLARIS_SPARC_VM_ORDERACCESS_SOLARIS_SPARC_INLINE_HPP

orderAccess_solaris_x86.inline.hpp

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -58,6 +58,4 @@ inline void OrderAccess::fence() {
compiler_barrier();
}
#define VM_HAS_GENERALIZED_ORDER_ACCESS 1
#endif // OS_CPU_SOLARIS_X86_VM_ORDERACCESS_SOLARIS_X86_INLINE_HPP

orderAccess_windows_x86.inline.hpp

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -74,42 +74,46 @@ inline void OrderAccess::fence() {
#ifndef AMD64
template<>
inline void OrderAccess::specialized_release_store_fence<jbyte> (volatile jbyte* p, jbyte v) {
__asm {
mov edx, p;
mov al, v;
xchg al, byte ptr [edx];
struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
VALUE_OBJ_CLASS_SPEC
{
template <typename T>
void operator()(T v, volatile T* p) const {
__asm {
mov edx, p;
mov al, v;
xchg al, byte ptr [edx];
}
}
}
};
template<>
inline void OrderAccess::specialized_release_store_fence<jshort>(volatile jshort* p, jshort v) {
__asm {
mov edx, p;
mov ax, v;
xchg ax, word ptr [edx];
struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
VALUE_OBJ_CLASS_SPEC
{
template <typename T>
void operator()(T v, volatile T* p) const {
__asm {
mov edx, p;
mov ax, v;
xchg ax, word ptr [edx];
}
}
}
};
template<>
inline void OrderAccess::specialized_release_store_fence<jint> (volatile jint* p, jint v) {
__asm {
mov edx, p;
mov eax, v;
xchg eax, dword ptr [edx];
struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
VALUE_OBJ_CLASS_SPEC
{
template <typename T>
void operator()(T v, volatile T* p) const {
__asm {
mov edx, p;
mov eax, v;
xchg eax, dword ptr [edx];
}
}
}
};
#endif // AMD64
template<>
inline void OrderAccess::specialized_release_store_fence<jfloat>(volatile jfloat* p, jfloat v) {
release_store_fence((volatile jint*)p, jint_cast(v));
}
template<>
inline void OrderAccess::specialized_release_store_fence<jdouble>(volatile jdouble* p, jdouble v) {
release_store_fence((volatile jlong*)p, jlong_cast(v));
}
#define VM_HAS_GENERALIZED_ORDER_ACCESS 1
#endif // OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP

cardTableModRefBS.inline.hpp

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,10 +30,10 @@
#include "runtime/orderAccess.inline.hpp"
template <class T> inline void CardTableModRefBS::inline_write_ref_field(T* field, oop newVal, bool release) {
jbyte* byte = byte_for((void*)field);
volatile jbyte* byte = byte_for((void*)field);
if (release) {
// Perform a releasing store if requested.
OrderAccess::release_store((volatile jbyte*) byte, dirty_card);
OrderAccess::release_store(byte, jbyte(dirty_card));
} else {
*byte = dirty_card;
}
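Both call-site changes here follow from the new templated signature release_store(volatile D* p, T v): the pointer must genuinely be volatile jbyte*, and dirty_card must be converted explicitly, since the checked dispatch no longer performs silent narrowing. The jboolean(contents & 1) and jbyte(0) casts in oop.inline.hpp and mutex.cpp below exist for the same reason. A simplified demo with the checking elided:

#include <cstdint>

// Simplified stand-in; HotSpot routes this through a StoreImpl functor that
// verifies T and D are compatible.
template<typename T, typename D>
void release_store(volatile D* p, T v) {
  __atomic_store_n(p, v, __ATOMIC_RELEASE);
}

void mark_dirty(volatile int8_t* byte_ptr) {
  const int dirty_card = 0;                    // integral constant, as in the enum
  release_store(byte_ptr, int8_t(dirty_card)); // T = int8_t matches D = int8_t
}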

primitiveConversions.hpp

@@ -167,4 +167,24 @@ inline T PrimitiveConversions::cast(U x) {
return Cast<T, U>()(x);
}
// jfloat and jdouble translation to integral types
template<>
struct PrimitiveConversions::Translate<jdouble> : public TrueType {
typedef double Value;
typedef int64_t Decayed;
static Decayed decay(Value x) { return PrimitiveConversions::cast<Decayed>(x); }
static Value recover(Decayed x) { return PrimitiveConversions::cast<Value>(x); }
};
template<>
struct PrimitiveConversions::Translate<jfloat> : public TrueType {
typedef float Value;
typedef int32_t Decayed;
static Decayed decay(Value x) { return PrimitiveConversions::cast<Decayed>(x); }
static Value recover(Decayed x) { return PrimitiveConversions::cast<Value>(x); }
};
#endif // SHARE_VM_METAPROGRAMMING_PRIMITIVECONVERSIONS_HPP
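These Translate specializations are what let the ordered-access layer treat jfloat and jdouble as 32- and 64-bit integers, replacing the per-platform jfloat/jdouble specializations deleted above. A standalone sketch of the decay/recover round trip (illustrative, outside the real PrimitiveConversions machinery):

#include <cassert>
#include <cstdint>
#include <cstring>

struct TranslateFloat {
  typedef float   Value;
  typedef int32_t Decayed;
  static Decayed decay(Value x)     { Decayed d; std::memcpy(&d, &x, sizeof d); return d; }
  static Value   recover(Decayed d) { Value x; std::memcpy(&x, &d, sizeof x); return x; }
};

int main() {
  // The round trip through the integral representation preserves the value.
  assert(TranslateFloat::recover(TranslateFloat::decay(3.5f)) == 3.5f);
  return 0;
}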

oop.inline.hpp

@@ -501,7 +501,7 @@ jchar oopDesc::char_field_acquire(int offset) const { return O
void oopDesc::release_char_field_put(int offset, jchar contents) { OrderAccess::release_store(char_field_addr(offset), contents); }
jboolean oopDesc::bool_field_acquire(int offset) const { return OrderAccess::load_acquire(bool_field_addr(offset)); }
void oopDesc::release_bool_field_put(int offset, jboolean contents) { OrderAccess::release_store(bool_field_addr(offset), (contents & 1)); }
void oopDesc::release_bool_field_put(int offset, jboolean contents) { OrderAccess::release_store(bool_field_addr(offset), jboolean(contents & 1)); }
jint oopDesc::int_field_acquire(int offset) const { return OrderAccess::load_acquire(int_field_addr(offset)); }
void oopDesc::release_int_field_put(int offset, jint contents) { OrderAccess::release_store(int_field_addr(offset), contents); }

oopsHierarchy.hpp

@@ -177,6 +177,15 @@ struct PrimitiveConversions::Translate<oop> : public TrueType {
(void)const_cast<oop&>(oop::operator=(o)); \
return *this; \
} \
}; \
\
template<> \
struct PrimitiveConversions::Translate<type##Oop> : public TrueType { \
typedef type##Oop Value; \
typedef type##OopDesc* Decayed; \
\
static Decayed decay(Value x) { return (type##OopDesc*)x.obj(); } \
static Value recover(Decayed x) { return type##Oop(x); } \
};
DEF_OOP(instance);
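The DEF_OOP addition gives each typed oop handle the same treatment in builds where oops are wrapper classes (CHECK_UNHANDLED_OOPS): the handle decays to its underlying Desc pointer for the atomic layer and is rewrapped on the way out. Sketched with a hypothetical handle type:

// FooHandle/FooDesc are illustrative stand-ins for instanceOop and
// instanceOopDesc.
struct FooDesc { int x; };

struct FooHandle {
  FooDesc* _obj;
  explicit FooHandle(FooDesc* o) : _obj(o) {}
  FooDesc* obj() const { return _obj; }
};

struct TranslateFooHandle {
  typedef FooHandle Value;
  typedef FooDesc*  Decayed;
  static Decayed decay(Value x)     { return x.obj(); }
  static Value   recover(Decayed x) { return FooHandle(x); }
};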

atomic.hpp

@@ -44,7 +44,7 @@ enum cmpxchg_memory_order {
};
class Atomic : AllStatic {
public:
public:
// Atomic operations on jlong types are not available on all 32-bit
// platforms. If atomic ops on jlongs are defined here they must only
// be used from code that verifies they are available at runtime and
@@ -175,6 +175,7 @@ private:
// that is needed here.
template<typename From, typename To> struct IsPointerConvertible;
protected:
// Dispatch handler for store. Provides type-based validity
// checking and limited conversions around calls to the platform-
// specific implementation layer provided by PlatformOp.
@@ -226,6 +227,7 @@ private:
// requires more for e.g. 64 bit loads, a specialization is required
template<size_t byte_size> struct PlatformLoad;
private:
// Dispatch handler for add. Provides type-based validity checking
// and limited conversions around calls to the platform-specific
// implementation layer provided by PlatformAdd.

mutex.cpp

@@ -526,7 +526,7 @@ void Monitor::IUnlock(bool RelaxAssert) {
// Note that the OrderAccess::storeload() fence that appears after unlock store
// provides for progress conditions and succession and is _not related to exclusion
// safety or lock release consistency.
OrderAccess::release_store(&_LockWord.Bytes[_LSBINDEX], 0); // drop outer lock
OrderAccess::release_store(&_LockWord.Bytes[_LSBINDEX], jbyte(0)); // drop outer lock
OrderAccess::storeload();
ParkEvent * const w = _OnDeck; // raw load as we will just return if non-NULL

orderAccess.hpp

@@ -26,6 +26,7 @@
#define SHARE_VM_RUNTIME_ORDERACCESS_HPP
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
// Memory Access Ordering Model
//
@@ -252,7 +253,7 @@ class ScopedFence : public ScopedFenceGeneral<T> {
void postfix() { ScopedFenceGeneral<T>::postfix(); }
};
class OrderAccess : AllStatic {
class OrderAccess : private Atomic {
public:
// barriers
static void loadload();
@@ -264,44 +265,20 @@ class OrderAccess : AllStatic {
static void release();
static void fence();
static jbyte load_acquire(const volatile jbyte* p);
static jshort load_acquire(const volatile jshort* p);
static jint load_acquire(const volatile jint* p);
static jlong load_acquire(const volatile jlong* p);
static jubyte load_acquire(const volatile jubyte* p);
static jushort load_acquire(const volatile jushort* p);
static juint load_acquire(const volatile juint* p);
static julong load_acquire(const volatile julong* p);
static jfloat load_acquire(const volatile jfloat* p);
static jdouble load_acquire(const volatile jdouble* p);
template <typename T>
static T load_acquire(const volatile T* p);
static intptr_t load_ptr_acquire(const volatile intptr_t* p);
static void* load_ptr_acquire(const volatile void* p);
static void release_store(volatile jbyte* p, jbyte v);
static void release_store(volatile jshort* p, jshort v);
static void release_store(volatile jint* p, jint v);
static void release_store(volatile jlong* p, jlong v);
static void release_store(volatile jubyte* p, jubyte v);
static void release_store(volatile jushort* p, jushort v);
static void release_store(volatile juint* p, juint v);
static void release_store(volatile julong* p, julong v);
static void release_store(volatile jfloat* p, jfloat v);
static void release_store(volatile jdouble* p, jdouble v);
template <typename T, typename D>
static void release_store(volatile D* p, T v);
static void release_store_ptr(volatile intptr_t* p, intptr_t v);
static void release_store_ptr(volatile void* p, void* v);
static void release_store_fence(volatile jbyte* p, jbyte v);
static void release_store_fence(volatile jshort* p, jshort v);
static void release_store_fence(volatile jint* p, jint v);
static void release_store_fence(volatile jlong* p, jlong v);
static void release_store_fence(volatile jubyte* p, jubyte v);
static void release_store_fence(volatile jushort* p, jushort v);
static void release_store_fence(volatile juint* p, juint v);
static void release_store_fence(volatile julong* p, julong v);
static void release_store_fence(volatile jfloat* p, jfloat v);
static void release_store_fence(volatile jdouble* p, jdouble v);
template <typename T, typename D>
static void release_store_fence(volatile D* p, T v);
static void release_store_ptr_fence(volatile intptr_t* p, intptr_t v);
static void release_store_ptr_fence(volatile void* p, void* v);
@@ -313,45 +290,34 @@ class OrderAccess : AllStatic {
static void StubRoutines_fence();
// Give platforms a variation point to specialize.
template<typename T> static T specialized_load_acquire (const volatile T* p);
template<typename T> static void specialized_release_store (volatile T* p, T v);
template<typename T> static void specialized_release_store_fence(volatile T* p, T v);
template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedStore;
template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedLoad;
template<typename FieldType, ScopedFenceType FenceType>
static void ordered_store(volatile FieldType* p, FieldType v);
template<typename FieldType, ScopedFenceType FenceType>
static FieldType ordered_load(const volatile FieldType* p);
};
static void store(volatile jbyte* p, jbyte v);
static void store(volatile jshort* p, jshort v);
static void store(volatile jint* p, jint v);
static void store(volatile jlong* p, jlong v);
static void store(volatile jdouble* p, jdouble v);
static void store(volatile jfloat* p, jfloat v);
// The following methods can be specialized using simple template specialization
// in the platform specific files for optimization purposes. Otherwise the
// generalized variant is used.
static jbyte load(const volatile jbyte* p);
static jshort load(const volatile jshort* p);
static jint load(const volatile jint* p);
static jlong load(const volatile jlong* p);
static jdouble load(const volatile jdouble* p);
static jfloat load(const volatile jfloat* p);
template<size_t byte_size, ScopedFenceType type>
struct OrderAccess::PlatformOrderedStore VALUE_OBJ_CLASS_SPEC {
template <typename T>
void operator()(T v, volatile T* p) const {
ordered_store<T, type>(p, v);
}
};
// The following store_fence methods are deprecated and will be removed
// when all repos conform to the new generalized OrderAccess.
static void store_fence(jbyte* p, jbyte v);
static void store_fence(jshort* p, jshort v);
static void store_fence(jint* p, jint v);
static void store_fence(jlong* p, jlong v);
static void store_fence(jubyte* p, jubyte v);
static void store_fence(jushort* p, jushort v);
static void store_fence(juint* p, juint v);
static void store_fence(julong* p, julong v);
static void store_fence(jfloat* p, jfloat v);
static void store_fence(jdouble* p, jdouble v);
static void store_ptr_fence(intptr_t* p, intptr_t v);
static void store_ptr_fence(void** p, void* v);
template<size_t byte_size, ScopedFenceType type>
struct OrderAccess::PlatformOrderedLoad VALUE_OBJ_CLASS_SPEC {
template <typename T>
T operator()(const volatile T* p) const {
return ordered_load<T, type>(p);
}
};
#endif // SHARE_VM_RUNTIME_ORDERACCESS_HPP
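The generic fallback used when a platform provides no PlatformOrdered* specialization hangs on the ScopedFence type visible in the context above: the fence kind determines whether a barrier runs before the access (release flavors) or after it (acquire). A condensed standalone sketch, with portable fences in place of the platform barriers:

enum ScopedFenceType { X_ACQUIRE, RELEASE_X, RELEASE_X_FENCE };

template<ScopedFenceType T>
struct ScopedFenceGeneral {
  void prefix()  {}
  void postfix() {}
};

// Acquire fences run after the access; release fences run before it.
template<> inline void ScopedFenceGeneral<X_ACQUIRE>::postfix() {
  __atomic_thread_fence(__ATOMIC_ACQUIRE);
}
template<> inline void ScopedFenceGeneral<RELEASE_X>::prefix() {
  __atomic_thread_fence(__ATOMIC_RELEASE);
}

template<ScopedFenceType T>
struct ScopedFence : ScopedFenceGeneral<T> {
  ScopedFence()  { this->prefix();  }
  ~ScopedFence() { this->postfix(); }
};

template<typename FieldType, ScopedFenceType FenceType>
FieldType ordered_load(const volatile FieldType* p) {
  ScopedFence<FenceType> f; // for X_ACQUIRE, postfix runs after the load
  return *p;
}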

orderAccess.inline.hpp

@@ -26,14 +26,11 @@
#ifndef SHARE_VM_RUNTIME_ORDERACCESS_INLINE_HPP
#define SHARE_VM_RUNTIME_ORDERACCESS_INLINE_HPP
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "utilities/macros.hpp"
#include OS_CPU_HEADER_INLINE(orderAccess)
#ifdef VM_HAS_GENERALIZED_ORDER_ACCESS
template<> inline void ScopedFenceGeneral<X_ACQUIRE>::postfix() { OrderAccess::acquire(); }
template<> inline void ScopedFenceGeneral<RELEASE_X>::prefix() { OrderAccess::release(); }
template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::prefix() { OrderAccess::release(); }
@@ -43,80 +40,42 @@ template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::postfix() { OrderAcc
template <typename FieldType, ScopedFenceType FenceType>
inline void OrderAccess::ordered_store(volatile FieldType* p, FieldType v) {
ScopedFence<FenceType> f((void*)p);
store(p, v);
Atomic::store(v, p);
}
template <typename FieldType, ScopedFenceType FenceType>
inline FieldType OrderAccess::ordered_load(const volatile FieldType* p) {
ScopedFence<FenceType> f((void*)p);
return load(p);
return Atomic::load(p);
}
inline jbyte OrderAccess::load_acquire(const volatile jbyte* p) { return specialized_load_acquire(p); }
inline jshort OrderAccess::load_acquire(const volatile jshort* p) { return specialized_load_acquire(p); }
inline jint OrderAccess::load_acquire(const volatile jint* p) { return specialized_load_acquire(p); }
inline jlong OrderAccess::load_acquire(const volatile jlong* p) { return specialized_load_acquire(p); }
inline jfloat OrderAccess::load_acquire(const volatile jfloat* p) { return specialized_load_acquire(p); }
inline jdouble OrderAccess::load_acquire(const volatile jdouble* p) { return specialized_load_acquire(p); }
inline jubyte OrderAccess::load_acquire(const volatile jubyte* p) { return (jubyte) specialized_load_acquire((const volatile jbyte*)p); }
inline jushort OrderAccess::load_acquire(const volatile jushort* p) { return (jushort)specialized_load_acquire((const volatile jshort*)p); }
inline juint OrderAccess::load_acquire(const volatile juint* p) { return (juint) specialized_load_acquire((const volatile jint*)p); }
inline julong OrderAccess::load_acquire(const volatile julong* p) { return (julong) specialized_load_acquire((const volatile jlong*)p); }
template <typename T>
inline T OrderAccess::load_acquire(const volatile T* p) {
return LoadImpl<T, PlatformOrderedLoad<sizeof(T), X_ACQUIRE> >()(p);
}
inline intptr_t OrderAccess::load_ptr_acquire(const volatile intptr_t* p) { return (intptr_t)specialized_load_acquire(p); }
inline void* OrderAccess::load_ptr_acquire(const volatile void* p) { return (void*)specialized_load_acquire((const volatile intptr_t*)p); }
inline intptr_t OrderAccess::load_ptr_acquire(const volatile intptr_t* p) {
return load_acquire(p);
}
inline void OrderAccess::release_store(volatile jbyte* p, jbyte v) { specialized_release_store(p, v); }
inline void OrderAccess::release_store(volatile jshort* p, jshort v) { specialized_release_store(p, v); }
inline void OrderAccess::release_store(volatile jint* p, jint v) { specialized_release_store(p, v); }
inline void OrderAccess::release_store(volatile jlong* p, jlong v) { specialized_release_store(p, v); }
inline void OrderAccess::release_store(volatile jfloat* p, jfloat v) { specialized_release_store(p, v); }
inline void OrderAccess::release_store(volatile jdouble* p, jdouble v) { specialized_release_store(p, v); }
inline void OrderAccess::release_store(volatile jubyte* p, jubyte v) { specialized_release_store((volatile jbyte*) p, (jbyte) v); }
inline void OrderAccess::release_store(volatile jushort* p, jushort v) { specialized_release_store((volatile jshort*)p, (jshort)v); }
inline void OrderAccess::release_store(volatile juint* p, juint v) { specialized_release_store((volatile jint*) p, (jint) v); }
inline void OrderAccess::release_store(volatile julong* p, julong v) { specialized_release_store((volatile jlong*) p, (jlong) v); }
inline void* OrderAccess::load_ptr_acquire(const volatile void* p) {
return load_acquire(static_cast<void* const volatile *>(p));
}
inline void OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { specialized_release_store(p, v); }
inline void OrderAccess::release_store_ptr(volatile void* p, void* v) { specialized_release_store((volatile intptr_t*)p, (intptr_t)v); }
template <typename T, typename D>
inline void OrderAccess::release_store(volatile D* p, T v) {
StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X> >()(v, p);
}
inline void OrderAccess::release_store_fence(volatile jbyte* p, jbyte v) { specialized_release_store_fence(p, v); }
inline void OrderAccess::release_store_fence(volatile jshort* p, jshort v) { specialized_release_store_fence(p, v); }
inline void OrderAccess::release_store_fence(volatile jint* p, jint v) { specialized_release_store_fence(p, v); }
inline void OrderAccess::release_store_fence(volatile jlong* p, jlong v) { specialized_release_store_fence(p, v); }
inline void OrderAccess::release_store_fence(volatile jfloat* p, jfloat v) { specialized_release_store_fence(p, v); }
inline void OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { specialized_release_store_fence(p, v); }
inline void OrderAccess::release_store_fence(volatile jubyte* p, jubyte v) { specialized_release_store_fence((volatile jbyte*) p, (jbyte) v); }
inline void OrderAccess::release_store_fence(volatile jushort* p, jushort v) { specialized_release_store_fence((volatile jshort*)p, (jshort)v); }
inline void OrderAccess::release_store_fence(volatile juint* p, juint v) { specialized_release_store_fence((volatile jint*) p, (jint) v); }
inline void OrderAccess::release_store_fence(volatile julong* p, julong v) { specialized_release_store_fence((volatile jlong*) p, (jlong) v); }
inline void OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { release_store(p, v); }
inline void OrderAccess::release_store_ptr(volatile void* p, void* v) { release_store(static_cast<void* volatile*>(p), v); }
inline void OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) { specialized_release_store_fence(p, v); }
inline void OrderAccess::release_store_ptr_fence(volatile void* p, void* v) { specialized_release_store_fence((volatile intptr_t*)p, (intptr_t)v); }
template <typename T, typename D>
inline void OrderAccess::release_store_fence(volatile D* p, T v) {
StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X_FENCE> >()(v, p);
}
// The following methods can be specialized using simple template specialization
// in the platform specific files for optimization purposes. Otherwise the
// generalized variant is used.
template<typename T> inline T OrderAccess::specialized_load_acquire (const volatile T* p) { return ordered_load<T, X_ACQUIRE>(p); }
template<typename T> inline void OrderAccess::specialized_release_store (volatile T* p, T v) { ordered_store<T, RELEASE_X>(p, v); }
template<typename T> inline void OrderAccess::specialized_release_store_fence(volatile T* p, T v) { ordered_store<T, RELEASE_X_FENCE>(p, v); }
// Generalized atomic volatile accesses valid in OrderAccess
// All other types can be expressed in terms of these.
inline void OrderAccess::store(volatile jbyte* p, jbyte v) { *p = v; }
inline void OrderAccess::store(volatile jshort* p, jshort v) { *p = v; }
inline void OrderAccess::store(volatile jint* p, jint v) { *p = v; }
inline void OrderAccess::store(volatile jlong* p, jlong v) { Atomic::store(v, p); }
inline void OrderAccess::store(volatile jdouble* p, jdouble v) { Atomic::store(jlong_cast(v), (volatile jlong*)p); }
inline void OrderAccess::store(volatile jfloat* p, jfloat v) { *p = v; }
inline jbyte OrderAccess::load(const volatile jbyte* p) { return *p; }
inline jshort OrderAccess::load(const volatile jshort* p) { return *p; }
inline jint OrderAccess::load(const volatile jint* p) { return *p; }
inline jlong OrderAccess::load(const volatile jlong* p) { return Atomic::load(p); }
inline jdouble OrderAccess::load(const volatile jdouble* p) { return jdouble_cast(Atomic::load((const volatile jlong*)p)); }
inline jfloat OrderAccess::load(const volatile jfloat* p) { return *p; }
#endif // VM_HAS_GENERALIZED_ORDER_ACCESS
inline void OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) { release_store_fence(p, v); }
inline void OrderAccess::release_store_ptr_fence(volatile void* p, void* v) { release_store_fence(static_cast<void* volatile*>(p), v); }
#endif // SHARE_VM_RUNTIME_ORDERACCESS_INLINE_HPP