8234562: Move OrderAccess::release_store*/load_acquire to Atomic

Reviewed-by: rehn, dholmes
Stefan Karlsson 2019-11-25 12:22:13 +01:00
parent e06c17ce33
commit e527ce4b57
97 changed files with 554 additions and 570 deletions
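At nearly every call site the change is a mechanical rename: the memory-ordered accessors keep their signatures but move from OrderAccess to Atomic. A minimal before/after sketch, using a hypothetical volatile field _ptr and assuming runtime/atomic.hpp (not code from this patch):

#include "runtime/atomic.hpp"

class Foo;
static Foo* volatile _ptr = NULL;

// Before JDK-8234562 the ordered accessors lived in OrderAccess:
//   Foo* f = OrderAccess::load_acquire(&_ptr);
//   OrderAccess::release_store(&_ptr, f);
//   OrderAccess::release_store_fence(&_ptr, f);

// After: the same operations, now members of Atomic.
static void publish(Foo* f) {
  Atomic::release_store(&_ptr, f);      // prior initialization of *f is visible to acquiring readers
}

static Foo* current() {
  return Atomic::load_acquire(&_ptr);   // pairs with the release store above
}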

View File

@ -374,7 +374,7 @@ void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer)
// Finally patch out the jump.
volatile juint *jump_addr = (volatile juint*)instr_addr;
// Release not needed because caller uses invalidate_range after copying the remaining bytes.
//OrderAccess::release_store(jump_addr, *((juint*)code_buffer));
//Atomic::release_store(jump_addr, *((juint*)code_buffer));
*jump_addr = *((juint*)code_buffer); // atomically store code over branch instruction
ICache::ppc64_flush_icache_bytes(instr_addr, NativeGeneralJump::instruction_size);
}

View File

@ -51,7 +51,6 @@
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/semaphore.hpp"
@ -3209,7 +3208,7 @@ static volatile int* volatile apic_to_processor_mapping = NULL;
static volatile int next_processor_id = 0;
static inline volatile int* get_apic_to_processor_mapping() {
volatile int* mapping = OrderAccess::load_acquire(&apic_to_processor_mapping);
volatile int* mapping = Atomic::load_acquire(&apic_to_processor_mapping);
if (mapping == NULL) {
// Calculate possible number space for APIC ids. This space is not necessarily
// in the range [0, number_of_processors).
@ -3240,7 +3239,7 @@ static inline volatile int* get_apic_to_processor_mapping() {
if (!Atomic::replace_if_null(mapping, &apic_to_processor_mapping)) {
FREE_C_HEAP_ARRAY(int, mapping);
mapping = OrderAccess::load_acquire(&apic_to_processor_mapping);
mapping = Atomic::load_acquire(&apic_to_processor_mapping);
}
}
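The APIC-mapping change above exercises the moved primitives in a lock-free lazy-publication idiom: acquire-load the pointer, build the table privately, publish it with a CAS, and on losing the race free the local copy and acquire-load the winner. A condensed sketch of that shape with hypothetical names (assumes runtime/atomic.hpp; not the exact os_windows code):

struct Table { int data; };
static Table* volatile _table = NULL;

static Table* get_table() {
  Table* t = Atomic::load_acquire(&_table);        // fast path: already published
  if (t == NULL) {
    Table* fresh = new Table();                    // initialize off to the side
    fresh->data = 42;
    if (Atomic::replace_if_null(fresh, &_table)) {
      t = fresh;                                   // we won the race and published it
    } else {
      delete fresh;                                // another thread won
      t = Atomic::load_acquire(&_table);           // read the published instance
    }
  }
  return t;
}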

View File

@ -3747,7 +3747,7 @@ int os::win32::exit_process_or_thread(Ept what, int exit_code) {
// The first thread that reached this point, initializes the critical section.
if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
} else if (OrderAccess::load_acquire(&process_exiting) == 0) {
} else if (Atomic::load_acquire(&process_exiting) == 0) {
if (what != EPT_THREAD) {
// Atomically set process_exiting before the critical section
// to increase the visibility between racing threads.
@ -3755,7 +3755,7 @@ int os::win32::exit_process_or_thread(Ept what, int exit_code) {
}
EnterCriticalSection(&crit_sect);
if (what == EPT_THREAD && OrderAccess::load_acquire(&process_exiting) == 0) {
if (what == EPT_THREAD && Atomic::load_acquire(&process_exiting) == 0) {
// Remove from the array those handles of the threads that have completed exiting.
for (i = 0, j = 0; i < handle_count; ++i) {
res = WaitForSingleObject(handles[i], 0 /* don't wait */);
@ -3868,7 +3868,7 @@ int os::win32::exit_process_or_thread(Ept what, int exit_code) {
}
if (!registered &&
OrderAccess::load_acquire(&process_exiting) != 0 &&
Atomic::load_acquire(&process_exiting) != 0 &&
process_exiting != GetCurrentThreadId()) {
// Some other thread is about to call exit(), so we don't let
// the current unregistered thread proceed to exit() or _endthreadex()

View File

@ -30,6 +30,7 @@
#error "Atomic currently only implemented for PPC64"
#endif
#include "orderAccess_aix_ppc.hpp"
#include "utilities/debug.hpp"
// Implementation of class atomic
@ -399,4 +400,15 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
return old_value;
}
template<size_t byte_size>
struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE> {
template <typename T>
T operator()(const volatile T* p) const {
T t = Atomic::load(p);
// Use twi-isync for load_acquire (faster than lwsync).
__asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (t) : "memory");
return t;
}
};
#endif // OS_CPU_AIX_PPC_ATOMIC_AIX_PPC_HPP

View File

@ -64,8 +64,6 @@
#define inlasm_lwsync() __asm__ __volatile__ ("lwsync" : : : "memory");
#define inlasm_eieio() __asm__ __volatile__ ("eieio" : : : "memory");
#define inlasm_isync() __asm__ __volatile__ ("isync" : : : "memory");
// Use twi-isync for load_acquire (faster than lwsync).
#define inlasm_acquire_reg(X) __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (X) : "memory");
inline void OrderAccess::loadload() { inlasm_lwsync(); }
inline void OrderAccess::storestore() { inlasm_lwsync(); }
@ -78,13 +76,6 @@ inline void OrderAccess::fence() { inlasm_sync(); }
inline void OrderAccess::cross_modify_fence()
{ inlasm_isync(); }
template<size_t byte_size>
struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
{
template <typename T>
T operator()(const volatile T* p) const { T t = Atomic::load(p); inlasm_acquire_reg(t); return t; }
};
#undef inlasm_sync
#undef inlasm_lwsync
#undef inlasm_eieio

View File

@ -169,4 +169,54 @@ inline void Atomic::PlatformStore<8>::operator()(T store_value,
#endif // AMD64
template<>
struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE>
{
template <typename T>
void operator()(T v, volatile T* p) const {
__asm__ volatile ( "xchgb (%2),%0"
: "=q" (v)
: "0" (v), "r" (p)
: "memory");
}
};
template<>
struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE>
{
template <typename T>
void operator()(T v, volatile T* p) const {
__asm__ volatile ( "xchgw (%2),%0"
: "=r" (v)
: "0" (v), "r" (p)
: "memory");
}
};
template<>
struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE>
{
template <typename T>
void operator()(T v, volatile T* p) const {
__asm__ volatile ( "xchgl (%2),%0"
: "=r" (v)
: "0" (v), "r" (p)
: "memory");
}
};
#ifdef AMD64
template<>
struct Atomic::PlatformOrderedStore<8, RELEASE_X_FENCE>
{
template <typename T>
void operator()(T v, volatile T* p) const {
__asm__ volatile ( "xchgq (%2), %0"
: "=r" (v)
: "0" (v), "r" (p)
: "memory");
}
};
#endif // AMD64
#endif // OS_CPU_BSD_X86_ATOMIC_BSD_X86_HPP
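These xchg-based specializations back Atomic::release_store_fence on x86: an xchg with a memory operand is implicitly locked, so one instruction supplies both the store and the trailing full fence that RELEASE_X_FENCE requires, instead of a separate store plus mfence. A hedged usage sketch (hypothetical flag, assumes runtime/atomic.hpp):

static volatile jint _done = 0;

static void signal_done() {
  // Resolves to PlatformOrderedStore<4, RELEASE_X_FENCE>, i.e. a single xchgl on this platform.
  Atomic::release_store_fence(&_done, 1);
}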

View File

@ -64,54 +64,4 @@ inline void OrderAccess::cross_modify_fence() {
__asm__ volatile ("cpuid " : "+a" (idx) : : "ebx", "ecx", "edx", "memory");
}
template<>
struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
{
template <typename T>
void operator()(T v, volatile T* p) const {
__asm__ volatile ( "xchgb (%2),%0"
: "=q" (v)
: "0" (v), "r" (p)
: "memory");
}
};
template<>
struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
{
template <typename T>
void operator()(T v, volatile T* p) const {
__asm__ volatile ( "xchgw (%2),%0"
: "=r" (v)
: "0" (v), "r" (p)
: "memory");
}
};
template<>
struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
{
template <typename T>
void operator()(T v, volatile T* p) const {
__asm__ volatile ( "xchgl (%2),%0"
: "=r" (v)
: "0" (v), "r" (p)
: "memory");
}
};
#ifdef AMD64
template<>
struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
{
template <typename T>
void operator()(T v, volatile T* p) const {
__asm__ volatile ( "xchgq (%2), %0"
: "=r" (v)
: "0" (v), "r" (p)
: "memory");
}
};
#endif // AMD64
#endif // OS_CPU_BSD_X86_ORDERACCESS_BSD_X86_HPP

View File

@ -32,10 +32,6 @@
// Note that memory_order_conservative requires a full barrier after atomic stores.
// See https://patchwork.kernel.org/patch/3575821/
#define FULL_MEM_BARRIER __sync_synchronize()
#define READ_MEM_BARRIER __atomic_thread_fence(__ATOMIC_ACQUIRE);
#define WRITE_MEM_BARRIER __atomic_thread_fence(__ATOMIC_RELEASE);
template<size_t byte_size>
struct Atomic::PlatformAdd
: Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
@ -81,4 +77,25 @@ inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T exchange_value,
}
}
template<size_t byte_size>
struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE>
{
template <typename T>
T operator()(const volatile T* p) const { T data; __atomic_load(const_cast<T*>(p), &data, __ATOMIC_ACQUIRE); return data; }
};
template<size_t byte_size>
struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X>
{
template <typename T>
void operator()(T v, volatile T* p) const { __atomic_store(const_cast<T*>(p), &v, __ATOMIC_RELEASE); }
};
template<size_t byte_size>
struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
{
template <typename T>
void operator()(T v, volatile T* p) const { release_store(p, v); OrderAccess::fence(); }
};
#endif // OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP
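The aarch64 versions map the ordered accessors directly onto the GCC/Clang __atomic builtins. The same builtins can be exercised outside HotSpot; a small standalone illustration (not JDK code) of the acquire-load/release-store pairing used above:

#include <cstdio>

static volatile int g_payload = 0;
static volatile int g_ready   = 0;

static void producer() {
  int payload = 42;
  int one = 1;
  __atomic_store(const_cast<int*>(&g_payload), &payload, __ATOMIC_RELAXED);
  __atomic_store(const_cast<int*>(&g_ready), &one, __ATOMIC_RELEASE);      // release: payload ordered before the flag
}

static bool consumer(int* out) {
  int ready;
  __atomic_load(const_cast<int*>(&g_ready), &ready, __ATOMIC_ACQUIRE);     // acquire: pairs with the release store
  if (ready == 0) return false;
  __atomic_load(const_cast<int*>(&g_payload), out, __ATOMIC_RELAXED);
  return true;
}

int main() {
  producer();
  int v;
  if (consumer(&v)) printf("payload = %d\n", v);
  return 0;
}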

View File

@ -37,6 +37,10 @@ inline void OrderAccess::storestore() { release(); }
inline void OrderAccess::loadstore() { acquire(); }
inline void OrderAccess::storeload() { fence(); }
#define FULL_MEM_BARRIER __sync_synchronize()
#define READ_MEM_BARRIER __atomic_thread_fence(__ATOMIC_ACQUIRE);
#define WRITE_MEM_BARRIER __atomic_thread_fence(__ATOMIC_RELEASE);
inline void OrderAccess::acquire() {
READ_MEM_BARRIER;
}
@ -51,25 +55,4 @@ inline void OrderAccess::fence() {
inline void OrderAccess::cross_modify_fence() { }
template<size_t byte_size>
struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
{
template <typename T>
T operator()(const volatile T* p) const { T data; __atomic_load(const_cast<T*>(p), &data, __ATOMIC_ACQUIRE); return data; }
};
template<size_t byte_size>
struct OrderAccess::PlatformOrderedStore<byte_size, RELEASE_X>
{
template <typename T>
void operator()(T v, volatile T* p) const { __atomic_store(const_cast<T*>(p), &v, __ATOMIC_RELEASE); }
};
template<size_t byte_size>
struct OrderAccess::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
{
template <typename T>
void operator()(T v, volatile T* p) const { release_store(p, v); fence(); }
};
#endif // OS_CPU_LINUX_AARCH64_ORDERACCESS_LINUX_AARCH64_HPP

View File

@ -30,6 +30,7 @@
#error "Atomic currently only implemented for PPC64"
#endif
#include "orderAccess_linux_ppc.hpp"
#include "utilities/debug.hpp"
// Implementation of class atomic
@ -399,4 +400,16 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
return old_value;
}
template<size_t byte_size>
struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE>
{
template <typename T>
T operator()(const volatile T* p) const {
T t = Atomic::load(p);
// Use twi-isync for load_acquire (faster than lwsync).
__asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (t) : "memory");
return t;
}
};
#endif // OS_CPU_LINUX_PPC_ATOMIC_LINUX_PPC_HPP

View File

@ -68,8 +68,6 @@
#define inlasm_lwsync() __asm__ __volatile__ ("lwsync" : : : "memory");
#define inlasm_eieio() __asm__ __volatile__ ("eieio" : : : "memory");
#define inlasm_isync() __asm__ __volatile__ ("isync" : : : "memory");
// Use twi-isync for load_acquire (faster than lwsync).
#define inlasm_acquire_reg(X) __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (X) : "memory");
inline void OrderAccess::loadload() { inlasm_lwsync(); }
inline void OrderAccess::storestore() { inlasm_lwsync(); }
@ -82,17 +80,9 @@ inline void OrderAccess::fence() { inlasm_sync(); }
inline void OrderAccess::cross_modify_fence()
{ inlasm_isync(); }
template<size_t byte_size>
struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
{
template <typename T>
T operator()(const volatile T* p) const { T t = Atomic::load(p); inlasm_acquire_reg(t); return t; }
};
#undef inlasm_sync
#undef inlasm_lwsync
#undef inlasm_eieio
#undef inlasm_isync
#undef inlasm_acquire_reg
#endif // OS_CPU_LINUX_PPC_ORDERACCESS_LINUX_PPC_HPP

View File

@ -335,4 +335,11 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T xchg_val,
return old;
}
template<size_t byte_size>
struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE>
{
template <typename T>
T operator()(const volatile T* p) const { T t = *p; OrderAccess::acquire(); return t; }
};
#endif // OS_CPU_LINUX_S390_ATOMIC_LINUX_S390_HPP

View File

@ -76,13 +76,6 @@ inline void OrderAccess::release() { inlasm_zarch_release(); }
inline void OrderAccess::fence() { inlasm_zarch_sync(); }
inline void OrderAccess::cross_modify_fence() { inlasm_zarch_sync(); }
template<size_t byte_size>
struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
{
template <typename T>
T operator()(const volatile T* p) const { T t = *p; inlasm_zarch_acquire(); return t; }
};
#undef inlasm_compiler_barrier
#undef inlasm_zarch_sync
#undef inlasm_zarch_release

View File

@ -169,4 +169,54 @@ inline void Atomic::PlatformStore<8>::operator()(T store_value,
#endif // AMD64
template<>
struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE>
{
template <typename T>
void operator()(T v, volatile T* p) const {
__asm__ volatile ( "xchgb (%2),%0"
: "=q" (v)
: "0" (v), "r" (p)
: "memory");
}
};
template<>
struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE>
{
template <typename T>
void operator()(T v, volatile T* p) const {
__asm__ volatile ( "xchgw (%2),%0"
: "=r" (v)
: "0" (v), "r" (p)
: "memory");
}
};
template<>
struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE>
{
template <typename T>
void operator()(T v, volatile T* p) const {
__asm__ volatile ( "xchgl (%2),%0"
: "=r" (v)
: "0" (v), "r" (p)
: "memory");
}
};
#ifdef AMD64
template<>
struct Atomic::PlatformOrderedStore<8, RELEASE_X_FENCE>
{
template <typename T>
void operator()(T v, volatile T* p) const {
__asm__ volatile ( "xchgq (%2), %0"
: "=r" (v)
: "0" (v), "r" (p)
: "memory");
}
};
#endif // AMD64
#endif // OS_CPU_LINUX_X86_ATOMIC_LINUX_X86_HPP

View File

@ -66,54 +66,4 @@ inline void OrderAccess::cross_modify_fence() {
#endif
}
template<>
struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
{
template <typename T>
void operator()(T v, volatile T* p) const {
__asm__ volatile ( "xchgb (%2),%0"
: "=q" (v)
: "0" (v), "r" (p)
: "memory");
}
};
template<>
struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
{
template <typename T>
void operator()(T v, volatile T* p) const {
__asm__ volatile ( "xchgw (%2),%0"
: "=r" (v)
: "0" (v), "r" (p)
: "memory");
}
};
template<>
struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
{
template <typename T>
void operator()(T v, volatile T* p) const {
__asm__ volatile ( "xchgl (%2),%0"
: "=r" (v)
: "0" (v), "r" (p)
: "memory");
}
};
#ifdef AMD64
template<>
struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
{
template <typename T>
void operator()(T v, volatile T* p) const {
__asm__ volatile ( "xchgq (%2), %0"
: "=r" (v)
: "0" (v), "r" (p)
: "memory");
}
};
#endif // AMD64
#endif // OS_CPU_LINUX_X86_ORDERACCESS_LINUX_X86_HPP

View File

@ -27,6 +27,17 @@
#include "runtime/os.hpp"
// Note that in MSVC, volatile memory accesses are explicitly
// guaranteed to have acquire release semantics (w.r.t. compiler
// reordering) and therefore does not even need a compiler barrier
// for normal acquire release accesses. And all generalized
// bound calls like release_store go through Atomic::load
// and Atomic::store which do volatile memory accesses.
template<> inline void ScopedFence<X_ACQUIRE>::postfix() { }
template<> inline void ScopedFence<RELEASE_X>::prefix() { }
template<> inline void ScopedFence<RELEASE_X_FENCE>::prefix() { }
template<> inline void ScopedFence<RELEASE_X_FENCE>::postfix() { OrderAccess::fence(); }
// The following alternative implementations are needed because
// Windows 95 doesn't support (some of) the corresponding Windows NT
// calls. Furthermore, these versions allow inlining in the caller.
@ -218,4 +229,45 @@ inline void Atomic::PlatformStore<8>::operator()(T store_value,
#pragma warning(default: 4035) // Enables warnings reporting missing return statement
#ifndef AMD64
template<>
struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE>
{
template <typename T>
void operator()(T v, volatile T* p) const {
__asm {
mov edx, p;
mov al, v;
xchg al, byte ptr [edx];
}
}
};
template<>
struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE>
{
template <typename T>
void operator()(T v, volatile T* p) const {
__asm {
mov edx, p;
mov ax, v;
xchg ax, word ptr [edx];
}
}
};
template<>
struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE>
{
template <typename T>
void operator()(T v, volatile T* p) const {
__asm {
mov edx, p;
mov eax, v;
xchg eax, dword ptr [edx];
}
}
};
#endif // AMD64
#endif // OS_CPU_WINDOWS_X86_ATOMIC_WINDOWS_X86_HPP

View File

@ -39,17 +39,6 @@ inline void compiler_barrier() {
_ReadWriteBarrier();
}
// Note that in MSVC, volatile memory accesses are explicitly
// guaranteed to have acquire release semantics (w.r.t. compiler
// reordering) and therefore does not even need a compiler barrier
// for normal acquire release accesses. And all generalized
// bound calls like release_store go through OrderAccess::load
// and OrderAccess::store which do volatile memory accesses.
template<> inline void ScopedFence<X_ACQUIRE>::postfix() { }
template<> inline void ScopedFence<RELEASE_X>::prefix() { }
template<> inline void ScopedFence<RELEASE_X_FENCE>::prefix() { }
template<> inline void ScopedFence<RELEASE_X_FENCE>::postfix() { OrderAccess::fence(); }
inline void OrderAccess::loadload() { compiler_barrier(); }
inline void OrderAccess::storestore() { compiler_barrier(); }
inline void OrderAccess::loadstore() { compiler_barrier(); }
@ -74,45 +63,4 @@ inline void OrderAccess::cross_modify_fence() {
__cpuid(regs, 0);
}
#ifndef AMD64
template<>
struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
{
template <typename T>
void operator()(T v, volatile T* p) const {
__asm {
mov edx, p;
mov al, v;
xchg al, byte ptr [edx];
}
}
};
template<>
struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
{
template <typename T>
void operator()(T v, volatile T* p) const {
__asm {
mov edx, p;
mov ax, v;
xchg ax, word ptr [edx];
}
}
};
template<>
struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
{
template <typename T>
void operator()(T v, volatile T* p) const {
__asm {
mov edx, p;
mov eax, v;
xchg eax, dword ptr [edx];
}
}
};
#endif // AMD64
#endif // OS_CPU_WINDOWS_X86_ORDERACCESS_WINDOWS_X86_HPP

View File

@ -29,11 +29,11 @@
#include "runtime/orderAccess.hpp"
// Next entry in class path
inline ClassPathEntry* ClassPathEntry::next() const { return OrderAccess::load_acquire(&_next); }
inline ClassPathEntry* ClassPathEntry::next() const { return Atomic::load_acquire(&_next); }
inline void ClassPathEntry::set_next(ClassPathEntry* next) {
// may have unlocked readers, so ensure visibility.
OrderAccess::release_store(&_next, next);
Atomic::release_store(&_next, next);
}
inline ClassPathEntry* ClassLoader::classpath_entry(int n) {

View File

@ -187,11 +187,11 @@ ClassLoaderData::ChunkedHandleList::~ChunkedHandleList() {
oop* ClassLoaderData::ChunkedHandleList::add(oop o) {
if (_head == NULL || _head->_size == Chunk::CAPACITY) {
Chunk* next = new Chunk(_head);
OrderAccess::release_store(&_head, next);
Atomic::release_store(&_head, next);
}
oop* handle = &_head->_data[_head->_size];
NativeAccess<IS_DEST_UNINITIALIZED>::oop_store(handle, o);
OrderAccess::release_store(&_head->_size, _head->_size + 1);
Atomic::release_store(&_head->_size, _head->_size + 1);
return handle;
}
@ -214,10 +214,10 @@ inline void ClassLoaderData::ChunkedHandleList::oops_do_chunk(OopClosure* f, Chu
}
void ClassLoaderData::ChunkedHandleList::oops_do(OopClosure* f) {
Chunk* head = OrderAccess::load_acquire(&_head);
Chunk* head = Atomic::load_acquire(&_head);
if (head != NULL) {
// Must be careful when reading size of head
oops_do_chunk(f, head, OrderAccess::load_acquire(&head->_size));
oops_do_chunk(f, head, Atomic::load_acquire(&head->_size));
for (Chunk* c = head->_next; c != NULL; c = c->_next) {
oops_do_chunk(f, c, c->_size);
}
@ -326,7 +326,7 @@ void ClassLoaderData::oops_do(OopClosure* f, int claim_value, bool clear_mod_oop
void ClassLoaderData::classes_do(KlassClosure* klass_closure) {
// Lock-free access requires load_acquire
for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
klass_closure->do_klass(k);
assert(k != k->next_link(), "no loops!");
}
@ -334,7 +334,7 @@ void ClassLoaderData::classes_do(KlassClosure* klass_closure) {
void ClassLoaderData::classes_do(void f(Klass * const)) {
// Lock-free access requires load_acquire
for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
f(k);
assert(k != k->next_link(), "no loops!");
}
@ -342,7 +342,7 @@ void ClassLoaderData::classes_do(void f(Klass * const)) {
void ClassLoaderData::methods_do(void f(Method*)) {
// Lock-free access requires load_acquire
for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
if (k->is_instance_klass() && InstanceKlass::cast(k)->is_loaded()) {
InstanceKlass::cast(k)->methods_do(f);
}
@ -351,7 +351,7 @@ void ClassLoaderData::methods_do(void f(Method*)) {
void ClassLoaderData::loaded_classes_do(KlassClosure* klass_closure) {
// Lock-free access requires load_acquire
for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
// Do not filter ArrayKlass oops here...
if (k->is_array_klass() || (k->is_instance_klass() && InstanceKlass::cast(k)->is_loaded())) {
#ifdef ASSERT
@ -366,7 +366,7 @@ void ClassLoaderData::loaded_classes_do(KlassClosure* klass_closure) {
void ClassLoaderData::classes_do(void f(InstanceKlass*)) {
// Lock-free access requires load_acquire
for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
if (k->is_instance_klass()) {
f(InstanceKlass::cast(k));
}
@ -465,7 +465,7 @@ void ClassLoaderData::add_class(Klass* k, bool publicize /* true */) {
k->set_next_link(old_value);
// Link the new item into the list, making sure the linked class is stable
// since the list can be walked without a lock
OrderAccess::release_store(&_klasses, k);
Atomic::release_store(&_klasses, k);
if (k->is_array_klass()) {
ClassLoaderDataGraph::inc_array_classes(1);
} else {
@ -552,7 +552,7 @@ void ClassLoaderData::unload() {
ModuleEntryTable* ClassLoaderData::modules() {
// Lazily create the module entry table at first request.
// Lock-free access requires load_acquire.
ModuleEntryTable* modules = OrderAccess::load_acquire(&_modules);
ModuleEntryTable* modules = Atomic::load_acquire(&_modules);
if (modules == NULL) {
MutexLocker m1(Module_lock);
// Check if _modules got allocated while we were waiting for this lock.
@ -562,7 +562,7 @@ ModuleEntryTable* ClassLoaderData::modules() {
{
MutexLocker m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
// Ensure _modules is stable, since it is examined without a lock
OrderAccess::release_store(&_modules, modules);
Atomic::release_store(&_modules, modules);
}
}
}
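modules() above (like metaspace_non_null() further down) is the mutex-guarded double-checked pattern: acquire-load for the lock-free fast path, re-check under the lock, then release-store so that lock-free readers only ever observe a fully constructed table. A condensed sketch with hypothetical names (assumes runtime/atomic.hpp and runtime/mutexLocker.hpp):

struct Table { int data; };
static Table* volatile _table = NULL;

static Table* get_or_create_table(Mutex* lock) {
  Table* t = Atomic::load_acquire(&_table);   // lock-free fast path
  if (t == NULL) {
    MutexLocker ml(lock);
    t = _table;                               // re-check: another thread may have created it
    if (t == NULL) {
      t = new Table();
      Atomic::release_store(&_table, t);      // publish only after construction completes
    }
  }
  return t;
}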
@ -752,7 +752,7 @@ ClassLoaderMetaspace* ClassLoaderData::metaspace_non_null() {
// The reason for the delayed allocation is because some class loaders are
// simply for delegating with no metadata of their own.
// Lock-free access requires load_acquire.
ClassLoaderMetaspace* metaspace = OrderAccess::load_acquire(&_metaspace);
ClassLoaderMetaspace* metaspace = Atomic::load_acquire(&_metaspace);
if (metaspace == NULL) {
MutexLocker ml(_metaspace_lock, Mutex::_no_safepoint_check_flag);
// Check if _metaspace got allocated while we were waiting for this lock.
@ -768,7 +768,7 @@ ClassLoaderMetaspace* ClassLoaderData::metaspace_non_null() {
metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::StandardMetaspaceType);
}
// Ensure _metaspace is stable, since it is examined without a lock
OrderAccess::release_store(&_metaspace, metaspace);
Atomic::release_store(&_metaspace, metaspace);
}
}
return metaspace;
@ -969,7 +969,7 @@ void ClassLoaderData::verify() {
bool ClassLoaderData::contains_klass(Klass* klass) {
// Lock-free access requires load_acquire
for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
if (k == klass) return true;
}
return false;

View File

@ -59,13 +59,13 @@ void ClassLoaderDataGraph::clear_claimed_marks() {
//
// Any ClassLoaderData added after or during walking the list are prepended to
// _head. Their claim mark need not be handled here.
for (ClassLoaderData* cld = OrderAccess::load_acquire(&_head); cld != NULL; cld = cld->next()) {
for (ClassLoaderData* cld = Atomic::load_acquire(&_head); cld != NULL; cld = cld->next()) {
cld->clear_claim();
}
}
void ClassLoaderDataGraph::clear_claimed_marks(int claim) {
for (ClassLoaderData* cld = OrderAccess::load_acquire(&_head); cld != NULL; cld = cld->next()) {
for (ClassLoaderData* cld = Atomic::load_acquire(&_head); cld != NULL; cld = cld->next()) {
cld->clear_claim(claim);
}
}
@ -220,7 +220,7 @@ ClassLoaderData* ClassLoaderDataGraph::add_to_graph(Handle loader, bool is_unsaf
// First install the new CLD to the Graph.
cld->set_next(_head);
OrderAccess::release_store(&_head, cld);
Atomic::release_store(&_head, cld);
// Next associate with the class_loader.
if (!is_unsafe_anonymous) {

View File

@ -113,7 +113,7 @@ void CompiledMethod::mark_for_deoptimization(bool inc_recompile_counts) {
//-----------------------------------------------------------------------------
ExceptionCache* CompiledMethod::exception_cache_acquire() const {
return OrderAccess::load_acquire(&_exception_cache);
return Atomic::load_acquire(&_exception_cache);
}
void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {

View File

@ -61,7 +61,7 @@ inline address CompiledMethod::get_deopt_original_pc(const frame* fr) {
// class ExceptionCache methods
inline int ExceptionCache::count() { return OrderAccess::load_acquire(&_count); }
inline int ExceptionCache::count() { return Atomic::load_acquire(&_count); }
address ExceptionCache::pc_at(int index) {
assert(index >= 0 && index < count(),"");
@ -74,7 +74,7 @@ address ExceptionCache::handler_at(int index) {
}
// increment_count is only called under lock, but there may be concurrent readers.
inline void ExceptionCache::increment_count() { OrderAccess::release_store(&_count, _count + 1); }
inline void ExceptionCache::increment_count() { Atomic::release_store(&_count, _count + 1); }
#endif // SHARE_CODE_COMPILEDMETHOD_INLINE_HPP

View File

@ -281,7 +281,7 @@ bool DependencyContext::claim_cleanup() {
nmethodBucket* DependencyContext::dependencies_not_unloading() {
for (;;) {
// Need acquire becase the read value could come from a concurrent insert.
nmethodBucket* head = OrderAccess::load_acquire(_dependency_context_addr);
nmethodBucket* head = Atomic::load_acquire(_dependency_context_addr);
if (head == NULL || !head->get_nmethod()->is_unloading()) {
return head;
}

View File

@ -158,13 +158,13 @@ G1CodeRootSet::~G1CodeRootSet() {
}
G1CodeRootSetTable* G1CodeRootSet::load_acquire_table() {
return OrderAccess::load_acquire(&_table);
return Atomic::load_acquire(&_table);
}
void G1CodeRootSet::allocate_small_table() {
G1CodeRootSetTable* temp = new G1CodeRootSetTable(SmallSize);
OrderAccess::release_store(&_table, temp);
Atomic::release_store(&_table, temp);
}
void G1CodeRootSetTable::purge_list_append(G1CodeRootSetTable* table) {
@ -194,7 +194,7 @@ void G1CodeRootSet::move_to_large() {
G1CodeRootSetTable::purge_list_append(_table);
OrderAccess::release_store(&_table, temp);
Atomic::release_store(&_table, temp);
}
void G1CodeRootSet::purge() {

View File

@ -219,7 +219,7 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
// some mark bits may not yet seem cleared or a 'later' update
// performed by a concurrent thread could be undone when the
// zeroing becomes visible). This requires store ordering.
OrderAccess::release_store(&_fine_grain_regions[ind], prt);
Atomic::release_store(&_fine_grain_regions[ind], prt);
_n_fine_entries++;
// Transfer from sparse to fine-grain.

View File

@ -190,7 +190,7 @@ public:
// We need access in order to union things into the base table.
BitMap* bm() { return &_bm; }
HeapRegion* hr() const { return OrderAccess::load_acquire(&_hr); }
HeapRegion* hr() const { return Atomic::load_acquire(&_hr); }
jint occupied() const {
// Overkill, but if we ever need it...

View File

@ -65,7 +65,7 @@ inline void PerRegionTable::init(HeapRegion* hr, bool clear_links_to_all_list) {
_bm.clear();
// Make sure that the bitmap clearing above has been finished before publishing
// this PRT to concurrent threads.
OrderAccess::release_store(&_hr, hr);
Atomic::release_store(&_hr, hr);
}
template <class Closure>

View File

@ -27,14 +27,14 @@
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/atomic.hpp"
template <DecoratorSet decorators, typename T>
inline void CardTableBarrierSet::write_ref_field_post(T* field, oop newVal) {
volatile CardValue* byte = _card_table->byte_for(field);
if (_card_table->scanned_concurrently()) {
// Perform a releasing store if the card table is scanned concurrently
OrderAccess::release_store(byte, CardTable::dirty_card_val());
Atomic::release_store(byte, CardTable::dirty_card_val());
} else {
*byte = CardTable::dirty_card_val();
}

View File

@ -51,7 +51,7 @@ void ConcurrentGCThread::run() {
// Signal thread has terminated
MonitorLocker ml(Terminator_lock);
OrderAccess::release_store(&_has_terminated, true);
Atomic::release_store(&_has_terminated, true);
ml.notify_all();
}
@ -60,7 +60,7 @@ void ConcurrentGCThread::stop() {
assert(!has_terminated(), "Invalid state");
// Signal thread to terminate
OrderAccess::release_store_fence(&_should_terminate, true);
Atomic::release_store_fence(&_should_terminate, true);
stop_service();
@ -72,9 +72,9 @@ void ConcurrentGCThread::stop() {
}
bool ConcurrentGCThread::should_terminate() const {
return OrderAccess::load_acquire(&_should_terminate);
return Atomic::load_acquire(&_should_terminate);
}
bool ConcurrentGCThread::has_terminated() const {
return OrderAccess::load_acquire(&_has_terminated);
return Atomic::load_acquire(&_has_terminated);
}
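The ConcurrentGCThread hunks combine the moved primitives into a shutdown handshake: the requester fence-stores the request flag, the worker polls it with acquire loads, and termination is release-stored under the monitor before the notify. A condensed sketch of that shape, hypothetical names (assumes runtime/atomic.hpp and runtime/mutexLocker.hpp):

static volatile bool _should_terminate = false;
static volatile bool _has_terminated   = false;

static void request_stop(Monitor* terminator_lock) {
  Atomic::release_store_fence(&_should_terminate, true);     // make the request visible before waiting
  MonitorLocker ml(terminator_lock);
  while (!Atomic::load_acquire(&_has_terminated)) {
    ml.wait();
  }
}

static void worker_exit(Monitor* terminator_lock) {
  // The worker's service loop polls Atomic::load_acquire(&_should_terminate) and then calls this.
  MonitorLocker ml(terminator_lock);
  Atomic::release_store(&_has_terminated, true);              // publish termination before waking the requester
  ml.notify_all();
}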

View File

@ -140,7 +140,7 @@ size_t OopStorage::ActiveArray::block_count() const {
}
size_t OopStorage::ActiveArray::block_count_acquire() const {
return OrderAccess::load_acquire(&_block_count);
return Atomic::load_acquire(&_block_count);
}
void OopStorage::ActiveArray::increment_refcount() const {
@ -161,7 +161,7 @@ bool OopStorage::ActiveArray::push(Block* block) {
*block_ptr(index) = block;
// Use a release_store to ensure all the setup is complete before
// making the block visible.
OrderAccess::release_store(&_block_count, index + 1);
Atomic::release_store(&_block_count, index + 1);
return true;
} else {
return false;
@ -264,8 +264,8 @@ uintx OopStorage::Block::bitmask_for_entry(const oop* ptr) const {
bool OopStorage::Block::is_safe_to_delete() const {
assert(is_empty(), "precondition");
OrderAccess::loadload();
return (OrderAccess::load_acquire(&_release_refcount) == 0) &&
(OrderAccess::load_acquire(&_deferred_updates_next) == NULL);
return (Atomic::load_acquire(&_release_refcount) == 0) &&
(Atomic::load_acquire(&_deferred_updates_next) == NULL);
}
OopStorage::Block* OopStorage::Block::deferred_updates_next() const {
@ -514,7 +514,7 @@ void OopStorage::replace_active_array(ActiveArray* new_array) {
// Update new_array refcount to account for the new reference.
new_array->increment_refcount();
// Install new_array, ensuring its initialization is complete first.
OrderAccess::release_store(&_active_array, new_array);
Atomic::release_store(&_active_array, new_array);
// Wait for any readers that could read the old array from _active_array.
// Can't use GlobalCounter here, because this is called from allocate(),
// which may be called in the scope of a GlobalCounter critical section
@ -532,7 +532,7 @@ void OopStorage::replace_active_array(ActiveArray* new_array) {
// using it.
OopStorage::ActiveArray* OopStorage::obtain_active_array() const {
SingleWriterSynchronizer::CriticalSection cs(&_protect_active);
ActiveArray* result = OrderAccess::load_acquire(&_active_array);
ActiveArray* result = Atomic::load_acquire(&_active_array);
result->increment_refcount();
return result;
}
@ -645,7 +645,7 @@ bool OopStorage::reduce_deferred_updates() {
// Atomically pop a block off the list, if any available.
// No ABA issue because this is only called by one thread at a time.
// The atomicity is wrto pushes by release().
Block* block = OrderAccess::load_acquire(&_deferred_updates);
Block* block = Atomic::load_acquire(&_deferred_updates);
while (true) {
if (block == NULL) return false;
// Try atomic pop of block from list.
@ -833,23 +833,23 @@ bool OopStorage::has_cleanup_work_and_reset() {
void OopStorage::record_needs_cleanup() {
// Set local flag first, else service thread could wake up and miss
// the request. This order may instead (rarely) unnecessarily notify.
OrderAccess::release_store(&_needs_cleanup, true);
OrderAccess::release_store_fence(&needs_cleanup_requested, true);
Atomic::release_store(&_needs_cleanup, true);
Atomic::release_store_fence(&needs_cleanup_requested, true);
}
bool OopStorage::delete_empty_blocks() {
// Service thread might have oopstorage work, but not for this object.
// Check for deferred updates even though that's not a service thread
// trigger; since we're here, we might as well process them.
if (!OrderAccess::load_acquire(&_needs_cleanup) &&
(OrderAccess::load_acquire(&_deferred_updates) == NULL)) {
if (!Atomic::load_acquire(&_needs_cleanup) &&
(Atomic::load_acquire(&_deferred_updates) == NULL)) {
return false;
}
MutexLocker ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);
// Clear the request before processing.
OrderAccess::release_store_fence(&_needs_cleanup, false);
Atomic::release_store_fence(&_needs_cleanup, false);
// Other threads could be adding to the empty block count or the
// deferred update list while we're working. Set an upper bound on
@ -993,7 +993,7 @@ void OopStorage::BasicParState::update_concurrent_iteration_count(int value) {
bool OopStorage::BasicParState::claim_next_segment(IterationData* data) {
data->_processed += data->_segment_end - data->_segment_start;
size_t start = OrderAccess::load_acquire(&_next_block);
size_t start = Atomic::load_acquire(&_next_block);
if (start >= _block_count) {
return finish_iteration(data); // No more blocks available.
}

View File

@ -224,7 +224,7 @@ bool BufferNode::Allocator::try_transfer_pending() {
log_trace(gc, ptrqueue, freelist)
("Transferred %s pending to free: " SIZE_FORMAT, name(), count);
}
OrderAccess::release_store(&_transfer_lock, false);
Atomic::release_store(&_transfer_lock, false);
return true;
}

View File

@ -68,7 +68,7 @@ bool GenericTaskQueue<E, F, N>::push_slow(E t, uint dirty_n_elems) {
// assignment. However, casting to E& means that we trigger an
// unused-value warning. So, we cast the E& to void.
(void)const_cast<E&>(_elems[localBot] = t);
OrderAccess::release_store(&_bottom, increment_index(localBot));
Atomic::release_store(&_bottom, increment_index(localBot));
TASKQUEUE_STATS_ONLY(stats.record_push());
return true;
}
@ -89,7 +89,7 @@ GenericTaskQueue<E, F, N>::push(E t) {
// assignment. However, casting to E& means that we trigger an
// unused-value warning. So, we cast the E& to void.
(void) const_cast<E&>(_elems[localBot] = t);
OrderAccess::release_store(&_bottom, increment_index(localBot));
Atomic::release_store(&_bottom, increment_index(localBot));
TASKQUEUE_STATS_ONLY(stats.record_push());
return true;
} else {
@ -210,7 +210,7 @@ bool GenericTaskQueue<E, F, N>::pop_global(volatile E& t) {
#ifndef CPU_MULTI_COPY_ATOMIC
OrderAccess::fence();
#endif
uint localBot = OrderAccess::load_acquire(&_bottom);
uint localBot = Atomic::load_acquire(&_bottom);
uint n_elems = size(localBot, oldAge.top());
if (n_elems == 0) {
return false;

View File

@ -38,7 +38,7 @@ ShenandoahEvacOOMHandler::ShenandoahEvacOOMHandler() :
}
void ShenandoahEvacOOMHandler::wait_for_no_evac_threads() {
while ((OrderAccess::load_acquire(&_threads_in_evac) & ~OOM_MARKER_MASK) != 0) {
while ((Atomic::load_acquire(&_threads_in_evac) & ~OOM_MARKER_MASK) != 0) {
os::naked_short_sleep(1);
}
// At this point we are sure that no threads can evacuate anything. Raise
@ -48,7 +48,7 @@ void ShenandoahEvacOOMHandler::wait_for_no_evac_threads() {
}
void ShenandoahEvacOOMHandler::enter_evacuation() {
jint threads_in_evac = OrderAccess::load_acquire(&_threads_in_evac);
jint threads_in_evac = Atomic::load_acquire(&_threads_in_evac);
assert(!ShenandoahThreadLocalData::is_evac_allowed(Thread::current()), "sanity");
assert(!ShenandoahThreadLocalData::is_oom_during_evac(Thread::current()), "TL oom-during-evac must not be set");
@ -79,7 +79,7 @@ void ShenandoahEvacOOMHandler::enter_evacuation() {
void ShenandoahEvacOOMHandler::leave_evacuation() {
if (!ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
assert((OrderAccess::load_acquire(&_threads_in_evac) & ~OOM_MARKER_MASK) > 0, "sanity");
assert((Atomic::load_acquire(&_threads_in_evac) & ~OOM_MARKER_MASK) > 0, "sanity");
// NOTE: It's ok to simply decrement, even with mask set, because unmasked value is positive.
Atomic::dec(&_threads_in_evac);
} else {
@ -96,7 +96,7 @@ void ShenandoahEvacOOMHandler::handle_out_of_memory_during_evacuation() {
assert(ShenandoahThreadLocalData::is_evac_allowed(Thread::current()), "sanity");
assert(!ShenandoahThreadLocalData::is_oom_during_evac(Thread::current()), "TL oom-during-evac must not be set");
jint threads_in_evac = OrderAccess::load_acquire(&_threads_in_evac);
jint threads_in_evac = Atomic::load_acquire(&_threads_in_evac);
while (true) {
jint other = Atomic::cmpxchg((threads_in_evac - 1) | OOM_MARKER_MASK,
&_threads_in_evac, threads_in_evac);
@ -113,8 +113,8 @@ void ShenandoahEvacOOMHandler::handle_out_of_memory_during_evacuation() {
void ShenandoahEvacOOMHandler::clear() {
assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
assert((OrderAccess::load_acquire(&_threads_in_evac) & ~OOM_MARKER_MASK) == 0, "sanity");
OrderAccess::release_store_fence<jint>(&_threads_in_evac, 0);
assert((Atomic::load_acquire(&_threads_in_evac) & ~OOM_MARKER_MASK) == 0, "sanity");
Atomic::release_store_fence<jint>(&_threads_in_evac, 0);
}
ShenandoahEvacOOMScope::ShenandoahEvacOOMScope() {

View File

@ -601,7 +601,7 @@ void ShenandoahHeap::post_initialize() {
}
size_t ShenandoahHeap::used() const {
return OrderAccess::load_acquire(&_used);
return Atomic::load_acquire(&_used);
}
size_t ShenandoahHeap::committed() const {
@ -624,7 +624,7 @@ void ShenandoahHeap::increase_used(size_t bytes) {
}
void ShenandoahHeap::set_used(size_t bytes) {
OrderAccess::release_store_fence(&_used, bytes);
Atomic::release_store_fence(&_used, bytes);
}
void ShenandoahHeap::decrease_used(size_t bytes) {
@ -2114,11 +2114,11 @@ address ShenandoahHeap::gc_state_addr() {
}
size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
return OrderAccess::load_acquire(&_bytes_allocated_since_gc_start);
return Atomic::load_acquire(&_bytes_allocated_since_gc_start);
}
void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
OrderAccess::release_store_fence(&_bytes_allocated_since_gc_start, (size_t)0);
Atomic::release_store_fence(&_bytes_allocated_since_gc_start, (size_t)0);
}
void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {

View File

@ -305,7 +305,7 @@ void ShenandoahHeapRegion::make_committed_bypass() {
}
void ShenandoahHeapRegion::clear_live_data() {
OrderAccess::release_store_fence<size_t>(&_live_data, 0);
Atomic::release_store_fence<size_t>(&_live_data, 0);
}
void ShenandoahHeapRegion::reset_alloc_metadata() {
@ -351,7 +351,7 @@ void ShenandoahHeapRegion::set_live_data(size_t s) {
}
size_t ShenandoahHeapRegion::get_live_data_words() const {
return OrderAccess::load_acquire(&_live_data);
return Atomic::load_acquire(&_live_data);
}
size_t ShenandoahHeapRegion::get_live_data_bytes() const {

View File

@ -47,19 +47,19 @@ typedef struct ShenandoahSharedFlag {
}
void set() {
OrderAccess::release_store_fence(&value, (ShenandoahSharedValue)SET);
Atomic::release_store_fence(&value, (ShenandoahSharedValue)SET);
}
void unset() {
OrderAccess::release_store_fence(&value, (ShenandoahSharedValue)UNSET);
Atomic::release_store_fence(&value, (ShenandoahSharedValue)UNSET);
}
bool is_set() const {
return OrderAccess::load_acquire(&value) == SET;
return Atomic::load_acquire(&value) == SET;
}
bool is_unset() const {
return OrderAccess::load_acquire(&value) == UNSET;
return Atomic::load_acquire(&value) == UNSET;
}
void set_cond(bool val) {
@ -118,7 +118,7 @@ typedef struct ShenandoahSharedBitmap {
assert (mask < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity");
ShenandoahSharedValue mask_val = (ShenandoahSharedValue) mask;
while (true) {
ShenandoahSharedValue ov = OrderAccess::load_acquire(&value);
ShenandoahSharedValue ov = Atomic::load_acquire(&value);
if ((ov & mask_val) != 0) {
// already set
return;
@ -136,7 +136,7 @@ typedef struct ShenandoahSharedBitmap {
assert (mask < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity");
ShenandoahSharedValue mask_val = (ShenandoahSharedValue) mask;
while (true) {
ShenandoahSharedValue ov = OrderAccess::load_acquire(&value);
ShenandoahSharedValue ov = Atomic::load_acquire(&value);
if ((ov & mask_val) == 0) {
// already unset
return;
@ -151,7 +151,7 @@ typedef struct ShenandoahSharedBitmap {
}
void clear() {
OrderAccess::release_store_fence(&value, (ShenandoahSharedValue)0);
Atomic::release_store_fence(&value, (ShenandoahSharedValue)0);
}
bool is_set(uint mask) const {
@ -160,11 +160,11 @@ typedef struct ShenandoahSharedBitmap {
bool is_unset(uint mask) const {
assert (mask < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity");
return (OrderAccess::load_acquire(&value) & (ShenandoahSharedValue) mask) == 0;
return (Atomic::load_acquire(&value) & (ShenandoahSharedValue) mask) == 0;
}
bool is_clear() const {
return (OrderAccess::load_acquire(&value)) == 0;
return (Atomic::load_acquire(&value)) == 0;
}
void set_cond(uint mask, bool val) {
@ -211,11 +211,11 @@ struct ShenandoahSharedEnumFlag {
void set(T v) {
assert (v >= 0, "sanity");
assert (v < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity");
OrderAccess::release_store_fence(&value, (ShenandoahSharedValue)v);
Atomic::release_store_fence(&value, (ShenandoahSharedValue)v);
}
T get() const {
return (T)OrderAccess::load_acquire(&value);
return (T)Atomic::load_acquire(&value);
}
T cmpxchg(T new_value, T expected) {

View File

@ -756,12 +756,12 @@ void ShenandoahVerifier::verify_at_safepoint(const char *label,
if (r->is_humongous()) {
// For humongous objects, test if start region is marked live, and if so,
// all humongous regions in that chain have live data equal to their "used".
juint start_live = OrderAccess::load_acquire(&ld[r->humongous_start_region()->region_number()]);
juint start_live = Atomic::load_acquire(&ld[r->humongous_start_region()->region_number()]);
if (start_live > 0) {
verf_live = (juint)(r->used() / HeapWordSize);
}
} else {
verf_live = OrderAccess::load_acquire(&ld[r->region_number()]);
verf_live = Atomic::load_acquire(&ld[r->region_number()]);
}
size_t reg_live = r->get_live_data_words();

View File

@ -54,9 +54,9 @@ void ZLiveMap::reset(size_t index) {
// Multiple threads can enter here, make sure only one of them
// resets the marking information while the others busy wait.
for (uint32_t seqnum = OrderAccess::load_acquire(&_seqnum);
for (uint32_t seqnum = Atomic::load_acquire(&_seqnum);
seqnum != ZGlobalSeqNum;
seqnum = OrderAccess::load_acquire(&_seqnum)) {
seqnum = Atomic::load_acquire(&_seqnum)) {
if ((seqnum != seqnum_initializing) &&
(Atomic::cmpxchg(seqnum_initializing, &_seqnum, seqnum) == seqnum)) {
// Reset marking information
@ -73,7 +73,7 @@ void ZLiveMap::reset(size_t index) {
// before the update of the page seqnum, such that when the
// up-to-date seqnum is load acquired, the bit maps will not
// contain stale information.
OrderAccess::release_store(&_seqnum, ZGlobalSeqNum);
Atomic::release_store(&_seqnum, ZGlobalSeqNum);
break;
}

View File

@ -39,7 +39,7 @@ inline void ZLiveMap::reset() {
}
inline bool ZLiveMap::is_marked() const {
return OrderAccess::load_acquire(&_seqnum) == ZGlobalSeqNum;
return Atomic::load_acquire(&_seqnum) == ZGlobalSeqNum;
}
inline uint32_t ZLiveMap::live_objects() const {

View File

@ -78,7 +78,7 @@ ZReentrantLock* ZNMethodData::lock() {
}
ZNMethodDataOops* ZNMethodData::oops() const {
return OrderAccess::load_acquire(&_oops);
return Atomic::load_acquire(&_oops);
}
ZNMethodDataOops* ZNMethodData::swap_oops(ZNMethodDataOops* new_oops) {

View File

@ -82,7 +82,7 @@ uintptr_t ZObjectAllocator::alloc_object_in_shared_page(ZPage** shared_page,
size_t size,
ZAllocationFlags flags) {
uintptr_t addr = 0;
ZPage* page = OrderAccess::load_acquire(shared_page);
ZPage* page = Atomic::load_acquire(shared_page);
if (page != NULL) {
addr = page->alloc_object_atomic(size);
@ -304,7 +304,7 @@ size_t ZObjectAllocator::used() const {
size_t ZObjectAllocator::remaining() const {
assert(ZThread::is_java(), "Should be a Java thread");
const ZPage* const page = OrderAccess::load_acquire(shared_small_page_addr());
const ZPage* const page = Atomic::load_acquire(shared_small_page_addr());
if (page != NULL) {
return page->remaining();
}

View File

@ -448,7 +448,7 @@ OopMapCache::~OopMapCache() {
}
OopMapCacheEntry* OopMapCache::entry_at(int i) const {
return OrderAccess::load_acquire(&(_array[i % _size]));
return Atomic::load_acquire(&(_array[i % _size]));
}
bool OopMapCache::put_at(int i, OopMapCacheEntry* entry, OopMapCacheEntry* old) {

View File

@ -27,7 +27,7 @@
#include "jfr/utilities/jfrTypes.hpp"
#include "memory/allocation.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/atomic.hpp"
#define USED_BIT 1
#define METHOD_USED_BIT (USED_BIT << 2)
@ -91,16 +91,16 @@ class JfrTraceIdEpoch : AllStatic {
}
static bool has_changed_tag_state() {
if (OrderAccess::load_acquire(&_tag_state)) {
OrderAccess::release_store(&_tag_state, false);
if (Atomic::load_acquire(&_tag_state)) {
Atomic::release_store(&_tag_state, false);
return true;
}
return false;
}
static void set_changed_tag_state() {
if (!OrderAccess::load_acquire(&_tag_state)) {
OrderAccess::release_store(&_tag_state, true);
if (!Atomic::load_acquire(&_tag_state)) {
Atomic::release_store(&_tag_state, true);
}
}
};

View File

@ -46,19 +46,19 @@ static uint64_t serialized_generation = 0;
inline void set_generation(uint64_t value, uint64_t* const dest) {
assert(dest != NULL, "invariant");
OrderAccess::release_store(dest, value);
Atomic::release_store(dest, value);
}
static void increment_store_generation() {
const uint64_t current_serialized = OrderAccess::load_acquire(&serialized_generation);
const uint64_t current_stored = OrderAccess::load_acquire(&store_generation);
const uint64_t current_serialized = Atomic::load_acquire(&serialized_generation);
const uint64_t current_stored = Atomic::load_acquire(&store_generation);
if (current_serialized == current_stored) {
set_generation(current_serialized + 1, &store_generation);
}
}
static bool increment_serialized_generation() {
const uint64_t current_stored = OrderAccess::load_acquire(&store_generation);
const uint64_t current_serialized = OrderAccess::load_acquire(&serialized_generation);
const uint64_t current_stored = Atomic::load_acquire(&store_generation);
const uint64_t current_serialized = Atomic::load_acquire(&serialized_generation);
if (current_stored != current_serialized) {
set_generation(current_stored, &serialized_generation);
return true;

View File

@ -58,9 +58,9 @@ class JfrHashtableBucket : public CHeapObj<mtTracing> {
TableEntry* _entry;
TableEntry* get_entry() const {
return (TableEntry*)OrderAccess::load_acquire(&_entry);
return (TableEntry*)Atomic::load_acquire(&_entry);
}
void set_entry(TableEntry* entry) { OrderAccess::release_store(&_entry, entry);}
void set_entry(TableEntry* entry) { Atomic::release_store(&_entry, entry);}
TableEntry** entry_addr() { return &_entry; }
};

View File

@ -44,7 +44,7 @@ void LogDecorations::initialize(jlong vm_start_time) {
}
const char* LogDecorations::host_name() {
const char* host_name = OrderAccess::load_acquire(&_host_name);
const char* host_name = Atomic::load_acquire(&_host_name);
if (host_name == NULL) {
char buffer[1024];
if (os::get_host_name(buffer, sizeof(buffer))) {

View File

@ -128,7 +128,7 @@ size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
}
size_t MetaspaceGC::capacity_until_GC() {
size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
size_t value = Atomic::load_acquire(&_capacity_until_GC);
assert(value >= MetaspaceSize, "Not initialized properly?");
return value;
}

View File

@ -134,7 +134,7 @@ RawAccessBarrier<decorators>::load_internal(void* addr) {
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
OrderAccess::fence();
}
return OrderAccess::load_acquire(reinterpret_cast<const volatile T*>(addr));
return Atomic::load_acquire(reinterpret_cast<const volatile T*>(addr));
}
template <DecoratorSet decorators>
@ -142,7 +142,7 @@ template <DecoratorSet ds, typename T>
inline typename EnableIf<
HasDecorator<ds, MO_ACQUIRE>::value, T>::type
RawAccessBarrier<decorators>::load_internal(void* addr) {
return OrderAccess::load_acquire(reinterpret_cast<const volatile T*>(addr));
return Atomic::load_acquire(reinterpret_cast<const volatile T*>(addr));
}
template <DecoratorSet decorators>
@ -158,7 +158,7 @@ template <DecoratorSet ds, typename T>
inline typename EnableIf<
HasDecorator<ds, MO_SEQ_CST>::value>::type
RawAccessBarrier<decorators>::store_internal(void* addr, T value) {
OrderAccess::release_store_fence(reinterpret_cast<volatile T*>(addr), value);
Atomic::release_store_fence(reinterpret_cast<volatile T*>(addr), value);
}
template <DecoratorSet decorators>
@ -166,7 +166,7 @@ template <DecoratorSet ds, typename T>
inline typename EnableIf<
HasDecorator<ds, MO_RELEASE>::value>::type
RawAccessBarrier<decorators>::store_internal(void* addr, T value) {
OrderAccess::release_store(reinterpret_cast<volatile T*>(addr), value);
Atomic::release_store(reinterpret_cast<volatile T*>(addr), value);
}
template <DecoratorSet decorators>

View File

@ -27,7 +27,7 @@
#include "memory/allocation.hpp"
#include "memory/metaspace.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/atomic.hpp"
#include "utilities/align.hpp"
// Array for metadata allocation
@ -122,8 +122,8 @@ protected:
T* adr_at(const int i) { assert(i >= 0 && i< _length, "oob: 0 <= %d < %d", i, _length); return &_data[i]; }
int find(const T& x) { return index_of(x); }
T at_acquire(const int i) { return OrderAccess::load_acquire(adr_at(i)); }
void release_at_put(int i, T x) { OrderAccess::release_store(adr_at(i), x); }
T at_acquire(const int i) { return Atomic::load_acquire(adr_at(i)); }
void release_at_put(int i, T x) { Atomic::release_store(adr_at(i), x); }
static int size(int length) {
size_t bytes = align_up(byte_sizeof(length), BytesPerWord);

View File

@ -29,11 +29,11 @@
#include "oops/arrayKlass.hpp"
inline Klass* ArrayKlass::higher_dimension_acquire() const {
return OrderAccess::load_acquire(&_higher_dimension);
return Atomic::load_acquire(&_higher_dimension);
}
inline void ArrayKlass::release_set_higher_dimension(Klass* k) {
OrderAccess::release_store(&_higher_dimension, k);
Atomic::release_store(&_higher_dimension, k);
}
#endif // SHARE_OOPS_ARRAYKLASS_INLINE_HPP

View File

@ -232,7 +232,7 @@ void ConstantPool::klass_at_put(int class_index, int name_index, int resolved_kl
symbol_at_put(name_index, name);
name->increment_refcount();
Klass** adr = resolved_klasses()->adr_at(resolved_klass_index);
OrderAccess::release_store(adr, k);
Atomic::release_store(adr, k);
// The interpreter assumes when the tag is stored, the klass is resolved
// and the Klass* non-NULL, so we need hardware store ordering here.
@ -249,7 +249,7 @@ void ConstantPool::klass_at_put(int class_index, Klass* k) {
CPKlassSlot kslot = klass_slot_at(class_index);
int resolved_klass_index = kslot.resolved_klass_index();
Klass** adr = resolved_klasses()->adr_at(resolved_klass_index);
OrderAccess::release_store(adr, k);
Atomic::release_store(adr, k);
// The interpreter assumes when the tag is stored, the klass is resolved
// and the Klass* non-NULL, so we need hardware store ordering here.
@ -525,7 +525,7 @@ Klass* ConstantPool::klass_at_impl(const constantPoolHandle& this_cp, int which,
trace_class_resolution(this_cp, k);
}
Klass** adr = this_cp->resolved_klasses()->adr_at(resolved_klass_index);
OrderAccess::release_store(adr, k);
Atomic::release_store(adr, k);
// The interpreter assumes when the tag is stored, the klass is resolved
// and the Klass* stored in _resolved_klasses is non-NULL, so we need
// hardware store ordering here.

View File

@ -33,7 +33,7 @@ inline CPSlot ConstantPool::slot_at(int which) const {
assert(is_within_bounds(which), "index out of bounds");
assert(!tag_at(which).is_unresolved_klass() && !tag_at(which).is_unresolved_klass_in_error(), "Corrupted constant pool");
// Uses volatile because the klass slot changes without a lock.
intptr_t adr = OrderAccess::load_acquire(obj_at_addr(which));
intptr_t adr = Atomic::load_acquire(obj_at_addr(which));
assert(adr != 0 || which == 0, "cp entry for klass should not be zero");
return CPSlot(adr);
}
@ -46,7 +46,7 @@ inline Klass* ConstantPool::resolved_klass_at(int which) const { // Used by Com
assert(tag_at(kslot.name_index()).is_symbol(), "sanity");
Klass** adr = resolved_klasses()->adr_at(kslot.resolved_klass_index());
return OrderAccess::load_acquire(adr);
return Atomic::load_acquire(adr);
}
inline bool ConstantPool::is_pseudo_string_at(int which) {

View File

@ -97,7 +97,7 @@ void ConstantPoolCacheEntry::set_bytecode_1(Bytecodes::Code code) {
assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
// Need to flush pending stores here before bytecode is written.
OrderAccess::release_store(&_indices, _indices | ((u_char)code << bytecode_1_shift));
Atomic::release_store(&_indices, _indices | ((u_char)code << bytecode_1_shift));
}
void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) {
@ -107,17 +107,17 @@ void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) {
assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
// Need to flush pending stores here before bytecode is written.
OrderAccess::release_store(&_indices, _indices | ((u_char)code << bytecode_2_shift));
Atomic::release_store(&_indices, _indices | ((u_char)code << bytecode_2_shift));
}
// Sets f1, ordering with previous writes.
void ConstantPoolCacheEntry::release_set_f1(Metadata* f1) {
assert(f1 != NULL, "");
OrderAccess::release_store(&_f1, f1);
Atomic::release_store(&_f1, f1);
}
void ConstantPoolCacheEntry::set_indy_resolution_failed() {
OrderAccess::release_store(&_flags, _flags | (1 << indy_resolution_failed_shift));
Atomic::release_store(&_flags, _flags | (1 << indy_resolution_failed_shift));
}
// Note that concurrent update of both bytecodes can leave one of them

View File

@ -29,7 +29,7 @@
#include "oops/oopHandle.inline.hpp"
#include "runtime/orderAccess.hpp"
inline int ConstantPoolCacheEntry::indices_ord() const { return OrderAccess::load_acquire(&_indices); }
inline int ConstantPoolCacheEntry::indices_ord() const { return Atomic::load_acquire(&_indices); }
inline Bytecodes::Code ConstantPoolCacheEntry::bytecode_1() const {
return Bytecodes::cast((indices_ord() >> bytecode_1_shift) & bytecode_1_mask);
@ -53,7 +53,7 @@ inline Method* ConstantPoolCacheEntry::f2_as_interface_method() const {
return (Method*)_f2;
}
inline Metadata* ConstantPoolCacheEntry::f1_ord() const { return (Metadata *)OrderAccess::load_acquire(&_f1); }
inline Metadata* ConstantPoolCacheEntry::f1_ord() const { return (Metadata *)Atomic::load_acquire(&_f1); }
inline Method* ConstantPoolCacheEntry::f1_as_method() const {
Metadata* f1 = f1_ord(); assert(f1 == NULL || f1->is_method(), "");
@ -75,7 +75,7 @@ inline bool ConstantPoolCacheEntry::has_local_signature() const {
return (!is_f1_null()) && (_flags & (1 << has_local_signature_shift)) != 0;
}
inline intx ConstantPoolCacheEntry::flags_ord() const { return (intx)OrderAccess::load_acquire(&_flags); }
inline intx ConstantPoolCacheEntry::flags_ord() const { return (intx)Atomic::load_acquire(&_flags); }
inline bool ConstantPoolCacheEntry::indy_resolution_failed() const {
intx flags = flags_ord();

View File

@ -1097,7 +1097,7 @@ Klass* InstanceKlass::implementor() const {
return NULL;
} else {
// This load races with inserts, and therefore needs acquire.
Klass* kls = OrderAccess::load_acquire(k);
Klass* kls = Atomic::load_acquire(k);
if (kls != NULL && !kls->is_loader_alive()) {
return NULL; // don't return unloaded class
} else {
@ -1113,7 +1113,7 @@ void InstanceKlass::set_implementor(Klass* k) {
Klass* volatile* addr = adr_implementor();
assert(addr != NULL, "null addr");
if (addr != NULL) {
OrderAccess::release_store(addr, k);
Atomic::release_store(addr, k);
}
}
@ -1370,14 +1370,14 @@ void InstanceKlass::mask_for(const methodHandle& method, int bci,
InterpreterOopMap* entry_for) {
// Lazily create the _oop_map_cache at first request
// Lock-free access requires load_acquire.
OopMapCache* oop_map_cache = OrderAccess::load_acquire(&_oop_map_cache);
OopMapCache* oop_map_cache = Atomic::load_acquire(&_oop_map_cache);
if (oop_map_cache == NULL) {
MutexLocker x(OopMapCacheAlloc_lock);
// Check if _oop_map_cache was allocated while we were waiting for this lock
if ((oop_map_cache = _oop_map_cache) == NULL) {
oop_map_cache = new OopMapCache();
// Ensure _oop_map_cache is stable, since it is examined without a lock
OrderAccess::release_store(&_oop_map_cache, oop_map_cache);
Atomic::release_store(&_oop_map_cache, oop_map_cache);
}
}
// _oop_map_cache is constant after init; lookup below does its own locking.
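The mask_for() hunk above is the lazy-initialization shape that recurs throughout this commit: an acquire load on the fast path, a lock on the slow path, and a release store to publish the fully constructed object. A minimal stand-alone sketch of that shape, assuming only runtime/atomic.hpp and runtime/mutexLocker.hpp; FooCache, _foo_cache and FooCache_lock are hypothetical stand-ins for OopMapCache, _oop_map_cache and OopMapCacheAlloc_lock:

#include "runtime/atomic.hpp"
#include "runtime/mutexLocker.hpp"

struct FooCache { };                          // expensive to build, immutable once published
static FooCache* volatile _foo_cache = NULL;  // shared, lazily created

FooCache* get_or_create_cache(Mutex* FooCache_lock) {
  // Fast path: the acquire pairs with the release store below, so a
  // non-NULL result is guaranteed to be fully constructed.
  FooCache* cache = Atomic::load_acquire(&_foo_cache);
  if (cache == NULL) {
    MutexLocker ml(FooCache_lock);
    // Re-check under the lock; another thread may have won the race.
    if ((cache = _foo_cache) == NULL) {
      cache = new FooCache();
      // Publish only after construction is complete.
      Atomic::release_store(&_foo_cache, cache);
    }
  }
  return cache;
}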
@ -2114,7 +2114,7 @@ jmethodID InstanceKlass::get_jmethod_id_fetch_or_update(
// The jmethodID cache can be read while unlocked so we have to
// make sure the new jmethodID is complete before installing it
// in the cache.
OrderAccess::release_store(&jmeths[idnum+1], id);
Atomic::release_store(&jmeths[idnum+1], id);
} else {
*to_dealloc_id_p = new_id; // save new id for later delete
}
@ -2196,7 +2196,7 @@ void InstanceKlass::clean_implementors_list() {
assert (ClassUnloading, "only called for ClassUnloading");
for (;;) {
// Use load_acquire due to competing with inserts
Klass* impl = OrderAccess::load_acquire(adr_implementor());
Klass* impl = Atomic::load_acquire(adr_implementor());
if (impl != NULL && !impl->is_loader_alive()) {
// NULL this field, might be an unloaded klass or NULL
Klass* volatile* klass = adr_implementor();

View File

@ -35,19 +35,19 @@
#include "utilities/macros.hpp"
inline Klass* InstanceKlass::array_klasses_acquire() const {
return OrderAccess::load_acquire(&_array_klasses);
return Atomic::load_acquire(&_array_klasses);
}
inline void InstanceKlass::release_set_array_klasses(Klass* k) {
OrderAccess::release_store(&_array_klasses, k);
Atomic::release_store(&_array_klasses, k);
}
inline jmethodID* InstanceKlass::methods_jmethod_ids_acquire() const {
return OrderAccess::load_acquire(&_methods_jmethod_ids);
return Atomic::load_acquire(&_methods_jmethod_ids);
}
inline void InstanceKlass::release_set_methods_jmethod_ids(jmethodID* jmeths) {
OrderAccess::release_store(&_methods_jmethod_ids, jmeths);
Atomic::release_store(&_methods_jmethod_ids, jmeths);
}
// The iteration over the oops in objects is a hot path in the GC code.

View File

@ -362,7 +362,7 @@ InstanceKlass* Klass::superklass() const {
Klass* Klass::subklass(bool log) const {
// Need load_acquire on the _subklass, because it races with inserts that
// publish freshly initialized data.
for (Klass* chain = OrderAccess::load_acquire(&_subklass);
for (Klass* chain = Atomic::load_acquire(&_subklass);
chain != NULL;
// Do not need load_acquire on _next_sibling, because inserts never
// create _next_sibling edges to dead data.
@ -402,7 +402,7 @@ Klass* Klass::next_sibling(bool log) const {
void Klass::set_subklass(Klass* s) {
assert(s != this, "sanity check");
OrderAccess::release_store(&_subklass, s);
Atomic::release_store(&_subklass, s);
}
void Klass::set_next_sibling(Klass* s) {
@ -427,7 +427,7 @@ void Klass::append_to_sibling_list() {
super->clean_subklass();
for (;;) {
Klass* prev_first_subklass = OrderAccess::load_acquire(&_super->_subklass);
Klass* prev_first_subklass = Atomic::load_acquire(&_super->_subklass);
if (prev_first_subklass != NULL) {
// set our sibling to be the superklass' previous first subklass
assert(prev_first_subklass->is_loader_alive(), "May not attach not alive klasses");
@ -446,7 +446,7 @@ void Klass::append_to_sibling_list() {
void Klass::clean_subklass() {
for (;;) {
// Need load_acquire, due to contending with concurrent inserts
Klass* subklass = OrderAccess::load_acquire(&_subklass);
Klass* subklass = Atomic::load_acquire(&_subklass);
if (subklass == NULL || subklass->is_loader_alive()) {
return;
}

View File

@ -1247,7 +1247,7 @@ void Method::restore_unshareable_info(TRAPS) {
}
address Method::from_compiled_entry_no_trampoline() const {
CompiledMethod *code = OrderAccess::load_acquire(&_code);
CompiledMethod *code = Atomic::load_acquire(&_code);
if (code) {
return code->verified_entry_point();
} else {
@ -1273,7 +1273,7 @@ address Method::verified_code_entry() {
// Not inline to avoid circular ref.
bool Method::check_code() const {
// cached in a register or local. There's a race on the value of the field.
CompiledMethod *code = OrderAccess::load_acquire(&_code);
CompiledMethod *code = Atomic::load_acquire(&_code);
return code == NULL || (code->method() == NULL) || (code->method() == (Method*)this && !code->is_osr_method());
}

View File

@ -29,23 +29,23 @@
#include "runtime/orderAccess.hpp"
inline address Method::from_compiled_entry() const {
return OrderAccess::load_acquire(&_from_compiled_entry);
return Atomic::load_acquire(&_from_compiled_entry);
}
inline address Method::from_interpreted_entry() const {
return OrderAccess::load_acquire(&_from_interpreted_entry);
return Atomic::load_acquire(&_from_interpreted_entry);
}
inline void Method::set_method_data(MethodData* data) {
// The store into method must be released. On platforms without
// total store order (TSO) the reference may become visible before
// the initialization of data otherwise.
OrderAccess::release_store(&_method_data, data);
Atomic::release_store(&_method_data, data);
}
inline CompiledMethod* volatile Method::code() const {
assert( check_code(), "" );
return OrderAccess::load_acquire(&_code);
return Atomic::load_acquire(&_code);
}
// Write (bci, line number) pair to stream

View File

@ -1415,7 +1415,7 @@ ProfileData* MethodData::bci_to_extra_data_helper(int bci, Method* m, DataLayout
for (;; dp = next_extra(dp)) {
assert(dp < end, "moved past end of extra data");
// No need for "OrderAccess::load_acquire" ops,
// No need for "Atomic::load_acquire" ops,
// since the data structure is monotonic.
switch(dp->tag()) {
case DataLayout::no_tag:
@ -1550,7 +1550,7 @@ void MethodData::print_data_on(outputStream* st) const {
DataLayout* end = args_data_limit();
for (;; dp = next_extra(dp)) {
assert(dp < end, "moved past end of extra data");
// No need for "OrderAccess::load_acquire" ops,
// No need for "Atomic::load_acquire" ops,
// since the data structure is monotonic.
switch(dp->tag()) {
case DataLayout::no_tag:

View File

@ -29,7 +29,7 @@
#include "runtime/orderAccess.hpp"
inline void DataLayout::release_set_cell_at(int index, intptr_t value) {
OrderAccess::release_store(&_cells[index], value);
Atomic::release_store(&_cells[index], value);
}
inline void ProfileData::release_set_intptr_at(int index, intptr_t value) {

View File

@ -110,9 +110,9 @@ Klass* oopDesc::klass_or_null_acquire() const volatile {
// Workaround for non-const load_acquire parameter.
const volatile narrowKlass* addr = &_metadata._compressed_klass;
volatile narrowKlass* xaddr = const_cast<volatile narrowKlass*>(addr);
return CompressedKlassPointers::decode(OrderAccess::load_acquire(xaddr));
return CompressedKlassPointers::decode(Atomic::load_acquire(xaddr));
} else {
return OrderAccess::load_acquire(&_metadata._klass);
return Atomic::load_acquire(&_metadata._klass);
}
}
@ -156,10 +156,10 @@ void oopDesc::set_klass(Klass* k) {
void oopDesc::release_set_klass(HeapWord* mem, Klass* klass) {
CHECK_SET_KLASS(klass);
if (UseCompressedClassPointers) {
OrderAccess::release_store(compressed_klass_addr(mem),
CompressedKlassPointers::encode_not_null(klass));
Atomic::release_store(compressed_klass_addr(mem),
CompressedKlassPointers::encode_not_null(klass));
} else {
OrderAccess::release_store(klass_addr(mem), klass);
Atomic::release_store(klass_addr(mem), klass);
}
}
@ -356,7 +356,7 @@ oop oopDesc::forwardee() const {
// The forwardee is used when copying during scavenge and mark-sweep.
// It does need to clear the low two locking- and GC-related bits.
oop oopDesc::forwardee_acquire() const {
return (oop) OrderAccess::load_acquire(&_mark).decode_pointer();
return (oop) Atomic::load_acquire(&_mark).decode_pointer();
}
// The following method needs to be MT safe.

View File

@ -274,7 +274,7 @@ void jfieldIDWorkaround::verify_instance_jfieldID(Klass* k, jfieldID id) {
uintx count = 0;
while (Atomic::cmpxchg(1, &JNIHistogram_lock, 0) != 0) {
while (OrderAccess::load_acquire(&JNIHistogram_lock) != 0) {
while (Atomic::load_acquire(&JNIHistogram_lock) != 0) {
count +=1;
if ( (WarnOnStalledSpinLock > 0)
&& (count % WarnOnStalledSpinLock == 0)) {
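The histogram-lock hunk above (and the matching ones in jvm.cpp and runtimeService.cpp further down) is a test-and-test-and-set spin lock: cmpxchg tries to take the lock, and the inner loop spins on a cheaper acquire load until the word looks free again. A minimal sketch of the idiom with the matching release on unlock; spin_lock_word and both functions are hypothetical, and the unlock side is not part of the hunk above:

#include "runtime/atomic.hpp"

static volatile int spin_lock_word = 0;   // 0 = free, 1 = held

void spin_lock() {
  // Try to take the lock; on failure, spin on a plain acquire load
  // instead of hammering the cache line with cmpxchg.
  while (Atomic::cmpxchg(1, &spin_lock_word, 0) != 0) {
    while (Atomic::load_acquire(&spin_lock_word) != 0) {
      // spin (a real caller might pause or yield here)
    }
  }
}

void spin_unlock() {
  // Release: stores made while holding the lock become visible
  // before the lock word is observed as free.
  Atomic::release_store(&spin_lock_word, 0);
}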
@ -3916,7 +3916,7 @@ static jint JNI_CreateJavaVM_inner(JavaVM **vm, void **penv, void *args) {
*(JNIEnv**)penv = 0;
// reset vm_created last to avoid race condition. Use a release store to
// control both compiler and architectural-based reordering.
OrderAccess::release_store(&vm_created, 0);
Atomic::release_store(&vm_created, 0);
}
// Flush stdout and stderr before exit.

View File

@ -234,7 +234,7 @@ void trace_class_resolution(Klass* to_class) {
uintx count = 0;
while (Atomic::cmpxchg(1, &JVMHistogram_lock, 0) != 0) {
while (OrderAccess::load_acquire(&JVMHistogram_lock) != 0) {
while (Atomic::load_acquire(&JVMHistogram_lock) != 0) {
count +=1;
if ( (WarnOnStalledSpinLock > 0)
&& (count % WarnOnStalledSpinLock == 0)) {

View File

@ -255,11 +255,11 @@ class JvmtiEnvBase : public CHeapObj<mtInternal> {
}
JvmtiTagMap* tag_map_acquire() {
return OrderAccess::load_acquire(&_tag_map);
return Atomic::load_acquire(&_tag_map);
}
void release_set_tag_map(JvmtiTagMap* tag_map) {
OrderAccess::release_store(&_tag_map, tag_map);
Atomic::release_store(&_tag_map, tag_map);
}
// return true if event is enabled globally or for any thread

View File

@ -147,7 +147,7 @@ void JvmtiRawMonitor::simple_enter(Thread* self) {
void JvmtiRawMonitor::simple_exit(Thread* self) {
guarantee(_owner == self, "invariant");
OrderAccess::release_store(&_owner, (Thread*)NULL);
Atomic::release_store(&_owner, (Thread*)NULL);
OrderAccess::fence();
if (_entry_list == NULL) {
return;

View File

@ -34,6 +34,7 @@
#include "metaprogramming/primitiveConversions.hpp"
#include "metaprogramming/removeCV.hpp"
#include "metaprogramming/removePointer.hpp"
#include "runtime/orderAccess.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
@ -48,6 +49,12 @@ enum atomic_memory_order {
memory_order_conservative = 8
};
enum ScopedFenceType {
X_ACQUIRE
, RELEASE_X
, RELEASE_X_FENCE
};
class Atomic : AllStatic {
public:
// Atomic operations on int64 types are not available on all 32-bit
@ -75,12 +82,21 @@ public:
template<typename T, typename D>
inline static void store(T store_value, volatile D* dest);
template <typename T, typename D>
inline static void release_store(volatile D* dest, T store_value);
template <typename T, typename D>
inline static void release_store_fence(volatile D* dest, T store_value);
// Atomically load from a location
// The type T must be either a pointer type, an integral/enum type,
// or a type that is primitive convertible using PrimitiveConversions.
template<typename T>
inline static T load(const volatile T* dest);
template <typename T>
inline static T load_acquire(const volatile T* dest);
// Atomically add to a location. Returns updated value. add*() provide:
// <fence> add-value-to-dest <membar StoreLoad|StoreStore>
@ -200,6 +216,10 @@ protected:
// requires more for e.g. 64 bit loads, a specialization is required
template<size_t byte_size> struct PlatformLoad;
// Give platforms a variation point to specialize.
template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedStore;
template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedLoad;
private:
// Dispatch handler for add. Provides type-based validity checking
// and limited conversions around calls to the platform-specific
@ -578,6 +598,32 @@ struct Atomic::PlatformXchg {
atomic_memory_order order) const;
};
template <ScopedFenceType T>
class ScopedFenceGeneral: public StackObj {
public:
void prefix() {}
void postfix() {}
};
// The following methods can be specialized using simple template specialization
// in the platform specific files for optimization purposes. Otherwise the
// generalized variant is used.
template<> inline void ScopedFenceGeneral<X_ACQUIRE>::postfix() { OrderAccess::acquire(); }
template<> inline void ScopedFenceGeneral<RELEASE_X>::prefix() { OrderAccess::release(); }
template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::prefix() { OrderAccess::release(); }
template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::postfix() { OrderAccess::fence(); }
template <ScopedFenceType T>
class ScopedFence : public ScopedFenceGeneral<T> {
void *const _field;
public:
ScopedFence(void *const field) : _field(field) { prefix(); }
~ScopedFence() { postfix(); }
void prefix() { ScopedFenceGeneral<T>::prefix(); }
void postfix() { ScopedFenceGeneral<T>::postfix(); }
};
// platform specific in-line definitions - must come before shared definitions
#include OS_CPU_HEADER(atomic)
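The block added above is what lets Atomic provide ordered accesses generically: ScopedFence is an RAII guard whose prefix()/postfix() are specialized per ScopedFenceType, so in the generic case an ordered store is just a plain Atomic::store bracketed by the right barriers (platforms may still override PlatformOrderedStore/PlatformOrderedLoad). A hand-expanded sketch of what the generic path does for a release store; x is a hypothetical field:

#include "runtime/atomic.hpp"

static volatile int x = 0;

// Roughly what Atomic::release_store(&x, 42) does when no platform
// specialization of PlatformOrderedStore<sizeof(x), RELEASE_X> is provided:
void release_store_expanded() {
  ScopedFence<RELEASE_X> f((void*)&x);  // prefix(): OrderAccess::release()
  Atomic::store(42, &x);                // the plain store
}                                       // postfix(): no-op for RELEASE_X

X_ACQUIRE instead runs OrderAccess::acquire() in postfix() after the plain load, and RELEASE_X_FENCE adds OrderAccess::fence() in postfix() after the store, matching the four specializations above.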
@ -594,11 +640,44 @@ inline T Atomic::load(const volatile T* dest) {
return LoadImpl<T, PlatformLoad<sizeof(T)> >()(dest);
}
template<size_t byte_size, ScopedFenceType type>
struct Atomic::PlatformOrderedLoad {
template <typename T>
T operator()(const volatile T* p) const {
ScopedFence<type> f((void*)p);
return Atomic::load(p);
}
};
template <typename T>
inline T Atomic::load_acquire(const volatile T* p) {
return LoadImpl<T, PlatformOrderedLoad<sizeof(T), X_ACQUIRE> >()(p);
}
template<typename T, typename D>
inline void Atomic::store(T store_value, volatile D* dest) {
StoreImpl<T, D, PlatformStore<sizeof(D)> >()(store_value, dest);
}
template<size_t byte_size, ScopedFenceType type>
struct Atomic::PlatformOrderedStore {
template <typename T>
void operator()(T v, volatile T* p) const {
ScopedFence<type> f((void*)p);
Atomic::store(v, p);
}
};
template <typename T, typename D>
inline void Atomic::release_store(volatile D* p, T v) {
StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X> >()(v, p);
}
template <typename T, typename D>
inline void Atomic::release_store_fence(volatile D* p, T v) {
StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X_FENCE> >()(v, p);
}
template<typename I, typename D>
inline D Atomic::add(I add_value, D volatile* dest,
atomic_memory_order order) {
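Taken together, the wrappers above mean ordinary publish/consume code no longer needs anything from orderAccess.hpp beyond the barriers. A minimal caller-side sketch of the pattern most hunks in this commit follow; Payload, _published and both functions are hypothetical, the Atomic calls are the ones declared above:

#include "runtime/atomic.hpp"

struct Payload { int a; int b; };
static Payload* volatile _published = NULL;

void publisher(Payload* p) {
  p->a = 1;
  p->b = 2;
  // The writes to *p cannot become visible after the pointer does.
  Atomic::release_store(&_published, p);
}

Payload* consumer() {
  // Pairs with the release store: if we see the pointer, we see a and b.
  return Atomic::load_acquire(&_published);
}

Atomic::release_store_fence additionally keeps the store from being reordered with later loads, which is what hand-over-hand cases such as Thread::set_threads_hazard_ptr below rely on.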

View File

@ -294,7 +294,7 @@ void HandshakeState::process_self_inner(JavaThread* thread) {
if (!_semaphore.trywait()) {
_semaphore.wait_with_safepoint_check(thread);
}
HandshakeOperation* op = OrderAccess::load_acquire(&_operation);
HandshakeOperation* op = Atomic::load_acquire(&_operation);
if (op != NULL) {
HandleMark hm(thread);
CautiouslyPreserveExceptionMark pem(thread);

View File

@ -195,7 +195,7 @@ void exit_globals() {
static volatile bool _init_completed = false;
bool is_init_completed() {
return OrderAccess::load_acquire(&_init_completed);
return Atomic::load_acquire(&_init_completed);
}
void wait_init_completed() {
@ -208,6 +208,6 @@ void wait_init_completed() {
void set_init_completed() {
assert(Universe::is_fully_initialized(), "Should have completed initialization");
MonitorLocker ml(InitCompleted_lock, Monitor::_no_safepoint_check_flag);
OrderAccess::release_store(&_init_completed, true);
Atomic::release_store(&_init_completed, true);
ml.notify_all();
}

View File

@ -88,7 +88,7 @@ RuntimeHistogramElement::RuntimeHistogramElement(const char* elementName) {
uintx count = 0;
while (Atomic::cmpxchg(1, &RuntimeHistogram_lock, 0) != 0) {
while (OrderAccess::load_acquire(&RuntimeHistogram_lock) != 0) {
while (Atomic::load_acquire(&RuntimeHistogram_lock) != 0) {
count +=1;
if ( (WarnOnStalledSpinLock > 0)
&& (count % WarnOnStalledSpinLock == 0)) {

View File

@ -916,8 +916,8 @@ void ObjectMonitor::exit(bool not_suspended, TRAPS) {
// release semantics: prior loads and stores from within the critical section
// must not float (reorder) past the following store that drops the lock.
OrderAccess::release_store(&_owner, (void*)NULL); // drop the lock
OrderAccess::storeload(); // See if we need to wake a successor
Atomic::release_store(&_owner, (void*)NULL); // drop the lock
OrderAccess::storeload(); // See if we need to wake a successor
if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
return;
}
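The exit path above is the classic unlock-then-check-for-successor race: the release store drops the lock, and the StoreLoad fence keeps the subsequent loads of _EntryList/_cxq/_succ from being satisfied before other threads can observe the lock as free. A stripped-down sketch of the same ordering; the fields and function are hypothetical, only Atomic and the OrderAccess barriers are assumed:

#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"

static void* volatile _owner_slot = NULL;   // non-NULL while the lock is held
static void* volatile _waiter     = NULL;   // some parked thread, if any

void unlock_and_maybe_wake() {
  // 1. Drop the lock with release semantics: everything done inside the
  //    critical section is visible before the lock is seen as free.
  Atomic::release_store(&_owner_slot, (void*)NULL);
  // 2. StoreLoad: without it the load below could be ordered before the
  //    store above, and a waiter that parked in between could be missed.
  OrderAccess::storeload();
  // 3. Only now is it safe to decide whether a successor needs waking.
  if (Atomic::load(&_waiter) != NULL) {
    // wake the successor (omitted)
  }
}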
@ -1092,7 +1092,7 @@ void ObjectMonitor::ExitEpilog(Thread * Self, ObjectWaiter * Wakee) {
Wakee = NULL;
// Drop the lock
OrderAccess::release_store(&_owner, (void*)NULL);
Atomic::release_store(&_owner, (void*)NULL);
OrderAccess::fence(); // ST _owner vs LD in unpark()
DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);

View File

@ -26,7 +26,6 @@
#define SHARE_RUNTIME_ORDERACCESS_HPP
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "utilities/macros.hpp"
// Memory Access Ordering Model
@ -231,30 +230,7 @@
// order. If their implementations change such that these assumptions
// are violated, a whole lot of code will break.
enum ScopedFenceType {
X_ACQUIRE
, RELEASE_X
, RELEASE_X_FENCE
};
template <ScopedFenceType T>
class ScopedFenceGeneral: public StackObj {
public:
void prefix() {}
void postfix() {}
};
template <ScopedFenceType T>
class ScopedFence : public ScopedFenceGeneral<T> {
void *const _field;
public:
ScopedFence(void *const field) : _field(field) { prefix(); }
~ScopedFence() { postfix(); }
void prefix() { ScopedFenceGeneral<T>::prefix(); }
void postfix() { ScopedFenceGeneral<T>::postfix(); }
};
class OrderAccess : private Atomic {
class OrderAccess : public AllStatic {
public:
// barriers
static void loadload();
@ -267,85 +243,13 @@ class OrderAccess : private Atomic {
static void fence();
static void cross_modify_fence();
template <typename T>
static T load_acquire(const volatile T* p);
template <typename T, typename D>
static void release_store(volatile D* p, T v);
template <typename T, typename D>
static void release_store_fence(volatile D* p, T v);
private:
private:
// This is a helper that invokes the StubRoutines::fence_entry()
// routine if it exists, It should only be used by platforms that
// don't have another way to do the inline assembly.
static void StubRoutines_fence();
// Give platforms a variation point to specialize.
template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedStore;
template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedLoad;
template<typename FieldType, ScopedFenceType FenceType>
static void ordered_store(volatile FieldType* p, FieldType v);
template<typename FieldType, ScopedFenceType FenceType>
static FieldType ordered_load(const volatile FieldType* p);
};
// The following methods can be specialized using simple template specialization
// in the platform specific files for optimization purposes. Otherwise the
// generalized variant is used.
template<size_t byte_size, ScopedFenceType type>
struct OrderAccess::PlatformOrderedStore {
template <typename T>
void operator()(T v, volatile T* p) const {
ordered_store<T, type>(p, v);
}
};
template<size_t byte_size, ScopedFenceType type>
struct OrderAccess::PlatformOrderedLoad {
template <typename T>
T operator()(const volatile T* p) const {
return ordered_load<T, type>(p);
}
};
#include OS_CPU_HEADER(orderAccess)
template<> inline void ScopedFenceGeneral<X_ACQUIRE>::postfix() { OrderAccess::acquire(); }
template<> inline void ScopedFenceGeneral<RELEASE_X>::prefix() { OrderAccess::release(); }
template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::prefix() { OrderAccess::release(); }
template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::postfix() { OrderAccess::fence(); }
template <typename FieldType, ScopedFenceType FenceType>
inline void OrderAccess::ordered_store(volatile FieldType* p, FieldType v) {
ScopedFence<FenceType> f((void*)p);
Atomic::store(v, p);
}
template <typename FieldType, ScopedFenceType FenceType>
inline FieldType OrderAccess::ordered_load(const volatile FieldType* p) {
ScopedFence<FenceType> f((void*)p);
return Atomic::load(p);
}
template <typename T>
inline T OrderAccess::load_acquire(const volatile T* p) {
return LoadImpl<T, PlatformOrderedLoad<sizeof(T), X_ACQUIRE> >()(p);
}
template <typename T, typename D>
inline void OrderAccess::release_store(volatile D* p, T v) {
StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X> >()(v, p);
}
template <typename T, typename D>
inline void OrderAccess::release_store_fence(volatile D* p, T v) {
StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X_FENCE> >()(v, p);
}
#endif // SHARE_RUNTIME_ORDERACCESS_HPP
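After this deletion OrderAccess is a pure barrier class (loadload, storestore, acquire, release, fence, cross_modify_fence), and every ordered load or store goes through Atomic. The call-site migration is mechanical; a before/after sketch on a hypothetical field _flag:

#include "runtime/atomic.hpp"

static volatile int _flag = 0;

// Before this commit:
//   OrderAccess::release_store(&_flag, 1);
//   int v = OrderAccess::load_acquire(&_flag);

// After this commit:
void set_flag() { Atomic::release_store(&_flag, 1); }
int  get_flag() { return Atomic::load_acquire(&_flag); }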

View File

@ -156,7 +156,7 @@ void PerfMemory::initialize() {
_prologue->overflow = 0;
_prologue->mod_time_stamp = 0;
OrderAccess::release_store(&_initialized, 1);
Atomic::release_store(&_initialized, 1);
}
void PerfMemory::destroy() {
@ -269,5 +269,5 @@ char* PerfMemory::get_perfdata_file_path() {
}
bool PerfMemory::is_initialized() {
return OrderAccess::load_acquire(&_initialized) != 0;
return Atomic::load_acquire(&_initialized) != 0;
}

View File

@ -328,7 +328,7 @@ void SafepointSynchronize::arm_safepoint() {
assert((_safepoint_counter & 0x1) == 0, "must be even");
// The store to _safepoint_counter must happen after any stores in arming.
OrderAccess::release_store(&_safepoint_counter, _safepoint_counter + 1);
Atomic::release_store(&_safepoint_counter, _safepoint_counter + 1);
// We are synchronizing
OrderAccess::storestore(); // Ordered with _safepoint_counter
@ -482,7 +482,7 @@ void SafepointSynchronize::disarm_safepoint() {
// Set the next dormant (even) safepoint id.
assert((_safepoint_counter & 0x1) == 1, "must be odd");
OrderAccess::release_store(&_safepoint_counter, _safepoint_counter + 1);
Atomic::release_store(&_safepoint_counter, _safepoint_counter + 1);
OrderAccess::fence(); // Keep the local state from floating up.
@ -968,15 +968,15 @@ void ThreadSafepointState::destroy(JavaThread *thread) {
}
uint64_t ThreadSafepointState::get_safepoint_id() const {
return OrderAccess::load_acquire(&_safepoint_id);
return Atomic::load_acquire(&_safepoint_id);
}
void ThreadSafepointState::reset_safepoint_id() {
OrderAccess::release_store(&_safepoint_id, SafepointSynchronize::InactiveSafepointCounter);
Atomic::release_store(&_safepoint_id, SafepointSynchronize::InactiveSafepointCounter);
}
void ThreadSafepointState::set_safepoint_id(uint64_t safepoint_id) {
OrderAccess::release_store(&_safepoint_id, safepoint_id);
Atomic::release_store(&_safepoint_id, safepoint_id);
}
void ThreadSafepointState::examine_state_of_thread(uint64_t safepoint_count) {

View File

@ -889,7 +889,7 @@ JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_ob
// Visitors ...
void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
PaddedObjectMonitor* block = OrderAccess::load_acquire(&g_block_list);
PaddedObjectMonitor* block = Atomic::load_acquire(&g_block_list);
while (block != NULL) {
assert(block->object() == CHAINMARKER, "must be a block header");
for (int i = _BLOCKSIZE - 1; i > 0; i--) {
@ -1118,7 +1118,7 @@ ObjectMonitor* ObjectSynchronizer::om_alloc(Thread* self) {
temp[0]._next_om = g_block_list;
// There are lock-free uses of g_block_list so make sure that
// the previous stores happen before we update g_block_list.
OrderAccess::release_store(&g_block_list, temp);
Atomic::release_store(&g_block_list, temp);
// Add the new string of ObjectMonitors to the global free list
temp[_BLOCKSIZE - 1]._next_om = g_free_list;
@ -2169,7 +2169,7 @@ int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) {
// the list of extant blocks without taking a lock.
int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
PaddedObjectMonitor* block = OrderAccess::load_acquire(&g_block_list);
PaddedObjectMonitor* block = Atomic::load_acquire(&g_block_list);
while (block != NULL) {
assert(block->object() == CHAINMARKER, "must be a block header");
if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {

View File

@ -1269,7 +1269,7 @@ NonJavaThread::List NonJavaThread::_the_list;
NonJavaThread::Iterator::Iterator() :
_protect_enter(_the_list._protect.enter()),
_current(OrderAccess::load_acquire(&_the_list._head))
_current(Atomic::load_acquire(&_the_list._head))
{}
NonJavaThread::Iterator::~Iterator() {
@ -1278,7 +1278,7 @@ NonJavaThread::Iterator::~Iterator() {
void NonJavaThread::Iterator::step() {
assert(!end(), "precondition");
_current = OrderAccess::load_acquire(&_current->_next);
_current = Atomic::load_acquire(&_current->_next);
}
NonJavaThread::NonJavaThread() : Thread(), _next(NULL) {
@ -1291,8 +1291,8 @@ void NonJavaThread::add_to_the_list() {
MutexLocker ml(NonJavaThreadsList_lock, Mutex::_no_safepoint_check_flag);
// Initialize BarrierSet-related data before adding to list.
BarrierSet::barrier_set()->on_thread_attach(this);
OrderAccess::release_store(&_next, _the_list._head);
OrderAccess::release_store(&_the_list._head, this);
Atomic::release_store(&_next, _the_list._head);
Atomic::release_store(&_the_list._head, this);
}
void NonJavaThread::remove_from_the_list() {
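The add_to_the_list() hunk above publishes a new head in two release steps: the new node's _next is set to the current head, then the head is swung to the new node, so a concurrent Iterator that acquires _head and each _next only ever walks fully initialized links. A minimal sketch of that push/walk pair on a hypothetical Node list; only Atomic is assumed, and the real code additionally serializes writers with NonJavaThreadsList_lock:

#include "runtime/atomic.hpp"

struct Node {
  Node* volatile _next;
  int            _payload;
};
static Node* volatile _head = NULL;

void push_front(Node* n) {          // writer side, assumed serialized by a lock
  n->_payload = 42;                 // initialize before publication
  Atomic::release_store(&n->_next, _head);
  Atomic::release_store(&_head, n);
}

void walk() {                       // lock-free reader side
  for (Node* cur = Atomic::load_acquire(&_head);
       cur != NULL;
       cur = Atomic::load_acquire(&cur->_next)) {
    // use cur->_payload; the acquires pair with the release stores above
  }
}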

View File

@ -67,7 +67,7 @@ inline void Thread::clear_trace_flag() {
}
inline jlong Thread::cooked_allocated_bytes() {
jlong allocated_bytes = OrderAccess::load_acquire(&_allocated_bytes);
jlong allocated_bytes = Atomic::load_acquire(&_allocated_bytes);
if (UseTLAB) {
size_t used_bytes = tlab().used_bytes();
if (used_bytes <= ThreadLocalAllocBuffer::max_size_in_bytes()) {
@ -87,11 +87,11 @@ inline ThreadsList* Thread::cmpxchg_threads_hazard_ptr(ThreadsList* exchange_val
}
inline ThreadsList* Thread::get_threads_hazard_ptr() {
return (ThreadsList*)OrderAccess::load_acquire(&_threads_hazard_ptr);
return (ThreadsList*)Atomic::load_acquire(&_threads_hazard_ptr);
}
inline void Thread::set_threads_hazard_ptr(ThreadsList* new_list) {
OrderAccess::release_store_fence(&_threads_hazard_ptr, new_list);
Atomic::release_store_fence(&_threads_hazard_ptr, new_list);
}
inline void JavaThread::set_ext_suspended() {
@ -118,7 +118,7 @@ inline JavaThreadState JavaThread::thread_state() const {
#if defined(PPC64) || defined (AARCH64)
// Use membars when accessing volatile _thread_state. See
// Threads::create_vm() for size checks.
return (JavaThreadState) OrderAccess::load_acquire((volatile jint*)&_thread_state);
return (JavaThreadState) Atomic::load_acquire((volatile jint*)&_thread_state);
#else
return _thread_state;
#endif
@ -128,7 +128,7 @@ inline void JavaThread::set_thread_state(JavaThreadState s) {
#if defined(PPC64) || defined (AARCH64)
// Use membars when accessing volatile _thread_state. See
// Threads::create_vm() for size checks.
OrderAccess::release_store((volatile jint*)&_thread_state, (jint)s);
Atomic::release_store((volatile jint*)&_thread_state, (jint)s);
#else
_thread_state = s;
#endif
@ -200,7 +200,7 @@ inline bool JavaThread::stack_guards_enabled() {
// The release makes sure this store is done after storing the handshake
// operation or global state
inline void JavaThread::set_polling_page_release(void* poll_value) {
OrderAccess::release_store(polling_page_addr(), poll_value);
Atomic::release_store(polling_page_addr(), poll_value);
}
// Caller is responsible for using a memory barrier if needed.
@ -211,14 +211,14 @@ inline void JavaThread::set_polling_page(void* poll_value) {
// The acquire makes sure reading of the polling page is done before
// reading the handshake operation or the global state
inline volatile void* JavaThread::get_polling_page() {
return OrderAccess::load_acquire(polling_page_addr());
return Atomic::load_acquire(polling_page_addr());
}
inline bool JavaThread::is_exiting() const {
// Use load-acquire so that setting of _terminated by
// JavaThread::exit() is seen more quickly.
TerminatedTypes l_terminated = (TerminatedTypes)
OrderAccess::load_acquire((volatile jint *) &_terminated);
Atomic::load_acquire((volatile jint *) &_terminated);
return l_terminated == _thread_exiting || check_is_terminated(l_terminated);
}
@ -226,19 +226,19 @@ inline bool JavaThread::is_terminated() const {
// Use load-acquire so that setting of _terminated by
// JavaThread::exit() is seen more quickly.
TerminatedTypes l_terminated = (TerminatedTypes)
OrderAccess::load_acquire((volatile jint *) &_terminated);
Atomic::load_acquire((volatile jint *) &_terminated);
return check_is_terminated(l_terminated);
}
inline void JavaThread::set_terminated(TerminatedTypes t) {
// use release-store so the setting of _terminated is seen more quickly
OrderAccess::release_store((volatile jint *) &_terminated, (jint) t);
Atomic::release_store((volatile jint *) &_terminated, (jint) t);
}
// special for Threads::remove() which is static:
inline void JavaThread::set_terminated_value() {
// use release-store so the setting of _terminated is seen more quickly
OrderAccess::release_store((volatile jint *) &_terminated, (jint) _thread_terminated);
Atomic::release_store((volatile jint *) &_terminated, (jint) _thread_terminated);
}
// Allow tracking of class initialization monitor use

View File

@ -139,9 +139,9 @@ void ThreadHeapSampler::check_for_sampling(oop obj, size_t allocation_size, size
}
int ThreadHeapSampler::get_sampling_interval() {
return OrderAccess::load_acquire(&_sampling_interval);
return Atomic::load_acquire(&_sampling_interval);
}
void ThreadHeapSampler::set_sampling_interval(int sampling_interval) {
OrderAccess::release_store(&_sampling_interval, sampling_interval);
Atomic::release_store(&_sampling_interval, sampling_interval);
}

View File

@ -779,7 +779,7 @@ void ThreadsSMRSupport::clear_delete_notify() {
bool ThreadsSMRSupport::delete_notify() {
// Use load_acquire() in order to see any updates to _delete_notify
// earlier than when delete_lock is grabbed.
return (OrderAccess::load_acquire(&_delete_notify) != 0);
return (Atomic::load_acquire(&_delete_notify) != 0);
}
// Safely free a ThreadsList after a Threads::add() or Threads::remove().

View File

@ -78,7 +78,7 @@ inline void ThreadsSMRSupport::update_tlh_time_max(uint new_value) {
}
inline ThreadsList* ThreadsSMRSupport::get_java_thread_list() {
return (ThreadsList*)OrderAccess::load_acquire(&_java_thread_list);
return (ThreadsList*)Atomic::load_acquire(&_java_thread_list);
}
inline bool ThreadsSMRSupport::is_a_protected_JavaThread_with_lock(JavaThread *thread) {

View File

@ -203,16 +203,16 @@ void VMOperationTimeoutTask::task() {
}
bool VMOperationTimeoutTask::is_armed() {
return OrderAccess::load_acquire(&_armed) != 0;
return Atomic::load_acquire(&_armed) != 0;
}
void VMOperationTimeoutTask::arm() {
_arm_time = os::javaTimeMillis();
OrderAccess::release_store_fence(&_armed, 1);
Atomic::release_store_fence(&_armed, 1);
}
void VMOperationTimeoutTask::disarm() {
OrderAccess::release_store_fence(&_armed, 0);
Atomic::release_store_fence(&_armed, 0);
}
//------------------------------------------------------------------------------------------------------------------

View File

@ -65,7 +65,7 @@ MemoryManager* MemoryManager::get_metaspace_memory_manager() {
instanceOop MemoryManager::get_memory_manager_instance(TRAPS) {
// Must do an acquire so as to force ordering of subsequent
// loads from anything _memory_mgr_obj points to or implies.
instanceOop mgr_obj = OrderAccess::load_acquire(&_memory_mgr_obj);
instanceOop mgr_obj = Atomic::load_acquire(&_memory_mgr_obj);
if (mgr_obj == NULL) {
// It's ok for more than one thread to execute the code up to the locked region.
// Extra manager instances will just be gc'ed.
@ -118,7 +118,7 @@ instanceOop MemoryManager::get_memory_manager_instance(TRAPS) {
//
// The lock has done an acquire, so the load can't float above it, but
// we need to do a load_acquire as above.
mgr_obj = OrderAccess::load_acquire(&_memory_mgr_obj);
mgr_obj = Atomic::load_acquire(&_memory_mgr_obj);
if (mgr_obj != NULL) {
return mgr_obj;
}
@ -130,7 +130,7 @@ instanceOop MemoryManager::get_memory_manager_instance(TRAPS) {
// with creating the management object are visible before publishing
// its address. The unlock will publish the store to _memory_mgr_obj
// because it does a release first.
OrderAccess::release_store(&_memory_mgr_obj, mgr_obj);
Atomic::release_store(&_memory_mgr_obj, mgr_obj);
}
}

View File

@ -77,7 +77,7 @@ void MemoryPool::add_manager(MemoryManager* mgr) {
instanceOop MemoryPool::get_memory_pool_instance(TRAPS) {
// Must do an acquire so as to force ordering of subsequent
// loads from anything _memory_pool_obj points to or implies.
instanceOop pool_obj = OrderAccess::load_acquire(&_memory_pool_obj);
instanceOop pool_obj = Atomic::load_acquire(&_memory_pool_obj);
if (pool_obj == NULL) {
// It's ok for more than one thread to execute the code up to the locked region.
// Extra pool instances will just be gc'ed.
@ -118,7 +118,7 @@ instanceOop MemoryPool::get_memory_pool_instance(TRAPS) {
//
// The lock has done an acquire, so the load can't float above it,
// but we need to do a load_acquire as above.
pool_obj = OrderAccess::load_acquire(&_memory_pool_obj);
pool_obj = Atomic::load_acquire(&_memory_pool_obj);
if (pool_obj != NULL) {
return pool_obj;
}
@ -130,7 +130,7 @@ instanceOop MemoryPool::get_memory_pool_instance(TRAPS) {
// with creating the pool are visible before publishing its address.
// The unlock will publish the store to _memory_pool_obj because
// it does a release first.
OrderAccess::release_store(&_memory_pool_obj, pool_obj);
Atomic::release_store(&_memory_pool_obj, pool_obj);
}
}

View File

@ -48,7 +48,7 @@ inline const BitMap::bm_word_t BitMap::load_word_ordered(const volatile bm_word_
memory_order == memory_order_acquire ||
memory_order == memory_order_conservative,
"unexpected memory ordering");
return OrderAccess::load_acquire(addr);
return Atomic::load_acquire(addr);
}
}

View File

@ -58,7 +58,7 @@ inline typename ConcurrentHashTable<CONFIG, F>::Node*
ConcurrentHashTable<CONFIG, F>::
Node::next() const
{
return OrderAccess::load_acquire(&_next);
return Atomic::load_acquire(&_next);
}
// Bucket
@ -67,7 +67,7 @@ inline typename ConcurrentHashTable<CONFIG, F>::Node*
ConcurrentHashTable<CONFIG, F>::
Bucket::first_raw() const
{
return OrderAccess::load_acquire(&_first);
return Atomic::load_acquire(&_first);
}
template <typename CONFIG, MEMFLAGS F>
@ -79,7 +79,7 @@ inline void ConcurrentHashTable<CONFIG, F>::
// Due to this assert this method is not static.
assert(is_locked(), "Must be locked.");
Node** tmp = (Node**)dst;
OrderAccess::release_store(tmp, clear_set_state(node, *dst));
Atomic::release_store(tmp, clear_set_state(node, *dst));
}
template <typename CONFIG, MEMFLAGS F>
@ -88,7 +88,7 @@ ConcurrentHashTable<CONFIG, F>::
Bucket::first() const
{
// We strip the states bit before returning the ptr.
return clear_state(OrderAccess::load_acquire(&_first));
return clear_state(Atomic::load_acquire(&_first));
}
template <typename CONFIG, MEMFLAGS F>
@ -173,7 +173,7 @@ inline void ConcurrentHashTable<CONFIG, F>::
assert(is_locked(), "Must be locked.");
assert(!have_redirect(),
"Unlocking a bucket after it has reached terminal state.");
OrderAccess::release_store(&_first, clear_state(first()));
Atomic::release_store(&_first, clear_state(first()));
}
template <typename CONFIG, MEMFLAGS F>
@ -181,7 +181,7 @@ inline void ConcurrentHashTable<CONFIG, F>::
Bucket::redirect()
{
assert(is_locked(), "Must be locked.");
OrderAccess::release_store(&_first, set_state(_first, STATE_REDIRECT_BIT));
Atomic::release_store(&_first, set_state(_first, STATE_REDIRECT_BIT));
}
// InternalTable
@ -217,8 +217,8 @@ inline ConcurrentHashTable<CONFIG, F>::
_cs_context(GlobalCounter::critical_section_begin(_thread))
{
// This version is published now.
if (OrderAccess::load_acquire(&_cht->_invisible_epoch) != NULL) {
OrderAccess::release_store_fence(&_cht->_invisible_epoch, (Thread*)NULL);
if (Atomic::load_acquire(&_cht->_invisible_epoch) != NULL) {
Atomic::release_store_fence(&_cht->_invisible_epoch, (Thread*)NULL);
}
}
@ -289,13 +289,13 @@ inline void ConcurrentHashTable<CONFIG, F>::
assert(_resize_lock_owner == thread, "Re-size lock not held");
OrderAccess::fence(); // Prevent below load from floating up.
// If no reader saw this version we can skip write_synchronize.
if (OrderAccess::load_acquire(&_invisible_epoch) == thread) {
if (Atomic::load_acquire(&_invisible_epoch) == thread) {
return;
}
assert(_invisible_epoch == NULL, "Two thread doing bulk operations");
// We set this/next version that we are synchronizing for to not published.
// A reader will zero this flag if it reads this/next version.
OrderAccess::release_store(&_invisible_epoch, thread);
Atomic::release_store(&_invisible_epoch, thread);
GlobalCounter::write_synchronize();
}
@ -374,7 +374,7 @@ inline typename ConcurrentHashTable<CONFIG, F>::InternalTable*
ConcurrentHashTable<CONFIG, F>::
get_table() const
{
return OrderAccess::load_acquire(&_table);
return Atomic::load_acquire(&_table);
}
template <typename CONFIG, MEMFLAGS F>
@ -382,7 +382,7 @@ inline typename ConcurrentHashTable<CONFIG, F>::InternalTable*
ConcurrentHashTable<CONFIG, F>::
get_new_table() const
{
return OrderAccess::load_acquire(&_new_table);
return Atomic::load_acquire(&_new_table);
}
template <typename CONFIG, MEMFLAGS F>
@ -392,7 +392,7 @@ ConcurrentHashTable<CONFIG, F>::
{
InternalTable* old_table = _table;
// Publish the new table.
OrderAccess::release_store(&_table, _new_table);
Atomic::release_store(&_table, _new_table);
// All must see this.
GlobalCounter::write_synchronize();
// _new_table not read any more.

View File

@ -74,7 +74,7 @@ class ConcurrentHashTable<CONFIG, F>::BucketsOperation {
// Returns false if all ranges are claimed.
bool have_more_work() {
return OrderAccess::load_acquire(&_next_to_claim) >= _stop_task;
return Atomic::load_acquire(&_next_to_claim) >= _stop_task;
}
void thread_owns_resize_lock(Thread* thread) {

View File

@ -41,7 +41,7 @@ class GlobalCounter::CounterThreadCheck : public ThreadClosure {
SpinYield yield;
// Loops on this thread until it has exited the critical read section.
while(true) {
uintx cnt = OrderAccess::load_acquire(thread->get_rcu_counter());
uintx cnt = Atomic::load_acquire(thread->get_rcu_counter());
// This checks if the thread's counter is active. And if so is the counter
// for a pre-existing reader (belongs to this grace period). A pre-existing
// reader will have a lower counter than the global counter version for this

View File

@ -40,7 +40,7 @@ GlobalCounter::critical_section_begin(Thread *thread) {
if ((new_cnt & COUNTER_ACTIVE) == 0) {
new_cnt = Atomic::load(&_global_counter._counter) | COUNTER_ACTIVE;
}
OrderAccess::release_store_fence(thread->get_rcu_counter(), new_cnt);
Atomic::release_store_fence(thread->get_rcu_counter(), new_cnt);
return static_cast<CSContext>(old_cnt);
}
@ -49,8 +49,8 @@ GlobalCounter::critical_section_end(Thread *thread, CSContext context) {
assert(thread == Thread::current(), "must be current thread");
assert((*thread->get_rcu_counter() & COUNTER_ACTIVE) == COUNTER_ACTIVE, "must be in critical section");
// Restore the counter value from before the associated begin.
OrderAccess::release_store(thread->get_rcu_counter(),
static_cast<uintx>(context));
Atomic::release_store(thread->get_rcu_counter(),
static_cast<uintx>(context));
}
class GlobalCounter::CriticalSection {

View File

@ -88,7 +88,7 @@ template <MEMFLAGS F> inline void HashtableBucket<F>::set_entry(BasicHashtableEn
// SystemDictionary are read without locks. The new entry must be
// complete before other threads can be allowed to see it
// via a store to _buckets[index].
OrderAccess::release_store(&_entry, l);
Atomic::release_store(&_entry, l);
}
@ -97,7 +97,7 @@ template <MEMFLAGS F> inline BasicHashtableEntry<F>* HashtableBucket<F>::get_ent
// SystemDictionary are read without locks. The new entry must be
// complete before other threads can be allowed to see it
// via a store to _buckets[index].
return OrderAccess::load_acquire(&_entry);
return Atomic::load_acquire(&_entry);
}

View File

@ -85,7 +85,7 @@ void SingleWriterSynchronizer::synchronize() {
// to complete, e.g. for the value of old_ptr to catch up with old.
// Loop because there could be pending wakeups unrelated to this
// synchronize request.
while (old != OrderAccess::load_acquire(old_ptr)) {
while (old != Atomic::load_acquire(old_ptr)) {
_wakeup.wait();
}
// (5) Drain any pending wakeups. A critical section exit may have

View File

@ -27,7 +27,6 @@
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/semaphore.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/debug.hpp"
@ -108,7 +107,7 @@ public:
{}
virtual void main_run() {
while (OrderAccess::load_acquire(_continue_running)) {
while (Atomic::load_acquire(_continue_running)) {
uint id = _set->claim_par_id();
_set->release_par_id(id);
++_allocations;
@ -147,7 +146,7 @@ TEST_VM(G1FreeIdSetTest, stress) {
ThreadInVMfromNative invm(this_thread);
this_thread->sleep(milliseconds_to_run);
}
OrderAccess::release_store(&continue_running, false);
Atomic::release_store(&continue_running, false);
for (uint i = 0; i < nthreads; ++i) {
ThreadInVMfromNative invm(this_thread);
post.wait_with_safepoint_check(this_thread);

View File

@ -26,7 +26,7 @@
#include "gc/shared/ptrQueue.hpp"
#include "memory/allocation.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/atomic.hpp"
#include "runtime/semaphore.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/globalCounter.inline.hpp"
@ -150,7 +150,7 @@ public:
{}
virtual void main_run() {
while (OrderAccess::load_acquire(_continue_running)) {
while (Atomic::load_acquire(_continue_running)) {
BufferNode* node = _allocator->allocate();
_cbl->push(node);
++_allocations;
@ -184,7 +184,7 @@ public:
BufferNode* node = _cbl->pop();
if (node != NULL) {
_allocator->release(node);
} else if (!OrderAccess::load_acquire(_continue_running)) {
} else if (!Atomic::load_acquire(_continue_running)) {
return;
}
ThreadBlockInVM tbiv(this); // Safepoint check.
@ -226,12 +226,12 @@ static void run_test(BufferNode::Allocator* allocator, CompletedList* cbl) {
ThreadInVMfromNative invm(this_thread);
this_thread->sleep(milliseconds_to_run);
}
OrderAccess::release_store(&allocator_running, false);
Atomic::release_store(&allocator_running, false);
for (uint i = 0; i < nthreads; ++i) {
ThreadInVMfromNative invm(this_thread);
post.wait_with_safepoint_check(this_thread);
}
OrderAccess::release_store(&processor_running, false);
Atomic::release_store(&processor_running, false);
for (uint i = 0; i < nthreads; ++i) {
ThreadInVMfromNative invm(this_thread);
post.wait_with_safepoint_check(this_thread);

View File

@ -23,7 +23,6 @@
#include "precompiled.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "utilities/globalCounter.hpp"
#include "utilities/globalCounter.inline.hpp"
@ -48,14 +47,14 @@ public:
_wrt_start->signal();
while (!_exit) {
GlobalCounter::CSContext cs_context = GlobalCounter::critical_section_begin(this);
volatile TestData* test = OrderAccess::load_acquire(_test);
long value = OrderAccess::load_acquire(&test->test_value);
volatile TestData* test = Atomic::load_acquire(_test);
long value = Atomic::load_acquire(&test->test_value);
ASSERT_EQ(value, GOOD_VALUE);
GlobalCounter::critical_section_end(this, cs_context);
{
GlobalCounter::CriticalSection cs(this);
volatile TestData* test = OrderAccess::load_acquire(_test);
long value = OrderAccess::load_acquire(&test->test_value);
volatile TestData* test = Atomic::load_acquire(_test);
long value = Atomic::load_acquire(&test->test_value);
ASSERT_EQ(value, GOOD_VALUE);
}
}
@ -82,7 +81,7 @@ public:
TestData* tmp = new TestData();
tmp->test_value = GOOD_VALUE;
OrderAccess::release_store_fence(&test, tmp);
Atomic::release_store_fence(&test, tmp);
reader1->doit();
reader2->doit();
@ -99,7 +98,7 @@ public:
volatile TestData* free_tmp = test;
tmp = new TestData();
tmp->test_value = GOOD_VALUE;
OrderAccess::release_store(&test, tmp);
Atomic::release_store(&test, tmp);
GlobalCounter::write_synchronize();
free_tmp->test_value = BAD_VALUE;
delete free_tmp;

View File

@ -24,7 +24,6 @@
#include "precompiled.hpp"
#include "metaprogramming/isRegisteredEnum.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "utilities/globalCounter.hpp"
#include "utilities/globalCounter.inline.hpp"
@ -57,21 +56,21 @@ protected:
~RCUNestedThread() {}
void set_state(NestedTestState new_state) {
OrderAccess::release_store(&_state, new_state);
Atomic::release_store(&_state, new_state);
}
void wait_with_state(NestedTestState new_state) {
SpinYield spinner;
OrderAccess::release_store(&_state, new_state);
while (!OrderAccess::load_acquire(&_proceed)) {
Atomic::release_store(&_state, new_state);
while (!Atomic::load_acquire(&_proceed)) {
spinner.wait();
}
OrderAccess::release_store(&_proceed, false);
Atomic::release_store(&_proceed, false);
}
public:
NestedTestState state() const {
return OrderAccess::load_acquire(&_state);
return Atomic::load_acquire(&_state);
}
void wait_for_state(NestedTestState goal) {
@ -82,7 +81,7 @@ public:
}
void proceed() {
OrderAccess::release_store(&_proceed, true);
Atomic::release_store(&_proceed, true);
}
};

View File

@ -24,7 +24,6 @@
#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/lockFreeStack.hpp"
#include "threadHelper.inline.hpp"
@ -226,21 +225,21 @@ public:
{}
virtual void main_run() {
OrderAccess::release_store_fence(&_ready, true);
Atomic::release_store_fence(&_ready, true);
while (true) {
Element* e = _from->pop();
if (e != NULL) {
_to->push(*e);
Atomic::inc(_processed);
++_local_processed;
} else if (OrderAccess::load_acquire(_processed) == _process_limit) {
} else if (Atomic::load_acquire(_processed) == _process_limit) {
tty->print_cr("thread %u processed " SIZE_FORMAT, _id, _local_processed);
return;
}
}
}
bool ready() const { return OrderAccess::load_acquire(&_ready); }
bool ready() const { return Atomic::load_acquire(&_ready); }
};
TEST_VM(LockFreeStackTest, stress) {

View File

@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "runtime/thread.hpp"
#include "utilities/debug.hpp"
@ -56,14 +56,14 @@ public:
virtual void main_run() {
size_t iterations = 0;
size_t values_changed = 0;
while (OrderAccess::load_acquire(_continue_running) != 0) {
while (Atomic::load_acquire(_continue_running) != 0) {
{ ThreadBlockInVM tbiv(this); } // Safepoint check outside critical section.
++iterations;
SingleWriterSynchronizer::CriticalSection cs(_synchronizer);
uintx value = OrderAccess::load_acquire(_synchronized_value);
uintx value = Atomic::load_acquire(_synchronized_value);
uintx new_value = value;
for (uint i = 0; i < reader_iterations; ++i) {
new_value = OrderAccess::load_acquire(_synchronized_value);
new_value = Atomic::load_acquire(_synchronized_value);
// A reader can see either the value it first read after
// entering the critical section, or that value + 1. No other
// values are possible.
@ -97,7 +97,7 @@ public:
{}
virtual void main_run() {
while (OrderAccess::load_acquire(_continue_running) != 0) {
while (Atomic::load_acquire(_continue_running) != 0) {
++*_synchronized_value;
_synchronizer->synchronize();
{ ThreadBlockInVM tbiv(this); } // Safepoint check.

View File

@ -49,9 +49,9 @@ public:
// Similar to how a JavaThread would stop in a safepoint.
while (!_exit) {
// Load the published tag.
tag = OrderAccess::load_acquire(&wait_tag);
tag = Atomic::load_acquire(&wait_tag);
// Publish the tag this thread is going to wait for.
OrderAccess::release_store(&_on_barrier, tag);
Atomic::release_store(&_on_barrier, tag);
if (_on_barrier == 0) {
SpinPause();
continue;
@ -60,9 +60,9 @@ public:
// Wait until we are woken.
_wait_barrier->wait(tag);
// Verify that we do not see an invalid value.
vv = OrderAccess::load_acquire(&valid_value);
vv = Atomic::load_acquire(&valid_value);
ASSERT_EQ((vv & 0x1), 0);
OrderAccess::release_store(&_on_barrier, 0);
Atomic::release_store(&_on_barrier, 0);
}
}
};
@ -104,7 +104,7 @@ public:
// Arm next tag.
wb.arm(next_tag);
// Publish tag.
OrderAccess::release_store_fence(&wait_tag, next_tag);
Atomic::release_store_fence(&wait_tag, next_tag);
// Wait until threads picked up new tag.
while (reader1->_on_barrier != wait_tag ||
@ -115,12 +115,12 @@ public:
}
// Set an invalid value.
OrderAccess::release_store(&valid_value, valid_value + 1); // odd
Atomic::release_store(&valid_value, valid_value + 1); // odd
os::naked_yield();
// Set a valid value.
OrderAccess::release_store(&valid_value, valid_value + 1); // even
Atomic::release_store(&valid_value, valid_value + 1); // even
// Publish inactive tag.
OrderAccess::release_store_fence(&wait_tag, 0); // Stores in WB must not float up.
Atomic::release_store_fence(&wait_tag, 0); // Stores in WB must not float up.
wb.disarm();
// Wait until threads done valid_value verification.