8188220: Remove Atomic::*_ptr() uses and overloads from hotspot

With the new template functions these are unnecessary.

Reviewed-by: kbarrett, dholmes, eosterlund
Coleen Phillimore 2017-10-16 22:36:06 -04:00
parent cb9e7bf51a
commit 39b068db11
83 changed files with 376 additions and 458 deletions
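For background on why the `_ptr` variants become redundant: once Atomic::cmpxchg, Atomic::add, Atomic::xchg, and the OrderAccess load/store functions are templates, the element type of the destination (including pointer types) is deduced at the call site, so a separate pointer-sized overload adds nothing. The snippet below is not HotSpot code; it is a minimal, self-contained sketch (using std::atomic, with made-up names such as cmpxchg and Node) of how a single function template serves both integer and pointer call sites.

#include <atomic>
#include <cassert>

// Illustrative only: one template covers both integer and pointer
// destinations, so no separate *_ptr overload is required.
template <typename T>
T cmpxchg(T exchange_value, std::atomic<T>* dest, T compare_value) {
  // compare_exchange_strong updates compare_value with the observed value
  // on failure and leaves it equal to the old value on success; returning
  // it mirrors a "return the old value at *dest" compare-and-swap contract.
  dest->compare_exchange_strong(compare_value, exchange_value);
  return compare_value;
}

struct Node { Node* next; };

int main() {
  std::atomic<int>   counter{0};
  std::atomic<Node*> head{nullptr};
  Node n{nullptr};

  assert(cmpxchg(42, &counter, 0) == 0);                  // T deduced as int
  assert(cmpxchg(&n, &head, (Node*)nullptr) == nullptr);  // T deduced as Node*
  return 0;
}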

View File

@@ -863,7 +863,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
 //
 // markOop displaced_header = obj->mark().set_unlocked();
 // monitor->lock()->set_displaced_header(displaced_header);
-// if (Atomic::cmpxchg_ptr(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
+// if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
 // // We stored the monitor address into the object's mark word.
 // } else if (THREAD->is_lock_owned((address)displaced_header))
 // // Simple recursive case.
@@ -901,7 +901,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
 std(displaced_header, BasicObjectLock::lock_offset_in_bytes() +
 BasicLock::displaced_header_offset_in_bytes(), monitor);
-// if (Atomic::cmpxchg_ptr(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
+// if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
 // Store stack address of the BasicObjectLock (this is monitor) into object.
 addi(object_mark_addr, object, oopDesc::mark_offset_in_bytes());
@@ -977,7 +977,7 @@ void InterpreterMacroAssembler::unlock_object(Register monitor, bool check_for_e
 // if ((displaced_header = monitor->displaced_header()) == NULL) {
 // // Recursive unlock. Mark the monitor unlocked by setting the object field to NULL.
 // monitor->set_obj(NULL);
-// } else if (Atomic::cmpxchg_ptr(displaced_header, obj->mark_addr(), monitor) == monitor) {
+// } else if (Atomic::cmpxchg(displaced_header, obj->mark_addr(), monitor) == monitor) {
 // // We swapped the unlocked mark in displaced_header into the object's mark word.
 // monitor->set_obj(NULL);
 // } else {
@@ -1010,7 +1010,7 @@ void InterpreterMacroAssembler::unlock_object(Register monitor, bool check_for_e
 cmpdi(CCR0, displaced_header, 0);
 beq(CCR0, free_slot); // recursive unlock
-// } else if (Atomic::cmpxchg_ptr(displaced_header, obj->mark_addr(), monitor) == monitor) {
+// } else if (Atomic::cmpxchg(displaced_header, obj->mark_addr(), monitor) == monitor) {
 // // We swapped the unlocked mark in displaced_header into the object's mark word.
 // monitor->set_obj(NULL);

View File

@@ -149,8 +149,7 @@ void VM_Version::initialize() {
 print_features();
 }
-// PPC64 supports 8-byte compare-exchange operations (see
-// Atomic::cmpxchg and StubGenerator::generate_atomic_cmpxchg_ptr)
+// PPC64 supports 8-byte compare-exchange operations (see Atomic::cmpxchg)
 // and 'atomic long memory ops' (see Unsafe_GetLongVolatile).
 _supports_cx8 = true;

View File

@@ -914,7 +914,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
 //
 // markOop displaced_header = obj->mark().set_unlocked();
 // monitor->lock()->set_displaced_header(displaced_header);
-// if (Atomic::cmpxchg_ptr(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
+// if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
 // // We stored the monitor address into the object's mark word.
 // } else if (THREAD->is_lock_owned((address)displaced_header))
 // // Simple recursive case.
@@ -949,7 +949,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
 z_stg(displaced_header, BasicObjectLock::lock_offset_in_bytes() +
 BasicLock::displaced_header_offset_in_bytes(), monitor);
-// if (Atomic::cmpxchg_ptr(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
+// if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
 // Store stack address of the BasicObjectLock (this is monitor) into object.
 add2reg(object_mark_addr, oopDesc::mark_offset_in_bytes(), object);
@@ -1021,7 +1021,7 @@ void InterpreterMacroAssembler::unlock_object(Register monitor, Register object)
 // if ((displaced_header = monitor->displaced_header()) == NULL) {
 // // Recursive unlock. Mark the monitor unlocked by setting the object field to NULL.
 // monitor->set_obj(NULL);
-// } else if (Atomic::cmpxchg_ptr(displaced_header, obj->mark_addr(), monitor) == monitor) {
+// } else if (Atomic::cmpxchg(displaced_header, obj->mark_addr(), monitor) == monitor) {
 // // We swapped the unlocked mark in displaced_header into the object's mark word.
 // monitor->set_obj(NULL);
 // } else {
@@ -1062,7 +1062,7 @@ void InterpreterMacroAssembler::unlock_object(Register monitor, Register object)
 BasicLock::displaced_header_offset_in_bytes()));
 z_bre(done); // displaced_header == 0 -> goto done
-// } else if (Atomic::cmpxchg_ptr(displaced_header, obj->mark_addr(), monitor) == monitor) {
+// } else if (Atomic::cmpxchg(displaced_header, obj->mark_addr(), monitor) == monitor) {
 // // We swapped the unlocked mark in displaced_header into the object's mark word.
 // monitor->set_obj(NULL);

View File

@@ -224,7 +224,7 @@ void VM_Version::initialize() {
 }
 // z/Architecture supports 8-byte compare-exchange operations
-// (see Atomic::cmpxchg and StubGenerator::generate_atomic_cmpxchg_ptr)
+// (see Atomic::cmpxchg)
 // and 'atomic long memory ops' (see Unsafe_GetLongVolatile).
 _supports_cx8 = true;

View File

@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -566,7 +566,7 @@ class StubGenerator: public StubCodeGenerator {
 return start;
 }
-// Support for intptr_t atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest)
+// Support for intptr_t atomic::xchg_long(jlong exchange_value, volatile jlong* dest)
 //
 // Arguments :
 // c_rarg0: exchange_value
@@ -574,8 +574,8 @@ class StubGenerator: public StubCodeGenerator {
 //
 // Result:
 // *dest <- ex, return (orig *dest)
-address generate_atomic_xchg_ptr() {
-StubCodeMark mark(this, "StubRoutines", "atomic_xchg_ptr");
+address generate_atomic_xchg_long() {
+StubCodeMark mark(this, "StubRoutines", "atomic_xchg_long");
 address start = __ pc();
 __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow
@@ -4998,7 +4998,7 @@ class StubGenerator: public StubCodeGenerator {
 // atomic calls
 StubRoutines::_atomic_xchg_entry = generate_atomic_xchg();
-StubRoutines::_atomic_xchg_ptr_entry = generate_atomic_xchg_ptr();
+StubRoutines::_atomic_xchg_long_entry = generate_atomic_xchg_long();
 StubRoutines::_atomic_cmpxchg_entry = generate_atomic_cmpxchg();
 StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte();
 StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();

View File

@@ -276,7 +276,7 @@ int CppInterpreter::native_entry(Method* method, intptr_t UNUSED, TRAPS) {
 markOop disp = lockee->mark()->set_unlocked();
 monitor->lock()->set_displaced_header(disp);
-if (Atomic::cmpxchg_ptr(monitor, lockee->mark_addr(), disp) != disp) {
+if (Atomic::cmpxchg((markOop)monitor, lockee->mark_addr(), disp) != disp) {
 if (thread->is_lock_owned((address) disp->clear_lock_bits())) {
 monitor->lock()->set_displaced_header(NULL);
 }
@@ -420,7 +420,8 @@ int CppInterpreter::native_entry(Method* method, intptr_t UNUSED, TRAPS) {
 monitor->set_obj(NULL);
 if (header != NULL) {
-if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) {
+markOop old_header = markOopDesc::encode(lock);
+if (rcvr->cas_set_mark(header, old_header) != old_header) {
 monitor->set_obj(rcvr); {
 HandleMark hm(thread);
 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(thread, monitor));

View File

@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2007, 2008, 2010, 2015 Red Hat, Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@@ -253,9 +253,8 @@ class StubGenerator: public StubCodeGenerator {
 // atomic calls
 StubRoutines::_atomic_xchg_entry = ShouldNotCallThisStub();
-StubRoutines::_atomic_xchg_ptr_entry = ShouldNotCallThisStub();
+StubRoutines::_atomic_xchg_long_entry = ShouldNotCallThisStub();
 StubRoutines::_atomic_cmpxchg_entry = ShouldNotCallThisStub();
-StubRoutines::_atomic_cmpxchg_ptr_entry = ShouldNotCallThisStub();
 StubRoutines::_atomic_cmpxchg_byte_entry = ShouldNotCallThisStub();
 StubRoutines::_atomic_cmpxchg_long_entry = ShouldNotCallThisStub();
 StubRoutines::_atomic_add_entry = ShouldNotCallThisStub();

View File

@@ -137,7 +137,7 @@ template<typename T>
 inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
 T volatile* dest) const {
 STATIC_ASSERT(4 == sizeof(T));
-// Note that xchg_ptr doesn't necessarily do an acquire
+// Note that xchg doesn't necessarily do an acquire
 // (see synchronizer.cpp).
 T old_value;
@@ -176,7 +176,7 @@ template<typename T>
 inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
 T volatile* dest) const {
 STATIC_ASSERT(8 == sizeof(T));
-// Note that xchg_ptr doesn't necessarily do an acquire
+// Note that xchg doesn't necessarily do an acquire
 // (see synchronizer.cpp).
 T old_value;

View File

@@ -134,7 +134,7 @@ template<>
 template<typename T>
 inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
 T volatile* dest) const {
-// Note that xchg_ptr doesn't necessarily do an acquire
+// Note that xchg doesn't necessarily do an acquire
 // (see synchronizer.cpp).
 T old_value;
@@ -173,7 +173,7 @@ template<typename T>
 inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
 T volatile* dest) const {
 STATIC_ASSERT(8 == sizeof(T));
-// Note that xchg_ptr doesn't necessarily do an acquire
+// Note that xchg doesn't necessarily do an acquire
 // (see synchronizer.cpp).
 T old_value;

View File

@@ -73,7 +73,7 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) co
 }
 DEFINE_STUB_XCHG(4, jint, os::atomic_xchg_func)
-DEFINE_STUB_XCHG(8, jlong, os::atomic_xchg_ptr_func)
+DEFINE_STUB_XCHG(8, jlong, os::atomic_xchg_long_func)
 #undef DEFINE_STUB_XCHG

View File

@@ -219,7 +219,7 @@ void os::initialize_thread(Thread* thr) {
 // Atomics and Stub Functions
 typedef jint xchg_func_t (jint, volatile jint*);
-typedef intptr_t xchg_ptr_func_t (intptr_t, volatile intptr_t*);
+typedef intptr_t xchg_long_func_t (jlong, volatile jlong*);
 typedef jint cmpxchg_func_t (jint, volatile jint*, jint);
 typedef jbyte cmpxchg_byte_func_t (jbyte, volatile jbyte*, jbyte);
 typedef jlong cmpxchg_long_func_t (jlong, volatile jlong*, jlong);
@@ -243,12 +243,12 @@ jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) {
 return old_value;
 }
-intptr_t os::atomic_xchg_ptr_bootstrap(intptr_t exchange_value, volatile intptr_t* dest) {
+intptr_t os::atomic_xchg_long_bootstrap(jlong exchange_value, volatile jlong* dest) {
 // try to use the stub:
-xchg_ptr_func_t* func = CAST_TO_FN_PTR(xchg_ptr_func_t*, StubRoutines::atomic_xchg_ptr_entry());
+xchg_long_func_t* func = CAST_TO_FN_PTR(xchg_long_func_t*, StubRoutines::atomic_xchg_long_entry());
 if (func != NULL) {
-os::atomic_xchg_ptr_func = func;
+os::atomic_xchg_long_func = func;
 return (*func)(exchange_value, dest);
 }
 assert(Threads::number_of_threads() == 0, "for bootstrap only");
@@ -338,7 +338,7 @@ intptr_t os::atomic_add_ptr_bootstrap(intptr_t add_value, volatile intptr_t* des
 }
 xchg_func_t* os::atomic_xchg_func = os::atomic_xchg_bootstrap;
-xchg_ptr_func_t* os::atomic_xchg_ptr_func = os::atomic_xchg_ptr_bootstrap;
+xchg_long_func_t* os::atomic_xchg_long_func = os::atomic_xchg_long_bootstrap;
 cmpxchg_func_t* os::atomic_cmpxchg_func = os::atomic_cmpxchg_bootstrap;
 cmpxchg_byte_func_t* os::atomic_cmpxchg_byte_func = os::atomic_cmpxchg_byte_bootstrap;
 add_func_t* os::atomic_add_func = os::atomic_add_bootstrap;

View File

@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,7 @@
 //
 #ifdef AMD64
 static jint (*atomic_xchg_func) (jint, volatile jint*);
-static intptr_t (*atomic_xchg_ptr_func) (intptr_t, volatile intptr_t*);
+static intptr_t (*atomic_xchg_long_func) (jlong, volatile jlong*);
 static jint (*atomic_cmpxchg_func) (jint, volatile jint*, jint);
 static jbyte (*atomic_cmpxchg_byte_func) (jbyte, volatile jbyte*, jbyte);
@@ -40,7 +40,7 @@
 static intptr_t (*atomic_add_ptr_func) (intptr_t, volatile intptr_t*);
 static jint atomic_xchg_bootstrap (jint, volatile jint*);
-static intptr_t atomic_xchg_ptr_bootstrap (intptr_t, volatile intptr_t*);
+static intptr_t atomic_xchg_long_bootstrap (jlong, volatile jlong*);
 static jint atomic_cmpxchg_bootstrap (jint, volatile jint*, jint);
 static jbyte atomic_cmpxchg_byte_bootstrap(jbyte, volatile jbyte*, jbyte);

View File

@@ -236,11 +236,9 @@ DelayedConstant* DelayedConstant::add(BasicType type,
 if (dcon->match(type, cfn))
 return dcon;
 if (dcon->value_fn == NULL) {
-// (cmpxchg not because this is multi-threaded but because I'm paranoid)
-if (Atomic::cmpxchg_ptr(CAST_FROM_FN_PTR(void*, cfn), &dcon->value_fn, NULL) == NULL) {
+dcon->value_fn = cfn;
 dcon->type = type;
 return dcon;
-}
 }
 }
 // If this assert is hit (in pre-integration testing!) then re-evaluate

View File

@@ -48,13 +48,11 @@ private:
 ClassPathEntry* volatile _next;
 public:
 // Next entry in class path
-ClassPathEntry* next() const {
-return (ClassPathEntry*) OrderAccess::load_ptr_acquire(&_next);
-}
+ClassPathEntry* next() const { return OrderAccess::load_acquire(&_next); }
 virtual ~ClassPathEntry() {}
 void set_next(ClassPathEntry* next) {
 // may have unlocked readers, so ensure visibility.
-OrderAccess::release_store_ptr(&_next, next);
+OrderAccess::release_store(&_next, next);
 }
 virtual bool is_jrt() = 0;
 virtual bool is_jar_file() const = 0;

View File

@@ -82,11 +82,6 @@
 #include "trace/tracing.hpp"
 #endif
-// helper function to avoid in-line casts
-template <typename T> static T* load_ptr_acquire(T* volatile *p) {
-return static_cast<T*>(OrderAccess::load_ptr_acquire(p));
-}
 ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = NULL;
 ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies) :
@@ -152,7 +147,7 @@ ClassLoaderData::ChunkedHandleList::~ChunkedHandleList() {
 oop* ClassLoaderData::ChunkedHandleList::add(oop o) {
 if (_head == NULL || _head->_size == Chunk::CAPACITY) {
 Chunk* next = new Chunk(_head);
-OrderAccess::release_store_ptr(&_head, next);
+OrderAccess::release_store(&_head, next);
 }
 oop* handle = &_head->_data[_head->_size];
 *handle = o;
@@ -169,7 +164,7 @@ inline void ClassLoaderData::ChunkedHandleList::oops_do_chunk(OopClosure* f, Chu
 }
 void ClassLoaderData::ChunkedHandleList::oops_do(OopClosure* f) {
-Chunk* head = (Chunk*) OrderAccess::load_ptr_acquire(&_head);
+Chunk* head = OrderAccess::load_acquire(&_head);
 if (head != NULL) {
 // Must be careful when reading size of head
 oops_do_chunk(f, head, OrderAccess::load_acquire(&head->_size));
@@ -257,24 +252,24 @@ void ClassLoaderData::Dependencies::oops_do(OopClosure* f) {
 }
 void ClassLoaderData::classes_do(KlassClosure* klass_closure) {
-// Lock-free access requires load_ptr_acquire
-for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
+// Lock-free access requires load_acquire
+for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
 klass_closure->do_klass(k);
 assert(k != k->next_link(), "no loops!");
 }
 }
 void ClassLoaderData::classes_do(void f(Klass * const)) {
-// Lock-free access requires load_ptr_acquire
-for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
+// Lock-free access requires load_acquire
+for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
 f(k);
 assert(k != k->next_link(), "no loops!");
 }
 }
 void ClassLoaderData::methods_do(void f(Method*)) {
-// Lock-free access requires load_ptr_acquire
-for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
+// Lock-free access requires load_acquire
+for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
 if (k->is_instance_klass() && InstanceKlass::cast(k)->is_loaded()) {
 InstanceKlass::cast(k)->methods_do(f);
 }
@@ -282,8 +277,8 @@ void ClassLoaderData::methods_do(void f(Method*)) {
 }
 void ClassLoaderData::loaded_classes_do(KlassClosure* klass_closure) {
-// Lock-free access requires load_ptr_acquire
-for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
+// Lock-free access requires load_acquire
+for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
 // Do not filter ArrayKlass oops here...
 if (k->is_array_klass() || (k->is_instance_klass() && InstanceKlass::cast(k)->is_loaded())) {
 klass_closure->do_klass(k);
@@ -292,8 +287,8 @@ void ClassLoaderData::loaded_classes_do(KlassClosure* klass_closure) {
 }
 void ClassLoaderData::classes_do(void f(InstanceKlass*)) {
-// Lock-free access requires load_ptr_acquire
-for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
+// Lock-free access requires load_acquire
+for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
 if (k->is_instance_klass()) {
 f(InstanceKlass::cast(k));
 }
@@ -449,7 +444,7 @@ void ClassLoaderData::add_class(Klass* k, bool publicize /* true */) {
 k->set_next_link(old_value);
 // Link the new item into the list, making sure the linked class is stable
 // since the list can be walked without a lock
-OrderAccess::release_store_ptr(&_klasses, k);
+OrderAccess::release_store(&_klasses, k);
 }
 if (publicize && k->class_loader_data() != NULL) {
@@ -589,8 +584,8 @@ void ClassLoaderData::unload() {
 ModuleEntryTable* ClassLoaderData::modules() {
 // Lazily create the module entry table at first request.
-// Lock-free access requires load_ptr_acquire.
-ModuleEntryTable* modules = load_ptr_acquire(&_modules);
+// Lock-free access requires load_acquire.
+ModuleEntryTable* modules = OrderAccess::load_acquire(&_modules);
 if (modules == NULL) {
 MutexLocker m1(Module_lock);
 // Check if _modules got allocated while we were waiting for this lock.
@@ -600,7 +595,7 @@ ModuleEntryTable* ClassLoaderData::modules() {
 {
 MutexLockerEx m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
 // Ensure _modules is stable, since it is examined without a lock
-OrderAccess::release_store_ptr(&_modules, modules);
+OrderAccess::release_store(&_modules, modules);
 }
 }
 }
@@ -737,8 +732,8 @@ Metaspace* ClassLoaderData::metaspace_non_null() {
 // to create smaller arena for Reflection class loaders also.
 // The reason for the delayed allocation is because some class loaders are
 // simply for delegating with no metadata of their own.
-// Lock-free access requires load_ptr_acquire.
-Metaspace* metaspace = load_ptr_acquire(&_metaspace);
+// Lock-free access requires load_acquire.
+Metaspace* metaspace = OrderAccess::load_acquire(&_metaspace);
 if (metaspace == NULL) {
 MutexLockerEx ml(_metaspace_lock, Mutex::_no_safepoint_check_flag);
 // Check if _metaspace got allocated while we were waiting for this lock.
@@ -760,7 +755,7 @@ Metaspace* ClassLoaderData::metaspace_non_null() {
 metaspace = new Metaspace(_metaspace_lock, Metaspace::StandardMetaspaceType);
 }
 // Ensure _metaspace is stable, since it is examined without a lock
-OrderAccess::release_store_ptr(&_metaspace, metaspace);
+OrderAccess::release_store(&_metaspace, metaspace);
 }
 }
 return metaspace;
@@ -914,8 +909,8 @@ void ClassLoaderData::verify() {
 }
 bool ClassLoaderData::contains_klass(Klass* klass) {
-// Lock-free access requires load_ptr_acquire
-for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
+// Lock-free access requires load_acquire
+for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
 if (k == klass) return true;
 }
 return false;
@@ -948,7 +943,7 @@ ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_anonymous, TRA
 if (!is_anonymous) {
 ClassLoaderData** cld_addr = java_lang_ClassLoader::loader_data_addr(loader());
 // First, Atomically set it
-ClassLoaderData* old = (ClassLoaderData*) Atomic::cmpxchg_ptr(cld, cld_addr, NULL);
+ClassLoaderData* old = Atomic::cmpxchg(cld, cld_addr, (ClassLoaderData*)NULL);
 if (old != NULL) {
 delete cld;
 // Returns the data.
@@ -963,7 +958,7 @@ ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_anonymous, TRA
 do {
 cld->set_next(next);
-ClassLoaderData* exchanged = (ClassLoaderData*)Atomic::cmpxchg_ptr(cld, list_head, next);
+ClassLoaderData* exchanged = Atomic::cmpxchg(cld, list_head, next);
 if (exchanged == next) {
 LogTarget(Debug, class, loader, data) lt;
 if (lt.is_enabled()) {
@@ -1387,7 +1382,7 @@ Klass* ClassLoaderDataGraphKlassIteratorAtomic::next_klass() {
 while (head != NULL) {
 Klass* next = next_klass_in_cldg(head);
-Klass* old_head = (Klass*)Atomic::cmpxchg_ptr(next, &_next_klass, head);
+Klass* old_head = Atomic::cmpxchg(next, &_next_klass, head);
 if (old_head == head) {
 return head; // Won the CAS.

View File

@@ -194,7 +194,7 @@ class ClassLoaderData : public CHeapObj<mtClass> {
 Chunk(Chunk* c) : _next(c), _size(0) { }
 };
-Chunk* _head;
+Chunk* volatile _head;
 void oops_do_chunk(OopClosure* f, Chunk* c, const juint size);

View File

@@ -161,10 +161,10 @@ class DictionaryEntry : public HashtableEntry<InstanceKlass*, mtClass> {
 void set_pd_set(ProtectionDomainEntry* new_head) { _pd_set = new_head; }
 ProtectionDomainEntry* pd_set_acquire() const {
-return (ProtectionDomainEntry*)OrderAccess::load_ptr_acquire(&_pd_set);
+return OrderAccess::load_acquire(&_pd_set);
 }
 void release_set_pd_set(ProtectionDomainEntry* new_head) {
-OrderAccess::release_store_ptr(&_pd_set, new_head);
+OrderAccess::release_store(&_pd_set, new_head);
 }
 // Tells whether the initiating class' protection domain can access the klass in this entry

View File

@@ -69,14 +69,14 @@ static void* volatile _verify_byte_codes_fn = NULL;
 static volatile jint _is_new_verify_byte_codes_fn = (jint) true;
 static void* verify_byte_codes_fn() {
-if (OrderAccess::load_ptr_acquire(&_verify_byte_codes_fn) == NULL) {
+if (OrderAccess::load_acquire(&_verify_byte_codes_fn) == NULL) {
 void *lib_handle = os::native_java_library();
 void *func = os::dll_lookup(lib_handle, "VerifyClassCodesForMajorVersion");
-OrderAccess::release_store_ptr(&_verify_byte_codes_fn, func);
+OrderAccess::release_store(&_verify_byte_codes_fn, func);
 if (func == NULL) {
 _is_new_verify_byte_codes_fn = false;
 func = os::dll_lookup(lib_handle, "VerifyClassCodes");
-OrderAccess::release_store_ptr(&_verify_byte_codes_fn, func);
+OrderAccess::release_store(&_verify_byte_codes_fn, func);
 }
 }
 return (void*)_verify_byte_codes_fn;

View File

@@ -288,7 +288,7 @@ public:
 // Note: _exception_cache may be read concurrently. We rely on memory_order_consume here.
 ExceptionCache* exception_cache() const { return _exception_cache; }
 void set_exception_cache(ExceptionCache *ec) { _exception_cache = ec; }
-void release_set_exception_cache(ExceptionCache *ec) { OrderAccess::release_store_ptr(&_exception_cache, ec); }
+void release_set_exception_cache(ExceptionCache *ec) { OrderAccess::release_store(&_exception_cache, ec); }
 address handler_for_exception_and_pc(Handle exception, address pc);
 void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
 void clean_exception_cache(BoolObjectClosure* is_alive);

View File

@@ -1652,20 +1652,16 @@ nmethod* volatile nmethod::_oops_do_mark_nmethods;
 // This code must be MP safe, because it is used from parallel GC passes.
 bool nmethod::test_set_oops_do_mark() {
 assert(nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
-nmethod* observed_mark_link = _oops_do_mark_link;
-if (observed_mark_link == NULL) {
+if (_oops_do_mark_link == NULL) {
 // Claim this nmethod for this thread to mark.
-observed_mark_link = (nmethod*)
-Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_link, NULL);
-if (observed_mark_link == NULL) {
+if (Atomic::cmpxchg(NMETHOD_SENTINEL, &_oops_do_mark_link, (nmethod*)NULL) == NULL) {
 // Atomically append this nmethod (now claimed) to the head of the list:
 nmethod* observed_mark_nmethods = _oops_do_mark_nmethods;
 for (;;) {
 nmethod* required_mark_nmethods = observed_mark_nmethods;
 _oops_do_mark_link = required_mark_nmethods;
-observed_mark_nmethods = (nmethod*)
-Atomic::cmpxchg_ptr(this, &_oops_do_mark_nmethods, required_mark_nmethods);
+observed_mark_nmethods =
+Atomic::cmpxchg(this, &_oops_do_mark_nmethods, required_mark_nmethods);
 if (observed_mark_nmethods == required_mark_nmethods)
 break;
 }
@@ -1681,9 +1677,9 @@ bool nmethod::test_set_oops_do_mark() {
 void nmethod::oops_do_marking_prologue() {
 if (TraceScavenge) { tty->print_cr("[oops_do_marking_prologue"); }
 assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row");
-// We use cmpxchg_ptr instead of regular assignment here because the user
+// We use cmpxchg instead of regular assignment here because the user
 // may fork a bunch of threads, and we need them all to see the same state.
-void* observed = Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_nmethods, NULL);
+nmethod* observed = Atomic::cmpxchg(NMETHOD_SENTINEL, &_oops_do_mark_nmethods, (nmethod*)NULL);
 guarantee(observed == NULL, "no races in this sequential code");
 }
@@ -1698,8 +1694,8 @@ void nmethod::oops_do_marking_epilogue() {
 NOT_PRODUCT(if (TraceScavenge) cur->print_on(tty, "oops_do, unmark"));
 cur = next;
 }
-void* required = _oops_do_mark_nmethods;
-void* observed = Atomic::cmpxchg_ptr(NULL, &_oops_do_mark_nmethods, required);
+nmethod* required = _oops_do_mark_nmethods;
+nmethod* observed = Atomic::cmpxchg((nmethod*)NULL, &_oops_do_mark_nmethods, required);
 guarantee(observed == required, "no races in this sequential code");
 if (TraceScavenge) { tty->print_cr("oops_do_marking_epilogue]"); }
 }

View File

@@ -1077,7 +1077,7 @@ ConcurrentMarkSweepGeneration::par_promote(int thread_num,
 NOT_PRODUCT(
 Atomic::inc(&_numObjectsPromoted);
-Atomic::add_ptr(alloc_sz, &_numWordsPromoted);
+Atomic::add(alloc_sz, &_numWordsPromoted);
 )
 return obj;
@@ -3180,7 +3180,7 @@ void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
 HeapWord* cur = read;
 while (f > read) {
 cur = read;
-read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
+read = Atomic::cmpxchg(f, &_global_finger, cur);
 if (cur == read) {
 // our cas succeeded
 assert(_global_finger >= f, "protocol consistency");
@@ -7853,7 +7853,7 @@ bool CMSCollector::par_take_from_overflow_list(size_t num,
 return false;
 }
 // Grab the entire list; we'll put back a suffix
-oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
+oop prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
 Thread* tid = Thread::current();
 // Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
 // set to ParallelGCThreads.
@@ -7868,7 +7868,7 @@ bool CMSCollector::par_take_from_overflow_list(size_t num,
 return false;
 } else if (_overflow_list != BUSY) {
 // Try and grab the prefix
-prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
+prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
 }
 }
 // If the list was found to be empty, or we spun long
@@ -7881,7 +7881,7 @@ bool CMSCollector::par_take_from_overflow_list(size_t num,
 if (prefix == NULL) {
 // Write back the NULL in case we overwrote it with BUSY above
 // and it is still the same value.
-(void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
+Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
 }
 return false;
 }
@@ -7896,7 +7896,7 @@ bool CMSCollector::par_take_from_overflow_list(size_t num,
 // Write back the NULL in lieu of the BUSY we wrote
 // above, if it is still the same value.
 if (_overflow_list == BUSY) {
-(void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
+Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
 }
 } else {
 // Chop off the suffix and return it to the global list.
@@ -7912,7 +7912,7 @@ bool CMSCollector::par_take_from_overflow_list(size_t num,
 bool attached = false;
 while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
 observed_overflow_list =
-(oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
+Atomic::cmpxchg((oopDesc*)suffix_head, &_overflow_list, (oopDesc*)cur_overflow_list);
 if (cur_overflow_list == observed_overflow_list) {
 attached = true;
 break;
@@ -7937,7 +7937,7 @@ bool CMSCollector::par_take_from_overflow_list(size_t num,
 }
 // ... and try to place spliced list back on overflow_list ...
 observed_overflow_list =
-(oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
+Atomic::cmpxchg((oopDesc*)suffix_head, &_overflow_list, (oopDesc*)cur_overflow_list);
 } while (cur_overflow_list != observed_overflow_list);
 // ... until we have succeeded in doing so.
 }
@@ -7958,7 +7958,7 @@ bool CMSCollector::par_take_from_overflow_list(size_t num,
 }
 #ifndef PRODUCT
 assert(_num_par_pushes >= n, "Too many pops?");
-Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
+Atomic::sub(n, &_num_par_pushes);
 #endif
 return true;
 }
@@ -7987,7 +7987,7 @@ void CMSCollector::par_push_on_overflow_list(oop p) {
 p->set_mark(NULL);
 }
 observed_overflow_list =
-(oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
+Atomic::cmpxchg((oopDesc*)p, &_overflow_list, (oopDesc*)cur_overflow_list);
 } while (cur_overflow_list != observed_overflow_list);
 }
 #undef BUSY

View File

@@ -1297,7 +1297,7 @@ void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadSt
 from_space_obj->set_klass_to_list_ptr(NULL);
 }
 observed_overflow_list =
-(oop)Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur_overflow_list);
+Atomic::cmpxchg((oopDesc*)from_space_obj, &_overflow_list, (oopDesc*)cur_overflow_list);
 } while (cur_overflow_list != observed_overflow_list);
 }
 }
@@ -1340,7 +1340,7 @@ bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan
 if (_overflow_list == NULL) return false;
 // Otherwise, there was something there; try claiming the list.
-oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
+oop prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
 // Trim off a prefix of at most objsFromOverflow items
 Thread* tid = Thread::current();
 size_t spin_count = ParallelGCThreads;
@@ -1354,7 +1354,7 @@ bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan
 return false;
 } else if (_overflow_list != BUSY) {
 // try and grab the prefix
-prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
+prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
 }
 }
 if (prefix == NULL || prefix == BUSY) {
@@ -1362,7 +1362,7 @@ bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan
 if (prefix == NULL) {
 // Write back the NULL in case we overwrote it with BUSY above
 // and it is still the same value.
-(void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
+(void) Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
 }
 return false;
 }
@@ -1381,7 +1381,7 @@ bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan
 // Write back the NULL in lieu of the BUSY we wrote
 // above and it is still the same value.
 if (_overflow_list == BUSY) {
-(void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
+(void) Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
 }
 } else {
 assert(suffix != BUSY, "Error");
@@ -1395,7 +1395,7 @@ bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan
 bool attached = false;
 while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
 observed_overflow_list =
-(oop) Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
+Atomic::cmpxchg((oopDesc*)suffix, &_overflow_list, (oopDesc*)cur_overflow_list);
 if (cur_overflow_list == observed_overflow_list) {
 attached = true;
 break;
@@ -1421,7 +1421,7 @@ bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan
 last->set_klass_to_list_ptr(NULL);
 }
 observed_overflow_list =
-(oop)Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
+Atomic::cmpxchg((oopDesc*)suffix, &_overflow_list, (oopDesc*)cur_overflow_list);
 } while (cur_overflow_list != observed_overflow_list);
 }
 }
@@ -1453,7 +1453,7 @@ bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan
 TASKQUEUE_STATS_ONLY(par_scan_state->note_overflow_refill(n));
 #ifndef PRODUCT
 assert(_num_par_pushes >= n, "Too many pops?");
-Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
+Atomic::sub(n, &_num_par_pushes);
 #endif
 return true;
 }

View File

@@ -280,13 +280,13 @@ void DirtyCardQueueSet::par_apply_closure_to_all_completed_buffers(CardTableEntr
 BufferNode* nd = _cur_par_buffer_node;
 while (nd != NULL) {
 BufferNode* next = nd->next();
-void* actual = Atomic::cmpxchg_ptr(next, &_cur_par_buffer_node, nd);
+BufferNode* actual = Atomic::cmpxchg(next, &_cur_par_buffer_node, nd);
 if (actual == nd) {
 bool b = apply_closure_to_buffer(cl, nd, false);
 guarantee(b, "Should not stop early.");
 nd = next;
 } else {
-nd = static_cast<BufferNode*>(actual);
+nd = actual;
 }
 }
 }

View File

@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -155,19 +155,19 @@ G1CodeRootSet::~G1CodeRootSet() {
 }
 G1CodeRootSetTable* G1CodeRootSet::load_acquire_table() {
-return (G1CodeRootSetTable*) OrderAccess::load_ptr_acquire(&_table);
+return OrderAccess::load_acquire(&_table);
 }
 void G1CodeRootSet::allocate_small_table() {
 G1CodeRootSetTable* temp = new G1CodeRootSetTable(SmallSize);
-OrderAccess::release_store_ptr(&_table, temp);
+OrderAccess::release_store(&_table, temp);
 }
 void G1CodeRootSetTable::purge_list_append(G1CodeRootSetTable* table) {
 for (;;) {
 table->_purge_next = _purge_list;
-G1CodeRootSetTable* old = (G1CodeRootSetTable*) Atomic::cmpxchg_ptr(table, &_purge_list, table->_purge_next);
+G1CodeRootSetTable* old = Atomic::cmpxchg(table, &_purge_list, table->_purge_next);
 if (old == table->_purge_next) {
 break;
 }
@@ -191,7 +191,7 @@ void G1CodeRootSet::move_to_large() {
 G1CodeRootSetTable::purge_list_append(_table);
-OrderAccess::release_store_ptr(&_table, temp);
+OrderAccess::release_store(&_table, temp);
 }
 void G1CodeRootSet::purge() {

View File

@@ -3451,10 +3451,10 @@ private:
 // Variables used to claim nmethods.
 CompiledMethod* _first_nmethod;
-volatile CompiledMethod* _claimed_nmethod;
+CompiledMethod* volatile _claimed_nmethod;
 // The list of nmethods that need to be processed by the second pass.
-volatile CompiledMethod* _postponed_list;
+CompiledMethod* volatile _postponed_list;
 volatile uint _num_entered_barrier;
 public:
@@ -3473,7 +3473,7 @@ private:
 if(iter.next_alive()) {
 _first_nmethod = iter.method();
 }
-_claimed_nmethod = (volatile CompiledMethod*)_first_nmethod;
+_claimed_nmethod = _first_nmethod;
 }
 ~G1CodeCacheUnloadingTask() {
@@ -3489,9 +3489,9 @@ private:
 void add_to_postponed_list(CompiledMethod* nm) {
 CompiledMethod* old;
 do {
-old = (CompiledMethod*)_postponed_list;
+old = _postponed_list;
 nm->set_unloading_next(old);
-} while ((CompiledMethod*)Atomic::cmpxchg_ptr(nm, &_postponed_list, old) != old);
+} while (Atomic::cmpxchg(nm, &_postponed_list, old) != old);
 }
 void clean_nmethod(CompiledMethod* nm) {
@@ -3520,7 +3520,7 @@ private:
 do {
 *num_claimed_nmethods = 0;
-first = (CompiledMethod*)_claimed_nmethod;
+first = _claimed_nmethod;
 last = CompiledMethodIterator(first);
 if (first != NULL) {
@@ -3534,7 +3534,7 @@ private:
 }
 }
-} while ((CompiledMethod*)Atomic::cmpxchg_ptr(last.method(), &_claimed_nmethod, first) != first);
+} while (Atomic::cmpxchg(last.method(), &_claimed_nmethod, first) != first);
 }
 CompiledMethod* claim_postponed_nmethod() {
@@ -3542,14 +3542,14 @@ private:
 CompiledMethod* next;
 do {
-claim = (CompiledMethod*)_postponed_list;
+claim = _postponed_list;
 if (claim == NULL) {
 return NULL;
 }
 next = claim->unloading_next();
-} while ((CompiledMethod*)Atomic::cmpxchg_ptr(next, &_postponed_list, claim) != claim);
+} while (Atomic::cmpxchg(next, &_postponed_list, claim) != claim);
 return claim;
 }

View File

@@ -1870,7 +1870,7 @@ G1ConcurrentMark::claim_region(uint worker_id) {
 HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
 // Is the gap between reading the finger and doing the CAS too long?
-HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
+HeapWord* res = Atomic::cmpxchg(end, &_finger, finger);
 if (res == finger && curr_region != NULL) {
 // we succeeded
 HeapWord* bottom = curr_region->bottom();

View File

@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -29,17 +29,17 @@
 #include "runtime/atomic.hpp"
 inline void G1EvacStats::add_direct_allocated(size_t value) {
-Atomic::add_ptr(value, &_direct_allocated);
+Atomic::add(value, &_direct_allocated);
 }
 inline void G1EvacStats::add_region_end_waste(size_t value) {
-Atomic::add_ptr(value, &_region_end_waste);
-Atomic::add_ptr(1, &_regions_filled);
+Atomic::add(value, &_region_end_waste);
+Atomic::inc(&_regions_filled);
 }
 inline void G1EvacStats::add_failure_used_and_waste(size_t used, size_t waste) {
-Atomic::add_ptr(used, &_failure_used);
-Atomic::add_ptr(waste, &_failure_waste);
+Atomic::add(used, &_failure_used);
+Atomic::add(waste, &_failure_waste);
 }
 #endif // SHARE_VM_GC_G1_G1EVACSTATS_INLINE_HPP

View File

@@ -74,9 +74,9 @@ jbyte* G1HotCardCache::insert(jbyte* card_ptr) {
 // card_ptr in favor of the other option, which would be starting over. This
 // should be OK since card_ptr will likely be the older card already when/if
 // this ever happens.
-jbyte* previous_ptr = (jbyte*)Atomic::cmpxchg_ptr(card_ptr,
+jbyte* previous_ptr = Atomic::cmpxchg(card_ptr,
 &_hot_cache[masked_index],
 current_ptr);
 return (previous_ptr == current_ptr) ? previous_ptr : card_ptr;
 }

View File

@@ -251,7 +251,7 @@ public:
 virtual void work(uint worker_id) {
 size_t const actual_chunk_size = MAX2(chunk_size(), _page_size);
 while (true) {
-char* touch_addr = (char*)Atomic::add_ptr((intptr_t)actual_chunk_size, (volatile void*) &_cur_addr) - actual_chunk_size;
+char* touch_addr = Atomic::add(actual_chunk_size, &_cur_addr) - actual_chunk_size;
 if (touch_addr < _start_addr || touch_addr >= _end_addr) {
 break;
 }

View File

@@ -203,12 +203,12 @@ G1StringDedupUnlinkOrOopsDoClosure::~G1StringDedupUnlinkOrOopsDoClosure() {
 // Atomically claims the next available queue for exclusive access by
 // the current thread. Returns the queue number of the claimed queue.
 size_t G1StringDedupUnlinkOrOopsDoClosure::claim_queue() {
-return (size_t)Atomic::add_ptr(1, &_next_queue) - 1;
+return Atomic::add((size_t)1, &_next_queue) - 1;
 }
 // Atomically claims the next available table partition for exclusive
 // access by the current thread. Returns the table bucket number where
 // the claimed partition starts.
 size_t G1StringDedupUnlinkOrOopsDoClosure::claim_table_partition(size_t partition_size) {
-return (size_t)Atomic::add_ptr(partition_size, &_next_bucket) - partition_size;
+return Atomic::add(partition_size, &_next_bucket) - partition_size;
 }
@ -59,7 +59,7 @@ inline HeapWord* G1ContiguousSpace::par_allocate_impl(size_t min_word_size,
size_t want_to_allocate = MIN2(available, desired_word_size); size_t want_to_allocate = MIN2(available, desired_word_size);
if (want_to_allocate >= min_word_size) { if (want_to_allocate >= min_word_size) {
HeapWord* new_top = obj + want_to_allocate; HeapWord* new_top = obj + want_to_allocate;
HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj); HeapWord* result = Atomic::cmpxchg(new_top, top_addr(), obj);
// result can be one of two: // result can be one of two:
// the old top value: the exchange succeeded // the old top value: the exchange succeeded
// otherwise: the new value of the top is returned. // otherwise: the new value of the top is returned.
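This is the standard CAS bump-pointer allocation: read top, compute the new top, and publish it only if no other thread moved top in the meantime. A minimal sketch of the loop, assuming a simple [top, end) byte range rather than HeapWords:

    #include <atomic>
    #include <cstddef>

    // Returns the start of the allocated block, or nullptr if the space is full.
    char* par_allocate(std::atomic<char*>& top, char* end, size_t bytes) {
      char* obj = top.load(std::memory_order_relaxed);
      for (;;) {
        if (static_cast<size_t>(end - obj) < bytes) {
          return nullptr;                     // not enough room left
        }
        char* new_top = obj + bytes;
        // On success the bytes in [obj, new_top) belong to this thread; on
        // failure 'obj' is refreshed with the top another thread installed.
        if (top.compare_exchange_weak(obj, new_top)) {
          return obj;
        }
      }
    }

With the templated Atomic::cmpxchg the result already has type HeapWord*, which is why the (HeapWord*) cast on the return value disappears in the hunk above.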
@ -113,9 +113,7 @@ protected:
public: public:
HeapRegion* hr() const { HeapRegion* hr() const { return OrderAccess::load_acquire(&_hr); }
return (HeapRegion*) OrderAccess::load_ptr_acquire(&_hr);
}
jint occupied() const { jint occupied() const {
// Overkill, but if we ever need it... // Overkill, but if we ever need it...
@ -133,7 +131,7 @@ public:
_bm.clear(); _bm.clear();
// Make sure that the bitmap clearing above has been finished before publishing // Make sure that the bitmap clearing above has been finished before publishing
// this PRT to concurrent threads. // this PRT to concurrent threads.
OrderAccess::release_store_ptr(&_hr, hr); OrderAccess::release_store(&_hr, hr);
} }
void add_reference(OopOrNarrowOopStar from) { void add_reference(OopOrNarrowOopStar from) {
@ -182,7 +180,7 @@ public:
while (true) { while (true) {
PerRegionTable* fl = _free_list; PerRegionTable* fl = _free_list;
last->set_next(fl); last->set_next(fl);
PerRegionTable* res = (PerRegionTable*) Atomic::cmpxchg_ptr(prt, &_free_list, fl); PerRegionTable* res = Atomic::cmpxchg(prt, &_free_list, fl);
if (res == fl) { if (res == fl) {
return; return;
} }
@ -199,9 +197,7 @@ public:
PerRegionTable* fl = _free_list; PerRegionTable* fl = _free_list;
while (fl != NULL) { while (fl != NULL) {
PerRegionTable* nxt = fl->next(); PerRegionTable* nxt = fl->next();
PerRegionTable* res = PerRegionTable* res = Atomic::cmpxchg(nxt, &_free_list, fl);
(PerRegionTable*)
Atomic::cmpxchg_ptr(nxt, &_free_list, fl);
if (res == fl) { if (res == fl) {
fl->init(hr, true); fl->init(hr, true);
return fl; return fl;
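Both free-list operations above are single-CAS pushes and pops on a shared head, i.e. a Treiber stack. A generic sketch of that shape (Node is a stand-in for PerRegionTable; only the next link matters for the pattern):

    #include <atomic>

    struct Node { Node* next; };

    void push(std::atomic<Node*>& head, Node* n) {
      Node* old_head = head.load(std::memory_order_relaxed);
      do {
        n->next = old_head;                  // link before publishing
      } while (!head.compare_exchange_weak(old_head, n,
                                           std::memory_order_release,
                                           std::memory_order_relaxed));
    }

    Node* pop(std::atomic<Node*>& head) {
      Node* old_head = head.load(std::memory_order_acquire);
      while (old_head != nullptr &&
             !head.compare_exchange_weak(old_head, old_head->next,
                                         std::memory_order_acquire,
                                         std::memory_order_relaxed)) {
        // a failed CAS refreshed old_head; retry with the current head
      }
      return old_head;
    }

The pop side of a bare Treiber stack is exposed to the ABA problem in general; the sketch is only meant to show the CAS shape these hunks rely on.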
@ -416,7 +412,7 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
// some mark bits may not yet seem cleared or a 'later' update // some mark bits may not yet seem cleared or a 'later' update
// performed by a concurrent thread could be undone when the // performed by a concurrent thread could be undone when the
// zeroing becomes visible). This requires store ordering. // zeroing becomes visible). This requires store ordering.
OrderAccess::release_store_ptr((volatile PerRegionTable*)&_fine_grain_regions[ind], prt); OrderAccess::release_store(&_fine_grain_regions[ind], prt);
_n_fine_entries++; _n_fine_entries++;
if (G1HRRSUseSparseTable) { if (G1HRRSUseSparseTable) {
@ -292,9 +292,7 @@ void SparsePRT::add_to_expanded_list(SparsePRT* sprt) {
SparsePRT* hd = _head_expanded_list; SparsePRT* hd = _head_expanded_list;
while (true) { while (true) {
sprt->_next_expanded = hd; sprt->_next_expanded = hd;
SparsePRT* res = SparsePRT* res = Atomic::cmpxchg(sprt, &_head_expanded_list, hd);
(SparsePRT*)
Atomic::cmpxchg_ptr(sprt, &_head_expanded_list, hd);
if (res == hd) return; if (res == hd) return;
else hd = res; else hd = res;
} }
@ -305,9 +303,7 @@ SparsePRT* SparsePRT::get_from_expanded_list() {
SparsePRT* hd = _head_expanded_list; SparsePRT* hd = _head_expanded_list;
while (hd != NULL) { while (hd != NULL) {
SparsePRT* next = hd->next_expanded(); SparsePRT* next = hd->next_expanded();
SparsePRT* res = SparsePRT* res = Atomic::cmpxchg(next, &_head_expanded_list, hd);
(SparsePRT*)
Atomic::cmpxchg_ptr(next, &_head_expanded_list, hd);
if (res == hd) { if (res == hd) {
hd->set_next_expanded(NULL); hd->set_next_expanded(NULL);
return hd; return hd;
@ -77,8 +77,7 @@ GCTaskTimeStamp* GCTaskThread::time_stamp_at(uint index) {
if (_time_stamps == NULL) { if (_time_stamps == NULL) {
// We allocate the _time_stamps array lazily since logging can be enabled dynamically // We allocate the _time_stamps array lazily since logging can be enabled dynamically
GCTaskTimeStamp* time_stamps = NEW_C_HEAP_ARRAY(GCTaskTimeStamp, GCTaskTimeStampEntries, mtGC); GCTaskTimeStamp* time_stamps = NEW_C_HEAP_ARRAY(GCTaskTimeStamp, GCTaskTimeStampEntries, mtGC);
void* old = Atomic::cmpxchg_ptr(time_stamps, &_time_stamps, NULL); if (Atomic::cmpxchg(time_stamps, &_time_stamps, (GCTaskTimeStamp*)NULL) != NULL) {
if (old != NULL) {
// Someone already setup the time stamps // Someone already setup the time stamps
FREE_C_HEAP_ARRAY(GCTaskTimeStamp, time_stamps); FREE_C_HEAP_ARRAY(GCTaskTimeStamp, time_stamps);
} }
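The time-stamp array above is installed lazily and lock-free: allocate tentatively, CAS the pointer into place, and discard the allocation if another thread won the race. The explicit (GCTaskTimeStamp*)NULL in the new call gives the templated cmpxchg a concrete pointer type to deduce. A sketch of the same install-once pattern with std::atomic (type and field names are illustrative):

    #include <atomic>

    struct TimeStamp { long start; long end; };

    TimeStamp* lazy_time_stamps(std::atomic<TimeStamp*>& slot, int entries) {
      TimeStamp* existing = slot.load(std::memory_order_acquire);
      if (existing != nullptr) {
        return existing;                     // already installed
      }
      TimeStamp* fresh = new TimeStamp[entries]();
      TimeStamp* expected = nullptr;
      if (!slot.compare_exchange_strong(expected, fresh)) {
        delete[] fresh;                      // someone else installed theirs first
        return expected;                     // the winning thread's array
      }
      return fresh;
    }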
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2006, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -862,7 +862,7 @@ HeapWord* MutableNUMASpace::cas_allocate(size_t size) {
if (p != NULL) { if (p != NULL) {
HeapWord* cur_top, *cur_chunk_top = p + size; HeapWord* cur_top, *cur_chunk_top = p + size;
while ((cur_top = top()) < cur_chunk_top) { // Keep _top updated. while ((cur_top = top()) < cur_chunk_top) { // Keep _top updated.
if (Atomic::cmpxchg_ptr(cur_chunk_top, top_addr(), cur_top) == cur_top) { if (Atomic::cmpxchg(cur_chunk_top, top_addr(), cur_top) == cur_top) {
break; break;
} }
} }
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -192,7 +192,7 @@ HeapWord* MutableSpace::cas_allocate(size_t size) {
HeapWord* obj = top(); HeapWord* obj = top();
if (pointer_delta(end(), obj) >= size) { if (pointer_delta(end(), obj) >= size) {
HeapWord* new_top = obj + size; HeapWord* new_top = obj + size;
HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj); HeapWord* result = Atomic::cmpxchg(new_top, top_addr(), obj);
// result can be one of two: // result can be one of two:
// the old top value: the exchange succeeded // the old top value: the exchange succeeded
// otherwise: the new value of the top is returned. // otherwise: the new value of the top is returned.
@ -211,7 +211,7 @@ HeapWord* MutableSpace::cas_allocate(size_t size) {
// Try to deallocate previous allocation. Returns true upon success. // Try to deallocate previous allocation. Returns true upon success.
bool MutableSpace::cas_deallocate(HeapWord *obj, size_t size) { bool MutableSpace::cas_deallocate(HeapWord *obj, size_t size) {
HeapWord* expected_top = obj + size; HeapWord* expected_top = obj + size;
return (HeapWord*)Atomic::cmpxchg_ptr(obj, top_addr(), expected_top) == expected_top; return Atomic::cmpxchg(obj, top_addr(), expected_top) == expected_top;
} }
void MutableSpace::oop_iterate_no_header(OopClosure* cl) { void MutableSpace::oop_iterate_no_header(OopClosure* cl) {
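cas_deallocate is the inverse trick: roll top back by exactly one allocation, but only if top still sits at obj + size, meaning nothing was allocated after us. In isolation, assuming a byte-addressed top:

    #include <atomic>
    #include <cstddef>

    // Returns true if the deallocation succeeded; a failed CAS just means the
    // memory stays allocated.
    bool cas_deallocate(std::atomic<char*>& top, char* obj, size_t size) {
      char* expected_top = obj + size;
      return top.compare_exchange_strong(expected_top, obj);
    }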
@ -90,7 +90,7 @@ ParMarkBitMap::mark_obj(HeapWord* addr, size_t size)
bool end_bit_ok = _end_bits.par_set_bit(end_bit); bool end_bit_ok = _end_bits.par_set_bit(end_bit);
assert(end_bit_ok, "concurrency problem"); assert(end_bit_ok, "concurrency problem");
DEBUG_ONLY(Atomic::inc(&mark_bitmap_count)); DEBUG_ONLY(Atomic::inc(&mark_bitmap_count));
DEBUG_ONLY(Atomic::add_ptr(size, &mark_bitmap_size)); DEBUG_ONLY(Atomic::add(size, &mark_bitmap_size));
return true; return true;
} }
return false; return false;
@ -521,7 +521,7 @@ void ParallelCompactData::add_obj(HeapWord* addr, size_t len)
const size_t end_region = (obj_ofs + len - 1) >> Log2RegionSize; const size_t end_region = (obj_ofs + len - 1) >> Log2RegionSize;
DEBUG_ONLY(Atomic::inc(&add_obj_count);) DEBUG_ONLY(Atomic::inc(&add_obj_count);)
DEBUG_ONLY(Atomic::add_ptr(len, &add_obj_size);) DEBUG_ONLY(Atomic::add(len, &add_obj_size);)
if (beg_region == end_region) { if (beg_region == end_region) {
// All in one region. // All in one region.
@ -586,7 +586,7 @@ inline void ParallelCompactData::RegionData::set_highest_ref(HeapWord* addr)
#ifdef ASSERT #ifdef ASSERT
HeapWord* tmp = _highest_ref; HeapWord* tmp = _highest_ref;
while (addr > tmp) { while (addr > tmp) {
tmp = (HeapWord*)Atomic::cmpxchg_ptr(addr, &_highest_ref, tmp); tmp = Atomic::cmpxchg(addr, &_highest_ref, tmp);
} }
#endif // #ifdef ASSERT #endif // #ifdef ASSERT
} }
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -43,19 +43,19 @@ inline HeapWord* PLAB::allocate_aligned(size_t word_sz, unsigned short alignment
} }
void PLABStats::add_allocated(size_t v) { void PLABStats::add_allocated(size_t v) {
Atomic::add_ptr(v, &_allocated); Atomic::add(v, &_allocated);
} }
void PLABStats::add_unused(size_t v) { void PLABStats::add_unused(size_t v) {
Atomic::add_ptr(v, &_unused); Atomic::add(v, &_unused);
} }
void PLABStats::add_wasted(size_t v) { void PLABStats::add_wasted(size_t v) {
Atomic::add_ptr(v, &_wasted); Atomic::add(v, &_wasted);
} }
void PLABStats::add_undo_wasted(size_t v) { void PLABStats::add_undo_wasted(size_t v) {
Atomic::add_ptr(v, &_undo_wasted); Atomic::add(v, &_undo_wasted);
} }
#endif // SHARE_VM_GC_SHARED_PLAB_INLINE_HPP #endif // SHARE_VM_GC_SHARED_PLAB_INLINE_HPP
@ -631,7 +631,7 @@ inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size) {
HeapWord* obj = top(); HeapWord* obj = top();
if (pointer_delta(end(), obj) >= size) { if (pointer_delta(end(), obj) >= size) {
HeapWord* new_top = obj + size; HeapWord* new_top = obj + size;
HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj); HeapWord* result = Atomic::cmpxchg(new_top, top_addr(), obj);
// result can be one of two: // result can be one of two:
// the old top value: the exchange succeeded // the old top value: the exchange succeeded
// otherwise: the new value of the top is returned. // otherwise: the new value of the top is returned.
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -259,9 +259,7 @@ GenericTaskQueueSet<T, F>::steal(uint queue_num, int* seed, E& t) {
template <unsigned int N, MEMFLAGS F> template <unsigned int N, MEMFLAGS F>
inline typename TaskQueueSuper<N, F>::Age TaskQueueSuper<N, F>::Age::cmpxchg(const Age new_age, const Age old_age) volatile { inline typename TaskQueueSuper<N, F>::Age TaskQueueSuper<N, F>::Age::cmpxchg(const Age new_age, const Age old_age) volatile {
return (size_t) Atomic::cmpxchg_ptr((intptr_t)new_age._data, return Atomic::cmpxchg(new_age._data, &_data, old_age._data);
(volatile intptr_t *)&_data,
(intptr_t)old_age._data);
} }
template<class E, MEMFLAGS F, unsigned int N> template<class E, MEMFLAGS F, unsigned int N>
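Age packs a tag and a top index into one word precisely so both can be updated with a single CAS; the old code had to launder that through intptr_t casts, while the templated cmpxchg works directly on the type of _data. A sketch of the idiom with an assumed 16+16 bit layout (HotSpot's actual field widths and packing are platform specific):

    #include <atomic>
    #include <cstdint>

    struct Age { uint16_t tag; uint16_t top; };

    static uint32_t pack(Age a)        { return (uint32_t(a.tag) << 16) | a.top; }
    static Age      unpack(uint32_t d) { Age a = { uint16_t(d >> 16), uint16_t(d & 0xffff) }; return a; }

    // CAS the whole (tag, top) pair at once; returns the Age that was found,
    // just as Atomic::cmpxchg returns the value it observed.
    Age age_cmpxchg(std::atomic<uint32_t>& data, Age new_age, Age old_age) {
      uint32_t expected = pack(old_age);
      data.compare_exchange_strong(expected, pack(new_age));
      return unpack(expected);
    }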
@ -705,7 +705,7 @@ BytecodeInterpreter::run(interpreterState istate) {
if (hash != markOopDesc::no_hash) { if (hash != markOopDesc::no_hash) {
header = header->copy_set_hash(hash); header = header->copy_set_hash(hash);
} }
if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), mark) == mark) { if (Atomic::cmpxchg(header, rcvr->mark_addr(), mark) == mark) {
if (PrintBiasedLockingStatistics) if (PrintBiasedLockingStatistics)
(*BiasedLocking::revoked_lock_entry_count_addr())++; (*BiasedLocking::revoked_lock_entry_count_addr())++;
} }
@ -715,7 +715,7 @@ BytecodeInterpreter::run(interpreterState istate) {
if (hash != markOopDesc::no_hash) { if (hash != markOopDesc::no_hash) {
new_header = new_header->copy_set_hash(hash); new_header = new_header->copy_set_hash(hash);
} }
if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), mark) == mark) { if (Atomic::cmpxchg(new_header, rcvr->mark_addr(), mark) == mark) {
if (PrintBiasedLockingStatistics) { if (PrintBiasedLockingStatistics) {
(* BiasedLocking::rebiased_lock_entry_count_addr())++; (* BiasedLocking::rebiased_lock_entry_count_addr())++;
} }
@ -734,7 +734,7 @@ BytecodeInterpreter::run(interpreterState istate) {
markOop new_header = (markOop) ((uintptr_t) header | thread_ident); markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
// Debugging hint. // Debugging hint.
DEBUG_ONLY(mon->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);) DEBUG_ONLY(mon->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), header) == header) { if (Atomic::cmpxchg(new_header, rcvr->mark_addr(), header) == header) {
if (PrintBiasedLockingStatistics) { if (PrintBiasedLockingStatistics) {
(* BiasedLocking::anonymously_biased_lock_entry_count_addr())++; (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
} }
@ -750,7 +750,7 @@ BytecodeInterpreter::run(interpreterState istate) {
markOop displaced = rcvr->mark()->set_unlocked(); markOop displaced = rcvr->mark()->set_unlocked();
mon->lock()->set_displaced_header(displaced); mon->lock()->set_displaced_header(displaced);
bool call_vm = UseHeavyMonitors; bool call_vm = UseHeavyMonitors;
if (call_vm || Atomic::cmpxchg_ptr(mon, rcvr->mark_addr(), displaced) != displaced) { if (call_vm || Atomic::cmpxchg((markOop)mon, rcvr->mark_addr(), displaced) != displaced) {
// Is it simple recursive case? // Is it simple recursive case?
if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) { if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
mon->lock()->set_displaced_header(NULL); mon->lock()->set_displaced_header(NULL);
@ -903,7 +903,7 @@ BytecodeInterpreter::run(interpreterState istate) {
if (hash != markOopDesc::no_hash) { if (hash != markOopDesc::no_hash) {
header = header->copy_set_hash(hash); header = header->copy_set_hash(hash);
} }
if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) { if (Atomic::cmpxchg(header, lockee->mark_addr(), mark) == mark) {
if (PrintBiasedLockingStatistics) { if (PrintBiasedLockingStatistics) {
(*BiasedLocking::revoked_lock_entry_count_addr())++; (*BiasedLocking::revoked_lock_entry_count_addr())++;
} }
@ -914,7 +914,7 @@ BytecodeInterpreter::run(interpreterState istate) {
if (hash != markOopDesc::no_hash) { if (hash != markOopDesc::no_hash) {
new_header = new_header->copy_set_hash(hash); new_header = new_header->copy_set_hash(hash);
} }
if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) { if (Atomic::cmpxchg(new_header, lockee->mark_addr(), mark) == mark) {
if (PrintBiasedLockingStatistics) { if (PrintBiasedLockingStatistics) {
(* BiasedLocking::rebiased_lock_entry_count_addr())++; (* BiasedLocking::rebiased_lock_entry_count_addr())++;
} }
@ -932,7 +932,7 @@ BytecodeInterpreter::run(interpreterState istate) {
markOop new_header = (markOop) ((uintptr_t) header | thread_ident); markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
// debugging hint // debugging hint
DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);) DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) { if (Atomic::cmpxchg(new_header, lockee->mark_addr(), header) == header) {
if (PrintBiasedLockingStatistics) { if (PrintBiasedLockingStatistics) {
(* BiasedLocking::anonymously_biased_lock_entry_count_addr())++; (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
} }
@ -948,7 +948,7 @@ BytecodeInterpreter::run(interpreterState istate) {
markOop displaced = lockee->mark()->set_unlocked(); markOop displaced = lockee->mark()->set_unlocked();
entry->lock()->set_displaced_header(displaced); entry->lock()->set_displaced_header(displaced);
bool call_vm = UseHeavyMonitors; bool call_vm = UseHeavyMonitors;
if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) { if (call_vm || Atomic::cmpxchg((markOop)entry, lockee->mark_addr(), displaced) != displaced) {
// Is it simple recursive case? // Is it simple recursive case?
if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) { if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
entry->lock()->set_displaced_header(NULL); entry->lock()->set_displaced_header(NULL);
@ -1844,7 +1844,7 @@ run:
if (hash != markOopDesc::no_hash) { if (hash != markOopDesc::no_hash) {
header = header->copy_set_hash(hash); header = header->copy_set_hash(hash);
} }
if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) { if (Atomic::cmpxchg(header, lockee->mark_addr(), mark) == mark) {
if (PrintBiasedLockingStatistics) if (PrintBiasedLockingStatistics)
(*BiasedLocking::revoked_lock_entry_count_addr())++; (*BiasedLocking::revoked_lock_entry_count_addr())++;
} }
@ -1855,7 +1855,7 @@ run:
if (hash != markOopDesc::no_hash) { if (hash != markOopDesc::no_hash) {
new_header = new_header->copy_set_hash(hash); new_header = new_header->copy_set_hash(hash);
} }
if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) { if (Atomic::cmpxchg(new_header, lockee->mark_addr(), mark) == mark) {
if (PrintBiasedLockingStatistics) if (PrintBiasedLockingStatistics)
(* BiasedLocking::rebiased_lock_entry_count_addr())++; (* BiasedLocking::rebiased_lock_entry_count_addr())++;
} }
@ -1875,7 +1875,7 @@ run:
markOop new_header = (markOop) ((uintptr_t) header | thread_ident); markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
// debugging hint // debugging hint
DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);) DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) { if (Atomic::cmpxchg(new_header, lockee->mark_addr(), header) == header) {
if (PrintBiasedLockingStatistics) if (PrintBiasedLockingStatistics)
(* BiasedLocking::anonymously_biased_lock_entry_count_addr())++; (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
} }
@ -1891,7 +1891,7 @@ run:
markOop displaced = lockee->mark()->set_unlocked(); markOop displaced = lockee->mark()->set_unlocked();
entry->lock()->set_displaced_header(displaced); entry->lock()->set_displaced_header(displaced);
bool call_vm = UseHeavyMonitors; bool call_vm = UseHeavyMonitors;
if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) { if (call_vm || Atomic::cmpxchg((markOop)entry, lockee->mark_addr(), displaced) != displaced) {
// Is it simple recursive case? // Is it simple recursive case?
if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) { if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
entry->lock()->set_displaced_header(NULL); entry->lock()->set_displaced_header(NULL);
@ -1923,7 +1923,8 @@ run:
bool call_vm = UseHeavyMonitors; bool call_vm = UseHeavyMonitors;
// If it isn't recursive we either must swap old header or call the runtime // If it isn't recursive we either must swap old header or call the runtime
if (header != NULL || call_vm) { if (header != NULL || call_vm) {
if (call_vm || Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) { markOop old_header = markOopDesc::encode(lock);
if (call_vm || lockee->cas_set_mark(header, old_header) != old_header) {
// restore object for the slow case // restore object for the slow case
most_recent->set_obj(lockee); most_recent->set_obj(lockee);
CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception); CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception);
@ -2189,7 +2190,7 @@ run:
HeapWord* compare_to = *Universe::heap()->top_addr(); HeapWord* compare_to = *Universe::heap()->top_addr();
HeapWord* new_top = compare_to + obj_size; HeapWord* new_top = compare_to + obj_size;
if (new_top <= *Universe::heap()->end_addr()) { if (new_top <= *Universe::heap()->end_addr()) {
if (Atomic::cmpxchg_ptr(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) { if (Atomic::cmpxchg(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) {
goto retry; goto retry;
} }
result = (oop) compare_to; result = (oop) compare_to;
@ -2975,7 +2976,8 @@ run:
if (!lockee->mark()->has_bias_pattern()) { if (!lockee->mark()->has_bias_pattern()) {
// If it isn't recursive we either must swap old header or call the runtime // If it isn't recursive we either must swap old header or call the runtime
if (header != NULL) { if (header != NULL) {
if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) { markOop old_header = markOopDesc::encode(lock);
if (lockee->cas_set_mark(header, old_header) != old_header) {
// restore object for the slow case // restore object for the slow case
end->set_obj(lockee); end->set_obj(lockee);
{ {
@ -3050,7 +3052,8 @@ run:
base->set_obj(NULL); base->set_obj(NULL);
// If it isn't recursive we either must swap old header or call the runtime // If it isn't recursive we either must swap old header or call the runtime
if (header != NULL) { if (header != NULL) {
if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) { markOop old_header = markOopDesc::encode(lock);
if (rcvr->cas_set_mark(header, old_header) != old_header) {
// restore object for the slow case // restore object for the slow case
base->set_obj(rcvr); base->set_obj(rcvr);
{ {
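The unlock fast paths above swap the saved (displaced) header back into the object's mark word, and now go through markOopDesc::encode(lock) plus cas_set_mark instead of a raw cmpxchg_ptr on mark_addr(). A heavily simplified schematic of that exit, with the mark word reduced to a plain uintptr_t and an invented BasicLockStub type (this is not HotSpot's real mark encoding):

    #include <atomic>
    #include <cstdint>

    struct BasicLockStub { uintptr_t displaced_header; };

    // Succeeds only if the mark still designates our stack lock; a false
    // return means the caller must take the slow path (runtime monitorexit).
    bool fast_unlock(std::atomic<uintptr_t>& mark, BasicLockStub* lock) {
      uintptr_t expected = reinterpret_cast<uintptr_t>(lock);  // "encode(lock)"
      return mark.compare_exchange_strong(expected, lock->displaced_header);
    }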
@ -448,11 +448,11 @@ OopMapCache::~OopMapCache() {
} }
OopMapCacheEntry* OopMapCache::entry_at(int i) const { OopMapCacheEntry* OopMapCache::entry_at(int i) const {
return (OopMapCacheEntry*)OrderAccess::load_ptr_acquire(&(_array[i % _size])); return OrderAccess::load_acquire(&(_array[i % _size]));
} }
bool OopMapCache::put_at(int i, OopMapCacheEntry* entry, OopMapCacheEntry* old) { bool OopMapCache::put_at(int i, OopMapCacheEntry* entry, OopMapCacheEntry* old) {
return Atomic::cmpxchg_ptr (entry, &_array[i % _size], old) == old; return Atomic::cmpxchg(entry, &_array[i % _size], old) == old;
} }
void OopMapCache::flush() { void OopMapCache::flush() {
@ -564,7 +564,7 @@ void OopMapCache::enqueue_for_cleanup(OopMapCacheEntry* entry) {
do { do {
head = _old_entries; head = _old_entries;
entry->_next = head; entry->_next = head;
success = Atomic::cmpxchg_ptr (entry, &_old_entries, head) == head; success = Atomic::cmpxchg(entry, &_old_entries, head) == head;
} while (!success); } while (!success);
if (log_is_enabled(Debug, interpreter, oopmap)) { if (log_is_enabled(Debug, interpreter, oopmap)) {
@ -1499,7 +1499,7 @@ size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
} }
size_t MetaspaceGC::capacity_until_GC() { size_t MetaspaceGC::capacity_until_GC() {
size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC); size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
assert(value >= MetaspaceSize, "Not initialized properly?"); assert(value >= MetaspaceSize, "Not initialized properly?");
return value; return value;
} }
@ -1507,16 +1507,16 @@ size_t MetaspaceGC::capacity_until_GC() {
bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) { bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
assert_is_aligned(v, Metaspace::commit_alignment()); assert_is_aligned(v, Metaspace::commit_alignment());
size_t capacity_until_GC = (size_t) _capacity_until_GC; intptr_t capacity_until_GC = _capacity_until_GC;
size_t new_value = capacity_until_GC + v; intptr_t new_value = capacity_until_GC + v;
if (new_value < capacity_until_GC) { if (new_value < capacity_until_GC) {
// The addition wrapped around, set new_value to aligned max value. // The addition wrapped around, set new_value to aligned max value.
new_value = align_down(max_uintx, Metaspace::commit_alignment()); new_value = align_down(max_uintx, Metaspace::commit_alignment());
} }
intptr_t expected = (intptr_t) capacity_until_GC; intptr_t expected = _capacity_until_GC;
intptr_t actual = Atomic::cmpxchg_ptr((intptr_t) new_value, &_capacity_until_GC, expected); intptr_t actual = Atomic::cmpxchg(new_value, &_capacity_until_GC, expected);
if (expected != actual) { if (expected != actual) {
return false; return false;
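inc_capacity_until_GC combines an overflow clamp with a single CAS: compute the candidate capacity, cap it if the addition wrapped, and publish it only if nobody changed the value first. A sketch of one attempt, using size_t so the wrap-around check stays well defined:

    #include <atomic>
    #include <cstddef>

    // Returns true if this thread published the new capacity; on false the
    // caller can re-read and try again, or give up. 'aligned_max' plays the
    // role of align_down(max_uintx, commit_alignment).
    bool try_grow(std::atomic<size_t>& capacity, size_t delta, size_t aligned_max) {
      size_t old_value = capacity.load(std::memory_order_relaxed);
      size_t new_value = old_value + delta;
      if (new_value < old_value) {           // the addition wrapped around
        new_value = aligned_max;
      }
      size_t expected = old_value;
      return capacity.compare_exchange_strong(expected, new_value);
    }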
@ -1534,7 +1534,7 @@ bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size
size_t MetaspaceGC::dec_capacity_until_GC(size_t v) { size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
assert_is_aligned(v, Metaspace::commit_alignment()); assert_is_aligned(v, Metaspace::commit_alignment());
return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC); return (size_t)Atomic::sub((intptr_t)v, &_capacity_until_GC);
} }
void MetaspaceGC::initialize() { void MetaspaceGC::initialize() {
@ -2398,7 +2398,7 @@ void SpaceManager::inc_size_metrics(size_t words) {
void SpaceManager::inc_used_metrics(size_t words) { void SpaceManager::inc_used_metrics(size_t words) {
// Add to the per SpaceManager total // Add to the per SpaceManager total
Atomic::add_ptr(words, &_allocated_blocks_words); Atomic::add(words, &_allocated_blocks_words);
// Add to the global total // Add to the global total
MetaspaceAux::inc_used(mdtype(), words); MetaspaceAux::inc_used(mdtype(), words);
} }
@ -2753,8 +2753,7 @@ void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
// sweep which is a concurrent phase. Protection by the expand_lock() // sweep which is a concurrent phase. Protection by the expand_lock()
// is not enough since allocation is on a per Metaspace basis // is not enough since allocation is on a per Metaspace basis
// and protected by the Metaspace lock. // and protected by the Metaspace lock.
jlong minus_words = (jlong) - (jlong) words; Atomic::sub(words, &_used_words[mdtype]);
Atomic::add_ptr(minus_words, &_used_words[mdtype]);
} }
void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) { void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
@ -2762,7 +2761,7 @@ void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
// each piece of metadata. Those allocations are // each piece of metadata. Those allocations are
// generally done concurrently by different application // generally done concurrently by different application
// threads so must be done atomically. // threads so must be done atomically.
Atomic::add_ptr(words, &_used_words[mdtype]); Atomic::add(words, &_used_words[mdtype]);
} }
size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) { size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
@ -537,7 +537,7 @@ bool Universe::has_reference_pending_list() {
oop Universe::swap_reference_pending_list(oop list) { oop Universe::swap_reference_pending_list(oop list) {
assert_pll_locked(is_locked); assert_pll_locked(is_locked);
return (oop)Atomic::xchg_ptr(list, &_reference_pending_list); return Atomic::xchg(list, &_reference_pending_list);
} }
#undef assert_pll_locked #undef assert_pll_locked
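swap_reference_pending_list is a plain atomic exchange: install the new list head and receive the old one in the same step, so there is no window in which the pending list is unset. The shape in isolation, with an opaque Ref* standing in for oop:

    #include <atomic>

    struct Ref;   // opaque stand-in for the reference type

    Ref* swap_pending_list(std::atomic<Ref*>& pending, Ref* new_list) {
      // exchange returns the previous head, exactly like Atomic::xchg.
      return pending.exchange(new_list);
    }

With the templated Atomic::xchg the result already has the oop type, so the (oop) cast on the return value is gone.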
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -29,11 +29,11 @@
#include "oops/arrayKlass.hpp" #include "oops/arrayKlass.hpp"
inline Klass* ArrayKlass::higher_dimension_acquire() const { inline Klass* ArrayKlass::higher_dimension_acquire() const {
return (Klass*) OrderAccess::load_ptr_acquire(&_higher_dimension); return OrderAccess::load_acquire(&_higher_dimension);
} }
inline void ArrayKlass::release_set_higher_dimension(Klass* k) { inline void ArrayKlass::release_set_higher_dimension(Klass* k) {
OrderAccess::release_store_ptr(&_higher_dimension, k); OrderAccess::release_store(&_higher_dimension, k);
} }
#endif // SHARE_VM_OOPS_ARRAYKLASS_INLINE_HPP #endif // SHARE_VM_OOPS_ARRAYKLASS_INLINE_HPP
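These two accessors are the release/acquire publication pair that many of the load_ptr_acquire and release_store_ptr conversions in this change reduce to: the writer finishes constructing the Klass before the release store, and a reader that observes a non-null pointer through the acquire load also observes that construction. The shape in isolation (Node stands in for Klass):

    #include <atomic>

    struct Node { int payload; };

    void publish(std::atomic<Node*>& slot, Node* n) {
      slot.store(n, std::memory_order_release);      // release_store
    }

    Node* current(const std::atomic<Node*>& slot) {
      return slot.load(std::memory_order_acquire);   // load_acquire
    }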
@ -226,7 +226,7 @@ void ConstantPool::klass_at_put(int class_index, int name_index, int resolved_kl
symbol_at_put(name_index, name); symbol_at_put(name_index, name);
name->increment_refcount(); name->increment_refcount();
Klass** adr = resolved_klasses()->adr_at(resolved_klass_index); Klass** adr = resolved_klasses()->adr_at(resolved_klass_index);
OrderAccess::release_store_ptr((Klass* volatile *)adr, k); OrderAccess::release_store(adr, k);
// The interpreter assumes when the tag is stored, the klass is resolved // The interpreter assumes when the tag is stored, the klass is resolved
// and the Klass* non-NULL, so we need hardware store ordering here. // and the Klass* non-NULL, so we need hardware store ordering here.
@ -243,7 +243,7 @@ void ConstantPool::klass_at_put(int class_index, Klass* k) {
CPKlassSlot kslot = klass_slot_at(class_index); CPKlassSlot kslot = klass_slot_at(class_index);
int resolved_klass_index = kslot.resolved_klass_index(); int resolved_klass_index = kslot.resolved_klass_index();
Klass** adr = resolved_klasses()->adr_at(resolved_klass_index); Klass** adr = resolved_klasses()->adr_at(resolved_klass_index);
OrderAccess::release_store_ptr((Klass* volatile *)adr, k); OrderAccess::release_store(adr, k);
// The interpreter assumes when the tag is stored, the klass is resolved // The interpreter assumes when the tag is stored, the klass is resolved
// and the Klass* non-NULL, so we need hardware store ordering here. // and the Klass* non-NULL, so we need hardware store ordering here.
@ -511,7 +511,7 @@ Klass* ConstantPool::klass_at_impl(const constantPoolHandle& this_cp, int which,
trace_class_resolution(this_cp, k); trace_class_resolution(this_cp, k);
} }
Klass** adr = this_cp->resolved_klasses()->adr_at(resolved_klass_index); Klass** adr = this_cp->resolved_klasses()->adr_at(resolved_klass_index);
OrderAccess::release_store_ptr((Klass* volatile *)adr, k); OrderAccess::release_store(adr, k);
// The interpreter assumes when the tag is stored, the klass is resolved // The interpreter assumes when the tag is stored, the klass is resolved
// and the Klass* stored in _resolved_klasses is non-NULL, so we need // and the Klass* stored in _resolved_klasses is non-NULL, so we need
// hardware store ordering here. // hardware store ordering here.
@ -145,7 +145,7 @@ class ConstantPool : public Metadata {
assert(is_within_bounds(which), "index out of bounds"); assert(is_within_bounds(which), "index out of bounds");
assert(!tag_at(which).is_unresolved_klass() && !tag_at(which).is_unresolved_klass_in_error(), "Corrupted constant pool"); assert(!tag_at(which).is_unresolved_klass() && !tag_at(which).is_unresolved_klass_in_error(), "Corrupted constant pool");
// Uses volatile because the klass slot changes without a lock. // Uses volatile because the klass slot changes without a lock.
volatile intptr_t adr = (intptr_t)OrderAccess::load_ptr_acquire(obj_at_addr_raw(which)); intptr_t adr = OrderAccess::load_acquire(obj_at_addr_raw(which));
assert(adr != 0 || which == 0, "cp entry for klass should not be zero"); assert(adr != 0 || which == 0, "cp entry for klass should not be zero");
return CPSlot(adr); return CPSlot(adr);
} }
@ -407,7 +407,7 @@ class ConstantPool : public Metadata {
assert(tag_at(kslot.name_index()).is_symbol(), "sanity"); assert(tag_at(kslot.name_index()).is_symbol(), "sanity");
Klass** adr = resolved_klasses()->adr_at(kslot.resolved_klass_index()); Klass** adr = resolved_klasses()->adr_at(kslot.resolved_klass_index());
return (Klass*)OrderAccess::load_ptr_acquire(adr); return OrderAccess::load_acquire(adr);
} }
// RedefineClasses() API support: // RedefineClasses() API support:
@ -91,7 +91,7 @@ void ConstantPoolCacheEntry::set_bytecode_1(Bytecodes::Code code) {
assert(c == 0 || c == code || code == 0, "update must be consistent"); assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif #endif
// Need to flush pending stores here before bytecode is written. // Need to flush pending stores here before bytecode is written.
OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << bytecode_1_shift)); OrderAccess::release_store(&_indices, _indices | ((u_char)code << bytecode_1_shift));
} }
void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) { void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) {
@ -101,19 +101,13 @@ void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) {
assert(c == 0 || c == code || code == 0, "update must be consistent"); assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif #endif
// Need to flush pending stores here before bytecode is written. // Need to flush pending stores here before bytecode is written.
OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << bytecode_2_shift)); OrderAccess::release_store(&_indices, _indices | ((u_char)code << bytecode_2_shift));
} }
// Sets f1, ordering with previous writes. // Sets f1, ordering with previous writes.
void ConstantPoolCacheEntry::release_set_f1(Metadata* f1) { void ConstantPoolCacheEntry::release_set_f1(Metadata* f1) {
assert(f1 != NULL, ""); assert(f1 != NULL, "");
OrderAccess::release_store_ptr((HeapWord*) &_f1, f1); OrderAccess::release_store(&_f1, f1);
}
// Sets flags, but only if the value was previously zero.
bool ConstantPoolCacheEntry::init_flags_atomic(intptr_t flags) {
intptr_t result = Atomic::cmpxchg_ptr(flags, &_flags, 0);
return (result == 0);
} }
// Note that concurrent update of both bytecodes can leave one of them // Note that concurrent update of both bytecodes can leave one of them
@ -154,7 +148,8 @@ void ConstantPoolCacheEntry::set_parameter_size(int value) {
// bother trying to update it once it's nonzero but always make // bother trying to update it once it's nonzero but always make
// sure that the final parameter size agrees with what was passed. // sure that the final parameter size agrees with what was passed.
if (_flags == 0) { if (_flags == 0) {
Atomic::cmpxchg_ptr((value & parameter_size_mask), &_flags, 0); intx newflags = (value & parameter_size_mask);
Atomic::cmpxchg(newflags, &_flags, (intx)0);
} }
guarantee(parameter_size() == value, guarantee(parameter_size() == value,
"size must not change: parameter_size=%d, value=%d", parameter_size(), value); "size must not change: parameter_size=%d, value=%d", parameter_size(), value);
@ -136,7 +136,7 @@ class ConstantPoolCacheEntry VALUE_OBJ_CLASS_SPEC {
private: private:
volatile intx _indices; // constant pool index & rewrite bytecodes volatile intx _indices; // constant pool index & rewrite bytecodes
volatile Metadata* _f1; // entry specific metadata field Metadata* volatile _f1; // entry specific metadata field
volatile intx _f2; // entry specific int/metadata field volatile intx _f2; // entry specific int/metadata field
volatile intx _flags; // flags volatile intx _flags; // flags
@ -144,7 +144,7 @@ class ConstantPoolCacheEntry VALUE_OBJ_CLASS_SPEC {
void set_bytecode_1(Bytecodes::Code code); void set_bytecode_1(Bytecodes::Code code);
void set_bytecode_2(Bytecodes::Code code); void set_bytecode_2(Bytecodes::Code code);
void set_f1(Metadata* f1) { void set_f1(Metadata* f1) {
Metadata* existing_f1 = (Metadata*)_f1; // read once Metadata* existing_f1 = _f1; // read once
assert(existing_f1 == NULL || existing_f1 == f1, "illegal field change"); assert(existing_f1 == NULL || existing_f1 == f1, "illegal field change");
_f1 = f1; _f1 = f1;
} }
@ -160,7 +160,6 @@ class ConstantPoolCacheEntry VALUE_OBJ_CLASS_SPEC {
} }
int make_flags(TosState state, int option_bits, int field_index_or_method_params); int make_flags(TosState state, int option_bits, int field_index_or_method_params);
void set_flags(intx flags) { _flags = flags; } void set_flags(intx flags) { _flags = flags; }
bool init_flags_atomic(intx flags);
void set_field_flags(TosState field_type, int option_bits, int field_index) { void set_field_flags(TosState field_type, int option_bits, int field_index) {
assert((field_index & field_index_mask) == field_index, "field_index in range"); assert((field_index & field_index_mask) == field_index, "field_index in range");
set_flags(make_flags(field_type, option_bits | (1 << is_field_entry_shift), field_index)); set_flags(make_flags(field_type, option_bits | (1 << is_field_entry_shift), field_index));
@ -169,10 +168,6 @@ class ConstantPoolCacheEntry VALUE_OBJ_CLASS_SPEC {
assert((method_params & parameter_size_mask) == method_params, "method_params in range"); assert((method_params & parameter_size_mask) == method_params, "method_params in range");
set_flags(make_flags(return_type, option_bits, method_params)); set_flags(make_flags(return_type, option_bits, method_params));
} }
bool init_method_flags_atomic(TosState return_type, int option_bits, int method_params) {
assert((method_params & parameter_size_mask) == method_params, "method_params in range");
return init_flags_atomic(make_flags(return_type, option_bits, method_params));
}
public: public:
// specific bit definitions for the flags field: // specific bit definitions for the flags field:
@ -332,11 +327,11 @@ class ConstantPoolCacheEntry VALUE_OBJ_CLASS_SPEC {
// Accessors // Accessors
int indices() const { return _indices; } int indices() const { return _indices; }
int indices_ord() const { return (intx)OrderAccess::load_ptr_acquire(&_indices); } int indices_ord() const { return OrderAccess::load_acquire(&_indices); }
int constant_pool_index() const { return (indices() & cp_index_mask); } int constant_pool_index() const { return (indices() & cp_index_mask); }
Bytecodes::Code bytecode_1() const { return Bytecodes::cast((indices_ord() >> bytecode_1_shift) & bytecode_1_mask); } Bytecodes::Code bytecode_1() const { return Bytecodes::cast((indices_ord() >> bytecode_1_shift) & bytecode_1_mask); }
Bytecodes::Code bytecode_2() const { return Bytecodes::cast((indices_ord() >> bytecode_2_shift) & bytecode_2_mask); } Bytecodes::Code bytecode_2() const { return Bytecodes::cast((indices_ord() >> bytecode_2_shift) & bytecode_2_mask); }
Metadata* f1_ord() const { return (Metadata *)OrderAccess::load_ptr_acquire(&_f1); } Metadata* f1_ord() const { return (Metadata *)OrderAccess::load_acquire(&_f1); }
Method* f1_as_method() const { Metadata* f1 = f1_ord(); assert(f1 == NULL || f1->is_method(), ""); return (Method*)f1; } Method* f1_as_method() const { Metadata* f1 = f1_ord(); assert(f1 == NULL || f1->is_method(), ""); return (Method*)f1; }
Klass* f1_as_klass() const { Metadata* f1 = f1_ord(); assert(f1 == NULL || f1->is_klass(), ""); return (Klass*)f1; } Klass* f1_as_klass() const { Metadata* f1 = f1_ord(); assert(f1 == NULL || f1->is_klass(), ""); return (Klass*)f1; }
// Use the accessor f1() to acquire _f1's value. This is needed for // Use the accessor f1() to acquire _f1's value. This is needed for
@ -1109,16 +1109,15 @@ void InstanceKlass::call_class_initializer(TRAPS) {
void InstanceKlass::mask_for(const methodHandle& method, int bci, void InstanceKlass::mask_for(const methodHandle& method, int bci,
InterpreterOopMap* entry_for) { InterpreterOopMap* entry_for) {
// Lazily create the _oop_map_cache at first request // Lazily create the _oop_map_cache at first request
// Lock-free access requires load_ptr_acquire. // Lock-free access requires load_acquire.
OopMapCache* oop_map_cache = OopMapCache* oop_map_cache = OrderAccess::load_acquire(&_oop_map_cache);
static_cast<OopMapCache*>(OrderAccess::load_ptr_acquire(&_oop_map_cache));
if (oop_map_cache == NULL) { if (oop_map_cache == NULL) {
MutexLocker x(OopMapCacheAlloc_lock); MutexLocker x(OopMapCacheAlloc_lock);
// Check if _oop_map_cache was allocated while we were waiting for this lock // Check if _oop_map_cache was allocated while we were waiting for this lock
if ((oop_map_cache = _oop_map_cache) == NULL) { if ((oop_map_cache = _oop_map_cache) == NULL) {
oop_map_cache = new OopMapCache(); oop_map_cache = new OopMapCache();
// Ensure _oop_map_cache is stable, since it is examined without a lock // Ensure _oop_map_cache is stable, since it is examined without a lock
OrderAccess::release_store_ptr(&_oop_map_cache, oop_map_cache); OrderAccess::release_store(&_oop_map_cache, oop_map_cache);
} }
} }
// _oop_map_cache is constant after init; lookup below does its own locking. // _oop_map_cache is constant after init; lookup below does its own locking.
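mask_for is the classic double-checked creation: an acquire load on the fast path, then a re-check and a release store under the lock so a published cache is always fully constructed. A sketch of the same structure with std::atomic and std::mutex (Cache and the lock are illustrative stand-ins):

    #include <atomic>
    #include <mutex>

    struct Cache { /* ... */ };

    Cache* get_or_create(std::atomic<Cache*>& slot, std::mutex& lock) {
      Cache* cache = slot.load(std::memory_order_acquire);
      if (cache == nullptr) {
        std::lock_guard<std::mutex> guard(lock);
        cache = slot.load(std::memory_order_relaxed);   // re-check under the lock
        if (cache == nullptr) {
          cache = new Cache();
          slot.store(cache, std::memory_order_release); // publish the finished cache
        }
      }
      return cache;
    }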
@ -1672,7 +1671,7 @@ jmethodID InstanceKlass::get_jmethod_id(const methodHandle& method_h) {
// transitions from NULL to non-NULL which is safe because we use // transitions from NULL to non-NULL which is safe because we use
// release_set_methods_jmethod_ids() to advertise the new cache. // release_set_methods_jmethod_ids() to advertise the new cache.
// A partially constructed cache should never be seen by a racing // A partially constructed cache should never be seen by a racing
// thread. We also use release_store_ptr() to save a new jmethodID // thread. We also use release_store() to save a new jmethodID
// in the cache so a partially constructed jmethodID should never be // in the cache so a partially constructed jmethodID should never be
// seen either. Cache reads of existing jmethodIDs proceed without a // seen either. Cache reads of existing jmethodIDs proceed without a
// lock, but cache writes of a new jmethodID requires uniqueness and // lock, but cache writes of a new jmethodID requires uniqueness and
@ -1831,7 +1830,7 @@ jmethodID InstanceKlass::get_jmethod_id_fetch_or_update(
// The jmethodID cache can be read while unlocked so we have to // The jmethodID cache can be read while unlocked so we have to
// make sure the new jmethodID is complete before installing it // make sure the new jmethodID is complete before installing it
// in the cache. // in the cache.
OrderAccess::release_store_ptr(&jmeths[idnum+1], id); OrderAccess::release_store(&jmeths[idnum+1], id);
} else { } else {
*to_dealloc_id_p = new_id; // save new id for later delete *to_dealloc_id_p = new_id; // save new id for later delete
} }
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -35,19 +35,19 @@
#include "utilities/macros.hpp" #include "utilities/macros.hpp"
inline Klass* InstanceKlass::array_klasses_acquire() const { inline Klass* InstanceKlass::array_klasses_acquire() const {
return (Klass*) OrderAccess::load_ptr_acquire(&_array_klasses); return OrderAccess::load_acquire(&_array_klasses);
} }
inline void InstanceKlass::release_set_array_klasses(Klass* k) { inline void InstanceKlass::release_set_array_klasses(Klass* k) {
OrderAccess::release_store_ptr(&_array_klasses, k); OrderAccess::release_store(&_array_klasses, k);
} }
inline jmethodID* InstanceKlass::methods_jmethod_ids_acquire() const { inline jmethodID* InstanceKlass::methods_jmethod_ids_acquire() const {
return (jmethodID*)OrderAccess::load_ptr_acquire(&_methods_jmethod_ids); return OrderAccess::load_acquire(&_methods_jmethod_ids);
} }
inline void InstanceKlass::release_set_methods_jmethod_ids(jmethodID* jmeths) { inline void InstanceKlass::release_set_methods_jmethod_ids(jmethodID* jmeths) {
OrderAccess::release_store_ptr(&_methods_jmethod_ids, jmeths); OrderAccess::release_store(&_methods_jmethod_ids, jmeths);
} }
// The iteration over the oops in objects is a hot path in the GC code. // The iteration over the oops in objects is a hot path in the GC code.
@ -444,6 +444,11 @@ MethodCounters* Method::build_method_counters(Method* m, TRAPS) {
return mh->method_counters(); return mh->method_counters();
} }
bool Method::init_method_counters(MethodCounters* counters) {
// Try to install a pointer to MethodCounters, return true on success.
return Atomic::cmpxchg(counters, &_method_counters, (MethodCounters*)NULL) == NULL;
}
void Method::cleanup_inline_caches() { void Method::cleanup_inline_caches() {
// The current system doesn't use inline caches in the interpreter // The current system doesn't use inline caches in the interpreter
// => nothing to do (keep this method around for future use) // => nothing to do (keep this method around for future use)
@ -1108,8 +1113,8 @@ void Method::restore_unshareable_info(TRAPS) {
} }
} }
volatile address Method::from_compiled_entry_no_trampoline() const { address Method::from_compiled_entry_no_trampoline() const {
nmethod *code = (nmethod *)OrderAccess::load_ptr_acquire(&_code); CompiledMethod *code = OrderAccess::load_acquire(&_code);
if (code) { if (code) {
return code->verified_entry_point(); return code->verified_entry_point();
} else { } else {
@ -1135,7 +1140,7 @@ address Method::verified_code_entry() {
// Not inline to avoid circular ref. // Not inline to avoid circular ref.
bool Method::check_code() const { bool Method::check_code() const {
// cached in a register or local. There's a race on the value of the field. // cached in a register or local. There's a race on the value of the field.
CompiledMethod *code = (CompiledMethod *)OrderAccess::load_ptr_acquire(&_code); CompiledMethod *code = OrderAccess::load_acquire(&_code);
return code == NULL || (code->method() == NULL) || (code->method() == (Method*)this && !code->is_osr_method()); return code == NULL || (code->method() == NULL) || (code->method() == (Method*)this && !code->is_osr_method());
} }
@ -136,9 +136,9 @@ class Method : public Metadata {
static address make_adapters(const methodHandle& mh, TRAPS); static address make_adapters(const methodHandle& mh, TRAPS);
volatile address from_compiled_entry() const { return (address)OrderAccess::load_ptr_acquire(&_from_compiled_entry); } address from_compiled_entry() const { return OrderAccess::load_acquire(&_from_compiled_entry); }
volatile address from_compiled_entry_no_trampoline() const; address from_compiled_entry_no_trampoline() const;
volatile address from_interpreted_entry() const{ return (address)OrderAccess::load_ptr_acquire(&_from_interpreted_entry); } address from_interpreted_entry() const{ return OrderAccess::load_acquire(&_from_interpreted_entry); }
// access flag // access flag
AccessFlags access_flags() const { return _access_flags; } AccessFlags access_flags() const { return _access_flags; }
@ -337,7 +337,7 @@ class Method : public Metadata {
// The store into method must be released. On platforms without // The store into method must be released. On platforms without
// total store order (TSO) the reference may become visible before // total store order (TSO) the reference may become visible before
// the initialization of data otherwise. // the initialization of data otherwise.
OrderAccess::release_store_ptr((volatile void *)&_method_data, data); OrderAccess::release_store(&_method_data, data);
} }
MethodCounters* method_counters() const { MethodCounters* method_counters() const {
@ -348,10 +348,7 @@ class Method : public Metadata {
_method_counters = NULL; _method_counters = NULL;
} }
bool init_method_counters(MethodCounters* counters) { bool init_method_counters(MethodCounters* counters);
// Try to install a pointer to MethodCounters, return true on success.
return Atomic::cmpxchg_ptr(counters, (volatile void*)&_method_counters, NULL) == NULL;
}
#ifdef TIERED #ifdef TIERED
// We are reusing interpreter_invocation_count as a holder for the previous event count! // We are reusing interpreter_invocation_count as a holder for the previous event count!
@ -452,7 +449,7 @@ class Method : public Metadata {
// nmethod/verified compiler entry // nmethod/verified compiler entry
address verified_code_entry(); address verified_code_entry();
bool check_code() const; // Not inline to avoid circular ref bool check_code() const; // Not inline to avoid circular ref
CompiledMethod* volatile code() const { assert( check_code(), "" ); return (CompiledMethod *)OrderAccess::load_ptr_acquire(&_code); } CompiledMethod* volatile code() const { assert( check_code(), "" ); return OrderAccess::load_acquire(&_code); }
void clear_code(bool acquire_lock = true); // Clear out any compiled code void clear_code(bool acquire_lock = true); // Clear out any compiled code
static void set_code(const methodHandle& mh, CompiledMethod* code); static void set_code(const methodHandle& mh, CompiledMethod* code);
void set_adapter_entry(AdapterHandlerEntry* adapter) { void set_adapter_entry(AdapterHandlerEntry* adapter) {
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -202,7 +202,7 @@ public:
_cells[index] = value; _cells[index] = value;
} }
void release_set_cell_at(int index, intptr_t value) { void release_set_cell_at(int index, intptr_t value) {
OrderAccess::release_store_ptr(&_cells[index], value); OrderAccess::release_store(&_cells[index], value);
} }
intptr_t cell_at(int index) const { intptr_t cell_at(int index) const {
return _cells[index]; return _cells[index];
@ -66,7 +66,7 @@ template <class T> void oop_store(T* p, oop v) {
template <class T> void oop_store(volatile T* p, oop v) { template <class T> void oop_store(volatile T* p, oop v) {
update_barrier_set_pre((T*)p, v); // cast away volatile update_barrier_set_pre((T*)p, v); // cast away volatile
// Used by release_obj_field_put, so use release_store_ptr. // Used by release_obj_field_put, so use release_store.
oopDesc::release_encode_store_heap_oop(p, v); oopDesc::release_encode_store_heap_oop(p, v);
// When using CMS we must mark the card corresponding to p as dirty // When using CMS we must mark the card corresponding to p as dirty
// with release sematics to prevent that CMS sees the dirty card but // with release sematics to prevent that CMS sees the dirty card but
@ -90,7 +90,7 @@ inline void oop_store_raw(HeapWord* addr, oop value) {
// We need a separate file to avoid circular references // We need a separate file to avoid circular references
void oopDesc::release_set_mark(markOop m) { void oopDesc::release_set_mark(markOop m) {
OrderAccess::release_store_ptr(&_mark, m); OrderAccess::release_store(&_mark, m);
} }
markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) { markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
@ -124,7 +124,7 @@ Klass* oopDesc::klass_or_null_acquire() const volatile {
volatile narrowKlass* xaddr = const_cast<volatile narrowKlass*>(addr); volatile narrowKlass* xaddr = const_cast<volatile narrowKlass*>(addr);
return Klass::decode_klass(OrderAccess::load_acquire(xaddr)); return Klass::decode_klass(OrderAccess::load_acquire(xaddr));
} else { } else {
return (Klass*)OrderAccess::load_ptr_acquire(&_metadata._klass); return OrderAccess::load_acquire(&_metadata._klass);
} }
} }
@ -161,7 +161,7 @@ void oopDesc::release_set_klass(Klass* k) {
OrderAccess::release_store(compressed_klass_addr(), OrderAccess::release_store(compressed_klass_addr(),
Klass::encode_klass_not_null(k)); Klass::encode_klass_not_null(k));
} else { } else {
OrderAccess::release_store_ptr(klass_addr(), k); OrderAccess::release_store(klass_addr(), k);
} }
} }
@ -361,7 +361,7 @@ void oopDesc::encode_store_heap_oop(narrowOop* p, oop v) {
// Store heap oop as is for volatile fields. // Store heap oop as is for volatile fields.
void oopDesc::release_store_heap_oop(volatile oop* p, oop v) { void oopDesc::release_store_heap_oop(volatile oop* p, oop v) {
OrderAccess::release_store_ptr(p, v); OrderAccess::release_store(p, v);
} }
void oopDesc::release_store_heap_oop(volatile narrowOop* p, narrowOop v) { void oopDesc::release_store_heap_oop(volatile narrowOop* p, narrowOop v) {
OrderAccess::release_store(p, v); OrderAccess::release_store(p, v);
@ -372,11 +372,11 @@ void oopDesc::release_encode_store_heap_oop_not_null(volatile narrowOop* p, oop
OrderAccess::release_store(p, encode_heap_oop_not_null(v)); OrderAccess::release_store(p, encode_heap_oop_not_null(v));
} }
void oopDesc::release_encode_store_heap_oop_not_null(volatile oop* p, oop v) { void oopDesc::release_encode_store_heap_oop_not_null(volatile oop* p, oop v) {
OrderAccess::release_store_ptr(p, v); OrderAccess::release_store(p, v);
} }
void oopDesc::release_encode_store_heap_oop(volatile oop* p, oop v) { void oopDesc::release_encode_store_heap_oop(volatile oop* p, oop v) {
OrderAccess::release_store_ptr(p, v); OrderAccess::release_store(p, v);
} }
void oopDesc::release_encode_store_heap_oop(volatile narrowOop* p, oop v) { void oopDesc::release_encode_store_heap_oop(volatile narrowOop* p, oop v) {
OrderAccess::release_store(p, encode_heap_oop(v)); OrderAccess::release_store(p, encode_heap_oop(v));
@ -388,11 +388,11 @@ oop oopDesc::atomic_exchange_oop(oop exchange_value, volatile HeapWord *dest) {
if (UseCompressedOops) { if (UseCompressedOops) {
// encode exchange value from oop to T // encode exchange value from oop to T
narrowOop val = encode_heap_oop(exchange_value); narrowOop val = encode_heap_oop(exchange_value);
narrowOop old = (narrowOop)Atomic::xchg(val, (narrowOop*)dest); narrowOop old = Atomic::xchg(val, (narrowOop*)dest);
// decode old from T to oop // decode old from T to oop
return decode_heap_oop(old); return decode_heap_oop(old);
} else { } else {
return (oop)Atomic::xchg_ptr(exchange_value, (oop*)dest); return Atomic::xchg(exchange_value, (oop*)dest);
} }
} }
@ -447,11 +447,11 @@ Metadata* oopDesc::metadata_field(int offset) const { return *metadata
void oopDesc::metadata_field_put(int offset, Metadata* value) { *metadata_field_addr(offset) = value; } void oopDesc::metadata_field_put(int offset, Metadata* value) { *metadata_field_addr(offset) = value; }
Metadata* oopDesc::metadata_field_acquire(int offset) const { Metadata* oopDesc::metadata_field_acquire(int offset) const {
return (Metadata*)OrderAccess::load_ptr_acquire(metadata_field_addr(offset)); return OrderAccess::load_acquire(metadata_field_addr(offset));
} }
void oopDesc::release_metadata_field_put(int offset, Metadata* value) { void oopDesc::release_metadata_field_put(int offset, Metadata* value) {
OrderAccess::release_store_ptr(metadata_field_addr(offset), value); OrderAccess::release_store(metadata_field_addr(offset), value);
} }
jbyte oopDesc::byte_field(int offset) const { return (jbyte) *byte_field_addr(offset); } jbyte oopDesc::byte_field(int offset) const { return (jbyte) *byte_field_addr(offset); }
@ -485,8 +485,8 @@ oop oopDesc::obj_field_acquire(int offset) const {
return UseCompressedOops ? return UseCompressedOops ?
decode_heap_oop((narrowOop) decode_heap_oop((narrowOop)
OrderAccess::load_acquire(obj_field_addr<narrowOop>(offset))) OrderAccess::load_acquire(obj_field_addr<narrowOop>(offset)))
: decode_heap_oop((oop) : decode_heap_oop(
OrderAccess::load_ptr_acquire(obj_field_addr<oop>(offset))); OrderAccess::load_acquire(obj_field_addr<oop>(offset)));
} }
void oopDesc::release_obj_field_put(int offset, oop value) { void oopDesc::release_obj_field_put(int offset, oop value) {
UseCompressedOops ? UseCompressedOops ?
@ -518,8 +518,8 @@ void oopDesc::release_float_field_put(int offset, jfloat contents) { OrderAcc
jdouble oopDesc::double_field_acquire(int offset) const { return OrderAccess::load_acquire(double_field_addr(offset)); } jdouble oopDesc::double_field_acquire(int offset) const { return OrderAccess::load_acquire(double_field_addr(offset)); }
void oopDesc::release_double_field_put(int offset, jdouble contents) { OrderAccess::release_store(double_field_addr(offset), contents); } void oopDesc::release_double_field_put(int offset, jdouble contents) { OrderAccess::release_store(double_field_addr(offset), contents); }
address oopDesc::address_field_acquire(int offset) const { return (address) OrderAccess::load_ptr_acquire(address_field_addr(offset)); } address oopDesc::address_field_acquire(int offset) const { return OrderAccess::load_acquire(address_field_addr(offset)); }
void oopDesc::release_address_field_put(int offset, address contents) { OrderAccess::release_store_ptr(address_field_addr(offset), contents); } void oopDesc::release_address_field_put(int offset, address contents) { OrderAccess::release_store(address_field_addr(offset), contents); }
bool oopDesc::is_locked() const { bool oopDesc::is_locked() const {
return mark()->is_locked(); return mark()->is_locked();


@ -1658,7 +1658,7 @@ NamedCounter* OptoRuntime::new_named_counter(JVMState* youngest_jvms, NamedCount
c->set_next(NULL); c->set_next(NULL);
head = _named_counters; head = _named_counters;
c->set_next(head); c->set_next(head);
} while (Atomic::cmpxchg_ptr(c, &_named_counters, head) != head); } while (Atomic::cmpxchg(c, &_named_counters, head) != head);
return c; return c;
} }
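
The retry loop above is the standard lock-free push onto an intrusive singly linked list: read the head, link the new node in front of it, and CAS the head, retrying on interference. A hedged sketch of the same shape; Counter and push_counter are invented names, not the OptoRuntime types.

    #include <atomic>

    struct Counter {
      Counter* next = nullptr;
    };

    std::atomic<Counter*> g_counters{nullptr};

    void push_counter(Counter* c) {
      Counter* head = g_counters.load(std::memory_order_relaxed);
      do {
        c->next = head;                      // link ahead of the currently observed head
      } while (!g_counters.compare_exchange_weak(head, c,
                                                 std::memory_order_release,
                                                 std::memory_order_relaxed));
      // On failure compare_exchange_weak reloads 'head', so the loop retries
      // against the freshly observed value, just like the cmpxchg loop above.
    }
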


@ -3777,7 +3777,7 @@ void copy_jni_function_table(const struct JNINativeInterface_ *new_jni_NativeInt
intptr_t *a = (intptr_t *) jni_functions(); intptr_t *a = (intptr_t *) jni_functions();
intptr_t *b = (intptr_t *) new_jni_NativeInterface; intptr_t *b = (intptr_t *) new_jni_NativeInterface;
for (uint i=0; i < sizeof(struct JNINativeInterface_)/sizeof(void *); i++) { for (uint i=0; i < sizeof(struct JNINativeInterface_)/sizeof(void *); i++) {
Atomic::store_ptr(*b++, a++); Atomic::store(*b++, a++);
} }
} }
@ -3900,9 +3900,9 @@ static jint JNI_CreateJavaVM_inner(JavaVM **vm, void **penv, void *args) {
jint a = 0xcafebabe; jint a = 0xcafebabe;
jint b = Atomic::xchg((jint) 0xdeadbeef, &a); jint b = Atomic::xchg((jint) 0xdeadbeef, &a);
void *c = &a; void *c = &a;
void *d = Atomic::xchg_ptr(&b, &c); void *d = Atomic::xchg(&b, &c);
assert(a == (jint) 0xdeadbeef && b == (jint) 0xcafebabe, "Atomic::xchg() works"); assert(a == (jint) 0xdeadbeef && b == (jint) 0xcafebabe, "Atomic::xchg() works");
assert(c == &b && d == &a, "Atomic::xchg_ptr() works"); assert(c == &b && d == &a, "Atomic::xchg() works");
} }
#endif // ZERO && ASSERT #endif // ZERO && ASSERT


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -127,7 +127,7 @@ JvmtiRawMonitor::is_valid() {
int JvmtiRawMonitor::SimpleEnter (Thread * Self) { int JvmtiRawMonitor::SimpleEnter (Thread * Self) {
for (;;) { for (;;) {
if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) { if (Atomic::cmpxchg(Self, &_owner, (void*)NULL) == NULL) {
return OS_OK ; return OS_OK ;
} }
@ -139,7 +139,7 @@ int JvmtiRawMonitor::SimpleEnter (Thread * Self) {
Node._next = _EntryList ; Node._next = _EntryList ;
_EntryList = &Node ; _EntryList = &Node ;
OrderAccess::fence() ; OrderAccess::fence() ;
if (_owner == NULL && Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) { if (_owner == NULL && Atomic::cmpxchg(Self, &_owner, (void*)NULL) == NULL) {
_EntryList = Node._next ; _EntryList = Node._next ;
RawMonitor_lock->unlock() ; RawMonitor_lock->unlock() ;
return OS_OK ; return OS_OK ;
@ -153,7 +153,7 @@ int JvmtiRawMonitor::SimpleEnter (Thread * Self) {
int JvmtiRawMonitor::SimpleExit (Thread * Self) { int JvmtiRawMonitor::SimpleExit (Thread * Self) {
guarantee (_owner == Self, "invariant") ; guarantee (_owner == Self, "invariant") ;
OrderAccess::release_store_ptr (&_owner, NULL) ; OrderAccess::release_store(&_owner, (void*)NULL) ;
OrderAccess::fence() ; OrderAccess::fence() ;
if (_EntryList == NULL) return OS_OK ; if (_EntryList == NULL) return OS_OK ;
ObjectWaiter * w ; ObjectWaiter * w ;
@ -277,10 +277,10 @@ int JvmtiRawMonitor::raw_enter(TRAPS) {
jt->SR_lock()->lock_without_safepoint_check(); jt->SR_lock()->lock_without_safepoint_check();
} }
// guarded by SR_lock to avoid racing with new external suspend requests. // guarded by SR_lock to avoid racing with new external suspend requests.
Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ; Contended = Atomic::cmpxchg(THREAD, &_owner, (void*)NULL);
jt->SR_lock()->unlock(); jt->SR_lock()->unlock();
} else { } else {
Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ; Contended = Atomic::cmpxchg(THREAD, &_owner, (void*)NULL);
} }
if (Contended == THREAD) { if (Contended == THREAD) {
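
A detail worth noting: the (void*)NULL and (Thread*)NULL casts appear because the templated cmpxchg deduces the destination type from its arguments, and a bare NULL is an integer constant that would not deduce to the pointer type of the destination. A sketch of that deduction issue; cas, Thread and simple_enter are invented stand-ins, not the HotSpot declarations.

    #include <atomic>
    #include <cstddef>

    // Two type parameters, as in the new Atomic::cmpxchg: the exchange value may
    // be a more derived pointer type than the destination element type.
    template <typename T, typename D>
    D cas(T exchange_value, std::atomic<D>* dest, D compare_value) {
      D expected = compare_value;
      dest->compare_exchange_strong(expected, exchange_value);
      return expected;   // previous value of *dest, whether or not the CAS won
    }

    struct Thread {};

    std::atomic<void*> g_owner{nullptr};

    bool simple_enter(Thread* self) {
      // cas(self, &g_owner, NULL) typically fails to compile: D would deduce to
      // void* from the destination but to an integer type from NULL. The explicit
      // (void*)NULL cast resolves the deduction, which is why the casts show up
      // throughout this change.
      return cas(self, &g_owner, (void*)NULL) == NULL;
    }
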


@ -70,14 +70,6 @@ public:
template<typename T, typename D> template<typename T, typename D>
inline static void store(T store_value, volatile D* dest); inline static void store(T store_value, volatile D* dest);
inline static void store_ptr(intptr_t store_value, volatile intptr_t* dest) {
Atomic::store(store_value, dest);
}
inline static void store_ptr(void* store_value, volatile void* dest) {
Atomic::store(store_value, reinterpret_cast<void* volatile*>(dest));
}
// Atomically load from a location // Atomically load from a location
// The type T must be either a pointer type, an integral/enum type, // The type T must be either a pointer type, an integral/enum type,
// or a type that is primitive convertible using PrimitiveConversions. // or a type that is primitive convertible using PrimitiveConversions.
@ -90,13 +82,8 @@ public:
template<typename I, typename D> template<typename I, typename D>
inline static D add(I add_value, D volatile* dest); inline static D add(I add_value, D volatile* dest);
inline static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest) { template<typename I, typename D>
return add(add_value, dest); inline static D sub(I sub_value, D volatile* dest);
}
inline static void* add_ptr(intptr_t add_value, volatile void* dest) {
return add(add_value, reinterpret_cast<char* volatile*>(dest));
}
// Atomically increment location. inc() provide: // Atomically increment location. inc() provide:
// <fence> increment-dest <membar StoreLoad|StoreStore> // <fence> increment-dest <membar StoreLoad|StoreStore>
@ -123,14 +110,6 @@ public:
template<typename T, typename D> template<typename T, typename D>
inline static D xchg(T exchange_value, volatile D* dest); inline static D xchg(T exchange_value, volatile D* dest);
inline static intptr_t xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
return xchg(exchange_value, dest);
}
inline static void* xchg_ptr(void* exchange_value, volatile void* dest) {
return xchg(exchange_value, reinterpret_cast<void* volatile*>(dest));
}
// Performs atomic compare of *dest and compare_value, and exchanges // Performs atomic compare of *dest and compare_value, and exchanges
// *dest with exchange_value if the comparison succeeded. Returns prior // *dest with exchange_value if the comparison succeeded. Returns prior
// value of *dest. cmpxchg*() provide: // value of *dest. cmpxchg*() provide:
@ -151,23 +130,6 @@ public:
inline static bool replace_if_null(T* value, D* volatile* dest, inline static bool replace_if_null(T* value, D* volatile* dest,
cmpxchg_memory_order order = memory_order_conservative); cmpxchg_memory_order order = memory_order_conservative);
inline static intptr_t cmpxchg_ptr(intptr_t exchange_value,
volatile intptr_t* dest,
intptr_t compare_value,
cmpxchg_memory_order order = memory_order_conservative) {
return cmpxchg(exchange_value, dest, compare_value, order);
}
inline static void* cmpxchg_ptr(void* exchange_value,
volatile void* dest,
void* compare_value,
cmpxchg_memory_order order = memory_order_conservative) {
return cmpxchg(exchange_value,
reinterpret_cast<void* volatile*>(dest),
compare_value,
order);
}
private: private:
// Test whether From is implicitly convertible to To. // Test whether From is implicitly convertible to To.
// From and To must be pointer types. // From and To must be pointer types.
@ -555,6 +517,23 @@ inline void Atomic::dec(D volatile* dest) {
Atomic::add(I(-1), dest); Atomic::add(I(-1), dest);
} }
template<typename I, typename D>
inline D Atomic::sub(I sub_value, D volatile* dest) {
STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
STATIC_ASSERT(IsIntegral<I>::value);
// If D is a pointer type, use [u]intptr_t as the addend type,
// matching signedness of I. Otherwise, use D as the addend type.
typedef typename Conditional<IsSigned<I>::value, intptr_t, uintptr_t>::type PI;
typedef typename Conditional<IsPointer<D>::value, PI, D>::type AddendType;
// Only allow conversions that can't change the value.
STATIC_ASSERT(IsSigned<I>::value == IsSigned<AddendType>::value);
STATIC_ASSERT(sizeof(I) <= sizeof(AddendType));
AddendType addend = sub_value;
// Assumes two's complement integer representation.
#pragma warning(suppress: 4146) // In case AddendType is not signed.
return Atomic::add(-addend, dest);
}
// Define the class before including platform file, which may specialize // Define the class before including platform file, which may specialize
// the operator definition. No generic definition of specializations // the operator definition. No generic definition of specializations
// of the operator template are provided, nor are there any generic // of the operator template are provided, nor are there any generic
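
Atomic::sub above expresses subtraction as addition of a negated addend, choosing the addend type from the operands so the conversion cannot change the value. A standalone sketch of the same plumbing with <type_traits>, restricted to integral destinations and assuming the updated-value return convention of Atomic::add; generic_sub is an invented name.

    #include <atomic>
    #include <cstdint>
    #include <type_traits>

    template <typename I, typename D>
    D generic_sub(I sub_value, std::atomic<D>* dest) {
      static_assert(std::is_integral<I>::value && std::is_integral<D>::value,
                    "integral operands only in this sketch");
      static_assert(std::is_signed<I>::value == std::is_signed<D>::value,
                    "matching signedness, so widening cannot change the value");
      static_assert(sizeof(I) <= sizeof(D), "no narrowing");

      D addend = static_cast<D>(sub_value);
      // 0 - addend wraps modulo 2^N when D is unsigned (the two's-complement trick
      // the #pragma above silences); adding that delta performs the subtraction.
      D delta = static_cast<D>(static_cast<D>(0) - addend);
      // fetch_add returns the old value, so add the delta once more to report the
      // updated value, mirroring the add-based implementation above.
      return dest->fetch_add(delta) + delta;
    }

    // e.g. std::atomic<size_t> used{100}; generic_sub(size_t{40}, &used) yields 60
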


@ -251,12 +251,6 @@
// //
// o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o // o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o
// CASPTR() uses the canonical argument order that dominates in the literature.
// Our internal cmpxchg_ptr() uses a bastardized ordering to accommodate Sun .il templates.
#define CASPTR(a, c, s) \
intptr_t(Atomic::cmpxchg_ptr((void *)(s), (void *)(a), (void *)(c)))
#define UNS(x) (uintptr_t(x)) #define UNS(x) (uintptr_t(x))
#define TRACE(m) \ #define TRACE(m) \
{ \ { \
@ -268,6 +262,15 @@
} \ } \
} }
const intptr_t _LBIT = 1;
// Endian-ness ... index of least-significant byte in SplitWord.Bytes[]
#ifdef VM_LITTLE_ENDIAN
#define _LSBINDEX 0
#else
#define _LSBINDEX (sizeof(intptr_t)-1)
#endif
// Simplistic low-quality Marsaglia SHIFT-XOR RNG. // Simplistic low-quality Marsaglia SHIFT-XOR RNG.
// Bijective except for the trailing mask operation. // Bijective except for the trailing mask operation.
// Useful for spin loops as the compiler can't optimize it away. // Useful for spin loops as the compiler can't optimize it away.
@ -297,7 +300,7 @@ int Monitor::TryLock() {
intptr_t v = _LockWord.FullWord; intptr_t v = _LockWord.FullWord;
for (;;) { for (;;) {
if ((v & _LBIT) != 0) return 0; if ((v & _LBIT) != 0) return 0;
const intptr_t u = CASPTR(&_LockWord, v, v|_LBIT); const intptr_t u = Atomic::cmpxchg(v|_LBIT, &_LockWord.FullWord, v);
if (v == u) return 1; if (v == u) return 1;
v = u; v = u;
} }
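
TryLock and TryFast above claim the mutex by setting the low bit of _LockWord with a CAS and give up as soon as the bit is observed set. A minimal sketch of that retry shape; try_lock_word and kLockBit are invented names, and std::atomic<intptr_t> stands in for the SplitWord lock word.

    #include <atomic>
    #include <cstdint>

    static const intptr_t kLockBit = 1;

    // Returns 1 if this caller set the lock bit, 0 if someone else holds it.
    int try_lock_word(std::atomic<intptr_t>* lock_word) {
      intptr_t v = lock_word->load(std::memory_order_relaxed);
      for (;;) {
        if ((v & kLockBit) != 0) return 0;                 // already locked
        if (lock_word->compare_exchange_weak(v, v | kLockBit,
                                             std::memory_order_acquire,
                                             std::memory_order_relaxed)) {
          return 1;                                        // we set the bit
        }
        // compare_exchange_weak refreshed v; loop and re-examine it.
      }
    }
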
@ -307,12 +310,12 @@ int Monitor::TryFast() {
// Optimistic fast-path form ... // Optimistic fast-path form ...
// Fast-path attempt for the common uncontended case. // Fast-path attempt for the common uncontended case.
// Avoid RTS->RTO $ coherence upgrade on typical SMP systems. // Avoid RTS->RTO $ coherence upgrade on typical SMP systems.
intptr_t v = CASPTR(&_LockWord, 0, _LBIT); // agro ... intptr_t v = Atomic::cmpxchg(_LBIT, &_LockWord.FullWord, (intptr_t)0); // agro ...
if (v == 0) return 1; if (v == 0) return 1;
for (;;) { for (;;) {
if ((v & _LBIT) != 0) return 0; if ((v & _LBIT) != 0) return 0;
const intptr_t u = CASPTR(&_LockWord, v, v|_LBIT); const intptr_t u = Atomic::cmpxchg(v|_LBIT, &_LockWord.FullWord, v);
if (v == u) return 1; if (v == u) return 1;
v = u; v = u;
} }
@ -350,7 +353,7 @@ int Monitor::TrySpin(Thread * const Self) {
for (;;) { for (;;) {
intptr_t v = _LockWord.FullWord; intptr_t v = _LockWord.FullWord;
if ((v & _LBIT) == 0) { if ((v & _LBIT) == 0) {
if (CASPTR (&_LockWord, v, v|_LBIT) == v) { if (Atomic::cmpxchg (v|_LBIT, &_LockWord.FullWord, v) == v) {
return 1; return 1;
} }
continue; continue;
@ -419,13 +422,13 @@ inline int Monitor::AcquireOrPush(ParkEvent * ESelf) {
intptr_t v = _LockWord.FullWord; intptr_t v = _LockWord.FullWord;
for (;;) { for (;;) {
if ((v & _LBIT) == 0) { if ((v & _LBIT) == 0) {
const intptr_t u = CASPTR(&_LockWord, v, v|_LBIT); const intptr_t u = Atomic::cmpxchg(v|_LBIT, &_LockWord.FullWord, v);
if (u == v) return 1; // indicate acquired if (u == v) return 1; // indicate acquired
v = u; v = u;
} else { } else {
// Anticipate success ... // Anticipate success ...
ESelf->ListNext = (ParkEvent *)(v & ~_LBIT); ESelf->ListNext = (ParkEvent *)(v & ~_LBIT);
const intptr_t u = CASPTR(&_LockWord, v, intptr_t(ESelf)|_LBIT); const intptr_t u = Atomic::cmpxchg(intptr_t(ESelf)|_LBIT, &_LockWord.FullWord, v);
if (u == v) return 0; // indicate pushed onto cxq if (u == v) return 0; // indicate pushed onto cxq
v = u; v = u;
} }
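
AcquireOrPush above packs the contention queue and the lock bit into one word: a waiter is pushed by CASing its own address OR'ed with the low bit, and the queue head is recovered by masking that bit off. A hedged sketch of the tagging arithmetic; WaitNode, acquire_or_push and kLockBit are invented, and node alignment must leave bit 0 free.

    #include <atomic>
    #include <cstdint>

    struct WaitNode { WaitNode* next = nullptr; };   // alignof(WaitNode) > 1, so bit 0 is spare

    static const uintptr_t kLockBit = 1;

    // Returns 1 if the lock was acquired, 0 if the caller was enqueued instead.
    int acquire_or_push(std::atomic<uintptr_t>* lock_word, WaitNode* self) {
      uintptr_t v = lock_word->load(std::memory_order_relaxed);
      for (;;) {
        if ((v & kLockBit) == 0) {
          // Lock bit clear: try to take the lock without disturbing the queue bits.
          if (lock_word->compare_exchange_weak(v, v | kLockBit,
                                               std::memory_order_acquire,
                                               std::memory_order_relaxed)) return 1;
        } else {
          // Locked: anticipate success and link ourselves in front of the queue,
          // which lives in the pointer bits above the lock bit.
          self->next = reinterpret_cast<WaitNode*>(v & ~kLockBit);
          uintptr_t desired = reinterpret_cast<uintptr_t>(self) | kLockBit;
          if (lock_word->compare_exchange_weak(v, desired,
                                               std::memory_order_release,
                                               std::memory_order_relaxed)) return 0;
        }
        // Either CAS failed: v now holds the fresh word, so retry.
      }
    }
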
@ -463,7 +466,7 @@ void Monitor::ILock(Thread * Self) {
OrderAccess::fence(); OrderAccess::fence();
// Optional optimization ... try barging on the inner lock // Optional optimization ... try barging on the inner lock
if ((NativeMonitorFlags & 32) && CASPTR (&_OnDeck, NULL, UNS(ESelf)) == 0) { if ((NativeMonitorFlags & 32) && Atomic::cmpxchg(ESelf, &_OnDeck, (ParkEvent*)NULL) == NULL) {
goto OnDeck_LOOP; goto OnDeck_LOOP;
} }
@ -474,7 +477,7 @@ void Monitor::ILock(Thread * Self) {
// Only the OnDeck thread can try to acquire -- contend for -- the lock. // Only the OnDeck thread can try to acquire -- contend for -- the lock.
// CONSIDER: use Self->OnDeck instead of m->OnDeck. // CONSIDER: use Self->OnDeck instead of m->OnDeck.
// Deschedule Self so that others may run. // Deschedule Self so that others may run.
while (OrderAccess::load_ptr_acquire(&_OnDeck) != ESelf) { while (OrderAccess::load_acquire(&_OnDeck) != ESelf) {
ParkCommon(ESelf, 0); ParkCommon(ESelf, 0);
} }
@ -570,7 +573,7 @@ void Monitor::IUnlock(bool RelaxAssert) {
// Unlike a normal lock, however, the exiting thread "locks" OnDeck, // Unlike a normal lock, however, the exiting thread "locks" OnDeck,
// picks a successor and marks that thread as OnDeck. That successor // picks a successor and marks that thread as OnDeck. That successor
// thread will then clear OnDeck once it eventually acquires the outer lock. // thread will then clear OnDeck once it eventually acquires the outer lock.
if (CASPTR (&_OnDeck, NULL, _LBIT) != UNS(NULL)) { if (Atomic::cmpxchg((ParkEvent*)_LBIT, &_OnDeck, (ParkEvent*)NULL) != NULL) {
return; return;
} }
@ -585,14 +588,14 @@ void Monitor::IUnlock(bool RelaxAssert) {
assert(RelaxAssert || w != Thread::current()->_MutexEvent, "invariant"); assert(RelaxAssert || w != Thread::current()->_MutexEvent, "invariant");
_EntryList = w->ListNext; _EntryList = w->ListNext;
// as a diagnostic measure consider setting w->_ListNext = BAD // as a diagnostic measure consider setting w->_ListNext = BAD
assert(UNS(_OnDeck) == _LBIT, "invariant"); assert(intptr_t(_OnDeck) == _LBIT, "invariant");
// Pass OnDeck role to w, ensuring that _EntryList has been set first. // Pass OnDeck role to w, ensuring that _EntryList has been set first.
// w will clear _OnDeck once it acquires the outer lock. // w will clear _OnDeck once it acquires the outer lock.
// Note that once we set _OnDeck that thread can acquire the mutex, proceed // Note that once we set _OnDeck that thread can acquire the mutex, proceed
// with its critical section and then enter this code to unlock the mutex. So // with its critical section and then enter this code to unlock the mutex. So
// you can have multiple threads active in IUnlock at the same time. // you can have multiple threads active in IUnlock at the same time.
OrderAccess::release_store_ptr(&_OnDeck, w); OrderAccess::release_store(&_OnDeck, w);
// Another optional optimization ... // Another optional optimization ...
// For heavily contended locks it's not uncommon that some other // For heavily contended locks it's not uncommon that some other
@ -616,7 +619,7 @@ void Monitor::IUnlock(bool RelaxAssert) {
for (;;) { for (;;) {
// optional optimization - if locked, the owner is responsible for succession // optional optimization - if locked, the owner is responsible for succession
if (cxq & _LBIT) goto Punt; if (cxq & _LBIT) goto Punt;
const intptr_t vfy = CASPTR(&_LockWord, cxq, cxq & _LBIT); const intptr_t vfy = Atomic::cmpxchg(cxq & _LBIT, &_LockWord.FullWord, cxq);
if (vfy == cxq) break; if (vfy == cxq) break;
cxq = vfy; cxq = vfy;
// Interference - LockWord changed - Just retry // Interference - LockWord changed - Just retry
@ -652,7 +655,7 @@ void Monitor::IUnlock(bool RelaxAssert) {
// A thread could have added itself to cxq since this thread previously checked. // A thread could have added itself to cxq since this thread previously checked.
// Detect and recover by refetching cxq. // Detect and recover by refetching cxq.
Punt: Punt:
assert(UNS(_OnDeck) == _LBIT, "invariant"); assert(intptr_t(_OnDeck) == _LBIT, "invariant");
_OnDeck = NULL; // Release inner lock. _OnDeck = NULL; // Release inner lock.
OrderAccess::storeload(); // Dekker duality - pivot point OrderAccess::storeload(); // Dekker duality - pivot point
@ -693,7 +696,7 @@ bool Monitor::notify() {
const intptr_t v = _LockWord.FullWord; const intptr_t v = _LockWord.FullWord;
assert((v & 0xFF) == _LBIT, "invariant"); assert((v & 0xFF) == _LBIT, "invariant");
nfy->ListNext = (ParkEvent *)(v & ~_LBIT); nfy->ListNext = (ParkEvent *)(v & ~_LBIT);
if (CASPTR (&_LockWord, v, UNS(nfy)|_LBIT) == v) break; if (Atomic::cmpxchg(intptr_t(nfy)|_LBIT, &_LockWord.FullWord, v) == v) break;
// interference - _LockWord changed -- just retry // interference - _LockWord changed -- just retry
} }
// Note that setting Notified before pushing nfy onto the cxq is // Note that setting Notified before pushing nfy onto the cxq is
@ -840,7 +843,7 @@ int Monitor::IWait(Thread * Self, jlong timo) {
// ESelf is now on the cxq, EntryList or at the OnDeck position. // ESelf is now on the cxq, EntryList or at the OnDeck position.
// The following fragment is extracted from Monitor::ILock() // The following fragment is extracted from Monitor::ILock()
for (;;) { for (;;) {
if (OrderAccess::load_ptr_acquire(&_OnDeck) == ESelf && TrySpin(Self)) break; if (OrderAccess::load_acquire(&_OnDeck) == ESelf && TrySpin(Self)) break;
ParkCommon(ESelf, 0); ParkCommon(ESelf, 0);
} }
assert(_OnDeck == ESelf, "invariant"); assert(_OnDeck == ESelf, "invariant");
@ -1058,7 +1061,7 @@ void Monitor::jvm_raw_lock() {
// Only the OnDeck thread can try to acquire -- contend for -- the lock. // Only the OnDeck thread can try to acquire -- contend for -- the lock.
// CONSIDER: use Self->OnDeck instead of m->OnDeck. // CONSIDER: use Self->OnDeck instead of m->OnDeck.
for (;;) { for (;;) {
if (OrderAccess::load_ptr_acquire(&_OnDeck) == ESelf && TrySpin(NULL)) break; if (OrderAccess::load_acquire(&_OnDeck) == ESelf && TrySpin(NULL)) break;
ParkCommon(ESelf, 0); ParkCommon(ESelf, 0);
} }
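
The _OnDeck handoff above is a release/acquire pair: the exiting thread release-stores the chosen successor, and only the waiter that observes its own event with an acquiring load proceeds to contend for the outer lock. A small sketch of that pairing; ParkEvent here is an empty stand-in and the park/spin body is omitted.

    #include <atomic>

    struct ParkEvent {};

    std::atomic<ParkEvent*> g_on_deck{nullptr};

    // Exiting thread: hand the OnDeck role to a successor. The release store
    // makes every earlier write (e.g. the EntryList update) visible to the
    // successor before it can observe itself as OnDeck.
    void pass_on_deck(ParkEvent* successor) {
      g_on_deck.store(successor, std::memory_order_release);
    }

    // Waiting thread: only the thread that sees its own event may contend.
    void wait_for_on_deck(ParkEvent* self) {
      while (g_on_deck.load(std::memory_order_acquire) != self) {
        // park or spin here; omitted in this sketch
      }
    }
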


@ -67,13 +67,6 @@ union SplitWord { // full-word with separately addressable LSB
volatile jbyte Bytes [sizeof(intptr_t)] ; volatile jbyte Bytes [sizeof(intptr_t)] ;
} ; } ;
// Endian-ness ... index of least-significant byte in SplitWord.Bytes[]
#ifdef VM_LITTLE_ENDIAN
#define _LSBINDEX 0
#else
#define _LSBINDEX (sizeof(intptr_t)-1)
#endif
class ParkEvent ; class ParkEvent ;
// See orderAccess.hpp. We assume throughout the VM that mutex lock and // See orderAccess.hpp. We assume throughout the VM that mutex lock and
@ -128,7 +121,6 @@ class Monitor : public CHeapObj<mtInternal> {
protected: // Monitor-Mutex metadata protected: // Monitor-Mutex metadata
SplitWord _LockWord ; // Contention queue (cxq) colocated with Lock-byte SplitWord _LockWord ; // Contention queue (cxq) colocated with Lock-byte
enum LockWordBits { _LBIT=1 } ;
Thread * volatile _owner; // The owner of the lock Thread * volatile _owner; // The owner of the lock
// Consider sequestering _owner on its own $line // Consider sequestering _owner on its own $line
// to aid future synchronization mechanisms. // to aid future synchronization mechanisms.


@ -249,7 +249,7 @@ void ObjectMonitor::enter(TRAPS) {
// and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors. // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
Thread * const Self = THREAD; Thread * const Self = THREAD;
void * cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL); void * cur = Atomic::cmpxchg(Self, &_owner, (void*)NULL);
if (cur == NULL) { if (cur == NULL) {
// Either ASSERT _recursions == 0 or explicitly set _recursions = 0. // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
assert(_recursions == 0, "invariant"); assert(_recursions == 0, "invariant");
@ -406,7 +406,7 @@ void ObjectMonitor::enter(TRAPS) {
int ObjectMonitor::TryLock(Thread * Self) { int ObjectMonitor::TryLock(Thread * Self) {
void * own = _owner; void * own = _owner;
if (own != NULL) return 0; if (own != NULL) return 0;
if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) { if (Atomic::cmpxchg(Self, &_owner, (void*)NULL) == NULL) {
// Either guarantee _recursions == 0 or set _recursions = 0. // Either guarantee _recursions == 0 or set _recursions = 0.
assert(_recursions == 0, "invariant"); assert(_recursions == 0, "invariant");
assert(_owner == Self, "invariant"); assert(_owner == Self, "invariant");
@ -476,7 +476,7 @@ void ObjectMonitor::EnterI(TRAPS) {
ObjectWaiter * nxt; ObjectWaiter * nxt;
for (;;) { for (;;) {
node._next = nxt = _cxq; node._next = nxt = _cxq;
if (Atomic::cmpxchg_ptr(&node, &_cxq, nxt) == nxt) break; if (Atomic::cmpxchg(&node, &_cxq, nxt) == nxt) break;
// Interference - the CAS failed because _cxq changed. Just retry. // Interference - the CAS failed because _cxq changed. Just retry.
// As an optional optimization we retry the lock. // As an optional optimization we retry the lock.
@ -514,7 +514,7 @@ void ObjectMonitor::EnterI(TRAPS) {
if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) { if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
// Try to assume the role of responsible thread for the monitor. // Try to assume the role of responsible thread for the monitor.
// CONSIDER: ST vs CAS vs { if (Responsible==null) Responsible=Self } // CONSIDER: ST vs CAS vs { if (Responsible==null) Responsible=Self }
Atomic::cmpxchg_ptr(Self, &_Responsible, NULL); Atomic::cmpxchg(Self, &_Responsible, (Thread*)NULL);
} }
// The lock might have been released while this thread was occupied queueing // The lock might have been released while this thread was occupied queueing
@ -538,7 +538,7 @@ void ObjectMonitor::EnterI(TRAPS) {
assert(_owner != Self, "invariant"); assert(_owner != Self, "invariant");
if ((SyncFlags & 2) && _Responsible == NULL) { if ((SyncFlags & 2) && _Responsible == NULL) {
Atomic::cmpxchg_ptr(Self, &_Responsible, NULL); Atomic::cmpxchg(Self, &_Responsible, (Thread*)NULL);
} }
// park self // park self
@ -795,7 +795,7 @@ void ObjectMonitor::UnlinkAfterAcquire(Thread *Self, ObjectWaiter *SelfNode) {
ObjectWaiter * v = _cxq; ObjectWaiter * v = _cxq;
assert(v != NULL, "invariant"); assert(v != NULL, "invariant");
if (v != SelfNode || Atomic::cmpxchg_ptr (SelfNode->_next, &_cxq, v) != v) { if (v != SelfNode || Atomic::cmpxchg(SelfNode->_next, &_cxq, v) != v) {
// The CAS above can fail from interference IFF a "RAT" arrived. // The CAS above can fail from interference IFF a "RAT" arrived.
// In that case Self must be in the interior and can no longer be // In that case Self must be in the interior and can no longer be
// at the head of cxq. // at the head of cxq.
@ -947,7 +947,7 @@ void ObjectMonitor::exit(bool not_suspended, TRAPS) {
// in massive wasteful coherency traffic on classic SMP systems. // in massive wasteful coherency traffic on classic SMP systems.
// Instead, I use release_store(), which is implemented as just a simple // Instead, I use release_store(), which is implemented as just a simple
// ST on x64, x86 and SPARC. // ST on x64, x86 and SPARC.
OrderAccess::release_store_ptr(&_owner, NULL); // drop the lock OrderAccess::release_store(&_owner, (void*)NULL); // drop the lock
OrderAccess::storeload(); // See if we need to wake a successor OrderAccess::storeload(); // See if we need to wake a successor
if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) { if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
TEVENT(Inflated exit - simple egress); TEVENT(Inflated exit - simple egress);
@ -992,13 +992,13 @@ void ObjectMonitor::exit(bool not_suspended, TRAPS) {
// to reacquire the lock the responsibility for ensuring succession // to reacquire the lock the responsibility for ensuring succession
// falls to the new owner. // falls to the new owner.
// //
if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) { if (Atomic::cmpxchg(THREAD, &_owner, (void*)NULL) != NULL) {
return; return;
} }
TEVENT(Exit - Reacquired); TEVENT(Exit - Reacquired);
} else { } else {
if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) { if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
OrderAccess::release_store_ptr(&_owner, NULL); // drop the lock OrderAccess::release_store(&_owner, (void*)NULL); // drop the lock
OrderAccess::storeload(); OrderAccess::storeload();
// Ratify the previously observed values. // Ratify the previously observed values.
if (_cxq == NULL || _succ != NULL) { if (_cxq == NULL || _succ != NULL) {
@ -1017,7 +1017,7 @@ void ObjectMonitor::exit(bool not_suspended, TRAPS) {
// B. If the elements forming the EntryList|cxq are TSM // B. If the elements forming the EntryList|cxq are TSM
// we could simply unpark() the lead thread and return // we could simply unpark() the lead thread and return
// without having set _succ. // without having set _succ.
if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) { if (Atomic::cmpxchg(THREAD, &_owner, (void*)NULL) != NULL) {
TEVENT(Inflated exit - reacquired succeeded); TEVENT(Inflated exit - reacquired succeeded);
return; return;
} }
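
The exit path above drops the lock with a plain release store, then needs a storeload barrier before re-reading the queues, and reacquires with a CAS only if a successor must be woken. A hedged sketch of that drop, recheck, reacquire idiom; the names and the seq_cst fence standing in for OrderAccess::storeload() are assumptions of the sketch.

    #include <atomic>

    std::atomic<void*> g_owner{nullptr};
    std::atomic<void*> g_entry_list{nullptr};

    void unlock_and_maybe_wake(void* self) {
      g_owner.store(nullptr, std::memory_order_release);     // drop the lock
      std::atomic_thread_fence(std::memory_order_seq_cst);   // order the owner store before the queue load
      if (g_entry_list.load(std::memory_order_relaxed) == nullptr) {
        return;                                              // nobody to wake
      }
      // Somebody is queued: try to take the lock back so we can pick a successor.
      void* expected = nullptr;
      if (!g_owner.compare_exchange_strong(expected, self)) {
        return;   // another thread grabbed it; succession is now its responsibility
      }
      // ... choose and unpark a successor, then release again (omitted) ...
    }
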
@ -1052,7 +1052,7 @@ void ObjectMonitor::exit(bool not_suspended, TRAPS) {
w = _cxq; w = _cxq;
for (;;) { for (;;) {
assert(w != NULL, "Invariant"); assert(w != NULL, "Invariant");
ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr(NULL, &_cxq, w); ObjectWaiter * u = Atomic::cmpxchg((ObjectWaiter*)NULL, &_cxq, w);
if (u == w) break; if (u == w) break;
w = u; w = u;
} }
@ -1093,7 +1093,7 @@ void ObjectMonitor::exit(bool not_suspended, TRAPS) {
w = _cxq; w = _cxq;
for (;;) { for (;;) {
assert(w != NULL, "Invariant"); assert(w != NULL, "Invariant");
ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr(NULL, &_cxq, w); ObjectWaiter * u = Atomic::cmpxchg((ObjectWaiter*)NULL, &_cxq, w);
if (u == w) break; if (u == w) break;
w = u; w = u;
} }
@ -1146,7 +1146,7 @@ void ObjectMonitor::exit(bool not_suspended, TRAPS) {
// The following loop is tantamount to: w = swap(&cxq, NULL) // The following loop is tantamount to: w = swap(&cxq, NULL)
for (;;) { for (;;) {
assert(w != NULL, "Invariant"); assert(w != NULL, "Invariant");
ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr(NULL, &_cxq, w); ObjectWaiter * u = Atomic::cmpxchg((ObjectWaiter*)NULL, &_cxq, w);
if (u == w) break; if (u == w) break;
w = u; w = u;
} }
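
As the comment above says, the loop is tantamount to w = swap(&cxq, NULL): the whole contention queue is detached in one atomic step, and concurrent pushers simply start a new list. With std::atomic the CAS loop collapses into a single exchange; ObjectWaiterSketch and detach_cxq are invented names.

    #include <atomic>

    struct ObjectWaiterSketch { ObjectWaiterSketch* next = nullptr; };

    std::atomic<ObjectWaiterSketch*> g_cxq{nullptr};

    // Detach the entire queue at once; the acquire pairs with the pushers'
    // release so the detached nodes' contents are visible to the caller.
    ObjectWaiterSketch* detach_cxq() {
      return g_cxq.exchange(nullptr, std::memory_order_acquire);
    }
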
@ -1279,7 +1279,7 @@ void ObjectMonitor::ExitEpilog(Thread * Self, ObjectWaiter * Wakee) {
Wakee = NULL; Wakee = NULL;
// Drop the lock // Drop the lock
OrderAccess::release_store_ptr(&_owner, NULL); OrderAccess::release_store(&_owner, (void*)NULL);
OrderAccess::fence(); // ST _owner vs LD in unpark() OrderAccess::fence(); // ST _owner vs LD in unpark()
if (SafepointSynchronize::do_call_back()) { if (SafepointSynchronize::do_call_back()) {
@ -1688,7 +1688,7 @@ void ObjectMonitor::INotify(Thread * Self) {
for (;;) { for (;;) {
ObjectWaiter * front = _cxq; ObjectWaiter * front = _cxq;
iterator->_next = front; iterator->_next = front;
if (Atomic::cmpxchg_ptr(iterator, &_cxq, front) == front) { if (Atomic::cmpxchg(iterator, &_cxq, front) == front) {
break; break;
} }
} }
@ -1699,7 +1699,7 @@ void ObjectMonitor::INotify(Thread * Self) {
ObjectWaiter * tail = _cxq; ObjectWaiter * tail = _cxq;
if (tail == NULL) { if (tail == NULL) {
iterator->_next = NULL; iterator->_next = NULL;
if (Atomic::cmpxchg_ptr(iterator, &_cxq, NULL) == NULL) { if (Atomic::cmpxchg(iterator, &_cxq, (ObjectWaiter*)NULL) == NULL) {
break; break;
} }
} else { } else {
@ -1980,7 +1980,7 @@ int ObjectMonitor::TrySpin(Thread * Self) {
Thread * ox = (Thread *) _owner; Thread * ox = (Thread *) _owner;
if (ox == NULL) { if (ox == NULL) {
ox = (Thread *) Atomic::cmpxchg_ptr(Self, &_owner, NULL); ox = (Thread*)Atomic::cmpxchg(Self, &_owner, (void*)NULL);
if (ox == NULL) { if (ox == NULL) {
// The CAS succeeded -- this thread acquired ownership // The CAS succeeded -- this thread acquired ownership
// Take care of some bookkeeping to exit spin state. // Take care of some bookkeeping to exit spin state.


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -143,7 +143,7 @@ class ObjectMonitor {
volatile markOop _header; // displaced object header word - mark volatile markOop _header; // displaced object header word - mark
void* volatile _object; // backward object pointer - strong root void* volatile _object; // backward object pointer - strong root
public: public:
ObjectMonitor * FreeNext; // Free list linkage ObjectMonitor* FreeNext; // Free list linkage
private: private:
DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE,
sizeof(volatile markOop) + sizeof(void * volatile) + sizeof(volatile markOop) + sizeof(void * volatile) +
@ -251,6 +251,7 @@ class ObjectMonitor {
((ObjectMonitor::f ## _offset_in_bytes()) - markOopDesc::monitor_value) ((ObjectMonitor::f ## _offset_in_bytes()) - markOopDesc::monitor_value)
markOop header() const; markOop header() const;
volatile markOop* header_addr();
void set_header(markOop hdr); void set_header(markOop hdr);
intptr_t is_busy() const { intptr_t is_busy() const {


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -36,6 +36,11 @@ inline markOop ObjectMonitor::header() const {
return _header; return _header;
} }
inline volatile markOop* ObjectMonitor::header_addr() {
assert((intptr_t)this == (intptr_t)&_header, "sync code expects this");
return &_header;
}
inline void ObjectMonitor::set_header(markOop hdr) { inline void ObjectMonitor::set_header(markOop hdr) {
_header = hdr; _header = hdr;
} }
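
header_addr() above exists so FastHashCode can hand the templated cmpxchg a properly typed destination instead of casting the monitor pointer and silently relying on _header sitting at offset 0; the assert pins that layout assumption down. A tiny illustrative stand-in (MonitorSketch is not the real ObjectMonitor layout):

    #include <cassert>
    #include <cstdint>

    struct MonitorSketch {
      void* volatile _header;   // displaced header word; must remain the first field
      void* volatile _object;

      // Typed address of the header, usable as a CAS destination.
      void* volatile* header_addr() {
        assert(reinterpret_cast<uintptr_t>(this) ==
               reinterpret_cast<uintptr_t>(&_header));   // "sync code expects this"
        return &_header;
      }
    };
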


@ -268,21 +268,12 @@ class OrderAccess : private Atomic {
template <typename T> template <typename T>
static T load_acquire(const volatile T* p); static T load_acquire(const volatile T* p);
static intptr_t load_ptr_acquire(const volatile intptr_t* p);
static void* load_ptr_acquire(const volatile void* p);
template <typename T, typename D> template <typename T, typename D>
static void release_store(volatile D* p, T v); static void release_store(volatile D* p, T v);
static void release_store_ptr(volatile intptr_t* p, intptr_t v);
static void release_store_ptr(volatile void* p, void* v);
template <typename T, typename D> template <typename T, typename D>
static void release_store_fence(volatile D* p, T v); static void release_store_fence(volatile D* p, T v);
static void release_store_ptr_fence(volatile intptr_t* p, intptr_t v);
static void release_store_ptr_fence(volatile void* p, void* v);
private: private:
// This is a helper that invokes the StubRoutines::fence_entry() // This is a helper that invokes the StubRoutines::fence_entry()
// routine if it exists, It should only be used by platforms that // routine if it exists, It should only be used by platforms that


@ -54,28 +54,13 @@ inline T OrderAccess::load_acquire(const volatile T* p) {
return LoadImpl<T, PlatformOrderedLoad<sizeof(T), X_ACQUIRE> >()(p); return LoadImpl<T, PlatformOrderedLoad<sizeof(T), X_ACQUIRE> >()(p);
} }
inline intptr_t OrderAccess::load_ptr_acquire(const volatile intptr_t* p) {
return load_acquire(p);
}
inline void* OrderAccess::load_ptr_acquire(const volatile void* p) {
return load_acquire(static_cast<void* const volatile *>(p));
}
template <typename T, typename D> template <typename T, typename D>
inline void OrderAccess::release_store(volatile D* p, T v) { inline void OrderAccess::release_store(volatile D* p, T v) {
StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X> >()(v, p); StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X> >()(v, p);
} }
inline void OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { release_store(p, v); }
inline void OrderAccess::release_store_ptr(volatile void* p, void* v) { release_store(static_cast<void* volatile*>(p), v); }
template <typename T, typename D> template <typename T, typename D>
inline void OrderAccess::release_store_fence(volatile D* p, T v) { inline void OrderAccess::release_store_fence(volatile D* p, T v) {
StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X_FENCE> >()(v, p); StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X_FENCE> >()(v, p);
} }
inline void OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) { release_store_fence(p, v); }
inline void OrderAccess::release_store_ptr_fence(volatile void* p, void* v) { release_store_fence(static_cast<void* volatile*>(p), v); }
#endif // SHARE_VM_RUNTIME_ORDERACCESS_INLINE_HPP #endif // SHARE_VM_RUNTIME_ORDERACCESS_INLINE_HPP


@ -59,11 +59,10 @@ address StubRoutines::_throw_delayed_StackOverflowError_entry = NULL;
jint StubRoutines::_verify_oop_count = 0; jint StubRoutines::_verify_oop_count = 0;
address StubRoutines::_verify_oop_subroutine_entry = NULL; address StubRoutines::_verify_oop_subroutine_entry = NULL;
address StubRoutines::_atomic_xchg_entry = NULL; address StubRoutines::_atomic_xchg_entry = NULL;
address StubRoutines::_atomic_xchg_ptr_entry = NULL; address StubRoutines::_atomic_xchg_long_entry = NULL;
address StubRoutines::_atomic_store_entry = NULL; address StubRoutines::_atomic_store_entry = NULL;
address StubRoutines::_atomic_store_ptr_entry = NULL; address StubRoutines::_atomic_store_ptr_entry = NULL;
address StubRoutines::_atomic_cmpxchg_entry = NULL; address StubRoutines::_atomic_cmpxchg_entry = NULL;
address StubRoutines::_atomic_cmpxchg_ptr_entry = NULL;
address StubRoutines::_atomic_cmpxchg_byte_entry = NULL; address StubRoutines::_atomic_cmpxchg_byte_entry = NULL;
address StubRoutines::_atomic_cmpxchg_long_entry = NULL; address StubRoutines::_atomic_cmpxchg_long_entry = NULL;
address StubRoutines::_atomic_add_entry = NULL; address StubRoutines::_atomic_add_entry = NULL;


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -101,11 +101,10 @@ class StubRoutines: AllStatic {
static address _throw_delayed_StackOverflowError_entry; static address _throw_delayed_StackOverflowError_entry;
static address _atomic_xchg_entry; static address _atomic_xchg_entry;
static address _atomic_xchg_ptr_entry; static address _atomic_xchg_long_entry;
static address _atomic_store_entry; static address _atomic_store_entry;
static address _atomic_store_ptr_entry; static address _atomic_store_ptr_entry;
static address _atomic_cmpxchg_entry; static address _atomic_cmpxchg_entry;
static address _atomic_cmpxchg_ptr_entry;
static address _atomic_cmpxchg_byte_entry; static address _atomic_cmpxchg_byte_entry;
static address _atomic_cmpxchg_long_entry; static address _atomic_cmpxchg_long_entry;
static address _atomic_add_entry; static address _atomic_add_entry;
@ -276,11 +275,10 @@ class StubRoutines: AllStatic {
static address throw_delayed_StackOverflowError_entry() { return _throw_delayed_StackOverflowError_entry; } static address throw_delayed_StackOverflowError_entry() { return _throw_delayed_StackOverflowError_entry; }
static address atomic_xchg_entry() { return _atomic_xchg_entry; } static address atomic_xchg_entry() { return _atomic_xchg_entry; }
static address atomic_xchg_ptr_entry() { return _atomic_xchg_ptr_entry; } static address atomic_xchg_long_entry() { return _atomic_xchg_long_entry; }
static address atomic_store_entry() { return _atomic_store_entry; } static address atomic_store_entry() { return _atomic_store_entry; }
static address atomic_store_ptr_entry() { return _atomic_store_ptr_entry; } static address atomic_store_ptr_entry() { return _atomic_store_ptr_entry; }
static address atomic_cmpxchg_entry() { return _atomic_cmpxchg_entry; } static address atomic_cmpxchg_entry() { return _atomic_cmpxchg_entry; }
static address atomic_cmpxchg_ptr_entry() { return _atomic_cmpxchg_ptr_entry; }
static address atomic_cmpxchg_byte_entry() { return _atomic_cmpxchg_byte_entry; } static address atomic_cmpxchg_byte_entry() { return _atomic_cmpxchg_byte_entry; }
static address atomic_cmpxchg_long_entry() { return _atomic_cmpxchg_long_entry; } static address atomic_cmpxchg_long_entry() { return _atomic_cmpxchg_long_entry; }
static address atomic_add_entry() { return _atomic_add_entry; } static address atomic_add_entry() { return _atomic_add_entry; }


@ -111,9 +111,7 @@ int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
static volatile intptr_t gInflationLocks[NINFLATIONLOCKS]; static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];
// global list of blocks of monitors // global list of blocks of monitors
// gBlockList is really PaddedEnd<ObjectMonitor> *, but we don't PaddedEnd<ObjectMonitor> * volatile ObjectSynchronizer::gBlockList = NULL;
// want to expose the PaddedEnd template more than necessary.
ObjectMonitor * volatile ObjectSynchronizer::gBlockList = NULL;
// global monitor free list // global monitor free list
ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL; ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL;
// global monitor in-use list, for moribund threads, // global monitor in-use list, for moribund threads,
@ -241,7 +239,7 @@ bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self,
lock->set_displaced_header(markOopDesc::unused_mark()); lock->set_displaced_header(markOopDesc::unused_mark());
if (owner == NULL && if (owner == NULL &&
Atomic::cmpxchg_ptr(Self, &(m->_owner), NULL) == NULL) { Atomic::cmpxchg(Self, &(m->_owner), (void*)NULL) == NULL) {
assert(m->_recursions == 0, "invariant"); assert(m->_recursions == 0, "invariant");
assert(m->_owner == Self, "invariant"); assert(m->_owner == Self, "invariant");
return true; return true;
@ -802,7 +800,7 @@ intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) {
hash = get_next_hash(Self, obj); hash = get_next_hash(Self, obj);
temp = mark->copy_set_hash(hash); // merge hash code into header temp = mark->copy_set_hash(hash); // merge hash code into header
assert(temp->is_neutral(), "invariant"); assert(temp->is_neutral(), "invariant");
test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark); test = Atomic::cmpxchg(temp, monitor->header_addr(), mark);
if (test != mark) { if (test != mark) {
// The only update to the header in the monitor (outside GC) // The only update to the header in the monitor (outside GC)
// is install the hash code. If someone add new usage of // is install the hash code. If someone add new usage of
@ -939,8 +937,7 @@ JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
// Visitors ... // Visitors ...
void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) { void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
PaddedEnd<ObjectMonitor> * block = PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);
(PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
while (block != NULL) { while (block != NULL) {
assert(block->object() == CHAINMARKER, "must be a block header"); assert(block->object() == CHAINMARKER, "must be a block header");
for (int i = _BLOCKSIZE - 1; i > 0; i--) { for (int i = _BLOCKSIZE - 1; i > 0; i--) {
@ -955,9 +952,9 @@ void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
} }
// Get the next block in the block list. // Get the next block in the block list.
static inline ObjectMonitor* next(ObjectMonitor* block) { static inline PaddedEnd<ObjectMonitor>* next(PaddedEnd<ObjectMonitor>* block) {
assert(block->object() == CHAINMARKER, "must be a block header"); assert(block->object() == CHAINMARKER, "must be a block header");
block = block->FreeNext; block = (PaddedEnd<ObjectMonitor>*) block->FreeNext;
assert(block == NULL || block->object() == CHAINMARKER, "must be a block header"); assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
return block; return block;
} }
@ -991,9 +988,8 @@ void ObjectSynchronizer::oops_do(OopClosure* f) {
void ObjectSynchronizer::global_oops_do(OopClosure* f) { void ObjectSynchronizer::global_oops_do(OopClosure* f) {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
PaddedEnd<ObjectMonitor> * block = PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);
(PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList); for (; block != NULL; block = next(block)) {
for (; block != NULL; block = (PaddedEnd<ObjectMonitor> *)next(block)) {
assert(block->object() == CHAINMARKER, "must be a block header"); assert(block->object() == CHAINMARKER, "must be a block header");
for (int i = 1; i < _BLOCKSIZE; i++) { for (int i = 1; i < _BLOCKSIZE; i++) {
ObjectMonitor* mid = (ObjectMonitor *)&block[i]; ObjectMonitor* mid = (ObjectMonitor *)&block[i];
@ -1232,7 +1228,7 @@ ObjectMonitor* ObjectSynchronizer::omAlloc(Thread * Self) {
temp[0].FreeNext = gBlockList; temp[0].FreeNext = gBlockList;
// There are lock-free uses of gBlockList so make sure that // There are lock-free uses of gBlockList so make sure that
// the previous stores happen before we update gBlockList. // the previous stores happen before we update gBlockList.
OrderAccess::release_store_ptr(&gBlockList, temp); OrderAccess::release_store(&gBlockList, temp);
// Add the new string of objectMonitors to the global free list // Add the new string of objectMonitors to the global free list
temp[_BLOCKSIZE - 1].FreeNext = gFreeList; temp[_BLOCKSIZE - 1].FreeNext = gFreeList;
@ -1734,9 +1730,8 @@ void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters)
} }
} else { } else {
PaddedEnd<ObjectMonitor> * block = PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);
(PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList); for (; block != NULL; block = next(block)) {
for (; block != NULL; block = (PaddedEnd<ObjectMonitor> *)next(block)) {
// Iterate over all extant monitors - Scavenge all idle monitors. // Iterate over all extant monitors - Scavenge all idle monitors.
assert(block->object() == CHAINMARKER, "must be a block header"); assert(block->object() == CHAINMARKER, "must be a block header");
counters->nInCirculation += _BLOCKSIZE; counters->nInCirculation += _BLOCKSIZE;
@ -1969,12 +1964,10 @@ void ObjectSynchronizer::sanity_checks(const bool verbose,
// the list of extant blocks without taking a lock. // the list of extant blocks without taking a lock.
int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) { int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
PaddedEnd<ObjectMonitor> * block = PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);
(PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
while (block != NULL) { while (block != NULL) {
assert(block->object() == CHAINMARKER, "must be a block header"); assert(block->object() == CHAINMARKER, "must be a block header");
if (monitor > (ObjectMonitor *)&block[0] && if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
monitor < (ObjectMonitor *)&block[_BLOCKSIZE]) {
address mon = (address)monitor; address mon = (address)monitor;
address blk = (address)block; address blk = (address)block;
size_t diff = mon - blk; size_t diff = mon - blk;


@ -25,6 +25,7 @@
#ifndef SHARE_VM_RUNTIME_SYNCHRONIZER_HPP #ifndef SHARE_VM_RUNTIME_SYNCHRONIZER_HPP
#define SHARE_VM_RUNTIME_SYNCHRONIZER_HPP #define SHARE_VM_RUNTIME_SYNCHRONIZER_HPP
#include "memory/padded.hpp"
#include "oops/markOop.hpp" #include "oops/markOop.hpp"
#include "runtime/basicLock.hpp" #include "runtime/basicLock.hpp"
#include "runtime/handles.hpp" #include "runtime/handles.hpp"
@ -159,9 +160,7 @@ class ObjectSynchronizer : AllStatic {
private: private:
enum { _BLOCKSIZE = 128 }; enum { _BLOCKSIZE = 128 };
// global list of blocks of monitors // global list of blocks of monitors
// gBlockList is really PaddedEnd<ObjectMonitor> *, but we don't static PaddedEnd<ObjectMonitor> * volatile gBlockList;
// want to expose the PaddedEnd template more than necessary.
static ObjectMonitor * volatile gBlockList;
// global monitor free list // global monitor free list
static ObjectMonitor * volatile gFreeList; static ObjectMonitor * volatile gFreeList;
// global monitor in-use list, for moribund threads, // global monitor in-use list, for moribund threads,


@ -4701,13 +4701,12 @@ void Thread::SpinRelease(volatile int * adr) {
// //
typedef volatile intptr_t MutexT; // Mux Lock-word const intptr_t LOCKBIT = 1;
enum MuxBits { LOCKBIT = 1 };
void Thread::muxAcquire(volatile intptr_t * Lock, const char * LockName) { void Thread::muxAcquire(volatile intptr_t * Lock, const char * LockName) {
intptr_t w = Atomic::cmpxchg_ptr(LOCKBIT, Lock, 0); intptr_t w = Atomic::cmpxchg(LOCKBIT, Lock, (intptr_t)0);
if (w == 0) return; if (w == 0) return;
if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) { if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) {
return; return;
} }
@ -4720,7 +4719,7 @@ void Thread::muxAcquire(volatile intptr_t * Lock, const char * LockName) {
// Optional spin phase: spin-then-park strategy // Optional spin phase: spin-then-park strategy
while (--its >= 0) { while (--its >= 0) {
w = *Lock; w = *Lock;
if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) { if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) {
return; return;
} }
} }
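
muxAcquire above is spin-then-park: a bounded number of CAS attempts on the lock word before the thread enqueues itself and parks. A sketch of just the spin phase; spin_then_fallback and kSpinLimit are invented, and the enqueue/park fallback is left to the caller.

    #include <atomic>
    #include <cstdint>

    static const intptr_t kLockBit   = 1;
    static const int      kSpinLimit = 100;

    // Returns true if the lock bit was grabbed during the spin phase,
    // false if the caller should go on to enqueue itself and park.
    bool spin_then_fallback(std::atomic<intptr_t>* lock) {
      for (int its = kSpinLimit; --its >= 0; ) {
        intptr_t w = lock->load(std::memory_order_relaxed);
        if ((w & kLockBit) == 0 &&
            lock->compare_exchange_strong(w, w | kLockBit,
                                          std::memory_order_acquire,
                                          std::memory_order_relaxed)) {
          return true;
        }
      }
      return false;   // spin budget exhausted; caller parks instead
    }
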
@ -4733,7 +4732,7 @@ void Thread::muxAcquire(volatile intptr_t * Lock, const char * LockName) {
for (;;) { for (;;) {
w = *Lock; w = *Lock;
if ((w & LOCKBIT) == 0) { if ((w & LOCKBIT) == 0) {
if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) { if (Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) {
Self->OnList = 0; // hygiene - allows stronger asserts Self->OnList = 0; // hygiene - allows stronger asserts
return; return;
} }
@ -4741,7 +4740,7 @@ void Thread::muxAcquire(volatile intptr_t * Lock, const char * LockName) {
} }
assert(w & LOCKBIT, "invariant"); assert(w & LOCKBIT, "invariant");
Self->ListNext = (ParkEvent *) (w & ~LOCKBIT); Self->ListNext = (ParkEvent *) (w & ~LOCKBIT);
if (Atomic::cmpxchg_ptr(intptr_t(Self)|LOCKBIT, Lock, w) == w) break; if (Atomic::cmpxchg(intptr_t(Self)|LOCKBIT, Lock, w) == w) break;
} }
while (Self->OnList != 0) { while (Self->OnList != 0) {
@ -4751,9 +4750,9 @@ void Thread::muxAcquire(volatile intptr_t * Lock, const char * LockName) {
} }
void Thread::muxAcquireW(volatile intptr_t * Lock, ParkEvent * ev) { void Thread::muxAcquireW(volatile intptr_t * Lock, ParkEvent * ev) {
intptr_t w = Atomic::cmpxchg_ptr(LOCKBIT, Lock, 0); intptr_t w = Atomic::cmpxchg(LOCKBIT, Lock, (intptr_t)0);
if (w == 0) return; if (w == 0) return;
if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) { if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) {
return; return;
} }
@ -4770,7 +4769,7 @@ void Thread::muxAcquireW(volatile intptr_t * Lock, ParkEvent * ev) {
// Optional spin phase: spin-then-park strategy // Optional spin phase: spin-then-park strategy
while (--its >= 0) { while (--its >= 0) {
w = *Lock; w = *Lock;
if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) { if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) {
if (ReleaseAfter != NULL) { if (ReleaseAfter != NULL) {
ParkEvent::Release(ReleaseAfter); ParkEvent::Release(ReleaseAfter);
} }
@ -4786,7 +4785,7 @@ void Thread::muxAcquireW(volatile intptr_t * Lock, ParkEvent * ev) {
for (;;) { for (;;) {
w = *Lock; w = *Lock;
if ((w & LOCKBIT) == 0) { if ((w & LOCKBIT) == 0) {
if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) { if (Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) {
ev->OnList = 0; ev->OnList = 0;
// We call ::Release while holding the outer lock, thus // We call ::Release while holding the outer lock, thus
// artificially lengthening the critical section. // artificially lengthening the critical section.
@ -4801,7 +4800,7 @@ void Thread::muxAcquireW(volatile intptr_t * Lock, ParkEvent * ev) {
} }
assert(w & LOCKBIT, "invariant"); assert(w & LOCKBIT, "invariant");
ev->ListNext = (ParkEvent *) (w & ~LOCKBIT); ev->ListNext = (ParkEvent *) (w & ~LOCKBIT);
if (Atomic::cmpxchg_ptr(intptr_t(ev)|LOCKBIT, Lock, w) == w) break; if (Atomic::cmpxchg(intptr_t(ev)|LOCKBIT, Lock, w) == w) break;
} }
while (ev->OnList != 0) { while (ev->OnList != 0) {
@ -4837,7 +4836,7 @@ void Thread::muxAcquireW(volatile intptr_t * Lock, ParkEvent * ev) {
// store (CAS) to the lock-word that releases the lock becomes globally visible. // store (CAS) to the lock-word that releases the lock becomes globally visible.
void Thread::muxRelease(volatile intptr_t * Lock) { void Thread::muxRelease(volatile intptr_t * Lock) {
for (;;) { for (;;) {
const intptr_t w = Atomic::cmpxchg_ptr(0, Lock, LOCKBIT); const intptr_t w = Atomic::cmpxchg((intptr_t)0, Lock, LOCKBIT);
assert(w & LOCKBIT, "invariant"); assert(w & LOCKBIT, "invariant");
if (w == LOCKBIT) return; if (w == LOCKBIT) return;
ParkEvent * const List = (ParkEvent *) (w & ~LOCKBIT); ParkEvent * const List = (ParkEvent *) (w & ~LOCKBIT);
@ -4848,7 +4847,7 @@ void Thread::muxRelease(volatile intptr_t * Lock) {
// The following CAS() releases the lock and pops the head element. // The following CAS() releases the lock and pops the head element.
// The CAS() also ratifies the previously fetched lock-word value. // The CAS() also ratifies the previously fetched lock-word value.
if (Atomic::cmpxchg_ptr (intptr_t(nxt), Lock, w) != w) { if (Atomic::cmpxchg(intptr_t(nxt), Lock, w) != w) {
continue; continue;
} }
List->OnList = 0; List->OnList = 0;


@ -61,6 +61,7 @@
#include "memory/allocation.inline.hpp" #include "memory/allocation.inline.hpp"
#include "memory/heap.hpp" #include "memory/heap.hpp"
#include "memory/metachunk.hpp" #include "memory/metachunk.hpp"
#include "memory/padded.hpp"
#include "memory/referenceType.hpp" #include "memory/referenceType.hpp"
#include "memory/universe.hpp" #include "memory/universe.hpp"
#include "memory/virtualspace.hpp" #include "memory/virtualspace.hpp"
@ -198,6 +199,8 @@ typedef HashtableEntry<InstanceKlass*, mtClass> KlassHashtableEntry;
typedef CompactHashtable<Symbol*, char> SymbolCompactHashTable; typedef CompactHashtable<Symbol*, char> SymbolCompactHashTable;
typedef RehashableHashtable<Symbol*, mtSymbol> RehashableSymbolHashtable; typedef RehashableHashtable<Symbol*, mtSymbol> RehashableSymbolHashtable;
typedef PaddedEnd<ObjectMonitor> PaddedObjectMonitor;
//-------------------------------------------------------------------------------- //--------------------------------------------------------------------------------
// VM_STRUCTS // VM_STRUCTS
// //
@ -359,7 +362,7 @@ typedef RehashableHashtable<Symbol*, mtSymbol> RehashableSymbolHashtable;
/***********************/ \ /***********************/ \
\ \
volatile_nonstatic_field(ConstantPoolCacheEntry, _indices, intx) \ volatile_nonstatic_field(ConstantPoolCacheEntry, _indices, intx) \
nonstatic_field(ConstantPoolCacheEntry, _f1, volatile Metadata*) \ volatile_nonstatic_field(ConstantPoolCacheEntry, _f1, Metadata*) \
volatile_nonstatic_field(ConstantPoolCacheEntry, _f2, intx) \ volatile_nonstatic_field(ConstantPoolCacheEntry, _f2, intx) \
volatile_nonstatic_field(ConstantPoolCacheEntry, _flags, intx) \ volatile_nonstatic_field(ConstantPoolCacheEntry, _flags, intx) \
\ \
@ -1052,7 +1055,7 @@ typedef RehashableHashtable<Symbol*, mtSymbol> RehashableSymbolHashtable;
volatile_nonstatic_field(BasicLock, _displaced_header, markOop) \ volatile_nonstatic_field(BasicLock, _displaced_header, markOop) \
nonstatic_field(BasicObjectLock, _lock, BasicLock) \ nonstatic_field(BasicObjectLock, _lock, BasicLock) \
nonstatic_field(BasicObjectLock, _obj, oop) \ nonstatic_field(BasicObjectLock, _obj, oop) \
static_ptr_volatile_field(ObjectSynchronizer, gBlockList, ObjectMonitor*) \ static_ptr_volatile_field(ObjectSynchronizer, gBlockList, PaddedObjectMonitor*) \
\ \
/*********************/ \ /*********************/ \
/* Matcher (C2 only) */ \ /* Matcher (C2 only) */ \
@ -1680,6 +1683,7 @@ typedef RehashableHashtable<Symbol*, mtSymbol> RehashableSymbolHashtable;
/************/ \ /************/ \
\ \
declare_toplevel_type(ObjectMonitor) \ declare_toplevel_type(ObjectMonitor) \
declare_toplevel_type(PaddedObjectMonitor) \
declare_toplevel_type(ObjectSynchronizer) \ declare_toplevel_type(ObjectSynchronizer) \
declare_toplevel_type(BasicLock) \ declare_toplevel_type(BasicLock) \
declare_toplevel_type(BasicObjectLock) \ declare_toplevel_type(BasicObjectLock) \
@ -2154,6 +2158,7 @@ typedef RehashableHashtable<Symbol*, mtSymbol> RehashableSymbolHashtable;
declare_toplevel_type(nmethod*) \ declare_toplevel_type(nmethod*) \
COMPILER2_PRESENT(declare_unsigned_integer_type(node_idx_t)) \ COMPILER2_PRESENT(declare_unsigned_integer_type(node_idx_t)) \
declare_toplevel_type(ObjectMonitor*) \ declare_toplevel_type(ObjectMonitor*) \
declare_toplevel_type(PaddedObjectMonitor*) \
declare_toplevel_type(oop*) \ declare_toplevel_type(oop*) \
declare_toplevel_type(OopMap**) \ declare_toplevel_type(OopMap**) \
declare_toplevel_type(OopMapCache*) \ declare_toplevel_type(OopMapCache*) \


@ -147,7 +147,7 @@ MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key, size_t* b
if (entry == NULL) return NULL; if (entry == NULL) return NULL;
// swap in the head // swap in the head
if (Atomic::cmpxchg_ptr((void*)entry, (volatile void *)&_table[index], NULL) == NULL) { if (Atomic::cmpxchg(entry, &_table[index], (MallocSiteHashtableEntry*)NULL) == NULL) {
return entry->data(); return entry->data();
} }
@ -257,3 +257,7 @@ void MallocSiteTable::AccessLock::exclusiveLock() {
} }
_lock_state = ExclusiveLock; _lock_state = ExclusiveLock;
} }
bool MallocSiteHashtableEntry::atomic_insert(MallocSiteHashtableEntry* entry) {
return Atomic::cmpxchg(entry, &_next, (MallocSiteHashtableEntry*)NULL) == NULL;
}

View File

@ -61,8 +61,8 @@ class MallocSite : public AllocationSite<MemoryCounter> {
// Malloc site hashtable entry // Malloc site hashtable entry
class MallocSiteHashtableEntry : public CHeapObj<mtNMT> { class MallocSiteHashtableEntry : public CHeapObj<mtNMT> {
private: private:
MallocSite _malloc_site; MallocSite _malloc_site;
MallocSiteHashtableEntry* _next; MallocSiteHashtableEntry* volatile _next;
public: public:
MallocSiteHashtableEntry() : _next(NULL) { } MallocSiteHashtableEntry() : _next(NULL) { }
@ -79,10 +79,7 @@ class MallocSiteHashtableEntry : public CHeapObj<mtNMT> {
// Insert an entry atomically. // Insert an entry atomically.
// Return true if the entry is inserted successfully. // Return true if the entry is inserted successfully.
// The operation can fail due to contention from other threads. // The operation can fail due to contention from other threads.
bool atomic_insert(const MallocSiteHashtableEntry* entry) { bool atomic_insert(MallocSiteHashtableEntry* entry);
return (Atomic::cmpxchg_ptr((void*)entry, (volatile void*)&_next,
NULL) == NULL);
}
void set_callsite(const MallocSite& site) { void set_callsite(const MallocSite& site) {
_malloc_site = site; _malloc_site = site;
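The two malloc-site hunks above are one change split across the .cpp and the header: _next becomes volatile-qualified, atomic_insert() moves out of line, and the untyped cmpxchg_ptr casts are replaced by a cmpxchg whose template type is pinned by the (MallocSiteHashtableEntry*)NULL compare value. A minimal standalone analogue of the same install-if-null pattern, written with std::atomic rather than HotSpot's Atomic class (EntrySketch and its members are invented names):

#include <atomic>
#include <cstdio>

struct EntrySketch {
  int data;
  // Plays the role of "MallocSiteHashtableEntry* volatile _next": the word
  // other threads race on carries an atomic (here) / volatile (HotSpot) type.
  std::atomic<EntrySketch*> next;

  explicit EntrySketch(int d) : data(d), next(nullptr) {}

  // Returns true if this thread linked 'e' in; false if another thread already did.
  bool atomic_insert(EntrySketch* e) {
    EntrySketch* expected = nullptr;   // typed null, like (MallocSiteHashtableEntry*)NULL
    return next.compare_exchange_strong(expected, e);
  }
};

int main() {
  EntrySketch head(0), a(1), b(2);
  std::printf("insert a: %d\n", head.atomic_insert(&a));  // 1: slot was empty
  std::printf("insert b: %d\n", head.atomic_insert(&b));  // 0: slot already taken
  return 0;
}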

View File

@ -68,7 +68,7 @@ class MemoryCounter VALUE_OBJ_CLASS_SPEC {
if (sz > 0) { if (sz > 0) {
// unary minus operator applied to unsigned type, result still unsigned // unary minus operator applied to unsigned type, result still unsigned
#pragma warning(suppress: 4146) #pragma warning(suppress: 4146)
Atomic::add(-sz, &_size); Atomic::sub(sz, &_size);
} }
} }
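The hunk above swaps an add of a negated unsigned value for an explicit subtraction; the negation is exactly what the suppressed MSVC warning (C4146) complains about. A tiny standalone illustration with std::atomic (the counter name is invented):

#include <atomic>
#include <cstddef>
#include <cstdio>

static std::atomic<size_t> g_committed(100);

int main() {
  size_t sz = 40;
  // Old shape: g_committed.fetch_add(-sz);  // well-defined, but negating an
  //                                         // unsigned value reads badly and
  //                                         // trips MSVC's C4146 warning.
  g_committed.fetch_sub(sz);                 // new shape: say what is meant
  std::printf("committed = %zu\n", g_committed.load());  // prints 60
  return 0;
}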

View File

@ -94,7 +94,7 @@ GCMemoryManager* MemoryManager::get_g1OldGen_memory_manager() {
instanceOop MemoryManager::get_memory_manager_instance(TRAPS) { instanceOop MemoryManager::get_memory_manager_instance(TRAPS) {
// Must do an acquire so as to force ordering of subsequent // Must do an acquire so as to force ordering of subsequent
// loads from anything _memory_mgr_obj points to or implies. // loads from anything _memory_mgr_obj points to or implies.
instanceOop mgr_obj = (instanceOop)OrderAccess::load_ptr_acquire(&_memory_mgr_obj); instanceOop mgr_obj = OrderAccess::load_acquire(&_memory_mgr_obj);
if (mgr_obj == NULL) { if (mgr_obj == NULL) {
// It's ok for more than one thread to execute the code up to the locked region. // It's ok for more than one thread to execute the code up to the locked region.
// Extra manager instances will just be gc'ed. // Extra manager instances will just be gc'ed.
@ -147,7 +147,7 @@ instanceOop MemoryManager::get_memory_manager_instance(TRAPS) {
// //
// The lock has done an acquire, so the load can't float above it, but // The lock has done an acquire, so the load can't float above it, but
// we need to do a load_acquire as above. // we need to do a load_acquire as above.
mgr_obj = (instanceOop)OrderAccess::load_ptr_acquire(&_memory_mgr_obj); mgr_obj = OrderAccess::load_acquire(&_memory_mgr_obj);
if (mgr_obj != NULL) { if (mgr_obj != NULL) {
return mgr_obj; return mgr_obj;
} }
@ -159,7 +159,7 @@ instanceOop MemoryManager::get_memory_manager_instance(TRAPS) {
// with creating the management object are visible before publishing // with creating the management object are visible before publishing
// its address. The unlock will publish the store to _memory_mgr_obj // its address. The unlock will publish the store to _memory_mgr_obj
// because it does a release first. // because it does a release first.
OrderAccess::release_store_ptr(&_memory_mgr_obj, mgr_obj); OrderAccess::release_store(&_memory_mgr_obj, mgr_obj);
} }
} }
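The memory-manager hunks keep the same double-checked publication protocol, only spelled with the typed load_acquire/release_store templates. Below is a standalone analogue of that protocol using std::atomic and std::mutex instead of HotSpot's OrderAccess and Management_lock; ManagerObj and the function name are invented for the sketch.

#include <atomic>
#include <mutex>

struct ManagerObj { int id; };           // stand-in for the instanceOop

static std::atomic<ManagerObj*> g_mgr_obj(nullptr);
static std::mutex g_mgr_lock;            // stand-in for the lock taken in the hunk

ManagerObj* get_manager_instance() {
  // Acquire: later reads through the pointer must see the constructed object.
  ManagerObj* mgr = g_mgr_obj.load(std::memory_order_acquire);
  if (mgr == nullptr) {
    std::lock_guard<std::mutex> guard(g_mgr_lock);
    // Re-check under the lock: another thread may have published first.
    mgr = g_mgr_obj.load(std::memory_order_acquire);
    if (mgr == nullptr) {
      mgr = new ManagerObj();
      mgr->id = 17;
      // Release: the construction above becomes visible before the pointer does.
      g_mgr_obj.store(mgr, std::memory_order_release);
    }
  }
  return mgr;
}

int main() {
  return get_manager_instance()->id == 17 ? 0 : 1;
}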

View File

@ -82,7 +82,7 @@ void MemoryPool::add_manager(MemoryManager* mgr) {
instanceOop MemoryPool::get_memory_pool_instance(TRAPS) { instanceOop MemoryPool::get_memory_pool_instance(TRAPS) {
// Must do an acquire so as to force ordering of subsequent // Must do an acquire so as to force ordering of subsequent
// loads from anything _memory_pool_obj points to or implies. // loads from anything _memory_pool_obj points to or implies.
instanceOop pool_obj = (instanceOop)OrderAccess::load_ptr_acquire(&_memory_pool_obj); instanceOop pool_obj = OrderAccess::load_acquire(&_memory_pool_obj);
if (pool_obj == NULL) { if (pool_obj == NULL) {
// It's ok for more than one thread to execute the code up to the locked region. // It's ok for more than one thread to execute the code up to the locked region.
// Extra pool instances will just be gc'ed. // Extra pool instances will just be gc'ed.
@ -123,7 +123,7 @@ instanceOop MemoryPool::get_memory_pool_instance(TRAPS) {
// //
// The lock has done an acquire, so the load can't float above it, // The lock has done an acquire, so the load can't float above it,
// but we need to do a load_acquire as above. // but we need to do a load_acquire as above.
pool_obj = (instanceOop)OrderAccess::load_ptr_acquire(&_memory_pool_obj); pool_obj = OrderAccess::load_acquire(&_memory_pool_obj);
if (pool_obj != NULL) { if (pool_obj != NULL) {
return pool_obj; return pool_obj;
} }
@ -135,7 +135,7 @@ instanceOop MemoryPool::get_memory_pool_instance(TRAPS) {
// with creating the pool are visible before publishing its address. // with creating the pool are visible before publishing its address.
// The unlock will publish the store to _memory_pool_obj because // The unlock will publish the store to _memory_pool_obj because
// it does a release first. // it does a release first.
OrderAccess::release_store_ptr(&_memory_pool_obj, pool_obj); OrderAccess::release_store(&_memory_pool_obj, pool_obj);
} }
} }
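In both management hunks the (instanceOop) casts disappear because the templated load_acquire deduces the pointee type from its argument and returns it. A rough standalone sketch of that shape using GCC/Clang __atomic builtins; the function names and PoolObj type are invented, and the real HotSpot declarations differ.

struct PoolObj { int kind; };            // stand-in for the instanceOop

// Deduces T from the destination, so callers get a typed pointer back
// instead of casting a void*/intptr_t result.
template <typename T>
inline T load_acquire_sketch(T volatile* p) {
  return __atomic_load_n(p, __ATOMIC_ACQUIRE);
}

template <typename T>
inline void release_store_sketch(T volatile* p, T v) {
  __atomic_store_n(p, v, __ATOMIC_RELEASE);
}

static PoolObj* volatile g_pool_obj = nullptr;

PoolObj* peek_pool() {
  return load_acquire_sketch(&g_pool_obj);      // already a PoolObj*, no cast
}

void publish_pool(PoolObj* p) {
  release_store_sketch(&g_pool_obj, p);
}

int main() {
  static PoolObj obj = { 1 };
  publish_pool(&obj);
  return peek_pool() == &obj ? 0 : 1;
}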

View File

@ -628,7 +628,7 @@ void BitMap::init_pop_count_table() {
table[i] = num_set_bits(i); table[i] = num_set_bits(i);
} }
if (!Atomic::replace_if_null(table, &_pop_count_table)) { if (Atomic::cmpxchg(table, &_pop_count_table, (BitMap::idx_t*)NULL) != NULL) {
guarantee(_pop_count_table != NULL, "invariant"); guarantee(_pop_count_table != NULL, "invariant");
FREE_C_HEAP_ARRAY(idx_t, table); FREE_C_HEAP_ARRAY(idx_t, table);
} }
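The bitmap hunk is the build-locally, publish-with-CAS, discard-on-loss idiom: every racing thread fills its own table, but only the first CAS wins and the losers free their copies. A standalone analogue with std::atomic (names invented):

#include <atomic>
#include <cstdint>

static std::atomic<uint8_t*> g_pop_count_table(nullptr);

static uint8_t num_set_bits_sketch(unsigned i) {
  uint8_t n = 0;
  for (; i != 0; i &= (i - 1)) n++;      // clear the lowest set bit each pass
  return n;
}

void init_pop_count_table_sketch() {
  if (g_pop_count_table.load(std::memory_order_acquire) != nullptr) return;

  uint8_t* table = new uint8_t[256];
  for (unsigned i = 0; i < 256; i++) table[i] = num_set_bits_sketch(i);

  uint8_t* expected = nullptr;
  if (!g_pop_count_table.compare_exchange_strong(expected, table)) {
    delete[] table;                      // another thread won the race; keep its table
  }
}

int main() {
  init_pop_count_table_sketch();
  return g_pop_count_table.load()[0xFF] == 8 ? 0 : 1;
}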

View File

@ -190,7 +190,7 @@ template <MEMFLAGS F> void BasicHashtable<F>::bulk_free_entries(BucketUnlinkCont
BasicHashtableEntry<F>* current = _free_list; BasicHashtableEntry<F>* current = _free_list;
while (true) { while (true) {
context->_removed_tail->set_next(current); context->_removed_tail->set_next(current);
BasicHashtableEntry<F>* old = (BasicHashtableEntry<F>*)Atomic::cmpxchg_ptr(context->_removed_head, &_free_list, current); BasicHashtableEntry<F>* old = Atomic::cmpxchg(context->_removed_head, &_free_list, current);
if (old == current) { if (old == current) {
break; break;
} }
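The bulk_free_entries() hunk is a classic lock-free prepend loop: link the detached chain's tail to the observed head, then CAS the new head in, retrying on contention. A standalone analogue using std::atomic (EntrySketch and the function name are invented):

#include <atomic>

struct EntrySketch { EntrySketch* next; };

static std::atomic<EntrySketch*> g_free_list(nullptr);

// Splice the detached chain [head .. tail] onto the front of the free list.
void prepend_chain(EntrySketch* head, EntrySketch* tail) {
  EntrySketch* current = g_free_list.load(std::memory_order_relaxed);
  while (true) {
    tail->next = current;                 // hook our tail to the observed head
    if (g_free_list.compare_exchange_weak(current, head)) {
      break;                              // installed: free list now starts at 'head'
    }
    // On failure compare_exchange_weak reloads 'current'; loop and re-link.
  }
}

int main() {
  static EntrySketch a = { nullptr };
  static EntrySketch b = { nullptr };
  a.next = &b;
  prepend_chain(&a, &b);
  return (g_free_list.load() == &a && b.next == nullptr) ? 0 : 1;
}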

View File

@ -78,7 +78,7 @@ template <MEMFLAGS F> inline void HashtableBucket<F>::set_entry(BasicHashtableEn
// SystemDictionary are read without locks. The new entry must be // SystemDictionary are read without locks. The new entry must be
// complete before other threads can be allowed to see it // complete before other threads can be allowed to see it
// via a store to _buckets[index]. // via a store to _buckets[index].
OrderAccess::release_store_ptr(&_entry, l); OrderAccess::release_store(&_entry, l);
} }
@ -87,7 +87,7 @@ template <MEMFLAGS F> inline BasicHashtableEntry<F>* HashtableBucket<F>::get_ent
// SystemDictionary are read without locks. The new entry must be // SystemDictionary are read without locks. The new entry must be
// complete before other threads can be allowed to see it // complete before other threads can be allowed to see it
// via a store to _buckets[index]. // via a store to _buckets[index].
return (BasicHashtableEntry<F>*) OrderAccess::load_ptr_acquire(&_entry); return OrderAccess::load_acquire(&_entry);
} }
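The two bucket accessors above form a publish/consume pair: set_entry() must release-store the fully built entry into the bucket because readers walk buckets without locks, and get_entry() must load-acquire before dereferencing whatever it sees. A compact standalone analogue with std::atomic (the Bucket/Entry sketch types are invented):

#include <atomic>

struct BucketEntrySketch {
  int hash;
  BucketEntrySketch* next;
};

struct BucketSketch {
  std::atomic<BucketEntrySketch*> _entry;

  BucketSketch() : _entry(nullptr) {}

  void set_entry(BucketEntrySketch* e) {
    // Release: e->hash and e->next are visible before the bucket pointer is.
    _entry.store(e, std::memory_order_release);
  }

  BucketEntrySketch* get_entry() const {
    // Acquire: safe to dereference whichever pointer we observe.
    return _entry.load(std::memory_order_acquire);
  }
};

int main() {
  BucketSketch bucket;
  static BucketEntrySketch e = { 42, nullptr };
  bucket.set_entry(&e);
  return bucket.get_entry()->hash == 42 ? 0 : 1;
}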

View File

@ -1269,7 +1269,7 @@ void VMError::report_and_die(int id, const char* message, const char* detail_fmt
} }
intptr_t mytid = os::current_thread_id(); intptr_t mytid = os::current_thread_id();
if (first_error_tid == -1 && if (first_error_tid == -1 &&
Atomic::cmpxchg_ptr(mytid, &first_error_tid, -1) == -1) { Atomic::cmpxchg(mytid, &first_error_tid, (intptr_t)-1) == -1) {
// Initialize time stamps to use the same base. // Initialize time stamps to use the same base.
out.time_stamp().update_to(1); out.time_stamp().update_to(1);
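The vmError hunk CASes the current thread id over the -1 sentinel so that only the first crashing thread produces the report; the (intptr_t)-1 cast pins the template type now that the untyped cmpxchg_ptr overload is gone. A standalone analogue with std::atomic (the function name is invented):

#include <atomic>
#include <cstdint>
#include <cstdio>

static std::atomic<intptr_t> g_first_error_tid(-1);

// True only for the first caller: it swaps its id over the -1 sentinel.
bool claim_error_reporting(intptr_t mytid) {
  intptr_t expected = -1;                 // typed sentinel, like (intptr_t)-1 above
  return g_first_error_tid.load() == -1 &&
         g_first_error_tid.compare_exchange_strong(expected, mytid);
}

int main() {
  std::printf("thread 7 claims: %d\n", (int)claim_error_reporting(7));  // 1
  std::printf("thread 9 claims: %d\n", (int)claim_error_reporting(9));  // 0
  return 0;
}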