8234060: Potential memory reordering problem in JfrBuffer flush mechanism
Reviewed-by: egahlin
parent ce9ff0924f
commit d713fb8aa2
@@ -24,11 +24,9 @@

#include "precompiled.hpp"
#include "jfr/recorder/storage/jfrBuffer.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/thread.inline.hpp"

static const u1* const MUTEX_CLAIM = NULL;
static const u1* const TOP_CRITICAL_SECTION = NULL;

JfrBuffer::JfrBuffer() : _next(NULL),
                         _prev(NULL),
@@ -39,14 +37,13 @@ JfrBuffer::JfrBuffer() : _next(NULL),
                         _header_size(0),
                         _size(0) {}

bool JfrBuffer::initialize(size_t header_size, size_t size, const void* id /* NULL */) {
bool JfrBuffer::initialize(size_t header_size, size_t size) {
  assert(_next == NULL, "invariant");
  assert(_identity == NULL, "invariant");
  _header_size = (u2)header_size;
  _size = (u4)(size / BytesPerWord);
  assert(_identity == NULL, "invariant");
  _identity = id;
  set_pos(start());
  set_top(start());
  assert(_next == NULL, "invariant");
  assert(free_size() == size, "invariant");
  assert(!transient(), "invariant");
  assert(!lease(), "invariant");
@@ -55,9 +52,9 @@ bool JfrBuffer::initialize(size_t header_size, size_t size, const void* id /* NULL */) {
}

void JfrBuffer::reinitialize(bool exclusion /* false */) {
  acquire_critical_section_top();
  assert(!lease(), "invariant");
  assert(!transient(), "invariant");
  set_pos(start());
  if (exclusion != excluded()) {
    // update
    if (exclusion) {
@@ -66,80 +63,43 @@ void JfrBuffer::reinitialize(bool exclusion /* false */) {
      clear_excluded();
    }
  }
  clear_retired();
  set_top(start());
}

void JfrBuffer::concurrent_reinitialization() {
  concurrent_top();
  assert(!lease(), "invariant");
  assert(!transient(), "invariant");
  set_pos(start());
  set_concurrent_top(start());
  release_critical_section_top(start());
  clear_retired();
}

size_t JfrBuffer::discard() {
  size_t discard_size = unflushed_size();
  set_top(pos());
  return discard_size;
const u1* JfrBuffer::top() const {
  return Atomic::load_acquire(&_top);
}

const u1* JfrBuffer::stable_top() const {
  const u1* current_top;
  do {
    current_top = Atomic::load(&_top);
  } while (MUTEX_CLAIM == current_top);
    current_top = top();
  } while (TOP_CRITICAL_SECTION == current_top);
  return current_top;
}

const u1* JfrBuffer::top() const {
  return _top;
}

void JfrBuffer::set_top(const u1* new_top) {
  _top = new_top;
  assert(new_top <= end(), "invariant");
  assert(new_top >= start(), "invariant");
  Atomic::release_store(&_top, new_top);
}

const u1* JfrBuffer::concurrent_top() const {
const u1* JfrBuffer::acquire_critical_section_top() const {
  do {
    const u1* current_top = stable_top();
    if (Atomic::cmpxchg(&_top, current_top, MUTEX_CLAIM) == current_top) {
    assert(current_top != TOP_CRITICAL_SECTION, "invariant");
    if (Atomic::cmpxchg(&_top, current_top, TOP_CRITICAL_SECTION) == current_top) {
      return current_top;
    }
  } while (true);
}

void JfrBuffer::set_concurrent_top(const u1* new_top) {
  assert(new_top != MUTEX_CLAIM, "invariant");
  assert(new_top <= end(), "invariant");
  assert(new_top >= start(), "invariant");
  assert(top() == MUTEX_CLAIM, "invariant");
  OrderAccess::storestore();
  _top = new_top;
}

size_t JfrBuffer::unflushed_size() const {
  return pos() - stable_top();
}

void JfrBuffer::acquire(const void* id) {
  assert(id != NULL, "invariant");
  const void* current_id;
  do {
    current_id = Atomic::load(&_identity);
  } while (current_id != NULL || Atomic::cmpxchg(&_identity, current_id, id) != current_id);
}

bool JfrBuffer::try_acquire(const void* id) {
  assert(id != NULL, "invariant");
  const void* const current_id = Atomic::load(&_identity);
  return current_id == NULL && Atomic::cmpxchg(&_identity, current_id, id) == current_id;
}

void JfrBuffer::release() {
  OrderAccess::storestore();
  _identity = NULL;
void JfrBuffer::release_critical_section_top(const u1* new_top) {
  assert(new_top != TOP_CRITICAL_SECTION, "invariant");
  assert(top() == TOP_CRITICAL_SECTION, "invariant");
  set_top(new_top);
}

bool JfrBuffer::acquired_by(const void* id) const {
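The hunk above replaces concurrent_top()/set_concurrent_top() with an explicitly named critical section over _top: a claimant CASes the TOP_CRITICAL_SECTION sentinel (NULL) into _top, works on the observed value, and publishes a new top with release semantics, while other readers spin in stable_top() until the sentinel is gone. A minimal stand-alone sketch of that protocol, written with std::atomic rather than HotSpot's Atomic/OrderAccess API (MiniBuffer and TOP_CS are illustrative names, not part of the patch):

    // Illustrative sketch only (not part of the patch).
    #include <atomic>
    #include <cstdint>

    static const uint8_t* const TOP_CS = nullptr;  // sentinel, like TOP_CRITICAL_SECTION

    struct MiniBuffer {
      uint8_t storage[64];
      std::atomic<const uint8_t*> top;

      MiniBuffer() : top(&storage[0]) {}

      // Wait until no thread holds the critical section, then return the observed top.
      const uint8_t* stable_top() const {
        const uint8_t* t;
        do {
          t = top.load(std::memory_order_acquire);   // pairs with the release store below
        } while (t == TOP_CS);
        return t;
      }

      // Enter the critical section: install the sentinel with a CAS, return the prior top.
      const uint8_t* acquire_critical_section_top() {
        for (;;) {
          const uint8_t* current = stable_top();
          if (top.compare_exchange_strong(current, TOP_CS, std::memory_order_acq_rel)) {
            return current;                          // other claimants now spin in stable_top()
          }
        }
      }

      // Leave the critical section by publishing the new top with release semantics.
      void release_critical_section_top(const uint8_t* new_top) {
        top.store(new_top, std::memory_order_release);
      }
    };
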
@@ -150,6 +110,25 @@ bool JfrBuffer::acquired_by_self() const {
  return acquired_by(Thread::current());
}

void JfrBuffer::acquire(const void* id) {
  assert(id != NULL, "invariant");
  const void* current_id;
  do {
    current_id = identity();
  } while (current_id != NULL || Atomic::cmpxchg(&_identity, current_id, id) != current_id);
}

bool JfrBuffer::try_acquire(const void* id) {
  assert(id != NULL, "invariant");
  const void* const current_id = identity();
  return current_id == NULL && Atomic::cmpxchg(&_identity, current_id, id) == current_id;
}

void JfrBuffer::release() {
  assert(identity() != NULL, "invariant");
  Atomic::release_store(&_identity, (const void*)NULL);
}

#ifdef ASSERT
static bool validate_to(const JfrBuffer* const to, size_t size) {
  assert(to != NULL, "invariant");
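The acquire()/try_acquire()/release() changes above turn buffer ownership into an acquire/release handshake on _identity: identity() is now an acquire load and release() a release store, so the CAS that claims ownership synchronizes with the previous owner's release. A stand-alone sketch of that handshake with std::atomic (MiniIdentity is an illustrative name, not part of the patch):

    // Illustrative sketch only (not part of the patch).
    #include <atomic>

    struct MiniIdentity {
      std::atomic<const void*> identity{nullptr};

      // Claim ownership iff currently unowned; acquire pairs with the owner's release().
      bool try_acquire(const void* id) {
        const void* expected = nullptr;
        return identity.compare_exchange_strong(expected, id, std::memory_order_acquire);
      }

      // Spin until the current owner releases, then claim.
      void acquire(const void* id) {
        while (!try_acquire(id)) { }
      }

      // Drop ownership; release publishes everything the owner wrote to the buffer.
      void release() {
        identity.store(nullptr, std::memory_order_release);
      }
    };
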
@@ -158,39 +137,40 @@ static bool validate_to(const JfrBuffer* const to, size_t size) {
  return true;
}

static bool validate_concurrent_this(const JfrBuffer* const t, size_t size) {
  assert(t->top() == MUTEX_CLAIM, "invariant");
  return true;
}

static bool validate_this(const JfrBuffer* const t, size_t size) {
  assert(t->top() + size <= t->pos(), "invariant");
  assert(t->acquired_by_self(), "invariant");
  assert(t->top() == TOP_CRITICAL_SECTION, "invariant");
  return true;
}
#endif // ASSERT

void JfrBuffer::move(JfrBuffer* const to, size_t size) {
  assert(validate_to(to, size), "invariant");
  const u1* const current_top = acquire_critical_section_top();
  assert(validate_this(this, size), "invariant");
  const u1* current_top = top();
  assert(current_top != NULL, "invariant");
  memcpy(to->pos(), current_top, size);
  to->set_pos(size);
  const size_t actual_size = pos() - current_top;
  assert(actual_size <= size, "invariant");
  if (actual_size > 0) {
    memcpy(to->pos(), current_top, actual_size);
    to->set_pos(actual_size);
  }
  to->release();
  set_top(current_top + size);
  set_pos(start());
  release_critical_section_top(start());
}

void JfrBuffer::concurrent_move_and_reinitialize(JfrBuffer* const to, size_t size) {
  assert(validate_to(to, size), "invariant");
  const u1* current_top = concurrent_top();
  assert(validate_concurrent_this(this, size), "invariant");
  const size_t actual_size = MIN2(size, (size_t)(pos() - current_top));
  assert(actual_size <= size, "invariant");
  memcpy(to->pos(), current_top, actual_size);
  to->set_pos(actual_size);
  set_pos(start());
  to->release();
  set_concurrent_top(start());
size_t JfrBuffer::discard() {
  const u1* const position = pos();
  // stable_top() provides acquire semantics for pos()
  const u1* const current_top = stable_top();
  set_top(position);
  return position - current_top;
}

size_t JfrBuffer::unflushed_size() const {
  const u1* const position = pos();
  // stable_top() provides acquire semantics for pos()
  return position - stable_top();
}

enum FLAG {
@@ -200,67 +180,93 @@ enum FLAG {
  EXCLUDED = 8
};

inline u2 load(const volatile u2* flags) {
  assert(flags != NULL, "invariant");
  return Atomic::load_acquire(flags);
}

inline void set(u2* flags, FLAG flag) {
  assert(flags != NULL, "invariant");
  OrderAccess::storestore();
  *flags |= (u1)flag;
}

inline void clear(u2* flags, FLAG flag) {
  assert(flags != NULL, "invariant");
  OrderAccess::storestore();
  *flags ^= (u1)flag;
}

inline bool test(const u2* flags, FLAG flag) {
  return (u1)flag == (load(flags) & (u1)flag);
}

bool JfrBuffer::transient() const {
  return (u1)TRANSIENT == (_flags & (u1)TRANSIENT);
  return test(&_flags, TRANSIENT);
}

void JfrBuffer::set_transient() {
  _flags |= (u1)TRANSIENT;
  assert(acquired_by_self(), "invariant");
  set(&_flags, TRANSIENT);
  assert(transient(), "invariant");
}

void JfrBuffer::clear_transient() {
  if (transient()) {
    _flags ^= (u1)TRANSIENT;
    assert(acquired_by_self(), "invariant");
    clear(&_flags, TRANSIENT);
  }
  assert(!transient(), "invariant");
}

bool JfrBuffer::lease() const {
  return (u1)LEASE == (_flags & (u1)LEASE);
  return test(&_flags, LEASE);
}

void JfrBuffer::set_lease() {
  _flags |= (u1)LEASE;
  assert(acquired_by_self(), "invariant");
  set(&_flags, LEASE);
  assert(lease(), "invariant");
}

void JfrBuffer::clear_lease() {
  if (lease()) {
    _flags ^= (u1)LEASE;
    assert(acquired_by_self(), "invariant");
    clear(&_flags, LEASE);
  }
  assert(!lease(), "invariant");
}

bool JfrBuffer::excluded() const {
  return (u1)EXCLUDED == (_flags & (u1)EXCLUDED);
  return test(&_flags, EXCLUDED);
}

void JfrBuffer::set_excluded() {
  _flags |= (u1)EXCLUDED;
  assert(acquired_by_self(), "invariant");
  set(&_flags, EXCLUDED);
  assert(excluded(), "invariant");
}

void JfrBuffer::clear_excluded() {
  if (excluded()) {
    OrderAccess::storestore();
    _flags ^= (u1)EXCLUDED;
    assert(identity() != NULL, "invariant");
    clear(&_flags, EXCLUDED);
  }
  assert(!excluded(), "invariant");
}

bool JfrBuffer::retired() const {
  return (_flags & (u1)RETIRED) == (u1)RETIRED;
  return test(&_flags, RETIRED);
}

void JfrBuffer::set_retired() {
  OrderAccess::storestore();
  _flags |= (u1)RETIRED;
  assert(acquired_by_self(), "invariant");
  set(&_flags, RETIRED);
}

void JfrBuffer::clear_retired() {
  if (retired()) {
    OrderAccess::storestore();
    _flags ^= (u1)RETIRED;
    assert(identity() != NULL, "invariant");
    clear(&_flags, RETIRED);
  }
}
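The flag helpers above centralize the bit manipulation: readers go through an acquire load (load()/test()), while the owner-only writers (note the acquired_by_self() asserts) order their update with a storestore barrier before a plain read-modify-write. A stand-alone sketch of the same idea with std::atomic, using a release store in place of the barrier (MiniFlags and the *_BIT names are illustrative, not part of the patch):

    // Illustrative sketch only (not part of the patch).
    #include <atomic>
    #include <cstdint>

    enum MiniFlag : uint16_t { RETIRED_BIT = 1, TRANSIENT_BIT = 2, LEASE_BIT = 4, EXCLUDED_BIT = 8 };

    struct MiniFlags {
      std::atomic<uint16_t> flags{0};

      // Any thread: acquire-load and test a bit (mirrors load() + test()).
      bool test(MiniFlag f) const {
        return (flags.load(std::memory_order_acquire) & f) == f;
      }

      // Owner only: set a bit and publish it.
      void set(MiniFlag f) {
        flags.store(static_cast<uint16_t>(flags.load(std::memory_order_relaxed) | f),
                    std::memory_order_release);
      }

      // Owner only: clear a bit and publish it (the patch toggles with xor under a tested
      // precondition; a mask is used here for simplicity).
      void clear(MiniFlag f) {
        flags.store(static_cast<uint16_t>(flags.load(std::memory_order_relaxed) & ~f),
                    std::memory_order_release);
      }
    };
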
@@ -26,32 +26,46 @@
#define SHARE_JFR_RECORDER_STORAGE_JFRBUFFER_HPP

#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"

//
// Represents a piece of committed memory.
//
// u1* _pos <-- next store position
// Use acquire() and/or try_acquire() for exclusive access
// to the buffer (cas identity). This is a precondition
// for attempting stores.
//
// u1* _pos <-- last committed position
// u1* _top <-- next unflushed position
//
// const void* _identity <-- acquired by
//
// Must be the owner before attempting stores.
// Use acquire() and/or try_acquire() for exclusive access
// to the (entire) buffer (cas identity).
//
// Stores to the buffer should uphold transactional semantics.
// A new _pos must be updated only after all intended stores have completed.
// Stores must uphold transactional semantics. This means that _pos
// must be updated only after all intended stores have completed already.
// The relation between _pos and _top must hold atomically,
// e.g. the delta must always be fully parsable.
// _top can move concurrently by other threads but is always <= _pos.
//
// Memory ordering:
//
//  Method                      Owner thread              Other threads
//  ----------------------------------------------------------------------
//  acquire()                   Acquire semantics (cas)   Acquire semantics (cas)
//  try_acquire()               Acquire semantics (cas)   Acquire semantics (cas)
//  release()                   Release semantics         Release semantics
//  pos()                       Plain load                Acquire semantics needed at call sites
//  set_pos()                   Release semantics         N/A
//  top()                       Acquire semantics         Acquire semantics
//  set_top()                   Release semantics         Release semantics
//  acquire_crit_sec_top()      Acquire semantics (cas)   Acquire semantics (cas)
//  release_crit_sec_top()      Release semantics         Release semantics
//

class JfrBuffer {
 private:
  JfrBuffer* _next;
  JfrBuffer* _prev;
  const void* volatile _identity;
  const void* _identity;
  u1* _pos;
  mutable const u1* volatile _top;
  mutable const u1* _top;
  u2 _flags;
  u2 _header_size;
  u4 _size;
@@ -60,10 +74,9 @@ class JfrBuffer {

 public:
  JfrBuffer();
  bool initialize(size_t header_size, size_t size, const void* id = NULL);
  bool initialize(size_t header_size, size_t size);
  void reinitialize(bool exclusion = false);
  void concurrent_reinitialization();
  size_t discard();

  JfrBuffer* next() const {
    return _next;
  }
@@ -92,6 +105,8 @@ class JfrBuffer {
    return start() + size();
  }

  // If pos() methods are invoked by a thread that is not the owner,
  // then acquire semantics must be ensured at the call site.
  const u1* pos() const {
    return _pos;
  }
@@ -101,48 +116,45 @@ class JfrBuffer {
  }

  u1** pos_address() {
    return (u1**)&_pos;
    return &_pos;
  }

  void set_pos(u1* new_pos) {
    assert(new_pos <= end(), "invariant");
    _pos = new_pos;
    Atomic::release_store(&_pos, new_pos);
  }

  void set_pos(size_t size) {
    assert(_pos + size <= end(), "invariant");
    _pos += size;
    set_pos(pos() + size);
  }

  const u1* top() const;
  void set_top(const u1* new_top);
  const u1* concurrent_top() const;
  void set_concurrent_top(const u1* new_top);

  size_t header_size() const {
    return _header_size;
  }
  // mutual exclusion
  const u1* acquire_critical_section_top() const;
  void release_critical_section_top(const u1* new_top);

  size_t size() const {
    return _size * BytesPerWord;
  }

  size_t total_size() const {
    return header_size() + size();
    return _header_size + size();
  }

  size_t free_size() const {
    return end() - pos();
    return end() - Atomic::load_acquire(&_pos);
  }

  size_t unflushed_size() const;

  bool empty() const {
    return pos() == start();
    return Atomic::load_acquire(&_pos) == start();
  }

  const void* identity() const {
    return _identity;
    return Atomic::load_acquire(&_identity);
  }

  void acquire(const void* id);
@@ -151,8 +163,8 @@ class JfrBuffer {
  bool acquired_by_self() const;
  void release();

  size_t discard();
  void move(JfrBuffer* const to, size_t size);
  void concurrent_move_and_reinitialize(JfrBuffer* const to, size_t size);

  bool transient() const;
  void set_transient();
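The memory-ordering table in the JfrBuffer header comment above pairs every release store with an acquire load: set_pos()/set_top() publish with release semantics and pos()/top() observe with acquire semantics, so a fully written event is visible before its new position is. A stand-alone sketch of that publication rule with std::atomic (MiniEventBuffer is an illustrative name, not part of the patch):

    // Illustrative sketch only (not part of the patch).
    #include <atomic>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    struct MiniEventBuffer {
      uint8_t storage[256];
      std::atomic<uint8_t*> pos{storage};             // last committed position

      // Owner thread: write the payload, then commit by advancing pos with release semantics.
      void commit(const void* payload, size_t size) {
        uint8_t* p = pos.load(std::memory_order_relaxed);   // the owner is the only writer of pos
        std::memcpy(p, payload, size);                       // plain stores of event data
        pos.store(p + size, std::memory_order_release);      // data happens-before the new pos
      }

      // Non-owner thread (e.g. a flusher): after the acquire load, bytes below pos are visible.
      size_t committed_bytes() const {
        return static_cast<size_t>(pos.load(std::memory_order_acquire) - storage);
      }
    };
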
@@ -426,9 +426,11 @@ inline bool ReleaseOp<Mspace>::process(typename Mspace::Type* t) {
    return true;
  }
  t->reinitialize();
  assert(t->empty(), "invariant");
  assert(!t->retired(), "invariant");
  t->release(); // publish
  if (t->identity() != NULL) {
    assert(t->empty(), "invariant");
    assert(!t->retired(), "invariant");
    t->release(); // publish
  }
  return true;
}
@@ -234,7 +234,7 @@ static void write_data_loss_event(JfrBuffer* buffer, u8 unflushed_size, Thread*
static void write_data_loss(BufferPtr buffer, Thread* thread) {
  assert(buffer != NULL, "invariant");
  const size_t unflushed_size = buffer->unflushed_size();
  buffer->concurrent_reinitialization();
  buffer->reinitialize();
  if (unflushed_size == 0) {
    return;
  }
@@ -249,7 +249,7 @@ bool JfrStorage::flush_regular_buffer(BufferPtr buffer, Thread* thread) {
  assert(!buffer->transient(), "invariant");
  const size_t unflushed_size = buffer->unflushed_size();
  if (unflushed_size == 0) {
    buffer->concurrent_reinitialization();
    buffer->reinitialize();
    assert(buffer->empty(), "invariant");
    return true;
  }
@@ -272,7 +272,7 @@ bool JfrStorage::flush_regular_buffer(BufferPtr buffer, Thread* thread) {
  }
  assert(promotion_buffer->acquired_by_self(), "invariant");
  assert(promotion_buffer->free_size() >= unflushed_size, "invariant");
  buffer->concurrent_move_and_reinitialize(promotion_buffer, unflushed_size);
  buffer->move(promotion_buffer, unflushed_size);
  assert(buffer->empty(), "invariant");
  return true;
}
@@ -313,7 +313,7 @@ static void handle_registration_failure(BufferPtr buffer) {
  assert(buffer != NULL, "invariant");
  assert(buffer->retired(), "invariant");
  const size_t unflushed_size = buffer->unflushed_size();
  buffer->concurrent_reinitialization();
  buffer->reinitialize();
  log_registration_failure(unflushed_size);
}

@@ -388,7 +388,7 @@ void JfrStorage::release(BufferPtr buffer, Thread* thread) {
  assert(!buffer->retired(), "invariant");
  if (!buffer->empty()) {
    if (!flush_regular_buffer(buffer, thread)) {
      buffer->concurrent_reinitialization();
      buffer->reinitialize();
    }
  }
  assert(buffer->empty(), "invariant");
@@ -431,6 +431,7 @@ void JfrStorage::discard_oldest(Thread* thread) {
    assert(oldest_age_node->identity() == NULL, "invariant");
    BufferPtr const buffer = oldest_age_node->retired_buffer();
    assert(buffer->retired(), "invariant");
    assert(buffer->identity() != NULL, "invariant");
    discarded_size += buffer->discard();
    assert(buffer->unflushed_size() == 0, "invariant");
    num_full_post_discard = control().decrement_full();
@@ -26,6 +26,7 @@
#define SHARE_JFR_RECORDER_STORAGE_JFRSTORAGEUTILS_INLINE_HPP

#include "jfr/recorder/storage/jfrStorageUtils.hpp"
#include "runtime/atomic.hpp"
#include "runtime/thread.inline.hpp"

template <typename T>
@@ -43,29 +44,36 @@ inline bool DefaultDiscarder<T>::discard(T* t, const u1* data, size_t size) {
  return true;
}

template <typename Type>
inline size_t get_unflushed_size(const u1* top, Type* t) {
  assert(t != NULL, "invariant");
  return Atomic::load_acquire(t->pos_address()) - top;
}

template <typename Operation>
inline bool ConcurrentWriteOp<Operation>::process(typename Operation::Type* t) {
  const u1* const current_top = t->concurrent_top();
  const size_t unflushed_size = t->pos() - current_top;
  // acquire_critical_section_top() must be read before pos() for stable access
  const u1* const top = t->acquire_critical_section_top();
  const size_t unflushed_size = get_unflushed_size(top, t);
  if (unflushed_size == 0) {
    t->set_concurrent_top(current_top);
    t->release_critical_section_top(top);
    return true;
  }
  const bool result = _operation.write(t, current_top, unflushed_size);
  t->set_concurrent_top(current_top + unflushed_size);
  const bool result = _operation.write(t, top, unflushed_size);
  t->release_critical_section_top(top + unflushed_size);
  return result;
}

template <typename Operation>
inline bool MutexedWriteOp<Operation>::process(typename Operation::Type* t) {
  assert(t != NULL, "invariant");
  const u1* const current_top = t->top();
  const size_t unflushed_size = t->pos() - current_top;
  const u1* const top = t->top();
  const size_t unflushed_size = get_unflushed_size(top, t);
  if (unflushed_size == 0) {
    return true;
  }
  const bool result = _operation.write(t, current_top, unflushed_size);
  t->set_top(current_top + unflushed_size);
  const bool result = _operation.write(t, top, unflushed_size);
  t->set_top(top + unflushed_size);
  return result;
}

@@ -94,19 +102,19 @@ inline bool ExclusiveOp<Operation>::process(typename Operation::Type* t) {
template <typename Operation>
inline bool DiscardOp<Operation>::process(typename Operation::Type* t) {
  assert(t != NULL, "invariant");
  const u1* const current_top = _mode == concurrent ? t->concurrent_top() : t->top();
  const size_t unflushed_size = t->pos() - current_top;
  const u1* const top = _mode == concurrent ? t->acquire_critical_section_top() : t->top();
  const size_t unflushed_size = get_unflushed_size(top, t);
  if (unflushed_size == 0) {
    if (_mode == concurrent) {
      t->set_concurrent_top(current_top);
      t->release_critical_section_top(top);
    }
    return true;
  }
  const bool result = _operation.discard(t, current_top, unflushed_size);
  const bool result = _operation.discard(t, top, unflushed_size);
  if (_mode == concurrent) {
    t->set_concurrent_top(current_top + unflushed_size);
    t->release_critical_section_top(top + unflushed_size);
  } else {
    t->set_top(current_top + unflushed_size);
    t->set_top(top + unflushed_size);
  }
  return result;
}
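The operations above all follow the same read order: obtain top first (via top() or by claiming the critical section), and only then read the position through get_unflushed_size(), which acquire-loads it, so the computed [top, pos) delta is stable and never negative. A stand-alone sketch of that ordering with std::atomic (MiniFlushView and mini_process are illustrative names, not part of the patch):

    // Illustrative sketch only (not part of the patch).
    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    struct MiniFlushView {
      std::atomic<const uint8_t*> top;
      std::atomic<const uint8_t*> pos;
    };

    // Flush whatever is committed but not yet flushed; returns the number of bytes handled.
    inline size_t mini_process(MiniFlushView& b) {
      const uint8_t* t = b.top.load(std::memory_order_acquire);   // read top before pos
      const uint8_t* p = b.pos.load(std::memory_order_acquire);   // pos was published with release
      const size_t unflushed = static_cast<size_t>(p - t);
      if (unflushed != 0) {
        // ... hand [t, t + unflushed) to the writer operation here ...
        b.top.store(t + unflushed, std::memory_order_release);    // publish the new top
      }
      return unflushed;
    }
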
@@ -254,7 +254,8 @@ public final class EventWriter {
            return false;
        }
        startPosition = currentPosition;
        unsafe.putAddress(startPositionAddress, startPosition);
        unsafe.storeStoreFence();
        unsafe.putAddress(startPositionAddress, currentPosition);
        // the event is now committed
        if (flushOnEnd) {
            flushOnEnd = flush();