8248485: Poor scalability in JfrCheckpointManager when using many threads after JDK-8242008
Reviewed-by: egahlin
commit abc55dea7e
parent eb1bacc71b
Changed paths: src/hotspot/share/jfr/recorder/{checkpoint, storage, stringpool}, src/hotspot/share/jfr/utilities
@@ -35,6 +35,7 @@
 #include "jfr/recorder/jfrRecorder.hpp"
 #include "jfr/recorder/repository/jfrChunkWriter.hpp"
 #include "jfr/recorder/service/jfrOptionSet.hpp"
+#include "jfr/recorder/storage/jfrEpochStorage.inline.hpp"
 #include "jfr/recorder/storage/jfrMemorySpace.inline.hpp"
 #include "jfr/recorder/storage/jfrStorageUtils.inline.hpp"
 #include "jfr/support/jfrKlassUnloading.hpp"
@@ -91,50 +92,44 @@ void JfrCheckpointManager::destroy() {
 }
 
 JfrCheckpointManager::JfrCheckpointManager(JfrChunkWriter& cw) :
-  _mspace(NULL),
+  _global_mspace(NULL),
+  _thread_local_mspace(NULL),
   _chunkwriter(cw) {}
 
 JfrCheckpointManager::~JfrCheckpointManager() {
   JfrTraceIdLoadBarrier::destroy();
   JfrTypeManager::destroy();
-  delete _mspace;
+  delete _global_mspace;
+  delete _thread_local_mspace;
 }
 
-static const size_t buffer_count = 2;
-static const size_t buffer_size = 512 * K;
+static const size_t global_buffer_prealloc_count = 2;
+static const size_t global_buffer_size = 512 * K;
 
-static JfrCheckpointMspace* allocate_mspace(size_t min_elem_size,
-                                            size_t free_list_cache_count_limit,
-                                            size_t cache_prealloc_count,
-                                            bool prealloc_to_free_list,
-                                            JfrCheckpointManager* mgr) {
-  return create_mspace<JfrCheckpointMspace, JfrCheckpointManager>(min_elem_size,
-                                                                  free_list_cache_count_limit,
-                                                                  cache_prealloc_count,
-                                                                  prealloc_to_free_list,
-                                                                  mgr);
-}
+static const size_t thread_local_buffer_prealloc_count = 16;
+static const size_t thread_local_buffer_size = 128;
 
 bool JfrCheckpointManager::initialize() {
-  assert(_mspace == NULL, "invariant");
-  _mspace = allocate_mspace(buffer_size, 0, 0, false, this); // post-pone preallocation
-  if (_mspace == NULL) {
+  assert(_global_mspace == NULL, "invariant");
+  _global_mspace = create_mspace<JfrCheckpointMspace, JfrCheckpointManager>(global_buffer_size, 0, 0, false, this); // post-pone preallocation
+  if (_global_mspace == NULL) {
     return false;
   }
   // preallocate buffer count to each of the epoch live lists
-  for (size_t i = 0; i < buffer_count * 2; ++i) {
-    Buffer* const buffer = mspace_allocate(buffer_size, _mspace);
-    _mspace->add_to_live_list(buffer, i % 2 == 0);
+  for (size_t i = 0; i < global_buffer_prealloc_count * 2; ++i) {
+    Buffer* const buffer = mspace_allocate(global_buffer_size, _global_mspace);
+    _global_mspace->add_to_live_list(buffer, i % 2 == 0);
   }
-  assert(_mspace->free_list_is_empty(), "invariant");
-  return JfrTypeManager::initialize() && JfrTraceIdLoadBarrier::initialize();
-}
+  assert(_global_mspace->free_list_is_empty(), "invariant");
 
-void JfrCheckpointManager::register_full(BufferPtr buffer, Thread* thread) {
-  // nothing here at the moment
-  assert(buffer != NULL, "invariant");
-  assert(buffer->acquired_by(thread), "invariant");
-  assert(buffer->retired(), "invariant");
+  assert(_thread_local_mspace == NULL, "invariant");
+  _thread_local_mspace = new JfrThreadLocalCheckpointMspace();
+  if (_thread_local_mspace == NULL || !_thread_local_mspace->initialize(thread_local_buffer_size,
+                                                                        JFR_MSPACE_UNLIMITED_CACHE_SIZE,
+                                                                        thread_local_buffer_prealloc_count)) {
+    return false;
+  }
+  return JfrTypeManager::initialize() && JfrTraceIdLoadBarrier::initialize();
 }
 
 #ifdef ASSERT
@@ -149,15 +144,28 @@ static void assert_release(const BufferPtr buffer) {
   assert(buffer->lease(), "invariant");
   assert(buffer->acquired_by_self(), "invariant");
 }
 
+static void assert_retired(const BufferPtr buffer, Thread* thread) {
+  assert(buffer != NULL, "invariant");
+  assert(buffer->acquired_by(thread), "invariant");
+  assert(buffer->retired(), "invariant");
+}
 #endif // ASSERT
 
-static BufferPtr lease(size_t size, JfrCheckpointMspace* mspace, size_t retry_count, Thread* thread, bool previous_epoch) {
+void JfrCheckpointManager::register_full(BufferPtr buffer, Thread* thread) {
+  DEBUG_ONLY(assert_retired(buffer, thread);)
+  // nothing here at the moment
+}
+
+BufferPtr JfrCheckpointManager::lease(Thread* thread, bool previous_epoch /* false */, size_t size /* 0 */) {
+  JfrCheckpointMspace* const mspace = instance()._global_mspace;
   assert(mspace != NULL, "invariant");
   static const size_t max_elem_size = mspace->min_element_size(); // min is max
   BufferPtr buffer;
   if (size <= max_elem_size) {
-    buffer = mspace_acquire_lease_with_retry(size, mspace, retry_count, thread, previous_epoch);
+    buffer = mspace_acquire_live(size, mspace, thread, previous_epoch);
     if (buffer != NULL) {
+      buffer->set_lease();
       DEBUG_ONLY(assert_lease(buffer);)
       return buffer;
     }
@@ -167,54 +175,70 @@ static BufferPtr lease(size_t size, JfrCheckpointMspace* mspace, size_t retry_co
   return buffer;
 }
 
-static const size_t lease_retry = 100;
+const u1 thread_local_context = 1;
 
-BufferPtr JfrCheckpointManager::lease(Thread* thread, bool previous_epoch /* false */, size_t size /* 0 */) {
-  return ::lease(size, instance()._mspace, lease_retry, thread, previous_epoch);
+static bool is_thread_local(JfrBuffer* buffer) {
+  assert(buffer != NULL, "invariant");
+  return buffer->context() == thread_local_context;
 }
 
-bool JfrCheckpointManager::lookup(BufferPtr old) const {
-  assert(old != NULL, "invariant");
-  return !_mspace->in_current_epoch_list(old);
-}
-
-BufferPtr JfrCheckpointManager::lease(BufferPtr old, Thread* thread, size_t size /* 0 */) {
-  assert(old != NULL, "invariant");
-  return ::lease(size, instance()._mspace, lease_retry, thread, instance().lookup(old));
+static void retire(JfrBuffer* buffer) {
+  DEBUG_ONLY(assert_release(buffer);)
+  buffer->clear_lease();
+  buffer->set_retired();
 }
 
 /*
- * If the buffer was a lease, release back.
- *
  * The buffer is effectively invalidated for the thread post-return,
  * and the caller should take means to ensure that it is not referenced.
  */
-static void release(BufferPtr buffer, Thread* thread) {
+static void release(JfrBuffer* buffer) {
   DEBUG_ONLY(assert_release(buffer);)
-  buffer->clear_lease();
-  if (buffer->transient()) {
-    buffer->set_retired();
+  if (is_thread_local(buffer)) {
+    retire(buffer);
   } else {
+    buffer->clear_lease();
     buffer->release();
   }
 }
 
+BufferPtr JfrCheckpointManager::acquire_thread_local(size_t size, Thread* thread) {
+  assert(thread != NULL, "invariant");
+  JfrBuffer* const buffer = instance()._thread_local_mspace->acquire(size, thread);
+  assert(buffer != NULL, "invariant");
+  assert(buffer->free_size() >= size, "invariant");
+  buffer->set_context(thread_local_context);
+  assert(is_thread_local(buffer), "invariant");
+  buffer->set_lease();
+  return buffer;
+}
+
+BufferPtr JfrCheckpointManager::lease_thread_local(Thread* thread, size_t size /* 0 */) {
+  JfrBuffer* const buffer = acquire_thread_local(size, thread);
+  DEBUG_ONLY(assert_lease(buffer);)
+  return buffer;
+}
+
+BufferPtr JfrCheckpointManager::lease(BufferPtr old, Thread* thread, size_t size) {
+  assert(old != NULL, "invariant");
+  return is_thread_local(old) ? acquire_thread_local(size, thread) :
+                                lease(thread, instance()._global_mspace->in_previous_epoch_list(old), size);
+}
+
 BufferPtr JfrCheckpointManager::flush(BufferPtr old, size_t used, size_t requested, Thread* thread) {
   assert(old != NULL, "invariant");
   assert(old->lease(), "invariant");
   if (0 == requested) {
     // indicates a lease is being returned
-    release(old, thread);
+    release(old);
+    // signal completion of a new checkpoint
     set_constant_pending();
     return NULL;
   }
-  // migration of in-flight information
-  BufferPtr const new_buffer = lease(old, thread, used + requested);
-  if (new_buffer != NULL) {
-    migrate_outstanding_writes(old, new_buffer, used, requested);
-  }
-  release(old, thread);
-  return new_buffer; // might be NULL
+  BufferPtr new_buffer = lease(old, thread, used + requested);
+  assert(new_buffer != NULL, "invariant");
+  migrate_outstanding_writes(old, new_buffer, used, requested);
+  retire(old);
+  return new_buffer;
 }
 
 // offsets into the JfrCheckpointEntry
@@ -311,7 +335,7 @@ class CheckpointWriteOp {
 
 typedef CheckpointWriteOp<JfrCheckpointManager::Buffer> WriteOperation;
 typedef MutexedWriteOp<WriteOperation> MutexedWriteOperation;
-typedef ReleaseOpWithExcision<JfrCheckpointMspace, JfrCheckpointMspace::LiveList> ReleaseOperation;
+typedef ReleaseWithExcisionOp<JfrCheckpointMspace, JfrCheckpointMspace::LiveList> ReleaseOperation;
 typedef CompositeOperation<MutexedWriteOperation, ReleaseOperation> WriteReleaseOperation;
 
 void JfrCheckpointManager::begin_epoch_shift() {
@@ -328,12 +352,13 @@ void JfrCheckpointManager::end_epoch_shift() {
 
 size_t JfrCheckpointManager::write() {
   DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(Thread::current()));
-  assert(_mspace->free_list_is_empty(), "invariant");
   WriteOperation wo(_chunkwriter);
   MutexedWriteOperation mwo(wo);
-  ReleaseOperation ro(_mspace, _mspace->live_list(true));
+  _thread_local_mspace->iterate(mwo, true); // previous epoch list
+  assert(_global_mspace->free_list_is_empty(), "invariant");
+  ReleaseOperation ro(_global_mspace, _global_mspace->live_list(true));
   WriteReleaseOperation wro(&mwo, &ro);
-  process_live_list(wro, _mspace, true);
+  process_live_list(wro, _global_mspace, true); // previous epoch list
   return wo.processed();
 }
 
@@ -344,10 +369,11 @@ size_t JfrCheckpointManager::clear() {
   JfrTraceIdLoadBarrier::clear();
   clear_type_set();
   DiscardOperation discard_operation(mutexed); // mutexed discard mode
-  ReleaseOperation ro(_mspace, _mspace->live_list(true));
+  _thread_local_mspace->iterate(discard_operation, true); // previous epoch list
+  ReleaseOperation ro(_global_mspace, _global_mspace->live_list(true));
   DiscardReleaseOperation discard_op(&discard_operation, &ro);
-  assert(_mspace->free_list_is_empty(), "invariant");
-  process_live_list(discard_op, _mspace, true); // previous epoch list
+  assert(_global_mspace->free_list_is_empty(), "invariant");
+  process_live_list(discard_op, _global_mspace, true); // previous epoch list
   return discard_operation.elements();
 }
 
@@ -451,8 +477,9 @@ size_t JfrCheckpointManager::flush_type_set() {
   if (is_constant_pending()) {
     WriteOperation wo(_chunkwriter);
     MutexedWriteOperation mwo(wo);
-    assert(_mspace->live_list_is_nonempty(), "invariant");
-    process_live_list(mwo, _mspace);
+    _thread_local_mspace->iterate(mwo); // current epoch list
+    assert(_global_mspace->live_list_is_nonempty(), "invariant");
+    process_live_list(mwo, _global_mspace); // current epoch list
   }
   return elements;
 }
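Note: the hunks above are the core of the scalability fix. Checkpoint data written on behalf of individual threads now goes to small buffers from the new _thread_local_mspace (an epoch-aware storage with 128-byte buffers), tagged through the buffer's _context byte, while type sets keep using the shared global mspace. release() inspects that tag and simply retires thread-local buffers instead of pushing them back through the shared lease machinery. The following stand-alone C++ sketch (hypothetical types, not JFR code) illustrates that dispatch pattern:

#include <cassert>
#include <cstdint>

// Hypothetical stand-in for the buffer type: a one-byte context tag marks
// buffers handed out by the thread-local checkpoint storage.
struct Buffer {
  uint8_t flags = 0;    // bit 0 = LEASE, bit 1 = RETIRED (illustrative encoding)
  uint8_t context = 0;  // 0 = global, 1 = thread-local

  void set_lease()     { flags |= 1; }
  void clear_lease()   { flags &= static_cast<uint8_t>(~1u); }
  void set_retired()   { flags |= 2; }
  bool lease() const   { return (flags & 1) != 0; }
  bool retired() const { return (flags & 2) != 0; }
};

const uint8_t thread_local_context = 1;

bool is_thread_local(const Buffer* b) {
  return b->context == thread_local_context;
}

// Mirrors the shape of the new release(): a thread-local buffer is retired so
// the epoch storage can reclaim it on its next iteration; a global buffer is
// simply unleased (the real code also returns it to the shared live list).
void release(Buffer* b) {
  assert(b->lease());
  if (is_thread_local(b)) {
    b->clear_lease();
    b->set_retired();
  } else {
    b->clear_lease();
  }
}

int main() {
  Buffer tl;
  tl.context = thread_local_context;
  tl.set_lease();
  release(&tl);
  assert(tl.retired());   // will be reclaimed by the previous-epoch iteration
  return 0;
}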
@@ -26,14 +26,13 @@
 #define SHARE_JFR_RECORDER_CHECKPOINT_JFRCHECKPOINTMANAGER_HPP
 
 #include "jfr/recorder/storage/jfrBuffer.hpp"
+#include "jfr/recorder/storage/jfrEpochStorage.hpp"
 #include "jfr/recorder/storage/jfrMemorySpace.hpp"
 #include "jfr/recorder/storage/jfrMemorySpaceRetrieval.hpp"
 #include "jfr/utilities/jfrLinkedList.hpp"
 
 class JfrCheckpointManager;
 class JfrChunkWriter;
-class JfrSerializer;
-class JfrTypeManager;
 class Thread;
 
 struct JfrCheckpointEntry {
@@ -45,6 +44,7 @@ struct JfrCheckpointEntry {
 };
 
 typedef JfrMemorySpace<JfrCheckpointManager, JfrMspaceRetrieval, JfrLinkedList<JfrBuffer>, JfrLinkedList<JfrBuffer>, true > JfrCheckpointMspace;
+typedef JfrEpochStorageHost<JfrBuffer, JfrMspaceRemoveRetrieval, true /* reclaim buffers eagerly*/ > JfrThreadLocalCheckpointMspace;
 
 //
 // Responsible for maintaining checkpoints and by implication types.
@@ -56,7 +56,8 @@ class JfrCheckpointManager : public JfrCHeapObj {
   typedef JfrCheckpointMspace::Node Buffer;
   typedef JfrCheckpointMspace::NodePtr BufferPtr;
  private:
-  JfrCheckpointMspace* _mspace;
+  JfrCheckpointMspace* _global_mspace;
+  JfrThreadLocalCheckpointMspace* _thread_local_mspace;
   JfrChunkWriter& _chunkwriter;
 
   JfrCheckpointManager(JfrChunkWriter& cw);
@@ -66,14 +67,16 @@ class JfrCheckpointManager : public JfrCHeapObj {
   bool initialize();
   static void destroy();
 
-  bool lookup(Buffer* old) const;
   static BufferPtr lease(Thread* thread, bool previous_epoch = false, size_t size = 0);
-  static BufferPtr lease(BufferPtr old, Thread* thread, size_t size = 0);
+  static BufferPtr lease(BufferPtr old, Thread* thread, size_t size);
+
+  static BufferPtr acquire_thread_local(size_t size, Thread* thread);
+  static BufferPtr lease_thread_local(Thread* thread, size_t size = 0);
+
   static BufferPtr flush(BufferPtr old, size_t used, size_t requested, Thread* thread);
 
   size_t clear();
   size_t write();
-  size_t flush();
   void notify_threads();
 
   size_t write_static_type_set(Thread* thread);
@@ -102,7 +105,6 @@ class JfrCheckpointManager : public JfrCHeapObj {
   friend class JfrCheckpointFlush;
   friend class JfrCheckpointWriter;
   friend class JfrSerializer;
-  friend class JfrStackTraceRepository;
   template <typename, template <typename> class, typename, typename, bool>
   friend class JfrMemorySpace;
 };
@@ -45,8 +45,8 @@ JfrCheckpointWriter::JfrCheckpointWriter(JfrCheckpointType type /* GENERIC */) :
   }
 }
 
-JfrCheckpointWriter::JfrCheckpointWriter(Thread* thread, bool header /* true */, JfrCheckpointType type /* GENERIC */) :
-  JfrCheckpointWriterBase(JfrCheckpointManager::lease(thread), thread),
+JfrCheckpointWriter::JfrCheckpointWriter(Thread* thread, bool header /* true */, JfrCheckpointType type /* GENERIC */, bool global_lease /* true */) :
+  JfrCheckpointWriterBase(global_lease ? JfrCheckpointManager::lease(thread) : JfrCheckpointManager::lease_thread_local(thread), thread),
   _time(JfrTicks::now()),
   _offset(0),
   _count(0),
@@ -72,7 +72,7 @@ class JfrCheckpointWriter : public JfrCheckpointWriterBase {
   JfrCheckpointWriter(bool previous_epoch, Thread* thread, JfrCheckpointType type = GENERIC);
  public:
   JfrCheckpointWriter(JfrCheckpointType type = GENERIC);
-  JfrCheckpointWriter(Thread* thread, bool header = true, JfrCheckpointType mode = GENERIC);
+  JfrCheckpointWriter(Thread* thread, bool header = true, JfrCheckpointType mode = GENERIC, bool global_lease = true);
   ~JfrCheckpointWriter();
   void write_type(JfrTypeId type_id);
   void write_count(u4 nof_entries);
@@ -28,6 +28,7 @@
 #include "jfr/recorder/checkpoint/types/jfrType.hpp"
 #include "jfr/recorder/checkpoint/types/jfrTypeManager.hpp"
 #include "jfr/recorder/jfrRecorder.hpp"
+#include "jfr/support/jfrThreadLocal.hpp"
 #include "jfr/utilities/jfrIterator.hpp"
 #include "jfr/utilities/jfrLinkedList.inline.hpp"
 #include "memory/resourceArea.hpp"
@@ -105,7 +106,7 @@ void JfrTypeManager::create_thread_blob(Thread* t) {
   ResourceMark rm(t);
   HandleMark hm(t);
   JfrThreadConstant type_thread(t);
-  JfrCheckpointWriter writer(t, true, THREADS);
+  JfrCheckpointWriter writer(t, true, THREADS, false);
   writer.write_type(TYPE_THREAD);
   type_thread.serialize(writer);
   // create and install a checkpoint blob
@@ -115,12 +116,11 @@ void JfrTypeManager::create_thread_blob(Thread* t) {
 
 void JfrTypeManager::write_thread_checkpoint(Thread* t) {
   assert(t != NULL, "invariant");
-  ResourceMark rm(t);
-  HandleMark hm(t);
-  JfrThreadConstant type_thread(t);
-  JfrCheckpointWriter writer(t, true, THREADS);
-  writer.write_type(TYPE_THREAD);
-  type_thread.serialize(writer);
+  if (!t->jfr_thread_local()->has_thread_blob()) {
+    create_thread_blob(t);
+  }
+  JfrCheckpointWriter writer(t, false, THREADS, false);
+  t->jfr_thread_local()->thread_blob()->write(writer);
 }
 
 class SerializerRegistrationGuard : public StackObj {
@@ -32,9 +32,10 @@ JfrBuffer::JfrBuffer() : _next(NULL),
                          _identity(NULL),
                          _pos(NULL),
                          _top(NULL),
-                         _flags(0),
+                         _size(0),
                          _header_size(0),
-                         _size(0) {}
+                         _flags(0),
+                         _context(0) {}
 
 bool JfrBuffer::initialize(size_t header_size, size_t size) {
   assert(_next == NULL, "invariant");
@@ -52,7 +53,6 @@ bool JfrBuffer::initialize(size_t header_size, size_t size) {
 
 void JfrBuffer::reinitialize(bool exclusion /* false */) {
   acquire_critical_section_top();
-  assert(!lease(), "invariant");
   if (exclusion != excluded()) {
     // update
     if (exclusion) {
@@ -185,25 +185,25 @@ enum FLAG {
   EXCLUDED = 8
 };
 
-inline u2 load(const volatile u2* flags) {
-  assert(flags != NULL, "invariant");
-  return Atomic::load_acquire(flags);
+inline u1 load(const volatile u1* dest) {
+  assert(dest != NULL, "invariant");
+  return Atomic::load_acquire(dest);
 }
 
-inline void set(u2* flags, FLAG flag) {
-  assert(flags != NULL, "invariant");
+inline void set(u1* dest, u1 data) {
+  assert(dest != NULL, "invariant");
   OrderAccess::storestore();
-  *flags |= (u1)flag;
+  *dest |= data;
 }
 
-inline void clear(u2* flags, FLAG flag) {
-  assert(flags != NULL, "invariant");
+inline void clear(u1* dest, u1 data) {
+  assert(dest != NULL, "invariant");
   OrderAccess::storestore();
-  *flags ^= (u1)flag;
+  *dest ^= data;
 }
 
-inline bool test(const u2* flags, FLAG flag) {
-  return (u1)flag == (load(flags) & (u1)flag);
+inline bool test(const u1* dest, u1 data) {
+  return data == (load(dest) & data);
 }
 
 bool JfrBuffer::transient() const {
@@ -273,3 +273,15 @@ void JfrBuffer::clear_retired() {
     clear(&_flags, RETIRED);
   }
 }
+
+u1 JfrBuffer::context() const {
+  return load(&_context);
+}
+
+void JfrBuffer::set_context(u1 context) {
+  set(&_context, context);
+}
+
+void JfrBuffer::clear_context() {
+  set(&_context, 0);
+}
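Note: the flag helpers above were narrowed from u2 to u1 so that the one-byte _context can sit next to a one-byte _flags, and they pair a storestore barrier on update with a load_acquire on read. A rough stand-alone approximation using std::atomic<uint8_t> (the atomic read-modify-write operations and acquire/release orderings stand in for HotSpot's Atomic/OrderAccess primitives, which assume a single writer and plain |=/^=; the flag value is illustrative):

#include <atomic>
#include <cassert>
#include <cstdint>

using u1 = uint8_t;

// Approximation of the u1 helpers above, built on std::atomic.
inline u1 load_flags(const std::atomic<u1>& dest) {
  return dest.load(std::memory_order_acquire);
}

inline void set_bits(std::atomic<u1>& dest, u1 data) {
  dest.fetch_or(data, std::memory_order_release);
}

inline void clear_bits(std::atomic<u1>& dest, u1 data) {
  // xor-clear, like the original helpers: the caller must know the bits are set
  dest.fetch_xor(data, std::memory_order_release);
}

inline bool test_bits(const std::atomic<u1>& dest, u1 data) {
  return data == (load_flags(dest) & data);
}

int main() {
  std::atomic<u1> flags{0};
  const u1 RETIRED = 4;   // illustrative bit value, mirroring the FLAG enum shape
  set_bits(flags, RETIRED);
  assert(test_bits(flags, RETIRED));
  clear_bits(flags, RETIRED);
  assert(!test_bits(flags, RETIRED));
  return 0;
}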
@@ -44,6 +44,10 @@
 // e.g. the delta must always be fully parsable.
 // _top can move concurrently by other threads but is always <= _pos.
 //
+// The _flags field holds generic tags applicable to all subsystems.
+//
+// The _context field can be used to set subsystem specific tags onto a buffer.
+//
 // Memory ordering:
 //
 //  Method                 Owner thread             Other threads
@@ -66,9 +70,10 @@ class JfrBuffer {
   const void* _identity;
   u1* _pos;
   mutable const u1* _top;
-  u2 _flags;
-  u2 _header_size;
   u4 _size;
+  u2 _header_size;
+  u1 _flags;
+  u1 _context;
 
   const u1* stable_top() const;
 
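Note: reordering the fields as u4 _size, u2 _header_size, u1 _flags, u1 _context lets the new context byte take the space freed by narrowing _flags, so the trailing field group keeps the same footprint. A small layout sketch; the 8-byte figure assumes common alignment rules and is not stated in the patch:

#include <cstdint>

// Layout sketch of just the trailing field group, before and after the patch.
struct Before { uint16_t flags; uint16_t header_size; uint32_t size; };
struct After  { uint32_t size; uint16_t header_size; uint8_t flags; uint8_t context; };

// On common ABIs both groups occupy 8 bytes, so adding _context is "free".
static_assert(sizeof(Before) == 8, "u2+u2+u4 packs into 8 bytes");
static_assert(sizeof(After)  == 8, "u4+u2+u1+u1 packs into 8 bytes");

int main() { return 0; }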
@@ -168,6 +173,10 @@ class JfrBuffer {
   bool excluded() const;
   void set_excluded();
   void clear_excluded();
+
+  u1 context() const;
+  void set_context(u1 context);
+  void clear_context();
 };
 
 #endif // SHARE_JFR_RECORDER_STORAGE_JFRBUFFER_HPP
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,9 +33,19 @@
 
 /*
  * Provides storage as a function of an epoch, with iteration capabilities for the current and previous epoch.
- * Iteration over the current epoch is incremental while iteration over the previous epoch is complete,
- * including storage reclamation. The design caters to use cases having multiple incremental iterations
- * over the current epoch, and a single, complete, iteration over the previous epoch.
+ *
+ * When iterating the previous epoch, where exclusive access to buffers is assumed,
+ * all buffers will be reinitialized post-callback, with retired buffers reclaimed
+ * and moved onto the free list and non-retired buffers left in-place.
+ *
+ * When iterating the current epoch, where concurrent access to buffers is assumed,
+ * there exist two modes, controlled by the EagerReclaim parameter.
+ * By default, EagerReclaim is false, meaning no retired buffers are reclaimed during the current epoch.
+ * Setting EagerReclaim to true, retired buffers will be reclaimed post-callback, by reinitialization
+ * and by moving them onto the free list, just like is done when iterating the previous epoch.
+ *
+ * The design caters to use cases having multiple incremental iterations over the current epoch,
+ * and a single iteration over the previous epoch.
  *
  * The JfrEpochStorage can be specialized by the following policies:
  *
@@ -43,10 +53,12 @@
  *
  * RetrievalPolicy see jfrMemorySpace.hpp for a description.
  *
+ * EagerReclaim  should retired buffers be reclaimed also during the current epoch (i.e. eagerly)
+ *
  */
-template <typename NodeType, template <typename> class RetrievalPolicy>
+template <typename NodeType, template <typename> class RetrievalPolicy, bool EagerReclaim = false>
 class JfrEpochStorageHost : public JfrCHeapObj {
-  typedef JfrMemorySpace<JfrEpochStorageHost<NodeType, RetrievalPolicy>,
+  typedef JfrMemorySpace<JfrEpochStorageHost<NodeType, RetrievalPolicy, EagerReclaim>,
                          RetrievalPolicy,
                          JfrConcurrentQueue<NodeType>,
                          JfrLinkedList<NodeType>,
@@ -32,23 +32,23 @@
 #include "jfr/utilities/jfrLinkedList.inline.hpp"
 #include "logging/log.hpp"
 
-template <typename NodeType, template <typename> class RetrievalPolicy>
-JfrEpochStorageHost<NodeType, RetrievalPolicy>::JfrEpochStorageHost() : _mspace(NULL) {}
+template <typename NodeType, template <typename> class RetrievalPolicy, bool EagerReclaim>
+JfrEpochStorageHost<NodeType, RetrievalPolicy, EagerReclaim>::JfrEpochStorageHost() : _mspace(NULL) {}
 
-template <typename NodeType, template <typename> class RetrievalPolicy>
-JfrEpochStorageHost<NodeType, RetrievalPolicy>::~JfrEpochStorageHost() {
+template <typename NodeType, template <typename> class RetrievalPolicy, bool EagerReclaim>
+JfrEpochStorageHost<NodeType, RetrievalPolicy, EagerReclaim>::~JfrEpochStorageHost() {
   delete _mspace;
 }
 
-template <typename NodeType, template <typename> class RetrievalPolicy>
-bool JfrEpochStorageHost<NodeType, RetrievalPolicy>::initialize(size_t min_elem_size, size_t free_list_cache_count_limit, size_t cache_prealloc_count) {
+template <typename NodeType, template <typename> class RetrievalPolicy, bool EagerReclaim>
+bool JfrEpochStorageHost<NodeType, RetrievalPolicy, EagerReclaim>::initialize(size_t min_elem_size, size_t free_list_cache_count_limit, size_t cache_prealloc_count) {
   assert(_mspace == NULL, "invariant");
   _mspace = new EpochMspace(min_elem_size, free_list_cache_count_limit, this);
   return _mspace != NULL && _mspace->initialize(cache_prealloc_count);
 }
 
-template <typename NodeType, template <typename> class RetrievalPolicy>
-inline NodeType* JfrEpochStorageHost<NodeType, RetrievalPolicy>::acquire(size_t size, Thread* thread) {
+template <typename NodeType, template <typename> class RetrievalPolicy, bool EagerReclaim>
+inline NodeType* JfrEpochStorageHost<NodeType, RetrievalPolicy, EagerReclaim>::acquire(size_t size, Thread* thread) {
   BufferPtr buffer = mspace_acquire_to_live_list(size, _mspace, thread);
   if (buffer == NULL) {
     log_warning(jfr)("Unable to allocate " SIZE_FORMAT " bytes of %s.", _mspace->min_element_size(), "epoch storage");
|
|||||||
return buffer;
|
return buffer;
|
||||||
}
|
}
|
||||||
|
|
||||||
template <typename NodeType, template <typename> class RetrievalPolicy>
|
template <typename NodeType, template <typename> class RetrievalPolicy, bool EagerReclaim>
|
||||||
void JfrEpochStorageHost<NodeType, RetrievalPolicy>::release(NodeType* buffer) {
|
void JfrEpochStorageHost<NodeType, RetrievalPolicy, EagerReclaim>::release(NodeType* buffer) {
|
||||||
assert(buffer != NULL, "invariant");
|
assert(buffer != NULL, "invariant");
|
||||||
buffer->set_retired();
|
buffer->set_retired();
|
||||||
}
|
}
|
||||||
|
|
||||||
template <typename NodeType, template <typename> class RetrievalPolicy>
|
template <typename NodeType, template <typename> class RetrievalPolicy, bool EagerReclaim>
|
||||||
void JfrEpochStorageHost<NodeType, RetrievalPolicy>::register_full(NodeType* buffer, Thread* thread) {
|
void JfrEpochStorageHost<NodeType, RetrievalPolicy, EagerReclaim>::register_full(NodeType* buffer, Thread* thread) {
|
||||||
// nothing here at the moment
|
// nothing here at the moment
|
||||||
}
|
}
|
||||||
|
|
||||||
template <typename NodeType, template <typename> class RetrievalPolicy>
|
template <typename NodeType, template <typename> class RetrievalPolicy, bool EagerReclaim>
|
||||||
template <typename Functor>
|
template <typename Functor>
|
||||||
void JfrEpochStorageHost<NodeType, RetrievalPolicy>::iterate(Functor& functor, bool previous_epoch) {
|
void JfrEpochStorageHost<NodeType, RetrievalPolicy, EagerReclaim>::iterate(Functor& functor, bool previous_epoch) {
|
||||||
typedef ReleaseRetiredToFreeListOp<EpochMspace, typename EpochMspace::LiveList> ReleaseStorage;
|
typedef ReinitializeAllReleaseRetiredOp<EpochMspace, typename EpochMspace::LiveList> PreviousEpochReleaseOperation;
|
||||||
typedef CompositeOperation<Functor, ReleaseStorage> PreviousEpochOperation;
|
typedef CompositeOperation<Functor, PreviousEpochReleaseOperation> PreviousEpochOperation;
|
||||||
|
typedef ReleaseRetiredOp<EpochMspace, typename EpochMspace::LiveList> CurrentEpochReleaseOperation;
|
||||||
|
typedef CompositeOperation<Functor, CurrentEpochReleaseOperation> CurrentEpochOperation;
|
||||||
if (previous_epoch) {
|
if (previous_epoch) {
|
||||||
ReleaseStorage rs(_mspace, _mspace->live_list(true));
|
PreviousEpochReleaseOperation pero(_mspace, _mspace->live_list(true));
|
||||||
PreviousEpochOperation peo(&functor, &rs);
|
PreviousEpochOperation peo(&functor, &pero);
|
||||||
process_live_list(peo, _mspace, true);
|
process_live_list(peo, _mspace, true); // previous epoch list
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
process_live_list(functor, _mspace, false);
|
if (EagerReclaim) {
|
||||||
|
CurrentEpochReleaseOperation cero(_mspace, _mspace->live_list());
|
||||||
|
CurrentEpochOperation ceo(&functor, &cero);
|
||||||
|
process_live_list(ceo, _mspace, false); // current epoch list
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
process_live_list(functor, _mspace, false); // current epoch list
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef ASSERT
|
#ifdef ASSERT
|
||||||
@ -100,8 +108,8 @@ class EmptyVerifier {
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
template <typename NodeType, template <typename> class RetrievalPolicy>
|
template <typename NodeType, template <typename> class RetrievalPolicy, bool EagerReclaim>
|
||||||
void JfrEpochStorageHost<NodeType, RetrievalPolicy>::verify_previous_empty() const {
|
void JfrEpochStorageHost<NodeType, RetrievalPolicy, EagerReclaim>::verify_previous_empty() const {
|
||||||
typedef EmptyVerifier<JfrEpochStorage::Mspace> VerifyEmptyMspace;
|
typedef EmptyVerifier<JfrEpochStorage::Mspace> VerifyEmptyMspace;
|
||||||
VerifyEmptyMspace vem(_mspace);
|
VerifyEmptyMspace vem(_mspace);
|
||||||
process_live_list(vem, _mspace, true);
|
process_live_list(vem, _mspace, true);
|
||||||
|
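Note: EagerReclaim is a compile-time policy. This patch instantiates the thread-local checkpoint storage with true, so retired buffers are reclaimed even while iterating the current epoch. A condensed stand-alone model of a bool non-type template parameter selecting the reclaim path (hypothetical types, not the JFR classes):

#include <cstdio>
#include <vector>

// Condensed model of an epoch-storage host with a compile-time EagerReclaim policy.
struct Node {
  bool retired = false;
};

template <bool EagerReclaim = false>
class EpochStorage {
 public:
  Node* acquire() {
    _live.push_back(Node());
    return &_live.back();
  }
  // Visit the current-epoch list; with EagerReclaim the retired nodes are
  // reset ("reinitialized") right after the callback, as in the patch.
  template <typename Functor>
  void iterate_current(Functor& f) {
    for (Node& n : _live) {
      f(&n);
      if (EagerReclaim && n.retired) {
        n = Node();  // the real code excises the buffer and returns it to a free list
      }
    }
  }
 private:
  std::vector<Node> _live;
};

struct CountRetired {
  int count = 0;
  void operator()(Node* n) {
    if (n->retired) {
      ++count;
    }
  }
};

int main() {
  EpochStorage<true> storage;  // 'true' mirrors the thread-local checkpoint storage in this patch
  storage.acquire()->retired = true;
  storage.acquire();
  CountRetired op;
  storage.iterate_current(op);
  std::printf("retired nodes seen: %d\n", op.count);
  return 0;
}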
@@ -493,14 +493,14 @@ inline bool ReleaseOp<Mspace>::process(typename Mspace::NodePtr node) {
 }
 
 template <typename Mspace, typename List>
-class ReleaseOpWithExcision : public ReleaseOp<Mspace> {
+class ReleaseWithExcisionOp : public ReleaseOp<Mspace> {
  private:
   List& _list;
   typename List::NodePtr _prev;
   size_t _count;
   size_t _amount;
  public:
-  ReleaseOpWithExcision(Mspace* mspace, List& list) :
+  ReleaseWithExcisionOp(Mspace* mspace, List& list) :
     ReleaseOp<Mspace>(mspace), _list(list), _prev(NULL), _count(0), _amount(0) {}
   bool process(typename List::NodePtr node);
   size_t processed() const { return _count; }
|
|||||||
};
|
};
|
||||||
|
|
||||||
template <typename Mspace, typename List>
|
template <typename Mspace, typename List>
|
||||||
inline bool ReleaseOpWithExcision<Mspace, List>::process(typename List::NodePtr node) {
|
inline bool ReleaseWithExcisionOp<Mspace, List>::process(typename List::NodePtr node) {
|
||||||
assert(node != NULL, "invariant");
|
assert(node != NULL, "invariant");
|
||||||
if (node->transient()) {
|
if (node->transient()) {
|
||||||
_prev = _list.excise(_prev, node);
|
_prev = _list.excise(_prev, node);
|
||||||
@ -569,20 +569,49 @@ inline bool ScavengingReleaseOp<Mspace, List>::excise_with_release(typename List
|
|||||||
}
|
}
|
||||||
|
|
||||||
template <typename Mspace, typename FromList>
|
template <typename Mspace, typename FromList>
|
||||||
class ReleaseRetiredToFreeListOp : public StackObj {
|
class ReleaseRetiredOp : public StackObj {
|
||||||
private:
|
private:
|
||||||
Mspace* _mspace;
|
Mspace* _mspace;
|
||||||
FromList& _list;
|
FromList& _list;
|
||||||
typename Mspace::NodePtr _prev;
|
typename Mspace::NodePtr _prev;
|
||||||
public:
|
public:
|
||||||
typedef typename Mspace::Node Node;
|
typedef typename Mspace::Node Node;
|
||||||
ReleaseRetiredToFreeListOp(Mspace* mspace, FromList& list) :
|
ReleaseRetiredOp(Mspace* mspace, FromList& list) :
|
||||||
_mspace(mspace), _list(list), _prev(NULL) {}
|
_mspace(mspace), _list(list), _prev(NULL) {}
|
||||||
bool process(Node* node);
|
bool process(Node* node);
|
||||||
};
|
};
|
||||||
|
|
||||||
template <typename Mspace, typename FromList>
|
template <typename Mspace, typename FromList>
|
||||||
inline bool ReleaseRetiredToFreeListOp<Mspace, FromList>::process(typename Mspace::Node* node) {
|
inline bool ReleaseRetiredOp<Mspace, FromList>::process(typename Mspace::Node* node) {
|
||||||
|
assert(node != NULL, "invariant");
|
||||||
|
if (node->retired()) {
|
||||||
|
_prev = _list.excise(_prev, node);
|
||||||
|
node->reinitialize();
|
||||||
|
assert(node->empty(), "invariant");
|
||||||
|
assert(!node->retired(), "invariant");
|
||||||
|
node->release();
|
||||||
|
mspace_release(node, _mspace);
|
||||||
|
} else {
|
||||||
|
_prev = node;
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename Mspace, typename FromList>
|
||||||
|
class ReinitializeAllReleaseRetiredOp : public StackObj {
|
||||||
|
private:
|
||||||
|
Mspace* _mspace;
|
||||||
|
FromList& _list;
|
||||||
|
typename Mspace::NodePtr _prev;
|
||||||
|
public:
|
||||||
|
typedef typename Mspace::Node Node;
|
||||||
|
ReinitializeAllReleaseRetiredOp(Mspace* mspace, FromList& list) :
|
||||||
|
_mspace(mspace), _list(list), _prev(NULL) {}
|
||||||
|
bool process(Node* node);
|
||||||
|
};
|
||||||
|
|
||||||
|
template <typename Mspace, typename FromList>
|
||||||
|
inline bool ReinitializeAllReleaseRetiredOp<Mspace, FromList>::process(typename Mspace::Node* node) {
|
||||||
assert(node != NULL, "invariant");
|
assert(node != NULL, "invariant");
|
||||||
// assumes some means of exclusive access to node
|
// assumes some means of exclusive access to node
|
||||||
const bool retired = node->retired();
|
const bool retired = node->retired();
|
||||||
|
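Note: both ReleaseRetiredOp and ReinitializeAllReleaseRetiredOp walk the live list while carrying a _prev cursor so retired nodes can be excised in place and handed back to the free list. A generic stand-alone sketch of that traversal shape (hypothetical singly linked list, not the JFR types):

#include <cassert>
#include <cstddef>

// Generic sketch of the "walk with a _prev cursor, excise retired nodes" shape.
struct Node {
  Node* next = nullptr;
  bool retired = false;
};

struct List {
  Node* head = nullptr;
  // Unlink 'node', which follows 'prev' (or is the head when prev is null),
  // and return the node now preceding the cursor position.
  Node* excise(Node* prev, Node* node) {
    if (prev == nullptr) {
      head = node->next;
    } else {
      prev->next = node->next;
    }
    return prev;
  }
};

// Visit every node; drop the retired ones, keep the rest linked.
void release_retired(List& list) {
  Node* prev = nullptr;
  for (Node* n = list.head; n != nullptr; ) {
    Node* next = n->next;
    if (n->retired) {
      prev = list.excise(prev, n);   // the node could now be reinitialized / freed
    } else {
      prev = n;
    }
    n = next;
  }
}

int main() {
  Node a, b, c;
  a.next = &b;
  b.next = &c;
  b.retired = true;
  List list;
  list.head = &a;
  release_retired(list);
  assert(a.next == &c);   // the retired middle node was excised
  return 0;
}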
@@ -208,7 +208,7 @@ typedef StringPoolOp<UnBufferedWriteToChunk> WriteOperation;
 typedef StringPoolOp<StringPoolDiscarderStub> DiscardOperation;
 typedef ExclusiveOp<WriteOperation> ExclusiveWriteOperation;
 typedef ExclusiveOp<DiscardOperation> ExclusiveDiscardOperation;
-typedef ReleaseOpWithExcision<JfrStringPoolMspace, JfrStringPoolMspace::LiveList> ReleaseOperation;
+typedef ReleaseWithExcisionOp<JfrStringPoolMspace, JfrStringPoolMspace::LiveList> ReleaseOperation;
 typedef CompositeOperation<ExclusiveWriteOperation, ReleaseOperation> WriteReleaseOperation;
 typedef CompositeOperation<ExclusiveDiscardOperation, ReleaseOperation> DiscardReleaseOperation;
 
@@ -34,7 +34,7 @@
 
 /*
  * The removal marker (i.e. the excision bit) is represented by '( )' as part of state description comments:
- * node --> next becomes (node) --> next, when node is logically deleted.
+ * "node --> next" becomes "(node) --> next", when node is logically deleted.
 */
 template <typename Node>
 inline Node* mark_for_removal(Node* node) {
|
|||||||
|
|
||||||
/*
|
/*
|
||||||
* The insertion marker (i.e. the insertion bit) is represented by '[ ]' as part of state description comments:
|
* The insertion marker (i.e. the insertion bit) is represented by '[ ]' as part of state description comments:
|
||||||
* "node --> next" becomes "[node} --> next", in an attempt to convey node as being exlusively reserved.
|
* "node --> next" becomes "[node] --> next", in an attempt to convey node as being exlusively reserved.
|
||||||
*/
|
*/
|
||||||
template <typename Node>
|
template <typename Node>
|
||||||
inline bool mark_for_insertion(Node* node, const Node* tail) {
|
inline bool mark_for_insertion(Node* node, const Node* tail) {
|
||||||
|