commit c66bef0289 (parent 45fa5aa699)

8233705: Let artifact iteration running time be a function of incrementally tagged artifacts
Reviewed-by: egahlin
@@ -37,7 +37,8 @@
 #include "jfr/jfr.hpp"
 #include "jfr/jni/jfrJavaSupport.hpp"
 #include "jfr/jni/jfrUpcalls.hpp"
-#include "jfr/support/jfrEventClass.hpp"
+#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
+#include "jfr/support/jfrJdkJfrEvent.hpp"
 #include "jfr/utilities/jfrBigEndian.hpp"
 #include "jfr/writers/jfrBigEndianWriter.hpp"
 #include "logging/log.hpp"
@@ -1408,7 +1409,7 @@ static ClassFileStream* create_new_bytes_for_subklass(const InstanceKlass* ik, c
   jint size_instrumented_data = 0;
   unsigned char* instrumented_data = NULL;
   const jclass super = (jclass)JNIHandles::make_local(ik->super()->java_mirror());
-  JfrUpcalls::new_bytes_eager_instrumentation(TRACE_ID(ik),
+  JfrUpcalls::new_bytes_eager_instrumentation(JfrTraceId::load_raw(ik),
                                               force_instrumentation,
                                               super,
                                               size_of_new_bytes,
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,7 @@
 #include "jfr/jni/jfrUpcalls.hpp"
 #include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
 #include "jfr/recorder/service/jfrOptionSet.hpp"
-#include "jfr/support/jfrEventClass.hpp"
+#include "jfr/support/jfrJdkJfrEvent.hpp"
 #include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
 #include "prims/jvmtiEnvBase.hpp"
@@ -91,7 +91,7 @@ extern "C" void JNICALL jfr_on_class_file_load_hook(jvmtiEnv *jvmti_env,
   JavaThread* jt = JavaThread::thread_from_jni_environment(jni_env);
   DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(jt));;
   ThreadInVMfromNative tvmfn(jt);
-  JfrUpcalls::on_retransform(JfrTraceId::get(class_being_redefined),
+  JfrUpcalls::on_retransform(JfrTraceId::load_raw(class_being_redefined),
                              class_being_redefined,
                              class_data_len,
                              class_data,
@@ -66,7 +66,7 @@ void Jfr::on_create_vm_3() {

 void Jfr::on_unloading_classes() {
   if (JfrRecorder::is_created()) {
-    JfrCheckpointManager::write_type_set_for_unloaded_classes();
+    JfrCheckpointManager::on_unloading_classes();
   }
 }

@@ -38,12 +38,13 @@
 #include "jfr/recorder/service/jfrOptionSet.hpp"
 #include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
 #include "jfr/recorder/stringpool/jfrStringPool.hpp"
-#include "jfr/jni/jfrGetAllEventClasses.hpp"
 #include "jfr/jni/jfrJavaSupport.hpp"
 #include "jfr/jni/jfrJniMethodRegistration.hpp"
 #include "jfr/instrumentation/jfrEventClassTransformer.hpp"
 #include "jfr/instrumentation/jfrJvmtiAgent.hpp"
 #include "jfr/leakprofiler/leakProfiler.hpp"
+#include "jfr/support/jfrJdkJfrEvent.hpp"
+#include "jfr/support/jfrKlassUnloading.hpp"
 #include "jfr/utilities/jfrJavaLog.hpp"
 #include "jfr/utilities/jfrTimeConverter.hpp"
 #include "jfr/utilities/jfrTime.hpp"
@@ -163,7 +164,7 @@ NO_TRANSITION(jlong, jfr_get_epoch_address(JNIEnv* env, jobject jvm))
 NO_TRANSITION_END

 NO_TRANSITION(jlong, jfr_get_unloaded_event_classes_count(JNIEnv* env, jobject jvm))
-  return JfrEventClasses::unloaded_event_classes_count();
+  return JfrKlassUnloading::event_class_count();
 NO_TRANSITION_END

 NO_TRANSITION(jdouble, jfr_time_conv_factor(JNIEnv* env, jobject jvm))
@@ -234,11 +235,11 @@ JVM_ENTRY_NO_ENV(jboolean, jfr_emit_event(JNIEnv* env, jobject jvm, jlong eventT
 JVM_END

 JVM_ENTRY_NO_ENV(jobject, jfr_get_all_event_classes(JNIEnv* env, jobject jvm))
-  return JfrEventClasses::get_all_event_classes(thread);
+  return JdkJfrEvent::get_all_klasses(thread);
 JVM_END

 JVM_ENTRY_NO_ENV(jlong, jfr_class_id(JNIEnv* env, jclass jvm, jclass jc))
-  return JfrTraceId::use(jc);
+  return JfrTraceId::load(jc);
 JVM_END

 JVM_ENTRY_NO_ENV(jlong, jfr_stacktrace_id(JNIEnv* env, jobject jvm, jint skip))
@@ -311,7 +312,7 @@ JVM_ENTRY_NO_ENV(void, jfr_abort(JNIEnv* env, jobject jvm, jstring errorMsg))
 JVM_END

 JVM_ENTRY_NO_ENV(jlong, jfr_type_id(JNIEnv* env, jobject jvm, jclass jc))
-  return JfrTraceId::get(jc);
+  return JfrTraceId::load_raw(jc);
 JVM_END

 JVM_ENTRY_NO_ENV(jboolean, jfr_add_string_constant(JNIEnv* env, jclass jvm, jboolean epoch, jlong id, jstring string))
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,7 @@
 #include "classfile/systemDictionary.hpp"
 #include "jfr/jni/jfrJavaSupport.hpp"
 #include "jfr/jni/jfrUpcalls.hpp"
-#include "jfr/support/jfrEventClass.hpp"
+#include "jfr/support/jfrJdkJfrEvent.hpp"
 #include "logging/log.hpp"
 #include "memory/oopFactory.hpp"
 #include "oops/oop.inline.hpp"
@@ -35,36 +35,15 @@
 #include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
 #include "jfr/recorder/service/jfrOptionSet.hpp"
 #include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
+#include "jfr/support/jfrKlassUnloading.hpp"
 #include "jfr/support/jfrMethodLookup.hpp"
-#include "jfr/utilities/jfrHashtable.hpp"
-#include "jfr/utilities/jfrTypes.hpp"
+#include "jfr/utilities/jfrPredicate.hpp"
+#include "jfr/utilities/jfrRelation.hpp"
 #include "oops/instanceKlass.inline.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/thread.hpp"
 #include "utilities/growableArray.hpp"

-static bool predicate(GrowableArray<traceid>* set, traceid id) {
-  assert(set != NULL, "invariant");
-  bool found = false;
-  set->find_sorted<traceid, compare_traceid>(id, found);
-  return found;
-}
-
-static bool mutable_predicate(GrowableArray<traceid>* set, traceid id) {
-  assert(set != NULL, "invariant");
-  bool found = false;
-  const int location = set->find_sorted<traceid, compare_traceid>(id, found);
-  if (!found) {
-    set->insert_before(location, id);
-  }
-  return found;
-}
-
-static bool add(GrowableArray<traceid>* set, traceid id) {
-  assert(set != NULL, "invariant");
-  return mutable_predicate(set, id);
-}
-
 const int initial_array_size = 64;
@@ -87,7 +66,12 @@ Semaphore ThreadIdExclusiveAccess::_mutex_semaphore(1);

 static bool has_thread_exited(traceid tid) {
   assert(tid != 0, "invariant");
-  return unloaded_thread_id_set != NULL && predicate(unloaded_thread_id_set, tid);
+  return unloaded_thread_id_set != NULL && JfrPredicate<traceid, compare_traceid>::test(unloaded_thread_id_set, tid);
 }

+static bool add(GrowableArray<traceid>* set, traceid id) {
+  assert(set != NULL, "invariant");
+  return JfrMutablePredicate<traceid, compare_traceid>::test(set, id);
+}
+
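The file-local helpers predicate(), mutable_predicate() and add() deleted above come back in this commit as the reusable templates JfrPredicate and JfrMutablePredicate under jfr/utilities/, shared by this file and jfrTypeSetUtils.cpp. A minimal sketch of the shape the new call sites imply — an illustration, not the actual header:

    // Sketch inferred from the removed helpers and the new call sites;
    // the real jfr/utilities/jfrPredicate.hpp may differ in detail.
    template <typename T, int compare(const T&, const T&)>
    class JfrPredicate : AllStatic {
     public:
      // Binary-search membership test against a sorted GrowableArray.
      static bool test(GrowableArray<T>* set, T value) {
        assert(set != NULL, "invariant");
        bool found = false;
        set->find_sorted<T, compare>(value, found);
        return found;
      }
    };

    template <typename T, int compare(const T&, const T&)>
    class JfrMutablePredicate : AllStatic {
     public:
      // Same membership test, but inserts value at its sort position
      // when absent, so the set stays sorted and deduplicated.
      static bool test(GrowableArray<T>* set, T value) {
        assert(set != NULL, "invariant");
        bool found = false;
        const int location = set->find_sorted<T, compare>(value, found);
        if (!found) {
          set->insert_before(location, value);
        }
        return found;
      }
    };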
 static void add_to_unloaded_thread_set(traceid tid) {
@@ -105,31 +89,6 @@ void ObjectSampleCheckpoint::on_thread_exit(JavaThread* jt) {
   }
 }

-// Track the set of unloaded klasses during a chunk / epoch.
-// Methods in stacktraces belonging to unloaded klasses must not be accessed.
-static GrowableArray<traceid>* unloaded_klass_set = NULL;
-
-static void add_to_unloaded_klass_set(traceid klass_id) {
-  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
-  if (unloaded_klass_set == NULL) {
-    unloaded_klass_set = c_heap_allocate_array<traceid>();
-  }
-  unloaded_klass_set->append(klass_id);
-}
-
-static void sort_unloaded_klass_set() {
-  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
-  if (unloaded_klass_set != NULL && unloaded_klass_set->length() > 1) {
-    unloaded_klass_set->sort(sort_traceid);
-  }
-}
-
-void ObjectSampleCheckpoint::on_klass_unload(const Klass* k) {
-  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
-  assert(k != NULL, "invariant");
-  add_to_unloaded_klass_set(JfrTraceId::get(k));
-}
-
 template <typename Processor>
 static void do_samples(ObjectSample* sample, const ObjectSample* end, Processor& processor) {
   assert(sample != NULL, "invariant");
@@ -228,7 +187,6 @@ static GrowableArray<traceid>* id_set = NULL;

 static void prepare_for_resolution() {
   id_set = new GrowableArray<traceid>(JfrOptionSet::old_object_queue_size());
-  sort_unloaded_klass_set();
 }

 static bool stack_trace_precondition(const ObjectSample* sample) {
@@ -290,6 +248,7 @@ static void install_stack_traces(const ObjectSampler* sampler, JfrStackTraceRepo
   assert(sampler != NULL, "invariant");
   const ObjectSample* const last = sampler->last();
   if (last != sampler->last_resolved()) {
+    JfrKlassUnloading::sort();
     StackTraceBlobInstaller installer(stack_trace_repo);
     iterate_samples(installer);
   }
@@ -307,13 +266,13 @@ void ObjectSampleCheckpoint::on_rotation(const ObjectSampler* sampler, JfrStackT

 static bool is_klass_unloaded(traceid klass_id) {
   assert(ClassLoaderDataGraph_lock->owned_by_self(), "invariant");
-  return unloaded_klass_set != NULL && predicate(unloaded_klass_set, klass_id);
+  return JfrKlassUnloading::is_unloaded(klass_id);
 }

 static bool is_processed(traceid method_id) {
   assert(method_id != 0, "invariant");
   assert(id_set != NULL, "invariant");
-  return mutable_predicate(id_set, method_id);
+  return JfrMutablePredicate<traceid, compare_traceid>::test(id_set, method_id);
 }

 void ObjectSampleCheckpoint::add_to_leakp_set(const InstanceKlass* ik, traceid method_id) {
@@ -324,7 +283,7 @@ void ObjectSampleCheckpoint::add_to_leakp_set(const InstanceKlass* ik, traceid m
   const Method* const method = JfrMethodLookup::lookup(ik, method_id);
   assert(method != NULL, "invariant");
   assert(method->method_holder() == ik, "invariant");
-  JfrTraceId::set_leakp(ik, method);
+  JfrTraceId::load_leakp(ik, method);
 }

 void ObjectSampleCheckpoint::write_stacktrace(const JfrStackTrace* trace, JfrCheckpointWriter& writer) {
@@ -419,13 +378,6 @@ void ObjectSampleCheckpoint::write(const ObjectSampler* sampler, EdgeStore* edge
   }
 }

-static void clear_unloaded_klass_set() {
-  assert(ClassLoaderDataGraph_lock->owned_by_self(), "invariant");
-  if (unloaded_klass_set != NULL && unloaded_klass_set->is_nonempty()) {
-    unloaded_klass_set->clear();
-  }
-}
-
 // A linked list of saved type set blobs for the epoch.
 // The link consist of a reference counted handle.
 static JfrBlobHandle saved_type_set_blobs;
@@ -433,7 +385,6 @@ static JfrBlobHandle saved_type_set_blobs;

 static void release_state_for_previous_epoch() {
   // decrements the reference count and the list is reinitialized
   saved_type_set_blobs = JfrBlobHandle();
-  clear_unloaded_klass_set();
 }

 class BlobInstaller {
@@ -50,7 +50,6 @@ class ObjectSampleCheckpoint : AllStatic {
   static void write_stacktrace(const JfrStackTrace* trace, JfrCheckpointWriter& writer);
   static void write(const ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread);
  public:
-  static void on_klass_unload(const Klass* k);
   static void on_type_set(JfrCheckpointWriter& writer);
   static void on_type_set_unload(JfrCheckpointWriter& writer);
   static void on_thread_exit(JavaThread* jt);
@@ -36,6 +36,7 @@
 #include "jfr/recorder/service/jfrOptionSet.hpp"
 #include "jfr/recorder/storage/jfrMemorySpace.inline.hpp"
 #include "jfr/recorder/storage/jfrStorageUtils.inline.hpp"
+#include "jfr/support/jfrKlassUnloading.hpp"
 #include "jfr/utilities/jfrBigEndian.hpp"
 #include "jfr/utilities/jfrIterator.hpp"
 #include "jfr/utilities/jfrLinkedList.inline.hpp"
@@ -88,46 +89,43 @@ void JfrCheckpointManager::destroy() {
 }

 JfrCheckpointManager::JfrCheckpointManager(JfrChunkWriter& cw) :
-  _free_list_mspace(NULL),
-  _epoch_transition_mspace(NULL),
-  _service_thread(NULL),
-  _chunkwriter(cw),
-  _checkpoint_epoch_state(JfrTraceIdEpoch::epoch()) {}
+  _mspace(NULL),
+  _chunkwriter(cw) {}

 JfrCheckpointManager::~JfrCheckpointManager() {
-  if (_free_list_mspace != NULL) {
-    delete _free_list_mspace;
-  }
-  if (_epoch_transition_mspace != NULL) {
-    delete _epoch_transition_mspace;
-  }
+  JfrTraceIdLoadBarrier::destroy();
   JfrTypeManager::destroy();
+  delete _mspace;
 }

-static const size_t unlimited_mspace_size = 0;
-static const size_t checkpoint_buffer_cache_count = 2;
-static const size_t checkpoint_buffer_size = 512 * K;
+static const size_t buffer_count = 2;
+static const size_t buffer_size = 512 * K;

-static JfrCheckpointMspace* allocate_mspace(size_t size, size_t limit, size_t cache_count, JfrCheckpointManager* mgr) {
-  return create_mspace<JfrCheckpointMspace, JfrCheckpointManager>(size, limit, cache_count, mgr);
+static JfrCheckpointMspace* allocate_mspace(size_t min_elem_size,
+                                            size_t free_list_cache_count_limit,
+                                            size_t cache_prealloc_count,
+                                            bool prealloc_to_free_list,
+                                            JfrCheckpointManager* mgr) {
+  return create_mspace<JfrCheckpointMspace, JfrCheckpointManager>(min_elem_size,
+                                                                  free_list_cache_count_limit,
+                                                                  cache_prealloc_count,
+                                                                  prealloc_to_free_list,
+                                                                  mgr);
 }

 bool JfrCheckpointManager::initialize() {
-  assert(_free_list_mspace == NULL, "invariant");
-  _free_list_mspace = allocate_mspace(checkpoint_buffer_size, unlimited_mspace_size, checkpoint_buffer_cache_count, this);
-  if (_free_list_mspace == NULL) {
+  assert(_mspace == NULL, "invariant");
+  _mspace = allocate_mspace(buffer_size, 0, 0, false, this); // post-pone preallocation
+  if (_mspace == NULL) {
     return false;
   }
-  assert(_epoch_transition_mspace == NULL, "invariant");
-  _epoch_transition_mspace = allocate_mspace(checkpoint_buffer_size, unlimited_mspace_size, checkpoint_buffer_cache_count, this);
-  if (_epoch_transition_mspace == NULL) {
-    return false;
+  // preallocate buffer count to each of the epoch live lists
+  for (size_t i = 0; i < buffer_count * 2; ++i) {
+    Buffer* const buffer = mspace_allocate(buffer_size, _mspace);
+    _mspace->add_to_live_list(buffer, i % 2 == 0);
   }
-  return JfrTypeManager::initialize();
-}
-
-void JfrCheckpointManager::register_service_thread(const Thread* thread) {
-  _service_thread = thread;
+  assert(_mspace->free_list_is_empty(), "invariant");
+  return JfrTypeManager::initialize() && JfrTraceIdLoadBarrier::initialize();
 }

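The preallocation loop above seeds both epoch live lists: with buffer_count == 2, four buffers are created and i % 2 == 0 alternates them between the list for epoch 0 and the list for epoch 1. This pair of per-epoch live lists is what replaces the old free-list/epoch-transition mspace split. A hedged sketch of the selection idea (illustrative names only, not the real JfrMemorySpace API):

    // Illustrative only: one live list per epoch; previous_epoch picks the
    // list being drained while new checkpoint data lands on the other.
    template <typename List>
    struct EpochLivePair {
      List _list[2]; // index 0 = epoch 0, index 1 = epoch 1
      List& live_list(bool previous_epoch = false) {
        const int current = JfrTraceIdEpoch::epoch() ? 1 : 0;
        return _list[previous_epoch ? 1 - current : current];
      }
    };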
 void JfrCheckpointManager::register_full(BufferPtr buffer, Thread* thread) {
@@ -151,46 +149,36 @@ static void assert_release(const BufferPtr buffer) {
 }
 #endif // ASSERT

-bool JfrCheckpointManager::use_epoch_transition_mspace(const Thread* thread) const {
-  return _service_thread != thread && Atomic::load_acquire(&_checkpoint_epoch_state) != JfrTraceIdEpoch::epoch();
-}
-
-static const size_t lease_retry = 10;
-
-BufferPtr JfrCheckpointManager::lease(JfrCheckpointMspace* mspace, Thread* thread, size_t size /* 0 */) {
+static BufferPtr lease(size_t size, JfrCheckpointMspace* mspace, size_t retry_count, Thread* thread, bool previous_epoch) {
   assert(mspace != NULL, "invariant");
-  static const size_t max_elem_size = mspace->min_elem_size(); // min is max
+  static const size_t max_elem_size = mspace->min_element_size(); // min is max
   BufferPtr buffer;
   if (size <= max_elem_size) {
-    buffer = mspace_get_free_lease_with_retry(size, mspace, lease_retry, thread);
+    buffer = mspace_acquire_lease_with_retry(size, mspace, retry_count, thread, previous_epoch);
     if (buffer != NULL) {
       DEBUG_ONLY(assert_lease(buffer);)
       return buffer;
     }
   }
-  buffer = mspace_allocate_transient_lease_to_full(size, mspace, thread);
+  buffer = mspace_allocate_transient_lease_to_live_list(size, mspace, thread, previous_epoch);
   DEBUG_ONLY(assert_lease(buffer);)
   return buffer;
 }

-BufferPtr JfrCheckpointManager::lease(Thread* thread, size_t size /* 0 */) {
-  JfrCheckpointManager& manager = instance();
-  JfrCheckpointMspace* const mspace = manager.use_epoch_transition_mspace(thread) ?
-                                      manager._epoch_transition_mspace :
-                                      manager._free_list_mspace;
-  return lease(mspace, thread, size);
+static const size_t lease_retry = 100;
+
+BufferPtr JfrCheckpointManager::lease(Thread* thread, bool previous_epoch /* false */, size_t size /* 0 */) {
+  return ::lease(size, instance()._mspace, lease_retry, thread, previous_epoch);
 }

-JfrCheckpointMspace* JfrCheckpointManager::lookup(BufferPtr old) const {
+bool JfrCheckpointManager::lookup(BufferPtr old) const {
   assert(old != NULL, "invariant");
-  return _free_list_mspace->in_mspace(old) ? _free_list_mspace : _epoch_transition_mspace;
+  return !_mspace->in_current_epoch_list(old);
 }

 BufferPtr JfrCheckpointManager::lease(BufferPtr old, Thread* thread, size_t size /* 0 */) {
   assert(old != NULL, "invariant");
-  JfrCheckpointMspace* mspace = instance().lookup(old);
-  assert(mspace != NULL, "invariant");
-  return lease(mspace, thread, size);
+  return ::lease(size, instance()._mspace, lease_retry, thread, instance().lookup(old));
 }

 /*
@@ -320,23 +308,9 @@ class CheckpointWriteOp {
 };

 typedef CheckpointWriteOp<JfrCheckpointManager::Buffer> WriteOperation;
-typedef ReleaseOp<JfrCheckpointMspace> CheckpointReleaseFreeOperation;
-typedef ScavengingReleaseOp<JfrCheckpointMspace> CheckpointReleaseFullOperation;
-
-template <template <typename> class WriterHost>
-static size_t write_mspace(JfrCheckpointMspace* mspace, JfrChunkWriter& chunkwriter) {
-  assert(mspace != NULL, "invariant");
-  WriteOperation wo(chunkwriter);
-  WriterHost<WriteOperation> wh(wo);
-  CheckpointReleaseFreeOperation free_release_op(mspace);
-  CompositeOperation<WriterHost<WriteOperation>, CheckpointReleaseFreeOperation> free_op(&wh, &free_release_op);
-  process_free_list(free_op, mspace);
-  CheckpointReleaseFullOperation full_release_op(mspace);
-  MutexedWriteOp<WriteOperation> full_write_op(wo);
-  CompositeOperation<MutexedWriteOp<WriteOperation>, CheckpointReleaseFullOperation> full_op(&full_write_op, &full_release_op);
-  process_full_list(full_op, mspace);
-  return wo.processed();
-}
+typedef MutexedWriteOp<WriteOperation> MutexedWriteOperation;
+typedef ReleaseOpWithExcision<JfrCheckpointMspace, JfrCheckpointMspace::LiveList> ReleaseOperation;
+typedef CompositeOperation<MutexedWriteOperation, ReleaseOperation> WriteReleaseOperation;

 void JfrCheckpointManager::begin_epoch_shift() {
   assert(SafepointSynchronize::is_at_safepoint(), "invariant");
@@ -350,81 +324,51 @@ void JfrCheckpointManager::end_epoch_shift() {
   assert(current_epoch != JfrTraceIdEpoch::current(), "invariant");
 }

-void JfrCheckpointManager::synchronize_checkpoint_manager_with_current_epoch() {
-  assert(_checkpoint_epoch_state != JfrTraceIdEpoch::epoch(), "invariant");
-  OrderAccess::storestore();
-  _checkpoint_epoch_state = JfrTraceIdEpoch::epoch();
-}
-
 size_t JfrCheckpointManager::write() {
-  const size_t processed = write_mspace<MutexedWriteOp>(_free_list_mspace, _chunkwriter);
-  synchronize_checkpoint_manager_with_current_epoch();
-  return processed;
-}
-
-size_t JfrCheckpointManager::write_epoch_transition_mspace() {
-  return write_mspace<ExclusiveOp>(_epoch_transition_mspace, _chunkwriter);
+  assert(_mspace->free_list_is_empty(), "invariant");
+  WriteOperation wo(_chunkwriter);
+  MutexedWriteOperation mwo(wo);
+  ReleaseOperation ro(_mspace, _mspace->live_list(true));
+  WriteReleaseOperation wro(&mwo, &ro);
+  process_live_list(wro, _mspace, true);
+  return wo.processed();
 }

 typedef DiscardOp<DefaultDiscarder<JfrCheckpointManager::Buffer> > DiscardOperation;
-typedef ExclusiveDiscardOp<DefaultDiscarder<JfrCheckpointManager::Buffer> > DiscardOperationEpochTransitionMspace;
-typedef CompositeOperation<DiscardOperation, CheckpointReleaseFreeOperation> DiscardFreeOperation;
-typedef CompositeOperation<DiscardOperation, CheckpointReleaseFullOperation> DiscardFullOperation;
-typedef CompositeOperation<DiscardOperationEpochTransitionMspace, CheckpointReleaseFreeOperation> DiscardEpochTransMspaceFreeOperation;
-typedef CompositeOperation<DiscardOperationEpochTransitionMspace, CheckpointReleaseFullOperation> DiscardEpochTransMspaceFullOperation;
+typedef CompositeOperation<DiscardOperation, ReleaseOperation> DiscardReleaseOperation;

 size_t JfrCheckpointManager::clear() {
+  JfrTraceIdLoadBarrier::clear();
   clear_type_set();
-  DiscardOperation mutex_discarder(mutexed);
-  CheckpointReleaseFreeOperation free_release_op(_free_list_mspace);
-  DiscardFreeOperation free_op(&mutex_discarder, &free_release_op);
-  process_free_list(free_op, _free_list_mspace);
-  CheckpointReleaseFullOperation full_release_op(_free_list_mspace);
-  DiscardFullOperation full_op(&mutex_discarder, &full_release_op);
-  process_full_list(full_op, _free_list_mspace);
-  DiscardOperationEpochTransitionMspace epoch_transition_discarder(mutexed);
-  CheckpointReleaseFreeOperation epoch_free_release_op(_epoch_transition_mspace);
-  DiscardEpochTransMspaceFreeOperation epoch_free_op(&epoch_transition_discarder, &epoch_free_release_op);
-  process_free_list(epoch_free_op, _epoch_transition_mspace);
-  CheckpointReleaseFullOperation epoch_full_release_op(_epoch_transition_mspace);
-  DiscardEpochTransMspaceFullOperation epoch_full_op(&epoch_transition_discarder, &epoch_full_release_op);
-  process_full_list(epoch_full_op, _epoch_transition_mspace);
-  synchronize_checkpoint_manager_with_current_epoch();
-  return mutex_discarder.elements() + epoch_transition_discarder.elements();
+  DiscardOperation discard_operation(mutexed); // mutexed discard mode
+  ReleaseOperation ro(_mspace, _mspace->live_list(true));
+  DiscardReleaseOperation discard_op(&discard_operation, &ro);
+  assert(_mspace->free_list_is_empty(), "invariant");
+  process_live_list(discard_op, _mspace, true); // previous epoch list
+  return discard_operation.elements();
 }

-// Optimization for write_static_type_set() and write_threads() is to write
-// directly into the epoch transition mspace because we will immediately
-// serialize and reset this mspace post-write.
-BufferPtr JfrCheckpointManager::epoch_transition_buffer(Thread* thread) {
-  assert(_epoch_transition_mspace->free_list_is_nonempty(), "invariant");
-  BufferPtr const buffer = lease(_epoch_transition_mspace, thread, _epoch_transition_mspace->min_elem_size());
-  DEBUG_ONLY(assert_lease(buffer);)
-  return buffer;
-}
-
-size_t JfrCheckpointManager::write_static_type_set() {
-  Thread* const thread = Thread::current();
-  ResourceMark rm(thread);
-  HandleMark hm(thread);
-  JfrCheckpointWriter writer(thread, epoch_transition_buffer(thread), STATICS);
+size_t JfrCheckpointManager::write_static_type_set(Thread* thread) {
+  assert(thread != NULL, "invariant");
+  JfrCheckpointWriter writer(true, thread, STATICS);
   JfrTypeManager::write_static_types(writer);
   return writer.used_size();
 }

-size_t JfrCheckpointManager::write_threads() {
-  Thread* const thread = Thread::current();
-  ResourceMark rm(thread);
-  HandleMark hm(thread);
-  JfrCheckpointWriter writer(thread, epoch_transition_buffer(thread), THREADS);
+size_t JfrCheckpointManager::write_threads(Thread* thread) {
+  assert(thread != NULL, "invariant");
+  JfrCheckpointWriter writer(true, thread, THREADS);
   JfrTypeManager::write_threads(writer);
   return writer.used_size();
 }

 size_t JfrCheckpointManager::write_static_type_set_and_threads() {
-  write_static_type_set();
-  write_threads();
-  return write_epoch_transition_mspace();
+  Thread* const thread = Thread::current();
+  ResourceMark rm(thread);
+  HandleMark hm(thread);
+  write_static_type_set(thread);
+  write_threads(thread);
+  return write();
 }

 void JfrCheckpointManager::on_rotation() {
@@ -449,36 +393,29 @@ void JfrCheckpointManager::write_type_set() {
     // can safepoint here
     MutexLocker cld_lock(thread, ClassLoaderDataGraph_lock);
     MutexLocker module_lock(thread, Module_lock);
-    JfrCheckpointWriter leakp_writer(thread);
-    JfrCheckpointWriter writer(thread);
+    JfrCheckpointWriter leakp_writer(true, thread);
+    JfrCheckpointWriter writer(true, thread);
     JfrTypeSet::serialize(&writer, &leakp_writer, false, false);
     ObjectSampleCheckpoint::on_type_set(leakp_writer);
   } else {
     // can safepoint here
     MutexLocker cld_lock(ClassLoaderDataGraph_lock);
     MutexLocker module_lock(Module_lock);
-    JfrCheckpointWriter writer(thread);
+    JfrCheckpointWriter writer(true, thread);
     JfrTypeSet::serialize(&writer, NULL, false, false);
   }
   write();
 }

-void JfrCheckpointManager::write_type_set_for_unloaded_classes() {
+void JfrCheckpointManager::on_unloading_classes() {
   assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
   JfrCheckpointWriter writer(Thread::current());
   const JfrCheckpointContext ctx = writer.context();
-  JfrTypeSet::serialize(&writer, NULL, true, false);
+  JfrTypeSet::on_unloading_classes(&writer);
   if (LeakProfiler::is_running()) {
     ObjectSampleCheckpoint::on_type_set_unload(writer);
   }
   if (!JfrRecorder::is_recording()) {
     // discard by rewind
     writer.set_context(ctx);
   }
 }

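on_unloading_classes() keeps the capture/rewind idiom of the old write_type_set_for_unloaded_classes(): the writer's context is captured before serializing and restored afterwards when no recording is active, which drops the bytes without a separate discard pass. The pattern in isolation (a sketch; write_payload and should_commit are hypothetical stand-ins):

    JfrCheckpointWriter writer(Thread::current());
    const JfrCheckpointContext ctx = writer.context(); // capture offset + count
    write_payload(writer);                             // hypothetical serializer
    if (!should_commit()) {                            // hypothetical predicate
      writer.set_context(ctx);                         // rewind: bytes discarded
    }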
-typedef MutexedWriteOp<WriteOperation> FlushOperation;
-
 size_t JfrCheckpointManager::flush_type_set() {
   size_t elements = 0;
   if (JfrTraceIdEpoch::has_changed_tag_state()) {
@@ -490,9 +427,9 @@ size_t JfrCheckpointManager::flush_type_set() {
   }
   if (is_constant_pending()) {
     WriteOperation wo(_chunkwriter);
-    FlushOperation fo(wo);
-    process_free_list(fo, _free_list_mspace);
-    process_full_list(fo, _free_list_mspace);
+    MutexedWriteOperation mwo(wo);
+    assert(_mspace->live_list_is_nonempty(), "invariant");
+    process_live_list(mwo, _mspace);
   }
   return elements;
 }
@@ -34,7 +34,6 @@ class JfrCheckpointManager;
 class JfrChunkWriter;
 class JfrSerializer;
 class JfrTypeManager;
-class Mutex;
 class Thread;

 struct JfrCheckpointEntry {
@@ -45,7 +44,7 @@ struct JfrCheckpointEntry {
   juint nof_segments;
 };

-typedef JfrMemorySpace<JfrCheckpointManager, JfrMspaceRetrieval, JfrLinkedList<JfrBuffer> > JfrCheckpointMspace;
+typedef JfrMemorySpace<JfrCheckpointManager, JfrMspaceRetrieval, JfrLinkedList<JfrBuffer>, JfrLinkedList<JfrBuffer>, true > JfrCheckpointMspace;

 //
 // Responsible for maintaining checkpoints and by implication types.
@@ -54,59 +53,49 @@ typedef JfrMemorySpace<JfrCheckpointManager, JfrMspaceRetrieval, JfrLinkedList<J
 //
 class JfrCheckpointManager : public JfrCHeapObj {
  public:
-  size_t flush_type_set();
-  static void create_thread_blob(Thread* thread);
-  static void write_thread_checkpoint(Thread* thread);
-  void register_service_thread(const Thread* thread);
+  typedef JfrCheckpointMspace::Node Buffer;
+  typedef JfrCheckpointMspace::NodePtr BufferPtr;

  private:
-  JfrCheckpointMspace* _free_list_mspace;
-  JfrCheckpointMspace* _epoch_transition_mspace;
-  const Thread* _service_thread;
+  JfrCheckpointMspace* _mspace;
   JfrChunkWriter& _chunkwriter;
-  bool _checkpoint_epoch_state;

-  JfrCheckpointMspace* lookup(BufferPtr old) const;
-  bool use_epoch_transition_mspace(const Thread* thread) const;
-  size_t write_epoch_transition_mspace();
-  BufferPtr epoch_transition_buffer(Thread* thread);
-  JfrCheckpointManager(JfrChunkWriter& cw);
-  ~JfrCheckpointManager();
-  static JfrCheckpointManager& instance();
-  static JfrCheckpointManager* create(JfrChunkWriter& cw);
-  bool initialize();
-  static void destroy();
-
-  static BufferPtr lease(Thread* thread, size_t size = 0);
+  bool lookup(Buffer* old) const;
+  static BufferPtr lease(Thread* thread, bool previous_epoch = false, size_t size = 0);
   static BufferPtr lease(BufferPtr old, Thread* thread, size_t size = 0);
-  static BufferPtr lease(JfrCheckpointMspace* mspace, Thread* thread, size_t size = 0);
   static BufferPtr flush(BufferPtr old, size_t used, size_t requested, Thread* thread);

   size_t clear();
   size_t write();
   size_t flush();
-  void notify_threads();

-  size_t write_static_type_set();
-  size_t write_threads();
+  size_t write_static_type_set(Thread* thread);
+  size_t write_threads(Thread* thread);
   size_t write_static_type_set_and_threads();
   void clear_type_set();
   void write_type_set();
-  static void write_type_set_for_unloaded_classes();

   void begin_epoch_shift();
   void end_epoch_shift();
-  void synchronize_checkpoint_manager_with_current_epoch();

+  void notify_threads();
+
+  JfrCheckpointManager(JfrChunkWriter& cw);
+  ~JfrCheckpointManager();
+
+  static JfrCheckpointManager& instance();
+  static JfrCheckpointManager* create(JfrChunkWriter& cw);
+  bool initialize();
+  static void on_unloading_classes();
+  void on_rotation();
+  static void destroy();

   // mspace callback
   void register_full(BufferPtr buffer, Thread* thread);

+ public:
+  size_t flush_type_set();
+  static void create_thread_blob(Thread* thread);
+  static void write_thread_checkpoint(Thread* thread);

   friend class Jfr;
   friend class JfrRecorder;
   friend class JfrRecorderService;
@@ -114,7 +103,7 @@ class JfrCheckpointManager : public JfrCHeapObj {
   friend class JfrCheckpointWriter;
   friend class JfrSerializer;
   friend class JfrStackTraceRepository;
-  template <typename, template <typename> class, typename, typename>
+  template <typename, template <typename> class, typename, typename, bool>
   friend class JfrMemorySpace;
 };

@@ -45,8 +45,8 @@ JfrCheckpointWriter::JfrCheckpointWriter(JfrCheckpointType type /* GENERIC */) :
   }
 }

-JfrCheckpointWriter::JfrCheckpointWriter(Thread* t, bool header /* true */, JfrCheckpointType type /* GENERIC */) :
-  JfrCheckpointWriterBase(JfrCheckpointManager::lease(t), t),
+JfrCheckpointWriter::JfrCheckpointWriter(Thread* thread, bool header /* true */, JfrCheckpointType type /* GENERIC */) :
+  JfrCheckpointWriterBase(JfrCheckpointManager::lease(thread), thread),
   _time(JfrTicks::now()),
   _offset(0),
   _count(0),
@@ -59,8 +59,8 @@ JfrCheckpointWriter::JfrCheckpointWriter(Thread* t, bool header /* true */, JfrC
   }
 }

-JfrCheckpointWriter::JfrCheckpointWriter(Thread* t, JfrBuffer* buffer, JfrCheckpointType type /* GENERIC */) :
-  JfrCheckpointWriterBase(buffer, t),
+JfrCheckpointWriter::JfrCheckpointWriter(bool previous_epoch, Thread* thread, JfrCheckpointType type /* GENERIC */) :
+  JfrCheckpointWriterBase(JfrCheckpointManager::lease(thread, previous_epoch), thread),
   _time(JfrTicks::now()),
   _offset(0),
   _count(0),
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -56,6 +56,7 @@ struct JfrCheckpointContext {
 class JfrCheckpointWriter : public JfrCheckpointWriterBase {
   friend class JfrCheckpointManager;
+  friend class JfrSerializerRegistration;
   friend class JfrTypeManager;
  private:
   JfrTicks _time;
   int64_t _offset;
@@ -68,10 +69,10 @@ class JfrCheckpointWriter : public JfrCheckpointWriterBase {
   void increment();
   const u1* session_data(size_t* size, bool move = false, const JfrCheckpointContext* ctx = NULL);
   void release();
-  JfrCheckpointWriter(Thread* t, JfrBuffer* buffer, JfrCheckpointType type = GENERIC);
+  JfrCheckpointWriter(bool previous_epoch, Thread* thread, JfrCheckpointType type = GENERIC);
  public:
   JfrCheckpointWriter(JfrCheckpointType type = GENERIC);
-  JfrCheckpointWriter(Thread* t, bool header = true, JfrCheckpointType mode = GENERIC);
+  JfrCheckpointWriter(Thread* thread, bool header = true, JfrCheckpointType mode = GENERIC);
   ~JfrCheckpointWriter();
   void write_type(JfrTypeId type_id);
   void write_count(u4 nof_entries);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,10 +47,10 @@ static void write_metadata_blob(JfrChunkWriter& chunkwriter) {
   chunkwriter.write_unbuffered(data_address, length);
 }

-void JfrMetadataEvent::write(JfrChunkWriter& chunkwriter) {
+bool JfrMetadataEvent::write(JfrChunkWriter& chunkwriter) {
   assert(chunkwriter.is_valid(), "invariant");
   if (last_metadata_id == metadata_id && chunkwriter.has_metadata()) {
-    return;
+    return false;
   }
   // header
   const int64_t metadata_offset = chunkwriter.reserve(sizeof(u4));
@@ -65,6 +65,7 @@ void JfrMetadataEvent::write(JfrChunkWriter& chunkwriter) {
   chunkwriter.write_padded_at_offset((u4)size_written, metadata_offset);
   chunkwriter.set_last_metadata_offset(metadata_offset);
   last_metadata_id = metadata_id;
+  return true;
 }

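JfrMetadataEvent::write() now reports whether a metadata event was actually emitted; the early return covers the case where the current chunk already carries the latest metadata. A hypothetical caller, to illustrate the intent (not part of this diff):

    if (JfrMetadataEvent::write(chunkwriter)) {
      // a fresh metadata event landed in this chunk; follow-up
      // bookkeeping that only applies to new metadata goes here
    }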
 void JfrMetadataEvent::update(jbyteArray metadata) {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,7 +36,7 @@ class JfrChunkWriter;
 //
 class JfrMetadataEvent : AllStatic {
  public:
-  static void write(JfrChunkWriter& writer);
+  static bool write(JfrChunkWriter& writer);
   static void update(jbyteArray metadata);
 };

@@ -28,12 +28,13 @@
 #include "classfile/moduleEntry.hpp"
 #include "classfile/packageEntry.hpp"
 #include "classfile/symbolTable.hpp"
 #include "jfr/jfr.hpp"
-#include "jfr/jni/jfrGetAllEventClasses.hpp"
 #include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
 #include "jfr/recorder/checkpoint/types/jfrTypeSet.hpp"
 #include "jfr/recorder/checkpoint/types/jfrTypeSetUtils.hpp"
 #include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
+#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdLoadBarrier.inline.hpp"
+#include "jfr/recorder/jfrRecorder.hpp"
+#include "jfr/support/jfrKlassUnloading.hpp"
 #include "jfr/utilities/jfrHashtable.hpp"
 #include "jfr/utilities/jfrTypes.hpp"
 #include "jfr/writers/jfrTypeWriterHost.hpp"
@@ -44,6 +45,7 @@
 #include "oops/oop.inline.hpp"
 #include "utilities/accessFlags.hpp"
 #include "utilities/bitMap.inline.hpp"
+#include "utilities/stack.inline.hpp"

 typedef const Klass* KlassPtr;
 typedef const PackageEntry* PkgPtr;
@@ -99,7 +101,7 @@ static traceid get_bootstrap_name(bool leakp) {
 template <typename T>
 static traceid artifact_id(const T* ptr) {
   assert(ptr != NULL, "invariant");
-  return TRACE_ID(ptr);
+  return JfrTraceId::load_raw(ptr);
 }

 static traceid package_id(KlassPtr klass, bool leakp) {
@@ -215,53 +217,96 @@ static bool is_implied(const Klass* klass) {
   return klass->is_subclass_of(SystemDictionary::ClassLoader_klass()) || klass == SystemDictionary::Object_klass();
 }

-static void do_implied(Klass* klass) {
+static void do_klass(Klass* klass) {
   assert(klass != NULL, "invariant");
-  if (is_implied(klass)) {
-    assert(_subsystem_callback != NULL, "invariant");
-    _subsystem_callback->do_artifact(klass);
-  }
+  assert(_flushpoint ? USED_THIS_EPOCH(klass) : USED_PREVIOUS_EPOCH(klass), "invariant");
+  assert(_subsystem_callback != NULL, "invariant");
+  _subsystem_callback->do_artifact(klass);
 }

+static void do_loader_klass(const Klass* klass) {
+  if (klass != NULL && _artifacts->should_do_loader_klass(klass)) {
+    if (_leakp_writer != NULL) {
+      SET_LEAKP(klass);
+    }
+    SET_TRANSIENT(klass);
+    _subsystem_callback->do_artifact(klass);
+  }
+}
+
-static void do_unloaded_klass(Klass* klass) {
+static bool register_klass_unload(Klass* klass) {
   assert(klass != NULL, "invariant");
-  assert(_subsystem_callback != NULL, "invariant");
-  if (IS_JDK_JFR_EVENT_SUBKLASS(klass)) {
-    JfrEventClasses::increment_unloaded_event_class();
-  }
-  if (USED_THIS_EPOCH(klass)) {
-    ObjectSampleCheckpoint::on_klass_unload(klass);
-    _subsystem_callback->do_artifact(klass);
-    return;
-  }
-  do_implied(klass);
+  return JfrKlassUnloading::on_unload(klass);
 }

-static void do_klass(Klass* klass) {
+static void on_klass_unload(Klass* klass) {
+  register_klass_unload(klass);
+}
+
+static size_t register_unloading_klasses() {
+  ClassLoaderDataGraph::classes_unloading_do(&on_klass_unload);
+  return 0;
+}
+
+static void do_unloading_klass(Klass* klass) {
   assert(klass != NULL, "invariant");
   assert(_subsystem_callback != NULL, "invariant");
-  if (_flushpoint) {
-    if (USED_THIS_EPOCH(klass)) {
-      _subsystem_callback->do_artifact(klass);
-      return;
-    }
-  } else {
-    if (USED_PREV_EPOCH(klass)) {
-      _subsystem_callback->do_artifact(klass);
-      return;
-    }
-  }
-  do_implied(klass);
+  if (register_klass_unload(klass)) {
+    _subsystem_callback->do_artifact(klass);
+    do_loader_klass(klass->class_loader_data()->class_loader_klass());
+  }
 }

+/*
+ * Abstract klasses are filtered out unconditionally.
+ * If a klass is not yet initialized, i.e yet to run its <clinit>
+ * it is also filtered out so we don't accidentally
+ * trigger initialization.
+ */
+static bool is_classloader_klass_whitelisted(const Klass* k) {
+  assert(k != NULL, "invariant");
+  return !(k->is_abstract() || k->should_be_initialized());
+}
+
+static void do_classloaders() {
+  Stack<const Klass*, mtTracing> mark_stack;
+  mark_stack.push(SystemDictionary::ClassLoader_klass()->subklass());
+
+  while (!mark_stack.is_empty()) {
+    const Klass* const current = mark_stack.pop();
+    assert(current != NULL, "null element in stack!");
+    if (is_classloader_klass_whitelisted(current)) {
+      do_loader_klass(current);
+    }
+
+    // subclass (depth)
+    const Klass* next_klass = current->subklass();
+    if (next_klass != NULL) {
+      mark_stack.push(next_klass);
+    }
+
+    // siblings (breadth)
+    next_klass = current->next_sibling();
+    if (next_klass != NULL) {
+      mark_stack.push(next_klass);
+    }
+  }
+  assert(mark_stack.is_empty(), "invariant");
+}
+
 static void do_object() {
+  SET_TRANSIENT(SystemDictionary::Object_klass());
   do_klass(SystemDictionary::Object_klass());
 }

 static void do_klasses() {
   if (_class_unload) {
-    ClassLoaderDataGraph::classes_unloading_do(&do_unloaded_klass);
+    ClassLoaderDataGraph::classes_unloading_do(&do_unloading_klass);
     return;
   }
-  ClassLoaderDataGraph::classes_do(&do_klass);
+  JfrTraceIdLoadBarrier::do_klasses(&do_klass, previous_epoch());
+  do_classloaders();
+  do_object();
 }

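This hunk carries the change named in the commit title. Previously do_klasses() walked every loaded klass via ClassLoaderDataGraph::classes_do; now JfrTraceIdLoadBarrier::do_klasses supplies exactly the klasses tagged through the load barrier during the epoch, so iteration time scales with the number of incrementally tagged artifacts rather than with all loaded classes. A sketch of the underlying idea — an assumption about the mechanism, since the real barrier's queue details differ:

    // Illustrative only: first-time tagging in an epoch also enqueues the
    // klass, so do_klasses() later visits a short, precise list.
    static GrowableArray<const Klass*>* _queue[2]; // one queue per epoch

    inline void tag_and_enqueue(const Klass* k) {
      const int epoch = JfrTraceIdEpoch::epoch() ? 1 : 0;
      _queue[epoch]->append(k); // assume caller checks the epoch tag bit first
    }

    void do_klasses(void f(Klass*), bool previous_epoch) {
      const int current = JfrTraceIdEpoch::epoch() ? 1 : 0;
      GrowableArray<const Klass*>* const q = _queue[previous_epoch ? 1 - current : current];
      for (int i = 0; i < q->length(); ++i) {
        f(const_cast<Klass*>(q->at(i)));
      }
    }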
 typedef SerializePredicate<KlassPtr> KlassPredicate;
@@ -317,7 +362,7 @@ template <typename T>
 static void do_previous_epoch_artifact(JfrArtifactClosure* callback, T* value) {
   assert(callback != NULL, "invariant");
   assert(value != NULL, "invariant");
-  if (USED_PREV_EPOCH(value)) {
+  if (USED_PREVIOUS_EPOCH(value)) {
     callback->do_artifact(value);
     assert(IS_NOT_SERIALIZED(value), "invariant");
     return;
@@ -328,7 +373,7 @@ static void do_previous_epoch_artifact(JfrArtifactClosure* callback, T* value) {
   assert(IS_NOT_SERIALIZED(value), "invariant");
 }

-typedef JfrArtifactCallbackHost<KlassPtr, KlassArtifactRegistrator> RegistrationCallback;
+typedef JfrArtifactCallbackHost<KlassPtr, KlassArtifactRegistrator> RegisterKlassCallback;

 static void register_klass(Klass* klass) {
   assert(klass != NULL, "invariant");
@@ -336,16 +381,12 @@ static void register_klass(Klass* klass) {
   do_previous_epoch_artifact(_subsystem_callback, klass);
 }

-static void do_register_klasses() {
-  ClassLoaderDataGraph::classes_do(&register_klass);
-}
-
 static void register_klasses() {
   assert(!_artifacts->has_klass_entries(), "invariant");
   KlassArtifactRegistrator reg(_artifacts);
-  RegistrationCallback callback(&reg);
+  RegisterKlassCallback callback(&reg);
   _subsystem_callback = &callback;
-  do_register_klasses();
+  ClassLoaderDataGraph::classes_do(&register_klass);
 }

 static int write_package(JfrCheckpointWriter* writer, PkgPtr pkg, bool leakp) {
@@ -556,6 +597,7 @@ static int write_classloader(JfrCheckpointWriter* writer, CldPtr cld, bool leakp
     writer->write((traceid)0); // class loader type id (absence of)
     writer->write(get_bootstrap_name(leakp)); // maps to synthetic name -> "bootstrap"
   } else {
+    assert(_class_unload ? true : IS_SERIALIZED(class_loader_klass), "invariant");
     writer->write(artifact_id(cld)); // class loader instance id
     writer->write(artifact_id(class_loader_klass)); // class loader type id
     writer->write(mark_symbol(cld->name(), leakp)); // class loader instance name
@@ -947,6 +989,7 @@ static size_t teardown() {
   const size_t total_count = _artifacts->total_count();
   if (previous_epoch()) {
     clear_klasses_and_methods();
+    JfrKlassUnloading::clear();
     clear_artifacts = true;
     ++checkpoint_id;
   }
@@ -963,6 +1006,9 @@ static void setup(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer
   } else {
     _artifacts->initialize(class_unload, clear_artifacts);
   }
+  if (!_class_unload) {
+    JfrKlassUnloading::sort(previous_epoch());
+  }
   clear_artifacts = false;
   assert(_artifacts != NULL, "invariant");
   assert(!_artifacts->has_klass_entries(), "invariant");
@@ -992,6 +1038,7 @@ size_t JfrTypeSet::serialize(JfrCheckpointWriter* writer, JfrCheckpointWriter* l
  * Clear all tags from the previous epoch.
  */
 void JfrTypeSet::clear() {
+  JfrKlassUnloading::clear();
   clear_artifacts = true;
   setup(NULL, NULL, false, false);
   register_klasses();
@@ -1000,3 +1047,10 @@ void JfrTypeSet::clear() {
   clear_classloaders();
   clear_klasses_and_methods();
 }
+
+size_t JfrTypeSet::on_unloading_classes(JfrCheckpointWriter* writer) {
+  if (JfrRecorder::is_recording()) {
+    return serialize(writer, NULL, true, false);
+  }
+  return register_unloading_klasses();
+}
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,7 @@ class JfrTypeSet : AllStatic {
  public:
   static void clear();
   static size_t serialize(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer, bool class_unload, bool flushpoint);
+  static size_t on_unloading_classes(JfrCheckpointWriter* writer);
 };

 #endif // SHARE_JFR_RECORDER_CHECKPOINT_TYPES_JFRTYPESET_HPP
@@ -24,6 +24,8 @@

 #include "precompiled.hpp"
 #include "jfr/recorder/checkpoint/types/jfrTypeSetUtils.hpp"
+#include "jfr/utilities/jfrPredicate.hpp"
+#include "jfr/utilities/jfrRelation.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/symbol.hpp"
@@ -255,7 +257,8 @@ JfrArtifactSet::JfrArtifactSet(bool class_unload) : _symbol_id(new JfrSymbolId()
   assert(_klass_list != NULL, "invariant");
 }

-static const size_t initial_class_list_size = 200;
+static const size_t initial_klass_list_size = 256;
+const int initial_klass_loader_set_size = 64;

 void JfrArtifactSet::initialize(bool class_unload, bool clear /* false */) {
   assert(_symbol_id != NULL, "invariant");
@@ -265,13 +268,14 @@ void JfrArtifactSet::initialize(bool class_unload, bool clear /* false */) {
   _symbol_id->set_class_unload(class_unload);
   _total_count = 0;
   // resource allocation
-  _klass_list = new GrowableArray<const Klass*>(initial_class_list_size, false, mtTracing);
+  _klass_list = new GrowableArray<const Klass*>(initial_klass_list_size, false, mtTracing);
+  _klass_loader_set = new GrowableArray<const Klass*>(initial_klass_loader_set_size, false, mtTracing);
 }

 JfrArtifactSet::~JfrArtifactSet() {
   _symbol_id->clear();
   delete _symbol_id;
-  // _klass_list will be cleared by a ResourceMark
+  // _klass_list and _klass_loader_list will be cleared by a ResourceMark
 }

 traceid JfrArtifactSet::bootstrap_name(bool leakp) {
@@ -307,10 +311,15 @@ int JfrArtifactSet::entries() const {
   return _klass_list->length();
 }

+bool JfrArtifactSet::should_do_loader_klass(const Klass* k) {
+  assert(k != NULL, "invariant");
+  assert(_klass_loader_set != NULL, "invariant");
+  return !JfrMutablePredicate<const Klass*, compare_klasses>::test(_klass_loader_set, k);
+}
+
 void JfrArtifactSet::register_klass(const Klass* k) {
   assert(k != NULL, "invariant");
   assert(_klass_list != NULL, "invariant");
   assert(_klass_list->find(k) == -1, "invariant");
   _klass_list->append(k);
 }

@@ -30,7 +30,9 @@
 #include "jfr/utilities/jfrHashtable.hpp"
 #include "oops/klass.hpp"
 #include "oops/method.hpp"
-#include "utilities/growableArray.hpp"
+
+template <typename T>
+class GrowableArray;

 // Composite callback/functor building block
 template <typename T, typename Func1, typename Func2>
@@ -81,8 +83,8 @@ class ClearArtifact {
   bool operator()(T const& value) {
     CLEAR_SERIALIZED(value);
     assert(IS_NOT_SERIALIZED(value), "invariant");
-    SET_PREV_EPOCH_CLEARED_BIT(value);
-    CLEAR_METHOD_AND_CLASS_PREV_EPOCH(value);
+    SET_PREVIOUS_EPOCH_CLEARED_BIT(value);
+    CLEAR_PREVIOUS_EPOCH_METHOD_AND_CLASS(value);
     return true;
   }
 };
@@ -91,11 +93,11 @@ template <>
 class ClearArtifact<const Method*> {
  public:
   bool operator()(const Method* method) {
-    assert(METHOD_FLAG_USED_PREV_EPOCH(method), "invariant");
-    CLEAR_METHOD_SERIALIZED(method);
+    assert(METHOD_FLAG_USED_PREVIOUS_EPOCH(method), "invariant");
+    CLEAR_SERIALIZED_METHOD(method);
     assert(METHOD_NOT_SERIALIZED(method), "invariant");
-    SET_PREV_EPOCH_METHOD_CLEARED_BIT(method);
-    CLEAR_METHOD_FLAG_USED_PREV_EPOCH(method);
+    SET_PREVIOUS_EPOCH_METHOD_CLEARED_BIT(method);
+    CLEAR_PREVIOUS_EPOCH_METHOD_FLAG(method);
     return true;
   }
 };
@@ -145,7 +147,7 @@ public:
     if (_current_epoch) {
       return leakp ? IS_LEAKP(klass) : METHOD_USED_THIS_EPOCH(klass);
     }
-    return leakp ? IS_LEAKP(klass) : METHOD_USED_PREV_EPOCH(klass);
+    return leakp ? IS_LEAKP(klass) : METHOD_USED_PREVIOUS_EPOCH(klass);
   }
 };

@@ -158,7 +160,7 @@ class MethodFlagPredicate {
     if (_current_epoch) {
       return leakp ? IS_METHOD_LEAKP_USED(method) : METHOD_FLAG_USED_THIS_EPOCH(method);
     }
-    return leakp ? IS_METHOD_LEAKP_USED(method) : METHOD_FLAG_USED_PREV_EPOCH(method);
+    return leakp ? IS_METHOD_LEAKP_USED(method) : METHOD_FLAG_USED_PREVIOUS_EPOCH(method);
   }
 };

@@ -291,6 +293,7 @@ class JfrArtifactSet : public JfrCHeapObj {
  private:
   JfrSymbolId* _symbol_id;
   GrowableArray<const Klass*>* _klass_list;
+  GrowableArray<const Klass*>* _klass_loader_set;
   size_t _total_count;

  public:
@@ -315,6 +318,7 @@ class JfrArtifactSet : public JfrCHeapObj {
   int entries() const;
   size_t total_count() const;
   void register_klass(const Klass* k);
+  bool should_do_loader_klass(const Klass* k);

   template <typename Functor>
   void iterate_klasses(Functor& functor) const {
@@ -27,7 +27,6 @@
 #include "classfile/javaClasses.inline.hpp"
 #include "classfile/symbolTable.hpp"
 #include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
-#include "jfrfiles/jfrTypes.hpp"
 #include "jfr/utilities/jfrTypes.hpp"
 #include "oops/arrayKlass.inline.hpp"
 #include "oops/klass.inline.hpp"
@@ -153,6 +152,14 @@ traceid JfrTraceId::assign_thread_id() {
   return next_thread_id();
 }

+traceid JfrTraceId::load_raw(jclass jc) {
+  assert(jc != NULL, "invariant");
+  assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_vm, "invariant");
+  const oop my_oop = JNIHandles::resolve(jc);
+  assert(my_oop != NULL, "invariant");
+  return load_raw(java_lang_Class::as_Klass(my_oop));
+}
+
 // used by CDS / APPCDS as part of "remove_unshareable_info"
 void JfrTraceId::remove(const Klass* k) {
   assert(k != NULL, "invariant");
@@ -174,22 +181,6 @@ void JfrTraceId::restore(const Klass* k) {
   k->set_trace_id(next_class_id() | event_flags);
 }

-traceid JfrTraceId::get(jclass jc) {
-  assert(jc != NULL, "invariant");
-  assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_vm, "invariant");
-  const oop my_oop = JNIHandles::resolve(jc);
-  assert(my_oop != NULL, "invariant");
-  return get(java_lang_Class::as_Klass(my_oop));
-}
-
-traceid JfrTraceId::use(jclass jc) {
-  assert(jc != NULL, "invariant");
-  assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_vm, "invariant");
-  const oop my_oop = JNIHandles::resolve(jc);
-  assert(my_oop != NULL, "invariant");
-  return use(java_lang_Class::as_Klass(my_oop));
-}
-
 bool JfrTraceId::in_visible_set(const jclass jc) {
   assert(jc != NULL, "invariant");
   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_vm, "invariant");
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -85,21 +85,25 @@ class JfrTraceId : public AllStatic {
   static void assign(const ClassLoaderData* cld);
   static traceid assign_thread_id();

-  static traceid get(const Klass* klass);
-  static traceid get(jclass jc);
-  static traceid get(const Thread* thread);
+  // through load barrier
+  static traceid load(const Klass* klass);
+  static traceid load(jclass jc);
+  static traceid load(const Method* method);
+  static traceid load(const Klass* klass, const Method* method);
+  static traceid load(const ModuleEntry* module);
+  static traceid load(const PackageEntry* package);
+  static traceid load(const ClassLoaderData* cld);
+  static traceid load_leakp(const Klass* klass, const Method* method); // leak profiler

-  // tag construct as used, returns pre-tagged traceid
-  static traceid use(const Klass* klass);
-  static traceid use(jclass jc);
-  static traceid use(const Method* method);
-  static traceid use(const Klass* klass, const Method* method);
-  static traceid use(const ModuleEntry* module);
-  static traceid use(const PackageEntry* package);
-  static traceid use(const ClassLoaderData* cld);
-
-  // leak profiler
-  static void set_leakp(const Klass* klass, const Method* method);
+  // load barrier elision
+  static traceid load_raw(const Klass* klass);
+  static traceid load_raw(jclass jc);
+  static traceid load_raw(const Thread* thread);
+  static traceid load_raw(const Method* method);
+  static traceid load_raw(const Klass* klass, const Method* method);
+  static traceid load_raw(const ModuleEntry* module);
+  static traceid load_raw(const PackageEntry* package);
+  static traceid load_raw(const ClassLoaderData* cld);

   static void remove(const Klass* klass);
   static void restore(const Klass* klass);
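The header's own comments state the contract: load() goes through the load barrier, i.e. it returns the traceid and tags the artifact as in-use for the current epoch (making it part of the next type-set iteration), while load_raw() elides the barrier and only reads the stored id. A hedged usage contrast (the two helper functions are hypothetical):

    // load: for event field values, where the emitted id must be backed
    // by type-set metadata, so the klass has to be tagged for iteration.
    traceid id_for_event(const Klass* k) { return JfrTraceId::load(k); }

    // load_raw: for queries that merely echo the id back (the jfr_type_id
    // and on_retransform call sites above use it for exactly this).
    traceid id_for_query(const Klass* k) { return JfrTraceId::load_raw(k); }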
@@ -25,112 +25,77 @@
 #ifndef SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEID_INLINE_HPP
 #define SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEID_INLINE_HPP

 #include "classfile/classLoaderData.hpp"
 #include "classfile/moduleEntry.hpp"
 #include "classfile/packageEntry.hpp"
 #include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.hpp"
+#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdLoadBarrier.inline.hpp"
 #include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdBits.inline.hpp"
 #include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp"
 #include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdMacros.hpp"
 #include "jfr/support/jfrKlassExtension.hpp"
 #include "oops/arrayKlass.hpp"
 #include "oops/klass.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/method.hpp"
 #include "runtime/thread.inline.hpp"
 #include "utilities/debug.hpp"

-inline bool is_not_tagged(traceid value) {
-  const traceid this_epoch_bit = JfrTraceIdEpoch::in_use_this_epoch_bit();
-  return (value & ((this_epoch_bit << META_SHIFT) | this_epoch_bit)) != this_epoch_bit;
+inline traceid JfrTraceId::load(jclass jc) {
+  return JfrTraceIdLoadBarrier::load(jc);
 }

+inline traceid JfrTraceId::load(const Klass* klass) {
+  return JfrTraceIdLoadBarrier::load(klass);
+}
+
+inline traceid JfrTraceId::load(const Method* method) {
+  return JfrTraceIdLoadBarrier::load(method);
+}
+
+inline traceid JfrTraceId::load(const Klass* klass, const Method* method) {
+  return JfrTraceIdLoadBarrier::load(klass, method);
+}
+
+inline traceid JfrTraceId::load(const ModuleEntry* module) {
+  return JfrTraceIdLoadBarrier::load(module);
+}
+
+inline traceid JfrTraceId::load(const PackageEntry* package) {
+  return JfrTraceIdLoadBarrier::load(package);
+}
+
+inline traceid JfrTraceId::load(const ClassLoaderData* cld) {
+  return JfrTraceIdLoadBarrier::load(cld);
+}
+
+inline traceid JfrTraceId::load_leakp(const Klass* klass, const Method* method) {
+  return JfrTraceIdLoadBarrier::load_leakp(klass, method);
+}
+
 template <typename T>
-inline bool should_tag(const T* t) {
+inline traceid raw_load(const T* t) {
   assert(t != NULL, "invariant");
-  return is_not_tagged(TRACE_ID_RAW(t));
+  return TRACE_ID(t);
 }

-template <>
-inline bool should_tag<Method>(const Method* method) {
-  assert(method != NULL, "invariant");
-  return is_not_tagged((traceid)method->trace_flags());
+inline traceid JfrTraceId::load_raw(const Klass* klass) {
+  return raw_load(klass);
 }

-template <typename T>
-inline traceid set_used_and_get(const T* type) {
-  assert(type != NULL, "invariant");
-  if (should_tag(type)) {
-    SET_USED_THIS_EPOCH(type);
-    JfrTraceIdEpoch::set_changed_tag_state();
-  }
-  assert(USED_THIS_EPOCH(type), "invariant");
-  return TRACE_ID(type);
-}
-
-inline traceid JfrTraceId::get(const Klass* klass) {
-  assert(klass != NULL, "invariant");
-  return TRACE_ID(klass);
-}
-
-inline traceid JfrTraceId::get(const Thread* t) {
+inline traceid JfrTraceId::load_raw(const Thread* t) {
   assert(t != NULL, "invariant");
   return TRACE_ID_RAW(t->jfr_thread_local());
 }

-inline traceid JfrTraceId::use(const Klass* klass) {
-  assert(klass != NULL, "invariant");
-  if (should_tag(klass)) {
|
||||
SET_USED_THIS_EPOCH(klass);
|
||||
JfrTraceIdEpoch::set_changed_tag_state();
|
||||
}
|
||||
assert(USED_THIS_EPOCH(klass), "invariant");
|
||||
return get(klass);
|
||||
inline traceid JfrTraceId::load_raw(const Method* method) {
|
||||
return (METHOD_ID(method->method_holder(), method));
|
||||
}
|
||||
|
||||
inline traceid JfrTraceId::use(const Method* method) {
|
||||
return use(method->method_holder(), method);
|
||||
inline traceid JfrTraceId::load_raw(const ModuleEntry* module) {
|
||||
return raw_load(module);
|
||||
}
|
||||
|
||||
inline traceid JfrTraceId::use(const Klass* klass, const Method* method) {
|
||||
assert(klass != NULL, "invariant");
|
||||
assert(method != NULL, "invariant");
|
||||
if (METHOD_FLAG_NOT_USED_THIS_EPOCH(method)) {
|
||||
SET_METHOD_AND_CLASS_USED_THIS_EPOCH(klass);
|
||||
SET_METHOD_FLAG_USED_THIS_EPOCH(method);
|
||||
assert(METHOD_AND_CLASS_USED_THIS_EPOCH(klass), "invariant");
|
||||
assert(METHOD_FLAG_USED_THIS_EPOCH(method), "invariant");
|
||||
JfrTraceIdEpoch::set_changed_tag_state();
|
||||
}
|
||||
return (METHOD_ID(klass, method));
|
||||
inline traceid JfrTraceId::load_raw(const PackageEntry* package) {
|
||||
return raw_load(package);
|
||||
}
|
||||
|
||||
inline traceid JfrTraceId::use(const ModuleEntry* module) {
|
||||
return set_used_and_get(module);
|
||||
}
|
||||
|
||||
inline traceid JfrTraceId::use(const PackageEntry* package) {
|
||||
return set_used_and_get(package);
|
||||
}
|
||||
|
||||
inline traceid JfrTraceId::use(const ClassLoaderData* cld) {
|
||||
assert(cld != NULL, "invariant");
|
||||
return cld->has_class_mirror_holder() ? 0 : set_used_and_get(cld);
|
||||
}
|
||||
|
||||
inline void JfrTraceId::set_leakp(const Klass* klass, const Method* method) {
|
||||
assert(klass != NULL, "invariant");
|
||||
assert(METHOD_AND_CLASS_USED_THIS_EPOCH(klass), "invariant");
|
||||
assert(method != NULL, "invariant");
|
||||
assert(klass == method->method_holder(), "invariant");
|
||||
if (METHOD_FLAG_NOT_USED_THIS_EPOCH(method)) {
|
||||
// the method is already logically tagged, just like the klass,
|
||||
// but because of redefinition, the latest Method*
|
||||
// representation might not have a reified tag.
|
||||
SET_METHOD_FLAG_USED_THIS_EPOCH(method);
|
||||
assert(METHOD_FLAG_USED_THIS_EPOCH(method), "invariant");
|
||||
}
|
||||
SET_LEAKP(klass);
|
||||
SET_METHOD_LEAKP(method);
|
||||
inline traceid JfrTraceId::load_raw(const ClassLoaderData* cld) {
|
||||
return raw_load(cld);
|
||||
}
|
||||
|
||||
inline bool JfrTraceId::in_visible_set(const Klass* klass) {
|
||||
|
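
A note on the raw_load helper above: TRACE_ID strips the tag and meta bytes, which occupy the low 16 bits of the traceid word (TRACE_ID_SHIFT is 16 in the macros hunk below). A worked example of the arithmetic (illustrative values, not commit code):

// TRACE_ID_RAW(klass) -> 0xA30103  (id 0xA3 in the upper bits,
//                                   meta byte 0x01, tag byte 0x03)
// TRACE_ID(klass)     -> 0xA30103 >> 16 == 0xA3
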
@ -0,0 +1,62 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#ifndef SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDBITS_HPP
#define SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDBITS_HPP

#include "jni.h"
#include "jfr/utilities/jfrTypes.hpp"
#include "memory/allocation.hpp"

class JfrTraceIdBits : AllStatic {
public:
template <typename T>
static traceid load(const T* ptr);

template <typename T>
static void store(jbyte bits, const T* ptr);

template <typename T>
static void cas(jbyte bits, const T* ptr);

template <typename T>
static void meta_store(jbyte bits, const T* ptr);

template <typename T>
static void mask_store(jbyte mask, const T* ptr);

template <typename T>
static void meta_mask_store(jbyte mask, const T* ptr);

template <typename T>
static void clear(jbyte bits, const T* ptr);

template <typename T>
static void clear_cas(jbyte bits, const T* ptr);

template <typename T>
static void meta_clear(jbyte bits, const T* ptr);
};

#endif // SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDBITS_HPP
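
The new class gives every tag-byte manipulation a single typed entry point; which byte of the traceid word is actually addressed is decided in the .inline.hpp hunk that follows. A hedged usage sketch (illustrative, not commit code):

// Illustrative only, for some Klass* k:
JfrTraceIdBits::store((jbyte)JfrTraceIdEpoch::this_epoch_bit(), k);  // OR the epoch tag into the tag byte
traceid word = JfrTraceIdBits::load(k);                              // read back the full traceid word
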
@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,95 +25,161 @@
#ifndef SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDBITS_INLINE_HPP
#define SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDBITS_INLINE_HPP

#include "jfr/utilities/jfrTypes.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdBits.hpp"
#include "oops/method.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "utilities/macros.hpp"

#ifdef VM_LITTLE_ENDIAN
static const int low_offset = 0;
static const int meta_offset = low_offset + 1;
const int low_offset = 0;
const int meta_offset = low_offset + 1;
#else
static const int low_offset = 7;
static const int meta_offset = low_offset - 1;
const int low_offset = 7;
const int meta_offset = low_offset - 1;
#endif
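
The offsets select which byte of the 8-byte traceid word carries the tag bits and which carries the meta bits; a worked view of the layout (illustrative):

// Bytes of the traceid word indexed 0..7:
//   little-endian: tag byte = byte 0, meta byte = byte 1
//   big-endian:    tag byte = byte 7, meta byte = byte 6
// Either way the pair maps onto the low-order 16 bits of the numeric value,
// which is why TRACE_ID(ptr) shifts right by TRACE_ID_SHIFT (16).
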
inline void set_bits(jbyte bits, jbyte volatile* const dest) {
assert(dest != NULL, "invariant");
*dest |= bits;
OrderAccess::storestore();
inline jbyte* low_addr(jbyte* addr) {
assert(addr != NULL, "invariant");
return addr + low_offset;
}

inline jbyte traceid_and(jbyte current, jbyte bits) {
return current & bits;
inline jbyte* low_addr(traceid* addr) {
return low_addr((jbyte*)addr);
}

inline jbyte traceid_or(jbyte current, jbyte bits) {
return current | bits;
inline jbyte* meta_addr(jbyte* addr) {
assert(addr != NULL, "invariant");
return addr + meta_offset;
}

inline jbyte traceid_xor(jbyte current, jbyte bits) {
return current ^ bits;
inline jbyte* meta_addr(traceid* addr) {
return meta_addr((jbyte*)addr);
}

template <typename T>
inline jbyte* traceid_tag_byte(const T* ptr) {
assert(ptr != NULL, "invariant");
return low_addr(ptr->trace_id_addr());
}

template <>
inline jbyte* traceid_tag_byte<Method>(const Method* ptr) {
assert(ptr != NULL, "invariant");
return ptr->trace_flags_addr();
}

template <typename T>
inline jbyte* traceid_meta_byte(const T* ptr) {
assert(ptr != NULL, "invariant");
return meta_addr(ptr->trace_id_addr());
}

template <>
inline jbyte* traceid_meta_byte<Method>(const Method* ptr) {
assert(ptr != NULL, "invariant");
return ptr->trace_meta_addr();
}

inline jbyte traceid_and(jbyte bits, jbyte current) {
return bits & current;
}

inline jbyte traceid_or(jbyte bits, jbyte current) {
return bits | current;
}

inline jbyte traceid_xor(jbyte bits, jbyte current) {
return bits ^ current;
}

template <jbyte op(jbyte, jbyte)>
inline void set_bits_cas_form(jbyte bits, jbyte* const dest) {
inline void set_form(jbyte bits, jbyte* dest) {
assert(dest != NULL, "invariant");
*dest = op(bits, *dest);
OrderAccess::storestore();
}

template <jbyte op(jbyte, jbyte)>
inline void set_cas_form(jbyte bits, jbyte volatile* dest) {
assert(dest != NULL, "invariant");
do {
const jbyte current = *dest;
const jbyte new_value = op(current, bits);
if (Atomic::cmpxchg(dest, current, new_value) == current) {
const jbyte new_value = op(bits, current);
if (current == new_value || Atomic::cmpxchg(dest, current, new_value) == current) {
return;
}
} while (true);
}
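
The reworked CAS form adds an early exit before issuing the atomic: if applying the operation would not change the byte, it returns immediately. A worked trace of the race it optimizes (illustrative):

// Two threads tagging the same klass in the same epoch:
//   T1: current = 0x00, new_value = 0x01, cmpxchg succeeds -> tag byte 0x01
//   T2: current = 0x01, new_value = 0x01, current == new_value -> returns
//       without an atomic operation (the old form always cmpxchg'ed)
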
inline void set_bits_cas(jbyte bits, jbyte* const dest) {
set_bits_cas_form<traceid_or>(bits, dest);
template <typename T>
inline void JfrTraceIdBits::cas(jbyte bits, const T* ptr) {
assert(ptr != NULL, "invariant");
set_cas_form<traceid_or>(bits, traceid_tag_byte(ptr));
}

inline void clear_bits_cas(jbyte bits, jbyte* const dest) {
set_bits_cas_form<traceid_xor>(bits, dest);
template <typename T>
inline traceid JfrTraceIdBits::load(const T* ptr) {
assert(ptr != NULL, "invariant");
return ptr->trace_id();
}

inline void set_mask(jbyte mask, jbyte* const dest) {
set_bits_cas_form<traceid_and>(mask, dest);
}

inline void set_traceid_bits(jbyte bits, traceid* dest) {
set_bits(bits, ((jbyte*)dest) + low_offset);
}

inline void set_traceid_bits_cas(jbyte bits, traceid* dest) {
set_bits_cas(bits, ((jbyte*)dest) + low_offset);
}

inline void set_traceid_mask(jbyte mask, traceid* dest) {
set_mask(mask, ((jbyte*)dest) + low_offset);
}

inline void set_meta_bits(jbyte bits, jbyte* const dest) {
inline void set(jbyte bits, jbyte* dest) {
assert(dest != NULL, "invariant");
*dest |= bits;
set_form<traceid_or>(bits, dest);
}

inline void set_traceid_meta_bits(jbyte bits, traceid* dest) {
set_meta_bits(bits, ((jbyte*)dest) + meta_offset);
template <typename T>
inline void JfrTraceIdBits::store(jbyte bits, const T* ptr) {
assert(ptr != NULL, "invariant");
set(bits, traceid_tag_byte(ptr));
}

inline void set_meta_mask(jbyte mask, jbyte* const dest) {
assert(dest != NULL, "invariant");
*dest &= mask;
template <typename T>
inline void JfrTraceIdBits::meta_store(jbyte bits, const T* ptr) {
assert(ptr != NULL, "invariant");
set(bits, traceid_meta_byte(ptr));
}

inline void set_traceid_meta_mask(jbyte mask, traceid* dest) {
set_meta_mask(mask, ((jbyte*)dest) + meta_offset);
inline void set_mask(jbyte mask, jbyte* dest) {
set_cas_form<traceid_and>(mask, dest);
}

// only used by a single thread with no visibility requirements
inline void clear_meta_bits(jbyte bits, jbyte* const dest) {
assert(dest != NULL, "invariant");
*dest ^= bits;
template <typename T>
inline void JfrTraceIdBits::mask_store(jbyte mask, const T* ptr) {
assert(ptr != NULL, "invariant");
set_mask(mask, traceid_tag_byte(ptr));
}

template <typename T>
inline void JfrTraceIdBits::meta_mask_store(jbyte mask, const T* ptr) {
assert(ptr != NULL, "invariant");
set_mask(mask, traceid_meta_byte(ptr));
}

inline void clear_bits(jbyte bits, jbyte* dest) {
set_form<traceid_xor>(bits, dest);
}

template <typename T>
inline void JfrTraceIdBits::clear(jbyte bits, const T* ptr) {
assert(ptr != NULL, "invariant");
clear_bits(bits, traceid_tag_byte(ptr));
}

inline void clear_bits_cas(jbyte bits, jbyte* dest) {
set_cas_form<traceid_xor>(bits, dest);
}

template <typename T>
inline void JfrTraceIdBits::clear_cas(jbyte bits, const T* ptr) {
assert(ptr != NULL, "invariant");
clear_bits_cas(bits, traceid_tag_byte(ptr));
}

template <typename T>
inline void JfrTraceIdBits::meta_clear(jbyte bits, const T* ptr) {
assert(ptr != NULL, "invariant");
clear_bits(bits, traceid_meta_byte(ptr));
}

#endif // SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDBITS_INLINE_HPP

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,27 +29,27 @@
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"

#define USED_BIT 1
#define METHOD_USED_BIT (USED_BIT << 2)
#define EPOCH_1_SHIFT 0
#define EPOCH_2_SHIFT 1
#define USED_EPOCH_1_BIT (USED_BIT << EPOCH_1_SHIFT)
#define USED_EPOCH_2_BIT (USED_BIT << EPOCH_2_SHIFT)
#define METHOD_USED_EPOCH_1_BIT (METHOD_USED_BIT << EPOCH_1_SHIFT)
#define METHOD_USED_EPOCH_2_BIT (METHOD_USED_BIT << EPOCH_2_SHIFT)
#define METHOD_AND_CLASS_IN_USE_BITS (METHOD_USED_BIT | USED_BIT)
#define METHOD_AND_CLASS_IN_USE_EPOCH_1_BITS (METHOD_AND_CLASS_IN_USE_BITS << EPOCH_1_SHIFT)
#define METHOD_AND_CLASS_IN_USE_EPOCH_2_BITS (METHOD_AND_CLASS_IN_USE_BITS << EPOCH_2_SHIFT)
#define BIT 1
#define METHOD_BIT (BIT << 2)
#define EPOCH_0_SHIFT 0
#define EPOCH_1_SHIFT 1
#define EPOCH_0_BIT (BIT << EPOCH_0_SHIFT)
#define EPOCH_1_BIT (BIT << EPOCH_1_SHIFT)
#define EPOCH_0_METHOD_BIT (METHOD_BIT << EPOCH_0_SHIFT)
#define EPOCH_1_METHOD_BIT (METHOD_BIT << EPOCH_1_SHIFT)
#define METHOD_AND_CLASS_BITS (METHOD_BIT | BIT)
#define EPOCH_0_METHOD_AND_CLASS_BITS (METHOD_AND_CLASS_BITS << EPOCH_0_SHIFT)
#define EPOCH_1_METHOD_AND_CLASS_BITS (METHOD_AND_CLASS_BITS << EPOCH_1_SHIFT)
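
The renamed constants make the numeric values easy to read off; a worked table (illustrative, computed from the defines above):

// BIT                           = 0x01
// METHOD_BIT                    = 0x04  (BIT << 2)
// EPOCH_0_BIT                   = 0x01  (BIT << 0)
// EPOCH_1_BIT                   = 0x02  (BIT << 1)
// EPOCH_0_METHOD_BIT            = 0x04  (METHOD_BIT << 0)
// EPOCH_1_METHOD_BIT            = 0x08  (METHOD_BIT << 1)
// EPOCH_0_METHOD_AND_CLASS_BITS = 0x05
// EPOCH_1_METHOD_AND_CLASS_BITS = 0x0a
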
// Epoch alternation on each rotation allows for concurrent tagging.
// The epoch shift happens only during a safepoint.
//
// _synchronizing is a transition state, the purpose of which is to
// have JavaThreads that run _thread_in_native (i.e. Compiler threads)
// respect the current epoch shift in-progress during a safepoint.
// respect the current epoch shift in-progress during the safepoint.
//
// _changed_tag_state == true signals an incremental modification to artifact tagging
// (klasses, methods, CLDs, etc), used to request collection of artifacts.
// (klasses, methods, CLDs, etc), the purpose of which is to trigger collection of artifacts.
//
class JfrTraceIdEpoch : AllStatic {
friend class JfrCheckpointManager;
@ -90,28 +90,28 @@ class JfrTraceIdEpoch : AllStatic {
return Atomic::load_acquire(&_synchronizing);
}

static traceid in_use_this_epoch_bit() {
return _epoch_state ? USED_EPOCH_2_BIT : USED_EPOCH_1_BIT;
static traceid this_epoch_bit() {
return _epoch_state ? EPOCH_1_BIT : EPOCH_0_BIT;
}

static traceid in_use_prev_epoch_bit() {
return _epoch_state ? USED_EPOCH_1_BIT : USED_EPOCH_2_BIT;
static traceid previous_epoch_bit() {
return _epoch_state ? EPOCH_0_BIT : EPOCH_1_BIT;
}

static traceid method_in_use_this_epoch_bit() {
return _epoch_state ? METHOD_USED_EPOCH_2_BIT : METHOD_USED_EPOCH_1_BIT;
static traceid this_epoch_method_bit() {
return _epoch_state ? EPOCH_1_METHOD_BIT : EPOCH_0_METHOD_BIT;
}

static traceid method_in_use_prev_epoch_bit() {
return _epoch_state ? METHOD_USED_EPOCH_1_BIT : METHOD_USED_EPOCH_2_BIT;
static traceid previous_epoch_method_bit() {
return _epoch_state ? EPOCH_0_METHOD_BIT : EPOCH_1_METHOD_BIT;
}

static traceid method_and_class_in_use_this_epoch_bits() {
return _epoch_state ? METHOD_AND_CLASS_IN_USE_EPOCH_2_BITS : METHOD_AND_CLASS_IN_USE_EPOCH_1_BITS;
static traceid this_epoch_method_and_class_bits() {
return _epoch_state ? EPOCH_1_METHOD_AND_CLASS_BITS : EPOCH_0_METHOD_AND_CLASS_BITS;
}

static traceid method_and_class_in_use_prev_epoch_bits() {
return _epoch_state ? METHOD_AND_CLASS_IN_USE_EPOCH_1_BITS : METHOD_AND_CLASS_IN_USE_EPOCH_2_BITS;
static traceid previous_epoch_method_and_class_bits() {
return _epoch_state ? EPOCH_0_METHOD_AND_CLASS_BITS : EPOCH_1_METHOD_AND_CLASS_BITS;
}

static bool has_changed_tag_state() {
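
How the accessors pair up across a rotation (illustrative): when _epoch_state flips at the safepoint, this-epoch and previous-epoch swap roles, so bits written before the flip are exactly the ones the previous-epoch accessors then select:

// One epoch rotation:
//   before flip (_epoch_state == false):
//     this_epoch_bit()     -> EPOCH_0_BIT (0x01)
//     previous_epoch_bit() -> EPOCH_1_BIT (0x02)
//   after flip (_epoch_state == true):
//     this_epoch_bit()     -> EPOCH_1_BIT (0x02)
//     previous_epoch_bit() -> EPOCH_0_BIT (0x01)  // everything tagged before the flip
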
@ -0,0 +1,248 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#include "precompiled.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdKlassQueue.hpp"
#include "jfr/support/jfrKlassUnloading.hpp"
#include "jfr/support/jfrThreadLocal.hpp"
#include "jfr/utilities/jfrEpochQueue.inline.hpp"
#include "jfr/utilities/jfrTypes.hpp"
#include "memory/metaspace.hpp"
#include "oops/compressedOops.hpp"
#include "utilities/macros.hpp"

#ifdef VM_LITTLE_ENDIAN
static const u1 UNLOADED_BIT = 1;
static const u1 UNCOMPRESSED_BIT = 1 << 1;
static const u1 METADATA_SHIFT = UNCOMPRESSED_BIT;
static const traceid UNLOADED = UNLOADED_BIT;
static const traceid UNCOMPRESSED = UNCOMPRESSED_BIT;
static const juint UNLOADED_NARROW = UNLOADED_BIT;
static const juint UNCOMPRESSED_NARROW = UNCOMPRESSED_BIT;
#else
static const u1 UNLOADED_BIT = 1 << 7;
static const u1 UNCOMPRESSED_BIT = 1 << 6;
static const traceid UNLOADED = (traceid)UNLOADED_BIT << 56;
static const traceid UNCOMPRESSED = (traceid)UNCOMPRESSED_BIT << 56;
static const traceid METADATA_MASK = ~(UNCOMPRESSED | UNLOADED);
static const juint UNLOADED_NARROW = (juint)UNLOADED_BIT << 24;
static const juint UNCOMPRESSED_NARROW = (juint)UNCOMPRESSED_BIT << 24;
static const juint METADATA_MASK_NARROW = ~(UNCOMPRESSED_NARROW | UNLOADED_NARROW);
#endif

struct JfrEpochQueueKlassElement {
traceid id;
const Klass* klass;
};

struct JfrEpochQueueNarrowKlassElement {
u4 id;
narrowKlass compressed_klass;
};

static const size_t ELEMENT_SIZE = sizeof(JfrEpochQueueKlassElement);
static const size_t NARROW_ELEMENT_SIZE = sizeof(JfrEpochQueueNarrowKlassElement);
static const size_t THRESHOLD_SHIFT = 30;

// If the upshifted traceid value is less than this threshold (1 073 741 824),
// compress the element for more effective queue storage.
static const traceid uncompressed_threshold = ((traceid)1) << THRESHOLD_SHIFT;

static size_t element_size(bool compressed) {
return compressed ? NARROW_ELEMENT_SIZE : ELEMENT_SIZE;
}

static bool can_compress_element(traceid id) {
return Metaspace::using_class_space() && id < uncompressed_threshold;
}

static size_t element_size(const Klass* klass) {
assert(klass != NULL, "invariant");
return element_size(can_compress_element(JfrTraceId::load_raw(klass)));
}
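
On a 64-bit build the two layouts differ by a factor of two, which is the space the compression buys; worked sizes (illustrative, assuming LP64 with compressed class pointers enabled):

// JfrEpochQueueKlassElement       { traceid id; const Klass* klass; }      -> 16 bytes
// JfrEpochQueueNarrowKlassElement { u4 id; narrowKlass compressed_klass; } ->  8 bytes
// An upshifted traceid below 2^30 comfortably fits a u4, so most enqueued
// klasses can take the narrow form.
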
static bool is_unloaded(traceid id, bool previous_epoch) {
return JfrKlassUnloading::is_unloaded(id, previous_epoch);
}

static narrowKlass encode(const Klass* klass) {
return CompressedKlassPointers::encode(const_cast<Klass*>(klass));
}

static const Klass* decode(narrowKlass klass) {
return CompressedKlassPointers::decode(klass);
}

static traceid unmask_id(traceid id, bool compressed) {
#ifdef VM_LITTLE_ENDIAN
return id >> METADATA_SHIFT;
#else
return compressed ? id & METADATA_MASK_NARROW : id & METADATA_MASK;
#endif
}

static traceid read_compressed_element(const u1* pos, const Klass** klass) {
const JfrEpochQueueNarrowKlassElement* element = (const JfrEpochQueueNarrowKlassElement*)pos;
*klass = decode(element->compressed_klass);
return unmask_id(element->id, true);
}

static traceid read_uncompressed_element(const u1* pos, const Klass** klass) {
const JfrEpochQueueKlassElement* element = (const JfrEpochQueueKlassElement*)pos;
*klass = element->klass;
return unmask_id(element->id, false);
}

static traceid read_element(const u1* pos, const Klass** klass, bool compressed) {
assert(pos != NULL, "invariant");
return compressed ? read_compressed_element(pos, klass) : read_uncompressed_element(pos, klass);
}

static void store_compressed_element(traceid id, const Klass* klass, u1* pos) {
JfrEpochQueueNarrowKlassElement* const element = new (pos) JfrEpochQueueNarrowKlassElement();
element->id = id;
element->compressed_klass = encode(klass);
}

static void store_uncompressed_element(traceid id, const Klass* klass, u1* pos) {
JfrEpochQueueKlassElement* const element = new (pos) JfrEpochQueueKlassElement();
element->id = id | UNCOMPRESSED;
element->klass = klass;
}

static void store_element(const Klass* klass, u1* pos) {
assert(pos != NULL, "invariant");
assert(klass != NULL, "invariant");
traceid id = JfrTraceId::load_raw(klass);
#ifdef VM_LITTLE_ENDIAN
id <<= METADATA_SHIFT;
#endif
if (can_compress_element(id)) {
store_compressed_element(id, klass, pos);
} else {
store_uncompressed_element(id, klass, pos);
}
}
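
The id and the two flags share storage in the element's first bytes: on little-endian the id is stored pre-shifted so the flag bits occupy the low end, while on big-endian the flags sit in the top byte and are masked off on read. A worked little-endian round-trip (illustrative):

// Raw id 0x2A:
//   store: id << METADATA_SHIFT -> 0xA8 (bits 0..1 now free for flags);
//          store_uncompressed_element ORs UNCOMPRESSED (bit 1) -> 0xAA
//   read:  is_compressed(pos) sees bit 1 set -> wide element;
//          unmask_id(0xAA, false) >> METADATA_SHIFT -> 0x2A again
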
static void set_unloaded(const u1* pos) {
*(const_cast<u1*>(pos)) |= UNLOADED_BIT;
}

static bool is_unloaded(const u1* pos) {
return (*pos & UNLOADED_BIT) == UNLOADED_BIT;
}

static bool is_compressed(const u1* pos) {
return (*pos & UNCOMPRESSED_BIT) == 0;
}

// this is an optimization to clear out elements
// by short-circuiting the callback loop.
static bool _clear = false;
template <typename Buffer>
size_t JfrEpochQueueKlassPolicy<Buffer>::operator()(const u1* pos, KlassFunctor& callback, bool previous_epoch) {
assert(pos != NULL, "invariant");
const bool compressed = is_compressed(pos);
const size_t size = ::element_size(compressed);
if (_clear || is_unloaded(pos)) {
return size;
}
const Klass* klass;
const traceid id = read_element(pos, &klass, compressed);
assert(id > 0, "invariant");
if (is_unloaded(id, previous_epoch)) {
set_unloaded(pos);
return size;
}
assert(klass != NULL, "invariant");
callback(const_cast<Klass*>(klass));
return size;
}

template <typename Buffer>
void JfrEpochQueueKlassPolicy<Buffer>::store_element(const Klass* klass, Buffer* buffer) {
assert(klass != NULL, "invariant");
assert(buffer != NULL, "invariant");
assert(buffer->free_size() >= ::element_size(klass), "invariant");
::store_element(klass, buffer->pos());
}

template <typename Buffer>
inline size_t JfrEpochQueueKlassPolicy<Buffer>::element_size(const Klass* klass) {
assert(klass != NULL, "invariant");
return ::element_size(klass);
}

template <typename Buffer>
inline Buffer* JfrEpochQueueKlassPolicy<Buffer>::thread_local_storage(Thread* thread) const {
assert(thread != NULL, "invariant");
JfrThreadLocal* tl = thread->jfr_thread_local();
return JfrTraceIdEpoch::epoch() ? tl->_load_barrier_buffer_epoch_1 : tl->_load_barrier_buffer_epoch_0;
}

template <typename Buffer>
inline void JfrEpochQueueKlassPolicy<Buffer>::set_thread_local_storage(Buffer* buffer, Thread* thread) {
assert(thread != NULL, "invariant");
JfrThreadLocal* tl = thread->jfr_thread_local();
if (JfrTraceIdEpoch::epoch()) {
tl->_load_barrier_buffer_epoch_1 = buffer;
} else {
tl->_load_barrier_buffer_epoch_0 = buffer;
}
}

JfrTraceIdKlassQueue::JfrTraceIdKlassQueue() : _queue() {}

JfrTraceIdKlassQueue::~JfrTraceIdKlassQueue() {
delete _queue;
}

bool JfrTraceIdKlassQueue::initialize(size_t min_elem_size, size_t free_list_cache_count_limit, size_t cache_prealloc_count) {
assert(_queue == NULL, "invariant");
_queue = new JfrEpochQueue<JfrEpochQueueKlassPolicy>();
return _queue != NULL && _queue->initialize(min_elem_size, free_list_cache_count_limit, cache_prealloc_count);
}

void JfrTraceIdKlassQueue::clear() {
if (_queue != NULL) {
_clear = true;
KlassFunctor functor(NULL);
_queue->iterate(functor, true);
_clear = false;
}
}

void JfrTraceIdKlassQueue::enqueue(const Klass* klass) {
assert(klass != NULL, "invariant");
_queue->enqueue(klass);
}

void JfrTraceIdKlassQueue::iterate(klass_callback callback, bool previous_epoch) {
assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
KlassFunctor functor(callback);
_queue->iterate(functor, previous_epoch);
}
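
Pulling the pieces together, the queue's public surface is small; a hedged usage sketch (illustrative only; print_klass is a hypothetical callback):

// Illustrative only; print_klass is hypothetical.
static void print_klass(Klass* k) {
  // inspect a klass tagged during the epoch being iterated
}

void example(JfrTraceIdKlassQueue& queue, const Klass* k) {
  queue.enqueue(k);                  // done by the load barrier on first tag
  queue.iterate(print_klass, true);  // done at rotation, under the CLDG lock
}
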
@ -0,0 +1,78 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#ifndef SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDKLASSQUEUE_HPP
#define SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDKLASSQUEUE_HPP

#include "jfr/utilities/jfrAllocation.hpp"
#include "jfr/utilities/jfrEpochQueue.hpp"

class Klass;
class Thread;

typedef void(*klass_callback)(Klass*);

class KlassFunctor {
klass_callback _cb;
public:
KlassFunctor(klass_callback cb) : _cb(cb) {}
void operator()(Klass* klass) const {
_cb(klass);
}
};

//
// The policy template class to be used in combination with JfrEpochQueue to specialize a queue.
// It details how to store and process an enqueued Klass representation. See utilities/jfrEpochQueue.hpp.
//
template <typename Buffer>
class JfrEpochQueueKlassPolicy {
public:
typedef Buffer* BufferPtr;
typedef Klass Type;
// Encode an individual klass and additional metadata
// and store it into the buffer associated with the queue.
void store_element(const Klass* klass, BufferPtr buffer);
// Element size is a function of the traceid value.
size_t element_size(const Klass* klass);
// Storage associated with the queue is distributed and cached in thread locals.
BufferPtr thread_local_storage(Thread* thread) const;
void set_thread_local_storage(BufferPtr buffer, Thread* thread);
// Klasses are validated for liveness before being forwarded to the user provided callback.
size_t operator()(const u1* pos, KlassFunctor& callback, bool previous_epoch = false);
};

class JfrTraceIdKlassQueue : public JfrCHeapObj {
private:
JfrEpochQueue<JfrEpochQueueKlassPolicy>* _queue;
public:
JfrTraceIdKlassQueue();
~JfrTraceIdKlassQueue();
bool initialize(size_t min_elem_size, size_t free_list_cache_count_limit, size_t cache_prealloc_count);
void clear();
void enqueue(const Klass* klass);
void iterate(klass_callback callback, bool previous_epoch = false);
};

#endif //SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDKLASSQUEUE_HPP
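
The policy pattern keeps JfrEpochQueue itself generic; judging from the policy surface above, the queue presumably asks the policy for element_size(klass), locates the thread-local buffer via thread_local_storage(), and delegates the encoding to store_element(). A hedged sketch of the instantiation, matching initialize() in the .cpp hunk (illustrative):

// Illustrative: the queue is specialized by the policy type.
JfrEpochQueue<JfrEpochQueueKlassPolicy>* q = new JfrEpochQueue<JfrEpochQueueKlassPolicy>();
q->initialize(1 * K, JFR_MSPACE_UNLIMITED_CACHE_SIZE, 32);  // sizes as used by the load barrier
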
@ -0,0 +1,80 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdLoadBarrier.inline.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdKlassQueue.hpp"
#include "jfr/support/jfrThreadLocal.hpp"
#include "jfr/utilities/jfrEpochQueue.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/mutexLocker.hpp"

// The queue instance used by the load barrier to enqueue tagged Klass'es.
static JfrTraceIdKlassQueue* _klass_queue = NULL;

static JfrTraceIdKlassQueue& klass_queue() {
assert(_klass_queue != NULL, "invariant");
return *_klass_queue;
}

const size_t buffer_size_bytes = 1 * K; // min_elem_size of storage unit
const size_t prealloc_count = 32;

bool JfrTraceIdLoadBarrier::initialize() {
assert(_klass_queue == NULL, "invariant");
_klass_queue = new JfrTraceIdKlassQueue();
return _klass_queue != NULL && _klass_queue->initialize(buffer_size_bytes, JFR_MSPACE_UNLIMITED_CACHE_SIZE, prealloc_count);
}

void JfrTraceIdLoadBarrier::clear() {
if (_klass_queue != NULL) {
_klass_queue->clear();
}
}

void JfrTraceIdLoadBarrier::destroy() {
delete _klass_queue;
_klass_queue = NULL;
}

void JfrTraceIdLoadBarrier::enqueue(const Klass* klass) {
assert(klass != NULL, "invariant");
assert(USED_THIS_EPOCH(klass), "invariant");
klass_queue().enqueue(klass);
}

void JfrTraceIdLoadBarrier::do_klasses(klass_callback callback, bool previous_epoch) {
assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
klass_queue().iterate(callback, previous_epoch);
}

traceid JfrTraceIdLoadBarrier::load(jclass jc) {
assert(jc != NULL, "invariant");
assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_vm, "invariant");
const oop my_oop = JNIHandles::resolve(jc);
assert(my_oop != NULL, "invariant");
return load(java_lang_Class::as_Klass(my_oop));
}
@ -0,0 +1,89 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#ifndef SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDLOADBARRIER_HPP
#define SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDLOADBARRIER_HPP

#include "jni.h"
#include "jfr/utilities/jfrTypes.hpp"
#include "memory/allocation.hpp"

class ClassLoaderData;
class Klass;
class Method;
class ModuleEntry;
class PackageEntry;
/*
* The JFR equivalent of a 'GC Load Barrier' where, instead of tracking object accesses on the heap,
* we track accesses to JVM internal objects in native memory iff they stand in a relation to JFR events.
*
* Events can have fields referring to VM internal objects, for example Klass*, Method*, ClassLoaderData*, etc.
* At an event site, objects, or more specifically pointers to objects, are stored into the event just before
* the event is committed. As part of committing the event to the recording stream, instead of serializing these
* pointers directly, the writer mechanism writes a unique value of type traceid used by JFR to represent it.
* Conceptually, this is very similar to representing a reference using a foreign key.
*
* After this relation has been established, the JFR system must have a way to later locate the object in order to
* serialize the information it represents, i.e. to produce "tables" containing information related to foreign keys.
* The information in these tables then materializes as constants in the recording stream delivered as part of Checkpoint events,
* letting events containing references become resolvable.
*
* The 'load barrier' is a means to accomplish this: it intercepts loading of traceid values from JVM internal objects,
* allowing JFR to keep track.
*
* Once intercepted, this tracking is implemented using two mechanisms:
*
* 'Tagging':
* ----------
* The barrier determines if the object needs to be marked, or tagged, and if so in what way.
* Tagging is a function of the current epoch and is implemented as a bit pattern installed into the traceid field of the object.
*
* 'Root set' of Klasses:
* ----------
* JFR collects the set of tagged JVM internal objects at certain intervals. This set is derived from a subset, or 'root set',
* consisting of incrementally tagged klasses for the epoch. The barrier enqueues a newly tagged klass, as a root, to an epoch-relative,
* distributed queue. The collection step will use the queue to process the root set, from which most tagged artifacts can be discovered.
*
*/
class JfrTraceIdLoadBarrier : AllStatic {
friend class JfrCheckpointManager;
private:
static bool initialize();
static void clear();
static void destroy();
static void enqueue(const Klass* klass);
public:
static traceid load(const ClassLoaderData* cld);
static traceid load(const Klass* klass);
static traceid load(const Klass* klass, const Method* method);
static traceid load(jclass jc);
static traceid load(const Method* method);
static traceid load(const ModuleEntry* module);
static traceid load(const PackageEntry* package);
static traceid load_leakp(const Klass* klass, const Method* method); // leak profiler
static void do_klasses(void f(Klass*), bool previous_epoch = false);
};

#endif // SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDLOADBARRIER_HPP
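
To make the foreign-key analogy concrete, a hedged sketch of an event site as described by the comment above (illustrative only; the writer call is conceptual, not this commit's API):

// Illustrative only: instead of serializing the Klass* itself, the commit
// path obtains a traceid (the 'foreign key'); the barrier tags the klass
// and enqueues it as a root so the matching constant ('table row') is
// emitted later in a Checkpoint event.
traceid klass_id = JfrTraceIdLoadBarrier::load(klass);
// writer.write(klass_id);  // conceptual commit step
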
@ -0,0 +1,127 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#ifndef SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDBARRIER_INLINE_HPP
#define SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDBARRIER_INLINE_HPP

#include "classfile/classLoaderData.hpp"
#include "classfile/moduleEntry.hpp"
#include "classfile/packageEntry.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdBits.inline.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdLoadBarrier.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdMacros.hpp"
#include "oops/klass.hpp"
#include "oops/method.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/debug.hpp"

inline bool is_not_tagged(traceid value) {
const traceid this_epoch_bit = JfrTraceIdEpoch::this_epoch_bit();
return ((value & ((this_epoch_bit << META_SHIFT) | this_epoch_bit)) != this_epoch_bit);
}

template <typename T>
inline bool should_tag(const T* t) {
assert(t != NULL, "invariant");
return is_not_tagged(TRACE_ID_RAW(t));
}

template <>
inline bool should_tag<Method>(const Method* method) {
assert(method != NULL, "invariant");
return is_not_tagged((traceid)method->trace_flags());
}
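
The predicate checks two bits in one mask: the epoch tag bit itself and its meta-byte counterpart (the epoch 'cleared' marker, META_SHIFT bits higher). A worked evaluation (illustrative, with this_epoch_bit == EPOCH_0_BIT):

// mask = (0x01 << 8) | 0x01 = 0x0101
//   value 0x0000: 0x0000 & 0x0101 = 0x0000 != 0x0001 -> not tagged, tag it
//   value 0x0001: 0x0001 & 0x0101 = 0x0001 == 0x0001 -> already tagged, skip
//   value 0x0101: 0x0101 & 0x0101 = 0x0101 != 0x0001 -> meta 'cleared' bit set,
//                 so the artifact is tagged again for this epoch
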
template <typename T>
inline traceid set_used_and_get(const T* type) {
assert(type != NULL, "invariant");
if (should_tag(type)) {
SET_USED_THIS_EPOCH(type);
JfrTraceIdEpoch::set_changed_tag_state();
}
assert(USED_THIS_EPOCH(type), "invariant");
return TRACE_ID(type);
}

inline traceid JfrTraceIdLoadBarrier::load(const Klass* klass) {
assert(klass != NULL, "invariant");
if (should_tag(klass)) {
SET_USED_THIS_EPOCH(klass);
enqueue(klass);
JfrTraceIdEpoch::set_changed_tag_state();
}
assert(USED_THIS_EPOCH(klass), "invariant");
return TRACE_ID(klass);
}

inline traceid JfrTraceIdLoadBarrier::load(const Method* method) {
return load(method->method_holder(), method);
}

inline traceid JfrTraceIdLoadBarrier::load(const Klass* klass, const Method* method) {
assert(klass != NULL, "invariant");
assert(method != NULL, "invariant");
if (METHOD_FLAG_NOT_USED_THIS_EPOCH(method)) {
SET_METHOD_AND_CLASS_USED_THIS_EPOCH(klass);
SET_METHOD_FLAG_USED_THIS_EPOCH(method);
assert(METHOD_AND_CLASS_USED_THIS_EPOCH(klass), "invariant");
assert(METHOD_FLAG_USED_THIS_EPOCH(method), "invariant");
enqueue(klass);
JfrTraceIdEpoch::set_changed_tag_state();
}
return (METHOD_ID(klass, method));
}

inline traceid JfrTraceIdLoadBarrier::load(const ModuleEntry* module) {
return set_used_and_get(module);
}

inline traceid JfrTraceIdLoadBarrier::load(const PackageEntry* package) {
return set_used_and_get(package);
}

inline traceid JfrTraceIdLoadBarrier::load(const ClassLoaderData* cld) {
assert(cld != NULL, "invariant");
return cld->has_class_mirror_holder() ? 0 : set_used_and_get(cld);
}

inline traceid JfrTraceIdLoadBarrier::load_leakp(const Klass* klass, const Method* method) {
assert(klass != NULL, "invariant");
assert(METHOD_AND_CLASS_USED_THIS_EPOCH(klass), "invariant");
assert(method != NULL, "invariant");
assert(klass == method->method_holder(), "invariant");
if (METHOD_FLAG_NOT_USED_THIS_EPOCH(method)) {
// the method is already logically tagged, just like the klass,
// but because of redefinition, the latest Method*
// representation might not have a reified tag.
SET_METHOD_FLAG_USED_THIS_EPOCH(method);
assert(METHOD_FLAG_USED_THIS_EPOCH(method), "invariant");
}
SET_LEAKP(klass);
SET_METHOD_LEAKP(method);
return (METHOD_ID(klass, method));
}

#endif // SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDBARRIER_INLINE_HPP

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -44,74 +44,74 @@

// static bits
#define META_SHIFT 8
#define EPOCH_1_CLEARED_META_BIT USED_BIT
#define EPOCH_0_CLEARED_META_BIT BIT
#define EPOCH_0_CLEARED_BIT (EPOCH_0_CLEARED_META_BIT << META_SHIFT)
#define EPOCH_1_CLEARED_META_BIT (BIT << 1)
#define EPOCH_1_CLEARED_BIT (EPOCH_1_CLEARED_META_BIT << META_SHIFT)
#define EPOCH_2_CLEARED_META_BIT (USED_BIT << 1)
#define EPOCH_2_CLEARED_BIT (EPOCH_2_CLEARED_META_BIT << META_SHIFT)
#define LEAKP_META_BIT (USED_BIT << 2)
#define LEAKP_META_BIT (BIT << 2)
#define LEAKP_BIT (LEAKP_META_BIT << META_SHIFT)
#define TRANSIENT_META_BIT (USED_BIT << 3)
#define TRANSIENT_META_BIT (BIT << 3)
#define TRANSIENT_BIT (TRANSIENT_META_BIT << META_SHIFT)
#define SERIALIZED_META_BIT (USED_BIT << 4)
#define SERIALIZED_META_BIT (BIT << 4)
#define SERIALIZED_BIT (SERIALIZED_META_BIT << META_SHIFT)
#define TRACE_ID_SHIFT 16
#define METHOD_ID_NUM_MASK ((1 << TRACE_ID_SHIFT) - 1)
#define META_BITS (SERIALIZED_BIT | TRANSIENT_BIT | LEAKP_BIT | EPOCH_2_CLEARED_BIT | EPOCH_1_CLEARED_BIT)
#define META_BITS (SERIALIZED_BIT | TRANSIENT_BIT | LEAKP_BIT | EPOCH_1_CLEARED_BIT | EPOCH_0_CLEARED_BIT)
#define EVENT_BITS (EVENT_HOST_KLASS | JDK_JFR_EVENT_KLASS | JDK_JFR_EVENT_SUBKLASS)
#define USED_BITS (METHOD_USED_EPOCH_2_BIT | METHOD_USED_EPOCH_1_BIT | USED_EPOCH_2_BIT | USED_EPOCH_1_BIT)
#define ALL_BITS (META_BITS | EVENT_BITS | USED_BITS)
#define TAG_BITS (EPOCH_1_METHOD_BIT | EPOCH_0_METHOD_BIT | EPOCH_1_BIT | EPOCH_0_BIT)
#define ALL_BITS (META_BITS | EVENT_BITS | TAG_BITS)
#define ALL_BITS_MASK (~(ALL_BITS))
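
With META_SHIFT of 8, every meta bit sits one byte above its tag-byte counterpart; a worked 16-bit layout (illustrative, computed from the defines above):

// tag byte (bits 0..7):   EPOCH_0_BIT 0x01, EPOCH_1_BIT 0x02,
//                         EPOCH_0_METHOD_BIT 0x04, EPOCH_1_METHOD_BIT 0x08
// meta byte (bits 8..15): EPOCH_0_CLEARED_BIT 0x0100, EPOCH_1_CLEARED_BIT 0x0200,
//                         LEAKP_BIT 0x0400, TRANSIENT_BIT 0x0800, SERIALIZED_BIT 0x1000
// The id proper begins at bit 16 (TRACE_ID_SHIFT).
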
// epoch relative bits
#define IN_USE_THIS_EPOCH_BIT (JfrTraceIdEpoch::in_use_this_epoch_bit())
#define IN_USE_PREV_EPOCH_BIT (JfrTraceIdEpoch::in_use_prev_epoch_bit())
#define METHOD_IN_USE_THIS_EPOCH_BIT (JfrTraceIdEpoch::method_in_use_this_epoch_bit())
#define METHOD_IN_USE_PREV_EPOCH_BIT (JfrTraceIdEpoch::method_in_use_prev_epoch_bit())
#define METHOD_AND_CLASS_IN_USE_THIS_EPOCH_BITS (JfrTraceIdEpoch::method_and_class_in_use_this_epoch_bits())
#define METHOD_AND_CLASS_IN_USE_PREV_EPOCH_BITS (JfrTraceIdEpoch::method_and_class_in_use_prev_epoch_bits())
#define METHOD_FLAG_IN_USE_THIS_EPOCH_BIT ((jbyte)IN_USE_THIS_EPOCH_BIT)
#define METHOD_FLAG_IN_USE_PREV_EPOCH_BIT ((jbyte)IN_USE_PREV_EPOCH_BIT)
#define THIS_EPOCH_BIT (JfrTraceIdEpoch::this_epoch_bit())
#define PREVIOUS_EPOCH_BIT (JfrTraceIdEpoch::previous_epoch_bit())
#define THIS_EPOCH_METHOD_BIT (JfrTraceIdEpoch::this_epoch_method_bit())
#define PREVIOUS_EPOCH_METHOD_BIT (JfrTraceIdEpoch::previous_epoch_method_bit())
#define THIS_EPOCH_METHOD_AND_CLASS_BITS (JfrTraceIdEpoch::this_epoch_method_and_class_bits())
#define PREVIOUS_EPOCH_METHOD_AND_CLASS_BITS (JfrTraceIdEpoch::previous_epoch_method_and_class_bits())
#define THIS_EPOCH_METHOD_FLAG_BIT ((jbyte)THIS_EPOCH_BIT)
#define PREVIOUS_EPOCH_METHOD_FLAG_BIT ((jbyte)PREVIOUS_EPOCH_BIT)

// operators
#define TRACE_ID_RAW(ptr) ((ptr)->trace_id())
#define TRACE_ID_RAW(ptr) (JfrTraceIdBits::load(ptr))
#define TRACE_ID(ptr) (TRACE_ID_RAW(ptr) >> TRACE_ID_SHIFT)
#define TRACE_ID_MASKED(ptr) (TRACE_ID_RAW(ptr) & ALL_BITS_MASK)
#define TRACE_ID_PREDICATE(ptr, bits) ((TRACE_ID_RAW(ptr) & bits) != 0)
#define TRACE_ID_TAG(ptr, bits) (set_traceid_bits(bits, (ptr)->trace_id_addr()))
#define TRACE_ID_TAG_CAS(ptr, bits) (set_traceid_bits_cas(bits, (ptr)->trace_id_addr()))
#define TRACE_ID_CLEAR(ptr, bits) (set_traceid_mask(bits, (ptr)->trace_id_addr()))
#define TRACE_ID_META_TAG(ptr, bits) (set_traceid_meta_bits(bits, (ptr)->trace_id_addr()))
#define TRACE_ID_META_CLEAR(ptr, bits) (set_traceid_meta_mask(bits, (ptr)->trace_id_addr()))
#define TRACE_ID_TAG(ptr, bits) (JfrTraceIdBits::store(bits, ptr))
#define TRACE_ID_TAG_CAS(ptr, bits) (JfrTraceIdBits::cas(bits, ptr))
#define TRACE_ID_MASK_CLEAR(ptr, mask) (JfrTraceIdBits::mask_store(mask, ptr))
#define TRACE_ID_META_TAG(ptr, bits) (JfrTraceIdBits::meta_store(bits, ptr))
#define TRACE_ID_META_MASK_CLEAR(ptr, mask) (JfrTraceIdBits::meta_mask_store(mask, ptr))
#define METHOD_ID(kls, method) (TRACE_ID_MASKED(kls) | (method)->orig_method_idnum())
#define METHOD_FLAG_PREDICATE(method, bits) ((method)->is_trace_flag_set(bits))
#define METHOD_FLAG_TAG(method, bits) (set_bits(bits, (method)->trace_flags_addr()))
#define METHOD_META_TAG(method, bits) (set_meta_bits(bits, (method)->trace_meta_addr()))
#define METHOD_FLAG_CLEAR(method, bits) (clear_bits_cas(bits, (method)->trace_flags_addr()))
#define METHOD_META_CLEAR(method, bits) (set_meta_mask(bits, (method)->trace_meta_addr()))
#define METHOD_FLAG_TAG(method, bits) (JfrTraceIdBits::store(bits, method))
#define METHOD_META_TAG(method, bits) (JfrTraceIdBits::meta_store(bits, method))
#define METHOD_FLAG_CLEAR(method, bits) (JfrTraceIdBits::clear_cas(bits, method))
#define METHOD_META_MASK_CLEAR(method, mask) (JfrTraceIdBits::meta_mask_store(mask, method))

// predicates
#define USED_THIS_EPOCH(ptr) (TRACE_ID_PREDICATE(ptr, (TRANSIENT_BIT | IN_USE_THIS_EPOCH_BIT)))
#define USED_THIS_EPOCH(ptr) (TRACE_ID_PREDICATE(ptr, (TRANSIENT_BIT | THIS_EPOCH_BIT)))
#define NOT_USED_THIS_EPOCH(ptr) (!(USED_THIS_EPOCH(ptr)))
#define USED_PREV_EPOCH(ptr) (TRACE_ID_PREDICATE(ptr, (TRANSIENT_BIT | IN_USE_PREV_EPOCH_BIT)))
#define USED_ANY_EPOCH(ptr) (TRACE_ID_PREDICATE(ptr, (TRANSIENT_BIT | USED_EPOCH_2_BIT | USED_EPOCH_1_BIT)))
#define METHOD_USED_THIS_EPOCH(kls) (TRACE_ID_PREDICATE(kls, (METHOD_IN_USE_THIS_EPOCH_BIT)))
#define USED_PREVIOUS_EPOCH(ptr) (TRACE_ID_PREDICATE(ptr, (TRANSIENT_BIT | PREVIOUS_EPOCH_BIT)))
#define USED_ANY_EPOCH(ptr) (TRACE_ID_PREDICATE(ptr, (TRANSIENT_BIT | EPOCH_1_BIT | EPOCH_0_BIT)))
#define METHOD_USED_THIS_EPOCH(kls) (TRACE_ID_PREDICATE(kls, (THIS_EPOCH_METHOD_BIT)))
#define METHOD_NOT_USED_THIS_EPOCH(kls) (!(METHOD_USED_THIS_EPOCH(kls)))
#define METHOD_USED_PREV_EPOCH(kls) (TRACE_ID_PREDICATE(kls, (METHOD_IN_USE_PREV_EPOCH_BIT)))
#define METHOD_USED_ANY_EPOCH(kls) (TRACE_ID_PREDICATE(kls, (METHOD_IN_USE_PREV_EPOCH_BIT | METHOD_IN_USE_THIS_EPOCH_BIT)))
#define METHOD_AND_CLASS_USED_THIS_EPOCH(kls) (TRACE_ID_PREDICATE(kls, (METHOD_AND_CLASS_IN_USE_THIS_EPOCH_BITS)))
#define METHOD_AND_CLASS_USED_PREV_EPOCH(kls) (TRACE_ID_PREDICATE(kls, (METHOD_AND_CLASS_IN_USE_PREV_EPOCH_BITS)))
#define METHOD_USED_PREVIOUS_EPOCH(kls) (TRACE_ID_PREDICATE(kls, (PREVIOUS_EPOCH_METHOD_BIT)))
#define METHOD_USED_ANY_EPOCH(kls) (TRACE_ID_PREDICATE(kls, (EPOCH_1_METHOD_BIT | EPOCH_0_METHOD_BIT)))
#define METHOD_AND_CLASS_USED_THIS_EPOCH(kls) (TRACE_ID_PREDICATE(kls, (THIS_EPOCH_METHOD_AND_CLASS_BITS)))
#define METHOD_AND_CLASS_USED_PREVIOUS_EPOCH(kls) (TRACE_ID_PREDICATE(kls, (PREVIOUS_EPOCH_METHOD_AND_CLASS_BITS)))
#define METHOD_AND_CLASS_USED_ANY_EPOCH(kls) (METHOD_USED_ANY_EPOCH(kls) && USED_ANY_EPOCH(kls))
#define METHOD_FLAG_USED_THIS_EPOCH(method) (METHOD_FLAG_PREDICATE(method, (METHOD_FLAG_IN_USE_THIS_EPOCH_BIT)))
#define METHOD_FLAG_USED_THIS_EPOCH(method) (METHOD_FLAG_PREDICATE(method, (THIS_EPOCH_METHOD_FLAG_BIT)))
#define METHOD_FLAG_NOT_USED_THIS_EPOCH(method) (!(METHOD_FLAG_USED_THIS_EPOCH(method)))
#define METHOD_FLAG_USED_PREV_EPOCH(method) (METHOD_FLAG_PREDICATE(method, (METHOD_FLAG_IN_USE_PREV_EPOCH_BIT)))
#define METHOD_FLAG_USED_PREVIOUS_EPOCH(method) (METHOD_FLAG_PREDICATE(method, (PREVIOUS_EPOCH_METHOD_FLAG_BIT)))

// setters
#define SET_USED_THIS_EPOCH(ptr) (TRACE_ID_TAG(ptr, IN_USE_THIS_EPOCH_BIT))
#define SET_METHOD_AND_CLASS_USED_THIS_EPOCH(kls) (TRACE_ID_TAG(kls, METHOD_AND_CLASS_IN_USE_THIS_EPOCH_BITS))
#define SET_METHOD_FLAG_USED_THIS_EPOCH(method) (METHOD_FLAG_TAG(method, METHOD_FLAG_IN_USE_THIS_EPOCH_BIT))
#define CLEAR_METHOD_AND_CLASS_PREV_EPOCH_MASK (~(METHOD_IN_USE_PREV_EPOCH_BIT | IN_USE_PREV_EPOCH_BIT))
#define CLEAR_METHOD_AND_CLASS_PREV_EPOCH(kls) (TRACE_ID_CLEAR(kls, CLEAR_METHOD_AND_CLASS_PREV_EPOCH_MASK))
#define CLEAR_METHOD_FLAG_USED_PREV_EPOCH(method) (METHOD_FLAG_CLEAR(method, METHOD_FLAG_IN_USE_PREV_EPOCH_BIT))
#define SET_USED_THIS_EPOCH(ptr) (TRACE_ID_TAG(ptr, THIS_EPOCH_BIT))
#define SET_METHOD_AND_CLASS_USED_THIS_EPOCH(kls) (TRACE_ID_TAG(kls, THIS_EPOCH_METHOD_AND_CLASS_BITS))
#define SET_METHOD_FLAG_USED_THIS_EPOCH(method) (METHOD_FLAG_TAG(method, THIS_EPOCH_METHOD_FLAG_BIT))
#define PREVIOUS_EPOCH_METHOD_AND_CLASS_BIT_MASK (~(PREVIOUS_EPOCH_METHOD_BIT | PREVIOUS_EPOCH_BIT))
#define CLEAR_PREVIOUS_EPOCH_METHOD_AND_CLASS(kls) (TRACE_ID_MASK_CLEAR(kls, PREVIOUS_EPOCH_METHOD_AND_CLASS_BIT_MASK))
#define CLEAR_PREVIOUS_EPOCH_METHOD_FLAG(method) (METHOD_FLAG_CLEAR(method, PREVIOUS_EPOCH_METHOD_FLAG_BIT))

// types
#define IS_JDK_JFR_EVENT_KLASS(kls) (TRACE_ID_PREDICATE(kls, JDK_JFR_EVENT_KLASS))
@ -133,17 +133,21 @@
#define SHOULD_TAG(ptr) (NOT_USED_THIS_EPOCH(ptr))
#define SHOULD_TAG_KLASS_METHOD(ptr) (METHOD_NOT_USED_THIS_EPOCH(ptr))
#define SET_SERIALIZED(ptr) (TRACE_ID_META_TAG(ptr, SERIALIZED_META_BIT))
#define CLEAR_SERIALIZED(ptr) (TRACE_ID_META_CLEAR(ptr, META_MASK))
#define SET_PREV_EPOCH_CLEARED_BIT(ptr) (TRACE_ID_META_TAG(ptr, IN_USE_PREV_EPOCH_BIT))
#define CLEAR_SERIALIZED(ptr) (TRACE_ID_META_MASK_CLEAR(ptr, META_MASK))
#define SET_PREVIOUS_EPOCH_CLEARED_BIT(ptr) (TRACE_ID_META_TAG(ptr, PREVIOUS_EPOCH_BIT))
#define IS_THIS_EPOCH_CLEARED(ptr) (TRACE_ID_PREDICATE(ptr, THIS_EPOCH_BIT))
#define IS_PREVIOUS_EPOCH_CLEARED(ptr) (TRACE_ID_PREDICATE(ptr, PREVIOUS_EPOCH_BIT))
#define IS_METHOD_SERIALIZED(method) (METHOD_FLAG_PREDICATE(method, SERIALIZED_BIT))
#define IS_METHOD_LEAKP_USED(method) (METHOD_FLAG_PREDICATE(method, LEAKP_BIT))
#define METHOD_NOT_SERIALIZED(method) (!(IS_METHOD_SERIALIZED(method)))
#define SET_METHOD_LEAKP(method) (METHOD_META_TAG(method, LEAKP_META_BIT))
#define SET_METHOD_SERIALIZED(method) (METHOD_META_TAG(method, SERIALIZED_META_BIT))
#define CLEAR_METHOD_SERIALIZED(method) (METHOD_META_CLEAR(method, META_MASK))
#define SET_PREV_EPOCH_METHOD_CLEARED_BIT(ptr) (METHOD_META_TAG(ptr, IN_USE_PREV_EPOCH_BIT))
#define CLEAR_LEAKP(ptr) (TRACE_ID_META_CLEAR(ptr, (~(LEAKP_META_BIT))))
#define CLEAR_THIS_EPOCH_CLEARED_BIT(ptr) (TRACE_ID_META_CLEAR(ptr,(~(IN_USE_THIS_EPOCH_BIT))))
#define CLEAR_THIS_EPOCH_METHOD_CLEARED_BIT(ptr) (METHOD_META_CLEAR(ptr,(~(IN_USE_THIS_EPOCH_BIT))))
#define CLEAR_SERIALIZED_METHOD(method) (METHOD_META_MASK_CLEAR(method, META_MASK))
#define SET_PREVIOUS_EPOCH_METHOD_CLEARED_BIT(ptr) (METHOD_META_TAG(ptr, PREVIOUS_EPOCH_BIT))
#define CLEAR_LEAKP(ptr) (TRACE_ID_META_MASK_CLEAR(ptr, (~(LEAKP_META_BIT))))
#define CLEAR_THIS_EPOCH_CLEARED_BIT(ptr) (TRACE_ID_META_MASK_CLEAR(ptr,(~(THIS_EPOCH_BIT))))
#define CLEAR_THIS_EPOCH_METHOD_CLEARED_BIT(ptr) (METHOD_META_MASK_CLEAR(ptr,(~(THIS_EPOCH_BIT))))
#define IS_THIS_EPOCH_METHOD_CLEARED(ptr) (METHOD_FLAG_PREDICATE(method, THIS_EPOCH_BIT))
#define IS_PREVIOUS_EPOCH_METHOD_CLEARED(ptr) (METHOD_FLAG_PREDICATE(method, PREVIOUS_EPOCH_BIT))
|
||||
|
||||
#endif // SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDMACROS_HPP
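The predicate and setter families above implement a double-buffered tagging scheme: two epoch bits alternate roles between "this" and "previous" epoch on every rotation, so new tags never race with serialization of the prior epoch. A minimal standalone sketch of that scheme follows; the bit values and names are illustrative stand-ins, not the actual constants from the trace-id headers.

// Illustrative sketch of the alternating epoch-bit scheme (assumed values).
#include <cstdint>

namespace epoch_bits_sketch {

const uint8_t EPOCH_0_BIT = 1 << 0;       // hypothetical bit assignment
const uint8_t EPOCH_1_BIT = 1 << 1;       // hypothetical bit assignment
static bool current_epoch_is_1 = false;   // flips on each epoch rotation

inline uint8_t this_epoch_bit() { return current_epoch_is_1 ? EPOCH_1_BIT : EPOCH_0_BIT; }
inline uint8_t previous_epoch_bit() { return current_epoch_is_1 ? EPOCH_0_BIT : EPOCH_1_BIT; }

// SET_USED_THIS_EPOCH style: tag an artifact as used in the current epoch.
inline void tag(uint8_t& flags) { flags |= this_epoch_bit(); }
// USED_THIS_EPOCH / USED_PREVIOUS_EPOCH style predicates.
inline bool used_this_epoch(uint8_t flags) { return (flags & this_epoch_bit()) != 0; }
inline bool used_previous_epoch(uint8_t flags) { return (flags & previous_epoch_bit()) != 0; }
// CLEAR_PREVIOUS_EPOCH_* style: mask off the previous-epoch bit once serialized.
inline void clear_previous_epoch(uint8_t& flags) { flags &= uint8_t(~previous_epoch_bit()); }

} // namespace epoch_bits_sketch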
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -23,10 +23,10 @@
 */

#include "precompiled.hpp"
#include "jfrfiles/jfrTypes.hpp"
#include "jfr/recorder/repository/jfrChunk.hpp"
#include "jfr/recorder/repository/jfrChunkWriter.hpp"
#include "jfr/utilities/jfrTime.hpp"
#include "jfr/utilities/jfrTypes.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.inline.hpp"
@ -462,7 +462,6 @@ void JfrRecorderService::vm_error_rotation() {
    open_new_chunk(true);
  }
  if (_chunkwriter.is_valid()) {
    _checkpoint_manager.register_service_thread(t);
    _storage.flush_regular_buffer(t->jfr_thread_local()->native_buffer(), t);
    _chunkwriter.mark_chunk_final();
    invoke_flush();
@ -101,7 +101,6 @@ bool JfrRecorderThread::start(JfrCheckpointManager* cp_manager, JfrPostBox* post
  Thread* const t = start_thread(h_thread_oop, recorderthread_entry, THREAD);
  if (!HAS_PENDING_EXCEPTION) {
    Jfr::exclude_thread(t);
    cp_manager->register_service_thread(t);
    return true;
  }
  assert(HAS_PENDING_EXCEPTION, "invariant");
@ -191,7 +191,7 @@ bool JfrStackTrace::record_thread(JavaThread& thread, frame& frame) {
      // none of it is safe
      return false;
    }
    const traceid mid = JfrTraceId::use(method);
    const traceid mid = JfrTraceId::load(method);
    int type = st.is_interpreted_frame() ? JfrStackFrame::FRAME_INTERPRETER : JfrStackFrame::FRAME_JIT;
    int bci = 0;
    if (method->is_native()) {
@ -246,7 +246,7 @@ bool JfrStackTrace::record_safe(JavaThread* thread, int skip) {
      break;
    }
    const Method* method = vfs.method();
    const traceid mid = JfrTraceId::use(method);
    const traceid mid = JfrTraceId::load(method);
    int type = vfs.is_interpreted_frame() ? JfrStackFrame::FRAME_INTERPRETER : JfrStackFrame::FRAME_JIT;
    int bci = 0;
    if (method->is_native()) {
83
src/hotspot/share/jfr/recorder/storage/jfrEpochStorage.hpp
Normal file
@ -0,0 +1,83 @@
/*
 * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#ifndef SHARE_JFR_RECORDER_STORAGE_JFREPOCHSTORAGE_HPP
#define SHARE_JFR_RECORDER_STORAGE_JFREPOCHSTORAGE_HPP

#include "jfr/recorder/storage/jfrBuffer.hpp"
#include "jfr/recorder/storage/jfrMemorySpace.hpp"
#include "jfr/recorder/storage/jfrMemorySpaceRetrieval.hpp"
#include "jfr/utilities/jfrAllocation.hpp"
#include "jfr/utilities/jfrConcurrentQueue.hpp"
#include "jfr/utilities/jfrLinkedList.hpp"

/*
 * Provides storage as a function of an epoch, with iteration capabilities for the current and previous epoch.
 * Iteration over the current epoch is incremental while iteration over the previous epoch is complete,
 * including storage reclamation. The design caters to use cases having multiple incremental iterations
 * over the current epoch, and a single, complete, iteration over the previous epoch.
 *
 * The JfrEpochStorage can be specialized by the following policies:
 *
 * NodeType          the type of the Node to be managed by the JfrMemorySpace.
 *
 * RetrievalPolicy   see jfrMemorySpace.hpp for a description.
 *
 */
template <typename NodeType, template <typename> class RetrievalPolicy>
class JfrEpochStorageHost : public JfrCHeapObj {
  typedef JfrMemorySpace<JfrEpochStorageHost<NodeType, RetrievalPolicy>,
                         RetrievalPolicy,
                         JfrConcurrentQueue<NodeType>,
                         JfrLinkedList<NodeType>,
                         true> EpochMspace;
 public:
  typedef NodeType Buffer;
  typedef NodeType* BufferPtr;
  typedef EpochMspace Mspace;

  JfrEpochStorageHost();
  ~JfrEpochStorageHost();
  bool initialize(size_t min_elem_size, size_t free_list_cache_count_limit, size_t cache_prealloc_count);

  BufferPtr acquire(size_t size, Thread* thread);
  void release(BufferPtr buffer);

  template <typename Functor>
  void iterate(Functor& functor, bool previous_epoch = false);

  DEBUG_ONLY(void verify_previous_empty() const;)

 private:
  EpochMspace* _mspace;

  // mspace callback
  void register_full(BufferPtr buffer, Thread* thread);

  template <typename, template <typename> class, typename, typename, bool>
  friend class JfrMemorySpace;
};

typedef JfrEpochStorageHost<JfrBuffer, JfrMspaceRemoveRetrieval> JfrEpochStorage;

#endif // SHARE_JFR_RECORDER_STORAGE_JFREPOCHSTORAGE_HPP
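To make the intended call sequence concrete, here is a usage sketch against the interface above. The functor, the sizes, and the initialization values are assumptions for illustration only, not code from this changeset.

// Hypothetical client code driving a JfrEpochStorage instance.
struct ExampleWriteFunctor {
  bool process(JfrEpochStorage::BufferPtr buffer) {
    // read or serialize the buffer contents; return true to keep iterating
    return true;
  }
};

static void example_epoch_storage_usage(Thread* thread) {
  JfrEpochStorage storage;
  storage.initialize(4096, JFR_MSPACE_UNLIMITED_CACHE_SIZE, 0);  // assumed sizes
  JfrEpochStorage::BufferPtr buffer = storage.acquire(1024, thread);
  // ... write epoch-relative data into the buffer ...
  storage.release(buffer);        // retire; reclaimed when its epoch is iterated
  ExampleWriteFunctor writer;
  storage.iterate(writer);        // incremental pass over the current epoch
  storage.iterate(writer, true);  // complete pass over the previous epoch, with reclamation
}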
@ -0,0 +1,112 @@
/*
 * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_JFR_RECORDER_STORAGE_JFREPOCHSTORAGE_INLINE_HPP
#define SHARE_JFR_RECORDER_STORAGE_JFREPOCHSTORAGE_INLINE_HPP

#include "jfr/recorder/storage/jfrEpochStorage.hpp"
#include "jfr/recorder/storage/jfrMemorySpace.inline.hpp"
#include "jfr/recorder/storage/jfrStorageUtils.inline.hpp"
#include "jfr/utilities/jfrConcurrentQueue.inline.hpp"
#include "jfr/utilities/jfrLinkedList.inline.hpp"
#include "logging/log.hpp"

template <typename NodeType, template <typename> class RetrievalPolicy>
JfrEpochStorageHost<NodeType, RetrievalPolicy>::JfrEpochStorageHost() : _mspace(NULL) {}

template <typename NodeType, template <typename> class RetrievalPolicy>
JfrEpochStorageHost<NodeType, RetrievalPolicy>::~JfrEpochStorageHost() {
  delete _mspace;
}

template <typename NodeType, template <typename> class RetrievalPolicy>
bool JfrEpochStorageHost<NodeType, RetrievalPolicy>::initialize(size_t min_elem_size, size_t free_list_cache_count_limit, size_t cache_prealloc_count) {
  assert(_mspace == NULL, "invariant");
  _mspace = new EpochMspace(min_elem_size, free_list_cache_count_limit, this);
  return _mspace != NULL && _mspace->initialize(cache_prealloc_count);
}

template <typename NodeType, template <typename> class RetrievalPolicy>
inline NodeType* JfrEpochStorageHost<NodeType, RetrievalPolicy>::acquire(size_t size, Thread* thread) {
  BufferPtr buffer = mspace_acquire_to_live_list(size, _mspace, thread);
  if (buffer == NULL) {
    log_warning(jfr)("Unable to allocate " SIZE_FORMAT " bytes of %s.", _mspace->min_element_size(), "epoch storage");
    return NULL;
  }
  assert(buffer->acquired_by_self(), "invariant");
  return buffer;
}

template <typename NodeType, template <typename> class RetrievalPolicy>
void JfrEpochStorageHost<NodeType, RetrievalPolicy>::release(NodeType* buffer) {
  assert(buffer != NULL, "invariant");
  buffer->set_retired();
}

template <typename NodeType, template <typename> class RetrievalPolicy>
void JfrEpochStorageHost<NodeType, RetrievalPolicy>::register_full(NodeType* buffer, Thread* thread) {
  // nothing here at the moment
}

template <typename NodeType, template <typename> class RetrievalPolicy>
template <typename Functor>
void JfrEpochStorageHost<NodeType, RetrievalPolicy>::iterate(Functor& functor, bool previous_epoch) {
  typedef ReleaseRetiredToFreeListOp<EpochMspace, typename EpochMspace::LiveList> ReleaseStorage;
  typedef CompositeOperation<Functor, ReleaseStorage> PreviousEpochOperation;
  if (previous_epoch) {
    ReleaseStorage rs(_mspace, _mspace->live_list(true));
    PreviousEpochOperation peo(&functor, &rs);
    process_live_list(peo, _mspace, true);
    return;
  }
  process_live_list(functor, _mspace, false);
}
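Note how the previous-epoch branch above pairs the caller's functor with a release operation, so a single walk both processes and excises the retired buffers. CompositeOperation is defined in the JFR storage utilities; the sketch below shows the assumed shape of that pairing, not the actual definition.

// Assumed shape of a composite operation: apply two operations per node in one pass.
template <typename First, typename Second>
class CompositeOperationSketch {
 private:
  First* _first;
  Second* _second;
 public:
  CompositeOperationSketch(First* first, Second* second) : _first(first), _second(second) {}
  template <typename Node>
  bool process(Node* node) {
    const bool result = _first->process(node);  // e.g. serialize the buffer
    _second->process(node);                     // e.g. release it back to the free list
    return result;
  }
};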
#ifdef ASSERT

template <typename Mspace>
class EmptyVerifier {
 private:
  Mspace* _mspace;
 public:
  typedef typename Mspace::Node Node;
  typedef typename Mspace::NodePtr NodePtr;
  EmptyVerifier(Mspace* mspace) : _mspace(mspace) {}
  bool process(NodePtr node) {
    assert(node != NULL, "invariant");
    assert(node->empty(), "invariant");
    return true;
  }
};

template <typename NodeType, template <typename> class RetrievalPolicy>
void JfrEpochStorageHost<NodeType, RetrievalPolicy>::verify_previous_empty() const {
  typedef EmptyVerifier<JfrEpochStorage::Mspace> VerifyEmptyMspace;
  VerifyEmptyMspace vem(_mspace);
  process_live_list(vem, _mspace, true);
}

#endif // ASSERT

#endif // SHARE_JFR_RECORDER_STORAGE_JFREPOCHSTORAGE_INLINE_HPP
@ -40,10 +40,13 @@ JfrFullStorage<ValueType, NodeType, AllocPolicy>::~JfrFullStorage() {
    node = _free_node_list->remove();
    delete node;
  }
  delete _free_node_list;

  while (_queue->is_nonempty()) {
    node = _queue->remove();
    delete node;
  }
  delete _queue;
}

template <typename ValueType, template <typename> class NodeType, typename AllocPolicy>
@ -26,67 +26,113 @@

#include "jfr/utilities/jfrAllocation.hpp"

template <typename Callback, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType = FreeListType>
const size_t JFR_MSPACE_UNLIMITED_CACHE_SIZE = max_uintx;

/*
 * A JfrMemorySpace abstracts a memory area by exposing configurable relations and functions.
 *
 * A memory space, or mspace for short, manages committed memory as multiples of a basic unit, min_element_size.
 * At the lowest level, and for higher levels of control, memory units can be directly managed using the allocate() and deallocate() functions.
 * More convenience is achieved by instead using one of the many higher level functions, which use allocate() and deallocate() underneath.
 * For storage, there exist two lists, a free list and a live list, each of a type that is configurable using policies.
 * To get memory from the mspace, use the acquire() function. To release the memory back, use release().
 * The exact means for how memory is provisioned and delivered through acquire() is configurable using a RetrievalPolicy.
 * A JfrMemorySpace can be specialized to be 'epoch aware', meaning it will perform list management as a function of
 * epoch state. This provides a convenient, relatively low-level mechanism to process epoch-relative data.
 *
 * A client of a JfrMemorySpace will specialize it according to the dimensions exposed by the following policies:
 *
 * Client           the type of the client, an instance is to be passed into the constructor.
 *                  a client must provide a single callback function:
 *                  register_full(FreeListType::Node*, Thread*);
 *
 * RetrievalPolicy  a template template class detailing how to retrieve memory for acquire.
 *                  the type parameter for the RetrievalPolicy template class is JfrMemorySpace and the policy class must provide:
 *                  FreeListType::Node* acquire(JfrMemorySpace* mspace, FreeListType* free_list, Thread*, size_t size, bool previous_epoch);
 *
 * FreeListType     the type of the free list. The syntactic interface to be fulfilled is most conveniently read from an example,
 *                  please see utilities/jfrConcurrentQueue.hpp.
 *
 * FreeListType::Node gives the basic node type for each individual unit to be managed by the memory space.
 *
 * LiveListType     the type of the live list. The syntactic interface is equivalent to the FreeListType.
 *                  LiveListType::Node must be compatible with FreeListType::Node.
 *
 * epoch_aware      boolean, default value is false.
 *
 */

template <typename Client,
          template <typename> class RetrievalPolicy,
          typename FreeListType,
          typename LiveListType = FreeListType,
          bool epoch_aware = false>
class JfrMemorySpace : public JfrCHeapObj {
 public:
  typedef FreeListType FreeList;
  typedef FullListType FullList;
  typedef LiveListType LiveList;
  typedef typename FreeListType::Node Node;
  typedef typename FreeListType::NodePtr NodePtr;
 private:
  FreeList _free_list;
  FullList _full_list;
  const size_t _min_elem_size;
  const size_t _limit_size;
  const size_t _free_list_cache_count;
  size_t _free_list_count;
  Callback* _callback;

  bool should_populate_free_list() const;

 public:
  JfrMemorySpace(size_t min_elem_size, size_t limit_size, size_t free_list_cache_count, Callback* callback);
  JfrMemorySpace(size_t min_elem_size, size_t free_list_cache_count_limit, Client* client);
  ~JfrMemorySpace();
  bool initialize();
  bool initialize(size_t cache_prealloc_count, bool prealloc_to_free_list = true);

  size_t min_elem_size() const;
  size_t limit_size() const;
  size_t min_element_size() const;

  NodePtr allocate(size_t size);
  void deallocate(NodePtr node);

  NodePtr acquire(Thread* thread, size_t size = 0);
  NodePtr acquire(size_t size, bool free_list, Thread* thread, bool previous_epoch = false);
  void release(NodePtr node);
  void release_live(NodePtr t, bool previous_epoch = false);
  void release_free(NodePtr t);

  FreeList& free_list();
  const FreeList& free_list() const;

  FullList& full_list();
  const FullList& full_list() const;
  LiveList& live_list(bool previous_epoch = false);
  const LiveList& live_list(bool previous_epoch = false) const;

  bool free_list_is_empty() const;
  bool full_list_is_empty() const;
  bool free_list_is_nonempty() const;
  bool full_list_is_nonempty() const;
  bool in_free_list(const Node* node) const;
  bool in_full_list(const Node* node) const;
  bool in_mspace(const Node* node) const;
  bool live_list_is_empty(bool previous_epoch = false) const;
  bool live_list_is_nonempty(bool previous_epoch = false) const;

  void add_to_free_list(NodePtr node);
  void add_to_full_list(NodePtr node);
  void add_to_live_list(NodePtr node, bool previous_epoch = false);

  NodePtr remove_from_free_list();
  NodePtr remove_from_full_list();
  template <typename Callback>
  void iterate_free_list(Callback& callback);

  NodePtr clear_free_list();
  NodePtr clear_full_list();
  template <typename Callback>
  void iterate_live_list(Callback& callback, bool previous_epoch = false);

  template <typename Processor>
  void iterate(Processor& processor, bool full_list = true);
  bool in_free_list(const Node* node) const;
  bool in_live_list(const Node* node, bool previous_epoch = false) const;
  bool in_current_epoch_list(const Node* node) const;
  bool in_previous_epoch_list(const Node* node) const;

  void decrement_free_list_count();

  void register_full(NodePtr node, Thread* thread);

 private:
  FreeList _free_list;
  LiveList _live_list_epoch_0;
  LiveList _live_list_epoch_1;
  Client* _client;
  const size_t _min_element_size;
  const size_t _free_list_cache_count_limit;
  size_t _free_list_cache_count;

  bool should_populate_free_list_cache() const;
  bool is_free_list_cache_limited() const;
  const LiveList& epoch_list_selector(u1 epoch) const;
  LiveList& epoch_list_selector(u1 epoch);
  const LiveList& current_epoch_list() const;
  LiveList& current_epoch_list();
  const LiveList& previous_epoch_list() const;
  LiveList& previous_epoch_list();
};

#endif // SHARE_JFR_RECORDER_STORAGE_JFRMEMORYSPACE_HPP
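For a concrete specialization, see jfrEpochStorage.hpp earlier in this change: an epoch-aware mspace using a concurrent queue for the free list and linked lists for the two live lists:

typedef JfrMemorySpace<JfrEpochStorageHost<NodeType, RetrievalPolicy>,
                       RetrievalPolicy,
                       JfrConcurrentQueue<NodeType>,
                       JfrLinkedList<NodeType>,
                       true> EpochMspace;

A minimal hypothetical client satisfying the Client policy might look like the sketch below; only the register_full() callback is required by the contract described above, and JfrMspaceRemoveRetrieval is the retrieval policy already used by the epoch storage.

// Hypothetical client type; illustration only, not part of this changeset.
class ExampleClient : public JfrCHeapObj {
 public:
  typedef JfrMemorySpace<ExampleClient, JfrMspaceRemoveRetrieval, JfrConcurrentQueue<JfrBuffer> > Mspace;
  void register_full(Mspace::NodePtr node, Thread* thread) {
    // invoked by the mspace when a node transitions to full
  }
};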
@ -25,134 +25,191 @@
#ifndef SHARE_JFR_RECORDER_STORAGE_JFRMEMORYSPACE_INLINE_HPP
#define SHARE_JFR_RECORDER_STORAGE_JFRMEMORYSPACE_INLINE_HPP

#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp"
#include "jfr/recorder/storage/jfrMemorySpace.hpp"
#include "runtime/os.hpp"

template <typename Callback, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType>
JfrMemorySpace<Callback, RetrievalPolicy, FreeListType, FullListType>::
JfrMemorySpace(size_t min_elem_size, size_t limit_size, size_t free_list_cache_count, Callback* callback) :
template <typename Client, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware>
JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::
JfrMemorySpace(size_t min_element_size, size_t free_list_cache_count_limit, Client* client) :
  _free_list(),
  _full_list(),
  _min_elem_size(min_elem_size),
  _limit_size(limit_size),
  _free_list_cache_count(free_list_cache_count),
  _free_list_count(0),
  _callback(callback) {}
  _live_list_epoch_0(),
  _live_list_epoch_1(),
  _client(client),
  _min_element_size(min_element_size),
  _free_list_cache_count_limit(free_list_cache_count_limit),
  _free_list_cache_count(0) {}

template <typename Callback, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType>
JfrMemorySpace<Callback, RetrievalPolicy, FreeListType, FullListType>::~JfrMemorySpace() {
  while (full_list_is_nonempty()) {
    NodePtr node = remove_from_full_list();
    deallocate(node);
template <typename Client, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware>
JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::~JfrMemorySpace() {
  while (_live_list_epoch_0.is_nonempty()) {
    deallocate(_live_list_epoch_0.remove());
  }
  while (free_list_is_nonempty()) {
    NodePtr node = remove_from_free_list();
    deallocate(node);
  while (_live_list_epoch_1.is_nonempty()) {
    deallocate(_live_list_epoch_1.remove());
  }
  while (_free_list.is_nonempty()) {
    deallocate(_free_list.remove());
  }
}

template <typename Callback, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType>
bool JfrMemorySpace<Callback, RetrievalPolicy, FreeListType, FullListType>::initialize() {
  if (!(_free_list.initialize() && _full_list.initialize())) {
template <typename Client, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware>
bool JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::initialize(size_t cache_prealloc_count, bool prealloc_to_free_list) {
  if (!(_free_list.initialize() && _live_list_epoch_0.initialize() && _live_list_epoch_1.initialize())) {
    return false;
  }
  assert(_min_elem_size % os::vm_page_size() == 0, "invariant");
  assert(_limit_size % os::vm_page_size() == 0, "invariant");
  // pre-allocate free list cache elements
  for (size_t i = 0; i < _free_list_cache_count; ++i) {
    NodePtr const node = allocate(_min_elem_size);
  // pre-allocate elements to be cached in the requested list
  for (size_t i = 0; i < cache_prealloc_count; ++i) {
    NodePtr const node = allocate(_min_element_size);
    if (node == NULL) {
      return false;
    }
    add_to_free_list(node);
    if (prealloc_to_free_list) {
      add_to_free_list(node);
    } else {
      add_to_live_list(node);
    }
  }
  return true;
}

template <typename Callback, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType>
inline bool JfrMemorySpace<Callback, RetrievalPolicy, FreeListType, FullListType>::should_populate_free_list() const {
  return _free_list_count < _free_list_cache_count;
template <typename Client, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware>
inline bool JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::should_populate_free_list_cache() const {
  return !is_free_list_cache_limited() || _free_list_cache_count < _free_list_cache_count_limit;
}

template <typename Callback, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType>
inline size_t JfrMemorySpace<Callback, RetrievalPolicy, FreeListType, FullListType>::min_elem_size() const {
  return _min_elem_size;
template <typename Client, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware>
inline bool JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::is_free_list_cache_limited() const {
  return _free_list_cache_count_limit != JFR_MSPACE_UNLIMITED_CACHE_SIZE;
}

template <typename Callback, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType>
inline size_t JfrMemorySpace<Callback, RetrievalPolicy, FreeListType, FullListType>::limit_size() const {
  return _limit_size;
template <typename Client, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware>
inline size_t JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::min_element_size() const {
  return _min_element_size;
}

template <typename Callback, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType>
inline FreeListType& JfrMemorySpace<Callback, RetrievalPolicy, FreeListType, FullListType>::free_list() {
template <typename Client, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware>
inline FreeListType& JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::free_list() {
  return _free_list;
}

template <typename Callback, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType>
inline const FreeListType& JfrMemorySpace<Callback, RetrievalPolicy, FreeListType, FullListType>::free_list() const {
template <typename Client, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware>
inline const FreeListType& JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::free_list() const {
  return _free_list;
}

template <typename Callback, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType>
inline FullListType& JfrMemorySpace<Callback, RetrievalPolicy, FreeListType, FullListType>::full_list() {
  return _full_list;
template <typename Client, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware>
inline FullListType& JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::live_list(bool previous_epoch) {
  if (epoch_aware) {
    return previous_epoch ? previous_epoch_list() : current_epoch_list();
  }
  return _live_list_epoch_0;
}

template <typename Callback, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType>
inline const FullListType& JfrMemorySpace<Callback, RetrievalPolicy, FreeListType, FullListType>::full_list() const {
  return _full_list;
template <typename Client, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware>
inline const FullListType& JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::live_list(bool previous_epoch) const {
  if (epoch_aware) {
    return previous_epoch ? previous_epoch_list() : current_epoch_list();
  }
  return _live_list_epoch_0;
}

template <typename Callback, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType>
inline bool JfrMemorySpace<Callback, RetrievalPolicy, FreeListType, FullListType>::free_list_is_empty() const {
template <typename Client, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware>
inline bool JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::free_list_is_empty() const {
  return _free_list.is_empty();
}

template <typename Callback, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType>
inline bool JfrMemorySpace<Callback, RetrievalPolicy, FreeListType, FullListType>::free_list_is_nonempty() const {
template <typename Client, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware>
inline bool JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::free_list_is_nonempty() const {
  return !free_list_is_empty();
}

template <typename Callback, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType>
inline bool JfrMemorySpace<Callback, RetrievalPolicy, FreeListType, FullListType>::full_list_is_empty() const {
  return _full_list.is_empty();
template <typename Client, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware>
inline bool JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::live_list_is_empty(bool previous_epoch) const {
  return live_list().is_empty();
}

template <typename Callback, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType>
inline bool JfrMemorySpace<Callback, RetrievalPolicy, FreeListType, FullListType>::full_list_is_nonempty() const {
  return !full_list_is_empty();
template <typename Client, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware>
inline bool JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::live_list_is_nonempty(bool previous_epoch) const {
  return live_list().is_nonempty();
}

template <typename Callback, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType>
bool JfrMemorySpace<Callback, RetrievalPolicy, FreeListType, FullListType>::in_free_list(const typename FreeListType::Node* node) const {
template <typename Client, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware>
bool JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::in_free_list(const typename FreeListType::Node* node) const {
  return _free_list.in_list(node);
}

template <typename Callback, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType>
bool JfrMemorySpace<Callback, RetrievalPolicy, FreeListType, FullListType>::in_full_list(const typename FreeListType::Node* node) const {
  return _full_list.in_list(node);
template <typename Client, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware>
inline const typename JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::LiveList&
JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::epoch_list_selector(u1 epoch) const {
  assert(epoch_aware, "invariant");
  return epoch == 0 ? _live_list_epoch_0 : _live_list_epoch_1;
}

template <typename Callback, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType>
bool JfrMemorySpace<Callback, RetrievalPolicy, FreeListType, FullListType>::in_mspace(const typename FreeListType::Node* node) const {
  return in_full_list(node) || in_free_list(node);
template <typename Client, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware>
inline typename JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::LiveList&
JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::epoch_list_selector(u1 epoch) {
  return const_cast<typename JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::LiveList&>(
    const_cast<const JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>*>(this)->epoch_list_selector(epoch));
}

template <typename Client, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware>
inline const typename JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::LiveList&
JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::current_epoch_list() const {
  assert(epoch_aware, "invariant");
  return epoch_list_selector(JfrTraceIdEpoch::current());
}

template <typename Client, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware>
inline typename JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::LiveList&
JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::current_epoch_list() {
  return const_cast<typename JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::LiveList&>(
    const_cast<const JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>*>(this)->current_epoch_list());
}

template <typename Client, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware>
inline const typename JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::LiveList&
JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::previous_epoch_list() const {
  assert(epoch_aware, "invariant");
  return epoch_list_selector(JfrTraceIdEpoch::previous());
}

template <typename Client, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware>
inline typename JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::LiveList&
JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::previous_epoch_list() {
  return const_cast<typename JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::LiveList&>(
    const_cast<const JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>*>(this)->previous_epoch_list());
}
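The mutable overloads above delegate to their const counterparts through a pair of const_casts (the usual "call the const version" idiom), keeping the epoch-selection logic in one place. A generic sketch of the pattern:

// Generic illustration of reusing a const accessor from the non-const overload.
class Widget {
 private:
  int _value;
 public:
  const int& value() const { return _value; }  // selection logic lives here
  int& value() {
    return const_cast<int&>(const_cast<const Widget*>(this)->value());
  }
};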
template <typename Client, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware>
bool JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::in_live_list(const typename FreeListType::Node* node, bool previous_epoch) const {
  return live_list(previous_epoch).in_list(node);
}

template <typename Client, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware>
inline bool JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::in_current_epoch_list(const typename FreeListType::Node* node) const {
  assert(epoch_aware, "invariant");
  return current_epoch_list().in_list(node);
}

template <typename Client, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware>
inline bool JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::in_previous_epoch_list(const typename FreeListType::Node* node) const {
  assert(epoch_aware, "invariant");
  return previous_epoch_list().in_list(node);
}

// allocations are even multiples of the mspace min size
static inline size_t align_allocation_size(size_t requested_size, size_t min_elem_size) {
  assert((int)min_elem_size % os::vm_page_size() == 0, "invariant");
  u8 alloc_size_bytes = min_elem_size;
static inline size_t align_allocation_size(size_t requested_size, size_t min_element_size) {
  u8 alloc_size_bytes = min_element_size;
  while (requested_size > alloc_size_bytes) {
    alloc_size_bytes <<= 1;
  }
  assert((int)alloc_size_bytes % os::vm_page_size() == 0, "invariant");
  return (size_t)alloc_size_bytes;
}
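A worked example of the doubling loop above, assuming an illustrative 4K minimum element size: a request for 17K doubles 4K -> 8K -> 16K -> 32K, so the allocation size is always a power-of-two multiple of the minimum, and a request at or below the minimum returns the minimum itself.

// Illustrative values only:
//   align_allocation_size(17 * 1024, 4 * 1024) == 32 * 1024  (4K -> 8K -> 16K -> 32K)
//   align_allocation_size( 4 * 1024, 4 * 1024) ==  4 * 1024  (loop body never runs)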
template <typename Callback, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType>
inline typename FreeListType::NodePtr JfrMemorySpace<Callback, RetrievalPolicy, FreeListType, FullListType>::allocate(size_t size) {
  const size_t aligned_size_bytes = align_allocation_size(size, _min_elem_size);
template <typename Client, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware>
inline typename FreeListType::NodePtr JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::allocate(size_t size) {
  const size_t aligned_size_bytes = align_allocation_size(size, _min_element_size);
  void* const allocation = JfrCHeapObj::new_array<u1>(aligned_size_bytes + sizeof(Node));
  if (allocation == NULL) {
    return NULL;
@ -166,22 +223,23 @@ inline typename FreeListType::NodePtr JfrMemorySpace<Callback, RetrievalPolicy,
  return node;
}

template <typename Callback, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType>
inline void JfrMemorySpace<Callback, RetrievalPolicy, FreeListType, FullListType>::deallocate(typename FreeListType::NodePtr node) {
template <typename Client, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware>
inline void JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::deallocate(typename FreeListType::NodePtr node) {
  assert(node != NULL, "invariant");
  assert(!in_free_list(node), "invariant");
  assert(!in_full_list(node), "invariant");
  assert(!_live_list_epoch_0.in_list(node), "invariant");
  assert(!_live_list_epoch_1.in_list(node), "invariant");
  assert(node != NULL, "invariant");
  JfrCHeapObj::free(node, node->total_size());
}

template <typename Callback, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType>
inline typename FreeListType::NodePtr JfrMemorySpace<Callback, RetrievalPolicy, FreeListType, FullListType>::acquire(Thread* thread, size_t size /* 0 */) {
  return RetrievalPolicy<JfrMemorySpace<Callback, RetrievalPolicy, FreeListType, FullListType> >::acquire(this, thread, size);
template <typename Client, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware>
inline typename FreeListType::NodePtr JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::acquire(size_t size, bool free_list, Thread* thread, bool previous_epoch) {
  return RetrievalPolicy<JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware> >::acquire(this, free_list, thread, size, previous_epoch);
}

template <typename Callback, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType>
inline void JfrMemorySpace<Callback, RetrievalPolicy, FreeListType, FullListType>::release(typename FreeListType::NodePtr node) {
template <typename Client, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware>
inline void JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::release(typename FreeListType::NodePtr node) {
  assert(node != NULL, "invariant");
  if (node->transient()) {
    deallocate(node);
@ -190,82 +248,61 @@ inline void JfrMemorySpace<Callback, RetrievalPolicy, FreeListType, FullListType
  assert(node->empty(), "invariant");
  assert(!node->retired(), "invariant");
  assert(node->identity() == NULL, "invariant");
  if (should_populate_free_list()) {
  if (should_populate_free_list_cache()) {
    add_to_free_list(node);
  } else {
    deallocate(node);
  }
}

template <typename Callback, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType>
inline void JfrMemorySpace<Callback, RetrievalPolicy, FreeListType, FullListType>::add_to_free_list(typename FreeListType::NodePtr node) {
template <typename Client, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware>
inline void JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::add_to_free_list(typename FreeListType::NodePtr node) {
  assert(node != NULL, "invariant");
  assert(!in_free_list(node), "invariant");
  _free_list.add(node);
  Atomic::inc(&_free_list_count);
  if (is_free_list_cache_limited()) {
    Atomic::inc(&_free_list_cache_count);
  }
}

template <typename Callback, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType>
inline void JfrMemorySpace<Callback, RetrievalPolicy, FreeListType, FullListType>::add_to_full_list(typename FreeListType::NodePtr node) {
template <typename Client, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware>
inline void JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::add_to_live_list(typename FreeListType::NodePtr node, bool previous_epoch) {
  assert(node != NULL, "invariant");
  _full_list.add(node);
  live_list(previous_epoch).add(node);
}

template <typename Callback, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType>
inline typename FreeListType::NodePtr JfrMemorySpace<Callback, RetrievalPolicy, FreeListType, FullListType>::remove_from_free_list() {
  NodePtr node = _free_list.remove();
  if (node != NULL) {
    decrement_free_list_count();
  }
  return node;
}

template <typename Callback, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType>
inline typename FreeListType::NodePtr JfrMemorySpace<Callback, RetrievalPolicy, FreeListType, FullListType>::remove_from_full_list() {
  return _full_list.remove();
}

template <typename Callback, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType>
inline void JfrMemorySpace<Callback, RetrievalPolicy, FreeListType, FullListType>::decrement_free_list_count() {
  Atomic::dec(&_free_list_count);
}

template <typename Callback, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType>
inline typename FreeListType::NodePtr JfrMemorySpace<Callback, RetrievalPolicy, FreeListType, FullListType>::clear_free_list() {
  NodePtr node = _free_list.clear();
  NodePtr temp = node;
  while (temp != NULL) {
    decrement_free_list_count();
    temp = temp->next();
  }
  return node;
}

template <typename Callback, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType>
inline typename FreeListType::NodePtr JfrMemorySpace<Callback, RetrievalPolicy, FreeListType, FullListType>::clear_full_list() {
  return _full_list.clear();
}

template <typename Callback, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType>
template <typename Processor>
inline void JfrMemorySpace<Callback, RetrievalPolicy, FreeListType, FullListType>::iterate(Processor& processor, bool full_list /* true */) {
  if (full_list) {
    _full_list.iterate(processor);
  } else {
    _free_list.iterate(processor);
template <typename Client, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware>
inline void JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::decrement_free_list_count() {
  if (is_free_list_cache_limited()) {
    Atomic::dec(&_free_list_cache_count);
  }
}

template <typename Callback, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType>
inline void JfrMemorySpace<Callback, RetrievalPolicy, FreeListType, FullListType>::register_full(typename FreeListType::NodePtr node, Thread* thread) {
  _callback->register_full(node, thread);
template <typename Client, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware>
template <typename Callback>
inline void JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::iterate_free_list(Callback& callback) {
  return _free_list.iterate(callback);
}

template <typename Mspace, typename Callback>
static inline Mspace* create_mspace(size_t min_elem_size, size_t limit, size_t free_list_cache_count, Callback* cb) {
  Mspace* const mspace = new Mspace(min_elem_size, limit, free_list_cache_count, cb);
template <typename Client, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware>
template <typename Callback>
inline void JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::iterate_live_list(Callback& callback, bool previous_epoch) {
  if (epoch_aware) {
    live_list(previous_epoch).iterate(callback);
    return;
  }
  _live_list_epoch_0.iterate(callback);
}

template <typename Client, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware>
inline void JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::register_full(typename FreeListType::NodePtr node, Thread* thread) {
  _client->register_full(node, thread);
}

template <typename Mspace, typename Client>
static inline Mspace* create_mspace(size_t min_element_size, size_t free_list_cache_count_limit, size_t cache_prealloc_count, bool prealloc_to_free_list, Client* cb) {
  Mspace* const mspace = new Mspace(min_element_size, free_list_cache_count_limit, cb);
  if (mspace != NULL) {
    mspace->initialize();
    mspace->initialize(cache_prealloc_count, prealloc_to_free_list);
  }
  return mspace;
}
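A hypothetical call of the new create_mspace() overload follows; the host type alias and all values are illustrative assumptions, not code taken from this changeset.

// Sketch only: an epoch-aware mspace with an unlimited free-list cache and no pre-allocation.
typedef JfrEpochStorageHost<JfrBuffer, JfrMspaceRemoveRetrieval> EpochHost;  // assumed alias
static EpochHost::Mspace* example_create(EpochHost* host) {
  return create_mspace<EpochHost::Mspace, EpochHost>(
    4096,                             // min_element_size (assumed)
    JFR_MSPACE_UNLIMITED_CACHE_SIZE,  // free_list_cache_count_limit
    0,                                // cache_prealloc_count
    true,                             // prealloc_to_free_list
    host);                            // client receiving register_full() callbacks
}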
@ -301,33 +338,6 @@ inline typename Mspace::NodePtr mspace_allocate_transient_lease(size_t size, Msp
  return node;
}

template <typename Mspace>
inline typename Mspace::NodePtr mspace_allocate_to_full(size_t size, Mspace* mspace, Thread* thread) {
  typename Mspace::NodePtr node = mspace_allocate_acquired(size, mspace, thread);
  if (node == NULL) return NULL;
  assert(node->acquired_by_self(), "invariant");
  mspace->add_to_full_list(node);
  return node;
}

template <typename Mspace>
inline typename Mspace::NodePtr mspace_allocate_transient_to_full(size_t size, Mspace* mspace, Thread* thread) {
  typename Mspace::NodePtr node = mspace_allocate_transient(size, mspace, thread);
  if (node == NULL) return NULL;
  assert(node->transient(), "invariant");
  mspace->add_to_full_list(node);
  return node;
}

template <typename Mspace>
inline typename Mspace::NodePtr mspace_allocate_transient_lease_to_full(size_t size, Mspace* mspace, Thread* thread) {
  typename Mspace::NodePtr node = mspace_allocate_transient_lease(size, mspace, thread);
  if (node == NULL) return NULL;
  assert(node->lease(), "invariant");
  mspace->add_to_full_list(node);
  return node;
}

template <typename Mspace>
inline typename Mspace::NodePtr mspace_allocate_transient_lease_to_free(size_t size, Mspace* mspace, Thread* thread) {
  typename Mspace::NodePtr node = mspace_allocate_transient_lease(size, mspace, thread);
@ -338,15 +348,15 @@ inline typename Mspace::NodePtr mspace_allocate_transient_lease_to_free(size_t s
}

template <typename Mspace>
inline typename Mspace::NodePtr mspace_get_free(size_t size, Mspace* mspace, Thread* thread) {
  return mspace->acquire(thread, size);
inline typename Mspace::NodePtr mspace_acquire_free(size_t size, Mspace* mspace, Thread* thread) {
  return mspace->acquire(size, true, thread);
}

template <typename Mspace>
inline typename Mspace::NodePtr mspace_get_free_with_retry(size_t size, Mspace* mspace, size_t retry_count, Thread* thread) {
  assert(size <= mspace->min_elem_size(), "invariant");
inline typename Mspace::NodePtr mspace_acquire_free_with_retry(size_t size, Mspace* mspace, size_t retry_count, Thread* thread) {
  assert(size <= mspace->min_element_size(), "invariant");
  for (size_t i = 0; i < retry_count; ++i) {
    typename Mspace::NodePtr node = mspace_get_free(size, mspace, thread);
    typename Mspace::NodePtr node = mspace_acquire_free(size, mspace, thread);
    if (node != NULL) {
      return node;
    }
@ -355,36 +365,79 @@ inline typename Mspace::NodePtr mspace_get_free_with_retry(size_t size, Mspace*
}

template <typename Mspace>
inline typename Mspace::NodePtr mspace_get_free_lease_with_retry(size_t size, Mspace* mspace, size_t retry_count, Thread* thread) {
  typename Mspace::NodePtr node = mspace_get_free_with_retry(size, mspace, retry_count, thread);
  if (node != NULL) {
    node->set_lease();
  }
inline typename Mspace::NodePtr mspace_allocate_to_live_list(size_t size, Mspace* mspace, Thread* thread) {
  typename Mspace::NodePtr node = mspace_allocate_acquired(size, mspace, thread);
  if (node == NULL) return NULL;
  assert(node->acquired_by_self(), "invariant");
  mspace->add_to_live_list(node);
  return node;
}

template <typename Mspace>
inline typename Mspace::NodePtr mspace_get_free_to_full(size_t size, Mspace* mspace, Thread* thread) {
  assert(size <= mspace->min_elem_size(), "invariant");
  typename Mspace::NodePtr node = mspace_get_free(size, mspace, thread);
inline typename Mspace::NodePtr mspace_allocate_transient_to_live_list(size_t size, Mspace* mspace, Thread* thread, bool previous_epoch = false) {
  typename Mspace::NodePtr node = mspace_allocate_transient(size, mspace, thread);
  if (node == NULL) return NULL;
  assert(node->transient(), "invariant");
  mspace->add_to_live_list(node, previous_epoch);
  return node;
}

template <typename Mspace>
inline typename Mspace::NodePtr mspace_allocate_transient_lease_to_live_list(size_t size, Mspace* mspace, Thread* thread, bool previous_epoch = false) {
  typename Mspace::NodePtr node = mspace_allocate_transient_lease(size, mspace, thread);
  if (node == NULL) return NULL;
  assert(node->lease(), "invariant");
  mspace->add_to_live_list(node, previous_epoch);
  return node;
}

template <typename Mspace>
inline typename Mspace::NodePtr mspace_acquire_free_to_live_list(size_t size, Mspace* mspace, Thread* thread, bool previous_epoch = false) {
  assert(size <= mspace->min_element_size(), "invariant");
  typename Mspace::NodePtr node = mspace_acquire_free(size, mspace, thread);
  if (node == NULL) {
    return NULL;
  }
  assert(node->acquired_by_self(), "invariant");
  assert(!mspace->in_free_list(node), "invariant");
  mspace->add_to_full_list(node);
  mspace->add_to_live_list(node, previous_epoch);
  return node;
}

template <typename Mspace>
inline typename Mspace::NodePtr mspace_get_to_full(size_t size, Mspace* mspace, Thread* thread) {
  if (size <= mspace->min_elem_size()) {
    typename Mspace::NodePtr node = mspace_get_free_to_full(size, mspace, thread);
inline typename Mspace::NodePtr mspace_acquire_to_live_list(size_t size, Mspace* mspace, Thread* thread, bool previous_epoch = false) {
  if (size <= mspace->min_element_size()) {
    typename Mspace::NodePtr node = mspace_acquire_free_to_live_list(size, mspace, thread, previous_epoch);
    if (node != NULL) {
      return node;
    }
  }
  return mspace_allocate_to_full(size, mspace, thread);
  return mspace_allocate_to_live_list(size, mspace, thread);
}

template <typename Mspace>
inline typename Mspace::NodePtr mspace_acquire_live(size_t size, Mspace* mspace, Thread* thread, bool previous_epoch = false) {
  return mspace->acquire(size, false, thread, previous_epoch);
}

template <typename Mspace>
inline typename Mspace::NodePtr mspace_acquire_live_with_retry(size_t size, Mspace* mspace, size_t retry_count, Thread* thread, bool previous_epoch = false) {
  assert(size <= mspace->min_element_size(), "invariant");
  for (size_t i = 0; i < retry_count; ++i) {
    typename Mspace::NodePtr const node = mspace_acquire_live(size, mspace, thread, previous_epoch);
    if (node != NULL) {
      return node;
    }
  }
  return NULL;
}

template <typename Mspace>
inline typename Mspace::NodePtr mspace_acquire_lease_with_retry(size_t size, Mspace* mspace, size_t retry_count, Thread* thread, bool previous_epoch = false) {
  typename Mspace::NodePtr node = mspace_acquire_live_with_retry(size, mspace, retry_count, thread, previous_epoch);
  if (node != NULL) {
    node->set_lease();
  }
  return node;
}
|
||||
|
||||
template <typename Mspace>
|
||||
@ -395,25 +448,24 @@ inline void mspace_release(typename Mspace::NodePtr node, Mspace* mspace) {
  mspace->release(node);
}

template <typename Processor, typename Mspace>
inline void process_full_list(Processor& processor, Mspace* mspace) {
template <typename Callback, typename Mspace>
inline void process_live_list(Callback& callback, Mspace* mspace, bool previous_epoch = false) {
  assert(mspace != NULL, "invariant");
  if (mspace->full_list_is_nonempty()) {
    mspace->iterate(processor);
  }
  mspace->iterate_live_list(callback, previous_epoch);
}

template <typename Processor, typename Mspace>
inline void process_free_list(Processor& processor, Mspace* mspace) {
template <typename Callback, typename Mspace>
inline void process_free_list(Callback& callback, Mspace* mspace) {
  assert(mspace != NULL, "invariant");
  assert(mspace->free_list_is_nonempty(), "invariant");
  mspace->iterate(processor, false);
  mspace->iterate_free_list(callback);
}

template <typename Mspace>
class ReleaseOp : public StackObj {
 private:
  Mspace* _mspace;
  bool _previous_epoch;
 public:
  typedef typename Mspace::Node Node;
  ReleaseOp(Mspace* mspace) : _mspace(mspace) {}
@ -439,47 +491,113 @@ inline bool ReleaseOp<Mspace>::process(typename Mspace::NodePtr node) {
  return true;
}

template <typename Mspace>
class ScavengingReleaseOp : public StackObj {
template <typename Mspace, typename List>
class ReleaseOpWithExcision : public ReleaseOp<Mspace> {
 private:
  Mspace* _mspace;
  typename Mspace::FullList& _full_list;
  typename Mspace::NodePtr _prev;
  List& _list;
  typename List::NodePtr _prev;
  size_t _count;
  size_t _amount;
 public:
  typedef typename Mspace::Node Node;
  ScavengingReleaseOp(Mspace* mspace) :
    _mspace(mspace), _full_list(mspace->full_list()), _prev(NULL), _count(0), _amount(0) {}
  bool process(typename Mspace::NodePtr node);
  ReleaseOpWithExcision(Mspace* mspace, List& list) :
    ReleaseOp<Mspace>(mspace), _list(list), _prev(NULL), _count(0), _amount(0) {}
  bool process(typename List::NodePtr node);
  size_t processed() const { return _count; }
  size_t amount() const { return _amount; }
};

template <typename Mspace>
inline bool ScavengingReleaseOp<Mspace>::process(typename Mspace::NodePtr node) {
template <typename Mspace, typename List>
inline bool ReleaseOpWithExcision<Mspace, List>::process(typename List::NodePtr node) {
  assert(node != NULL, "invariant");
  if (node->transient()) {
    _prev = _list.excise(_prev, node);
  } else {
    _prev = node;
  }
  return ReleaseOp<Mspace>::process(node);
}

template <typename Mspace, typename List>
class ScavengingReleaseOp : public StackObj {
 protected:
  Mspace* _mspace;
  List& _list;
  typename List::NodePtr _prev;
  size_t _count;
  size_t _amount;
  bool excise_with_release(typename List::NodePtr node);
 public:
  typedef typename List::Node Node;
  ScavengingReleaseOp(Mspace* mspace, List& list) :
    _mspace(mspace), _list(list), _prev(NULL), _count(0), _amount(0) {}
  bool process(typename List::NodePtr node);
  size_t processed() const { return _count; }
  size_t amount() const { return _amount; }
};

template <typename Mspace, typename List>
inline bool ScavengingReleaseOp<Mspace, List>::process(typename List::NodePtr node) {
  assert(node != NULL, "invariant");
  assert(!node->transient(), "invariant");
  if (node->retired()) {
    _prev = _full_list.excise(_prev, node);
    if (node->transient()) {
      _mspace->deallocate(node);
      return true;
    }
    assert(node->identity() != NULL, "invariant");
    assert(node->empty(), "invariant");
    assert(!node->lease(), "invariant");
    assert(!node->excluded(), "invariant");
    ++_count;
    _amount += node->total_size();
    node->clear_retired();
    node->release();
    mspace_release(node, _mspace);
    return true;
    return excise_with_release(node);
  }
  _prev = node;
  return true;
}

template <typename Mspace, typename List>
inline bool ScavengingReleaseOp<Mspace, List>::excise_with_release(typename List::NodePtr node) {
  assert(node != NULL, "invariant");
  assert(node->retired(), "invariant");
  _prev = _list.excise(_prev, node);
  if (node->transient()) {
    _mspace->deallocate(node);
    return true;
  }
  assert(node->identity() != NULL, "invariant");
  assert(node->empty(), "invariant");
  assert(!node->lease(), "invariant");
  assert(!node->excluded(), "invariant");
  ++_count;
  _amount += node->total_size();
  node->clear_retired();
  node->release();
  mspace_release(node, _mspace);
  return true;
}

template <typename Mspace, typename FromList>
class ReleaseRetiredToFreeListOp : public StackObj {
 private:
  Mspace* _mspace;
  FromList& _list;
  typename Mspace::NodePtr _prev;
 public:
  typedef typename Mspace::Node Node;
  ReleaseRetiredToFreeListOp(Mspace* mspace, FromList& list) :
    _mspace(mspace), _list(list), _prev(NULL) {}
  bool process(Node* node);
};

template <typename Mspace, typename FromList>
inline bool ReleaseRetiredToFreeListOp<Mspace, FromList>::process(typename Mspace::Node* node) {
  assert(node != NULL, "invariant");
  // assumes some means of exclusive access to node
  const bool retired = node->retired();
  node->reinitialize();
  assert(node->empty(), "invariant");
  assert(!node->retired(), "invariant");
  if (retired) {
    _prev = _list.excise(_prev, node);
    node->release();
    mspace_release(node, _mspace);
  } else {
    _prev = node;
  }
  return true;
}

#ifdef ASSERT
template <typename Node>
inline void assert_migration_state(const Node* old, const Node* new_node, size_t used, size_t requested) {
@ -33,8 +33,18 @@ template <typename Mspace>
class JfrMspaceRetrieval {
 public:
  typedef typename Mspace::Node Node;
  static Node* acquire(Mspace* mspace, Thread* thread, size_t size) {
    StopOnNullCondition<typename Mspace::FreeList> iterator(mspace->free_list());
  static Node* acquire(Mspace* mspace, bool free_list, Thread* thread, size_t size, bool previous_epoch) {
    if (free_list) {
      StopOnNullCondition<typename Mspace::FreeList> iterator(mspace->free_list());
      return acquire(mspace, iterator, thread, size);
    }
    StopOnNullCondition<typename Mspace::LiveList> iterator(mspace->live_list(previous_epoch));
    return acquire(mspace, iterator, thread, size);
  }
 private:
  template <typename Iterator>
  static Node* acquire(Mspace* mspace, Iterator& iterator, Thread* thread, size_t size) {
    assert(mspace != NULL, "invariant");
    while (iterator.has_next()) {
      Node* const node = iterator.next();
      if (node->retired()) continue;
@ -55,13 +65,25 @@ template <typename Mspace>
class JfrMspaceRemoveRetrieval : AllStatic {
 public:
  typedef typename Mspace::Node Node;
  static Node* acquire(Mspace* mspace, Thread* thread, size_t size) {
    StopOnNullConditionRemoval<typename Mspace::FreeList> iterator(mspace->free_list());
  static Node* acquire(Mspace* mspace, bool free_list, Thread* thread, size_t size, bool previous_epoch) {
    // it is the iterator that removes the nodes
    if (free_list) {
      StopOnNullConditionRemoval<typename Mspace::FreeList> iterator(mspace->free_list());
      Node* const node = acquire(iterator, thread, size);
      if (node != NULL) {
        mspace->decrement_free_list_count();
      }
      return node;
    }
    StopOnNullConditionRemoval<typename Mspace::LiveList> iterator(mspace->live_list(previous_epoch));
    return acquire(iterator, thread, size);
  }
 private:
  template <typename Iterator>
  static Node* acquire(Iterator& iterator, Thread* thread, size_t size) {
    while (iterator.has_next()) {
      Node* const node = iterator.next();
      if (node == NULL) return NULL;
      mspace->decrement_free_list_count();
      assert(node->free_size() >= size, "invariant");
      assert(!node->retired(), "invariant");
      assert(node->identity() == NULL, "invariant");
@ -91,11 +91,9 @@ JfrStorage::~JfrStorage() {
  _instance = NULL;
}

static const size_t in_memory_discard_threshold_delta = 2; // start to discard data when only this number of free buffers is left
static const size_t unlimited_mspace_size = 0;
static const size_t thread_local_cache_count = 8;
static const size_t thread_local_scavenge_threshold = thread_local_cache_count / 2;
static const size_t transient_buffer_size_multiplier = 8; // against thread local buffer size
// start to discard data when only this number of free buffers is left
static const size_t in_memory_discard_threshold_delta = 2;

bool JfrStorage::initialize() {
  assert(_control == NULL, "invariant");
@ -104,7 +102,6 @@ bool JfrStorage::initialize() {

  const size_t num_global_buffers = (size_t)JfrOptionSet::num_global_buffers();
  assert(num_global_buffers >= in_memory_discard_threshold_delta, "invariant");
  const size_t memory_size = (size_t)JfrOptionSet::memory_size();
  const size_t global_buffer_size = (size_t)JfrOptionSet::global_buffer_size();
  const size_t thread_buffer_size = (size_t)JfrOptionSet::thread_buffer_size();

@ -112,14 +109,24 @@ bool JfrStorage::initialize() {
  if (_control == NULL) {
    return false;
  }
  _global_mspace = create_mspace<JfrStorageMspace>(global_buffer_size, memory_size, num_global_buffers, this);
  _global_mspace = create_mspace<JfrStorageMspace>(global_buffer_size,
                                                   num_global_buffers, // cache count limit
                                                   num_global_buffers, // cache_preallocate count
                                                   false, // preallocate_to_free_list (== preallocate directly to live list)
                                                   this);
  if (_global_mspace == NULL) {
    return false;
  }
  _thread_local_mspace = create_mspace<JfrThreadLocalMspace>(thread_buffer_size, unlimited_mspace_size, thread_local_cache_count, this);
  assert(_global_mspace->live_list_is_nonempty(), "invariant");
  _thread_local_mspace = create_mspace<JfrThreadLocalMspace>(thread_buffer_size,
                                                             thread_local_cache_count, // cache count limit
                                                             thread_local_cache_count, // cache preallocate count
                                                             true, // preallocate_to_free_list
                                                             this);
  if (_thread_local_mspace == NULL) {
    return false;
  }
  assert(_thread_local_mspace->free_list_is_nonempty(), "invariant");
  // The full list will contain nodes pointing to retired global and transient buffers.
  _full_list = new JfrFullList(*_control);
  return _full_list != NULL && _full_list->initialize(num_global_buffers * 2);
@ -134,7 +141,7 @@ static void log_allocation_failure(const char* msg, size_t size) {
}

BufferPtr JfrStorage::acquire_thread_local(Thread* thread, size_t size /* 0 */) {
  BufferPtr buffer = mspace_get_to_full(size, instance()._thread_local_mspace, thread);
  BufferPtr buffer = mspace_acquire_to_live_list(size, instance()._thread_local_mspace, thread);
  if (buffer == NULL) {
    log_allocation_failure("thread local memory", size);
    return NULL;
@ -155,10 +162,10 @@ BufferPtr JfrStorage::acquire_transient(size_t size, Thread* thread) {
  return buffer;
}

static BufferPtr get_lease(size_t size, JfrStorageMspace* mspace, JfrStorage& storage_instance, size_t retry_count, Thread* thread) {
  assert(size <= mspace->min_elem_size(), "invariant");
static BufferPtr acquire_lease(size_t size, JfrStorageMspace* mspace, JfrStorage& storage_instance, size_t retry_count, Thread* thread) {
  assert(size <= mspace->min_element_size(), "invariant");
  while (true) {
    BufferPtr buffer = mspace_get_free_lease_with_retry(size, mspace, retry_count, thread);
    BufferPtr buffer = mspace_acquire_lease_with_retry(size, mspace, retry_count, thread);
    if (buffer == NULL && storage_instance.control().should_discard()) {
      storage_instance.discard_oldest(thread);
      continue;
@ -167,10 +174,10 @@ static BufferPtr get_lease(size_t size, JfrStorageMspace* mspace, JfrStorage& st
  }
}

static BufferPtr get_promotion_buffer(size_t size, JfrStorageMspace* mspace, JfrStorage& storage_instance, size_t retry_count, Thread* thread) {
  assert(size <= mspace->min_elem_size(), "invariant");
static BufferPtr acquire_promotion_buffer(size_t size, JfrStorageMspace* mspace, JfrStorage& storage_instance, size_t retry_count, Thread* thread) {
  assert(size <= mspace->min_element_size(), "invariant");
  while (true) {
    BufferPtr buffer = mspace_get_free_with_retry(size, mspace, retry_count, thread);
    BufferPtr buffer = mspace_acquire_live_with_retry(size, mspace, retry_count, thread);
    if (buffer == NULL && storage_instance.control().should_discard()) {
      storage_instance.discard_oldest(thread);
      continue;
@ -183,10 +190,10 @@ static const size_t lease_retry = 10;

BufferPtr JfrStorage::acquire_large(size_t size, Thread* thread) {
  JfrStorage& storage_instance = instance();
  const size_t max_elem_size = storage_instance._global_mspace->min_elem_size(); // min is also max
  const size_t max_elem_size = storage_instance._global_mspace->min_element_size(); // min is also max
  // if not too large and capacity is still available, ask for a lease from the global system
  if (size < max_elem_size && storage_instance.control().is_global_lease_allowed()) {
    BufferPtr const buffer = get_lease(size, storage_instance._global_mspace, storage_instance, lease_retry, thread);
    BufferPtr const buffer = acquire_lease(size, storage_instance._global_mspace, storage_instance, lease_retry, thread);
    if (buffer != NULL) {
      assert(buffer->acquired_by_self(), "invariant");
      assert(!buffer->transient(), "invariant");
@ -247,7 +254,7 @@ bool JfrStorage::flush_regular_buffer(BufferPtr buffer, Thread* thread) {
    return true;
  }

  BufferPtr const promotion_buffer = get_promotion_buffer(unflushed_size, _global_mspace, *this, promotion_retry, thread);
  BufferPtr const promotion_buffer = acquire_promotion_buffer(unflushed_size, _global_mspace, *this, promotion_retry, thread);
  if (promotion_buffer == NULL) {
    write_data_loss(buffer, thread);
    return false;
@ -497,47 +504,50 @@ typedef MutexedWriteOp<WriteOperation> MutexedWriteOperation;
typedef ConcurrentWriteOp<WriteOperation> ConcurrentWriteOperation;

typedef Excluded<JfrBuffer, true> NonExcluded;
typedef PredicatedConcurrentWriteOp<WriteOperation, NonExcluded> ConcurrentWriteOperationNonExcluded;
typedef PredicatedConcurrentWriteOp<WriteOperation, NonExcluded> ConcurrentNonExcludedWriteOperation;

typedef ReleaseOp<JfrThreadLocalMspace> ReleaseFullOperation;
typedef ScavengingReleaseOp<JfrThreadLocalMspace> ReleaseThreadLocalOperation;
typedef CompositeOperation<ConcurrentWriteOperationNonExcluded, ReleaseThreadLocalOperation> ConcurrentWriteThreadLocalOperationWithRelease;
typedef ScavengingReleaseOp<JfrThreadLocalMspace, JfrThreadLocalMspace::LiveList> ReleaseThreadLocalOperation;
typedef CompositeOperation<ConcurrentNonExcludedWriteOperation, ReleaseThreadLocalOperation> ConcurrentWriteReleaseThreadLocalOperation;

size_t JfrStorage::write() {
  const size_t full_elements = write_full();
  WriteOperation wo(_chunkwriter);
  NonExcluded ne;
  ConcurrentWriteOperationNonExcluded cwone(wo, ne);
  ReleaseThreadLocalOperation rtlo(_thread_local_mspace);
  ConcurrentWriteThreadLocalOperationWithRelease tl_op(&cwone, &rtlo);
  process_full_list(tl_op, _thread_local_mspace);
  assert(_global_mspace->full_list_is_empty(), "invariant");
  process_free_list(cwone, _global_mspace);
  ConcurrentNonExcludedWriteOperation cnewo(wo, ne);
  ReleaseThreadLocalOperation rtlo(_thread_local_mspace, _thread_local_mspace->live_list());
  ConcurrentWriteReleaseThreadLocalOperation tlop(&cnewo, &rtlo);
  process_live_list(tlop, _thread_local_mspace);
  assert(_global_mspace->free_list_is_empty(), "invariant");
  assert(_global_mspace->live_list_is_nonempty(), "invariant");
  process_live_list(cnewo, _global_mspace);
  return full_elements + wo.elements();
}

size_t JfrStorage::write_at_safepoint() {
  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
  const size_t full_elements = write_full();
  WriteOperation wo(_chunkwriter);
  NonExcluded ne;
  ConcurrentWriteOperationNonExcluded cwone(wo, ne); // concurrent because of gc's
  process_full_list(cwone, _thread_local_mspace);
  assert(_global_mspace->full_list_is_empty(), "invariant");
  process_free_list(cwone, _global_mspace);
  return wo.elements();
  ConcurrentNonExcludedWriteOperation cnewo(wo, ne); // concurrent because of gc's
  process_live_list(cnewo, _thread_local_mspace);
  assert(_global_mspace->free_list_is_empty(), "invariant");
  assert(_global_mspace->live_list_is_nonempty(), "invariant");
  process_live_list(cnewo, _global_mspace);
  return full_elements + wo.elements();
}

typedef DiscardOp<DefaultDiscarder<JfrStorage::Buffer> > DiscardOperation;
typedef CompositeOperation<DiscardOperation, ReleaseThreadLocalOperation> ThreadLocalDiscardOperation;
typedef CompositeOperation<DiscardOperation, ReleaseThreadLocalOperation> DiscardReleaseThreadLocalOperation;

size_t JfrStorage::clear() {
  const size_t full_elements = clear_full();
  DiscardOperation discarder(concurrent); // concurrent discard mode
  ReleaseThreadLocalOperation rtlo(_thread_local_mspace);
  ThreadLocalDiscardOperation tldo(&discarder, &rtlo);
  process_full_list(tldo, _thread_local_mspace);
  assert(_global_mspace->full_list_is_empty(), "invariant");
  process_free_list(discarder, _global_mspace);
  ReleaseThreadLocalOperation rtlo(_thread_local_mspace, _thread_local_mspace->live_list());
  DiscardReleaseThreadLocalOperation tldo(&discarder, &rtlo);
  process_live_list(tldo, _thread_local_mspace);
  assert(_global_mspace->free_list_is_empty(), "invariant");
  assert(_global_mspace->live_list_is_nonempty(), "invariant");
  process_live_list(discarder, _global_mspace);
  return full_elements + discarder.elements();
}

@ -566,7 +576,8 @@ static void log(size_t count, size_t amount, bool clear = false) {
  }
}

typedef CompositeOperation<MutexedWriteOperation, ReleaseFullOperation> FullOperation;
typedef ReleaseOp<JfrThreadLocalMspace> ReleaseFullOperation;
typedef CompositeOperation<MutexedWriteOperation, ReleaseFullOperation> WriteFullOperation;

// full writer
// Assumption is retired only; exclusive access
@ -580,8 +591,8 @@ size_t JfrStorage::write_full() {
  WriteOperation wo(_chunkwriter);
  MutexedWriteOperation writer(wo); // a retired buffer implies mutexed access
  ReleaseFullOperation rfo(_thread_local_mspace);
  FullOperation cmd(&writer, &rfo);
  const size_t count = process_full(cmd, _full_list, control());
  WriteFullOperation wfo(&writer, &rfo);
  const size_t count = process_full(wfo, _full_list, control());
  if (count != 0) {
    log(count, writer.size());
  }
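The write(), write_at_safepoint() and clear() paths above all pair a write or discard stage with a release stage through CompositeOperation, so a single traversal of the live list both flushes and reclaims buffers. The following is only a minimal sketch of that composition pattern with illustrative names, not the actual CompositeOperation from the JFR utilities:

template <typename First, typename Second>
class CompositeOperationSketch : public StackObj {
 private:
  First* _first;
  Second* _second;
 public:
  CompositeOperationSketch(First* first, Second* second) : _first(first), _second(second) {}
  template <typename NodePtr>
  bool process(NodePtr node) {
    // run the write/discard stage first, then the release stage;
    // iteration stops early if either stage reports failure
    return _first->process(node) && _second->process(node);
  }
};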
@ -93,7 +93,7 @@ class JfrStorage : public JfrCHeapObj {

  friend class JfrRecorder;
  friend class JfrRecorderService;
  template <typename, template <typename> class, typename, typename>
  template <typename, template <typename> class, typename, typename, bool>
  friend class JfrMemorySpace;
};

@ -191,7 +191,7 @@ class DiscardOp {

template <typename Operation>
class ExclusiveDiscardOp : private DiscardOp<Operation> {
public:
 public:
  typedef typename Operation::Type Type;
  ExclusiveDiscardOp(jfr_operation_mode mode = concurrent) : DiscardOp<Operation>(mode) {}
  bool process(Type* t);
@ -200,4 +200,18 @@ public:
  size_t size() const { return DiscardOp<Operation>::size(); }
};

template <typename Operation>
class EpochDispatchOp {
  Operation& _operation;
  size_t _elements;
  bool _previous_epoch;
  size_t dispatch(bool previous_epoch, const u1* data, size_t size);
 public:
  typedef typename Operation::Type Type;
  EpochDispatchOp(Operation& operation, bool previous_epoch) :
    _operation(operation), _elements(0), _previous_epoch(previous_epoch) {}
  bool process(Type* t);
  size_t elements() const { return _elements; }
};

#endif // SHARE_JFR_RECORDER_STORAGE_JFRSTORAGEUTILS_HPP

@ -136,4 +136,30 @@ inline bool ExclusiveDiscardOp<Operation>::process(typename Operation::Type* t)
  return DiscardOp<Operation>::process(t);
}

template <typename Operation>
inline bool EpochDispatchOp<Operation>::process(typename Operation::Type* t) {
  assert(t != NULL, "invariant");
  const u1* const current_top = _previous_epoch ? t->start() : t->top();
  const size_t unflushed_size = Atomic::load_acquire(t->pos_address()) - current_top;
  if (unflushed_size == 0) {
    return true;
  }
  _elements = dispatch(_previous_epoch, current_top, unflushed_size);
  t->set_top(current_top + unflushed_size);
  return true;
}

template <typename Operation>
size_t EpochDispatchOp<Operation>::dispatch(bool previous_epoch, const u1* element, size_t size) {
  assert(element != NULL, "invariant");
  const u1* const limit = element + size;
  size_t elements = 0;
  while (element < limit) {
    element += _operation(element, previous_epoch);
    ++elements;
  }
  assert(element == limit, "invariant");
  return elements;
}

#endif // SHARE_JFR_RECORDER_STORAGE_JFRSTORAGEUTILS_INLINE_HPP

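EpochDispatchOp expects the wrapped operation to decode exactly one element at the supplied address and return the number of bytes it consumed; dispatch() then advances element by element until the unflushed region is exhausted. A hedged sketch of a conforming operation follows; the u4 size-prefix encoding is an assumption for illustration only, not the patch's actual element format:

template <typename BufferType>
class CountingElementOperation {
 private:
  size_t _elements;
 public:
  typedef BufferType Type;
  CountingElementOperation() : _elements(0) {}
  size_t operator()(const u1* element, bool previous_epoch) {
    // assumed encoding: each element begins with its total size as a u4
    const u4 size = *reinterpret_cast<const u4*>(element);
    ++_elements;
    return size; // bytes consumed; dispatch() advances its cursor by this amount
  }
  size_t elements() const { return _elements; }
};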
@ -92,18 +92,19 @@ void JfrStringPool::destroy() {
JfrStringPool::JfrStringPool(JfrChunkWriter& cw) : _mspace(NULL), _chunkwriter(cw) {}

JfrStringPool::~JfrStringPool() {
  if (_mspace != NULL) {
    delete _mspace;
  }
  delete _mspace;
}

static const size_t unlimited_mspace_size = 0;
static const size_t string_pool_cache_count = 2;
static const size_t string_pool_buffer_size = 512 * K;

bool JfrStringPool::initialize() {
  assert(_mspace == NULL, "invariant");
  _mspace = create_mspace<JfrStringPoolMspace>(string_pool_buffer_size, unlimited_mspace_size, string_pool_cache_count, this);
  _mspace = create_mspace<JfrStringPoolMspace>(string_pool_buffer_size,
                                               string_pool_cache_count, // cache limit
                                               string_pool_cache_count, // cache preallocate count
                                               false, // preallocate_to_free_list (== preallocate directly to live list)
                                               this);
  return _mspace != NULL;
}

@ -145,9 +146,9 @@ BufferPtr JfrStringPool::flush(BufferPtr old, size_t used, size_t requested, Thr
static const size_t lease_retry = 10;

BufferPtr JfrStringPool::lease(Thread* thread, size_t size /* 0 */) {
  BufferPtr buffer = mspace_get_free_lease_with_retry(size, instance()._mspace, lease_retry, thread);
  BufferPtr buffer = mspace_acquire_lease_with_retry(size, instance()._mspace, lease_retry, thread);
  if (buffer == NULL) {
    buffer = mspace_allocate_transient_lease_to_full(size, instance()._mspace, thread);
    buffer = mspace_allocate_transient_lease_to_live_list(size, instance()._mspace, thread);
  }
  assert(buffer->acquired_by_self(), "invariant");
  assert(buffer->lease(), "invariant");
@ -207,23 +208,19 @@ typedef StringPoolOp<UnBufferedWriteToChunk> WriteOperation;
typedef StringPoolOp<StringPoolDiscarderStub> DiscardOperation;
typedef ExclusiveOp<WriteOperation> ExclusiveWriteOperation;
typedef ExclusiveOp<DiscardOperation> ExclusiveDiscardOperation;
typedef ReleaseOp<JfrStringPoolMspace> StringPoolReleaseFreeOperation;
typedef ScavengingReleaseOp<JfrStringPoolMspace> StringPoolReleaseFullOperation;
typedef CompositeOperation<ExclusiveWriteOperation, StringPoolReleaseFreeOperation> StringPoolWriteFreeOperation;
typedef CompositeOperation<ExclusiveWriteOperation, StringPoolReleaseFullOperation> StringPoolWriteFullOperation;
typedef CompositeOperation<ExclusiveDiscardOperation, StringPoolReleaseFreeOperation> StringPoolDiscardFreeOperation;
typedef CompositeOperation<ExclusiveDiscardOperation, StringPoolReleaseFullOperation> StringPoolDiscardFullOperation;
typedef ReleaseOpWithExcision<JfrStringPoolMspace, JfrStringPoolMspace::LiveList> ReleaseOperation;
typedef CompositeOperation<ExclusiveWriteOperation, ReleaseOperation> WriteReleaseOperation;
typedef CompositeOperation<ExclusiveDiscardOperation, ReleaseOperation> DiscardReleaseOperation;

size_t JfrStringPool::write() {
  Thread* const thread = Thread::current();
  WriteOperation wo(_chunkwriter, thread);
  ExclusiveWriteOperation ewo(wo);
  StringPoolReleaseFreeOperation free_release_op(_mspace);
  StringPoolWriteFreeOperation free_op(&ewo, &free_release_op);
  process_free_list(free_op, _mspace);
  StringPoolReleaseFullOperation full_release_op(_mspace);
  StringPoolWriteFullOperation full_op(&ewo, &full_release_op);
  process_full_list(full_op, _mspace);
  assert(_mspace->free_list_is_empty(), "invariant");
  ReleaseOperation ro(_mspace, _mspace->live_list());
  WriteReleaseOperation wro(&ewo, &ro);
  assert(_mspace->live_list_is_nonempty(), "invariant");
  process_live_list(wro, _mspace);
  return wo.processed();
}

@ -236,12 +233,11 @@ size_t JfrStringPool::clear() {
  increment_serialized_generation();
  DiscardOperation discard_operation;
  ExclusiveDiscardOperation edo(discard_operation);
  StringPoolReleaseFreeOperation free_release_op(_mspace);
  StringPoolDiscardFreeOperation free_op(&edo, &free_release_op);
  process_free_list(free_op, _mspace);
  StringPoolReleaseFullOperation full_release_op(_mspace);
  StringPoolDiscardFullOperation full_op(&edo, &full_release_op);
  process_full_list(full_op, _mspace);
  assert(_mspace->free_list_is_empty(), "invariant");
  ReleaseOperation ro(_mspace, _mspace->live_list());
  DiscardReleaseOperation discard_op(&edo, &ro);
  assert(_mspace->live_list_is_nonempty(), "invariant");
  process_live_list(discard_op, _mspace);
  return discard_operation.processed();
}

@ -74,7 +74,7 @@ class JfrStringPool : public JfrCHeapObj {
  friend class JfrRecorderService;
  friend class JfrStringPoolFlush;
  friend class JfrStringPoolWriter;
  template <typename, template <typename> class, typename, typename>
  template <typename, template <typename> class, typename, typename, bool>
  friend class JfrMemorySpace;
};

|
@ -41,11 +41,6 @@ class JfrStringPoolBuffer : public JfrBuffer {
|
||||
void increment(uint64_t value);
|
||||
void set_string_pos(uint64_t value);
|
||||
void set_string_top(uint64_t value);
|
||||
|
||||
template <typename, typename>
|
||||
friend class JfrLinkedList;
|
||||
template <typename, typename>
|
||||
friend class JfrConcurrentLinkedList;
|
||||
};
|
||||
|
||||
#endif // SHARE_JFR_RECORDER_STRINGPOOL_JFRSTRINGPOOLBUFFER_HPP
|
||||
|
@ -1,87 +0,0 @@
/*
 * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
#include "jfr/support/jfrEventClass.hpp"

bool JdkJfrEvent::is(const Klass* k) {
  return JfrTraceId::is_jdk_jfr_event(k);
}

bool JdkJfrEvent::is(const jclass jc) {
  return JfrTraceId::is_jdk_jfr_event(jc);
}

void JdkJfrEvent::tag_as(const Klass* k) {
  JfrTraceId::tag_as_jdk_jfr_event(k);
}

bool JdkJfrEvent::is_subklass(const Klass* k) {
  return JfrTraceId::is_jdk_jfr_event_sub(k);
}

bool JdkJfrEvent::is_subklass(const jclass jc) {
  return JfrTraceId::is_jdk_jfr_event_sub(jc);
}

void JdkJfrEvent::tag_as_subklass(const Klass* k) {
  JfrTraceId::tag_as_jdk_jfr_event_sub(k);
}

void JdkJfrEvent::tag_as_subklass(const jclass jc) {
  JfrTraceId::tag_as_jdk_jfr_event_sub(jc);
}

bool JdkJfrEvent::is_a(const Klass* k) {
  return JfrTraceId::in_jdk_jfr_event_hierarchy(k);
}

bool JdkJfrEvent::is_a(const jclass jc) {
  return JfrTraceId::in_jdk_jfr_event_hierarchy(jc);
}

bool JdkJfrEvent::is_host(const Klass* k) {
  return JfrTraceId::is_event_host(k);
}

bool JdkJfrEvent::is_host(const jclass jc) {
  return JfrTraceId::is_event_host(jc);
}

void JdkJfrEvent::tag_as_host(const Klass* k) {
  JfrTraceId::tag_as_event_host(k);
}

void JdkJfrEvent::tag_as_host(const jclass jc) {
  JfrTraceId::tag_as_event_host(jc);
}

bool JdkJfrEvent::is_visible(const Klass* k) {
  return JfrTraceId::in_visible_set(k);
}

bool JdkJfrEvent::is_visible(const jclass jc) {
  return JfrTraceId::in_visible_set(jc);
}
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -25,30 +25,16 @@
#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/symbolTable.hpp"
#include "jfr/jni/jfrGetAllEventClasses.hpp"
#include "jfr/jni/jfrJavaSupport.hpp"
#include "jfr/support/jfrEventClass.hpp"
#include "oops/instanceKlass.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
#include "jfr/support/jfrJdkJfrEvent.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/stack.inline.hpp"

// incremented during class unloading for each unloaded event class
static jlong unloaded_event_classes = 0;

jlong JfrEventClasses::unloaded_event_classes_count() {
  return unloaded_event_classes;
}

void JfrEventClasses::increment_unloaded_event_class() {
  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
  ++unloaded_event_classes;
}

static jobject empty_java_util_arraylist = NULL;

static oop new_java_util_arraylist(TRAPS) {
@ -59,10 +45,16 @@ static oop new_java_util_arraylist(TRAPS) {
  return (oop)result.get_jobject();
}

static const int initial_array_size = 64;

template <typename T>
static GrowableArray<T>* c_heap_allocate_array(int size = initial_array_size) {
  return new (ResourceObj::C_HEAP, mtTracing) GrowableArray<T>(size, true, mtTracing);
}

static bool initialize(TRAPS) {
  static bool initialized = false;
  if (!initialized) {
    unloaded_event_classes = 0;
    assert(NULL == empty_java_util_arraylist, "invariant");
    const oop array_list = new_java_util_arraylist(CHECK_false);
    empty_java_util_arraylist = JfrJavaSupport::global_jni_handle(array_list, THREAD);
@ -88,7 +80,6 @@ static void fill_klasses(GrowableArray<const void*>& event_subklasses, const Kla
  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(thread));

  Stack<const Klass*, mtTracing> mark_stack;
  MutexLocker ml(thread, Compile_lock);
  mark_stack.push(event_klass->subklass());

  while (!mark_stack.is_empty()) {
@ -114,7 +105,7 @@ static void fill_klasses(GrowableArray<const void*>& event_subklasses, const Kla
  assert(mark_stack.is_empty(), "invariant");
}

static void transform_klasses_to_local_jni_handles(GrowableArray<const void*>& event_subklasses, Thread* thread) {
  assert(event_subklasses.is_nonempty(), "invariant");
  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(thread));

@ -125,9 +116,7 @@ static void fill_klasses(GrowableArray<const void*>& event_subklasses, const Kla
  }
}

static const int initial_size_growable_array = 64;

jobject JfrEventClasses::get_all_event_classes(TRAPS) {
jobject JdkJfrEvent::get_all_klasses(TRAPS) {
  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
  initialize(THREAD);
  assert(empty_java_util_arraylist != NULL, "should have been setup already!");
@ -148,7 +137,7 @@ jobject JfrEventClasses::get_all_event_classes(TRAPS) {
  }

  ResourceMark rm(THREAD);
  GrowableArray<const void*> event_subklasses(THREAD, initial_size_growable_array);
  GrowableArray<const void*> event_subklasses(THREAD, initial_array_size);
  fill_klasses(event_subklasses, klass, THREAD);

  if (event_subklasses.is_empty()) {
@ -185,3 +174,63 @@ jobject JfrEventClasses::get_all_event_classes(TRAPS) {
  }
  return JfrJavaSupport::local_jni_handle(h_array_list(), THREAD);
}

bool JdkJfrEvent::is(const Klass* k) {
  return JfrTraceId::is_jdk_jfr_event(k);
}

bool JdkJfrEvent::is(const jclass jc) {
  return JfrTraceId::is_jdk_jfr_event(jc);
}

void JdkJfrEvent::tag_as(const Klass* k) {
  JfrTraceId::tag_as_jdk_jfr_event(k);
}

bool JdkJfrEvent::is_subklass(const Klass* k) {
  return JfrTraceId::is_jdk_jfr_event_sub(k);
}

bool JdkJfrEvent::is_subklass(const jclass jc) {
  return JfrTraceId::is_jdk_jfr_event_sub(jc);
}

void JdkJfrEvent::tag_as_subklass(const Klass* k) {
  JfrTraceId::tag_as_jdk_jfr_event_sub(k);
}

void JdkJfrEvent::tag_as_subklass(const jclass jc) {
  JfrTraceId::tag_as_jdk_jfr_event_sub(jc);
}

bool JdkJfrEvent::is_a(const Klass* k) {
  return JfrTraceId::in_jdk_jfr_event_hierarchy(k);
}

bool JdkJfrEvent::is_a(const jclass jc) {
  return JfrTraceId::in_jdk_jfr_event_hierarchy(jc);
}

bool JdkJfrEvent::is_host(const Klass* k) {
  return JfrTraceId::is_event_host(k);
}

bool JdkJfrEvent::is_host(const jclass jc) {
  return JfrTraceId::is_event_host(jc);
}

void JdkJfrEvent::tag_as_host(const Klass* k) {
  JfrTraceId::tag_as_event_host(k);
}

void JdkJfrEvent::tag_as_host(const jclass jc) {
  JfrTraceId::tag_as_event_host(jc);
}

bool JdkJfrEvent::is_visible(const Klass* k) {
  return JfrTraceId::in_visible_set(k);
}

bool JdkJfrEvent::is_visible(const jclass jc) {
  return JfrTraceId::in_visible_set(jc);
}
@ -22,16 +22,26 @@
 *
 */

#ifndef SHARE_JFR_SUPPORT_JFREVENTCLASS_HPP
#define SHARE_JFR_SUPPORT_JFREVENTCLASS_HPP
#ifndef SHARE_JFR_SUPPORT_JFRJDKJFREVENT_HPP
#define SHARE_JFR_SUPPORT_JFRJDKJFREVENT_HPP

#include "jni.h"
#include "memory/allocation.hpp"
#include "utilities/exceptions.hpp"

class Klass;

//
// For convenient access to the jdk.jfr.Event klass hierarchy.
// For convenient access to the event klass hierarchy:
//
// - jdk.internal.event.Event (java.base)
//   - jdk.jfr.Event (jdk.jfr)
//     - sub klasses (...)
//
// Although the top level klass is really jdk.internal.event.Event,
// its role is primarily to allow event programming in module java.base.
// We still call it the jdk.jfr.Event klass hierarchy, including
// jdk.internal.event.Event.
//
class JdkJfrEvent : AllStatic {
 public:
@ -59,6 +69,9 @@ class JdkJfrEvent : AllStatic {
  // in the set of classes made visible to java
  static bool is_visible(const Klass* k);
  static bool is_visible(const jclass jc);

  // all klasses in the hierarchy
  static jobject get_all_klasses(TRAPS);
};

#endif // SHARE_JFR_SUPPORT_JFREVENTCLASS_HPP
#endif // SHARE_JFR_SUPPORT_JFRJDKJFREVENT_HPP
131 src/hotspot/share/jfr/support/jfrKlassUnloading.cpp Normal file
@ -0,0 +1,131 @@
/*
 * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
#include "jfr/support/jfrKlassUnloading.hpp"
#include "jfr/utilities/jfrPredicate.hpp"
#include "jfr/utilities/jfrRelation.hpp"
#include "runtime/mutexLocker.hpp"

static const int initial_array_size = 64;

template <typename T>
static GrowableArray<T>* c_heap_allocate_array(int size = initial_array_size) {
  return new (ResourceObj::C_HEAP, mtTracing) GrowableArray<T>(size, true, mtTracing);
}

// Track the set of unloaded klasses during a chunk / epoch.
static GrowableArray<traceid>* _unload_set_epoch_0 = NULL;
static GrowableArray<traceid>* _unload_set_epoch_1 = NULL;

static s8 event_klass_unloaded_count = 0;

static GrowableArray<traceid>* unload_set_epoch_0() {
  if (_unload_set_epoch_0 == NULL) {
    _unload_set_epoch_0 = c_heap_allocate_array<traceid>(initial_array_size);
  }
  return _unload_set_epoch_0;
}

static GrowableArray<traceid>* unload_set_epoch_1() {
  if (_unload_set_epoch_1 == NULL) {
    _unload_set_epoch_1 = c_heap_allocate_array<traceid>(initial_array_size);
  }
  return _unload_set_epoch_1;
}

static GrowableArray<traceid>* get_unload_set(u1 epoch) {
  return epoch == 0 ? unload_set_epoch_0() : unload_set_epoch_1();
}

static GrowableArray<traceid>* get_unload_set() {
  return get_unload_set(JfrTraceIdEpoch::current());
}

static GrowableArray<traceid>* get_unload_set_previous_epoch() {
  return get_unload_set(JfrTraceIdEpoch::previous());
}

static void sort_set(GrowableArray<traceid>* set) {
  assert(set != NULL, "invariant");
  assert(set->is_nonempty(), "invariant");
  set->sort(sort_traceid);
}

static bool is_nonempty_set(u1 epoch) {
  if (epoch == 0) {
    return _unload_set_epoch_0 != NULL && _unload_set_epoch_0->is_nonempty();
  }
  return _unload_set_epoch_1 != NULL && _unload_set_epoch_1->is_nonempty();
}

void JfrKlassUnloading::sort(bool previous_epoch) {
  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
  if (is_nonempty_set(JfrTraceIdEpoch::current())) {
    sort_set(get_unload_set());
  }
  if (previous_epoch && is_nonempty_set(JfrTraceIdEpoch::previous())) {
    sort_set(get_unload_set_previous_epoch());
  }
}

void JfrKlassUnloading::clear() {
  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
  if (is_nonempty_set(JfrTraceIdEpoch::previous())) {
    get_unload_set_previous_epoch()->clear();
  }
}

static bool add_to_unloaded_klass_set(traceid klass_id, bool current_epoch) {
  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
  GrowableArray<traceid>* const unload_set = current_epoch ? get_unload_set() : get_unload_set_previous_epoch();
  assert(unload_set != NULL, "invariant");
  assert(unload_set->find(klass_id) == -1, "invariant");
  unload_set->append(klass_id);
  return true;
}

bool JfrKlassUnloading::on_unload(const Klass* k) {
  assert(k != NULL, "invariant");
  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
  if (IS_JDK_JFR_EVENT_SUBKLASS(k)) {
    ++event_klass_unloaded_count;
  }
  return USED_ANY_EPOCH(k) && add_to_unloaded_klass_set(JfrTraceId::load_raw(k), USED_THIS_EPOCH(k));
}

bool JfrKlassUnloading::is_unloaded(traceid klass_id, bool previous_epoch /* false */) {
  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
  if (previous_epoch) {
    if (JfrPredicate<traceid, compare_traceid>::test(get_unload_set_previous_epoch(), klass_id)) {
      return true;
    }
  }
  return JfrPredicate<traceid, compare_traceid>::test(get_unload_set(), klass_id);
}

int64_t JfrKlassUnloading::event_class_count() {
  return event_klass_unloaded_count;
}
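is_unloaded() tests membership with JfrPredicate, which performs a binary search, so sort() has to run before lookups for a given epoch. The same double-buffered, sorted-set idea in isolation, as a standalone sketch with hypothetical names:

#include <algorithm>
#include <cstdint>
#include <vector>

// one unload set per epoch; lookups binary-search, so sort before querying
static std::vector<uint64_t> unload_set[2];

static void add_unloaded(uint64_t klass_id, unsigned epoch) {
  unload_set[epoch & 1].push_back(klass_id);
}

static void sort_epoch(unsigned epoch) {
  std::sort(unload_set[epoch & 1].begin(), unload_set[epoch & 1].end());
}

static bool is_unloaded(uint64_t klass_id, unsigned epoch) {
  const std::vector<uint64_t>& s = unload_set[epoch & 1];
  return std::binary_search(s.begin(), s.end(), klass_id); // valid only after sort_epoch()
}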
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -21,21 +21,22 @@
 * questions.
 *
 */
#ifndef SHARE_JFR_JNI_JFRGETALLEVENTCLASSES_HPP
#define SHARE_JFR_JNI_JFRGETALLEVENTCLASSES_HPP

#include "jni.h"
#ifndef SHARE_JFR_SUPPORT_JFRKLASSUNLOADING_HPP
#define SHARE_JFR_SUPPORT_JFRKLASSUNLOADING_HPP

#include "jfr/utilities/jfrTypes.hpp"
#include "memory/allocation.hpp"
#include "utilities/exceptions.hpp"

//
// Responsible for the delivery of currently loaded jdk.jfr.Event subklasses to Java.
//
class JfrEventClasses : AllStatic {
class Klass;

class JfrKlassUnloading : AllStatic {
 public:
  static void increment_unloaded_event_class();
  static jlong unloaded_event_classes_count();
  static jobject get_all_event_classes(TRAPS);
  static bool on_unload(const Klass* k);
  static int64_t event_class_count();
  static bool is_unloaded(traceid klass_id, bool previous_epoch = false);
  static void sort(bool previous_epoch = false);
  static void clear();
};

#endif // SHARE_JFR_JNI_JFRGETALLEVENTCLASSES_HPP
#endif // SHARE_JFR_SUPPORT_JFRKLASSUNLOADING_HPP
@ -23,6 +23,7 @@
 */

#include "precompiled.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdBits.inline.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdMacros.hpp"
#include "jfr/support/jfrMethodLookup.hpp"

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -27,9 +27,9 @@
#include "jfr/jni/jfrJavaSupport.hpp"
#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
#include "jfr/periodic/jfrThreadCPULoadEvent.hpp"
#include "jfr/recorder/jfrRecorder.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp"
#include "jfr/recorder/jfrRecorder.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "jfr/recorder/storage/jfrStorage.hpp"
#include "jfr/support/jfrThreadLocal.hpp"
@ -38,12 +38,13 @@
#include "runtime/thread.inline.hpp"
#include "utilities/sizes.hpp"

/* This data structure is per thread and only accessed by the thread itself, no locking required */
JfrThreadLocal::JfrThreadLocal() :
  _java_event_writer(NULL),
  _java_buffer(NULL),
  _native_buffer(NULL),
  _shelved_buffer(NULL),
  _load_barrier_buffer_epoch_0(NULL),
  _load_barrier_buffer_epoch_1(NULL),
  _stackframes(NULL),
  _trace_id(JfrTraceId::assign_thread_id()),
  _thread(),
@ -57,7 +58,6 @@ JfrThreadLocal::JfrThreadLocal() :
  _entering_suspend_flag(0),
  _excluded(false),
  _dead(false) {

  Thread* thread = Thread::current_or_null();
  _parent_trace_id = thread != NULL ? thread->jfr_thread_local()->trace_id() : (traceid)0;
}
@ -134,6 +134,14 @@ void JfrThreadLocal::release(Thread* t) {
    FREE_C_HEAP_ARRAY(JfrStackFrame, _stackframes);
    _stackframes = NULL;
  }
  if (_load_barrier_buffer_epoch_0 != NULL) {
    _load_barrier_buffer_epoch_0->set_retired();
    _load_barrier_buffer_epoch_0 = NULL;
  }
  if (_load_barrier_buffer_epoch_1 != NULL) {
    _load_barrier_buffer_epoch_1->set_retired();
    _load_barrier_buffer_epoch_1 = NULL;
  }
}

void JfrThreadLocal::release(JfrThreadLocal* tl, Thread* t) {
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -39,6 +39,8 @@ class JfrThreadLocal {
  mutable JfrBuffer* _java_buffer;
  mutable JfrBuffer* _native_buffer;
  JfrBuffer* _shelved_buffer;
  JfrBuffer* _load_barrier_buffer_epoch_0;
  JfrBuffer* _load_barrier_buffer_epoch_1;
  mutable JfrStackFrame* _stackframes;
  mutable traceid _trace_id;
  JfrBlobHandle _thread;
@ -230,6 +232,9 @@ class JfrThreadLocal {
  // Code generation
  static ByteSize trace_id_offset();
  static ByteSize java_event_writer_offset();

  template <typename>
  friend class JfrEpochQueueKlassPolicy;
};

#endif // SHARE_JFR_SUPPORT_JFRTHREADLOCAL_HPP

76 src/hotspot/share/jfr/utilities/jfrEpochQueue.hpp Normal file
@ -0,0 +1,76 @@
/*
 * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_JFR_UTILITIES_JFREPOCHQUEUE_HPP
#define SHARE_JFR_UTILITIES_JFREPOCHQUEUE_HPP

#include "jfr/recorder/storage/jfrEpochStorage.hpp"

/*
 * An ElementPolicy template template argument provides the implementation for how elements
 * associated with the queue are encoded and managed, by exposing the following members:
 *
 * ElementPolicy::Type                                            the type of the element to be stored in the queue.
 * size_t element_size(Type* t);                                  per element storage size requirement.
 * void store_element(Type* t, Buffer* buffer);                   encode and store element of Type into storage of type Buffer.
 * Buffer* thread_local_storage(Thread* thread);                  quick access to thread local storage.
 * void set_thread_local_storage(Buffer* buffer, Thread* thread); store back capability for newly acquired storage.
 *
 * The ElementPolicy is also the callback when iterating elements of the queue.
 * The iteration callback signature to be provided by the policy class:
 *
 * size_t operator()(const u1* next_element, Callback& callback, bool previous_epoch = false);
 */
template <template <typename> class ElementPolicy>
class JfrEpochQueue : public JfrCHeapObj {
 public:
  typedef JfrEpochStorage::Buffer Buffer;
  typedef JfrEpochStorage::BufferPtr BufferPtr;
  typedef typename ElementPolicy<Buffer>::Type Type;
  typedef const Type* TypePtr;
  JfrEpochQueue();
  ~JfrEpochQueue();
  bool initialize(size_t min_buffer_size, size_t free_list_cache_count_limit, size_t cache_prealloc_count);
  void enqueue(TypePtr t);
  template <typename Callback>
  void iterate(Callback& callback, bool previous_epoch = false);
 private:
  typedef ElementPolicy<Buffer> Policy;
  Policy _policy;
  JfrEpochStorage* _storage;
  BufferPtr storage_for_element(TypePtr t, size_t element_size);

  template <typename Callback>
  class ElementDispatch {
   private:
    Callback& _callback;
    Policy& _policy;
   public:
    typedef Buffer Type;
    ElementDispatch(Callback& callback, Policy& policy);
    size_t operator()(const u1* element, bool previous_epoch);
  };
};

#endif // SHARE_JFR_UTILITIES_JFREPOCHQUEUE_HPP
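For orientation, here is a minimal ElementPolicy satisfying the contract documented above for a fixed-size element type. This is a hypothetical sketch, not part of the patch; a production policy would keep its cached buffer in JfrThreadLocal rather than in a member:

template <typename Buffer>
class FixedSizeElementPolicy {
 public:
  typedef traceid Type; // the element stored in the queue
  FixedSizeElementPolicy() : _cached(NULL) {}
  size_t element_size(const Type* t) { return sizeof(Type); }
  void store_element(const Type* t, Buffer* buffer) {
    // copy the element into the buffer at its current position
    *reinterpret_cast<Type*>(buffer->pos()) = *t;
  }
  Buffer* thread_local_storage(Thread* thread) { return _cached; }
  void set_thread_local_storage(Buffer* buffer, Thread* thread) { _cached = buffer; }
  // iteration callback: decode one element, hand it to the callback, report bytes consumed
  template <typename Callback>
  size_t operator()(const u1* next_element, Callback& callback, bool previous_epoch = false) {
    callback(*reinterpret_cast<const Type*>(next_element));
    return sizeof(Type);
  }
 private:
  Buffer* _cached;
};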
100
src/hotspot/share/jfr/utilities/jfrEpochQueue.inline.hpp
Normal file
100
src/hotspot/share/jfr/utilities/jfrEpochQueue.inline.hpp
Normal file
@ -0,0 +1,100 @@
/*
 * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_JFR_UTILITIES_JFREPOCHQUEUE_INLINE_HPP
#define SHARE_JFR_UTILITIES_JFREPOCHQUEUE_INLINE_HPP

#include "jfr/recorder/storage/jfrEpochStorage.inline.hpp"
#include "jfr/recorder/storage/jfrStorageUtils.inline.hpp"
#include "jfr/utilities/jfrEpochQueue.hpp"
#include "runtime/thread.inline.hpp"

template <template <typename> class ElementPolicy>
JfrEpochQueue<ElementPolicy>::JfrEpochQueue() : _policy(), _storage(NULL) {}

template <template <typename> class ElementPolicy>
JfrEpochQueue<ElementPolicy>::~JfrEpochQueue() {
  delete _storage;
}

template <template <typename> class ElementPolicy>
bool JfrEpochQueue<ElementPolicy>::initialize(size_t min_buffer_size, size_t free_list_cache_count_limit, size_t cache_prealloc_count) {
  assert(_storage == NULL, "invariant");
  _storage = new JfrEpochStorage();
  return _storage != NULL && _storage->initialize(min_buffer_size, free_list_cache_count_limit, cache_prealloc_count);
}

template <template <typename> class ElementPolicy>
inline typename JfrEpochQueue<ElementPolicy>::BufferPtr
JfrEpochQueue<ElementPolicy>::storage_for_element(JfrEpochQueue<ElementPolicy>::TypePtr t, size_t element_size) {
  assert(_policy.element_size(t) == element_size, "invariant");
  Thread* const thread = Thread::current();
  BufferPtr buffer = _policy.thread_local_storage(thread);
  if (buffer == NULL) {
    buffer = _storage->acquire(element_size, thread);
    _policy.set_thread_local_storage(buffer, thread);
  } else if (buffer->free_size() < element_size) {
    _storage->release(buffer);
    buffer = _storage->acquire(element_size, thread);
    _policy.set_thread_local_storage(buffer, thread);
  }
  assert(buffer->free_size() >= element_size, "invariant");
  assert(_policy.thread_local_storage(thread) == buffer, "invariant");
  return buffer;
}

template <template <typename> class ElementPolicy>
void JfrEpochQueue<ElementPolicy>::enqueue(JfrEpochQueue<ElementPolicy>::TypePtr t) {
  assert(t != NULL, "invariant");
  static size_t element_size = _policy.element_size(t);
  BufferPtr buffer = storage_for_element(t, element_size);
  assert(buffer != NULL, "invariant");
  _policy.store_element(t, buffer);
  buffer->set_pos(element_size);
}

template <template <typename> class ElementPolicy>
template <typename Callback>
JfrEpochQueue<ElementPolicy>::ElementDispatch<Callback>::ElementDispatch(Callback& callback, JfrEpochQueue<ElementPolicy>::Policy& policy) :
  _callback(callback), _policy(policy) {}

template <template <typename> class ElementPolicy>
template <typename Callback>
size_t JfrEpochQueue<ElementPolicy>::ElementDispatch<Callback>::operator()(const u1* element, bool previous_epoch) {
  assert(element != NULL, "invariant");
  return _policy(element, _callback, previous_epoch);
}

template <template <typename> class ElementPolicy>
template <typename Callback>
void JfrEpochQueue<ElementPolicy>::iterate(Callback& callback, bool previous_epoch) {
  typedef ElementDispatch<Callback> ElementDispatcher;
  typedef EpochDispatchOp<ElementDispatcher> QueueDispatcher;
  ElementDispatcher element_dispatcher(callback, _policy);
  QueueDispatcher dispatch(element_dispatcher, previous_epoch);
  _storage->iterate(dispatch, previous_epoch);
  DEBUG_ONLY(_storage->verify_previous_empty();)
}

#endif // SHARE_JFR_UTILITIES_JFREPOCHQUEUE_INLINE_HPP
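A hypothetical end-to-end flow, using the ExampleElementPolicy sketch from above; the sizing parameters and the callback are made-up values, and the real enqueue sites in this changeset are the JfrTraceId tagging paths, not user code:

// Hypothetical usage sketch only.
struct PrintCallback {
  void operator()(JfrEpochQueue<ExampleElementPolicy>::Type value) {
    // process one decoded element, e.g. resolve the traceid to a klass
  }
};

void example() {
  JfrEpochQueue<ExampleElementPolicy>* queue = new JfrEpochQueue<ExampleElementPolicy>();
  if (!queue->initialize(4096, 16, 4)) {  // min buffer size, free-list cap, prealloc count
    return;
  }
  JfrEpochQueue<ExampleElementPolicy>::Type id = 42;
  queue->enqueue(&id);                    // writer side: performed at tag time
  PrintCallback cb;
  queue->iterate(cb, /* previous_epoch */ true);  // reader side: epoch rotation drains the queue
}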
62
src/hotspot/share/jfr/utilities/jfrPredicate.hpp
Normal file
@ -0,0 +1,62 @@
/*
 * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_JFR_UTILITIES_JFRPREDICATE_HPP
#define SHARE_JFR_UTILITIES_JFRPREDICATE_HPP

#include "memory/allocation.hpp"
#include "utilities/growableArray.hpp"

/*
 * Premise is that the set is sorted.
 */
template <typename T, int cmp(const T&, const T&)>
class JfrPredicate : AllStatic {
 public:
  static bool test(GrowableArray<T>* set, T value) {
    assert(set != NULL, "invariant");
    bool found = false;
    set->template find_sorted<T, cmp>(value, found);
    return found;
  }
};

/*
 * Premise is that the set is sorted.
 */
template <typename T, int cmp(const T&, const T&)>
class JfrMutablePredicate : AllStatic {
 public:
  static bool test(GrowableArray<T>* set, T value) {
    assert(set != NULL, "invariant");
    bool found = false;
    const int location = set->template find_sorted<T, cmp>(value, found);
    if (!found) {
      set->insert_before(location, value);
    }
    return found;
  }
};

#endif // SHARE_JFR_UTILITIES_JFRPREDICATE_HPP
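The mutable variant is what gives the incremental behavior: the first test for a given value inserts it at its sorted position, so subsequent tests short-circuit. A hedged usage sketch, with illustrative values, using compare_traceid from the jfrRelation.hpp hunk below:

// Sketch only; assumes a resource-allocated GrowableArray kept sorted by cmp.
GrowableArray<traceid>* seen = new GrowableArray<traceid>(16);
bool already_seen = JfrMutablePredicate<traceid, compare_traceid>::test(seen, 4711); // false; 4711 inserted
already_seen      = JfrMutablePredicate<traceid, compare_traceid>::test(seen, 4711); // true
// JfrPredicate is the read-only membership test; it never mutates the set.
bool is_member = JfrPredicate<traceid, compare_traceid>::test(seen, 4711);           // true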
@ -26,6 +26,25 @@
#define SHARE_JFR_UTILITIES_JFRRELATION_HPP

#include "jfr/utilities/jfrNode.hpp"
#include "jfr/utilities/jfrTypes.hpp"

inline int compare_traceid(const traceid& lhs, const traceid& rhs) {
  return lhs > rhs ? 1 : (lhs < rhs) ? -1 : 0;
}

inline int sort_traceid(traceid* lhs, traceid* rhs) {
  return compare_traceid(*lhs, *rhs);
}

class Klass;

inline int compare_klasses(const Klass* const& lhs, const Klass* const& rhs) {
  return lhs > rhs ? 1 : (lhs < rhs) ? -1 : 0;
}

inline int sort_klasses(const Klass* lhs, const Klass* rhs) {
  return compare_klasses(lhs, rhs);
}

template <typename Node>
class LessThan {
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -39,14 +39,6 @@ const u4 STACK_DEPTH_DEFAULT = 64;
const u4 MIN_STACK_DEPTH = 1;
const u4 MAX_STACK_DEPTH = 2048;

inline int compare_traceid(const traceid& lhs, const traceid& rhs) {
  return lhs > rhs ? 1 : (lhs < rhs) ? -1 : 0;
}

inline int sort_traceid(traceid* lhs, traceid* rhs) {
  return compare_traceid(*lhs, *rhs);
}

enum ReservedEvent {
  EVENT_METADATA = 0,
  EVENT_CHECKPOINT = 1
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -237,7 +237,7 @@ inline void WriterHost<BE, IE, WriterPolicyImpl>::write(jstring string) {
template <typename Writer, typename T>
inline void tag_write(Writer* w, const T* t) {
  assert(w != NULL, "invariant");
  const traceid id = t == NULL ? 0 : JfrTraceId::use(t);
  const traceid id = t == NULL ? 0 : JfrTraceId::load(t);
  w->write(id);
}
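The behavioral point of the substitution above is that JfrTraceId::load(t), unlike the retired use(t), also tags t incrementally for the current epoch. A hypothetical call site for illustration only (write_klass_ref and its writer are invented names, not part of the changeset):

// Hypothetical serializer helper; `Writer` is any WriterHost instantiation.
template <typename Writer>
void write_klass_ref(Writer& writer, const Klass* klass) {
  tag_write(&writer, klass);  // writes 0 for NULL, otherwise the traceid, tagging klass as a side effect
}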