8313552: Fix -Wconversion warnings in JFR code

Reviewed-by: coleenp
This commit is contained in:
Author: Markus Grönlund — 2023-08-08 11:01:59 +00:00
Parent commit: 7e209528d3
Commit: 091e65e95b
28 changed files, with 238 additions and 228 deletions

View File

@ -167,25 +167,27 @@ static u1 boolean_method_code_attribute[] = {
0x0, // attributes_count
};
// annotation processing support
/*
Annotation layout.
enum { // initial annotation layout
atype_off = 0, // utf8 such as 'Ljava/lang/annotation/Retention;'
count_off = 2, // u2 such as 1 (one value)
member_off = 4, // utf8 such as 'value'
tag_off = 6, // u1 such as 'c' (type) or 'e' (enum)
e_tag_val = 'e',
e_type_off = 7, // utf8 such as 'Ljava/lang/annotation/RetentionPolicy;'
e_con_off = 9, // utf8 payload, such as 'SOURCE', 'CLASS', 'RUNTIME'
e_size = 11, // end of 'e' annotation
c_tag_val = 'c', // payload is type
c_con_off = 7, // utf8 payload, such as 'I'
c_size = 9, // end of 'c' annotation
s_tag_val = 's', // payload is String
s_con_off = 7, // utf8 payload, such as 'Ljava/lang/String;'
s_size = 9,
min_size = 6 // smallest possible size (zero members)
};
enum { // initial annotation layout
atype_off = 0, // utf8 such as 'Ljava/lang/annotation/Retention;'
count_off = 2, // u2 such as 1 (one value)
member_off = 4, // utf8 such as 'value'
tag_off = 6, // u1 such as 'c' (type) or 'e' (enum)
e_tag_val = 'e',
e_type_off = 7, // utf8 such as 'Ljava/lang/annotation/RetentionPolicy;'
e_con_off = 9, // utf8 payload, such as 'SOURCE', 'CLASS', 'RUNTIME'
e_size = 11, // end of 'e' annotation
c_tag_val = 'c', // payload is type
c_con_off = 7, // utf8 payload, such as 'I'
c_size = 9, // end of 'c' annotation
s_tag_val = 's', // payload is String
s_con_off = 7, // utf8 payload, such as 'Ljava/lang/String;'
s_size = 9,
min_size = 6 // smallest possible size (zero members)
};
*/
static int skip_annotation_value(const address, int, int); // fwd decl
@ -196,7 +198,7 @@ static int next_annotation_index(const address buffer, int limit, int index) {
if ((index += 2) >= limit) {
return limit;
}
int nof_members = JfrBigEndian::read<u2>(buffer + index - 2);
int nof_members = JfrBigEndian::read<int, u2>(buffer + index - 2);
while (--nof_members >= 0 && index < limit) {
index += 2; // skip member
index = skip_annotation_value(buffer, limit, index);
@ -240,7 +242,7 @@ static int skip_annotation_value(const address buffer, int limit, int index) {
if ((index += 2) >= limit) {
return limit;
}
int nof_values = JfrBigEndian::read<u2>(buffer + index - 2);
int nof_values = JfrBigEndian::read<int, u2>(buffer + index - 2);
while (--nof_values >= 0 && index < limit) {
index = skip_annotation_value(buffer, limit, index);
}
@ -255,11 +257,11 @@ static int skip_annotation_value(const address buffer, int limit, int index) {
return index;
}
static const u2 number_of_elements_offset = (u2)2;
static const u2 element_name_offset = (u2)(number_of_elements_offset + 2);
static const u2 element_name_size = (u2)2;
static const u2 value_type_relative_offset = (u2)2;
static const u2 value_relative_offset = (u2)(value_type_relative_offset + 1);
static constexpr const int number_of_elements_offset = 2;
static constexpr const int element_name_offset = number_of_elements_offset + 2;
static constexpr const int element_name_size = 2;
static constexpr const int value_type_relative_offset = 2;
static constexpr const int value_relative_offset = value_type_relative_offset + 1;
// see JVMS - 4.7.16. The RuntimeVisibleAnnotations Attribute
@ -267,19 +269,20 @@ class AnnotationElementIterator : public StackObj {
private:
const InstanceKlass* _ik;
const address _buffer;
const u2 _limit; // length of annotation
mutable u2 _current; // element
mutable u2 _next; // element
u2 value_index() const {
return JfrBigEndian::read<u2>(_buffer + _current + value_relative_offset);
const int _limit; // length of annotation
mutable int _current; // element
mutable int _next; // element
int value_index() const {
return JfrBigEndian::read<int, u2>(_buffer + _current + value_relative_offset);
}
public:
AnnotationElementIterator(const InstanceKlass* ik, address buffer, u2 limit) : _ik(ik),
_buffer(buffer),
_limit(limit),
_current(element_name_offset),
_next(element_name_offset) {
AnnotationElementIterator(const InstanceKlass* ik, address buffer, int limit) : _ik(ik),
_buffer(buffer),
_limit(limit),
_current(element_name_offset),
_next(element_name_offset) {
assert(_buffer != nullptr, "invariant");
assert(_next == element_name_offset, "invariant");
assert(_current == element_name_offset, "invariant");
@ -299,17 +302,17 @@ class AnnotationElementIterator : public StackObj {
assert(_current <= _limit, "invariant");
}
u2 number_of_elements() const {
return JfrBigEndian::read<u2>(_buffer + number_of_elements_offset);
int number_of_elements() const {
return JfrBigEndian::read<int, u2>(_buffer + number_of_elements_offset);
}
const Symbol* name() const {
assert(_current < _next, "invariant");
return _ik->constants()->symbol_at(JfrBigEndian::read<u2>(_buffer + _current));
return _ik->constants()->symbol_at(JfrBigEndian::read<int, u2>(_buffer + _current));
}
char value_type() const {
return JfrBigEndian::read<u1>(_buffer + _current + value_type_relative_offset);
return JfrBigEndian::read<char, u1>(_buffer + _current + value_type_relative_offset);
}
jint read_int() const {
@ -325,10 +328,10 @@ class AnnotationIterator : public StackObj {
private:
const InstanceKlass* _ik;
// ensure _limit field is declared before _buffer
u2 _limit; // length of annotations array
int _limit; // length of annotations array
const address _buffer;
mutable u2 _current; // annotation
mutable u2 _next; // annotation
mutable int _current; // annotation
mutable int _next; // annotation
public:
AnnotationIterator(const InstanceKlass* ik, AnnotationArray* ar) : _ik(ik),
@ -353,14 +356,16 @@ class AnnotationIterator : public StackObj {
assert(_next <= _limit, "invariant");
assert(_current <= _limit, "invariant");
}
const AnnotationElementIterator elements() const {
assert(_current < _next, "invariant");
return AnnotationElementIterator(_ik, _buffer + _current, _next - _current);
}
const Symbol* type() const {
assert(_buffer != nullptr, "invariant");
assert(_current < _limit, "invariant");
return _ik->constants()->symbol_at(JfrBigEndian::read<u2>(_buffer + _current));
return _ik->constants()->symbol_at(JfrBigEndian::read<int, u2>(_buffer + _current));
}
};
@ -476,13 +481,13 @@ static u2 utf8_info_index(const InstanceKlass* ik, const Symbol* const target, T
assert(target != nullptr, "invariant");
const ConstantPool* cp = ik->constants();
const int cp_len = cp->length();
for (u2 index = 1; index < cp_len; ++index) {
for (int index = 1; index < cp_len; ++index) {
const constantTag tag = cp->tag_at(index);
if (tag.is_utf8()) {
const Symbol* const utf8_sym = cp->symbol_at(index);
assert(utf8_sym != nullptr, "invariant");
if (utf8_sym == target) {
return index;
return static_cast<u2>(index);
}
}
}
@ -680,7 +685,7 @@ static u2 position_stream_after_cp(const ClassFileStream* stream) {
continue;
}
case JVM_CONSTANT_Utf8: {
u2 utf8_length = stream->get_u2_fast();
int utf8_length = static_cast<int>(stream->get_u2_fast());
stream->skip_u1_fast(utf8_length); // skip 2 + len bytes
continue;
}
@ -725,8 +730,7 @@ static u2 position_stream_after_fields(const ClassFileStream* stream) {
const u2 attrib_info_len = stream->get_u2_fast();
for (u2 j = 0; j < attrib_info_len; ++j) {
stream->skip_u2_fast(1);
const u4 attrib_len = stream->get_u4_fast();
stream->skip_u1_fast(attrib_len);
stream->skip_u1_fast(static_cast<int>(stream->get_u4_fast()));
}
}
return orig_fields_len;
@ -754,7 +758,7 @@ static u2 position_stream_after_methods(JfrBigEndianWriter& writer,
const u2 orig_methods_len = stream->get_u2_fast();
// Move copy position past original method_count
// in order to not copy the original count
orig_method_len_offset += sizeof(u2);
orig_method_len_offset += 2;
for (u2 i = 0; i < orig_methods_len; ++i) {
const u4 method_offset = stream->current_offset();
stream->skip_u2_fast(1); // Access Flags
@ -763,8 +767,7 @@ static u2 position_stream_after_methods(JfrBigEndianWriter& writer,
const u2 attributes_count = stream->get_u2_fast();
for (u2 j = 0; j < attributes_count; ++j) {
stream->skip_u2_fast(1);
const u4 attrib_len = stream->get_u4_fast();
stream->skip_u1_fast(attrib_len);
stream->skip_u1_fast(static_cast<int>(stream->get_u4_fast()));
}
if (clinit_method != nullptr && name_index == clinit_method->name_index()) {
// The method just parsed is an existing <clinit> method.
@ -853,7 +856,7 @@ static void adjust_exception_table(JfrBigEndianWriter& writer, u2 bci_adjustment
}
}
enum StackMapFrameTypes {
enum StackMapFrameTypes : u1 {
SAME_FRAME_BEGIN = 0,
SAME_FRAME_END = 63,
SAME_LOCALS_1_STACK_ITEM_FRAME_BEGIN = 64,
@ -895,7 +898,8 @@ static void adjust_stack_map(JfrBigEndianWriter& writer,
} else if (frame_type >= SAME_LOCALS_1_STACK_ITEM_FRAME_BEGIN &&
frame_type <= SAME_LOCALS_1_STACK_ITEM_FRAME_END) {
writer.write<u1>(SAME_LOCALS_1_STACK_ITEM_FRAME_EXTENDED);
writer.write<u2>((frame_type - SAME_LOCALS_1_STACK_ITEM_FRAME_BEGIN) + bci_adjustment_offset);
const u2 value = frame_type - SAME_LOCALS_1_STACK_ITEM_FRAME_BEGIN;
writer.write<u2>(value + bci_adjustment_offset);
} else if (frame_type >= SAME_LOCALS_1_STACK_ITEM_FRAME_EXTENDED) {
// SAME_LOCALS_1_STACK_ITEM_FRAME_EXTENDED to FULL_FRAME
// has a u2 offset_delta field
@ -909,9 +913,9 @@ static void adjust_stack_map(JfrBigEndianWriter& writer,
writer.write<u1>(stream.get_u1(THREAD));
}
u4 stack_map_attrib_len = writer.current_offset() - stack_map_attrib_len_offset;
u4 stack_map_attrib_len = static_cast<u4>(writer.current_offset() - stack_map_attrib_len_offset);
// the stack_map_table_attributes_length value is exclusive
stack_map_attrib_len -= sizeof(u4);
stack_map_attrib_len -= 4;
writer.write_at_offset(stack_map_attrib_len, stack_map_attrib_len_offset);
}
@ -938,9 +942,9 @@ static void adjust_line_number_table(JfrBigEndianWriter& writer,
writer.write<u2>((u2)lnt_stream.line());
}
writer.write_at_offset(line_number_table_entries, lnt_attributes_entries_offset);
u4 lnt_table_attributes_len = writer.current_offset() - lnt_attributes_length_offset;
u4 lnt_table_attributes_len = static_cast<u4>(writer.current_offset() - lnt_attributes_length_offset);
// the line_number_table_attributes_length value is exclusive
lnt_table_attributes_len -= sizeof(u4);
lnt_table_attributes_len -= 4;
writer.write_at_offset(lnt_table_attributes_len, lnt_attributes_length_offset);
}
@ -971,9 +975,9 @@ static u2 adjust_local_variable_table(JfrBigEndianWriter& writer,
++num_lvtt_entries;
}
}
u4 lvt_table_attributes_len = writer.current_offset() - lvt_attributes_length_offset;
u4 lvt_table_attributes_len = static_cast<u4>(writer.current_offset() - lvt_attributes_length_offset);
// the lvt_table_attributes_length value is exclusive
lvt_table_attributes_len -= sizeof(u4);
lvt_table_attributes_len -= 4;
writer.write_at_offset(lvt_table_attributes_len, lvt_attributes_length_offset);
return num_lvtt_entries;
}
@ -1001,9 +1005,9 @@ static void adjust_local_variable_type_table(JfrBigEndianWriter& writer,
writer.write<u2>(table[i].slot);
}
}
u4 lvtt_table_attributes_len = writer.current_offset() - lvtt_attributes_length_offset;
u4 lvtt_table_attributes_len = static_cast<u4>(writer.current_offset() - lvtt_attributes_length_offset);
// the lvtt_table_attributes_length value is exclusive
lvtt_table_attributes_len -= sizeof(u4);
lvtt_table_attributes_len -= 4;
writer.write_at_offset(lvtt_table_attributes_len, lvtt_attributes_length_offset);
}
@ -1061,8 +1065,8 @@ static jlong insert_clinit_method(const InstanceKlass* ik,
const u2 name_index = utf8_indexes[UTF8_OPT_clinit];
assert(name_index != invalid_cp_index, "invariant");
const u2 desc_index = utf8_indexes[UTF8_REQ_EMPTY_VOID_METHOD_DESC];
const u2 max_stack = MAX2(clinit_method != nullptr ? clinit_method->verifier_max_stack() : 1, 1);
const u2 max_locals = MAX2(clinit_method != nullptr ? clinit_method->max_locals() : 0, 0);
const u2 max_stack = MAX2<u2>(clinit_method != nullptr ? clinit_method->verifier_max_stack() : 1, 1);
const u2 max_locals = MAX2<u2>(clinit_method != nullptr ? clinit_method->max_locals() : 0, 0);
const u2 orig_bytecodes_length = clinit_method != nullptr ? (u2)clinit_method->code_size() : 0;
const address orig_bytecodes = clinit_method != nullptr ? clinit_method->code_base() : nullptr;
const u2 new_code_length = injected_code_length + orig_bytecodes_length;
@ -1111,9 +1115,9 @@ static jlong insert_clinit_method(const InstanceKlass* ik,
assert(writer.is_valid(), "invariant");
adjust_code_attributes(writer, utf8_indexes, injected_code_length, clinit_method, THREAD);
assert(writer.is_valid(), "invariant");
u4 code_attribute_len = writer.current_offset() - code_attribute_length_offset;
u4 code_attribute_len = static_cast<u4>(writer.current_offset() - code_attribute_length_offset);
// the code_attribute_length value is exclusive
code_attribute_len -= sizeof(u4);
code_attribute_len -= 4;
writer.write_at_offset(code_attribute_len, code_attribute_length_offset);
return writer.current_offset();
}
@ -1212,7 +1216,7 @@ static u2 find_or_add_utf8_info(JfrBigEndianWriter& writer,
assert(utf8_constant != nullptr, "invariant");
TempNewSymbol utf8_sym = SymbolTable::new_symbol(utf8_constant);
// lookup existing
const int utf8_orig_idx = utf8_info_index(ik, utf8_sym, THREAD);
const u2 utf8_orig_idx = utf8_info_index(ik, utf8_sym, THREAD);
if (utf8_orig_idx != invalid_cp_index) {
// existing constant pool entry found
return utf8_orig_idx;
@ -1405,8 +1409,10 @@ static u1* schema_extend_event_subklass_bytes(const InstanceKlass* ik,
//
if (register_klass) {
insert_clinit_method(ik, parser, writer, orig_cp_len, utf8_indexes, flr_register_method_ref_index, clinit_method, THREAD);
if (clinit_method == nullptr) {
++number_of_new_methods;
}
}
number_of_new_methods += clinit_method != nullptr ? 0 : register_klass ? 1 : 0;
// Update classfile methods_count
writer.write_at_offset<u2>(orig_methods_len + number_of_new_methods, new_method_len_offset);
assert(writer.is_valid(), "invariant");

View File

@ -51,13 +51,13 @@ class ObjectSample : public JfrCHeapObj {
JfrBlobHandle _type_set;
WeakHandle _object;
Ticks _allocation_time;
traceid _stack_trace_id;
traceid _thread_id;
int _index;
traceid _stack_trace_id;
traceid _stack_trace_hash;
size_t _span;
size_t _allocated;
size_t _heap_used_at_last_gc;
unsigned int _stack_trace_hash;
int _index;
bool _virtual_thread;
void release_references() {
@ -75,13 +75,13 @@ class ObjectSample : public JfrCHeapObj {
_thread(),
_type_set(),
_allocation_time(),
_stack_trace_id(0),
_thread_id(0),
_index(0),
_stack_trace_id(0),
_stack_trace_hash(0),
_span(0),
_allocated(0),
_heap_used_at_last_gc(0),
_stack_trace_hash(0),
_index(0),
_virtual_thread(false) {}
ObjectSample* next() const {
@ -170,11 +170,11 @@ class ObjectSample : public JfrCHeapObj {
_stack_trace_id = id;
}
unsigned int stack_trace_hash() const {
traceid stack_trace_hash() const {
return _stack_trace_hash;
}
void set_stack_trace_hash(unsigned int hash) {
void set_stack_trace_hash(traceid hash) {
_stack_trace_hash = hash;
}

View File

@ -249,7 +249,7 @@ void ObjectSampler::add(HeapWord* obj, size_t allocated, traceid thread_id, bool
sample->set_thread(bh);
const JfrThreadLocal* const tl = thread->jfr_thread_local();
const unsigned int stacktrace_hash = tl->cached_stack_trace_hash();
const traceid stacktrace_hash = tl->cached_stack_trace_hash();
if (stacktrace_hash != 0) {
sample->set_stack_trace_id(tl->cached_stack_trace_id());
sample->set_stack_trace_hash(stacktrace_hash);

View File

@ -338,7 +338,7 @@ TRACE_REQUEST_FUNC(ThreadContextSwitchRate) {
#define SEND_FLAGS_OF_TYPE(eventType, flagType) \
do { \
JVMFlag *flag = JVMFlag::flags; \
while (flag->name() != nullptr) { \
while (flag->name() != nullptr) { \
if (flag->is_ ## flagType()) { \
if (flag->is_unlocked()) { \
Event ## eventType event; \
@ -416,7 +416,7 @@ TRACE_REQUEST_FUNC(GCConfiguration) {
event.set_usesDynamicGCThreads(conf.uses_dynamic_gc_threads());
event.set_isExplicitGCConcurrent(conf.is_explicit_gc_concurrent());
event.set_isExplicitGCDisabled(conf.is_explicit_gc_disabled());
event.set_gcTimeRatio(conf.gc_time_ratio());
event.set_gcTimeRatio(static_cast<unsigned int>(conf.gc_time_ratio()));
event.set_pauseTarget((s8)pause_target);
event.commit();
}
@ -433,8 +433,8 @@ TRACE_REQUEST_FUNC(GCTLABConfiguration) {
TRACE_REQUEST_FUNC(GCSurvivorConfiguration) {
GCSurvivorConfiguration conf;
EventGCSurvivorConfiguration event;
event.set_maxTenuringThreshold(conf.max_tenuring_threshold());
event.set_initialTenuringThreshold(conf.initial_tenuring_threshold());
event.set_maxTenuringThreshold(static_cast<u1>(conf.max_tenuring_threshold()));
event.set_initialTenuringThreshold(static_cast<u1>(conf.initial_tenuring_threshold()));
event.commit();
}
@ -447,7 +447,7 @@ TRACE_REQUEST_FUNC(GCHeapConfiguration) {
event.set_usesCompressedOops(conf.uses_compressed_oops());
event.set_compressedOopsMode(conf.narrow_oop_mode());
event.set_objectAlignment(conf.object_alignment_in_bytes());
event.set_heapAddressBits(conf.heap_address_size_in_bits());
event.set_heapAddressBits(static_cast<u1>(conf.heap_address_size_in_bits()));
event.commit();
}
@ -457,7 +457,7 @@ TRACE_REQUEST_FUNC(YoungGenerationConfiguration) {
EventYoungGenerationConfiguration event;
event.set_maxSize((u8)max_size);
event.set_minSize(conf.min_size());
event.set_newRatio(conf.new_ratio());
event.set_newRatio(static_cast<unsigned int>(conf.new_ratio()));
event.commit();
}
@ -660,7 +660,7 @@ TRACE_REQUEST_FUNC(CompilerStatistics) {
TRACE_REQUEST_FUNC(CompilerConfiguration) {
EventCompilerConfiguration event;
event.set_threadCount(CICompilerCount);
event.set_threadCount(static_cast<s4>(CICompilerCount));
event.set_tieredCompilation(TieredCompilation);
event.set_dynamicCompilerThreadCount(UseDynamicNumberOfCompilerThreads);
event.commit();

View File

@ -81,7 +81,7 @@ bool JfrThreadCPULoadEvent::update_event(EventThreadCPULoad& event, JavaThread*
jlong user_time = cur_user_time - prev_user_time;
jlong system_time = cur_system_time - prev_system_time;
jlong wallclock_time = cur_wallclock_time - prev_wallclock_time;
jlong total_available_time = wallclock_time * processor_count;
const float total_available_time = static_cast<float>(wallclock_time * processor_count);
// Avoid reporting percentages above the theoretical max
if (user_time + system_time > wallclock_time) {
@ -97,8 +97,8 @@ bool JfrThreadCPULoadEvent::update_event(EventThreadCPULoad& event, JavaThread*
system_time -= excess;
}
}
event.set_user(total_available_time > 0 ? (double)user_time / total_available_time : 0);
event.set_system(total_available_time > 0 ? (double)system_time / total_available_time : 0);
event.set_user(total_available_time > 0 ? static_cast<float>(user_time) / total_available_time : 0);
event.set_system(total_available_time > 0 ? static_cast<float>(system_time) / total_available_time : 0);
tl->set_user_time(cur_user_time);
tl->set_cpu_time(cur_cpu_time);
return true;

View File

@ -312,7 +312,7 @@ static const size_t payload_offset = types_offset + sizeof(uint32_t);
template <typename Return>
static Return read_data(const u1* data) {
return JfrBigEndian::read<Return>(data);
return JfrBigEndian::read<Return, Return>(data);
}
static size_t total_size(const u1* data) {

View File

@ -305,7 +305,7 @@ traceid JfrThreadGroup::thread_group_id_internal(JfrThreadGroupsHelper& helper)
}
JfrThreadGroupEntry* tge = nullptr;
int parent_thread_group_id = 0;
traceid parent_thread_group_id = 0;
while (helper.has_next()) {
JfrThreadGroupPointers& ptrs = helper.next();
tge = tg_instance->find_entry(ptrs);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,7 +26,6 @@
#define SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDBITS_HPP
#include "jfr/utilities/jfrTypes.hpp"
#include "jni.h"
#include "memory/allStatic.hpp"
class JfrTraceIdBits : AllStatic {
@ -35,28 +34,28 @@ class JfrTraceIdBits : AllStatic {
static traceid load(const T* ptr);
template <typename T>
static void store(jbyte bits, const T* ptr);
static void store(uint8_t bits, const T* ptr);
template <typename T>
static void cas(jbyte bits, const T* ptr);
static void cas(uint8_t bits, const T* ptr);
template <typename T>
static void meta_store(jbyte bits, const T* ptr);
static void meta_store(uint8_t bits, const T* ptr);
template <typename T>
static void mask_store(jbyte mask, const T* ptr);
static void mask_store(uint8_t mask, const T* ptr);
template <typename T>
static void meta_mask_store(jbyte mask, const T* ptr);
static void meta_mask_store(uint8_t mask, const T* ptr);
template <typename T>
static void clear(jbyte bits, const T* ptr);
static void clear(uint8_t bits, const T* ptr);
template <typename T>
static void clear_cas(jbyte bits, const T* ptr);
static void clear_cas(uint8_t bits, const T* ptr);
template <typename T>
static void meta_clear(jbyte bits, const T* ptr);
static void meta_clear(uint8_t bits, const T* ptr);
};
#endif // SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDBITS_HPP

View File

@ -39,73 +39,73 @@ const int low_offset = 7;
const int meta_offset = low_offset - 1;
#endif
inline jbyte* low_addr(jbyte* addr) {
inline uint8_t* low_addr(uint8_t* addr) {
assert(addr != nullptr, "invariant");
return addr + low_offset;
}
inline jbyte* low_addr(traceid* addr) {
return low_addr((jbyte*)addr);
inline uint8_t* low_addr(traceid* addr) {
return low_addr(reinterpret_cast<uint8_t*>(addr));
}
inline jbyte* meta_addr(jbyte* addr) {
inline uint8_t* meta_addr(uint8_t* addr) {
assert(addr != nullptr, "invariant");
return addr + meta_offset;
}
inline jbyte* meta_addr(traceid* addr) {
return meta_addr((jbyte*)addr);
inline uint8_t* meta_addr(traceid* addr) {
return meta_addr(reinterpret_cast<uint8_t*>(addr));
}
template <typename T>
inline jbyte* traceid_tag_byte(const T* ptr) {
inline uint8_t* traceid_tag_byte(const T* ptr) {
assert(ptr != nullptr, "invariant");
return low_addr(ptr->trace_id_addr());
}
template <>
inline jbyte* traceid_tag_byte<Method>(const Method* ptr) {
inline uint8_t* traceid_tag_byte<Method>(const Method* ptr) {
assert(ptr != nullptr, "invariant");
return ptr->trace_flags_addr();
}
template <typename T>
inline jbyte* traceid_meta_byte(const T* ptr) {
inline uint8_t* traceid_meta_byte(const T* ptr) {
assert(ptr != nullptr, "invariant");
return meta_addr(ptr->trace_id_addr());
}
template <>
inline jbyte* traceid_meta_byte<Method>(const Method* ptr) {
inline uint8_t* traceid_meta_byte<Method>(const Method* ptr) {
assert(ptr != nullptr, "invariant");
return ptr->trace_meta_addr();
}
inline jbyte traceid_and(jbyte bits, jbyte current) {
inline uint8_t traceid_and(uint8_t bits, uint8_t current) {
return bits & current;
}
inline jbyte traceid_or(jbyte bits, jbyte current) {
inline uint8_t traceid_or(uint8_t bits, uint8_t current) {
return bits | current;
}
inline jbyte traceid_xor(jbyte bits, jbyte current) {
inline uint8_t traceid_xor(uint8_t bits, uint8_t current) {
return bits ^ current;
}
template <jbyte op(jbyte, jbyte)>
inline void set_form(jbyte bits, jbyte* dest) {
template <uint8_t op(uint8_t, uint8_t)>
inline void set_form(uint8_t bits, uint8_t* dest) {
assert(dest != nullptr, "invariant");
*dest = op(bits, *dest);
OrderAccess::storestore();
}
template <jbyte op(jbyte, jbyte)>
inline void set_cas_form(jbyte bits, jbyte volatile* dest) {
template <uint8_t op(uint8_t, uint8_t)>
inline void set_cas_form(uint8_t bits, uint8_t volatile* dest) {
assert(dest != nullptr, "invariant");
do {
const jbyte current = *dest;
const jbyte new_value = op(bits, current);
const uint8_t current = *dest;
const uint8_t new_value = op(bits, current);
if (current == new_value || Atomic::cmpxchg(dest, current, new_value) == current) {
return;
}
@ -113,7 +113,7 @@ inline void set_cas_form(jbyte bits, jbyte volatile* dest) {
}
template <typename T>
inline void JfrTraceIdBits::cas(jbyte bits, const T* ptr) {
inline void JfrTraceIdBits::cas(uint8_t bits, const T* ptr) {
assert(ptr != nullptr, "invariant");
set_cas_form<traceid_or>(bits, traceid_tag_byte(ptr));
}
@ -124,13 +124,13 @@ inline traceid JfrTraceIdBits::load(const T* ptr) {
return ptr->trace_id();
}
inline void set(jbyte bits, jbyte* dest) {
inline void set(uint8_t bits, uint8_t* dest) {
assert(dest != nullptr, "invariant");
set_form<traceid_or>(bits, dest);
}
template <typename T>
inline void JfrTraceIdBits::store(jbyte bits, const T* ptr) {
inline void JfrTraceIdBits::store(uint8_t bits, const T* ptr) {
assert(ptr != nullptr, "invariant");
// gcc12 warns "writing 1 byte into a region of size 0" when T == Klass.
// The warning seems to be a false positive. And there is no warning for
@ -144,49 +144,49 @@ inline void JfrTraceIdBits::store(jbyte bits, const T* ptr) {
}
template <typename T>
inline void JfrTraceIdBits::meta_store(jbyte bits, const T* ptr) {
inline void JfrTraceIdBits::meta_store(uint8_t bits, const T* ptr) {
assert(ptr != nullptr, "invariant");
set(bits, traceid_meta_byte(ptr));
}
inline void set_mask(jbyte mask, jbyte* dest) {
inline void set_mask(uint8_t mask, uint8_t* dest) {
set_cas_form<traceid_and>(mask, dest);
}
template <typename T>
inline void JfrTraceIdBits::mask_store(jbyte mask, const T* ptr) {
inline void JfrTraceIdBits::mask_store(uint8_t mask, const T* ptr) {
assert(ptr != nullptr, "invariant");
set_mask(mask, traceid_tag_byte(ptr));
}
template <typename T>
inline void JfrTraceIdBits::meta_mask_store(jbyte mask, const T* ptr) {
inline void JfrTraceIdBits::meta_mask_store(uint8_t mask, const T* ptr) {
assert(ptr != nullptr, "invariant");
set_mask(mask, traceid_meta_byte(ptr));
}
inline void clear_bits(jbyte bits, jbyte* dest) {
inline void clear_bits(uint8_t bits, uint8_t* dest) {
set_form<traceid_xor>(bits, dest);
}
template <typename T>
inline void JfrTraceIdBits::clear(jbyte bits, const T* ptr) {
inline void JfrTraceIdBits::clear(uint8_t bits, const T* ptr) {
assert(ptr != nullptr, "invariant");
clear_bits(bits, traceid_tag_byte(ptr));
}
inline void clear_bits_cas(jbyte bits, jbyte* dest) {
inline void clear_bits_cas(uint8_t bits, uint8_t* dest) {
set_cas_form<traceid_xor>(bits, dest);
}
template <typename T>
inline void JfrTraceIdBits::clear_cas(jbyte bits, const T* ptr) {
inline void JfrTraceIdBits::clear_cas(uint8_t bits, const T* ptr) {
assert(ptr != nullptr, "invariant");
clear_bits_cas(bits, traceid_tag_byte(ptr));
}
template <typename T>
inline void JfrTraceIdBits::meta_clear(jbyte bits, const T* ptr) {
inline void JfrTraceIdBits::meta_clear(uint8_t bits, const T* ptr) {
assert(ptr != nullptr, "invariant");
clear_bits(bits, traceid_meta_byte(ptr));
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -96,27 +96,27 @@ class JfrTraceIdEpoch : AllStatic {
return Atomic::load_acquire(&_synchronizing);
}
static traceid this_epoch_bit() {
static uint8_t this_epoch_bit() {
return _epoch_state ? EPOCH_1_BIT : EPOCH_0_BIT;
}
static traceid previous_epoch_bit() {
static uint8_t previous_epoch_bit() {
return _epoch_state ? EPOCH_0_BIT : EPOCH_1_BIT;
}
static traceid this_epoch_method_bit() {
static uint8_t this_epoch_method_bit() {
return _epoch_state ? EPOCH_1_METHOD_BIT : EPOCH_0_METHOD_BIT;
}
static traceid previous_epoch_method_bit() {
static uint8_t previous_epoch_method_bit() {
return _epoch_state ? EPOCH_0_METHOD_BIT : EPOCH_1_METHOD_BIT;
}
static traceid this_epoch_method_and_class_bits() {
static uint8_t this_epoch_method_and_class_bits() {
return _epoch_state ? EPOCH_1_METHOD_AND_CLASS_BITS : EPOCH_0_METHOD_AND_CLASS_BITS;
}
static traceid previous_epoch_method_and_class_bits() {
static uint8_t previous_epoch_method_and_class_bits() {
return _epoch_state ? EPOCH_0_METHOD_AND_CLASS_BITS : EPOCH_1_METHOD_AND_CLASS_BITS;
}

View File

@ -121,24 +121,31 @@ static traceid read_element(const u1* pos, const Klass** klass, bool compressed)
return compressed ? read_compressed_element(pos, klass) : read_uncompressed_element(pos, klass);
}
template <typename T>
static inline void store_traceid(T* element, traceid id, bool uncompressed) {
static inline void store_traceid(JfrEpochQueueKlassElement* element, traceid id) {
#ifdef VM_LITTLE_ENDIAN
id <<= METADATA_SHIFT;
#endif
element->id = uncompressed ? id | UNCOMPRESSED : id;
element->id = id | UNCOMPRESSED;
}
static inline void store_traceid(JfrEpochQueueNarrowKlassElement* element, traceid id) {
assert(id < uncompressed_threshold, "invariant");
#ifdef VM_LITTLE_ENDIAN
id <<= METADATA_SHIFT;
#endif
element->id = static_cast<u4>(id);
}
static void store_compressed_element(traceid id, const Klass* klass, u1* pos) {
assert(can_compress_element(id), "invariant");
JfrEpochQueueNarrowKlassElement* const element = new (pos) JfrEpochQueueNarrowKlassElement();
store_traceid(element, id, false);
store_traceid(element, id);
element->compressed_klass = encode(klass);
}
static void store_uncompressed_element(traceid id, const Klass* klass, u1* pos) {
JfrEpochQueueKlassElement* const element = new (pos) JfrEpochQueueKlassElement();
store_traceid(element, id, true);
store_traceid(element, id);
element->klass = klass;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -70,8 +70,6 @@
#define PREVIOUS_EPOCH_METHOD_BIT (JfrTraceIdEpoch::previous_epoch_method_bit())
#define THIS_EPOCH_METHOD_AND_CLASS_BITS (JfrTraceIdEpoch::this_epoch_method_and_class_bits())
#define PREVIOUS_EPOCH_METHOD_AND_CLASS_BITS (JfrTraceIdEpoch::previous_epoch_method_and_class_bits())
#define THIS_EPOCH_METHOD_FLAG_BIT ((jbyte)THIS_EPOCH_BIT)
#define PREVIOUS_EPOCH_METHOD_FLAG_BIT ((jbyte)PREVIOUS_EPOCH_BIT)
// operators
#define TRACE_ID_RAW(ptr) (JfrTraceIdBits::load(ptr))
@ -102,18 +100,18 @@
#define METHOD_AND_CLASS_USED_THIS_EPOCH(kls) (TRACE_ID_PREDICATE(kls, (THIS_EPOCH_METHOD_AND_CLASS_BITS)))
#define METHOD_AND_CLASS_USED_PREVIOUS_EPOCH(kls) (TRACE_ID_PREDICATE(kls, (PREVIOUS_EPOCH_METHOD_AND_CLASS_BITS)))
#define METHOD_AND_CLASS_USED_ANY_EPOCH(kls) (METHOD_USED_ANY_EPOCH(kls) && USED_ANY_EPOCH(kls))
#define METHOD_FLAG_USED_THIS_EPOCH(method) (METHOD_FLAG_PREDICATE(method, (THIS_EPOCH_METHOD_FLAG_BIT)))
#define METHOD_FLAG_USED_THIS_EPOCH(method) (METHOD_FLAG_PREDICATE(method, (THIS_EPOCH_BIT)))
#define METHOD_FLAG_NOT_USED_THIS_EPOCH(method) (!(METHOD_FLAG_USED_THIS_EPOCH(method)))
#define METHOD_FLAG_USED_PREVIOUS_EPOCH(method) (METHOD_FLAG_PREDICATE(method, (PREVIOUS_EPOCH_METHOD_FLAG_BIT)))
#define METHOD_FLAG_USED_PREVIOUS_EPOCH(method) (METHOD_FLAG_PREDICATE(method, (PREVIOUS_EPOCH_BIT)))
#define IS_METHOD_BLESSED(method) (METHOD_FLAG_PREDICATE(method, BLESSED_METHOD_BIT))
// setters
#define SET_USED_THIS_EPOCH(ptr) (TRACE_ID_TAG(ptr, THIS_EPOCH_BIT))
#define SET_METHOD_AND_CLASS_USED_THIS_EPOCH(kls) (TRACE_ID_TAG(kls, THIS_EPOCH_METHOD_AND_CLASS_BITS))
#define SET_METHOD_FLAG_USED_THIS_EPOCH(method) (METHOD_FLAG_TAG(method, THIS_EPOCH_METHOD_FLAG_BIT))
#define SET_METHOD_FLAG_USED_THIS_EPOCH(method) (METHOD_FLAG_TAG(method, THIS_EPOCH_BIT))
#define PREVIOUS_EPOCH_METHOD_AND_CLASS_BIT_MASK (~(PREVIOUS_EPOCH_METHOD_BIT | PREVIOUS_EPOCH_BIT))
#define CLEAR_PREVIOUS_EPOCH_METHOD_AND_CLASS(kls) (TRACE_ID_MASK_CLEAR(kls, PREVIOUS_EPOCH_METHOD_AND_CLASS_BIT_MASK))
#define CLEAR_PREVIOUS_EPOCH_METHOD_FLAG(method) (METHOD_FLAG_CLEAR(method, PREVIOUS_EPOCH_METHOD_FLAG_BIT))
#define CLEAR_PREVIOUS_EPOCH_METHOD_FLAG(method) (METHOD_FLAG_CLEAR(method, PREVIOUS_EPOCH_BIT))
#define BLESS_METHOD(method) (METHOD_FLAG_TAG(method, BLESSED_METHOD_BIT))
// types

View File

@ -204,7 +204,7 @@ int64_t JfrChunkWriter::write_chunk_header_checkpoint(bool flushpoint) {
head.write_next_generation(!flushpoint);
head.write_flags();
assert(current_offset() - header_content_pos == HEADER_SIZE, "invariant");
const u4 checkpoint_size = current_offset() - event_size_offset;
const u4 checkpoint_size = static_cast<u4>(current_offset() - event_size_offset);
write_padded_at_offset<u4>(checkpoint_size, event_size_offset);
set_last_checkpoint_offset(event_size_offset);
const int64_t sz_written = size_written();

View File

@ -247,10 +247,10 @@ inline double compute_ewma_alpha_coefficient(size_t lookback_count) {
static void log(const JfrSamplerWindow* expired, double* sample_size_ewma) {
assert(sample_size_ewma != nullptr, "invariant");
if (log_is_enabled(Debug, jfr, system, throttle)) {
*sample_size_ewma = exponentially_weighted_moving_average(expired->sample_size(), compute_ewma_alpha_coefficient(expired->params().window_lookback_count), *sample_size_ewma);
*sample_size_ewma = exponentially_weighted_moving_average(static_cast<double>(expired->sample_size()), compute_ewma_alpha_coefficient(expired->params().window_lookback_count), *sample_size_ewma);
log_debug(jfr, system, throttle)("jdk.ObjectAllocationSample: avg.sample size: %0.4f, window set point: %zu, sample size: %zu, population size: %zu, ratio: %.4f, window duration: %zu ms\n",
*sample_size_ewma, expired->params().sample_points_per_window, expired->sample_size(), expired->population_size(),
expired->population_size() == 0 ? 0 : (double)expired->sample_size() / (double)expired->population_size(),
expired->population_size() == 0 ? 0 : static_cast<double>(expired->sample_size()) / static_cast<double>(expired->population_size()),
expired->params().window_duration_ms);
}
}

View File

@ -46,10 +46,10 @@ static void copy_frames(JfrStackFrame** lhs_frames, u4 length, const JfrStackFra
}
}
JfrStackFrame::JfrStackFrame(const traceid& id, int bci, int type, const InstanceKlass* ik) :
JfrStackFrame::JfrStackFrame(const traceid& id, int bci, u1 type, const InstanceKlass* ik) :
_klass(ik), _methodid(id), _line(0), _bci(bci), _type(type) {}
JfrStackFrame::JfrStackFrame(const traceid& id, int bci, int type, int lineno, const InstanceKlass* ik) :
JfrStackFrame::JfrStackFrame(const traceid& id, int bci, u1 type, int lineno, const InstanceKlass* ik) :
_klass(ik), _methodid(id), _line(lineno), _bci(bci), _type(type) {}
JfrStackTrace::JfrStackTrace(JfrStackFrame* frames, u4 max_frames) :
@ -256,7 +256,7 @@ bool JfrStackTrace::record_async(JavaThread* jt, const frame& frame) {
return false;
}
const traceid mid = JfrTraceId::load(method);
int type = vfs.is_interpreted_frame() ? JfrStackFrame::FRAME_INTERPRETER : JfrStackFrame::FRAME_JIT;
u1 type = vfs.is_interpreted_frame() ? JfrStackFrame::FRAME_INTERPRETER : JfrStackFrame::FRAME_JIT;
int bci = 0;
if (method->is_native()) {
type = JfrStackFrame::FRAME_NATIVE;
@ -307,7 +307,7 @@ bool JfrStackTrace::record(JavaThread* jt, const frame& frame, int skip) {
}
const Method* method = vfs.method();
const traceid mid = JfrTraceId::load(method);
int type = vfs.is_interpreted_frame() ? JfrStackFrame::FRAME_INTERPRETER : JfrStackFrame::FRAME_JIT;
u1 type = vfs.is_interpreted_frame() ? JfrStackFrame::FRAME_INTERPRETER : JfrStackFrame::FRAME_JIT;
int bci = 0;
if (method->is_native()) {
type = JfrStackFrame::FRAME_NATIVE;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -44,15 +44,15 @@ class JfrStackFrame {
u1 _type;
public:
JfrStackFrame(const traceid& id, int bci, int type, const InstanceKlass* klass);
JfrStackFrame(const traceid& id, int bci, int type, int lineno, const InstanceKlass* klass);
JfrStackFrame(const traceid& id, int bci, u1 type, const InstanceKlass* klass);
JfrStackFrame(const traceid& id, int bci, u1 type, int lineno, const InstanceKlass* klass);
bool equals(const JfrStackFrame& rhs) const;
void write(JfrChunkWriter& cw) const;
void write(JfrCheckpointWriter& cpw) const;
void resolve_lineno() const;
enum {
enum : u1 {
FRAME_INTERPRETER = 0,
FRAME_JIT,
FRAME_INLINE,
@ -72,7 +72,7 @@ class JfrStackTrace : public JfrCHeapObj {
const JfrStackTrace* _next;
JfrStackFrame* _frames;
traceid _id;
unsigned int _hash;
traceid _hash;
u4 _nr_of_frames;
u4 _max_frames;
bool _frames_ownership;
@ -105,7 +105,7 @@ class JfrStackTrace : public JfrCHeapObj {
~JfrStackTrace();
public:
unsigned int hash() const { return _hash; }
traceid hash() const { return _hash; }
traceid id() const { return _id; }
};

View File

@ -192,7 +192,7 @@ void JfrStackTraceRepository::record_for_leak_profiler(JavaThread* current_threa
assert(!tl->has_cached_stack_trace(), "invariant");
JfrStackTrace stacktrace(tl->stackframes(), tl->stackdepth());
stacktrace.record(current_thread, skip);
const unsigned int hash = stacktrace.hash();
const traceid hash = stacktrace.hash();
if (hash != 0) {
tl->set_cached_stack_trace_id(add(leak_profiler_instance(), stacktrace), hash);
}
@ -222,7 +222,7 @@ traceid JfrStackTraceRepository::add_trace(const JfrStackTrace& stacktrace) {
}
// invariant is that the entry to be resolved actually exists in the table
const JfrStackTrace* JfrStackTraceRepository::lookup_for_leak_profiler(unsigned int hash, traceid id) {
const JfrStackTrace* JfrStackTraceRepository::lookup_for_leak_profiler(traceid hash, traceid id) {
const size_t index = (hash % TABLE_SIZE);
const JfrStackTrace* trace = leak_profiler_instance()._table[index];
while (trace != nullptr && trace->id() != id) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -60,7 +60,7 @@ class JfrStackTraceRepository : public JfrCHeapObj {
static size_t clear(JfrStackTraceRepository& repo);
size_t write(JfrChunkWriter& cw, bool clear);
static const JfrStackTrace* lookup_for_leak_profiler(unsigned int hash, traceid id);
static const JfrStackTrace* lookup_for_leak_profiler(traceid hash, traceid id);
static void record_for_leak_profiler(JavaThread* thread, int skip = 0);
static void clear_leak_profiler();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, Datadog, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -222,7 +222,7 @@ JfrSamplerWindow* JfrAdaptiveSampler::set_rate(const JfrSamplerParams& params, c
next->_projected_population_size = 0;
return next;
}
next->_sampling_interval = derive_sampling_interval(sample_size, expired);
next->_sampling_interval = derive_sampling_interval(static_cast<double>(sample_size), expired);
assert(next->_sampling_interval >= 1, "invariant");
next->_projected_population_size = sample_size * next->_sampling_interval;
return next;
@ -310,12 +310,12 @@ inline size_t next_geometric(double p, double u) {
u = 0.99;
}
// Inverse CDF for the geometric distribution.
return ceil(log(1.0 - u) / log(1.0 - p));
return static_cast<size_t>(ceil(log(1.0 - u) / log(1.0 - p)));
}
size_t JfrAdaptiveSampler::derive_sampling_interval(double sample_size, const JfrSamplerWindow* expired) {
assert(sample_size > 0, "invariant");
const size_t population_size = project_population_size(expired);
const double population_size = project_population_size(expired);
if (population_size <= sample_size) {
return 1;
}
@ -325,9 +325,9 @@ size_t JfrAdaptiveSampler::derive_sampling_interval(double sample_size, const Jf
}
// The projected population size is an exponentially weighted moving average, a function of the window_lookback_count.
inline size_t JfrAdaptiveSampler::project_population_size(const JfrSamplerWindow* expired) {
inline double JfrAdaptiveSampler::project_population_size(const JfrSamplerWindow* expired) {
assert(expired != nullptr, "invariant");
_avg_population_size = exponentially_weighted_moving_average(expired->population_size(), _ewma_population_size_alpha, _avg_population_size);
_avg_population_size = exponentially_weighted_moving_average(static_cast<double>(expired->population_size()), _ewma_population_size_alpha, _avg_population_size);
return _avg_population_size;
}
@ -362,7 +362,7 @@ bool JfrGTestFixedRateSampler::initialize() {
static void log(const JfrSamplerWindow* expired, double* sample_size_ewma) {
assert(sample_size_ewma != nullptr, "invariant");
if (log_is_enabled(Debug, jfr, system, throttle)) {
*sample_size_ewma = exponentially_weighted_moving_average(expired->sample_size(), compute_ewma_alpha_coefficient(expired->params().window_lookback_count), *sample_size_ewma);
*sample_size_ewma = exponentially_weighted_moving_average(static_cast<double>(expired->sample_size()), compute_ewma_alpha_coefficient(expired->params().window_lookback_count), *sample_size_ewma);
log_debug(jfr, system, throttle)("JfrGTestFixedRateSampler: avg.sample size: %0.4f, window set point: %zu, sample size: %zu, population size: %zu, ratio: %.4f, window duration: %zu ms\n",
*sample_size_ewma, expired->params().sample_points_per_window, expired->sample_size(), expired->population_size(),
expired->population_size() == 0 ? 0 : (double)expired->sample_size() / (double)expired->population_size(),

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, Datadog, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -122,7 +122,7 @@ class JfrAdaptiveSampler : public JfrCHeapObj {
size_t amortize_debt(const JfrSamplerWindow* expired);
size_t derive_sampling_interval(double sample_size, const JfrSamplerWindow* expired);
size_t project_population_size(const JfrSamplerWindow* expired);
double project_population_size(const JfrSamplerWindow* expired);
size_t project_sample_size(const JfrSamplerParams& params, const JfrSamplerWindow* expired);
JfrSamplerWindow* set_rate(const JfrSamplerParams& params, const JfrSamplerWindow* expired);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,7 +35,7 @@ class JfrStackTraceMark {
private:
Thread* _t;
traceid _previous_id;
unsigned int _previous_hash;
traceid _previous_hash;
public:
JfrStackTraceMark();
JfrStackTraceMark(Thread* t);

View File

@ -63,12 +63,12 @@ JfrThreadLocal::JfrThreadLocal() :
_thread_id_alias(max_julong),
_data_lost(0),
_stack_trace_id(max_julong),
_stack_trace_hash(0),
_parent_trace_id(0),
_last_allocated_bytes(0),
_user_time(0),
_cpu_time(0),
_wallclock_time(os::javaTimeNanos()),
_stack_trace_hash(0),
_stackdepth(0),
_entering_suspend_flag(0),
_critical_section(0),

View File

@ -57,12 +57,12 @@ class JfrThreadLocal {
mutable traceid _thread_id_alias;
u8 _data_lost;
traceid _stack_trace_id;
traceid _stack_trace_hash;
traceid _parent_trace_id;
int64_t _last_allocated_bytes;
jlong _user_time;
jlong _cpu_time;
jlong _wallclock_time;
unsigned int _stack_trace_hash;
mutable u4 _stackdepth;
volatile jint _entering_suspend_flag;
mutable volatile int _critical_section;
@ -187,7 +187,7 @@ class JfrThreadLocal {
return _parent_trace_id;
}
void set_cached_stack_trace_id(traceid id, unsigned int hash = 0) {
void set_cached_stack_trace_id(traceid id, traceid hash = 0) {
_stack_trace_id = id;
_stack_trace_hash = hash;
}
@ -205,7 +205,7 @@ class JfrThreadLocal {
return _stack_trace_id;
}
unsigned int cached_stack_trace_hash() const {
traceid cached_stack_trace_hash() const {
return _stack_trace_hash;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -46,34 +46,34 @@
class JfrTraceFlag {
private:
mutable jshort _flags;
mutable uint16_t _flags;
public:
JfrTraceFlag() : _flags(0) {}
bool is_set(jshort flag) const {
bool is_set(uint16_t flag) const {
return (_flags & flag) != 0;
}
jshort flags() const {
uint16_t flags() const {
return _flags;
}
void set_flags(jshort flags) const {
void set_flags(uint16_t flags) const {
_flags = flags;
}
jbyte* flags_addr() const {
uint8_t* flags_addr() const {
#ifdef VM_LITTLE_ENDIAN
return (jbyte*)&_flags;
return reinterpret_cast<uint8_t*>(&_flags);
#else
return ((jbyte*)&_flags) + 1;
return reinterpret_cast<uint8_t*>(&_flags) + 1;
#endif
}
jbyte* meta_addr() const {
uint8_t* meta_addr() const {
#ifdef VM_LITTLE_ENDIAN
return ((jbyte*)&_flags) + 1;
return reinterpret_cast<uint8_t*>(&_flags) + 1;
#else
return (jbyte*)&_flags;
return reinterpret_cast<uint8_t*>(&_flags);
#endif
}
};
@ -81,19 +81,19 @@ class JfrTraceFlag {
#define DEFINE_TRACE_FLAG mutable JfrTraceFlag _trace_flags
#define DEFINE_TRACE_FLAG_ACCESSOR \
bool is_trace_flag_set(jshort flag) const { \
bool is_trace_flag_set(uint16_t flag) const { \
return _trace_flags.is_set(flag); \
} \
jshort trace_flags() const { \
uint16_t trace_flags() const { \
return _trace_flags.flags(); \
} \
void set_trace_flags(jshort flags) const { \
void set_trace_flags(uint16_t flags) const { \
_trace_flags.set_flags(flags); \
} \
jbyte* trace_flags_addr() const { \
uint8_t* trace_flags_addr() const { \
return _trace_flags.flags_addr(); \
} \
jbyte* trace_meta_addr() const { \
uint8_t* trace_meta_addr() const { \
return _trace_flags.meta_addr(); \
}

View File

@ -44,13 +44,13 @@ class JfrBigEndian : AllStatic {
private:
template <typename T>
static T read_bytes(const address location);
template <typename T>
static T read_unaligned(const address location);
template <typename R, typename T>
static R read_unaligned(const address location);
public:
static bool platform_supports_unaligned_reads(void);
static bool is_aligned(const void* location, size_t size);
template <typename T>
static T read(const void* location);
template <typename R, typename T>
static R read(const void* location);
};
inline bool JfrBigEndian::is_aligned(const void* location, size_t size) {
@ -82,18 +82,18 @@ inline u8 JfrBigEndian::read_bytes(const address location) {
return Bytes::get_Java_u8(location);
}
template <typename T>
inline T JfrBigEndian::read_unaligned(const address location) {
template <typename R, typename T>
inline R JfrBigEndian::read_unaligned(const address location) {
assert(location != nullptr, "just checking");
switch (sizeof(T)) {
case sizeof(u1) :
return read_bytes<u1>(location);
return static_cast<R>(read_bytes<u1>(location));
case sizeof(u2):
return read_bytes<u2>(location);
return static_cast<R>(read_bytes<u2>(location));
case sizeof(u4):
return read_bytes<u4>(location);
return static_cast<R>(read_bytes<u4>(location));
case sizeof(u8):
return read_bytes<u8>(location);
return static_cast<R>(read_bytes<u8>(location));
default:
assert(false, "not reach");
}
@ -111,27 +111,27 @@ inline bool JfrBigEndian::platform_supports_unaligned_reads(void) {
#endif
}
template<typename T>
inline T JfrBigEndian::read(const void* location) {
template<typename R, typename T>
inline R JfrBigEndian::read(const void* location) {
assert(location != nullptr, "just checking");
assert(sizeof(T) <= sizeof(u8), "no support for arbitrary sizes");
if (sizeof(T) == sizeof(u1)) {
return *(T*)location;
return static_cast<R>(*(u1*)location);
}
if (is_aligned(location, sizeof(T)) || platform_supports_unaligned_reads()) {
// fastest case
switch (sizeof(T)) {
case sizeof(u1):
return *(T*)location;
case sizeof(u1) :
return static_cast<R>(*(u1*)location);
case sizeof(u2):
return bigendian_16(*(T*)(location));
return static_cast<R>(bigendian_16(*(u2*)location));
case sizeof(u4):
return bigendian_32(*(T*)(location));
return static_cast<R>(bigendian_32(*(u4*)location));
case sizeof(u8):
return bigendian_64(*(T*)(location));
return static_cast<R>(bigendian_64(*(u8*)location));
}
}
return read_unaligned<T>((const address)location);
return read_unaligned<R, T>((const address)location);
}
#endif // SHARE_JFR_UTILITIES_JFRBIGENDIAN_HPP

View File

@ -77,15 +77,15 @@ inline size_t BigEndianEncoderImpl::encode(T value, u1* dest) {
return 0;
}
case 2: {
Bytes::put_Java_u2(dest, value);
Bytes::put_Java_u2(dest, static_cast<u2>(value));
return 2;
}
case 4: {
Bytes::put_Java_u4(dest, value);
Bytes::put_Java_u4(dest, static_cast<u4>(value));
return 4;
}
case 8: {
Bytes::put_Java_u8(dest, value);
Bytes::put_Java_u8(dest, static_cast<u8>(value));
return 8;
}
}

View File

@ -100,7 +100,7 @@ static void post_safepoint_synchronize_event(EventSafepointStateSynchronization&
event.set_safepointId(safepoint_id);
event.set_initialThreadCount(initial_number_of_threads);
event.set_runningThreadCount(threads_waiting_to_block);
event.set_iterations(iterations);
event.set_iterations(checked_cast<u4>(iterations));
event.commit();
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, Datadog, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -58,7 +58,7 @@ namespace {
return c;
}
static jlong counter_to_millis(jlong c, bool is_os_time = false) {
return c * NANOS_PER_MILLISEC;
return c * (jlong)NANOS_PER_MILLISEC;
}
static jlong nanos_to_countertime(jlong c, bool as_os_time = false) {
return c;
@ -128,20 +128,20 @@ class JfrGTestAdaptiveSampling : public ::testing::Test {
sample_sum += i * sample[i];
}
double population_mean = population_sum / (double)population_size;
double sample_mean = sample_sum / (double)sample_size;
double population_mean = (double)population_sum / (double)population_size;
double sample_mean = (double)sample_sum / (double)sample_size;
double population_variance = 0;
double sample_variance = 0;
for (int i = 0; i < distr_slots; i++) {
double population_diff = i - population_mean;
population_variance = population[i] * population_diff * population_diff;
population_variance = (double)population[i] * population_diff * population_diff;
double sample_diff = i - sample_mean;
sample_variance = sample[i] * sample_diff * sample_diff;
sample_variance = (double)sample[i] * sample_diff * sample_diff;
}
population_variance = population_variance / (population_size - 1);
sample_variance = sample_variance / (sample_size - 1);
population_variance = population_variance / (double)(population_size - 1);
sample_variance = sample_variance / (double)(sample_size - 1);
double population_stdev = sqrt(population_variance);
double sample_stdev = sqrt(sample_variance);
@ -227,7 +227,7 @@ void JfrGTestAdaptiveSampling::test(JfrGTestAdaptiveSampling::incoming inc, size
}
const size_t target_sample_size = sample_points_per_window * window_count;
EXPECT_NEAR(target_sample_size, sample_size, expected_sample_points * error_factor) << output;
EXPECT_NEAR((double)target_sample_size, (double)sample_size, (double)expected_sample_points * error_factor) << output;
strcat(output, ", hit distribution");
assertDistributionProperties(100, population, sample, population_size, sample_size, output);
}