8314265: Fix -Wconversion warnings in miscellaneous runtime code

Reviewed-by: stuefe, dholmes, chagedorn
Author: Coleen Phillimore, 2023-08-18 12:06:02 +00:00
parent 2f04bc5f93
commit 752121114f
35 changed files with 89 additions and 87 deletions

@@ -456,12 +456,12 @@ void ArchiveHeapWriter::update_header_for_requested_obj(oop requested_obj, oop s
   // identity_hash for all shared objects, so they are less likely to be written
   // into during run time, increasing the potential of memory sharing.
   if (src_obj != nullptr) {
-    int src_hash = src_obj->identity_hash();
+    intptr_t src_hash = src_obj->identity_hash();
     fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash));
     assert(fake_oop->mark().is_unlocked(), "sanity");
-    DEBUG_ONLY(int archived_hash = fake_oop->identity_hash());
-    assert(src_hash == archived_hash, "Different hash codes: original %x, archived %x", src_hash, archived_hash);
+    DEBUG_ONLY(intptr_t archived_hash = fake_oop->identity_hash());
+    assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
   }
 }

@@ -73,6 +73,7 @@ ClassListParser::ClassListParser(const char* file, ParseMode parse_mode) : _id2k
     vm_exit_during_initialization("Loading classlist failed", errmsg);
   }
   _line_no = 0;
+  _token = _line;
   _interfaces = new (mtClass) GrowableArray<int>(10, mtClass);
   _indy_items = new (mtClass) GrowableArray<const char*>(9, mtClass);
   _parse_mode = parse_mode;
@@ -413,7 +414,7 @@ void ClassListParser::print_actual_interfaces(InstanceKlass* ik) {
 void ClassListParser::error(const char* msg, ...) {
   va_list ap;
   va_start(ap, msg);
-  int error_index = _token - _line;
+  int error_index = pointer_delta_as_int(_token, _line);
   if (error_index >= _line_len) {
     error_index = _line_len - 1;
   }
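
Note: pointer_delta_as_int is HotSpot's helper for the recurring "pointer
difference assigned to int" warning. A minimal sketch of its shape, simplified
from utilities/globalDefinitions.hpp (the real helper builds on pointer_delta
and HotSpot's two-argument assert macro):

    #include <cassert>
    #include <cstddef>

    // Subtract two pointers and assert, in debug builds, that the byte
    // distance fits in an int instead of truncating silently.
    inline int pointer_delta_as_int(const void* left, const void* right) {
      ptrdiff_t d = static_cast<const char*>(left) - static_cast<const char*>(right);
      assert(d == static_cast<ptrdiff_t>(static_cast<int>(d)) && "delta must fit in int");
      return static_cast<int>(d);
    }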

@@ -223,8 +223,8 @@ void FileMapHeader::populate(FileMapInfo *info, size_t core_region_alignment,
   _app_class_paths_start_index = ClassLoaderExt::app_class_paths_start_index();
   _app_module_paths_start_index = ClassLoaderExt::app_module_paths_start_index();
-  _num_module_paths = ClassLoader::num_module_path_entries();
   _max_used_path_index = ClassLoaderExt::max_used_path_index();
+  _num_module_paths = ClassLoader::num_module_path_entries();
 
   _verify_local = BytecodeVerificationLocal;
   _verify_remote = BytecodeVerificationRemote;

@@ -217,8 +217,8 @@ private:
   jshort _app_class_paths_start_index;  // Index of first app classpath entry
   jshort _app_module_paths_start_index; // Index of first module path entry
-  jshort _num_module_paths;             // number of module path entries
   jshort _max_used_path_index;          // max path index referenced during CDS dump
+  int    _num_module_paths;             // number of module path entries
   bool   _verify_local;                 // BytecodeVerificationLocal setting
   bool   _verify_remote;                // BytecodeVerificationRemote setting
   bool   _has_platform_or_app_classes;  // Archive contains app classes
@@ -276,7 +276,7 @@ public:
   jshort max_used_path_index() const { return _max_used_path_index; }
   jshort app_module_paths_start_index() const { return _app_module_paths_start_index; }
   jshort app_class_paths_start_index() const { return _app_class_paths_start_index; }
-  jshort num_module_paths() const { return _num_module_paths; }
+  int num_module_paths() const { return _num_module_paths; }
   void set_has_platform_or_app_classes(bool v) { _has_platform_or_app_classes = v; }
   void set_cloned_vtables(char* p) { set_as_offset(p, &_cloned_vtables_offset); }

@@ -155,7 +155,7 @@ class SystemDictionaryShared: public SystemDictionary {
   };
 
 public:
-  enum {
+  enum : char {
     FROM_FIELD_IS_PROTECTED = 1 << 0,
     FROM_IS_ARRAY           = 1 << 1,
     FROM_IS_OBJECT          = 1 << 2
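
Note: the fixed underlying type presumably matters because these flag bits
end up in one-byte storage (an assumption about the surrounding CDS code, not
shown in this hunk). With a plain enum the enumerators have type int, so
storing them into a char narrows; `enum : char` makes them char-sized to
begin with. A hypothetical, minimal illustration:

    enum : char {            // enumerators now fit one byte by construction
      FLAG_A = 1 << 0,
      FLAG_B = 1 << 1,
    };

    struct Record {
      char _flags;           // hypothetical one-byte field
      void set(char f) { _flags = f; }
    };
    // r.set(FLAG_A) involves no int -> char narrowing conversion.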

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -156,11 +156,11 @@ JVMFlag::Error MarkStackSizeConstraintFunc(size_t value, bool verbose) {
   }
 }
 
-JVMFlag::Error MinMetaspaceFreeRatioConstraintFunc(uintx value, bool verbose) {
+JVMFlag::Error MinMetaspaceFreeRatioConstraintFunc(uint value, bool verbose) {
   if (value > MaxMetaspaceFreeRatio) {
     JVMFlag::printError(verbose,
-                        "MinMetaspaceFreeRatio (" UINTX_FORMAT ") must be "
-                        "less than or equal to MaxMetaspaceFreeRatio (" UINTX_FORMAT ")\n",
+                        "MinMetaspaceFreeRatio (%u) must be "
+                        "less than or equal to MaxMetaspaceFreeRatio (%u)\n",
                         value, MaxMetaspaceFreeRatio);
     return JVMFlag::VIOLATES_CONSTRAINT;
   } else {
@@ -168,11 +168,11 @@ JVMFlag::Error MinMetaspaceFreeRatioConstraintFunc(uintx value, bool verbose) {
   }
 }
 
-JVMFlag::Error MaxMetaspaceFreeRatioConstraintFunc(uintx value, bool verbose) {
+JVMFlag::Error MaxMetaspaceFreeRatioConstraintFunc(uint value, bool verbose) {
   if (value < MinMetaspaceFreeRatio) {
     JVMFlag::printError(verbose,
-                        "MaxMetaspaceFreeRatio (" UINTX_FORMAT ") must be "
-                        "greater than or equal to MinMetaspaceFreeRatio (" UINTX_FORMAT ")\n",
+                        "MaxMetaspaceFreeRatio (%u) must be "
+                        "greater than or equal to MinMetaspaceFreeRatio (%u)\n",
                         value, MinMetaspaceFreeRatio);
     return JVMFlag::VIOLATES_CONSTRAINT;
   } else {

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,8 +47,8 @@
   f(uintx, MaxHeapFreeRatioConstraintFunc) \
   f(intx, SoftRefLRUPolicyMSPerMBConstraintFunc) \
   f(size_t, MarkStackSizeConstraintFunc) \
-  f(uintx, MinMetaspaceFreeRatioConstraintFunc) \
-  f(uintx, MaxMetaspaceFreeRatioConstraintFunc) \
+  f(uint, MinMetaspaceFreeRatioConstraintFunc) \
+  f(uint, MaxMetaspaceFreeRatioConstraintFunc) \
   f(uintx, InitialTenuringThresholdConstraintFunc) \
   f(uintx, MaxTenuringThresholdConstraintFunc) \
 \

@@ -1333,7 +1333,7 @@ void SignatureHandlerLibrary::add(const methodHandle& method) {
       ResourceMark rm;
       ptrdiff_t align_offset = align_up(_buffer, CodeEntryAlignment) - (address)_buffer;
       CodeBuffer buffer((address)(_buffer + align_offset),
-                        SignatureHandlerLibrary::buffer_size - align_offset);
+                        checked_cast<int>(SignatureHandlerLibrary::buffer_size - align_offset));
       InterpreterRuntime::SignatureHandlerGenerator(method, &buffer).generate(fingerprint);
       // copy into code heap
       address handler = set_handler(&buffer);
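
Note: checked_cast (utilities/checkedCast.hpp) is the workhorse of this
change. Roughly, it is an explicit narrowing cast plus a debug-build check
that the value survived; a simplified sketch using the standard assert:

    #include <cassert>

    template <typename To, typename From>
    constexpr To checked_cast(From value) {
      To result = static_cast<To>(value);   // explicit narrowing
      assert(static_cast<From>(result) == value && "checked_cast lost bits");
      return result;
    }

So checked_cast<int>(buffer_size - align_offset) both silences -Wconversion
and asserts that the size actually fits the CodeBuffer's int parameter.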

@@ -50,7 +50,7 @@ void TemplateInterpreter::initialize_stub() {
   // 270+ interpreter codelets are generated and each of them is aligned to HeapWordSize,
   // plus their code section is aligned to CodeEntryAlignment. So we need additional size due to alignment.
   int max_aligned_codelets = 280;
-  int max_aligned_bytes = max_aligned_codelets * (HeapWordSize + CodeEntryAlignment);
+  int max_aligned_bytes = checked_cast<int>(max_aligned_codelets * (HeapWordSize + CodeEntryAlignment));
   _code = new StubQueue(new InterpreterCodeletInterface, code_size + max_aligned_bytes, nullptr,
                         "Interpreter");
 }

@@ -65,8 +65,8 @@ void LogOutput::add_to_config_string(const LogSelection& selection) {
 }
 
-static int tag_cmp(const void *a, const void *b) {
-  return static_cast<const LogTagType*>(a) - static_cast<const LogTagType*>(b);
+static int tag_cmp(const LogTagType *a, const LogTagType *b) {
+  return primitive_compare(a, b);
 }
 
 static void sort_tags(LogTagType tags[LogTag::MaxTags]) {
@@ -74,7 +74,7 @@ static void sort_tags(LogTagType tags[LogTag::MaxTags]) {
   while (tags[ntags] != LogTag::__NO_TAG) {
     ntags++;
   }
-  qsort(tags, ntags, sizeof(*tags), tag_cmp);
+  qsort(tags, ntags, sizeof(*tags), (_sort_Fn)tag_cmp);
 }
 
 static const size_t MaxSubsets = 1 << LogTag::MaxTags;
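
Note: subtracting two pointers yields a ptrdiff_t, so returning the
difference from an int comparator is itself a narrowing conversion.
primitive_compare sidesteps the arithmetic entirely; a sketch of the idea
(HotSpot defines its own version in the utilities headers):

    // Derive the comparator sign from comparisons rather than subtraction,
    // which can truncate (or, for integers, overflow) on the way to int.
    template <typename T>
    int primitive_compare(const T& a, const T& b) {
      return (a < b) ? -1 : ((a == b) ? 0 : 1);
    }

The (_sort_Fn)tag_cmp cast then adapts the now strongly typed comparator to
qsort's expected signature.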

@@ -226,7 +226,7 @@ double LogSelection::similarity(const LogSelection& other) const {
       }
     }
   }
-  return 2.0 * intersecting / (_ntags + other._ntags);
+  return 2.0 * (double)intersecting / (double)(_ntags + other._ntags);
 }
 
 // Comparator used for sorting LogSelections based on their similarity to a specific LogSelection.

@@ -430,7 +430,7 @@ void MetaspaceGC::compute_new_size() {
   // Including the chunk free lists in the definition of "in use" is therefore
   // necessary. Not including the chunk free lists can cause capacity_until_GC to
   // shrink below committed_bytes() and this has caused serious bugs in the past.
-  const size_t used_after_gc = MetaspaceUtils::committed_bytes();
+  const double used_after_gc = (double)MetaspaceUtils::committed_bytes();
   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
 
   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
@@ -464,10 +464,10 @@ void MetaspaceGC::compute_new_size() {
                                                      new_capacity_until_GC,
                                                      MetaspaceGCThresholdUpdater::ComputeNewSize);
     log_trace(gc, metaspace)(" expanding: minimum_desired_capacity: %6.1fKB expand_bytes: %6.1fKB MinMetaspaceExpansion: %6.1fKB new metaspace HWM: %6.1fKB",
-                             minimum_desired_capacity / (double) K,
-                             expand_bytes / (double) K,
-                             MinMetaspaceExpansion / (double) K,
-                             new_capacity_until_GC / (double) K);
+                             (double) minimum_desired_capacity / (double) K,
+                             (double) expand_bytes / (double) K,
+                             (double) MinMetaspaceExpansion / (double) K,
+                             (double) new_capacity_until_GC / (double) K);
   }
   return;
 }
@@ -490,7 +490,7 @@ void MetaspaceGC::compute_new_size() {
   log_trace(gc, metaspace)(" maximum_free_percentage: %6.2f minimum_used_percentage: %6.2f",
                            maximum_free_percentage, minimum_used_percentage);
   log_trace(gc, metaspace)(" minimum_desired_capacity: %6.1fKB maximum_desired_capacity: %6.1fKB",
-                           minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
+                           (double) minimum_desired_capacity / (double) K, (double) maximum_desired_capacity / (double) K);
 
   assert(minimum_desired_capacity <= maximum_desired_capacity,
          "sanity check");
@@ -517,9 +517,9 @@ void MetaspaceGC::compute_new_size() {
       _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
     }
     log_trace(gc, metaspace)(" shrinking: initThreshold: %.1fK maximum_desired_capacity: %.1fK",
-                             MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
+                             (double) MetaspaceSize / (double) K, (double) maximum_desired_capacity / (double) K);
     log_trace(gc, metaspace)(" shrink_bytes: %.1fK current_shrink_factor: %d new shrink factor: %d MinMetaspaceExpansion: %.1fK",
-                             shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
+                             (double) shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, (double) MinMetaspaceExpansion / (double) K);
   }
 }
@@ -708,7 +708,7 @@ void Metaspace::ergo_initialize() {
   // class space : non class space usage is about 1:6. With many small classes,
   // it can get as low as 1:2. It is not a big deal though since ccs is only
   // reserved and will be committed on demand only.
-  size_t max_ccs_size = MaxMetaspaceSize * 0.8;
+  size_t max_ccs_size = 8 * (MaxMetaspaceSize / 10);
   size_t adjusted_ccs_size = MIN2(CompressedClassSpaceSize, max_ccs_size);
 
   // CCS must be aligned to root chunk size, and be at least the size of one
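
Note: `MaxMetaspaceSize * 0.8` promotes the size_t through double and
truncates it back on assignment, which -Wconversion flags. The replacement
stays in integer arithmetic; dividing before multiplying also keeps the
intermediate from overflowing. A standalone sketch of the idiom:

    #include <cstddef>

    // ~80% of v without floating point; v/10 first, so 8*(v/10) <= v and
    // the multiply cannot overflow. Rounds down slightly more than v*0.8.
    inline size_t eighty_percent_of(size_t v) {
      return 8 * (v / 10);
    }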

@@ -103,7 +103,7 @@ void print_human_readable_size(outputStream* st, size_t byte_size, size_t scale,
     st->print(SIZE_FORMAT " words", byte_size / BytesPerWord);
   } else {
     const char* display_unit = display_unit_for_scale(scale);
-    float display_value = (float) byte_size / scale;
+    float display_value = (float) byte_size / (float)scale;
     // Prevent very small but non-null values showing up as 0.00.
     if (byte_size > 0 && display_value < 0.01f) {
       st->print("<0.01 %s", display_unit);
@@ -118,7 +118,7 @@ void print_human_readable_size(outputStream* st, size_t byte_size, size_t scale,
     st->print("%*" PRIuPTR " words", width, byte_size / BytesPerWord);
   } else {
     const char* display_unit = display_unit_for_scale(scale);
-    float display_value = (float) byte_size / scale;
+    float display_value = (float) byte_size / (float)scale;
     // Since we use width to display a number with two trailing digits, increase it a bit.
     width += 3;
     // Prevent very small but non-null values showing up as 0.00.
@@ -142,7 +142,7 @@ void print_percentage(outputStream* st, size_t total, size_t part) {
     st->print("100%%");
   } else {
     // Note: clearly print very-small-but-not-0% and very-large-but-not-100% percentages.
-    float p = ((float)part / total) * 100.0f;
+    float p = ((float)part / (float)total) * 100.0f;
     if (p < 1.0f) {
       st->print(" <1%%");
     } else if (p > 99.0f) {
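
Note: the pattern throughout these reporting hunks is the same: an implicit
size_t-to-float (or -to-double) conversion can lose precision, so -Wconversion
wants the cast spelled out on both operands. A minimal example of the fixed
shape:

    #include <cstddef>

    // Explicit casts on both operands; the division then happens in float
    // with no implicit size_t -> float conversion left for the compiler to flag.
    inline float percentage(size_t part, size_t total) {
      return ((float)part / (float)total) * 100.0f;
    }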

@@ -1778,7 +1778,6 @@ void InstanceKlass::print_nonstatic_fields(FieldClosure* cl) {
   if (i > 0) {
     int length = i;
     assert(length == fields_sorted.length(), "duh");
-    // _sort_Fn is defined in growableArray.hpp.
     fields_sorted.sort(compare_fields_by_offset);
     for (int i = 0; i < length; i++) {
       fd.reinitialize(this, fields_sorted.at(i).second);

@@ -452,7 +452,7 @@ void ObjArrayKlass::oop_print_on(oop obj, outputStream* st) {
   ArrayKlass::oop_print_on(obj, st);
   assert(obj->is_objArray(), "must be objArray");
   objArrayOop oa = objArrayOop(obj);
-  int print_len = MIN2((intx) oa->length(), MaxElementPrintSize);
+  int print_len = MIN2(oa->length(), MaxElementPrintSize);
   for(int index = 0; index < print_len; index++) {
     st->print(" - %3d : ", index);
     if (oa->obj_at(index) != nullptr) {

@@ -343,7 +343,7 @@ void TypeArrayKlass::oop_print_on(oop obj, outputStream* st) {
 }
 
 void TypeArrayKlass::oop_print_elements_on(typeArrayOop ta, outputStream* st) {
-  int print_len = MIN2((intx) ta->length(), MaxElementPrintSize);
+  int print_len = MIN2(ta->length(), MaxElementPrintSize);
   switch (element_type()) {
     case T_BOOLEAN: print_boolean_array(ta, print_len, st); break;
     case T_CHAR: print_char_array(ta, print_len, st); break;

@@ -553,7 +553,7 @@ const int ObjectAlignmentInBytes = 8;
           "directory) of the dump file (defaults to java_pid<pid>.hprof " \
           "in the working directory)") \
 \
-  product(intx, HeapDumpGzipLevel, 0, MANAGEABLE, \
+  product(int, HeapDumpGzipLevel, 0, MANAGEABLE, \
           "When HeapDumpOnOutOfMemoryError is on, the gzip compression " \
           "level of the dump file. 0 (the default) disables gzip " \
           "compression. Otherwise the level must be between 1 and 9.") \
@@ -1315,7 +1315,7 @@ const int ObjectAlignmentInBytes = 8;
           "max number of compiled code units to print in error log") \
           range(0, VMError::max_error_log_print_code) \
 \
-  notproduct(intx, MaxElementPrintSize, 256, \
+  notproduct(int, MaxElementPrintSize, 256, \
           "maximum number of elements to print") \
 \
   notproduct(intx, MaxSubklassPrintSize, 4, \
@@ -1457,13 +1457,13 @@ const int ObjectAlignmentInBytes = 8;
           "The minimum expansion of Metaspace (in bytes)") \
           range(0, max_uintx) \
 \
-  product(uintx, MaxMetaspaceFreeRatio, 70, \
+  product(uint, MaxMetaspaceFreeRatio, 70, \
           "The maximum percentage of Metaspace free after GC to avoid " \
          "shrinking") \
          range(0, 100) \
          constraint(MaxMetaspaceFreeRatioConstraintFunc,AfterErgo) \
 \
-  product(uintx, MinMetaspaceFreeRatio, 40, \
+  product(uint, MinMetaspaceFreeRatio, 40, \
          "The minimum percentage of Metaspace free after GC to avoid " \
          "expansion") \
          range(0, 99) \

@@ -63,6 +63,7 @@
 #include "utilities/events.hpp"
 #include "utilities/formatBuffer.hpp"
 #include "utilities/macros.hpp"
+#include "utilities/parseInteger.hpp"
 #ifdef LINUX
 #include "trimCHeapDCmd.hpp"
 #include "mallocInfoDcmd.hpp"
@@ -878,11 +879,11 @@ EventLogDCmd::EventLogDCmd(outputStream* output, bool heap) :
 void EventLogDCmd::execute(DCmdSource source, TRAPS) {
   const char* max_value = _max.value();
-  long max = -1;
+  int max = -1;
   if (max_value != nullptr) {
-    char* endptr = nullptr;
-    max = ::strtol(max_value, &endptr, 10);
-    if (max == 0 && max_value == endptr) {
+    if (!parse_integer(max_value, &max)) {
       output()->print_cr("Invalid max option: \"%s\".", max_value);
       return;
     }
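
Note: parse_integer (utilities/parseInteger.hpp) replaces the raw strtol
call, which checked neither trailing junk nor overflow. Assuming the helper
validates both syntax and range for the destination type, its behavior is
roughly this (a hypothetical simplified version for int, not the JDK code):

    #include <cerrno>
    #include <climits>
    #include <cstdlib>

    inline bool parse_int(const char* s, int* result) {
      char* end = nullptr;
      errno = 0;
      long v = ::strtol(s, &end, 10);
      if (end == s || *end != '\0') return false;           // no digits, or trailing junk
      if (errno == ERANGE || v < INT_MIN || v > INT_MAX) return false;
      *result = (int)v;
      return true;
    }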

@@ -967,7 +967,7 @@ u4 DumperSupport::get_static_fields_size(InstanceKlass* ik, u2& field_count) {
   }
 
   // We write the value itself plus a name and a one byte type tag per field.
-  return size + field_count * (sizeof(address) + 1);
+  return checked_cast<u4>(size + field_count * (sizeof(address) + 1));
 }
 
 // dumps static fields of the given class
@@ -1080,7 +1080,7 @@ void DumperSupport::dump_instance_class(AbstractDumpWriter* writer, Klass* k) {
   u4 static_size = get_static_fields_size(ik, static_fields_count);
   u2 instance_fields_count = get_instance_fields_count(ik);
   u4 instance_fields_size = instance_fields_count * (sizeof(address) + 1);
-  u4 size = 1 + sizeof(address) + 4 + 6 * sizeof(address) + 4 + 2 + 2 + static_size + 2 + instance_fields_size;
+  u4 size = checked_cast<u4>(1 + sizeof(address) + 4 + 6 * sizeof(address) + 4 + 2 + 2 + static_size + 2 + instance_fields_size);
 
   writer->start_sub_record(HPROF_GC_CLASS_DUMP, size);
@@ -1190,7 +1190,7 @@ void DumperSupport::dump_object_array(AbstractDumpWriter* writer, objArrayOop ar
   // sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID) + sizeof(classID)
   short header_size = 1 + 2 * 4 + 2 * sizeof(address);
   int length = calculate_array_max_length(writer, array, header_size);
-  u4 size = header_size + length * sizeof(address);
+  u4 size = checked_cast<u4>(header_size + length * sizeof(address));
 
   writer->start_sub_record(HPROF_GC_OBJ_ARRAY_DUMP, size);
   writer->write_objectID(array);
@@ -2153,7 +2153,7 @@ void VM_HeapDumper::dump_stack_traces() {
       depth += extra_frames;
 
       // write HPROF_TRACE record for one thread
-      DumperSupport::write_header(writer(), HPROF_TRACE, 3*sizeof(u4) + depth*oopSize);
+      DumperSupport::write_header(writer(), HPROF_TRACE, checked_cast<u4>(3*sizeof(u4) + depth*oopSize));
       int stack_serial_num = _num_threads + STACK_TRACE_ID;
       writer()->write_u4(stack_serial_num); // stack trace serial number
       writer()->write_u4((u4) _num_threads); // thread serial number

@@ -229,7 +229,7 @@ void MallocSiteTable::print_tuning_statistics(outputStream* st) {
   st->print_cr("Malloc allocation site table:");
   st->print_cr("\tTotal entries: %d", total_entries);
   st->print_cr("\tEmpty entries (no outstanding mallocs): %d (%2.2f%%)",
-               empty_entries, ((float)empty_entries * 100) / total_entries);
+               empty_entries, ((float)empty_entries * 100) / (float)total_entries);
   st->cr();
 
   qsort(lengths, table_size, sizeof(uint16_t), qsort_helper);

@@ -282,7 +282,7 @@ void MemSummaryReporter::report_metadata(Metaspace::MetadataType type) const {
   const MetaspaceStats stats = MetaspaceUtils::get_statistics(type);
 
   size_t waste = stats.committed() - stats.used();
-  float waste_percentage = stats.committed() > 0 ? (((float)waste * 100)/stats.committed()) : 0.0f;
+  float waste_percentage = stats.committed() > 0 ? (((float)waste * 100)/(float)stats.committed()) : 0.0f;
 
   out->print_cr("%27s ( %s)", " ", name);
   out->print("%27s ( ", " ");
@@ -713,7 +713,7 @@ void MemSummaryDiffReporter::print_metaspace_diff(const char* header,
   // Diff waste
   const float waste_percentage = current_stats.committed() == 0 ? 0.0f :
-    (current_waste * 100.0f) / current_stats.committed();
+    ((float)current_waste * 100.0f) / (float)current_stats.committed();
   out->print("%27s ( waste=" SIZE_FORMAT "%s =%2.2f%%", " ",
              amount_in_current_scale(current_waste), scale, waste_percentage);
   if (diff_waste != 0) {

@@ -162,7 +162,7 @@ class NMTPreInitAllocationTable {
   static index_t index_for_key(const void* p) {
     const uint64_t hash = calculate_hash(p);
     // "table_size" is a Mersenne prime, so "modulo" is all we need here.
-    return hash % table_size;
+    return checked_cast<index_t>(hash % table_size);
   }
 
   const NMTPreInitAllocation* const * find_entry(const void* p) const {

@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -131,7 +131,7 @@ void ThreadIdTable::item_removed() {
 }
 
 double ThreadIdTable::get_load_factor() {
-  return ((double)_items_count) / _current_size;
+  return ((double)_items_count) / (double)_current_size;
 }
 
 size_t ThreadIdTable::table_size() {

@@ -103,8 +103,8 @@ public:
   static jlong get_total_thread_count() { return _total_threads_count->get_value(); }
   static jlong get_peak_thread_count() { return _peak_threads_count->get_value(); }
-  static jlong get_live_thread_count() { return _atomic_threads_count; }
-  static jlong get_daemon_thread_count() { return _atomic_daemon_threads_count; }
+  static int get_live_thread_count() { return _atomic_threads_count; }
+  static int get_daemon_thread_count() { return _atomic_daemon_threads_count; }
 
   static jlong exited_allocated_bytes() { return Atomic::load(&_exited_allocated_bytes); }
   static void incr_exited_allocated_bytes(jlong size) {

@@ -43,7 +43,7 @@ bool ThreadStackTracker::initialize(NMT_TrackingLevel level) {
 }
 
 int ThreadStackTracker::compare_thread_stack_base(const SimpleThreadStackSite& s1, const SimpleThreadStackSite& s2) {
-  return s1.base() - s2.base();
+  return primitive_compare(s1.base(), s2.base());
 }
 
 void ThreadStackTracker::new_thread_stack(void* base, size_t size, const NativeCallStack& stack) {

@@ -236,7 +236,7 @@ void Copy::fill_to_memory_atomic(void* to, size_t size, jubyte value) {
     }
   } else if (bits % sizeof(jshort) == 0) {
     jshort fill = (jushort)( (jubyte)value ); // zero-extend
-    fill += fill << 8;
+    fill += (jshort)(fill << 8);
     //Copy::fill_to_jshorts_atomic((jshort*) dst, size / sizeof(jshort));
     for (uintptr_t off = 0; off < size; off += sizeof(jshort)) {
       *(jshort*)(dst + off) = fill;
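
Note: `fill << 8` promotes the jshort to int (usual arithmetic conversions),
so the += assigns an int back into 16 bits and -Wconversion warns even though
the truncation is intended; the cast documents it. A standalone illustration:

    #include <cstdint>

    // Duplicate the low byte into both halves of a 16-bit value. The cast
    // acknowledges the deliberate truncation of the promoted int result.
    inline int16_t replicate_byte(uint8_t value) {
      int16_t fill = value;              // zero-extended low byte
      fill += (int16_t)(fill << 8);      // copy it into the high byte
      return fill;
    }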

@@ -789,7 +789,7 @@ bool DwarfFile::DebugAranges::read_set_header(DebugArangesSetHeader& header) {
   // We must align to twice the address size.
   uint8_t alignment = DwarfFile::ADDRESS_SIZE * 2;
-  uint8_t padding = alignment - (_reader.get_position() - _section_start_address) % alignment;
+  long padding = alignment - (_reader.get_position() - _section_start_address) % alignment;
   return _reader.move_position(padding);
 }
@@ -1423,7 +1423,7 @@ bool DwarfFile::LineNumberProgram::apply_extended_opcode() {
         // Must be an unsigned integer as specified in section 6.2.2 of the DWARF 4 spec for the discriminator register.
         return false;
       }
-      _state->_discriminator = discriminator;
+      _state->_discriminator = static_cast<uint32_t>(discriminator);
       break;
     default:
       assert(false, "Unknown extended opcode");
@@ -1446,11 +1446,12 @@ bool DwarfFile::LineNumberProgram::apply_standard_opcode(const uint8_t opcode) {
      }
      break;
    case DW_LNS_advance_pc: { // 1 operand
-      uint64_t operation_advance;
-      if (!_reader.read_uleb128(&operation_advance, 4)) {
+      uint64_t adv;
+      if (!_reader.read_uleb128(&adv, 4)) {
        // Must be at most 4 bytes because the index register is only 4 bytes wide.
        return false;
      }
+      uint32_t operation_advance = checked_cast<uint32_t>(adv);
      _state->add_to_address_register(operation_advance, _header);
      if (_state->_dwarf_version == 4) {
        _state->set_index_register(operation_advance, _header);
@@ -1464,7 +1465,7 @@ bool DwarfFile::LineNumberProgram::apply_standard_opcode(const uint8_t opcode) {
        // line register is 4 bytes wide.
        return false;
      }
-      _state->_line += line;
+      _state->_line += static_cast<uint32_t>(line);
      DWARF_LOG_TRACE(" DW_LNS_advance_line (%d)", _state->_line);
      break;
    case DW_LNS_set_file: // 1 operand
@@ -1473,7 +1474,7 @@ bool DwarfFile::LineNumberProgram::apply_standard_opcode(const uint8_t opcode) {
        // file register is 4 bytes wide.
        return false;
      }
-      _state->_file = file;
+      _state->_file = static_cast<uint32_t>(file);
      DWARF_LOG_TRACE(" DW_LNS_set_file (%u)", _state->_file);
      break;
    case DW_LNS_set_column: // 1 operand
@@ -1482,7 +1483,7 @@ bool DwarfFile::LineNumberProgram::apply_standard_opcode(const uint8_t opcode) {
        // column register is 4 bytes wide.
        return false;
      }
-      _state->_column = column;
+      _state->_column = static_cast<uint32_t>(column);
      DWARF_LOG_TRACE(" DW_LNS_set_column (%u)", _state->_column);
      break;
    case DW_LNS_negate_stmt: // No operands
@@ -1528,7 +1529,7 @@ bool DwarfFile::LineNumberProgram::apply_standard_opcode(const uint8_t opcode) {
        // isa register is 4 bytes wide.
        return false;
      }
-      _state->_isa = isa;
+      _state->_isa = static_cast<uint32_t>(isa); // only save 4 bytes
      DWARF_LOG_TRACE(" DW_LNS_set_isa (%u)", _state->_isa);
      break;
    default:

@@ -483,7 +483,7 @@ class DwarfFile : public ElfFile {
     DwarfFile* _dwarf_file;
     MarkedDwarfFileReader _reader;
-    uint32_t _section_start_address;
+    uintptr_t _section_start_address;
 
     // a calculated end position
     long _entry_end;

@@ -46,7 +46,7 @@ ElfFuncDescTable::ElfFuncDescTable(FILE* file, Elf_Shdr shdr, int index) :
 ElfFuncDescTable::~ElfFuncDescTable() {
 }
 
-address ElfFuncDescTable::lookup(Elf_Word index) {
+address ElfFuncDescTable::lookup(Elf_Addr index) {
   if (NullDecoder::is_error(_status)) {
     return nullptr;
   }

@@ -133,7 +133,7 @@ public:
   ~ElfFuncDescTable();
 
   // return the function address for the function descriptor at 'index' or null on error
-  address lookup(Elf_Word index);
+  address lookup(Elf_Addr index);
 
   int get_index() const { return _index; };

@@ -48,7 +48,7 @@ ElfSymbolTable::~ElfSymbolTable() {
 bool ElfSymbolTable::compare(const Elf_Sym* sym, address addr, int* stringtableIndex, int* posIndex, int* offset, ElfFuncDescTable* funcDescTable) {
   if (STT_FUNC == ELF_ST_TYPE(sym->st_info)) {
-    Elf_Word st_size = sym->st_size;
+    Elf64_Xword st_size = sym->st_size;
     const Elf_Shdr* shdr = _section.section_header();
     address sym_addr;
     if (funcDescTable != nullptr && funcDescTable->get_index() == sym->st_shndx) {
@@ -77,7 +77,7 @@ bool ElfSymbolTable::lookup(address addr, int* stringtableIndex, int* posIndex,
   }
 
   size_t sym_size = sizeof(Elf_Sym);
-  int count = _section.section_header()->sh_size / sym_size;
+  int count = checked_cast<int>(_section.section_header()->sh_size / sym_size);
   Elf_Sym* symbols = (Elf_Sym*)_section.section_data();
 
   if (symbols != nullptr) {

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -64,7 +64,7 @@ inline unsigned population_count(T x) {
   // The preceding multiply by z_ones is the only place where the intermediate
   // calculations can exceed the range of T. We need to discard any such excess
   // before the right-shift, hence the conversion back to T.
-  return static_cast<T>(r) >> (((sizeof(T) - 1) * BitsPerByte));
+  return checked_cast<unsigned>(static_cast<T>(r) >> (((sizeof(T) - 1) * BitsPerByte)));
 }
 
 #endif // SHARE_UTILITIES_POPULATION_COUNT_HPP

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -74,11 +74,11 @@ void TableRateStatistics::stamp() {
 }
 
 float TableRateStatistics::get_add_rate() {
-  return (float)((_added_items_stamp - _added_items_stamp_prev) / _seconds_stamp);
+  return (float)(((double)_added_items_stamp - (double)_added_items_stamp_prev) / _seconds_stamp);
 }
 
 float TableRateStatistics::get_remove_rate() {
-  return (float)((_removed_items_stamp - _removed_items_stamp_prev) / _seconds_stamp);
+  return (float)(_removed_items_stamp - _removed_items_stamp_prev) / (float)_seconds_stamp;
 }
 
 TableStatistics::TableStatistics() :
@@ -101,12 +101,12 @@ TableStatistics::TableStatistics(NumberSeq summary, size_t literal_bytes, size_t
     _add_rate(0), _remove_rate(0) {
   _number_of_buckets = summary.num();
-  _number_of_entries = summary.sum();
-  _maximum_bucket_size = summary.maximum();
-  _average_bucket_size = summary.avg();
-  _variance_of_bucket_size = summary.variance();
-  _stddev_of_bucket_size = summary.sd();
+  _number_of_entries = (size_t)summary.sum();
+  _maximum_bucket_size = (size_t)summary.maximum();
+  _average_bucket_size = (float)summary.avg();
+  _variance_of_bucket_size = (float)summary.variance();
+  _stddev_of_bucket_size = (float)summary.sd();
   _bucket_bytes = _number_of_buckets * bucket_bytes;
   _entry_bytes = _number_of_entries * node_bytes;
@@ -140,7 +140,7 @@ void TableStatistics::print(outputStream* st, const char *table_name) {
                " bytes, each " SIZE_FORMAT,
                _number_of_entries, _entry_bytes, _entry_size);
   if (_literal_bytes != 0) {
-    float literal_avg = (_number_of_entries <= 0) ? 0 : (_literal_bytes / _number_of_entries);
+    float literal_avg = (_number_of_entries <= 0) ? 0.0f : (float)(_literal_bytes / _number_of_entries);
     st->print_cr("Number of literals : %9" PRIuPTR " = %9" PRIuPTR
                  " bytes, avg %7.3f",
                  _number_of_entries, _literal_bytes, literal_avg);

@@ -160,7 +160,7 @@ void xmlStream::see_tag(const char* tag, bool push) {
   char* old_low = _element_close_stack_low;
   char* push_ptr = old_ptr - (tag_len+1);
   if (push_ptr < old_low) {
-    int old_len = _element_close_stack_high - old_ptr;
+    int old_len = pointer_delta_as_int(_element_close_stack_high, old_ptr);
     int new_len = old_len * 2;
     if (new_len < 100) new_len = 100;
     char* new_low = NEW_C_HEAP_ARRAY(char, new_len, mtInternal);

@@ -54,7 +54,7 @@ class ChunkManagerRandomChunkAllocTest {
     // Assuming we allocate only the largest type of chunk, committed to the fullest commit factor,
     // how many chunks can we accommodate before hitting max_footprint_words?
     const size_t largest_chunk_size = word_size_for_level(r.lowest());
-    int max_chunks = (max_footprint_words * commit_factor) / largest_chunk_size;
+    int max_chunks = (int)((max_footprint_words * commit_factor) / (float) largest_chunk_size);
     // .. but cap at (min) 50 and (max) 1000
     max_chunks = MIN2(1000, max_chunks);
     max_chunks = MAX2(50, max_chunks);
@@ -96,7 +96,7 @@ class ChunkManagerRandomChunkAllocTest {
   // Given a chunk level and a factor, return a random commit size.
   static size_t random_committed_words(chunklevel_t lvl, float commit_factor) {
-    const size_t sz = word_size_for_level(lvl) * commit_factor;
+    const size_t sz = (size_t)((float)word_size_for_level(lvl) * commit_factor);
     if (sz < 2) {
       return 0;
     }