8313882: Fix -Wconversion warnings in runtime code

Reviewed-by: pchilanomate, dlong, dholmes
Coleen Phillimore 2023-08-10 11:57:25 +00:00
parent 0cb9ab04f4
commit f47767ffef
26 changed files with 129 additions and 135 deletions

@@ -274,7 +274,7 @@ class Bytecode_checkcast: public Bytecode {
void verify() const { assert(Bytecodes::java_code(code()) == Bytecodes::_checkcast, "check checkcast"); }
// Returns index
long index() const { return get_index_u2(Bytecodes::_checkcast); };
u2 index() const { return get_index_u2(Bytecodes::_checkcast); };
};
// Abstraction for instanceof
@@ -284,7 +284,7 @@ class Bytecode_instanceof: public Bytecode {
void verify() const { assert(code() == Bytecodes::_instanceof, "check instanceof"); }
// Returns index
long index() const { return get_index_u2(Bytecodes::_instanceof); };
u2 index() const { return get_index_u2(Bytecodes::_instanceof); };
};
class Bytecode_new: public Bytecode {
@@ -293,7 +293,7 @@ class Bytecode_new: public Bytecode {
void verify() const { assert(java_code() == Bytecodes::_new, "check new"); }
// Returns index
long index() const { return get_index_u2(Bytecodes::_new); };
u2 index() const { return get_index_u2(Bytecodes::_new); };
};
class Bytecode_multianewarray: public Bytecode {
@@ -302,7 +302,7 @@ class Bytecode_multianewarray: public Bytecode {
void verify() const { assert(java_code() == Bytecodes::_multianewarray, "check new"); }
// Returns index
long index() const { return get_index_u2(Bytecodes::_multianewarray); };
u2 index() const { return get_index_u2(Bytecodes::_multianewarray); };
};
class Bytecode_anewarray: public Bytecode {
@@ -311,7 +311,7 @@ class Bytecode_anewarray: public Bytecode {
void verify() const { assert(java_code() == Bytecodes::_anewarray, "check anewarray"); }
// Returns index
long index() const { return get_index_u2(Bytecodes::_anewarray); };
u2 index() const { return get_index_u2(Bytecodes::_anewarray); };
};
// Abstraction for ldc, ldc_w and ldc2_w
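
The index accessors above now return u2, the natural width of the two-byte operand, instead of long. A minimal standalone sketch of the -Wconversion warning this removes at call sites (the typedef and callers below are illustrative, not HotSpot code):

    #include <cstdint>

    typedef uint16_t u2;                  // HotSpot's two-byte unsigned type

    long index_as_long() { return 42; }   // old shape: wide return type
    u2   index_as_u2()   { return 42; }   // new shape: matches the operand width

    int main() {
      u2 slot_old = index_as_long();      // -Wconversion: 'long' to 'u2' may change value
      u2 slot_new = index_as_u2();        // no narrowing, no warning
      return slot_old == slot_new ? 0 : 1;
    }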

@@ -215,7 +215,7 @@
nonstatic_field(JavaThread, _jni_environment, JNIEnv) \
nonstatic_field(JavaThread, _poll_data, SafepointMechanism::ThreadData) \
nonstatic_field(JavaThread, _stack_overflow_state._reserved_stack_activation, address) \
nonstatic_field(JavaThread, _held_monitor_count, int64_t) \
nonstatic_field(JavaThread, _held_monitor_count, intx) \
JVMTI_ONLY(nonstatic_field(JavaThread, _is_in_VTMS_transition, bool)) \
JVMTI_ONLY(nonstatic_field(JavaThread, _is_in_tmp_VTMS_transition, bool)) \
\

@@ -1177,7 +1177,7 @@ bool Arguments::process_settings_file(const char* file_name, bool should_exist,
bool in_white_space = true;
bool in_comment = false;
bool in_quote = false;
char quote_c = 0;
int quote_c = 0;
bool result = true;
int c = getc(stream);
@@ -1189,7 +1189,7 @@ bool Arguments::process_settings_file(const char* file_name, bool should_exist,
if (c == '#') in_comment = true;
else if (!isspace(c)) {
in_white_space = false;
token[pos++] = c;
token[pos++] = checked_cast<char>(c);
}
}
} else {
@@ -1209,7 +1209,7 @@ bool Arguments::process_settings_file(const char* file_name, bool should_exist,
} else if (in_quote && (c == quote_c)) {
in_quote = false;
} else {
token[pos++] = c;
token[pos++] = checked_cast<char>(c);
}
}
c = getc(stream);
@@ -1565,22 +1565,22 @@ void Arguments::set_heap_size() {
// Convert deprecated flags
if (FLAG_IS_DEFAULT(MaxRAMPercentage) &&
!FLAG_IS_DEFAULT(MaxRAMFraction))
MaxRAMPercentage = 100.0 / MaxRAMFraction;
MaxRAMPercentage = 100.0 / (double)MaxRAMFraction;
if (FLAG_IS_DEFAULT(MinRAMPercentage) &&
!FLAG_IS_DEFAULT(MinRAMFraction))
MinRAMPercentage = 100.0 / MinRAMFraction;
MinRAMPercentage = 100.0 / (double)MinRAMFraction;
if (FLAG_IS_DEFAULT(InitialRAMPercentage) &&
!FLAG_IS_DEFAULT(InitialRAMFraction))
InitialRAMPercentage = 100.0 / InitialRAMFraction;
InitialRAMPercentage = 100.0 / (double)InitialRAMFraction;
// If the maximum heap size has not been set with -Xmx,
// then set it as fraction of the size of physical memory,
// respecting the maximum and minimum sizes of the heap.
if (FLAG_IS_DEFAULT(MaxHeapSize)) {
julong reasonable_max = (julong)((phys_mem * MaxRAMPercentage) / 100);
const julong reasonable_min = (julong)((phys_mem * MinRAMPercentage) / 100);
julong reasonable_max = (julong)(((double)phys_mem * MaxRAMPercentage) / 100);
const julong reasonable_min = (julong)(((double)phys_mem * MinRAMPercentage) / 100);
if (reasonable_min < MaxHeapSize) {
// Small physical memory, so use a minimum fraction of it for the heap
reasonable_max = reasonable_min;
@@ -1664,7 +1664,7 @@ void Arguments::set_heap_size() {
reasonable_minimum = limit_heap_by_allocatable_memory(reasonable_minimum);
if (InitialHeapSize == 0) {
julong reasonable_initial = (julong)((phys_mem * InitialRAMPercentage) / 100);
julong reasonable_initial = (julong)(((double)phys_mem * InitialRAMPercentage) / 100);
reasonable_initial = limit_heap_by_allocatable_memory(reasonable_initial);
reasonable_initial = MAX3(reasonable_initial, reasonable_minimum, (julong)MinHeapSize);
@@ -1965,15 +1965,15 @@ static const char* system_assertion_options[] = {
"-dsa", "-esa", "-disablesystemassertions", "-enablesystemassertions", 0
};
bool Arguments::parse_uintx(const char* value,
uintx* uintx_arg,
uintx min_size) {
uintx n;
bool Arguments::parse_uint(const char* value,
uint* uint_arg,
uint min_size) {
uint n;
if (!parse_integer(value, &n)) {
return false;
}
if (n >= min_size) {
*uintx_arg = n;
*uint_arg = n;
return true;
} else {
return false;
@@ -2728,8 +2728,8 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, bool* patch_m
return JNI_EINVAL;
}
} else if (match_option(option, "-XX:MaxTenuringThreshold=", &tail)) {
uintx max_tenuring_thresh = 0;
if (!parse_uintx(tail, &max_tenuring_thresh, 0)) {
uint max_tenuring_thresh = 0;
if (!parse_uint(tail, &max_tenuring_thresh, 0)) {
jio_fprintf(defaultStream::error_stream(),
"Improperly specified VM option \'MaxTenuringThreshold=%s\'\n", tail);
return JNI_EINVAL;
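
In process_settings_file above, c comes from getc(), which returns int so that EOF stays representable; writing it back into the char token buffer is therefore a narrowing conversion, now made explicit with checked_cast. A standalone sketch of the idea behind checked_cast (HotSpot's real helper differs in detail): narrow, then assert that the value survives the round trip.

    #include <cassert>
    #include <cstdio>

    // Sketch only: narrowing cast that asserts the value is preserved.
    template <typename To, typename From>
    To checked_cast_sketch(From value) {
      To result = static_cast<To>(value);
      assert(static_cast<From>(result) == value && "narrowing changed the value");
      return result;
    }

    int main() {
      int c = getchar();                          // int, so EOF (-1) fits
      if (c != EOF) {
        char ch = checked_cast_sketch<char>(c);   // asserts if c does not fit in char
        putchar(ch);
      }
      return 0;
    }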

@@ -378,10 +378,10 @@ class Arguments : AllStatic {
static jint parse(const JavaVMInitArgs* args);
// Parse a string for a unsigned integer. Returns true if value
// is an unsigned integer greater than or equal to the minimum
// parameter passed and returns the value in uintx_arg. Returns
// false otherwise, with uintx_arg undefined.
static bool parse_uintx(const char* value, uintx* uintx_arg,
uintx min_size);
// parameter passed and returns the value in uint_arg. Returns
// false otherwise, with uint_arg undefined.
static bool parse_uint(const char* value, uint* uintx_arg,
uint min_size);
// Apply ergonomics
static jint apply_ergo();
// Adjusts the arguments after the OS have adjusted the arguments
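
parse_uintx becomes parse_uint, so the parsed value lands directly in a uint and callers such as the MaxTenuringThreshold parsing above need no later narrowing. A rough standalone equivalent of the documented contract (HotSpot's version delegates to parse_integer; this strtoul-based sketch is only illustrative):

    #include <cerrno>
    #include <climits>
    #include <cstdlib>

    // Sketch: parse an unsigned integer >= min_size into *uint_arg; false otherwise.
    static bool parse_uint_sketch(const char* value, unsigned int* uint_arg,
                                  unsigned int min_size) {
      char* end = nullptr;
      errno = 0;
      unsigned long n = strtoul(value, &end, 10);
      if (end == value || *end != '\0' || errno == ERANGE || n > UINT_MAX) {
        return false;                 // not a well-formed unsigned int
      }
      if (n < min_size) {
        return false;                 // below the required minimum
      }
      *uint_arg = static_cast<unsigned int>(n);
      return true;
    }

    int main() {
      unsigned int threshold = 0;
      return parse_uint_sketch("15", &threshold, 0) && threshold == 15 ? 0 : 1;
    }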

@@ -404,7 +404,7 @@ protected:
// slow path
virtual stackChunkOop allocate_chunk_slow(size_t stack_size) = 0;
int cont_size() { return _cont_stack_bottom - _cont_stack_top; }
int cont_size() { return pointer_delta_as_int(_cont_stack_bottom, _cont_stack_top); }
private:
// slow path
@@ -1064,7 +1064,7 @@ NOINLINE freeze_result FreezeBase::recurse_freeze_interpreted_frame(frame& f, fr
// The frame's top never includes the stack arguments to the callee
intptr_t* const stack_frame_top = ContinuationHelper::InterpretedFrame::frame_top(f, callee_argsize, callee_interpreted);
intptr_t* const stack_frame_bottom = ContinuationHelper::InterpretedFrame::frame_bottom(f);
const int fsize = stack_frame_bottom - stack_frame_top;
const int fsize = pointer_delta_as_int(stack_frame_bottom, stack_frame_top);
DEBUG_ONLY(verify_frame_top(f, stack_frame_top));
@@ -1123,7 +1123,7 @@ freeze_result FreezeBase::recurse_freeze_compiled_frame(frame& f, frame& caller,
intptr_t* const stack_frame_bottom = ContinuationHelper::CompiledFrame::frame_bottom(f);
// including metadata between f and its stackargs
const int argsize = ContinuationHelper::CompiledFrame::stack_argsize(f) + frame::metadata_words_at_top;
const int fsize = stack_frame_bottom + argsize - stack_frame_top;
const int fsize = pointer_delta_as_int(stack_frame_bottom + argsize, stack_frame_top);
log_develop_trace(continuations)("recurse_freeze_compiled_frame %s _size: %d fsize: %d argsize: %d",
ContinuationHelper::Frame::frame_method(f) != nullptr ?
@@ -1627,7 +1627,7 @@ static freeze_result is_pinned0(JavaThread* thread, oop cont_scope, bool safepoi
if (scope == cont_scope) {
break;
}
int monitor_count = entry->parent_held_monitor_count();
intx monitor_count = entry->parent_held_monitor_count();
entry = entry->parent();
if (entry == nullptr) {
break;
@@ -2068,7 +2068,7 @@ void ThawBase::finalize_thaw(frame& entry, int argsize) {
}
assert(_stream.is_done() == chunk->is_empty(), "");
int total_thawed = _stream.unextended_sp() - _top_unextended_sp_before_thaw;
int total_thawed = pointer_delta_as_int(_stream.unextended_sp(), _top_unextended_sp_before_thaw);
chunk->set_max_thawing_size(chunk->max_thawing_size() - total_thawed);
_cont.set_argsize(argsize);
@@ -2154,7 +2154,7 @@ NOINLINE void ThawBase::recurse_thaw_interpreted_frame(const frame& hf, frame& c
assert(hf.is_heap_frame(), "should be");
assert(!f.is_heap_frame(), "should not be");
const int fsize = heap_frame_bottom - heap_frame_top;
const int fsize = pointer_delta_as_int(heap_frame_bottom, heap_frame_top);
assert((stack_frame_bottom == stack_frame_top + fsize), "");
// Some architectures (like AArch64/PPC64/RISC-V) add padding between the locals and the fixed_frame to keep the fp 16-byte-aligned.
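
The frame-size computations above subtract intptr_t* values, which yields a ptrdiff_t; assigning that straight to int is what -Wconversion flags. pointer_delta_as_int does the narrowing with a check. A generic sketch of the idea (not HotSpot's exact helper, which builds on pointer_delta and checked_cast):

    #include <cassert>
    #include <climits>
    #include <cstddef>

    // Sketch: difference of two same-typed pointers, in elements, narrowed to int.
    template <typename T>
    int pointer_delta_as_int_sketch(const T* left, const T* right) {
      std::ptrdiff_t delta = left - right;   // element count; may exceed int in theory
      assert(delta >= 0 && delta <= INT_MAX && "pointer delta must fit in int");
      return static_cast<int>(delta);
    }

    int main() {
      long stack[16];
      // e.g. frame bottom at stack + 16, frame top at stack + 4 -> 12 slots
      return pointer_delta_as_int_sketch(stack + 16, stack + 4) == 12 ? 0 : 1;
    }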

@@ -248,11 +248,11 @@ Deoptimization::UnrollBlock::~UnrollBlock() {
int Deoptimization::UnrollBlock::size_of_frames() const {
// Account first for the adjustment of the initial frame
int result = _caller_adjustment;
intptr_t result = _caller_adjustment;
for (int index = 0; index < number_of_frames(); index++) {
result += frame_sizes()[index];
}
return result;
return checked_cast<int>(result);
}
void Deoptimization::UnrollBlock::print() {
@@ -1081,7 +1081,7 @@ protected:
objArrayOop cache = CacheType::cache(ik);
assert(cache->length() > 0, "Empty cache");
_low = BoxType::value(cache->obj_at(0));
_high = _low + cache->length() - 1;
_high = checked_cast<PrimitiveType>(_low + cache->length() - 1);
_cache = JNIHandles::make_global(Handle(thread, cache));
}
}
@@ -1100,7 +1100,7 @@ public:
}
oop lookup(PrimitiveType value) {
if (_low <= value && value <= _high) {
int offset = value - _low;
int offset = checked_cast<int>(value - _low);
return objArrayOop(JNIHandles::resolve_non_null(_cache))->obj_at(offset);
}
return nullptr;
@@ -1654,7 +1654,7 @@ vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, Re
// stuff a C2I adapter we can properly fill in the callee-save
// register locations.
frame caller = fr.sender(reg_map);
int frame_size = caller.sp() - fr.sp();
int frame_size = pointer_delta_as_int(caller.sp(), fr.sp());
frame sender = caller;

@@ -726,10 +726,10 @@ const int ObjectAlignmentInBytes = 8;
/* because of overflow issue */ \
product(intx, MonitorDeflationMax, 1000000, DIAGNOSTIC, \
"The maximum number of monitors to deflate, unlink and delete " \
"at one time (minimum is 1024).") \
"at one time (minimum is 1024).") \
range(1024, max_jint) \
\
product(intx, MonitorUsedDeflationThreshold, 90, DIAGNOSTIC, \
product(int, MonitorUsedDeflationThreshold, 90, DIAGNOSTIC, \
"Percentage of used monitors before triggering deflation (0 is " \
"off). The check is performed on GuaranteedSafepointInterval, " \
"AsyncDeflationInterval or GuaranteedAsyncDeflationInterval, " \

@@ -204,16 +204,17 @@ void print_method_invocation_histogram() {
total = int_total + comp_total;
special_total = final_total + static_total +synch_total + native_total + access_total;
tty->print_cr("Invocations summary for %d methods:", collected_invoked_methods->length());
double total_div = (double)total;
tty->print_cr("\t" UINT64_FORMAT_W(12) " (100%%) total", total);
tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- interpreted", int_total, 100.0 * int_total / total);
tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- compiled", comp_total, 100.0 * comp_total / total);
tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- interpreted", int_total, 100.0 * (double)int_total / total_div);
tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- compiled", comp_total, 100.0 * (double)comp_total / total_div);
tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- special methods (interpreted and compiled)",
special_total, 100.0 * special_total/ total);
tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- synchronized",synch_total, 100.0 * synch_total / total);
tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- final", final_total, 100.0 * final_total / total);
tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- static", static_total, 100.0 * static_total / total);
tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- native", native_total, 100.0 * native_total / total);
tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- accessor", access_total, 100.0 * access_total / total);
special_total, 100.0 * (double)special_total/ total_div);
tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- synchronized",synch_total, 100.0 * (double)synch_total / total_div);
tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- final", final_total, 100.0 * (double)final_total / total_div);
tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- static", static_total, 100.0 * (double)static_total / total_div);
tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- native", native_total, 100.0 * (double)native_total / total_div);
tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- accessor", access_total, 100.0 * (double)access_total / total_div);
tty->cr();
SharedRuntime::print_call_statistics(comp_total);
}
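
The histogram lines now cast the uint64_t counters to double before dividing. Under -Wconversion an implicit 64-bit-integer-to-double conversion is reported, since values above 2^53 cannot be represented exactly; the explicit casts (and the single total_div) state that the rounding is intentional. A compile-level sketch:

    #include <cstdint>

    double percent_of(uint64_t part, uint64_t total) {
      // double p = 100.0 * part / total;              // -Wconversion: uint64_t to double
      return 100.0 * (double)part / (double)total;     // explicit, warning-free
    }

    int main() {
      return percent_of(25, 100) == 25.0 ? 0 : 1;
    }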

@@ -77,11 +77,11 @@ class JDK_Version {
static const char* _runtime_vendor_version;
static const char* _runtime_vendor_vm_bug_url;
uint8_t _major;
uint8_t _minor;
uint8_t _security;
uint8_t _patch;
uint8_t _build;
int _major;
int _minor;
int _security;
int _patch;
int _build;
bool is_valid() const {
return (_major != 0);
@@ -96,8 +96,8 @@ class JDK_Version {
_major(0), _minor(0), _security(0), _patch(0), _build(0)
{}
JDK_Version(uint8_t major, uint8_t minor = 0, uint8_t security = 0,
uint8_t patch = 0, uint8_t build = 0) :
JDK_Version(int major, int minor = 0, int security = 0,
int patch = 0, int build = 0) :
_major(major), _minor(minor), _security(security), _patch(patch), _build(build)
{}
@@ -105,7 +105,7 @@ class JDK_Version {
static JDK_Version current() { return _current; }
// Factory methods for convenience
static JDK_Version jdk(uint8_t m) {
static JDK_Version jdk(int m) {
return JDK_Version(m);
}
@@ -117,11 +117,11 @@ class JDK_Version {
return _major == 0;
}
uint8_t major_version() const { return _major; }
uint8_t minor_version() const { return _minor; }
uint8_t security_version() const { return _security; }
uint8_t patch_version() const { return _patch; }
uint8_t build_number() const { return _build; }
int major_version() const { return _major; }
int minor_version() const { return _minor; }
int security_version() const { return _security; }
int patch_version() const { return _patch; }
int build_number() const { return _build; }
// Performs a full ordering comparison using all fields (patch, build, etc.)
int compare(const JDK_Version& other) const;

@@ -875,10 +875,10 @@ void JavaThread::exit(bool destroy_vm, ExitType exit_type) {
// Since above code may not release JNI monitors and if someone forgot to do an
// JNI monitorexit, held count should be equal jni count.
// Consider scan all object monitor for this owner if JNI count > 0 (at least on detach).
assert(this->held_monitor_count() == this->jni_monitor_count(),
"held monitor count should be equal to jni: " INT64_FORMAT " != " INT64_FORMAT,
(int64_t)this->held_monitor_count(), (int64_t)this->jni_monitor_count());
if (CheckJNICalls && this->jni_monitor_count() > 0) {
assert(held_monitor_count() == jni_monitor_count(),
"held monitor count should be equal to jni: " INTX_FORMAT " != " INTX_FORMAT,
held_monitor_count(), jni_monitor_count());
if (CheckJNICalls && jni_monitor_count() > 0) {
// We would like a fatal here, but due to we never checked this before there
// is a lot of tests which breaks, even with an error log.
log_debug(jni)("JavaThread %s (tid: " UINTX_FORMAT ") with Objects still locked by JNI MonitorEnter.",
@@ -1940,24 +1940,24 @@ void JavaThread::trace_stack() {
#endif // PRODUCT
void JavaThread::inc_held_monitor_count(int i, bool jni) {
void JavaThread::inc_held_monitor_count(intx i, bool jni) {
#ifdef SUPPORT_MONITOR_COUNT
assert(_held_monitor_count >= 0, "Must always be greater than 0: " INT64_FORMAT, (int64_t)_held_monitor_count);
assert(_held_monitor_count >= 0, "Must always be greater than 0: " INTX_FORMAT, _held_monitor_count);
_held_monitor_count += i;
if (jni) {
assert(_jni_monitor_count >= 0, "Must always be greater than 0: " INT64_FORMAT, (int64_t)_jni_monitor_count);
assert(_jni_monitor_count >= 0, "Must always be greater than 0: " INTX_FORMAT, _jni_monitor_count);
_jni_monitor_count += i;
}
#endif
}
void JavaThread::dec_held_monitor_count(int i, bool jni) {
void JavaThread::dec_held_monitor_count(intx i, bool jni) {
#ifdef SUPPORT_MONITOR_COUNT
_held_monitor_count -= i;
assert(_held_monitor_count >= 0, "Must always be greater than 0: " INT64_FORMAT, (int64_t)_held_monitor_count);
assert(_held_monitor_count >= 0, "Must always be greater than 0: " INTX_FORMAT, _held_monitor_count);
if (jni) {
_jni_monitor_count -= i;
assert(_jni_monitor_count >= 0, "Must always be greater than 0: " INT64_FORMAT, (int64_t)_jni_monitor_count);
assert(_jni_monitor_count >= 0, "Must always be greater than 0: " INTX_FORMAT, _jni_monitor_count);
}
#endif
}

@@ -450,14 +450,10 @@ class JavaThread: public Thread {
intptr_t* _cont_fastpath; // the sp of the oldest known interpreted/call_stub frame inside the
// continuation that we know about
int _cont_fastpath_thread_state; // whether global thread state allows continuation fastpath (JVMTI)
// It's signed for error detection.
#ifdef _LP64
int64_t _held_monitor_count; // used by continuations for fast lock detection
int64_t _jni_monitor_count;
#else
int32_t _held_monitor_count; // used by continuations for fast lock detection
int32_t _jni_monitor_count;
#endif
intx _held_monitor_count; // used by continuations for fast lock detection
intx _jni_monitor_count;
private:
@@ -599,11 +595,11 @@ private:
bool cont_fastpath() const { return _cont_fastpath == nullptr && _cont_fastpath_thread_state != 0; }
bool cont_fastpath_thread_state() const { return _cont_fastpath_thread_state != 0; }
void inc_held_monitor_count(int i = 1, bool jni = false);
void dec_held_monitor_count(int i = 1, bool jni = false);
void inc_held_monitor_count(intx i = 1, bool jni = false);
void dec_held_monitor_count(intx i = 1, bool jni = false);
int64_t held_monitor_count() { return (int64_t)_held_monitor_count; }
int64_t jni_monitor_count() { return (int64_t)_jni_monitor_count; }
intx held_monitor_count() { return _held_monitor_count; }
intx jni_monitor_count() { return _jni_monitor_count; }
void clear_jni_monitor_count() { _jni_monitor_count = 0; }
inline bool is_vthread_mounted() const;
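
The LP64 #ifdef pair above collapses into intx, HotSpot's pointer-sized signed integer, so the fields, the inc/dec parameters, and INTX_FORMAT in the asserts all agree without casts. A rough standalone model (intx is modeled here as intptr_t, and the format macro is an assumption of this sketch, not HotSpot's definition):

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    typedef intptr_t intx;               // pointer-sized signed integer, as in HotSpot
    #define INTX_FORMAT "%" PRIdPTR      // assumed printf specifier for intx

    struct MonitorCounts {
      intx held_monitor_count = 0;       // 64-bit on LP64, 32-bit otherwise
      intx jni_monitor_count  = 0;

      void inc(intx i, bool jni) {
        held_monitor_count += i;
        if (jni) jni_monitor_count += i;
      }
    };

    int main() {
      MonitorCounts c;
      c.inc(1, /*jni=*/true);
      printf("held=" INTX_FORMAT " jni=" INTX_FORMAT "\n",
             c.held_monitor_count, c.jni_monitor_count);
      return 0;
    }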

@@ -235,7 +235,7 @@ private:
// to the ObjectMonitor reference manipulation code:
//
#define OM_OFFSET_NO_MONITOR_VALUE_TAG(f) \
((in_bytes(ObjectMonitor::f ## _offset())) - markWord::monitor_value)
((in_bytes(ObjectMonitor::f ## _offset())) - checked_cast<int>(markWord::monitor_value))
markWord header() const;
volatile markWord* header_addr();

@@ -136,11 +136,11 @@ char* os::iso8601_time(jlong milliseconds_since_19700101, char* buffer, size_t b
assert(false, "buffer_length too small");
return nullptr;
}
const int milliseconds_per_microsecond = 1000;
const int milliseconds_per_second = 1000;
const time_t seconds_since_19700101 =
milliseconds_since_19700101 / milliseconds_per_microsecond;
milliseconds_since_19700101 / milliseconds_per_second;
const int milliseconds_after_second =
milliseconds_since_19700101 % milliseconds_per_microsecond;
checked_cast<int>(milliseconds_since_19700101 % milliseconds_per_second);
// Convert the time value to a tm and timezone variable
struct tm time_struct;
if (utc) {

@@ -295,7 +295,7 @@ void Relocator::change_jump(int bci, int offset, bool is_short, int break_bci, i
if (is_short && ((new_delta > MAX_SHORT) || new_delta < MIN_SHORT)) {
push_jump_widen(bci, delta, new_delta);
} else if (is_short) {
short_at_put(offset, new_delta);
short_at_put(offset, checked_cast<short>(new_delta));
} else {
int_at_put(offset, new_delta);
}
@@ -397,13 +397,13 @@ void Relocator::adjust_exception_table(int bci, int delta) {
ExceptionTable table(_method());
for (int index = 0; index < table.length(); index ++) {
if (table.start_pc(index) > bci) {
table.set_start_pc(index, table.start_pc(index) + delta);
table.set_end_pc(index, table.end_pc(index) + delta);
table.set_start_pc(index, checked_cast<u2>(table.start_pc(index) + delta));
table.set_end_pc(index, checked_cast<u2>(table.end_pc(index) + delta));
} else if (bci < table.end_pc(index)) {
table.set_end_pc(index, table.end_pc(index) + delta);
table.set_end_pc(index, checked_cast<u2>(table.end_pc(index) + delta));
}
if (table.handler_pc(index) > bci)
table.set_handler_pc(index, table.handler_pc(index) + delta);
table.set_handler_pc(index, checked_cast<u2>(table.handler_pc(index) + delta));
}
}
@@ -449,11 +449,11 @@ void Relocator::adjust_local_var_table(int bci, int delta) {
for (int i = 0; i < localvariable_table_length; i++) {
u2 current_bci = table[i].start_bci;
if (current_bci > bci) {
table[i].start_bci = current_bci + delta;
table[i].start_bci = checked_cast<u2>(current_bci + delta);
} else {
u2 current_length = table[i].length;
if (current_bci + current_length > bci) {
table[i].length = current_length + delta;
table[i].length = checked_cast<u2>(current_length + delta);
}
}
}
@@ -531,7 +531,7 @@ void Relocator::adjust_stack_map_table(int bci, int delta) {
// Now convert the frames in place
if (frame->is_same_frame()) {
same_frame_extended::create_at(frame_addr, new_offset_delta);
same_frame_extended::create_at(frame_addr, checked_cast<u2>(new_offset_delta));
} else {
same_locals_1_stack_item_extended::create_at(
frame_addr, new_offset_delta, nullptr);
@@ -549,7 +549,7 @@ void Relocator::adjust_stack_map_table(int bci, int delta) {
for (int i = 0; i < number_of_types; ++i) {
if (types->is_uninitialized() && types->bci() > bci) {
types->set_bci(types->bci() + delta);
types->set_bci(checked_cast<u2>(types->bci() + delta));
}
types = types->next();
}
@@ -562,7 +562,7 @@ void Relocator::adjust_stack_map_table(int bci, int delta) {
types = ff->stack(eol);
for (int i = 0; i < number_of_types; ++i) {
if (types->is_uninitialized() && types->bci() > bci) {
types->set_bci(types->bci() + delta);
types->set_bci(checked_cast<u2>(types->bci() + delta));
}
types = types->next();
}
@@ -632,6 +632,7 @@ bool Relocator::relocate_code(int bci, int ilen, int delta) {
memmove(addr_at(next_bci + delta), addr_at(next_bci), code_length() - next_bci);
set_code_length(code_length() + delta);
// Also adjust exception tables...
adjust_exception_table(bci, delta);
// Line number tables...
@@ -707,12 +708,12 @@ bool Relocator::handle_jump_widen(int bci, int delta) {
if (!relocate_code(bci, 3, /*delta*/add_bci)) return false;
// if bytecode points to goto_w instruction
short_at_put(bci + 1, ilen + goto_length);
short_at_put(bci + 1, checked_cast<short>(ilen + goto_length));
int cbci = bci + ilen;
// goto around
code_at_put(cbci, Bytecodes::_goto);
short_at_put(cbci + 1, add_bci);
short_at_put(cbci + 1, checked_cast<short>(add_bci));
// goto_w <wide delta>
cbci = cbci + goto_length;
code_at_put(cbci, Bytecodes::_goto_w);

@@ -95,7 +95,7 @@ static void post_safepoint_synchronize_event(EventSafepointStateSynchronization&
uint64_t safepoint_id,
int initial_number_of_threads,
int threads_waiting_to_block,
uint64_t iterations) {
int iterations) {
if (event.should_commit()) {
event.set_safepointId(safepoint_id);
event.set_initialThreadCount(initial_number_of_threads);

@@ -685,7 +685,7 @@ address SharedRuntime::compute_compiled_exc_handler(CompiledMethod* cm, address
#if INCLUDE_JVMCI
if (cm->is_compiled_by_jvmci()) {
// lookup exception handler for this pc
int catch_pco = ret_pc - cm->code_begin();
int catch_pco = pointer_delta_as_int(ret_pc, cm->code_begin());
ExceptionHandlerTable table(cm);
HandlerTableEntry *t = table.entry_for(catch_pco, -1, 0);
if (t != nullptr) {
@@ -744,7 +744,7 @@ address SharedRuntime::compute_compiled_exc_handler(CompiledMethod* cm, address
}
// found handling method => lookup exception handler
int catch_pco = ret_pc - nm->code_begin();
int catch_pco = pointer_delta_as_int(ret_pc, nm->code_begin());
ExceptionHandlerTable table(nm);
HandlerTableEntry *t = table.entry_for(catch_pco, handler_bci, scope_depth);
@@ -2309,7 +2309,7 @@ void SharedRuntime::print_statistics() {
}
inline double percent(int64_t x, int64_t y) {
return 100.0 * x / MAX2(y, (int64_t)1);
return 100.0 * (double)x / (double)MAX2(y, (int64_t)1);
}
class MethodArityHistogram {
@@ -2345,13 +2345,13 @@ class MethodArityHistogram {
const int N = MIN2(9, n);
double sum = 0;
double weighted_sum = 0;
for (int i = 0; i <= n; i++) { sum += histo[i]; weighted_sum += i*histo[i]; }
if (sum >= 1.0) { // prevent divide by zero or divide overflow
for (int i = 0; i <= n; i++) { sum += (double)histo[i]; weighted_sum += (double)(i*histo[i]); }
if (sum >= 1) { // prevent divide by zero or divide overflow
double rest = sum;
double percent = sum / 100;
for (int i = 0; i <= N; i++) {
rest -= histo[i];
tty->print_cr("%4d: " UINT64_FORMAT_W(12) " (%5.1f%%)", i, histo[i], histo[i] / percent);
rest -= (double)histo[i];
tty->print_cr("%4d: " UINT64_FORMAT_W(12) " (%5.1f%%)", i, histo[i], (double)histo[i] / percent);
}
tty->print_cr("rest: " INT64_FORMAT_W(12) " (%5.1f%%)", (int64_t)rest, rest / percent);
tty->print_cr("(avg. %s = %3.1f, max = %d)", name, weighted_sum / sum, n);

@@ -334,7 +334,7 @@ inline int SignatureStream::scan_type(BasicType type) {
switch (type) {
case T_OBJECT:
tem = (const u1*) memchr(&base[end], JVM_SIGNATURE_ENDCLASS, limit - end);
return (tem == nullptr ? limit : tem + 1 - base);
return (tem == nullptr ? limit : pointer_delta_as_int(tem + 1, base));
case T_ARRAY:
while ((end < limit) && ((char)base[end] == JVM_SIGNATURE_ARRAY)) { end++; }
@@ -346,7 +346,7 @@ inline int SignatureStream::scan_type(BasicType type) {
_array_prefix = end - _end; // number of '[' chars just skipped
if (Signature::has_envelope(base[end])) {
tem = (const u1 *) memchr(&base[end], JVM_SIGNATURE_ENDCLASS, limit - end);
return (tem == nullptr ? limit : tem + 1 - base);
return (tem == nullptr ? limit : pointer_delta_as_int(tem + 1, base));
}
// Skipping over a single character for a primitive type.
assert(is_java_primitive(decode_signature_char(base[end])), "only primitives expected");

@@ -1169,8 +1169,8 @@ static bool monitors_used_above_threshold(MonitorList* list) {
}
if (NoAsyncDeflationProgressMax != 0 &&
_no_progress_cnt >= NoAsyncDeflationProgressMax) {
float remainder = (100.0 - MonitorUsedDeflationThreshold) / 100.0;
size_t new_ceiling = ceiling + (ceiling * remainder) + 1;
double remainder = (100.0 - MonitorUsedDeflationThreshold) / 100.0;
size_t new_ceiling = ceiling + (size_t)((double)ceiling * remainder) + 1;
ObjectSynchronizer::set_in_use_list_ceiling(new_ceiling);
log_info(monitorinflation)("Too many deflations without progress; "
"bumping in_use_list_ceiling from " SIZE_FORMAT
@@ -1183,7 +1183,7 @@ static bool monitors_used_above_threshold(MonitorList* list) {
size_t monitor_usage = (monitors_used * 100LL) / ceiling;
if (int(monitor_usage) > MonitorUsedDeflationThreshold) {
log_info(monitorinflation)("monitors_used=" SIZE_FORMAT ", ceiling=" SIZE_FORMAT
", monitor_usage=" SIZE_FORMAT ", threshold=" INTX_FORMAT,
", monitor_usage=" SIZE_FORMAT ", threshold=%d",
monitors_used, ceiling, monitor_usage, MonitorUsedDeflationThreshold);
return true;
}
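
A worked example of the no-progress ceiling bump above, assuming the default MonitorUsedDeflationThreshold of 90 and an illustrative ceiling of 10000 (remainder is 0.10, so the ceiling grows by about 10% plus one):

    #include <cstddef>
    #include <cstdio>

    int main() {
      int MonitorUsedDeflationThreshold = 90;      // default from globals.hpp
      size_t ceiling = 10000;                      // illustrative value
      double remainder = (100.0 - MonitorUsedDeflationThreshold) / 100.0;    // 0.10
      size_t new_ceiling = ceiling + (size_t)((double)ceiling * remainder) + 1;
      printf("new_ceiling = %zu\n", new_ceiling);  // prints 11001
      return 0;
    }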

@@ -450,10 +450,10 @@ void Thread::print_on(outputStream* st, bool print_extended_info) const {
}
st->print("cpu=%.2fms ",
os::thread_cpu_time(const_cast<Thread*>(this), true) / 1000000.0
(double)os::thread_cpu_time(const_cast<Thread*>(this), true) / 1000000.0
);
st->print("elapsed=%.2fs ",
_statistical_info.getElapsedTime() / 1000.0
(double)_statistical_info.getElapsedTime() / 1000.0
);
if (is_Java_thread() && (PrintExtendedThreadInfo || print_extended_info)) {
size_t allocated_bytes = (size_t) const_cast<Thread*>(this)->cooked_allocated_bytes();

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, Google and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -356,7 +356,7 @@ double ThreadHeapSampler::fast_log2(const double& d) {
assert(sizeof(d) == sizeof(x),
"double and uint64_t do not have the same size");
x = *reinterpret_cast<const uint64_t*>(&d);
const uint32_t x_high = x >> 32;
const uint32_t x_high = checked_cast<uint32_t>(x >> 32);
assert(FastLogNumBits <= 20, "FastLogNumBits should be less than 20.");
const uint32_t y = x_high >> (20 - FastLogNumBits) & FastLogMask;
const int32_t exponent = ((x_high >> 20) & 0x7FF) - 1023;

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,7 +31,7 @@
double TimeHelper::counter_to_seconds(jlong counter) {
double freq = (double) os::elapsed_frequency();
return counter / freq;
return (double)counter / freq;
}
double TimeHelper::counter_to_millis(jlong counter) {

@@ -116,8 +116,8 @@ class NativeHeapTrimmerThread : public NamedThread {
ml.wait(0); // infinite
} else if (next_trim_time > tnow) {
times_waited ++;
const int64_t wait_ms = MAX2(1.0, to_ms(next_trim_time - tnow));
ml.wait(wait_ms);
const double wait_ms = MAX2(1.0, to_ms(next_trim_time - tnow));
ml.wait((int64_t)wait_ms);
} else if (at_or_nearing_safepoint()) {
times_safepoint ++;
const int64_t wait_ms = MIN2<int64_t>(TrimNativeHeapInterval, safepoint_poll_ms);

@@ -947,9 +947,6 @@
static_field(Abstract_VM_Version, _vm_security_version, int) \
static_field(Abstract_VM_Version, _vm_build_number, int) \
\
static_field(JDK_Version, _current, JDK_Version) \
nonstatic_field(JDK_Version, _major, unsigned char) \
\
/*************************/ \
/* JVMTI */ \
/*************************/ \
@@ -1873,7 +1870,6 @@
/********************/ \
\
declare_toplevel_type(Abstract_VM_Version) \
declare_toplevel_type(JDK_Version) \
\
/*************/ \
/* Arguments */ \
@@ -3037,7 +3033,7 @@ static int recursiveFindType(VMTypeEntry* origtypes, const char* typeName, bool
}
if (start != nullptr) {
const char * end = strrchr(typeName, '>');
int len = end - start + 1;
int len = pointer_delta_as_int(end, start) + 1;
char * s = NEW_C_HEAP_ARRAY(char, len, mtInternal);
strncpy(s, start, len - 1);
s[len-1] = '\0';

@@ -234,13 +234,13 @@ jint dump_heap(AttachOperation* op, outputStream* out) {
}
const char* num_str = op->arg(2);
uintx level = 0;
uint level = 0;
if (num_str != nullptr && num_str[0] != '\0') {
if (!Arguments::parse_uintx(num_str, &level, 0)) {
if (!Arguments::parse_uint(num_str, &level, 0)) {
out->print_cr("Invalid compress level: [%s]", num_str);
return JNI_ERR;
} else if (level < 1 || level > 9) {
out->print_cr("Compression level out of range (1-9): " UINTX_FORMAT, level);
out->print_cr("Compression level out of range (1-9): %u", level);
return JNI_ERR;
}
}
@@ -249,7 +249,7 @@ jint dump_heap(AttachOperation* op, outputStream* out) {
// This helps reduces the amount of unreachable objects in the dump
// and makes it easier to browse.
HeapDumper dumper(live_objects_only /* request GC */);
dumper.dump(path, out, (int)level, false, HeapDumper::default_num_of_dump_threads());
dumper.dump(path, out, level, false, HeapDumper::default_num_of_dump_threads());
}
return JNI_OK;
}
@@ -287,13 +287,13 @@ static jint heap_inspection(AttachOperation* op, outputStream* out) {
const char* num_str = op->arg(2);
if (num_str != nullptr && num_str[0] != '\0') {
uintx num;
if (!Arguments::parse_uintx(num_str, &num, 0)) {
uint num;
if (!Arguments::parse_uint(num_str, &num, 0)) {
out->print_cr("Invalid parallel thread number: [%s]", num_str);
delete fs;
return JNI_ERR;
}
parallel_thread_num = num == 0 ? parallel_thread_num : (uint)num;
parallel_thread_num = num == 0 ? parallel_thread_num : num;
}
VM_GC_HeapInspection heapop(os, live_objects_only /* request full gc */, parallel_thread_num);

@@ -144,10 +144,10 @@ TEST(os, test_random) {
ASSERT_EQ(num, 1043618065) << "bad seed";
// tty->print_cr("mean of the 1st 10000 numbers: %f", mean);
int intmean = mean*100;
int intmean = (int)(mean*100);
ASSERT_EQ(intmean, 50);
// tty->print_cr("variance of the 1st 10000 numbers: %f", variance);
int intvariance = variance*100;
int intvariance = (int)(variance*100);
ASSERT_EQ(intvariance, 33);
const double eps = 0.0001;
t = fabsd(mean - 0.5018);
@@ -223,7 +223,7 @@ TEST_VM(os, test_print_hex_dump) {
// Test dumping readable memory
address arr = (address)os::malloc(100, mtInternal);
for (int c = 0; c < 100; c++) {
for (u1 c = 0; c < 100; c++) {
arr[c] = c;
}

@@ -30,8 +30,8 @@ typedef void (*arraycopy_fn)(address src, address dst, int count);
// simple tests of generated arraycopy functions
static void test_arraycopy_func(address func, int alignment) {
int v = 0xcc;
int v2 = 0x11;
u_char v = 0xcc;
u_char v2 = 0x11;
jlong lbuffer[8];
jlong lbuffer2[8];
address fbuffer = (address) lbuffer;