diff --git a/src/hotspot/share/oops/methodData.cpp b/src/hotspot/share/oops/methodData.cpp index 80d1cf10dd9..5a2a1e22843 100644 --- a/src/hotspot/share/oops/methodData.cpp +++ b/src/hotspot/share/oops/methodData.cpp @@ -480,7 +480,7 @@ address RetData::fixup_ret(int return_bci, MethodData* h_mdo) { // Now check to see if any of the cache slots are open. for (uint row = 0; row < row_limit(); row++) { if (bci(row) == no_bci) { - set_bci_displacement(row, mdp - dp()); + set_bci_displacement(row, checked_cast<int>(mdp - dp())); set_bci_count(row, DataLayout::counter_increment); // Barrier to ensure displacement is written before the bci; allows // the interpreter to read displacement without fear of race condition. @@ -987,7 +987,7 @@ int MethodData::initialize_data(BytecodeStream* stream, return 0; } int cell_count = -1; - int tag = DataLayout::no_tag; + u1 tag = DataLayout::no_tag; DataLayout* data_layout = data_layout_at(data_index); Bytecodes::Code c = stream->code(); switch (c) { @@ -1098,7 +1098,7 @@ int MethodData::initialize_data(BytecodeStream* stream, if (cell_count >= 0) { assert(tag != DataLayout::no_tag, "bad tag"); assert(bytecode_has_profile(c), "agree w/ BHP"); - data_layout->initialize(tag, stream->bci(), cell_count); + data_layout->initialize(tag, checked_cast<u2>(stream->bci()), cell_count); return DataLayout::compute_size_in_bytes(cell_count); } else { assert(!bytecode_has_profile(c), "agree w/ !BHP"); @@ -1310,8 +1310,8 @@ void MethodData::init() { double scale = 1.0; methodHandle mh(Thread::current(), _method); CompilerOracle::has_option_value(mh, CompileCommand::CompileThresholdScaling, scale); - _invoke_mask = right_n_bits(CompilerConfig::scaled_freq_log(Tier0InvokeNotifyFreqLog, scale)) << InvocationCounter::count_shift; - _backedge_mask = right_n_bits(CompilerConfig::scaled_freq_log(Tier0BackedgeNotifyFreqLog, scale)) << InvocationCounter::count_shift; + _invoke_mask = (int)right_n_bits(CompilerConfig::scaled_freq_log(Tier0InvokeNotifyFreqLog, scale)) << InvocationCounter::count_shift; + _backedge_mask = (int)right_n_bits(CompilerConfig::scaled_freq_log(Tier0BackedgeNotifyFreqLog, scale)) << InvocationCounter::count_shift; _tenure_traps = 0; _num_loops = 0; @@ -1479,7 +1479,7 @@ ProfileData* MethodData::bci_to_extra_data(int bci, Method* m, bool create_if_mi return nullptr; } DataLayout temp; - temp.initialize(tag, bci, 0); + temp.initialize(tag, checked_cast<u2>(bci), 0); dp->set_header(temp.header()); assert(dp->tag() == tag, "sane"); diff --git a/src/hotspot/share/oops/methodData.hpp b/src/hotspot/share/oops/methodData.hpp index 3384df3788d..122a51b111f 100644 --- a/src/hotspot/share/oops/methodData.hpp +++ b/src/hotspot/share/oops/methodData.hpp @@ -114,7 +114,7 @@ public: }; // Tag values - enum { + enum : u1 { no_tag, bit_data_tag, counter_data_tag, @@ -204,7 +204,7 @@ public: } void set_flag_at(u1 flag_number) { - _header._struct._flags |= (0x1 << flag_number); + _header._struct._flags |= (u1)(0x1 << flag_number); } bool flag_at(u1 flag_number) const { return (_header._struct._flags & (0x1 << flag_number)) != 0; @@ -233,7 +233,7 @@ public: return temp._header._struct._flags; } // Return a value which, when or-ed as a word into _header, sets the flag. 
- static u8 flag_mask_to_header_mask(uint byte_constant) { + static u8 flag_mask_to_header_mask(u1 byte_constant) { DataLayout temp; temp.set_header(0); temp._header._struct._flags = byte_constant; return temp._header._bits; @@ -344,10 +344,10 @@ protected: return cast_to_oop(intptr_at(index)); } - void set_flag_at(int flag_number) { + void set_flag_at(u1 flag_number) { data()->set_flag_at(flag_number); } - bool flag_at(int flag_number) const { + bool flag_at(u1 flag_number) const { return data()->flag_at(flag_number); } @@ -355,7 +355,7 @@ protected: static ByteSize cell_offset(int index) { return DataLayout::cell_offset(index); } - static int flag_number_to_constant(int flag_number) { + static u1 flag_number_to_constant(u1 flag_number) { return DataLayout::flag_number_to_constant(flag_number); } @@ -487,7 +487,7 @@ class BitData : public ProfileData { friend class VMStructs; friend class JVMCIVMStructs; protected: - enum { + enum : u1 { // null_seen: // saw a null operand (cast/aastore/instanceof) null_seen_flag = DataLayout::first_flag + 0 @@ -525,7 +525,7 @@ public: #endif // Code generation support - static int null_seen_byte_constant() { + static u1 null_seen_byte_constant() { return flag_number_to_constant(null_seen_flag); } @@ -1126,7 +1126,7 @@ public: // Direct accessors static uint row_limit() { - return TypeProfileWidth; + return (uint) TypeProfileWidth; } static int receiver_cell_index(uint row) { return receiver0_offset + row * receiver_type_row_cell_count; @@ -1430,7 +1430,7 @@ public: } static uint row_limit() { - return BciProfileWidth; + return (uint) BciProfileWidth; } static int bci_cell_index(uint row) { return bci0_offset + row * ret_row_cell_count; @@ -2010,7 +2010,7 @@ public: assert((uint)reason < ARRAY_SIZE(_trap_hist._array), "oob"); uint cnt1 = 1 + _trap_hist._array[reason]; if ((cnt1 & _trap_hist_mask) != 0) { // if no counter overflow... 
- _trap_hist._array[reason] = cnt1; + _trap_hist._array[reason] = (u1)cnt1; return cnt1; } else { return _trap_hist_mask + (++_nof_overflow_traps); @@ -2262,9 +2262,9 @@ public: bool would_profile() const { return _would_profile != no_profile; } int num_loops() const { return _num_loops; } - void set_num_loops(int n) { _num_loops = n; } + void set_num_loops(short n) { _num_loops = n; } int num_blocks() const { return _num_blocks; } - void set_num_blocks(int n) { _num_blocks = n; } + void set_num_blocks(short n) { _num_blocks = n; } bool is_mature() const; // consult mileage and ProfileMaturityPercentage static int mileage_of(Method* m); @@ -2326,7 +2326,7 @@ public: // Convert a dp (data pointer) to a di (data index). int dp_to_di(address dp) const { - return dp - ((address)_data); + return (int)(dp - ((address)_data)); } // bci to di/dp conversion. @@ -2366,7 +2366,7 @@ public: DataLayout* extra_data_limit() const { return (DataLayout*)((address)this + size_in_bytes()); } DataLayout* args_data_limit() const { return (DataLayout*)((address)this + size_in_bytes() - parameters_size_in_bytes()); } - int extra_data_size() const { return (address)extra_data_limit() - (address)extra_data_base(); } + int extra_data_size() const { return (int)((address)extra_data_limit() - (address)extra_data_base()); } static DataLayout* next_extra(DataLayout* dp); // Return (uint)-1 for overflow. 
diff --git a/src/hotspot/share/runtime/globals.hpp b/src/hotspot/share/runtime/globals.hpp index 180ba7f337b..e6518c9e635 100644 --- a/src/hotspot/share/runtime/globals.hpp +++ b/src/hotspot/share/runtime/globals.hpp @@ -1110,11 +1110,11 @@ const int ObjectAlignmentInBytes = 8; "X, Y and Z in 0=off ; 1=jsr292 only; 2=all methods") \ constraint(TypeProfileLevelConstraintFunc, AfterErgo) \ \ - product(intx, TypeProfileArgsLimit, 2, \ + product(int, TypeProfileArgsLimit, 2, \ "max number of call arguments to consider for type profiling") \ range(0, 16) \ \ - product(intx, TypeProfileParmsLimit, 2, \ + product(int, TypeProfileParmsLimit, 2, \ "max number of incoming parameters to consider for type profiling"\ ", -1 for all") \ range(-1, 64) \ @@ -1377,7 +1377,7 @@ const int ObjectAlignmentInBytes = 8; "Limit on traps (of one kind) at a particular BCI") \ range(0, max_jint) \ \ - product(intx, SpecTrapLimitExtraEntries, 3, EXPERIMENTAL, \ + product(int, SpecTrapLimitExtraEntries, 3, EXPERIMENTAL, \ "Extra method data trap entries for speculation") \ \ product(double, InlineFrequencyRatio, 0.25, DIAGNOSTIC, \