8310948: Fix ignored-qualifiers warning in Hotspot

Reviewed-by: kbarrett, dholmes
Author: Daniel Jeliński
Date:   2023-07-03 07:51:38 +00:00
Commit: 055b4b426c
Parent: 2c29705d7b
74 changed files with 223 additions and 223 deletions

@@ -84,11 +84,11 @@ CFLAGS_VM_VERSION := \
# Disabled warnings
DISABLED_WARNINGS_gcc := array-bounds comment delete-non-virtual-dtor \
empty-body ignored-qualifiers implicit-fallthrough int-in-bool-context \
empty-body implicit-fallthrough int-in-bool-context \
maybe-uninitialized missing-field-initializers parentheses \
shift-negative-value unknown-pragmas
DISABLED_WARNINGS_clang := ignored-qualifiers sometimes-uninitialized \
DISABLED_WARNINGS_clang := sometimes-uninitialized \
missing-braces delete-non-abstract-non-virtual-dtor unknown-pragmas
ifneq ($(DEBUG_LEVEL), release)
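
Note: with ignored-qualifiers removed from the disabled-warnings lists above, GCC and Clang now diagnose qualifiers that have no effect, and HotSpot's default warnings-as-errors build will reject them. A minimal sketch of what the warning catches (illustrative only; not code from this patch):

// g++ -Wignored-qualifiers -c demo.cpp
// The 'const' below is meaningless: the caller receives a prvalue copy,
// so nothing it does can be constrained by qualifying the return type.
const int answer() { return 42; }  // warning: type qualifiers ignored on function return type
int answer2() { return 42; }       // equivalent for every caller, no warning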

@@ -2285,7 +2285,7 @@ int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
//=============================================================================
const bool Matcher::match_rule_supported(int opcode) {
bool Matcher::match_rule_supported(int opcode) {
if (!has_match_rule(opcode))
return false;
@@ -2320,7 +2320,7 @@ const TypeVectMask* Matcher::predicate_reg_type(const Type* elemTy, int length)
}
// Vector calling convention not yet implemented.
const bool Matcher::supports_vector_calling_convention(void) {
bool Matcher::supports_vector_calling_convention(void) {
return false;
}
@@ -2340,7 +2340,7 @@ bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
}
// Vector width in bytes.
const int Matcher::vector_width_in_bytes(BasicType bt) {
int Matcher::vector_width_in_bytes(BasicType bt) {
// The MaxVectorSize should have been set by detecting SVE max vector register size.
int size = MIN2((UseSVE > 0) ? 256 : 16, (int)MaxVectorSize);
// Minimum 2 values in vector
@@ -2351,11 +2351,11 @@ const int Matcher::vector_width_in_bytes(BasicType bt) {
}
// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
int Matcher::max_vector_size(const BasicType bt) {
return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
const int Matcher::min_vector_size(const BasicType bt) {
int Matcher::min_vector_size(const BasicType bt) {
int max_size = max_vector_size(bt);
// Limit the min vector size to 8 bytes.
int size = 8 / type2aelembytes(bt);
@@ -2370,17 +2370,17 @@ const int Matcher::min_vector_size(const BasicType bt) {
return MIN2(size, max_size);
}
const int Matcher::superword_max_vector_size(const BasicType bt) {
int Matcher::superword_max_vector_size(const BasicType bt) {
return Matcher::max_vector_size(bt);
}
// Actual max scalable vector register length.
const int Matcher::scalable_vector_reg_size(const BasicType bt) {
int Matcher::scalable_vector_reg_size(const BasicType bt) {
return Matcher::max_vector_size(bt);
}
// Vector ideal reg.
const uint Matcher::vector_ideal_reg(int len) {
uint Matcher::vector_ideal_reg(int len) {
if (UseSVE > 0 && 16 < len && len <= 256) {
return Op_VecA;
}
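
All of the Matcher changes in this file follow one shape: a scalar (bool, int, uint) returned by value loses its const. A hedged sketch (hypothetical function name) of why this is behavior-preserving for callers:

// Whether declared 'const int' or plain 'int', the call expression below
// yields a non-const prvalue either way; for non-class return types the
// qualifier buys nothing and only triggers -Wignored-qualifiers.
int vector_width() { return 16; }    // previously spelled: const int vector_width()
int doubled = vector_width() * 2;    // call sites compile unchanged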

@@ -125,7 +125,7 @@ source %{
}
}
const bool Matcher::match_rule_supported_superword(int opcode, int vlen, BasicType bt) {
bool Matcher::match_rule_supported_superword(int opcode, int vlen, BasicType bt) {
if (UseSVE == 0) {
// These operations are not profitable to be vectorized on NEON, because no direct
// NEON instructions support them. But the match rule support for them is profitable for
@@ -148,7 +148,7 @@ source %{
// Identify extra cases that we might want to provide match rules for vector nodes and
// other intrinsics guarded with vector length (vlen) and element type (bt).
const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
if (!match_rule_supported(opcode)) {
return false;
}
@@ -232,7 +232,7 @@ source %{
return vector_size_supported(bt, vlen);
}
const bool Matcher::match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt) {
bool Matcher::match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt) {
// Only SVE supports masked operations.
if (UseSVE == 0) {
return false;
@@ -271,7 +271,7 @@ source %{
return match_rule_supported_vector(opcode, vlen, bt);
}
const bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) {
bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) {
// Only SVE has partial vector operations
if (UseSVE == 0) {
return false;

@@ -115,7 +115,7 @@ source %{
}
}
const bool Matcher::match_rule_supported_superword(int opcode, int vlen, BasicType bt) {
bool Matcher::match_rule_supported_superword(int opcode, int vlen, BasicType bt) {
if (UseSVE == 0) {
// These operations are not profitable to be vectorized on NEON, because no direct
// NEON instructions support them. But the match rule support for them is profitable for
@@ -138,7 +138,7 @@ source %{
// Identify extra cases that we might want to provide match rules for vector nodes and
// other intrinsics guarded with vector length (vlen) and element type (bt).
const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
if (!match_rule_supported(opcode)) {
return false;
}
@@ -222,7 +222,7 @@ source %{
return vector_size_supported(bt, vlen);
}
const bool Matcher::match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt) {
bool Matcher::match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt) {
// Only SVE supports masked operations.
if (UseSVE == 0) {
return false;
@@ -261,7 +261,7 @@ source %{
return match_rule_supported_vector(opcode, vlen, bt);
}
const bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) {
bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) {
// Only SVE has partial vector operations
if (UseSVE == 0) {
return false;

@@ -4211,7 +4211,7 @@ Instruction_aarch64::~Instruction_aarch64() {
#undef starti
// Invert a condition
inline const Assembler::Condition operator~(const Assembler::Condition cond) {
inline Assembler::Condition operator~(const Assembler::Condition cond) {
return Assembler::Condition(int(cond) ^ 1);
}
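
The same applies to enums returned by value, as in the operator~ change above. A small sketch mirroring it with a stand-in enum (names are illustrative):

enum Cond { EQ = 0, NE = 1 };
// inline const Cond operator~(Cond c) { return Cond(int(c) ^ 1); }  // warns: qualifier ignored
inline Cond operator~(Cond c) { return Cond(int(c) ^ 1); }           // fixed form used by the patch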

@@ -33,7 +33,7 @@
// Whether this platform implements the scalable vector feature
static const bool implements_scalable_vector = true;
static const bool supports_scalable_vector() {
static bool supports_scalable_vector() {
return UseSVE > 0;
}
@@ -144,12 +144,12 @@
}
// Does the CPU supports vector unsigned comparison instructions?
static const bool supports_vector_comparison_unsigned(int vlen, BasicType bt) {
static constexpr bool supports_vector_comparison_unsigned(int vlen, BasicType bt) {
return true;
}
// Some microarchitectures have mask registers used on vectors
static const bool has_predicated_vectors(void) {
static bool has_predicated_vectors(void) {
return UseSVE > 0;
}
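
Two distinct fixes appear in this hunk. Where the result depends on a runtime flag (UseSVE), the qualifier is simply dropped; where the result is a compile-time constant, the declaration becomes constexpr, which qualifies the function itself rather than the ignored return type. A sketch with hypothetical names:

struct MatcherSketch {
  // Runtime-dependent answer: a plain bool return is all that is needed.
  static bool has_predicated();                               // was: static const bool ...
  // Compile-time constant answer: constexpr keeps the intent, no warning.
  static constexpr bool supports_unsigned_cmp(int) { return true; }
};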

@@ -947,7 +947,7 @@ int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
return offset;
}
const bool Matcher::match_rule_supported(int opcode) {
bool Matcher::match_rule_supported(int opcode) {
if (!has_match_rule(opcode))
return false;
@@ -1002,11 +1002,11 @@ const bool Matcher::match_rule_supported(int opcode) {
return true; // Per default match rules are supported.
}
const bool Matcher::match_rule_supported_superword(int opcode, int vlen, BasicType bt) {
bool Matcher::match_rule_supported_superword(int opcode, int vlen, BasicType bt) {
return match_rule_supported_vector(opcode, vlen, bt);
}
const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
// TODO
// identify extra cases that we might want to provide match rules for
@@ -1017,11 +1017,11 @@ const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType
return ret_value; // Per default match rules are supported.
}
const bool Matcher::match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt) {
bool Matcher::match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt) {
return false;
}
const bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) {
bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) {
return false;
}
@@ -1034,7 +1034,7 @@ const TypeVectMask* Matcher::predicate_reg_type(const Type* elemTy, int length)
}
// Vector calling convention not yet implemented.
const bool Matcher::supports_vector_calling_convention(void) {
bool Matcher::supports_vector_calling_convention(void) {
return false;
}
@@ -1044,16 +1044,16 @@ OptoRegPair Matcher::vector_return_value(uint ideal_reg) {
}
// Vector width in bytes
const int Matcher::vector_width_in_bytes(BasicType bt) {
int Matcher::vector_width_in_bytes(BasicType bt) {
return MaxVectorSize;
}
const int Matcher::scalable_vector_reg_size(const BasicType bt) {
int Matcher::scalable_vector_reg_size(const BasicType bt) {
return -1;
}
// Vector ideal reg corresponding to specified size in bytes
const uint Matcher::vector_ideal_reg(int size) {
uint Matcher::vector_ideal_reg(int size) {
assert(MaxVectorSize >= size, "");
switch(size) {
case 8: return Op_VecD;
@@ -1064,17 +1064,17 @@ const uint Matcher::vector_ideal_reg(int size) {
}
// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
int Matcher::max_vector_size(const BasicType bt) {
assert(is_java_primitive(bt), "only primitive type vectors");
return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
const int Matcher::min_vector_size(const BasicType bt) {
int Matcher::min_vector_size(const BasicType bt) {
assert(is_java_primitive(bt), "only primitive type vectors");
return 8/type2aelembytes(bt);
}
const int Matcher::superword_max_vector_size(const BasicType bt) {
int Matcher::superword_max_vector_size(const BasicType bt) {
return Matcher::max_vector_size(bt);
}

@@ -245,7 +245,7 @@ class Address {
bool uses(Register reg) const { return _base == reg || _index == reg; }
const relocInfo::relocType rtype() { return _rspec.type(); }
relocInfo::relocType rtype() { return _rspec.type(); }
const RelocationHolder& rspec() { return _rspec; }
// Convert the raw encoding form into the form expected by the

@@ -39,7 +39,7 @@
// PPC implementation uses VSX load/store instructions (if
// SuperwordUseVSX) which support 4 byte but not arbitrary alignment
static const bool misaligned_vectors_ok() {
static constexpr bool misaligned_vectors_ok() {
return false;
}
@@ -155,7 +155,7 @@
// true means we have fast l2f conversion
// false means that conversion is done by runtime call
static const bool convL2FSupported(void) {
static bool convL2FSupported(void) {
// fcfids can do the conversion (>= Power7).
// fcfid + frsp showed rounding problem when result should be 0x3f800001.
return VM_Version::has_fcfids();

@@ -2100,7 +2100,7 @@ static int frame_slots_bias(int reg_enc, PhaseRegAlloc* ra_) {
return 0;
}
const bool Matcher::match_rule_supported(int opcode) {
bool Matcher::match_rule_supported(int opcode) {
if (!has_match_rule(opcode)) {
return false; // no match rule present
}
@@ -2170,22 +2170,22 @@ const bool Matcher::match_rule_supported(int opcode) {
return true; // Per default match rules are supported.
}
const bool Matcher::match_rule_supported_superword(int opcode, int vlen, BasicType bt) {
bool Matcher::match_rule_supported_superword(int opcode, int vlen, BasicType bt) {
return match_rule_supported_vector(opcode, vlen, bt);
}
const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
if (!match_rule_supported(opcode) || !vector_size_supported(bt, vlen)) {
return false;
}
return true; // Per default match rules are supported.
}
const bool Matcher::match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt) {
bool Matcher::match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt) {
return false;
}
const bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) {
bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) {
return false;
}
@@ -2198,7 +2198,7 @@ const TypeVectMask* Matcher::predicate_reg_type(const Type* elemTy, int length)
}
// Vector calling convention not yet implemented.
const bool Matcher::supports_vector_calling_convention(void) {
bool Matcher::supports_vector_calling_convention(void) {
return false;
}
@@ -2208,7 +2208,7 @@ OptoRegPair Matcher::vector_return_value(uint ideal_reg) {
}
// Vector width in bytes.
const int Matcher::vector_width_in_bytes(BasicType bt) {
int Matcher::vector_width_in_bytes(BasicType bt) {
if (SuperwordUseVSX) {
assert(MaxVectorSize == 16, "");
return 16;
@@ -2219,7 +2219,7 @@ const int Matcher::vector_width_in_bytes(BasicType bt) {
}
// Vector ideal reg.
const uint Matcher::vector_ideal_reg(int size) {
uint Matcher::vector_ideal_reg(int size) {
if (SuperwordUseVSX) {
assert(MaxVectorSize == 16 && size == 16, "");
return Op_VecX;
@@ -2230,20 +2230,20 @@ const uint Matcher::vector_ideal_reg(int size) {
}
// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
int Matcher::max_vector_size(const BasicType bt) {
assert(is_java_primitive(bt), "only primitive type vectors");
return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
const int Matcher::min_vector_size(const BasicType bt) {
int Matcher::min_vector_size(const BasicType bt) {
return max_vector_size(bt); // Same as max.
}
const int Matcher::superword_max_vector_size(const BasicType bt) {
int Matcher::superword_max_vector_size(const BasicType bt) {
return Matcher::max_vector_size(bt);
}
const int Matcher::scalable_vector_reg_size(const BasicType bt) {
int Matcher::scalable_vector_reg_size(const BasicType bt) {
return -1;
}

@@ -270,7 +270,7 @@ class Address {
return _mode != literal && base() == reg;
}
const address target() const {
address target() const {
assert_is_literal();
return _literal._target;
}

@@ -71,7 +71,7 @@ public:
intptr_t* last_Java_sp(void) const { return _last_Java_sp; }
const address last_Java_pc(void) { return _last_Java_pc; }
address last_Java_pc(void) { return _last_Java_pc; }
private:

@@ -34,7 +34,7 @@
// Whether this platform implements the scalable vector feature
static const bool implements_scalable_vector = true;
static const bool supports_scalable_vector() {
static bool supports_scalable_vector() {
return UseRVV;
}
@@ -143,12 +143,12 @@
}
// Does the CPU supports vector unsigned comparison instructions?
static const bool supports_vector_comparison_unsigned(int vlen, BasicType bt) {
static constexpr bool supports_vector_comparison_unsigned(int vlen, BasicType bt) {
return false;
}
// Some microarchitectures have mask registers used on vectors
static const bool has_predicated_vectors(void) {
static bool has_predicated_vectors(void) {
return UseRVV;
}

@@ -1872,7 +1872,7 @@ int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
//=============================================================================
const bool Matcher::match_rule_supported(int opcode) {
bool Matcher::match_rule_supported(int opcode) {
if (!has_match_rule(opcode)) {
return false;
}
@@ -1921,7 +1921,7 @@ const TypeVectMask* Matcher::predicate_reg_type(const Type* elemTy, int length)
}
// Vector calling convention not yet implemented.
const bool Matcher::supports_vector_calling_convention(void) {
bool Matcher::supports_vector_calling_convention(void) {
return false;
}
@@ -1954,7 +1954,7 @@ bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
}
// Vector width in bytes.
const int Matcher::vector_width_in_bytes(BasicType bt) {
int Matcher::vector_width_in_bytes(BasicType bt) {
if (UseRVV) {
// The MaxVectorSize should have been set by detecting RVV max vector register size when check UseRVV.
// MaxVectorSize == VM_Version::_initial_vector_length
@@ -1969,11 +1969,11 @@ const int Matcher::vector_width_in_bytes(BasicType bt) {
}
// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
int Matcher::max_vector_size(const BasicType bt) {
return vector_width_in_bytes(bt) / type2aelembytes(bt);
}
const int Matcher::min_vector_size(const BasicType bt) {
int Matcher::min_vector_size(const BasicType bt) {
int max_size = max_vector_size(bt);
// Limit the min vector size to 8 bytes.
int size = 8 / type2aelembytes(bt);
@@ -1988,12 +1988,12 @@ const int Matcher::min_vector_size(const BasicType bt) {
return MIN2(size, max_size);
}
const int Matcher::superword_max_vector_size(const BasicType bt) {
int Matcher::superword_max_vector_size(const BasicType bt) {
return Matcher::max_vector_size(bt);
}
// Vector ideal reg.
const uint Matcher::vector_ideal_reg(int len) {
uint Matcher::vector_ideal_reg(int len) {
assert(MaxVectorSize >= len, "");
if (UseRVV) {
return Op_VecA;
@@ -2003,7 +2003,7 @@ const uint Matcher::vector_ideal_reg(int len) {
return 0;
}
const int Matcher::scalable_vector_reg_size(const BasicType bt) {
int Matcher::scalable_vector_reg_size(const BasicType bt) {
return Matcher::max_vector_size(bt);
}

@@ -46,13 +46,13 @@ source %{
}
}
const bool Matcher::match_rule_supported_superword(int opcode, int vlen, BasicType bt) {
bool Matcher::match_rule_supported_superword(int opcode, int vlen, BasicType bt) {
return match_rule_supported_vector(opcode, vlen, bt);
}
// Identify extra cases that we might want to provide match rules for vector nodes
// and other intrinsics guarded with vector length (vlen) and element type (bt).
const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
if (!UseRVV) {
return false;
}
@@ -79,14 +79,14 @@ source %{
return true;
}
const bool Matcher::match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt) {
bool Matcher::match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt) {
if (!UseRVV) {
return false;
}
return match_rule_supported_vector(opcode, vlen, bt);
}
const bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) {
bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) {
return false;
}

@@ -55,9 +55,9 @@ class VM_Version : public Abstract_VM_Version {
_enabled = true;
_value = value;
}
const char* const pretty() { return _pretty; }
const uint64_t feature_bit() { return _feature_bit; }
const bool feature_string() { return _feature_string; }
const char* pretty() { return _pretty; }
uint64_t feature_bit() { return _feature_bit; }
bool feature_string() { return _feature_string; }
bool enabled() { return _enabled; }
int64_t value() { return _value; }
virtual void update_flag() = 0;

@@ -349,7 +349,7 @@ class AddressLiteral {
intptr_t value() const { return (intptr_t) _address; }
const relocInfo::relocType rtype() const { return _rspec.type(); }
relocInfo::relocType rtype() const { return _rspec.type(); }
const RelocationHolder& rspec() const { return _rspec; }
RelocationHolder rspec(int offset) const {

@@ -35,7 +35,7 @@
// Whether this platform implements the scalable vector feature
static const bool implements_scalable_vector = false;
static constexpr const bool supports_scalable_vector() {
static constexpr bool supports_scalable_vector() {
return false;
}
@@ -64,10 +64,10 @@
}
// Suppress CMOVL. Conditional move available on z/Architecture only from z196 onwards. Not exploited yet.
static const int long_cmove_cost() { return ConditionalMoveLimit; }
static int long_cmove_cost() { return ConditionalMoveLimit; }
// Suppress CMOVF. Conditional move available on z/Architecture only from z196 onwards. Not exploited yet.
static const int float_cmove_cost() { return ConditionalMoveLimit; }
static int float_cmove_cost() { return ConditionalMoveLimit; }
// Set this as clone_shift_expressions.
static bool narrow_oop_use_complex_address() {

@@ -1492,7 +1492,7 @@ static Register reg_to_register_object(int register_encoding) {
return as_Register(register_encoding);
}
const bool Matcher::match_rule_supported(int opcode) {
bool Matcher::match_rule_supported(int opcode) {
if (!has_match_rule(opcode)) {
return false; // no match rule present
}
@@ -1510,22 +1510,22 @@ const bool Matcher::match_rule_supported(int opcode) {
return true; // Per default match rules are supported.
}
const bool Matcher::match_rule_supported_superword(int opcode, int vlen, BasicType bt) {
bool Matcher::match_rule_supported_superword(int opcode, int vlen, BasicType bt) {
return match_rule_supported_vector(opcode, vlen, bt);
}
const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
if (!match_rule_supported(opcode) || !vector_size_supported(bt, vlen)) {
return false;
}
return true; // Per default match rules are supported.
}
const bool Matcher::match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt) {
bool Matcher::match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt) {
return false;
}
const bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) {
bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) {
return false;
}
@@ -1538,7 +1538,7 @@ const TypeVectMask* Matcher::predicate_reg_type(const Type* elemTy, int length)
}
// Vector calling convention not yet implemented.
const bool Matcher::supports_vector_calling_convention(void) {
bool Matcher::supports_vector_calling_convention(void) {
return false;
}
@@ -1550,32 +1550,32 @@ OptoRegPair Matcher::vector_return_value(uint ideal_reg) {
//----------SUPERWORD HELPERS----------------------------------------
// Vector width in bytes.
const int Matcher::vector_width_in_bytes(BasicType bt) {
int Matcher::vector_width_in_bytes(BasicType bt) {
assert(MaxVectorSize == 8, "");
return 8;
}
// Vector ideal reg.
const uint Matcher::vector_ideal_reg(int size) {
uint Matcher::vector_ideal_reg(int size) {
assert(MaxVectorSize == 8 && size == 8, "");
return Op_RegL;
}
// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
int Matcher::max_vector_size(const BasicType bt) {
assert(is_java_primitive(bt), "only primitive type vectors");
return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
const int Matcher::min_vector_size(const BasicType bt) {
int Matcher::min_vector_size(const BasicType bt) {
return max_vector_size(bt); // Same as max.
}
const int Matcher::superword_max_vector_size(const BasicType bt) {
int Matcher::superword_max_vector_size(const BasicType bt) {
return Matcher::max_vector_size(bt);
}
const int Matcher::scalable_vector_reg_size(const BasicType bt) {
int Matcher::scalable_vector_reg_size(const BasicType bt) {
return -1;
}

@@ -169,7 +169,7 @@ void BarrierSetNMethod::deoptimize(nmethod* nm, address* return_address_ptr) {
// NativeNMethodCmpBarrier::verify() will immediately complain when it does
// not find the expected native instruction at this offset, which needs updating.
// Note that this offset is invariant of PreserveFramePointer.
static const int entry_barrier_offset(nmethod* nm) {
static int entry_barrier_offset(nmethod* nm) {
#ifdef _LP64
if (nm->is_compiled_by_c2()) {
return -14;

@@ -97,14 +97,14 @@
}
// Prefer ConN+DecodeN over ConP.
static const bool const_oop_prefer_decode() {
static bool const_oop_prefer_decode() {
NOT_LP64(ShouldNotCallThis();)
// Prefer ConN+DecodeN over ConP.
return true;
}
// Prefer ConP over ConNKlass+DecodeNKlass.
static const bool const_klass_prefer_decode() {
static bool const_klass_prefer_decode() {
NOT_LP64(ShouldNotCallThis();)
return false;
}
@@ -165,12 +165,12 @@
}
// Does the CPU supports vector unsigned comparison instructions?
static const bool supports_vector_comparison_unsigned(int vlen, BasicType bt) {
static constexpr bool supports_vector_comparison_unsigned(int vlen, BasicType bt) {
return true;
}
// Some microarchitectures have mask registers used on vectors
static const bool has_predicated_vectors(void) {
static bool has_predicated_vectors(void) {
return VM_Version::supports_evex();
}

@@ -1392,7 +1392,7 @@ Assembler::Width widthForType(BasicType bt) {
static address vector_double_signflip() { return StubRoutines::x86::vector_double_sign_flip();}
//=============================================================================
const bool Matcher::match_rule_supported(int opcode) {
bool Matcher::match_rule_supported(int opcode) {
if (!has_match_rule(opcode)) {
return false; // no match rule present
}
@@ -1694,13 +1694,13 @@ static inline bool is_pop_count_instr_target(BasicType bt) {
(is_non_subword_integral_type(bt) && VM_Version::supports_avx512_vpopcntdq());
}
const bool Matcher::match_rule_supported_superword(int opcode, int vlen, BasicType bt) {
bool Matcher::match_rule_supported_superword(int opcode, int vlen, BasicType bt) {
return match_rule_supported_vector(opcode, vlen, bt);
}
// Identify extra cases that we might want to provide match rules for vector nodes and
// other intrinsics guarded with vector length (vlen) and element type (bt).
const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
const bool is_LP64 = LP64_ONLY(true) NOT_LP64(false);
if (!match_rule_supported(opcode)) {
return false;
@@ -1988,7 +1988,7 @@ const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType
return true; // Per default match rules are supported.
}
const bool Matcher::match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt) {
bool Matcher::match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt) {
// ADLC based match_rule_supported routine checks for the existence of pattern based
// on IR opcode. Most of the unary/binary/ternary masked operation share the IR nodes
// of their non-masked counterpart with mask edge being the differentiator.
@@ -2147,7 +2147,7 @@ const bool Matcher::match_rule_supported_vector_masked(int opcode, int vlen, Bas
}
}
const bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) {
bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) {
return false;
}
@@ -2219,7 +2219,7 @@ const TypeVectMask* Matcher::predicate_reg_type(const Type* elemTy, int length)
}
// Max vector size in bytes. 0 if not supported.
const int Matcher::vector_width_in_bytes(BasicType bt) {
int Matcher::vector_width_in_bytes(BasicType bt) {
assert(is_java_primitive(bt), "only primitive type vectors");
if (UseSSE < 2) return 0;
// SSE2 supports 128bit vectors for all types.
@@ -2262,10 +2262,10 @@ const int Matcher::vector_width_in_bytes(BasicType bt) {
}
// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
int Matcher::max_vector_size(const BasicType bt) {
return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
const int Matcher::min_vector_size(const BasicType bt) {
int Matcher::min_vector_size(const BasicType bt) {
int max_size = max_vector_size(bt);
// Min size which can be loaded into vector is 4 bytes.
int size = (type2aelembytes(bt) == 1) ? 4 : 2;
@@ -2276,7 +2276,7 @@ const int Matcher::min_vector_size(const BasicType bt) {
return MIN2(size,max_size);
}
const int Matcher::superword_max_vector_size(const BasicType bt) {
int Matcher::superword_max_vector_size(const BasicType bt) {
// Limit the max vector size for auto vectorization to 256 bits (32 bytes)
// by default on Cascade Lake
if (VM_Version::is_default_intel_cascade_lake()) {
@@ -2285,12 +2285,12 @@ const int Matcher::superword_max_vector_size(const BasicType bt) {
return Matcher::max_vector_size(bt);
}
const int Matcher::scalable_vector_reg_size(const BasicType bt) {
int Matcher::scalable_vector_reg_size(const BasicType bt) {
return -1;
}
// Vector ideal reg corresponding to specified size in bytes
const uint Matcher::vector_ideal_reg(int size) {
uint Matcher::vector_ideal_reg(int size) {
assert(MaxVectorSize >= size, "");
switch(size) {
case 4: return Op_VecS;

@@ -1407,7 +1407,7 @@ uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
//=============================================================================
// Vector calling convention not supported.
const bool Matcher::supports_vector_calling_convention() {
bool Matcher::supports_vector_calling_convention() {
return false;
}

@@ -1718,7 +1718,7 @@ uint MachUEPNode::size(PhaseRegAlloc* ra_) const
//=============================================================================
const bool Matcher::supports_vector_calling_convention(void) {
bool Matcher::supports_vector_calling_convention(void) {
if (EnableVectorSupport && UseVectorStubs) {
return true;
}

@@ -906,14 +906,14 @@ void ArchDesc::build_pipe_classes(FILE *fp_cpp) {
pipeclass->forceSerialization() ? "true" : "false",
pipeclass->mayHaveNoCode() ? "true" : "false" );
if (paramcount > 0) {
fprintf(fp_cpp, "\n (enum machPipelineStages * const) pipeline_reads_%03d,\n ",
fprintf(fp_cpp, "\n (enum machPipelineStages *) pipeline_reads_%03d,\n ",
pipeline_reads_index+1);
}
else
fprintf(fp_cpp, " nullptr,");
fprintf(fp_cpp, " (enum machPipelineStages * const) pipeline_res_stages_%03d,\n",
fprintf(fp_cpp, " (enum machPipelineStages *) pipeline_res_stages_%03d,\n",
pipeline_res_stages_index+1);
fprintf(fp_cpp, " (uint * const) pipeline_res_cycles_%03d,\n",
fprintf(fp_cpp, " (uint *) pipeline_res_cycles_%03d,\n",
pipeline_res_cycles_index+1);
fprintf(fp_cpp, " Pipeline_Use(%s, (Pipeline_Use_Element *)",
pipeline_res_args.name(pipeline_res_mask_index));
@@ -4209,7 +4209,7 @@ void ArchDesc::buildMachNodeGenerator(FILE *fp_cpp) {
// instruction has a matching rule for the host architecture.
void ArchDesc::buildInstructMatchCheck(FILE *fp_cpp) const {
fprintf(fp_cpp, "\n\n");
fprintf(fp_cpp, "const bool Matcher::has_match_rule(int opcode) {\n");
fprintf(fp_cpp, "bool Matcher::has_match_rule(int opcode) {\n");
fprintf(fp_cpp, " assert(_last_machine_leaf < opcode && opcode < _last_opcode, \"opcode in range\");\n");
fprintf(fp_cpp, " return _hasMatchRule[opcode];\n");
fprintf(fp_cpp, "}\n\n");

@@ -114,7 +114,7 @@ class ValueType: public CompilationResourceObj {
assert(_size > -1, "shouldn't be asking for size");
return _size;
}
virtual const char tchar() const = 0; // the type 'character' for printing
virtual char tchar() const = 0; // the type 'character' for printing
virtual const char* name() const = 0; // the type name for printing
virtual bool is_constant() const { return false; }
@@ -177,7 +177,7 @@ class VoidType: public ValueType {
public:
VoidType(): ValueType(voidTag, 0) {}
virtual ValueType* base() const { return voidType; }
virtual const char tchar() const { return 'v'; }
virtual char tchar() const { return 'v'; }
virtual const char* name() const { return "void"; }
virtual VoidType* as_VoidType() { return this; }
};
@@ -187,7 +187,7 @@ class IntType: public ValueType {
public:
IntType(): ValueType(intTag, 1) {}
virtual ValueType* base() const { return intType; }
virtual const char tchar() const { return 'i'; }
virtual char tchar() const { return 'i'; }
virtual const char* name() const { return "int"; }
virtual IntType* as_IntType() { return this; }
};
@@ -211,7 +211,7 @@ class LongType: public ValueType {
public:
LongType(): ValueType(longTag, 2) {}
virtual ValueType* base() const { return longType; }
virtual const char tchar() const { return 'l'; }
virtual char tchar() const { return 'l'; }
virtual const char* name() const { return "long"; }
virtual LongType* as_LongType() { return this; }
};
@@ -235,7 +235,7 @@ class FloatType: public ValueType {
public:
FloatType(): ValueType(floatTag, 1) {}
virtual ValueType* base() const { return floatType; }
virtual const char tchar() const { return 'f'; }
virtual char tchar() const { return 'f'; }
virtual const char* name() const { return "float"; }
virtual FloatType* as_FloatType() { return this; }
};
@@ -259,7 +259,7 @@ class DoubleType: public ValueType {
public:
DoubleType(): ValueType(doubleTag, 2) {}
virtual ValueType* base() const { return doubleType; }
virtual const char tchar() const { return 'd'; }
virtual char tchar() const { return 'd'; }
virtual const char* name() const { return "double"; }
virtual DoubleType* as_DoubleType() { return this; }
};
@@ -283,7 +283,7 @@ class ObjectType: public ValueType {
public:
ObjectType(): ValueType(objectTag, 1) {}
virtual ValueType* base() const { return objectType; }
virtual const char tchar() const { return 'a'; }
virtual char tchar() const { return 'a'; }
virtual const char* name() const { return "object"; }
virtual ObjectType* as_ObjectType() { return this; }
virtual ciObject* constant_value() const { ShouldNotReachHere(); return nullptr; }
@@ -371,7 +371,7 @@ class MetadataType: public ValueType {
public:
MetadataType(): ValueType(metaDataTag, 1) {}
virtual ValueType* base() const { return objectType; }
virtual const char tchar() const { return 'a'; }
virtual char tchar() const { return 'a'; }
virtual const char* name() const { return "object"; }
virtual MetadataType* as_MetadataType() { return this; }
bool is_loaded() const;
@@ -428,7 +428,7 @@ class AddressType: public ValueType {
public:
AddressType(): ValueType(addressTag, 1) {}
virtual ValueType* base() const { return addressType; }
virtual const char tchar() const { return 'r'; }
virtual char tchar() const { return 'r'; }
virtual const char* name() const { return "address"; }
virtual AddressType* as_AddressType() { return this; }
};
@@ -453,7 +453,7 @@ class IllegalType: public ValueType {
public:
IllegalType(): ValueType(illegalTag, -1) {}
virtual ValueType* base() const { return illegalType; }
virtual const char tchar() const { return ' '; }
virtual char tchar() const { return ' '; }
virtual const char* name() const { return "illegal"; }
virtual IllegalType* as_IllegalType() { return this; }
};

@@ -4961,7 +4961,7 @@ void ClassFileParser::verify_legal_field_signature(const Symbol* name,
TRAPS) const {
if (!_need_verify) { return; }
const char* const bytes = (const char* const)signature->bytes();
const char* const bytes = (const char*)signature->bytes();
const unsigned int length = signature->utf8_length();
const char* const p = skip_over_field_signature(bytes, false, length, CHECK);
@@ -5672,7 +5672,7 @@ void ClassFileParser::parse_stream(const ClassFileStream* const stream,
parse_constant_pool(stream, cp, _orig_cp_size, CHECK);
assert(cp_size == (const u2)cp->length(), "invariant");
assert(cp_size == (u2)cp->length(), "invariant");
// ACCESS FLAGS
stream->guarantee_more(8, CHECK); // flags, this_class, super_class, infs_len

@@ -54,7 +54,7 @@ const u1* ClassFileStream::clone_buffer() const {
return new_buffer_start;
}
const char* const ClassFileStream::clone_source() const {
const char* ClassFileStream::clone_source() const {
const char* const src = source();
char* source_copy = nullptr;
if (src != nullptr) {

@@ -49,7 +49,7 @@ class ClassFileStream: public ResourceObj {
protected:
const u1* clone_buffer() const;
const char* const clone_source() const;
const char* clone_source() const;
public:
static const bool verify;
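
Only the top-level qualifier is ignored. A const that protects the pointee stays meaningful and is kept throughout the patch, as is the const on the static data member above. A short sketch of the distinction (hypothetical declarations):

const char* const name_a();  // rightmost const qualifies the returned pointer copy: ignored, warns
const char* name_b();        // 'const char*' still forbids writing through the result: kept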

@@ -204,7 +204,7 @@ template<> void TypedMethodOptionMatcher::set_value(bool value) {
}
template<> void TypedMethodOptionMatcher::set_value(ccstr value) {
_u.ccstr_value = (const ccstr)os::strdup_check_oom(value);
_u.ccstr_value = (ccstr)os::strdup_check_oom(value);
}
void TypedMethodOptionMatcher::print() {

@@ -1357,8 +1357,8 @@ class G1ReclaimEmptyRegionsTask : public WorkerTask {
_humongous_regions_removed(0) { }
size_t freed_bytes() { return _freed_bytes; }
const uint old_regions_removed() { return _old_regions_removed; }
const uint humongous_regions_removed() { return _humongous_regions_removed; }
uint old_regions_removed() { return _old_regions_removed; }
uint humongous_regions_removed() { return _humongous_regions_removed; }
bool do_heap_region(HeapRegion *hr) {
if (hr->used() > 0 && hr->live_bytes() == 0 && !hr->is_young()) {

@@ -46,7 +46,7 @@ public:
// In some places we iterate over a list in order to generate output
// for the list's elements. By exposing this we can avoid this
// iteration if the printer is not active.
const bool is_active() { return log_is_enabled(Trace, gc, region); }
bool is_active() { return log_is_enabled(Trace, gc, region); }
// The methods below are convenient wrappers for the print() method.

@@ -1246,7 +1246,7 @@ class PSParallelCompact : AllStatic {
static ParallelCompactData& summary_data() { return _summary_data; }
// Reference Processing
static ReferenceProcessor* const ref_processor() { return _ref_processor; }
static ReferenceProcessor* ref_processor() { return _ref_processor; }
static STWGCTimer* gc_timer() { return &_gc_timer; }

@@ -77,7 +77,7 @@ class PSScavenge: AllStatic {
static bool should_attempt_scavenge();
// Private accessors
static PSCardTable* const card_table() { assert(_card_table != nullptr, "Sanity"); return _card_table; }
static PSCardTable* card_table() { assert(_card_table != nullptr, "Sanity"); return _card_table; }
static const ParallelScavengeTracer* gc_tracer() { return &_gc_tracer; }
public:
@@ -92,7 +92,7 @@ class PSScavenge: AllStatic {
_span_based_discoverer.set_span(mr);
}
// Used by scavenge_contents
static ReferenceProcessor* const reference_processor() {
static ReferenceProcessor* reference_processor() {
assert(_ref_processor != nullptr, "Sanity");
return _ref_processor;
}

@@ -167,7 +167,7 @@ class DefNewGeneration: public Generation {
// allocate and initialize ("weak") refs processing support
void ref_processor_init();
ReferenceProcessor* const ref_processor() { return _ref_processor; }
ReferenceProcessor* ref_processor() { return _ref_processor; }
// Accessing spaces
ContiguousSpace* eden() const { return _eden_space; }

@@ -132,7 +132,7 @@ class MarkSweep : AllStatic {
static uint total_invocations() { return _total_invocations; }
// Reference Processing
static ReferenceProcessor* const ref_processor() { return _ref_processor; }
static ReferenceProcessor* ref_processor() { return _ref_processor; }
static STWGCTimer* gc_timer() { return _gc_timer; }
static SerialOldTracer* gc_tracer() { return _gc_tracer; }

@@ -122,11 +122,11 @@ class G1HeapSummary : public GCHeapSummary {
public:
G1HeapSummary(VirtualSpaceSummary& heap_space, size_t heap_used, size_t edenUsed, size_t edenCapacity, size_t survivorUsed, size_t oldGenUsed, uint numberOfRegions) :
GCHeapSummary(heap_space, heap_used), _edenUsed(edenUsed), _edenCapacity(edenCapacity), _survivorUsed(survivorUsed), _oldGenUsed(oldGenUsed), _numberOfRegions(numberOfRegions) { }
const size_t edenUsed() const { return _edenUsed; }
const size_t edenCapacity() const { return _edenCapacity; }
const size_t survivorUsed() const { return _survivorUsed; }
const size_t oldGenUsed() const { return _oldGenUsed; }
const uint numberOfRegions() const { return _numberOfRegions; }
size_t edenUsed() const { return _edenUsed; }
size_t edenCapacity() const { return _edenCapacity; }
size_t survivorUsed() const { return _survivorUsed; }
size_t oldGenUsed() const { return _oldGenUsed; }
uint numberOfRegions() const { return _numberOfRegions; }
virtual void accept(GCHeapSummaryVisitor* visitor) const {
visitor->visit(this);

@@ -178,11 +178,11 @@ public:
size_t used() const { return allocated() - (wasted() + unused()); }
size_t undo_wasted() const { return _undo_wasted; }
static const size_t min_size() {
static size_t min_size() {
return PLAB::min_size();
}
static const size_t max_size() {
static size_t max_size() {
return PLAB::max_size();
}

@@ -48,7 +48,7 @@ private:
_gc_id(GCId::current_or_undefined()) {}
const char* name() const { return _name; }
const uint gc_id() const { return _gc_id; }
uint gc_id() const { return _gc_id; }
virtual void work(uint worker_id) = 0;
};

@@ -236,10 +236,10 @@ public:
inline size_t num_regions() const { return _num_regions; }
inline bool is_heap_region_special() { return _heap_region_special; }
inline ShenandoahHeapRegion* const heap_region_containing(const void* addr) const;
inline ShenandoahHeapRegion* heap_region_containing(const void* addr) const;
inline size_t heap_region_index_containing(const void* addr) const;
inline ShenandoahHeapRegion* const get_region(size_t region_idx) const;
inline ShenandoahHeapRegion* get_region(size_t region_idx) const;
void heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;
void parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;

@@ -80,7 +80,7 @@ inline size_t ShenandoahHeap::heap_region_index_containing(const void* addr) con
return index;
}
inline ShenandoahHeapRegion* const ShenandoahHeap::heap_region_containing(const void* addr) const {
inline ShenandoahHeapRegion* ShenandoahHeap::heap_region_containing(const void* addr) const {
size_t index = heap_region_index_containing(addr);
ShenandoahHeapRegion* const result = get_region(index);
assert(addr >= result->bottom() && addr < result->end(), "Heap region contains the address: " PTR_FORMAT, p2i(addr));
@@ -548,7 +548,7 @@ inline void ShenandoahHeap::marked_object_oop_iterate(ShenandoahHeapRegion* regi
}
}
inline ShenandoahHeapRegion* const ShenandoahHeap::get_region(size_t region_idx) const {
inline ShenandoahHeapRegion* ShenandoahHeap::get_region(size_t region_idx) const {
if (region_idx < _num_regions) {
return _regions[region_idx];
} else {

@@ -163,7 +163,7 @@ private:
void report_illegal_transition(const char* method);
public:
static const int region_states_num() {
static int region_states_num() {
return _REGION_STATES_NUM;
}

@@ -189,7 +189,7 @@ public:
page->undo_alloc_object(addr, size);
}
const size_t in_place_count() const {
size_t in_place_count() const {
return _in_place_count;
}
};
@@ -266,7 +266,7 @@ public:
page->undo_alloc_object_atomic(addr, size);
}
const size_t in_place_count() const {
size_t in_place_count() const {
return _in_place_count;
}
};

@@ -473,7 +473,7 @@ public:
page->undo_alloc_object(addr, size);
}
const size_t in_place_count() const {
size_t in_place_count() const {
return _in_place_count;
}
};
@@ -567,7 +567,7 @@ public:
page->undo_alloc_object_atomic(addr, size);
}
const size_t in_place_count() const {
size_t in_place_count() const {
return _in_place_count;
}
};

@@ -65,16 +65,16 @@ static bool initialize(TRAPS) {
return initialized;
}
static const typeArrayOop invoke(jlong trace_id,
jboolean force_instrumentation,
jboolean boot_class_loader,
jclass class_being_redefined,
jint class_data_len,
const unsigned char* class_data,
Symbol* method_sym,
Symbol* signature_sym,
jint& new_bytes_length,
TRAPS) {
static typeArrayOop invoke(jlong trace_id,
jboolean force_instrumentation,
jboolean boot_class_loader,
jclass class_being_redefined,
jint class_data_len,
const unsigned char* class_data,
Symbol* method_sym,
Symbol* signature_sym,
jint& new_bytes_length,
TRAPS) {
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
const Klass* klass = SystemDictionary::resolve_or_fail(jvm_upcalls_class_sym, true, CHECK_NULL);
assert(klass != nullptr, "invariant");

@@ -29,11 +29,11 @@
Edge::Edge(const Edge* parent, UnifiedOopRef reference) : _parent(parent),
_reference(reference) {}
const oop Edge::pointee() const {
oop Edge::pointee() const {
return _reference.dereference();
}
const oop Edge::reference_owner() const {
oop Edge::reference_owner() const {
return is_root() ? (oop)nullptr : _parent->pointee();
}

@@ -45,8 +45,8 @@ class Edge {
bool is_root() const {
return _parent == nullptr;
}
const oop pointee() const;
const oop reference_owner() const;
oop pointee() const;
oop reference_owner() const;
size_t distance_to_root() const;
void* operator new (size_t sz, void* here) {

@@ -97,7 +97,7 @@ static int array_offset(const Edge& edge) {
UnifiedOopRef reference = edge.reference();
assert(!reference.is_null(), "invariant");
assert(ref_owner->is_array(), "invariant");
const objArrayOop ref_owner_array = static_cast<const objArrayOop>(ref_owner);
const objArrayOop ref_owner_array = static_cast<objArrayOop>(ref_owner);
const int offset = (int)pointer_delta(reference.addr<HeapWord*>(), ref_owner_array->base(), heapOopSize);
assert(offset >= 0 && offset < ref_owner_array->length(), "invariant");
return offset;

@@ -408,11 +408,11 @@ class RootResolutionSet : public ResourceObj, public RootCallback {
private:
GrowableArray<const ObjectSampleRootDescriptionInfo*>* _unresolved_roots;
const uintptr_t high() const {
uintptr_t high() const {
return _unresolved_roots->last()->_data._root_edge->reference().addr<uintptr_t>();
}
const uintptr_t low() const {
uintptr_t low() const {
return _unresolved_roots->first()->_data._root_edge->reference().addr<uintptr_t>();
}

@@ -35,7 +35,7 @@ void ObjectSample::reset() {
_virtual_thread = false;
}
const oop ObjectSample::object() const {
oop ObjectSample::object() const {
return _object.resolve();
}

@@ -102,7 +102,7 @@ class ObjectSample : public JfrCHeapObj {
bool is_dead() const;
const oop object() const;
oop object() const;
void set_object(oop object);
const oop* object_addr() const;
@@ -141,7 +141,7 @@ class ObjectSample : public JfrCHeapObj {
return _allocation_time;
}
const void set_allocation_time(const JfrTicks& time) {
void set_allocation_time(const JfrTicks& time) {
_allocation_time = Ticks(time.value());
}

@@ -62,7 +62,7 @@ static void write_module_dependency_event(const void* from_module, const ModuleE
EventModuleRequire event(UNTIMED);
event.set_starttime(invocation_time);
event.set_endtime(invocation_time);
event.set_source((const ModuleEntry* const)from_module);
event.set_source((const ModuleEntry*)from_module);
event.set_requiredModule(to_module);
event.commit();
}

@@ -53,7 +53,7 @@ class JfrThreadGroupPointers : public ResourceObj {
JfrThreadGroupPointers(Handle thread_group_handle, jweak thread_group_weak_ref);
Handle thread_group_handle() const;
jweak thread_group_weak_ref() const;
oopDesc* const thread_group_oop() const;
oopDesc* thread_group_oop() const;
jweak transfer_weak_global_handle_ownership();
void clear_weak_ref();
};
@@ -70,7 +70,7 @@ jweak JfrThreadGroupPointers::thread_group_weak_ref() const {
return _thread_group_weak_ref;
}
oopDesc* const JfrThreadGroupPointers::thread_group_oop() const {
oopDesc* JfrThreadGroupPointers::thread_group_oop() const {
assert(_thread_group_weak_ref == nullptr ||
JNIHandles::resolve_non_null(_thread_group_weak_ref) == _thread_group_handle(), "invariant");
return _thread_group_handle();
@@ -209,7 +209,7 @@ class JfrThreadGroup::JfrThreadGroupEntry : public JfrCHeapObj {
traceid thread_group_id() const { return _thread_group_id; }
void set_thread_group_id(traceid tgid) { _thread_group_id = tgid; }
const char* const thread_group_name() const { return _thread_group_name; }
const char* thread_group_name() const { return _thread_group_name; }
void set_thread_group_name(const char* tgname);
traceid parent_group_id() const { return _parent_group_id; }
@@ -217,7 +217,7 @@ class JfrThreadGroup::JfrThreadGroupEntry : public JfrCHeapObj {
void set_thread_group(JfrThreadGroupPointers& ptrs);
bool is_equal(const JfrThreadGroupPointers& ptrs) const;
const oop thread_group() const;
oop thread_group() const;
};
JfrThreadGroup::JfrThreadGroupEntry::JfrThreadGroupEntry(const char* tgname, JfrThreadGroupPointers& ptrs) :
@@ -248,7 +248,7 @@ void JfrThreadGroup::JfrThreadGroupEntry::set_thread_group_name(const char* tgna
}
}
const oop JfrThreadGroup::JfrThreadGroupEntry::thread_group() const {
oop JfrThreadGroup::JfrThreadGroupEntry::thread_group() const {
return _thread_group_weak_ref != nullptr ? JNIHandles::resolve(_thread_group_weak_ref) : _thread_group_oop;
}

@@ -150,7 +150,7 @@ jobject JdkJfrEvent::get_all_klasses(TRAPS) {
JavaValue result(T_BOOLEAN);
for (int i = 0; i < event_subklasses.length(); ++i) {
const jclass clazz = (const jclass)event_subklasses.at(i);
const jclass clazz = (jclass)event_subklasses.at(i);
assert(JdkJfrEvent::is_subklass(clazz), "invariant");
JfrJavaArguments args(&result, array_list_klass, add_method_sym, add_method_sig_sym);
args.set_receiver(h_array_list());

@@ -32,7 +32,7 @@
#define DEFINE_TRACE_ID_METHODS \
traceid trace_id() const { return _trace_id; } \
traceid* const trace_id_addr() const { return &_trace_id; } \
traceid* trace_id_addr() const { return &_trace_id; } \
void set_trace_id(traceid id) const { _trace_id = id; }
#define DEFINE_TRACE_ID_SIZE \

@@ -116,7 +116,7 @@ class OopMapBlock {
}
// sizeof(OopMapBlock) in words.
static const int size_in_words() {
static int size_in_words() {
return align_up((int)sizeof(OopMapBlock), wordSize) >>
LogBytesPerWord;
}

@@ -410,7 +410,7 @@ class Method : public Metadata {
// nmethod/verified compiler entry
address verified_code_entry();
bool check_code() const; // Not inline to avoid circular ref
CompiledMethod* volatile code() const;
CompiledMethod* code() const;
// Locks CompiledMethod_lock if not held.
void unlink_code(CompiledMethod *compare);

@@ -45,7 +45,7 @@ inline void Method::set_method_data(MethodData* data) {
Atomic::release_store(&_method_data, data);
}
inline CompiledMethod* volatile Method::code() const {
inline CompiledMethod* Method::code() const {
assert( check_code(), "" );
return Atomic::load_acquire(&_code);
}
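
-Wignored-qualifiers covers cv-qualifiers generally, so the volatile on this return type was equally inert: the caller gets a plain pointer copy, and the ordering guarantee comes from the acquire load in the body (Atomic::load_acquire), not from the signature. A hedged sketch of the same shape using standard atomics instead of HotSpot's Atomic class (names are stand-ins):

#include <atomic>

struct Code;  // stand-in for the compiled-method type

struct Holder {
  std::atomic<Code*> _code{nullptr};
  // Code* volatile get() const { ... }  // 'volatile' here would be ignored and warn
  Code* get() const { return _code.load(std::memory_order_acquire); }
};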

@@ -602,8 +602,8 @@ public:
}
const TypeFunc* tf() const { return _tf; }
const address entry_point() const { return _entry_point; }
const float cnt() const { return _cnt; }
address entry_point() const { return _entry_point; }
float cnt() const { return _cnt; }
CallGenerator* generator() const { return _generator; }
void set_tf(const TypeFunc* tf) { _tf = tf; }

@@ -117,7 +117,7 @@ class CastIINode: public ConstraintCastNode {
virtual Node* Identity(PhaseGVN* phase);
virtual const Type* Value(PhaseGVN* phase) const;
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
const bool has_range_check() {
bool has_range_check() {
#ifdef _LP64
return _range_check_dependency;
#else

@@ -239,10 +239,10 @@ public:
virtual const TypePtr *adr_type() const { verify_adr_type(true); return _adr_type; }
void set_inst_mem_id(int inst_mem_id) { _inst_mem_id = inst_mem_id; }
const int inst_mem_id() const { return _inst_mem_id; }
const int inst_id() const { return _inst_id; }
const int inst_index() const { return _inst_index; }
const int inst_offset() const { return _inst_offset; }
int inst_mem_id() const { return _inst_mem_id; }
int inst_id() const { return _inst_id; }
int inst_index() const { return _inst_index; }
int inst_offset() const { return _inst_offset; }
bool is_same_inst_field(const Type* tp, int mem_id, int id, int index, int offset) {
return type()->basic_type() == tp->basic_type() &&
inst_mem_id() == mem_id &&

@@ -898,9 +898,9 @@ public:
float _cnt; // Estimate of number of times called
bool _guaranteed_safepoint; // Do we need to observe safepoint?
const TypeFunc* tf() const { return _tf; }
const address entry_point() const { return _entry_point; }
const float cnt() const { return _cnt; }
const TypeFunc* tf() const { return _tf; }
address entry_point() const { return _entry_point; }
float cnt() const { return _cnt; }
void set_tf(const TypeFunc* tf) { _tf = tf; }
void set_entry_point(address p) { _entry_point = p; }

@@ -447,7 +447,7 @@ static RegMask *init_input_masks( uint size, RegMask &ret_adr, RegMask &fp ) {
return rms;
}
const int Matcher::scalable_predicate_reg_slots() {
int Matcher::scalable_predicate_reg_slots() {
assert(Matcher::has_predicated_vectors() && Matcher::supports_scalable_vector(),
"scalable predicate vector should be supported");
int vector_reg_bit_size = Matcher::scalable_vector_reg_size(T_BYTE) << LogBitsPerByte;

@@ -319,50 +319,50 @@ public:
RegMask *_calling_convention_mask; // Array of RegMasks per argument
// Does matcher have a match rule for this ideal node?
static const bool has_match_rule(int opcode);
static bool has_match_rule(int opcode);
static const bool _hasMatchRule[_last_opcode];
// Does matcher have a match rule for this ideal node and is the
// predicate (if there is one) true?
// NOTE: If this function is used more commonly in the future, ADLC
// should generate this one.
static const bool match_rule_supported(int opcode);
static bool match_rule_supported(int opcode);
// Identify extra cases that we might want to vectorize automatically
// And exclude cases which are not profitable to auto-vectorize.
static const bool match_rule_supported_superword(int opcode, int vlen, BasicType bt);
static bool match_rule_supported_superword(int opcode, int vlen, BasicType bt);
// identify extra cases that we might want to provide match rules for
// e.g. Op_ vector nodes and other intrinsics while guarding with vlen
static const bool match_rule_supported_vector(int opcode, int vlen, BasicType bt);
static bool match_rule_supported_vector(int opcode, int vlen, BasicType bt);
static const bool match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt);
static bool match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt);
static const bool vector_needs_partial_operations(Node* node, const TypeVect* vt);
static bool vector_needs_partial_operations(Node* node, const TypeVect* vt);
static const RegMask* predicate_reg_mask(void);
static const TypeVectMask* predicate_reg_type(const Type* elemTy, int length);
// Vector width in bytes
static const int vector_width_in_bytes(BasicType bt);
static int vector_width_in_bytes(BasicType bt);
// Limits on vector size (number of elements).
static const int max_vector_size(const BasicType bt);
static const int min_vector_size(const BasicType bt);
static const bool vector_size_supported(const BasicType bt, int size) {
static int max_vector_size(const BasicType bt);
static int min_vector_size(const BasicType bt);
static bool vector_size_supported(const BasicType bt, int size) {
return (Matcher::max_vector_size(bt) >= size &&
Matcher::min_vector_size(bt) <= size);
}
// Limits on max vector size (number of elements) for auto-vectorization.
static const int superword_max_vector_size(const BasicType bt);
static int superword_max_vector_size(const BasicType bt);
// Actual max scalable vector register length.
static const int scalable_vector_reg_size(const BasicType bt);
static int scalable_vector_reg_size(const BasicType bt);
// Actual max scalable predicate register length.
static const int scalable_predicate_reg_slots();
static int scalable_predicate_reg_slots();
// Vector ideal reg
static const uint vector_ideal_reg(int len);
static uint vector_ideal_reg(int len);
// Vector length
static uint vector_length(const Node* n);
@@ -449,7 +449,7 @@ public:
static RegMask c_frame_ptr_mask;
// Java-Native vector calling convention
static const bool supports_vector_calling_convention();
static bool supports_vector_calling_convention();
static OptoRegPair vector_return_value(uint ideal_reg);
// Is this branch offset small enough to be addressed by a short branch?

@@ -829,9 +829,9 @@ protected:
}
public:
const juint class_id() const { return _class_id; }
juint class_id() const { return _class_id; }
const juint flags() const { return _flags; }
juint flags() const { return _flags; }
void add_flag(juint fl) { init_flags(fl); }

@@ -999,8 +999,8 @@ public:
const int _offset; // Offset into oop, with TOP & BOT
const PTR _ptr; // Pointer equivalence class
const int offset() const { return _offset; }
const PTR ptr() const { return _ptr; }
int offset() const { return _offset; }
PTR ptr() const { return _ptr; }
static const TypePtr *make(TYPES t, PTR ptr, int offset,
const TypePtr* speculative = nullptr,

@@ -174,7 +174,7 @@ jint </xsl:text>
<xsl:text>
// Check Event Capabilities
const bool JvmtiUtil::has_event_capability(jvmtiEvent event_type, const jvmtiCapabilities* capabilities_ptr) {
bool JvmtiUtil::has_event_capability(jvmtiEvent event_type, const jvmtiCapabilities* capabilities_ptr) {
switch (event_type) {
</xsl:text>
<xsl:for-each select="//eventsection/event">

@@ -1405,16 +1405,16 @@ class AdvancedHeapWalkContext: public HeapWalkContext {
jint heap_filter() const { return _heap_filter; }
Klass* klass_filter() const { return _klass_filter; }
const jvmtiHeapReferenceCallback heap_reference_callback() const {
jvmtiHeapReferenceCallback heap_reference_callback() const {
return _heap_callbacks->heap_reference_callback;
};
const jvmtiPrimitiveFieldCallback primitive_field_callback() const {
jvmtiPrimitiveFieldCallback primitive_field_callback() const {
return _heap_callbacks->primitive_field_callback;
}
const jvmtiArrayPrimitiveValueCallback array_primitive_value_callback() const {
jvmtiArrayPrimitiveValueCallback array_primitive_value_callback() const {
return _heap_callbacks->array_primitive_value_callback;
}
const jvmtiStringPrimitiveValueCallback string_primitive_value_callback() const {
jvmtiStringPrimitiveValueCallback string_primitive_value_callback() const {
return _heap_callbacks->string_primitive_value_callback;
}
};

@@ -50,9 +50,9 @@ public:
static const char* error_name(int num) { return _error_names[num]; } // To Do: add range checking
static const bool has_event_capability(jvmtiEvent event_type, const jvmtiCapabilities* capabilities_ptr);
static bool has_event_capability(jvmtiEvent event_type, const jvmtiCapabilities* capabilities_ptr);
static const bool event_threaded(int num) {
static bool event_threaded(int num) {
if (num >= JVMTI_MIN_EVENT_TYPE_VAL && num <= JVMTI_MAX_EVENT_TYPE_VAL) {
return _event_threaded[num];
}

@@ -197,11 +197,11 @@ public:
template <class T>
void threads_do(T *cl) const;
uint length() const { return _length; }
uint length() const { return _length; }
JavaThread *const thread_at(uint i) const { return _threads[i]; }
JavaThread *thread_at(uint i) const { return _threads[i]; }
JavaThread *const *threads() const { return _threads; }
JavaThread *const *threads() const { return _threads; }
// Returns -1 if target is not found.
int find_index_of_JavaThread(JavaThread* target);

@@ -67,10 +67,10 @@ public:
static MemoryPool* get_memory_pool(instanceHandle pool);
static MemoryManager* get_memory_manager(instanceHandle mgr);
static const int num_memory_pools() {
static int num_memory_pools() {
return _pools_list->length();
}
static const int num_memory_managers() {
static int num_memory_managers() {
return _managers_list->length();
}

@@ -157,7 +157,7 @@ class BitMap {
void set_word (idx_t word) { set_word(word, ~(bm_word_t)0); }
void clear_word(idx_t word) { _map[word] = 0; }
static inline const bm_word_t load_word_ordered(const volatile bm_word_t* const addr, atomic_memory_order memory_order);
static inline bm_word_t load_word_ordered(const volatile bm_word_t* const addr, atomic_memory_order memory_order);
// Utilities for ranges of bits. Ranges are half-open [beg, end).

@@ -42,7 +42,7 @@ inline void BitMap::clear_bit(idx_t bit) {
*word_addr(bit) &= ~bit_mask(bit);
}
inline const BitMap::bm_word_t BitMap::load_word_ordered(const volatile bm_word_t* const addr, atomic_memory_order memory_order) {
inline BitMap::bm_word_t BitMap::load_word_ordered(const volatile bm_word_t* const addr, atomic_memory_order memory_order) {
if (memory_order == memory_order_relaxed || memory_order == memory_order_release) {
return Atomic::load(addr);
} else {

@@ -308,7 +308,7 @@ class ElfFile: public CHeapObj<mtInternal> {
static uint gnu_debuglink_crc32(uint32_t crc, uint8_t* buf, size_t len);
protected:
FILE* const fd() const { return _file; }
FILE* fd() const { return _file; }
// Read the section header of section 'name'.
bool read_section_header(const char* name, Elf_Shdr& hdr) const;
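
One last distinction worth noting from this hunk: the trailing const, which qualifies *this, is meaningful and stays; only the const on the returned pointer copy goes. A sketch with a stand-in class:

#include <cstdio>

class ElfLike {
  FILE* _file;
public:
  explicit ElfLike(FILE* f) : _file(f) {}
  // FILE* const fd() const { return _file; }  // leading const ignored -> warned
  FILE* fd() const { return _file; }           // trailing const kept: fd() doesn't mutate *this
};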