8249149: Remove obsolete UseNewFieldLayout option and associated code

Reviewed-by: hseigel, iklam
Frederic Parain 2020-07-10 09:13:03 -04:00
parent 0a38584c10
commit 985061ac28
5 changed files with 8 additions and 492 deletions

src/hotspot/share/classfile/classFileParser.cpp

@@ -4028,43 +4028,6 @@ const InstanceKlass* ClassFileParser::parse_super_class(ConstantPool* const cp,
return super_klass;
}
#ifndef PRODUCT
static void print_field_layout(const Symbol* name,
Array<u2>* fields,
ConstantPool* cp,
int instance_size,
int instance_fields_start,
int instance_fields_end,
int static_fields_end) {
assert(name != NULL, "invariant");
tty->print("%s: field layout\n", name->as_klass_external_name());
tty->print(" @%3d %s\n", instance_fields_start, "--- instance fields start ---");
for (AllFieldStream fs(fields, cp); !fs.done(); fs.next()) {
if (!fs.access_flags().is_static()) {
tty->print(" @%3d \"%s\" %s\n",
fs.offset(),
fs.name()->as_klass_external_name(),
fs.signature()->as_klass_external_name());
}
}
tty->print(" @%3d %s\n", instance_fields_end, "--- instance fields end ---");
tty->print(" @%3d %s\n", instance_size * wordSize, "--- instance ends ---");
tty->print(" @%3d %s\n", InstanceMirrorKlass::offset_of_static_fields(), "--- static fields start ---");
for (AllFieldStream fs(fields, cp); !fs.done(); fs.next()) {
if (fs.access_flags().is_static()) {
tty->print(" @%3d \"%s\" %s\n",
fs.offset(),
fs.name()->as_klass_external_name(),
fs.signature()->as_klass_external_name());
}
}
tty->print(" @%3d %s\n", static_fields_end, "--- static fields end ---");
tty->print("\n");
}
#endif
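// For reference (illustrative annotation, not part of the original source):
// following the format strings above, the removed printer produced output of
// this shape for a hypothetical class P with int fields x and y, on a 64-bit
// VM with compressed class pointers (first field at offset 12); S stands for
// InstanceMirrorKlass::offset_of_static_fields():
//   P: field layout
//    @ 12 --- instance fields start ---
//    @ 12 "x" I
//    @ 16 "y" I
//    @ 20 --- instance fields end ---
//    @ 24 --- instance ends ---
//    @  S --- static fields start ---
//    @  S --- static fields end ---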
OopMapBlocksBuilder::OopMapBlocksBuilder(unsigned int max_blocks) {
_max_nonstatic_oop_maps = max_blocks;
_nonstatic_oop_map_count = 0;
@@ -4181,432 +4144,6 @@ void OopMapBlocksBuilder::print_value_on(outputStream* st) const {
print_on(st);
}
// Layout fields and fill in FieldLayoutInfo. Could use more refactoring!
void ClassFileParser::layout_fields(ConstantPool* cp,
const FieldAllocationCount* fac,
const ClassAnnotationCollector* parsed_annotations,
FieldLayoutInfo* info,
TRAPS) {
assert(cp != NULL, "invariant");
// Field size and offset computation
int nonstatic_field_size = _super_klass == NULL ? 0 :
_super_klass->nonstatic_field_size();
// Count the contended fields by type.
//
// We ignore static fields, because @Contended is not supported for them.
// The layout code below will also ignore the static fields.
int nonstatic_contended_count = 0;
FieldAllocationCount fac_contended;
for (AllFieldStream fs(_fields, cp); !fs.done(); fs.next()) {
FieldAllocationType atype = (FieldAllocationType) fs.allocation_type();
if (fs.is_contended()) {
fac_contended.count[atype]++;
if (!fs.access_flags().is_static()) {
nonstatic_contended_count++;
}
}
}
// Calculate the starting byte offsets
int next_static_oop_offset = InstanceMirrorKlass::offset_of_static_fields();
int next_static_double_offset = next_static_oop_offset +
((fac->count[STATIC_OOP]) * heapOopSize);
if (fac->count[STATIC_DOUBLE]) {
next_static_double_offset = align_up(next_static_double_offset, BytesPerLong);
}
int next_static_word_offset = next_static_double_offset +
((fac->count[STATIC_DOUBLE]) * BytesPerLong);
int next_static_short_offset = next_static_word_offset +
((fac->count[STATIC_WORD]) * BytesPerInt);
int next_static_byte_offset = next_static_short_offset +
((fac->count[STATIC_SHORT]) * BytesPerShort);
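// Worked example (illustrative annotation, not in the original source): with
// compressed oops (heapOopSize == 4), 2 static oops, 1 static long and
// 1 static int, and the mirror's static area starting at an 8-byte-aligned
// offset S, the code above places the oops at S and S+4, the long at S+8,
// the int at S+16, and any shorts/bytes would follow from S+20.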
int nonstatic_fields_start = instanceOopDesc::base_offset_in_bytes() +
nonstatic_field_size * heapOopSize;
int next_nonstatic_field_offset = nonstatic_fields_start;
const bool is_contended_class = parsed_annotations->is_contended();
// Class is contended, pad before all the fields
if (is_contended_class) {
next_nonstatic_field_offset += ContendedPaddingWidth;
}
// Compute the non-contended fields count.
// The packing code below relies on these counts to determine if some field
// can be squeezed into the alignment gap. Contended fields are obviously
// exempt from that.
unsigned int nonstatic_double_count = fac->count[NONSTATIC_DOUBLE] - fac_contended.count[NONSTATIC_DOUBLE];
unsigned int nonstatic_word_count = fac->count[NONSTATIC_WORD] - fac_contended.count[NONSTATIC_WORD];
unsigned int nonstatic_short_count = fac->count[NONSTATIC_SHORT] - fac_contended.count[NONSTATIC_SHORT];
unsigned int nonstatic_byte_count = fac->count[NONSTATIC_BYTE] - fac_contended.count[NONSTATIC_BYTE];
unsigned int nonstatic_oop_count = fac->count[NONSTATIC_OOP] - fac_contended.count[NONSTATIC_OOP];
// Total non-static fields count, including every contended field
unsigned int nonstatic_fields_count = fac->count[NONSTATIC_DOUBLE] + fac->count[NONSTATIC_WORD] +
fac->count[NONSTATIC_SHORT] + fac->count[NONSTATIC_BYTE] +
fac->count[NONSTATIC_OOP];
const bool super_has_nonstatic_fields =
(_super_klass != NULL && _super_klass->has_nonstatic_fields());
const bool has_nonstatic_fields =
super_has_nonstatic_fields || (nonstatic_fields_count != 0);
// Prepare list of oops for oop map generation.
//
// "offset" and "count" lists are describing the set of contiguous oop
// regions. offset[i] is the start of the i-th region, which then has
// count[i] oops following. Before we know how many regions are required,
// we pessimistically allocate the maps to fit all the oops into the
// distinct regions.
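// Illustrative example (annotation, not in the original source): with
// heapOopSize == 4 and oop fields at offsets 12, 16 and 24, compaction
// yields two map blocks:
//   offset[0] = 12, count[0] = 2   // 12 and 16 are contiguous
//   offset[1] = 24, count[1] = 1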
int super_oop_map_count = (_super_klass == NULL) ? 0 :_super_klass->nonstatic_oop_map_count();
int max_oop_map_count = super_oop_map_count + fac->count[NONSTATIC_OOP];
OopMapBlocksBuilder* nonstatic_oop_maps = new OopMapBlocksBuilder(max_oop_map_count);
if (super_oop_map_count > 0) {
nonstatic_oop_maps->initialize_inherited_blocks(_super_klass->start_of_nonstatic_oop_maps(),
_super_klass->nonstatic_oop_map_count());
}
int first_nonstatic_oop_offset = 0; // will be set for first oop field
bool compact_fields = true;
bool allocate_oops_first = false;
int next_nonstatic_oop_offset = 0;
int next_nonstatic_double_offset = 0;
// Rearrange fields for a given allocation style
if (allocate_oops_first) {
// Fields order: oops, longs/doubles, ints, shorts/chars, bytes, padded fields
next_nonstatic_oop_offset = next_nonstatic_field_offset;
next_nonstatic_double_offset = next_nonstatic_oop_offset +
(nonstatic_oop_count * heapOopSize);
} else {
// Fields order: longs/doubles, ints, shorts/chars, bytes, oops, padded fields
next_nonstatic_double_offset = next_nonstatic_field_offset;
}
int nonstatic_oop_space_count = 0;
int nonstatic_word_space_count = 0;
int nonstatic_short_space_count = 0;
int nonstatic_byte_space_count = 0;
int nonstatic_oop_space_offset = 0;
int nonstatic_word_space_offset = 0;
int nonstatic_short_space_offset = 0;
int nonstatic_byte_space_offset = 0;
// Try to squeeze some of the fields into the gaps due to
// long/double alignment.
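// Worked example (illustrative annotation): if the first double would land at
// offset 20, align_up(20, BytesPerLong) moves it to 24, leaving a 4-byte gap
// at 20..23. The code below fills that gap with one int, or up to two shorts,
// or up to four bytes, or (with 4-byte compressed oops, when oops are not
// allocated first) a single oop.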
if (nonstatic_double_count > 0) {
int offset = next_nonstatic_double_offset;
next_nonstatic_double_offset = align_up(offset, BytesPerLong);
if (compact_fields && offset != next_nonstatic_double_offset) {
// Allocate available fields into the gap before double field.
int length = next_nonstatic_double_offset - offset;
assert(length == BytesPerInt, "");
nonstatic_word_space_offset = offset;
if (nonstatic_word_count > 0) {
nonstatic_word_count -= 1;
nonstatic_word_space_count = 1; // Only one will fit
length -= BytesPerInt;
offset += BytesPerInt;
}
nonstatic_short_space_offset = offset;
while (length >= BytesPerShort && nonstatic_short_count > 0) {
nonstatic_short_count -= 1;
nonstatic_short_space_count += 1;
length -= BytesPerShort;
offset += BytesPerShort;
}
nonstatic_byte_space_offset = offset;
while (length > 0 && nonstatic_byte_count > 0) {
nonstatic_byte_count -= 1;
nonstatic_byte_space_count += 1;
length -= 1;
}
// Allocate oop field in the gap if there are no other fields for that.
nonstatic_oop_space_offset = offset;
if (length >= heapOopSize && nonstatic_oop_count > 0 &&
!allocate_oops_first) { // when oop fields not first
nonstatic_oop_count -= 1;
nonstatic_oop_space_count = 1; // Only one will fit
length -= heapOopSize;
offset += heapOopSize;
}
}
}
int next_nonstatic_word_offset = next_nonstatic_double_offset +
(nonstatic_double_count * BytesPerLong);
int next_nonstatic_short_offset = next_nonstatic_word_offset +
(nonstatic_word_count * BytesPerInt);
int next_nonstatic_byte_offset = next_nonstatic_short_offset +
(nonstatic_short_count * BytesPerShort);
int next_nonstatic_padded_offset = next_nonstatic_byte_offset +
nonstatic_byte_count;
// let oops jump before padding with this allocation style
if (!allocate_oops_first) {
next_nonstatic_oop_offset = next_nonstatic_padded_offset;
if( nonstatic_oop_count > 0 ) {
next_nonstatic_oop_offset = align_up(next_nonstatic_oop_offset, heapOopSize);
}
next_nonstatic_padded_offset = next_nonstatic_oop_offset + (nonstatic_oop_count * heapOopSize);
}
// Iterate over fields again and compute correct offsets.
// The field allocation type was temporarily stored in the offset slot.
// oop fields are located before non-oop fields (static and non-static).
for (AllFieldStream fs(_fields, cp); !fs.done(); fs.next()) {
// skip already laid out fields
if (fs.is_offset_set()) continue;
// contended instance fields are handled below
if (fs.is_contended() && !fs.access_flags().is_static()) continue;
int real_offset = 0;
const FieldAllocationType atype = (const FieldAllocationType) fs.allocation_type();
// pack the rest of the fields
switch (atype) {
case STATIC_OOP:
real_offset = next_static_oop_offset;
next_static_oop_offset += heapOopSize;
break;
case STATIC_BYTE:
real_offset = next_static_byte_offset;
next_static_byte_offset += 1;
break;
case STATIC_SHORT:
real_offset = next_static_short_offset;
next_static_short_offset += BytesPerShort;
break;
case STATIC_WORD:
real_offset = next_static_word_offset;
next_static_word_offset += BytesPerInt;
break;
case STATIC_DOUBLE:
real_offset = next_static_double_offset;
next_static_double_offset += BytesPerLong;
break;
case NONSTATIC_OOP:
if( nonstatic_oop_space_count > 0 ) {
real_offset = nonstatic_oop_space_offset;
nonstatic_oop_space_offset += heapOopSize;
nonstatic_oop_space_count -= 1;
} else {
real_offset = next_nonstatic_oop_offset;
next_nonstatic_oop_offset += heapOopSize;
}
nonstatic_oop_maps->add(real_offset, 1);
break;
case NONSTATIC_BYTE:
if( nonstatic_byte_space_count > 0 ) {
real_offset = nonstatic_byte_space_offset;
nonstatic_byte_space_offset += 1;
nonstatic_byte_space_count -= 1;
} else {
real_offset = next_nonstatic_byte_offset;
next_nonstatic_byte_offset += 1;
}
break;
case NONSTATIC_SHORT:
if( nonstatic_short_space_count > 0 ) {
real_offset = nonstatic_short_space_offset;
nonstatic_short_space_offset += BytesPerShort;
nonstatic_short_space_count -= 1;
} else {
real_offset = next_nonstatic_short_offset;
next_nonstatic_short_offset += BytesPerShort;
}
break;
case NONSTATIC_WORD:
if( nonstatic_word_space_count > 0 ) {
real_offset = nonstatic_word_space_offset;
nonstatic_word_space_offset += BytesPerInt;
nonstatic_word_space_count -= 1;
} else {
real_offset = next_nonstatic_word_offset;
next_nonstatic_word_offset += BytesPerInt;
}
break;
case NONSTATIC_DOUBLE:
real_offset = next_nonstatic_double_offset;
next_nonstatic_double_offset += BytesPerLong;
break;
default:
ShouldNotReachHere();
}
fs.set_offset(real_offset);
}
// Handle the contended cases.
//
// Each contended field should not intersect the cache line with another contended field.
// In the absence of alignment information, we end up with pessimistically separating
// the fields with full-width padding.
//
// Additionally, this should not break alignment for the fields, so we round the alignment up
// for each field.
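// Illustrative numbers (annotation, assuming the default ContendedPaddingWidth
// of 128 bytes): a single @Contended int costs 128 bytes of pre-padding,
// 4 bytes of data, and 128 bytes of trailing padding.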
if (nonstatic_contended_count > 0) {
// if there is at least one contended field, we need to have pre-padding for them
next_nonstatic_padded_offset += ContendedPaddingWidth;
// collect all contended groups
ResourceBitMap bm(cp->size());
for (AllFieldStream fs(_fields, cp); !fs.done(); fs.next()) {
// skip already laid out fields
if (fs.is_offset_set()) continue;
if (fs.is_contended()) {
bm.set_bit(fs.contended_group());
}
}
int current_group = -1;
while ((current_group = (int)bm.get_next_one_offset(current_group + 1)) != (int)bm.size()) {
for (AllFieldStream fs(_fields, cp); !fs.done(); fs.next()) {
// skip already laid out fields
if (fs.is_offset_set()) continue;
// skip non-contended fields and fields from different group
if (!fs.is_contended() || (fs.contended_group() != current_group)) continue;
// handle statics below
if (fs.access_flags().is_static()) continue;
int real_offset = 0;
FieldAllocationType atype = (FieldAllocationType) fs.allocation_type();
switch (atype) {
case NONSTATIC_BYTE:
next_nonstatic_padded_offset = align_up(next_nonstatic_padded_offset, 1);
real_offset = next_nonstatic_padded_offset;
next_nonstatic_padded_offset += 1;
break;
case NONSTATIC_SHORT:
next_nonstatic_padded_offset = align_up(next_nonstatic_padded_offset, BytesPerShort);
real_offset = next_nonstatic_padded_offset;
next_nonstatic_padded_offset += BytesPerShort;
break;
case NONSTATIC_WORD:
next_nonstatic_padded_offset = align_up(next_nonstatic_padded_offset, BytesPerInt);
real_offset = next_nonstatic_padded_offset;
next_nonstatic_padded_offset += BytesPerInt;
break;
case NONSTATIC_DOUBLE:
next_nonstatic_padded_offset = align_up(next_nonstatic_padded_offset, BytesPerLong);
real_offset = next_nonstatic_padded_offset;
next_nonstatic_padded_offset += BytesPerLong;
break;
case NONSTATIC_OOP:
next_nonstatic_padded_offset = align_up(next_nonstatic_padded_offset, heapOopSize);
real_offset = next_nonstatic_padded_offset;
next_nonstatic_padded_offset += heapOopSize;
nonstatic_oop_maps->add(real_offset, 1);
break;
default:
ShouldNotReachHere();
}
if (fs.contended_group() == 0) {
// Contended group defines the equivalence class over the fields:
// the fields within the same contended group are not inter-padded.
// The only exception is default group, which does not incur the
// equivalence, and so requires intra-padding.
next_nonstatic_padded_offset += ContendedPaddingWidth;
}
fs.set_offset(real_offset);
} // for
// Start laying out the next group.
// Note that this will effectively pad the last group in the back;
// this is expected to alleviate memory contention effects for
// subclass fields and/or adjacent object.
// If this was the default group, the padding is already in place.
if (current_group != 0) {
next_nonstatic_padded_offset += ContendedPaddingWidth;
}
}
// handle static fields
}
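// Illustrative example (annotation, not in the original source): given
//   @Contended int a;          // default group 0
//   @Contended("g") int b, c;  // named group
// a is padded on both sides individually, while b and c are laid out
// back to back and share one leading and one trailing pad.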
// Entire class is contended, pad in the back.
// This helps to alleviate memory contention effects for subclass fields
// and/or adjacent object.
if (is_contended_class) {
next_nonstatic_padded_offset += ContendedPaddingWidth;
}
int notaligned_nonstatic_fields_end = next_nonstatic_padded_offset;
int nonstatic_fields_end = align_up(notaligned_nonstatic_fields_end, heapOopSize);
int instance_end = align_up(notaligned_nonstatic_fields_end, wordSize);
int static_fields_end = align_up(next_static_byte_offset, wordSize);
int static_field_size = (static_fields_end -
InstanceMirrorKlass::offset_of_static_fields()) / wordSize;
nonstatic_field_size = nonstatic_field_size +
(nonstatic_fields_end - nonstatic_fields_start) / heapOopSize;
int instance_size = align_object_size(instance_end / wordSize);
assert(instance_size == align_object_size(align_up(
(instanceOopDesc::base_offset_in_bytes() + nonstatic_field_size*heapOopSize),
wordSize) / wordSize), "consistent layout helper value");
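// Worked example (illustrative annotation): 64-bit VM, compressed class
// pointers (base offset 12), no super class, fields { long l; int i; }.
// The int fills the alignment gap at offset 12 and the long goes at 16, so
// notaligned_nonstatic_fields_end == 24, instance_end == 24, and
// instance_size == 3 words (24 bytes).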
// Invariant: nonstatic_field end/start should only change if there are
// nonstatic fields in the class, or if the class is contended. We compare
// against the non-aligned value, so that end alignment will not fail the
// assert without actually having the fields.
assert((notaligned_nonstatic_fields_end == nonstatic_fields_start) ||
is_contended_class ||
(nonstatic_fields_count > 0), "double-check nonstatic start/end");
// Number of non-static oop map blocks allocated at end of klass.
nonstatic_oop_maps->compact();
#ifndef PRODUCT
if (PrintFieldLayout) {
print_field_layout(_class_name,
_fields,
cp,
instance_size,
nonstatic_fields_start,
nonstatic_fields_end,
static_fields_end);
}
#endif
// Pass back information needed for InstanceKlass creation
info->oop_map_blocks = nonstatic_oop_maps;
info->_instance_size = instance_size;
info->_static_field_size = static_field_size;
info->_nonstatic_field_size = nonstatic_field_size;
info->_has_nonstatic_fields = has_nonstatic_fields;
}
void ClassFileParser::set_precomputed_flags(InstanceKlass* ik) {
assert(ik != NULL, "invariant");
@@ -6668,13 +6205,9 @@ void ClassFileParser::post_process_parsed_stream(const ClassFileStream* const st
assert(_parsed_annotations != NULL, "invariant");
_field_info = new FieldLayoutInfo();
if (UseNewFieldLayout) {
FieldLayoutBuilder lb(class_name(), super_klass(), _cp, _fields,
_parsed_annotations->is_contended(), _field_info);
lb.build_layout();
} else {
layout_fields(cp, _fac, _parsed_annotations, _field_info, CHECK);
}
FieldLayoutBuilder lb(class_name(), super_klass(), _cp, _fields,
_parsed_annotations->is_contended(), _field_info);
lb.build_layout();
// Compute reference type
_rt = (NULL == _super_klass) ? REF_NONE : _super_klass->reference_type();

src/hotspot/share/classfile/classFileParser.hpp

@@ -533,13 +533,6 @@ class ClassFileParser {
int annotation_default_length,
TRAPS);
// lays out fields in class and returns the total oopmap count
void layout_fields(ConstantPool* cp,
const FieldAllocationCount* fac,
const ClassAnnotationCollector* parsed_annotations,
FieldLayoutInfo* info,
TRAPS);
void update_class_name(Symbol* new_name);
public:

src/hotspot/share/oops/instanceOop.hpp

@@ -37,17 +37,10 @@ class instanceOopDesc : public oopDesc {
// If compressed, the offset of the fields of the instance may not be aligned.
static int base_offset_in_bytes() {
if (UseNewFieldLayout) {
return (UseCompressedClassPointers) ?
klass_gap_offset_in_bytes() :
sizeof(instanceOopDesc);
} else {
// The old layout could not deal with compressed oops being off and compressed
// class pointers being off.
return (UseCompressedOops && UseCompressedClassPointers) ?
klass_gap_offset_in_bytes() :
sizeof(instanceOopDesc);
}
return (UseCompressedClassPointers) ?
klass_gap_offset_in_bytes() :
sizeof(instanceOopDesc);
}
};
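To make the now-unconditional behavior concrete, here is a minimal standalone sketch. The header sizes are assumptions for a typical 64-bit HotSpot build (8-byte mark word, 8-byte uncompressed Klass*, 4-byte compressed one) and are not spelled out in the diff itself:

#include <cstdio>

// Hypothetical helper mirroring base_offset_in_bytes() above; header sizes
// are assumed, not taken from the diff.
static int base_offset(bool use_compressed_class_pointers) {
  const int mark_word_bytes = 8;
  return use_compressed_class_pointers
      ? mark_word_bytes + 4   // narrow Klass*: fields may start unaligned, at 12
      : mark_word_bytes + 8;  // full Klass*:   fields start at 16
}

int main() {
  std::printf("+UseCompressedClassPointers: first field at %d\n", base_offset(true));
  std::printf("-UseCompressedClassPointers: first field at %d\n", base_offset(false));
  return 0;
}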

src/hotspot/share/runtime/arguments.cpp

@@ -522,7 +522,6 @@ static SpecialFlag const special_jvm_flags[] = {
{ "UseMembar", JDK_Version::jdk(10), JDK_Version::jdk(12), JDK_Version::undefined() },
{ "AllowRedefinitionToAddDeleteMethods", JDK_Version::jdk(13), JDK_Version::undefined(), JDK_Version::undefined() },
{ "FlightRecorder", JDK_Version::jdk(13), JDK_Version::undefined(), JDK_Version::undefined() },
{ "UseNewFieldLayout", JDK_Version::jdk(15), JDK_Version::jdk(16), JDK_Version::jdk(17) },
{ "ForceNUMA", JDK_Version::jdk(15), JDK_Version::jdk(16), JDK_Version::jdk(17) },
{ "UseBiasedLocking", JDK_Version::jdk(15), JDK_Version::jdk(16), JDK_Version::jdk(17) },
{ "BiasedLockingStartupDelay", JDK_Version::jdk(15), JDK_Version::jdk(16), JDK_Version::jdk(17) },
@@ -552,6 +551,7 @@ static SpecialFlag const special_jvm_flags[] = {
{ "UseOprofile", JDK_Version::undefined(), JDK_Version::jdk(16), JDK_Version::jdk(17) },
#endif
{ "PrintVMQWaitTime", JDK_Version::jdk(15), JDK_Version::jdk(16), JDK_Version::jdk(17) },
{ "UseNewFieldLayout", JDK_Version::jdk(15), JDK_Version::jdk(16), JDK_Version::jdk(17) },
#ifdef TEST_VERIFY_SPECIAL_JVM_FLAGS
// These entries will generate build errors. Their purpose is to test the macros.
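// Annotation (not original source): each entry in this table encodes a flag's
// lifecycle. A simplified model, with field names following the SpecialFlag
// struct defined earlier in arguments.cpp:
//
//   struct SpecialFlag {
//     const char* name;          // flag name
//     JDK_Version deprecated_in; // release that starts warning; flag still works
//     JDK_Version obsolete_in;   // release that ignores the flag, with a warning
//     JDK_Version expired_in;    // release where using the flag is an error
//   };
//
// The UseNewFieldLayout entry above therefore reads: deprecated in JDK 15,
// obsolete in JDK 16, expired in JDK 17.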

src/hotspot/share/runtime/globals.hpp

@@ -2455,9 +2455,6 @@ const size_t minimumSymbolTableSize = 1024;
experimental(bool, UseFastUnorderedTimeStamps, false, \
"Use platform unstable time where supported for timestamps only") \
\
product(bool, UseNewFieldLayout, true, \
"(Deprecated) Use new algorithm to compute field layouts") \
\
product(bool, UseEmptySlotsInSupers, true, \
"Allow allocating fields in empty slots of super-classes") \
\
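// Annotation (not original source): a hedged reading of the product(...)
// entries above. Each declares a command-line-settable VM flag as
//   product(<type>, <name>, <default>, <doc>)
// so, for example, UseEmptySlotsInSupers defaults to true and can be toggled
// with: java -XX:-UseEmptySlotsInSupers ...
// Deleting the UseNewFieldLayout entry here removes the option itself; the
// name is now recognized only by the obsolete-flag table in arguments.cpp.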