8036860: Pad and cache-align the BiasedMappedArray

Pad and cache-align BiasedMappedArray instances by default to avoid performance variability due to false sharing, as instances of this data structure are typically used in performance-sensitive code.

Reviewed-by: brutisso, stefank
Thomas Schatzl 2014-03-17 13:07:55 +01:00
parent 483ea400a5
commit a07b2194f7
4 changed files with 28 additions and 6 deletions
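
For context (an aside, not part of the commit): false sharing occurs when logically independent data used by different threads lands on the same cache line, so every write by one thread invalidates that line in the other cores' caches. A minimal standalone C++ sketch of the effect and of the padding cure, assuming a 64-byte cache line; the names are illustrative, and this is not HotSpot code. Unpadded shows the problem, Padded the fix:

#include <atomic>
#include <cstddef>
#include <thread>

constexpr size_t kCacheLineSize = 64; // assumed cache line size

struct Unpadded {
  std::atomic<long> a;   // a and b likely share one cache line:
  std::atomic<long> b;   // concurrent updates cause false sharing
};

struct Padded {
  alignas(kCacheLineSize) std::atomic<long> a; // each counter owns its line
  alignas(kCacheLineSize) std::atomic<long> b;
};

int main() {
  Padded p{};
  std::thread t1([&p] { for (int i = 0; i < 1000000; i++) p.a++; });
  std::thread t2([&p] { for (int i = 0; i < 1000000; i++) p.b++; });
  t1.join();
  t2.join();
  return 0;
}

With Unpadded, the two threads contend on one cache line even though they never touch the same variable; Padded removes the contention. The commit applies the same idea to whole BiasedMappedArray allocations.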

src/share/vm/gc_implementation/g1/g1BiasedArray.cpp

@@ -24,6 +24,14 @@
 #include "precompiled.hpp"
 #include "gc_implementation/g1/g1BiasedArray.hpp"
+#include "memory/padded.inline.hpp"
+
+// Allocate a new array, generic version.
+address G1BiasedMappedArrayBase::create_new_base_array(size_t length, size_t elem_size) {
+  assert(length > 0, "just checking");
+  assert(elem_size > 0, "just checking");
+  return PaddedPrimitiveArray<u_char, mtGC>::create_unfreeable(length * elem_size);
+}
 
 #ifndef PRODUCT
 void G1BiasedMappedArrayBase::verify_index(idx_t index) const {

src/share/vm/gc_implementation/g1/g1BiasedArray.hpp

@@ -25,8 +25,8 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
 
+#include "memory/allocation.hpp"
 #include "utilities/debug.hpp"
-#include "memory/allocation.inline.hpp"
 
 // Implements the common base functionality for arrays that contain provisions
 // for accessing its elements using a biased index.
@@ -48,11 +48,7 @@ protected:
     _bias(0), _shift_by(0) { }
 
   // Allocate a new array, generic version.
-  static address create_new_base_array(size_t length, size_t elem_size) {
-    assert(length > 0, "just checking");
-    assert(elem_size > 0, "just checking");
-    return NEW_C_HEAP_ARRAY(u_char, length * elem_size, mtGC);
-  }
+  static address create_new_base_array(size_t length, size_t elem_size);
 
   // Initialize the members of this class. The biased start address of this array
   // is the bias (in elements) multiplied by the element size.
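
An aside on the comment above: the "bias" lets the array be indexed directly with a shifted address instead of first subtracting the start of the covered address space. A simplified sketch of the idea, not the actual HotSpot template; names and types here are illustrative:

#include <cstddef>
#include <cstdint>

struct BiasedArraySketch {
  int*   base;        // start of the real allocation
  int*   biased_base; // base minus the bias, so addresses index directly
  size_t shift;       // log2 of the bytes covered by one element

  void initialize(int* storage, uintptr_t space_start, size_t shift_by) {
    shift  = shift_by;
    base   = storage;
    size_t bias = space_start >> shift;   // bias, in elements
    biased_base = storage - bias;         // the biased start address
  }

  // One shift on the hot path, no subtraction of the space start.
  int& at(uintptr_t addr) { return biased_base[addr >> shift]; }
};

The element for an address addr in the covered space is biased_base[addr >> shift], because biased_base already has the bias (in elements, i.e. bias times the element size in bytes) subtracted out.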

src/share/vm/memory/padded.hpp

@@ -101,4 +101,12 @@ class Padded2DArray {
   static T** create_unfreeable(uint rows, uint columns, size_t* allocation_size = NULL);
 };
 
+// Helper class to create an array of T objects. The array as a whole will
+// start at a multiple of alignment and its size will be aligned to alignment.
+template <class T, MEMFLAGS flags, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
+class PaddedPrimitiveArray {
+ public:
+  static T* create_unfreeable(size_t length);
+};
+
 #endif // SHARE_VM_MEMORY_PADDED_HPP
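
A hypothetical usage fragment for the new helper (it mirrors the call added in g1BiasedArray.cpp above; the variable name and length are made up for illustration):

// Allocate 4096 bytes of GC-tracked C-heap memory; the returned pointer
// starts on a cache-line boundary and the block is padded to a full line.
u_char* bytes = PaddedPrimitiveArray<u_char, mtGC>::create_unfreeable(4096);

As the name says, the memory is never freed; the helper targets long-lived, performance-sensitive arrays such as the BiasedMappedArray backing store.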

src/share/vm/memory/padded.inline.hpp

@@ -76,3 +76,13 @@ T** Padded2DArray<T, flags, alignment>::create_unfreeable(uint rows, uint columns, size_t* allocation_size) {
 
   return result;
 }
+
+template <class T, MEMFLAGS flags, size_t alignment>
+T* PaddedPrimitiveArray<T, flags, alignment>::create_unfreeable(size_t length) {
+  // Allocate a chunk of memory large enough to allow for some alignment.
+  void* chunk = AllocateHeap(length * sizeof(T) + alignment, flags);
+
+  memset(chunk, 0, length * sizeof(T) + alignment);
+
+  return (T*)align_pointer_up(chunk, alignment);
+}
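
Here align_pointer_up rounds the chunk pointer up to the next multiple of alignment, which is why alignment extra bytes are allocated and cleared above. A standalone sketch of the usual power-of-two round-up, assumed to match what HotSpot's align_pointer_up does; the function name below is illustrative:

#include <cassert>
#include <cstdint>

inline void* align_pointer_up_sketch(void* p, uintptr_t alignment) {
  // Only power-of-two alignments: exactly one bit set.
  assert((alignment & (alignment - 1)) == 0);
  uintptr_t addr = reinterpret_cast<uintptr_t>(p);
  // Adding (alignment - 1) and clearing the low bits rounds up;
  // an already-aligned pointer is returned unchanged.
  return reinterpret_cast<void*>((addr + alignment - 1) & ~(alignment - 1));
}

Because at most alignment - 1 bytes are skipped at the front, the length * sizeof(T) payload always fits inside the length * sizeof(T) + alignment bytes that were allocated.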