8311248: Refactor CodeCache::initialize_heaps to simplify adding new CodeCache segments

Reviewed-by: thartmann
Boris Ulasevich 2024-04-10 06:29:44 +00:00
parent bab70193dd
commit d037a597a9
5 changed files with 227 additions and 151 deletions
src/hotspot/share
test/hotspot/jtreg/compiler/codecache

@@ -176,143 +176,122 @@ GrowableArray<CodeHeap*>* CodeCache::_heaps = new(mtCode) GrowableArray<CodeHeap
GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
void CodeCache::check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set) {
size_t total_size = non_nmethod_size + profiled_size + non_profiled_size;
// Prepare error message
const char* error = "Invalid code heap sizes";
err_msg message("NonNMethodCodeHeapSize (" SIZE_FORMAT "K) + ProfiledCodeHeapSize (" SIZE_FORMAT "K)"
" + NonProfiledCodeHeapSize (" SIZE_FORMAT "K) = " SIZE_FORMAT "K",
non_nmethod_size/K, profiled_size/K, non_profiled_size/K, total_size/K);
if (total_size > cache_size) {
// Some code heap sizes were explicitly set: total_size must be <= cache_size
message.append(" is greater than ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
vm_exit_during_initialization(error, message);
} else if (all_set && total_size != cache_size) {
// All code heap sizes were explicitly set: total_size must equal cache_size
message.append(" is not equal to ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
vm_exit_during_initialization(error, message);
static void check_min_size(const char* codeheap, size_t size, size_t required_size) {
if (size < required_size) {
log_debug(codecache)("Code heap (%s) size " SIZE_FORMAT "K below required minimal size " SIZE_FORMAT "K",
codeheap, size/K, required_size/K);
err_msg title("Not enough space in %s to run VM", codeheap);
err_msg message(SIZE_FORMAT "K < " SIZE_FORMAT "K", size/K, required_size/K);
vm_exit_during_initialization(title, message);
}
}
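// Example (hypothetical sizes): check_min_size("non-nmethod code heap", 1*M, 4*M)
// would exit the VM with the title "Not enough space in non-nmethod code heap
// to run VM" and the message "1024K < 4096K".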
struct CodeHeapInfo {
size_t size;
bool set;
bool enabled;
};
static void set_size_of_unset_code_heap(CodeHeapInfo* heap, size_t available_size, size_t used_size, size_t min_size) {
assert(!heap->set, "sanity");
heap->size = (available_size > (used_size + min_size)) ? (available_size - used_size) : min_size;
}
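// Worked example (hypothetical sizes): with available_size = 100*M,
// used_size = 20*M and min_size = 4*K, the unset heap receives
// available_size - used_size = 80M; if less than min_size of headroom
// remained beyond used_size, the heap would fall back to min_size instead.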
void CodeCache::initialize_heaps() {
bool non_nmethod_set = FLAG_IS_CMDLINE(NonNMethodCodeHeapSize);
bool profiled_set = FLAG_IS_CMDLINE(ProfiledCodeHeapSize);
bool non_profiled_set = FLAG_IS_CMDLINE(NonProfiledCodeHeapSize);
const size_t ps = page_size(false, 8);
const size_t min_size = MAX2(os::vm_allocation_granularity(), ps);
const size_t cache_size = ReservedCodeCacheSize;
size_t non_nmethod_size = NonNMethodCodeHeapSize;
size_t profiled_size = ProfiledCodeHeapSize;
size_t non_profiled_size = NonProfiledCodeHeapSize;
// Check if total size set via command line flags exceeds the reserved size
check_heap_sizes((non_nmethod_set ? non_nmethod_size : min_size),
(profiled_set ? profiled_size : min_size),
(non_profiled_set ? non_profiled_size : min_size),
cache_size,
non_nmethod_set && profiled_set && non_profiled_set);
// Determine size of compiler buffers
size_t code_buffers_size = 0;
#ifdef COMPILER1
// C1 temporary code buffers (see Compiler::init_buffer_blob())
const int c1_count = CompilationPolicy::c1_count();
code_buffers_size += c1_count * Compiler::code_buffer_size();
#endif
#ifdef COMPILER2
// C2 scratch buffers (see Compile::init_scratch_buffer_blob())
const int c2_count = CompilationPolicy::c2_count();
// Initial size of constant table (this may be increased if a compiled method needs more space)
code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
#endif
CodeHeapInfo non_nmethod = {NonNMethodCodeHeapSize, FLAG_IS_CMDLINE(NonNMethodCodeHeapSize), true};
CodeHeapInfo profiled = {ProfiledCodeHeapSize, FLAG_IS_CMDLINE(ProfiledCodeHeapSize), true};
CodeHeapInfo non_profiled = {NonProfiledCodeHeapSize, FLAG_IS_CMDLINE(NonProfiledCodeHeapSize), true};
// Increase default non_nmethod_size to account for compiler buffers
if (!non_nmethod_set) {
non_nmethod_size += code_buffers_size;
}
// Calculate default CodeHeap sizes if not set by user
if (!non_nmethod_set && !profiled_set && !non_profiled_set) {
// Leave room for the other two parts of the code cache
const size_t max_non_nmethod_size = cache_size - 2 * min_size;
// Check if we have enough space for the non-nmethod code heap
if (max_non_nmethod_size >= non_nmethod_size) {
// Use the default value for non_nmethod_size and one half of the
// remaining size for non-profiled and one half for profiled methods
size_t remaining_size = cache_size - non_nmethod_size;
profiled_size = remaining_size / 2;
non_profiled_size = remaining_size - profiled_size;
} else {
// Use all space for the non-nmethod heap and set other heaps to minimal size
non_nmethod_size = max_non_nmethod_size;
profiled_size = min_size;
non_profiled_size = min_size;
}
} else if (!non_nmethod_set || !profiled_set || !non_profiled_set) {
// The user explicitly set some code heap sizes. Increase or decrease the (default)
// sizes of the other code heaps accordingly. First adapt non-profiled and profiled
// code heap sizes and then only change non-nmethod code heap size if still necessary.
intx diff_size = cache_size - (non_nmethod_size + profiled_size + non_profiled_size);
if (non_profiled_set) {
if (!profiled_set) {
// Adapt size of profiled code heap
if (diff_size < 0 && ((intx)profiled_size + diff_size) <= 0) {
// Not enough space available, set to minimum size
diff_size += profiled_size - min_size;
profiled_size = min_size;
} else {
profiled_size += diff_size;
diff_size = 0;
}
}
} else if (profiled_set) {
// Adapt size of non-profiled code heap
if (diff_size < 0 && ((intx)non_profiled_size + diff_size) <= 0) {
// Not enough space available, set to minimum size
diff_size += non_profiled_size - min_size;
non_profiled_size = min_size;
} else {
non_profiled_size += diff_size;
diff_size = 0;
}
} else if (non_nmethod_set) {
// Distribute remaining size between profiled and non-profiled code heaps
diff_size = cache_size - non_nmethod_size;
profiled_size = diff_size / 2;
non_profiled_size = diff_size - profiled_size;
diff_size = 0;
}
if (diff_size != 0) {
// Use non-nmethod code heap for remaining space requirements
assert(!non_nmethod_set && ((intx)non_nmethod_size + diff_size) > 0, "sanity");
non_nmethod_size += diff_size;
}
}
const bool cache_size_set = FLAG_IS_CMDLINE(ReservedCodeCacheSize);
const size_t ps = page_size(false, 8);
const size_t min_size = MAX2(os::vm_allocation_granularity(), ps);
const size_t min_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3); // Make sure we have enough space for VM internal code
size_t cache_size = align_up(ReservedCodeCacheSize, min_size);
// We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
// Prerequisites
if (!heap_available(CodeBlobType::MethodProfiled)) {
non_profiled_size += profiled_size;
profiled_size = 0;
}
// We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
if (!heap_available(CodeBlobType::MethodNonProfiled)) {
non_nmethod_size += non_profiled_size;
non_profiled_size = 0;
}
// Make sure we have enough space for VM internal code
uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
if (non_nmethod_size < min_code_cache_size) {
vm_exit_during_initialization(err_msg(
"Not enough space in non-nmethod code heap to run VM: " SIZE_FORMAT "K < " SIZE_FORMAT "K",
non_nmethod_size/K, min_code_cache_size/K));
// For compatibility reasons, disabled tiered compilation overrides
// segment size even if it is set explicitly.
non_profiled.size += profiled.size;
// Profiled code heap is not available, forcibly set size to 0
profiled.size = 0;
profiled.set = true;
profiled.enabled = false;
}
// Verify sizes and update flag values
assert(non_profiled_size + profiled_size + non_nmethod_size == cache_size, "Invalid code heap sizes");
FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod_size);
FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled_size);
FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled_size);
assert(heap_available(CodeBlobType::MethodNonProfiled), "MethodNonProfiled heap is always available for segmented code heap");
// Print warning if using large pages but not able to use the size given
size_t compiler_buffer_size = 0;
COMPILER1_PRESENT(compiler_buffer_size += CompilationPolicy::c1_count() * Compiler::code_buffer_size());
COMPILER2_PRESENT(compiler_buffer_size += CompilationPolicy::c2_count() * C2Compiler::initial_code_buffer_size());
if (!non_nmethod.set) {
non_nmethod.size += compiler_buffer_size;
}
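// Example (hypothetical thread counts and buffer sizes): with 2 C1 compiler
// threads using 64K buffers and 2 C2 compiler threads using 256K scratch
// buffers, the default non-nmethod segment grows by 2*64K + 2*256K = 640K.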
if (!profiled.set && !non_profiled.set) {
non_profiled.size = profiled.size = (cache_size > non_nmethod.size + 2 * min_size) ?
(cache_size - non_nmethod.size) / 2 : min_size;
}
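// Example (hypothetical sizes): cache_size = 100M, non_nmethod.size = 10M,
// profiled and non-profiled both unset: each receives (100M - 10M) / 2 = 45M.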
if (profiled.set && !non_profiled.set) {
set_size_of_unset_code_heap(&non_profiled, cache_size, non_nmethod.size + profiled.size, min_size);
}
if (!profiled.set && non_profiled.set) {
set_size_of_unset_code_heap(&profiled, cache_size, non_nmethod.size + non_profiled.size, min_size);
}
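// Example (hypothetical sizes): cache_size = 100M, non_nmethod.size = 10M,
// profiled set to 10M, non_profiled unset: non_profiled receives
// 100M - (10M + 10M) = 80M via set_size_of_unset_code_heap().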
// Compatibility.
size_t non_nmethod_min_size = min_cache_size + compiler_buffer_size;
if (!non_nmethod.set && profiled.set && non_profiled.set) {
set_size_of_unset_code_heap(&non_nmethod, cache_size, profiled.size + non_profiled.size, non_nmethod_min_size);
}
size_t total = non_nmethod.size + profiled.size + non_profiled.size;
if (total != cache_size && !cache_size_set) {
log_info(codecache)("ReservedCodeCache size " SIZE_FORMAT "K changed to total segments size NonNMethod "
SIZE_FORMAT "K NonProfiled " SIZE_FORMAT "K Profiled " SIZE_FORMAT "K = " SIZE_FORMAT "K",
cache_size/K, non_nmethod.size/K, non_profiled.size/K, profiled.size/K, total/K);
// Adjust ReservedCodeCacheSize as necessary because it was not set explicitly
cache_size = total;
}
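// Example: if only the three segment sizes are set on the command line
// (say 10M each) and ReservedCodeCacheSize is left at its default,
// cache_size is re-derived here as 10M + 10M + 10M = 30M.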
log_debug(codecache)("Initializing code heaps ReservedCodeCache " SIZE_FORMAT "K NonNMethod " SIZE_FORMAT "K"
" NonProfiled " SIZE_FORMAT "K Profiled " SIZE_FORMAT "K",
cache_size/K, non_nmethod.size/K, non_profiled.size/K, profiled.size/K);
// Validation
// Check minimal required sizes
check_min_size("non-nmethod code heap", non_nmethod.size, non_nmethod_min_size);
if (profiled.enabled) {
check_min_size("profiled code heap", profiled.size, min_size);
}
if (non_profiled.enabled) { // non_profiled.enabled is always ON for segmented code heap, leave it checked for clarity
check_min_size("non-profiled code heap", non_profiled.size, min_size);
}
if (cache_size_set) {
check_min_size("reserved code cache", cache_size, min_cache_size);
}
// ReservedCodeCacheSize was set explicitly, so report an error and abort if it doesn't match the segment sizes
if (total != cache_size && cache_size_set) {
err_msg message("NonNMethodCodeHeapSize (" SIZE_FORMAT "K)", non_nmethod.size/K);
if (profiled.enabled) {
message.append(" + ProfiledCodeHeapSize (" SIZE_FORMAT "K)", profiled.size/K);
}
if (non_profiled.enabled) {
message.append(" + NonProfiledCodeHeapSize (" SIZE_FORMAT "K)", non_profiled.size/K);
}
message.append(" = " SIZE_FORMAT "K", total/K);
message.append((total > cache_size) ? " is greater than " : " is less than ");
message.append("ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
vm_exit_during_initialization("Invalid code heap sizes", message);
}
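// A message composed above might read (hypothetical sizes):
// "NonNMethodCodeHeapSize (10240K) + ProfiledCodeHeapSize (10240K)
//  + NonProfiledCodeHeapSize (10240K) = 30720K is less than
//  ReservedCodeCacheSize (102400K)."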
// Compatibility. Print warning if using large pages but not able to use the size given
if (UseLargePages) {
const size_t lg_ps = page_size(false, 1);
if (ps < lg_ps) {
@@ -324,32 +303,40 @@ void CodeCache::initialize_heaps() {
// Note: if large page support is enabled, min_size is at least the large
// page size. This ensures that the code cache is covered by large pages.
non_nmethod_size = align_up(non_nmethod_size, min_size);
profiled_size = align_down(profiled_size, min_size);
non_profiled_size = align_down(non_profiled_size, min_size);
non_profiled.size += non_nmethod.size & alignment_mask(min_size);
non_profiled.size += profiled.size & alignment_mask(min_size);
non_nmethod.size = align_down(non_nmethod.size, min_size);
profiled.size = align_down(profiled.size, min_size);
non_profiled.size = align_down(non_profiled.size, min_size);
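// Example (hypothetical sizes): with min_size = 64K, a non_nmethod segment of
// 10M + 33K is rounded down to 10M and the 33K remainder is credited to
// non_profiled by the masks above, so the total stays constant up to the
// final align_down of non_profiled itself.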
FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod.size);
FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled.size);
FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled.size);
FLAG_SET_ERGO(ReservedCodeCacheSize, cache_size);
// Reserve one continuous chunk of memory for CodeHeaps and split it into
// parts for the individual heaps. The memory layout looks like this:
// ---------- high -----------
// Non-profiled nmethods
// Non-nmethods
// Profiled nmethods
// ---------- low ------------
ReservedCodeSpace rs = reserve_heap_memory(cache_size, ps);
ReservedSpace profiled_space = rs.first_part(profiled_size);
ReservedSpace rest = rs.last_part(profiled_size);
ReservedSpace non_method_space = rest.first_part(non_nmethod_size);
ReservedSpace non_profiled_space = rest.last_part(non_nmethod_size);
// Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
size_t offset = 0;
if (profiled.enabled) {
ReservedSpace profiled_space = rs.partition(offset, profiled.size);
offset += profiled.size;
// Tier 2 and tier 3 (profiled) methods
add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
}
ReservedSpace non_method_space = rs.partition(offset, non_nmethod.size);
offset += non_nmethod.size;
// Non-nmethods (stubs, adapters, ...)
add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
// Tier 2 and tier 3 (profiled) methods
add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
// Tier 1 and tier 4 (non-profiled) methods and native methods
add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
if (non_profiled.enabled) {
ReservedSpace non_profiled_space = rs.partition(offset, non_profiled.size);
// Tier 1 and tier 4 (non-profiled) methods and native methods
add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
}
}
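// Hypothetical sketch (not part of this commit): with the CodeHeapInfo
// struct and ReservedSpace::partition(), adding a new segment mostly means
// one more descriptor and one more carve. "FooCodeHeapSize" and
// CodeBlobType::MethodFoo are invented names for illustration only.
//
//   CodeHeapInfo foo = {FooCodeHeapSize, FLAG_IS_CMDLINE(FooCodeHeapSize), true};
//   ...
//   if (foo.enabled) {
//     ReservedSpace foo_space = rs.partition(offset, foo.size);
//     offset += foo.size;
//     add_heap(foo_space, "CodeHeap 'foo nmethods'", CodeBlobType::MethodFoo);
//   }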
size_t CodeCache::page_size(bool aligned, size_t min_pages) {

@@ -110,8 +110,7 @@ class CodeCache : AllStatic {
// CodeHeap management
static void initialize_heaps(); // Initializes the CodeHeaps
// Check the code heap sizes set by the user via command line
static void check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set);
// Creates a new heap with the given name and size, containing CodeBlobs of the given type
static void add_heap(ReservedSpace rs, const char* name, CodeBlobType code_blob_type);
static CodeHeap* get_code_heap_containing(void* p); // Returns the CodeHeap containing the given pointer, or nullptr

@@ -314,15 +314,18 @@ ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment)
return result;
}
ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
ReservedSpace ReservedSpace::last_part(size_t partition_size, size_t alignment) {
assert(partition_size <= size(), "partition failed");
ReservedSpace result(base() + partition_size, size() - partition_size,
alignment, page_size(), special(), executable());
return result;
}
ReservedSpace ReservedSpace::partition(size_t offset, size_t partition_size, size_t alignment) {
assert(offset + partition_size <= size(), "partition failed");
ReservedSpace result(base() + offset, partition_size, alignment, page_size(), special(), executable());
return result;
}
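// Minimal usage sketch (hypothetical sizes): unlike first_part()/last_part(),
// partition() leaves the parent space intact, so adjacent pieces can be
// carved from one reservation by advancing an offset:
//
//   size_t offset = 0;
//   ReservedSpace a = rs.partition(offset, 4*M);
//   offset += 4*M;
//   ReservedSpace b = rs.partition(offset, 8*M);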
size_t ReservedSpace::page_align_size_up(size_t size) {
return align_up(size, os::vm_page_size());

@@ -95,10 +95,12 @@ class ReservedSpace {
// This splits the space into two spaces, the first part of which will be returned.
ReservedSpace first_part(size_t partition_size, size_t alignment);
ReservedSpace last_part (size_t partition_size, size_t alignment);
ReservedSpace partition (size_t offset, size_t partition_size, size_t alignment);
// These simply call the above using the default alignment.
inline ReservedSpace first_part(size_t partition_size);
inline ReservedSpace last_part (size_t partition_size);
inline ReservedSpace partition (size_t offset, size_t partition_size);
// Alignment
static size_t page_align_size_up(size_t size);
@@ -113,8 +115,7 @@
size_t page_size, bool special, bool executable);
};
ReservedSpace
ReservedSpace::first_part(size_t partition_size)
ReservedSpace ReservedSpace::first_part(size_t partition_size)
{
return first_part(partition_size, alignment());
}
@@ -124,6 +125,11 @@ ReservedSpace ReservedSpace::last_part(size_t partition_size)
return last_part(partition_size, alignment());
}
ReservedSpace ReservedSpace::partition(size_t offset, size_t partition_size)
{
return partition(offset, partition_size, alignment());
}
// Class encapsulating behavior specific of memory space reserved for Java heap.
class ReservedHeapSpace : public ReservedSpace {
private:

@@ -82,6 +82,36 @@ public class CheckSegmentedCodeCache {
out.shouldHaveExitValue(1);
}
private static void verifyCodeHeapSize(ProcessBuilder pb, String heapName, long heapSize) throws Exception {
OutputAnalyzer out = new OutputAnalyzer(pb.start());
out.shouldHaveExitValue(0);
long actualHeapSize = Long.parseLong(out.firstMatch(heapName + "\\s+=\\s(\\d+)", 1));
if (heapSize != actualHeapSize) {
throw new RuntimeException("Unexpected " + heapName + " size: " + actualHeapSize + " != " + heapSize);
}
// Sanity checks:
// - segment sizes are aligned to at least 1KB
// - sum of segment sizes equals ReservedCodeCacheSize
long nonNMethodCodeHeapSize = Long.parseLong(out.firstMatch("NonNMethodCodeHeapSize\\s+=\\s(\\d+)", 1));
long nonProfiledCodeHeapSize = Long.parseLong(out.firstMatch("NonProfiledCodeHeapSize\\s+=\\s(\\d+)", 1));
long profiledCodeHeapSize = Long.parseLong(out.firstMatch(" ProfiledCodeHeapSize\\s+=\\s(\\d+)", 1));
long reservedCodeCacheSize = Long.parseLong(out.firstMatch("ReservedCodeCacheSize\\s+=\\s(\\d+)", 1));
if (reservedCodeCacheSize != nonNMethodCodeHeapSize + nonProfiledCodeHeapSize + profiledCodeHeapSize) {
throw new RuntimeException("Unexpected segments size sum: " + reservedCodeCacheSize + " != " +
nonNMethodCodeHeapSize + "+" + nonProfiledCodeHeapSize + "+" + profiledCodeHeapSize);
}
if ((reservedCodeCacheSize % 1024 != 0) || (nonNMethodCodeHeapSize % 1024 != 0) ||
(nonProfiledCodeHeapSize % 1024 != 0) || (profiledCodeHeapSize % 1024 != 0)) {
throw new RuntimeException("Unexpected segments size alignment: " + reservedCodeCacheSize + ", " +
nonNMethodCodeHeapSize + ", " + nonProfiledCodeHeapSize + ", " + profiledCodeHeapSize);
}
}
/**
* Check the result of segmented code cache related VM options.
*/
@@ -160,9 +190,60 @@ public class CheckSegmentedCodeCache {
// minimum size: CodeCacheMinimumUseSpace DEBUG_ONLY(* 3)
long minSize = (Platform.isDebugBuild() ? 3 : 1) * minUseSpace;
pb = ProcessTools.createLimitedTestJavaProcessBuilder("-XX:+SegmentedCodeCache",
"-XX:NonNMethodCodeHeapSize=" + minSize,
"-XX:ReservedCodeCacheSize=" + minSize,
"-XX:InitialCodeCacheSize=100K",
"-version");
failsWith(pb, "Not enough space in non-nmethod code heap to run VM");
// Try different combinations of segment sizes
// Fails if there is not enough space for the code cache:
// all segments are set to their minimum allowed values, but the VM still fails
pb = ProcessTools.createLimitedTestJavaProcessBuilder("-XX:+SegmentedCodeCache",
"-XX:ReservedCodeCacheSize=" + minSize,
"-XX:InitialCodeCacheSize=100K",
"-version");
failsWith(pb, "Invalid code heap sizes");
// Reserved code cache size is set but does not equal the sum of the
// explicitly specified segment sizes - fails
pb = ProcessTools.createLimitedTestJavaProcessBuilder("-XX:+SegmentedCodeCache",
"-XX:ReservedCodeCacheSize=100M",
"-XX:NonNMethodCodeHeapSize=10M",
"-XX:ProfiledCodeHeapSize=10M",
"-XX:NonProfiledCodeHeapSize=10M",
"-version");
failsWith(pb, "Invalid code heap sizes");
// Reserved code cache size is not set - it is automatically adjusted to the sum
// of the explicitly specified segment sizes (10M + 10M + 10M = 30M = 31457280 bytes)
pb = ProcessTools.createLimitedTestJavaProcessBuilder("-XX:+SegmentedCodeCache",
"-XX:NonNMethodCodeHeapSize=10M",
"-XX:ProfiledCodeHeapSize=10M",
"-XX:NonProfiledCodeHeapSize=10M",
"-XX:+PrintFlagsFinal",
"-version");
verifyCodeHeapSize(pb, "ReservedCodeCacheSize", 31457280);
// Reserved code cache size and the NonNMethod segment size are set; each of the
// two other segments is automatically sized to half of the remaining space:
// (100M - 10M) / 2 = 45M = 47185920 bytes
pb = ProcessTools.createLimitedTestJavaProcessBuilder("-XX:+SegmentedCodeCache",
"-XX:ReservedCodeCacheSize=100M",
"-XX:NonNMethodCodeHeapSize=10M",
"-XX:+PrintFlagsFinal",
"-version");
verifyCodeHeapSize(pb, " ProfiledCodeHeapSize", 47185920);
// Reserved code cache size is set but NonNMethodCodeHeapSize is not, so it receives
// the remaining space (100M - 10M - 10M = 80M = 83886080 bytes); its required minimum
// is calculated based on the number of compiler threads
pb = ProcessTools.createLimitedTestJavaProcessBuilder("-XX:+SegmentedCodeCache",
"-XX:ReservedCodeCacheSize=100M",
"-XX:ProfiledCodeHeapSize=10M",
"-XX:NonProfiledCodeHeapSize=10M",
"-XX:+PrintFlagsFinal",
"-version");
verifyCodeHeapSize(pb, "NonNMethodCodeHeapSize", 83886080);
}
}