8224203: Remove need to specify type when using FLAG_SET macros
Reviewed-by: rehn, pliden, coleenp
commit 4914e0ee95
parent c9846fce19
Changed paths:
  src/hotspot
    cpu
    os
    share
      aot
      code
      compiler
      gc
        cms
        g1
        parallel
        shared
      jfr/recorder
      memory
      runtime
  test/hotspot/gtest
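The change below is mechanical: every FLAG_SET_ERGO / FLAG_SET_CMDLINE / FLAG_SET_MGMT call site drops the leading type argument, and the macro picks the type up from the flag itself. As a rough illustration of why the explicit type is redundant — a minimal standalone C++ sketch with invented helper names, not HotSpot's actual JVMFlag machinery — a setter templated on the flag variable deduces the flag's declared type on its own:

    #include <cassert>
    #include <cstddef>

    // Stand-ins for two JVM flags with different declared types.
    bool        UsePopCountInstruction = false;
    std::size_t MaxNewSize             = 0;

    // Old style: the call site has to repeat the flag's type.
    #define OLD_FLAG_SET_ERGO(type, name, value) \
      do { name = (type)(value); } while (0)

    // New style: a function template deduces the type from the flag variable,
    // so the call site only names the flag and the value.
    template <typename T, typename V>
    void set_flag_ergo(T& flag, V value) {
      flag = static_cast<T>(value);  // conversion driven by the flag's declared type
    }
    #define FLAG_SET_ERGO(name, value) set_flag_ergo(name, value)

    int main() {
      OLD_FLAG_SET_ERGO(bool, UsePopCountInstruction, true);  // before: type spelled out
      FLAG_SET_ERGO(MaxNewSize, 64 * 1024 * 1024);            // after: type inferred
      assert(UsePopCountInstruction);
      assert(MaxNewSize == 64u * 1024 * 1024);
      return 0;
    }

In the real sources the macros presumably route the value through the JVMFlag machinery rather than writing the variable directly; the sketch only shows the type-deduction idea that makes the explicit type argument unnecessary. In the reconstructed diff hunks below, "-" lines are the old call sites (with the type) and "+" lines are the new ones (without it).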
@@ -36,18 +36,18 @@ void Compile::pd_compiler2_init() {
   // Power7 and later.
   if (PowerArchitecturePPC64 > 6) {
     if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
-      FLAG_SET_ERGO(bool, UsePopCountInstruction, true);
+      FLAG_SET_ERGO(UsePopCountInstruction, true);
     }
   }

   if (PowerArchitecturePPC64 == 6) {
     if (FLAG_IS_DEFAULT(InsertEndGroupPPC64)) {
-      FLAG_SET_ERGO(bool, InsertEndGroupPPC64, true);
+      FLAG_SET_ERGO(InsertEndGroupPPC64, true);
     }
   }

   if (!VM_Version::has_isel() && FLAG_IS_DEFAULT(ConditionalMoveLimit)) {
-    FLAG_SET_ERGO(intx, ConditionalMoveLimit, 0);
+    FLAG_SET_ERGO(ConditionalMoveLimit, 0);
   }

   if (OptimizeFill) {
@@ -67,17 +67,17 @@ void VM_Version::initialize() {
   // If PowerArchitecturePPC64 hasn't been specified explicitly determine from features.
   if (FLAG_IS_DEFAULT(PowerArchitecturePPC64)) {
    if (VM_Version::has_darn()) {
-      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 9);
+      FLAG_SET_ERGO(PowerArchitecturePPC64, 9);
    } else if (VM_Version::has_lqarx()) {
-      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 8);
+      FLAG_SET_ERGO(PowerArchitecturePPC64, 8);
    } else if (VM_Version::has_popcntw()) {
-      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 7);
+      FLAG_SET_ERGO(PowerArchitecturePPC64, 7);
    } else if (VM_Version::has_cmpb()) {
-      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 6);
+      FLAG_SET_ERGO(PowerArchitecturePPC64, 6);
    } else if (VM_Version::has_popcntb()) {
-      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 5);
+      FLAG_SET_ERGO(PowerArchitecturePPC64, 5);
    } else {
-      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 0);
+      FLAG_SET_ERGO(PowerArchitecturePPC64, 0);
    }
  }

@@ -103,15 +103,15 @@ void VM_Version::initialize() {
     MSG(TrapBasedICMissChecks);
     MSG(TrapBasedNotEntrantChecks);
     MSG(TrapBasedNullChecks);
-    FLAG_SET_ERGO(bool, TrapBasedNotEntrantChecks, false);
-    FLAG_SET_ERGO(bool, TrapBasedNullChecks, false);
-    FLAG_SET_ERGO(bool, TrapBasedICMissChecks, false);
+    FLAG_SET_ERGO(TrapBasedNotEntrantChecks, false);
+    FLAG_SET_ERGO(TrapBasedNullChecks, false);
+    FLAG_SET_ERGO(TrapBasedICMissChecks, false);
   }

 #ifdef COMPILER2
   if (!UseSIGTRAP) {
     MSG(TrapBasedRangeChecks);
-    FLAG_SET_ERGO(bool, TrapBasedRangeChecks, false);
+    FLAG_SET_ERGO(TrapBasedRangeChecks, false);
   }

   // On Power6 test for section size.
@@ -123,7 +123,7 @@ void VM_Version::initialize() {

   if (PowerArchitecturePPC64 >= 8) {
     if (FLAG_IS_DEFAULT(SuperwordUseVSX)) {
-      FLAG_SET_ERGO(bool, SuperwordUseVSX, true);
+      FLAG_SET_ERGO(SuperwordUseVSX, true);
     }
   } else {
     if (SuperwordUseVSX) {
@@ -135,10 +135,10 @@ void VM_Version::initialize() {

   if (PowerArchitecturePPC64 >= 9) {
     if (FLAG_IS_DEFAULT(UseCountTrailingZerosInstructionsPPC64)) {
-      FLAG_SET_ERGO(bool, UseCountTrailingZerosInstructionsPPC64, true);
+      FLAG_SET_ERGO(UseCountTrailingZerosInstructionsPPC64, true);
     }
     if (FLAG_IS_DEFAULT(UseCharacterCompareIntrinsics)) {
-      FLAG_SET_ERGO(bool, UseCharacterCompareIntrinsics, true);
+      FLAG_SET_ERGO(UseCharacterCompareIntrinsics, true);
     }
   } else {
     if (UseCountTrailingZerosInstructionsPPC64) {
@@ -139,12 +139,12 @@ void VM_Version::initialize() {
   if (FLAG_IS_DEFAULT(AllocatePrefetchLines)) {
     const int ap_lns = AllocatePrefetchLines;
     const int ap_inc = cache_line_size < 64 ? ap_lns : (ap_lns + 1) / 2;
-    FLAG_SET_ERGO(intx, AllocatePrefetchLines, ap_lns + ap_inc);
+    FLAG_SET_ERGO(AllocatePrefetchLines, ap_lns + ap_inc);
   }
   if (FLAG_IS_DEFAULT(AllocateInstancePrefetchLines)) {
     const int ip_lns = AllocateInstancePrefetchLines;
     const int ip_inc = cache_line_size < 64 ? ip_lns : (ip_lns + 1) / 2;
-    FLAG_SET_ERGO(intx, AllocateInstancePrefetchLines, ip_lns + ip_inc);
+    FLAG_SET_ERGO(AllocateInstancePrefetchLines, ip_lns + ip_inc);
   }
 }
 #endif /* COMPILER2 */
@@ -145,7 +145,7 @@ static bool initialize_elapsed_counter() {
 static bool ergonomics() {
   const bool invtsc_support = Rdtsc::is_supported();
   if (FLAG_IS_DEFAULT(UseFastUnorderedTimeStamps) && invtsc_support) {
-    FLAG_SET_ERGO(bool, UseFastUnorderedTimeStamps, true);
+    FLAG_SET_ERGO(UseFastUnorderedTimeStamps, true);
   }

   bool ft_enabled = UseFastUnorderedTimeStamps && invtsc_support;
@@ -3447,7 +3447,7 @@ void os::init(void) {
       // fall back to 4K paged mode and use mmap for everything.
       trcVerbose("4K page mode");
       Aix::_page_size = 4*K;
-      FLAG_SET_ERGO(bool, Use64KPages, false);
+      FLAG_SET_ERGO(Use64KPages, false);
     }
   } else {
     // datapsize = 64k. Data segment, thread stacks are 64k paged.
@@ -3457,11 +3457,11 @@ void os::init(void) {
     assert0(g_multipage_support.can_use_64K_pages);
     Aix::_page_size = 64*K;
     trcVerbose("64K page mode");
-    FLAG_SET_ERGO(bool, Use64KPages, true);
+    FLAG_SET_ERGO(Use64KPages, true);
   }

   // For now UseLargePages is just ignored.
-  FLAG_SET_ERGO(bool, UseLargePages, false);
+  FLAG_SET_ERGO(UseLargePages, false);
   _page_sizes[0] = 0;

   // debug trace
@@ -4072,7 +4072,7 @@ void os::init(void) {
   init_page_sizes((size_t) win32::vm_page_size());

   // This may be overridden later when argument processing is done.
-  FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation, false);
+  FLAG_SET_ERGO(UseLargePagesIndividualAllocation, false);

   // Initialize main_process and main_thread
   main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
@@ -54,7 +54,7 @@ public:
   static void add_heap(AOTCodeHeap *heap);
   static void add_library(AOTLib *lib);
 #endif
-  static void initialize() NOT_AOT({ FLAG_SET_ERGO(bool, UseAOT, false); });
+  static void initialize() NOT_AOT({ FLAG_SET_ERGO(UseAOT, false); });

   static void universe_init() NOT_AOT_RETURN;
   static void set_narrow_oop_shift() NOT_AOT_RETURN;
@@ -283,9 +283,9 @@ void CodeCache::initialize_heaps() {

   // Verify sizes and update flag values
   assert(non_profiled_size + profiled_size + non_nmethod_size == cache_size, "Invalid code heap sizes");
-  FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, non_nmethod_size);
-  FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
-  FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);
+  FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod_size);
+  FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled_size);
+  FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled_size);

   // If large page support is enabled, align code heaps according to large
   // page size to make sure that code cache is covered by large pages.
@@ -941,9 +941,9 @@ void CodeCache::initialize() {
     initialize_heaps();
   } else {
     // Use a single code heap
-    FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, 0);
-    FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
-    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
+    FLAG_SET_ERGO(NonNMethodCodeHeapSize, 0);
+    FLAG_SET_ERGO(ProfiledCodeHeapSize, 0);
+    FLAG_SET_ERGO(NonProfiledCodeHeapSize, 0);
     ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
     add_heap(rs, "CodeCache", CodeBlobType::All);
   }
@@ -117,38 +117,38 @@ void set_client_compilation_mode() {
   Compilation_mode = CompMode_client;
   CompLevel_highest_tier = CompLevel_simple;
   CompLevel_initial_compile = CompLevel_simple;
-  FLAG_SET_ERGO(bool, TieredCompilation, false);
-  FLAG_SET_ERGO(bool, ProfileInterpreter, false);
+  FLAG_SET_ERGO(TieredCompilation, false);
+  FLAG_SET_ERGO(ProfileInterpreter, false);
 #if INCLUDE_JVMCI
-  FLAG_SET_ERGO(bool, EnableJVMCI, false);
-  FLAG_SET_ERGO(bool, UseJVMCICompiler, false);
+  FLAG_SET_ERGO(EnableJVMCI, false);
+  FLAG_SET_ERGO(UseJVMCICompiler, false);
 #endif
 #if INCLUDE_AOT
-  FLAG_SET_ERGO(bool, UseAOT, false);
+  FLAG_SET_ERGO(UseAOT, false);
 #endif
   if (FLAG_IS_DEFAULT(NeverActAsServerClassMachine)) {
-    FLAG_SET_ERGO(bool, NeverActAsServerClassMachine, true);
+    FLAG_SET_ERGO(NeverActAsServerClassMachine, true);
   }
   if (FLAG_IS_DEFAULT(InitialCodeCacheSize)) {
-    FLAG_SET_ERGO(uintx, InitialCodeCacheSize, 160*K);
+    FLAG_SET_ERGO(InitialCodeCacheSize, 160*K);
   }
   if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
-    FLAG_SET_ERGO(uintx, ReservedCodeCacheSize, 32*M);
+    FLAG_SET_ERGO(ReservedCodeCacheSize, 32*M);
   }
   if (FLAG_IS_DEFAULT(NonProfiledCodeHeapSize)) {
-    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 27*M);
+    FLAG_SET_ERGO(NonProfiledCodeHeapSize, 27*M);
   }
   if (FLAG_IS_DEFAULT(ProfiledCodeHeapSize)) {
-    FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
+    FLAG_SET_ERGO(ProfiledCodeHeapSize, 0);
   }
   if (FLAG_IS_DEFAULT(NonNMethodCodeHeapSize)) {
-    FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, 5*M);
+    FLAG_SET_ERGO(NonNMethodCodeHeapSize, 5*M);
   }
   if (FLAG_IS_DEFAULT(CodeCacheExpansionSize)) {
-    FLAG_SET_ERGO(uintx, CodeCacheExpansionSize, 32*K);
+    FLAG_SET_ERGO(CodeCacheExpansionSize, 32*K);
   }
   if (FLAG_IS_DEFAULT(MetaspaceSize)) {
-    FLAG_SET_ERGO(size_t, MetaspaceSize, MIN2(12*M, MaxMetaspaceSize));
+    FLAG_SET_ERGO(MetaspaceSize, MIN2(12*M, MaxMetaspaceSize));
   }
   if (FLAG_IS_DEFAULT(MaxRAM)) {
     // Do not use FLAG_SET_ERGO to update MaxRAM, as this will impact
@@ -156,13 +156,13 @@ void set_client_compilation_mode() {
     FLAG_SET_DEFAULT(MaxRAM, 1ULL*G);
   }
   if (FLAG_IS_DEFAULT(CompileThreshold)) {
-    FLAG_SET_ERGO(intx, CompileThreshold, 1500);
+    FLAG_SET_ERGO(CompileThreshold, 1500);
   }
   if (FLAG_IS_DEFAULT(OnStackReplacePercentage)) {
-    FLAG_SET_ERGO(intx, OnStackReplacePercentage, 933);
+    FLAG_SET_ERGO(OnStackReplacePercentage, 933);
   }
   if (FLAG_IS_DEFAULT(CICompilerCount)) {
-    FLAG_SET_ERGO(intx, CICompilerCount, 1);
+    FLAG_SET_ERGO(CICompilerCount, 1);
   }
 }

@@ -177,7 +177,7 @@ bool compilation_mode_selected() {
 void select_compilation_mode_ergonomically() {
 #if defined(_WINDOWS) && !defined(_LP64)
   if (FLAG_IS_DEFAULT(NeverActAsServerClassMachine)) {
-    FLAG_SET_ERGO(bool, NeverActAsServerClassMachine, true);
+    FLAG_SET_ERGO(NeverActAsServerClassMachine, true);
   }
 #endif
   if (NeverActAsServerClassMachine) {
@@ -198,14 +198,14 @@ void CompilerConfig::set_tiered_flags() {
   }
   // Increase the code cache size - tiered compiles a lot more.
   if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
-    FLAG_SET_ERGO(uintx, ReservedCodeCacheSize,
+    FLAG_SET_ERGO(ReservedCodeCacheSize,
                   MIN2(CODE_CACHE_DEFAULT_LIMIT, (size_t)ReservedCodeCacheSize * 5));
   }
   // Enable SegmentedCodeCache if TieredCompilation is enabled, ReservedCodeCacheSize >= 240M
   // and the code cache contains at least 8 pages (segmentation disables advantage of huge pages).
   if (FLAG_IS_DEFAULT(SegmentedCodeCache) && ReservedCodeCacheSize >= 240*M &&
       8 * CodeCache::page_size() <= ReservedCodeCacheSize) {
-    FLAG_SET_ERGO(bool, SegmentedCodeCache, true);
+    FLAG_SET_ERGO(SegmentedCodeCache, true);
   }
   if (!UseInterpreter) { // -Xcomp
     Tier3InvokeNotifyFreqLog = 0;
@@ -219,29 +219,29 @@ void CompilerConfig::set_tiered_flags() {
   // Scale tiered compilation thresholds.
   // CompileThresholdScaling == 0.0 is equivalent to -Xint and leaves compilation thresholds unchanged.
   if (!FLAG_IS_DEFAULT(CompileThresholdScaling) && CompileThresholdScaling > 0.0) {
-    FLAG_SET_ERGO(intx, Tier0InvokeNotifyFreqLog, scaled_freq_log(Tier0InvokeNotifyFreqLog));
-    FLAG_SET_ERGO(intx, Tier0BackedgeNotifyFreqLog, scaled_freq_log(Tier0BackedgeNotifyFreqLog));
+    FLAG_SET_ERGO(Tier0InvokeNotifyFreqLog, scaled_freq_log(Tier0InvokeNotifyFreqLog));
+    FLAG_SET_ERGO(Tier0BackedgeNotifyFreqLog, scaled_freq_log(Tier0BackedgeNotifyFreqLog));

-    FLAG_SET_ERGO(intx, Tier3InvocationThreshold, scaled_compile_threshold(Tier3InvocationThreshold));
-    FLAG_SET_ERGO(intx, Tier3MinInvocationThreshold, scaled_compile_threshold(Tier3MinInvocationThreshold));
-    FLAG_SET_ERGO(intx, Tier3CompileThreshold, scaled_compile_threshold(Tier3CompileThreshold));
-    FLAG_SET_ERGO(intx, Tier3BackEdgeThreshold, scaled_compile_threshold(Tier3BackEdgeThreshold));
+    FLAG_SET_ERGO(Tier3InvocationThreshold, scaled_compile_threshold(Tier3InvocationThreshold));
+    FLAG_SET_ERGO(Tier3MinInvocationThreshold, scaled_compile_threshold(Tier3MinInvocationThreshold));
+    FLAG_SET_ERGO(Tier3CompileThreshold, scaled_compile_threshold(Tier3CompileThreshold));
+    FLAG_SET_ERGO(Tier3BackEdgeThreshold, scaled_compile_threshold(Tier3BackEdgeThreshold));

     // Tier2{Invocation,MinInvocation,Compile,Backedge}Threshold should be scaled here
     // once these thresholds become supported.

-    FLAG_SET_ERGO(intx, Tier2InvokeNotifyFreqLog, scaled_freq_log(Tier2InvokeNotifyFreqLog));
-    FLAG_SET_ERGO(intx, Tier2BackedgeNotifyFreqLog, scaled_freq_log(Tier2BackedgeNotifyFreqLog));
+    FLAG_SET_ERGO(Tier2InvokeNotifyFreqLog, scaled_freq_log(Tier2InvokeNotifyFreqLog));
+    FLAG_SET_ERGO(Tier2BackedgeNotifyFreqLog, scaled_freq_log(Tier2BackedgeNotifyFreqLog));

-    FLAG_SET_ERGO(intx, Tier3InvokeNotifyFreqLog, scaled_freq_log(Tier3InvokeNotifyFreqLog));
-    FLAG_SET_ERGO(intx, Tier3BackedgeNotifyFreqLog, scaled_freq_log(Tier3BackedgeNotifyFreqLog));
+    FLAG_SET_ERGO(Tier3InvokeNotifyFreqLog, scaled_freq_log(Tier3InvokeNotifyFreqLog));
+    FLAG_SET_ERGO(Tier3BackedgeNotifyFreqLog, scaled_freq_log(Tier3BackedgeNotifyFreqLog));

-    FLAG_SET_ERGO(intx, Tier23InlineeNotifyFreqLog, scaled_freq_log(Tier23InlineeNotifyFreqLog));
+    FLAG_SET_ERGO(Tier23InlineeNotifyFreqLog, scaled_freq_log(Tier23InlineeNotifyFreqLog));

-    FLAG_SET_ERGO(intx, Tier4InvocationThreshold, scaled_compile_threshold(Tier4InvocationThreshold));
-    FLAG_SET_ERGO(intx, Tier4MinInvocationThreshold, scaled_compile_threshold(Tier4MinInvocationThreshold));
-    FLAG_SET_ERGO(intx, Tier4CompileThreshold, scaled_compile_threshold(Tier4CompileThreshold));
-    FLAG_SET_ERGO(intx, Tier4BackEdgeThreshold, scaled_compile_threshold(Tier4BackEdgeThreshold));
+    FLAG_SET_ERGO(Tier4InvocationThreshold, scaled_compile_threshold(Tier4InvocationThreshold));
+    FLAG_SET_ERGO(Tier4MinInvocationThreshold, scaled_compile_threshold(Tier4MinInvocationThreshold));
+    FLAG_SET_ERGO(Tier4CompileThreshold, scaled_compile_threshold(Tier4CompileThreshold));
+    FLAG_SET_ERGO(Tier4BackEdgeThreshold, scaled_compile_threshold(Tier4BackEdgeThreshold));
   }
 }

@@ -256,7 +256,7 @@ void set_jvmci_specific_flags() {
     if (TieredStopAtLevel != CompLevel_full_optimization) {
       // Currently JVMCI compiler can only work at the full optimization level
       warning("forcing TieredStopAtLevel to full optimization because JVMCI is enabled");
-      FLAG_SET_ERGO(intx, TieredStopAtLevel, CompLevel_full_optimization);
+      FLAG_SET_ERGO(TieredStopAtLevel, CompLevel_full_optimization);
     }
     if (FLAG_IS_DEFAULT(TypeProfileLevel)) {
       FLAG_SET_DEFAULT(TypeProfileLevel, 0);
@@ -338,7 +338,7 @@ bool CompilerConfig::check_args_consistency(bool status) {
     if (!FLAG_IS_DEFAULT(BackgroundCompilation)) {
       warning("BackgroundCompilation disabled due to ReplayCompiles option.");
     }
-    FLAG_SET_CMDLINE(bool, BackgroundCompilation, false);
+    FLAG_SET_CMDLINE(BackgroundCompilation, false);
   }

 #ifdef COMPILER2
@@ -346,7 +346,7 @@ bool CompilerConfig::check_args_consistency(bool status) {
     if (!FLAG_IS_DEFAULT(PostLoopMultiversioning)) {
       warning("PostLoopMultiversioning disabled because RangeCheckElimination is disabled.");
     }
-    FLAG_SET_CMDLINE(bool, PostLoopMultiversioning, false);
+    FLAG_SET_CMDLINE(PostLoopMultiversioning, false);
   }
   if (UseCountedLoopSafepoints && LoopStripMiningIter == 0) {
     if (!FLAG_IS_DEFAULT(UseCountedLoopSafepoints) || !FLAG_IS_DEFAULT(LoopStripMiningIter)) {
@@ -366,27 +366,27 @@ bool CompilerConfig::check_args_consistency(bool status) {
       if (!FLAG_IS_DEFAULT(UseCompiler)) {
        warning("UseCompiler disabled due to -Xint.");
      }
-      FLAG_SET_CMDLINE(bool, UseCompiler, false);
+      FLAG_SET_CMDLINE(UseCompiler, false);
    }
    if (ProfileInterpreter) {
      if (!FLAG_IS_DEFAULT(ProfileInterpreter)) {
        warning("ProfileInterpreter disabled due to -Xint.");
      }
-      FLAG_SET_CMDLINE(bool, ProfileInterpreter, false);
+      FLAG_SET_CMDLINE(ProfileInterpreter, false);
    }
    if (TieredCompilation) {
      if (!FLAG_IS_DEFAULT(TieredCompilation)) {
        warning("TieredCompilation disabled due to -Xint.");
      }
-      FLAG_SET_CMDLINE(bool, TieredCompilation, false);
+      FLAG_SET_CMDLINE(TieredCompilation, false);
    }
 #if INCLUDE_JVMCI
    if (EnableJVMCI) {
      if (!FLAG_IS_DEFAULT(EnableJVMCI) || !FLAG_IS_DEFAULT(UseJVMCICompiler)) {
        warning("JVMCI Compiler disabled due to -Xint.");
      }
-      FLAG_SET_CMDLINE(bool, EnableJVMCI, false);
-      FLAG_SET_CMDLINE(bool, UseJVMCICompiler, false);
+      FLAG_SET_CMDLINE(EnableJVMCI, false);
+      FLAG_SET_CMDLINE(UseJVMCICompiler, false);
    }
 #endif
   } else {
@@ -434,7 +434,7 @@ void CompilerConfig::ergo_initialize() {
   // Scale CompileThreshold
   // CompileThresholdScaling == 0.0 is equivalent to -Xint and leaves CompileThreshold unchanged.
   if (!FLAG_IS_DEFAULT(CompileThresholdScaling) && CompileThresholdScaling > 0.0) {
-    FLAG_SET_ERGO(intx, CompileThreshold, scaled_compile_threshold(CompileThreshold));
+    FLAG_SET_ERGO(CompileThreshold, scaled_compile_threshold(CompileThreshold));
   }
 }

@@ -455,7 +455,7 @@ void CompilerConfig::ergo_initialize() {
     AlwaysIncrementalInline = false;
   }
   if (PrintIdealGraphLevel > 0) {
-    FLAG_SET_ERGO(bool, PrintIdealGraph, true);
+    FLAG_SET_ERGO(PrintIdealGraph, true);
   }
 #endif
   if (!UseTypeSpeculation && FLAG_IS_DEFAULT(TypeProfileLevel)) {
@@ -106,7 +106,7 @@ void CMSArguments::initialize() {
   }

   if (!ClassUnloading) {
-    FLAG_SET_CMDLINE(bool, CMSClassUnloadingEnabled, false);
+    FLAG_SET_CMDLINE(CMSClassUnloadingEnabled, false);
   }

   // Set CMS global values
@@ -142,9 +142,9 @@ void CMSArguments::initialize() {
     // NewSize was set on the command line and it is larger than
     // preferred_max_new_size.
     if (!FLAG_IS_DEFAULT(NewSize)) {   // NewSize explicitly set at command-line
-      FLAG_SET_ERGO(size_t, MaxNewSize, MAX2(NewSize, preferred_max_new_size));
+      FLAG_SET_ERGO(MaxNewSize, MAX2(NewSize, preferred_max_new_size));
     } else {
-      FLAG_SET_ERGO(size_t, MaxNewSize, preferred_max_new_size);
+      FLAG_SET_ERGO(MaxNewSize, preferred_max_new_size);
     }
     log_trace(gc, heap)("CMS ergo set MaxNewSize: " SIZE_FORMAT, MaxNewSize);

@@ -159,15 +159,15 @@ void CMSArguments::initialize() {
     // Unless explicitly requested otherwise, make young gen
     // at least min_new, and at most preferred_max_new_size.
     if (FLAG_IS_DEFAULT(NewSize)) {
-      FLAG_SET_ERGO(size_t, NewSize, MAX2(NewSize, min_new));
-      FLAG_SET_ERGO(size_t, NewSize, MIN2(preferred_max_new_size, NewSize));
+      FLAG_SET_ERGO(NewSize, MAX2(NewSize, min_new));
+      FLAG_SET_ERGO(NewSize, MIN2(preferred_max_new_size, NewSize));
       log_trace(gc, heap)("CMS ergo set NewSize: " SIZE_FORMAT, NewSize);
     }
     // Unless explicitly requested otherwise, size old gen
     // so it's NewRatio x of NewSize.
     if (FLAG_IS_DEFAULT(OldSize)) {
       if (max_heap > NewSize) {
-        FLAG_SET_ERGO(size_t, OldSize, MIN2(NewRatio*NewSize, max_heap - NewSize));
+        FLAG_SET_ERGO(OldSize, MIN2(NewRatio*NewSize, max_heap - NewSize));
         log_trace(gc, heap)("CMS ergo set OldSize: " SIZE_FORMAT, OldSize);
       }
     }
@@ -177,14 +177,14 @@ void CMSArguments::initialize() {
   // promote all objects surviving "tenuring_default" scavenges.
   if (FLAG_IS_DEFAULT(MaxTenuringThreshold) &&
       FLAG_IS_DEFAULT(SurvivorRatio)) {
-    FLAG_SET_ERGO(uintx, MaxTenuringThreshold, tenuring_default);
+    FLAG_SET_ERGO(MaxTenuringThreshold, tenuring_default);
   }
   // If we decided above (or user explicitly requested)
   // `promote all' (via MaxTenuringThreshold := 0),
   // prefer minuscule survivor spaces so as not to waste
   // space for (non-existent) survivors
   if (FLAG_IS_DEFAULT(SurvivorRatio) && MaxTenuringThreshold == 0) {
-    FLAG_SET_ERGO(uintx, SurvivorRatio, MAX2((uintx)1024, SurvivorRatio));
+    FLAG_SET_ERGO(SurvivorRatio, MAX2((uintx)1024, SurvivorRatio));
   }

   // OldPLABSize is interpreted in CMS as not the size of the PLAB in words,
@@ -195,7 +195,7 @@ void CMSArguments::initialize() {
       // OldPLAB sizing manually turned off: Use a larger default setting,
       // unless it was manually specified. This is because a too-low value
       // will slow down scavenges.
-      FLAG_SET_ERGO(size_t, OldPLABSize, CompactibleFreeListSpaceLAB::_default_static_old_plab_size); // default value before 6631166
+      FLAG_SET_ERGO(OldPLABSize, CompactibleFreeListSpaceLAB::_default_static_old_plab_size); // default value before 6631166
     } else {
       FLAG_SET_DEFAULT(OldPLABSize, CompactibleFreeListSpaceLAB::_default_dynamic_old_plab_size); // old CMSParPromoteBlocksToClaim default
     }
@@ -110,11 +110,11 @@ void G1Arguments::initialize() {
   // triggering a full collection. To get as low fragmentation as
   // possible we only use one worker thread.
   if (DumpSharedSpaces) {
-    FLAG_SET_ERGO(uint, ParallelGCThreads, 1);
+    FLAG_SET_ERGO(ParallelGCThreads, 1);
   }

   if (FLAG_IS_DEFAULT(G1ConcRefinementThreads)) {
-    FLAG_SET_ERGO(uint, G1ConcRefinementThreads, ParallelGCThreads);
+    FLAG_SET_ERGO(G1ConcRefinementThreads, ParallelGCThreads);
   }

   // MarkStackSize will be set (if it hasn't been set by the user)
@@ -162,7 +162,7 @@ void G1Arguments::initialize() {

   // By default do not let the target stack size to be more than 1/4 of the entries
   if (FLAG_IS_DEFAULT(GCDrainStackTargetSize)) {
-    FLAG_SET_ERGO(uintx, GCDrainStackTargetSize, MIN2(GCDrainStackTargetSize, (uintx)TASKQUEUE_SIZE / 4));
+    FLAG_SET_ERGO(GCDrainStackTargetSize, MIN2(GCDrainStackTargetSize, (uintx)TASKQUEUE_SIZE / 4));
   }

 #ifdef COMPILER2
@@ -425,7 +425,7 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
     // Calculate the number of concurrent worker threads by scaling
     // the number of parallel GC threads.
     uint marking_thread_num = scale_concurrent_worker_threads(ParallelGCThreads);
-    FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
+    FLAG_SET_ERGO(ConcGCThreads, marking_thread_num);
   }

   assert(ConcGCThreads > 0, "ConcGCThreads have been set.");
@@ -456,7 +456,7 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
                    mark_stack_size, MarkStackSizeMax);
       return;
     }
-    FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
+    FLAG_SET_ERGO(MarkStackSize, mark_stack_size);
   } else {
     // Verify MarkStackSize is in range.
     if (FLAG_IS_CMDLINE(MarkStackSize)) {
@@ -48,7 +48,7 @@ G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults),
                           "A new max generation size of " SIZE_FORMAT "k will be used.",
                           NewSize/K, MaxNewSize/K, NewSize/K);
     }
-    FLAG_SET_ERGO(size_t, MaxNewSize, NewSize);
+    FLAG_SET_ERGO(MaxNewSize, NewSize);
   }

   if (FLAG_IS_CMDLINE(NewSize)) {
@@ -121,7 +121,7 @@ void G1YoungGenSizer::adjust_max_new_size(uint number_of_heap_regions) {

   size_t max_young_size = result * HeapRegion::GrainBytes;
   if (max_young_size != MaxNewSize) {
-    FLAG_SET_ERGO(size_t, MaxNewSize, max_young_size);
+    FLAG_SET_ERGO(MaxNewSize, max_young_size);
   }
 }

@@ -106,7 +106,7 @@ void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_hea
   CardsPerRegion = GrainBytes >> G1CardTable::card_shift;

   if (G1HeapRegionSize != GrainBytes) {
-    FLAG_SET_ERGO(size_t, G1HeapRegionSize, GrainBytes);
+    FLAG_SET_ERGO(G1HeapRegionSize, GrainBytes);
   }
 }

@@ -113,11 +113,11 @@ void ParallelArguments::initialize_heap_flags_and_sizes_one_pass() {
   // default gc, which adds 2 to the ratio value. We need to
   // make sure the values are valid before using them.
   if (MinSurvivorRatio < 3) {
-    FLAG_SET_ERGO(uintx, MinSurvivorRatio, 3);
+    FLAG_SET_ERGO(MinSurvivorRatio, 3);
   }

   if (InitialSurvivorRatio < 3) {
-    FLAG_SET_ERGO(uintx, InitialSurvivorRatio, 3);
+    FLAG_SET_ERGO(InitialSurvivorRatio, 3);
   }
 }

@@ -54,12 +54,12 @@ void GCArguments::initialize() {

   if (MinHeapFreeRatio == 100) {
     // Keeping the heap 100% free is hard ;-) so limit it to 99%.
-    FLAG_SET_ERGO(uintx, MinHeapFreeRatio, 99);
+    FLAG_SET_ERGO(MinHeapFreeRatio, 99);
   }

   if (!ClassUnloading) {
     // If class unloading is disabled, also disable concurrent class unloading.
-    FLAG_SET_CMDLINE(bool, ClassUnloadingWithConcurrentMark, false);
+    FLAG_SET_CMDLINE(ClassUnloadingWithConcurrentMark, false);
   }

   if (!FLAG_IS_DEFAULT(AllocateOldGenAt)) {
@@ -172,10 +172,10 @@ void GCArguments::initialize_heap_flags_and_sizes() {

   // Write back to flags if the values changed
   if (aligned_initial_heap_size != InitialHeapSize) {
-    FLAG_SET_ERGO(size_t, InitialHeapSize, aligned_initial_heap_size);
+    FLAG_SET_ERGO(InitialHeapSize, aligned_initial_heap_size);
   }
   if (aligned_max_heap_size != MaxHeapSize) {
-    FLAG_SET_ERGO(size_t, MaxHeapSize, aligned_max_heap_size);
+    FLAG_SET_ERGO(MaxHeapSize, aligned_max_heap_size);
   }

   if (FLAG_IS_CMDLINE(InitialHeapSize) && MinHeapSize != 0 &&
@@ -183,15 +183,15 @@ void GCArguments::initialize_heap_flags_and_sizes() {
     vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified");
   }
   if (!FLAG_IS_DEFAULT(InitialHeapSize) && InitialHeapSize > MaxHeapSize) {
-    FLAG_SET_ERGO(size_t, MaxHeapSize, InitialHeapSize);
+    FLAG_SET_ERGO(MaxHeapSize, InitialHeapSize);
   } else if (!FLAG_IS_DEFAULT(MaxHeapSize) && InitialHeapSize > MaxHeapSize) {
-    FLAG_SET_ERGO(size_t, InitialHeapSize, MaxHeapSize);
+    FLAG_SET_ERGO(InitialHeapSize, MaxHeapSize);
     if (InitialHeapSize < MinHeapSize) {
       MinHeapSize = InitialHeapSize;
     }
   }

-  FLAG_SET_ERGO(size_t, MinHeapDeltaBytes, align_up(MinHeapDeltaBytes, SpaceAlignment));
+  FLAG_SET_ERGO(MinHeapDeltaBytes, align_up(MinHeapDeltaBytes, SpaceAlignment));

   DEBUG_ONLY(assert_flags();)
 }
@@ -109,15 +109,15 @@ void GCConfig::fail_if_unsupported_gc_is_selected() {
 void GCConfig::select_gc_ergonomically() {
   if (os::is_server_class_machine()) {
 #if INCLUDE_G1GC
-    FLAG_SET_ERGO_IF_DEFAULT(bool, UseG1GC, true);
+    FLAG_SET_ERGO_IF_DEFAULT(UseG1GC, true);
 #elif INCLUDE_PARALLELGC
-    FLAG_SET_ERGO_IF_DEFAULT(bool, UseParallelGC, true);
+    FLAG_SET_ERGO_IF_DEFAULT(UseParallelGC, true);
 #elif INCLUDE_SERIALGC
-    FLAG_SET_ERGO_IF_DEFAULT(bool, UseSerialGC, true);
+    FLAG_SET_ERGO_IF_DEFAULT(UseSerialGC, true);
 #endif
   } else {
 #if INCLUDE_SERIALGC
-    FLAG_SET_ERGO_IF_DEFAULT(bool, UseSerialGC, true);
+    FLAG_SET_ERGO_IF_DEFAULT(UseSerialGC, true);
 #endif
   }
 }

@@ -86,20 +86,20 @@ void GenArguments::initialize_heap_flags_and_sizes() {
   size_t smallest_heap_size = align_up(smallest_new_size + old_gen_size_lower_bound(),
                                        HeapAlignment);
   if (MaxHeapSize < smallest_heap_size) {
-    FLAG_SET_ERGO(size_t, MaxHeapSize, smallest_heap_size);
+    FLAG_SET_ERGO(MaxHeapSize, smallest_heap_size);
   }
   // If needed, synchronize MinHeapSize size and InitialHeapSize
   if (MinHeapSize < smallest_heap_size) {
     MinHeapSize = smallest_heap_size;
     if (InitialHeapSize < MinHeapSize) {
-      FLAG_SET_ERGO(size_t, InitialHeapSize, smallest_heap_size);
+      FLAG_SET_ERGO(InitialHeapSize, smallest_heap_size);
     }
   }

   // Make sure NewSize allows an old generation to fit even if set on the command line
   if (FLAG_IS_CMDLINE(NewSize) && NewSize >= InitialHeapSize) {
     log_warning(gc, ergo)("NewSize was set larger than initial heap size, will use initial heap size.");
-    FLAG_SET_ERGO(size_t, NewSize, bound_minus_alignment(NewSize, InitialHeapSize, GenAlignment));
+    FLAG_SET_ERGO(NewSize, bound_minus_alignment(NewSize, InitialHeapSize, GenAlignment));
   }

   // Now take the actual NewSize into account. We will silently increase NewSize
@@ -107,7 +107,7 @@ void GenArguments::initialize_heap_flags_and_sizes() {
   size_t bounded_new_size = bound_minus_alignment(NewSize, MaxHeapSize, GenAlignment);
   bounded_new_size = MAX2(smallest_new_size, align_down(bounded_new_size, GenAlignment));
   if (bounded_new_size != NewSize) {
-    FLAG_SET_ERGO(size_t, NewSize, bounded_new_size);
+    FLAG_SET_ERGO(NewSize, bounded_new_size);
   }
   MinNewSize = smallest_new_size;

@@ -120,14 +120,14 @@ void GenArguments::initialize_heap_flags_and_sizes() {
                             "heap (" SIZE_FORMAT "k).  A new max generation size of " SIZE_FORMAT "k will be used.",
                             MaxNewSize/K, MaxHeapSize/K, smaller_max_new_size/K);
       }
-      FLAG_SET_ERGO(size_t, MaxNewSize, smaller_max_new_size);
+      FLAG_SET_ERGO(MaxNewSize, smaller_max_new_size);
       if (NewSize > MaxNewSize) {
-        FLAG_SET_ERGO(size_t, NewSize, MaxNewSize);
+        FLAG_SET_ERGO(NewSize, MaxNewSize);
       }
     } else if (MaxNewSize < NewSize) {
-      FLAG_SET_ERGO(size_t, MaxNewSize, NewSize);
+      FLAG_SET_ERGO(MaxNewSize, NewSize);
     } else if (!is_aligned(MaxNewSize, GenAlignment)) {
-      FLAG_SET_ERGO(size_t, MaxNewSize, align_down(MaxNewSize, GenAlignment));
+      FLAG_SET_ERGO(MaxNewSize, align_down(MaxNewSize, GenAlignment));
     }
   }

@@ -139,7 +139,7 @@ void GenArguments::initialize_heap_flags_and_sizes() {
                           "A new max generation size of " SIZE_FORMAT "k will be used.",
                           NewSize/K, MaxNewSize/K, NewSize/K);
     }
-    FLAG_SET_ERGO(size_t, MaxNewSize, NewSize);
+    FLAG_SET_ERGO(MaxNewSize, NewSize);
   }

   if (SurvivorRatio < 1 || NewRatio < 1) {
@@ -147,10 +147,10 @@ void GenArguments::initialize_heap_flags_and_sizes() {
   }

   if (OldSize < old_gen_size_lower_bound()) {
-    FLAG_SET_ERGO(size_t, OldSize, old_gen_size_lower_bound());
+    FLAG_SET_ERGO(OldSize, old_gen_size_lower_bound());
   }
   if (!is_aligned(OldSize, GenAlignment)) {
-    FLAG_SET_ERGO(size_t, OldSize, align_down(OldSize, GenAlignment));
+    FLAG_SET_ERGO(OldSize, align_down(OldSize, GenAlignment));
   }

   if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(MaxHeapSize)) {
@@ -161,8 +161,8 @@ void GenArguments::initialize_heap_flags_and_sizes() {
     size_t calculated_heapsize = (OldSize / NewRatio) * (NewRatio + 1);

     calculated_heapsize = align_up(calculated_heapsize, HeapAlignment);
-    FLAG_SET_ERGO(size_t, MaxHeapSize, calculated_heapsize);
-    FLAG_SET_ERGO(size_t, InitialHeapSize, calculated_heapsize);
+    FLAG_SET_ERGO(MaxHeapSize, calculated_heapsize);
+    FLAG_SET_ERGO(InitialHeapSize, calculated_heapsize);
   }

   // Adjust NewSize and OldSize or MaxHeapSize to match each other
@@ -173,15 +173,15 @@ void GenArguments::initialize_heap_flags_and_sizes() {
       size_t calculated_size = NewSize + OldSize;
       double shrink_factor = (double) MaxHeapSize / calculated_size;
       size_t smaller_new_size = align_down((size_t)(NewSize * shrink_factor), GenAlignment);
-      FLAG_SET_ERGO(size_t, NewSize, MAX2(young_gen_size_lower_bound(), smaller_new_size));
+      FLAG_SET_ERGO(NewSize, MAX2(young_gen_size_lower_bound(), smaller_new_size));

       // OldSize is already aligned because above we aligned MaxHeapSize to
       // HeapAlignment, and we just made sure that NewSize is aligned to
       // GenAlignment. In initialize_flags() we verified that HeapAlignment
       // is a multiple of GenAlignment.
-      FLAG_SET_ERGO(size_t, OldSize, MaxHeapSize - NewSize);
+      FLAG_SET_ERGO(OldSize, MaxHeapSize - NewSize);
     } else {
-      FLAG_SET_ERGO(size_t, MaxHeapSize, align_up(NewSize + OldSize, HeapAlignment));
+      FLAG_SET_ERGO(MaxHeapSize, align_up(NewSize + OldSize, HeapAlignment));
     }
   }

@@ -191,7 +191,7 @@ void GenArguments::initialize_heap_flags_and_sizes() {
     if (OldSize < InitialHeapSize) {
       size_t new_size = InitialHeapSize - OldSize;
       if (new_size >= MinNewSize && new_size <= MaxNewSize) {
-        FLAG_SET_ERGO(size_t, NewSize, new_size);
+        FLAG_SET_ERGO(NewSize, new_size);
       }
     }
   }
@@ -341,15 +341,15 @@ void GenArguments::initialize_size_info() {

   // Write back to flags if necessary.
   if (NewSize != initial_young_size) {
-    FLAG_SET_ERGO(size_t, NewSize, initial_young_size);
+    FLAG_SET_ERGO(NewSize, initial_young_size);
   }

   if (MaxNewSize != max_young_size) {
-    FLAG_SET_ERGO(size_t, MaxNewSize, max_young_size);
+    FLAG_SET_ERGO(MaxNewSize, max_young_size);
   }

   if (OldSize != initial_old_size) {
-    FLAG_SET_ERGO(size_t, OldSize, initial_old_size);
+    FLAG_SET_ERGO(OldSize, initial_old_size);
   }

   log_trace(gc, heap)("Minimum old " SIZE_FORMAT "  Initial old " SIZE_FORMAT "  Maximum old " SIZE_FORMAT,
@@ -57,7 +57,7 @@ static bool _enabled = false;

 static bool enable() {
   assert(!_enabled, "invariant");
-  FLAG_SET_MGMT(bool, FlightRecorder, true);
+  FLAG_SET_MGMT(FlightRecorder, true);
   _enabled = FlightRecorder;
   assert(_enabled, "invariant");
   return _enabled;
@@ -1152,7 +1152,7 @@ void Metaspace::initialize_class_space(ReservedSpace rs) {
 void Metaspace::ergo_initialize() {
   if (DumpSharedSpaces) {
     // Using large pages when dumping the shared archive is currently not implemented.
-    FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
+    FLAG_SET_ERGO(UseLargePagesInMetaspace, false);
   }

   size_t page_size = os::vm_page_size();
@@ -1194,12 +1194,12 @@ void Metaspace::ergo_initialize() {
       if (min_metaspace_sz >= MaxMetaspaceSize) {
         vm_exit_during_initialization("MaxMetaspaceSize is too small.");
       } else {
-        FLAG_SET_ERGO(size_t, CompressedClassSpaceSize,
+        FLAG_SET_ERGO(CompressedClassSpaceSize,
                       MaxMetaspaceSize - min_metaspace_sz);
       }
     }
   } else if (min_metaspace_sz >= MaxMetaspaceSize) {
-    FLAG_SET_ERGO(size_t, InitialBootClassLoaderMetaspaceSize,
+    FLAG_SET_ERGO(InitialBootClassLoaderMetaspaceSize,
                   min_metaspace_sz);
   }

@@ -1634,7 +1634,7 @@ void Arguments::set_use_compressed_oops() {
   if (max_heap_size <= max_heap_for_compressed_oops()) {
 #if !defined(COMPILER1) || defined(TIERED)
     if (FLAG_IS_DEFAULT(UseCompressedOops)) {
-      FLAG_SET_ERGO(bool, UseCompressedOops, true);
+      FLAG_SET_ERGO(UseCompressedOops, true);
     }
 #endif
   } else {
@@ -1663,7 +1663,7 @@ void Arguments::set_use_compressed_klass_ptrs() {
   } else {
     // Turn on UseCompressedClassPointers too
     if (FLAG_IS_DEFAULT(UseCompressedClassPointers)) {
-      FLAG_SET_ERGO(bool, UseCompressedClassPointers, true);
+      FLAG_SET_ERGO(UseCompressedClassPointers, true);
     }
     // Check the CompressedClassSpaceSize to make sure we use compressed klass ptrs.
     if (UseCompressedClassPointers) {
@@ -1771,7 +1771,7 @@ void Arguments::set_heap_size() {
                   DefaultHeapBaseMinAddress,
                   DefaultHeapBaseMinAddress/G,
                   HeapBaseMinAddress);
-      FLAG_SET_ERGO(size_t, HeapBaseMinAddress, DefaultHeapBaseMinAddress);
+      FLAG_SET_ERGO(HeapBaseMinAddress, DefaultHeapBaseMinAddress);
     }
   }

@@ -1793,7 +1793,7 @@ void Arguments::set_heap_size() {
     }

     log_trace(gc, heap)(" Maximum heap size " SIZE_FORMAT, (size_t) reasonable_max);
-    FLAG_SET_ERGO(size_t, MaxHeapSize, (size_t)reasonable_max);
+    FLAG_SET_ERGO(MaxHeapSize, (size_t)reasonable_max);
   }

   // If the minimum or initial heap_size have not been set or requested to be set
@@ -1814,7 +1814,7 @@ void Arguments::set_heap_size() {
       reasonable_initial = limit_by_allocatable_memory(reasonable_initial);

       log_trace(gc, heap)(" Initial heap size " SIZE_FORMAT, (size_t)reasonable_initial);
-      FLAG_SET_ERGO(size_t, InitialHeapSize, (size_t)reasonable_initial);
+      FLAG_SET_ERGO(InitialHeapSize, (size_t)reasonable_initial);
     }
     // If the minimum heap size has not been set (via -Xms),
     // synchronize with InitialHeapSize to avoid errors with the default value.
@@ -1855,10 +1855,10 @@ jint Arguments::set_aggressive_heap_flags() {
     initHeapSize = limit_by_allocatable_memory(initHeapSize);

     if (FLAG_IS_DEFAULT(MaxHeapSize)) {
-      if (FLAG_SET_CMDLINE(size_t, MaxHeapSize, initHeapSize) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(MaxHeapSize, initHeapSize) != JVMFlag::SUCCESS) {
         return JNI_EINVAL;
       }
-      if (FLAG_SET_CMDLINE(size_t, InitialHeapSize, initHeapSize) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(InitialHeapSize, initHeapSize) != JVMFlag::SUCCESS) {
         return JNI_EINVAL;
       }
       // Currently the minimum size and the initial heap sizes are the same.
@@ -1866,11 +1866,11 @@ jint Arguments::set_aggressive_heap_flags() {
     }
     if (FLAG_IS_DEFAULT(NewSize)) {
       // Make the young generation 3/8ths of the total heap.
-      if (FLAG_SET_CMDLINE(size_t, NewSize,
+      if (FLAG_SET_CMDLINE(NewSize,
                            ((julong) MaxHeapSize / (julong) 8) * (julong) 3) != JVMFlag::SUCCESS) {
         return JNI_EINVAL;
       }
-      if (FLAG_SET_CMDLINE(size_t, MaxNewSize, NewSize) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(MaxNewSize, NewSize) != JVMFlag::SUCCESS) {
         return JNI_EINVAL;
       }
     }
@@ -1880,20 +1880,20 @@ jint Arguments::set_aggressive_heap_flags() {
 #endif

     // Increase some data structure sizes for efficiency
-    if (FLAG_SET_CMDLINE(size_t, BaseFootPrintEstimate, MaxHeapSize) != JVMFlag::SUCCESS) {
+    if (FLAG_SET_CMDLINE(BaseFootPrintEstimate, MaxHeapSize) != JVMFlag::SUCCESS) {
      return JNI_EINVAL;
    }
-    if (FLAG_SET_CMDLINE(bool, ResizeTLAB, false) != JVMFlag::SUCCESS) {
+    if (FLAG_SET_CMDLINE(ResizeTLAB, false) != JVMFlag::SUCCESS) {
      return JNI_EINVAL;
    }
-    if (FLAG_SET_CMDLINE(size_t, TLABSize, 256 * K) != JVMFlag::SUCCESS) {
+    if (FLAG_SET_CMDLINE(TLABSize, 256 * K) != JVMFlag::SUCCESS) {
      return JNI_EINVAL;
    }

     // See the OldPLABSize comment below, but replace 'after promotion'
     // with 'after copying'. YoungPLABSize is the size of the survivor
     // space per-gc-thread buffers.  The default is 4kw.
-    if (FLAG_SET_CMDLINE(size_t, YoungPLABSize, 256 * K) != JVMFlag::SUCCESS) { // Note: this is in words
+    if (FLAG_SET_CMDLINE(YoungPLABSize, 256 * K) != JVMFlag::SUCCESS) { // Note: this is in words
      return JNI_EINVAL;
    }

@@ -1910,29 +1910,29 @@ jint Arguments::set_aggressive_heap_flags() {
     // locality.  A minor effect may be that larger PLABs reduce the
     // number of PLAB allocation events during gc.  The value of 8kw
     // was arrived at by experimenting with specjbb.
-    if (FLAG_SET_CMDLINE(size_t, OldPLABSize, 8 * K) != JVMFlag::SUCCESS) { // Note: this is in words
+    if (FLAG_SET_CMDLINE(OldPLABSize, 8 * K) != JVMFlag::SUCCESS) { // Note: this is in words
      return JNI_EINVAL;
    }

     // Enable parallel GC and adaptive generation sizing
-    if (FLAG_SET_CMDLINE(bool, UseParallelGC, true) != JVMFlag::SUCCESS) {
+    if (FLAG_SET_CMDLINE(UseParallelGC, true) != JVMFlag::SUCCESS) {
      return JNI_EINVAL;
    }

     // Encourage steady state memory management
-    if (FLAG_SET_CMDLINE(uintx, ThresholdTolerance, 100) != JVMFlag::SUCCESS) {
+    if (FLAG_SET_CMDLINE(ThresholdTolerance, 100) != JVMFlag::SUCCESS) {
      return JNI_EINVAL;
    }

     // This appears to improve mutator locality
-    if (FLAG_SET_CMDLINE(bool, ScavengeBeforeFullGC, false) != JVMFlag::SUCCESS) {
+    if (FLAG_SET_CMDLINE(ScavengeBeforeFullGC, false) != JVMFlag::SUCCESS) {
      return JNI_EINVAL;
    }

     // Get around early Solaris scheduling bug
     // (affinity vs other jobs on system)
     // but disallow DR and offlining (5008695).
-    if (FLAG_SET_CMDLINE(bool, BindGCTaskThreadsToCPUs, true) != JVMFlag::SUCCESS) {
+    if (FLAG_SET_CMDLINE(BindGCTaskThreadsToCPUs, true) != JVMFlag::SUCCESS) {
      return JNI_EINVAL;
    }

@@ -2053,7 +2053,7 @@ bool Arguments::check_vm_args_consistency() {

 #ifndef SUPPORT_RESERVED_STACK_AREA
   if (StackReservedPages != 0) {
-    FLAG_SET_CMDLINE(intx, StackReservedPages, 0);
+    FLAG_SET_CMDLINE(StackReservedPages, 0);
     warning("Reserved Stack Area not supported on this platform");
   }
 #endif
@@ -2372,7 +2372,7 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, bool* patch_m
       } else if (!strcmp(tail, ":gc")) {
         LogConfiguration::configure_stdout(LogLevel::Info, true, LOG_TAGS(gc));
       } else if (!strcmp(tail, ":jni")) {
-        if (FLAG_SET_CMDLINE(bool, PrintJNIResolving, true) != JVMFlag::SUCCESS) {
+        if (FLAG_SET_CMDLINE(PrintJNIResolving, true) != JVMFlag::SUCCESS) {
           return JNI_EINVAL;
         }
       }
@@ -2515,24 +2515,24 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, bool* patch_m
      set_enable_preview();
    // -Xnoclassgc
    } else if (match_option(option, "-Xnoclassgc")) {
-      if (FLAG_SET_CMDLINE(bool, ClassUnloading, false) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(ClassUnloading, false) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
    // -Xconcgc
    } else if (match_option(option, "-Xconcgc")) {
-      if (FLAG_SET_CMDLINE(bool, UseConcMarkSweepGC, true) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(UseConcMarkSweepGC, true) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
      handle_extra_cms_flags("-Xconcgc uses UseConcMarkSweepGC");
    // -Xnoconcgc
    } else if (match_option(option, "-Xnoconcgc")) {
-      if (FLAG_SET_CMDLINE(bool, UseConcMarkSweepGC, false) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(UseConcMarkSweepGC, false) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
      handle_extra_cms_flags("-Xnoconcgc uses UseConcMarkSweepGC");
    // -Xbatch
    } else if (match_option(option, "-Xbatch")) {
-      if (FLAG_SET_CMDLINE(bool, BackgroundCompilation, false) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(BackgroundCompilation, false) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
    // -Xmn for compatibility with other JVM vendors
@@ -2545,10 +2545,10 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, bool* patch_m
        describe_range_error(errcode);
        return JNI_EINVAL;
      }
-      if (FLAG_SET_CMDLINE(size_t, MaxNewSize, (size_t)long_initial_young_size) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(MaxNewSize, (size_t)long_initial_young_size) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
-      if (FLAG_SET_CMDLINE(size_t, NewSize, (size_t)long_initial_young_size) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(NewSize, (size_t)long_initial_young_size) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
    // -Xms
@@ -2565,7 +2565,7 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, bool* patch_m
      MinHeapSize = (size_t)long_initial_heap_size;
      // Currently the minimum size and the initial heap sizes are the same.
      // Can be overridden with -XX:InitialHeapSize.
-      if (FLAG_SET_CMDLINE(size_t, InitialHeapSize, (size_t)long_initial_heap_size) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(InitialHeapSize, (size_t)long_initial_heap_size) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
    // -Xmx
@@ -2578,7 +2578,7 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, bool* patch_m
        describe_range_error(errcode);
        return JNI_EINVAL;
      }
-      if (FLAG_SET_CMDLINE(size_t, MaxHeapSize, (size_t)long_max_heap_size) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(MaxHeapSize, (size_t)long_max_heap_size) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
    // Xmaxf
@@ -2591,7 +2591,7 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, bool* patch_m
                  option->optionString);
        return JNI_EINVAL;
      } else {
-        if (FLAG_SET_CMDLINE(uintx, MaxHeapFreeRatio, maxf) != JVMFlag::SUCCESS) {
+        if (FLAG_SET_CMDLINE(MaxHeapFreeRatio, maxf) != JVMFlag::SUCCESS) {
          return JNI_EINVAL;
        }
      }
@@ -2605,7 +2605,7 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, bool* patch_m
                  option->optionString);
        return JNI_EINVAL;
      } else {
-        if (FLAG_SET_CMDLINE(uintx, MinHeapFreeRatio, minf) != JVMFlag::SUCCESS) {
+        if (FLAG_SET_CMDLINE(MinHeapFreeRatio, minf) != JVMFlag::SUCCESS) {
          return JNI_EINVAL;
        }
      }
@@ -2616,7 +2616,7 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, bool* patch_m
      if (err != JNI_OK) {
        return err;
      }
-      if (FLAG_SET_CMDLINE(intx, ThreadStackSize, value) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(ThreadStackSize, value) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
    } else if (match_option(option, "-Xmaxjitcodesize", &tail) ||
@@ -2629,7 +2629,7 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, bool* patch_m
                  "Invalid maximum code cache size: %s.\n", option->optionString);
        return JNI_EINVAL;
      }
-      if (FLAG_SET_CMDLINE(uintx, ReservedCodeCacheSize, (uintx)long_ReservedCodeCacheSize) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(ReservedCodeCacheSize, (uintx)long_ReservedCodeCacheSize) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
    // -green
@@ -2643,7 +2643,7 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, bool* patch_m
    // -Xrs
    } else if (match_option(option, "-Xrs")) {
      // Classic/EVM option, new functionality
-      if (FLAG_SET_CMDLINE(bool, ReduceSignalUsage, true) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(ReduceSignalUsage, true) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
    // -Xprof
@@ -2695,7 +2695,7 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, bool* patch_m
    // Out of the box management support
    if (match_option(option, "-Dcom.sun.management", &tail)) {
 #if INCLUDE_MANAGEMENT
-      if (FLAG_SET_CMDLINE(bool, ManagementServer, true) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(ManagementServer, true) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
      // management agent in module jdk.management.agent
@@ -2720,54 +2720,54 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, bool* patch_m
      set_mode_flags(_comp);
    // -Xshare:dump
    } else if (match_option(option, "-Xshare:dump")) {
-      if (FLAG_SET_CMDLINE(bool, DumpSharedSpaces, true) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(DumpSharedSpaces, true) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
    // -Xshare:on
    } else if (match_option(option, "-Xshare:on")) {
-      if (FLAG_SET_CMDLINE(bool, UseSharedSpaces, true) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(UseSharedSpaces, true) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
-      if (FLAG_SET_CMDLINE(bool, RequireSharedSpaces, true) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(RequireSharedSpaces, true) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
    // -Xshare:auto || -XX:ArchiveClassesAtExit=<archive file>
    } else if (match_option(option, "-Xshare:auto")) {
-      if (FLAG_SET_CMDLINE(bool, UseSharedSpaces, true) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(UseSharedSpaces, true) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
-      if (FLAG_SET_CMDLINE(bool, RequireSharedSpaces, false) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(RequireSharedSpaces, false) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
    // -Xshare:off
    } else if (match_option(option, "-Xshare:off")) {
-      if (FLAG_SET_CMDLINE(bool, UseSharedSpaces, false) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(UseSharedSpaces, false) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
-      if (FLAG_SET_CMDLINE(bool, RequireSharedSpaces, false) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(RequireSharedSpaces, false) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
    // -Xverify
    } else if (match_option(option, "-Xverify", &tail)) {
      if (strcmp(tail, ":all") == 0 || strcmp(tail, "") == 0) {
-        if (FLAG_SET_CMDLINE(bool, BytecodeVerificationLocal, true) != JVMFlag::SUCCESS) {
+        if (FLAG_SET_CMDLINE(BytecodeVerificationLocal, true) != JVMFlag::SUCCESS) {
          return JNI_EINVAL;
        }
-        if (FLAG_SET_CMDLINE(bool, BytecodeVerificationRemote, true) != JVMFlag::SUCCESS) {
+        if (FLAG_SET_CMDLINE(BytecodeVerificationRemote, true) != JVMFlag::SUCCESS) {
          return JNI_EINVAL;
        }
      } else if (strcmp(tail, ":remote") == 0) {
-        if (FLAG_SET_CMDLINE(bool, BytecodeVerificationLocal, false) != JVMFlag::SUCCESS) {
+        if (FLAG_SET_CMDLINE(BytecodeVerificationLocal, false) != JVMFlag::SUCCESS) {
          return JNI_EINVAL;
        }
-        if (FLAG_SET_CMDLINE(bool, BytecodeVerificationRemote, true) != JVMFlag::SUCCESS) {
+        if (FLAG_SET_CMDLINE(BytecodeVerificationRemote, true) != JVMFlag::SUCCESS) {
          return JNI_EINVAL;
        }
      } else if (strcmp(tail, ":none") == 0) {
-        if (FLAG_SET_CMDLINE(bool, BytecodeVerificationLocal, false) != JVMFlag::SUCCESS) {
+        if (FLAG_SET_CMDLINE(BytecodeVerificationLocal, false) != JVMFlag::SUCCESS) {
          return JNI_EINVAL;
        }
-        if (FLAG_SET_CMDLINE(bool, BytecodeVerificationRemote, false) != JVMFlag::SUCCESS) {
+        if (FLAG_SET_CMDLINE(BytecodeVerificationRemote, false) != JVMFlag::SUCCESS) {
          return JNI_EINVAL;
        }
        warning("Options -Xverify:none and -noverify were deprecated in JDK 13 and will likely be removed in a future release.");
@@ -2827,23 +2827,23 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, bool* patch_m
    // Need to keep consistency of MaxTenuringThreshold and AlwaysTenure/NeverTenure;
    // and the last option wins.
    } else if (match_option(option, "-XX:+NeverTenure")) {
-      if (FLAG_SET_CMDLINE(bool, NeverTenure, true) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(NeverTenure, true) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
-      if (FLAG_SET_CMDLINE(bool, AlwaysTenure, false) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(AlwaysTenure, false) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
-      if (FLAG_SET_CMDLINE(uintx, MaxTenuringThreshold, markOopDesc::max_age + 1) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(MaxTenuringThreshold, markOopDesc::max_age + 1) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
    } else if (match_option(option, "-XX:+AlwaysTenure")) {
-      if (FLAG_SET_CMDLINE(bool, NeverTenure, false) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(NeverTenure, false) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
-      if (FLAG_SET_CMDLINE(bool, AlwaysTenure, true) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(AlwaysTenure, true) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
-      if (FLAG_SET_CMDLINE(uintx, MaxTenuringThreshold, 0) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(MaxTenuringThreshold, 0) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
    } else if (match_option(option, "-XX:MaxTenuringThreshold=", &tail)) {
@@ -2854,65 +2854,65 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, bool* patch_m
        return JNI_EINVAL;
      }

-      if (FLAG_SET_CMDLINE(uintx, MaxTenuringThreshold, max_tenuring_thresh) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(MaxTenuringThreshold, max_tenuring_thresh) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }

      if (MaxTenuringThreshold == 0) {
-        if (FLAG_SET_CMDLINE(bool, NeverTenure, false) != JVMFlag::SUCCESS) {
+        if (FLAG_SET_CMDLINE(NeverTenure, false) != JVMFlag::SUCCESS) {
          return JNI_EINVAL;
        }
-        if (FLAG_SET_CMDLINE(bool, AlwaysTenure, true) != JVMFlag::SUCCESS) {
+        if (FLAG_SET_CMDLINE(AlwaysTenure, true) != JVMFlag::SUCCESS) {
          return JNI_EINVAL;
        }
      } else {
-        if (FLAG_SET_CMDLINE(bool, NeverTenure, false) != JVMFlag::SUCCESS) {
+        if (FLAG_SET_CMDLINE(NeverTenure, false) != JVMFlag::SUCCESS) {
          return JNI_EINVAL;
        }
-        if (FLAG_SET_CMDLINE(bool, AlwaysTenure, false) != JVMFlag::SUCCESS) {
+        if (FLAG_SET_CMDLINE(AlwaysTenure, false) != JVMFlag::SUCCESS) {
          return JNI_EINVAL;
        }
      }
    } else if (match_option(option, "-XX:+DisplayVMOutputToStderr")) {
-      if (FLAG_SET_CMDLINE(bool, DisplayVMOutputToStdout, false) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(DisplayVMOutputToStdout, false) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
-      if (FLAG_SET_CMDLINE(bool, DisplayVMOutputToStderr, true) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(DisplayVMOutputToStderr, true) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
    } else if (match_option(option, "-XX:+DisplayVMOutputToStdout")) {
-      if (FLAG_SET_CMDLINE(bool, DisplayVMOutputToStderr, false) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(DisplayVMOutputToStderr, false) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
-      if (FLAG_SET_CMDLINE(bool, DisplayVMOutputToStdout, true) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(DisplayVMOutputToStdout, true) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
    } else if (match_option(option, "-XX:+ErrorFileToStderr")) {
-      if (FLAG_SET_CMDLINE(bool, ErrorFileToStdout, false) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(ErrorFileToStdout, false) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
-      if (FLAG_SET_CMDLINE(bool, ErrorFileToStderr, true) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(ErrorFileToStderr, true) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
    } else if (match_option(option, "-XX:+ErrorFileToStdout")) {
-      if (FLAG_SET_CMDLINE(bool, ErrorFileToStderr, false) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(ErrorFileToStderr, false) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
-      if (FLAG_SET_CMDLINE(bool, ErrorFileToStdout, true) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(ErrorFileToStdout, true) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
    } else if (match_option(option, "-XX:+ExtendedDTraceProbes")) {
 #if defined(DTRACE_ENABLED)
-      if (FLAG_SET_CMDLINE(bool, ExtendedDTraceProbes, true) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(ExtendedDTraceProbes, true) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
-      if (FLAG_SET_CMDLINE(bool, DTraceMethodProbes, true) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(DTraceMethodProbes, true) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
-      if (FLAG_SET_CMDLINE(bool, DTraceAllocProbes, true) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(DTraceAllocProbes, true) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
-      if (FLAG_SET_CMDLINE(bool, DTraceMonitorProbes, true) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(DTraceMonitorProbes, true) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
 #else // defined(DTRACE_ENABLED)
@@ -2922,11 +2922,11 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, bool* patch_m
 #endif // defined(DTRACE_ENABLED)
 #ifdef ASSERT
    } else if (match_option(option, "-XX:+FullGCALot")) {
-      if (FLAG_SET_CMDLINE(bool, FullGCALot, true) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(FullGCALot, true) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
      // disable scavenge before parallel mark-compact
-      if (FLAG_SET_CMDLINE(bool, ScavengeBeforeFullGC, false) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(ScavengeBeforeFullGC, false) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
 #endif
@@ -2959,10 +2959,10 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, bool* patch_m
    //   -Xshare:on
    //   -Xlog:class+path=info
    if (PrintSharedArchiveAndExit) {
-      if (FLAG_SET_CMDLINE(bool, UseSharedSpaces, true) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(UseSharedSpaces, true) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
-      if (FLAG_SET_CMDLINE(bool, RequireSharedSpaces, true) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(RequireSharedSpaces, true) != JVMFlag::SUCCESS) {
        return JNI_EINVAL;
      }
      LogConfiguration::configure_stdout(LogLevel::Info, true, LOG_TAGS(class, path));
@@ -3087,7 +3087,7 @@ jint Arguments::finalize_vm_init_args(bool patch_mod_javabase) {

  // eventually fix up InitialTenuringThreshold if only MaxTenuringThreshold is set
  if (FLAG_IS_DEFAULT(InitialTenuringThreshold) && (InitialTenuringThreshold > MaxTenuringThreshold)) {
-    FLAG_SET_ERGO(uintx, InitialTenuringThreshold, MaxTenuringThreshold);
|
||||
FLAG_SET_ERGO(InitialTenuringThreshold, MaxTenuringThreshold);
|
||||
}
|
||||
|
||||
#if !COMPILER2_OR_JVMCI
|
||||
@ -3514,7 +3514,7 @@ bool Arguments::init_shared_archive_paths() {
|
||||
if (DumpSharedSpaces) {
|
||||
vm_exit_during_initialization("-XX:ArchiveClassesAtExit cannot be used with -Xshare:dump");
|
||||
}
|
||||
if (FLAG_SET_CMDLINE(bool, DynamicDumpSharedSpaces, true) != JVMFlag::SUCCESS) {
|
||||
if (FLAG_SET_CMDLINE(DynamicDumpSharedSpaces, true) != JVMFlag::SUCCESS) {
|
||||
return false;
|
||||
}
|
||||
check_unsupported_dumping_properties();
|
||||
@ -4067,7 +4067,7 @@ jint Arguments::apply_ergo() {
|
||||
#if defined(IA32)
|
||||
// Only server compiler can optimize safepoints well enough.
|
||||
if (!is_server_compilation_mode_vm()) {
|
||||
FLAG_SET_ERGO_IF_DEFAULT(bool, ThreadLocalHandshakes, false);
|
||||
FLAG_SET_ERGO_IF_DEFAULT(ThreadLocalHandshakes, false);
|
||||
}
|
||||
#endif
|
||||
|
||||
@ -4084,7 +4084,7 @@ jint Arguments::apply_ergo() {
|
||||
jint Arguments::adjust_after_os() {
|
||||
if (UseNUMA) {
|
||||
if (!FLAG_IS_DEFAULT(AllocateHeapAt)) {
|
||||
FLAG_SET_ERGO(bool, UseNUMA, false);
|
||||
FLAG_SET_ERGO(UseNUMA, false);
|
||||
} else if (UseParallelGC || UseParallelOldGC) {
|
||||
if (FLAG_IS_DEFAULT(MinHeapDeltaBytes)) {
|
||||
FLAG_SET_DEFAULT(MinHeapDeltaBytes, 64*M);
|
||||
@ -4099,7 +4099,7 @@ jint Arguments::adjust_after_os() {
|
||||
// all platforms and ParallelGC on Windows will interleave all
|
||||
// of the heap spaces across NUMA nodes.
|
||||
if (FLAG_IS_DEFAULT(UseNUMAInterleaving)) {
|
||||
FLAG_SET_ERGO(bool, UseNUMAInterleaving, true);
|
||||
FLAG_SET_ERGO(UseNUMAInterleaving, true);
|
||||
}
|
||||
}
|
||||
return JNI_OK;
|
||||
|
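To make the call-site pattern in the hunks above concrete, here is a minimal before/after sketch (illustrative only; the flag names are taken from the hunks, not new API):

// Before this change the caller had to repeat the flag's declared type:
//   FLAG_SET_CMDLINE(uintx, MaxTenuringThreshold, 0);
//   FLAG_SET_ERGO(bool, UseNUMA, false);
// After the change the type is derived from the flag itself:
//   FLAG_SET_CMDLINE(MaxTenuringThreshold, 0);
//   FLAG_SET_ERGO(UseNUMA, false);
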
@ -232,7 +232,7 @@ void NonTieredCompPolicy::initialize() {
// Lower the compiler count such that all buffers fit into the code cache
_compiler_count = MAX2(max_count, 1);
}
FLAG_SET_ERGO(intx, CICompilerCount, _compiler_count);
FLAG_SET_ERGO(CICompilerCount, _compiler_count);
} else {
_compiler_count = CICompilerCount;
}

@ -68,24 +68,6 @@ typedef enum {
NUM_JVMFlagsEnum
} JVMFlagsEnum;

#define FLAG_IS_DEFAULT(name) (JVMFlagEx::is_default(FLAG_MEMBER_ENUM(name)))
#define FLAG_IS_ERGO(name) (JVMFlagEx::is_ergo(FLAG_MEMBER_ENUM(name)))
#define FLAG_IS_CMDLINE(name) (JVMFlagEx::is_cmdline(FLAG_MEMBER_ENUM(name)))

#define FLAG_SET_DEFAULT(name, value) ((name) = (value))

#define FLAG_SET_CMDLINE(type, name, value) (JVMFlagEx::setOnCmdLine(FLAG_MEMBER_ENUM(name)), \
JVMFlagEx::type##AtPut(FLAG_MEMBER_ENUM(name), (type)(value), JVMFlag::COMMAND_LINE))
#define FLAG_SET_ERGO(type, name, value) (JVMFlagEx::type##AtPut(FLAG_MEMBER_ENUM(name), (type)(value), JVMFlag::ERGONOMIC))
#define FLAG_SET_MGMT(type, name, value) (JVMFlagEx::type##AtPut(FLAG_MEMBER_ENUM(name), (type)(value), JVMFlag::MANAGEMENT))

#define FLAG_SET_ERGO_IF_DEFAULT(type, name, value) \
do { \
if (FLAG_IS_DEFAULT(name)) { \
FLAG_SET_ERGO(type, name, value); \
} \
} while (0)

// Can't put the following in JVMFlags because
// of a circular dependency on the enum definition.
class JVMFlagEx : JVMFlag {
@ -100,6 +82,9 @@ class JVMFlagEx : JVMFlag {
static JVMFlag::Error doubleAtPut(JVMFlagsEnum flag, double value, JVMFlag::Flags origin);
// Contract: Flag will make private copy of the incoming value
static JVMFlag::Error ccstrAtPut(JVMFlagsEnum flag, ccstr value, JVMFlag::Flags origin);
static JVMFlag::Error ccstrlistAtPut(JVMFlagsEnum flag, ccstr value, JVMFlag::Flags origin) {
return ccstrAtPut(flag, value, origin);
}

static bool is_default(JVMFlagsEnum flag);
static bool is_ergo(JVMFlagsEnum flag);
@ -108,4 +93,58 @@ class JVMFlagEx : JVMFlag {
static void setOnCmdLine(JVMFlagsEnum flag);
};

// Construct set functions for all flags

#define FLAG_MEMBER_SET(name) Flag_##name##_set
#define FLAG_MEMBER_SET_(type, name) inline JVMFlag::Error FLAG_MEMBER_SET(name)(type value, JVMFlag::Flags origin) { return JVMFlagEx::type##AtPut(FLAG_MEMBER_ENUM(name), value, origin); }

#define FLAG_MEMBER_SET_PRODUCT(type, name, value, doc) FLAG_MEMBER_SET_(type, name)
#define FLAG_MEMBER_SET_PD_PRODUCT(type, name, doc) FLAG_MEMBER_SET_(type, name)
#define FLAG_MEMBER_SET_DIAGNOSTIC(type, name, value, doc) FLAG_MEMBER_SET_(type, name)
#define FLAG_MEMBER_SET_PD_DIAGNOSTIC(type, name, doc) FLAG_MEMBER_SET_(type, name)
#define FLAG_MEMBER_SET_EXPERIMENTAL(type, name, value, doc) FLAG_MEMBER_SET_(type, name)
#define FLAG_MEMBER_SET_MANAGEABLE(type, name, value, doc) FLAG_MEMBER_SET_(type, name)
#define FLAG_MEMBER_SET_PRODUCT_RW(type, name, value, doc) FLAG_MEMBER_SET_(type, name)
#define FLAG_MEMBER_SET_DEVELOP(type, name, value, doc) FLAG_MEMBER_SET_(type, name)
#define FLAG_MEMBER_SET_PD_DEVELOP(type, name, doc) FLAG_MEMBER_SET_(type, name)
#define FLAG_MEMBER_SET_NOTPRODUCT(type, name, value, doc) FLAG_MEMBER_SET_(type, name)
#ifdef _LP64
#define FLAG_MEMBER_SET_LP64_PRODUCT(type, name, value, doc) FLAG_MEMBER_SET_(type, name)
#else
#define FLAG_MEMBER_SET_LP64_PRODUCT(type, name, value, doc) /* flag is constant */
#endif // _LP64

ALL_FLAGS(FLAG_MEMBER_SET_DEVELOP,
FLAG_MEMBER_SET_PD_DEVELOP,
FLAG_MEMBER_SET_PRODUCT,
FLAG_MEMBER_SET_PD_PRODUCT,
FLAG_MEMBER_SET_DIAGNOSTIC,
FLAG_MEMBER_SET_PD_DIAGNOSTIC,
FLAG_MEMBER_SET_EXPERIMENTAL,
FLAG_MEMBER_SET_NOTPRODUCT,
FLAG_MEMBER_SET_MANAGEABLE,
FLAG_MEMBER_SET_PRODUCT_RW,
FLAG_MEMBER_SET_LP64_PRODUCT,
IGNORE_RANGE,
IGNORE_CONSTRAINT,
IGNORE_WRITEABLE)

#define FLAG_IS_DEFAULT(name) (JVMFlagEx::is_default(FLAG_MEMBER_ENUM(name)))
#define FLAG_IS_ERGO(name) (JVMFlagEx::is_ergo(FLAG_MEMBER_ENUM(name)))
#define FLAG_IS_CMDLINE(name) (JVMFlagEx::is_cmdline(FLAG_MEMBER_ENUM(name)))

#define FLAG_SET_DEFAULT(name, value) ((name) = (value))

#define FLAG_SET_CMDLINE(name, value) (JVMFlagEx::setOnCmdLine(FLAG_MEMBER_ENUM(name)), \
FLAG_MEMBER_SET(name)((value), JVMFlag::COMMAND_LINE))
#define FLAG_SET_ERGO(name, value) (FLAG_MEMBER_SET(name)((value), JVMFlag::ERGONOMIC))
#define FLAG_SET_MGMT(name, value) (FLAG_MEMBER_SET(name)((value), JVMFlag::MANAGEMENT))

#define FLAG_SET_ERGO_IF_DEFAULT(name, value) \
do { \
if (FLAG_IS_DEFAULT(name)) { \
FLAG_SET_ERGO(name, value); \
} \
} while (0)

#endif // SHARE_RUNTIME_GLOBALS_EXTENSION_HPP

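A rough sketch of what the generated per-flag setters buy us (illustrative only; it assumes FLAG_MEMBER_ENUM(name) expands to Flag_name_enum and that a boolAtPut overload exists alongside the doubleAtPut/ccstrAtPut shown above):

// FLAG_MEMBER_SET_(bool, UseNUMA) would generate roughly:
//   inline JVMFlag::Error Flag_UseNUMA_set(bool value, JVMFlag::Flags origin) {
//     return JVMFlagEx::boolAtPut(Flag_UseNUMA_enum, value, origin);
//   }
// so FLAG_SET_ERGO(UseNUMA, false) now expands to
//   Flag_UseNUMA_set(false, JVMFlag::ERGONOMIC)
// and the flag's type is supplied by the generated setter, not the caller.
// Note the design consequence: the old macros applied an explicit (type)(value)
// cast, whereas the new setters take the flag's type as a parameter, so the
// argument is converted implicitly and mismatches show up at compile time.
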
@ -238,7 +238,7 @@ void TieredThresholdPolicy::initialize() {
// Lower the compiler count such that all buffers fit into the code cache
count = MAX2(max_count, c1_only ? 1 : 2);
}
FLAG_SET_ERGO(intx, CICompilerCount, count);
FLAG_SET_ERGO(CICompilerCount, count);
}
#else
// On 32-bit systems, the number of compiler threads is limited to 3.
@ -250,7 +250,7 @@ void TieredThresholdPolicy::initialize() {
/// available to the VM and thus cause the VM to crash.
if (FLAG_IS_DEFAULT(CICompilerCount)) {
count = 3;
FLAG_SET_ERGO(intx, CICompilerCount, count);
FLAG_SET_ERGO(CICompilerCount, count);
}
#endif

@ -66,10 +66,10 @@ class TestGenCollectorPolicy {
FLAG_GUARD(OldSize);

MinHeapSize = 40 * M;
FLAG_SET_ERGO(size_t, InitialHeapSize, 100 * M);
FLAG_SET_ERGO(size_t, OldSize, 4 * M);
FLAG_SET_ERGO(size_t, NewSize, 1 * M);
FLAG_SET_ERGO(size_t, MaxNewSize, 80 * M);
FLAG_SET_ERGO(InitialHeapSize, 100 * M);
FLAG_SET_ERGO(OldSize, 4 * M);
FLAG_SET_ERGO(NewSize, 1 * M);
FLAG_SET_ERGO(MaxNewSize, 80 * M);

ASSERT_NO_FATAL_FAILURE(setter1->execute());

@ -88,7 +88,7 @@ class TestGenCollectorPolicy {
public:
SetNewSizeErgo(size_t param) : UnaryExecutor(param) { }
void execute() {
FLAG_SET_ERGO(size_t, NewSize, param);
FLAG_SET_ERGO(NewSize, param);
}
};

@ -129,7 +129,7 @@ class TestGenCollectorPolicy {
public:
SetNewSizeCmd(size_t param) : UnaryExecutor(param) { }
void execute() {
FLAG_SET_CMDLINE(size_t, NewSize, param);
FLAG_SET_CMDLINE(NewSize, param);
}
};

@ -148,7 +148,7 @@ class TestGenCollectorPolicy {
public:
SetOldSizeCmd(size_t param) : UnaryExecutor(param) { }
void execute() {
FLAG_SET_CMDLINE(size_t, OldSize, param);
FLAG_SET_CMDLINE(OldSize, param);
}
};

@ -159,7 +159,7 @@ class TestGenCollectorPolicy {
size_t heap_alignment = GCArguments::compute_heap_alignment();
size_t new_size_value = align_up(MaxHeapSize, heap_alignment)
- param1 + param2;
FLAG_SET_CMDLINE(size_t, MaxNewSize, new_size_value);
FLAG_SET_CMDLINE(MaxNewSize, new_size_value);
}
};

@ -59,8 +59,8 @@ TEST_VM(os_windows, reserve_memory_special) {
// set globals to make sure we hit the correct code path
FLAG_GUARD(UseLargePagesIndividualAllocation);
FLAG_GUARD(UseNUMAInterleaving);
FLAG_SET_CMDLINE(bool, UseLargePagesIndividualAllocation, false);
FLAG_SET_CMDLINE(bool, UseNUMAInterleaving, false);
FLAG_SET_CMDLINE(UseLargePagesIndividualAllocation, false);
FLAG_SET_CMDLINE(UseNUMAInterleaving, false);

const size_t large_allocation_size = os::large_page_size() * 4;
char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);