Merge
commit db5f26110b (hotspot)
Changed paths:

  agent/src/share/classes/sun/jvm/hotspot/oops
  src/cpu/aarch64/vm
  src/cpu/ppc/vm
  src/cpu/sparc/vm
  src/cpu/x86/vm
  src/cpu/zero/vm
  src/os/aix/vm
  src/os_cpu
  src/share/vm/c1
  src/share/vm/classfile
  src/share/vm/gc/cms
  src/share/vm/gc/g1
    concurrentG1Refine.hpp, concurrentMark.cpp, g1CodeBlobClosure.hpp,
    g1CollectedHeap.cpp, g1CollectedHeap.hpp, g1CollectorPolicy.cpp,
    g1CollectorPolicy.hpp, g1CollectorState.hpp, g1ErgoVerbose.cpp,
    g1ErgoVerbose.hpp, g1GCPhaseTimes.cpp, g1GCPhaseTimes.hpp,
    g1HotCardCache.cpp, g1HotCardCache.hpp, g1InCSetState.hpp,
    g1OopClosures.cpp, g1OopClosures.hpp, g1OopClosures.inline.hpp,
    g1ParScanThreadState.cpp, g1ParScanThreadState.hpp,
    g1ParScanThreadState.inline.hpp, g1Predictions.cpp, g1Predictions.hpp,
    g1RemSet.cpp, g1RemSet.hpp, g1RootClosures.cpp, g1RootClosures.hpp,
    g1RootProcessor.cpp, g1RootProcessor.hpp, heapRegion.hpp,
    survRateGroup.cpp, survRateGroup.hpp, youngList.cpp, youngList.hpp
  src/share/vm/gc/parallel
    psAdaptiveSizePolicy.cpp, psAdaptiveSizePolicy.hpp,
    psParallelCompact.cpp, psParallelCompact.hpp, psScavenge.cpp
  src/share/vm/gc/serial
  src/share/vm/gc/shared
    cardGeneration.cpp, cardGeneration.hpp, cardTableRS.cpp, cardTableRS.hpp,
    collectorPolicy.cpp, collectorPolicy.hpp, gcId.cpp, genCollectedHeap.cpp,
    genCollectedHeap.hpp, genOopClosures.hpp, genOopClosures.inline.hpp,
    genRemSet.cpp, genRemSet.hpp, generation.cpp, generation.hpp,
    generationSpec.cpp, generationSpec.hpp, space.cpp, space.hpp,
    watermark.hpp
  src/share/vm/interpreter
  src/share/vm/memory
  src/share/vm/oops
  src/share/vm/opto
  src/share/vm/precompiled
  src/share/vm/prims
  src/share/vm/runtime
    arguments.cpp, commandLineFlagConstraintList.cpp,
    commandLineFlagConstraintList.hpp, commandLineFlagConstraintsRuntime.cpp,
    commandLineFlagConstraintsRuntime.hpp, commandLineFlagRangeList.cpp,
    globals.hpp, safepoint.cpp, thread.cpp, vmStructs.cpp, vmThread.cpp
  src/share/vm/utilities
agent/src/share/classes/sun/jvm/hotspot/oops/Method.java

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -52,21 +52,19 @@ public class Method extends Metadata {
   }

   private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
-    Type type = db.lookupType("Method");
+    type = db.lookupType("Method");
     constMethod = type.getAddressField("_constMethod");
     methodData = type.getAddressField("_method_data");
     methodCounters = type.getAddressField("_method_counters");
-    methodSize = new CIntField(type.getCIntegerField("_method_size"), 0);
     accessFlags = new CIntField(type.getCIntegerField("_access_flags"), 0);
     code = type.getAddressField("_code");
     vtableIndex = new CIntField(type.getCIntegerField("_vtable_index"), 0);
-    bytecodeOffset = type.getSize();

     /*
-    interpreterEntry = type.getAddressField("_interpreter_entry");
     fromCompiledCodeEntryPoint = type.getAddressField("_from_compiled_code_entry_point");
+    interpreterEntry = type.getAddressField("_from_interpreted_entry");
     */

     objectInitializerName = null;
     classInitializerName = null;
   }
@@ -77,16 +75,22 @@ public class Method extends Metadata {

   public boolean isMethod() { return true; }

+  // Not a Method field, used to keep type.
+  private static Type type;
+
   // Fields
   private static AddressField constMethod;
   private static AddressField methodData;
   private static AddressField methodCounters;
-  private static CIntField methodSize;
   private static CIntField accessFlags;
   private static CIntField vtableIndex;
-  private static long bytecodeOffset;

   private static AddressField code;
+  /*
+  private static AddressCField fromCompiledCodeEntryPoint;
+  private static AddressField interpreterEntry;
+  */

   // constant method names - <init>, <clinit>
   // Initialized lazily to avoid initialization ordering dependencies between Method and SymbolTable
@@ -106,11 +110,6 @@ public class Method extends Metadata {
   }

-  /*
-  private static AddressCField interpreterEntry;
-  private static AddressCField fromCompiledCodeEntryPoint;
-  */
-
   // Accessors for declared fields
   public ConstMethod getConstMethod() {
     Address addr = constMethod.getValue(getAddress());
@@ -128,7 +127,6 @@ public class Method extends Metadata {
     return (MethodCounters) VMObjectFactory.newObject(MethodCounters.class, addr);
   }
   /** WARNING: this is in words, not useful in this system; use getObjectSize() instead */
-  public long getMethodSize() { return methodSize.getValue(this); }
   public long getMaxStack() { return getConstMethod().getMaxStack(); }
   public long getMaxLocals() { return getConstMethod().getMaxLocals(); }
   public long getSizeOfParameters() { return getConstMethod().getSizeOfParameters(); }
@@ -265,7 +263,7 @@ public class Method extends Metadata {
   }

   public long getSize() {
-    return getMethodSize();
+    return type.getSize() + (isNative() ? 2: 0);
   }

   public void printValueOn(PrintStream tty) {
@@ -273,7 +271,6 @@ public class Method extends Metadata {
   }

   public void iterateFields(MetadataVisitor visitor) {
-    visitor.doCInt(methodSize, true);
     visitor.doCInt(accessFlags, true);
   }

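Note: the VM-side background for the rewritten getSize() is that a native
Method carries two extra pointer-sized slots appended to the object (the
native_function and signature_handler entries), which is what the
isNative() adjustment accounts for. A standalone C++ sketch of that layout
arithmetic; the helper name and the 8-byte word size are illustrative
assumptions, not HotSpot source:

    #include <cstdio>

    // Hypothetical helper mirroring the idea behind Method::size(bool
    // is_native): native methods get room for two extra pointers.
    static long method_object_size(long base_type_size, bool is_native,
                                   long slot_bytes) {
      return base_type_size + (is_native ? 2 * slot_bytes : 0);
    }

    int main() {
      std::printf("plain:  %ld\n", method_object_size(88, false, 8));  // 88
      std::printf("native: %ld\n", method_object_size(88, true, 8));   // 104
      return 0;
    }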
src/cpu/aarch64/vm/globals_aarch64.hpp

@@ -55,10 +55,17 @@ define_pd_global(intx, CodeEntryAlignment, 16);
 define_pd_global(intx, OptoLoopAlignment, 16);
 define_pd_global(intx, InlineFrequencyCount, 100);

-define_pd_global(intx, StackYellowPages, 2);
-define_pd_global(intx, StackRedPages, 1);
+#define DEFAULT_STACK_YELLOW_PAGES (2)
+#define DEFAULT_STACK_RED_PAGES (1)
+#define DEFAULT_STACK_SHADOW_PAGES (4 DEBUG_ONLY(+5))

-define_pd_global(intx, StackShadowPages, 4 DEBUG_ONLY(+5));
+#define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES
+#define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES
+#define MIN_STACK_SHADOW_PAGES DEFAULT_STACK_SHADOW_PAGES
+
+define_pd_global(intx, StackYellowPages, DEFAULT_STACK_YELLOW_PAGES);
+define_pd_global(intx, StackRedPages, DEFAULT_STACK_RED_PAGES);
+define_pd_global(intx, StackShadowPages, DEFAULT_STACK_SHADOW_PAGES);

 define_pd_global(bool, RewriteBytecodes, true);
 define_pd_global(bool, RewriteFrequentPairs, true);
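Note: the same pattern repeats in every platform file below: the direct
define_pd_global() calls are split into DEFAULT_/MIN_ macros, and the flag
definitions then consume the defaults. Exporting an explicit minimum is what
lets shared code validate a user-supplied -XX:StackYellowPages= value against
a platform floor (the changed commandLineFlagConstraintsRuntime.cpp in the
file list is where such runtime constraints live). A minimal standalone
sketch of that check; the function name is illustrative, not HotSpot's:

    #include <cstdio>

    #define DEFAULT_STACK_YELLOW_PAGES (2)
    #define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES

    // Illustrative stand-in for a flag-constraint function: reject values
    // below the platform minimum before the VM sizes its guard zones.
    static bool stack_yellow_pages_ok(long value) {
      if (value < MIN_STACK_YELLOW_PAGES) {
        std::fprintf(stderr, "StackYellowPages (%ld) must be at least %d\n",
                     value, MIN_STACK_YELLOW_PAGES);
        return false;
      }
      return true;
    }

    int main() {
      std::printf("%s\n", stack_yellow_pages_ok(0) ? "ok" : "rejected");
      std::printf("%s\n", stack_yellow_pages_ok(2) ? "ok" : "rejected");
      return 0;
    }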
src/cpu/ppc/vm/globals_ppc.hpp

@@ -41,6 +41,18 @@ define_pd_global(bool, ImplicitNullChecks, true); // Generate code for impli
 define_pd_global(bool, TrapBasedNullChecks, true);
 define_pd_global(bool, UncommonNullCast, true);  // Uncommon-trap NULLs passed to check cast.

+#define DEFAULT_STACK_YELLOW_PAGES (6)
+#define DEFAULT_STACK_RED_PAGES (1)
+#define DEFAULT_STACK_SHADOW_PAGES (6 DEBUG_ONLY(+2))
+
+#define MIN_STACK_YELLOW_PAGES (1)
+#define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES
+#define MIN_STACK_SHADOW_PAGES (1)
+
+define_pd_global(intx, StackYellowPages, DEFAULT_STACK_YELLOW_PAGES);
+define_pd_global(intx, StackRedPages, DEFAULT_STACK_RED_PAGES);
+define_pd_global(intx, StackShadowPages, DEFAULT_STACK_SHADOW_PAGES);
+
 // Use large code-entry alignment.
 define_pd_global(intx, CodeEntryAlignment, 128);
 define_pd_global(intx, OptoLoopAlignment, 16);
src/cpu/sparc/vm/globals_sparc.hpp

@@ -52,19 +52,27 @@ define_pd_global(intx, OptoLoopAlignment, 16);  // = 4*wordSize
 define_pd_global(intx, InlineFrequencyCount, 50);  // we can use more inlining on the SPARC
 define_pd_global(intx, InlineSmallCode, 1500);

+#define DEFAULT_STACK_YELLOW_PAGES (2)
+#define DEFAULT_STACK_RED_PAGES (1)
+
 #ifdef _LP64
 // Stack slots are 2X larger in LP64 than in the 32 bit VM.
 define_pd_global(intx, ThreadStackSize, 1024);
 define_pd_global(intx, VMThreadStackSize, 1024);
-define_pd_global(intx, StackShadowPages, 10 DEBUG_ONLY(+1));
+#define DEFAULT_STACK_SHADOW_PAGES (10 DEBUG_ONLY(+1))
 #else
 define_pd_global(intx, ThreadStackSize, 512);
 define_pd_global(intx, VMThreadStackSize, 512);
-define_pd_global(intx, StackShadowPages, 3 DEBUG_ONLY(+1));
+#define DEFAULT_STACK_SHADOW_PAGES (3 DEBUG_ONLY(+1))
-#endif
+#endif // _LP64

-define_pd_global(intx, StackYellowPages, 2);
-define_pd_global(intx, StackRedPages, 1);
+#define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES
+#define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES
+#define MIN_STACK_SHADOW_PAGES DEFAULT_STACK_SHADOW_PAGES
+
+define_pd_global(intx, StackYellowPages, DEFAULT_STACK_YELLOW_PAGES);
+define_pd_global(intx, StackRedPages, DEFAULT_STACK_RED_PAGES);
+define_pd_global(intx, StackShadowPages, DEFAULT_STACK_SHADOW_PAGES);

 define_pd_global(bool, RewriteBytecodes, true);
 define_pd_global(bool, RewriteFrequentPairs, true);
src/cpu/x86/vm/globals_x86.hpp

@@ -55,16 +55,28 @@ define_pd_global(intx, OptoLoopAlignment, 16);
 define_pd_global(intx, InlineFrequencyCount, 100);
 define_pd_global(intx, InlineSmallCode, 1000);

-define_pd_global(intx, StackYellowPages, NOT_WINDOWS(2) WINDOWS_ONLY(3));
-define_pd_global(intx, StackRedPages, 1);
+#define DEFAULT_STACK_YELLOW_PAGES (NOT_WINDOWS(2) WINDOWS_ONLY(3))
+#define DEFAULT_STACK_RED_PAGES (1)
+
+#define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES
+#define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES

 #ifdef AMD64
 // Very large C++ stack frames using solaris-amd64 optimized builds
 // due to lack of optimization caused by C++ compiler bugs
-define_pd_global(intx, StackShadowPages, NOT_WIN64(20) WIN64_ONLY(6) DEBUG_ONLY(+2));
+#define DEFAULT_STACK_SHADOW_PAGES (NOT_WIN64(20) WIN64_ONLY(6) DEBUG_ONLY(+2))
+// For those clients that do not use write socket, we allow
+// the min range value to be below that of the default
+#define MIN_STACK_SHADOW_PAGES (NOT_WIN64(10) WIN64_ONLY(6) DEBUG_ONLY(+2))
 #else
-define_pd_global(intx, StackShadowPages, 4 DEBUG_ONLY(+5));
+#define DEFAULT_STACK_SHADOW_PAGES (4 DEBUG_ONLY(+5))
+#define MIN_STACK_SHADOW_PAGES DEFAULT_STACK_SHADOW_PAGES
 #endif // AMD64

+define_pd_global(intx, StackYellowPages, DEFAULT_STACK_YELLOW_PAGES);
+define_pd_global(intx, StackRedPages, DEFAULT_STACK_RED_PAGES);
+define_pd_global(intx, StackShadowPages, DEFAULT_STACK_SHADOW_PAGES);
+
 define_pd_global(bool, RewriteBytecodes, true);
 define_pd_global(bool, RewriteFrequentPairs, true);

@@ -135,6 +147,7 @@ define_pd_global(bool, PreserveFramePointer, false);
                                                                    \
   product(uintx, RTMRetryCount, 5,                                 \
           "Number of RTM retries on lock abort or busy")           \
+          range(0, max_uintx)                                      \
                                                                    \
   experimental(intx, RTMSpinLoopCount, 100,                        \
           "Spin count for lock to become free before RTM retry")   \
|
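Note: the bare range(0, max_uintx) clause works because HotSpot's flag
macros thread an optional range argument through to a validation pass (the
new commandLineFlagRangeList.cpp in this commit's file list); the AIX
AttachListenerTimeout hunk further down adds the same kind of clause. A
standalone sketch of the idea, with illustrative names: keep min/max
metadata beside each flag and check it once after parsing.

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Illustrative table; HotSpot derives its equivalent from the
    // range(...) clauses in the flag macros rather than by hand.
    struct FlagRange { const char* name; uint64_t min; uint64_t max; };

    static const FlagRange kRanges[] = {
      { "RTMRetryCount", 0, UINT64_MAX },       // range(0, max_uintx)
      { "AttachListenerTimeout", 0, 2147483 },  // range(0, 2147483)
    };

    static bool range_check(const char* name, uint64_t value) {
      for (const FlagRange& r : kRanges) {
        if (std::strcmp(r.name, name) == 0) {
          return value >= r.min && value <= r.max;
        }
      }
      return true;  // no registered range: accept
    }

    int main() {
      std::printf("%d\n", range_check("AttachListenerTimeout", 5000));     // 1
      std::printf("%d\n", range_check("AttachListenerTimeout", 3000000));  // 0
      return 0;
    }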
src/cpu/zero/vm/globals_zero.hpp

@@ -45,9 +45,17 @@ define_pd_global(intx, OptoLoopAlignment, 16);
 define_pd_global(intx, InlineFrequencyCount, 100);
 define_pd_global(intx, InlineSmallCode, 1000 );

-define_pd_global(intx, StackYellowPages, 2);
-define_pd_global(intx, StackRedPages, 1);
-define_pd_global(intx, StackShadowPages, 5 LP64_ONLY(+1) DEBUG_ONLY(+3));
+#define DEFAULT_STACK_YELLOW_PAGES (2)
+#define DEFAULT_STACK_RED_PAGES (1)
+#define DEFAULT_STACK_SHADOW_PAGES (5 LP64_ONLY(+1) DEBUG_ONLY(+3))
+
+#define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES
+#define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES
+#define MIN_STACK_SHADOW_PAGES DEFAULT_STACK_SHADOW_PAGES
+
+define_pd_global(intx, StackYellowPages, DEFAULT_STACK_YELLOW_PAGES);
+define_pd_global(intx, StackRedPages, DEFAULT_STACK_RED_PAGES);
+define_pd_global(intx, StackShadowPages, DEFAULT_STACK_SHADOW_PAGES);

 define_pd_global(bool, RewriteBytecodes, true);
 define_pd_global(bool, RewriteFrequentPairs, true);
src/os/aix/vm/globals_aix.hpp

@@ -39,15 +39,16 @@
   /* a scarce resource and there may be situations where we do not want the VM */ \
   /* to run with 16M pages. (Will fall back to 64K pages).                     */ \
   product_pd(bool, Use16MPages,                                                   \
          "Use 16M pages if available.")                                           \
                                                                                   \
   /* use optimized addresses for the polling page,                             */ \
   /* e.g. map it to a special 32-bit address.                                  */ \
   product_pd(bool, OptimizePollingPageLocation,                                   \
          "Optimize the location of the polling page used for Safepoints")         \
                                                                                   \
   product_pd(intx, AttachListenerTimeout,                                         \
          "Timeout in ms the attach listener waits for a request")                 \
+         range(0, 2147483)                                                        \
                                                                                   \

 // Per default, do not allow 16M pages. 16M pages have to be switched on specifically.
src/os_cpu (first of two identical hunks in different os_cpu globals files)

@@ -42,10 +42,6 @@ define_pd_global(intx, CompilerThreadStackSize, 4096);
 // Allow extra space in DEBUG builds for asserts.
 define_pd_global(size_t, JVMInvokeMethodSlack, 8192);

-define_pd_global(intx, StackYellowPages, 6);
-define_pd_global(intx, StackRedPages, 1);
-define_pd_global(intx, StackShadowPages, 6 DEBUG_ONLY(+2));
-
 // Only used on 64 bit platforms
 define_pd_global(size_t, HeapBaseMinAddress, 2*G);

src/os_cpu (second of two identical hunks in different os_cpu globals files)

@@ -42,10 +42,6 @@ define_pd_global(intx, CompilerThreadStackSize, 4096);
 // Allow extra space in DEBUG builds for asserts.
 define_pd_global(size_t, JVMInvokeMethodSlack, 8192);

-define_pd_global(intx, StackYellowPages, 6);
-define_pd_global(intx, StackRedPages, 1);
-define_pd_global(intx, StackShadowPages, 6 DEBUG_ONLY(+2));
-
 // Only used on 64 bit platforms
 define_pd_global(size_t, HeapBaseMinAddress, 2*G);

src/share/vm/c1/c1_Runtime1.cpp

@@ -554,7 +554,7 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* t
                   exception->print_value_string(), p2i((address)exception()), nm->method()->print_value_string(), p2i(pc), p2i(thread));
   }
   // for AbortVMOnException flag
-  NOT_PRODUCT(Exceptions::debug_check_abort(exception));
+  Exceptions::debug_check_abort(exception);

   // Clear out the exception oop and pc since looking up an
   // exception handler can cause class loading, which might throw an
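Note: dropping the NOT_PRODUCT(...) wrapper changes when the call is
compiled at all. In HotSpot that macro expands to nothing in product
builds, so the unwrapped call now runs in every build flavor and
AbortVMOnException takes effect in release VMs too. The macro below mirrors
that behavior for illustration; it is a sketch, not the actual HotSpot
header:

    #include <cstdio>

    // Same shape as HotSpot's NOT_PRODUCT: the argument vanishes from
    // product (release) builds and survives in debug builds.
    #ifdef PRODUCT
    #define NOT_PRODUCT(code)
    #else
    #define NOT_PRODUCT(code) code
    #endif

    int main() {
      NOT_PRODUCT(std::printf("debug-only before the change\n");)
      std::printf("always runs after the change\n");
      return 0;
    }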
src/share/vm/classfile/classFileParser.cpp

@@ -2003,6 +2003,10 @@ methodHandle ClassFileParser::parse_method(bool is_interface,
     verify_legal_method_modifiers(flags, is_interface, name, CHECK_(nullHandle));
   }

+  if (name == vmSymbols::object_initializer_name() && is_interface) {
+    classfile_parse_error("Interface cannot have a method named <init>, class file %s", CHECK_(nullHandle));
+  }
+
   int args_size = -1;  // only used when _need_verify is true
   if (_need_verify) {
     args_size = ((flags & JVM_ACC_STATIC) ? 0 : 1) +
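Note: the new parse-time check enforces a JVMS rule: interfaces have no
instances, so an instance initializer <init> is illegal in them, while the
class initializer <clinit> stays legal; the verifier hunk just below can
then drop its interface special case. A toy standalone version of the
predicate (illustrative, not HotSpot code):

    #include <cstdio>
    #include <cstring>

    // Toy stand-in for the rule added above: reject "<init>" as a method
    // name when the enclosing type is an interface.
    static bool method_name_legal(const char* name, bool is_interface) {
      return !(is_interface && std::strcmp(name, "<init>") == 0);
    }

    int main() {
      std::printf("%d\n", method_name_legal("<init>", true));    // 0: rejected
      std::printf("%d\n", method_name_legal("<clinit>", true));  // 1: allowed
      std::printf("%d\n", method_name_legal("<init>", false));   // 1: allowed
      return 0;
    }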
src/share/vm/classfile/verifier.cpp

@@ -1579,11 +1579,9 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
     return;
   }
   // Make sure "this" has been initialized if current method is an
-  // <init>. Note that "<init>" methods in interfaces are just
-  // normal methods. Interfaces cannot have ctors.
+  // <init>.
   if (_method->name() == vmSymbols::object_initializer_name() &&
-      current_frame.flag_this_uninit() &&
-      !current_class()->is_interface()) {
+      current_frame.flag_this_uninit()) {
     verify_error(ErrorContext::bad_code(bci),
                  "Constructor must call super() or this() "
                  "before return");
src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp

@@ -6051,8 +6051,8 @@ MarkRefsIntoClosure::MarkRefsIntoClosure(
   _span(span),
   _bitMap(bitMap)
 {
-  assert(_ref_processor == NULL, "deliberately left NULL");
+  assert(ref_processor() == NULL, "deliberately left NULL");
   assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
 }

 void MarkRefsIntoClosure::do_oop(oop obj) {
@@ -6073,8 +6073,8 @@ Par_MarkRefsIntoClosure::Par_MarkRefsIntoClosure(
   _span(span),
   _bitMap(bitMap)
 {
-  assert(_ref_processor == NULL, "deliberately left NULL");
+  assert(ref_processor() == NULL, "deliberately left NULL");
   assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
 }

 void Par_MarkRefsIntoClosure::do_oop(oop obj) {
@@ -6097,8 +6097,8 @@ MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
   _verification_bm(verification_bm),
   _cms_bm(cms_bm)
 {
-  assert(_ref_processor == NULL, "deliberately left NULL");
+  assert(ref_processor() == NULL, "deliberately left NULL");
   assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
 }

 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
@@ -6140,8 +6140,9 @@ MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
   _concurrent_precleaning(concurrent_precleaning),
   _freelistLock(NULL)
 {
-  _ref_processor = rp;
-  assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
+  // FIXME: Should initialize in base class constructor.
+  assert(rp != NULL, "ref_processor shouldn't be NULL");
+  set_ref_processor_internal(rp);
 }

 // This closure is used to mark refs into the CMS generation at the
@@ -6246,8 +6247,9 @@ Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
                          ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads))),
   _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue)
 {
-  _ref_processor = rp;
-  assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
+  // FIXME: Should initialize in base class constructor.
+  assert(rp != NULL, "ref_processor shouldn't be NULL");
+  set_ref_processor_internal(rp);
 }

 // This closure is used to mark refs into the CMS generation at the
@@ -7097,7 +7099,7 @@ PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
   _mark_stack(mark_stack),
   _concurrent_precleaning(concurrent_precleaning)
 {
-  assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
+  assert(ref_processor() != NULL, "ref_processor shouldn't be NULL");
 }

 // Grey object rescan during pre-cleaning and second checkpoint phases --
@@ -7168,7 +7170,7 @@ Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
   _bit_map(bit_map),
   _work_queue(work_queue)
 {
-  assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
+  assert(ref_processor() != NULL, "ref_processor shouldn't be NULL");
 }

 void PushAndMarkClosure::do_oop(oop* p) { PushAndMarkClosure::do_oop_work(p); }
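Note: the CMS hunks above all follow one refactoring: _ref_processor is no
longer touched directly by subclasses; reads go through ref_processor() and
writes through a protected set_ref_processor_internal(). A minimal sketch of
that encapsulation, with illustrative class and member names:

    class ReferenceProcessor;  // opaque here

    // Sketch of the base-closure shape after the refactoring: the field is
    // private; subclasses use the accessor and the protected setter.
    class OopClosureBase {
      ReferenceProcessor* _ref_processor;
     protected:
      OopClosureBase() : _ref_processor(nullptr) {}
      void set_ref_processor_internal(ReferenceProcessor* rp) {
        _ref_processor = rp;
      }
     public:
      ReferenceProcessor* ref_processor() const { return _ref_processor; }
    };

    class MarkingClosure : public OopClosureBase {
     public:
      explicit MarkingClosure(ReferenceProcessor* rp) {
        set_ref_processor_internal(rp);  // what the CMS constructors now do
      }
    };

    int main() {
      MarkingClosure c(nullptr);
      return c.ref_processor() != nullptr;
    }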
src/share/vm/gc/g1/concurrentG1Refine.hpp

@@ -112,6 +112,8 @@ class ConcurrentG1Refine: public CHeapObj<mtGC> {
   int thread_threshold_step() const { return _thread_threshold_step; }

   G1HotCardCache* hot_card_cache() { return &_hot_card_cache; }
+
+  static bool hot_card_cache_enabled() { return G1HotCardCache::default_use_cache(); }
 };

 #endif // SHARE_VM_GC_G1_CONCURRENTG1REFINE_HPP
src/share/vm/gc/g1/concurrentMark.cpp

@@ -3084,17 +3084,21 @@ public:
   }
 };

+static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
+  ReferenceProcessor* result = NULL;
+  if (G1UseConcMarkReferenceProcessing) {
+    result = g1h->ref_processor_cm();
+    assert(result != NULL, "should not be NULL");
+  }
+  return result;
+}
+
 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
                                ConcurrentMark* cm,
                                CMTask* task)
-  : _g1h(g1h), _cm(cm), _task(task) {
-  assert(_ref_processor == NULL, "should be initialized to NULL");
-  if (G1UseConcMarkReferenceProcessing) {
-    _ref_processor = g1h->ref_processor_cm();
-    assert(_ref_processor != NULL, "should not be NULL");
-  }
-}
+  : MetadataAwareOopClosure(get_cm_oop_closure_ref_processor(g1h)),
+    _g1h(g1h), _cm(cm), _task(task)
+{ }

 void CMTask::setup_for_region(HeapRegion* hr) {
   assert(hr != NULL,
|
|||||||
// and do_marking_step() is not being called serially.
|
// and do_marking_step() is not being called serially.
|
||||||
bool do_stealing = do_termination && !is_serial;
|
bool do_stealing = do_termination && !is_serial;
|
||||||
|
|
||||||
double diff_prediction_ms =
|
double diff_prediction_ms = _g1h->g1_policy()->predictor().get_new_prediction(&_marking_step_diffs_ms);
|
||||||
g1_policy->get_new_prediction(&_marking_step_diffs_ms);
|
|
||||||
_time_target_ms = time_target_ms - diff_prediction_ms;
|
_time_target_ms = time_target_ms - diff_prediction_ms;
|
||||||
|
|
||||||
// set up the variables that are used in the work-based scheme to
|
// set up the variables that are used in the work-based scheme to
|
||||||
|
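Note: get_new_prediction() now lives behind a dedicated predictor object
(the new g1Predictions.cpp/.hpp in this commit's file list) rather than on
the policy itself. The usual shape of such a prediction is the running
average of a truncated sample sequence padded by a confidence multiple of
its standard deviation; the sketch below illustrates that idea only and is
not the real G1 code:

    #include <cmath>
    #include <cstdio>
    #include <vector>

    // Illustrative predictor: mean of recent samples plus sigma_factor
    // standard deviations, so the estimate errs on the conservative side.
    static double get_new_prediction(const std::vector<double>& seq,
                                     double sigma_factor) {
      double sum = 0.0, sq = 0.0;
      for (double x : seq) { sum += x; sq += x * x; }
      const double n = static_cast<double>(seq.size());
      const double mean = sum / n;
      const double var = sq / n - mean * mean;
      return mean + sigma_factor * std::sqrt(var > 0.0 ? var : 0.0);
    }

    int main() {
      std::vector<double> marking_step_diffs_ms = {0.5, 0.7, 0.6, 0.8};
      std::printf("%.3f ms\n", get_new_prediction(marking_step_diffs_ms, 1.0));
      return 0;
    }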
src/share/vm/gc/g1/g1CodeBlobClosure.hpp

@@ -22,6 +22,9 @@
  *
  */

+#ifndef SHARE_VM_GC_G1_G1CODEBLOBCLOSURE_HPP
+#define SHARE_VM_GC_G1_G1CODEBLOBCLOSURE_HPP
+
 #include "gc/g1/g1CollectedHeap.hpp"
 #include "memory/iterator.hpp"

@@ -53,3 +56,6 @@ public:

   void do_code_blob(CodeBlob* cb);
 };
+
+#endif // SHARE_VM_GC_G1_G1CODEBLOBCLOSURE_HPP
src/share/vm/gc/g1/g1CollectedHeap.cpp

@@ -44,6 +44,7 @@
 #include "gc/g1/g1ParScanThreadState.inline.hpp"
 #include "gc/g1/g1RegionToSpaceMapper.hpp"
 #include "gc/g1/g1RemSet.inline.hpp"
+#include "gc/g1/g1RootClosures.hpp"
 #include "gc/g1/g1RootProcessor.hpp"
 #include "gc/g1/g1StringDedup.hpp"
 #include "gc/g1/g1YCTypes.hpp"
@@ -125,213 +126,6 @@ class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
   size_t num_processed() const { return _num_processed; }
 };

-YoungList::YoungList(G1CollectedHeap* g1h) :
-    _g1h(g1h), _head(NULL), _length(0), _last_sampled_rs_lengths(0),
-    _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) {
-  guarantee(check_list_empty(false), "just making sure...");
-}
-
-void YoungList::push_region(HeapRegion *hr) {
-  assert(!hr->is_young(), "should not already be young");
-  assert(hr->get_next_young_region() == NULL, "cause it should!");
-
-  hr->set_next_young_region(_head);
-  _head = hr;
-
-  _g1h->g1_policy()->set_region_eden(hr, (int) _length);
-  ++_length;
-}
-
-void YoungList::add_survivor_region(HeapRegion* hr) {
-  assert(hr->is_survivor(), "should be flagged as survivor region");
-  assert(hr->get_next_young_region() == NULL, "cause it should!");
-
-  hr->set_next_young_region(_survivor_head);
-  if (_survivor_head == NULL) {
-    _survivor_tail = hr;
-  }
-  _survivor_head = hr;
-  ++_survivor_length;
-}
-
-void YoungList::empty_list(HeapRegion* list) {
-  while (list != NULL) {
-    HeapRegion* next = list->get_next_young_region();
-    list->set_next_young_region(NULL);
-    list->uninstall_surv_rate_group();
-    // This is called before a Full GC and all the non-empty /
-    // non-humongous regions at the end of the Full GC will end up as
-    // old anyway.
-    list->set_old();
-    list = next;
-  }
-}
-
-void YoungList::empty_list() {
-  assert(check_list_well_formed(), "young list should be well formed");
-
-  empty_list(_head);
-  _head = NULL;
-  _length = 0;
-
-  empty_list(_survivor_head);
-  _survivor_head = NULL;
-  _survivor_tail = NULL;
-  _survivor_length = 0;
-
-  _last_sampled_rs_lengths = 0;
-
-  assert(check_list_empty(false), "just making sure...");
-}
-
-bool YoungList::check_list_well_formed() {
-  bool ret = true;
-
-  uint length = 0;
-  HeapRegion* curr = _head;
-  HeapRegion* last = NULL;
-  while (curr != NULL) {
-    if (!curr->is_young()) {
-      gclog_or_tty->print_cr("### YOUNG REGION " PTR_FORMAT "-" PTR_FORMAT " "
-                             "incorrectly tagged (y: %d, surv: %d)",
-                             p2i(curr->bottom()), p2i(curr->end()),
-                             curr->is_young(), curr->is_survivor());
-      ret = false;
-    }
-    ++length;
-    last = curr;
-    curr = curr->get_next_young_region();
-  }
-  ret = ret && (length == _length);
-
-  if (!ret) {
-    gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
-    gclog_or_tty->print_cr("### list has %u entries, _length is %u",
-                           length, _length);
-  }
-
-  return ret;
-}
-
-bool YoungList::check_list_empty(bool check_sample) {
-  bool ret = true;
-
-  if (_length != 0) {
-    gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %u",
-                           _length);
-    ret = false;
-  }
-  if (check_sample && _last_sampled_rs_lengths != 0) {
-    gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths");
-    ret = false;
-  }
-  if (_head != NULL) {
-    gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head");
-    ret = false;
-  }
-  if (!ret) {
-    gclog_or_tty->print_cr("### YOUNG LIST does not seem empty");
-  }
-
-  return ret;
-}
-
-void
-YoungList::rs_length_sampling_init() {
-  _sampled_rs_lengths = 0;
-  _curr = _head;
-}
-
-bool
-YoungList::rs_length_sampling_more() {
-  return _curr != NULL;
-}
-
-void
-YoungList::rs_length_sampling_next() {
-  assert( _curr != NULL, "invariant" );
-  size_t rs_length = _curr->rem_set()->occupied();
-
-  _sampled_rs_lengths += rs_length;
-
-  // The current region may not yet have been added to the
-  // incremental collection set (it gets added when it is
-  // retired as the current allocation region).
-  if (_curr->in_collection_set()) {
-    // Update the collection set policy information for this region
-    _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length);
-  }
-
-  _curr = _curr->get_next_young_region();
-  if (_curr == NULL) {
-    _last_sampled_rs_lengths = _sampled_rs_lengths;
-    // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths);
-  }
-}
-
-void
-YoungList::reset_auxilary_lists() {
-  guarantee( is_empty(), "young list should be empty" );
-  assert(check_list_well_formed(), "young list should be well formed");
-
-  // Add survivor regions to SurvRateGroup.
-  _g1h->g1_policy()->note_start_adding_survivor_regions();
-  _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);
-
-  int young_index_in_cset = 0;
-  for (HeapRegion* curr = _survivor_head;
-       curr != NULL;
-       curr = curr->get_next_young_region()) {
-    _g1h->g1_policy()->set_region_survivor(curr, young_index_in_cset);
-
-    // The region is a non-empty survivor so let's add it to
-    // the incremental collection set for the next evacuation
-    // pause.
-    _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
-    young_index_in_cset += 1;
-  }
-  assert((uint) young_index_in_cset == _survivor_length, "post-condition");
-  _g1h->g1_policy()->note_stop_adding_survivor_regions();
-
-  _head = _survivor_head;
-  _length = _survivor_length;
-  if (_survivor_head != NULL) {
-    assert(_survivor_tail != NULL, "cause it shouldn't be");
-    assert(_survivor_length > 0, "invariant");
-    _survivor_tail->set_next_young_region(NULL);
-  }
-
-  // Don't clear the survivor list handles until the start of
-  // the next evacuation pause - we need it in order to re-tag
-  // the survivor regions from this evacuation pause as 'young'
-  // at the start of the next.
-
-  _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);
-
-  assert(check_list_well_formed(), "young list should be well formed");
-}
-
-void YoungList::print() {
-  HeapRegion* lists[] = {_head, _survivor_head};
-  const char* names[] = {"YOUNG", "SURVIVOR"};
-
-  for (uint list = 0; list < ARRAY_SIZE(lists); ++list) {
-    gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
-    HeapRegion *curr = lists[list];
-    if (curr == NULL)
-      gclog_or_tty->print_cr("  empty");
-    while (curr != NULL) {
-      gclog_or_tty->print_cr("  " HR_FORMAT ", P: " PTR_FORMAT ", N: " PTR_FORMAT ", age: %4d",
-                             HR_FORMAT_PARAMS(curr),
-                             p2i(curr->prev_top_at_mark_start()),
-                             p2i(curr->next_top_at_mark_start()),
-                             curr->age_in_surv_rate_group_cond());
-      curr = curr->get_next_young_region();
-    }
-  }
-
-  gclog_or_tty->cr();
-}
-
 void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
   HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
@@ -2469,14 +2263,11 @@ void G1CollectedHeap::check_gc_time_stamps() {
 }
 #endif // PRODUCT

-void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
-                                                 DirtyCardQueue* into_cset_dcq,
-                                                 bool concurrent,
-                                                 uint worker_i) {
-  // Clean cards in the hot card cache
-  G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
-  hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq);
+void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
+  _cg1r->hot_card_cache()->drain(cl, worker_i);
+}

+void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl, uint worker_i) {
   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   size_t n_completed_buffers = 0;
   while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
@@ -4355,80 +4146,6 @@ void G1CollectedHeap::preserve_mark_during_evac_failure(uint worker_id, oop obj,
   }
 }

-void G1ParCopyHelper::mark_object(oop obj) {
-  assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
-
-  // We know that the object is not moving so it's safe to read its size.
-  _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
-}
-
-void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
-  assert(from_obj->is_forwarded(), "from obj should be forwarded");
-  assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
-  assert(from_obj != to_obj, "should not be self-forwarded");
-
-  assert(_g1->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet");
-  assert(!_g1->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet");
-
-  // The object might be in the process of being copied by another
-  // worker so we cannot trust that its to-space image is
-  // well-formed. So we have to read its size from its from-space
-  // image which we know should not be changing.
-  _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
-}
-
-template <class T>
-void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
-  if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
-    _scanned_klass->record_modified_oops();
-  }
-}
-
-template <G1Barrier barrier, G1Mark do_mark_object>
-template <class T>
-void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-
-  if (oopDesc::is_null(heap_oop)) {
-    return;
-  }
-
-  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-
-  assert(_worker_id == _par_scan_state->worker_id(), "sanity");
-
-  const InCSetState state = _g1->in_cset_state(obj);
-  if (state.is_in_cset()) {
-    oop forwardee;
-    markOop m = obj->mark();
-    if (m->is_marked()) {
-      forwardee = (oop) m->decode_pointer();
-    } else {
-      forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);
-    }
-    assert(forwardee != NULL, "forwardee should not be NULL");
-    oopDesc::encode_store_heap_oop(p, forwardee);
-    if (do_mark_object != G1MarkNone && forwardee != obj) {
-      // If the object is self-forwarded we don't need to explicitly
-      // mark it, the evacuation failure protocol will do so.
-      mark_forwarded_object(obj, forwardee);
-    }
-
-    if (barrier == G1BarrierKlass) {
-      do_klass_barrier(p, forwardee);
-    }
-  } else {
-    if (state.is_humongous()) {
-      _g1->set_humongous_is_live(obj);
-    }
-    // The object is not in collection set. If we're a root scanning
-    // closure during an initial mark pause then attempt to mark the object.
-    if (do_mark_object == G1MarkFromRoot) {
-      mark_object(obj);
-    }
-  }
-}
-
 class G1ParEvacuateFollowersClosure : public VoidClosure {
 private:
   double _start_term;
@@ -4481,32 +4198,6 @@ void G1ParEvacuateFollowersClosure::do_void() {
   } while (!offer_termination());
 }

-class G1KlassScanClosure : public KlassClosure {
-  G1ParCopyHelper* _closure;
-  bool             _process_only_dirty;
-  int              _count;
- public:
-  G1KlassScanClosure(G1ParCopyHelper* closure, bool process_only_dirty)
-      : _process_only_dirty(process_only_dirty), _closure(closure), _count(0) {}
-  void do_klass(Klass* klass) {
-    // If the klass has not been dirtied we know that there's
-    // no references into the young gen and we can skip it.
-    if (!_process_only_dirty || klass->has_modified_oops()) {
-      // Clean the klass since we're going to scavenge all the metadata.
-      klass->clear_modified_oops();
-
-      // Tell the closure that this klass is the Klass to scavenge
-      // and is the one to dirty if oops are left pointing into the young gen.
-      _closure->set_scanned_klass(klass);
-
-      klass->oops_do(_closure);
-
-      _closure->set_scanned_klass(NULL);
-    }
-    _count++;
-  }
-};
-
 class G1ParTask : public AbstractGangTask {
 protected:
   G1CollectedHeap* _g1h;
@@ -4527,42 +4218,6 @@ public:
       _n_workers(n_workers)
   {}

-  RefToScanQueueSet* queues() { return _queues; }
-
-  RefToScanQueue *work_queue(int i) {
-    return queues()->queue(i);
-  }
-
-  ParallelTaskTerminator* terminator() { return &_terminator; }
-
-  // Helps out with CLD processing.
-  //
-  // During InitialMark we need to:
-  //   1) Scavenge all CLDs for the young GC.
-  //   2) Mark all objects directly reachable from strong CLDs.
-  template <G1Mark do_mark_object>
-  class G1CLDClosure : public CLDClosure {
-    G1ParCopyClosure<G1BarrierNone, do_mark_object>* _oop_closure;
-    G1ParCopyClosure<G1BarrierKlass, do_mark_object> _oop_in_klass_closure;
-    G1KlassScanClosure                               _klass_in_cld_closure;
-    bool                                             _claim;
-
-   public:
-    G1CLDClosure(G1ParCopyClosure<G1BarrierNone, do_mark_object>* oop_closure,
-                 bool only_young, bool claim)
-        : _oop_closure(oop_closure),
-          _oop_in_klass_closure(oop_closure->g1(),
-                                oop_closure->pss()),
-          _klass_in_cld_closure(&_oop_in_klass_closure, only_young),
-          _claim(claim) {
-    }
-
-    void do_cld(ClassLoaderData* cld) {
-      cld->oops_do(_oop_closure, &_klass_in_cld_closure, _claim);
-    }
-  };
-
   void work(uint worker_id) {
     if (worker_id >= _n_workers) return;  // no work needed this round

@@ -4578,62 +4233,18 @@ public:
       G1ParScanThreadState* pss = _pss->state_for_worker(worker_id);
       pss->set_ref_processor(rp);

-      bool only_young = _g1h->collector_state()->gcs_are_young();
-
-      // Non-IM young GC.
-      G1ParCopyClosure<G1BarrierNone, G1MarkNone> scan_only_root_cl(_g1h, pss);
-      G1CLDClosure<G1MarkNone> scan_only_cld_cl(&scan_only_root_cl,
-                                                only_young, // Only process dirty klasses.
-                                                false);     // No need to claim CLDs.
-      // IM young GC.
-      //   Strong roots closures.
-      G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot> scan_mark_root_cl(_g1h, pss);
-      G1CLDClosure<G1MarkFromRoot> scan_mark_cld_cl(&scan_mark_root_cl,
-                                                    false, // Process all klasses.
-                                                    true); // Need to claim CLDs.
-      //   Weak roots closures.
-      G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, pss);
-      G1CLDClosure<G1MarkPromotedFromRoot> scan_mark_weak_cld_cl(&scan_mark_weak_root_cl,
-                                                                 false, // Process all klasses.
-                                                                 true); // Need to claim CLDs.
-
-      OopClosure* strong_root_cl;
-      OopClosure* weak_root_cl;
-      CLDClosure* strong_cld_cl;
-      CLDClosure* weak_cld_cl;
-
-      bool trace_metadata = false;
-
-      if (_g1h->collector_state()->during_initial_mark_pause()) {
-        // We also need to mark copied objects.
-        strong_root_cl = &scan_mark_root_cl;
-        strong_cld_cl  = &scan_mark_cld_cl;
-        if (ClassUnloadingWithConcurrentMark) {
-          weak_root_cl = &scan_mark_weak_root_cl;
-          weak_cld_cl  = &scan_mark_weak_cld_cl;
-          trace_metadata = true;
-        } else {
-          weak_root_cl = &scan_mark_root_cl;
-          weak_cld_cl  = &scan_mark_cld_cl;
-        }
-      } else {
-        strong_root_cl = &scan_only_root_cl;
-        weak_root_cl   = &scan_only_root_cl;
-        strong_cld_cl  = &scan_only_cld_cl;
-        weak_cld_cl    = &scan_only_cld_cl;
-      }
-
       double start_strong_roots_sec = os::elapsedTime();
-      _root_processor->evacuate_roots(strong_root_cl,
-                                      weak_root_cl,
-                                      strong_cld_cl,
-                                      weak_cld_cl,
-                                      trace_metadata,
-                                      worker_id);
+      _root_processor->evacuate_roots(pss->closures(), worker_id);

       G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, pss);
+
+      // We pass a weak code blobs closure to the remembered set scanning because we want to avoid
+      // treating the nmethods visited to act as roots for concurrent marking.
+      // We only want to make sure that the oops in the nmethods are adjusted with regard to the
+      // objects copied by the current evacuation.
       size_t cards_scanned = _g1h->g1_rem_set()->oops_into_collection_set_do(&push_heap_rs_cl,
-                                                                             weak_root_cl,
+                                                                             pss->closures()->weak_codeblobs(),
                                                                              worker_id);

       _pss->add_cards_scanned(worker_id, cards_scanned);
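Note: the repeated blocks that picked strong/weak oop and CLD closures per
worker are replaced by a per-thread closures() object (see the new
gc/g1/g1RootClosures.cpp/.hpp in the file list). A sketch of the underlying
pattern: bundle the closure set once per worker and hand the bundle around,
instead of re-selecting individual closures at every call site. Names here
are illustrative placeholders:

    #include <cstdio>

    // Placeholder closure types; in HotSpot these are OopClosure,
    // CLDClosure, and CodeBlobClosure subclasses wired up per GC mode.
    struct OopClosure {};
    struct CodeBlobClosure {};

    class RootClosureSet {
     public:
      OopClosure* raw_strong_oops() { return &_strong; }
      OopClosure* weak_oops() { return &_weak; }
      CodeBlobClosure* weak_codeblobs() { return &_code; }
     private:
      OopClosure _strong, _weak;
      CodeBlobClosure _code;
    };

    int main() {
      RootClosureSet closures;  // built once, passed to every consumer
      std::printf("%p %p\n",
                  static_cast<void*>(closures.raw_strong_oops()),
                  static_cast<void*>(closures.weak_codeblobs()));
      return 0;
    }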
@@ -5294,19 +4905,8 @@ public:
     G1ParScanThreadState* pss = _pss->state_for_worker(worker_id);
     pss->set_ref_processor(NULL);

-    G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, pss);
-
-    G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss);
-
-    OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
-
-    if (_g1h->collector_state()->during_initial_mark_pause()) {
-      // We also need to mark copied objects.
-      copy_non_heap_cl = &copy_mark_non_heap_cl;
-    }
-
     // Keep alive closure.
-    G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, pss);
+    G1CopyingKeepAliveClosure keep_alive(_g1h, pss->closures()->raw_strong_oops(), pss);

     // Complete GC closure
     G1ParEvacuateFollowersClosure drain_queue(_g1h, pss, _task_queues, _terminator);
@@ -5394,23 +4994,12 @@ public:
     pss->set_ref_processor(NULL);
     assert(pss->queue_is_empty(), "both queue and overflow should be empty");

-    G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, pss);
-
-    G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss);
-
-    OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
-
-    if (_g1h->collector_state()->during_initial_mark_pause()) {
-      // We also need to mark copied objects.
-      copy_non_heap_cl = &copy_mark_non_heap_cl;
-    }
-
     // Is alive closure
     G1AlwaysAliveClosure always_alive(_g1h);

     // Copying keep alive closure. Applied to referent objects that need
     // to be copied.
-    G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, pss);
+    G1CopyingKeepAliveClosure keep_alive(_g1h, pss->closures()->raw_strong_oops(), pss);

     ReferenceProcessor* rp = _g1h->ref_processor_cm();

@@ -5500,23 +5089,8 @@ void G1CollectedHeap::process_discovered_references(G1ParScanThreadStateSet* per
   pss->set_ref_processor(NULL);
   assert(pss->queue_is_empty(), "pre-condition");

-  // We do not embed a reference processor in the copying/scanning
-  // closures while we're actually processing the discovered
-  // reference objects.
-
-  G1ParScanExtRootClosure        only_copy_non_heap_cl(this, pss);
-
-  G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, pss);
-
-  OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
-
-  if (collector_state()->during_initial_mark_pause()) {
-    // We also need to mark copied objects.
-    copy_non_heap_cl = &copy_mark_non_heap_cl;
-  }
-
   // Keep alive closure.
-  G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, pss);
+  G1CopyingKeepAliveClosure keep_alive(this, pss->closures()->raw_strong_oops(), pss);

   // Serial Complete GC closure
   G1STWDrainQueueClosure drain_queue(this, pss);
src/share/vm/gc/g1/g1CollectedHeap.hpp

@@ -39,6 +39,7 @@
 #include "gc/g1/hSpaceCounters.hpp"
 #include "gc/g1/heapRegionManager.hpp"
 #include "gc/g1/heapRegionSet.hpp"
+#include "gc/g1/youngList.hpp"
 #include "gc/shared/barrierSet.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/plab.hpp"
@@ -64,7 +65,6 @@ class SpaceClosure;
 class CompactibleSpaceClosure;
 class Space;
 class G1CollectorPolicy;
-class GenRemSet;
 class G1RemSet;
 class HeapRegionRemSetIterator;
 class ConcurrentMark;
@@ -88,79 +88,6 @@ typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
 typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
 typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )

-class YoungList : public CHeapObj<mtGC> {
-private:
-  G1CollectedHeap* _g1h;
-
-  HeapRegion* _head;
-
-  HeapRegion* _survivor_head;
-  HeapRegion* _survivor_tail;
-
-  HeapRegion* _curr;
-
-  uint        _length;
-  uint        _survivor_length;
-
-  size_t      _last_sampled_rs_lengths;
-  size_t      _sampled_rs_lengths;
-
-  void        empty_list(HeapRegion* list);
-
-public:
-  YoungList(G1CollectedHeap* g1h);
-
-  void        push_region(HeapRegion* hr);
-  void        add_survivor_region(HeapRegion* hr);
-
-  void        empty_list();
-  bool        is_empty() { return _length == 0; }
-  uint        length() { return _length; }
-  uint        eden_length() { return length() - survivor_length(); }
-  uint        survivor_length() { return _survivor_length; }
-
-  // Currently we do not keep track of the used byte sum for the
-  // young list and the survivors and it'd be quite a lot of work to
-  // do so. When we'll eventually replace the young list with
-  // instances of HeapRegionLinkedList we'll get that for free. So,
-  // we'll report the more accurate information then.
-  size_t      eden_used_bytes() {
-    assert(length() >= survivor_length(), "invariant");
-    return (size_t) eden_length() * HeapRegion::GrainBytes;
-  }
-  size_t      survivor_used_bytes() {
-    return (size_t) survivor_length() * HeapRegion::GrainBytes;
-  }
-
-  void rs_length_sampling_init();
-  bool rs_length_sampling_more();
-  void rs_length_sampling_next();
-
-  void reset_sampled_info() {
-    _last_sampled_rs_lengths = 0;
-  }
-  size_t sampled_rs_lengths() { return _last_sampled_rs_lengths; }
-
-  // for development purposes
-  void reset_auxilary_lists();
-  void clear() { _head = NULL; _length = 0; }
-
-  void clear_survivors() {
-    _survivor_head   = NULL;
-    _survivor_tail   = NULL;
-    _survivor_length = 0;
-  }
-
-  HeapRegion* first_region() { return _head; }
-  HeapRegion* first_survivor_region() { return _survivor_head; }
-  HeapRegion* last_survivor_region() { return _survivor_tail; }
-
-  // debugging
-  bool check_list_well_formed();
-  bool check_list_empty(bool check_sample = true);
-  void print();
-};
-
 // The G1 STW is alive closure.
 // An instance is embedded into the G1CH and used as the
 // (optional) _is_alive_non_header closure in the STW
@@ -1083,9 +1010,11 @@ public:
   // continues humongous regions too.
   void reset_gc_time_stamps(HeapRegion* hr);

-  void iterate_dirty_card_closure(CardTableEntryClosure* cl,
-                                  DirtyCardQueue* into_cset_dcq,
-                                  bool concurrent, uint worker_i);
+  // Apply the given closure on all cards in the Hot Card Cache, emptying it.
+  void iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i);
+
+  // Apply the given closure on all cards in the Dirty Card Queue Set, emptying it.
+  void iterate_dirty_card_closure(CardTableEntryClosure* cl, uint worker_i);

   // The shared block offset table array.
   G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; }
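Editor's note: for readers unfamiliar with the closure-based card iteration above, here is a minimal sketch of how the new entry points are meant to be driven. Only `do_card_ptr` and the two `iterate_*` declarations come from this patch; `CountCardsClosure` and the call site are hypothetical illustrations.

  // Minimal sketch, assuming only the CardTableEntryClosure interface this
  // patch relies on (do_card_ptr returning bool). CountCardsClosure is a
  // hypothetical example closure, not part of the commit.
  class CountCardsClosure : public CardTableEntryClosure {
    size_t _cards;
   public:
    CountCardsClosure() : _cards(0) { }
    bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
      _cards++;      // a real closure would refine or redirty the card here
      return true;   // the HCC drain asserts that the closure returns true
    }
    size_t cards() const { return _cards; }
  };

  // Hypothetical call site inside a GC worker:
  //   CountCardsClosure cl;
  //   g1h->iterate_hcc_closure(&cl, worker_i);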
@@ -80,6 +80,7 @@ static double non_young_other_cost_per_region_ms_defaults[] = {
 };

 G1CollectorPolicy::G1CollectorPolicy() :
+  _predictor(G1ConfidencePercent / 100.0),
   _parallel_gc_threads(ParallelGCThreads),

   _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
@@ -92,6 +93,7 @@ G1CollectorPolicy::G1CollectorPolicy() :
   _prev_collection_pause_end_ms(0.0),
   _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
   _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
+  _cost_scan_hcc_seq(new TruncatedSeq(TruncatedSeqLength)),
   _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
   _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
   _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
@@ -126,8 +128,6 @@ G1CollectorPolicy::G1CollectorPolicy() :
   _survivor_cset_region_length(0),
   _old_cset_region_length(0),

-  _sigma(G1ConfidencePercent / 100.0),
-
   _collection_set(NULL),
   _collection_set_bytes_used_before(0),

@@ -150,12 +150,12 @@ G1CollectorPolicy::G1CollectorPolicy() :

   _gc_overhead_perc(0.0) {

-  // SurvRateGroups below must be initialized after '_sigma' because they
-  // indirectly access '_sigma' through this object passed to their constructor.
+  // SurvRateGroups below must be initialized after the predictor because they
+  // indirectly use it through this object passed to their constructor.
   _short_lived_surv_rate_group =
-    new SurvRateGroup(this, "Short Lived", G1YoungSurvRateNumRegionsSummary);
+    new SurvRateGroup(&_predictor, "Short Lived", G1YoungSurvRateNumRegionsSummary);
   _survivor_surv_rate_group =
-    new SurvRateGroup(this, "Survivor", G1YoungSurvRateNumRegionsSummary);
+    new SurvRateGroup(&_predictor, "Survivor", G1YoungSurvRateNumRegionsSummary);

   // Set up the region size and associated fields. Given that the
   // policy is created before the heap, we have to set this up here,
@@ -192,6 +192,7 @@ G1CollectorPolicy::G1CollectorPolicy() :

     _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
     _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
+    _cost_scan_hcc_seq->add(0.0);
     _young_cards_per_entry_ratio_seq->add(
                                   young_cards_per_entry_ratio_defaults[index]);
     _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
@@ -287,9 +288,13 @@ G1CollectorPolicy::G1CollectorPolicy() :
   _collectionSetChooser = new CollectionSetChooser();
 }

+double G1CollectorPolicy::get_new_prediction(TruncatedSeq const* seq) const {
+  return _predictor.get_new_prediction(seq);
+}
+
 void G1CollectorPolicy::initialize_alignments() {
   _space_alignment = HeapRegion::GrainBytes;
-  size_t card_table_alignment = GenRemSet::max_alignment_constraint();
+  size_t card_table_alignment = CardTableRS::ct_max_alignment_constraint();
   size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
   _heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size);
 }
@@ -314,8 +319,7 @@ void G1CollectorPolicy::post_heap_initialize() {
   }
 }

-const G1CollectorState* G1CollectorPolicy::collector_state() const { return _g1->collector_state(); }
-G1CollectorState* G1CollectorPolicy::collector_state() { return _g1->collector_state(); }
+G1CollectorState* G1CollectorPolicy::collector_state() const { return _g1->collector_state(); }

 G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true),
         _min_desired_young_length(0), _max_desired_young_length(0) {
@@ -426,8 +430,8 @@ void G1CollectorPolicy::init() {
     _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
   }
   _free_regions_at_end_of_collection = _g1->num_free_regions();
-  update_young_list_target_length();

+  update_young_list_target_length();
   // We may immediately start allocating regions and placing them on the
   // collection set list. Initialize the per-collection set info
   start_incremental_cset_building();
@@ -458,9 +462,8 @@ bool G1CollectorPolicy::predict_will_fit(uint young_length,
     return false;
   }

-  size_t free_bytes =
-                   (base_free_regions - young_length) * HeapRegion::GrainBytes;
-  if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) {
+  size_t free_bytes = (base_free_regions - young_length) * HeapRegion::GrainBytes;
+  if ((2.0 /* magic */ * _predictor.sigma()) * bytes_to_copy > free_bytes) {
     // end condition 3: out-of-space (conservatively!)
     return false;
   }
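Editor's note: a quick worked example of the rewritten out-of-space check, with assumed numbers that are not from the commit.

  // Assumed figures for illustration only:
  //   _predictor.sigma() = 0.5, bytes_to_copy = 16 MB,
  //   base_free_regions - young_length = 4 regions of 8 MB => free_bytes = 32 MB
  // Check: (2.0 * 0.5) * 16 MB = 16 MB, which is not greater than 32 MB,
  // so end condition 3 does not trigger. Only when the free slack drops
  // below 16 MB would the prediction conservatively report "won't fit".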
@@ -1046,10 +1049,12 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t

   if (update_stats) {
     double cost_per_card_ms = 0.0;
+    double cost_scan_hcc = phase_times()->average_time_ms(G1GCPhaseTimes::ScanHCC);
     if (_pending_cards > 0) {
-      cost_per_card_ms = phase_times()->average_time_ms(G1GCPhaseTimes::UpdateRS) / (double) _pending_cards;
+      cost_per_card_ms = (phase_times()->average_time_ms(G1GCPhaseTimes::UpdateRS) - cost_scan_hcc) / (double) _pending_cards;
       _cost_per_card_ms_seq->add(cost_per_card_ms);
     }
+    _cost_scan_hcc_seq->add(cost_scan_hcc);

     double cost_per_entry_ms = 0.0;
     if (cards_scanned > 10) {
@@ -1146,8 +1151,25 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t

   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
-  adjust_concurrent_refinement(phase_times()->average_time_ms(G1GCPhaseTimes::UpdateRS),
-                               phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS), update_rs_time_goal_ms);
+  double scan_hcc_time_ms = phase_times()->average_time_ms(G1GCPhaseTimes::ScanHCC);
+
+  if (update_rs_time_goal_ms < scan_hcc_time_ms) {
+    ergo_verbose2(ErgoTiming,
+                  "adjust concurrent refinement thresholds",
+                  ergo_format_reason("Scanning the HCC expected to take longer than Update RS time goal")
+                  ergo_format_ms("Update RS time goal")
+                  ergo_format_ms("Scan HCC time"),
+                  update_rs_time_goal_ms,
+                  scan_hcc_time_ms);
+
+    update_rs_time_goal_ms = 0;
+  } else {
+    update_rs_time_goal_ms -= scan_hcc_time_ms;
+  }
+  adjust_concurrent_refinement(phase_times()->average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms,
+                               phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
+                               update_rs_time_goal_ms);

   _collectionSetChooser->verify();
 }
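Editor's note: to make the new Hot Card Cache accounting concrete, a worked example with assumed figures (illustration only, not from the commit):

  // Assumed: max_gc_time() = 0.2 s, G1RSetUpdatingPauseTimePercent = 10
  //   => update_rs_time_goal_ms = 0.2 * 1000 * 10 / 100 = 20 ms
  // Assumed: average Scan HCC time = 3 ms, average Update RS time = 12 ms.
  // Refinement is now tuned against
  //   update_rs_time         = 12 - 3 = 9 ms
  //   update_rs_time_goal_ms = 20 - 3 = 17 ms
  // so the HCC scan, which is reported inside Update RS, no longer eats
  // into the concurrent-refinement budget twice.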
@@ -1248,7 +1270,7 @@ void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
     cg1r->set_red_zone(g * k_gr);
     cg1r->reinitialize_threads();

-    int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
+    int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * _predictor.sigma()), 1);
     int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
                                     cg1r->yellow_zone());
     // Change the barrier params
@@ -1265,17 +1287,125 @@ void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
   dcqs.notify_if_necessary();
 }

-double
-G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
-                                                size_t scanned_cards) const {
+size_t G1CollectorPolicy::predict_rs_length_diff() const {
+  return (size_t) get_new_prediction(_rs_length_diff_seq);
+}
+
+double G1CollectorPolicy::predict_alloc_rate_ms() const {
+  return get_new_prediction(_alloc_rate_ms_seq);
+}
+
+double G1CollectorPolicy::predict_cost_per_card_ms() const {
+  return get_new_prediction(_cost_per_card_ms_seq);
+}
+
+double G1CollectorPolicy::predict_scan_hcc_ms() const {
+  return get_new_prediction(_cost_scan_hcc_seq);
+}
+
+double G1CollectorPolicy::predict_rs_update_time_ms(size_t pending_cards) const {
+  return pending_cards * predict_cost_per_card_ms() + predict_scan_hcc_ms();
+}
+
+double G1CollectorPolicy::predict_young_cards_per_entry_ratio() const {
+  return get_new_prediction(_young_cards_per_entry_ratio_seq);
+}
+
+double G1CollectorPolicy::predict_mixed_cards_per_entry_ratio() const {
+  if (_mixed_cards_per_entry_ratio_seq->num() < 2) {
+    return predict_young_cards_per_entry_ratio();
+  } else {
+    return get_new_prediction(_mixed_cards_per_entry_ratio_seq);
+  }
+}
+
+size_t G1CollectorPolicy::predict_young_card_num(size_t rs_length) const {
+  return (size_t) (rs_length * predict_young_cards_per_entry_ratio());
+}
+
+size_t G1CollectorPolicy::predict_non_young_card_num(size_t rs_length) const {
+  return (size_t)(rs_length * predict_mixed_cards_per_entry_ratio());
+}
+
+double G1CollectorPolicy::predict_rs_scan_time_ms(size_t card_num) const {
+  if (collector_state()->gcs_are_young()) {
+    return card_num * get_new_prediction(_cost_per_entry_ms_seq);
+  } else {
+    return predict_mixed_rs_scan_time_ms(card_num);
+  }
+}
+
+double G1CollectorPolicy::predict_mixed_rs_scan_time_ms(size_t card_num) const {
+  if (_mixed_cost_per_entry_ms_seq->num() < 3) {
+    return card_num * get_new_prediction(_cost_per_entry_ms_seq);
+  } else {
+    return card_num * get_new_prediction(_mixed_cost_per_entry_ms_seq);
+  }
+}
+
+double G1CollectorPolicy::predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const {
+  if (_cost_per_byte_ms_during_cm_seq->num() < 3) {
+    return (1.1 * bytes_to_copy) * get_new_prediction(_cost_per_byte_ms_seq);
+  } else {
+    return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_during_cm_seq);
+  }
+}
+
+double G1CollectorPolicy::predict_object_copy_time_ms(size_t bytes_to_copy) const {
+  if (collector_state()->during_concurrent_mark()) {
+    return predict_object_copy_time_ms_during_cm(bytes_to_copy);
+  } else {
+    return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_seq);
+  }
+}
+
+double G1CollectorPolicy::predict_constant_other_time_ms() const {
+  return get_new_prediction(_constant_other_time_ms_seq);
+}
+
+double G1CollectorPolicy::predict_young_other_time_ms(size_t young_num) const {
+  return young_num * get_new_prediction(_young_other_cost_per_region_ms_seq);
+}
+
+double G1CollectorPolicy::predict_non_young_other_time_ms(size_t non_young_num) const {
+  return non_young_num * get_new_prediction(_non_young_other_cost_per_region_ms_seq);
+}
+
+double G1CollectorPolicy::predict_remark_time_ms() const {
+  return get_new_prediction(_concurrent_mark_remark_times_ms);
+}
+
+double G1CollectorPolicy::predict_cleanup_time_ms() const {
+  return get_new_prediction(_concurrent_mark_cleanup_times_ms);
+}
+
+double G1CollectorPolicy::predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const {
+  TruncatedSeq* seq = surv_rate_group->get_seq(age);
+  guarantee(seq->num() > 0, "There should be some young gen survivor samples available. Tried to access with age %d", age);
+  double pred = get_new_prediction(seq);
+  if (pred > 1.0) {
+    pred = 1.0;
+  }
+  return pred;
+}
+
+double G1CollectorPolicy::predict_yg_surv_rate(int age) const {
+  return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
+}
+
+double G1CollectorPolicy::accum_yg_surv_rate_pred(int age) const {
+  return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
+}
+
+double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
+                                                       size_t scanned_cards) const {
   return
     predict_rs_update_time_ms(pending_cards) +
     predict_rs_scan_time_ms(scanned_cards) +
     predict_constant_other_time_ms();
 }

-double
-G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) const {
+double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) const {
   size_t rs_length = predict_rs_length_diff();
   size_t card_num;
   if (collector_state()->gcs_are_young()) {
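Editor's note: a worked example of the base pause prediction above, with assumed sample values (illustration only, not from the commit):

  // Assumed: pending_cards = 10000, predicted cost per card = 0.001 ms,
  //          predicted Scan HCC = 2 ms, scanned_cards = 50000,
  //          predicted cost per entry = 0.0002 ms, constant other = 5 ms.
  // predict_rs_update_time_ms   = 10000 * 0.001 + 2 = 12 ms
  // predict_rs_scan_time_ms     = 50000 * 0.0002   = 10 ms
  // predict_base_elapsed_time_ms = 12 + 10 + 5     = 27 ms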
@@ -1294,14 +1424,13 @@ size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) const {
     assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
     int age = hr->age_in_surv_rate_group();
     double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
-    bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
+    bytes_to_copy = (size_t) (hr->used() * yg_surv_rate);
   }
   return bytes_to_copy;
 }

-double
-G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
-                                                  bool for_young_gc) const {
+double G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
+                                                         bool for_young_gc) const {
   size_t rs_length = hr->rem_set()->occupied();
   size_t card_num;

@@ -1328,9 +1457,8 @@ G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
   return region_elapsed_time_ms;
 }

-void
-G1CollectorPolicy::init_cset_region_lengths(uint eden_cset_region_length,
-                                            uint survivor_cset_region_length) {
+void G1CollectorPolicy::init_cset_region_lengths(uint eden_cset_region_length,
+                                                 uint survivor_cset_region_length) {
   _eden_cset_region_length     = eden_cset_region_length;
   _survivor_cset_region_length = survivor_cset_region_length;
   _old_cset_region_length      = 0;
@@ -29,6 +29,7 @@
 #include "gc/g1/g1CollectorState.hpp"
 #include "gc/g1/g1InCSetState.hpp"
 #include "gc/g1/g1MMUTracker.hpp"
+#include "gc/g1/g1Predictions.hpp"
 #include "gc/shared/collectorPolicy.hpp"

 // A G1CollectorPolicy makes policy decisions that determine the
@@ -161,7 +162,11 @@ public:
 };

 class G1CollectorPolicy: public CollectorPolicy {
 private:
+  G1Predictions _predictor;
+
+  double get_new_prediction(TruncatedSeq const* seq) const;
+
   // either equal to the number of parallel threads, if ParallelGCThreads
   // has been set, or 1 otherwise
   int _parallel_gc_threads;
@@ -169,10 +174,6 @@ private:
   // The number of GC threads currently active.
   uintx _no_of_gc_threads;

-  enum SomePrivateConstants {
-    NumPrevPausesForHeuristics = 10
-  };
-
   G1MMUTracker* _mmu_tracker;

   void initialize_alignments();
@@ -211,7 +212,8 @@ private:
   uint _reserve_regions;

   enum PredictionConstants {
-    TruncatedSeqLength = 10
+    TruncatedSeqLength = 10,
+    NumPrevPausesForHeuristics = 10
   };

   TruncatedSeq* _alloc_rate_ms_seq;
@@ -219,6 +221,7 @@ private:

   TruncatedSeq* _rs_length_diff_seq;
   TruncatedSeq* _cost_per_card_ms_seq;
+  TruncatedSeq* _cost_scan_hcc_seq;
   TruncatedSeq* _young_cards_per_entry_ratio_seq;
   TruncatedSeq* _mixed_cards_per_entry_ratio_seq;
   TruncatedSeq* _cost_per_entry_ms_seq;
@@ -250,25 +253,9 @@ private:

   size_t _recorded_rs_lengths;
   size_t _max_rs_lengths;
-  double _sigma;

   size_t _rs_lengths_prediction;

-  double sigma() const { return _sigma; }
-
-  // A function that prevents us putting too much stock in small sample
-  // sets. Returns a number between 2.0 and 1.0, depending on the number
-  // of samples. 5 or more samples yields one; fewer scales linearly from
-  // 2.0 at 1 sample to 1.0 at 5.
-  double confidence_factor(int samples) const {
-    if (samples > 4) return 1.0;
-    else return 1.0 + sigma() * ((double)(5 - samples))/2.0;
-  }
-
-  double get_new_neg_prediction(TruncatedSeq* seq) {
-    return seq->davg() - sigma() * seq->dsd();
-  }
-
 #ifndef PRODUCT
   bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
 #endif // PRODUCT
@@ -285,6 +272,8 @@ private:
   size_t _pending_cards;

 public:
+  G1Predictions& predictor() { return _predictor; }
+
   // Accessors

   void set_region_eden(HeapRegion* hr, int young_index_in_cset) {
@@ -303,102 +292,41 @@ public:
   bool verify_young_ages();
 #endif // PRODUCT

-  double get_new_prediction(TruncatedSeq* seq) const {
-    return MAX2(seq->davg() + sigma() * seq->dsd(),
-                seq->davg() * confidence_factor(seq->num()));
-  }
-
   void record_max_rs_lengths(size_t rs_lengths) {
     _max_rs_lengths = rs_lengths;
   }

-  size_t predict_rs_length_diff() const {
-    return (size_t) get_new_prediction(_rs_length_diff_seq);
-  }
+  size_t predict_rs_length_diff() const;

-  double predict_alloc_rate_ms() const {
-    return get_new_prediction(_alloc_rate_ms_seq);
-  }
+  double predict_alloc_rate_ms() const;

-  double predict_cost_per_card_ms() const {
-    return get_new_prediction(_cost_per_card_ms_seq);
-  }
+  double predict_cost_per_card_ms() const;
+
+  double predict_scan_hcc_ms() const;

-  double predict_rs_update_time_ms(size_t pending_cards) const {
-    return (double) pending_cards * predict_cost_per_card_ms();
-  }
+  double predict_rs_update_time_ms(size_t pending_cards) const;

-  double predict_young_cards_per_entry_ratio() const {
-    return get_new_prediction(_young_cards_per_entry_ratio_seq);
-  }
+  double predict_young_cards_per_entry_ratio() const;

-  double predict_mixed_cards_per_entry_ratio() const {
-    if (_mixed_cards_per_entry_ratio_seq->num() < 2) {
-      return predict_young_cards_per_entry_ratio();
-    } else {
-      return get_new_prediction(_mixed_cards_per_entry_ratio_seq);
-    }
-  }
+  double predict_mixed_cards_per_entry_ratio() const;

-  size_t predict_young_card_num(size_t rs_length) const {
-    return (size_t) ((double) rs_length *
-                     predict_young_cards_per_entry_ratio());
-  }
+  size_t predict_young_card_num(size_t rs_length) const;

-  size_t predict_non_young_card_num(size_t rs_length) const {
-    return (size_t) ((double) rs_length *
-                     predict_mixed_cards_per_entry_ratio());
-  }
+  size_t predict_non_young_card_num(size_t rs_length) const;

-  double predict_rs_scan_time_ms(size_t card_num) const {
-    if (collector_state()->gcs_are_young()) {
-      return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
-    } else {
-      return predict_mixed_rs_scan_time_ms(card_num);
-    }
-  }
+  double predict_rs_scan_time_ms(size_t card_num) const;

-  double predict_mixed_rs_scan_time_ms(size_t card_num) const {
-    if (_mixed_cost_per_entry_ms_seq->num() < 3) {
-      return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
-    } else {
-      return (double) (card_num *
-                       get_new_prediction(_mixed_cost_per_entry_ms_seq));
-    }
-  }
+  double predict_mixed_rs_scan_time_ms(size_t card_num) const;

-  double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const {
-    if (_cost_per_byte_ms_during_cm_seq->num() < 3) {
-      return (1.1 * (double) bytes_to_copy) *
-             get_new_prediction(_cost_per_byte_ms_seq);
-    } else {
-      return (double) bytes_to_copy *
-             get_new_prediction(_cost_per_byte_ms_during_cm_seq);
-    }
-  }
+  double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const;

-  double predict_object_copy_time_ms(size_t bytes_to_copy) const {
-    if (collector_state()->during_concurrent_mark()) {
-      return predict_object_copy_time_ms_during_cm(bytes_to_copy);
-    } else {
-      return (double) bytes_to_copy *
-             get_new_prediction(_cost_per_byte_ms_seq);
-    }
-  }
+  double predict_object_copy_time_ms(size_t bytes_to_copy) const;

-  double predict_constant_other_time_ms() const {
-    return get_new_prediction(_constant_other_time_ms_seq);
-  }
+  double predict_constant_other_time_ms() const;

-  double predict_young_other_time_ms(size_t young_num) const {
-    return (double) young_num *
-           get_new_prediction(_young_other_cost_per_region_ms_seq);
-  }
+  double predict_young_other_time_ms(size_t young_num) const;

-  double predict_non_young_other_time_ms(size_t non_young_num) const {
-    return (double) non_young_num *
-           get_new_prediction(_non_young_other_cost_per_region_ms_seq);
-  }
+  double predict_non_young_other_time_ms(size_t non_young_num) const;

   double predict_base_elapsed_time_ms(size_t pending_cards) const;
   double predict_base_elapsed_time_ms(size_t pending_cards,
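Editor's note: the inline prediction math removed above (together with sigma() and confidence_factor() removed earlier) now lives behind G1Predictions. The shipped g1Predictions.hpp is not part of this excerpt, so the following is a hedged reconstruction from the removed inlines; the field layout and the placement of confidence_factor inside the class are assumptions.

  // Hypothetical reconstruction for illustration only.
  class G1Predictions {
    double _sigma;
   public:
    G1Predictions(double sigma) : _sigma(sigma) { }
    double sigma() const { return _sigma; }

    // Small sample sets get a confidence factor between 2.0 (one sample)
    // and 1.0 (five or more samples), scaling the average up conservatively.
    double confidence_factor(int samples) const {
      return (samples > 4) ? 1.0 : 1.0 + _sigma * (5 - samples) / 2.0;
    }

    // A prediction is the decaying average plus sigma decaying standard
    // deviations, but never less than the confidence-scaled average.
    double get_new_prediction(TruncatedSeq const* seq) const {
      return MAX2(seq->davg() + _sigma * seq->dsd(),
                  seq->davg() * confidence_factor(seq->num()));
    }
  };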
@@ -415,11 +343,15 @@ public:

   double predict_survivor_regions_evac_time() const;

+  bool should_update_surv_rate_group_predictors() {
+    return collector_state()->last_gc_was_young() && !collector_state()->in_marking_window();
+  }
+
   void cset_regions_freed() {
-    bool propagate = collector_state()->should_propagate();
-    _short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
-    _survivor_surv_rate_group->all_surviving_words_recorded(propagate);
-    // also call it on any more surv rate groups
+    bool update = should_update_surv_rate_group_predictors();
+    _short_lived_surv_rate_group->all_surviving_words_recorded(update);
+    _survivor_surv_rate_group->all_surviving_words_recorded(update);
   }

   G1MMUTracker* mmu_tracker() {
@@ -434,34 +366,17 @@ public:
     return _mmu_tracker->max_gc_time() * 1000.0;
   }

-  double predict_remark_time_ms() const {
-    return get_new_prediction(_concurrent_mark_remark_times_ms);
-  }
+  double predict_remark_time_ms() const;

-  double predict_cleanup_time_ms() const {
-    return get_new_prediction(_concurrent_mark_cleanup_times_ms);
-  }
+  double predict_cleanup_time_ms() const;

   // Returns an estimate of the survival rate of the region at yg-age
   // "yg_age".
-  double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const {
-    TruncatedSeq* seq = surv_rate_group->get_seq(age);
-    if (seq->num() == 0)
-      gclog_or_tty->print("BARF! age is %d", age);
-    guarantee( seq->num() > 0, "invariant" );
-    double pred = get_new_prediction(seq);
-    if (pred > 1.0)
-      pred = 1.0;
-    return pred;
-  }
+  double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const;

-  double predict_yg_surv_rate(int age) const {
-    return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
-  }
+  double predict_yg_surv_rate(int age) const;

-  double accum_yg_surv_rate_pred(int age) const {
-    return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
-  }
+  double accum_yg_surv_rate_pred(int age) const;

 private:
   // Statistics kept per GC stoppage, pause or full.
@@ -608,8 +523,7 @@ public:

   virtual G1CollectorPolicy* as_g1_policy() { return this; }

-  const G1CollectorState* collector_state() const;
-  G1CollectorState* collector_state();
+  G1CollectorState* collector_state() const;

   G1GCPhaseTimes* phase_times() const { return _phase_times; }

@@ -883,15 +797,4 @@ public:
   virtual void post_heap_initialize();
 };

-// This should move to some place more general...
-
-// If we have "n" measurements, and we've kept track of their "sum" and the
-// "sum_of_squares" of the measurements, this returns the variance of the
-// sequence.
-inline double variance(int n, double sum_of_squares, double sum) {
-  double n_d = (double)n;
-  double avg = sum/n_d;
-  return (sum_of_squares - 2.0 * avg * sum + n_d * avg * avg) / n_d;
-}
-
 #endif // SHARE_VM_GC_G1_G1COLLECTORPOLICY_HPP
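Editor's note: the deleted helper is the standard shortcut formula for population variance; expanding its numerator shows the identity it computes (worked out here for reference, using only the deleted code's own terms):

  // (sum_of_squares - 2*avg*sum + n*avg^2) / n
  //   = sum_of_squares/n - 2*avg*(sum/n) + avg^2
  //   = E[x^2] - 2*avg^2 + avg^2
  //   = E[x^2] - avg^2            // the population variance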
@@ -121,11 +121,7 @@ class G1CollectorState VALUE_OBJ_CLASS_SPEC {
     return (_in_marking_window && !_in_marking_window_im);
   }

-  bool should_propagate() const { // XXX should have a more suitable state name or abstraction for this
-    return (_last_young_gc && !_in_marking_window);
-  }
-
-  G1YCType yc_type() {
+  G1YCType yc_type() const {
     if (during_initial_mark_pause()) {
       return InitialMark;
     } else if (mark_in_progress()) {
@@ -56,6 +56,7 @@ const char* G1ErgoVerbose::to_string(int tag) {
   case ErgoCSetConstruction:   return "CSet Construction";
   case ErgoConcCycles:         return "Concurrent Cycles";
   case ErgoMixedGCs:           return "Mixed GCs";
+  case ErgoTiming:             return "Timing";
   default:
     ShouldNotReachHere();
     // Keep the Windows compiler happy
@@ -70,6 +70,7 @@ typedef enum {
   ErgoCSetConstruction,
   ErgoConcCycles,
   ErgoMixedGCs,
+  ErgoTiming,

   ErgoHeuristicNum
 } ErgoHeuristic;
@@ -23,6 +23,7 @@
  */

 #include "precompiled.hpp"
+#include "gc/g1/concurrentG1Refine.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1GCPhaseTimes.hpp"
 #include "gc/g1/g1Log.hpp"
@@ -269,6 +270,8 @@ G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
   _gc_par_phases[SATBFiltering] = new WorkerDataArray<double>(max_gc_threads, "SATB Filtering (ms)", true, G1Log::LevelFinest, 3);

   _gc_par_phases[UpdateRS] = new WorkerDataArray<double>(max_gc_threads, "Update RS (ms)", true, G1Log::LevelFiner, 2);
+  _gc_par_phases[ScanHCC] = new WorkerDataArray<double>(max_gc_threads, "Scan HCC (ms)", true, G1Log::LevelFiner, 3);
+  _gc_par_phases[ScanHCC]->set_enabled(ConcurrentG1Refine::hot_card_cache_enabled());
   _gc_par_phases[ScanRS] = new WorkerDataArray<double>(max_gc_threads, "Scan RS (ms)", true, G1Log::LevelFiner, 2);
   _gc_par_phases[CodeRoots] = new WorkerDataArray<double>(max_gc_threads, "Code Root Scanning (ms)", true, G1Log::LevelFiner, 2);
   _gc_par_phases[ObjCopy] = new WorkerDataArray<double>(max_gc_threads, "Object Copy (ms)", true, G1Log::LevelFiner, 2);
@@ -56,6 +56,7 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
   WeakCLDRoots,
   SATBFiltering,
   UpdateRS,
+  ScanHCC,
   ScanRS,
   CodeRoots,
   ObjCopy,
@@ -26,7 +26,6 @@
 #include "gc/g1/dirtyCardQueue.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1HotCardCache.hpp"
-#include "gc/g1/g1RemSet.hpp"
 #include "runtime/atomic.inline.hpp"

 G1HotCardCache::G1HotCardCache(G1CollectedHeap *g1h):
@@ -81,9 +80,7 @@ jbyte* G1HotCardCache::insert(jbyte* card_ptr) {
   return (previous_ptr == current_ptr) ? previous_ptr : card_ptr;
 }

-void G1HotCardCache::drain(uint worker_i,
-                           G1RemSet* g1rs,
-                           DirtyCardQueue* into_cset_dcq) {
+void G1HotCardCache::drain(CardTableEntryClosure* cl, uint worker_i) {
   if (!default_use_cache()) {
     assert(_hot_cache == NULL, "Logic");
     return;
@@ -101,21 +98,8 @@ void G1HotCardCache::drain(uint worker_i,
   for (size_t i = start_idx; i < end_idx; i++) {
     jbyte* card_ptr = _hot_cache[i];
     if (card_ptr != NULL) {
-      if (g1rs->refine_card(card_ptr, worker_i, true)) {
-        // The part of the heap spanned by the card contains references
-        // that point into the current collection set.
-        // We need to record the card pointer in the DirtyCardQueueSet
-        // that we use for such cards.
-        //
-        // The only time we care about recording cards that contain
-        // references that point into the collection set is during
-        // RSet updating while within an evacuation pause.
-        // In this case worker_i should be the id of a GC worker thread
-        assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
-        assert(worker_i < ParallelGCThreads, "incorrect worker id: %u", worker_i);
-
-        into_cset_dcq->enqueue(card_ptr);
-      }
+      bool result = cl->do_card_ptr(card_ptr, worker_i);
+      assert(result, "Closure should always return true");
     } else {
       break;
     }
@@ -32,9 +32,9 @@
 #include "runtime/thread.hpp"
 #include "utilities/globalDefinitions.hpp"

+class CardTableEntryClosure;
 class DirtyCardQueue;
 class G1CollectedHeap;
-class G1RemSet;
 class HeapRegion;

 // An evicting cache of cards that have been logged by the G1 post
@@ -84,11 +84,11 @@ class G1HotCardCache: public CHeapObj<mtGC> {
   // The number of cached cards a thread claims when flushing the cache
   static const int ClaimChunkSize = 32;

-  bool default_use_cache() const {
+ public:
+  static bool default_use_cache() {
     return (G1ConcRSLogCacheSize > 0);
   }

- public:
   G1HotCardCache(G1CollectedHeap* g1h);
   ~G1HotCardCache();

@@ -113,7 +113,7 @@ class G1HotCardCache: public CHeapObj<mtGC> {

   // Refine the cards that have delayed as a result of
   // being in the cache.
-  void drain(uint worker_i, G1RemSet* g1rs, DirtyCardQueue* into_cset_dcq);
+  void drain(CardTableEntryClosure* cl, uint worker_i);

   // Set up for parallel processing of the cards in the hot cache
   void reset_hot_cache_claimed_index() {
@@ -51,15 +51,11 @@ struct InCSetState {
   enum {
     // Selection of the values were driven to micro-optimize the encoding and
     // frequency of the checks.
-    // The most common check is whether the region is in the collection set or not.
-    // This encoding allows us to use an != 0 check which in some architectures
-    // (x86*) can be encoded slightly more efficently than a normal comparison
-    // against zero.
-    // The same situation occurs when checking whether the region is humongous
-    // or not, which is encoded by values < 0.
+    // The most common check is whether the region is in the collection set or not,
+    // this encoding allows us to use an > 0 check.
     // The other values are simply encoded in increasing generation order, which
     // makes getting the next generation fast by a simple increment.
-    Humongous    = -1,    // The region is humongous - note that actually any value < 0 would be possible here.
+    Humongous    = -1,    // The region is humongous
     NotInCSet    =  0,    // The region is not in the collection set.
     Young        =  1,    // The region is in the collection set and a young region.
     Old          =  2,    // The region is in the collection set and an old region.
@@ -74,9 +70,10 @@ struct InCSetState {

   void set_old()                       { _value = Old; }

-  bool is_in_cset_or_humongous() const { return _value != NotInCSet; }
+  bool is_in_cset_or_humongous() const { return is_in_cset() || is_humongous(); }
   bool is_in_cset() const              { return _value > NotInCSet; }
-  bool is_humongous() const            { return _value < NotInCSet; }
+  bool is_humongous() const            { return _value == Humongous; }
   bool is_young() const                { return _value == Young; }
   bool is_old() const                  { return _value == Old; }

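Editor's note: a standalone sketch of the encoding described above, in plain C++ independent of the HotSpot headers, so the sign-based checks can be tried in isolation. The type alias and enum values mirror the comment; everything else is illustrative.

  #include <cassert>

  // Negative => humongous, zero => not in the collection set, positive =>
  // in the collection set, ordered by generation (Young = 1, Old = 2).
  typedef signed char in_cset_state_t;
  enum { Humongous = -1, NotInCSet = 0, Young = 1, Old = 2 };

  static bool is_in_cset(in_cset_state_t v)   { return v > NotInCSet; }
  static bool is_humongous(in_cset_state_t v) { return v == Humongous; }

  int main() {
    assert(is_in_cset(Young) && is_in_cset(Old));
    assert(!is_in_cset(NotInCSet) && !is_in_cset(Humongous));
    assert(is_humongous(Humongous) && !is_humongous(Old));
    return 0;
  }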
@@ -31,31 +31,32 @@
 #include "utilities/stack.inline.hpp"

 G1ParCopyHelper::G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
-  G1ParClosureSuper(g1, par_scan_state), _scanned_klass(NULL),
-  _cm(_g1->concurrent_mark()) { }
-
-G1ParCopyHelper::G1ParCopyHelper(G1CollectedHeap* g1) :
-  G1ParClosureSuper(g1), _scanned_klass(NULL),
-  _cm(_g1->concurrent_mark()) { }
-
-G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1) :
-  _g1(g1), _par_scan_state(NULL), _worker_id(UINT_MAX) { }
+  G1ParClosureSuper(g1, par_scan_state),
+  _worker_id(par_scan_state->worker_id()),
+  _scanned_klass(NULL),
+  _cm(_g1->concurrent_mark())
+{ }

 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
-  _g1(g1), _par_scan_state(NULL),
-  _worker_id(UINT_MAX) {
-  set_par_scan_thread_state(par_scan_state);
-}
+  _g1(g1), _par_scan_state(par_scan_state)
+{ }

-void G1ParClosureSuper::set_par_scan_thread_state(G1ParScanThreadState* par_scan_state) {
-  assert(_par_scan_state == NULL, "_par_scan_state must only be set once");
-  assert(par_scan_state != NULL, "Must set par_scan_state to non-NULL.");
-
-  _par_scan_state = par_scan_state;
-  _worker_id = par_scan_state->worker_id();
-
-  assert(_worker_id < ParallelGCThreads,
-         "The given worker id %u must be less than the number of threads %u", _worker_id, ParallelGCThreads);
+void G1KlassScanClosure::do_klass(Klass* klass) {
+  // If the klass has not been dirtied we know that there's
+  // no references into the young gen and we can skip it.
+  if (!_process_only_dirty || klass->has_modified_oops()) {
+    // Clean the klass since we're going to scavenge all the metadata.
+    klass->clear_modified_oops();
+
+    // Tell the closure that this klass is the Klass to scavenge
+    // and is the one to dirty if oops are left pointing into the young gen.
+    _closure->set_scanned_klass(klass);
+
+    klass->oops_do(_closure);
+
+    _closure->set_scanned_klass(NULL);
+  }
+  _count++;
 }

 // Generate G1 specialized oop_oop_iterate functions.
@@ -52,15 +52,12 @@ class G1ParClosureSuper : public OopsInHeapRegionClosure {
 protected:
   G1CollectedHeap* _g1;
   G1ParScanThreadState* _par_scan_state;
-  uint _worker_id;
-public:
-  // Initializes the instance, leaving _par_scan_state uninitialized. Must be done
-  // later using the set_par_scan_thread_state() method.
-  G1ParClosureSuper(G1CollectedHeap* g1);
-  G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state);
-  bool apply_to_weak_ref_discovered_field() { return true; }

-  void set_par_scan_thread_state(G1ParScanThreadState* par_scan_state);
+  G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state);
+  ~G1ParClosureSuper() { }
+
+public:
+  virtual bool apply_to_weak_ref_discovered_field() { return true; }
 };

 class G1ParPushHeapRSClosure : public G1ParClosureSuper {
@@ -76,36 +73,41 @@ public:

 class G1ParScanClosure : public G1ParClosureSuper {
 public:
-  G1ParScanClosure(G1CollectedHeap* g1) : G1ParClosureSuper(g1) { }
+  G1ParScanClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
+    G1ParClosureSuper(g1, par_scan_state) { }

   template <class T> void do_oop_nv(T* p);
   virtual void do_oop(oop* p)          { do_oop_nv(p); }
   virtual void do_oop(narrowOop* p)    { do_oop_nv(p); }

-  void set_ref_processor(ReferenceProcessor* ref_processor) { _ref_processor = ref_processor; }
+  void set_ref_processor(ReferenceProcessor* rp) {
+    set_ref_processor_internal(rp);
+  }
 };

 // Add back base class for metadata
 class G1ParCopyHelper : public G1ParClosureSuper {
 protected:
+  uint _worker_id;              // Cache value from par_scan_state.
   Klass* _scanned_klass;
   ConcurrentMark* _cm;

   // Mark the object if it's not already marked. This is used to mark
   // objects pointed to by roots that are guaranteed not to move
   // during the GC (i.e., non-CSet objects). It is MT-safe.
-  void mark_object(oop obj);
+  inline void mark_object(oop obj);

   // Mark the object if it's not already marked. This is used to mark
   // objects pointed to by roots that have been forwarded during a
   // GC. It is MT-safe.
-  void mark_forwarded_object(oop from_obj, oop to_obj);
-public:
-  G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state);
-  G1ParCopyHelper(G1CollectedHeap* g1);
+  inline void mark_forwarded_object(oop from_obj, oop to_obj);

+  G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state);
+  ~G1ParCopyHelper() { }
+
+public:
   void set_scanned_klass(Klass* k) { _scanned_klass = k; }
-  template <class T> void do_klass_barrier(T* p, oop new_obj);
+  template <class T> inline void do_klass_barrier(T* p, oop new_obj);
 };

 enum G1Barrier {
@@ -127,26 +129,23 @@ private:
 public:
   G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
       G1ParCopyHelper(g1, par_scan_state) {
-    assert(_ref_processor == NULL, "sanity");
-  }
-
-  G1ParCopyClosure(G1CollectedHeap* g1) : G1ParCopyHelper(g1) {
-    assert(_ref_processor == NULL, "sanity");
+    assert(ref_processor() == NULL, "sanity");
   }

   template <class T> void do_oop_nv(T* p) { do_oop_work(p); }
   virtual void do_oop(oop* p)       { do_oop_nv(p); }
   virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
-
-  G1CollectedHeap*      g1()  { return _g1; };
-  G1ParScanThreadState* pss() { return _par_scan_state; }
 };

-typedef G1ParCopyClosure<G1BarrierNone, G1MarkNone>             G1ParScanExtRootClosure;
-typedef G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot>         G1ParScanAndMarkExtRootClosure;
-typedef G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> G1ParScanAndMarkWeakExtRootClosure;
-// We use a separate closure to handle references during evacuation
-// failure processing.
+class G1KlassScanClosure : public KlassClosure {
+  G1ParCopyHelper* _closure;
+  bool             _process_only_dirty;
+  int              _count;
+ public:
+  G1KlassScanClosure(G1ParCopyHelper* closure, bool process_only_dirty)
+      : _process_only_dirty(process_only_dirty), _closure(closure), _count(0) {}
+  void do_klass(Klass* klass);
+};

 class FilterIntoCSClosure: public ExtendedOopClosure {
   G1CollectedHeap* _g1;
@@ -91,7 +91,7 @@ inline void G1ParScanClosure::do_oop_nv(T* p) {
       if (state.is_humongous()) {
         _g1->set_humongous_is_live(obj);
       }
-      _par_scan_state->update_rs(_from, p, _worker_id);
+      _par_scan_state->update_rs(_from, p);
     }
   }
 }
@@ -225,4 +225,78 @@ inline void G1UpdateRSOrPushRefOopClosure::do_oop_nv(T* p) {
   }
 }

+template <class T>
+void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
+  if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
+    _scanned_klass->record_modified_oops();
+  }
+}
+
+void G1ParCopyHelper::mark_object(oop obj) {
+  assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
+
+  // We know that the object is not moving so it's safe to read its size.
+  _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
+}
+
+void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
+  assert(from_obj->is_forwarded(), "from obj should be forwarded");
+  assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
+  assert(from_obj != to_obj, "should not be self-forwarded");
+
+  assert(_g1->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet");
+  assert(!_g1->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet");
+
+  // The object might be in the process of being copied by another
+  // worker so we cannot trust that its to-space image is
+  // well-formed. So we have to read its size from its from-space
+  // image which we know should not be changing.
+  _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
+}
+
+template <G1Barrier barrier, G1Mark do_mark_object>
+template <class T>
+void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
+  T heap_oop = oopDesc::load_heap_oop(p);
+
+  if (oopDesc::is_null(heap_oop)) {
+    return;
+  }
+
+  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+
+  assert(_worker_id == _par_scan_state->worker_id(), "sanity");
+
+  const InCSetState state = _g1->in_cset_state(obj);
+  if (state.is_in_cset()) {
+    oop forwardee;
+    markOop m = obj->mark();
+    if (m->is_marked()) {
+      forwardee = (oop) m->decode_pointer();
+    } else {
+      forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);
+    }
+    assert(forwardee != NULL, "forwardee should not be NULL");
+    oopDesc::encode_store_heap_oop(p, forwardee);
+    if (do_mark_object != G1MarkNone && forwardee != obj) {
+      // If the object is self-forwarded we don't need to explicitly
+      // mark it, the evacuation failure protocol will do so.
+      mark_forwarded_object(obj, forwardee);
+    }
+
+    if (barrier == G1BarrierKlass) {
+      do_klass_barrier(p, forwardee);
+    }
+  } else {
+    if (state.is_humongous()) {
+      _g1->set_humongous_is_live(obj);
+    }
+    // The object is not in collection set. If we're a root scanning
+    // closure during an initial mark pause then attempt to mark the object.
+    if (do_mark_object == G1MarkFromRoot) {
+      mark_object(obj);
+    }
+  }
+}
+
 #endif // SHARE_VM_GC_G1_G1OOPCLOSURES_INLINE_HPP
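The new do_oop_work above decides between reusing an existing forwardee and copying the object itself by consulting the mark word first. The standalone sketch below illustrates that decision with toy stand-ins for HotSpot's oop and markOop types; all names here are hypothetical, and the toy heap is single-threaded, whereas the real code must tolerate races between GC workers.

// Illustrative sketch only -- not part of the patch.
#include <cassert>
#include <cstdio>

struct Object;

// Toy "mark word": either unmarked, or marked and carrying a forwarding pointer.
struct Mark {
  bool marked;
  Object* forwardee;
};

struct Object {
  Mark mark;
  int payload;
};

// Mimics the shape of G1ParCopyClosure::do_oop_work: reuse an existing
// forwardee if the object was already copied, otherwise copy it ourselves
// and install the forwarding pointer.
Object* evacuate(Object* obj, Object* to_space, int* top) {
  if (obj->mark.marked) {
    return obj->mark.forwardee;        // already copied: reuse the forwardee
  }
  Object* copy = &to_space[(*top)++];  // copy into survivor/old space
  copy->mark.marked = false;
  copy->mark.forwardee = nullptr;
  copy->payload = obj->payload;
  obj->mark.marked = true;             // install forwarding pointer
  obj->mark.forwardee = copy;
  return copy;
}

int main() {
  Object from = { { false, nullptr }, 42 };
  Object to_space[4];
  int top = 0;
  Object* first  = evacuate(&from, to_space, &top);
  Object* second = evacuate(&from, to_space, &top); // hits the forwardee path
  assert(first == second && top == 1);
  printf("copied once, payload %d\n", first->payload);
  return 0;
}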
hotspot/src/share/vm/gc/g1/g1ParScanThreadState.cpp
@@ -27,6 +27,7 @@
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1OopClosures.inline.hpp"
 #include "gc/g1/g1ParScanThreadState.inline.hpp"
+#include "gc/g1/g1RootClosures.hpp"
 #include "gc/g1/g1StringDedup.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
 #include "oops/oop.inline.hpp"
@@ -37,15 +38,14 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint worker_id,
     _refs(g1h->task_queue(worker_id)),
     _dcq(&g1h->dirty_card_queue_set()),
     _ct_bs(g1h->g1_barrier_set()),
-    _g1_rem(g1h->g1_rem_set()),
+    _closures(NULL),
     _hash_seed(17),
     _worker_id(worker_id),
     _tenuring_threshold(g1h->g1_policy()->tenuring_threshold()),
     _age_table(false),
-    _scanner(g1h),
+    _scanner(g1h, this),
     _old_gen_is_full(false)
 {
-  _scanner.set_par_scan_thread_state(this);
   // we allocate G1YoungSurvRateNumRegions plus one entries, since
   // we "sacrifice" entry 0 to keep track of surviving bytes for
   // non-young regions (where the age is -1)
@@ -69,6 +69,8 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint worker_id,
   // need to be moved to the next space.
   _dest[InCSetState::Young] = InCSetState::Old;
   _dest[InCSetState::Old]   = InCSetState::Old;
+
+  _closures = G1EvacuationRootClosures::create_root_closures(this, _g1h);
 }

 // Pass locally gathered statistics to global state.
@@ -86,6 +88,7 @@ void G1ParScanThreadState::flush(size_t* surviving_young_words) {

 G1ParScanThreadState::~G1ParScanThreadState() {
   delete _plab_allocator;
+  delete _closures;
   FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
 }

hotspot/src/share/vm/gc/g1/g1ParScanThreadState.hpp
@@ -36,6 +36,7 @@
 #include "oops/oop.hpp"

 class G1PLABAllocator;
+class G1EvacuationRootClosures;
 class HeapRegion;
 class outputStream;

@@ -45,7 +46,7 @@ class G1ParScanThreadState : public CHeapObj<mtGC> {
   RefToScanQueue* _refs;
   DirtyCardQueue  _dcq;
   G1SATBCardTableModRefBS* _ct_bs;
-  G1RemSet* _g1_rem;
+  G1EvacuationRootClosures* _closures;

   G1PLABAllocator* _plab_allocator;

@@ -97,7 +98,7 @@ class G1ParScanThreadState : public CHeapObj<mtGC> {

   template <class T> void push_on_queue(T* ref);

-  template <class T> void update_rs(HeapRegion* from, T* p, uint tid) {
+  template <class T> void update_rs(HeapRegion* from, T* p) {
     // If the new value of the field points to the same region or
     // is the to-space, we don't need to include it in the Rset updates.
     if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
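update_rs() above records a card only when the reference crosses out of the source region and the source region is not a to-space (survivor) region. Below is a toy sketch of that filter, assuming power-of-two region sizes so a region index is a plain shift; the real check goes through HeapRegion::is_in_reserved() and is_survivor().

// Illustrative sketch only -- not part of the patch. Assumes 1 MB
// power-of-two regions (an assumption made for the sketch).
#include <cstdint>
#include <cstdio>

const uint32_t kRegionShift = 20; // 1 MB regions

uint64_t region_index(uint64_t addr) { return addr >> kRegionShift; }

// Mirrors the intent of G1ParScanThreadState::update_rs: remember the card
// only if the reference is "interesting" to the remembered set.
bool needs_remembered_set_entry(uint64_t field_addr, uint64_t referent_addr,
                                bool field_region_is_survivor) {
  // Same region: intra-region references never need remembered-set entries.
  if (region_index(field_addr) == region_index(referent_addr)) {
    return false;
  }
  // Field lives in to-space (a survivor region filled during this GC);
  // those regions are rescanned anyway, so skip the card.
  return !field_region_is_survivor;
}

int main() {
  printf("%d\n", (int)needs_remembered_set_entry(0x100000, 0x100010, false)); // 0
  printf("%d\n", (int)needs_remembered_set_entry(0x100000, 0x300010, false)); // 1
  printf("%d\n", (int)needs_remembered_set_entry(0x100000, 0x300010, true));  // 0
  return 0;
}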
@@ -109,6 +110,7 @@ class G1ParScanThreadState : public CHeapObj<mtGC> {
     }
   }

+  G1EvacuationRootClosures* closures() { return _closures; }
   uint worker_id() { return _worker_id; }

   // Returns the current amount of waste due to alignment or not being able to fit
|
@ -56,7 +56,7 @@ template <class T> void G1ParScanThreadState::do_oop_evac(T* p, HeapRegion* from
|
|||||||
}
|
}
|
||||||
|
|
||||||
assert(obj != NULL, "Must be");
|
assert(obj != NULL, "Must be");
|
||||||
update_rs(from, p, _worker_id);
|
update_rs(from, p);
|
||||||
}
|
}
|
||||||
|
|
||||||
template <class T> inline void G1ParScanThreadState::push_on_queue(T* ref) {
|
template <class T> inline void G1ParScanThreadState::push_on_queue(T* ref) {
|
||||||
|
hotspot/src/share/vm/gc/g1/g1Predictions.cpp (new file, 98 lines)
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1Predictions.hpp"
+
+#ifndef PRODUCT
+
+void G1Predictions::test() {
+  double const epsilon = 1e-6;
+  {
+    // Some basic formula tests with confidence = 0.0
+    G1Predictions predictor(0.0);
+    TruncatedSeq s;
+
+    double p0 = predictor.get_new_prediction(&s);
+    assert(p0 < epsilon, "Initial prediction of empty sequence must be 0.0 but is %f", p0);
+
+    s.add(5.0);
+    double p1 = predictor.get_new_prediction(&s);
+    assert(fabs(p1 - 5.0) < epsilon, "Prediction should be 5.0 but is %f", p1);
+    for (int i = 0; i < 40; i++) {
+      s.add(5.0);
+    }
+    double p2 = predictor.get_new_prediction(&s);
+    assert(fabs(p2 - 5.0) < epsilon, "Prediction should be 5.0 but is %f", p2);
+  }
+
+  {
+    // The following test checks that the initial predictions are based on the
+    // average of the sequence and not on the stddev (which is 0).
+    G1Predictions predictor(0.5);
+    TruncatedSeq s;
+
+    s.add(1.0);
+    double p1 = predictor.get_new_prediction(&s);
+    assert(p1 > 1.0, "First prediction must be larger than average, but avg is %f and prediction %f", s.davg(), p1);
+    s.add(1.0);
+    double p2 = predictor.get_new_prediction(&s);
+    assert(p2 < p1, "First prediction must be larger than second, but they are %f %f", p1, p2);
+    s.add(1.0);
+    double p3 = predictor.get_new_prediction(&s);
+    assert(p3 < p2, "Second prediction must be larger than third, but they are %f %f", p2, p3);
+    s.add(1.0);
+    s.add(1.0); // Five elements are now in the sequence.
+    double p5 = predictor.get_new_prediction(&s);
+    assert(p5 < p3, "Fifth prediction must be smaller than third, but they are %f %f", p3, p5);
+    assert(fabs(p5 - 1.0) < epsilon, "Prediction must be within epsilon of 1.0, but is %f", p5);
+  }
+
+  {
+    // The following test checks that initially the prediction based on the
+    // average is used, which gets overridden by the stddev prediction at the end.
+    G1Predictions predictor(0.5);
+    TruncatedSeq s;
+
+    s.add(0.5);
+    double p1 = predictor.get_new_prediction(&s);
+    assert(p1 > 0.5, "First prediction must be larger than average, but avg is %f and prediction %f", s.davg(), p1);
+    s.add(0.2);
+    double p2 = predictor.get_new_prediction(&s);
+    assert(p2 < p1, "First prediction must be larger than second, but they are %f %f", p1, p2);
+    s.add(0.5);
+    double p3 = predictor.get_new_prediction(&s);
+    assert(p3 < p2, "Second prediction must be larger than third, but they are %f %f", p2, p3);
+    s.add(0.2);
+    s.add(2.0);
+    double p5 = predictor.get_new_prediction(&s);
+    assert(p5 > p3, "Fifth prediction must be bigger than third, but they are %f %f", p3, p5);
+  }
+}
+
+void TestPredictions_test() {
+  G1Predictions::test();
+}
+
+#endif
hotspot/src/share/vm/gc/g1/g1Predictions.hpp (new file, 67 lines)
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1PREDICTIONS_HPP
+#define SHARE_VM_GC_G1_G1PREDICTIONS_HPP
+
+#include "memory/allocation.inline.hpp"
+#include "utilities/numberSeq.hpp"
+
+// Utility class containing various helper methods for prediction.
+class G1Predictions VALUE_OBJ_CLASS_SPEC {
+ private:
+  double _sigma;
+
+  // This function is used to estimate the stddev of sample sets. There is some
+  // special consideration of small sample sets: the actual stddev for them is
+  // not very useful, so we calculate some value based on the sample average.
+  // With five or more samples the actual stddev is used; with fewer, the
+  // estimate is the sample average scaled linearly from 2.0x (one sample)
+  // down to 0.5x (four samples), if that is larger than the stddev.
+  double stddev_estimate(TruncatedSeq const* seq) const {
+    double estimate = seq->dsd();
+    int const samples = seq->num();
+    if (samples < 5) {
+      estimate = MAX2(seq->davg() * (5 - samples) / 2.0, estimate);
+    }
+    return estimate;
+  }
+ public:
+  G1Predictions(double sigma) : _sigma(sigma) {
+    assert(sigma >= 0.0, "Confidence must be larger than or equal to zero");
+  }
+
+  // Confidence factor.
+  double sigma() const { return _sigma; }
+
+  double get_new_prediction(TruncatedSeq const* seq) const {
+    return seq->davg() + _sigma * stddev_estimate(seq);
+  }
+
+#ifndef PRODUCT
+  static void test();
+#endif
+};
+
+#endif // SHARE_VM_GC_G1_G1PREDICTIONS_HPP
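The prediction formula is fully visible above: a prediction is the sequence average plus sigma times a stddev estimate that is padded for small sample sets. The following self-contained sketch re-derives the arithmetic with a minimal stand-in for TruncatedSeq; the stand-in uses a plain average and population stddev rather than TruncatedSeq's decaying variants, so the numbers are illustrative only.

// Illustrative sketch only -- not part of the patch.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

struct Seq {
  std::vector<double> xs;
  void add(double v) { xs.push_back(v); }
  int num() const { return (int)xs.size(); }
  double avg() const {
    if (xs.empty()) return 0.0;
    double s = 0.0;
    for (double v : xs) s += v;
    return s / xs.size();
  }
  double sd() const { // population stddev
    if (xs.size() < 2) return 0.0;
    double m = avg(), s = 0.0;
    for (double v : xs) s += (v - m) * (v - m);
    return std::sqrt(s / xs.size());
  }
};

// Matches stddev_estimate(): for fewer than five samples the raw stddev is
// unreliable, so pad it with a multiple of the average (2.0x down to 0.5x).
double stddev_estimate(const Seq& s) {
  double est = s.sd();
  if (s.num() < 5) {
    est = std::max(s.avg() * (5 - s.num()) / 2.0, est);
  }
  return est;
}

double predict(const Seq& s, double sigma) {
  return s.avg() + sigma * stddev_estimate(s);
}

int main() {
  Seq s;
  for (int i = 1; i <= 6; i++) {
    s.add(1.0);
    printf("n=%d prediction=%.3f\n", i, predict(s, 0.5));
  }
  // The prediction decays toward the average (1.0) as samples accumulate,
  // which is exactly the behavior the G1Predictions::test() cases assert.
  return 0;
}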
hotspot/src/share/vm/gc/g1/g1RemSet.cpp
@@ -26,7 +26,6 @@
 #include "gc/g1/concurrentG1Refine.hpp"
 #include "gc/g1/concurrentG1RefineThread.hpp"
 #include "gc/g1/g1BlockOffsetTable.inline.hpp"
-#include "gc/g1/g1CodeBlobClosure.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1GCPhaseTimes.hpp"
@@ -228,15 +227,13 @@ public:
 };

 size_t G1RemSet::scanRS(G1ParPushHeapRSClosure* oc,
-                        OopClosure* non_heap_roots,
+                        CodeBlobClosure* heap_region_codeblobs,
                         uint worker_i) {
   double rs_time_start = os::elapsedTime();

-  G1CodeBlobClosure code_root_cl(non_heap_roots);
-
   HeapRegion *startRegion = _g1->start_cset_region_for_worker(worker_i);

-  ScanRSClosure scanRScl(oc, &code_root_cl, worker_i);
+  ScanRSClosure scanRScl(oc, heap_region_codeblobs, worker_i);

   _g1->collection_set_iterate_from(startRegion, &scanRScl);
   scanRScl.set_try_claimed();
@@ -263,6 +260,7 @@ public:
                                                DirtyCardQueue* into_cset_dcq) :
     _g1rs(g1h->g1_rem_set()), _into_cset_dcq(into_cset_dcq)
   {}
+
   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
     // The only time we care about recording cards that
     // contain references that point into the collection set
@@ -285,11 +283,16 @@ public:
 };

 void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i) {
-  G1GCParPhaseTimesTracker x(_g1p->phase_times(), G1GCPhaseTimes::UpdateRS, worker_i);
-  // Apply the given closure to all remaining log entries.
   RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq);

-  _g1->iterate_dirty_card_closure(&into_cset_update_rs_cl, into_cset_dcq, false, worker_i);
+  G1GCParPhaseTimesTracker x(_g1p->phase_times(), G1GCPhaseTimes::UpdateRS, worker_i);
+  {
+    // Apply the closure to the entries of the hot card cache.
+    G1GCParPhaseTimesTracker y(_g1p->phase_times(), G1GCPhaseTimes::ScanHCC, worker_i);
+    _g1->iterate_hcc_closure(&into_cset_update_rs_cl, worker_i);
+  }
+  // Apply the closure to all remaining log entries.
+  _g1->iterate_dirty_card_closure(&into_cset_update_rs_cl, worker_i);
 }

 void G1RemSet::cleanupHRRS() {
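The reworked updateRS() nests a second tracker so the hot-card-cache scan is reported as its own ScanHCC sub-phase inside UpdateRS. The trackers are scoped (RAII) timers; below is a generic sketch of that pattern with a hypothetical ScopedTimer in place of G1GCParPhaseTimesTracker, which in reality records into G1GCPhaseTimes rather than printing.

// Illustrative sketch only -- not part of the patch.
#include <chrono>
#include <cstdio>

struct ScopedTimer {
  const char* phase;
  std::chrono::steady_clock::time_point start;
  explicit ScopedTimer(const char* p)
      : phase(p), start(std::chrono::steady_clock::now()) {}
  ~ScopedTimer() {
    auto us = std::chrono::duration_cast<std::chrono::microseconds>(
        std::chrono::steady_clock::now() - start).count();
    printf("%s: %lld us\n", phase, (long long)us);
  }
};

void scan_hot_card_cache() { /* stand-in for iterate_hcc_closure */ }
void drain_dirty_card_log() { /* stand-in for iterate_dirty_card_closure */ }

// The nesting mirrors the new updateRS(): the inner scope times just the
// hot card cache scan, the outer scope covers the whole update phase.
void update_rs() {
  ScopedTimer update("UpdateRS");   // outer phase
  {
    ScopedTimer hcc("ScanHCC");     // nested sub-phase, reported separately
    scan_hot_card_cache();
  }
  drain_dirty_card_log();           // remaining time attributed to UpdateRS
}

int main() {
  update_rs();
  return 0;
}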
@@ -297,7 +300,7 @@ void G1RemSet::cleanupHRRS() {
 }

 size_t G1RemSet::oops_into_collection_set_do(G1ParPushHeapRSClosure* oc,
-                                             OopClosure* non_heap_roots,
+                                             CodeBlobClosure* heap_region_codeblobs,
                                              uint worker_i) {
 #if CARD_REPEAT_HISTO
   ct_freq_update_histo_and_reset();
|
|||||||
DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set());
|
DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set());
|
||||||
|
|
||||||
updateRS(&into_cset_dcq, worker_i);
|
updateRS(&into_cset_dcq, worker_i);
|
||||||
size_t cards_scanned = scanRS(oc, non_heap_roots, worker_i);
|
size_t cards_scanned = scanRS(oc, heap_region_codeblobs, worker_i);
|
||||||
|
|
||||||
// We now clear the cached values of _cset_rs_update_cl for this worker
|
// We now clear the cached values of _cset_rs_update_cl for this worker
|
||||||
_cset_rs_update_cl[worker_i] = NULL;
|
_cset_rs_update_cl[worker_i] = NULL;
|
||||||
|
hotspot/src/share/vm/gc/g1/g1RemSet.hpp
@@ -95,7 +95,7 @@ public:
   // Returns the number of cards scanned while looking for pointers
   // into the collection set.
   size_t oops_into_collection_set_do(G1ParPushHeapRSClosure* blk,
-                                     OopClosure* non_heap_roots,
+                                     CodeBlobClosure* heap_region_codeblobs,
                                      uint worker_i);

   // Prepare for and cleanup after an oops_into_collection_set_do
@@ -107,7 +107,7 @@ public:
   void cleanup_after_oops_into_collection_set_do();

   size_t scanRS(G1ParPushHeapRSClosure* oc,
-                OopClosure* non_heap_roots,
+                CodeBlobClosure* heap_region_codeblobs,
                 uint worker_i);

   void updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i);
hotspot/src/share/vm/gc/g1/g1RootClosures.cpp (new file, 149 lines)
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/g1/bufferingOopClosure.hpp"
+#include "gc/g1/g1CodeBlobClosure.hpp"
+#include "gc/g1/g1CollectedHeap.hpp"
+#include "gc/g1/g1OopClosures.inline.hpp"
+#include "gc/g1/g1RootClosures.hpp"
+
+class G1ParScanThreadState;
+
+// Simple holder object for a complete set of closures used by the G1 evacuation code.
+template <G1Mark Mark>
+class G1SharedClosures VALUE_OBJ_CLASS_SPEC {
+public:
+  G1ParCopyClosure<G1BarrierNone, Mark>  _oops;
+  G1ParCopyClosure<G1BarrierKlass, Mark> _oop_in_klass;
+  G1KlassScanClosure                     _klass_in_cld_closure;
+  CLDToKlassAndOopClosure                _clds;
+  G1CodeBlobClosure                      _codeblobs;
+  BufferingOopClosure                    _buffered_oops;
+
+  G1SharedClosures(G1CollectedHeap* g1h, G1ParScanThreadState* pss, bool process_only_dirty_klasses, bool must_claim_cld) :
+    _oops(g1h, pss),
+    _oop_in_klass(g1h, pss),
+    _klass_in_cld_closure(&_oop_in_klass, process_only_dirty_klasses),
+    _clds(&_klass_in_cld_closure, &_oops, must_claim_cld),
+    _codeblobs(&_oops),
+    _buffered_oops(&_oops) {}
+};
+
+class G1EvacuationClosures : public G1EvacuationRootClosures {
+  G1SharedClosures<G1MarkNone> _closures;
+
+public:
+  G1EvacuationClosures(G1CollectedHeap* g1h,
+                       G1ParScanThreadState* pss,
+                       bool gcs_are_young) :
+      _closures(g1h, pss, gcs_are_young, /* must_claim_cld */ false) {}
+
+  OopClosure* weak_oops()   { return &_closures._buffered_oops; }
+  OopClosure* strong_oops() { return &_closures._buffered_oops; }
+
+  CLDClosure* weak_clds()             { return &_closures._clds; }
+  CLDClosure* strong_clds()           { return &_closures._clds; }
+  CLDClosure* thread_root_clds()      { return NULL; }
+  CLDClosure* second_pass_weak_clds() { return NULL; }
+
+  CodeBlobClosure* strong_codeblobs() { return &_closures._codeblobs; }
+  CodeBlobClosure* weak_codeblobs()   { return &_closures._codeblobs; }
+
+  void flush()                 { _closures._buffered_oops.done(); }
+  double closure_app_seconds() { return _closures._buffered_oops.closure_app_seconds(); }
+
+  OopClosure* raw_strong_oops() { return &_closures._oops; }
+
+  bool trace_metadata() { return false; }
+};
+
+// Closures used during initial mark.
+// The treatment of "weak" roots is selectable through the template parameter;
+// this is usually used to control unloading of classes and interned strings.
+template <G1Mark MarkWeak>
+class G1InitalMarkClosures : public G1EvacuationRootClosures {
+  G1SharedClosures<G1MarkFromRoot> _strong;
+  G1SharedClosures<MarkWeak>       _weak;
+
+  // Filter method to help with returning the appropriate closures
+  // depending on the class template parameter.
+  template <G1Mark Mark, typename T>
+  T* null_if(T* t) {
+    if (Mark == MarkWeak) {
+      return NULL;
+    }
+    return t;
+  }
+
+public:
+  G1InitalMarkClosures(G1CollectedHeap* g1h,
+                       G1ParScanThreadState* pss) :
+      _strong(g1h, pss, /* process_only_dirty_klasses */ false, /* must_claim_cld */ true),
+      _weak(g1h, pss, /* process_only_dirty_klasses */ false, /* must_claim_cld */ true) {}
+
+  OopClosure* weak_oops()   { return &_weak._buffered_oops; }
+  OopClosure* strong_oops() { return &_strong._buffered_oops; }
+
+  // If MarkWeak is G1MarkPromotedFromRoot then the weak CLDs must be processed in a second pass.
+  CLDClosure* weak_clds()   { return null_if<G1MarkPromotedFromRoot>(&_weak._clds); }
+  CLDClosure* strong_clds() { return &_strong._clds; }
+
+  // If MarkWeak is G1MarkFromRoot then all CLDs are processed by the weak and strong variants,
+  // so return a NULL closure for the following specialized versions in that case.
+  CLDClosure* thread_root_clds()      { return null_if<G1MarkFromRoot>(&_strong._clds); }
+  CLDClosure* second_pass_weak_clds() { return null_if<G1MarkFromRoot>(&_weak._clds); }
+
+  CodeBlobClosure* strong_codeblobs() { return &_strong._codeblobs; }
+  CodeBlobClosure* weak_codeblobs()   { return &_weak._codeblobs; }
+
+  void flush() {
+    _strong._buffered_oops.done();
+    _weak._buffered_oops.done();
+  }
+
+  double closure_app_seconds() {
+    return _strong._buffered_oops.closure_app_seconds() +
+           _weak._buffered_oops.closure_app_seconds();
+  }
+
+  OopClosure* raw_strong_oops() { return &_strong._oops; }
+
+  // If we are not marking all weak roots then we are tracing
+  // which metadata is alive.
+  bool trace_metadata() { return MarkWeak == G1MarkPromotedFromRoot; }
+};
+
+G1EvacuationRootClosures* G1EvacuationRootClosures::create_root_closures(G1ParScanThreadState* pss, G1CollectedHeap* g1h) {
+  if (g1h->collector_state()->during_initial_mark_pause()) {
+    if (ClassUnloadingWithConcurrentMark) {
+      return new G1InitalMarkClosures<G1MarkPromotedFromRoot>(g1h, pss);
+    } else {
+      return new G1InitalMarkClosures<G1MarkFromRoot>(g1h, pss);
+    }
+  } else {
+    return new G1EvacuationClosures(g1h, pss, g1h->collector_state()->gcs_are_young());
+  }
+}
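The null_if<> helper above turns a runtime question ("should this closure be handed out?") into a comparison of template parameters that the compiler can fold away. Below is a compilable toy version of the same trick; all names are hypothetical and the "closure" is just an int.

// Illustrative sketch only -- not part of the patch.
#include <cstdio>

enum Mark { MarkFromRoot, MarkPromotedFromRoot };

template <Mark MarkWeak>
struct Closures {
  int weak_closure; // stand-in for a real CLD closure

  // Same shape as G1InitalMarkClosures::null_if: return NULL when the
  // caller-supplied Mark matches the class-level template parameter.
  template <Mark M, typename T>
  T* null_if(T* t) {
    return (M == MarkWeak) ? nullptr : t;
  }

  // Hand out the weak closure only when it is NOT deferred to a second pass.
  int* weak_clds() { return null_if<MarkPromotedFromRoot>(&weak_closure); }
};

int main() {
  Closures<MarkFromRoot> eager;         // weak CLDs processed in the first pass
  Closures<MarkPromotedFromRoot> lazy;  // weak CLDs deferred to a second pass
  printf("eager: %s\n", eager.weak_clds() ? "closure" : "NULL");
  printf("lazy:  %s\n", lazy.weak_clds() ? "closure" : "NULL");
  return 0;
}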
hotspot/src/share/vm/gc/g1/g1RootClosures.hpp (new file, 73 lines)
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1ROOTCLOSURESET_HPP
+#define SHARE_VM_GC_G1_G1ROOTCLOSURESET_HPP
+
+#include "memory/allocation.hpp"
+#include "memory/iterator.hpp"
+
+class G1CollectedHeap;
+class G1ParScanThreadState;
+
+class G1RootClosures : public CHeapObj<mtGC> {
+public:
+  // Closures to process raw oops in the root set.
+  virtual OopClosure* weak_oops() = 0;
+  virtual OopClosure* strong_oops() = 0;
+
+  // Closures to process CLDs in the root set.
+  virtual CLDClosure* weak_clds() = 0;
+  virtual CLDClosure* strong_clds() = 0;
+
+  // Applied to the CLDs reachable from the thread stacks.
+  virtual CLDClosure* thread_root_clds() = 0;
+
+  // Applied to code blobs reachable as strong roots.
+  virtual CodeBlobClosure* strong_codeblobs() = 0;
+};
+
+class G1EvacuationRootClosures : public G1RootClosures {
+public:
+  // Flush any buffered state and deferred processing
+  virtual void flush() = 0;
+  virtual double closure_app_seconds() = 0;
+
+  // Applied to the weakly reachable CLDs when all strongly reachable
+  // CLDs are guaranteed to have been processed.
+  virtual CLDClosure* second_pass_weak_clds() = 0;
+
+  // Get a raw oop closure for processing oops, bypassing the flushing above.
+  virtual OopClosure* raw_strong_oops() = 0;
+
+  // Applied to code blobs treated as weak roots.
+  virtual CodeBlobClosure* weak_codeblobs() = 0;
+
+  // Is this closure used for tracing metadata?
+  virtual bool trace_metadata() = 0;
+
+  static G1EvacuationRootClosures* create_root_closures(G1ParScanThreadState* pss, G1CollectedHeap* g1h);
+};
+
+#endif // SHARE_VM_GC_G1_G1ROOTCLOSURESET_HPP
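create_root_closures() concentrates the "which closures does this pause need?" decision in one place, and callers only ever hold the abstract interface. Below is a minimal sketch of that factory shape, with invented names standing in for the G1 types and collector state.

// Illustrative sketch only -- not part of the patch.
#include <cstdio>

struct RootClosures {
  virtual ~RootClosures() {}
  virtual bool trace_metadata() = 0;
  virtual const char* name() = 0;

  // Mirrors the shape of create_root_closures(): pick an implementation
  // from the collector state, hide the concrete types from callers.
  static RootClosures* create(bool initial_mark, bool class_unloading);
};

struct EvacuationClosures : RootClosures {
  bool trace_metadata() { return false; }
  const char* name() { return "plain evacuation"; }
};

template <bool TraceMetadata>
struct InitialMarkClosures : RootClosures {
  bool trace_metadata() { return TraceMetadata; }
  const char* name() { return "initial mark"; }
};

RootClosures* RootClosures::create(bool initial_mark, bool class_unloading) {
  if (initial_mark) {
    if (class_unloading) {
      return new InitialMarkClosures<true>();
    }
    return new InitialMarkClosures<false>();
  }
  return new EvacuationClosures();
}

int main() {
  RootClosures* c = RootClosures::create(true, true);
  printf("%s, trace_metadata=%d\n", c->name(), (int)c->trace_metadata());
  delete c;
  return 0;
}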
hotspot/src/share/vm/gc/g1/g1RootProcessor.cpp
@@ -33,7 +33,7 @@
 #include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1CollectorState.hpp"
 #include "gc/g1/g1GCPhaseTimes.hpp"
-#include "gc/g1/g1RemSet.inline.hpp"
+#include "gc/g1/g1RootClosures.hpp"
 #include "gc/g1/g1RootProcessor.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
 #include "memory/allocation.inline.hpp"
@@ -70,40 +70,19 @@ G1RootProcessor::G1RootProcessor(G1CollectedHeap* g1h, uint n_workers) :
     _lock(Mutex::leaf, "G1 Root Scanning barrier lock", false, Monitor::_safepoint_check_never),
     _n_workers_discovered_strong_classes(0) {}

-void G1RootProcessor::evacuate_roots(OopClosure* scan_non_heap_roots,
-                                     OopClosure* scan_non_heap_weak_roots,
-                                     CLDClosure* scan_strong_clds,
-                                     CLDClosure* scan_weak_clds,
-                                     bool trace_metadata,
-                                     uint worker_i) {
-  // First scan the shared roots.
+void G1RootProcessor::evacuate_roots(G1EvacuationRootClosures* closures, uint worker_i) {
   double ext_roots_start = os::elapsedTime();
   G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();

-  BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
-  BufferingOopClosure buf_scan_non_heap_weak_roots(scan_non_heap_weak_roots);
-
-  OopClosure* const weak_roots = &buf_scan_non_heap_weak_roots;
-  OopClosure* const strong_roots = &buf_scan_non_heap_roots;
-
-  // CodeBlobClosures are not interoperable with BufferingOopClosures
-  G1CodeBlobClosure root_code_blobs(scan_non_heap_roots);
-
-  process_java_roots(strong_roots,
-                     trace_metadata ? scan_strong_clds : NULL,
-                     scan_strong_clds,
-                     trace_metadata ? NULL : scan_weak_clds,
-                     &root_code_blobs,
-                     phase_times,
-                     worker_i);
+  process_java_roots(closures, phase_times, worker_i);

   // This is the point where this worker thread will not find more strong CLDs/nmethods.
   // Report this so G1 can synchronize the strong and weak CLDs/nmethods processing.
-  if (trace_metadata) {
+  if (closures->trace_metadata()) {
     worker_has_discovered_all_strong_classes();
   }

-  process_vm_roots(strong_roots, weak_roots, phase_times, worker_i);
+  process_vm_roots(closures, phase_times, worker_i);

   {
     // Now the CM ref_processor roots.
@@ -113,11 +92,11 @@ void G1RootProcessor::evacuate_roots(OopClosure* scan_non_heap_roots,
       // concurrent mark ref processor as roots and keep entries
       // (which are added by the marking threads) on them live
       // until they can be processed at the end of marking.
-      _g1h->ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
+      _g1h->ref_processor_cm()->weak_oops_do(closures->strong_oops());
     }
   }

-  if (trace_metadata) {
+  if (closures->trace_metadata()) {
     {
       G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::WaitForStrongCLD, worker_i);
       // Barrier to make sure all workers passed
@@ -127,18 +106,18 @@ void G1RootProcessor::evacuate_roots(OopClosure* scan_non_heap_roots,

     // Now take the complement of the strong CLDs.
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::WeakCLDRoots, worker_i);
-    ClassLoaderDataGraph::roots_cld_do(NULL, scan_weak_clds);
+    assert(closures->second_pass_weak_clds() != NULL, "Should be non-null if we are tracing metadata.");
+    ClassLoaderDataGraph::roots_cld_do(NULL, closures->second_pass_weak_clds());
   } else {
     phase_times->record_time_secs(G1GCPhaseTimes::WaitForStrongCLD, worker_i, 0.0);
     phase_times->record_time_secs(G1GCPhaseTimes::WeakCLDRoots, worker_i, 0.0);
+    assert(closures->second_pass_weak_clds() == NULL, "Should be null if not tracing metadata.");
   }

   // Finish up any enqueued closure apps (attributed as object copy time).
-  buf_scan_non_heap_roots.done();
-  buf_scan_non_heap_weak_roots.done();
+  closures->flush();

-  double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds()
-      + buf_scan_non_heap_weak_roots.closure_app_seconds();
+  double obj_copy_time_sec = closures->closure_app_seconds();

   phase_times->record_time_secs(G1GCPhaseTimes::ObjCopy, worker_i, obj_copy_time_sec);

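The evacuate_roots() rewrite above is a classic "introduce parameter object" refactoring: six loose arguments become one object that can also answer policy questions such as trace_metadata(). A toy before/after follows, under the simplifying assumption that the closures are plain function pointers; the names are invented for illustration.

// Illustrative sketch only -- not part of the patch.
#include <cstdio>

typedef void (*OopFn)(const char*);

void scan(const char* what) { printf("scan %s\n", what); }

// Before: loose parameters that every caller must line up correctly.
void evacuate_roots_before(OopFn strong, OopFn weak, OopFn clds,
                           bool trace_metadata, unsigned worker_id) {
  strong("strong roots");
  if (!trace_metadata) {
    weak("weak roots");
  }
  clds("clds");
}

// After: one bundle keeps the closures consistent and owns the policy bit.
struct EvacClosures {
  OopFn strong;
  OopFn weak;
  OopFn clds;
  bool trace_metadata() const { return false; }
};

void evacuate_roots_after(EvacClosures* c, unsigned worker_id) {
  c->strong("strong roots");
  if (!c->trace_metadata()) {
    c->weak("weak roots");
  }
  c->clds("clds");
}

int main() {
  EvacClosures c = { scan, scan, scan };
  evacuate_roots_before(scan, scan, scan, false, 0);
  evacuate_roots_after(&c, 0);
  return 0;
}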
@@ -159,22 +138,68 @@ void G1RootProcessor::evacuate_roots(OopClosure* scan_non_heap_roots,
   _process_strong_tasks.all_tasks_completed(n_workers());
 }

+// Adaptor to pass the closures to the strong roots in the VM.
+class StrongRootsClosures : public G1RootClosures {
+  OopClosure* _roots;
+  CLDClosure* _clds;
+  CodeBlobClosure* _blobs;
+public:
+  StrongRootsClosures(OopClosure* roots, CLDClosure* clds, CodeBlobClosure* blobs) :
+      _roots(roots), _clds(clds), _blobs(blobs) {}
+
+  OopClosure* weak_oops()   { return NULL; }
+  OopClosure* strong_oops() { return _roots; }
+
+  CLDClosure* weak_clds()        { return NULL; }
+  CLDClosure* strong_clds()      { return _clds; }
+  CLDClosure* thread_root_clds() { return _clds; }
+
+  CodeBlobClosure* strong_codeblobs() { return _blobs; }
+};
+
 void G1RootProcessor::process_strong_roots(OopClosure* oops,
                                            CLDClosure* clds,
                                            CodeBlobClosure* blobs) {
-  process_java_roots(oops, clds, clds, NULL, blobs, NULL, 0);
-  process_vm_roots(oops, NULL, NULL, 0);
+  StrongRootsClosures closures(oops, clds, blobs);
+
+  process_java_roots(&closures, NULL, 0);
+  process_vm_roots(&closures, NULL, 0);

   _process_strong_tasks.all_tasks_completed(n_workers());
 }

+// Adaptor to pass the closures to all the roots in the VM.
+class AllRootsClosures : public G1RootClosures {
+  OopClosure* _roots;
+  CLDClosure* _clds;
+public:
+  AllRootsClosures(OopClosure* roots, CLDClosure* clds) :
+      _roots(roots), _clds(clds) {}
+
+  OopClosure* weak_oops()   { return _roots; }
+  OopClosure* strong_oops() { return _roots; }
+
+  // By returning the same CLDClosure for both weak and strong CLDs we ensure
+  // that a single walk of the CLDG will invoke the closure on all CLDs in the
+  // system.
+  CLDClosure* weak_clds()   { return _clds; }
+  CLDClosure* strong_clds() { return _clds; }
+  // We don't want to visit CLDs more than once, so we return NULL for the
+  // thread root CLDs.
+  CLDClosure* thread_root_clds() { return NULL; }
+
+  // We don't want to visit code blobs more than once, so we return NULL for the
+  // strong case and walk the entire code cache as a separate step.
+  CodeBlobClosure* strong_codeblobs() { return NULL; }
+};
+
 void G1RootProcessor::process_all_roots(OopClosure* oops,
                                         CLDClosure* clds,
                                         CodeBlobClosure* blobs) {
-  process_java_roots(oops, NULL, clds, clds, NULL, NULL, 0);
-  process_vm_roots(oops, oops, NULL, 0);
+  AllRootsClosures closures(oops, clds);
+
+  process_java_roots(&closures, NULL, 0);
+  process_vm_roots(&closures, NULL, 0);

   if (!_process_strong_tasks.is_task_claimed(G1RP_PS_CodeCache_oops_do)) {
     CodeCache::blobs_do(blobs);
|
|||||||
_process_strong_tasks.all_tasks_completed(n_workers());
|
_process_strong_tasks.all_tasks_completed(n_workers());
|
||||||
}
|
}
|
||||||
|
|
||||||
void G1RootProcessor::process_java_roots(OopClosure* strong_roots,
|
void G1RootProcessor::process_java_roots(G1RootClosures* closures,
|
||||||
CLDClosure* thread_stack_clds,
|
|
||||||
CLDClosure* strong_clds,
|
|
||||||
CLDClosure* weak_clds,
|
|
||||||
CodeBlobClosure* strong_code,
|
|
||||||
G1GCPhaseTimes* phase_times,
|
G1GCPhaseTimes* phase_times,
|
||||||
uint worker_i) {
|
uint worker_i) {
|
||||||
assert(thread_stack_clds == NULL || weak_clds == NULL, "There is overlap between those, only one may be set");
|
assert(closures->thread_root_clds() == NULL || closures->weak_clds() == NULL, "There is overlap between those, only one may be set");
|
||||||
// Iterating over the CLDG and the Threads are done early to allow us to
|
// Iterating over the CLDG and the Threads are done early to allow us to
|
||||||
// first process the strong CLDs and nmethods and then, after a barrier,
|
// first process the strong CLDs and nmethods and then, after a barrier,
|
||||||
// let the thread process the weak CLDs and nmethods.
|
// let the thread process the weak CLDs and nmethods.
|
||||||
{
|
{
|
||||||
G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CLDGRoots, worker_i);
|
G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CLDGRoots, worker_i);
|
||||||
if (!_process_strong_tasks.is_task_claimed(G1RP_PS_ClassLoaderDataGraph_oops_do)) {
|
if (!_process_strong_tasks.is_task_claimed(G1RP_PS_ClassLoaderDataGraph_oops_do)) {
|
||||||
ClassLoaderDataGraph::roots_cld_do(strong_clds, weak_clds);
|
ClassLoaderDataGraph::roots_cld_do(closures->strong_clds(), closures->weak_clds());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
{
|
{
|
||||||
G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ThreadRoots, worker_i);
|
G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ThreadRoots, worker_i);
|
||||||
bool is_par = n_workers() > 1;
|
bool is_par = n_workers() > 1;
|
||||||
Threads::possibly_parallel_oops_do(is_par, strong_roots, thread_stack_clds, strong_code);
|
Threads::possibly_parallel_oops_do(is_par,
|
||||||
|
closures->strong_oops(),
|
||||||
|
closures->thread_root_clds(),
|
||||||
|
closures->strong_codeblobs());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void G1RootProcessor::process_vm_roots(OopClosure* strong_roots,
|
void G1RootProcessor::process_vm_roots(G1RootClosures* closures,
|
||||||
OopClosure* weak_roots,
|
|
||||||
G1GCPhaseTimes* phase_times,
|
G1GCPhaseTimes* phase_times,
|
||||||
uint worker_i) {
|
uint worker_i) {
|
||||||
|
OopClosure* strong_roots = closures->strong_oops();
|
||||||
|
OopClosure* weak_roots = closures->weak_oops();
|
||||||
|
|
||||||
{
|
{
|
||||||
G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::UniverseRoots, worker_i);
|
G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::UniverseRoots, worker_i);
|
||||||
if (!_process_strong_tasks.is_task_claimed(G1RP_PS_Universe_oops_do)) {
|
if (!_process_strong_tasks.is_task_claimed(G1RP_PS_Universe_oops_do)) {
|
||||||
|
hotspot/src/share/vm/gc/g1/g1RootProcessor.hpp
@@ -32,8 +32,10 @@
 class CLDClosure;
 class CodeBlobClosure;
 class G1CollectedHeap;
+class G1EvacuationRootClosures;
 class G1GCPhaseTimes;
 class G1ParPushHeapRSClosure;
+class G1RootClosures;
 class Monitor;
 class OopClosure;
 class SubTasksDone;
@@ -71,16 +73,11 @@ class G1RootProcessor : public StackObj {
   void worker_has_discovered_all_strong_classes();
   void wait_until_all_strong_classes_discovered();

-  void process_java_roots(OopClosure* scan_non_heap_roots,
-                          CLDClosure* thread_stack_clds,
-                          CLDClosure* scan_strong_clds,
-                          CLDClosure* scan_weak_clds,
-                          CodeBlobClosure* scan_strong_code,
+  void process_java_roots(G1RootClosures* closures,
                           G1GCPhaseTimes* phase_times,
                           uint worker_i);

-  void process_vm_roots(OopClosure* scan_non_heap_roots,
-                        OopClosure* scan_non_heap_weak_roots,
+  void process_vm_roots(G1RootClosures* closures,
                         G1GCPhaseTimes* phase_times,
                         uint worker_i);

@@ -90,12 +87,7 @@ public:
   // Apply closures to the strongly and weakly reachable roots in the system
   // in a single pass.
   // Record and report timing measurements for sub phases using the worker_i
-  void evacuate_roots(OopClosure* scan_non_heap_roots,
-                      OopClosure* scan_non_heap_weak_roots,
-                      CLDClosure* scan_strong_clds,
-                      CLDClosure* scan_weak_clds,
-                      bool trace_metadata,
-                      uint worker_i);
+  void evacuate_roots(G1EvacuationRootClosures* closures, uint worker_i);

   // Apply oops, clds and blobs to all strongly reachable roots in the system
   void process_strong_roots(OopClosure* oops,
hotspot/src/share/vm/gc/g1/heapRegion.hpp
@@ -31,7 +31,6 @@
 #include "gc/g1/survRateGroup.hpp"
 #include "gc/shared/ageTable.hpp"
 #include "gc/shared/spaceDecorator.hpp"
-#include "gc/shared/watermark.hpp"
 #include "utilities/macros.hpp"

 // A HeapRegion is the smallest piece of a G1CollectedHeap that
hotspot/src/share/vm/gc/g1/survRateGroup.cpp
@@ -24,15 +24,15 @@

 #include "precompiled.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1CollectorPolicy.hpp"
+#include "gc/g1/g1Predictions.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "gc/g1/survRateGroup.hpp"
 #include "memory/allocation.hpp"

-SurvRateGroup::SurvRateGroup(G1CollectorPolicy* g1p,
+SurvRateGroup::SurvRateGroup(G1Predictions* predictor,
                              const char* name,
                              size_t summary_surv_rates_len) :
-    _g1p(g1p), _name(name),
+    _predictor(predictor), _name(name),
     _summary_surv_rates_len(summary_surv_rates_len),
     _summary_surv_rates_max_len(0),
     _summary_surv_rates(NULL),
@@ -52,10 +52,13 @@ SurvRateGroup::SurvRateGroup(G1CollectorPolicy* g1p,
   start_adding_regions();
 }

+double SurvRateGroup::get_new_prediction(TruncatedSeq const* seq) const {
+  return _predictor->get_new_prediction(seq);
+}
+
 void SurvRateGroup::reset() {
   _all_regions_allocated = 0;
   _setup_seq_num         = 0;
-  _accum_surv_rate       = 0.0;
   _last_pred             = 0.0;
   // the following will set up the arrays with length 1
   _region_num            = 1;
@@ -76,15 +79,12 @@ void SurvRateGroup::reset() {
   _region_num = 0;
 }

-void
-SurvRateGroup::start_adding_regions() {
+void SurvRateGroup::start_adding_regions() {
   _setup_seq_num   = _stats_arrays_length;
   _region_num      = 0;
-  _accum_surv_rate = 0.0;
 }

-void
-SurvRateGroup::stop_adding_regions() {
+void SurvRateGroup::stop_adding_regions() {
   if (_region_num > _stats_arrays_length) {
     double* old_surv_rate = _surv_rate;
     double* old_accum_surv_rate_pred = _accum_surv_rate_pred;
@@ -119,33 +119,12 @@ SurvRateGroup::stop_adding_regions() {
   }
 }

-double
-SurvRateGroup::accum_surv_rate(size_t adjustment) {
-  // we might relax this one in the future...
-  guarantee( adjustment == 0 || adjustment == 1, "pre-condition" );
-
-  double ret = _accum_surv_rate;
-  if (adjustment > 0) {
-    TruncatedSeq* seq = get_seq(_region_num+1);
-    double surv_rate = _g1p->get_new_prediction(seq);
-    ret += surv_rate;
-  }
-
-  return ret;
-}
-
-int
-SurvRateGroup::next_age_index() {
-  TruncatedSeq* seq = get_seq(_region_num);
-  double surv_rate = _g1p->get_new_prediction(seq);
-  _accum_surv_rate += surv_rate;
-
+int SurvRateGroup::next_age_index() {
   ++_region_num;
   return (int) ++_all_regions_allocated;
 }

-void
-SurvRateGroup::record_surviving_words(int age_in_group, size_t surv_words) {
+void SurvRateGroup::record_surviving_words(int age_in_group, size_t surv_words) {
   guarantee( 0 <= age_in_group && (size_t) age_in_group < _region_num,
              "pre-condition" );
   guarantee( _surv_rate[age_in_group] <= 0.00001,
@@ -161,9 +140,8 @@ SurvRateGroup::record_surviving_words(int age_in_group, size_t surv_words) {
   }
 }

-void
-SurvRateGroup::all_surviving_words_recorded(bool propagate) {
-  if (propagate && _region_num > 0) { // conservative
+void SurvRateGroup::all_surviving_words_recorded(bool update_predictors) {
+  if (update_predictors && _region_num > 0) { // conservative
     double surv_rate = _surv_rate_pred[_region_num-1]->last();
     for (size_t i = _region_num; i < _stats_arrays_length; ++i) {
       guarantee( _surv_rate[i] <= 0.00001,
@@ -175,24 +153,22 @@ SurvRateGroup::all_surviving_words_recorded(bool propagate) {
   double accum = 0.0;
   double pred = 0.0;
   for (size_t i = 0; i < _stats_arrays_length; ++i) {
-    pred = _g1p->get_new_prediction(_surv_rate_pred[i]);
+    pred = get_new_prediction(_surv_rate_pred[i]);
     if (pred > 1.0) pred = 1.0;
     accum += pred;
     _accum_surv_rate_pred[i] = accum;
-    // gclog_or_tty->print_cr("age %3d, accum %10.2lf", i, accum);
   }
   _last_pred = pred;
 }

 #ifndef PRODUCT
-void
-SurvRateGroup::print() {
+void SurvRateGroup::print() {
   gclog_or_tty->print_cr("Surv Rate Group: %s (" SIZE_FORMAT " entries)",
                          _name, _region_num);
   for (size_t i = 0; i < _region_num; ++i) {
     gclog_or_tty->print_cr("    age " SIZE_FORMAT_W(4) " surv rate %6.2lf %% pred %6.2lf %%",
                            i, _surv_rate[i] * 100.0,
-                           _g1p->get_new_prediction(_surv_rate_pred[i]) * 100.0);
+                           _predictor->get_new_prediction(_surv_rate_pred[i]) * 100.0);
   }
 }

@@ -211,9 +187,9 @@ SurvRateGroup::print_surv_rate_summary() {
   size_t limit = MIN2((int) length, 10);
   while (index < limit) {
     gclog_or_tty->print_cr("           " SIZE_FORMAT_W(4)
                            "                 %6.2lf%%          %6.2lf",
                            index, _summary_surv_rates[index]->avg() * 100.0,
                            (double) _summary_surv_rates[index]->num());
     ++index;
   }

@@ -230,9 +206,9 @@ SurvRateGroup::print_surv_rate_summary() {

     if (index == length || num % 10 == 0) {
       gclog_or_tty->print_cr("   " SIZE_FORMAT_W(4) " .. " SIZE_FORMAT_W(4)
                              "                 %6.2lf%%          %6.2lf",
                              (index-1) / 10 * 10, index-1, sum / (double) num,
                              (double) samples / (double) num);
       sum = 0.0;
       num = 0;
       samples = 0;
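all_surviving_words_recorded() above clamps each per-age prediction to 1.0 and prefix-sums the results, so that the later accum_surv_rate_pred(age) query is a single array lookup. The arithmetic in isolation, with invented sample numbers:

// Illustrative sketch only -- not part of the patch.
#include <cstdio>

int main() {
  // Hypothetical per-age survival-rate predictions (fraction surviving).
  double pred_by_age[] = { 0.95, 0.60, 0.30, 1.20 /* predictor overshoot */ };
  const int n = sizeof(pred_by_age) / sizeof(pred_by_age[0]);

  double accum_surv_rate_pred[n];
  double accum = 0.0;
  for (int i = 0; i < n; i++) {
    double pred = pred_by_age[i];
    if (pred > 1.0) pred = 1.0;      // a region cannot survive more than 100%
    accum += pred;
    accum_surv_rate_pred[i] = accum; // expected surviving "regions" up to age i
  }
  for (int i = 0; i < n; i++) {
    printf("age %d: accumulated prediction %.2f\n", i, accum_surv_rate_pred[i]);
  }
  return 0;
}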
@@ -27,18 +27,20 @@

 #include "utilities/numberSeq.hpp"

-class G1CollectorPolicy;
+class G1Predictions;

 class SurvRateGroup : public CHeapObj<mtGC> {
 private:
-  G1CollectorPolicy* _g1p;
+  G1Predictions* _predictor;
+
+  double get_new_prediction(TruncatedSeq const* seq) const;

   const char* _name;

   size_t  _stats_arrays_length;
   double* _surv_rate;
   double* _accum_surv_rate_pred;
   double  _last_pred;
-  double  _accum_surv_rate;
   TruncatedSeq** _surv_rate_pred;
   NumberSeq**    _summary_surv_rates;
   size_t         _summary_surv_rates_len;
@@ -49,18 +51,18 @@ private:
   size_t _setup_seq_num;

 public:
-  SurvRateGroup(G1CollectorPolicy* g1p,
+  SurvRateGroup(G1Predictions* predictor,
                 const char* name,
                 size_t summary_surv_rates_len);
   void reset();
   void start_adding_regions();
   void stop_adding_regions();
   void record_surviving_words(int age_in_group, size_t surv_words);
-  void all_surviving_words_recorded(bool propagate);
+  void all_surviving_words_recorded(bool update_predictors);
   const char* name() { return _name; }

   size_t region_num() { return _region_num; }
-  double accum_surv_rate_pred(int age) {
+  double accum_surv_rate_pred(int age) const {
     assert(age >= 0, "must be");
     if ((size_t)age < _stats_arrays_length)
       return _accum_surv_rate_pred[age];
@@ -70,9 +72,7 @@ public:
     }
   }

-  double accum_surv_rate(size_t adjustment);
-
-  TruncatedSeq* get_seq(size_t age) {
+  TruncatedSeq* get_seq(size_t age) const {
     if (age >= _setup_seq_num) {
       guarantee( _setup_seq_num > 0, "invariant" );
       age = _setup_seq_num-1;
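
Note on the SurvRateGroup changes above: the class no longer reaches back into the whole G1CollectorPolicy; it keeps only a G1Predictions* and funnels every lookup through the new private get_new_prediction() wrapper, so the survivor-rate bookkeeping depends on nothing but the prediction math. A rough, self-contained sketch of that shape (all names below are illustrative stand-ins, not the HotSpot API):

// Standalone model: a predictor pads the observed mean with a confidence
// multiple of the standard deviation, in the spirit of what the G1
// prediction code does. TruncatedSeqModel keeps only the last N samples.
#include <cmath>
#include <cstdio>
#include <deque>

class TruncatedSeqModel {
  std::deque<double> _samples;
  const size_t _max;
 public:
  explicit TruncatedSeqModel(size_t max = 10) : _max(max) {}
  void add(double v) {
    if (_samples.size() == _max) _samples.pop_front();  // drop oldest sample
    _samples.push_back(v);
  }
  double avg() const {
    double s = 0.0;
    for (double v : _samples) s += v;
    return _samples.empty() ? 0.0 : s / _samples.size();
  }
  double sd() const {
    double m = avg(), s = 0.0;
    for (double v : _samples) s += (v - m) * (v - m);
    return _samples.size() < 2 ? 0.0 : std::sqrt(s / (_samples.size() - 1));
  }
};

class PredictionsModel {
  double _sigma;  // confidence multiplier
 public:
  explicit PredictionsModel(double sigma) : _sigma(sigma) {}
  // Pad the mean so predictions err on the safe (high) side.
  double get_new_prediction(const TruncatedSeqModel* seq) const {
    return seq->avg() + _sigma * seq->sd();
  }
};

int main() {
  TruncatedSeqModel surv_rate;
  for (double v : {0.30, 0.35, 0.28, 0.40}) surv_rate.add(v);
  PredictionsModel predictor(1.0);
  std::printf("predicted survival rate: %.2f\n",
              predictor.get_new_prediction(&surv_rate));
  return 0;
}

The point of the refactoring is visible even in the toy: the rate group only needs the predictor object, not the whole collector policy that used to own it.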
hotspot/src/share/vm/gc/g1/youngList.cpp  (new file, 241 lines)
@@ -0,0 +1,241 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1CollectedHeap.hpp"
+#include "gc/g1/g1CollectorPolicy.hpp"
+#include "gc/g1/heapRegion.hpp"
+#include "gc/g1/heapRegion.inline.hpp"
+#include "gc/g1/heapRegionRemSet.hpp"
+#include "gc/g1/youngList.hpp"
+#include "utilities/ostream.hpp"
+
+YoungList::YoungList(G1CollectedHeap* g1h) :
+    _g1h(g1h), _head(NULL), _length(0), _last_sampled_rs_lengths(0),
+    _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) {
+  guarantee(check_list_empty(false), "just making sure...");
+}
+
+void YoungList::push_region(HeapRegion *hr) {
+  assert(!hr->is_young(), "should not already be young");
+  assert(hr->get_next_young_region() == NULL, "cause it should!");
+
+  hr->set_next_young_region(_head);
+  _head = hr;
+
+  _g1h->g1_policy()->set_region_eden(hr, (int) _length);
+  ++_length;
+}
+
+void YoungList::add_survivor_region(HeapRegion* hr) {
+  assert(hr->is_survivor(), "should be flagged as survivor region");
+  assert(hr->get_next_young_region() == NULL, "cause it should!");
+
+  hr->set_next_young_region(_survivor_head);
+  if (_survivor_head == NULL) {
+    _survivor_tail = hr;
+  }
+  _survivor_head = hr;
+  ++_survivor_length;
+}
+
+void YoungList::empty_list(HeapRegion* list) {
+  while (list != NULL) {
+    HeapRegion* next = list->get_next_young_region();
+    list->set_next_young_region(NULL);
+    list->uninstall_surv_rate_group();
+    // This is called before a Full GC and all the non-empty /
+    // non-humongous regions at the end of the Full GC will end up as
+    // old anyway.
+    list->set_old();
+    list = next;
+  }
+}
+
+void YoungList::empty_list() {
+  assert(check_list_well_formed(), "young list should be well formed");
+
+  empty_list(_head);
+  _head = NULL;
+  _length = 0;
+
+  empty_list(_survivor_head);
+  _survivor_head = NULL;
+  _survivor_tail = NULL;
+  _survivor_length = 0;
+
+  _last_sampled_rs_lengths = 0;
+
+  assert(check_list_empty(false), "just making sure...");
+}
+
+bool YoungList::check_list_well_formed() {
+  bool ret = true;
+
+  uint length = 0;
+  HeapRegion* curr = _head;
+  HeapRegion* last = NULL;
+  while (curr != NULL) {
+    if (!curr->is_young()) {
+      gclog_or_tty->print_cr("### YOUNG REGION " PTR_FORMAT "-" PTR_FORMAT " "
+                             "incorrectly tagged (y: %d, surv: %d)",
+                             p2i(curr->bottom()), p2i(curr->end()),
+                             curr->is_young(), curr->is_survivor());
+      ret = false;
+    }
+    ++length;
+    last = curr;
+    curr = curr->get_next_young_region();
+  }
+  ret = ret && (length == _length);
+
+  if (!ret) {
+    gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
+    gclog_or_tty->print_cr("### list has %u entries, _length is %u",
+                           length, _length);
+  }
+
+  return ret;
+}
+
+bool YoungList::check_list_empty(bool check_sample) {
+  bool ret = true;
+
+  if (_length != 0) {
+    gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %u",
+                           _length);
+    ret = false;
+  }
+  if (check_sample && _last_sampled_rs_lengths != 0) {
+    gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths");
+    ret = false;
+  }
+  if (_head != NULL) {
+    gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head");
+    ret = false;
+  }
+  if (!ret) {
+    gclog_or_tty->print_cr("### YOUNG LIST does not seem empty");
+  }
+
+  return ret;
+}
+
+void
+YoungList::rs_length_sampling_init() {
+  _sampled_rs_lengths = 0;
+  _curr               = _head;
+}
+
+bool
+YoungList::rs_length_sampling_more() {
+  return _curr != NULL;
+}
+
+void
+YoungList::rs_length_sampling_next() {
+  assert( _curr != NULL, "invariant" );
+  size_t rs_length = _curr->rem_set()->occupied();
+
+  _sampled_rs_lengths += rs_length;
+
+  // The current region may not yet have been added to the
+  // incremental collection set (it gets added when it is
+  // retired as the current allocation region).
+  if (_curr->in_collection_set()) {
+    // Update the collection set policy information for this region
+    _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length);
+  }
+
+  _curr = _curr->get_next_young_region();
+  if (_curr == NULL) {
+    _last_sampled_rs_lengths = _sampled_rs_lengths;
+    // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths);
+  }
+}
+
+void
+YoungList::reset_auxilary_lists() {
+  guarantee( is_empty(), "young list should be empty" );
+  assert(check_list_well_formed(), "young list should be well formed");
+
+  // Add survivor regions to SurvRateGroup.
+  _g1h->g1_policy()->note_start_adding_survivor_regions();
+  _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);
+
+  int young_index_in_cset = 0;
+  for (HeapRegion* curr = _survivor_head;
+       curr != NULL;
+       curr = curr->get_next_young_region()) {
+    _g1h->g1_policy()->set_region_survivor(curr, young_index_in_cset);
+
+    // The region is a non-empty survivor so let's add it to
+    // the incremental collection set for the next evacuation
+    // pause.
+    _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
+    young_index_in_cset += 1;
+  }
+  assert((uint) young_index_in_cset == _survivor_length, "post-condition");
+  _g1h->g1_policy()->note_stop_adding_survivor_regions();
+
+  _head   = _survivor_head;
+  _length = _survivor_length;
+  if (_survivor_head != NULL) {
+    assert(_survivor_tail != NULL, "cause it shouldn't be");
+    assert(_survivor_length > 0, "invariant");
+    _survivor_tail->set_next_young_region(NULL);
+  }
+
+  // Don't clear the survivor list handles until the start of
+  // the next evacuation pause - we need it in order to re-tag
+  // the survivor regions from this evacuation pause as 'young'
+  // at the start of the next.
+
+  _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);
+
+  assert(check_list_well_formed(), "young list should be well formed");
+}
+
+void YoungList::print() {
+  HeapRegion* lists[] = {_head,   _survivor_head};
+  const char* names[] = {"YOUNG", "SURVIVOR"};
+
+  for (uint list = 0; list < ARRAY_SIZE(lists); ++list) {
+    gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
+    HeapRegion *curr = lists[list];
+    if (curr == NULL) {
+      gclog_or_tty->print_cr("  empty");
+    }
+    while (curr != NULL) {
+      gclog_or_tty->print_cr("  " HR_FORMAT ", P: " PTR_FORMAT ", N: " PTR_FORMAT ", age: %4d",
+                             HR_FORMAT_PARAMS(curr),
+                             p2i(curr->prev_top_at_mark_start()),
+                             p2i(curr->next_top_at_mark_start()),
+                             curr->age_in_surv_rate_group_cond());
+      curr = curr->get_next_young_region();
+    }
+  }
+
+  gclog_or_tty->cr();
+}
hotspot/src/share/vm/gc/g1/youngList.hpp  (new file, 104 lines)
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_YOUNGLIST_HPP
+#define SHARE_VM_GC_G1_YOUNGLIST_HPP
+
+#include "memory/allocation.hpp"
+#include "runtime/globals.hpp"
+
+class YoungList : public CHeapObj<mtGC> {
+private:
+  G1CollectedHeap* _g1h;
+
+  HeapRegion* _head;
+
+  HeapRegion* _survivor_head;
+  HeapRegion* _survivor_tail;
+
+  HeapRegion* _curr;
+
+  uint _length;
+  uint _survivor_length;
+
+  size_t _last_sampled_rs_lengths;
+  size_t _sampled_rs_lengths;
+
+  void empty_list(HeapRegion* list);
+
+public:
+  YoungList(G1CollectedHeap* g1h);
+
+  void push_region(HeapRegion* hr);
+  void add_survivor_region(HeapRegion* hr);
+
+  void empty_list();
+  bool is_empty() { return _length == 0; }
+  uint length() { return _length; }
+  uint eden_length() { return length() - survivor_length(); }
+  uint survivor_length() { return _survivor_length; }
+
+  // Currently we do not keep track of the used byte sum for the
+  // young list and the survivors and it'd be quite a lot of work to
+  // do so. When we'll eventually replace the young list with
+  // instances of HeapRegionLinkedList we'll get that for free. So,
+  // we'll report the more accurate information then.
+  size_t eden_used_bytes() {
+    assert(length() >= survivor_length(), "invariant");
+    return (size_t) eden_length() * HeapRegion::GrainBytes;
+  }
+  size_t survivor_used_bytes() {
+    return (size_t) survivor_length() * HeapRegion::GrainBytes;
+  }
+
+  void rs_length_sampling_init();
+  bool rs_length_sampling_more();
+  void rs_length_sampling_next();
+
+  void reset_sampled_info() {
+    _last_sampled_rs_lengths = 0;
+  }
+  size_t sampled_rs_lengths() { return _last_sampled_rs_lengths; }
+
+  // for development purposes
+  void reset_auxilary_lists();
+  void clear() { _head = NULL; _length = 0; }
+
+  void clear_survivors() {
+    _survivor_head   = NULL;
+    _survivor_tail   = NULL;
+    _survivor_length = 0;
+  }
+
+  HeapRegion* first_region() { return _head; }
+  HeapRegion* first_survivor_region() { return _survivor_head; }
+  HeapRegion* last_survivor_region() { return _survivor_tail; }
+
+  // debugging
+  bool check_list_well_formed();
+  bool check_list_empty(bool check_sample = true);
+  void print();
+};
+
+#endif // SHARE_VM_GC_G1_YOUNGLIST_HPP
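
The two new files above lift YoungList out of g1CollectedHeap.cpp/.hpp essentially verbatim. The data structure itself is an intrusive singly-linked list: each HeapRegion carries its own next pointer, so pushing a region is O(1) and requires no allocation, and emptying the list just walks and unlinks. A minimal standalone model of that pattern (simplified stand-in types, not the HotSpot classes):

// Minimal stand-in for HeapRegion: the 'next' link lives in the node itself,
// which is what makes the list intrusive (no separate list-node allocation).
#include <cassert>
#include <cstdio>

struct RegionModel {
  int          id;
  bool         young = false;
  RegionModel* next  = nullptr;
};

class YoungListModel {
  RegionModel* _head   = nullptr;
  unsigned     _length = 0;
 public:
  void push_region(RegionModel* r) {
    assert(!r->young && r->next == nullptr);
    r->young = true;
    r->next  = _head;   // push on front, mirrors YoungList::push_region
    _head    = r;
    ++_length;
  }
  void empty_list() {   // unlink every region, mirrors YoungList::empty_list
    for (RegionModel* r = _head; r != nullptr; ) {
      RegionModel* next = r->next;
      r->next  = nullptr;
      r->young = false;
      r = next;
    }
    _head   = nullptr;
    _length = 0;
  }
  unsigned length() const { return _length; }
};

int main() {
  RegionModel a{1}, b{2};
  YoungListModel list;
  list.push_region(&a);
  list.push_region(&b);
  std::printf("young regions: %u\n", list.length()); // prints 2
  list.empty_list();
  assert(list.length() == 0);
  return 0;
}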
@@ -529,10 +529,7 @@ void PSAdaptiveSizePolicy::compute_old_gen_free_space(
       set_decide_at_full_gc(decide_at_full_gc_true);
       adjust_promo_for_pause_time(is_full_gc, &desired_promo_size, &desired_eden_size);
     }
-  } else if (_avg_minor_pause->padded_average() > gc_minor_pause_goal_sec()) {
-    // Adjust only for the minor pause time goal
-    adjust_promo_for_minor_pause_time(is_full_gc, &desired_promo_size, &desired_eden_size);
-  } else if(adjusted_mutator_cost() < _throughput_goal) {
+  } else if (adjusted_mutator_cost() < _throughput_goal) {
     // This branch used to require that (mutator_cost() > 0.0 in 1.4.2.
     // This sometimes resulted in skipping to the minimize footprint
     // code. Change this to try and reduce GC time if mutator time is
@@ -670,36 +667,6 @@ void PSAdaptiveSizePolicy::decay_supplemental_growth(bool is_full_gc) {
   }
 }

-void PSAdaptiveSizePolicy::adjust_promo_for_minor_pause_time(bool is_full_gc,
-    size_t* desired_promo_size_ptr, size_t* desired_eden_size_ptr) {
-
-  if (PSAdjustTenuredGenForMinorPause) {
-    if (is_full_gc) {
-      set_decide_at_full_gc(decide_at_full_gc_true);
-    }
-    // If the desired eden size is as small as it will get,
-    // try to adjust the old gen size.
-    if (*desired_eden_size_ptr <= _space_alignment) {
-      // Vary the old gen size to reduce the young gen pause. This
-      // may not be a good idea. This is just a test.
-      if (minor_pause_old_estimator()->decrement_will_decrease()) {
-        set_change_old_gen_for_min_pauses(decrease_old_gen_for_min_pauses_true);
-        *desired_promo_size_ptr =
-          _promo_size - promo_decrement_aligned_down(*desired_promo_size_ptr);
-      } else {
-        set_change_old_gen_for_min_pauses(increase_old_gen_for_min_pauses_true);
-        size_t promo_heap_delta =
-          promo_increment_with_supplement_aligned_up(*desired_promo_size_ptr);
-        if ((*desired_promo_size_ptr + promo_heap_delta) >
-            *desired_promo_size_ptr) {
-          *desired_promo_size_ptr =
-            _promo_size + promo_heap_delta;
-        }
-      }
-    }
-  }
-}
-
 void PSAdaptiveSizePolicy::adjust_eden_for_minor_pause_time(bool is_full_gc,
                                                             size_t* desired_eden_size_ptr) {

@@ -733,10 +700,7 @@ void PSAdaptiveSizePolicy::adjust_promo_for_pause_time(bool is_full_gc,
   // a change less than the required alignment is probably not worth
   // attempting.

-  if (_avg_minor_pause->padded_average() > _avg_major_pause->padded_average()) {
-    adjust_promo_for_minor_pause_time(is_full_gc, desired_promo_size_ptr, desired_eden_size_ptr);
-    // major pause adjustments
-  } else if (is_full_gc) {
+  if (_avg_minor_pause->padded_average() <= _avg_major_pause->padded_average() && is_full_gc) {
     // Adjust for the major pause time only at full gc's because the
     // affects of a change can only be seen at full gc's.

@@ -774,44 +738,8 @@ void PSAdaptiveSizePolicy::adjust_eden_for_pause_time(bool is_full_gc,
   // a change less than the required alignment is probably not worth
   // attempting.
   if (_avg_minor_pause->padded_average() > _avg_major_pause->padded_average()) {
-    adjust_eden_for_minor_pause_time(is_full_gc,
-                                     desired_eden_size_ptr);
-    // major pause adjustments
-  } else if (is_full_gc) {
-    // Adjust for the major pause time only at full gc's because the
-    // affects of a change can only be seen at full gc's.
-    if (PSAdjustYoungGenForMajorPause) {
-      // If the promo size is at the minimum (i.e., the old gen
-      // size will not actually decrease), consider changing the
-      // young gen size.
-      if (*desired_promo_size_ptr < _space_alignment) {
-        // If increasing the young generation will decrease the old gen
-        // pause, do it.
-        // During startup there is noise in the statistics for deciding
-        // on whether to increase or decrease the young gen size. For
-        // some number of iterations, just try to increase the young
-        // gen size if the major pause is too long to try and establish
-        // good statistics for later decisions.
-        if (major_pause_young_estimator()->increment_will_decrease() ||
-            (_young_gen_change_for_major_pause_count
-             <= AdaptiveSizePolicyInitializingSteps)) {
-          set_change_young_gen_for_maj_pauses(
-            increase_young_gen_for_maj_pauses_true);
-          eden_heap_delta = eden_increment_aligned_up(*desired_eden_size_ptr);
-          *desired_eden_size_ptr = _eden_size + eden_heap_delta;
-          _young_gen_change_for_major_pause_count++;
-        } else {
-          // Record that decreasing the young gen size would decrease
-          // the major pause
-          set_change_young_gen_for_maj_pauses(
-            decrease_young_gen_for_maj_pauses_true);
-          eden_heap_delta = eden_decrement_aligned_down(*desired_eden_size_ptr);
-          *desired_eden_size_ptr = _eden_size - eden_heap_delta;
-        }
-      }
-    }
+    adjust_eden_for_minor_pause_time(is_full_gc, desired_eden_size_ptr);
   }

   if (PrintAdaptiveSizePolicy && Verbose) {
     gclog_or_tty->print_cr(
       "PSAdaptiveSizePolicy::adjust_eden_for_pause_time "
@@ -134,10 +134,6 @@ class PSAdaptiveSizePolicy : public AdaptiveSizePolicy {
   AdaptivePaddedAverage* avg_major_pause() const { return _avg_major_pause; }
   double gc_minor_pause_goal_sec() const { return _gc_minor_pause_goal_sec; }

-  // Change the young generation size to achieve a minor GC pause time goal
-  void adjust_promo_for_minor_pause_time(bool is_full_gc,
-                                         size_t* desired_promo_size_ptr,
-                                         size_t* desired_eden_size_ptr);
   void adjust_eden_for_minor_pause_time(bool is_full_gc,
                                         size_t* desired_eden_size_ptr);
   // Change the generation sizes to achieve a GC pause time goal
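
With PSAdjustTenuredGenForMinorPause and PSAdjustYoungGenForMajorPause gone, the pause-time policy above reduces to a single decision: if the padded minor-pause average dominates, resize eden; otherwise adjust the promoted size, and only at a full GC, where the effect of the change can actually be observed. A toy model of just that control flow (hypothetical names, not the HotSpot code):

// Toy model of the simplified decision that remains after the removal:
// whichever padded average pause is larger picks the generation to resize.
#include <cstdio>

struct PauseStats { double padded_minor_pause; double padded_major_pause; };

enum class Adjust { EdenForMinorPause, PromoForMajorPause, None };

Adjust choose_adjustment(const PauseStats& s, bool is_full_gc) {
  if (s.padded_minor_pause > s.padded_major_pause) {
    return Adjust::EdenForMinorPause;   // minor pauses dominate
  } else if (is_full_gc) {
    return Adjust::PromoForMajorPause;  // effects only visible at full GC
  }
  return Adjust::None;
}

int main() {
  PauseStats s{0.012, 0.250};
  const char* names[] = {"eden-for-minor", "promo-for-major", "none"};
  std::printf("decision: %s\n", names[(int)choose_adjustment(s, true)]);
  return 0;
}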
@@ -1351,13 +1351,6 @@ HeapWord*
 PSParallelCompact::compute_dense_prefix(const SpaceId id,
                                         bool maximum_compaction)
 {
-  if (ParallelOldGCSplitALot) {
-    if (_space_info[id].dense_prefix() != _space_info[id].space()->bottom()) {
-      // The value was chosen to provoke splitting a young gen space; use it.
-      return _space_info[id].dense_prefix();
-    }
-  }
-
   const size_t region_size = ParallelCompactData::RegionSize;
   const ParallelCompactData& sd = summary_data();

@@ -1430,220 +1423,9 @@ PSParallelCompact::compute_dense_prefix(const SpaceId id,
     }
   }

-#if 0
-  // Something to consider: if the region with the best ratio is 'close to' the
-  // first region w/free space, choose the first region with free space
-  // ("first-free"). The first-free region is usually near the start of the
-  // heap, which means we are copying most of the heap already, so copy a bit
-  // more to get complete compaction.
-  if (pointer_delta(best_cp, full_cp, sizeof(RegionData)) < 4) {
-    _maximum_compaction_gc_num = total_invocations();
-    best_cp = full_cp;
-  }
-#endif // #if 0
-
   return sd.region_to_addr(best_cp);
 }

-#ifndef PRODUCT
-void
-PSParallelCompact::fill_with_live_objects(SpaceId id, HeapWord* const start,
-                                          size_t words)
-{
-  if (TraceParallelOldGCSummaryPhase) {
-    tty->print_cr("fill_with_live_objects [" PTR_FORMAT " " PTR_FORMAT ") "
-                  SIZE_FORMAT, p2i(start), p2i(start + words), words);
-  }
-
-  ObjectStartArray* const start_array = _space_info[id].start_array();
-  CollectedHeap::fill_with_objects(start, words);
-  for (HeapWord* p = start; p < start + words; p += oop(p)->size()) {
-    _mark_bitmap.mark_obj(p, words);
-    _summary_data.add_obj(p, words);
-    start_array->allocate_block(p);
-  }
-}
-
-void
-PSParallelCompact::summarize_new_objects(SpaceId id, HeapWord* start)
-{
-  ParallelCompactData& sd = summary_data();
-  MutableSpace* space = _space_info[id].space();
-
-  // Find the source and destination start addresses.
-  HeapWord* const src_addr = sd.region_align_down(start);
-  HeapWord* dst_addr;
-  if (src_addr < start) {
-    dst_addr = sd.addr_to_region_ptr(src_addr)->destination();
-  } else if (src_addr > space->bottom()) {
-    // The start (the original top() value) is aligned to a region boundary so
-    // the associated region does not have a destination. Compute the
-    // destination from the previous region.
-    RegionData* const cp = sd.addr_to_region_ptr(src_addr) - 1;
-    dst_addr = cp->destination() + cp->data_size();
-  } else {
-    // Filling the entire space.
-    dst_addr = space->bottom();
-  }
-  assert(dst_addr != NULL, "sanity");
-
-  // Update the summary data.
-  bool result = _summary_data.summarize(_space_info[id].split_info(),
-                                        src_addr, space->top(), NULL,
-                                        dst_addr, space->end(),
-                                        _space_info[id].new_top_addr());
-  assert(result, "should not fail: bad filler object size");
-}
-
-void
-PSParallelCompact::provoke_split_fill_survivor(SpaceId id)
-{
-  if (total_invocations() % (ParallelOldGCSplitInterval * 3) != 0) {
-    return;
-  }
-
-  MutableSpace* const space = _space_info[id].space();
-  if (space->is_empty()) {
-    HeapWord* b = space->bottom();
-    HeapWord* t = b + space->capacity_in_words() / 2;
-    space->set_top(t);
-    if (ZapUnusedHeapArea) {
-      space->set_top_for_allocations();
-    }
-
-    size_t min_size = CollectedHeap::min_fill_size();
-    size_t obj_len = min_size;
-    while (b + obj_len <= t) {
-      CollectedHeap::fill_with_object(b, obj_len);
-      mark_bitmap()->mark_obj(b, obj_len);
-      summary_data().add_obj(b, obj_len);
-      b += obj_len;
-      obj_len = (obj_len & (min_size*3)) + min_size; // 8 16 24 32 8 16 24 32 ...
-    }
-    if (b < t) {
-      // The loop didn't completely fill to t (top); adjust top downward.
-      space->set_top(b);
-      if (ZapUnusedHeapArea) {
-        space->set_top_for_allocations();
-      }
-    }
-
-    HeapWord** nta = _space_info[id].new_top_addr();
-    bool result = summary_data().summarize(_space_info[id].split_info(),
-                                           space->bottom(), space->top(), NULL,
-                                           space->bottom(), space->end(), nta);
-    assert(result, "space must fit into itself");
-  }
-}
-
-void
-PSParallelCompact::provoke_split(bool & max_compaction)
-{
-  if (total_invocations() % ParallelOldGCSplitInterval != 0) {
-    return;
-  }
-
-  const size_t region_size = ParallelCompactData::RegionSize;
-  ParallelCompactData& sd = summary_data();
-
-  MutableSpace* const eden_space = _space_info[eden_space_id].space();
-  MutableSpace* const from_space = _space_info[from_space_id].space();
-  const size_t eden_live = pointer_delta(eden_space->top(),
-                                         _space_info[eden_space_id].new_top());
-  const size_t from_live = pointer_delta(from_space->top(),
-                                         _space_info[from_space_id].new_top());
-
-  const size_t min_fill_size = CollectedHeap::min_fill_size();
-  const size_t eden_free = pointer_delta(eden_space->end(), eden_space->top());
-  const size_t eden_fillable = eden_free >= min_fill_size ? eden_free : 0;
-  const size_t from_free = pointer_delta(from_space->end(), from_space->top());
-  const size_t from_fillable = from_free >= min_fill_size ? from_free : 0;
-
-  // Choose the space to split; need at least 2 regions live (or fillable).
-  SpaceId id;
-  MutableSpace* space;
-  size_t live_words;
-  size_t fill_words;
-  if (eden_live + eden_fillable >= region_size * 2) {
-    id = eden_space_id;
-    space = eden_space;
-    live_words = eden_live;
-    fill_words = eden_fillable;
-  } else if (from_live + from_fillable >= region_size * 2) {
-    id = from_space_id;
-    space = from_space;
-    live_words = from_live;
-    fill_words = from_fillable;
-  } else {
-    return; // Give up.
-  }
-  assert(fill_words == 0 || fill_words >= min_fill_size, "sanity");
-
-  if (live_words < region_size * 2) {
-    // Fill from top() to end() w/live objects of mixed sizes.
-    HeapWord* const fill_start = space->top();
-    live_words += fill_words;
-
-    space->set_top(fill_start + fill_words);
-    if (ZapUnusedHeapArea) {
-      space->set_top_for_allocations();
-    }
-
-    HeapWord* cur_addr = fill_start;
-    while (fill_words > 0) {
-      const size_t r = (size_t)os::random() % (region_size / 2) + min_fill_size;
-      size_t cur_size = MIN2(align_object_size_(r), fill_words);
-      if (fill_words - cur_size < min_fill_size) {
-        cur_size = fill_words; // Avoid leaving a fragment too small to fill.
-      }
-
-      CollectedHeap::fill_with_object(cur_addr, cur_size);
-      mark_bitmap()->mark_obj(cur_addr, cur_size);
-      sd.add_obj(cur_addr, cur_size);
-
-      cur_addr += cur_size;
-      fill_words -= cur_size;
-    }
-
-    summarize_new_objects(id, fill_start);
-  }
-
-  max_compaction = false;
-
-  // Manipulate the old gen so that it has room for about half of the live data
-  // in the target young gen space (live_words / 2).
-  id = old_space_id;
-  space = _space_info[id].space();
-  const size_t free_at_end = space->free_in_words();
-  const size_t free_target = align_object_size(live_words / 2);
-  const size_t dead = pointer_delta(space->top(), _space_info[id].new_top());
-
-  if (free_at_end >= free_target + min_fill_size) {
-    // Fill space above top() and set the dense prefix so everything survives.
-    HeapWord* const fill_start = space->top();
-    const size_t fill_size = free_at_end - free_target;
-    space->set_top(space->top() + fill_size);
-    if (ZapUnusedHeapArea) {
-      space->set_top_for_allocations();
-    }
-    fill_with_live_objects(id, fill_start, fill_size);
-    summarize_new_objects(id, fill_start);
-    _space_info[id].set_dense_prefix(sd.region_align_down(space->top()));
-  } else if (dead + free_at_end > free_target) {
-    // Find a dense prefix that makes the right amount of space available.
-    HeapWord* cur = sd.region_align_down(space->top());
-    HeapWord* cur_destination = sd.addr_to_region_ptr(cur)->destination();
-    size_t dead_to_right = pointer_delta(space->end(), cur_destination);
-    while (dead_to_right < free_target) {
-      cur -= region_size;
-      cur_destination = sd.addr_to_region_ptr(cur)->destination();
-      dead_to_right = pointer_delta(space->end(), cur_destination);
-    }
-    _space_info[id].set_dense_prefix(cur);
-  }
-}
-#endif // #ifndef PRODUCT
-
 void PSParallelCompact::summarize_spaces_quick()
 {
   for (unsigned int i = 0; i < last_space_id; ++i) {
@@ -1655,12 +1437,6 @@ void PSParallelCompact::summarize_spaces_quick()
     assert(result, "space must fit into itself");
     _space_info[i].set_dense_prefix(space->bottom());
   }
-
-#ifndef PRODUCT
-  if (ParallelOldGCSplitALot) {
-    provoke_split_fill_survivor(to_space_id);
-  }
-#endif // #ifndef PRODUCT
 }

 void PSParallelCompact::fill_dense_prefix_end(SpaceId id)
@@ -1745,8 +1521,7 @@ void
 PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction)
 {
   assert(id < last_space_id, "id out of range");
-  assert(_space_info[id].dense_prefix() == _space_info[id].space()->bottom() ||
-         ParallelOldGCSplitALot && id == old_space_id,
+  assert(_space_info[id].dense_prefix() == _space_info[id].space()->bottom(),
          "should have been reset in summarize_spaces_quick()");

   const MutableSpace* space = _space_info[id].space();
@@ -1866,11 +1641,6 @@ void PSParallelCompact::summary_phase(ParCompactionManager* cm,
     // XXX - should also try to expand
     maximum_compaction = true;
   }
-#ifndef PRODUCT
-  if (ParallelOldGCSplitALot && old_space_total_live < old_capacity) {
-    provoke_split(maximum_compaction);
-  }
-#endif // #ifndef PRODUCT

   // Old generations.
   summarize_space(old_space_id, maximum_compaction);
@@ -1059,24 +1059,6 @@ class PSParallelCompact : AllStatic {
   // Clear the summary data source_region field for the specified addresses.
   static void clear_source_region(HeapWord* beg_addr, HeapWord* end_addr);

-#ifndef PRODUCT
-  // Routines to provoke splitting a young gen space (ParallelOldGCSplitALot).
-
-  // Fill the region [start, start + words) with live object(s). Only usable
-  // for the old and permanent generations.
-  static void fill_with_live_objects(SpaceId id, HeapWord* const start,
-                                     size_t words);
-  // Include the new objects in the summary data.
-  static void summarize_new_objects(SpaceId id, HeapWord* start);
-
-  // Add live objects to a survivor space since it's rare that both survivors
-  // are non-empty.
-  static void provoke_split_fill_survivor(SpaceId id);
-
-  // Add live objects and/or choose the dense prefix to provoke splitting.
-  static void provoke_split(bool & maximum_compaction);
-#endif
-
   static void summarize_spaces_quick();
   static void summarize_space(SpaceId id, bool maximum_compaction);
   static void summary_phase(ParCompactionManager* cm, bool maximum_compaction);
@@ -297,11 +297,6 @@ bool PSScavenge::invoke_no_policy() {
     young_gen->eden_space()->accumulate_statistics();
   }

-  if (ZapUnusedHeapArea) {
-    // Save information needed to minimize mangling
-    heap->record_gen_tops_before_GC();
-  }
-
  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

@@ -344,13 +339,10 @@ bool PSScavenge::invoke_no_policy() {
       CardTableExtension::verify_all_young_refs_imprecise();
     }

-    if (!ScavengeWithObjectsInToSpace) {
-      assert(young_gen->to_space()->is_empty(),
-             "Attempt to scavenge with live objects in to_space");
-      young_gen->to_space()->clear(SpaceDecorator::Mangle);
-    } else if (ZapUnusedHeapArea) {
-      young_gen->to_space()->mangle_unused_area();
-    }
+    assert(young_gen->to_space()->is_empty(),
+           "Attempt to scavenge with live objects in to_space");
+    young_gen->to_space()->clear(SpaceDecorator::Mangle);
     save_to_space_top_before_gc();

 #if defined(COMPILER2) || INCLUDE_JVMCI
|
|||||||
heap->print_heap_after_gc();
|
heap->print_heap_after_gc();
|
||||||
heap->trace_heap_after_gc(&_gc_tracer);
|
heap->trace_heap_after_gc(&_gc_tracer);
|
||||||
|
|
||||||
if (ZapUnusedHeapArea) {
|
|
||||||
young_gen->eden_space()->check_mangled_unused_area_complete();
|
|
||||||
young_gen->from_space()->check_mangled_unused_area_complete();
|
|
||||||
young_gen->to_space()->check_mangled_unused_area_complete();
|
|
||||||
}
|
|
||||||
|
|
||||||
scavenge_exit.update();
|
scavenge_exit.update();
|
||||||
|
|
||||||
if (PrintGCTaskTimeStamps) {
|
if (PrintGCTaskTimeStamps) {
|
||||||
@ -768,15 +754,13 @@ bool PSScavenge::should_attempt_scavenge() {
|
|||||||
PSYoungGen* young_gen = heap->young_gen();
|
PSYoungGen* young_gen = heap->young_gen();
|
||||||
PSOldGen* old_gen = heap->old_gen();
|
PSOldGen* old_gen = heap->old_gen();
|
||||||
|
|
||||||
if (!ScavengeWithObjectsInToSpace) {
|
// Do not attempt to promote unless to_space is empty
|
||||||
// Do not attempt to promote unless to_space is empty
|
if (!young_gen->to_space()->is_empty()) {
|
||||||
if (!young_gen->to_space()->is_empty()) {
|
_consecutive_skipped_scavenges++;
|
||||||
_consecutive_skipped_scavenges++;
|
if (UsePerfData) {
|
||||||
if (UsePerfData) {
|
counters->update_scavenge_skipped(to_space_not_empty);
|
||||||
counters->update_scavenge_skipped(to_space_not_empty);
|
|
||||||
}
|
|
||||||
return false;
|
|
||||||
}
|
}
|
||||||
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Test to see if the scavenge will likely fail.
|
// Test to see if the scavenge will likely fail.
|
||||||
|
@@ -24,6 +24,7 @@

 #include "precompiled.hpp"
 #include "gc/serial/defNewGeneration.inline.hpp"
+#include "gc/shared/cardTableRS.hpp"
 #include "gc/shared/collectorCounters.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
 #include "gc/shared/gcLocker.inline.hpp"
@@ -33,7 +34,6 @@
 #include "gc/shared/gcTraceTime.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/genOopClosures.inline.hpp"
-#include "gc/shared/genRemSet.hpp"
 #include "gc/shared/generationSpec.hpp"
 #include "gc/shared/referencePolicy.hpp"
 #include "gc/shared/space.inline.hpp"
@@ -69,8 +69,7 @@ bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {

 DefNewGeneration::KeepAliveClosure::
 KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
-  GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
-  _rs = (CardTableRS*)rs;
+  _rs = GenCollectedHeap::heap()->rem_set();
 }

 void DefNewGeneration::KeepAliveClosure::do_oop(oop* p)   { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
@@ -117,7 +117,7 @@ void GenMarkSweep::invoke_at_safepoint(ReferenceProcessor* rp, bool clear_all_so
   // can clear the card table. Otherwise, we must invalidate
   // it (consider all cards dirty). In the future, we might consider doing
   // compaction within generations only, and doing card-table sliding.
-  GenRemSet* rs = gch->rem_set();
+  CardTableRS* rs = gch->rem_set();
   Generation* old_gen = gch->old_gen();

   // Clear/invalidate below make use of the "prev_used_regions" saved earlier.
@@ -196,7 +196,9 @@ public:
   virtual void do_cld(ClassLoaderData* cld);
   void do_cld_nv(ClassLoaderData* cld);

-  void set_ref_processor(ReferenceProcessor* rp) { _ref_processor = rp; }
+  void set_ref_processor(ReferenceProcessor* rp) {
+    set_ref_processor_internal(rp);
+  }
 };

 class PreservedMark VALUE_OBJ_CLASS_SPEC {
@@ -42,7 +42,7 @@

 TenuredGeneration::TenuredGeneration(ReservedSpace rs,
                                      size_t initial_byte_size,
-                                     GenRemSet* remset) :
+                                     CardTableRS* remset) :
   CardGeneration(rs, initial_byte_size, remset)
 {
   HeapWord* bottom = (HeapWord*) _virtual_space.low();
@@ -58,7 +58,7 @@ class TenuredGeneration: public CardGeneration {
 public:
   TenuredGeneration(ReservedSpace rs,
                     size_t initial_byte_size,
-                    GenRemSet* remset);
+                    CardTableRS* remset);

   Generation::Name kind() { return Generation::MarkSweepCompact; }

@@ -26,9 +26,9 @@

 #include "gc/shared/blockOffsetTable.inline.hpp"
 #include "gc/shared/cardGeneration.inline.hpp"
+#include "gc/shared/cardTableRS.hpp"
 #include "gc/shared/gcLocker.hpp"
 #include "gc/shared/genOopClosures.inline.hpp"
-#include "gc/shared/genRemSet.hpp"
 #include "gc/shared/generationSpec.hpp"
 #include "gc/shared/space.inline.hpp"
 #include "memory/iterator.hpp"
@@ -37,7 +37,7 @@

 CardGeneration::CardGeneration(ReservedSpace rs,
                                size_t initial_byte_size,
-                               GenRemSet* remset) :
+                               CardTableRS* remset) :
   Generation(rs, initial_byte_size), _rs(remset),
   _shrink_factor(0), _min_heap_delta_bytes(), _capacity_at_prologue(),
   _used_at_prologue()
@@ -37,7 +37,7 @@ class CardGeneration: public Generation {
   friend class VMStructs;
 protected:
   // This is shared with other generations.
-  GenRemSet* _rs;
+  CardTableRS* _rs;
   // This is local to this generation.
   BlockOffsetSharedArray* _bts;

@@ -52,7 +52,7 @@ class CardGeneration: public Generation {
   size_t _capacity_at_prologue;
   size_t _used_at_prologue;

-  CardGeneration(ReservedSpace rs, size_t initial_byte_size, GenRemSet* remset);
+  CardGeneration(ReservedSpace rs, size_t initial_byte_size, CardTableRS* remset);

   virtual void assert_correct_size_change_locking() = 0;

@@ -34,8 +34,48 @@
 #include "runtime/os.hpp"
 #include "utilities/macros.hpp"

+class HasAccumulatedModifiedOopsClosure : public KlassClosure {
+  bool _found;
+ public:
+  HasAccumulatedModifiedOopsClosure() : _found(false) {}
+  void do_klass(Klass* klass) {
+    if (_found) {
+      return;
+    }
+
+    if (klass->has_accumulated_modified_oops()) {
+      _found = true;
+    }
+  }
+  bool found() {
+    return _found;
+  }
+};
+
+bool KlassRemSet::mod_union_is_clear() {
+  HasAccumulatedModifiedOopsClosure closure;
+  ClassLoaderDataGraph::classes_do(&closure);
+
+  return !closure.found();
+}
+
+
+class ClearKlassModUnionClosure : public KlassClosure {
+ public:
+  void do_klass(Klass* klass) {
+    if (klass->has_accumulated_modified_oops()) {
+      klass->clear_accumulated_modified_oops();
+    }
+  }
+};
+
+void KlassRemSet::clear_mod_union() {
+  ClearKlassModUnionClosure closure;
+  ClassLoaderDataGraph::classes_do(&closure);
+}
+
 CardTableRS::CardTableRS(MemRegion whole_heap) :
-  GenRemSet(),
+  _bs(NULL),
   _cur_youngergen_card_val(youngergenP1_card)
 {
   _ct_bs = new CardTableModRefBSForCTRS(whole_heap);
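
The two closures added above follow HotSpot's usual visitor idiom: a small object with a do_klass() callback is handed to ClassLoaderDataGraph::classes_do(), once to scan for the accumulated-modified-oops flag and once to clear it. A self-contained model of the same scan-then-clear pattern (stand-in types; the real Klass and class-loader-data graph walk are far richer):

// Standalone model of the closure/visitor pattern used above.
#include <cstdio>
#include <vector>

struct KlassModel { bool accumulated_modified_oops = false; };

struct KlassClosureModel {
  virtual void do_klass(KlassModel* k) = 0;
  virtual ~KlassClosureModel() = default;
};

// Stand-in for ClassLoaderDataGraph::classes_do: apply the closure everywhere.
void classes_do(std::vector<KlassModel>& all, KlassClosureModel* cl) {
  for (KlassModel& k : all) cl->do_klass(&k);
}

// Mirrors HasAccumulatedModifiedOopsClosure: latch 'found' on the first hit.
struct HasModifiedOops : KlassClosureModel {
  bool found = false;
  void do_klass(KlassModel* k) override {
    if (!found && k->accumulated_modified_oops) found = true;
  }
};

// Mirrors ClearKlassModUnionClosure: clear the flag everywhere.
struct ClearModUnion : KlassClosureModel {
  void do_klass(KlassModel* k) override { k->accumulated_modified_oops = false; }
};

int main() {
  std::vector<KlassModel> klasses(3);
  klasses[1].accumulated_modified_oops = true;

  HasModifiedOops scan;
  classes_do(klasses, &scan);
  std::printf("mod union clear before: %s\n", scan.found ? "no" : "yes");

  ClearModUnion clear;
  classes_do(klasses, &clear);

  HasModifiedOops rescan;
  classes_do(klasses, &rescan);
  std::printf("mod union clear after: %s\n", rescan.found ? "no" : "yes");
  return 0;
}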
@@ -26,16 +26,26 @@
 #define SHARE_VM_GC_SHARED_CARDTABLERS_HPP

 #include "gc/shared/cardTableModRefBSForCTRS.hpp"
-#include "gc/shared/genRemSet.hpp"
 #include "memory/memRegion.hpp"

 class Space;
 class OopsInGenClosure;

-// This kind of "GenRemSet" uses a card table both as shared data structure
+// Helper to remember modified oops in all klasses.
+class KlassRemSet {
+  bool _accumulate_modified_oops;
+ public:
+  KlassRemSet() : _accumulate_modified_oops(false) {}
+  void set_accumulate_modified_oops(bool value) { _accumulate_modified_oops = value; }
+  bool accumulate_modified_oops() { return _accumulate_modified_oops; }
+  bool mod_union_is_clear();
+  void clear_mod_union();
+};
+
+// This RemSet uses a card table both as shared data structure
 // for a mod ref barrier set and for the rem set information.

-class CardTableRS: public GenRemSet {
+class CardTableRS: public CHeapObj<mtGC> {
   friend class VMStructs;
   // Below are private classes used in impl.
   friend class VerifyCTSpaceClosure;
@@ -54,9 +64,10 @@ class CardTableRS: public GenRemSet {
     return CardTableModRefBSForCTRS::card_is_dirty_wrt_gen_iter(cv);
   }

-  CardTableModRefBSForCTRS* _ct_bs;
+  KlassRemSet _klass_rem_set;
+  BarrierSet* _bs;

-  virtual void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl, uint n_threads);
+  CardTableModRefBSForCTRS* _ct_bs;

   void verify_space(Space* s, HeapWord* gen_start);

@@ -104,11 +115,18 @@ public:
   CardTableRS(MemRegion whole_heap);
   ~CardTableRS();

-  // *** GenRemSet functions.
-  CardTableRS* as_CardTableRS() { return this; }
+  // Return the barrier set associated with "this."
+  BarrierSet* bs() { return _bs; }
+
+  // Set the barrier set.
+  void set_bs(BarrierSet* bs) { _bs = bs; }
+
+  KlassRemSet* klass_rem_set() { return &_klass_rem_set; }

   CardTableModRefBSForCTRS* ct_bs() { return _ct_bs; }

+  void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl, uint n_threads);
+
   // Override.
   void prepare_for_younger_refs_iterate(bool parallel);

@@ -152,7 +152,7 @@ bool CollectorPolicy::use_should_clear_all_soft_refs(bool v) {
   return result;
 }

-GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap) {
+CardTableRS* CollectorPolicy::create_rem_set(MemRegion whole_heap) {
   return new CardTableRS(whole_heap);
 }

@@ -173,7 +173,7 @@ size_t CollectorPolicy::compute_heap_alignment() {
   // byte entry and the os page size is 4096, the maximum heap size should
   // be 512*4096 = 2MB aligned.

-  size_t alignment = GenRemSet::max_alignment_constraint();
+  size_t alignment = CardTableRS::ct_max_alignment_constraint();

   if (UseLargePages) {
     // In presence of large pages we have to make sure that our
@@ -26,7 +26,7 @@
 #define SHARE_VM_GC_SHARED_COLLECTORPOLICY_HPP

 #include "gc/shared/barrierSet.hpp"
-#include "gc/shared/genRemSet.hpp"
+#include "gc/shared/cardTableRS.hpp"
 #include "gc/shared/generationSpec.hpp"
 #include "memory/allocation.hpp"
 #include "utilities/macros.hpp"
@@ -143,7 +143,7 @@ class CollectorPolicy : public CHeapObj<mtGC> {
 #endif // INCLUDE_ALL_GCS

-  virtual GenRemSet* create_rem_set(MemRegion reserved);
+  virtual CardTableRS* create_rem_set(MemRegion reserved);

   // This method controls how a collector satisfies a request
   // for a block of memory. "gc_time_limit_was_exceeded" will
@@ -60,12 +60,12 @@ GCIdMark::~GCIdMark() {
 }

 GCIdMarkAndRestore::GCIdMarkAndRestore() : _gc_id(GCId::create()) {
-  _previous_gc_id = GCId::current(); // will assert that the GC Id is not undefined
+  _previous_gc_id = GCId::current_raw();
   currentNamedthread()->set_gc_id(_gc_id);
 }

 GCIdMarkAndRestore::GCIdMarkAndRestore(uint gc_id) : _gc_id(gc_id) {
-  _previous_gc_id = GCId::current(); // will assert that the GC Id is not undefinied
+  _previous_gc_id = GCId::current_raw();
   currentNamedthread()->set_gc_id(_gc_id);
 }

@@ -433,7 +433,7 @@ void GenCollectedHeap::do_collection(bool full,
     return; // GC is disabled (e.g. JNI GetXXXCritical operation)
   }

-  GCIdMark gc_id_mark;
+  GCIdMarkAndRestore gc_id_mark;

   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                                       collector_policy()->should_clear_all_soft_refs();
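
Switching do_collection() from GCIdMark to GCIdMarkAndRestore matters when one collection nests inside another (for example a full collection triggered while another GC id is already current): the destructor now puts the previous id back instead of leaving it behind. A generic sketch of that RAII save/restore idiom (illustrative names, not the HotSpot classes):

// Global "current GC id" plus an RAII guard that saves the previous value on
// construction and restores it on destruction, like GCIdMarkAndRestore.
#include <cstdio>

static unsigned current_gc_id = 0;   // 0 stands in for "undefined"
static unsigned next_gc_id    = 1;

class GCIdMarkAndRestoreModel {
  unsigned _previous;
 public:
  GCIdMarkAndRestoreModel() : _previous(current_gc_id) {
    current_gc_id = next_gc_id++;    // install a fresh id
  }
  ~GCIdMarkAndRestoreModel() { current_gc_id = _previous; }
};

int main() {
  GCIdMarkAndRestoreModel outer;     // e.g. the enclosing collection
  std::printf("outer id: %u\n", current_gc_id);
  {
    GCIdMarkAndRestoreModel inner;   // nested collection gets its own id
    std::printf("inner id: %u\n", current_gc_id);
  }                                  // inner id popped here
  std::printf("restored id: %u\n", current_gc_id);
  return 0;
}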
@@ -823,7 +823,7 @@ bool GenCollectedHeap::create_cms_collector() {
   assert(_gen_policy->is_concurrent_mark_sweep_policy(), "Unexpected policy type");
   CMSCollector* collector =
     new CMSCollector((ConcurrentMarkSweepGeneration*)_old_gen,
-                     _rem_set->as_CardTableRS(),
+                     _rem_set,
                      _gen_policy->as_concurrent_mark_sweep_policy());

   if (collector == NULL || !collector->completed_initialization()) {
@@ -64,8 +64,8 @@ private:
   Generation* _young_gen;
   Generation* _old_gen;

-  // The singleton Gen Remembered Set.
-  GenRemSet* _rem_set;
+  // The singleton CardTable Remembered Set.
+  CardTableRS* _rem_set;

   // The generational collector policy.
   GenCollectorPolicy* _gen_policy;
@ -361,9 +361,9 @@ public:
|
|||||||
// collection.
|
// collection.
|
||||||
virtual bool is_maximal_no_gc() const;
|
virtual bool is_maximal_no_gc() const;
|
||||||
|
|
||||||
// This function returns the "GenRemSet" object that allows us to scan
|
// This function returns the CardTableRS object that allows us to scan
|
||||||
// generations in a fully generational heap.
|
// generations in a fully generational heap.
|
||||||
GenRemSet* rem_set() { return _rem_set; }
|
CardTableRS* rem_set() { return _rem_set; }
|
||||||
|
|
||||||
// Convenience function to be used in situations where the heap type can be
|
// Convenience function to be used in situations where the heap type can be
|
||||||
// asserted to be this type.
|
// asserted to be this type.
|
||||||
|
@ -157,7 +157,7 @@ class FilteringClosure: public ExtendedOopClosure {
|
|||||||
}
|
}
|
||||||
public:
|
public:
|
||||||
FilteringClosure(HeapWord* boundary, ExtendedOopClosure* cl) :
|
FilteringClosure(HeapWord* boundary, ExtendedOopClosure* cl) :
|
||||||
ExtendedOopClosure(cl->_ref_processor), _boundary(boundary),
|
ExtendedOopClosure(cl->ref_processor()), _boundary(boundary),
|
||||||
_cl(cl) {}
|
_cl(cl) {}
|
||||||
virtual void do_oop(oop* p);
|
virtual void do_oop(oop* p);
|
||||||
virtual void do_oop(narrowOop* p);
|
virtual void do_oop(narrowOop* p);
|
||||||
|
@ -29,7 +29,6 @@
|
|||||||
#include "gc/shared/cardTableRS.hpp"
|
#include "gc/shared/cardTableRS.hpp"
|
||||||
#include "gc/shared/genCollectedHeap.hpp"
|
#include "gc/shared/genCollectedHeap.hpp"
|
||||||
#include "gc/shared/genOopClosures.hpp"
|
#include "gc/shared/genOopClosures.hpp"
|
||||||
#include "gc/shared/genRemSet.hpp"
|
|
||||||
#include "gc/shared/generation.hpp"
|
#include "gc/shared/generation.hpp"
|
||||||
#include "gc/shared/space.hpp"
|
#include "gc/shared/space.hpp"
|
||||||
|
|
||||||
@ -43,8 +42,7 @@ inline void OopsInGenClosure::set_generation(Generation* gen) {
|
|||||||
_gen_boundary = _gen->reserved().start();
|
_gen_boundary = _gen->reserved().start();
|
||||||
// Barrier set for the heap, must be set after heap is initialized
|
// Barrier set for the heap, must be set after heap is initialized
|
||||||
if (_rs == NULL) {
|
if (_rs == NULL) {
|
||||||
GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
|
_rs = GenCollectedHeap::heap()->rem_set();
|
||||||
_rs = (CardTableRS*)rs;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "classfile/classLoaderData.hpp"
-#include "gc/shared/cardTableRS.hpp"
-#include "gc/shared/genRemSet.hpp"
-#include "oops/klass.hpp"
-
-// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
-// enumerate ref fields that have been modified (since the last
-// enumeration.)
-
-uintx GenRemSet::max_alignment_constraint() {
-  return CardTableRS::ct_max_alignment_constraint();
-}
-
-class HasAccumulatedModifiedOopsClosure : public KlassClosure {
-  bool _found;
- public:
-  HasAccumulatedModifiedOopsClosure() : _found(false) {}
-  void do_klass(Klass* klass) {
-    if (_found) {
-      return;
-    }
-
-    if (klass->has_accumulated_modified_oops()) {
-      _found = true;
-    }
-  }
-  bool found() {
-    return _found;
-  }
-};
-
-bool KlassRemSet::mod_union_is_clear() {
-  HasAccumulatedModifiedOopsClosure closure;
-  ClassLoaderDataGraph::classes_do(&closure);
-
-  return !closure.found();
-}
-
-
-class ClearKlassModUnionClosure : public KlassClosure {
- public:
-  void do_klass(Klass* klass) {
-    if (klass->has_accumulated_modified_oops()) {
-      klass->clear_accumulated_modified_oops();
-    }
-  }
-};
-
-void KlassRemSet::clear_mod_union() {
-  ClearKlassModUnionClosure closure;
-  ClassLoaderDataGraph::classes_do(&closure);
-}
@@ -1,133 +0,0 @@
-/*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_SHARED_GENREMSET_HPP
-#define SHARE_VM_GC_SHARED_GENREMSET_HPP
-
-#include "oops/oop.hpp"
-
-// A GenRemSet provides ways of iterating over pointers across generations.
-// (This is especially useful for older-to-younger.)
-
-class Generation;
-class BarrierSet;
-class OopsInGenClosure;
-class CardTableRS;
-
-// Helper to remember modified oops in all klasses.
-class KlassRemSet {
-  bool _accumulate_modified_oops;
- public:
-  KlassRemSet() : _accumulate_modified_oops(false) {}
-  void set_accumulate_modified_oops(bool value) { _accumulate_modified_oops = value; }
-  bool accumulate_modified_oops() { return _accumulate_modified_oops; }
-  bool mod_union_is_clear();
-  void clear_mod_union();
-};
-
-class GenRemSet: public CHeapObj<mtGC> {
-  friend class Generation;
-
-  BarrierSet* _bs;
-  KlassRemSet _klass_rem_set;
-
- public:
-  GenRemSet(BarrierSet * bs) : _bs(bs) {}
-  GenRemSet() : _bs(NULL) {}
-
-  // These are for dynamic downcasts.  Unfortunately that it names the
-  // possible subtypes (but not that they are subtypes!)  Return NULL if
-  // the cast is invalid.
-  virtual CardTableRS* as_CardTableRS() { return NULL; }
-
-  // Return the barrier set associated with "this."
-  BarrierSet* bs() { return _bs; }
-
-  // Set the barrier set.
-  void set_bs(BarrierSet* bs) { _bs = bs; }
-
-  KlassRemSet* klass_rem_set() { return &_klass_rem_set; }
-
-  // Do any (sequential) processing necessary to prepare for (possibly
-  // "parallel", if that arg is true) calls to younger_refs_iterate.
-  virtual void prepare_for_younger_refs_iterate(bool parallel) = 0;
-
-  // Apply the "do_oop" method of "blk" to (exactly) all oop locations
-  //  1) that are in objects allocated in "g" at the time of the last call
-  //     to "save_Marks", and
-  //  2) that point to objects in younger generations.
-  virtual void younger_refs_iterate(Generation* g, OopsInGenClosure* blk, uint n_threads) = 0;
-
-  virtual void younger_refs_in_space_iterate(Space* sp,
-                                             OopsInGenClosure* cl,
-                                             uint n_threads) = 0;
-
-  // This method is used to notify the remembered set that "new_val" has
-  // been written into "field" by the garbage collector.
-  void write_ref_field_gc(void* field, oop new_val);
- protected:
-  virtual void write_ref_field_gc_work(void* field, oop new_val) = 0;
- public:
-
-  // A version of the above suitable for use by parallel collectors.
-  virtual void write_ref_field_gc_par(void* field, oop new_val) = 0;
-
-  // Resize one of the regions covered by the remembered set.
-  virtual void resize_covered_region(MemRegion new_region) = 0;
-
-  // If the rem set imposes any alignment restrictions on boundaries
-  // within the heap, this function tells whether they are met.
-  virtual bool is_aligned(HeapWord* addr) = 0;
-
-  // Returns any alignment constraint that the remembered set imposes upon the
-  // heap.
-  static uintx max_alignment_constraint();
-
-  virtual void verify() = 0;
-
-  // If appropriate, print some information about the remset on "tty".
-  virtual void print() {}
-
-  // Informs the RS that the given memregion contains no references to
-  // the young generation.
-  virtual void clear(MemRegion mr) = 0;
-
-  // Informs the RS that there are no references to the young generation
-  // from old_gen.
-  virtual void clear_into_younger(Generation* old_gen) = 0;
-
-  // Informs the RS that refs in the given "mr" may have changed
-  // arbitrarily, and therefore may contain old-to-young pointers.
-  // If "whole heap" is true, then this invalidation is part of an
-  // invalidation of the whole heap, which an implementation might
-  // handle differently than that of a sub-part of the heap.
-  virtual void invalidate(MemRegion mr, bool whole_heap = false) = 0;
-
-  // Informs the RS that refs in this generation
-  // may have changed arbitrarily, and therefore may contain
-  // old-to-young pointers in arbitrary locations.
-  virtual void invalidate_or_clear(Generation* old_gen) = 0;
-};
-
-#endif // SHARE_VM_GC_SHARED_GENREMSET_HPP
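
With the abstract GenRemSet deleted, GenCollectedHeap::rem_set() hands out the concrete CardTableRS directly, and every as_CardTableRS() downcast disappears. The migration pattern, as it appears in the surrounding hunks:

    // Before: downcast through the abstract interface (could yield NULL).
    //   GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
    //   CardTableRS* ctrs = rs->as_CardTableRS();
    //
    // After: the concrete type; no cast and no NULL-cast check.
    //   CardTableRS* rs = GenCollectedHeap::heap()->rem_set();

(The KlassRemSet helper removed with genRemSet.cpp is still used by CMS; cardTableRS.hpp is the natural landing spot for it, but that is an assumption — these hunks do not show its new home.)
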
@@ -293,7 +293,7 @@ void Generation::oop_iterate(ExtendedOopClosure* cl) {
 void Generation::younger_refs_in_space_iterate(Space* sp,
                                                OopsInGenClosure* cl,
                                                uint n_threads) {
-  GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
+  CardTableRS* rs = GenCollectedHeap::heap()->rem_set();
   rs->younger_refs_in_space_iterate(sp, cl, n_threads);
 }

@@ -27,7 +27,6 @@

 #include "gc/shared/collectorCounters.hpp"
 #include "gc/shared/referenceProcessor.hpp"
-#include "gc/shared/watermark.hpp"
 #include "memory/allocation.hpp"
 #include "memory/memRegion.hpp"
 #include "memory/universe.hpp"
@@ -67,7 +66,6 @@ class OopClosure;
 class ScanClosure;
 class FastScanClosure;
 class GenCollectedHeap;
-class GenRemSet;
 class GCStats;

 // A "ScratchBlock" represents a block of memory in one generation usable by
@@ -25,7 +25,7 @@
 #include "precompiled.hpp"
 #include "gc/serial/defNewGeneration.hpp"
 #include "gc/serial/tenuredGeneration.hpp"
-#include "gc/shared/genRemSet.hpp"
+#include "gc/shared/cardTableRS.hpp"
 #include "gc/shared/generationSpec.hpp"
 #include "memory/binaryTreeDictionary.hpp"
 #include "memory/filemap.hpp"
@@ -36,7 +36,7 @@
 #include "gc/cms/parNewGeneration.hpp"
 #endif // INCLUDE_ALL_GCS

-Generation* GenerationSpec::init(ReservedSpace rs, GenRemSet* remset) {
+Generation* GenerationSpec::init(ReservedSpace rs, CardTableRS* remset) {
   switch (name()) {
     case Generation::DefNew:
       return new DefNewGeneration(rs, init_size());
@@ -50,8 +50,7 @@ Generation* GenerationSpec::init(ReservedSpace rs, GenRemSet* remset) {

     case Generation::ConcurrentMarkSweep: {
       assert(UseConcMarkSweepGC, "UseConcMarkSweepGC should be set");
-      CardTableRS* ctrs = remset->as_CardTableRS();
-      if (ctrs == NULL) {
+      if (remset == NULL) {
         vm_exit_during_initialization("Rem set incompatibility.");
       }
       // Otherwise
@@ -60,7 +59,7 @@ Generation* GenerationSpec::init(ReservedSpace rs, GenRemSet* remset) {

       ConcurrentMarkSweepGeneration* g = NULL;
       g = new ConcurrentMarkSweepGeneration(rs,
-                 init_size(), ctrs, UseCMSAdaptiveFreeLists,
+                 init_size(), remset, UseCMSAdaptiveFreeLists,
                  (FreeBlockDictionary<FreeChunk>::DictionaryChoice)CMSDictionaryChoice);

       g->initialize_performance_counters();
@@ -45,7 +45,7 @@ public:
     _max_size(align_size_up(max_size, alignment))
   { }

-  Generation* init(ReservedSpace rs, GenRemSet* remset);
+  Generation* init(ReservedSpace rs, CardTableRS* remset);

   // Accessors
   Generation::Name name() const { return _name; }
@@ -529,8 +529,7 @@ void ContiguousSpace::oop_iterate(ExtendedOopClosure* blk) {

 void ContiguousSpace::object_iterate(ObjectClosure* blk) {
   if (is_empty()) return;
-  WaterMark bm = bottom_mark();
-  object_iterate_from(bm, blk);
+  object_iterate_from(bottom(), blk);
 }

 // For a ContiguousSpace object_iterate() and safe_object_iterate()
@@ -539,12 +538,10 @@ void ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
   object_iterate(blk);
 }

-void ContiguousSpace::object_iterate_from(WaterMark mark, ObjectClosure* blk) {
-  assert(mark.space() == this, "Mark does not match space");
-  HeapWord* p = mark.point();
-  while (p < top()) {
-    blk->do_object(oop(p));
-    p += oop(p)->size();
+void ContiguousSpace::object_iterate_from(HeapWord* mark, ObjectClosure* blk) {
+  while (mark < top()) {
+    blk->do_object(oop(mark));
+    mark += oop(mark)->size();
   }
 }

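
object_iterate_from() now takes a bare HeapWord* that must point at the start of an object; the loop simply walks object-by-object up to top(), advancing by each object's size in words, and the WaterMark wrapper (plus its space-matches assert) becomes unnecessary. A small caller sketch under the new signature — the counting closure is hypothetical:

    // Count all objects in a space, starting from bottom(), which by
    // definition is the start of the first object.
    class CountObjectsClosure : public ObjectClosure {
     public:
      int _count;
      CountObjectsClosure() : _count(0) { }
      void do_object(oop obj) { _count++; }
    };

    void count_objects(ContiguousSpace* sp) {
      CountObjectsClosure cl;
      sp->object_iterate_from(sp->bottom(), &cl);
    }
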
@@ -27,7 +27,6 @@

 #include "gc/shared/blockOffsetTable.hpp"
 #include "gc/shared/cardTableModRefBS.hpp"
-#include "gc/shared/watermark.hpp"
 #include "gc/shared/workgroup.hpp"
 #include "memory/allocation.hpp"
 #include "memory/iterator.hpp"
@@ -48,7 +47,6 @@ class BlockOffsetArrayContigSpace;
 class Generation;
 class CompactibleSpace;
 class BlockOffsetTable;
-class GenRemSet;
 class CardTableRS;
 class DirtyCardToOopClosure;

@@ -541,9 +539,6 @@ class ContiguousSpace: public CompactibleSpace {
   void set_saved_mark()    { _saved_mark_word = top();    }
   void reset_saved_mark()  { _saved_mark_word = bottom(); }

-  WaterMark bottom_mark()  { return WaterMark(this, bottom()); }
-  WaterMark top_mark()     { return WaterMark(this, top()); }
-  WaterMark saved_mark()   { return WaterMark(this, saved_mark_word()); }
   bool saved_mark_at_top() const { return saved_mark_word() == top(); }

   // In debug mode mangle (write it with a particular bit
@@ -649,7 +644,7 @@ class ContiguousSpace: public CompactibleSpace {
   // Same as object_iterate, but starting from "mark", which is required
   // to denote the start of an object.  Objects allocated by
   // applications of the closure *are* included in the iteration.
-  virtual void object_iterate_from(WaterMark mark, ObjectClosure* blk);
+  virtual void object_iterate_from(HeapWord* mark, ObjectClosure* blk);

   // Very inefficient implementation.
   virtual HeapWord* block_start_const(const void* p) const;
@@ -1,61 +0,0 @@
-/*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_SHARED_WATERMARK_HPP
-#define SHARE_VM_GC_SHARED_WATERMARK_HPP
-
-#include "memory/allocation.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-// A water mark points into a space and is used during GC to keep track of
-// progress.
-
-class Space;
-
-class WaterMark VALUE_OBJ_CLASS_SPEC {
-  friend class VMStructs;
- private:
-  HeapWord* _point;
-  Space*    _space;
- public:
-  // Accessors
-  Space* space() const        { return _space; }
-  void set_space(Space* s)    { _space = s; }
-  HeapWord* point() const     { return _point; }
-  void set_point(HeapWord* p) { _point = p; }
-
-  // Constructors
-  WaterMark(Space* s, HeapWord* p) : _space(s), _point(p) {};
-  WaterMark() : _space(NULL), _point(NULL) {};
-};
-
-inline bool operator==(const WaterMark& x, const WaterMark& y) {
-  return (x.point() == y.point()) && (x.space() == y.space());
-}
-
-inline bool operator!=(const WaterMark& x, const WaterMark& y) {
-  return !(x == y);
-}
-
-#endif // SHARE_VM_GC_SHARED_WATERMARK_HPP
@@ -2791,7 +2791,7 @@ run:
                   (int)continuation_bci, p2i(THREAD));
       }
       // for AbortVMOnException flag
-      NOT_PRODUCT(Exceptions::debug_check_abort(except_oop));
+      Exceptions::debug_check_abort(except_oop);

       // Update profiling data.
       BI_PROFILE_ALIGN_TO_CURRENT_BCI();
@@ -2807,7 +2807,8 @@ run:
                   p2i(THREAD));
       }
       // for AbortVMOnException flag
-      NOT_PRODUCT(Exceptions::debug_check_abort(except_oop));
+      Exceptions::debug_check_abort(except_oop);
+
       // No handler in this activation, unwind and try again
       THREAD->set_pending_exception(except_oop(), NULL, 0);
       goto handle_return;
@@ -458,7 +458,7 @@ IRT_ENTRY(address, InterpreterRuntime::exception_handler_for_exception(JavaThrea
   // //   warning("performance bug: should not call runtime if method has no exception handlers");
   // }
   // for AbortVMOnException flag
-  NOT_PRODUCT(Exceptions::debug_check_abort(h_exception));
+  Exceptions::debug_check_abort(h_exception);

   // exception handler lookup
   KlassHandle h_klass(THREAD, h_exception->klass());
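
The NOT_PRODUCT wrapper comes off here, in the two bytecodeInterpreter.cpp hunks above, and in the opto/runtime.cpp hunk below because AbortVMOnException is promoted from a notproduct flag to a diagnostic flag in the globals.hpp hunk at the end of this change, so the check must also compile into product builds. As a diagnostic flag it is reachable in a product VM behind the usual gate, e.g. java -XX:+UnlockDiagnosticVMOptions -XX:AbortVMOnException=java.lang.NullPointerException Foo — the unlock requirement is standard diagnostic-flag behavior, not something these hunks spell out.
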
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -51,10 +51,18 @@ class OopClosure : public Closure {
 // This is needed by the GC and is extracted to a separate type to not
 // pollute the OopClosure interface.
 class ExtendedOopClosure : public OopClosure {
- public:
+ private:
   ReferenceProcessor* _ref_processor;

+ protected:
   ExtendedOopClosure(ReferenceProcessor* rp) : _ref_processor(rp) { }
-  ExtendedOopClosure() : OopClosure(), _ref_processor(NULL) { }
+  ExtendedOopClosure() : _ref_processor(NULL) { }
+  ~ExtendedOopClosure() { }
+
+  void set_ref_processor_internal(ReferenceProcessor* rp) { _ref_processor = rp; }
+
+ public:
+  ReferenceProcessor* ref_processor() const { return _ref_processor; }
+
   // If the do_metadata functions return "true",
   // we invoke the following when running oop_iterate():
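
ExtendedOopClosure now encapsulates its ReferenceProcessor: the field is private, readers go through ref_processor(), subclasses may swap it via the protected set_ref_processor_internal(), and the protected non-virtual destructor discourages deletion through a base pointer. A sketch of a subclass under the new shape — the closure itself is hypothetical:

    // Hypothetical closure; it reads the processor through the accessor
    // instead of touching the now-private _ref_processor field.
    class MyScanClosure : public ExtendedOopClosure {
     public:
      MyScanClosure(ReferenceProcessor* rp) : ExtendedOopClosure(rp) { }
      virtual void do_oop(oop* p)       { /* ... use ref_processor() ... */ }
      virtual void do_oop(narrowOop* p) { /* ... */ }
    };
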
@@ -35,7 +35,6 @@
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "gc/shared/gcLocker.inline.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
-#include "gc/shared/genRemSet.hpp"
 #include "gc/shared/generation.hpp"
 #include "gc/shared/space.hpp"
 #include "interpreter/interpreter.hpp"
@@ -43,7 +43,7 @@ void InstanceRefKlass::oop_oop_iterate_ref_processing_specialized(oop obj, OopCl

   T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
   T heap_oop = oopDesc::load_heap_oop(referent_addr);
-  ReferenceProcessor* rp = closure->_ref_processor;
+  ReferenceProcessor* rp = closure->ref_processor();
   if (!oopDesc::is_null(heap_oop)) {
     oop referent = oopDesc::decode_heap_oop_not_null(heap_oop);
     if (!referent->is_gc_marked() && (rp != NULL) &&
@@ -72,17 +72,14 @@ Method* Method::allocate(ClassLoaderData* loader_data,
                                           sizes,
                                           method_type,
                                           CHECK_NULL);

   int size = Method::size(access_flags.is_native());
-
-  return new (loader_data, size, false, MetaspaceObj::MethodType, THREAD) Method(cm, access_flags, size);
+  return new (loader_data, size, false, MetaspaceObj::MethodType, THREAD) Method(cm, access_flags);
 }

-Method::Method(ConstMethod* xconst, AccessFlags access_flags, int size) {
+Method::Method(ConstMethod* xconst, AccessFlags access_flags) {
   No_Safepoint_Verifier no_safepoint;
   set_constMethod(xconst);
   set_access_flags(access_flags);
-  set_method_size(size);
 #ifdef CC_INTERP
   set_result_index(T_VOID);
 #endif
@@ -1227,7 +1224,6 @@ methodHandle Method::clone_with_new_data(methodHandle m, u_char* new_code, int n
                                       m->method_type(),
                                       CHECK_(methodHandle()));
   methodHandle newm (THREAD, newm_oop);
-  int new_method_size = newm->method_size();

   // Create a shallow copy of Method part, but be careful to preserve the new ConstMethod*
   ConstMethod* newcm = newm->constMethod();
@@ -1242,7 +1238,6 @@ methodHandle Method::clone_with_new_data(methodHandle m, u_char* new_code, int n
   newm->set_constMethod(newcm);
   newm->constMethod()->set_code_size(new_code_length);
   newm->constMethod()->set_constMethod_size(new_const_method_size);
-  newm->set_method_size(new_method_size);
   assert(newm->code_size() == new_code_length, "check");
   assert(newm->method_parameters_length() == method_parameters_len, "check");
   assert(newm->checked_exceptions_length() == checked_exceptions_len, "check");
@@ -71,7 +71,6 @@ class Method : public Metadata {
 #ifdef CC_INTERP
   int _result_index;                 // C++ interpreter needs for converting results to/from stack
 #endif
-  u2 _method_size;                   // size of this object
   u2 _intrinsic_id;                  // vmSymbols::intrinsic_id (0 == _none)

   // Flags
@@ -106,7 +105,7 @@ class Method : public Metadata {
   volatile address _from_interpreted_entry; // Cache of _code ? _adapter->i2c_entry() : _i2i_entry

   // Constructor
-  Method(ConstMethod* xconst, AccessFlags access_flags, int size);
+  Method(ConstMethod* xconst, AccessFlags access_flags);
  public:

   static Method* allocate(ClassLoaderData* loader_data,
@@ -241,12 +240,8 @@ class Method : public Metadata {
   // code size
   int code_size() const { return constMethod()->code_size(); }

-  // method size
-  int method_size() const { return _method_size; }
-  void set_method_size(int size) {
-    assert(0 <= size && size < (1 << 16), "invalid method size");
-    _method_size = size;
-  }
+  // method size in words
+  int method_size() const { return sizeof(Method)/wordSize + (is_native() ? 2 : 0); }

   // constant pool for Klass* holding this method
   ConstantPool* constants() const { return constMethod()->constants(); }
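
With the cached _method_size field gone, the accessor recomputes the size on demand: sizeof(Method) in words, plus two extra words for native methods — which reads as the trailing slots for the native entry point and signature handler (my gloss; the hunk shows only the arithmetic, which must agree with the Method::size(access_flags.is_native()) value used at allocation in method.cpp above). The parentheses around (is_native() ? 2 : 0) matter: + binds tighter than ?:, so without them the whole sum would become the condition. Reading the expression with illustrative values:

    // On a 64-bit VM, wordSize == 8:
    //   non-native method: sizeof(Method)/8      words
    //   native method:     sizeof(Method)/8 + 2  words
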
@@ -1235,7 +1235,7 @@ JRT_ENTRY_NO_ASYNC(address, OptoRuntime::handle_exception_C_helper(JavaThread* t
   }

   // for AbortVMOnException flag
-  NOT_PRODUCT(Exceptions::debug_check_abort(exception));
+  Exceptions::debug_check_abort(exception);

 #ifdef ASSERT
   if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
@@ -101,7 +101,6 @@
 # include "gc/shared/gcStats.hpp"
 # include "gc/shared/gcUtil.hpp"
 # include "gc/shared/genCollectedHeap.hpp"
-# include "gc/shared/genRemSet.hpp"
 # include "gc/shared/generation.hpp"
 # include "gc/shared/generationCounters.hpp"
 # include "gc/shared/modRefBarrierSet.hpp"
@@ -111,7 +110,6 @@
 # include "gc/shared/spaceDecorator.hpp"
 # include "gc/shared/taskqueue.hpp"
 # include "gc/shared/threadLocalAllocBuffer.hpp"
-# include "gc/shared/watermark.hpp"
 # include "gc/shared/workgroup.hpp"
 # include "interpreter/abstractInterpreter.hpp"
 # include "interpreter/bytecode.hpp"
@@ -3874,6 +3874,7 @@ void TestBufferingOopClosure_test();
 void TestCodeCacheRemSet_test();
 void FreeRegionList_test();
 void test_memset_with_concurrent_readers();
+void TestPredictions_test();
 #endif

 void execute_internal_vm_tests() {
@@ -3916,6 +3917,7 @@ void execute_internal_vm_tests() {
     run_unit_test(FreeRegionList_test());
   }
   run_unit_test(test_memset_with_concurrent_readers());
+  run_unit_test(TestPredictions_test());
 #endif
   tty->print_cr("All internal VM tests passed");
 }
@@ -2436,20 +2436,6 @@ bool Arguments::check_vm_args_consistency() {
     MarkSweepAlwaysCompactCount = 1;  // Move objects every gc.
   }

-  if (UseParallelOldGC && ParallelOldGCSplitALot) {
-    // Settings to encourage splitting.
-    if (!FLAG_IS_CMDLINE(NewRatio)) {
-      if (FLAG_SET_CMDLINE(uintx, NewRatio, 2) != Flag::SUCCESS) {
-        status = false;
-      }
-    }
-    if (!FLAG_IS_CMDLINE(ScavengeBeforeFullGC)) {
-      if (FLAG_SET_CMDLINE(bool, ScavengeBeforeFullGC, false) != Flag::SUCCESS) {
-        status = false;
-      }
-    }
-  }
-
   if (!(UseParallelGC || UseParallelOldGC) && FLAG_IS_DEFAULT(ScavengeBeforeFullGC)) {
     FLAG_SET_DEFAULT(ScavengeBeforeFullGC, false);
   }
@@ -3350,19 +3336,6 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
         return JNI_EINVAL;
       }
 #endif
-    } else if (match_option(option, "-XX:MaxDirectMemorySize=", &tail)) {
-      julong max_direct_memory_size = 0;
-      ArgsRange errcode = parse_memory_size(tail, &max_direct_memory_size, 0);
-      if (errcode != arg_in_range) {
-        jio_fprintf(defaultStream::error_stream(),
-                    "Invalid maximum direct memory size: %s\n",
-                    option->optionString);
-        describe_range_error(errcode);
-        return JNI_EINVAL;
-      }
-      if (FLAG_SET_CMDLINE(size_t, MaxDirectMemorySize, max_direct_memory_size) != Flag::SUCCESS) {
-        return JNI_EINVAL;
-      }
 #if !INCLUDE_MANAGEMENT
     } else if (match_option(option, "-XX:+ManagementServer")) {
       jio_fprintf(defaultStream::error_stream(),
@@ -3990,16 +3963,8 @@ jint Arguments::insert_vm_options_file(const JavaVMInitArgs* args,
     return code;
   }

-  // Now set global settings from the vm_option file, giving an error if
-  // it has VMOptionsFile in it
-  code = match_special_option_and_act(vm_options_file_args->get(), flags_file,
-                                      NULL, NULL, NULL);
-  if (code != JNI_OK) {
-    return code;
-  }
-
   if (vm_options_file_args->get()->nOptions < 1) {
-    return 0;
+    return JNI_OK;
   }

   return args_out->insert(args, vm_options_file_args->get(),
|
|||||||
// The caller accepts -XX:VMOptionsFile
|
// The caller accepts -XX:VMOptionsFile
|
||||||
if (*vm_options_file != NULL) {
|
if (*vm_options_file != NULL) {
|
||||||
jio_fprintf(defaultStream::error_stream(),
|
jio_fprintf(defaultStream::error_stream(),
|
||||||
"Only one VM Options file is supported "
|
"The VM Options file can only be specified once and "
|
||||||
"on the command line\n");
|
"only on the command line.\n");
|
||||||
return JNI_EINVAL;
|
return JNI_EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
*vm_options_file = (char *) tail;
|
*vm_options_file = (char *) tail;
|
||||||
vm_options_file_pos = index; // save position of -XX:VMOptionsFile
|
vm_options_file_pos = index; // save position of -XX:VMOptionsFile
|
||||||
if (*vm_options_file == NULL) {
|
// If there's a VMOptionsFile, parse that (also can set flags_file)
|
||||||
jio_fprintf(defaultStream::error_stream(),
|
jint code = insert_vm_options_file(args, flags_file, vm_options_file,
|
||||||
"Cannot copy vm_options_file name.\n");
|
vm_options_file_pos,
|
||||||
return JNI_ENOMEM;
|
vm_options_file_args, args_out);
|
||||||
|
if (code != JNI_OK) {
|
||||||
|
return code;
|
||||||
|
}
|
||||||
|
if (args_out->is_set()) {
|
||||||
|
// The VMOptions file inserted some options so switch 'args'
|
||||||
|
// to the new set of options, and continue processing which
|
||||||
|
// preserves "last option wins" semantics.
|
||||||
|
args = args_out->get();
|
||||||
|
// The first option from the VMOptionsFile replaces the
|
||||||
|
// current option. So we back track to process the
|
||||||
|
// replacement option.
|
||||||
|
index--;
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
jio_fprintf(defaultStream::error_stream(),
|
jio_fprintf(defaultStream::error_stream(),
|
||||||
@ -4104,12 +4081,6 @@ jint Arguments::match_special_option_and_act(const JavaVMInitArgs* args,
|
|||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
// If there's a VMOptionsFile, parse that (also can set flags_file)
|
|
||||||
if ((vm_options_file != NULL) && (*vm_options_file != NULL)) {
|
|
||||||
return insert_vm_options_file(args, flags_file, vm_options_file,
|
|
||||||
vm_options_file_pos, vm_options_file_args, args_out);
|
|
||||||
}
|
|
||||||
return JNI_OK;
|
return JNI_OK;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
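
The two hunks above move the options-file expansion inline: as soon as -XX:VMOptionsFile= is matched, insert_vm_options_file() splices the file's options into args, and index-- makes the loop re-examine the slot now occupied by the first spliced option, so the usual left-to-right, last-option-wins processing continues seamlessly. A schematic of that control flow — the helper names are hypothetical:

    // Schematic only; the real logic lives in match_special_option_and_act.
    for (int index = 0; index < args->nOptions; index++) {
      const JavaVMOption* option = &args->options[index];
      if (is_vm_options_file(option)) {          // hypothetical matcher
        args = splice_options_file(args, index); // expand file contents in place
        index--;                                 // reprocess the replacement option
        continue;
      }
      // ... handle every other option; later occurrences win ...
    }
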
@@ -223,7 +223,7 @@ void emit_constraint_double(const char* name, CommandLineFlagConstraintFunc_doub
 #define EMIT_CONSTRAINT_CHECK(func, type) , func, CommandLineFlagConstraint::type

 // the "name" argument must be a string literal
-#define INITIAL_CONSTRAINTS_SIZE 40
+#define INITIAL_CONSTRAINTS_SIZE 45
 GrowableArray<CommandLineFlagConstraint*>* CommandLineFlagConstraintList::_constraints = NULL;
 CommandLineFlagConstraint::ConstraintType CommandLineFlagConstraintList::_validating_type = CommandLineFlagConstraint::AtParse;

@@ -54,9 +54,9 @@ public:
   enum ConstraintType {
     // Will be validated during argument processing (Arguments::parse_argument).
     AtParse         = 0,
-    // Will be validated by CommandLineFlagConstraintList::check_constraints(AfterErgo).
+    // Will be validated inside Threads::create_vm(), right after Arguments::apply_ergo().
     AfterErgo       = 1,
-    // Will be validated by CommandLineFlagConstraintList::check_constraints(AfterMemoryInit).
+    // Will be validated inside universe_init(), right after Metaspace::global_initialize().
     AfterMemoryInit = 2
   };

@@ -27,6 +27,7 @@
 #include "runtime/commandLineFlagConstraintsRuntime.hpp"
 #include "runtime/commandLineFlagRangeList.hpp"
 #include "runtime/globals.hpp"
+#include "runtime/task.hpp"
 #include "utilities/defaultStream.hpp"

 Flag::Error ObjectAlignmentInBytesConstraintFunc(intx value, bool verbose) {
@@ -41,7 +42,7 @@ Flag::Error ObjectAlignmentInBytesConstraintFunc(intx value, bool verbose) {
   if (value >= (intx)os::vm_page_size()) {
     CommandLineError::print(verbose,
                             "ObjectAlignmentInBytes (" INTX_FORMAT ") must be "
-                            "less than page size " INTX_FORMAT "\n",
+                            "less than page size (" INTX_FORMAT ")\n",
                             value, (intx)os::vm_page_size());
     return Flag::VIOLATES_CONSTRAINT;
   }
@@ -51,7 +52,7 @@ Flag::Error ObjectAlignmentInBytesConstraintFunc(intx value, bool verbose) {
 // Need to enforce the padding not to break the existing field alignments.
 // It is sufficient to check against the largest type size.
 Flag::Error ContendedPaddingWidthConstraintFunc(intx value, bool verbose) {
-  if ((value != 0) && ((value % BytesPerLong) != 0)) {
+  if ((value % BytesPerLong) != 0) {
     CommandLineError::print(verbose,
                             "ContendedPaddingWidth (" INTX_FORMAT ") must be "
                             "a multiple of %d\n",
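
The dropped (value != 0) guard is redundant: 0 % BytesPerLong == 0, so a zero padding width (padding disabled) already passes the modulo test. A quick standalone sanity check of the predicate — BytesPerLong here is a stand-in for HotSpot's compile-time constant, which is 8:

    #include <cassert>
    const long BytesPerLong = 8;  // stand-in for the HotSpot constant
    static bool violates(long value) { return (value % BytesPerLong) != 0; }
    int main() {
      assert(!violates(0));    // disabled padding still passes
      assert(!violates(128));  // the default ContendedPaddingWidth
      assert(violates(100));   // rejected: not a multiple of 8
      return 0;
    }
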
@@ -61,3 +62,71 @@ Flag::Error ContendedPaddingWidthConstraintFunc(intx value, bool verbose) {
     return Flag::SUCCESS;
   }
 }
+
+Flag::Error BiasedLockingBulkRebiasThresholdFunc(intx value, bool verbose) {
+  if (value > BiasedLockingBulkRevokeThreshold) {
+    CommandLineError::print(verbose,
+                            "BiasedLockingBulkRebiasThreshold (" INTX_FORMAT ") must be "
+                            "less than or equal to BiasedLockingBulkRevokeThreshold (" INTX_FORMAT ")\n",
+                            value, BiasedLockingBulkRevokeThreshold);
+    return Flag::VIOLATES_CONSTRAINT;
+  } else {
+    return Flag::SUCCESS;
+  }
+}
+
+Flag::Error BiasedLockingStartupDelayFunc(intx value, bool verbose) {
+  if ((value % PeriodicTask::interval_gran) != 0) {
+    CommandLineError::print(verbose,
+                            "BiasedLockingStartupDelay (" INTX_FORMAT ") must be "
+                            "evenly divisible by PeriodicTask::interval_gran (" INTX_FORMAT ")\n",
+                            value, PeriodicTask::interval_gran);
+    return Flag::VIOLATES_CONSTRAINT;
+  } else {
+    return Flag::SUCCESS;
+  }
+}
+
+Flag::Error BiasedLockingBulkRevokeThresholdFunc(intx value, bool verbose) {
+  if (value < BiasedLockingBulkRebiasThreshold) {
+    CommandLineError::print(verbose,
+                            "BiasedLockingBulkRevokeThreshold (" INTX_FORMAT ") must be "
+                            "greater than or equal to BiasedLockingBulkRebiasThreshold (" INTX_FORMAT ")\n",
+                            value, BiasedLockingBulkRebiasThreshold);
+    return Flag::VIOLATES_CONSTRAINT;
+  } else if ((double)value/(double)BiasedLockingDecayTime > 0.1) {
+    CommandLineError::print(verbose,
+                            "The ratio of BiasedLockingBulkRevokeThreshold (" INTX_FORMAT ")"
+                            " to BiasedLockingDecayTime (" INTX_FORMAT ") must be "
+                            "less than or equal to 0.1\n",
+                            value, BiasedLockingBulkRebiasThreshold);
+    return Flag::VIOLATES_CONSTRAINT;
+  } else {
+    return Flag::SUCCESS;
+  }
+}
+
+Flag::Error BiasedLockingDecayTimeFunc(intx value, bool verbose) {
+  if (BiasedLockingBulkRebiasThreshold/(double)value > 0.1) {
+    CommandLineError::print(verbose,
+                            "The ratio of BiasedLockingBulkRebiasThreshold (" INTX_FORMAT ")"
+                            " to BiasedLockingDecayTime (" INTX_FORMAT ") must be "
+                            "less than or equal to 0.1\n",
+                            BiasedLockingBulkRebiasThreshold, value);
+    return Flag::VIOLATES_CONSTRAINT;
+  } else {
+    return Flag::SUCCESS;
+  }
+}
+
+Flag::Error PerfDataSamplingIntervalFunc(intx value, bool verbose) {
+  if ((value % PeriodicTask::interval_gran != 0)) {
+    CommandLineError::print(verbose,
+                            "PerfDataSamplingInterval (" INTX_FORMAT ") must be "
+                            "evenly divisible by PeriodicTask::interval_gran (" INTX_FORMAT ")\n",
+                            value, PeriodicTask::interval_gran);
+    return Flag::VIOLATES_CONSTRAINT;
+  } else {
+    return Flag::SUCCESS;
+  }
+}
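
Worked numbers for the 0.1 ratio cap enforced above, using the default flag values that appear in the globals.hpp hunks later in this change (BiasedLockingBulkRevokeThreshold = 40, BiasedLockingDecayTime = 25000):

    //   40 / 25000.0 = 0.0016              -> Flag::SUCCESS
    //   with -XX:BiasedLockingBulkRevokeThreshold=3000 and the default
    //   decay time: 3000 / 25000.0 = 0.12 > 0.1
    //                                      -> Flag::VIOLATES_CONSTRAINT
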
@@ -38,4 +38,11 @@ Flag::Error ObjectAlignmentInBytesConstraintFunc(intx value, bool verbose);

 Flag::Error ContendedPaddingWidthConstraintFunc(intx value, bool verbose);

+Flag::Error BiasedLockingBulkRebiasThresholdFunc(intx value, bool verbose);
+Flag::Error BiasedLockingStartupDelayFunc(intx value, bool verbose);
+Flag::Error BiasedLockingBulkRevokeThresholdFunc(intx value, bool verbose);
+Flag::Error BiasedLockingDecayTimeFunc(intx value, bool verbose);
+
+Flag::Error PerfDataSamplingIntervalFunc(intx value, bool verbose);
+
 #endif /* SHARE_VM_RUNTIME_COMMANDLINEFLAGCONSTRAINTSRUNTIME_HPP */
@@ -29,6 +29,7 @@
 #include "runtime/arguments.hpp"
 #include "runtime/commandLineFlagRangeList.hpp"
 #include "runtime/os.hpp"
+#include "runtime/task.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/macros.hpp"

@@ -278,7 +279,7 @@ void emit_range_double(const char* name, double min, double max) {
 // Generate func argument to pass into emit_range_xxx functions
 #define EMIT_RANGE_CHECK(a, b) , a, b

-#define INITIAL_RANGES_SIZE 165
+#define INITIAL_RANGES_SIZE 204
 GrowableArray<CommandLineFlagRange*>* CommandLineFlagRangeList::_ranges = NULL;

 // Check the ranges of all flags that have them
@@ -894,9 +894,11 @@ public:
   /* typically, at most a few retries are needed */                         \
   product(intx, SuspendRetryCount, 50,                                      \
           "Maximum retry count for an external suspend request")            \
+          range(0, max_intx)                                                \
                                                                             \
   product(intx, SuspendRetryDelay, 5,                                       \
           "Milliseconds to delay per retry (* current_retry_count)")        \
+          range(0, max_intx)                                                \
                                                                             \
   product(bool, AssertOnSuspendWaitFailure, false,                          \
           "Assert/Guarantee on external suspend wait failure")              \
@@ -1342,6 +1344,7 @@ public:
           "Maximum allowable local JNI handle capacity to "                 \
           "EnsureLocalCapacity() and PushLocalFrame(), "                    \
           "where <= 0 is unlimited, default: 65536")                        \
+          range(min_intx, max_intx)                                         \
                                                                             \
   product(bool, EagerXrunInit, false,                                       \
           "Eagerly initialize -Xrun libraries; allows startup profiling, "  \
@@ -1377,7 +1380,7 @@ public:
   product(intx, ContendedPaddingWidth, 128,                                 \
           "How many bytes to pad the fields/classes marked @Contended with")\
           range(0, 8192)                                                    \
-          constraint(ContendedPaddingWidthConstraintFunc,AtParse)           \
+          constraint(ContendedPaddingWidthConstraintFunc,AfterErgo)         \
                                                                             \
   product(bool, EnableContended, true,                                      \
           "Enable @Contended annotation support")                           \
@@ -1390,6 +1393,8 @@ public:
                                                                             \
   product(intx, BiasedLockingStartupDelay, 4000,                            \
           "Number of milliseconds to wait before enabling biased locking")  \
+          range(0, (intx)(max_jint-(max_jint%PeriodicTask::interval_gran))) \
+          constraint(BiasedLockingStartupDelayFunc,AfterErgo)               \
                                                                             \
   diagnostic(bool, PrintBiasedLockingStatistics, false,                     \
           "Print statistics of biased locking in JVM")                      \
@@ -1397,14 +1402,20 @@ public:
   product(intx, BiasedLockingBulkRebiasThreshold, 20,                       \
           "Threshold of number of revocations per type to try to "          \
           "rebias all objects in the heap of that type")                    \
+          range(0, max_intx)                                                \
+          constraint(BiasedLockingBulkRebiasThresholdFunc,AfterErgo)        \
                                                                             \
   product(intx, BiasedLockingBulkRevokeThreshold, 40,                       \
           "Threshold of number of revocations per type to permanently "     \
           "revoke biases of all objects in the heap of that type")          \
+          range(0, max_intx)                                                \
+          constraint(BiasedLockingBulkRevokeThresholdFunc,AfterErgo)        \
                                                                             \
   product(intx, BiasedLockingDecayTime, 25000,                              \
           "Decay time (in milliseconds) to re-enable bulk rebiasing of a "  \
           "type after previous bulk rebias")                                \
+          range(500, max_intx)                                              \
+          constraint(BiasedLockingDecayTimeFunc,AfterErgo)                  \
                                                                             \
   /* tracing */                                                             \
                                                                             \
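
Each biased-locking flag now carries both a hard range and a cross-flag constraint, with the constraints deferred to AfterErgo so every participating flag holds its final value before the ratio checks run; BiasedLockingStartupDelay's upper bound is the largest multiple of PeriodicTask::interval_gran that fits in a jint, matching its divisibility constraint. The same table syntax extends to any new flag — everything about MyTunable below is hypothetical, shown only to illustrate the pattern:

    product(intx, MyTunable, 16,                                              \
            "Hypothetical flag illustrating range plus constraint")           \
            range(0, max_intx)                                                \
            constraint(MyTunableConstraintFunc,AfterErgo)                     \
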
@@ -1429,8 +1440,9 @@ public:
   product(bool, StressLdcRewrite, false,  \
           "Force ldc -> ldc_w rewrite during RedefineClasses")  \
   \
-  product(intx, TraceRedefineClasses, 0,  \
+  product(uintx, TraceRedefineClasses, 0,  \
           "Trace level for JVMTI RedefineClasses")  \
+          range(0, 0xFFFFFFFF)  \
   \
   /* change to false by default sometime after Mustang */  \
   product(bool, VerifyMergedCPBytecodes, true,  \
@@ -1567,14 +1579,6 @@ public:
   product(bool, TraceDynamicGCThreads, false,  \
           "Trace the dynamic GC thread usage")  \
   \
-  develop(bool, ParallelOldGCSplitALot, false,  \
-          "Provoke splitting (copying data from a young gen space to "  \
-          "multiple destination spaces)")  \
-  \
-  develop(uintx, ParallelOldGCSplitInterval, 3,  \
-          "How often to provoke splitting a young gen space")  \
-          range(0, max_uintx)  \
-  \
   product(uint, ConcGCThreads, 0,  \
           "Number of threads concurrent gc will use")  \
           constraint(ConcGCThreadsConstraintFunc,AfterErgo)  \
@@ -1593,9 +1597,6 @@ public:
   product(bool, ScavengeBeforeFullGC, true,  \
           "Scavenge youngest generation before each full GC.")  \
   \
-  develop(bool, ScavengeWithObjectsInToSpace, false,  \
-          "Allow scavenges to occur when to-space contains objects")  \
-  \
   product(bool, UseConcMarkSweepGC, false,  \
           "Use Concurrent Mark-Sweep GC in the old generation")  \
   \
@@ -2179,6 +2180,7 @@ public:
   \
   product_pd(uint64_t, MaxRAM,  \
           "Real memory size (in bytes) used to set maximum heap size")  \
+          range(0, 0XFFFFFFFFFFFFFFFF)  \
   \
   product(size_t, ErgoHeapSizeLimit, 0,  \
           "Maximum ergonomically set heap size (in bytes); zero means use "  \
@@ -2237,12 +2239,6 @@ public:
           "Policy for changing generation size for throughput goals")  \
           range(0, 1)  \
   \
-  develop(bool, PSAdjustTenuredGenForMinorPause, false,  \
-          "Adjust tenured generation to achieve a minor pause goal")  \
-  \
-  develop(bool, PSAdjustYoungGenForMajorPause, false,  \
-          "Adjust young generation to achieve a major pause goal")  \
-  \
   product(uintx, AdaptiveSizePolicyInitializingSteps, 20,  \
           "Number of steps where heuristics is used before data is used")  \
   \
@@ -2687,10 +2683,13 @@ public:
   product(intx, PrintSafepointStatisticsCount, 300,  \
           "Total number of safepoint statistics collected "  \
           "before printing them out")  \
+          range(1, max_intx)  \
   \
   product(intx, PrintSafepointStatisticsTimeout, -1,  \
           "Print safepoint statistics only when safepoint takes "  \
           "more than PrintSafepointSatisticsTimeout in millis")  \
+          LP64_ONLY(range(-1, max_intx/MICROUNITS))  \
+          NOT_LP64(range(-1, max_intx))  \
   \
   product(bool, TraceSafepointCleanupTime, false,  \
           "Print the break down of clean up tasks performed during "  \
@@ -2740,6 +2739,7 @@ public:
   diagnostic(intx, MinPassesBeforeFlush, 10,  \
           "Minimum number of sweeper passes before an nmethod "  \
           "can be flushed")  \
+          range(0, max_intx)  \
   \
   product(bool, UseCodeAging, true,  \
           "Insert counter to detect warm methods")  \
@@ -2818,11 +2818,11 @@ public:
           "standard exit from VM if bytecode verify error "  \
           "(only in debug mode)")  \
   \
-  notproduct(ccstr, AbortVMOnException, NULL,  \
+  diagnostic(ccstr, AbortVMOnException, NULL,  \
           "Call fatal if this exception is thrown. Example: "  \
           "java -XX:AbortVMOnException=java.lang.NullPointerException Foo")  \
   \
-  notproduct(ccstr, AbortVMOnExceptionMessage, NULL,  \
+  diagnostic(ccstr, AbortVMOnExceptionMessage, NULL,  \
           "Call fatal if the exception pointed by AbortVMOnException "  \
           "has this message")  \
   \
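The notproduct to diagnostic promotion for the two AbortVMOnException flags is a behavioral change, not a cleanup: notproduct flags do not exist in product builds at all, while diagnostic flags are compiled in everywhere and merely have to be unlocked. That is why the call sites below (in thread.cpp and exceptions.cpp) drop their NOT_PRODUCT wrappers. Usage on a product VM now looks like:

    java -XX:+UnlockDiagnosticVMOptions -XX:AbortVMOnException=java.lang.NullPointerException Foo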
@@ -3116,21 +3116,29 @@ public:
   product(intx, SelfDestructTimer, 0,  \
           "Will cause VM to terminate after a given time (in minutes) "  \
           "(0 means off)")  \
+          range(0, max_intx)  \
   \
   product(intx, MaxJavaStackTraceDepth, 1024,  \
           "The maximum number of lines in the stack trace for Java "  \
           "exceptions (0 means all)")  \
+          range(0, max_jint/2)  \
   \
+  /* notice: the max range value here is max_jint, not max_intx */  \
+  /* because of overflow issue */  \
   NOT_EMBEDDED(diagnostic(intx, GuaranteedSafepointInterval, 1000,  \
           "Guarantee a safepoint (at least) every so many milliseconds "  \
           "(0 means none)"))  \
+  NOT_EMBEDDED(range(0, max_jint))  \
   \
   EMBEDDED_ONLY(product(intx, GuaranteedSafepointInterval, 0,  \
           "Guarantee a safepoint (at least) every so many milliseconds "  \
           "(0 means none)"))  \
+  EMBEDDED_ONLY(range(0, max_jint))  \
   \
   product(intx, SafepointTimeoutDelay, 10000,  \
           "Delay in milliseconds for option SafepointTimeout")  \
+          LP64_ONLY(range(0, max_intx/MICROUNITS))  \
+          NOT_LP64(range(0, max_intx))  \
   \
   product(intx, NmethodSweepActivity, 10,  \
           "Removes cold nmethods from code cache if > 0. Higher values "  \
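Several of the new ranges encode arithmetic performed downstream rather than the flag's own semantics. The comment added above GuaranteedSafepointInterval spells it out: flags whose values get scaled by MICROUNITS (1,000,000, the nanoseconds-per-millisecond factor the safepoint statistics code uses) must have their upper bound divided by the same factor, or the scaling itself can overflow. A worked sketch of the hazard that LP64_ONLY(range(0, max_intx/MICROUNITS)) rules out:

    #include <cstdint>
    #include <cstdio>

    typedef intptr_t intx;
    static const intx    max_intx   = INTPTR_MAX;
    static const int64_t MICROUNITS = 1000000;   // ns per ms

    int main() {
      // A millisecond value just above the safe bound of the new range.
      intx timeout_ms = max_intx / MICROUNITS + 1;

      // Scaling it to nanoseconds would overflow intx, which is why the
      // range is capped at max_intx/MICROUNITS on LP64 instead of max_intx.
      if (timeout_ms <= max_intx / MICROUNITS) {
        printf("%lld ns\n", (long long)(timeout_ms * MICROUNITS));
      } else {
        printf("rejected: %ld ms would overflow when scaled to ns\n",
               (long)timeout_ms);
      }
      return 0;
    }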
@@ -3222,6 +3230,7 @@ public:
   product(intx, ProfileIntervalsTicks, 100,  \
           "Number of ticks between printing of interval profile "  \
           "(+ProfileIntervals)")  \
+          range(0, max_intx)  \
   \
   notproduct(intx, ScavengeALotInterval, 1,  \
           "Interval between which scavenge will occur with +ScavengeALot")  \
@@ -3255,14 +3264,17 @@ public:
   diagnostic(intx, MallocVerifyInterval, 0,  \
           "If non-zero, verify C heap after every N calls to "  \
           "malloc/realloc/free")  \
+          range(0, max_intx)  \
   \
   diagnostic(intx, MallocVerifyStart, 0,  \
           "If non-zero, start verifying C heap after Nth call to "  \
           "malloc/realloc/free")  \
+          range(0, max_intx)  \
   \
   diagnostic(uintx, MallocMaxTestWords, 0,  \
           "If non-zero, maximum number of words that malloc/realloc can "  \
           "allocate (for testing only)")  \
+          range(0, max_uintx)  \
   \
   product(intx, TypeProfileWidth, 2,  \
           "Number of receiver types to record in call/cast profile")  \
@@ -3506,10 +3518,12 @@ public:
   product(intx, DeferThrSuspendLoopCount, 4000,  \
           "(Unstable) Number of times to iterate in safepoint loop "  \
           "before blocking VM threads ")  \
+          range(-1, max_jint-1)  \
   \
   product(intx, DeferPollingPageLoopCount, -1,  \
           "(Unsafe,Unstable) Number of iterations in safepoint loop "  \
           "before changing safepoint polling page to RO ")  \
+          range(-1, max_jint-1)  \
   \
   product(intx, SafepointSpinBeforeYield, 2000, "(Unstable)")  \
           range(0, max_intx)  \
@@ -3524,23 +3538,25 @@ public:
   /* stack parameters */  \
   product_pd(intx, StackYellowPages,  \
           "Number of yellow zone (recoverable overflows) pages")  \
-          range(1, max_intx)  \
+          range(MIN_STACK_YELLOW_PAGES, (DEFAULT_STACK_YELLOW_PAGES+5))  \
   \
   product_pd(intx, StackRedPages,  \
           "Number of red zone (unrecoverable overflows) pages")  \
-          range(1, max_intx)  \
+          range(MIN_STACK_RED_PAGES, (DEFAULT_STACK_RED_PAGES+2))  \
   \
   /* greater stack shadow pages can't generate instruction to bang stack */  \
   product_pd(intx, StackShadowPages,  \
           "Number of shadow zone (for overflow checking) pages "  \
           "this should exceed the depth of the VM and native call stack")  \
-          range(1, 50)  \
+          range(MIN_STACK_SHADOW_PAGES, (DEFAULT_STACK_SHADOW_PAGES+30))  \
   \
   product_pd(intx, ThreadStackSize,  \
           "Thread Stack Size (in Kbytes)")  \
+          range(0, max_intx-os::vm_page_size())  \
   \
   product_pd(intx, VMThreadStackSize,  \
           "Non-Java Thread Stack Size (in Kbytes)")  \
+          range(0, max_intx/(1 * K))  \
   \
   product_pd(intx, CompilerThreadStackSize,  \
           "Compiler Thread Stack Size (in Kbytes)")  \
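The stack-zone flags swap absolute bounds for platform-relative ones: MIN_STACK_* and DEFAULT_STACK_* come from per-platform headers (several os_cpu files appear in this commit's file list), so each port constrains the flags around its own defaults. Illustratively, with invented numbers:

    #include <cstdio>

    // Hypothetical platform header; the real constants live in the
    // per-platform globals/os_cpu headers touched by this commit, and
    // these particular numbers are made up for the sketch.
    #define MIN_STACK_YELLOW_PAGES     1
    #define DEFAULT_STACK_YELLOW_PAGES 2
    #define MIN_STACK_SHADOW_PAGES     1
    #define DEFAULT_STACK_SHADOW_PAGES 20

    int main() {
      // The new bounds track the platform defaults instead of the old
      // one-size-fits-all range(1, max_intx) / range(1, 50):
      printf("StackYellowPages allowed: [%d, %d]\n",
             MIN_STACK_YELLOW_PAGES, DEFAULT_STACK_YELLOW_PAGES + 5);
      printf("StackShadowPages allowed: [%d, %d]\n",
             MIN_STACK_SHADOW_PAGES, DEFAULT_STACK_SHADOW_PAGES + 30);
      return 0;
    }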
@@ -3551,7 +3567,8 @@ public:
   \
   /* code cache parameters */  \
   /* ppc64/tiered compilation has large code-entry alignment. */  \
-  develop(uintx, CodeCacheSegmentSize, 64 PPC64_ONLY(+64) NOT_PPC64(TIERED_ONLY(+64)),\
+  develop(uintx, CodeCacheSegmentSize,  \
+          64 PPC64_ONLY(+64) NOT_PPC64(TIERED_ONLY(+64)),  \
           "Code cache segment size (in bytes) - smallest unit of "  \
           "allocation")  \
           range(1, 1024)  \
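The CodeCacheSegmentSize change is purely cosmetic, splitting the platform-conditional default onto its own continuation line. For readers unfamiliar with the macro family: PPC64_ONLY(x) expands to x only on ppc64 builds, and NOT_PPC64/TIERED_ONLY behave analogously, so the same expression yields different defaults per build. An illustrative reimplementation (the real macros live in utilities/macros.hpp and key off the build's define set):

    #include <cstdio>

    #ifdef PPC64
    #define PPC64_ONLY(code) code
    #define NOT_PPC64(code)
    #else
    #define PPC64_ONLY(code)
    #define NOT_PPC64(code) code
    #endif

    #ifdef TIERED
    #define TIERED_ONLY(code) code
    #else
    #define TIERED_ONLY(code)
    #endif

    int main() {
      // ppc64: 64+64 = 128; non-ppc64 tiered: 128; otherwise 64.
      unsigned code_cache_segment_size =
          64 PPC64_ONLY(+64) NOT_PPC64(TIERED_ONLY(+64));
      printf("CodeCacheSegmentSize default: %u\n", code_cache_segment_size);
      return 0;
    }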
@@ -3729,6 +3746,7 @@ public:
   product(intx, VMThreadPriority, -1,  \
           "The native priority at which the VM thread should run "  \
           "(-1 means no change)")  \
+          range(-1, 127)  \
   \
   product(bool, CompilerThreadHintNoPreempt, true,  \
           "(Solaris only) Give compiler threads an extra quanta")  \
@@ -3738,33 +3756,43 @@ public:
   \
   product(intx, JavaPriority1_To_OSPriority, -1,  \
           "Map Java priorities to OS priorities")  \
+          range(-1, 127)  \
   \
   product(intx, JavaPriority2_To_OSPriority, -1,  \
           "Map Java priorities to OS priorities")  \
+          range(-1, 127)  \
   \
   product(intx, JavaPriority3_To_OSPriority, -1,  \
           "Map Java priorities to OS priorities")  \
+          range(-1, 127)  \
   \
   product(intx, JavaPriority4_To_OSPriority, -1,  \
           "Map Java priorities to OS priorities")  \
+          range(-1, 127)  \
   \
   product(intx, JavaPriority5_To_OSPriority, -1,  \
           "Map Java priorities to OS priorities")  \
+          range(-1, 127)  \
   \
   product(intx, JavaPriority6_To_OSPriority, -1,  \
           "Map Java priorities to OS priorities")  \
+          range(-1, 127)  \
   \
   product(intx, JavaPriority7_To_OSPriority, -1,  \
           "Map Java priorities to OS priorities")  \
+          range(-1, 127)  \
   \
   product(intx, JavaPriority8_To_OSPriority, -1,  \
           "Map Java priorities to OS priorities")  \
+          range(-1, 127)  \
   \
   product(intx, JavaPriority9_To_OSPriority, -1,  \
           "Map Java priorities to OS priorities")  \
+          range(-1, 127)  \
   \
   product(intx, JavaPriority10_To_OSPriority,-1,  \
           "Map Java priorities to OS priorities")  \
+          range(-1, 127)  \
   \
   experimental(bool, UseCriticalJavaThreadPriority, false,  \
           "Java thread priority 10 maps to critical scheduling priority")  \
@@ -3974,6 +4002,7 @@ public:
   \
   product(size_t, MaxDirectMemorySize, 0,  \
           "Maximum total size of NIO direct-buffer allocations")  \
+          range(0, (size_t)SIZE_MAX)  \
   \
   /* Flags used for temporary code during development */  \
   \
@@ -4002,6 +4031,8 @@ public:
   \
   product(intx, PerfDataSamplingInterval, 50,  \
           "Data sampling interval (in milliseconds)")  \
+          range(PeriodicTask::min_interval, max_jint)  \
+          constraint(PerfDataSamplingIntervalFunc, AfterErgo)  \
   \
   develop(bool, PerfTraceDataCreation, false,  \
           "Trace creation of Performance Data Entries")  \
@@ -4015,9 +4046,11 @@ public:
   product(intx, PerfDataMemorySize, 64*K,  \
           "Size of performance data memory region. Will be rounded "  \
           "up to a multiple of the native os page size.")  \
+          range(128, 32*64*K)  \
   \
   product(intx, PerfMaxStringConstLength, 1024,  \
           "Maximum PerfStringConstant string length before truncation")  \
+          range(32, 32*K)  \
   \
   product(bool, PerfAllowAtExitRegistration, false,  \
           "Allow registration of atexit() methods")  \
@@ -4077,10 +4110,10 @@ public:
           "If PrintSharedArchiveAndExit is true, also print the shared "  \
           "dictionary")  \
   \
   product(size_t, SharedReadWriteSize, NOT_LP64(12*M) LP64_ONLY(16*M),  \
           "Size of read-write space for metadata (in bytes)")  \
   \
   product(size_t, SharedReadOnlySize, NOT_LP64(12*M) LP64_ONLY(16*M),  \
           "Size of read-only space for metadata (in bytes)")  \
   \
   product(uintx, SharedMiscDataSize, NOT_LP64(2*M) LP64_ONLY(4*M),  \
@@ -4095,6 +4128,7 @@ public:
   \
   product(uintx, SharedSymbolTableBucketSize, 4,  \
           "Average number of symbols per bucket in shared table")  \
+          range(2, 246)  \
   \
   diagnostic(bool, IgnoreUnverifiableClassesDuringDump, false,  \
           "Do not quit -Xshare:dump even if we encounter unverifiable "  \
diff --git a/hotspot/src/share/vm/runtime/safepoint.cpp b/hotspot/src/share/vm/runtime/safepoint.cpp
@@ -338,7 +338,7 @@ void SafepointSynchronize::begin() {
       tty->print_cr("# SafepointSynchronize: Finished after "
                     INT64_FORMAT_W(6) " ms",
                     ((current_time - safepoint_limit_time) / MICROUNITS +
-                     SafepointTimeoutDelay));
+                     (jlong)SafepointTimeoutDelay));
     }
   }
 #endif
@@ -1050,10 +1050,6 @@ static void print_header() {
 void SafepointSynchronize::deferred_initialize_stat() {
   if (init_done) return;

-  if (PrintSafepointStatisticsCount <= 0) {
-    fatal("Wrong PrintSafepointStatisticsCount");
-  }
-
   // If PrintSafepointStatisticsTimeout is specified, the statistics data will
   // be printed right away, in which case, _safepoint_stats will regress to
   // a single element array. Otherwise, it is a circular ring buffer with default
@@ -1164,7 +1160,7 @@ void SafepointSynchronize::end_statistics(jlong vmop_end_time) {
   // PrintSafepointStatisticsTimeout will be printed out right away.
   // By default, it is -1 meaning all samples will be put into the list.
   if ( PrintSafepointStatisticsTimeout > 0) {
-    if (spstat->_time_to_sync > PrintSafepointStatisticsTimeout * MICROUNITS) {
+    if (spstat->_time_to_sync > (jlong)PrintSafepointStatisticsTimeout * MICROUNITS) {
       print_statistics();
     }
   } else {
@@ -1230,7 +1226,7 @@ void SafepointSynchronize::print_stat_on_exit() {
                             os::javaTimeNanos() - cleanup_end_time;

   if ( PrintSafepointStatisticsTimeout < 0 ||
-      spstat->_time_to_sync > PrintSafepointStatisticsTimeout * MICROUNITS) {
+      spstat->_time_to_sync > (jlong)PrintSafepointStatisticsTimeout * MICROUNITS) {
     print_statistics();
   }
   tty->cr();
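These casts are the consumer side of the LP64_ONLY/NOT_LP64 ranges added to globals.hpp above, and the deleted fatal() check is now redundant because range(1, max_intx) rejects bad values at parse time. On a 32-bit VM intx is 32 bits wide, so intx-times-MICROUNITS wraps long before any plausible timeout, and the comparison against the jlong nanosecond counter silently misfires; widening before multiplying keeps the arithmetic in 64 bits. A minimal model of the difference:

    #include <cstdint>
    #include <cstdio>

    typedef int32_t intx32;            // what intx is on a 32-bit VM
    typedef int64_t jlong;
    static const jlong MICROUNITS = 1000000;

    int main() {
      intx32 timeout_ms   = 3000;          // -XX:PrintSafepointStatisticsTimeout=3000
      jlong  time_to_sync = 2500000000LL;  // 2.5 s, recorded in nanoseconds

      // Without the cast the product is computed in 32-bit arithmetic first;
      // modeled here with unsigned math so the wraparound is well-defined
      // (3000 * 1000000 = 3e9 does not fit in int32_t, so the converted
      // result is typically a large negative number).
      jlong narrow = (int32_t)((uint32_t)timeout_ms * (uint32_t)MICROUNITS);

      // The patch widens before multiplying, keeping the math in 64 bits:
      jlong wide = (jlong)timeout_ms * MICROUNITS;

      printf("narrow: %lld, wide: %lld\n", (long long)narrow, (long long)wide);
      printf("timed out: %s\n", (time_to_sync > wide) ? "yes" : "no");
      return 0;
    }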
diff --git a/hotspot/src/share/vm/runtime/thread.cpp b/hotspot/src/share/vm/runtime/thread.cpp
@@ -2222,7 +2222,7 @@ void JavaThread::send_thread_stop(oop java_throwable) {
         tty->print_cr("Pending Async. exception installed of type: %s", InstanceKlass::cast(_pending_async_exception->klass())->external_name());
       }
       // for AbortVMOnException flag
-      NOT_PRODUCT(Exceptions::debug_check_abort(InstanceKlass::cast(_pending_async_exception->klass())->external_name()));
+      Exceptions::debug_check_abort(InstanceKlass::cast(_pending_async_exception->klass())->external_name());
     }
   }

diff --git a/hotspot/src/share/vm/runtime/vmStructs.cpp b/hotspot/src/share/vm/runtime/vmStructs.cpp
@@ -55,7 +55,6 @@
 #include "gc/shared/generation.hpp"
 #include "gc/shared/generationSpec.hpp"
 #include "gc/shared/space.hpp"
-#include "gc/shared/watermark.hpp"
 #include "interpreter/bytecodeInterpreter.hpp"
 #include "interpreter/bytecodes.hpp"
 #include "interpreter/interpreter.hpp"
@@ -405,7 +404,6 @@ typedef CompactHashtable<Symbol*, char> SymbolCompactHashTable;
   nonstatic_field(Method, _method_counters, MethodCounters*)  \
   nonstatic_field(Method, _access_flags, AccessFlags)  \
   nonstatic_field(Method, _vtable_index, int)  \
-  nonstatic_field(Method, _method_size, u2)  \
   nonstatic_field(Method, _intrinsic_id, u2)  \
   nonstatic_field(Method, _flags, u1)  \
   nonproduct_nonstatic_field(Method, _compiled_invocation_count, int)  \
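Each nonstatic_field row feeds the VMStructs table that out-of-process tools (the serviceability agent among them) use to locate HotSpot fields by name, so dropping Method::_method_size here has to be paired with removing its readers on the tool side. Roughly, and much simplified, a row boils down to a record like this (the real entry layout lives in runtime/vmStructs.hpp and carries more metadata):

    #include <cstddef>
    #include <cstdio>

    // Simplified stand-ins, for illustration only.
    struct Method {
      void* _constMethod;
      int   _vtable_index;
    };

    struct VMStructEntry {
      const char* typeName;     // "Method"
      const char* fieldName;    // "_vtable_index"
      const char* typeString;   // "int"
      size_t      offset;       // byte offset of the field within the type
    };

    // Roughly what a nonstatic_field(Method, _vtable_index, int) row becomes:
    static const VMStructEntry entry = {
      "Method", "_vtable_index", "int", offsetof(Method, _vtable_index)
    };

    int main() {
      // A debugger-side consumer looks fields up by (typeName, fieldName)
      // and reads target memory at base + offset; deleting a row like
      // _method_size therefore requires deleting the matching reads too.
      printf("%s::%s (%s) at offset %zu\n",
             entry.typeName, entry.fieldName, entry.typeString, entry.offset);
      return 0;
    }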
@@ -535,7 +533,7 @@ typedef CompactHashtable<Symbol*, char> SymbolCompactHashTable;
   \
   nonstatic_field(BlockOffsetArrayNonContigSpace, _unallocated_block, HeapWord*)  \
   \
-  nonstatic_field(CardGeneration, _rs, GenRemSet*)  \
+  nonstatic_field(CardGeneration, _rs, CardTableRS*)  \
   nonstatic_field(CardGeneration, _bts, BlockOffsetSharedArray*)  \
   nonstatic_field(CardGeneration, _shrink_factor, size_t)  \
   nonstatic_field(CardGeneration, _capacity_at_prologue, size_t)  \
@@ -625,8 +623,6 @@ typedef CompactHashtable<Symbol*, char> SymbolCompactHashTable;
   nonstatic_field(VirtualSpace, _lower_high, char*)  \
   nonstatic_field(VirtualSpace, _middle_high, char*)  \
   nonstatic_field(VirtualSpace, _upper_high, char*)  \
-  nonstatic_field(WaterMark, _point, HeapWord*)  \
-  nonstatic_field(WaterMark, _space, Space*)  \
   \
   /************************/  \
   /* PerfMemory - jvmstat */  \
@@ -1609,8 +1605,7 @@ typedef CompactHashtable<Symbol*, char> SymbolCompactHashTable;
   declare_type(CardTableModRefBS, ModRefBarrierSet)  \
   declare_type(CardTableModRefBSForCTRS, CardTableModRefBS)  \
   declare_toplevel_type(BarrierSet::Name)  \
-  declare_toplevel_type(GenRemSet)  \
-  declare_type(CardTableRS, GenRemSet)  \
+  declare_toplevel_type(CardTableRS)  \
   declare_toplevel_type(BlockOffsetSharedArray)  \
   declare_toplevel_type(BlockOffsetTable)  \
   declare_type(BlockOffsetArray, BlockOffsetTable)  \
@@ -1626,7 +1621,6 @@ typedef CompactHashtable<Symbol*, char> SymbolCompactHashTable;
   declare_toplevel_type(MemRegion)  \
   declare_toplevel_type(ThreadLocalAllocBuffer)  \
   declare_toplevel_type(VirtualSpace)  \
-  declare_toplevel_type(WaterMark)  \
   declare_toplevel_type(ObjPtrQueue)  \
   declare_toplevel_type(DirtyCardQueue)  \
   \
@@ -1634,7 +1628,6 @@ typedef CompactHashtable<Symbol*, char> SymbolCompactHashTable;
   \
   declare_toplevel_type(BarrierSet*)  \
   declare_toplevel_type(BlockOffsetSharedArray*)  \
-  declare_toplevel_type(GenRemSet*)  \
   declare_toplevel_type(CardTableRS*)  \
   declare_toplevel_type(CardTableModRefBS*)  \
   declare_toplevel_type(CardTableModRefBS**)  \
diff --git a/hotspot/src/share/vm/runtime/vmThread.cpp b/hotspot/src/share/vm/runtime/vmThread.cpp
@@ -419,7 +419,7 @@ void VMThread::loop() {

       // Support for self destruction
       if ((SelfDestructTimer != 0) && !is_error_reported() &&
-          (os::elapsedTime() > SelfDestructTimer * 60)) {
+          (os::elapsedTime() > (double)SelfDestructTimer * 60.0)) {
         tty->print_cr("VM self-destructed");
         exit(-1);
       }
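A small type-correctness fix: os::elapsedTime() returns seconds as a double, and doing the scaling in floating point also avoids intx overflow for absurdly large timer values, since 60 * max_intx does not fit in an intx. A sketch of the fixed predicate, with hypothetical naming:

    #include <cstdint>
    #include <cstdio>

    typedef intptr_t intx;

    // Scale the minutes flag in floating point, matching elapsedTime's type.
    static bool self_destruct_due(double elapsed_seconds, intx timer_minutes) {
      return timer_minutes != 0 &&
             elapsed_seconds > (double)timer_minutes * 60.0;
    }

    int main() {
      printf("%d\n", self_destruct_due(3700.0, 60));  // 1: past the hour mark
      printf("%d\n", self_destruct_due(3500.0, 60));  // 0: not yet
      return 0;
    }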
diff --git a/hotspot/src/share/vm/utilities/exceptions.cpp b/hotspot/src/share/vm/utilities/exceptions.cpp
@@ -145,7 +145,7 @@ void Exceptions::_throw(Thread* thread, const char* file, int line, Handle h_exc
               p2i(h_exception()), file, line, p2i(thread));
   }
   // for AbortVMOnException flag
-  NOT_PRODUCT(Exceptions::debug_check_abort(h_exception, message));
+  Exceptions::debug_check_abort(h_exception, message);

   // Check for special boot-strapping/vm-thread handling
   if (special_exception(thread, file, line, h_exception)) {
@@ -477,13 +477,12 @@ ExceptionMark::~ExceptionMark() {

 // ----------------------------------------------------------------------------------------

-#ifndef PRODUCT
 // caller frees value_string if necessary
 void Exceptions::debug_check_abort(const char *value_string, const char* message) {
   if (AbortVMOnException != NULL && value_string != NULL &&
       strstr(value_string, AbortVMOnException)) {
-    if (AbortVMOnExceptionMessage == NULL || message == NULL ||
-        strcmp(message, AbortVMOnExceptionMessage) == 0) {
+    if (AbortVMOnExceptionMessage == NULL || (message != NULL &&
+        strstr(message, AbortVMOnExceptionMessage))) {
       fatal("Saw %s, aborting", value_string);
     }
   }
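The message filter changes semantics twice in one line: previously an exception carrying no message at all would trigger the abort even when AbortVMOnExceptionMessage was set, and a present message had to match exactly; now a missing message never matches a configured filter, and matching is by substring. A side-by-side model of the two predicates:

    #include <cstring>
    #include <cstdio>

    // Old test: abort when no message filter could be applied, or on exact match.
    static bool old_matches(const char* flag_msg, const char* msg) {
      return flag_msg == NULL || msg == NULL || strcmp(msg, flag_msg) == 0;
    }

    // New test: a missing exception message never matches a configured filter,
    // and the filter is a substring match instead of an exact one.
    static bool new_matches(const char* flag_msg, const char* msg) {
      return flag_msg == NULL || (msg != NULL && strstr(msg, flag_msg) != NULL);
    }

    int main() {
      printf("%d\n", old_matches("index out of range", NULL));  // 1: aborted
      printf("%d\n", new_matches("index out of range", NULL));  // 0: no abort
      printf("%d\n", new_matches("out of range", "Index 5 out of range: 3"));  // 1
      return 0;
    }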
@@ -491,14 +490,17 @@ void Exceptions::debug_check_abort(const char *value_string, const char* message

 void Exceptions::debug_check_abort(Handle exception, const char* message) {
   if (AbortVMOnException != NULL) {
-    ResourceMark rm;
-    if (message == NULL && exception->is_a(SystemDictionary::Throwable_klass())) {
-      oop msg = java_lang_Throwable::message(exception);
-      if (msg != NULL) {
-        message = java_lang_String::as_utf8_string(msg);
-      }
-    }
-    debug_check_abort(InstanceKlass::cast(exception()->klass())->external_name(), message);
+    debug_check_abort_helper(exception, message);
   }
 }
-#endif
+
+void Exceptions::debug_check_abort_helper(Handle exception, const char* message) {
+  ResourceMark rm;
+  if (message == NULL && exception->is_a(SystemDictionary::Throwable_klass())) {
+    oop msg = java_lang_Throwable::message(exception);
+    if (msg != NULL) {
+      message = java_lang_String::as_utf8_string(msg);
+    }
+  }
+  debug_check_abort(InstanceKlass::cast(exception()->klass())->external_name(), message);
+}
diff --git a/hotspot/src/share/vm/utilities/exceptions.hpp b/hotspot/src/share/vm/utilities/exceptions.hpp
@@ -174,8 +174,9 @@ class Exceptions {
   static void print_exception_counts_on_error(outputStream* st);

   // for AbortVMOnException flag
-  NOT_PRODUCT(static void debug_check_abort(Handle exception, const char* message = NULL);)
-  NOT_PRODUCT(static void debug_check_abort(const char *value_string, const char* message = NULL);)
+  static void debug_check_abort(Handle exception, const char* message = NULL);
+  static void debug_check_abort_helper(Handle exception, const char* message = NULL);
+  static void debug_check_abort(const char *value_string, const char* message = NULL);
 };

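With the two flags now diagnostic rather than notproduct, these hooks must exist in product builds, so the NOT_PRODUCT wrappers come off the declarations here just as they did at the call sites in thread.cpp and exceptions.cpp above. For reference, NOT_PRODUCT compiles its argument out of product builds entirely, essentially:

    #include <cstdio>

    // Essentially how utilities/macros.hpp defines it.
    #ifdef PRODUCT
    #define NOT_PRODUCT(code)
    #else
    #define NOT_PRODUCT(code) code
    #endif

    int main() {
      NOT_PRODUCT(printf("present in debug builds only\n");)
      return 0;
    }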
Some files were not shown because too many files have changed in this diff.