Merge
commit 91140d95d2
.hgtags
.hgtags-top-repo
common/autoconf
corba
hotspot
.hgtags
src
cpu
jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities
jdk.vm.ci/share/classes
jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot
jdk.vm.ci.meta/src/jdk/vm/ci/meta
os
aix/vm
bsd/vm
linux/vm
posix/vm
solaris/vm
windows/vm
os_cpu
aix_ppc/vm
bsd_x86/vm
bsd_zero/vm
linux_aarch64/vm
linux_ppc/vm
linux_sparc/vm
linux_x86/vm
linux_zero/vm
solaris_sparc/vm
solaris_x86/vm
share/vm
c1
classfile
altHashing.cpp
altHashing.hpp
classFileParser.cpp
classFileParser.hpp
classLoaderData.cpp
klassFactory.cpp
klassFactory.hpp
systemDictionary.cpp
systemDictionary.hpp
verifier.cpp
vmSymbols.cpp
code
gc
cms
g1
g1CodeCacheRemSet.cpp
g1CodeCacheRemSet.hpp
g1CodeRootSetTable.hpp
g1CollectedHeap.cpp
g1CollectionSet.cpp
g1ConcurrentMark.cpp
g1DefaultPolicy.cpp
g1DefaultPolicy.hpp
g1MarkSweep.cpp
g1Policy.hpp
g1Predictions.cpp
g1Predictions.hpp
g1RootProcessor.cpp
g1RootProcessor.hpp
heapRegionManager.cpp
parallel
serial
shared
interpreter
jvmci
logging
log.cpp
logConfiguration.cpp
logConfiguration.hpp
logFileOutput.cpp
logFileOutput.hpp
logTagLevelExpression.cpp
logTagLevelExpression.hpp
oops
opto
2 .hgtags
2 .hgtags
@@ -376,3 +376,5 @@ e613affb88d178dc7c589f1679db113d589bddb4 jdk-9+130
4d2a15091124488080d65848b704e25599b2aaeb jdk-9+131
2e83d21d78cd9c1d52e6cd2599e9c8aa36ea1f52 jdk-9+132
e17429a7e843c4a4ed3651458d0f950970edcbcc jdk-9+133
a71210c0d9800eb6925b61ecd6198abd554f90ee jdk-9+134
e384420383a5b79fa0012ebcb25d8f83cff7f777 jdk-9+135

@@ -376,3 +376,5 @@ d94d54a3192fea79234c3ac55cd0b4052d45e954 jdk-9+130
8728756c2f70a79a90188f4019cfd6b9a275765c jdk-9+131
a24702d4d5ab0015a5c553ed57f66fce7d85155e jdk-9+132
be1218f792a450dfb5d4b1f82616b9d95a6a732e jdk-9+133
065724348690eda41fc69112278d8da6dcde548c jdk-9+134
82b94cb5f342319d2cda77f9fa59703ad7fde576 jdk-9+135
@@ -5095,7 +5095,7 @@ VS_SDK_PLATFORM_NAME_2013=
#CUSTOM_AUTOCONF_INCLUDE

# Do not change or remove the following line, it is needed for consistency checks:
DATE_WHEN_GENERATED=1470863189
DATE_WHEN_GENERATED=1472718471

###############################################################################
#

@@ -15944,6 +15944,8 @@ $as_echo "$COMPILE_TYPE" >&6; }
HOTSPOT_TARGET_CPU_DEFINE=S390
elif test "x$OPENJDK_TARGET_CPU" = xs390x; then
HOTSPOT_TARGET_CPU_DEFINE=S390
elif test "x$OPENJDK_TARGET_CPU" != x; then
HOTSPOT_TARGET_CPU_DEFINE=$(echo $OPENJDK_TARGET_CPU | tr a-z A-Z)
fi

@@ -16117,6 +16119,8 @@ $as_echo "$COMPILE_TYPE" >&6; }
HOTSPOT_BUILD_CPU_DEFINE=S390
elif test "x$OPENJDK_BUILD_CPU" = xs390x; then
HOTSPOT_BUILD_CPU_DEFINE=S390
elif test "x$OPENJDK_BUILD_CPU" != x; then
HOTSPOT_BUILD_CPU_DEFINE=$(echo $OPENJDK_BUILD_CPU | tr a-z A-Z)
fi

@@ -454,6 +454,8 @@ AC_DEFUN([PLATFORM_SETUP_LEGACY_VARS_HELPER],
HOTSPOT_$1_CPU_DEFINE=S390
elif test "x$OPENJDK_$1_CPU" = xs390x; then
HOTSPOT_$1_CPU_DEFINE=S390
elif test "x$OPENJDK_$1_CPU" != x; then
HOTSPOT_$1_CPU_DEFINE=$(echo $OPENJDK_$1_CPU | tr a-z A-Z)
fi
AC_SUBST(HOTSPOT_$1_CPU_DEFINE)
@@ -376,3 +376,5 @@ c3e83ccab3bb1733ae903d681879a33f85ed465c jdk-9+129
f7e1d5337c2e550fe553df7a3886bbed80292ecd jdk-9+131
1ab4b9399c4cba584f66c1c088188f2f565fbf9c jdk-9+132
2021bfedf1c478a4808a7711a6090682a12f4c0e jdk-9+133
1a497f5ca0cfd88115cc7daa8af8a62b8741caf2 jdk-9+134
094d0db606db976045f594dba47d4593b715cc81 jdk-9+135

@@ -536,3 +536,5 @@ e96b34b76d863ed1fa04e0eeb3f297ac17b490fd jdk-9+129
943bf73b49c33c2d7cbd796f6a4ae3c7a00ae932 jdk-9+131
713951c08aa26813375175c2ab6cc99ff2a56903 jdk-9+132
a25e0fb6033245ab075136e744d362ce765464cd jdk-9+133
b8b694c6b4d2ab0939aed7adaf0eec1ac321a085 jdk-9+134
3b1c4562953db47e36b237a500f368d5c9746d47 jdk-9+135
@@ -57,10 +57,12 @@ define_pd_global(intx, InlineSmallCode, 1500);

#ifdef _LP64
// Stack slots are 2X larger in LP64 than in the 32 bit VM.
define_pd_global(intx, CompilerThreadStackSize, 1024);
define_pd_global(intx, ThreadStackSize, 1024);
define_pd_global(intx, VMThreadStackSize, 1024);
#define DEFAULT_STACK_SHADOW_PAGES (20 DEBUG_ONLY(+2))
#else
define_pd_global(intx, CompilerThreadStackSize, 512);
define_pd_global(intx, ThreadStackSize, 512);
define_pd_global(intx, VMThreadStackSize, 512);
#define DEFAULT_STACK_SHADOW_PAGES (6 DEBUG_ONLY(+2))
@@ -2921,6 +2921,26 @@ enc_class Fast_Unlock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
__ cmp( Rold, O7 );
%}

// raw int cas without using tmp register for compareAndExchange
enc_class enc_casi_exch( iRegP mem, iRegL old, iRegL new) %{
Register Rmem = reg_to_register_object($mem$$reg);
Register Rold = reg_to_register_object($old$$reg);
Register Rnew = reg_to_register_object($new$$reg);

MacroAssembler _masm(&cbuf);
__ cas(Rmem, Rold, Rnew);
%}

// 64-bit cas without using tmp register for compareAndExchange
enc_class enc_casx_exch( iRegP mem, iRegL old, iRegL new) %{
Register Rmem = reg_to_register_object($mem$$reg);
Register Rold = reg_to_register_object($old$$reg);
Register Rnew = reg_to_register_object($new$$reg);

MacroAssembler _masm(&cbuf);
__ casx(Rmem, Rold, Rnew);
%}

enc_class enc_lflags_ne_to_boolean( iRegI res ) %{
Register Rres = reg_to_register_object($res$$reg);

@@ -7105,6 +7125,7 @@ instruct storeLConditional( iRegP mem_ptr, iRegL oldval, g3RegL newval, flagsReg
instruct compareAndSwapL_bool(iRegP mem_ptr, iRegL oldval, iRegL newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
predicate(VM_Version::supports_cx8());
match(Set res (CompareAndSwapL mem_ptr (Binary oldval newval)));
match(Set res (WeakCompareAndSwapL mem_ptr (Binary oldval newval)));
effect( USE mem_ptr, KILL ccr, KILL tmp1);
format %{
"MOV $newval,O7\n\t"

@@ -7121,6 +7142,7 @@ instruct compareAndSwapL_bool(iRegP mem_ptr, iRegL oldval, iRegL newval, iRegI r

instruct compareAndSwapI_bool(iRegP mem_ptr, iRegI oldval, iRegI newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
match(Set res (WeakCompareAndSwapI mem_ptr (Binary oldval newval)));
effect( USE mem_ptr, KILL ccr, KILL tmp1);
format %{
"MOV $newval,O7\n\t"

@@ -7139,6 +7161,7 @@ instruct compareAndSwapP_bool(iRegP mem_ptr, iRegP oldval, iRegP newval, iRegI r
predicate(VM_Version::supports_cx8());
#endif
match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
match(Set res (WeakCompareAndSwapP mem_ptr (Binary oldval newval)));
effect( USE mem_ptr, KILL ccr, KILL tmp1);
format %{
"MOV $newval,O7\n\t"

@@ -7159,6 +7182,7 @@ instruct compareAndSwapP_bool(iRegP mem_ptr, iRegP oldval, iRegP newval, iRegI r

instruct compareAndSwapN_bool(iRegP mem_ptr, iRegN oldval, iRegN newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
match(Set res (WeakCompareAndSwapN mem_ptr (Binary oldval newval)));
effect( USE mem_ptr, KILL ccr, KILL tmp1);
format %{
"MOV $newval,O7\n\t"

@@ -7172,6 +7196,54 @@ instruct compareAndSwapN_bool(iRegP mem_ptr, iRegN oldval, iRegN newval, iRegI r
ins_pipe( long_memory_op );
%}

instruct compareAndExchangeI(iRegP mem_ptr, iRegI oldval, iRegI newval)
%{
match(Set newval (CompareAndExchangeI mem_ptr (Binary oldval newval)));
effect( USE mem_ptr );

format %{
"CASA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr] and set $newval=[$mem_ptr]\n\t"
%}
ins_encode( enc_casi_exch(mem_ptr, oldval, newval) );
ins_pipe( long_memory_op );
%}

instruct compareAndExchangeL(iRegP mem_ptr, iRegL oldval, iRegL newval)
%{
match(Set newval (CompareAndExchangeL mem_ptr (Binary oldval newval)));
effect( USE mem_ptr );

format %{
"CASXA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr] and set $newval=[$mem_ptr]\n\t"
%}
ins_encode( enc_casx_exch(mem_ptr, oldval, newval) );
ins_pipe( long_memory_op );
%}

instruct compareAndExchangeP(iRegP mem_ptr, iRegP oldval, iRegP newval)
%{
match(Set newval (CompareAndExchangeP mem_ptr (Binary oldval newval)));
effect( USE mem_ptr );

format %{
"CASXA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr] and set $newval=[$mem_ptr]\n\t"
%}
ins_encode( enc_casx_exch(mem_ptr, oldval, newval) );
ins_pipe( long_memory_op );
%}

instruct compareAndExchangeN(iRegP mem_ptr, iRegN oldval, iRegN newval)
%{
match(Set newval (CompareAndExchangeN mem_ptr (Binary oldval newval)));
effect( USE mem_ptr );

format %{
"CASA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr] and set $newval=[$mem_ptr]\n\t"
%}
ins_encode( enc_casi_exch(mem_ptr, oldval, newval) );
ins_pipe( long_memory_op );
%}

instruct xchgI( memory mem, iRegI newval) %{
match(Set newval (GetAndSetI mem newval));
format %{ "SWAP [$mem],$newval" %}
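Note on the new instructs above: the existing compareAndSwap*_bool forms produce a boolean success flag, while the compareAndExchange* forms leave the value actually observed in memory in $newval, which is why they encode as a bare CASA/CASXA with no tmp register. A minimal C++ sketch of that semantic difference, using std::atomic as a stand-in for the generated instructions (illustrative only, not HotSpot code):

    #include <atomic>
    #include <cstdio>

    int main() {
        std::atomic<long> cell{42};

        // compareAndSwap-style: the caller only learns whether the update happened.
        long expected = 42;
        bool swapped = cell.compare_exchange_strong(expected, 100);

        // compareAndExchange-style: the caller also learns the value that was in memory.
        long witnessed = 100;                        // value we expect to see now
        cell.compare_exchange_strong(witnessed, 7);  // on failure, 'witnessed' is updated to the current value
        std::printf("swapped=%d witnessed=%ld now=%ld\n", swapped, witnessed, cell.load());
        return 0;
    }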
@@ -65,10 +65,10 @@ define_pd_global(intx, InlineSmallCode, 1000);
#ifdef AMD64
// Very large C++ stack frames using solaris-amd64 optimized builds
// due to lack of optimization caused by C++ compiler bugs
#define DEFAULT_STACK_SHADOW_PAGES (NOT_WIN64(20) WIN64_ONLY(6) DEBUG_ONLY(+2))
#define DEFAULT_STACK_SHADOW_PAGES (NOT_WIN64(20) WIN64_ONLY(7) DEBUG_ONLY(+2))
// For those clients that do not use write socket, we allow
// the min range value to be below that of the default
#define MIN_STACK_SHADOW_PAGES (NOT_WIN64(10) WIN64_ONLY(6) DEBUG_ONLY(+2))
#define MIN_STACK_SHADOW_PAGES (NOT_WIN64(10) WIN64_ONLY(7) DEBUG_ONLY(+2))
#else
#define DEFAULT_STACK_SHADOW_PAGES (4 DEBUG_ONLY(+5))
#define MIN_STACK_SHADOW_PAGES DEFAULT_STACK_SHADOW_PAGES
@@ -8131,8 +8131,7 @@ void MacroAssembler::has_negatives(Register ary1, Register len,
jmp(FALSE_LABEL);

clear_vector_masking(); // closing of the stub context for programming mask registers
}
else {
} else {
movl(result, len); // copy

if (UseAVX == 2 && UseSSE >= 2) {

@@ -8169,8 +8168,7 @@ void MacroAssembler::has_negatives(Register ary1, Register len,
bind(COMPARE_TAIL); // len is zero
movl(len, result);
// Fallthru to tail compare
}
else if (UseSSE42Intrinsics) {
} else if (UseSSE42Intrinsics) {
// With SSE4.2, use double quad vector compare
Label COMPARE_WIDE_VECTORS, COMPARE_TAIL;

@@ -10748,7 +10746,10 @@ void MacroAssembler::char_array_compress(Register src, Register dst, Register le
// save length for return
push(len);

// 8165287: EVEX version disabled for now, needs to be refactored as
// it is returning incorrect results.
if ((UseAVX > 2) && // AVX512
0 &&
VM_Version::supports_avx512vlbw() &&
VM_Version::supports_bmi2()) {

@@ -11067,10 +11068,11 @@ void MacroAssembler::byte_array_inflate(Register src, Register dst, Register len

bind(below_threshold);
bind(copy_new_tail);
if (UseAVX > 2) {
if ((UseAVX > 2) &&
VM_Version::supports_avx512vlbw() &&
VM_Version::supports_bmi2()) {
movl(tmp2, len);
}
else {
} else {
movl(len, tmp2);
}
andl(tmp2, 0x00000007);
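The char_array_compress hunk disables the AVX-512 path by adding a constant-false "0 &&" term to the feature check, with the bug id (8165287) recorded in a comment rather than deleting the code. The same idiom in isolation (an illustrative C++ sketch, not the HotSpot code):

    // Keeping the original feature test in place but short-circuited makes the
    // disabled path easy to find and to re-enable once the underlying bug is fixed.
    static bool use_evex_compress_path(bool use_avx512, bool has_avx512vlbw, bool has_bmi2) {
        return use_avx512 &&
               0 &&                 // disabled pending the bug noted above; remove this line to re-enable
               has_avx512vlbw &&
               has_bmi2;
    }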
@@ -65,4 +65,7 @@ public class GrowableArray<T> extends GenericGrowableArray {
super(addr);
virtualConstructor = v;
}
public Address getData() {
return dataField.getValue(getAddress());
}
}
@@ -366,8 +366,8 @@ final class CompilerToVM {
* {@code exactReceiver}.
*
* @param caller the caller or context type used to perform access checks
* @return the link-time resolved method (might be abstract) or {@code 0} if it can not be
* linked
* @return the link-time resolved method (might be abstract) or {@code null} if it is either a
* signature polymorphic method or can not be linked.
*/
native HotSpotResolvedJavaMethodImpl resolveMethod(HotSpotResolvedObjectTypeImpl exactReceiver, HotSpotResolvedJavaMethodImpl method, HotSpotResolvedObjectTypeImpl caller);
2 hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotConstantPool.java
@@ -722,7 +722,7 @@ final class HotSpotConstantPool implements ConstantPool, MetaspaceWrapperObject
/**
* Determines if {@code type} contains signature polymorphic methods.
*/
private static boolean isSignaturePolymorphicHolder(final HotSpotResolvedObjectTypeImpl type) {
static boolean isSignaturePolymorphicHolder(final ResolvedJavaType type) {
String name = type.getName();
if (signaturePolymorphicHolders == null) {
signaturePolymorphicHolders = compilerToVM().getSignaturePolymorphicHolders();
@@ -24,6 +24,7 @@ package jdk.vm.ci.hotspot;

import static java.util.Objects.requireNonNull;
import static jdk.vm.ci.hotspot.CompilerToVM.compilerToVM;
import static jdk.vm.ci.hotspot.HotSpotConstantPool.isSignaturePolymorphicHolder;
import static jdk.vm.ci.hotspot.HotSpotJVMCIRuntime.runtime;
import static jdk.vm.ci.hotspot.HotSpotVMConfig.config;
import static jdk.vm.ci.hotspot.UnsafeAccess.UNSAFE;

@@ -426,7 +427,7 @@ final class HotSpotResolvedObjectTypeImpl extends HotSpotResolvedJavaType implem
// Methods can only be resolved against concrete types
return null;
}
if (method.isConcrete() && method.getDeclaringClass().equals(this) && method.isPublic()) {
if (method.isConcrete() && method.getDeclaringClass().equals(this) && method.isPublic() && !isSignaturePolymorphicHolder(method.getDeclaringClass())) {
return method;
}
if (!method.getDeclaringClass().isAssignableFrom(this)) {
@@ -209,8 +209,8 @@ public interface ResolvedJavaType extends JavaType, ModifiersProvider, Annotated
*
* @param method the method to select the implementation of
* @param callerType the caller or context type used to perform access checks
* @return the method that would be selected at runtime (might be abstract) or {@code null} if
* it can not be resolved
* @return the link-time resolved method (might be abstract) or {@code null} if it is either a
* signature polymorphic method or can not be linked.
*/
ResolvedJavaMethod resolveMethod(ResolvedJavaMethod method, ResolvedJavaType callerType);
@@ -847,7 +847,8 @@ static void *thread_native_entry(Thread *thread) {
return 0;
}

bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
bool os::create_thread(Thread* thread, ThreadType thr_type,
size_t req_stack_size) {

assert(thread->osthread() == NULL, "caller responsible");

@@ -880,37 +881,12 @@ bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");

// calculate stack size if it's not specified by caller
if (stack_size == 0) {
stack_size = os::Aix::default_stack_size(thr_type);

switch (thr_type) {
case os::java_thread:
// Java threads use ThreadStackSize whose default value can be changed with the flag -Xss.
assert(JavaThread::stack_size_at_create() > 0, "this should be set");
stack_size = JavaThread::stack_size_at_create();
break;
case os::compiler_thread:
if (CompilerThreadStackSize > 0) {
stack_size = (size_t)(CompilerThreadStackSize * K);
break;
} // else fall through:
// use VMThreadStackSize if CompilerThreadStackSize is not defined
case os::vm_thread:
case os::pgc_thread:
case os::cgc_thread:
case os::watcher_thread:
if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
break;
}
}

stack_size = MAX2(stack_size, os::Aix::min_stack_allowed);
size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size);
pthread_attr_setstacksize(&attr, stack_size);

pthread_t tid;
int ret = pthread_create(&tid, &attr, (void* (*)(void*)) thread_native_entry, thread);

char buf[64];
if (ret == 0) {
log_info(os, thread)("Thread started (pthread id: " UINTX_FORMAT ", attributes: %s). ",

@@ -3593,32 +3569,11 @@ jint os::init_2(void) {
Aix::signal_sets_init();
Aix::install_signal_handlers();

// Check minimum allowable stack size for thread creation and to initialize
// the java system classes, including StackOverflowError - depends on page
// size. Add two 4K pages for compiler2 recursion in main thread.
// Add in 4*BytesPerWord 4K pages to account for VM stack during
// class initialization depending on 32 or 64 bit VM.
os::Aix::min_stack_allowed = MAX2(os::Aix::min_stack_allowed,
JavaThread::stack_guard_zone_size() +
JavaThread::stack_shadow_zone_size() +
(4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);

os::Aix::min_stack_allowed = align_size_up(os::Aix::min_stack_allowed, os::vm_page_size());

size_t threadStackSizeInBytes = ThreadStackSize * K;
if (threadStackSizeInBytes != 0 &&
threadStackSizeInBytes < os::Aix::min_stack_allowed) {
tty->print_cr("\nThe stack size specified is too small, "
"Specify at least %dk",
os::Aix::min_stack_allowed / K);
// Check and sets minimum stack sizes against command line options
if (Posix::set_minimum_stack_sizes() == JNI_ERR) {
return JNI_ERR;
}

// Make the stack size a multiple of the page size so that
// the yellow/red zones can be guarded.
// Note that this can be 0, if no default stacksize was set.
JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));

if (UseNUMA) {
UseNUMA = false;
warning("NUMA optimizations are not available on this OS.");
@@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*

@@ -140,14 +140,6 @@ class Aix {
// libpthread version string
static void libpthread_init();

// Minimum stack size a thread can be created with (allowing
// the VM to completely create the thread and enter user code)
static size_t min_stack_allowed;

// Return default stack size or guard size for the specified thread type
static size_t default_stack_size(os::ThreadType thr_type);
static size_t default_guard_size(os::ThreadType thr_type);

// Function returns true if we run on OS/400 (pase), false if we run
// on AIX.
static bool on_pase() {
@@ -734,7 +734,8 @@ static void *thread_native_entry(Thread *thread) {
return 0;
}

bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
bool os::create_thread(Thread* thread, ThreadType thr_type,
size_t req_stack_size) {
assert(thread->osthread() == NULL, "caller responsible");

// Allocate the OSThread object

@@ -757,32 +758,7 @@ bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

// calculate stack size if it's not specified by caller
if (stack_size == 0) {
stack_size = os::Bsd::default_stack_size(thr_type);

switch (thr_type) {
case os::java_thread:
// Java threads use ThreadStackSize which default value can be
// changed with the flag -Xss
assert(JavaThread::stack_size_at_create() > 0, "this should be set");
stack_size = JavaThread::stack_size_at_create();
break;
case os::compiler_thread:
if (CompilerThreadStackSize > 0) {
stack_size = (size_t)(CompilerThreadStackSize * K);
break;
} // else fall through:
// use VMThreadStackSize if CompilerThreadStackSize is not defined
case os::vm_thread:
case os::pgc_thread:
case os::cgc_thread:
case os::watcher_thread:
if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
break;
}
}

stack_size = MAX2(stack_size, os::Bsd::min_stack_allowed);
size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size);
pthread_attr_setstacksize(&attr, stack_size);

ThreadState state;

@@ -3502,32 +3478,11 @@ jint os::init_2(void) {
Bsd::signal_sets_init();
Bsd::install_signal_handlers();

// Check minimum allowable stack size for thread creation and to initialize
// the java system classes, including StackOverflowError - depends on page
// size. Add two 4K pages for compiler2 recursion in main thread.
// Add in 4*BytesPerWord 4K pages to account for VM stack during
// class initialization depending on 32 or 64 bit VM.
os::Bsd::min_stack_allowed = MAX2(os::Bsd::min_stack_allowed,
JavaThread::stack_guard_zone_size() +
JavaThread::stack_shadow_zone_size() +
(4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);

os::Bsd::min_stack_allowed = align_size_up(os::Bsd::min_stack_allowed, os::vm_page_size());

size_t threadStackSizeInBytes = ThreadStackSize * K;
if (threadStackSizeInBytes != 0 &&
threadStackSizeInBytes < os::Bsd::min_stack_allowed) {
tty->print_cr("\nThe stack size specified is too small, "
"Specify at least %dk",
os::Bsd::min_stack_allowed/ K);
// Check and sets minimum stack sizes against command line options
if (Posix::set_minimum_stack_sizes() == JNI_ERR) {
return JNI_ERR;
}

// Make the stack size a multiple of the page size so that
// the yellow/red zones can be guarded.
JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
vm_page_size()));

if (MaxFDLimit) {
// set the number of file descriptors to max. print out error
// if getrlimit/setrlimit fails but continue regardless.
@@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -120,14 +120,6 @@ class Bsd {
static struct sigaction *get_chained_signal_action(int sig);
static bool chained_handler(int sig, siginfo_t* siginfo, void* context);

// Minimum stack size a thread can be created with (allowing
// the VM to completely create the thread and enter user code)
static size_t min_stack_allowed;

// Return default stack size or guard size for the specified thread type
static size_t default_stack_size(os::ThreadType thr_type);
static size_t default_guard_size(os::ThreadType thr_type);

// Real-time clock functions
static void clock_init(void);
@@ -701,7 +701,7 @@ static void *thread_native_entry(Thread *thread) {
}

bool os::create_thread(Thread* thread, ThreadType thr_type,
size_t stack_size) {
size_t req_stack_size) {
assert(thread->osthread() == NULL, "caller responsible");

// Allocate the OSThread object

@@ -723,34 +723,8 @@ bool os::create_thread(Thread* thread, ThreadType thr_type,
pthread_attr_init(&attr);
pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

// stack size
// calculate stack size if it's not specified by caller
if (stack_size == 0) {
stack_size = os::Linux::default_stack_size(thr_type);

switch (thr_type) {
case os::java_thread:
// Java threads use ThreadStackSize which default value can be
// changed with the flag -Xss
assert(JavaThread::stack_size_at_create() > 0, "this should be set");
stack_size = JavaThread::stack_size_at_create();
break;
case os::compiler_thread:
if (CompilerThreadStackSize > 0) {
stack_size = (size_t)(CompilerThreadStackSize * K);
break;
} // else fall through:
// use VMThreadStackSize if CompilerThreadStackSize is not defined
case os::vm_thread:
case os::pgc_thread:
case os::cgc_thread:
case os::watcher_thread:
if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
break;
}
}

stack_size = MAX2(stack_size, os::Linux::min_stack_allowed);
size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size);
pthread_attr_setstacksize(&attr, stack_size);

// glibc guard page

@@ -956,10 +930,9 @@ static bool find_vma(address addr, address* vma_low, address* vma_high) {
// bogus value for initial thread.
void os::Linux::capture_initial_stack(size_t max_size) {
// stack size is the easy part, get it from RLIMIT_STACK
size_t stack_size;
struct rlimit rlim;
getrlimit(RLIMIT_STACK, &rlim);
stack_size = rlim.rlim_cur;
size_t stack_size = rlim.rlim_cur;

// 6308388: a bug in ld.so will relocate its own .data section to the
// lower end of primordial stack; reduce ulimit -s value a little bit

@@ -2875,7 +2848,7 @@ void os::Linux::rebuild_cpu_to_node_map() {
// in the library.
const size_t BitsPerCLong = sizeof(long) * CHAR_BIT;

size_t cpu_num = os::active_processor_count();
size_t cpu_num = processor_count();
size_t cpu_map_size = NCPUS / BitsPerCLong;
size_t cpu_map_valid_size =
MIN2((cpu_num + BitsPerCLong - 1) / BitsPerCLong, cpu_map_size);

@@ -4793,32 +4766,10 @@ jint os::init_2(void) {
Linux::signal_sets_init();
Linux::install_signal_handlers();

// Check minimum allowable stack size for thread creation and to initialize
// the java system classes, including StackOverflowError - depends on page
// size. Add two 4K pages for compiler2 recursion in main thread.
// Add in 4*BytesPerWord 4K pages to account for VM stack during
// class initialization depending on 32 or 64 bit VM.
os::Linux::min_stack_allowed = MAX2(os::Linux::min_stack_allowed,
JavaThread::stack_guard_zone_size() +
JavaThread::stack_shadow_zone_size() +
(4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);

os::Linux::min_stack_allowed = align_size_up(os::Linux::min_stack_allowed, os::vm_page_size());

size_t threadStackSizeInBytes = ThreadStackSize * K;
if (threadStackSizeInBytes != 0 &&
threadStackSizeInBytes < os::Linux::min_stack_allowed) {
tty->print_cr("\nThe stack size specified is too small, "
"Specify at least " SIZE_FORMAT "k",
os::Linux::min_stack_allowed/ K);
// Check and sets minimum stack sizes against command line options
if (Posix::set_minimum_stack_sizes() == JNI_ERR) {
return JNI_ERR;
}

// Make the stack size a multiple of the page size so that
// the yellow/red zones can be guarded.
JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
vm_page_size()));

Linux::capture_initial_stack(JavaThread::stack_size_at_create());

#if defined(IA32)
@@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -170,12 +170,8 @@ class Linux {
static void libpthread_init();
static bool libnuma_init();
static void* libnuma_dlsym(void* handle, const char* name);
// Minimum stack size a thread can be created with (allowing
// the VM to completely create the thread and enter user code)
static size_t min_stack_allowed;

// Return default stack size or guard size for the specified thread type
static size_t default_stack_size(os::ThreadType thr_type);
// Return default guard size for the specified thread type
static size_t default_guard_size(os::ThreadType thr_type);

static void capture_initial_stack(size_t max_size);
@@ -1099,6 +1099,123 @@ char* os::Posix::describe_pthread_attr(char* buf, size_t buflen, const pthread_a
return buf;
}

// Check minimum allowable stack sizes for thread creation and to initialize
// the java system classes, including StackOverflowError - depends on page
// size. Add two 4K pages for compiler2 recursion in main thread.
// Add in 4*BytesPerWord 4K pages to account for VM stack during
// class initialization depending on 32 or 64 bit VM.
jint os::Posix::set_minimum_stack_sizes() {
_java_thread_min_stack_allowed = MAX2(_java_thread_min_stack_allowed,
JavaThread::stack_guard_zone_size() +
JavaThread::stack_shadow_zone_size() +
(4 * BytesPerWord COMPILER2_PRESENT(+ 2)) * 4 * K);

_java_thread_min_stack_allowed = align_size_up(_java_thread_min_stack_allowed, vm_page_size());

size_t stack_size_in_bytes = ThreadStackSize * K;
if (stack_size_in_bytes != 0 &&
stack_size_in_bytes < _java_thread_min_stack_allowed) {
// The '-Xss' and '-XX:ThreadStackSize=N' options both set
// ThreadStackSize so we go with "Java thread stack size" instead
// of "ThreadStackSize" to be more friendly.
tty->print_cr("\nThe Java thread stack size specified is too small. "
"Specify at least " SIZE_FORMAT "k",
_java_thread_min_stack_allowed / K);
return JNI_ERR;
}

#ifdef SOLARIS
// For 64kbps there will be a 64kb page size, which makes
// the usable default stack size quite a bit less. Increase the
// stack for 64kb (or any > than 8kb) pages, this increases
// virtual memory fragmentation (since we're not creating the
// stack on a power of 2 boundary. The real fix for this
// should be to fix the guard page mechanism.

if (vm_page_size() > 8*K) {
stack_size_in_bytes = (stack_size_in_bytes != 0)
? stack_size_in_bytes +
JavaThread::stack_red_zone_size() +
JavaThread::stack_yellow_zone_size()
: 0;
ThreadStackSize = stack_size_in_bytes/K;
}
#endif // SOLARIS

// Make the stack size a multiple of the page size so that
// the yellow/red zones can be guarded.
JavaThread::set_stack_size_at_create(round_to(stack_size_in_bytes,
vm_page_size()));

_compiler_thread_min_stack_allowed = align_size_up(_compiler_thread_min_stack_allowed, vm_page_size());

stack_size_in_bytes = CompilerThreadStackSize * K;
if (stack_size_in_bytes != 0 &&
stack_size_in_bytes < _compiler_thread_min_stack_allowed) {
tty->print_cr("\nThe CompilerThreadStackSize specified is too small. "
"Specify at least " SIZE_FORMAT "k",
_compiler_thread_min_stack_allowed / K);
return JNI_ERR;
}

_vm_internal_thread_min_stack_allowed = align_size_up(_vm_internal_thread_min_stack_allowed, vm_page_size());

stack_size_in_bytes = VMThreadStackSize * K;
if (stack_size_in_bytes != 0 &&
stack_size_in_bytes < _vm_internal_thread_min_stack_allowed) {
tty->print_cr("\nThe VMThreadStackSize specified is too small. "
"Specify at least " SIZE_FORMAT "k",
_vm_internal_thread_min_stack_allowed / K);
return JNI_ERR;
}
return JNI_OK;
}

// Called when creating the thread. The minimum stack sizes have already been calculated
size_t os::Posix::get_initial_stack_size(ThreadType thr_type, size_t req_stack_size) {
size_t stack_size;
if (req_stack_size == 0) {
stack_size = default_stack_size(thr_type);
} else {
stack_size = req_stack_size;
}

switch (thr_type) {
case os::java_thread:
// Java threads use ThreadStackSize which default value can be
// changed with the flag -Xss
if (req_stack_size == 0 && JavaThread::stack_size_at_create() > 0) {
// no requested size and we have a more specific default value
stack_size = JavaThread::stack_size_at_create();
}
stack_size = MAX2(stack_size,
_java_thread_min_stack_allowed);
break;
case os::compiler_thread:
if (req_stack_size == 0 && CompilerThreadStackSize > 0) {
// no requested size and we have a more specific default value
stack_size = (size_t)(CompilerThreadStackSize * K);
}
stack_size = MAX2(stack_size,
_compiler_thread_min_stack_allowed);
break;
case os::vm_thread:
case os::pgc_thread:
case os::cgc_thread:
case os::watcher_thread:
default: // presume the unknown thr_type is a VM internal
if (req_stack_size == 0 && VMThreadStackSize > 0) {
// no requested size and we have a more specific default value
stack_size = (size_t)(VMThreadStackSize * K);
}

stack_size = MAX2(stack_size,
_vm_internal_thread_min_stack_allowed);
break;
}

return stack_size;
}

os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() {
assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread");
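Taken together, set_minimum_stack_sizes() and get_initial_stack_size() give every POSIX port one precedence rule that used to be duplicated per OS: an explicit size requested by the caller wins, otherwise the matching flag (ThreadStackSize, CompilerThreadStackSize or VMThreadStackSize), otherwise default_stack_size(thr_type), and the result is never allowed below the per-type minimum. A condensed C++ sketch of that rule (illustrative only; the function and parameter names here are hypothetical, not the HotSpot ones):

    #include <algorithm>
    #include <cstddef>

    // Mirrors the precedence in os::Posix::get_initial_stack_size, nothing more.
    size_t pick_stack_size(size_t requested, size_t flag_value,
                           size_t type_default, size_t type_minimum) {
        size_t size = requested != 0  ? requested     // caller asked for a specific size
                    : flag_value != 0 ? flag_value    // -Xss / -XX:*StackSize supplied one
                    :                   type_default; // platform default for this thread type
        return std::max(size, type_minimum);          // clamp up to the per-type minimum
    }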
@@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -42,7 +42,18 @@ protected:
static void print_libversion_info(outputStream* st);
static void print_load_average(outputStream* st);

// Minimum stack size a thread can be created with (allowing
// the VM to completely create the thread and enter user code)
static size_t _compiler_thread_min_stack_allowed;
static size_t _java_thread_min_stack_allowed;
static size_t _vm_internal_thread_min_stack_allowed;

public:
// Return default stack size for the specified thread type
static size_t default_stack_size(os::ThreadType thr_type);
// Check and sets minimum stack sizes
static jint set_minimum_stack_sizes();
static size_t get_initial_stack_size(ThreadType thr_type, size_t req_stack_size);

// Returns true if signal is valid.
static bool is_valid_signal(int sig);
@@ -917,8 +917,15 @@ static char* describe_thr_create_attributes(char* buf, size_t buflen,
return buf;
}

// return default stack size for thr_type
size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
// default stack size when not specified by caller is 1M (2M for LP64)
size_t s = (BytesPerWord >> 2) * K * K;
return s;
}

bool os::create_thread(Thread* thread, ThreadType thr_type,
size_t stack_size) {
size_t req_stack_size) {
// Allocate the OSThread object
OSThread* osthread = new OSThread(NULL, NULL);
if (osthread == NULL) {

@@ -953,31 +960,8 @@ bool os::create_thread(Thread* thread, ThreadType thr_type,
tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
}

// Calculate stack size if it's not specified by caller.
if (stack_size == 0) {
// The default stack size 1M (2M for LP64).
stack_size = (BytesPerWord >> 2) * K * K;

switch (thr_type) {
case os::java_thread:
// Java threads use ThreadStackSize which default value can be changed with the flag -Xss
if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
break;
case os::compiler_thread:
if (CompilerThreadStackSize > 0) {
stack_size = (size_t)(CompilerThreadStackSize * K);
break;
} // else fall through:
// use VMThreadStackSize if CompilerThreadStackSize is not defined
case os::vm_thread:
case os::pgc_thread:
case os::cgc_thread:
case os::watcher_thread:
if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
break;
}
}
stack_size = MAX2(stack_size, os::Solaris::min_stack_allowed);
// calculate stack size if it's not specified by caller
size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size);

// Initial state is ALLOCATED but not INITIALIZED
osthread->set_state(ALLOCATED);

@@ -4400,7 +4384,12 @@ void os::init(void) {
// Constant minimum stack size allowed. It must be at least
// the minimum of what the OS supports (thr_min_stack()), and
// enough to allow the thread to get to user bytecode execution.
Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed);
Posix::_compiler_thread_min_stack_allowed = MAX2(thr_min_stack(),
Posix::_compiler_thread_min_stack_allowed);
Posix::_java_thread_min_stack_allowed = MAX2(thr_min_stack(),
Posix::_java_thread_min_stack_allowed);
Posix::_vm_internal_thread_min_stack_allowed = MAX2(thr_min_stack(),
Posix::_vm_internal_thread_min_stack_allowed);

// dynamic lookup of functions that may not be available in our lowest
// supported Solaris release

@@ -4445,47 +4434,11 @@ jint os::init_2(void) {
log_info(os)("Memory Serialize Page address: " INTPTR_FORMAT, p2i(mem_serialize_page));
}

// Check minimum allowable stack size for thread creation and to initialize
// the java system classes, including StackOverflowError - depends on page
// size. Add two 4K pages for compiler2 recursion in main thread.
// Add in 4*BytesPerWord 4K pages to account for VM stack during
// class initialization depending on 32 or 64 bit VM.
os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
JavaThread::stack_guard_zone_size() +
JavaThread::stack_shadow_zone_size() +
(4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);

os::Solaris::min_stack_allowed = align_size_up(os::Solaris::min_stack_allowed, os::vm_page_size());

size_t threadStackSizeInBytes = ThreadStackSize * K;
if (threadStackSizeInBytes != 0 &&
threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
tty->print_cr("\nThe stack size specified is too small, Specify at least %dk",
os::Solaris::min_stack_allowed/K);
// Check and sets minimum stack sizes against command line options
if (Posix::set_minimum_stack_sizes() == JNI_ERR) {
return JNI_ERR;
}

// For 64kbps there will be a 64kb page size, which makes
// the usable default stack size quite a bit less. Increase the
// stack for 64kb (or any > than 8kb) pages, this increases
// virtual memory fragmentation (since we're not creating the
// stack on a power of 2 boundary. The real fix for this
// should be to fix the guard page mechanism.

if (vm_page_size() > 8*K) {
threadStackSizeInBytes = (threadStackSizeInBytes != 0)
? threadStackSizeInBytes +
JavaThread::stack_red_zone_size() +
JavaThread::stack_yellow_zone_size()
: 0;
ThreadStackSize = threadStackSizeInBytes/K;
}

// Make the stack size a multiple of the page size so that
// the yellow/red zones can be guarded.
JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
vm_page_size()));

Solaris::libthread_init();

if (UseNUMA) {
@@ -292,10 +292,6 @@ class Solaris {
static jint _os_thread_limit;
static volatile jint _os_thread_count;

// Minimum stack size a thread can be created with (allowing
// the VM to completely create the thread and enter user code)

static size_t min_stack_allowed;

// Stack overflow handling
@@ -2504,13 +2504,15 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
// It write enables the page immediately after protecting it
// so just return.
if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
JavaThread* thread = (JavaThread*) t;
PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
address addr = (address) exceptionRecord->ExceptionInformation[1];
if (os::is_memory_serialize_page(thread, addr)) {
// Block current thread until the memory serialize page permission restored.
os::block_on_serialize_page_trap();
return EXCEPTION_CONTINUE_EXECUTION;
if (t != NULL && t->is_Java_thread()) {
JavaThread* thread = (JavaThread*) t;
PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
address addr = (address) exceptionRecord->ExceptionInformation[1];
if (os::is_memory_serialize_page(thread, addr)) {
// Block current thread until the memory serialize page permission restored.
os::block_on_serialize_page_trap();
return EXCEPTION_CONTINUE_EXECUTION;
}
}
}

@@ -2564,7 +2566,7 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
}
#endif
if (thread->stack_guards_enabled()) {
if (_thread_in_Java) {
if (in_java) {
frame fr;
PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
address addr = (address) exceptionRecord->ExceptionInformation[1];

@@ -2576,6 +2578,7 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
// Yellow zone violation. The o/s has unprotected the first yellow
// zone page for us. Note: must call disable_stack_yellow_zone to
// update the enabled status, even if the zone contains only one page.
assert(thread->thread_state() != _thread_in_vm, "Undersized StackShadowPages");
thread->disable_stack_yellow_reserved_zone();
// If not in java code, return and hope for the best.
return in_java

@@ -3793,6 +3796,11 @@ void os::win32::initialize_system_info() {
GlobalMemoryStatusEx(&ms);
_physical_memory = ms.ullTotalPhys;

if (FLAG_IS_DEFAULT(MaxRAM)) {
// Adjust MaxRAM according to the maximum virtual address space available.
FLAG_SET_DEFAULT(MaxRAM, MIN2(MaxRAM, (uint64_t) ms.ullTotalVirtual));
}

OSVERSIONINFOEX oi;
oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
GetVersionEx((OSVERSIONINFO*)&oi);

@@ -4207,7 +4215,7 @@ jint os::init_2(void) {
min_stack_allowed = align_size_up(min_stack_allowed, os::vm_page_size());

if (actual_reserve_size < min_stack_allowed) {
tty->print_cr("\nThe stack size specified is too small, "
tty->print_cr("\nThe Java thread stack size specified is too small. "
"Specify at least %dk",
min_stack_allowed / K);
return JNI_ERR;
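The first hunk above tightens topLevelExceptionFilter: the serialize-page check previously cast the current Thread to JavaThread unconditionally, and now runs only behind a t != NULL && t->is_Java_thread() guard. The same pattern in miniature (hypothetical stand-in types, not the HotSpot classes):

    // Hypothetical Thread/JavaThread stand-ins; only the guarded downcast matters here.
    struct Thread {
        virtual bool is_Java_thread() const { return false; }
        virtual ~Thread() {}
    };
    struct JavaThread : Thread {
        bool is_Java_thread() const override { return true; }
        void handle_serialize_page_fault() { /* ... */ }
    };

    void on_access_violation(Thread* t) {
        if (t != nullptr && t->is_Java_thread()) {      // guard before the downcast
            static_cast<JavaThread*>(t)->handle_serialize_page_fault();
        }
        // non-Java threads fall through to the generic handling
    }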
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*

@@ -33,10 +33,6 @@ define_pd_global(bool, DontYieldALot, false);
define_pd_global(intx, ThreadStackSize, 2048); // 0 => use system default
define_pd_global(intx, VMThreadStackSize, 2048);

// if we set CompilerThreadStackSize to a value different than 0, it will
// be used in os::create_thread(). Otherwise, due the strange logic in os::create_thread(),
// the stack size for compiler threads will default to VMThreadStackSize, although it
// is defined to 4M in os::Aix::default_stack_size()!
define_pd_global(intx, CompilerThreadStackSize, 4096);

// Allow extra space in DEBUG builds for asserts.
@@ -533,23 +533,17 @@ void os::Aix::init_thread_fpu_state(void) {
////////////////////////////////////////////////////////////////////////////////
// thread stack

size_t os::Aix::min_stack_allowed = 128*K;
size_t os::Posix::_compiler_thread_min_stack_allowed = 128 * K;
size_t os::Posix::_java_thread_min_stack_allowed = 128 * K;
size_t os::Posix::_vm_internal_thread_min_stack_allowed = 128 * K;

// return default stack size for thr_type
size_t os::Aix::default_stack_size(os::ThreadType thr_type) {
size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
// default stack size (compiler thread needs larger stack)
// Notice that the setting for compiler threads here have no impact
// because of the strange 'fallback logic' in os::create_thread().
// Better set CompilerThreadStackSize in globals_<os_cpu>.hpp if you want to
// specify a different stack size for compiler threads!
size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
return s;
}

size_t os::Aix::default_guard_size(os::ThreadType thr_type) {
return 2 * page_size();
}

/////////////////////////////////////////////////////////////////////////////
// helper functions for fatal error handler
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -31,9 +31,11 @@
//
define_pd_global(bool, DontYieldALot, false);
#ifdef AMD64
define_pd_global(intx, CompilerThreadStackSize, 1024);
define_pd_global(intx, ThreadStackSize, 1024); // 0 => use system default
define_pd_global(intx, VMThreadStackSize, 1024);
#else
define_pd_global(intx, CompilerThreadStackSize, 512);
// ThreadStackSize 320 allows a couple of test cases to run while
// keeping the number of threads that can be created high. System
// default ThreadStackSize appears to be 512 which is too big.

@@ -41,7 +43,6 @@ define_pd_global(intx, ThreadStackSize, 320);
define_pd_global(intx, VMThreadStackSize, 512);
#endif // AMD64

define_pd_global(intx, CompilerThreadStackSize, 0);

define_pd_global(size_t, JVMInvokeMethodSlack, 8192);
@@ -838,9 +838,13 @@ bool os::is_allocatable(size_t bytes) {
// thread stack

#ifdef AMD64
size_t os::Bsd::min_stack_allowed = 64 * K;
size_t os::Posix::_compiler_thread_min_stack_allowed = 64 * K;
size_t os::Posix::_java_thread_min_stack_allowed = 64 * K;
size_t os::Posix::_vm_internal_thread_min_stack_allowed = 64 * K;
#else
size_t os::Bsd::min_stack_allowed = (48 DEBUG_ONLY(+4))*K;
size_t os::Posix::_compiler_thread_min_stack_allowed = (48 DEBUG_ONLY(+ 4)) * K;
size_t os::Posix::_java_thread_min_stack_allowed = (48 DEBUG_ONLY(+ 4)) * K;
size_t os::Posix::_vm_internal_thread_min_stack_allowed = (48 DEBUG_ONLY(+ 4)) * K;

#ifdef __GNUC__
#define GET_GS() ({int gs; __asm__ volatile("movw %%gs, %w0":"=q"(gs)); gs&0xffff;})

@@ -849,7 +853,7 @@ size_t os::Bsd::min_stack_allowed = (48 DEBUG_ONLY(+4))*K;
#endif // AMD64

// return default stack size for thr_type
size_t os::Bsd::default_stack_size(os::ThreadType thr_type) {
size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
// default stack size (compiler thread needs larger stack)
#ifdef AMD64
size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);

@@ -859,11 +863,6 @@ size_t os::Bsd::default_stack_size(os::ThreadType thr_type) {
return s;
}

size_t os::Bsd::default_guard_size(os::ThreadType thr_type) {
// Creating guard page is very expensive. Java thread has HotSpot
// guard page, only enable glibc guard page for non-Java threads.
return (thr_type == java_thread ? 0 : page_size());
}

// Java thread:
//
@@ -282,9 +282,11 @@ bool os::is_allocatable(size_t bytes) {
///////////////////////////////////////////////////////////////////////////////
// thread stack

size_t os::Bsd::min_stack_allowed = 64 * K;
size_t os::Posix::_compiler_thread_min_stack_allowed = 64 * K;
size_t os::Posix::_java_thread_min_stack_allowed = 64 * K;
size_t os::Posix::_vm_internal_thread_min_stack_allowed = 64 * K;

size_t os::Bsd::default_stack_size(os::ThreadType thr_type) {
size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
#ifdef _LP64
size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
#else

@@ -293,12 +295,6 @@ size_t os::Bsd::default_stack_size(os::ThreadType thr_type) {
return s;
}

size_t os::Bsd::default_guard_size(os::ThreadType thr_type) {
// Only enable glibc guard pages for non-Java threads
// (Java threads have HotSpot guard pages)
return (thr_type == java_thread ? 0 : page_size());
}

static void current_stack_region(address *bottom, size_t *size) {
address stack_bottom;
address stack_top;
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*

@@ -33,7 +33,7 @@ define_pd_global(bool, DontYieldALot, false);
define_pd_global(intx, ThreadStackSize, 2048); // 0 => use system default
define_pd_global(intx, VMThreadStackSize, 2048);

define_pd_global(intx, CompilerThreadStackSize, 0);
define_pd_global(intx, CompilerThreadStackSize, 2048);

define_pd_global(uintx,JVMInvokeMethodSlack, 8192);
@@ -473,10 +473,12 @@ bool os::is_allocatable(size_t bytes) {
////////////////////////////////////////////////////////////////////////////////
// thread stack

size_t os::Linux::min_stack_allowed = 64 * K;
size_t os::Posix::_compiler_thread_min_stack_allowed = 64 * K;
size_t os::Posix::_java_thread_min_stack_allowed = 64 * K;
size_t os::Posix::_vm_internal_thread_min_stack_allowed = 64 * K;

// return default stack size for thr_type
size_t os::Linux::default_stack_size(os::ThreadType thr_type) {
size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
// default stack size (compiler thread needs larger stack)
size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
return s;
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*

@@ -33,10 +33,6 @@ define_pd_global(bool, DontYieldALot, false);
define_pd_global(intx, ThreadStackSize, 2048); // 0 => use system default
define_pd_global(intx, VMThreadStackSize, 2048);

// if we set CompilerThreadStackSize to a value different than 0, it will
// be used in os::create_thread(). Otherwise, due the strange logic in os::create_thread(),
// the stack size for compiler threads will default to VMThreadStackSize, although it
// is defined to 4M in os::Linux::default_stack_size()!
define_pd_global(intx, CompilerThreadStackSize, 4096);

// Allow extra space in DEBUG builds for asserts.
@@ -533,15 +533,13 @@ void os::Linux::set_fpu_control_word(int fpu_control) {
////////////////////////////////////////////////////////////////////////////////
// thread stack

size_t os::Linux::min_stack_allowed = 128*K;
size_t os::Posix::_compiler_thread_min_stack_allowed = 128 * K;
size_t os::Posix::_java_thread_min_stack_allowed = 128 * K;
size_t os::Posix::_vm_internal_thread_min_stack_allowed = 128 * K;

// return default stack size for thr_type
size_t os::Linux::default_stack_size(os::ThreadType thr_type) {
size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
// default stack size (compiler thread needs larger stack)
// Notice that the setting for compiler threads here have no impact
// because of the strange 'fallback logic' in os::create_thread().
// Better set CompilerThreadStackSize in globals_<os_cpu>.hpp if you want to
// specify a different stack size for compiler threads!
size_t s = (thr_type == os::compiler_thread ? 4 * M : 1024 * K);
return s;
}
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -31,7 +31,6 @@
//

define_pd_global(size_t, JVMInvokeMethodSlack, 12288);
define_pd_global(intx, CompilerThreadStackSize, 0);

// Used on 64 bit platforms for UseCompressedOops base address
define_pd_global(size_t, HeapBaseMinAddress, CONST64(4)*G);
@@ -726,10 +726,12 @@ bool os::is_allocatable(size_t bytes) {
///////////////////////////////////////////////////////////////////////////////
// thread stack

size_t os::Linux::min_stack_allowed = 128 * K;
size_t os::Posix::_compiler_thread_min_stack_allowed = 128 * K;
size_t os::Posix::_java_thread_min_stack_allowed = 128 * K;
size_t os::Posix::_vm_internal_thread_min_stack_allowed = 128 * K;

// return default stack size for thr_type
size_t os::Linux::default_stack_size(os::ThreadType thr_type) {
size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
// default stack size (compiler thread needs larger stack)
size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
return s;
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -30,9 +30,11 @@
|
||||
|
||||
define_pd_global(bool, DontYieldALot, false);
|
||||
#ifdef AMD64
|
||||
define_pd_global(intx, CompilerThreadStackSize, 1024);
|
||||
define_pd_global(intx, ThreadStackSize, 1024); // 0 => use system default
|
||||
define_pd_global(intx, VMThreadStackSize, 1024);
|
||||
#else
|
||||
define_pd_global(intx, CompilerThreadStackSize, 512);
|
||||
// ThreadStackSize 320 allows a couple of test cases to run while
|
||||
// keeping the number of threads that can be created high. System
|
||||
// default ThreadStackSize appears to be 512 which is too big.
|
||||
@ -40,8 +42,6 @@ define_pd_global(intx, ThreadStackSize, 320);
|
||||
define_pd_global(intx, VMThreadStackSize, 512);
|
||||
#endif // AMD64
|
||||
|
||||
define_pd_global(intx, CompilerThreadStackSize, 0);
|
||||
|
||||
define_pd_global(size_t, JVMInvokeMethodSlack, 8192);
|
||||
|
||||
// Used on 64 bit platforms for UseCompressedOops base address
|
||||
|
@ -676,13 +676,17 @@ bool os::is_allocatable(size_t bytes) {
|
||||
// thread stack
|
||||
|
||||
#ifdef AMD64
|
||||
size_t os::Linux::min_stack_allowed = 64 * K;
|
||||
size_t os::Posix::_compiler_thread_min_stack_allowed = 64 * K;
|
||||
size_t os::Posix::_java_thread_min_stack_allowed = 64 * K;
|
||||
size_t os::Posix::_vm_internal_thread_min_stack_allowed = 64 * K;
|
||||
#else
|
||||
size_t os::Linux::min_stack_allowed = (48 DEBUG_ONLY(+4))*K;
|
||||
size_t os::Posix::_compiler_thread_min_stack_allowed = (48 DEBUG_ONLY(+ 4)) * K;
|
||||
size_t os::Posix::_java_thread_min_stack_allowed = (48 DEBUG_ONLY(+ 4)) * K;
|
||||
size_t os::Posix::_vm_internal_thread_min_stack_allowed = (48 DEBUG_ONLY(+ 4)) * K;
|
||||
#endif // AMD64
|
||||
|
||||
// return default stack size for thr_type
|
||||
size_t os::Linux::default_stack_size(os::ThreadType thr_type) {
|
||||
size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
|
||||
// default stack size (compiler thread needs larger stack)
|
||||
#ifdef AMD64
|
||||
size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
|
||||
|
@ -307,9 +307,11 @@ bool os::is_allocatable(size_t bytes) {
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// thread stack
|
||||
|
||||
size_t os::Linux::min_stack_allowed = 64 * K;
|
||||
size_t os::Posix::_compiler_thread_min_stack_allowed = 64 * K;
|
||||
size_t os::Posix::_java_thread_min_stack_allowed = 64 * K;
|
||||
size_t os::Posix::_vm_internal_thread_min_stack_allowed = 64 * K;
|
||||
|
||||
size_t os::Linux::default_stack_size(os::ThreadType thr_type) {
|
||||
size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
|
||||
#ifdef _LP64
|
||||
size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
|
||||
#else
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -31,7 +31,6 @@
|
||||
//
|
||||
|
||||
define_pd_global(size_t, JVMInvokeMethodSlack, 12288);
|
||||
define_pd_global(intx, CompilerThreadStackSize, 0);
|
||||
|
||||
// Used on 64 bit platforms for UseCompressedOops base address
|
||||
#ifdef _LP64
|
||||
|
@ -84,9 +84,13 @@
|
||||
// Minimum stack size for the VM. It's easier to document a constant
|
||||
// but it's different for x86 and sparc because the page sizes are different.
|
||||
#ifdef _LP64
|
||||
size_t os::Solaris::min_stack_allowed = 128*K;
|
||||
size_t os::Posix::_compiler_thread_min_stack_allowed = 128 * K;
|
||||
size_t os::Posix::_java_thread_min_stack_allowed = 128 * K;
|
||||
size_t os::Posix::_vm_internal_thread_min_stack_allowed = 128 * K;
|
||||
#else
|
||||
size_t os::Solaris::min_stack_allowed = 96*K;
|
||||
size_t os::Posix::_compiler_thread_min_stack_allowed = 96 * K;
|
||||
size_t os::Posix::_java_thread_min_stack_allowed = 96 * K;
|
||||
size_t os::Posix::_vm_internal_thread_min_stack_allowed = 96 * K;
|
||||
#endif
|
||||
|
||||
int os::Solaris::max_register_window_saves_before_flushing() {
|
||||
@ -444,7 +448,7 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
|
||||
|
||||
|
||||
if (thread->thread_state() == _thread_in_vm) {
|
||||
if (sig == SIGBUS && info->si_code == BUS_OBJERR && thread->doing_unsafe_access()) {
|
||||
if (sig == SIGBUS && thread->doing_unsafe_access()) {
|
||||
stub = SharedRuntime::handle_unsafe_access(thread, npc);
|
||||
}
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -30,10 +30,12 @@
|
||||
|
||||
define_pd_global(bool, DontYieldALot, true); // Determined in the design center
|
||||
#ifdef AMD64
|
||||
define_pd_global(intx, CompilerThreadStackSize, 1024);
|
||||
define_pd_global(intx, ThreadStackSize, 1024); // 0 => use system default
|
||||
define_pd_global(intx, VMThreadStackSize, 1024);
|
||||
define_pd_global(size_t, JVMInvokeMethodSlack, 8*K);
|
||||
#else
|
||||
define_pd_global(intx, CompilerThreadStackSize, 512);
|
||||
// ThreadStackSize 320 allows a couple of test cases to run while
|
||||
// keeping the number of threads that can be created high.
|
||||
define_pd_global(intx, ThreadStackSize, 320);
|
||||
@ -41,7 +43,6 @@ define_pd_global(intx, VMThreadStackSize, 512);
|
||||
define_pd_global(size_t, JVMInvokeMethodSlack, 10*K);
|
||||
#endif // AMD64
|
||||
|
||||
define_pd_global(intx, CompilerThreadStackSize, 0);
|
||||
|
||||
// Used on 64 bit platforms for UseCompressedOops base address
|
||||
define_pd_global(size_t, HeapBaseMinAddress, 2*G);
|
||||
|
@ -86,15 +86,19 @@
|
||||
|
||||
#define MAX_PATH (2 * K)
|
||||
|
||||
// Minimum stack size for the VM. It's easier to document a constant value
|
||||
// Minimum stack sizes for the VM. It's easier to document a constant value
|
||||
// but it's different for x86 and sparc because the page sizes are different.
|
||||
#ifdef AMD64
|
||||
size_t os::Solaris::min_stack_allowed = 224*K;
|
||||
size_t os::Posix::_compiler_thread_min_stack_allowed = 394 * K;
|
||||
size_t os::Posix::_java_thread_min_stack_allowed = 224 * K;
|
||||
size_t os::Posix::_vm_internal_thread_min_stack_allowed = 224 * K;
|
||||
#define REG_SP REG_RSP
|
||||
#define REG_PC REG_RIP
|
||||
#define REG_FP REG_RBP
|
||||
#else
|
||||
size_t os::Solaris::min_stack_allowed = 64*K;
|
||||
size_t os::Posix::_compiler_thread_min_stack_allowed = 64 * K;
|
||||
size_t os::Posix::_java_thread_min_stack_allowed = 64 * K;
|
||||
size_t os::Posix::_vm_internal_thread_min_stack_allowed = 64 * K;
|
||||
#define REG_SP UESP
|
||||
#define REG_PC EIP
|
||||
#define REG_FP EBP
|
||||
|
@ -221,6 +221,9 @@ bool Compiler::is_intrinsic_supported(const methodHandle& method) {
|
||||
case vmIntrinsics::_putCharStringU:
|
||||
#ifdef TRACE_HAVE_INTRINSICS
|
||||
case vmIntrinsics::_counterTime:
|
||||
#if defined(_LP64) || !defined(TRACE_ID_CLASS_SHIFT)
|
||||
case vmIntrinsics::_getClassId:
|
||||
#endif
|
||||
#endif
|
||||
break;
|
||||
default:
|
||||
|
@ -2410,6 +2410,15 @@ void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
#endif // INCLUDE_ALL_GCS

if (x->is_volatile() && os::is_MP()) __ membar_acquire();

/* Normalize boolean value returned by unsafe operation, i.e., value != 0 ? value = true : value false. */
if (type == T_BOOLEAN) {
LabelObj* equalZeroLabel = new LabelObj();
__ cmp(lir_cond_equal, value, 0);
__ branch(lir_cond_equal, T_BOOLEAN, equalZeroLabel->label());
__ move(LIR_OprFact::intConst(1), value);
__ branch_destination(equalZeroLabel->label());
}
}
|
||||
|
||||
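The normalization above matters because an unsafe read of a boolean field may return any non-zero byte pattern, and later code compares against the canonical Java true value. A stand-alone illustration of the same value mapping, in plain C++ rather than LIR (normalize_boolean is an invented helper, not HotSpot code):

// Illustration only: any non-zero bit pattern read from memory maps to 1.
static int normalize_boolean(int raw_value) {
  return raw_value != 0 ? 1 : 0;
}
// normalize_boolean(0x40) == 1, normalize_boolean(0) == 0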
@ -3083,6 +3092,37 @@ void LIRGenerator::do_IfOp(IfOp* x) {
|
||||
__ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
|
||||
}
|
||||
|
||||
#ifdef TRACE_HAVE_INTRINSICS
|
||||
void LIRGenerator::do_ClassIDIntrinsic(Intrinsic* x) {
|
||||
CodeEmitInfo* info = state_for(x);
|
||||
CodeEmitInfo* info2 = new CodeEmitInfo(info); // Clone for the second null check
|
||||
|
||||
assert(info != NULL, "must have info");
|
||||
LIRItem arg(x->argument_at(0), this);
|
||||
|
||||
arg.load_item();
|
||||
LIR_Opr klass = new_register(T_METADATA);
|
||||
__ move(new LIR_Address(arg.result(), java_lang_Class::klass_offset_in_bytes(), T_ADDRESS), klass, info);
|
||||
LIR_Opr id = new_register(T_LONG);
|
||||
ByteSize offset = TRACE_KLASS_TRACE_ID_OFFSET;
|
||||
LIR_Address* trace_id_addr = new LIR_Address(klass, in_bytes(offset), T_LONG);
|
||||
|
||||
__ move(trace_id_addr, id);
|
||||
__ logical_or(id, LIR_OprFact::longConst(0x01l), id);
|
||||
__ store(id, trace_id_addr);
|
||||
|
||||
#ifdef TRACE_ID_META_BITS
|
||||
__ logical_and(id, LIR_OprFact::longConst(~TRACE_ID_META_BITS), id);
|
||||
#endif
|
||||
#ifdef TRACE_ID_CLASS_SHIFT
|
||||
__ unsigned_shift_right(id, TRACE_ID_CLASS_SHIFT, id);
|
||||
#endif
|
||||
|
||||
__ move(id, rlock_result(x));
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
void LIRGenerator::do_RuntimeCall(address routine, Intrinsic* x) {
|
||||
assert(x->number_of_arguments() == 0, "wrong type");
|
||||
// Enforce computation of _reserved_argument_area_size which is required on some platforms.
|
||||
@ -3108,6 +3148,9 @@ void LIRGenerator::do_Intrinsic(Intrinsic* x) {
|
||||
}
|
||||
|
||||
#ifdef TRACE_HAVE_INTRINSICS
|
||||
case vmIntrinsics::_getClassId:
|
||||
do_ClassIDIntrinsic(x);
|
||||
break;
|
||||
case vmIntrinsics::_counterTime:
|
||||
do_RuntimeCall(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), x);
|
||||
break;
|
||||
|
@ -438,6 +438,10 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
|
||||
SwitchRangeArray* create_lookup_ranges(LookupSwitch* x);
|
||||
void do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux);
|
||||
|
||||
#ifdef TRACE_HAVE_INTRINSICS
|
||||
void do_ClassIDIntrinsic(Intrinsic* x);
|
||||
#endif
|
||||
|
||||
void do_RuntimeCall(address routine, Intrinsic* x);
|
||||
|
||||
ciKlass* profile_type(ciMethodData* md, int md_first_offset, int md_offset, intptr_t profiled_k,
|
||||
|
@ -576,9 +576,8 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* t
// normal bytecode execution.
thread->clear_exception_oop_and_pc();

Handle original_exception(thread, exception());

continuation = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, false, false);
bool recursive_exception = false;
continuation = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, false, false, recursive_exception);
// If an exception was thrown during exception dispatch, the exception oop may have changed
thread->set_exception_oop(exception());
thread->set_exception_pc(pc);
@ -586,8 +585,9 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* t
// the exception cache is used only by non-implicit exceptions
// Update the exception cache only when there didn't happen
// another exception during the computation of the compiled
// exception handler.
if (continuation != NULL && original_exception() == exception()) {
// exception handler. Checking for exception oop equality is not
// sufficient because some exceptions are pre-allocated and reused.
if (continuation != NULL && !recursive_exception) {
nm->add_handler_for_exception_and_pc(exception, pc, continuation);
}
}
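Why the switch from comparing exception oops to an explicit recursive_exception flag helps: when an exception object is pre-allocated and reused (for example a shared out-of-memory error), a nested throw can hand back the very same object, so equality before and after dispatch proves nothing. The sketch below is an assumption-level illustration in plain C++; ExceptionOopSketch and both helper functions are invented names, not SharedRuntime code.

// Illustration only: a shared, pre-allocated exception object defeats oop equality.
struct ExceptionOopSketch {};
static ExceptionOopSketch shared_preallocated_error;

static bool nested_throw_seen_by_equality(const ExceptionOopSketch* before,
                                          const ExceptionOopSketch* after) {
  return before != after;        // stays false even if the same object was re-thrown
}
static bool nested_throw_seen_by_flag(bool recursive_exception) {
  return recursive_exception;    // mirrors the recursive_exception flag in the diff
}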
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -205,103 +205,3 @@ juint AltHashing::murmur3_32(juint seed, const int* data, int len) {
|
||||
juint AltHashing::murmur3_32(const int* data, int len) {
|
||||
return murmur3_32(0, data, len);
|
||||
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
// Overloaded versions for internal test.
|
||||
juint AltHashing::murmur3_32(const jbyte* data, int len) {
|
||||
return murmur3_32(0, data, len);
|
||||
}
|
||||
|
||||
juint AltHashing::murmur3_32(const jchar* data, int len) {
|
||||
return murmur3_32(0, data, len);
|
||||
}
|
||||
|
||||
// Internal test for alternate hashing. Translated from JDK version
|
||||
// test/sun/misc/Hashing.java
|
||||
static const jbyte ONE_BYTE[] = { (jbyte) 0x80};
|
||||
static const jbyte TWO_BYTE[] = { (jbyte) 0x80, (jbyte) 0x81};
|
||||
static const jchar ONE_CHAR[] = { (jchar) 0x8180};
|
||||
static const jbyte THREE_BYTE[] = { (jbyte) 0x80, (jbyte) 0x81, (jbyte) 0x82};
|
||||
static const jbyte FOUR_BYTE[] = { (jbyte) 0x80, (jbyte) 0x81, (jbyte) 0x82, (jbyte) 0x83};
|
||||
static const jchar TWO_CHAR[] = { (jchar) 0x8180, (jchar) 0x8382};
|
||||
static const jint ONE_INT[] = { (jint)0x83828180};
|
||||
static const jbyte SIX_BYTE[] = { (jbyte) 0x80, (jbyte) 0x81, (jbyte) 0x82, (jbyte) 0x83, (jbyte) 0x84, (jbyte) 0x85};
|
||||
static const jchar THREE_CHAR[] = { (jchar) 0x8180, (jchar) 0x8382, (jchar) 0x8584};
|
||||
static const jbyte EIGHT_BYTE[] = {
|
||||
(jbyte) 0x80, (jbyte) 0x81, (jbyte) 0x82,
|
||||
(jbyte) 0x83, (jbyte) 0x84, (jbyte) 0x85,
|
||||
(jbyte) 0x86, (jbyte) 0x87};
|
||||
static const jchar FOUR_CHAR[] = {
|
||||
(jchar) 0x8180, (jchar) 0x8382,
|
||||
(jchar) 0x8584, (jchar) 0x8786};
|
||||
|
||||
static const jint TWO_INT[] = { (jint)0x83828180, (jint)0x87868584};
|
||||
|
||||
static const juint MURMUR3_32_X86_CHECK_VALUE = 0xB0F57EE3;
|
||||
|
||||
void AltHashing::testMurmur3_32_ByteArray() {
|
||||
// printf("testMurmur3_32_ByteArray\n");
|
||||
|
||||
jbyte vector[256];
|
||||
jbyte hashes[4 * 256];
|
||||
|
||||
for (int i = 0; i < 256; i++) {
|
||||
vector[i] = (jbyte) i;
|
||||
}
|
||||
|
||||
// Hash subranges {}, {0}, {0,1}, {0,1,2}, ..., {0,...,255}
|
||||
for (int i = 0; i < 256; i++) {
|
||||
juint hash = murmur3_32(256 - i, vector, i);
|
||||
hashes[i * 4] = (jbyte) hash;
|
||||
hashes[i * 4 + 1] = (jbyte)(hash >> 8);
|
||||
hashes[i * 4 + 2] = (jbyte)(hash >> 16);
|
||||
hashes[i * 4 + 3] = (jbyte)(hash >> 24);
|
||||
}
|
||||
|
||||
// hash to get const result.
|
||||
juint final_hash = murmur3_32(hashes, 4*256);
|
||||
|
||||
assert (MURMUR3_32_X86_CHECK_VALUE == final_hash,
|
||||
"Calculated hash result not as expected. Expected %08X got %08X\n",
|
||||
MURMUR3_32_X86_CHECK_VALUE,
|
||||
final_hash);
|
||||
}
|
||||
|
||||
void AltHashing::testEquivalentHashes() {
|
||||
juint jbytes, jchars, ints;
|
||||
|
||||
// printf("testEquivalentHashes\n");
|
||||
|
||||
jbytes = murmur3_32(TWO_BYTE, 2);
|
||||
jchars = murmur3_32(ONE_CHAR, 1);
|
||||
assert (jbytes == jchars,
|
||||
"Hashes did not match. b:%08x != c:%08x\n", jbytes, jchars);
|
||||
|
||||
jbytes = murmur3_32(FOUR_BYTE, 4);
|
||||
jchars = murmur3_32(TWO_CHAR, 2);
|
||||
ints = murmur3_32(ONE_INT, 1);
|
||||
assert ((jbytes == jchars) && (jbytes == ints),
|
||||
"Hashes did not match. b:%08x != c:%08x != i:%08x\n", jbytes, jchars, ints);
|
||||
|
||||
jbytes = murmur3_32(SIX_BYTE, 6);
|
||||
jchars = murmur3_32(THREE_CHAR, 3);
|
||||
assert (jbytes == jchars,
|
||||
"Hashes did not match. b:%08x != c:%08x\n", jbytes, jchars);
|
||||
|
||||
jbytes = murmur3_32(EIGHT_BYTE, 8);
|
||||
jchars = murmur3_32(FOUR_CHAR, 4);
|
||||
ints = murmur3_32(TWO_INT, 2);
|
||||
assert ((jbytes == jchars) && (jbytes == ints),
|
||||
"Hashes did not match. b:%08x != c:%08x != i:%08x\n", jbytes, jchars, ints);
|
||||
}
|
||||
|
||||
// Returns true if the alternate hashcode is correct
|
||||
void AltHashing::test_alt_hash() {
|
||||
testMurmur3_32_ByteArray();
|
||||
testEquivalentHashes();
|
||||
}
|
||||
|
||||
void AltHashing_test() {
|
||||
AltHashing::test_alt_hash();
|
||||
}
|
||||
#endif // PRODUCT
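The equivalence checks above (bytes vs. chars vs. ints hashing to the same value) work because the test vectors spell out one and the same little-endian byte stream. A minimal compile-time check of that packing, using standard C++ as an illustration (not HotSpot types):

#include <cstdint>
// { 0x80, 0x81 } as bytes == 0x8180 as a 16-bit char; four bytes pack into ONE_INT.
static_assert((uint16_t)((0x81 << 8) | 0x80) == 0x8180,
              "two bytes pack into ONE_CHAR little-endian");
static_assert((uint32_t)((0x83u << 24) | (0x82u << 16) | (0x81u << 8) | 0x80u) == 0x83828180u,
              "four bytes pack into ONE_INT little-endian");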
|
||||
|
@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,26 +37,18 @@
*/

class AltHashing : AllStatic {
friend class AltHashingTest;

// utility function copied from java/lang/Integer
static juint Integer_rotateLeft(juint i, int distance) {
return (i << distance) | (i >> (32-distance));
return (i << distance) | (i >> (32 - distance));
}
static juint murmur3_32(const int* data, int len);
static juint murmur3_32(juint seed, const int* data, int len);

#ifndef PRODUCT
// Hashing functions used for internal testing
static juint murmur3_32(const jbyte* data, int len);
static juint murmur3_32(const jchar* data, int len);
static void testMurmur3_32_ByteArray();
static void testEquivalentHashes();
#endif // PRODUCT

public:
static juint compute_seed();
static juint murmur3_32(juint seed, const jbyte* data, int len);
static juint murmur3_32(juint seed, const jchar* data, int len);
NOT_PRODUCT(static void test_alt_hash();)
};
#endif // SHARE_VM_CLASSFILE_ALTHASHING_HPP
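Integer_rotateLeft above is a plain 32-bit rotate; one concrete value makes the bit movement easy to check. The snippet below is an illustrative addition in standard C++ (rotl32 is an invented name, not part of the HotSpot sources):

#include <cstdint>
// Same expression as Integer_rotateLeft, made constexpr so it can be checked at compile time.
constexpr uint32_t rotl32(uint32_t i, int distance) {
  return (i << distance) | (i >> (32 - distance));   // valid for distance 1..31
}
static_assert(rotl32(0x80000001u, 1) == 0x00000003u,
              "rotate-left by 1 wraps the sign bit around into bit 0");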
@ -95,7 +95,6 @@
|
||||
#define JAVA_6_VERSION 50
|
||||
|
||||
// Used for backward compatibility reasons:
|
||||
// - to check NameAndType_info signatures more aggressively
|
||||
// - to disallow argument and require ACC_STATIC for <clinit> methods
|
||||
#define JAVA_7_VERSION 51
|
||||
|
||||
@ -564,7 +563,7 @@ void ClassFileParser::parse_constant_pool(const ClassFileStream* const stream,
|
||||
break;
|
||||
}
|
||||
case JVM_CONSTANT_NameAndType: {
|
||||
if (_need_verify && _major_version >= JAVA_7_VERSION) {
|
||||
if (_need_verify) {
|
||||
const int sig_index = cp->signature_ref_index_at(index);
|
||||
const int name_index = cp->name_ref_index_at(index);
|
||||
const Symbol* const name = cp->symbol_at(name_index);
|
||||
@ -572,9 +571,17 @@ void ClassFileParser::parse_constant_pool(const ClassFileStream* const stream,
|
||||
guarantee_property(sig->utf8_length() != 0,
|
||||
"Illegal zero length constant pool entry at %d in class %s",
|
||||
sig_index, CHECK);
|
||||
guarantee_property(name->utf8_length() != 0,
|
||||
"Illegal zero length constant pool entry at %d in class %s",
|
||||
name_index, CHECK);
|
||||
|
||||
if (sig->byte_at(0) == JVM_SIGNATURE_FUNC) {
|
||||
// Format check method name and signature
|
||||
verify_legal_method_name(name, CHECK);
|
||||
verify_legal_method_signature(name, sig, CHECK);
|
||||
} else {
|
||||
// Format check field name and signature
|
||||
verify_legal_field_name(name, CHECK);
|
||||
verify_legal_field_signature(name, sig, CHECK);
|
||||
}
|
||||
}
|
||||
@ -595,42 +602,32 @@ void ClassFileParser::parse_constant_pool(const ClassFileStream* const stream,
|
||||
const Symbol* const name = cp->symbol_at(name_ref_index);
|
||||
const Symbol* const signature = cp->symbol_at(signature_ref_index);
|
||||
if (tag == JVM_CONSTANT_Fieldref) {
|
||||
verify_legal_field_name(name, CHECK);
|
||||
if (_need_verify && _major_version >= JAVA_7_VERSION) {
|
||||
// Signature is verified above, when iterating NameAndType_info.
|
||||
// Need only to be sure it's non-zero length and the right type.
|
||||
if (_need_verify) {
|
||||
// Field name and signature are verified above, when iterating NameAndType_info.
|
||||
// Need only to be sure signature is non-zero length and the right type.
|
||||
if (signature->utf8_length() == 0 ||
|
||||
signature->byte_at(0) == JVM_SIGNATURE_FUNC) {
|
||||
throwIllegalSignature(
|
||||
"Field", name, signature, CHECK);
|
||||
throwIllegalSignature("Field", name, signature, CHECK);
|
||||
}
|
||||
} else {
|
||||
verify_legal_field_signature(name, signature, CHECK);
|
||||
}
|
||||
} else {
|
||||
verify_legal_method_name(name, CHECK);
|
||||
if (_need_verify && _major_version >= JAVA_7_VERSION) {
|
||||
// Signature is verified above, when iterating NameAndType_info.
|
||||
// Need only to be sure it's non-zero length and the right type.
|
||||
if (_need_verify) {
|
||||
// Method name and signature are verified above, when iterating NameAndType_info.
|
||||
// Need only to be sure signature is non-zero length and the right type.
|
||||
if (signature->utf8_length() == 0 ||
|
||||
signature->byte_at(0) != JVM_SIGNATURE_FUNC) {
|
||||
throwIllegalSignature(
|
||||
"Method", name, signature, CHECK);
|
||||
throwIllegalSignature("Method", name, signature, CHECK);
|
||||
}
|
||||
} else {
|
||||
verify_legal_method_signature(name, signature, CHECK);
|
||||
}
|
||||
if (tag == JVM_CONSTANT_Methodref) {
|
||||
// 4509014: If a class method name begins with '<', it must be "<init>".
|
||||
assert(name != NULL, "method name in constant pool is null");
|
||||
const unsigned int name_len = name->utf8_length();
|
||||
if (name_len != 0 && name->byte_at(0) == '<') {
|
||||
if (name != vmSymbols::object_initializer_name()) {
|
||||
classfile_parse_error(
|
||||
"Bad method name at constant pool index %u in class file %s",
|
||||
name_ref_index, CHECK);
|
||||
}
|
||||
}
|
||||
// 4509014: If a class method name begins with '<', it must be "<init>"
|
||||
const unsigned int name_len = name->utf8_length();
|
||||
if (tag == JVM_CONSTANT_Methodref &&
|
||||
name_len != 0 &&
|
||||
name->byte_at(0) == '<' &&
|
||||
name != vmSymbols::object_initializer_name()) {
|
||||
classfile_parse_error(
|
||||
"Bad method name at constant pool index %u in class file %s",
|
||||
name_ref_index, CHECK);
|
||||
}
|
||||
}
|
||||
break;
|
||||
@ -4843,19 +4840,28 @@ const char* ClassFileParser::skip_over_field_signature(const char* signature,
|
||||
}
|
||||
}
|
||||
else {
|
||||
// 4900761: For class version > 48, any unicode is allowed in class name.
|
||||
// Skip leading 'L' and ignore first appearance of ';'
|
||||
length--;
|
||||
signature++;
|
||||
while (length > 0 && signature[0] != ';') {
|
||||
if (signature[0] == '.') {
|
||||
classfile_parse_error("Class name contains illegal character '.' in descriptor in class file %s", CHECK_0);
|
||||
}
|
||||
length--;
|
||||
signature++;
|
||||
}
|
||||
if (signature[0] == ';') { return signature + 1; }
|
||||
}
|
||||
char* c = strchr((char*) signature, ';');
|
||||
// Format check signature
|
||||
if (c != NULL) {
|
||||
ResourceMark rm(THREAD);
|
||||
int newlen = c - (char*) signature;
|
||||
char* sig = NEW_RESOURCE_ARRAY(char, newlen + 1);
|
||||
strncpy(sig, signature, newlen);
|
||||
sig[newlen] = '\0';
|
||||
|
||||
bool legal = verify_unqualified_name(sig, newlen, LegalClass);
|
||||
if (!legal) {
|
||||
classfile_parse_error("Class name contains illegal character "
|
||||
"in descriptor in class file %s",
|
||||
CHECK_0);
|
||||
return NULL;
|
||||
}
|
||||
return signature + newlen + 1;
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
case JVM_SIGNATURE_ARRAY:
|
||||
@ -4869,7 +4875,6 @@ const char* ClassFileParser::skip_over_field_signature(const char* signature,
|
||||
length--;
|
||||
void_ok = false;
|
||||
break;
|
||||
|
||||
default:
|
||||
return NULL;
|
||||
}
|
||||
@ -5402,6 +5407,59 @@ void ClassFileParser::fill_instance_klass(InstanceKlass* ik, bool changed_by_loa
|
||||
debug_only(ik->verify();)
|
||||
}
|
||||
|
||||
// For an anonymous class that is in the unnamed package, move it to its host class's
// package by prepending its host class's package name to its class name and setting
// its _class_name field.
void ClassFileParser::prepend_host_package_name(const InstanceKlass* host_klass, TRAPS) {
ResourceMark rm(THREAD);
assert(strrchr(_class_name->as_C_string(), '/') == NULL,
"Anonymous class should not be in a package");
const char* host_pkg_name =
ClassLoader::package_from_name(host_klass->name()->as_C_string(), NULL);

if (host_pkg_name != NULL) {
size_t host_pkg_len = strlen(host_pkg_name);
int class_name_len = _class_name->utf8_length();
char* new_anon_name =
NEW_RESOURCE_ARRAY(char, host_pkg_len + 1 + class_name_len);
// Copy host package name and trailing /.
strncpy(new_anon_name, host_pkg_name, host_pkg_len);
new_anon_name[host_pkg_len] = '/';
// Append anonymous class name. The anonymous class name can contain odd
// characters. So, do a strncpy instead of using sprintf("%s...").
strncpy(new_anon_name + host_pkg_len + 1, (char *)_class_name->base(), class_name_len);

// Create a symbol and update the anonymous class name.
_class_name = SymbolTable::new_symbol(new_anon_name,
(int)host_pkg_len + 1 + class_name_len,
CHECK);
}
}

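A worked example of the transformation performed by prepend_host_package_name: for a host class in package com/foo and an anonymous class named AnonClass, the assembled buffer is "com/foo/AnonClass". The snippet below reproduces only the string assembly, in standard C++ as an illustration; sketch_prepend and the sample names are assumptions, not HotSpot code.

#include <string>
// Illustration only: host package + '/' + anonymous class name.
static std::string sketch_prepend(const char* host_pkg_name, const char* anon_name) {
  std::string result(host_pkg_name);   // "com/foo"
  result += '/';                       // trailing separator, as in the real code
  result += anon_name;                 // anonymous names may contain unusual characters
  return result;                       // "com/foo/AnonClass"
}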
// If the host class and the anonymous class are in the same package then do
|
||||
// nothing. If the anonymous class is in the unnamed package then move it to its
|
||||
// host's package. If the classes are in different packages then throw an IAE
|
||||
// exception.
|
||||
void ClassFileParser::fix_anonymous_class_name(TRAPS) {
|
||||
assert(_host_klass != NULL, "Expected an anonymous class");
|
||||
|
||||
const jbyte* anon_last_slash = UTF8::strrchr(_class_name->base(),
|
||||
_class_name->utf8_length(), '/');
|
||||
if (anon_last_slash == NULL) { // Unnamed package
|
||||
prepend_host_package_name(_host_klass, CHECK);
|
||||
} else {
|
||||
if (!InstanceKlass::is_same_class_package(_host_klass->class_loader(),
|
||||
_host_klass->name(),
|
||||
_host_klass->class_loader(),
|
||||
_class_name)) {
|
||||
ResourceMark rm(THREAD);
|
||||
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
|
||||
err_msg("Host class %s and anonymous class %s are in different packages",
|
||||
_host_klass->name()->as_C_string(), _class_name->as_C_string()));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static bool relax_format_check_for(ClassLoaderData* loader_data) {
|
||||
bool trusted = (loader_data->is_the_null_class_loader_data() ||
|
||||
SystemDictionary::is_platform_class_loader(loader_data->class_loader()));
|
||||
@ -5417,7 +5475,7 @@ ClassFileParser::ClassFileParser(ClassFileStream* stream,
|
||||
Symbol* name,
|
||||
ClassLoaderData* loader_data,
|
||||
Handle protection_domain,
|
||||
const Klass* host_klass,
|
||||
const InstanceKlass* host_klass,
|
||||
GrowableArray<Handle>* cp_patches,
|
||||
Publicity pub_level,
|
||||
TRAPS) :
|
||||
@ -5692,6 +5750,13 @@ void ClassFileParser::parse_stream(const ClassFileStream* const stream,
|
||||
return;
|
||||
}
|
||||
|
||||
// if this is an anonymous class fix up its name if it's in the unnamed
|
||||
// package. Otherwise, throw IAE if it is in a different package than
|
||||
// its host class.
|
||||
if (_host_klass != NULL) {
|
||||
fix_anonymous_class_name(CHECK);
|
||||
}
|
||||
|
||||
// Verification prevents us from creating names with dots in them, this
|
||||
// asserts that that's the case.
|
||||
assert(is_internal_format(_class_name), "external class name format used internally");
|
||||
|
@ -79,7 +79,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
|
||||
const Symbol* _requested_name;
|
||||
Symbol* _class_name;
|
||||
mutable ClassLoaderData* _loader_data;
|
||||
const Klass* _host_klass;
|
||||
const InstanceKlass* _host_klass;
|
||||
GrowableArray<Handle>* _cp_patches; // overrides for CP entries
|
||||
|
||||
// Metadata created before the instance klass is created. Must be deallocated
|
||||
@ -155,6 +155,9 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
|
||||
ConstantPool* cp,
|
||||
TRAPS);
|
||||
|
||||
void prepend_host_package_name(const InstanceKlass* host_klass, TRAPS);
|
||||
void fix_anonymous_class_name(TRAPS);
|
||||
|
||||
void fill_instance_klass(InstanceKlass* ik, bool cf_changed_in_CFLH, TRAPS);
|
||||
void set_klass(InstanceKlass* instance);
|
||||
|
||||
@ -474,7 +477,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
|
||||
Symbol* name,
|
||||
ClassLoaderData* loader_data,
|
||||
Handle protection_domain,
|
||||
const Klass* host_klass,
|
||||
const InstanceKlass* host_klass,
|
||||
GrowableArray<Handle>* cp_patches,
|
||||
Publicity pub_level,
|
||||
TRAPS);
|
||||
@ -500,7 +503,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
|
||||
bool is_anonymous() const { return _host_klass != NULL; }
|
||||
bool is_interface() const { return _access_flags.is_interface(); }
|
||||
|
||||
const Klass* host_klass() const { return _host_klass; }
|
||||
const InstanceKlass* host_klass() const { return _host_klass; }
|
||||
const GrowableArray<Handle>* cp_patches() const { return _cp_patches; }
|
||||
ClassLoaderData* loader_data() const { return _loader_data; }
|
||||
const Symbol* class_name() const { return _class_name; }
|
||||
|
@ -962,11 +962,11 @@ bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure,
|
||||
|
||||
// Mark metadata seen on the stack only so we can delete unneeded entries.
|
||||
// Only walk all metadata, including the expensive code cache walk, for Full GC
|
||||
// and only if class redefinition occurred and if there are previous versions of
|
||||
// and only if class redefinition and if there's previous versions of
|
||||
// Klasses to delete.
|
||||
bool walk_all_metadata = clean_previous_versions &&
|
||||
JvmtiExport::has_redefined_a_class() &&
|
||||
InstanceKlass::has_previous_versions();
|
||||
InstanceKlass::has_previous_versions_and_reset();
|
||||
MetadataOnStackMark md_on_stack(walk_all_metadata);
|
||||
|
||||
// Save previous _unloading pointer for CMS which may add to unloading list before
|
||||
|
@ -94,7 +94,7 @@ instanceKlassHandle KlassFactory::create_from_stream(ClassFileStream* stream,
|
||||
Symbol* name,
|
||||
ClassLoaderData* loader_data,
|
||||
Handle protection_domain,
|
||||
const Klass* host_klass,
|
||||
const InstanceKlass* host_klass,
|
||||
GrowableArray<Handle>* cp_patches,
|
||||
TRAPS) {
|
||||
|
||||
|
@ -72,7 +72,7 @@ class KlassFactory : AllStatic {
|
||||
Symbol* name,
|
||||
ClassLoaderData* loader_data,
|
||||
Handle protection_domain,
|
||||
const Klass* host_klass,
|
||||
const InstanceKlass* host_klass,
|
||||
GrowableArray<Handle>* cp_patches,
|
||||
TRAPS);
|
||||
};
|
||||
|
@ -1027,7 +1027,7 @@ Klass* SystemDictionary::parse_stream(Symbol* class_name,
|
||||
Handle class_loader,
|
||||
Handle protection_domain,
|
||||
ClassFileStream* st,
|
||||
const Klass* host_klass,
|
||||
const InstanceKlass* host_klass,
|
||||
GrowableArray<Handle>* cp_patches,
|
||||
TRAPS) {
|
||||
|
||||
|
@ -299,7 +299,7 @@ public:
|
||||
Handle class_loader,
|
||||
Handle protection_domain,
|
||||
ClassFileStream* st,
|
||||
const Klass* host_klass,
|
||||
const InstanceKlass* host_klass,
|
||||
GrowableArray<Handle>* cp_patches,
|
||||
TRAPS);
|
||||
|
||||
|
@ -2786,7 +2786,7 @@ void ClassVerifier::verify_invoke_instructions(
|
||||
// direct interface relative to the host class
|
||||
have_imr_indirect = (have_imr_indirect &&
|
||||
!is_same_or_direct_interface(
|
||||
InstanceKlass::cast(current_class()->host_klass()),
|
||||
current_class()->host_klass(),
|
||||
host_klass_type, ref_class_type));
|
||||
}
|
||||
if (!subtype) {
|
||||
|
@ -366,6 +366,7 @@ bool vmIntrinsics::can_trap(vmIntrinsics::ID id) {
|
||||
switch(id) {
|
||||
#ifdef TRACE_HAVE_INTRINSICS
|
||||
case vmIntrinsics::_counterTime:
|
||||
case vmIntrinsics::_getClassId:
|
||||
#endif
|
||||
case vmIntrinsics::_currentTimeMillis:
|
||||
case vmIntrinsics::_nanoTime:
|
||||
|
@ -1305,7 +1305,7 @@ void CodeCache::report_codemem_full(int code_blob_type, bool print) {
|
||||
event.set_entryCount(heap->blob_count());
|
||||
event.set_methodCount(heap->nmethod_count());
|
||||
event.set_adaptorCount(heap->adapter_count());
|
||||
event.set_unallocatedCapacity(heap->unallocated_capacity()/K);
|
||||
event.set_unallocatedCapacity(heap->unallocated_capacity());
|
||||
event.set_fullCount(heap->full_count());
|
||||
event.commit();
|
||||
}
|
||||
|
@ -3511,6 +3511,7 @@ bool CMSCollector::do_marking_mt() {
|
||||
conc_workers()->active_workers(),
|
||||
Threads::number_of_non_daemon_threads());
|
||||
num_workers = conc_workers()->update_active_workers(num_workers);
|
||||
log_info(gc,task)("Using %u workers of %u for marking", num_workers, conc_workers()->total_workers());
|
||||
|
||||
CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
|
||||
|
||||
|
@ -899,6 +899,8 @@ void ParNewGeneration::collect(bool full,
|
||||
workers->active_workers(),
|
||||
Threads::number_of_non_daemon_threads());
|
||||
active_workers = workers->update_active_workers(active_workers);
|
||||
log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers->total_workers());
|
||||
|
||||
_old_gen = gch->old_gen();
|
||||
|
||||
// If the next generation is too full to accommodate worst-case promotion
|
||||
|
@ -25,6 +25,7 @@
|
||||
#include "precompiled.hpp"
|
||||
#include "code/codeCache.hpp"
|
||||
#include "code/nmethod.hpp"
|
||||
#include "gc/g1/g1CodeRootSetTable.hpp"
|
||||
#include "gc/g1/g1CodeCacheRemSet.hpp"
|
||||
#include "gc/g1/heapRegion.hpp"
|
||||
#include "memory/heap.hpp"
|
||||
@ -33,58 +34,13 @@
|
||||
#include "utilities/hashtable.inline.hpp"
|
||||
#include "utilities/stack.inline.hpp"
|
||||
|
||||
class CodeRootSetTable : public Hashtable<nmethod*, mtGC> {
|
||||
friend class G1CodeRootSetTest;
|
||||
typedef HashtableEntry<nmethod*, mtGC> Entry;
|
||||
G1CodeRootSetTable* volatile G1CodeRootSetTable::_purge_list = NULL;
|
||||
|
||||
static CodeRootSetTable* volatile _purge_list;
|
||||
|
||||
CodeRootSetTable* _purge_next;
|
||||
|
||||
unsigned int compute_hash(nmethod* nm) {
|
||||
uintptr_t hash = (uintptr_t)nm;
|
||||
return hash ^ (hash >> 7); // code heap blocks are 128byte aligned
|
||||
}
|
||||
|
||||
void remove_entry(Entry* e, Entry* previous);
|
||||
Entry* new_entry(nmethod* nm);
|
||||
|
||||
public:
|
||||
CodeRootSetTable(int size) : Hashtable<nmethod*, mtGC>(size, sizeof(Entry)), _purge_next(NULL) {}
|
||||
~CodeRootSetTable();
|
||||
|
||||
// Needs to be protected locks
|
||||
bool add(nmethod* nm);
|
||||
bool remove(nmethod* nm);
|
||||
|
||||
// Can be called without locking
|
||||
bool contains(nmethod* nm);
|
||||
|
||||
int entry_size() const { return BasicHashtable<mtGC>::entry_size(); }
|
||||
|
||||
void copy_to(CodeRootSetTable* new_table);
|
||||
void nmethods_do(CodeBlobClosure* blk);
|
||||
|
||||
template<typename CB>
|
||||
int remove_if(CB& should_remove);
|
||||
|
||||
static void purge_list_append(CodeRootSetTable* tbl);
|
||||
static void purge();
|
||||
|
||||
static size_t static_mem_size() {
|
||||
return sizeof(_purge_list);
|
||||
}
|
||||
|
||||
size_t mem_size();
|
||||
};
|
||||
|
||||
CodeRootSetTable* volatile CodeRootSetTable::_purge_list = NULL;
|
||||
|
||||
size_t CodeRootSetTable::mem_size() {
|
||||
return sizeof(CodeRootSetTable) + (entry_size() * number_of_entries()) + (sizeof(HashtableBucket<mtGC>) * table_size());
|
||||
size_t G1CodeRootSetTable::mem_size() {
|
||||
return sizeof(G1CodeRootSetTable) + (entry_size() * number_of_entries()) + (sizeof(HashtableBucket<mtGC>) * table_size());
|
||||
}
|
||||
|
||||
CodeRootSetTable::Entry* CodeRootSetTable::new_entry(nmethod* nm) {
|
||||
G1CodeRootSetTable::Entry* G1CodeRootSetTable::new_entry(nmethod* nm) {
|
||||
unsigned int hash = compute_hash(nm);
|
||||
Entry* entry = (Entry*) new_entry_free_list();
|
||||
if (entry == NULL) {
|
||||
@ -96,7 +52,7 @@ CodeRootSetTable::Entry* CodeRootSetTable::new_entry(nmethod* nm) {
|
||||
return entry;
|
||||
}
|
||||
|
||||
void CodeRootSetTable::remove_entry(Entry* e, Entry* previous) {
|
||||
void G1CodeRootSetTable::remove_entry(Entry* e, Entry* previous) {
|
||||
int index = hash_to_index(e->hash());
|
||||
assert((e == bucket(index)) == (previous == NULL), "if e is the first entry then previous should be null");
|
||||
|
||||
@ -108,7 +64,7 @@ void CodeRootSetTable::remove_entry(Entry* e, Entry* previous) {
|
||||
free_entry(e);
|
||||
}
|
||||
|
||||
CodeRootSetTable::~CodeRootSetTable() {
|
||||
G1CodeRootSetTable::~G1CodeRootSetTable() {
|
||||
for (int index = 0; index < table_size(); ++index) {
|
||||
for (Entry* e = bucket(index); e != NULL; ) {
|
||||
Entry* to_remove = e;
|
||||
@ -125,7 +81,7 @@ CodeRootSetTable::~CodeRootSetTable() {
|
||||
}
|
||||
}
|
||||
|
||||
bool CodeRootSetTable::add(nmethod* nm) {
|
||||
bool G1CodeRootSetTable::add(nmethod* nm) {
|
||||
if (!contains(nm)) {
|
||||
Entry* e = new_entry(nm);
|
||||
int index = hash_to_index(e->hash());
|
||||
@ -135,7 +91,7 @@ bool CodeRootSetTable::add(nmethod* nm) {
|
||||
return false;
|
||||
}
|
||||
|
||||
bool CodeRootSetTable::contains(nmethod* nm) {
|
||||
bool G1CodeRootSetTable::contains(nmethod* nm) {
|
||||
int index = hash_to_index(compute_hash(nm));
|
||||
for (Entry* e = bucket(index); e != NULL; e = e->next()) {
|
||||
if (e->literal() == nm) {
|
||||
@ -145,7 +101,7 @@ bool CodeRootSetTable::contains(nmethod* nm) {
|
||||
return false;
|
||||
}
|
||||
|
||||
bool CodeRootSetTable::remove(nmethod* nm) {
|
||||
bool G1CodeRootSetTable::remove(nmethod* nm) {
|
||||
int index = hash_to_index(compute_hash(nm));
|
||||
Entry* previous = NULL;
|
||||
for (Entry* e = bucket(index); e != NULL; previous = e, e = e->next()) {
|
||||
@ -157,7 +113,7 @@ bool CodeRootSetTable::remove(nmethod* nm) {
|
||||
return false;
|
||||
}
|
||||
|
||||
void CodeRootSetTable::copy_to(CodeRootSetTable* new_table) {
|
||||
void G1CodeRootSetTable::copy_to(G1CodeRootSetTable* new_table) {
|
||||
for (int index = 0; index < table_size(); ++index) {
|
||||
for (Entry* e = bucket(index); e != NULL; e = e->next()) {
|
||||
new_table->add(e->literal());
|
||||
@ -166,7 +122,7 @@ void CodeRootSetTable::copy_to(CodeRootSetTable* new_table) {
|
||||
new_table->copy_freelist(this);
|
||||
}
|
||||
|
||||
void CodeRootSetTable::nmethods_do(CodeBlobClosure* blk) {
|
||||
void G1CodeRootSetTable::nmethods_do(CodeBlobClosure* blk) {
|
||||
for (int index = 0; index < table_size(); ++index) {
|
||||
for (Entry* e = bucket(index); e != NULL; e = e->next()) {
|
||||
blk->do_code_blob(e->literal());
|
||||
@ -175,7 +131,7 @@ void CodeRootSetTable::nmethods_do(CodeBlobClosure* blk) {
|
||||
}
|
||||
|
||||
template<typename CB>
|
||||
int CodeRootSetTable::remove_if(CB& should_remove) {
|
||||
int G1CodeRootSetTable::remove_if(CB& should_remove) {
|
||||
int num_removed = 0;
|
||||
for (int index = 0; index < table_size(); ++index) {
|
||||
Entry* previous = NULL;
|
||||
@ -198,52 +154,52 @@ G1CodeRootSet::~G1CodeRootSet() {
|
||||
delete _table;
|
||||
}
|
||||
|
||||
CodeRootSetTable* G1CodeRootSet::load_acquire_table() {
|
||||
return (CodeRootSetTable*) OrderAccess::load_ptr_acquire(&_table);
|
||||
G1CodeRootSetTable* G1CodeRootSet::load_acquire_table() {
|
||||
return (G1CodeRootSetTable*) OrderAccess::load_ptr_acquire(&_table);
|
||||
}
|
||||
|
||||
void G1CodeRootSet::allocate_small_table() {
|
||||
CodeRootSetTable* temp = new CodeRootSetTable(SmallSize);
|
||||
G1CodeRootSetTable* temp = new G1CodeRootSetTable(SmallSize);
|
||||
|
||||
OrderAccess::release_store_ptr(&_table, temp);
|
||||
}
|
||||
|
||||
void CodeRootSetTable::purge_list_append(CodeRootSetTable* table) {
|
||||
void G1CodeRootSetTable::purge_list_append(G1CodeRootSetTable* table) {
|
||||
for (;;) {
|
||||
table->_purge_next = _purge_list;
|
||||
CodeRootSetTable* old = (CodeRootSetTable*) Atomic::cmpxchg_ptr(table, &_purge_list, table->_purge_next);
|
||||
G1CodeRootSetTable* old = (G1CodeRootSetTable*) Atomic::cmpxchg_ptr(table, &_purge_list, table->_purge_next);
|
||||
if (old == table->_purge_next) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void CodeRootSetTable::purge() {
|
||||
CodeRootSetTable* table = _purge_list;
|
||||
void G1CodeRootSetTable::purge() {
|
||||
G1CodeRootSetTable* table = _purge_list;
|
||||
_purge_list = NULL;
|
||||
while (table != NULL) {
|
||||
CodeRootSetTable* to_purge = table;
|
||||
G1CodeRootSetTable* to_purge = table;
|
||||
table = table->_purge_next;
|
||||
delete to_purge;
|
||||
}
|
||||
}
|
||||
|
||||
void G1CodeRootSet::move_to_large() {
|
||||
CodeRootSetTable* temp = new CodeRootSetTable(LargeSize);
|
||||
G1CodeRootSetTable* temp = new G1CodeRootSetTable(LargeSize);
|
||||
|
||||
_table->copy_to(temp);
|
||||
|
||||
CodeRootSetTable::purge_list_append(_table);
|
||||
G1CodeRootSetTable::purge_list_append(_table);
|
||||
|
||||
OrderAccess::release_store_ptr(&_table, temp);
|
||||
}
|
||||
|
||||
void G1CodeRootSet::purge() {
|
||||
CodeRootSetTable::purge();
|
||||
G1CodeRootSetTable::purge();
|
||||
}
|
||||
|
||||
size_t G1CodeRootSet::static_mem_size() {
|
||||
return CodeRootSetTable::static_mem_size();
|
||||
return G1CodeRootSetTable::static_mem_size();
|
||||
}
|
||||
|
||||
void G1CodeRootSet::add(nmethod* method) {
|
||||
@ -278,7 +234,7 @@ bool G1CodeRootSet::remove(nmethod* method) {
|
||||
}
|
||||
|
||||
bool G1CodeRootSet::contains(nmethod* method) {
|
||||
CodeRootSetTable* table = load_acquire_table(); // contains() may be called outside of lock, so ensure mem sync.
|
||||
G1CodeRootSetTable* table = load_acquire_table(); // contains() may be called outside of lock, so ensure mem sync.
|
||||
if (table != NULL) {
|
||||
return table->contains(method);
|
||||
}
|
||||
@ -348,67 +304,3 @@ void G1CodeRootSet::clean(HeapRegion* owner) {
|
||||
clear();
|
||||
}
|
||||
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
|
||||
class G1CodeRootSetTest {
|
||||
public:
|
||||
static void test() {
|
||||
{
|
||||
G1CodeRootSet set1;
|
||||
assert(set1.is_empty(), "Code root set must be initially empty but is not.");
|
||||
|
||||
assert(G1CodeRootSet::static_mem_size() == sizeof(void*),
|
||||
"The code root set's static memory usage is incorrect, " SIZE_FORMAT " bytes", G1CodeRootSet::static_mem_size());
|
||||
|
||||
set1.add((nmethod*)1);
|
||||
assert(set1.length() == 1, "Added exactly one element, but set contains "
|
||||
SIZE_FORMAT " elements", set1.length());
|
||||
|
||||
const size_t num_to_add = (size_t)G1CodeRootSet::Threshold + 1;
|
||||
|
||||
for (size_t i = 1; i <= num_to_add; i++) {
|
||||
set1.add((nmethod*)1);
|
||||
}
|
||||
assert(set1.length() == 1,
|
||||
"Duplicate detection should not have increased the set size but "
|
||||
"is " SIZE_FORMAT, set1.length());
|
||||
|
||||
for (size_t i = 2; i <= num_to_add; i++) {
|
||||
set1.add((nmethod*)(uintptr_t)(i));
|
||||
}
|
||||
assert(set1.length() == num_to_add,
|
||||
"After adding in total " SIZE_FORMAT " distinct code roots, they "
|
||||
"need to be in the set, but there are only " SIZE_FORMAT,
|
||||
num_to_add, set1.length());
|
||||
|
||||
assert(CodeRootSetTable::_purge_list != NULL, "should have grown to large hashtable");
|
||||
|
||||
size_t num_popped = 0;
|
||||
for (size_t i = 1; i <= num_to_add; i++) {
|
||||
bool removed = set1.remove((nmethod*)i);
|
||||
if (removed) {
|
||||
num_popped += 1;
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
assert(num_popped == num_to_add,
|
||||
"Managed to pop " SIZE_FORMAT " code roots, but only " SIZE_FORMAT " "
|
||||
"were added", num_popped, num_to_add);
|
||||
assert(CodeRootSetTable::_purge_list != NULL, "should have grown to large hashtable");
|
||||
|
||||
G1CodeRootSet::purge();
|
||||
|
||||
assert(CodeRootSetTable::_purge_list == NULL, "should have purged old small tables");
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
};
|
||||
|
||||
void TestCodeCacheRemSet_test() {
|
||||
G1CodeRootSetTest::test();
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -28,7 +28,7 @@
|
||||
#include "memory/allocation.hpp"
|
||||
|
||||
class CodeBlobClosure;
|
||||
class CodeRootSetTable;
|
||||
class G1CodeRootSetTable;
|
||||
class HeapRegion;
|
||||
class nmethod;
|
||||
|
||||
@ -42,8 +42,8 @@ class G1CodeRootSet VALUE_OBJ_CLASS_SPEC {
|
||||
const static size_t Threshold = 24;
|
||||
const static size_t LargeSize = 512;
|
||||
|
||||
CodeRootSetTable* _table;
|
||||
CodeRootSetTable* load_acquire_table();
|
||||
G1CodeRootSetTable* _table;
|
||||
G1CodeRootSetTable* load_acquire_table();
|
||||
|
||||
size_t _length;
|
||||
|
||||
|
76
hotspot/src/share/vm/gc/g1/g1CodeRootSetTable.hpp
Normal file
@ -0,0 +1,76 @@
|
||||
/*
|
||||
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#ifndef SHARE_VM_GC_G1_G1CODEROOTSETTABLE_HPP
#define SHARE_VM_GC_G1_G1CODEROOTSETTABLE_HPP

#include "utilities/hashtable.hpp"

class nmethod;

class G1CodeRootSetTable : public Hashtable<nmethod*, mtGC> {
friend class G1CodeRootSetTest;
typedef HashtableEntry<nmethod*, mtGC> Entry;

static G1CodeRootSetTable* volatile _purge_list;

G1CodeRootSetTable* _purge_next;

unsigned int compute_hash(nmethod* nm) {
uintptr_t hash = (uintptr_t)nm;
return hash ^ (hash >> 7); // code heap blocks are 128byte aligned
}

void remove_entry(Entry* e, Entry* previous);
Entry* new_entry(nmethod* nm);

public:
G1CodeRootSetTable(int size) : Hashtable<nmethod*, mtGC>(size, sizeof(Entry)), _purge_next(NULL) {}
~G1CodeRootSetTable();

// Needs to be protected by locks
bool add(nmethod* nm);
bool remove(nmethod* nm);

// Can be called without locking
bool contains(nmethod* nm);

int entry_size() const { return BasicHashtable<mtGC>::entry_size(); }

void copy_to(G1CodeRootSetTable* new_table);
void nmethods_do(CodeBlobClosure* blk);

template<typename CB>
int remove_if(CB& should_remove);

static void purge_list_append(G1CodeRootSetTable* tbl);
static void purge();

static size_t static_mem_size() {
return sizeof(_purge_list);
}

size_t mem_size();
};

#endif /* SHARE_VM_GC_G1_G1CODEROOTSETTABLE_HPP */
|
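The hash function in the new table exploits the 128-byte alignment of code heap blocks: the low seven bits of an nmethod address are always zero, so folding in bits shifted down by seven puts some entropy back into the low bits used for bucket selection. A small illustration in standard C++ (sketch_compute_hash and the sample address are assumptions, not HotSpot code):

#include <cstdint>
// A 128-byte aligned address has its low 7 bits clear, so hashing on the raw
// address alone would cluster entries; xor-ing in (addr >> 7) spreads them out.
static_assert((0x7f3a2c00u & 0x7fu) == 0u, "sample code-heap address is 128-byte aligned");
static inline unsigned int sketch_compute_hash(uintptr_t addr) {
  return (unsigned int)(addr ^ (addr >> 7));   // mixes higher bits into the low bits
}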
@ -1332,6 +1332,7 @@ bool G1CollectedHeap::do_full_collection(bool explicit_gc,
|
||||
workers()->active_workers(),
|
||||
Threads::number_of_non_daemon_threads());
|
||||
workers()->update_active_workers(n_workers);
|
||||
log_info(gc,task)("Using %u workers of %u to rebuild remembered set", n_workers, workers()->total_workers());
|
||||
|
||||
ParRebuildRSTask rebuild_rs_task(this);
|
||||
workers()->run_task(&rebuild_rs_task);
|
||||
@ -3068,6 +3069,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
|
||||
workers()->active_workers(),
|
||||
Threads::number_of_non_daemon_threads());
|
||||
workers()->update_active_workers(active_workers);
|
||||
log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());
|
||||
|
||||
TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
|
||||
TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
|
||||
@ -4513,6 +4515,7 @@ void G1CollectedHeap::post_evacuate_collection_set(EvacuationInfo& evacuation_in
|
||||
#if defined(COMPILER2) || INCLUDE_JVMCI
|
||||
DerivedPointerTable::update_pointers();
|
||||
#endif
|
||||
g1_policy()->print_age_table();
|
||||
}
|
||||
|
||||
void G1CollectedHeap::record_obj_copy_mem_stats() {
|
||||
|
@ -32,6 +32,7 @@
|
||||
#include "gc/g1/heapRegionSet.hpp"
|
||||
#include "logging/logStream.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
#include "utilities/quickSort.hpp"
|
||||
|
||||
G1CollectorState* G1CollectionSet::collector_state() {
|
||||
return _g1->collector_state();
|
||||
@ -396,6 +397,16 @@ double G1CollectionSet::finalize_young_part(double target_pause_time_ms, G1Survi
|
||||
return time_remaining_ms;
|
||||
}
|
||||
|
||||
static int compare_region_idx(const uint a, const uint b) {
|
||||
if (a > b) {
|
||||
return 1;
|
||||
} else if (a == b) {
|
||||
return 0;
|
||||
} else {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
void G1CollectionSet::finalize_old_part(double time_remaining_ms) {
|
||||
double non_young_start_time_sec = os::elapsedTime();
|
||||
double predicted_old_time_ms = 0.0;
|
||||
@ -493,6 +504,8 @@ void G1CollectionSet::finalize_old_part(double time_remaining_ms) {
|
||||
|
||||
double non_young_end_time_sec = os::elapsedTime();
|
||||
phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
|
||||
|
||||
QuickSort::sort<uint>(_collection_set_regions, (int)_collection_set_cur_length, compare_region_idx, true);
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
|
@ -1035,6 +1035,8 @@ void G1ConcurrentMark::mark_from_roots() {
|
||||
// worker threads may currently exist and more may not be
|
||||
// available.
|
||||
active_workers = _parallel_workers->update_active_workers(active_workers);
|
||||
log_info(gc, task)("Using %u workers of %u for marking", active_workers, _parallel_workers->total_workers());
|
||||
|
||||
// Parallel task terminator is set in "set_concurrency_and_phase()"
|
||||
set_concurrency_and_phase(active_workers, true /* concurrent */);
|
||||
|
||||
@ -1902,7 +1904,8 @@ G1ConcurrentMark::claim_region(uint worker_id) {
|
||||
assert(_g1h->is_in_g1_reserved(finger), "invariant");
|
||||
|
||||
HeapRegion* curr_region = _g1h->heap_region_containing(finger);
|
||||
|
||||
// Make sure that the reads below do not float before loading curr_region.
|
||||
OrderAccess::loadload();
|
||||
// Above heap_region_containing may return NULL as we always scan claim
|
||||
// until the end of the heap. In this case, just jump to the next region.
|
||||
HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
|
||||
|
@ -885,6 +885,15 @@ bool G1DefaultPolicy::adaptive_young_list_length() const {
return _young_gen_sizer.adaptive_young_list_length();
}

size_t G1DefaultPolicy::desired_survivor_size() const {
size_t const survivor_capacity = HeapRegion::GrainWords * _max_survivor_regions;
return (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);
}

void G1DefaultPolicy::print_age_table() {
_survivors_age_table.print_age_table(_tenuring_threshold);
}

void G1DefaultPolicy::update_max_gc_locker_expansion() {
uint expansion_region_num = 0;
if (GCLockerEdenExpansionPercent > 0) {
@ -908,8 +917,11 @@ void G1DefaultPolicy::update_survivors_policy() {
// smaller than 1.0) we'll get 1.
_max_survivor_regions = (uint) ceil(max_survivor_regions_d);

_tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
HeapRegion::GrainWords * _max_survivor_regions, _policy_counters);
_tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(desired_survivor_size());
if (UsePerfData) {
_policy_counters->tenuring_threshold()->set_value(_tenuring_threshold);
_policy_counters->desired_survivor_size()->set_value(desired_survivor_size() * oopSize);
}
}

bool G1DefaultPolicy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) {
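As a quick sanity check of desired_survivor_size() above: with a region size of 1M words, 8 survivor regions, and TargetSurvivorRatio at its default of 50, the target comes out to 4M words. The values below are illustrative assumptions, not numbers taken from the diff:

#include <cstddef>
// Worked example with assumed values: 1M words per region, 8 survivor regions,
// TargetSurvivorRatio = 50 (default).
static const size_t grain_words_example       = 1024 * 1024;
static const size_t max_survivor_regions_ex   = 8;
static const size_t survivor_capacity_example = grain_words_example * max_survivor_regions_ex;
static const size_t desired_example =
    (size_t)(((double)survivor_capacity_example * 50) / 100);   // = 4 * 1024 * 1024 words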
@ -360,6 +360,8 @@ private:
|
||||
|
||||
AgeTable _survivors_age_table;
|
||||
|
||||
protected:
|
||||
size_t desired_survivor_size() const;
|
||||
public:
|
||||
uint tenuring_threshold() const { return _tenuring_threshold; }
|
||||
|
||||
@ -379,6 +381,8 @@ public:
|
||||
_survivors_age_table.merge(age_table);
|
||||
}
|
||||
|
||||
void print_age_table();
|
||||
|
||||
void update_max_gc_locker_expansion();
|
||||
|
||||
void update_survivors_policy();
|
||||
|
@ -132,9 +132,16 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
|
||||
MarkingCodeBlobClosure follow_code_closure(&GenMarkSweep::follow_root_closure, !CodeBlobToOopClosure::FixRelocations);
|
||||
{
|
||||
G1RootProcessor root_processor(g1h, 1);
|
||||
root_processor.process_strong_roots(&GenMarkSweep::follow_root_closure,
|
||||
&GenMarkSweep::follow_cld_closure,
|
||||
&follow_code_closure);
|
||||
if (ClassUnloading) {
|
||||
root_processor.process_strong_roots(&GenMarkSweep::follow_root_closure,
|
||||
&GenMarkSweep::follow_cld_closure,
|
||||
&follow_code_closure);
|
||||
} else {
|
||||
root_processor.process_all_roots_no_string_table(
|
||||
&GenMarkSweep::follow_root_closure,
|
||||
&GenMarkSweep::follow_cld_closure,
|
||||
&follow_code_closure);
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
@ -157,7 +164,7 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
|
||||
// This is the point where the entire marking should have completed.
|
||||
assert(GenMarkSweep::_marking_stack.is_empty(), "Marking should have completed");
|
||||
|
||||
{
|
||||
if (ClassUnloading) {
|
||||
GCTraceTime(Debug, gc, phases) trace("Class Unloading", gc_timer());
|
||||
|
||||
// Unload classes and purge the SystemDictionary.
|
||||
|
@ -181,6 +181,9 @@ public:
|
||||
virtual void note_stop_adding_survivor_regions() = 0;
|
||||
|
||||
virtual void record_age_table(AgeTable* age_table) = 0;
|
||||
virtual void print_age_table() = 0;
|
||||
protected:
|
||||
virtual size_t desired_survivor_size() const = 0;
|
||||
};
|
||||
|
||||
#endif // SHARE_VM_GC_G1_G1POLICY_HPP
|
||||
|
@ -1,98 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/g1/g1Predictions.hpp"
|
||||
|
||||
#ifndef PRODUCT
|
||||
|
||||
void G1Predictions::test() {
|
||||
double const epsilon = 1e-6;
|
||||
{
|
||||
// Some basic formula tests with confidence = 0.0
|
||||
G1Predictions predictor(0.0);
|
||||
TruncatedSeq s;
|
||||
|
||||
double p0 = predictor.get_new_prediction(&s);
|
||||
assert(p0 < epsilon, "Initial prediction of empty sequence must be 0.0 but is %f", p0);
|
||||
|
||||
s.add(5.0);
|
||||
double p1 = predictor.get_new_prediction(&s);
|
||||
assert(fabs(p1 - 5.0) < epsilon, "Prediction should be 5.0 but is %f", p1);
|
||||
for (int i = 0; i < 40; i++) {
|
||||
s.add(5.0);
|
||||
}
|
||||
double p2 = predictor.get_new_prediction(&s);
|
||||
assert(fabs(p2 - 5.0) < epsilon, "Prediction should be 5.0 but is %f", p1);
|
||||
}
|
||||
|
||||
{
|
||||
// The following tests checks that the initial predictions are based on the
|
||||
// average of the sequence and not on the stddev (which is 0).
|
||||
G1Predictions predictor(0.5);
|
||||
TruncatedSeq s;
|
||||
|
||||
s.add(1.0);
|
||||
double p1 = predictor.get_new_prediction(&s);
|
||||
assert(p1 > 1.0, "First prediction must be larger than average, but avg is %f and prediction %f", s.davg(), p1);
|
||||
s.add(1.0);
|
||||
double p2 = predictor.get_new_prediction(&s);
|
||||
assert(p2 < p1, "First prediction must be larger than second, but they are %f %f", p1, p2);
|
||||
s.add(1.0);
|
||||
double p3 = predictor.get_new_prediction(&s);
|
||||
assert(p3 < p2, "Second prediction must be larger than third, but they are %f %f", p2, p3);
|
||||
s.add(1.0);
|
||||
s.add(1.0); // Five elements are now in the sequence.
|
||||
double p5 = predictor.get_new_prediction(&s);
|
||||
assert(p5 < p3, "Fifth prediction must be smaller than third, but they are %f %f", p3, p5);
|
||||
assert(fabs(p5 - 1.0) < epsilon, "Prediction must be 1.0+epsilon, but is %f", p5);
|
||||
}
|
||||
|
||||
{
|
||||
// The following tests checks that initially prediction based on the average is
|
||||
// used, that gets overridden by the stddev prediction at the end.
|
||||
G1Predictions predictor(0.5);
|
||||
TruncatedSeq s;
|
||||
|
||||
s.add(0.5);
|
||||
double p1 = predictor.get_new_prediction(&s);
|
||||
assert(p1 > 0.5, "First prediction must be larger than average, but avg is %f and prediction %f", s.davg(), p1);
|
||||
s.add(0.2);
|
||||
double p2 = predictor.get_new_prediction(&s);
|
||||
assert(p2 < p1, "First prediction must be larger than second, but they are %f %f", p1, p2);
|
||||
s.add(0.5);
|
||||
double p3 = predictor.get_new_prediction(&s);
|
||||
assert(p3 < p2, "Second prediction must be larger than third, but they are %f %f", p2, p3);
|
||||
s.add(0.2);
|
||||
s.add(2.0);
|
||||
double p5 = predictor.get_new_prediction(&s);
|
||||
assert(p5 > p3, "Fifth prediction must be bigger than third, but they are %f %f", p3, p5);
|
||||
}
|
||||
}
|
||||
|
||||
void TestPredictions_test() {
|
||||
G1Predictions::test();
|
||||
}
|
||||
|
||||
#endif
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -57,10 +57,6 @@ class G1Predictions VALUE_OBJ_CLASS_SPEC {
|
||||
double get_new_prediction(TruncatedSeq const* seq) const {
|
||||
return seq->davg() + _sigma * stddev_estimate(seq);
|
||||
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
static void test();
|
||||
#endif
|
||||
};
|
||||
|
||||
#endif // SHARE_VM_GC_G1_G1PREDICTIONS_HPP
|
||||
|
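The removed g1Predictions.cpp above held only the self-test; the rule it exercised survives in g1Predictions.hpp as davg() + _sigma * stddev_estimate(). A minimal standalone sketch of that rule follows; Seq is a stand-in for HotSpot's TruncatedSeq (which uses a decaying average and a padded standard-deviation estimate), not the real class.

// Sketch of the G1 prediction rule: average plus confidence-sigma times stddev.
#include <cmath>
#include <cstdio>
#include <vector>

struct Seq {
  std::vector<double> vals;
  void add(double v) { vals.push_back(v); }
  double avg() const {
    double s = 0.0;
    for (double v : vals) s += v;
    return vals.empty() ? 0.0 : s / vals.size();
  }
  double stddev() const {
    if (vals.size() < 2) return 0.0;
    double m = avg(), s = 0.0;
    for (double v : vals) s += (v - m) * (v - m);
    return std::sqrt(s / (vals.size() - 1));
  }
};

static double predict(const Seq& s, double sigma) {
  // mirrors seq->davg() + _sigma * stddev_estimate(seq) from the hunk above
  return s.avg() + sigma * s.stddev();
}

int main() {
  Seq s;
  for (int i = 0; i < 5; i++) s.add(5.0);
  // a constant series has zero stddev, so the prediction equals the average
  std::printf("prediction = %f\n", predict(s, 0.5));
}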
@ -83,6 +83,7 @@ void G1RootProcessor::evacuate_roots(G1EvacuationRootClosures* closures, uint wo
}
process_vm_roots(closures, phase_times, worker_i);
process_string_table_roots(closures, phase_times, worker_i);
{
// Now the CM ref_processor roots.

@ -191,19 +192,34 @@ public:
void G1RootProcessor::process_all_roots(OopClosure* oops,
CLDClosure* clds,
CodeBlobClosure* blobs) {
CodeBlobClosure* blobs,
bool process_string_table) {
AllRootsClosures closures(oops, clds);
process_java_roots(&closures, NULL, 0);
process_vm_roots(&closures, NULL, 0);
if (!_process_strong_tasks.is_task_claimed(G1RP_PS_CodeCache_oops_do)) {
CodeCache::blobs_do(blobs);
if (process_string_table) {
process_string_table_roots(&closures, NULL, 0);
}
process_code_cache_roots(blobs, NULL, 0);
_process_strong_tasks.all_tasks_completed(n_workers());
}
void G1RootProcessor::process_all_roots(OopClosure* oops,
CLDClosure* clds,
CodeBlobClosure* blobs) {
process_all_roots(oops, clds, blobs, true);
}
void G1RootProcessor::process_all_roots_no_string_table(OopClosure* oops,
CLDClosure* clds,
CodeBlobClosure* blobs) {
assert(!ClassUnloading, "Should only be used when class unloading is disabled");
process_all_roots(oops, clds, blobs, false);
}
void G1RootProcessor::process_java_roots(G1RootClosures* closures,
G1GCPhaseTimes* phase_times,
uint worker_i) {

@ -280,14 +296,23 @@ void G1RootProcessor::process_vm_roots(G1RootClosures* closures,
SystemDictionary::roots_oops_do(strong_roots, weak_roots);
}
}
}
{
G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::StringTableRoots, worker_i);
// All threads execute the following. A specific chunk of buckets
// from the StringTable are the individual tasks.
if (weak_roots != NULL) {
StringTable::possibly_parallel_oops_do(weak_roots);
}
void G1RootProcessor::process_string_table_roots(G1RootClosures* closures,
G1GCPhaseTimes* phase_times,
uint worker_i) {
assert(closures->weak_oops() != NULL, "Should only be called when all roots are processed");
G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::StringTableRoots, worker_i);
// All threads execute the following. A specific chunk of buckets
// from the StringTable are the individual tasks.
StringTable::possibly_parallel_oops_do(closures->weak_oops());
}
void G1RootProcessor::process_code_cache_roots(CodeBlobClosure* code_closure,
G1GCPhaseTimes* phase_times,
uint worker_i) {
if (!_process_strong_tasks.is_task_claimed(G1RP_PS_CodeCache_oops_do)) {
CodeCache::blobs_do(code_closure);
}
}

@ -73,6 +73,11 @@ class G1RootProcessor : public StackObj {
void worker_has_discovered_all_strong_classes();
void wait_until_all_strong_classes_discovered();
void process_all_roots(OopClosure* oops,
CLDClosure* clds,
CodeBlobClosure* blobs,
bool process_string_table);
void process_java_roots(G1RootClosures* closures,
G1GCPhaseTimes* phase_times,
uint worker_i);

@ -81,6 +86,14 @@ class G1RootProcessor : public StackObj {
G1GCPhaseTimes* phase_times,
uint worker_i);
void process_string_table_roots(G1RootClosures* closures,
G1GCPhaseTimes* phase_times,
uint worker_i);
void process_code_cache_roots(CodeBlobClosure* code_closure,
G1GCPhaseTimes* phase_times,
uint worker_i);
public:
G1RootProcessor(G1CollectedHeap* g1h, uint n_workers);

@ -99,6 +112,13 @@ public:
CLDClosure* clds,
CodeBlobClosure* blobs);
// Apply oops, clds and blobs to strongly and weakly reachable roots in the system,
// the only thing different from process_all_roots is that we skip the string table
// to avoid keeping every string live when doing class unloading.
void process_all_roots_no_string_table(OopClosure* oops,
CLDClosure* clds,
CodeBlobClosure* blobs);
// Number of worker threads used by the root processor.
uint n_workers() const;
};
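The header comment above is the rationale for exposing two entry points instead of one. The sketch below only illustrates the delegation pattern, mirroring the G1MarkSweep branch earlier in this diff; RootProcessorModel and its printf bodies are illustrative stand-ins, not HotSpot code.

// Toy sketch: the bool-parameter overload does the work, the two public
// wrappers encode the policy.
#include <cstdio>

class RootProcessorModel {
  void process_all_roots(bool process_string_table) {
    std::printf("java + vm + code cache roots%s\n",
                process_string_table ? " + string table" : " (string table skipped)");
  }
 public:
  void process_all_roots() { process_all_roots(true); }
  void process_all_roots_no_string_table() {
    // Per the assert in the .cpp hunk above, this variant is only used when
    // class unloading is disabled (see the G1MarkSweep branch in this diff).
    process_all_roots(false);
  }
};

int main() {
  RootProcessorModel rp;
  rp.process_all_roots();                  // keeps the old behaviour
  rp.process_all_roots_no_string_table();  // new full-GC path when class unloading is off
}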
@ -123,6 +123,7 @@ void HeapRegionManager::make_regions_available(uint start, uint num_regions) {
for (uint i = start; i < start + num_regions; i++) {
if (_regions.get_by_index(i) == NULL) {
HeapRegion* new_hr = new_heap_region(i);
OrderAccess::storestore();
_regions.set_by_index(i, new_hr);
_allocated_heapregions_length = MAX2(_allocated_heapregions_length, i + 1);
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -1133,18 +1133,3 @@ bool PSAdaptiveSizePolicy::print() const {
return false;
}
#ifndef PRODUCT
void TestOldFreeSpaceCalculation_test() {
assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(100, 20) == 25, "Calculation of free memory failed");
assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(100, 50) == 100, "Calculation of free memory failed");
assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(100, 60) == 150, "Calculation of free memory failed");
assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(100, 75) == 300, "Calculation of free memory failed");
assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(400, 20) == 100, "Calculation of free memory failed");
assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(400, 50) == 400, "Calculation of free memory failed");
assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(400, 60) == 600, "Calculation of free memory failed");
assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(400, 75) == 1200, "Calculation of free memory failed");
}
#endif /* !PRODUCT */
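The asserts in the removed test above pin down the arithmetic they expect: for a target free percentage p, the free space that goes with a given live size is live * p / (100 - p), so that free / (free + live) equals p percent. A self-contained re-check of those numbers under that assumption (this is not the HotSpot implementation itself):

// Reproduces the expected values from TestOldFreeSpaceCalculation_test above.
// free / (free + live) == p/100  =>  free == live * p / (100 - p)
#include <cassert>
#include <cstdio>

static double free_based_on_live(double live, double free_percent) {
  return live * free_percent / (100.0 - free_percent);
}

int main() {
  assert(free_based_on_live(100, 20) == 25);    //   25 / 125  = 20%
  assert(free_based_on_live(100, 50) == 100);   //  100 / 200  = 50%
  assert(free_based_on_live(400, 75) == 1200);  // 1200 / 1600 = 75%
  std::printf("free-space arithmetic matches the asserts above\n");
}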
@ -564,9 +564,18 @@ HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
void DefNewGeneration::adjust_desired_tenuring_threshold() {
// Set the desired survivor size to half the real survivor space
GCPolicyCounters* gc_counters = GenCollectedHeap::heap()->gen_policy()->counters();
_tenuring_threshold =
age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize, gc_counters);
size_t const survivor_capacity = to()->capacity() / HeapWordSize;
size_t const desired_survivor_size = (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);
_tenuring_threshold = age_table()->compute_tenuring_threshold(desired_survivor_size);
if (UsePerfData) {
GCPolicyCounters* gc_counters = GenCollectedHeap::heap()->gen_policy()->counters();
gc_counters->tenuring_threshold()->set_value(_tenuring_threshold);
gc_counters->desired_survivor_size()->set_value(desired_survivor_size * oopSize);
}
age_table()->print_age_table(_tenuring_threshold);
}
void DefNewGeneration::collect(bool full,
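The new DefNewGeneration code above computes the desired survivor size inline and passes only that value to AgeTable::compute_tenuring_threshold; the G1 hunk near the top of this diff does the same through desired_survivor_size(). A small numeric sketch of that computation; the capacity value is made up and TargetSurvivorRatio is assumed to be at its usual default of 50.

// desired_survivor_size = survivor_capacity_in_words * TargetSurvivorRatio / 100,
// as in the DefNewGeneration hunk above. Values below are illustrative only.
#include <cstddef>
#include <cstdio>

int main() {
  const size_t heap_word_size = 8;                         // 64-bit HeapWordSize
  const size_t survivor_capacity_bytes = 8 * 1024 * 1024;  // assumed 8M 'to' space
  const size_t target_survivor_ratio = 50;                 // assumed default TargetSurvivorRatio

  size_t survivor_capacity = survivor_capacity_bytes / heap_word_size;  // in words
  size_t desired_survivor_size =
      (size_t)(((double)survivor_capacity * target_survivor_ratio) / 100);

  std::printf("capacity = %zu words, desired survivor size = %zu words\n",
              survivor_capacity, desired_survivor_size);
}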
@ -27,7 +27,6 @@
#include "gc/shared/ageTableTracer.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "memory/resourceArea.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"

@ -75,8 +74,7 @@ void AgeTable::merge(AgeTable* subTable) {
}
}
uint AgeTable::compute_tenuring_threshold(size_t survivor_capacity, GCPolicyCounters* gc_counters) {
size_t desired_survivor_size = (size_t)((((double) survivor_capacity)*TargetSurvivorRatio)/100);
uint AgeTable::compute_tenuring_threshold(size_t desired_survivor_size) {
uint result;
if (AlwaysTenure || NeverTenure) {

@ -99,9 +97,16 @@ uint AgeTable::compute_tenuring_threshold(size_t survivor_capacity, GCPolicyCoun
log_debug(gc, age)("Desired survivor size " SIZE_FORMAT " bytes, new threshold " UINTX_FORMAT " (max threshold " UINTX_FORMAT ")",
desired_survivor_size*oopSize, (uintx) result, MaxTenuringThreshold);
desired_survivor_size * oopSize, (uintx) result, MaxTenuringThreshold);
return result;
}
void AgeTable::print_age_table(uint tenuring_threshold) {
if (log_is_enabled(Trace, gc, age) || UsePerfData || AgeTableTracer::is_tenuring_distribution_event_enabled()) {
log_trace(gc, age)("Age table with threshold %u (max threshold " UINTX_FORMAT ")",
tenuring_threshold, MaxTenuringThreshold);
size_t total = 0;
uint age = 1;
while (age < table_size) {

@ -109,20 +114,14 @@ uint AgeTable::compute_tenuring_threshold(size_t survivor_capacity, GCPolicyCoun
total += wordSize;
if (wordSize > 0) {
log_trace(gc, age)("- age %3u: " SIZE_FORMAT_W(10) " bytes, " SIZE_FORMAT_W(10) " total",
age, wordSize*oopSize, total*oopSize);
age, wordSize * oopSize, total * oopSize);
}
AgeTableTracer::send_tenuring_distribution_event(age, wordSize*oopSize);
AgeTableTracer::send_tenuring_distribution_event(age, wordSize * oopSize);
if (UsePerfData) {
_perf_sizes[age]->set_value(wordSize*oopSize);
_perf_sizes[age]->set_value(wordSize * oopSize);
}
age++;
}
if (UsePerfData) {
gc_counters->tenuring_threshold()->set_value(result);
gc_counters->desired_survivor_size()->set_value(
desired_survivor_size*oopSize);
}
}
return result;
}

@ -29,8 +29,6 @@
#include "oops/oop.hpp"
#include "runtime/perfData.hpp"
class GCPolicyCounters;
/* Copyright (c) 1992, 2016, Oracle and/or its affiliates, and Stanford University.
See the LICENSE file for license information. */

@ -67,10 +65,12 @@ class AgeTable VALUE_OBJ_CLASS_SPEC {
// for parallel young generation gc.
void merge(AgeTable* subTable);
// calculate new tenuring threshold based on age information
uint compute_tenuring_threshold(size_t survivor_capacity, GCPolicyCounters* gc_counters);
// Calculate new tenuring threshold based on age information.
uint compute_tenuring_threshold(size_t desired_survivor_size);
void print_age_table(uint tenuring_threshold);
private:
PerfVariable* _perf_sizes[table_size];
};

@ -185,8 +185,10 @@ void OldGCTracer::report_concurrent_mode_failure() {
}
#if INCLUDE_ALL_GCS
void G1MMUTracer::report_mmu(double timeSlice, double gcTime, double maxTime) {
send_g1_mmu_event(timeSlice, gcTime, maxTime);
void G1MMUTracer::report_mmu(double time_slice_sec, double gc_time_sec, double max_time_sec) {
send_g1_mmu_event(time_slice_sec * MILLIUNITS,
gc_time_sec * MILLIUNITS,
max_time_sec * MILLIUNITS);
}
void G1NewTracer::report_yc_type(G1YCType type) {

@ -235,10 +235,10 @@ class ParNewTracer : public YoungGCTracer {
#if INCLUDE_ALL_GCS
class G1MMUTracer : public AllStatic {
static void send_g1_mmu_event(double timeSlice, double gcTime, double maxTime);
static void send_g1_mmu_event(double time_slice_ms, double gc_time_ms, double max_time_ms);
public:
static void report_mmu(double timeSlice, double gcTime, double maxTime);
static void report_mmu(double time_slice_sec, double gc_time_sec, double max_time_sec);
};
class G1NewTracer : public YoungGCTracer {

@ -200,13 +200,13 @@ void G1NewTracer::send_g1_young_gc_event() {
}
}
void G1MMUTracer::send_g1_mmu_event(double timeSlice, double gcTime, double maxTime) {
void G1MMUTracer::send_g1_mmu_event(double time_slice_ms, double gc_time_ms, double max_time_ms) {
EventG1MMU e;
if (e.should_commit()) {
e.set_gcId(GCId::current());
e.set_timeSlice(timeSlice);
e.set_gcTime(gcTime);
e.set_maxGcTime(maxTime);
e.set_timeSlice(time_slice_ms);
e.set_gcTime(gc_time_ms);
e.set_pauseTarget(max_time_ms);
e.commit();
}
}

@ -281,10 +281,10 @@ void G1NewTracer::send_basic_ihop_statistics(size_t threshold,
evt.set_targetOccupancy(target_occupancy);
evt.set_thresholdPercentage(target_occupancy > 0 ? ((double)threshold / target_occupancy) : 0.0);
evt.set_currentOccupancy(current_occupancy);
evt.set_lastAllocationSize(last_allocation_size);
evt.set_lastAllocationDuration(last_allocation_duration);
evt.set_lastAllocationRate(last_allocation_duration != 0.0 ? last_allocation_size / last_allocation_duration : 0.0);
evt.set_lastMarkingLength(last_marking_length);
evt.set_recentMutatorAllocationSize(last_allocation_size);
evt.set_recentMutatorDuration(last_allocation_duration * MILLIUNITS);
evt.set_recentAllocationRate(last_allocation_duration != 0.0 ? last_allocation_size / last_allocation_duration : 0.0);
evt.set_lastMarkingDuration(last_marking_length * MILLIUNITS);
evt.commit();
}
}

@ -301,11 +301,11 @@ void G1NewTracer::send_adaptive_ihop_statistics(size_t threshold,
evt.set_gcId(GCId::current());
evt.set_threshold(threshold);
evt.set_thresholdPercentage(internal_target_occupancy > 0 ? ((double)threshold / internal_target_occupancy) : 0.0);
evt.set_internalTargetOccupancy(internal_target_occupancy);
evt.set_ihopTargetOccupancy(internal_target_occupancy);
evt.set_currentOccupancy(current_occupancy);
evt.set_additionalBufferSize(additional_buffer_size);
evt.set_predictedAllocationRate(predicted_allocation_rate);
evt.set_predictedMarkingLength(predicted_marking_length);
evt.set_predictedMarkingDuration(predicted_marking_length * MILLIUNITS);
evt.set_predictionActive(prediction_active);
evt.commit();
}

@ -162,7 +162,7 @@ class AbstractWorkGang : public CHeapObj<mtInternal> {
_active_workers = MIN2(v, _total_workers);
add_workers(false /* exit_on_failure */);
assert(v != 0, "Trying to set active workers to 0");
log_info(gc, task)("GC Workers: using %d out of %d", _active_workers, _total_workers);
log_trace(gc, task)("%s: using %d out of %d workers", name(), _active_workers, _total_workers);
return _active_workers;
}

@ -576,27 +576,39 @@ void InterpreterRuntime::resolve_get_put(JavaThread* thread, Bytecodes::Code byt
// compute auxiliary field attributes
TosState state = as_TosState(info.field_type());
// Put instructions on final fields are not resolved. This is required so we throw
// exceptions at the correct place (when the instruction is actually invoked).
// Resolution of put instructions on final fields is delayed. That is required so that
// exceptions are thrown at the correct place (when the instruction is actually invoked).
// If we do not resolve an instruction in the current pass, leaving the put_code
// set to zero will cause the next put instruction to the same field to reresolve.
// Resolution of put instructions to final instance fields with invalid updates (i.e.,
// to final instance fields with updates originating from a method different than <init>)
// is inhibited. A putfield instruction targeting an instance final field must throw
// an IllegalAccessError if the instruction is not in an instance
// initializer method <init>. If resolution were not inhibited, a putfield
// in an initializer method could be resolved in the initializer. Subsequent
// putfield instructions to the same field would then use cached information.
// As a result, those instructions would not pass through the VM. That is,
// checks in resolve_field_access() would not be executed for those instructions
// and the required IllegalAccessError would not be thrown.
//
// Also, we need to delay resolving getstatic and putstatic instructions until the
// class is initialized. This is required so that access to the static
// field will call the initialization function every time until the class
// is completely initialized ala. in 2.17.5 in JVM Specification.
InstanceKlass* klass = InstanceKlass::cast(info.field_holder());
bool uninitialized_static = ((bytecode == Bytecodes::_getstatic || bytecode == Bytecodes::_putstatic) &&
!klass->is_initialized());
Bytecodes::Code put_code = (Bytecodes::Code)0;
if (is_put && !info.access_flags().is_final() && !uninitialized_static) {
put_code = ((is_static) ? Bytecodes::_putstatic : Bytecodes::_putfield);
}
bool uninitialized_static = is_static && !klass->is_initialized();
bool has_initialized_final_update = info.field_holder()->major_version() >= 53 &&
info.has_initialized_final_update();
assert(!(has_initialized_final_update && !info.access_flags().is_final()), "Fields with initialized final updates must be final");
Bytecodes::Code get_code = (Bytecodes::Code)0;
Bytecodes::Code put_code = (Bytecodes::Code)0;
if (!uninitialized_static) {
get_code = ((is_static) ? Bytecodes::_getstatic : Bytecodes::_getfield);
if ((is_put && !has_initialized_final_update) || !info.access_flags().is_final()) {
put_code = ((is_static) ? Bytecodes::_putstatic : Bytecodes::_putfield);
}
}
cp_cache_entry->set_field(
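The rewritten block above decides which get/put bytecodes may be cached for a field. Stripped of the VM types, the new selection reduces to the sketch below; FieldInfo and select_codes are a paraphrase for illustration, not the HotSpot code.

// Paraphrase of the new get_code/put_code selection: nothing is cached for an
// uninitialized static field; a get is cached otherwise; a put is cached only
// when the field is not final, or when it is a put without an
// initialized-final update to check.
#include <cstdio>

struct FieldInfo {
  bool is_static;
  bool is_final;
  bool holder_initialized;
  bool has_initialized_final_update;  // only meaningful for class files with major_version >= 53
};

static void select_codes(const FieldInfo& f, bool is_put,
                         bool& cache_get, bool& cache_put) {
  bool uninitialized_static = f.is_static && !f.holder_initialized;
  cache_get = cache_put = false;
  if (!uninitialized_static) {
    cache_get = true;
    if ((is_put && !f.has_initialized_final_update) || !f.is_final) {
      cache_put = true;
    }
  }
}

int main() {
  FieldInfo final_instance{false, true, true, true};
  bool g, p;
  select_codes(final_instance, /*is_put=*/true, g, p);
  // put stays unresolved so the interpreter re-checks and can throw IllegalAccessError
  std::printf("final field with flagged update: cache get=%d put=%d\n", g, p);
}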
@ -768,6 +768,11 @@ C2V_VMENTRY(jobject, resolveMethod, (JNIEnv *, jobject, jobject receiver_jvmci_t
Symbol* h_name = method->name();
Symbol* h_signature = method->signature();
if (MethodHandles::is_signature_polymorphic_method(method())) {
// Signature polymorphic methods are already resolved, JVMCI just returns NULL in this case.
return NULL;
}
LinkInfo link_info(h_resolved, h_name, h_signature, caller_klass);
methodHandle m;
// Only do exact lookup if receiver klass has been linked. Otherwise,

@ -782,7 +787,7 @@ C2V_VMENTRY(jobject, resolveMethod, (JNIEnv *, jobject, jobject receiver_jvmci_t
}
if (m.is_null()) {
// Return NULL only if there was a problem with lookup (uninitialized class, etc.)
// Return NULL if there was a problem with lookup (uninitialized class, etc.)
return NULL;
}

@ -313,13 +313,18 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* t
// normal bytecode execution.
thread->clear_exception_oop_and_pc();
continuation = SharedRuntime::compute_compiled_exc_handler(cm, pc, exception, false, false);
bool recursive_exception = false;
continuation = SharedRuntime::compute_compiled_exc_handler(cm, pc, exception, false, false, recursive_exception);
// If an exception was thrown during exception dispatch, the exception oop may have changed
thread->set_exception_oop(exception());
thread->set_exception_pc(pc);
// the exception cache is used only by non-implicit exceptions
if (continuation != NULL && !SharedRuntime::deopt_blob()->contains(continuation)) {
// Update the exception cache only when there didn't happen
// another exception during the computation of the compiled
// exception handler. Checking for exception oop equality is not
// sufficient because some exceptions are pre-allocated and reused.
if (continuation != NULL && !recursive_exception && !SharedRuntime::deopt_blob()->contains(continuation)) {
cm->add_handler_for_exception_and_pc(exception, pc, continuation);
}
}

@ -1161,7 +1161,7 @@ void Test_invalid_log_file() {
// Attempt to log to a directory (existing log not a regular file)
create_directory(target_name);
LogFileOutput bad_file("tmplogdir");
LogFileOutput bad_file("file=tmplogdir");
assert(bad_file.initialize("", &ss) == false, "file was initialized "
"when there was an existing directory with the same name");
assert(strstr(ss.as_string(), "tmplogdir is not a regular file") != NULL,

@ -44,6 +44,9 @@ size_t LogConfiguration::_n_outputs = 0;
LogConfiguration::UpdateListenerFunction* LogConfiguration::_listener_callbacks = NULL;
size_t LogConfiguration::_n_listener_callbacks = 0;
// LogFileOutput is the default type of output, its type prefix should be used if no type was specified
static const char* implicit_output_prefix = LogFileOutput::Prefix;
// Stack object to take the lock for configuring the logging.
// Should only be held during the critical parts of the configuration
// (when calling configure_output or reading/modifying the outputs array).

@ -107,6 +110,55 @@ void LogConfiguration::finalize() {
FREE_C_HEAP_ARRAY(LogOutput*, _outputs);
}
// Normalizes the given LogOutput name to type=name form.
// For example, foo, "foo", file="foo", will all be normalized to file=foo (no quotes, prefixed).
static bool normalize_output_name(const char* full_name, char* buffer, size_t len, outputStream* errstream) {
const char* start_quote = strchr(full_name, '"');
const char* equals = strchr(full_name, '=');
const bool quoted = start_quote != NULL;
const bool is_stdout_or_stderr = (strcmp(full_name, "stdout") == 0 || strcmp(full_name, "stderr") == 0);
// ignore equals sign within quotes
if (quoted && equals > start_quote) {
equals = NULL;
}
const char* prefix = "";
size_t prefix_len = 0;
const char* name = full_name;
if (equals != NULL) {
// split on equals sign
name = equals + 1;
prefix = full_name;
prefix_len = equals - full_name + 1;
} else if (!is_stdout_or_stderr) {
prefix = implicit_output_prefix;
prefix_len = strlen(prefix);
}
size_t name_len = strlen(name);
if (quoted) {
const char* end_quote = strchr(start_quote + 1, '"');
if (end_quote == NULL) {
errstream->print_cr("Output name has opening quote but is missing a terminating quote.");
return false;
}
if (start_quote != name || end_quote[1] != '\0') {
errstream->print_cr("Output name can not be partially quoted."
" Either surround the whole name with quotation marks,"
" or do not use quotation marks at all.");
return false;
}
// strip start and end quote
name++;
name_len -= 2;
}
int ret = jio_snprintf(buffer, len, "%.*s%.*s", prefix_len, prefix, name_len, name);
assert(ret > 0, "buffer issue");
return true;
}
size_t LogConfiguration::find_output(const char* name) {
for (size_t i = 0; i < _n_outputs; i++) {
if (strcmp(_outputs[i]->name(), name) == 0) {

@ -116,39 +168,14 @@ size_t LogConfiguration::find_output(const char* name) {
return SIZE_MAX;
}
LogOutput* LogConfiguration::new_output(char* name, const char* options, outputStream* errstream) {
const char* type;
char* equals_pos = strchr(name, '=');
if (equals_pos == NULL) {
type = "file";
} else {
*equals_pos = '\0';
type = name;
name = equals_pos + 1;
}
// Check if name is quoted, and if so, strip the quotes
char* quote = strchr(name, '"');
if (quote != NULL) {
char* end_quote = strchr(name + 1, '"');
if (end_quote == NULL) {
errstream->print_cr("Output name has opening quote but is missing a terminating quote.");
return NULL;
} else if (quote != name || end_quote[1] != '\0') {
errstream->print_cr("Output name can not be partially quoted."
" Either surround the whole name with quotation marks,"
" or do not use quotation marks at all.");
return NULL;
}
name++;
*end_quote = '\0';
}
LogOutput* LogConfiguration::new_output(const char* name,
const char* options,
outputStream* errstream) {
LogOutput* output;
if (strcmp(type, "file") == 0) {
if (strncmp(name, LogFileOutput::Prefix, strlen(LogFileOutput::Prefix)) == 0) {
output = new LogFileOutput(name);
} else {
errstream->print_cr("Unsupported log output type.");
errstream->print_cr("Unsupported log output type: %s", name);
return NULL;
}

@ -358,6 +385,7 @@ bool LogConfiguration::parse_log_arguments(const char* outputstr,
const char* decoratorstr,
const char* output_options,
outputStream* errstream) {
assert(errstream != NULL, "errstream can not be NULL");
if (outputstr == NULL || strlen(outputstr) == 0) {
outputstr = "stdout";
}

@ -374,25 +402,35 @@ bool LogConfiguration::parse_log_arguments(const char* outputstr,
ConfigurationLock cl;
size_t idx;
if (outputstr[0] == '#') {
int ret = sscanf(outputstr+1, SIZE_FORMAT, &idx);
if (outputstr[0] == '#') { // Output specified using index
int ret = sscanf(outputstr + 1, SIZE_FORMAT, &idx);
if (ret != 1 || idx >= _n_outputs) {
errstream->print_cr("Invalid output index '%s'", outputstr);
return false;
}
} else {
idx = find_output(outputstr);
} else { // Output specified using name
// Normalize the name, stripping quotes and ensures it includes type prefix
size_t len = strlen(outputstr) + strlen(implicit_output_prefix) + 1;
char* normalized = NEW_C_HEAP_ARRAY(char, len, mtLogging);
if (!normalize_output_name(outputstr, normalized, len, errstream)) {
return false;
}
idx = find_output(normalized);
if (idx == SIZE_MAX) {
char* tmp = os::strdup_check_oom(outputstr, mtLogging);
LogOutput* output = new_output(tmp, output_options, errstream);
os::free(tmp);
if (output == NULL) {
return false;
// Attempt to create and add the output
LogOutput* output = new_output(normalized, output_options, errstream);
if (output != NULL) {
idx = add_output(output);
}
idx = add_output(output);
} else if (output_options != NULL && strlen(output_options) > 0) {
errstream->print_cr("Output options for existing outputs are ignored.");
}
FREE_C_HEAP_ARRAY(char, normalized);
if (idx == SIZE_MAX) {
return false;
}
}
configure_output(idx, expr, decorators);
notify_update_listeners();
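With normalize_output_name in place, every user-supplied output name is reduced to a canonical type=name key before lookup, as the comment in the hunk above describes (foo, "foo" and file="foo" all become file=foo, while stdout and stderr pass through). A rough standalone model of that mapping, simplified and not the VM routine:

// Rough model of the normalization described above:
//   foo, "foo", file="foo"  ->  file=foo      stdout / stderr stay as-is.
#include <cstdio>
#include <string>

static std::string normalize(std::string name) {
  if (name == "stdout" || name == "stderr") return name;
  std::string prefix = "file=";                       // the implicit output prefix
  size_t eq = name.find('=');
  size_t quote = name.find('"');
  if (eq != std::string::npos && (quote == std::string::npos || eq < quote)) {
    prefix = name.substr(0, eq + 1);                  // keep an explicit type= prefix
    name = name.substr(eq + 1);
  }
  if (name.size() >= 2 && name.front() == '"' && name.back() == '"') {
    name = name.substr(1, name.size() - 2);           // strip surrounding quotes
  }
  return prefix + name;
}

int main() {
  std::printf("%s\n", normalize("foo").c_str());           // file=foo
  std::printf("%s\n", normalize("\"foo\"").c_str());       // file=foo
  std::printf("%s\n", normalize("file=\"foo\"").c_str());  // file=foo
  std::printf("%s\n", normalize("stdout").c_str());        // stdout
}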
@ -59,7 +59,7 @@ class LogConfiguration : public AllStatic {
static size_t _n_listener_callbacks;
// Create a new output. Returns NULL if failed.
static LogOutput* new_output(char* name, const char* options, outputStream* errstream);
static LogOutput* new_output(const char* name, const char* options, outputStream* errstream);
// Add an output to the list of configured outputs. Returns the assigned index.
static size_t add_output(LogOutput* out);

@ -31,6 +31,7 @@
#include "utilities/globalDefinitions.hpp"
#include "utilities/defaultStream.hpp"
const char* LogFileOutput::Prefix = "file=";
const char* LogFileOutput::FileOpenMode = "a";
const char* LogFileOutput::PidFilenamePlaceholder = "%p";
const char* LogFileOutput::TimestampFilenamePlaceholder = "%t";

@ -45,7 +46,8 @@ LogFileOutput::LogFileOutput(const char* name)
_file_name(NULL), _archive_name(NULL), _archive_name_len(0),
_rotate_size(DefaultFileSize), _file_count(DefaultFileCount),
_current_size(0), _current_file(0), _rotation_semaphore(1) {
_file_name = make_file_name(name, _pid_str, _vm_start_time_str);
assert(strstr(name, Prefix) == name, "invalid output name '%s': missing prefix: %s", name, Prefix);
_file_name = make_file_name(name + strlen(Prefix), _pid_str, _vm_start_time_str);
}
void LogFileOutput::set_file_name_parameters(jlong vm_start_time) {

@ -91,6 +91,7 @@ class LogFileOutput : public LogFileStreamOutput {
return _name;
}
static const char* Prefix;
static void set_file_name_parameters(jlong start_time);
};

@ -164,7 +164,14 @@ bool LogTagLevelExpression::parse(const char* str, outputStream* errstream) {
success = false;
break;
}
add_tag(tag);
if (!add_tag(tag)) {
if (errstream != NULL) {
errstream->print_cr("Tag combination have duplicate tag '%s' in what-expression.",
cur_tag);
}
success = false;
break;
}
cur_tag = plus_pos + 1;
} while (plus_pos != NULL);

@ -59,9 +59,15 @@ class LogTagLevelExpression : public StackObj {
_ntags = 0;
}
void add_tag(LogTagType tag) {
bool add_tag(LogTagType tag) {
assert(_ntags < LogTag::MaxTags, "Can't have more tags than MaxTags!");
for (size_t i = 0; i < _ntags; i++) {
if (_tags[_ncombinations][i] == tag) {
return false;
}
}
_tags[_ncombinations][_ntags++] = tag;
return true;
}
void set_level(LogLevelType level) {
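add_tag now reports duplicates, so a tag repeated within one combination is rejected during parsing with the error text added above instead of being silently accepted. A standalone sketch of that duplicate check; MaxTags = 5 and the integer tag values are assumptions made for the example, not the real LogTag constants.

// Sketch of the duplicate check added to add_tag() above: a tag is appended
// only if it is not already part of the current combination.
#include <cstddef>
#include <cstdio>

static const size_t MaxTags = 5;   // assumed bound, standing in for LogTag::MaxTags

struct TagCombination {
  int tags[MaxTags];
  size_t ntags = 0;
  bool add_tag(int tag) {
    for (size_t i = 0; i < ntags; i++) {
      if (tags[i] == tag) return false;  // duplicate: caller reports a parse error
    }
    if (ntags == MaxTags) return false;  // sketch-only guard against overflow
    tags[ntags++] = tag;
    return true;
  }
};

int main() {
  TagCombination c;
  std::printf("add tag 1: %d\n", c.add_tag(1));        // 1
  std::printf("add tag 2: %d\n", c.add_tag(2));        // 1
  std::printf("add tag 1 again: %d\n", c.add_tag(1));  // 0 -> error path above
}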
@ -3365,94 +3365,119 @@ void InstanceKlass::set_init_state(ClassState state) {
#if INCLUDE_JVMTI
// RedefineClasses() support for previous versions:
int InstanceKlass::_previous_version_count = 0;
// RedefineClasses() support for previous versions
// Purge previous versions before adding new previous versions of the class.
void InstanceKlass::purge_previous_versions(InstanceKlass* ik) {
// Globally, there is at least one previous version of a class to walk
// during class unloading, which is saved because old methods in the class
// are still running. Otherwise the previous version list is cleaned up.
bool InstanceKlass::_has_previous_versions = false;
// Returns true if there are previous versions of a class for class
// unloading only. Also resets the flag to false. purge_previous_version
// will set the flag to true if there are any left, i.e., if there's any
// work to do for next time. This is to avoid the expensive code cache
// walk in CLDG::do_unloading().
bool InstanceKlass::has_previous_versions_and_reset() {
bool ret = _has_previous_versions;
log_trace(redefine, class, iklass, purge)("Class unloading: has_previous_versions = %s",
ret ? "true" : "false");
_has_previous_versions = false;
return ret;
}
// Purge previous versions before adding new previous versions of the class and
// during class unloading.
void InstanceKlass::purge_previous_version_list() {
assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
if (ik->previous_versions() != NULL) {
// This klass has previous versions so see what we can cleanup
// while it is safe to do so.
assert(has_been_redefined(), "Should only be called for main class");
int deleted_count = 0; // leave debugging breadcrumbs
int live_count = 0;
ClassLoaderData* loader_data = ik->class_loader_data();
assert(loader_data != NULL, "should never be null");
// Quick exit.
if (previous_versions() == NULL) {
return;
}
ResourceMark rm;
log_trace(redefine, class, iklass, purge)("%s: previous versions", ik->external_name());
// This klass has previous versions so see what we can cleanup
// while it is safe to do so.
// previous versions are linked together through the InstanceKlass
InstanceKlass* pv_node = ik->previous_versions();
InstanceKlass* last = ik;
int version = 0;
int deleted_count = 0; // leave debugging breadcrumbs
int live_count = 0;
ClassLoaderData* loader_data = class_loader_data();
assert(loader_data != NULL, "should never be null");
// check the previous versions list
for (; pv_node != NULL; ) {
ResourceMark rm;
log_trace(redefine, class, iklass, purge)("%s: previous versions", external_name());
ConstantPool* pvcp = pv_node->constants();
assert(pvcp != NULL, "cp ref was unexpectedly cleared");
// previous versions are linked together through the InstanceKlass
InstanceKlass* pv_node = previous_versions();
InstanceKlass* last = this;
int version = 0;
if (!pvcp->on_stack()) {
// If the constant pool isn't on stack, none of the methods
// are executing. Unlink this previous_version.
// The previous version InstanceKlass is on the ClassLoaderData deallocate list
// so will be deallocated during the next phase of class unloading.
//
// Update count for class unloading.
_previous_version_count--;
log_trace(redefine, class, iklass, purge)
("previous version " INTPTR_FORMAT " is dead. previous_version_count = %d",
p2i(pv_node), _previous_version_count);
// For debugging purposes.
pv_node->set_is_scratch_class();
pv_node->class_loader_data()->add_to_deallocate_list(pv_node);
pv_node = pv_node->previous_versions();
last->link_previous_versions(pv_node);
deleted_count++;
version++;
continue;
} else {
log_trace(redefine, class, iklass, purge)("previous version " INTPTR_FORMAT " is alive", p2i(pv_node));
assert(pvcp->pool_holder() != NULL, "Constant pool with no holder");
guarantee (!loader_data->is_unloading(), "unloaded classes can't be on the stack");
live_count++;
}
// check the previous versions list
for (; pv_node != NULL; ) {
// At least one method is live in this previous version.
// Reset dead EMCP methods not to get breakpoints.
// All methods are deallocated when all of the methods for this class are no
// longer running.
Array<Method*>* method_refs = pv_node->methods();
if (method_refs != NULL) {
log_trace(redefine, class, iklass, purge)("previous methods length=%d", method_refs->length());
for (int j = 0; j < method_refs->length(); j++) {
Method* method = method_refs->at(j);
ConstantPool* pvcp = pv_node->constants();
assert(pvcp != NULL, "cp ref was unexpectedly cleared");
if (!method->on_stack()) {
// no breakpoints for non-running methods
if (method->is_running_emcp()) {
method->set_running_emcp(false);
}
} else {
assert (method->is_obsolete() || method->is_running_emcp(),
"emcp method cannot run after emcp bit is cleared");
log_trace(redefine, class, iklass, purge)
("purge: %s(%s): prev method @%d in version @%d is alive",
method->name()->as_C_string(), method->signature()->as_C_string(), j, version);
if (!pvcp->on_stack()) {
// If the constant pool isn't on stack, none of the methods
// are executing. Unlink this previous_version.
// The previous version InstanceKlass is on the ClassLoaderData deallocate list
// so will be deallocated during the next phase of class unloading.
log_trace(redefine, class, iklass, purge)
("previous version " INTPTR_FORMAT " is dead.", p2i(pv_node));
// For debugging purposes.
pv_node->set_is_scratch_class();
// Unlink from previous version list.
assert(pv_node->class_loader_data() == loader_data, "wrong loader_data");
InstanceKlass* next = pv_node->previous_versions();
pv_node->link_previous_versions(NULL); // point next to NULL
last->link_previous_versions(next);
// Add to the deallocate list after unlinking
loader_data->add_to_deallocate_list(pv_node);
pv_node = next;
deleted_count++;
version++;
continue;
} else {
log_trace(redefine, class, iklass, purge)("previous version " INTPTR_FORMAT " is alive", p2i(pv_node));
assert(pvcp->pool_holder() != NULL, "Constant pool with no holder");
guarantee (!loader_data->is_unloading(), "unloaded classes can't be on the stack");
live_count++;
// found a previous version for next time we do class unloading
_has_previous_versions = true;
}
// At least one method is live in this previous version.
// Reset dead EMCP methods not to get breakpoints.
// All methods are deallocated when all of the methods for this class are no
// longer running.
Array<Method*>* method_refs = pv_node->methods();
if (method_refs != NULL) {
log_trace(redefine, class, iklass, purge)("previous methods length=%d", method_refs->length());
for (int j = 0; j < method_refs->length(); j++) {
Method* method = method_refs->at(j);
if (!method->on_stack()) {
// no breakpoints for non-running methods
if (method->is_running_emcp()) {
method->set_running_emcp(false);
}
} else {
assert (method->is_obsolete() || method->is_running_emcp(),
"emcp method cannot run after emcp bit is cleared");
log_trace(redefine, class, iklass, purge)
("purge: %s(%s): prev method @%d in version @%d is alive",
method->name()->as_C_string(), method->signature()->as_C_string(), j, version);
}
}
// next previous version
last = pv_node;
pv_node = pv_node->previous_versions();
version++;
}
log_trace(redefine, class, iklass, purge)
("previous version stats: live=%d, deleted=%d",
live_count, deleted_count);
// next previous version
last = pv_node;
pv_node = pv_node->previous_versions();
version++;
}
log_trace(redefine, class, iklass, purge)
("previous version stats: live=%d, deleted=%d", live_count, deleted_count);
}
void InstanceKlass::mark_newly_obsolete_methods(Array<Method*>* old_methods,

@ -3519,14 +3544,13 @@ void InstanceKlass::add_previous_version(instanceKlassHandle scratch_class,
int emcp_method_count) {
assert(Thread::current()->is_VM_thread(),
"only VMThread can add previous versions");
assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
ResourceMark rm;
log_trace(redefine, class, iklass, add)
("adding previous version ref for %s, EMCP_cnt=%d", scratch_class->external_name(), emcp_method_count);
// Clean out old previous versions
purge_previous_versions(this);
// Clean out old previous versions for this class
purge_previous_version_list();
// Mark newly obsolete methods in remaining previous versions. An EMCP method from
// a previous redefinition may be made obsolete by this redefinition.

@ -3570,11 +3594,9 @@ void InstanceKlass::add_previous_version(instanceKlassHandle scratch_class,
}
// Add previous version if any methods are still running.
// Update count for class unloading.
_previous_version_count++;
log_trace(redefine, class, iklass, add)
("scratch class added; one of its methods is on_stack. previous_version_count = %d",
_previous_version_count);
// Set has_previous_version flag for processing during class unloading.
_has_previous_versions = true;
log_trace(redefine, class, iklass, add) ("scratch class added; one of its methods is on_stack.");
assert(scratch_class->previous_versions() == NULL, "shouldn't have a previous version");
scratch_class->link_previous_versions(previous_versions());
link_previous_versions(scratch_class());
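The change above replaces the global _previous_version_count with a single flag: purging records only whether any previous version is still alive, and class unloading consumes that flag once via has_previous_versions_and_reset() to decide whether the expensive walk is needed at all. A toy model of the protocol (RedefineState is an illustrative stand-in, not InstanceKlass):

// Toy model of the _has_previous_versions protocol introduced above.
#include <cstdio>

struct RedefineState {
  bool has_previous_versions = false;

  void purge_previous_version_list(int live_versions_left) {
    if (live_versions_left > 0) {
      has_previous_versions = true;   // work left for the next unloading cycle
    }
  }
  bool has_previous_versions_and_reset() {
    bool ret = has_previous_versions;
    has_previous_versions = false;    // the next purge must set it again
    return ret;
  }
};

int main() {
  RedefineState s;
  s.purge_previous_version_list(2);
  std::printf("unloading pass 1 walks: %d\n", s.has_previous_versions_and_reset()); // 1
  std::printf("unloading pass 2 walks: %d\n", s.has_previous_versions_and_reset()); // 0
}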
@ -619,8 +619,8 @@ class InstanceKlass: public Klass {
objArrayOop signers() const;
// host class
Klass* host_klass() const {
Klass** hk = (Klass**)adr_host_klass();
InstanceKlass* host_klass() const {
InstanceKlass** hk = adr_host_klass();
if (hk == NULL) {
return NULL;
} else {

@ -628,9 +628,9 @@ class InstanceKlass: public Klass {
return *hk;
}
}
void set_host_klass(const Klass* host) {
void set_host_klass(const InstanceKlass* host) {
assert(is_anonymous(), "not anonymous");
const Klass** addr = (const Klass**)adr_host_klass();
const InstanceKlass** addr = (const InstanceKlass **)adr_host_klass();
assert(addr != NULL, "no reversed space");
if (addr != NULL) {
*addr = host;

@ -709,6 +709,7 @@ class InstanceKlass: public Klass {
// RedefineClasses() support for previous versions:
void add_previous_version(instanceKlassHandle ikh, int emcp_method_count);
void purge_previous_version_list();
InstanceKlass* previous_versions() const { return _previous_versions; }
#else

@ -768,14 +769,16 @@ public:
}
private:
static int _previous_version_count;
static bool _has_previous_versions;
public:
static void purge_previous_versions(InstanceKlass* ik);
static bool has_previous_versions() {
assert(_previous_version_count >= 0, "count should never be negative");
return _previous_version_count > 0;
static void purge_previous_versions(InstanceKlass* ik) {
if (ik->has_been_redefined()) {
ik->purge_previous_version_list();
}
}
static bool has_previous_versions_and_reset();
// JVMTI: Support for caching a class file before it is modified by an agent that can do retransformation
void set_cached_class_file(JvmtiCachedClassFileData *data) {
_cached_class_file = data;

@ -795,7 +798,7 @@ public:
#else // INCLUDE_JVMTI
static void purge_previous_versions(InstanceKlass* ik) { return; };
static bool has_previous_versions() { return false; }
static bool has_previous_versions_and_reset() { return false; }
void set_cached_class_file(JvmtiCachedClassFileData *data) {
assert(data == NULL, "unexpected call with JVMTI disabled");

@ -1060,13 +1063,13 @@ public:
}
};
Klass** adr_host_klass() const {
InstanceKlass** adr_host_klass() const {
if (is_anonymous()) {
Klass** adr_impl = adr_implementor();
InstanceKlass** adr_impl = (InstanceKlass **)adr_implementor();
if (adr_impl != NULL) {
return adr_impl + 1;
} else {
return end_of_nonstatic_oop_maps();
return (InstanceKlass **)end_of_nonstatic_oop_maps();
}
} else {
return NULL;

@ -431,6 +431,12 @@ void Klass::clean_weak_klass_links(BoolObjectClosure* is_alive, bool clean_alive
if (clean_alive_klasses && current->is_instance_klass()) {
InstanceKlass* ik = InstanceKlass::cast(current);
ik->clean_weak_instanceklass_links(is_alive);
// JVMTI RedefineClasses creates previous versions that are not in
// the class hierarchy, so process them here.
while ((ik = ik->previous_versions()) != NULL) {
ik->clean_weak_instanceklass_links(is_alive);
}
}
}
}

@ -530,6 +530,7 @@ bool C2Compiler::is_intrinsic_supported(const methodHandle& method, bool is_virt
case vmIntrinsics::_isInterrupted:
#ifdef TRACE_HAVE_INTRINSICS
case vmIntrinsics::_counterTime:
case vmIntrinsics::_getClassId:
#endif
case vmIntrinsics::_currentTimeMillis:
case vmIntrinsics::_nanoTime:

@ -2172,10 +2172,9 @@ Node* GraphKit::record_profiled_receiver_for_speculation(Node* n) {
java_bc() == Bytecodes::_instanceof ||
java_bc() == Bytecodes::_aastore) {
ciProfileData* data = method()->method_data()->bci_to_data(bci());
bool maybe_null = data == NULL ? true : data->as_BitData()->null_seen();
maybe_null = data == NULL ? true : data->as_BitData()->null_seen();
}
return record_profile_for_speculation(n, exact_kls, maybe_null);
return n;
}
/**