Merge

commit add20086d2
@@ -286,3 +286,4 @@ cf136458ee747e151a27aa9ea0c1492ea55ef3e7 jdk9-b40
67395f7ca2db3b52e3a62a84888487de5cb9210a jdk9-b41
f7c11da0b0481d49cc7a65a453336c108191e821 jdk9-b42
02ee8c65622e8bd97496d584e22fc7dcf0edc4ae jdk9-b43
8994f5d87b3bb5e8d317d4e8ccb326da1a73684a jdk9-b44

@@ -286,3 +286,4 @@ e27c725d6c9d155667b35255f442d4ceb8c3c084 jdk9-b40
1908b886ba1eda46fa725cf1160fe5d30fd1a7e5 jdk9-b41
078bb11af876fe528d4b516f33ad4dd9bb60549e jdk9-b42
9645e35616b60c5c07b4fdf11a132afc8081dfa8 jdk9-b43
1f57bd728c9e6865ccb9d43ccd80a1c11230a32f jdk9-b44

@@ -446,3 +446,4 @@ c363a8b87e477ee45d6d3cb2a36cb365141bc596 jdk9-b38
1d29b13e8a515a7ea3b882f140576d5d675bc11f jdk9-b41
38cb4fbd47e3472bd1b5ebac83bda96fe4869c4f jdk9-b42
65a9747147b8090037541040ba67156ec914db6a jdk9-b43
43a44b56dca61a4d766a20f0528fdd8b5ceff873 jdk9-b44

@@ -22,6 +22,9 @@
*
*/

// Disable CRT security warning against strcpy/strcat
#pragma warning(disable: 4996)

// This is the source code of a windbg-based SA debugger agent used to debug
// Dr. Watson dump files and process snapshots.

@@ -33,6 +33,8 @@
*/

#ifdef _WINDOWS
// Disable CRT security warning against _snprintf
#pragma warning (disable : 4996)

#define snprintf _snprintf
#define vsnprintf _vsnprintf

@@ -90,12 +92,8 @@ static int getLastErrorString(char *buf, size_t len)
if (errno != 0)
{
/* C runtime error that has no corresponding DOS error code */
const char *s = strerror(errno);
size_t n = strlen(s);
if (n >= len) n = len - 1;
strncpy(buf, s, n);
buf[n] = '\0';
return (int)n;
strerror_s(buf, len, errno);
return strlen(buf);
}
return 0;
}

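The hunk above replaces the manual strerror()/strlen()/strncpy() copy with the CRT's strerror_s, which bounds the copy and NUL-terminates in one call. A minimal sketch of the new pattern, outside the patch and assuming the MSVC secure CRT (the helper name is illustrative):

  #include <errno.h>
  #include <string.h>

  /* Sketch only: strerror_s writes at most len-1 characters and
   * always NUL-terminates, so the manual truncation goes away. */
  static int last_error_string(char *buf, size_t len) {
    if (errno != 0) {
      strerror_s(buf, len, errno);   /* MSVC secure CRT variant */
      return (int)strlen(buf);
    }
    return 0;
  }
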
@@ -111,16 +109,30 @@ JNIEXPORT jlong JNICALL Java_sun_jvm_hotspot_asm_Disassembler_load_1library(JNIE
jstring jrepath_s,
jstring libname_s) {
uintptr_t func = 0;
const char* error_message = NULL;
jboolean isCopy;

const char * jrepath = (*env)->GetStringUTFChars(env, jrepath_s, &isCopy); // like $JAVA_HOME/jre/lib/sparc/
const char * libname = (*env)->GetStringUTFChars(env, libname_s, &isCopy);
const char *error_message = NULL;
const char *jrepath = NULL;
const char *libname = NULL;
char buffer[128];

#ifdef _WINDOWS
HINSTANCE hsdis_handle = (HINSTANCE) NULL;
#else
void* hsdis_handle = NULL;
#endif

jrepath = (*env)->GetStringUTFChars(env, jrepath_s, NULL); // like $JAVA_HOME/jre/lib/sparc/
if (jrepath == NULL || (*env)->ExceptionOccurred(env)) {
return 0;
}

libname = (*env)->GetStringUTFChars(env, libname_s, NULL);
if (libname == NULL || (*env)->ExceptionOccurred(env)) {
(*env)->ReleaseStringUTFChars(env, jrepath_s, jrepath);
return 0;
}

/* Load the hsdis library */
#ifdef _WINDOWS
HINSTANCE hsdis_handle;
hsdis_handle = LoadLibrary(libname);
if (hsdis_handle == NULL) {
snprintf(buffer, sizeof(buffer), "%s%s", jrepath, libname);

@@ -134,7 +146,6 @@ JNIEXPORT jlong JNICALL Java_sun_jvm_hotspot_asm_Disassembler_load_1library(JNIE
error_message = buffer;
}
#else
void* hsdis_handle;
hsdis_handle = dlopen(libname, RTLD_LAZY | RTLD_GLOBAL);
if (hsdis_handle == NULL) {
snprintf(buffer, sizeof(buffer), "%s%s", jrepath, libname);

@@ -156,6 +167,11 @@ JNIEXPORT jlong JNICALL Java_sun_jvm_hotspot_asm_Disassembler_load_1library(JNIE
* platform dependent error message.
*/
jclass eclass = (*env)->FindClass(env, "sun/jvm/hotspot/debugger/DebuggerException");
if ((*env)->ExceptionOccurred(env)) {
/* Can't throw exception, probably OOM, so silently return 0 */
return (jlong) 0;
}

(*env)->ThrowNew(env, eclass, error_message);
}
return (jlong)func;

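The rewritten load_1library checks each GetStringUTFChars result for NULL or a pending exception before continuing, and releases the first string when acquiring the second fails. A self-contained sketch of that acquire/check/release discipline (function and variable names are illustrative, not from the patch):

  #include <jni.h>

  static jlong get_two_strings(JNIEnv *env, jstring a_s, jstring b_s) {
    const char *a = (*env)->GetStringUTFChars(env, a_s, NULL);
    if (a == NULL || (*env)->ExceptionOccurred(env)) {
      return 0;  /* nothing acquired yet, exception stays pending */
    }
    const char *b = (*env)->GetStringUTFChars(env, b_s, NULL);
    if (b == NULL || (*env)->ExceptionOccurred(env)) {
      (*env)->ReleaseStringUTFChars(env, a_s, a);  /* undo first acquire */
      return 0;
    }
    /* ... use a and b ... */
    (*env)->ReleaseStringUTFChars(env, b_s, b);
    (*env)->ReleaseStringUTFChars(env, a_s, a);
    return 1;
  }
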
@@ -184,16 +200,22 @@ typedef struct {

/* event callback binding to Disassembler.handleEvent */
static void* event_to_env(void* env_pv, const char* event, void* arg) {
jlong result = 0;
decode_env* denv = (decode_env*)env_pv;
JNIEnv* env = denv->env;
jstring event_string = (*env)->NewStringUTF(env, event);
jlong result = (*env)->CallLongMethod(env, denv->dis, denv->handle_event, denv->visitor,
event_string, (jlong) (uintptr_t)arg);
if ((*env)->ExceptionOccurred(env) != NULL) {
if ((*env)->ExceptionOccurred(env)) {
return NULL;
}

result = (*env)->CallLongMethod(env, denv->dis, denv->handle_event, denv->visitor,
event_string, (jlong) (uintptr_t)arg);
if ((*env)->ExceptionOccurred(env)) {
/* ignore exceptions for now */
(*env)->ExceptionClear(env);
result = 0;
return NULL;
}

return (void*)(uintptr_t)result;
}

@@ -219,10 +241,13 @@ static int printf_to_env(void* env_pv, const char* format, ...) {
}
if (raw != NULL) {
jstring output = (*env)->NewStringUTF(env, raw);
(*env)->CallVoidMethod(env, denv->dis, denv->raw_print, denv->visitor, output);
if ((*env)->ExceptionOccurred(env) != NULL) {
if (!(*env)->ExceptionOccurred(env)) {
/* make sure that UTF allocation doesn't cause OOM */
(*env)->CallVoidMethod(env, denv->dis, denv->raw_print, denv->visitor, output);
}
if ((*env)->ExceptionOccurred(env)) {
/* ignore exceptions for now */
(*env)->ExceptionClear(env);
(*env)->ExceptionClear(env);
}
return (int) flen;
}

@@ -231,11 +256,16 @@ static int printf_to_env(void* env_pv, const char* format, ...) {
va_end(ap);

output = (*env)->NewStringUTF(env, denv->buffer);
(*env)->CallVoidMethod(env, denv->dis, denv->raw_print, denv->visitor, output);
if ((*env)->ExceptionOccurred(env) != NULL) {
if (!(*env)->ExceptionOccurred(env)) {
/* make sure that UTF allocation doesn't cause OOM */
(*env)->CallVoidMethod(env, denv->dis, denv->raw_print, denv->visitor, output);
}

if ((*env)->ExceptionOccurred(env)) {
/* ignore exceptions for now */
(*env)->ExceptionClear(env);
}

return cnt;
}

@@ -251,13 +281,24 @@ JNIEXPORT void JNICALL Java_sun_jvm_hotspot_asm_Disassembler_decode(JNIEnv * env
jbyteArray code,
jstring options_s,
jlong decode_instructions_virtual) {
jboolean isCopy;
jbyte* start = (*env)->GetByteArrayElements(env, code, &isCopy);
jbyte* end = start + (*env)->GetArrayLength(env, code);
const char * options = (*env)->GetStringUTFChars(env, options_s, &isCopy);
jclass disclass = (*env)->GetObjectClass(env, dis);

jbyte *start = NULL;
jbyte *end = NULL;
jclass disclass = NULL;
const char *options = NULL;
decode_env denv;

start = (*env)->GetByteArrayElements(env, code, NULL);
if ((*env)->ExceptionOccurred(env)) {
return;
}
end = start + (*env)->GetArrayLength(env, code);
options = (*env)->GetStringUTFChars(env, options_s, NULL);
if ((*env)->ExceptionOccurred(env)) {
(*env)->ReleaseByteArrayElements(env, code, start, JNI_ABORT);
return;
}
disclass = (*env)->GetObjectClass(env, dis);

denv.env = env;
denv.dis = dis;
denv.visitor = visitor;

@@ -266,6 +307,8 @@ JNIEXPORT void JNICALL Java_sun_jvm_hotspot_asm_Disassembler_decode(JNIEnv * env
denv.handle_event = (*env)->GetMethodID(env, disclass, "handleEvent",
"(Lsun/jvm/hotspot/asm/InstructionVisitor;Ljava/lang/String;J)J");
if ((*env)->ExceptionOccurred(env)) {
(*env)->ReleaseByteArrayElements(env, code, start, JNI_ABORT);
(*env)->ReleaseStringUTFChars(env, options_s, options);
return;
}

@@ -273,11 +316,13 @@ JNIEXPORT void JNICALL Java_sun_jvm_hotspot_asm_Disassembler_decode(JNIEnv * env
denv.raw_print = (*env)->GetMethodID(env, disclass, "rawPrint",
"(Lsun/jvm/hotspot/asm/InstructionVisitor;Ljava/lang/String;)V");
if ((*env)->ExceptionOccurred(env)) {
(*env)->ReleaseByteArrayElements(env, code, start, JNI_ABORT);
(*env)->ReleaseStringUTFChars(env, options_s, options);
return;
}

/* decode the buffer */
(*(decode_func)(uintptr_t)decode_instructions_virtual)(startPc,
(*(decode_func)(uintptr_t)decode_instructions_virtual)((uintptr_t) startPc,
startPc + end - start,
(unsigned char*)start,
end - start,

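Note that the error paths above release the byte array with JNI_ABORT, which frees a copied buffer without writing anything back -- appropriate because the disassembler only reads the code bytes. A sketch of the idiom (illustrative only):

  #include <jni.h>

  static void read_only_use(JNIEnv *env, jbyteArray code) {
    jbyte *bytes = (*env)->GetByteArrayElements(env, code, NULL);
    if (bytes == NULL) return;           /* OOM, exception pending */
    /* ... read bytes, never modify them ... */
    (*env)->ReleaseByteArrayElements(env, code, bytes, JNI_ABORT);
  }
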
@@ -72,7 +72,6 @@ SUNWprivate_1.1 {
JVM_FillInStackTrace;
JVM_FindClassFromCaller;
JVM_FindClassFromClass;
JVM_FindClassFromClassLoader;
JVM_FindClassFromBootLoader;
JVM_FindLibraryEntry;
JVM_FindLoadedClass;

@@ -70,7 +70,6 @@
_JVM_FillInStackTrace
_JVM_FindClassFromCaller
_JVM_FindClassFromClass
_JVM_FindClassFromClassLoader
_JVM_FindClassFromBootLoader
_JVM_FindLibraryEntry
_JVM_FindLoadedClass

@@ -544,6 +544,9 @@ void InterpreterMacroAssembler::index_check_without_pop(Register Rarray, Registe
cmplw(CCR0, Rindex, Rlength);
sldi(RsxtIndex, RsxtIndex, index_shift);
blt(CCR0, LnotOOR);
// Index should be in R17_tos, array should be in R4_ARG2.
mr(R17_tos, Rindex);
mr(R4_ARG2, Rarray);
load_dispatch_table(Rtmp, (address*)Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
mtctr(Rtmp);
bctr();

@@ -1678,6 +1681,228 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
}
}

// Argument and return type profiling.
// kills: tmp, tmp2, R0, CR0, CR1
void InterpreterMacroAssembler::profile_obj_type(Register obj, Register mdo_addr_base,
RegisterOrConstant mdo_addr_offs, Register tmp, Register tmp2) {
Label do_nothing, do_update;

// tmp2 = obj is allowed
assert_different_registers(obj, mdo_addr_base, tmp, R0);
assert_different_registers(tmp2, mdo_addr_base, tmp, R0);
const Register klass = tmp2;

verify_oop(obj);

ld(tmp, mdo_addr_offs, mdo_addr_base);

// Set null_seen if obj is 0.
cmpdi(CCR0, obj, 0);
ori(R0, tmp, TypeEntries::null_seen);
beq(CCR0, do_update);

load_klass(klass, obj);

clrrdi(R0, tmp, exact_log2(-TypeEntries::type_klass_mask));
// Basically same as andi(R0, tmp, TypeEntries::type_klass_mask);
cmpd(CCR1, R0, klass);
// Klass seen before, nothing to do (regardless of unknown bit).
//beq(CCR1, do_nothing);

andi_(R0, klass, TypeEntries::type_unknown);
// Already unknown. Nothing to do anymore.
//bne(CCR0, do_nothing);
crorc(/*CCR0 eq*/2, /*CCR1 eq*/4+2, /*CCR0 eq*/2); // cr0 eq = cr1 eq or cr0 ne
beq(CCR0, do_nothing);

clrrdi_(R0, tmp, exact_log2(-TypeEntries::type_mask));
orr(R0, klass, tmp); // Combine klass and null_seen bit (only used if (tmp & type_mask)==0).
beq(CCR0, do_update); // First time here. Set profile type.

// Different than before. Cannot keep accurate profile.
ori(R0, tmp, TypeEntries::type_unknown);

bind(do_update);
// update profile
std(R0, mdo_addr_offs, mdo_addr_base);

align(32, 12);
bind(do_nothing);
}

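For readers not fluent in PPC assembly, the function above maintains a single 64-bit profile cell per type entry. A hedged C model of the state machine it implements (the flag values are hypothetical stand-ins for the real TypeEntries constants):

  #include <stdint.h>

  #define NULL_SEEN        0x1ull                      /* hypothetical */
  #define TYPE_UNKNOWN     0x2ull                      /* hypothetical */
  #define TYPE_KLASS_MASK  (~0x3ull)                   /* klass bits   */
  #define TYPE_MASK        (TYPE_KLASS_MASK | TYPE_UNKNOWN)

  static void profile_obj_type_model(uint64_t *cell_p, uint64_t klass) {
    uint64_t cell = *cell_p;
    if (klass == 0) {                         /* obj was null */
      *cell_p = cell | NULL_SEEN;
    } else if ((cell & TYPE_KLASS_MASK) == klass) {
      /* klass seen before: nothing to do */
    } else if (cell & TYPE_UNKNOWN) {
      /* already polluted: nothing to do */
    } else if ((cell & TYPE_MASK) == 0) {
      *cell_p = klass | (cell & NULL_SEEN);   /* first klass seen */
    } else {
      *cell_p = cell | TYPE_UNKNOWN;          /* conflicting klass */
    }
  }
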
void InterpreterMacroAssembler::profile_arguments_type(Register callee, Register tmp1, Register tmp2, bool is_virtual) {
if (!ProfileInterpreter) {
return;
}

assert_different_registers(callee, tmp1, tmp2, R28_mdx);

if (MethodData::profile_arguments() || MethodData::profile_return()) {
Label profile_continue;

test_method_data_pointer(profile_continue);

int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());

lbz(tmp1, in_bytes(DataLayout::tag_offset()) - off_to_start, R28_mdx);
cmpwi(CCR0, tmp1, is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
bne(CCR0, profile_continue);

if (MethodData::profile_arguments()) {
Label done;
int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
add(R28_mdx, off_to_args, R28_mdx);

for (int i = 0; i < TypeProfileArgsLimit; i++) {
if (i > 0 || MethodData::profile_return()) {
// If return value type is profiled we may have no argument to profile.
ld(tmp1, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, R28_mdx);
cmpdi(CCR0, tmp1, (i+1)*TypeStackSlotEntries::per_arg_count());
addi(tmp1, tmp1, -i*TypeStackSlotEntries::per_arg_count());
blt(CCR0, done);
}
ld(tmp1, in_bytes(Method::const_offset()), callee);
lhz(tmp1, in_bytes(ConstMethod::size_of_parameters_offset()), tmp1);
// Stack offset o (zero based) from the start of the argument
// list, for n arguments translates into offset n - o - 1 from
// the end of the argument list. But there's an extra slot at
// the top of the stack. So the offset is n - o from Lesp.
ld(tmp2, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args, R28_mdx);
subf(tmp1, tmp2, tmp1);

sldi(tmp1, tmp1, Interpreter::logStackElementSize);
ldx(tmp1, tmp1, R15_esp);

profile_obj_type(tmp1, R28_mdx, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args, tmp2, tmp1);

int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
addi(R28_mdx, R28_mdx, to_add);
off_to_args += to_add;
}

if (MethodData::profile_return()) {
ld(tmp1, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, R28_mdx);
addi(tmp1, tmp1, -TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
}

bind(done);

if (MethodData::profile_return()) {
// We're right after the type profile for the last
// argument. tmp1 is the number of cells left in the
// CallTypeData/VirtualCallTypeData to reach its end. Non null
// if there's a return to profile.
assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
sldi(tmp1, tmp1, exact_log2(DataLayout::cell_size));
add(R28_mdx, tmp1, R28_mdx);
}
} else {
assert(MethodData::profile_return(), "either profile call args or call ret");
update_mdp_by_constant(in_bytes(TypeEntriesAtCall::return_only_size()));
}

// Mdp points right after the end of the
// CallTypeData/VirtualCallTypeData, right after the cells for the
// return value type if there's one.
align(32, 12);
bind(profile_continue);
}
}

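The stack-slot comment inside the loop above ("the offset is n - o from Lesp") is easy to check with a worked example: for a callee taking n = 3 parameter slots, slot o counts from the start of the argument list while the interpreter indexes down from the top of the expression stack:

  #include <stdio.h>

  int main(void) {
    int n = 3;                       /* parameter slots of the callee */
    for (int o = 0; o < n; o++) {
      printf("arg slot %d -> %d elements above esp\n", o, n - o);
    }
    return 0;
  }
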
void InterpreterMacroAssembler::profile_return_type(Register ret, Register tmp1, Register tmp2) {
assert_different_registers(ret, tmp1, tmp2);
if (ProfileInterpreter && MethodData::profile_return()) {
Label profile_continue;

test_method_data_pointer(profile_continue);

if (MethodData::profile_return_jsr292_only()) {
// If we don't profile all invoke bytecodes we must make sure
// it's a bytecode we indeed profile. We can't go back to the
// beginning of the ProfileData we intend to update to check its
// type because we're right after it and we don't know its
// length.
lbz(tmp1, 0, R14_bcp);
lbz(tmp2, Method::intrinsic_id_offset_in_bytes(), R19_method);
cmpwi(CCR0, tmp1, Bytecodes::_invokedynamic);
cmpwi(CCR1, tmp1, Bytecodes::_invokehandle);
cror(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2);
cmpwi(CCR1, tmp2, vmIntrinsics::_compiledLambdaForm);
cror(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2);
bne(CCR0, profile_continue);
}

profile_obj_type(ret, R28_mdx, -in_bytes(ReturnTypeEntry::size()), tmp1, tmp2);

align(32, 12);
bind(profile_continue);
}
}

void InterpreterMacroAssembler::profile_parameters_type(Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
if (ProfileInterpreter && MethodData::profile_parameters()) {
Label profile_continue, done;

test_method_data_pointer(profile_continue);

// Load the offset of the area within the MDO used for
// parameters. If it's negative we're not profiling any parameters.
lwz(tmp1, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset()), R28_mdx);
cmpwi(CCR0, tmp1, 0);
blt(CCR0, profile_continue);

// Compute a pointer to the area for parameters from the offset
// and move the pointer to the slot for the last
// parameters. Collect profiling from last parameter down.
// mdo start + parameters offset + array length - 1

// Pointer to the parameter area in the MDO.
const Register mdp = tmp1;
add(mdp, tmp1, R28_mdx);

// Offset of the current profile entry to update.
const Register entry_offset = tmp2;
// entry_offset = array len in number of cells
ld(entry_offset, in_bytes(ArrayData::array_len_offset()), mdp);

int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0));
assert(off_base % DataLayout::cell_size == 0, "should be a number of cells");

// entry_offset (number of cells) = array len - size of 1 entry + offset of the stack slot field
addi(entry_offset, entry_offset, -TypeStackSlotEntries::per_arg_count() + (off_base / DataLayout::cell_size));
// entry_offset in bytes
sldi(entry_offset, entry_offset, exact_log2(DataLayout::cell_size));

Label loop;
align(32, 12);
bind(loop);

// Load offset on the stack from the slot for this parameter.
ld(tmp3, entry_offset, mdp);
sldi(tmp3, tmp3, Interpreter::logStackElementSize);
neg(tmp3, tmp3);
// Read the parameter from the local area.
ldx(tmp3, tmp3, R18_locals);

// Make entry_offset now point to the type field for this parameter.
int type_base = in_bytes(ParametersTypeData::type_offset(0));
assert(type_base > off_base, "unexpected");
addi(entry_offset, entry_offset, type_base - off_base);

// Profile the parameter.
profile_obj_type(tmp3, mdp, entry_offset, tmp4, tmp3);

// Go to next parameter.
int delta = TypeStackSlotEntries::per_arg_count() * DataLayout::cell_size + (type_base - off_base);
cmpdi(CCR0, entry_offset, off_base + delta);
addi(entry_offset, entry_offset, -delta);
bge(CCR0, loop);

align(32, 12);
bind(profile_continue);
}
}

// Add an InterpMonitorElem to stack (see frame_sparc.hpp).
void InterpreterMacroAssembler::add_monitor_to_stack(bool stack_is_empty, Register Rtemp1, Register Rtemp2) {

@@ -2039,20 +2264,19 @@ void InterpreterMacroAssembler::verify_oop_or_return_address(Register reg, Regis
bne(CCR0, test);

address fd = CAST_FROM_FN_PTR(address, verify_return_address);
unsigned int nbytes_save = 10*8; // 10 volatile gprs

save_LR_CR(Rtmp);
const int nbytes_save = 11*8; // volatile gprs except R0
save_volatile_gprs(R1_SP, -nbytes_save); // except R0
save_LR_CR(Rtmp); // Save in old frame.
push_frame_reg_args(nbytes_save, Rtmp);
save_volatile_gprs(R1_SP, 112); // except R0

load_const_optimized(Rtmp, fd, R0);
mr_if_needed(R4_ARG2, reg);
mr(R3_ARG1, R19_method);
call_c(Rtmp); // call C

restore_volatile_gprs(R1_SP, 112); // except R0
pop_frame();
restore_LR_CR(Rtmp);
restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
b(skip);

// Perform a more elaborate out-of-line call.

@@ -255,6 +255,12 @@ class InterpreterMacroAssembler: public MacroAssembler {
void record_klass_in_profile(Register receiver, Register scratch1, Register scratch2, bool is_virtual_call);
void record_klass_in_profile_helper(Register receiver, Register scratch1, Register scratch2, int start_row, Label& done, bool is_virtual_call);

// Argument and return type profiling.
void profile_obj_type(Register obj, Register mdo_addr_base, RegisterOrConstant mdo_addr_offs, Register tmp, Register tmp2);
void profile_arguments_type(Register callee, Register tmp1, Register tmp2, bool is_virtual);
void profile_return_type(Register ret, Register tmp1, Register tmp2);
void profile_parameters_type(Register tmp1, Register tmp2, Register tmp3, Register tmp4);

#endif // !CC_INTERP

// Debugging

@@ -807,6 +807,7 @@ void MacroAssembler::restore_nonvolatile_gprs(Register src, int offset) {

// For verify_oops.
void MacroAssembler::save_volatile_gprs(Register dst, int offset) {
std(R2, offset, dst); offset += 8;
std(R3, offset, dst); offset += 8;
std(R4, offset, dst); offset += 8;
std(R5, offset, dst); offset += 8;

@@ -821,6 +822,7 @@ void MacroAssembler::save_volatile_gprs(Register dst, int offset) {

// For verify_oops.
void MacroAssembler::restore_volatile_gprs(Register src, int offset) {
ld(R2, offset, src); offset += 8;
ld(R3, offset, src); offset += 8;
ld(R4, offset, src); offset += 8;
ld(R5, offset, src); offset += 8;

@@ -1187,6 +1189,16 @@ void MacroAssembler::call_VM(Register oop_result, address entry_point, Register
call_VM(oop_result, entry_point, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3,
bool check_exceptions) {
// R3_ARG1 is reserved for the thread
mr_if_needed(R4_ARG2, arg_1);
assert(arg_2 != R4_ARG2, "smashed argument");
mr_if_needed(R5_ARG3, arg_2);
mr_if_needed(R6_ARG4, arg_3);
call_VM(oop_result, entry_point, check_exceptions);
}

void MacroAssembler::call_VM_leaf(address entry_point) {
call_VM_leaf_base(entry_point);
}

@@ -3059,35 +3071,27 @@ void MacroAssembler::verify_oop(Register oop, const char* msg) {
if (!VerifyOops) {
return;
}
// Will be preserved.
Register tmp = R11;
assert(oop != tmp, "precondition");
unsigned int nbytes_save = 10*8; // 10 volatile gprs

address/* FunctionDescriptor** */fd = StubRoutines::verify_oop_subroutine_entry_address();
// save tmp
mr(R0, tmp);
// kill tmp
save_LR_CR(tmp);
const Register tmp = R11; // Will be preserved.
const int nbytes_save = 11*8; // Volatile gprs except R0.
save_volatile_gprs(R1_SP, -nbytes_save); // except R0

if (oop == tmp) mr(R4_ARG2, oop);
save_LR_CR(tmp); // save in old frame
push_frame_reg_args(nbytes_save, tmp);
// restore tmp
mr(tmp, R0);
save_volatile_gprs(R1_SP, 112); // except R0
// load FunctionDescriptor** / entry_address *
load_const(tmp, fd);
load_const_optimized(tmp, fd, R0);
// load FunctionDescriptor* / entry_address
ld(tmp, 0, tmp);
mr(R4_ARG2, oop);
load_const(R3_ARG1, (address)msg);
// call destination for its side effect
if (oop != tmp) mr_if_needed(R4_ARG2, oop);
load_const_optimized(R3_ARG1, (address)msg, R0);
// Call destination for its side effect.
call_c(tmp);
restore_volatile_gprs(R1_SP, 112); // except R0

pop_frame();
// save tmp
mr(R0, tmp);
// kill tmp
restore_LR_CR(tmp);
// restore tmp
mr(tmp, R0);
restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
}

const char* stop_types[] = {

@@ -369,6 +369,7 @@ class MacroAssembler: public Assembler {
void call_VM(Register oop_result, address entry_point, bool check_exceptions = true);
void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg3, bool check_exceptions = true);
void call_VM_leaf(address entry_point);
void call_VM_leaf(address entry_point, Register arg_1);
void call_VM_leaf(address entry_point, Register arg_1, Register arg_2);

@@ -100,10 +100,7 @@ void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
MacroAssembler* a = new MacroAssembler(&cb);

// Patch the call.
if (ReoptimizeCallSequences &&
a->is_within_range_of_b(dest, addr_call)) {
a->bl(dest);
} else {
if (!ReoptimizeCallSequences || !a->is_within_range_of_b(dest, addr_call)) {
address trampoline_stub_addr = get_trampoline();

// We did not find a trampoline stub because the current codeblob

@@ -115,9 +112,12 @@ void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {

// Patch the constant in the call's trampoline stub.
NativeCallTrampolineStub_at(trampoline_stub_addr)->set_destination(dest);

a->bl(trampoline_stub_addr);
dest = trampoline_stub_addr;
}

OrderAccess::release();
a->bl(dest);

ICache::ppc64_flush_icache_bytes(addr_call, code_size);
}

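The rewritten patching sequence stores the trampoline's destination first and only then emits the bl that reaches it, with a release barrier in between, so another CPU can never execute the new branch and still read the old constant. An illustrative C11 model of that ordering (not HotSpot code; types are stand-ins):

  #include <stdatomic.h>
  #include <stdint.h>

  typedef struct { void *destination; } trampoline_stub;  /* illustrative */

  static void patch_call(trampoline_stub *stub, _Atomic uintptr_t *branch,
                         void *dest, uintptr_t branch_to_stub) {
    stub->destination = dest;                    /* patch the constant   */
    atomic_thread_fence(memory_order_release);   /* OrderAccess::release */
    atomic_store_explicit(branch, branch_to_stub,
                          memory_order_relaxed); /* patch the branch     */
  }
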
@@ -1936,8 +1936,9 @@ ArchOpcode MachSpillCopyNode_archOpcode(MachSpillCopyNode *n, PhaseRegAlloc *ra_
// --------------------------------------------------------------------
// Check for hi bits still needing moving. Only happens for misaligned
// arguments to native calls.
if (src_hi == dst_hi)
if (src_hi == dst_hi) {
return ppc64Opcode_none; // Self copy; no move.
}

ShouldNotReachHere();
return ppc64Opcode_undefined;

@@ -1959,14 +1960,15 @@ void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *) const {
}

uint MachNopNode::size(PhaseRegAlloc *ra_) const {
return _count * 4;
return _count * 4;
}

#ifndef PRODUCT
void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
int reg = ra_->get_reg_first(this);
st->print("ADDI %s, SP, %d \t// box node", Matcher::regName[reg], offset);
char reg_str[128];
ra_->dump_register(this, reg_str);
st->print("ADDI %s, SP, %d \t// box node", reg_str, offset);
}
#endif

@@ -91,7 +91,7 @@ address TemplateInterpreterGenerator::generate_ClassCastException_verbose_handle

// Thread will be loaded to R3_ARG1.
// Target class oop is in register R5_ARG3 by convention!
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException_verbose, R17_tos, R5_ARG3));
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException_verbose), R17_tos, R5_ARG3);
// Above call must not return here since exception pending.
DEBUG_ONLY(__ should_not_reach_here();)
return entry;

@@ -172,6 +172,10 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
// Compiled code destroys templateTableBase, reload.
__ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R12_scratch2);

if (state == atos) {
__ profile_return_type(R3_RET, R11_scratch1, R12_scratch2);
}

const Register cache = R11_scratch1;
const Register size = R12_scratch2;
__ get_cache_and_index_at_bcp(cache, 1, index_size);

@@ -1189,6 +1193,10 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
__ li(R0, 1);
__ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
}

// Argument and return type profiling.
__ profile_parameters_type(R3_ARG1, R4_ARG2, R5_ARG3, R6_ARG4);

// Increment invocation counter and check for overflow.
if (inc_counter) {
generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);

@@ -1469,6 +1477,8 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
__ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);
if (ProfileInterpreter) {
__ set_method_data_pointer_for_bcp();
__ ld(R11_scratch1, 0, R1_SP);
__ std(R28_mdx, _ijava_state_neg(mdx), R11_scratch1);
}
#if INCLUDE_JVMTI
Label L_done;

@@ -1480,13 +1490,11 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
// Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
__ ld(R4_ARG2, 0, R18_locals);
__ call_VM(R11_scratch1, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null),
R4_ARG2, R19_method, R14_bcp);

__ cmpdi(CCR0, R11_scratch1, 0);
__ MacroAssembler::call_VM(R4_ARG2, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), R4_ARG2, R19_method, R14_bcp, false);
__ restore_interpreter_state(R11_scratch1, /*bcp_and_mdx_only*/ true);
__ cmpdi(CCR0, R4_ARG2, 0);
__ beq(CCR0, L_done);

__ std(R11_scratch1, wordSize, R15_esp);
__ std(R4_ARG2, wordSize, R15_esp);
__ bind(L_done);
#endif // INCLUDE_JVMTI
__ dispatch_next(vtos);

|
||||
// Load target.
|
||||
__ addi(Rrecv_klass, Rrecv_klass, base + vtableEntry::method_offset_in_bytes());
|
||||
__ ldx(Rtarget_method, Rindex, Rrecv_klass);
|
||||
// Argument and return type profiling.
|
||||
__ profile_arguments_type(Rtarget_method, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */, true);
|
||||
__ call_from_interpreter(Rtarget_method, Rret, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */);
|
||||
}
|
||||
|
||||
@ -3318,6 +3320,8 @@ void TemplateTable::invokevfinal_helper(Register Rmethod, Register Rflags, Regis
|
||||
__ null_check_throw(Rrecv, -1, Rscratch1);
|
||||
|
||||
__ profile_final_call(Rrecv, Rscratch1);
|
||||
// Argument and return type profiling.
|
||||
__ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true);
|
||||
|
||||
// Do the call.
|
||||
__ call_from_interpreter(Rmethod, Rret_addr, Rscratch1, Rscratch2);
|
||||
@ -3339,6 +3343,8 @@ void TemplateTable::invokespecial(int byte_no) {
|
||||
__ null_check_throw(Rreceiver, -1, R11_scratch1);
|
||||
|
||||
__ profile_call(R11_scratch1, R12_scratch2);
|
||||
// Argument and return type profiling.
|
||||
__ profile_arguments_type(Rmethod, R11_scratch1, R12_scratch2, false);
|
||||
__ call_from_interpreter(Rmethod, Rret_addr, R11_scratch1, R12_scratch2);
|
||||
}
|
||||
|
||||
@ -3353,6 +3359,8 @@ void TemplateTable::invokestatic(int byte_no) {
|
||||
prepare_invoke(byte_no, R19_method, Rret_addr, noreg, noreg, Rflags, R11_scratch1);
|
||||
|
||||
__ profile_call(R11_scratch1, R12_scratch2);
|
||||
// Argument and return type profiling.
|
||||
__ profile_arguments_type(R19_method, R11_scratch1, R12_scratch2, false);
|
||||
__ call_from_interpreter(R19_method, Rret_addr, R11_scratch1, R12_scratch2);
|
||||
}
|
||||
|
||||
@ -3374,6 +3382,8 @@ void TemplateTable::invokeinterface_object_method(Register Rrecv_klass,
|
||||
|
||||
// Final call case.
|
||||
__ profile_final_call(Rtemp1, Rscratch);
|
||||
// Argument and return type profiling.
|
||||
__ profile_arguments_type(Rindex, Rscratch, Rrecv_klass /* scratch */, true);
|
||||
// Do the final call - the index (f2) contains the method.
|
||||
__ call_from_interpreter(Rindex, Rret, Rscratch, Rrecv_klass /* scratch */);
|
||||
|
||||
@ -3425,6 +3435,8 @@ void TemplateTable::invokeinterface(int byte_no) {
|
||||
__ cmpdi(CCR0, Rindex, 0);
|
||||
__ beq(CCR0, Lthrow_ame);
|
||||
// Found entry. Jump off!
|
||||
// Argument and return type profiling.
|
||||
__ profile_arguments_type(Rindex, Rscratch1, Rscratch2, true);
|
||||
__ call_from_interpreter(Rindex, Rret_addr, Rscratch1, Rscratch2);
|
||||
|
||||
// Vtable entry was NULL => Throw abstract method error.
|
||||
@ -3468,6 +3480,8 @@ void TemplateTable::invokedynamic(int byte_no) {
|
||||
// to be the callsite object the bootstrap method returned. This is passed to a
|
||||
// "link" method which does the dispatch (Most likely just grabs the MH stored
|
||||
// inside the callsite and does an invokehandle).
|
||||
// Argument and return type profiling.
|
||||
__ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, false);
|
||||
__ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
|
||||
}
|
||||
|
||||
@ -3488,6 +3502,8 @@ void TemplateTable::invokehandle(int byte_no) {
|
||||
__ profile_final_call(Rrecv, Rscratch1);
|
||||
|
||||
// Still no call from handle => We call the method handle interpreter here.
|
||||
// Argument and return type profiling.
|
||||
__ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true);
|
||||
__ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
|
||||
}
|
||||
|
||||
|
@ -134,13 +134,44 @@ void VM_Version::initialize() {
|
||||
}
|
||||
|
||||
assert(AllocatePrefetchLines > 0, "invalid value");
|
||||
if (AllocatePrefetchLines < 1) // Set valid value in product VM.
|
||||
if (AllocatePrefetchLines < 1) { // Set valid value in product VM.
|
||||
AllocatePrefetchLines = 1; // Conservative value.
|
||||
}
|
||||
|
||||
if (AllocatePrefetchStyle == 3 && AllocatePrefetchDistance < cache_line_size)
|
||||
if (AllocatePrefetchStyle == 3 && AllocatePrefetchDistance < cache_line_size) {
|
||||
AllocatePrefetchStyle = 1; // Fall back if inappropriate.
|
||||
}
|
||||
|
||||
assert(AllocatePrefetchStyle >= 0, "AllocatePrefetchStyle should be positive");
|
||||
|
||||
if (UseCRC32Intrinsics) {
|
||||
if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics))
|
||||
warning("CRC32 intrinsics are not available on this CPU");
|
||||
FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
|
||||
}
|
||||
|
||||
// The AES intrinsic stubs require AES instruction support.
|
||||
if (UseAES) {
|
||||
warning("AES instructions are not available on this CPU");
|
||||
FLAG_SET_DEFAULT(UseAES, false);
|
||||
}
|
||||
if (UseAESIntrinsics) {
|
||||
if (!FLAG_IS_DEFAULT(UseAESIntrinsics))
|
||||
warning("AES intrinsics are not available on this CPU");
|
||||
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
|
||||
}
|
||||
|
||||
if (UseSHA) {
|
||||
warning("SHA instructions are not available on this CPU");
|
||||
FLAG_SET_DEFAULT(UseSHA, false);
|
||||
}
|
||||
if (UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics) {
|
||||
warning("SHA intrinsics are not available on this CPU");
|
||||
FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
|
||||
FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
|
||||
FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
void VM_Version::print_features() {
|
||||
|
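All of the intrinsics blocks above follow one pattern: warn only when the flag was set explicitly on the command line, then force it off because the CPU lacks the instructions. A small C model of that pattern (the flag struct is illustrative, not the real HotSpot flag table):

  #include <stdbool.h>
  #include <stdio.h>

  typedef struct { bool value; bool is_default; } vm_flag;  /* illustrative */

  static void force_off(vm_flag *f, const char *msg) {
    if (f->value) {
      if (!f->is_default) {
        fprintf(stderr, "warning: %s\n", msg);  /* user asked explicitly */
      }
      f->value = false;   /* like FLAG_SET_DEFAULT(..., false) */
    }
  }
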
@@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*

@@ -45,7 +45,8 @@ void OSThread::pd_initialize() {

sigemptyset(&_caller_sigmask);

_startThread_lock = new Monitor(Mutex::event, "startThread_lock", true);
_startThread_lock = new Monitor(Mutex::event, "startThread_lock", true,
Monitor::_safepoint_check_never);
assert(_startThread_lock !=NULL, "check");
}

@@ -124,12 +124,6 @@ extern "C" {
}
#endif

// Excerpts from systemcfg.h definitions newer than AIX 5.3
#ifndef PV_7
# define PV_7 0x200000 // Power PC 7
# define PV_7_Compat 0x208000 // Power PC 7
#endif

#define MAX_PATH (2 * K)

// for timer info max values which include all bits

@@ -140,17 +134,40 @@ extern "C" {
#define ERROR_MP_VMGETINFO_FAILED 102
#define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103

// the semantics in this file are thus that codeptr_t is a *real code ptr*
// The semantics in this file are thus that codeptr_t is a *real code ptr*.
// This means that any function taking codeptr_t as arguments will assume
// a real codeptr and won't handle function descriptors (eg getFuncName),
// whereas functions taking address as args will deal with function
// descriptors (eg os::dll_address_to_library_name)
// descriptors (eg os::dll_address_to_library_name).
typedef unsigned int* codeptr_t;

// typedefs for stackslots, stack pointers, pointers to op codes
// Typedefs for stackslots, stack pointers, pointers to op codes.
typedef unsigned long stackslot_t;
typedef stackslot_t* stackptr_t;

// Excerpts from systemcfg.h definitions newer than AIX 5.3.
#ifndef PV_7
#define PV_7 0x200000 /* Power PC 7 */
#define PV_7_Compat 0x208000 /* Power PC 7 */
#endif
#ifndef PV_8
#define PV_8 0x300000 /* Power PC 8 */
#define PV_8_Compat 0x308000 /* Power PC 8 */
#endif

#define trcVerbose(fmt, ...) { /* PPC port */ \
if (Verbose) { \
fprintf(stderr, fmt, ##__VA_ARGS__); \
fputc('\n', stderr); fflush(stderr); \
} \
}
#define trc(fmt, ...) /* PPC port */

#define ERRBYE(s) { \
trcVerbose(s); \
return -1; \
}
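
Usage of the new tracing macros, as a sketch (the surrounding function is hypothetical): trcVerbose prints with a newline and flush only when Verbose is set, trc compiles to nothing, and ERRBYE logs a message and returns -1:

  static int parse_limit(const char *s) {   /* hypothetical caller */
    trcVerbose("parsing limit '%s'", s);
    if (s == NULL) {
      ERRBYE("no limit given");             /* logs, then return -1 */
    }
    trc("compiled away in this port");
    return 0;
  }
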
// query dimensions of the stack of the calling thread
static void query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);

@@ -182,12 +199,12 @@ inline bool is_valid_codepointer(codeptr_t p) {
return true;
}

// macro to check a given stack pointer against given stack limits and to die if test fails
// Macro to check a given stack pointer against given stack limits and to die if test fails.
#define CHECK_STACK_PTR(sp, stack_base, stack_size) { \
guarantee(is_valid_stackpointer((stackptr_t)(sp), (stackptr_t)(stack_base), stack_size), "Stack Pointer Invalid"); \
}

// macro to check the current stack pointer against given stacklimits
// Macro to check the current stack pointer against given stacklimits.
#define CHECK_CURRENT_STACK_PTR(stack_base, stack_size) { \
address sp; \
sp = os::current_stack_pointer(); \

@@ -221,7 +238,7 @@ static bool check_signals = true;
static pid_t _initial_pid = 0;
static int SR_signum = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
static sigset_t SR_sigset;
static pthread_mutex_t dl_mutex; // Used to protect dlsym() calls */
static pthread_mutex_t dl_mutex; // Used to protect dlsym() calls.

julong os::available_memory() {
return Aix::available_memory();

@@ -253,7 +270,6 @@ bool os::getenv(const char* name, char* buf, int len) {
return false;
}


// Return true if user is running as root.

bool os::have_special_privileges() {

@@ -284,8 +300,7 @@ static bool my_disclaim64(char* addr, size_t size) {

for (int i = 0; i < numFullDisclaimsNeeded; i ++) {
if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
//if (Verbose)
fprintf(stderr, "Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
trc("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
return false;
}
p += maxDisclaimSize;

@@ -293,8 +308,7 @@ static bool my_disclaim64(char* addr, size_t size) {

if (lastDisclaimSize > 0) {
if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
//if (Verbose)
fprintf(stderr, "Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
trc("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
return false;
}
}

@@ -334,11 +348,11 @@ pid_t os::Aix::gettid() {

void os::Aix::initialize_system_info() {

// get the number of online(logical) cpus instead of configured
// Get the number of online(logical) cpus instead of configured.
os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
assert(_processor_count > 0, "_processor_count must be > 0");

// retrieve total physical storage
// Retrieve total physical storage.
os::Aix::meminfo_t mi;
if (!os::Aix::get_meminfo(&mi)) {
fprintf(stderr, "os::Aix::get_meminfo failed.\n"); fflush(stderr);

@@ -513,7 +527,6 @@ query_multipage_support_end:

} // end os::Aix::query_multipage_support()

// The code for this method was initially derived from the version in os_linux.cpp.
void os::init_system_properties_values() {

#define DEFAULT_LIBPATH "/usr/lib:/lib"

@@ -603,10 +616,11 @@ bool os::Aix::is_sig_ignored(int sig) {
sigaction(sig, (struct sigaction*)NULL, &oact);
void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
: CAST_FROM_FN_PTR(void*, oact.sa_handler);
if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
return true;
else
} else {
return false;
}
}

void os::Aix::signal_sets_init() {

@@ -780,6 +794,9 @@ bool os::Aix::get_cpuinfo(cpuinfo_t* pci) {

// get the processor version from _system_configuration
switch (_system_configuration.version) {
case PV_8:
strcpy(pci->version, "Power PC 8");
break;
case PV_7:
strcpy(pci->version, "Power PC 7");
break;

@@ -807,6 +824,9 @@ bool os::Aix::get_cpuinfo(cpuinfo_t* pci) {
case PV_7_Compat:
strcpy(pci->version, "PV_7_Compat");
break;
case PV_8_Compat:
strcpy(pci->version, "PV_8_Compat");
break;
default:
strcpy(pci->version, "unknown");
}

@@ -942,7 +962,9 @@ bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {

pthread_attr_destroy(&attr);

if (ret != 0) {
if (ret == 0) {
// PPC port traceOsMisc(("Created New Thread : pthread-id %u", tid));
} else {
if (PrintMiscellaneous && (Verbose || WizardMode)) {
perror("pthread_create()");
}

@@ -1103,8 +1125,7 @@ jlong os::javaTimeNanos() {
if (os::Aix::on_pase()) {
Unimplemented();
return 0;
}
else {
} else {
// On AIX use the precision of processors real time clock
// or time base registers.
timebasestruct_t time;

@@ -1152,7 +1173,6 @@ bool os::getTimesSecs(double* process_real_time,
}
}


char * os::local_time_string(char *buf, size_t buflen) {
struct tm t;
time_t long_time;

@@ -1190,7 +1210,6 @@ void os::shutdown() {
if (abort_hook != NULL) {
abort_hook();
}

}

// Note: os::abort() might be called very early during initialization, or

@@ -1222,8 +1241,7 @@ void os::die() {
// from src/solaris/hpi/src/system_md.c

size_t os::lasterror(char *buf, size_t len) {

if (errno == 0) return 0;
if (errno == 0) return 0;

const char *s = ::strerror(errno);
size_t n = ::strlen(s);

@@ -1236,6 +1254,7 @@ size_t os::lasterror(char *buf, size_t len) {
}

intx os::current_thread_id() { return (intx)pthread_self(); }

int os::current_process_id() {

// This implementation returns a unique pid, the pid of the

@@ -1372,9 +1391,9 @@ bool os::dll_address_to_function_name(address addr, char *buf,
if (offset) {
*offset = -1;
}
if (buf) {
buf[0] = '\0';
}
// Buf is not optional, but offset is optional.
assert(buf != NULL, "sanity check");
buf[0] = '\0';

// Resolve function ptr literals first.
addr = resolve_function_descriptor_to_code_pointer(addr);

@@ -1407,12 +1426,9 @@ static int getModuleName(codeptr_t pc, // [in] program counte
return 0;
}

if (Verbose) {
fprintf(stderr, "pc outside any module");
}
trcVerbose("pc outside any module");

return -1;

}

bool os::dll_address_to_library_name(address addr, char* buf,

@@ -1420,9 +1436,9 @@ bool os::dll_address_to_library_name(address addr, char* buf,
if (offset) {
*offset = -1;
}
if (buf) {
buf[0] = '\0';
}
// Buf is not optional, but offset is optional.
assert(buf != NULL, "sanity check");
buf[0] = '\0';

// Resolve function ptr literals first.
addr = resolve_function_descriptor_to_code_pointer(addr);

@@ -1437,7 +1453,7 @@ bool os::dll_address_to_library_name(address addr, char* buf,
}

// Loads .dll/.so and in case of error it checks if .dll/.so was built
// for the same architecture as Hotspot is running on
// for the same architecture as Hotspot is running on.
void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {

if (ebuf && ebuflen > 0) {

@@ -1600,7 +1616,6 @@ void os::print_siginfo(outputStream* st, void* siginfo) {
st->cr();
}


static void print_signal_handler(outputStream* st, int sig,
char* buf, size_t buflen);

@@ -1624,7 +1639,7 @@ void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {

static char saved_jvm_path[MAXPATHLEN] = {0};

// Find the full path to the current module, libjvm.so or libjvm_g.so
// Find the full path to the current module, libjvm.so.
void os::jvm_path(char *buf, jint buflen) {
// Error checking.
if (buflen < MAXPATHLEN) {

@@ -1695,7 +1710,7 @@ void* os::signal(int signal_number, void* handler) {
// Do not block out synchronous signals in the signal handler.
// Blocking synchronous signals only makes sense if you can really
// be sure that those signals won't happen during signal handling,
// when the blocking applies. Normal signal handlers are lean and
// when the blocking applies. Normal signal handlers are lean and
// do not cause signals. But our signal handlers tend to be "risky"
// - secondary SIGSEGV, SIGILL, SIGBUS' may and do happen.
// On AIX, PASE there was a case where a SIGSEGV happened, followed

@@ -2861,13 +2876,9 @@ OSReturn os::set_native_priority(Thread* thread, int newpri) {
param.sched_priority = newpri;
int ret = pthread_setschedparam(thr, policy, &param);

if (Verbose) {
if (ret == 0) {
fprintf(stderr, "changed priority of thread %d to %d\n", (int)thr, newpri);
} else {
fprintf(stderr, "Could not changed priority for thread %d to %d (error %d, %s)\n",
(int)thr, newpri, ret, strerror(ret));
}
if (ret != 0) {
trcVerbose("Could not change priority for thread %d to %d (error %d, %s)",
(int)thr, newpri, ret, strerror(ret));
}
return (ret == 0) ? OS_OK : OS_ERR;
}

@@ -2988,7 +2999,6 @@ static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
errno = old_errno;
}


static int SR_initialize() {
struct sigaction act;
char *s;

@@ -3187,7 +3197,6 @@ void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
JVM_handle_aix_signal(sig, info, uc, true);
}


// This boolean allows users to forward their own non-matching signals
// to JVM_handle_aix_signal, harmlessly.
bool os::Aix::signal_handlers_are_installed = false;

@@ -3381,7 +3390,7 @@ void os::Aix::install_signal_handlers() {
set_signal_handler(SIGDANGER, true);

if (libjsig_is_loaded) {
// Tell libjsig jvm finishes setting signal handlers
// Tell libjsig jvm finishes setting signal handlers.
(*end_signal_setting)();
}

@@ -3397,7 +3406,7 @@ void os::Aix::install_signal_handlers() {
tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
check_signals = false;
}
// need to initialize check_signal_done
// Need to initialize check_signal_done.
::sigemptyset(&check_signal_done);
}
}

@@ -3471,7 +3480,6 @@ static void print_signal_handler(outputStream* st, int sig,
st->cr();
}


#define DO_SIGNAL_CHECK(sig) \
if (!sigismember(&check_signal_done, sig)) \
os::Aix::check_signal_handler(sig)

@@ -3532,7 +3540,6 @@ void os::Aix::check_signal_handler(int sig) {
? CAST_FROM_FN_PTR(address, act.sa_sigaction)
: CAST_FROM_FN_PTR(address, act.sa_handler);


switch(sig) {
case SIGSEGV:
case SIGBUS:

@@ -3685,15 +3692,13 @@ void os::init(void) {
pthread_mutex_init(&dl_mutex, NULL);
}

// this is called _after_ the global arguments have been parsed
// This is called _after_ the global arguments have been parsed.
jint os::init_2(void) {

if (Verbose) {
fprintf(stderr, "processor count: %d\n", os::_processor_count);
fprintf(stderr, "physical memory: %lu\n", Aix::_physical_memory);
}
trcVerbose("processor count: %d", os::_processor_count);
trcVerbose("physical memory: %lu", Aix::_physical_memory);

// initially build up the loaded dll map
// Initially build up the loaded dll map.
LoadedLibraries::reload();

const int page_size = Aix::page_size();

@@ -3743,7 +3748,7 @@ jint os::init_2(void) {
}

if (map_address != (address) MAP_FAILED) {
// map succeeded, but polling_page is not at wished address, unmap and continue.
// Map succeeded, but polling_page is not at wished address, unmap and continue.
::munmap(map_address, map_size);
map_address = (address) MAP_FAILED;
}

@@ -3797,7 +3802,7 @@ jint os::init_2(void) {

// Make the stack size a multiple of the page size so that
// the yellow/red zones can be guarded.
// note that this can be 0, if no default stacksize was set
// Note that this can be 0, if no default stacksize was set.
JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));

Aix::libpthread_init();

@@ -4088,7 +4093,6 @@ int os::open(const char *path, int oflag, int mode) {
return fd;
}


// create binary file, rewriting existing file if required
int os::create_binary_file(const char* path, bool rewrite_existing) {
int oflags = O_WRONLY | O_CREAT;

@@ -4169,7 +4173,6 @@ char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
return mapped_address;
}


// Remap a block of memory.
char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
char *addr, size_t bytes, bool read_only,

@@ -4217,14 +4220,14 @@ static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong*
jlong sys_time = 0;
jlong user_time = 0;

// reimplemented using getthrds64().
// Reimplemented using getthrds64().
//
// goes like this:
// Works like this:
// For the thread in question, get the kernel thread id. Then get the
// kernel thread statistics using that id.
//
// This only works of course when no pthread scheduling is used,
// ie there is a 1:1 relationship to kernel threads.
// i.e. there is a 1:1 relationship to kernel threads.
// On AIX, see AIXTHREAD_SCOPE variable.

pthread_t pthtid = thread->osthread()->pthread_id();

@ -4371,14 +4374,12 @@ void os::Aix::initialize_os_info() {
memset(&uts, 0, sizeof(uts));
strcpy(uts.sysname, "?");
if (::uname(&uts) == -1) {
fprintf(stderr, "uname failed (%d)\n", errno);
trc("uname failed (%d)", errno);
guarantee(0, "Could not determine whether we run on AIX or PASE");
} else {
if (Verbose) {
fprintf(stderr,"uname says: sysname \"%s\" version \"%s\" release \"%s\" "
"node \"%s\" machine \"%s\"\n",
uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
}
trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" "
"node \"%s\" machine \"%s\"\n",
uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
const int major = atoi(uts.version);
assert(major > 0, "invalid OS version");
const int minor = atoi(uts.release);
@ -4390,12 +4391,10 @@ void os::Aix::initialize_os_info() {
// We run on AIX. We do not support versions older than AIX 5.3.
_on_pase = 0;
if (_os_version < 0x0503) {
fprintf(stderr, "AIX release older than AIX 5.3 not supported.\n");
trc("AIX release older than AIX 5.3 not supported.");
assert(false, "AIX release too old.");
} else {
if (Verbose) {
fprintf(stderr, "We run on AIX %d.%d\n", major, minor);
}
trcVerbose("We run on AIX %d.%d\n", major, minor);
}
} else {
assert(false, "unknown OS");
@ -4403,7 +4402,6 @@ void os::Aix::initialize_os_info() {
}

guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");

} // end: os::Aix::initialize_os_info()

// Scan environment for important settings which might effect the VM.

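For illustration, a minimal sketch of the version derivation this hunk performs, packing uname() output the same way (major from uts.version, minor from uts.release, so the AIX 5.3 floor becomes the 0x0503 comparison above); the helper name is hypothetical.

// Sketch only: derive a packed OS version word from uname(), as
// initialize_os_info() does above. 0 means "could not determine".
#include <sys/utsname.h>
#include <stdlib.h>

static int packed_os_version() {
  struct utsname uts;
  if (uname(&uts) == -1) {
    return 0;
  }
  const int major = atoi(uts.version);   // e.g. "5" on AIX 5.3
  const int minor = atoi(uts.release);   // e.g. "3" on AIX 5.3
  return (major << 8) | minor;           // AIX 5.3 -> 0x0503
}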
@ -4441,12 +4439,10 @@ void os::Aix::scan_environment() {
// Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
// exec() ? before loading the libjvm ? ....)
p = ::getenv("XPG_SUS_ENV");
if (Verbose) {
fprintf(stderr, "XPG_SUS_ENV=%s.\n", p ? p : "<unset>");
}
trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
if (p && strcmp(p, "ON") == 0) {
_xpg_sus_mode = 1;
fprintf(stderr, "Unsupported setting: XPG_SUS_ENV=ON\n");
trc("Unsupported setting: XPG_SUS_ENV=ON");
// This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
// clobber address ranges. If we ever want to support that, we have to do some
// testing first.
@ -4458,10 +4454,7 @@ void os::Aix::scan_environment() {
// Switch off AIX internal (pthread) guard pages. This has
// immediate effect for any pthread_create calls which follow.
p = ::getenv("AIXTHREAD_GUARDPAGES");
if (Verbose) {
fprintf(stderr, "AIXTHREAD_GUARDPAGES=%s.\n", p ? p : "<unset>");
fprintf(stderr, "setting AIXTHREAD_GUARDPAGES=0.\n");
}
trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");
rc = ::putenv("AIXTHREAD_GUARDPAGES=0");
guarantee(rc == 0, "");

@ -4479,7 +4472,7 @@ void os::Aix::initialize_libperfstat() {
assert(os::Aix::on_aix(), "AIX only");

if (!libperfstat::init()) {
fprintf(stderr, "libperfstat initialization failed.\n");
trc("libperfstat initialization failed.");
assert(false, "libperfstat initialization failed");
} else {
if (Verbose) {
@ -4651,7 +4644,6 @@ static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
return abstime;
}

// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
// Conceptually TryPark() should be equivalent to park(0).

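The compute_abstime() context line above refers to the usual millis-to-deadline conversion for pthread_cond_timedwait. A minimal sketch of that conversion, with a hypothetical helper name; the real function lives in os_aix.cpp.

// Sketch only: turn a relative timeout in milliseconds into the absolute
// timespec deadline that pthread_cond_timedwait() expects.
#include <sys/time.h>
#include <time.h>

static struct timespec* millis_to_abstime(struct timespec* abstime, long millis) {
  struct timeval now;
  gettimeofday(&now, NULL);
  long secs  = now.tv_sec + millis / 1000;
  long usecs = now.tv_usec + (millis % 1000) * 1000;
  if (usecs >= 1000000) {
    secs  += 1;
    usecs -= 1000000;
  }
  abstime->tv_sec  = secs;
  abstime->tv_nsec = usecs * 1000;
  return abstime;
}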
@ -4732,7 +4724,7 @@ int os::PlatformEvent::park(jlong millis) {
while (_Event < 0) {
status = pthread_cond_timedwait(_cond, _mutex, &abst);
assert_status(status == 0 || status == ETIMEDOUT,
status, "cond_timedwait");
status, "cond_timedwait");
if (!FilterSpuriousWakeups) break; // previous semantics
if (status == ETIMEDOUT) break;
// We consume and ignore EINTR and spurious wakeups.
@ -4866,9 +4858,9 @@ void Parker::park(bool isAbsolute, jlong time) {
// Optional fast-path check:
// Return immediately if a permit is available.
if (_counter > 0) {
_counter = 0;
OrderAccess::fence();
return;
_counter = 0;
OrderAccess::fence();
return;
}

Thread* thread = Thread::current();
@ -4890,7 +4882,6 @@ void Parker::park(bool isAbsolute, jlong time) {
unpackTime(&absTime, isAbsolute, time);
}

// Enter safepoint region
// Beware of deadlocks such as 6317397.
// The per-thread Parker:: mutex is a classic leaf-lock.
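The park() hunks above keep the classic timed-wait loop shape. A minimal self-contained sketch of that shape, re-testing the predicate after every wakeup because pthread_cond_timedwait() can return spuriously; names are hypothetical.

// Sketch only: predicate-checked timed wait, as in PlatformEvent::park(millis).
#include <pthread.h>
#include <errno.h>
#include <time.h>

static void wait_for_event(pthread_mutex_t* mu, pthread_cond_t* cv,
                           volatile int* event, const struct timespec* deadline) {
  pthread_mutex_lock(mu);
  while (*event < 0) {
    int status = pthread_cond_timedwait(cv, mu, deadline);
    if (status == ETIMEDOUT) break;  // deadline reached
    // status == 0 covers both real signals and spurious wakeups;
    // the loop condition re-tests the event word either way.
  }
  pthread_mutex_unlock(mu);
}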
@ -4978,7 +4969,6 @@ void Parker::unpark() {
}
}

extern char** environ;

// Run the specified command in a separate process. Return its exit value,
@ -4997,44 +4987,43 @@ int os::fork_and_exec(char* cmd) {
} else if (pid == 0) {
// child process

// try to be consistent with system(), which uses "/usr/bin/sh" on AIX
// Try to be consistent with system(), which uses "/usr/bin/sh" on AIX.
execve("/usr/bin/sh", argv, environ);

// execve failed
_exit(-1);

} else {
} else {
// copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
// care about the actual exit code, for now.

int status;

// Wait for the child process to exit. This returns immediately if
// Wait for the child process to exit. This returns immediately if
// the child has already exited. */
while (waitpid(pid, &status, 0) < 0) {
switch (errno) {
switch (errno) {
case ECHILD: return 0;
case EINTR: break;
default: return -1;
}
}
}

if (WIFEXITED(status)) {
// The child exited normally; get its exit code.
return WEXITSTATUS(status);
// The child exited normally; get its exit code.
return WEXITSTATUS(status);
} else if (WIFSIGNALED(status)) {
// The child exited because of a signal
// The best value to return is 0x80 + signal number,
// because that is what all Unix shells do, and because
// it allows callers to distinguish between process exit and
// process death by signal.
return 0x80 + WTERMSIG(status);
// The child exited because of a signal.
// The best value to return is 0x80 + signal number,
// because that is what all Unix shells do, and because
// it allows callers to distinguish between process exit and
// process death by signal.
return 0x80 + WTERMSIG(status);
} else {
// Unknown exit code; pass it through
return status;
// Unknown exit code; pass it through.
return status;
}
}
// Remove warning.
return -1;
}

@ -5049,7 +5038,7 @@ bool os::is_headless_jre() {
struct stat statbuf;
char buf[MAXPATHLEN];
char libmawtpath[MAXPATHLEN];
const char *xawtstr = "/xawt/libmawt.so";
const char *xawtstr = "/xawt/libmawt.so";
const char *new_xawtstr = "/libawt_xawt.so";

char *p;
@ -5090,6 +5079,9 @@ int os::get_core_path(char* buffer, size_t bufferSize) {
return 0;
}

jio_snprintf(buffer, bufferSize, "%s/core or core.%d",
p, current_process_id());

return strlen(buffer);
}

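The fork_and_exec() rewrite above preserves the standard status-word decoding. As a compact reference, a sketch of just that decoding step:

// Sketch only: decode a waitpid() status word the way fork_and_exec() does.
#include <sys/wait.h>

static int decode_exit_status(int status) {
  if (WIFEXITED(status)) {
    return WEXITSTATUS(status);       // normal termination: the exit code
  } else if (WIFSIGNALED(status)) {
    return 0x80 + WTERMSIG(status);   // shell convention for death by signal
  }
  return status;                      // unknown encoding: pass it through
}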
@ -209,7 +209,7 @@ class Aix {
return _can_use_16M_pages == 1 ? true : false;
}

static address ucontext_get_pc(ucontext_t* uc);
static address ucontext_get_pc(const ucontext_t* uc);
static intptr_t* ucontext_get_sp(ucontext_t* uc);
static intptr_t* ucontext_get_fp(ucontext_t* uc);
// Set PC into context. Needed for continuation after signal.

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -43,7 +43,8 @@ void OSThread::pd_initialize() {

sigemptyset(&_caller_sigmask);

_startThread_lock = new Monitor(Mutex::event, "startThread_lock", true);
_startThread_lock = new Monitor(Mutex::event, "startThread_lock", true,
Monitor::_safepoint_check_never);
assert(_startThread_lock !=NULL, "check");
}

@ -4673,7 +4673,7 @@ bool os::is_headless_jre() {
// Get the default path to the core file
// Returns the length of the string
int os::get_core_path(char* buffer, size_t bufferSize) {
int n = jio_snprintf(buffer, bufferSize, "/cores");
int n = jio_snprintf(buffer, bufferSize, "/cores/core.%d", current_process_id());

// Truncate if theoretical string was longer than bufferSize
n = MIN2(n, (int)bufferSize);

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -39,7 +39,8 @@ void OSThread::pd_initialize() {

sigemptyset(&_caller_sigmask);

_startThread_lock = new Monitor(Mutex::event, "startThread_lock", true);
_startThread_lock = new Monitor(Mutex::event, "startThread_lock", true,
Monitor::_safepoint_check_never);
assert(_startThread_lock !=NULL, "check");
}

@ -5988,13 +5988,70 @@ bool os::is_headless_jre() {
// Get the default path to the core file
// Returns the length of the string
int os::get_core_path(char* buffer, size_t bufferSize) {
const char* p = get_current_directory(buffer, bufferSize);
/*
* Max length of /proc/sys/kernel/core_pattern is 128 characters.
* See https://www.kernel.org/doc/Documentation/sysctl/kernel.txt
*/
const int core_pattern_len = 129;
char core_pattern[core_pattern_len] = {0};

if (p == NULL) {
assert(p != NULL, "failed to get current directory");
int core_pattern_file = ::open("/proc/sys/kernel/core_pattern", O_RDONLY);
if (core_pattern_file != -1) {
ssize_t ret = ::read(core_pattern_file, core_pattern, core_pattern_len);
::close(core_pattern_file);

if (ret > 0) {
char *last_char = core_pattern + strlen(core_pattern) - 1;

if (*last_char == '\n') {
*last_char = '\0';
}
}
}

if (strlen(core_pattern) == 0) {
return 0;
}

char *pid_pos = strstr(core_pattern, "%p");
size_t written;

if (core_pattern[0] == '/') {
written = jio_snprintf(buffer, bufferSize, core_pattern);
} else {
char cwd[PATH_MAX];

const char* p = get_current_directory(cwd, PATH_MAX);
if (p == NULL) {
assert(p != NULL, "failed to get current directory");
return 0;
}

if (core_pattern[0] == '|') {
written = jio_snprintf(buffer, bufferSize,
"\"%s\" (or dumping to %s/core.%d)",
&core_pattern[1], p, current_process_id());
} else {
written = jio_snprintf(buffer, bufferSize, "%s/%s", p, core_pattern);
}
}

if ((written >= 0) && (written < bufferSize)
&& (pid_pos == NULL) && (core_pattern[0] != '|')) {
int core_uses_pid_file = ::open("/proc/sys/kernel/core_uses_pid", O_RDONLY);

if (core_uses_pid_file != -1) {
char core_uses_pid = 0;
ssize_t ret = ::read(core_uses_pid_file, &core_uses_pid, 1);
::close(core_uses_pid_file);

if (core_uses_pid == '1'){
jio_snprintf(buffer + written, bufferSize - written,
".%d", current_process_id());
}
}
}

return strlen(buffer);
}

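The new Linux get_core_path() distinguishes three core_pattern shapes. A minimal sketch of that classification; the enum and helper are illustrative, not part of the patch.

// Sketch only: the three /proc/sys/kernel/core_pattern cases handled above.
enum CorePatternKind {
  CORE_ABSOLUTE,  // starts with '/': used verbatim
  CORE_PIPED,     // starts with '|': handed to a user-space handler
  CORE_RELATIVE   // anything else: resolved against the current directory
};

static CorePatternKind classify_core_pattern(const char* pattern) {
  if (pattern[0] == '|') return CORE_PIPED;
  if (pattern[0] == '/') return CORE_ABSOLUTE;
  return CORE_RELATIVE;
}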
@ -51,15 +51,24 @@ void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char*
struct rlimit rlim;
bool success;

n = get_core_path(buffer, bufferSize);
char core_path[PATH_MAX];
n = get_core_path(core_path, PATH_MAX);

if (getrlimit(RLIMIT_CORE, &rlim) != 0) {
jio_snprintf(buffer + n, bufferSize - n, "/core or core.%d (may not exist)", current_process_id());
if (n <= 0) {
jio_snprintf(buffer, bufferSize, "core.%d (may not exist)", current_process_id());
success = true;
#ifdef LINUX
} else if (core_path[0] == '"') { // redirect to user process
jio_snprintf(buffer, bufferSize, "Core dumps may be processed with %s", core_path);
success = true;
#endif
} else if (getrlimit(RLIMIT_CORE, &rlim) != 0) {
jio_snprintf(buffer, bufferSize, "%s (may not exist)", core_path);
success = true;
} else {
switch(rlim.rlim_cur) {
case RLIM_INFINITY:
jio_snprintf(buffer + n, bufferSize - n, "/core or core.%d", current_process_id());
jio_snprintf(buffer, bufferSize, "%s", core_path);
success = true;
break;
case 0:
@ -67,11 +76,12 @@ void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char*
success = false;
break;
default:
jio_snprintf(buffer + n, bufferSize - n, "/core or core.%d (max size %lu kB). To ensure a full core dump, try \"ulimit -c unlimited\" before starting Java again", current_process_id(), (unsigned long)(rlim.rlim_cur >> 10));
jio_snprintf(buffer, bufferSize, "%s (max size %lu kB). To ensure a full core dump, try \"ulimit -c unlimited\" before starting Java again", core_path, (unsigned long)(rlim.rlim_cur >> 10));
success = true;
break;
}
}

VMError::report_coredump_status(buffer, success);
}

@ -89,8 +99,8 @@ int os::get_native_stack(address* stack, int frames, int toSkip) {
} else {
stack[frame_idx ++] = fr.pc();
}
if (fr.fp() == NULL || os::is_first_C_frame(&fr)
||fr.sender_pc() == NULL || fr.cb() != NULL) break;
if (fr.fp() == NULL || fr.cb() != NULL ||
fr.sender_pc() == NULL || os::is_first_C_frame(&fr)) break;

if (fr.sender_pc() && !os::is_first_C_frame(&fr)) {
fr = os::get_sender_for_C_frame(&fr);

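check_or_create_dump() above keys its message off RLIMIT_CORE. A standalone sketch of the same case analysis:

// Sketch only: the RLIMIT_CORE cases distinguished by check_or_create_dump().
#include <sys/resource.h>
#include <stdio.h>

static void describe_core_limit(void) {
  struct rlimit rlim;
  if (getrlimit(RLIMIT_CORE, &rlim) != 0) {
    printf("core dump limit unknown (dump may not exist)\n");
  } else if (rlim.rlim_cur == RLIM_INFINITY) {
    printf("full core dumps enabled\n");
  } else if (rlim.rlim_cur == 0) {
    printf("core dumps disabled\n");
  } else {
    printf("core dumps truncated at %lu kB; try \"ulimit -c unlimited\"\n",
           (unsigned long)(rlim.rlim_cur >> 10));
  }
}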
@ -5979,6 +5979,9 @@ int os::get_core_path(char* buffer, size_t bufferSize) {
return 0;
}

jio_snprintf(buffer, bufferSize, "%s/core or core.%d",
p, current_process_id());

return strlen(buffer);
}

@ -3768,7 +3768,6 @@ HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
return NULL;
}

#define MAX_EXIT_HANDLES PRODUCT_ONLY(32) NOT_PRODUCT(128)
#define EXIT_TIMEOUT PRODUCT_ONLY(1000) NOT_PRODUCT(4000) /* 1 sec in product, 4 sec in debug */

static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
@ -3787,7 +3786,7 @@ int os::win32::exit_process_or_thread(Ept what, int exit_code) {
// _endthreadex().
// Should be large enough to avoid blocking the exiting thread due to lack of
// a free slot.
static HANDLE handles[MAX_EXIT_HANDLES];
static HANDLE handles[MAXIMUM_WAIT_OBJECTS];
static int handle_count = 0;

static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
@ -3809,32 +3808,34 @@ int os::win32::exit_process_or_thread(Ept what, int exit_code) {
if (res == WAIT_TIMEOUT) {
handles[j++] = handles[i];
} else {
if (res != WAIT_OBJECT_0) {
warning("WaitForSingleObject failed in %s: %d\n", __FILE__, __LINE__);
// Don't keep the handle, if we failed waiting for it.
if (res == WAIT_FAILED) {
warning("WaitForSingleObject failed (%u) in %s: %d\n",
GetLastError(), __FILE__, __LINE__);
}
// Don't keep the handle, if we failed waiting for it.
CloseHandle(handles[i]);
}
}

// If there's no free slot in the array of the kept handles, we'll have to
// wait until at least one thread completes exiting.
if ((handle_count = j) == MAX_EXIT_HANDLES) {
if ((handle_count = j) == MAXIMUM_WAIT_OBJECTS) {
// Raise the priority of the oldest exiting thread to increase its chances
// to complete sooner.
SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
res = WaitForMultipleObjects(MAX_EXIT_HANDLES, handles, FALSE, EXIT_TIMEOUT);
if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAX_EXIT_HANDLES)) {
res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
i = (res - WAIT_OBJECT_0);
handle_count = MAX_EXIT_HANDLES - 1;
handle_count = MAXIMUM_WAIT_OBJECTS - 1;
for (; i < handle_count; ++i) {
handles[i] = handles[i + 1];
}
} else {
warning("WaitForMultipleObjects %s in %s: %d\n",
(res == WAIT_FAILED ? "failed" : "timed out"), __FILE__, __LINE__);
warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
(res == WAIT_FAILED ? "failed" : "timed out"),
GetLastError(), __FILE__, __LINE__);
// Don't keep handles, if we failed waiting for them.
for (i = 0; i < MAX_EXIT_HANDLES; ++i) {
for (i = 0; i < MAXIMUM_WAIT_OBJECTS; ++i) {
CloseHandle(handles[i]);
}
handle_count = 0;
@ -3846,7 +3847,8 @@ int os::win32::exit_process_or_thread(Ept what, int exit_code) {
hthr = GetCurrentThread();
if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
0, FALSE, DUPLICATE_SAME_ACCESS)) {
warning("DuplicateHandle failed in %s: %d\n", __FILE__, __LINE__);
warning("DuplicateHandle failed (%u) in %s: %d\n",
GetLastError(), __FILE__, __LINE__);
} else {
++handle_count;
}
@ -3869,9 +3871,10 @@ int os::win32::exit_process_or_thread(Ept what, int exit_code) {
SetThreadPriority(handles[i], THREAD_PRIORITY_ABOVE_NORMAL);
}
res = WaitForMultipleObjects(handle_count, handles, TRUE, EXIT_TIMEOUT);
if (res < WAIT_OBJECT_0 || res >= (WAIT_OBJECT_0 + MAX_EXIT_HANDLES)) {
warning("WaitForMultipleObjects %s in %s: %d\n",
(res == WAIT_FAILED ? "failed" : "timed out"), __FILE__, __LINE__);
if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
(res == WAIT_FAILED ? "failed" : "timed out"),
GetLastError(), __FILE__, __LINE__);
}
for (i = 0; i < handle_count; ++i) {
CloseHandle(handles[i]);
@ -3909,7 +3912,6 @@ int os::win32::exit_process_or_thread(Ept what, int exit_code) {
return exit_code;
}

#undef MAX_EXIT_HANDLES
#undef EXIT_TIMEOUT

void os::win32::setmode_streams() {

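The exit_process_or_thread() changes tighten how WaitForMultipleObjects results are interpreted. A minimal sketch of that decoding; the helper name is hypothetical.

// Sketch only (Win32): decode a WaitForMultipleObjects() result the way the
// patched code does - success is WAIT_OBJECT_0..WAIT_OBJECT_0+count-1, and
// only WAIT_FAILED has a meaningful GetLastError() value.
#include <windows.h>
#include <stdio.h>

static int wait_any_and_decode(HANDLE* handles, DWORD count, DWORD timeout_ms) {
  DWORD res = WaitForMultipleObjects(count, handles, FALSE, timeout_ms);
  if (res >= WAIT_OBJECT_0 && res < WAIT_OBJECT_0 + count) {
    return (int)(res - WAIT_OBJECT_0);   // index of the signaled handle
  }
  if (res == WAIT_FAILED) {
    printf("wait failed (%u)\n", (unsigned)GetLastError());
  } else if (res == WAIT_TIMEOUT) {
    printf("wait timed out\n");
  }
  return -1;
}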
@ -91,8 +91,9 @@ void os::initialize_thread(Thread *thread) { }

// Frame information (pc, sp, fp) retrieved via ucontext
// always looks like a C-frame according to the frame
// conventions in frame_ppc64.hpp.
address os::Aix::ucontext_get_pc(ucontext_t * uc) {
// conventions in frame_ppc.hpp.

address os::Aix::ucontext_get_pc(const ucontext_t * uc) {
return (address)uc->uc_mcontext.jmp_context.iar;
}

@ -486,7 +487,7 @@ void os::Aix::init_thread_fpu_state(void) {
////////////////////////////////////////////////////////////////////////////////
// thread stack

size_t os::Aix::min_stack_allowed = 768*K;
size_t os::Aix::min_stack_allowed = 128*K;

// Aix is always in floating stack mode. The stack size for a new
// thread can be set via pthread_attr_setstacksize().
@ -499,7 +500,7 @@ size_t os::Aix::default_stack_size(os::ThreadType thr_type) {
// because of the strange 'fallback logic' in os::create_thread().
// Better set CompilerThreadStackSize in globals_<os_cpu>.hpp if you want to
// specify a different stack size for compiler threads!
size_t s = (thr_type == os::compiler_thread ? 4 * M : 1024 * K);
size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
return s;
}

@ -23,8 +23,8 @@
*
*/

#ifndef OS_CPU_AIX_OJDKPPC_VM_OS_AIX_PPC_HPP
#define OS_CPU_AIX_OJDKPPC_VM_OS_AIX_PPC_HPP
#ifndef OS_CPU_AIX_PPC_VM_OS_AIX_PPC_HPP
#define OS_CPU_AIX_PPC_VM_OS_AIX_PPC_HPP

static void setup_fpu() {}

@ -32,4 +32,4 @@
// Note: Currently only used in 64 bit Windows implementations
static bool register_code_area(char *low, char *high) { return true; }

#endif // OS_CPU_AIX_OJDKPPC_VM_OS_AIX_PPC_HPP
#endif // OS_CPU_AIX_PPC_VM_OS_AIX_PPC_HPP

@ -23,8 +23,8 @@
*
*/

#ifndef OS_CPU_AIX_PPC_64_VM_PREFETCH_AIX_PPC_64_INLINE_HPP
#define OS_CPU_AIX_PPC_64_VM_PREFETCH_AIX_PPC_64_INLINE_HPP
#ifndef OS_CPU_AIX_PPC_VM_PREFETCH_AIX_PPC_INLINE_HPP
#define OS_CPU_AIX_PPC_VM_PREFETCH_AIX_PPC_INLINE_HPP

#include "runtime/prefetch.hpp"

@ -55,4 +55,4 @@ inline void Prefetch::write(void *loc, intx interval) {
#endif
}

#endif // OS_CPU_AIX_PPC_64_VM_PREFETCH_AIX_PPC_64_INLINE_HPP
#endif // OS_CPU_AIX_PPC_VM_PREFETCH_AIX_PPC_INLINE_HPP

@ -23,8 +23,8 @@
*
*/

#ifndef OS_CPU_AIX_OJDKPPC_VM_THREADLS_AIX_PPC_HPP
#define OS_CPU_AIX_OJDKPPC_VM_THREADLS_AIX_PPC_HPP
#ifndef OS_CPU_AIX_PPC_VM_THREADLS_AIX_PPC_HPP
#define OS_CPU_AIX_PPC_VM_THREADLS_AIX_PPC_HPP

// Processor dependent parts of ThreadLocalStorage

@ -33,4 +33,4 @@ public:
return (Thread *) os::thread_local_storage_at(thread_index());
}

#endif // OS_CPU_AIX_OJDKPPC_VM_THREADLS_AIX_PPC_HPP
#endif // OS_CPU_AIX_PPC_VM_THREADLS_AIX_PPC_HPP

@ -23,8 +23,8 @@
*
*/

#ifndef OS_CPU_AIX_OJDKPPC_VM_THREAD_AIX_PPC_HPP
#define OS_CPU_AIX_OJDKPPC_VM_THREAD_AIX_PPC_HPP
#ifndef OS_CPU_AIX_PPC_VM_THREAD_AIX_PPC_HPP
#define OS_CPU_AIX_PPC_VM_THREAD_AIX_PPC_HPP

private:
void pd_initialize() {
@ -76,4 +76,4 @@

intptr_t* last_interpreter_fp() { return _last_interpreter_fp; }

#endif // OS_CPU_AIX_OJDKPPC_VM_THREAD_AIX_PPC_HPP
#endif // OS_CPU_AIX_PPC_VM_THREAD_AIX_PPC_HPP

@ -453,7 +453,7 @@ void os::Linux::set_fpu_control_word(int fpu_control) {
////////////////////////////////////////////////////////////////////////////////
// thread stack

size_t os::Linux::min_stack_allowed = 768*K;
size_t os::Linux::min_stack_allowed = 128*K;

bool os::Linux::supports_variable_stack_size() { return true; }

@ -81,7 +81,8 @@ ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous, Depen
_metaspace(NULL), _unloading(false), _klasses(NULL),
_claimed(0), _jmethod_ids(NULL), _handles(NULL), _deallocate_list(NULL),
_next(NULL), _dependencies(dependencies),
_metaspace_lock(new Mutex(Monitor::leaf+1, "Metaspace allocation lock", true)) {
_metaspace_lock(new Mutex(Monitor::leaf+1, "Metaspace allocation lock", true,
Monitor::_safepoint_check_never)) {
// empty
}

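This is the first of many hunks that thread a safepoint-check policy through the Monitor/Mutex constructors. A miniature illustration of the constructor shape these call sites now assume; the enum values appear in the patch itself, but the class below is illustrative, not the real declaration.

// Sketch only: the extended lock constructor the call sites below rely on.
class MonitorSketch {
 public:
  enum SafepointCheckRequired {
    _safepoint_check_never,      // never acquired with a safepoint check
    _safepoint_check_sometimes,  // mixed acquire modes are tolerated
    _safepoint_check_always      // every acquire performs the safepoint check
  };
  MonitorSketch(int rank, const char* name, bool allow_vm_block,
                SafepointCheckRequired check)
    : _check(check) { (void)rank; (void)name; (void)allow_vm_block; }
 private:
  SafepointCheckRequired _check;  // recorded so debug builds can verify usage
};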
@ -83,9 +83,11 @@ CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
// Note: this requires that CFLspace c'tors
// are called serially in the order in which the locks are
// are acquired in the program text. This is true today.
_freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
_freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true,
Monitor::_safepoint_check_sometimes),
_parDictionaryAllocLock(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
"CompactibleFreeListSpace._dict_par_lock", true),
"CompactibleFreeListSpace._dict_par_lock", true,
Monitor::_safepoint_check_never),
_rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
CMSRescanMultiple),
_marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
@ -152,8 +154,7 @@ CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
// Initialize locks for parallel case.
for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
_indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
"a freelist par lock",
true);
"a freelist par lock", true, Mutex::_safepoint_check_sometimes);
DEBUG_ONLY(
_indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
)
@ -2559,12 +2560,12 @@ void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
x }

// Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
// OldPLABSize, whose static default is different; if overridden at the
// Initialize with default setting for CMS, _not_
// generic OldPLABSize, whose static default is different; if overridden at the
// command-line, this will get reinitialized via a call to
// modify_initialization() below.
AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[] =
VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CFLS_LAB::_default_dynamic_old_plab_size));
size_t CFLS_LAB::_global_num_blocks[] = VECTOR_257(0);
uint CFLS_LAB::_global_num_workers[] = VECTOR_257(0);

@ -690,6 +690,9 @@ class CFLS_LAB : public CHeapObj<mtGC> {
void get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl);

public:
static const int _default_dynamic_old_plab_size = 16;
static const int _default_static_old_plab_size = 50;

CFLS_LAB(CompactibleFreeListSpace* cfls);

// Allocate and return a block of the given size, or else return NULL.

@ -42,6 +42,7 @@
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/allocation.hpp"
#include "memory/cardGeneration.inline.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/gcLocker.inline.hpp"
@ -479,7 +480,9 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
_restart_addr(NULL),
_overflow_list(NULL),
_stats(cmsGen),
_eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true)),
_eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true,
//verify that this lock should be acquired with safepoint check.
Monitor::_safepoint_check_sometimes)),
_eden_chunk_array(NULL), // may be set in ctor body
_eden_chunk_capacity(0), // -- ditto --
_eden_chunk_index(0), // -- ditto --
@ -1608,14 +1611,15 @@ void CMSCollector::acquire_control_and_collect(bool full,

// If the collection is being acquired from the background
// collector, there may be references on the discovered
// references lists that have NULL referents (being those
// that were concurrently cleared by a mutator) or
// that are no longer active (having been enqueued concurrently
// by the mutator).
// Scrub the list of those references because Mark-Sweep-Compact
// code assumes referents are not NULL and that all discovered
// Reference objects are active.
ref_processor()->clean_up_discovered_references();
// references lists. Abandon those references, since some
// of them may have become unreachable after concurrent
// discovery; the STW compacting collector will redo discovery
// more precisely, without being subject to floating garbage.
// Leaving otherwise unreachable references in the discovered
// lists would require special handling.
ref_processor()->disable_discovery();
ref_processor()->abandon_partial_discovery();
ref_processor()->verify_no_references_recorded();

if (first_state > Idling) {
save_heap_summary();
@ -1681,7 +1685,7 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);

ref_processor()->set_enqueuing_is_done(false);
ref_processor()->enable_discovery(false /*verify_disabled*/, false /*check_no_refs*/);
ref_processor()->enable_discovery();
ref_processor()->setup_policy(clear_all_soft_refs);
// If an asynchronous collection finishes, the _modUnionTable is
// all clear. If we are assuming the collection from an asynchronous
@ -2998,7 +3002,7 @@ void CMSCollector::checkpointRootsInitial() {
Mutex::_no_safepoint_check_flag);
checkpointRootsInitialWork();
// enable ("weak") refs discovery
rp->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);
rp->enable_discovery();
_collectorState = Marking;
}
SpecializationStats::print();
@ -5945,7 +5949,8 @@ HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
_bm(),
_shifter(shifter),
_lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true) : NULL)
_lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true,
Monitor::_safepoint_check_sometimes) : NULL)
{
_bmStartWord = 0;
_bmWordSize = 0;

@ -187,7 +187,8 @@ class CMSMarkStack: public CHeapObj<mtGC> {

public:
CMSMarkStack():
_par_lock(Mutex::event, "CMSMarkStack._par_lock", true),
_par_lock(Mutex::event, "CMSMarkStack._par_lock", true,
Monitor::_safepoint_check_never),
_hit_limit(0),
_failed_double(0) {}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -52,7 +52,8 @@ ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread *nex
// The 0th worker in notified by mutator threads and has a special monitor.
// The last worker is used for young gen rset size sampling.
if (worker_id > 0) {
_monitor = new Monitor(Mutex::nonleaf, "Refinement monitor", true);
_monitor = new Monitor(Mutex::nonleaf, "Refinement monitor", true,
Monitor::_safepoint_check_never);
} else {
_monitor = DirtyCardQ_CBL_mon;
}

@ -971,7 +971,7 @@ void ConcurrentMark::checkpointRootsInitialPost() {
// Start Concurrent Marking weak-reference discovery.
ReferenceProcessor* rp = g1h->ref_processor_cm();
// enable ("weak") refs discovery
rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
rp->enable_discovery();
rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();

@ -254,25 +254,23 @@ void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
bool force) {
assert(!force, "not supported for GC alloc regions");
return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived);
return _g1h->new_gc_alloc_region(word_size, count(), InCSetState::Young);
}

void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
size_t allocated_bytes) {
_g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
GCAllocForSurvived);
_g1h->retire_gc_alloc_region(alloc_region, allocated_bytes, InCSetState::Young);
}

HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
bool force) {
assert(!force, "not supported for GC alloc regions");
return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForTenured);
return _g1h->new_gc_alloc_region(word_size, count(), InCSetState::Old);
}

void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
size_t allocated_bytes) {
_g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
GCAllocForTenured);
_g1h->retire_gc_alloc_region(alloc_region, allocated_bytes, InCSetState::Old);
}

HeapRegion* OldGCAllocRegion::release() {

@ -113,15 +113,16 @@ void G1DefaultAllocator::abandon_gc_alloc_regions() {
G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
ParGCAllocBuffer(gclab_word_size), _retired(true) { }

HeapWord* G1ParGCAllocator::allocate_slow(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context) {
HeapWord* obj = NULL;
size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
HeapWord* G1ParGCAllocator::allocate_direct_or_new_plab(InCSetState dest,
size_t word_sz,
AllocationContext_t context) {
size_t gclab_word_size = _g1h->desired_plab_sz(dest);
if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose, context);
G1ParGCAllocBuffer* alloc_buf = alloc_buffer(dest, context);
add_to_alloc_buffer_waste(alloc_buf->words_remaining());
alloc_buf->retire(false /* end_of_gc */, false /* retain */);

HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size, context);
HeapWord* buf = _g1h->par_allocate_during_gc(dest, gclab_word_size, context);
if (buf == NULL) {
return NULL; // Let caller handle allocation failure.
}
@ -129,30 +130,33 @@ HeapWord* G1ParGCAllocator::allocate_slow(GCAllocPurpose purpose, size_t word_sz
alloc_buf->set_word_size(gclab_word_size);
alloc_buf->set_buf(buf);

obj = alloc_buf->allocate(word_sz);
HeapWord* const obj = alloc_buf->allocate(word_sz);
assert(obj != NULL, "buffer was definitely big enough...");
return obj;
} else {
obj = _g1h->par_allocate_during_gc(purpose, word_sz, context);
return _g1h->par_allocate_during_gc(dest, word_sz, context);
}
return obj;
}

G1DefaultParGCAllocator::G1DefaultParGCAllocator(G1CollectedHeap* g1h) :
G1ParGCAllocator(g1h),
_surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
_tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)) {

_alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
_alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer;

G1ParGCAllocator(g1h),
_surviving_alloc_buffer(g1h->desired_plab_sz(InCSetState::Young)),
_tenured_alloc_buffer(g1h->desired_plab_sz(InCSetState::Old)) {
for (uint state = 0; state < InCSetState::Num; state++) {
_alloc_buffers[state] = NULL;
}
_alloc_buffers[InCSetState::Young] = &_surviving_alloc_buffer;
_alloc_buffers[InCSetState::Old] = &_tenured_alloc_buffer;
}

void G1DefaultParGCAllocator::retire_alloc_buffers() {
for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
size_t waste = _alloc_buffers[ap]->words_remaining();
add_to_alloc_buffer_waste(waste);
_alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
true /* end_of_gc */,
false /* retain */);
for (uint state = 0; state < InCSetState::Num; state++) {
G1ParGCAllocBuffer* const buf = _alloc_buffers[state];
if (buf != NULL) {
add_to_alloc_buffer_waste(buf->words_remaining());
buf->flush_stats_and_retire(_g1h->alloc_buffer_stats(state),
true /* end_of_gc */,
false /* retain */);
}
}
}

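allocate_direct_or_new_plab() above retires and refills the PLAB only when the request is small relative to the buffer. The guard, isolated as a sketch:

// Sketch only: refill the PLAB when the request wastes little of it;
// otherwise allocate the object directly and keep the current buffer.
static bool should_refill_plab(size_t word_sz, size_t gclab_word_size,
                               unsigned waste_pct /* ParallelGCBufferWastePct */) {
  return word_sz * 100 < gclab_word_size * waste_pct;
}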
@ -27,14 +27,9 @@

#include "gc_implementation/g1/g1AllocationContext.hpp"
#include "gc_implementation/g1/g1AllocRegion.hpp"
#include "gc_implementation/g1/g1InCSetState.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"

enum GCAllocPurpose {
GCAllocForTenured,
GCAllocForSurvived,
GCAllocPurposeCount
};

// Base class for G1 allocators.
class G1Allocator : public CHeapObj<mtGC> {
friend class VMStructs;
@ -178,20 +173,40 @@ class G1ParGCAllocator : public CHeapObj<mtGC> {
protected:
G1CollectedHeap* _g1h;

// The survivor alignment in effect in bytes.
// == 0 : don't align survivors
// != 0 : align survivors to that alignment
// These values were chosen to favor the non-alignment case since some
// architectures have a special compare against zero instructions.
const uint _survivor_alignment_bytes;

size_t _alloc_buffer_waste;
size_t _undo_waste;

void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
void add_to_undo_waste(size_t waste) { _undo_waste += waste; }

HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context);

virtual void retire_alloc_buffers() = 0;
virtual G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose, AllocationContext_t context) = 0;
virtual G1ParGCAllocBuffer* alloc_buffer(InCSetState dest, AllocationContext_t context) = 0;

// Calculate the survivor space object alignment in bytes. Returns that or 0 if
// there are no restrictions on survivor alignment.
static uint calc_survivor_alignment_bytes() {
assert(SurvivorAlignmentInBytes >= ObjectAlignmentInBytes, "sanity");
if (SurvivorAlignmentInBytes == ObjectAlignmentInBytes) {
// No need to align objects in the survivors differently, return 0
// which means "survivor alignment is not used".
return 0;
} else {
assert(SurvivorAlignmentInBytes > 0, "sanity");
return SurvivorAlignmentInBytes;
}
}

public:
G1ParGCAllocator(G1CollectedHeap* g1h) :
_g1h(g1h), _alloc_buffer_waste(0), _undo_waste(0) {
_g1h(g1h), _survivor_alignment_bytes(calc_survivor_alignment_bytes()),
_alloc_buffer_waste(0), _undo_waste(0) {
}

static G1ParGCAllocator* create_allocator(G1CollectedHeap* g1h);
@ -199,24 +214,40 @@ public:
size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
size_t undo_waste() {return _undo_waste; }

HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context) {
HeapWord* obj = NULL;
if (purpose == GCAllocForSurvived) {
obj = alloc_buffer(purpose, context)->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
// Allocate word_sz words in dest, either directly into the regions or by
// allocating a new PLAB. Returns the address of the allocated memory, NULL if
// not successful.
HeapWord* allocate_direct_or_new_plab(InCSetState dest,
size_t word_sz,
AllocationContext_t context);

// Allocate word_sz words in the PLAB of dest. Returns the address of the
// allocated memory, NULL if not successful.
HeapWord* plab_allocate(InCSetState dest,
size_t word_sz,
AllocationContext_t context) {
G1ParGCAllocBuffer* buffer = alloc_buffer(dest, context);
if (_survivor_alignment_bytes == 0) {
return buffer->allocate(word_sz);
} else {
obj = alloc_buffer(purpose, context)->allocate(word_sz);
return buffer->allocate_aligned(word_sz, _survivor_alignment_bytes);
}
}

HeapWord* allocate(InCSetState dest, size_t word_sz,
AllocationContext_t context) {
HeapWord* const obj = plab_allocate(dest, word_sz, context);
if (obj != NULL) {
return obj;
}
return allocate_slow(purpose, word_sz, context);
return allocate_direct_or_new_plab(dest, word_sz, context);
}

void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
if (alloc_buffer(purpose, context)->contains(obj)) {
assert(alloc_buffer(purpose, context)->contains(obj + word_sz - 1),
void undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
if (alloc_buffer(dest, context)->contains(obj)) {
assert(alloc_buffer(dest, context)->contains(obj + word_sz - 1),
"should contain whole object");
alloc_buffer(purpose, context)->undo_allocation(obj, word_sz);
alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
} else {
CollectedHeap::fill_with_object(obj, word_sz);
add_to_undo_waste(word_sz);
@ -227,13 +258,17 @@ public:
class G1DefaultParGCAllocator : public G1ParGCAllocator {
G1ParGCAllocBuffer _surviving_alloc_buffer;
G1ParGCAllocBuffer _tenured_alloc_buffer;
G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
G1ParGCAllocBuffer* _alloc_buffers[InCSetState::Num];

public:
G1DefaultParGCAllocator(G1CollectedHeap* g1h);

virtual G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose, AllocationContext_t context) {
return _alloc_buffers[purpose];
virtual G1ParGCAllocBuffer* alloc_buffer(InCSetState dest, AllocationContext_t context) {
assert(dest.is_valid(),
err_msg("Allocation buffer index out-of-bounds: " CSETSTATE_FORMAT, dest.value()));
assert(_alloc_buffers[dest.value()] != NULL,
err_msg("Allocation buffer is NULL: " CSETSTATE_FORMAT, dest.value()));
return _alloc_buffers[dest.value()];
}

virtual void retire_alloc_buffers() ;

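These hunks replace the GCAllocPurpose enum with the InCSetState value type. A miniature sketch of the flavor of that type, with predicate names taken from the patch; the encoding shown is from memory of g1InCSetState.hpp and should be treated as an assumption.

// Sketch only: an InCSetState-like value type. The real definition is in
// gc_implementation/g1/g1InCSetState.hpp; the encoding here is an assumption.
#include <stdint.h>

struct InCSetStateSketch {
  // Humongous sorts below NotInCSet so a single signed comparison
  // answers "is this in the collection set?".
  enum Values { Humongous = -1, NotInCSet = 0, Young = 1, Old = 2, Num = 3 };
  int8_t _value;

  bool is_in_cset() const              { return _value > NotInCSet; }
  bool is_humongous() const            { return _value < NotInCSet; }
  bool is_in_cset_or_humongous() const { return _value != NotInCSet; }
  bool is_young() const                { return _value == Young; }
  bool is_old() const                  { return _value == Old; }
  int8_t value() const                 { return _value; }
};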
@ -1301,7 +1301,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
// Temporarily clear the STW ref processor's _is_alive_non_header field.
ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);

ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
ref_processor_stw()->enable_discovery();
ref_processor_stw()->setup_policy(do_clear_all_soft_refs);

// Do collection work
@ -1886,13 +1886,12 @@ jint G1CollectedHeap::initialize() {

initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));

// Create the gen rem set (and barrier set) for the entire reserved region.
_rem_set = collector_policy()->create_rem_set(reserved_region());
set_barrier_set(rem_set()->bs());
if (!barrier_set()->is_a(BarrierSet::G1SATBCTLogging)) {
vm_exit_during_initialization("G1 requires a G1SATBLoggingCardTableModRefBS");
return JNI_ENOMEM;
}
// Create the barrier set for the entire reserved region.
G1SATBCardTableLoggingModRefBS* bs
= new G1SATBCardTableLoggingModRefBS(reserved_region());
bs->initialize();
assert(bs->is_a(BarrierSet::G1SATBCTLogging), "sanity");
set_barrier_set(bs);

// Also create a G1 rem set.
_g1_rem_set = new G1RemSet(this, g1_barrier_set());
@ -3153,8 +3152,6 @@ void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
failures = true;
}
}
if (!silent) gclog_or_tty->print("RemSet ");
rem_set()->verify();

if (G1StringDedup::is_enabled()) {
if (!silent) gclog_or_tty->print("StrDedup ");
@ -3750,8 +3747,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
// reference processing currently works in G1.

// Enable discovery in the STW reference processor
ref_processor_stw()->enable_discovery(true /*verify_disabled*/,
true /*verify_no_refs*/);
ref_processor_stw()->enable_discovery();

{
// We want to temporarily turn off discovery by the
@ -3819,6 +3815,8 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {

register_humongous_regions_with_in_cset_fast_test();

assert(check_cset_fast_test(), "Inconsistency in the InCSetState table.");

_cm->note_start_of_gc();
// We should not verify the per-thread SATB buffers given that
// we have not filtered them yet (we'll do so during the
@ -4048,29 +4046,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
return true;
}

size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
{
size_t gclab_word_size;
switch (purpose) {
case GCAllocForSurvived:
gclab_word_size = _survivor_plab_stats.desired_plab_sz();
break;
case GCAllocForTenured:
gclab_word_size = _old_plab_stats.desired_plab_sz();
break;
default:
assert(false, "unknown GCAllocPurpose");
gclab_word_size = _old_plab_stats.desired_plab_sz();
break;
}

// Prevent humongous PLAB sizes for two reasons:
// * PLABs are allocated using a similar paths as oops, but should
// never be in a humongous region
// * Allowing humongous PLABs needlessly churns the region free lists
return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
}

void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
_drain_in_progress = false;
set_evac_failure_closure(cl);
@ -4196,35 +4171,6 @@ void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
}
}

HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
size_t word_size,
AllocationContext_t context) {
if (purpose == GCAllocForSurvived) {
HeapWord* result = survivor_attempt_allocation(word_size, context);
if (result != NULL) {
return result;
} else {
// Let's try to allocate in the old gen in case we can fit the
// object there.
return old_attempt_allocation(word_size, context);
}
} else {
assert(purpose == GCAllocForTenured, "sanity");
HeapWord* result = old_attempt_allocation(word_size, context);
if (result != NULL) {
return result;
} else {
// Let's try to allocate in the survivors in case we can fit the
// object there.
return survivor_attempt_allocation(word_size, context);
}
}

ShouldNotReachHere();
// Trying to keep some compilers happy.
return NULL;
}

void G1ParCopyHelper::mark_object(oop obj) {
assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");

@ -4267,15 +4213,14 @@ void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {

assert(_worker_id == _par_scan_state->queue_num(), "sanity");

G1CollectedHeap::in_cset_state_t state = _g1->in_cset_state(obj);

if (state == G1CollectedHeap::InCSet) {
const InCSetState state = _g1->in_cset_state(obj);
if (state.is_in_cset()) {
oop forwardee;
markOop m = obj->mark();
if (m->is_marked()) {
forwardee = (oop) m->decode_pointer();
} else {
forwardee = _par_scan_state->copy_to_survivor_space(obj, m);
forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);
}
assert(forwardee != NULL, "forwardee should not be NULL");
oopDesc::encode_store_heap_oop(p, forwardee);
@ -4289,7 +4234,7 @@ void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
do_klass_barrier(p, forwardee);
}
} else {
if (state == G1CollectedHeap::IsHumongous) {
if (state.is_humongous()) {
_g1->set_humongous_is_live(obj);
}
// The object is not in collection set. If we're a root scanning
@ -4609,7 +4554,7 @@ void
G1CollectedHeap::
g1_process_roots(OopClosure* scan_non_heap_roots,
OopClosure* scan_non_heap_weak_roots,
OopsInHeapRegionClosure* scan_rs,
G1ParPushHeapRSClosure* scan_rs,
CLDClosure* scan_strong_clds,
CLDClosure* scan_weak_clds,
CodeBlobClosure* scan_strong_code,
@ -4933,7 +4878,7 @@ private:
}
};

Monitor* G1CodeCacheUnloadingTask::_lock = new Monitor(Mutex::leaf, "Code Cache Unload lock");
Monitor* G1CodeCacheUnloadingTask::_lock = new Monitor(Mutex::leaf, "Code Cache Unload lock", false, Monitor::_safepoint_check_never);

class G1KlassCleaningTask : public StackObj {
BoolObjectClosure* _is_alive;
@ -5145,17 +5090,17 @@ public:
oop obj = *p;
assert(obj != NULL, "the caller should have filtered out NULL values");

G1CollectedHeap::in_cset_state_t cset_state = _g1->in_cset_state(obj);
if (cset_state == G1CollectedHeap::InNeither) {
const InCSetState cset_state = _g1->in_cset_state(obj);
if (!cset_state.is_in_cset_or_humongous()) {
return;
}
if (cset_state == G1CollectedHeap::InCSet) {
if (cset_state.is_in_cset()) {
assert( obj->is_forwarded(), "invariant" );
*p = obj->forwardee();
} else {
assert(!obj->is_forwarded(), "invariant" );
assert(cset_state == G1CollectedHeap::IsHumongous,
err_msg("Only allowed InCSet state is IsHumongous, but is %d", cset_state));
assert(cset_state.is_humongous(),
err_msg("Only allowed InCSet state is IsHumongous, but is %d", cset_state.value()));
_g1->set_humongous_is_live(obj);
}
}
@ -5640,8 +5585,6 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {

init_for_evac_failure(NULL);

rem_set()->prepare_for_younger_refs_iterate(true);

assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
double start_par_time_sec = os::elapsedTime();
double end_par_time_sec;
@ -5951,6 +5894,70 @@ void G1CollectedHeap::check_bitmaps(const char* caller) {
heap_region_iterate(&cl);
guarantee(!cl.failures(), "bitmap verification");
}

class G1CheckCSetFastTableClosure : public HeapRegionClosure {
private:
bool _failures;
public:
G1CheckCSetFastTableClosure() : HeapRegionClosure(), _failures(false) { }

virtual bool doHeapRegion(HeapRegion* hr) {
uint i = hr->hrm_index();
InCSetState cset_state = (InCSetState) G1CollectedHeap::heap()->_in_cset_fast_test.get_by_index(i);
if (hr->is_humongous()) {
if (hr->in_collection_set()) {
gclog_or_tty->print_cr("\n## humongous region %u in CSet", i);
_failures = true;
return true;
}
if (cset_state.is_in_cset()) {
gclog_or_tty->print_cr("\n## inconsistent cset state %d for humongous region %u", cset_state.value(), i);
_failures = true;
return true;
}
if (hr->is_continues_humongous() && cset_state.is_humongous()) {
gclog_or_tty->print_cr("\n## inconsistent cset state %d for continues humongous region %u", cset_state.value(), i);
_failures = true;
return true;
}
} else {
if (cset_state.is_humongous()) {
gclog_or_tty->print_cr("\n## inconsistent cset state %d for non-humongous region %u", cset_state.value(), i);
_failures = true;
return true;
}
if (hr->in_collection_set() != cset_state.is_in_cset()) {
gclog_or_tty->print_cr("\n## in CSet %d / cset state %d inconsistency for region %u",
hr->in_collection_set(), cset_state.value(), i);
_failures = true;
return true;
}
if (cset_state.is_in_cset()) {
if (hr->is_young() != (cset_state.is_young())) {
gclog_or_tty->print_cr("\n## is_young %d / cset state %d inconsistency for region %u",
hr->is_young(), cset_state.value(), i);
_failures = true;
return true;
}
if (hr->is_old() != (cset_state.is_old())) {
gclog_or_tty->print_cr("\n## is_old %d / cset state %d inconsistency for region %u",
hr->is_old(), cset_state.value(), i);
_failures = true;
return true;
}
}
}
return false;
}

bool failures() const { return _failures; }
};

bool G1CollectedHeap::check_cset_fast_test() {
G1CheckCSetFastTableClosure cl;
_hrm.iterate(&cl);
return !cl.failures();
}
#endif // PRODUCT

void G1CollectedHeap::cleanUpCardTable() {
@ -6519,20 +6526,20 @@ void G1CollectedHeap::set_par_threads() {

HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
uint count,
GCAllocPurpose ap) {
InCSetState dest) {
assert(FreeList_lock->owned_by_self(), "pre-condition");

if (count < g1_policy()->max_regions(ap)) {
bool survivor = (ap == GCAllocForSurvived);
if (count < g1_policy()->max_regions(dest)) {
const bool is_survivor = (dest.is_young());
HeapRegion* new_alloc_region = new_region(word_size,
!survivor,
!is_survivor,
true /* do_expand */);
if (new_alloc_region != NULL) {
// We really only need to do this for old regions given that we
// should never scan survivors. But it doesn't hurt to do it
// for survivors too.
new_alloc_region->record_timestamp();
if (survivor) {
if (is_survivor) {
new_alloc_region->set_survivor();
_hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
check_bitmaps("Survivor Region Allocation", new_alloc_region);
@ -6544,8 +6551,6 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
bool during_im = g1_policy()->during_initial_mark_pause();
new_alloc_region->note_start_of_copying(during_im);
return new_alloc_region;
} else {
g1_policy()->note_alloc_region_limit_reached(ap);
}
}
return NULL;
@ -6553,11 +6558,11 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,

void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
size_t allocated_bytes,
GCAllocPurpose ap) {
InCSetState dest) {
bool during_im = g1_policy()->during_initial_mark_pause();
alloc_region->note_end_of_copying(during_im);
g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
if (ap == GCAllocForSurvived) {
if (dest.is_young()) {
young_list()->add_survivor_region(alloc_region);
} else {
_old_set.add(alloc_region);

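The closure above audits the in-cset fast-test table against the regions themselves. For orientation, a sketch of the table's basic shape (one small entry per region, indexed by region number); the container choice here is illustrative, not the real G1 biased array.

// Sketch only: a per-region side table like _in_cset_fast_test, written to by
// register_young/old_region_with_in_cset_fast_test() and read on hot paths.
#include <stdint.h>
#include <vector>

struct InCSetFastTestSketch {
  std::vector<int8_t> _states;  // one state byte per heap region
  explicit InCSetFastTestSketch(size_t num_regions) : _states(num_regions, 0) {}

  void set_in_young(uint32_t region) { _states[region] = 1; }
  void set_in_old(uint32_t region)   { _states[region] = 2; }
  int8_t get_by_index(uint32_t region) const { return _states[region]; }
};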
@@ -32,6 +32,7 @@
#include "gc_implementation/g1/g1AllocRegion.hpp"
#include "gc_implementation/g1/g1BiasedArray.hpp"
#include "gc_implementation/g1/g1HRPrinter.hpp"
+ #include "gc_implementation/g1/g1InCSetState.hpp"
#include "gc_implementation/g1/g1MonitoringSupport.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/g1YCTypes.hpp"
@@ -213,6 +214,9 @@ class G1CollectedHeap : public SharedHeap {
friend class G1MarkSweep;
friend class HeapRegionClaimer;

+ // Testing classes.
+ friend class G1CheckCSetFastTableClosure;
+
private:
// The one and only G1CollectedHeap, so static functions can find it.
static G1CollectedHeap* _g1h;
@@ -547,15 +551,9 @@ protected:
// allocation region, either by picking one or expanding the
// heap, and then allocate a block of the given size. The block
// may not be a humongous - it must fit into a single heap region.
- HeapWord* par_allocate_during_gc(GCAllocPurpose purpose,
- size_t word_size,
- AllocationContext_t context);
-
- HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
- HeapRegion* alloc_region,
- bool par,
- size_t word_size);
-
+ inline HeapWord* par_allocate_during_gc(InCSetState dest,
+ size_t word_size,
+ AllocationContext_t context);
// Ensure that no further allocations can happen in "r", bearing in mind
// that parallel threads might be attempting allocations.
void par_allocate_remaining_space(HeapRegion* r);
@@ -577,9 +575,9 @@ protected:

// For GC alloc regions.
HeapRegion* new_gc_alloc_region(size_t word_size, uint count,
- GCAllocPurpose ap);
+ InCSetState dest);
void retire_gc_alloc_region(HeapRegion* alloc_region,
- size_t allocated_bytes, GCAllocPurpose ap);
+ size_t allocated_bytes, InCSetState dest);

// - if explicit_gc is true, the GC is for a System.gc() or a heap
// inspection request and should collect the entire heap
@@ -640,26 +638,11 @@ public:
// (Rounds up to a HeapRegion boundary.)
bool expand(size_t expand_bytes);

- // Returns the PLAB statistics given a purpose.
- PLABStats* stats_for_purpose(GCAllocPurpose purpose) {
- PLABStats* stats = NULL;
-
- switch (purpose) {
- case GCAllocForSurvived:
- stats = &_survivor_plab_stats;
- break;
- case GCAllocForTenured:
- stats = &_old_plab_stats;
- break;
- default:
- assert(false, "unrecognized GCAllocPurpose");
- }
-
- return stats;
- }
+ // Returns the PLAB statistics for a given destination.
+ inline PLABStats* alloc_buffer_stats(InCSetState dest);

- // Determines PLAB size for a particular allocation purpose.
- size_t desired_plab_sz(GCAllocPurpose purpose);
+ // Determines PLAB size for a given destination.
+ inline size_t desired_plab_sz(InCSetState dest);

inline AllocationContextStats& allocation_context_stats();

@@ -683,8 +666,11 @@ public:
void register_humongous_regions_with_in_cset_fast_test();
// We register a region with the fast "in collection set" test. We
// simply set to true the array slot corresponding to this region.
- void register_region_with_in_cset_fast_test(HeapRegion* r) {
- _in_cset_fast_test.set_in_cset(r->hrm_index());
+ void register_young_region_with_in_cset_fast_test(HeapRegion* r) {
+ _in_cset_fast_test.set_in_young(r->hrm_index());
}
+ void register_old_region_with_in_cset_fast_test(HeapRegion* r) {
+ _in_cset_fast_test.set_in_old(r->hrm_index());
+ }

// This is a fast test on whether a reference points into the
@@ -821,7 +807,7 @@ protected:
// In the sequential case this param will be ignored.
void g1_process_roots(OopClosure* scan_non_heap_roots,
OopClosure* scan_non_heap_weak_roots,
- OopsInHeapRegionClosure* scan_rs,
+ G1ParPushHeapRSClosure* scan_rs,
CLDClosure* scan_strong_clds,
CLDClosure* scan_weak_clds,
CodeBlobClosure* scan_strong_code,
@@ -1181,6 +1167,9 @@ public:
// appropriate error messages and crash.
void check_bitmaps(const char* caller) PRODUCT_RETURN;

+ // Do sanity check on the contents of the in-cset fast test table.
+ bool check_cset_fast_test() PRODUCT_RETURN_( return true; );
+
// verify_region_sets() performs verification over the region
// lists. It will be compiled in the product code to be used when
// necessary (i.e., during heap verification).
@@ -1276,53 +1265,15 @@ public:

inline bool is_in_cset_or_humongous(const oop obj);

- enum in_cset_state_t {
- InNeither, // neither in collection set nor humongous
- InCSet, // region is in collection set only
- IsHumongous // region is a humongous start region
- };
private:
- // Instances of this class are used for quick tests on whether a reference points
- // into the collection set or is a humongous object (points into a humongous
- // object).
- // Each of the array's elements denotes whether the corresponding region is in
- // the collection set or a humongous region.
- // We use this to quickly reclaim humongous objects: by making a humongous region
- // succeed this test, we sort-of add it to the collection set. During the reference
- // iteration closures, when we see a humongous region, we simply mark it as
- // referenced, i.e. live.
- class G1FastCSetBiasedMappedArray : public G1BiasedMappedArray<char> {
- protected:
- char default_value() const { return G1CollectedHeap::InNeither; }
- public:
- void set_humongous(uintptr_t index) {
- assert(get_by_index(index) != InCSet, "Should not overwrite InCSet values");
- set_by_index(index, G1CollectedHeap::IsHumongous);
- }
-
- void clear_humongous(uintptr_t index) {
- set_by_index(index, G1CollectedHeap::InNeither);
- }
-
- void set_in_cset(uintptr_t index) {
- assert(get_by_index(index) != G1CollectedHeap::IsHumongous, "Should not overwrite IsHumongous value");
- set_by_index(index, G1CollectedHeap::InCSet);
- }
-
- bool is_in_cset_or_humongous(HeapWord* addr) const { return get_by_address(addr) != G1CollectedHeap::InNeither; }
- bool is_in_cset(HeapWord* addr) const { return get_by_address(addr) == G1CollectedHeap::InCSet; }
- G1CollectedHeap::in_cset_state_t at(HeapWord* addr) const { return (G1CollectedHeap::in_cset_state_t)get_by_address(addr); }
- void clear() { G1BiasedMappedArray<char>::clear(); }
- };
-
// This array is used for a quick test on whether a reference points into
// the collection set or not. Each of the array's elements denotes whether the
// corresponding region is in the collection set or not.
- G1FastCSetBiasedMappedArray _in_cset_fast_test;
+ G1InCSetStateFastTestBiasedMappedArray _in_cset_fast_test;

public:

- inline in_cset_state_t in_cset_state(const oop obj);
+ inline InCSetState in_cset_state(const oop obj);

// Return "TRUE" iff the given object address is in the reserved
// region of g1.

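The header hunk above splits the single register_region_with_in_cset_fast_test into young and old variants that write distinct states into a region-indexed table. A small stand-alone model of that table follows; the real HotSpot structure is a biased mapped array indexed by heap address, not a std::vector, so treat the container choice here as an assumption made for brevity.

    // Sketch: a region-indexed fast-test table with separate young/old
    // registration, mirroring register_young/old_region_with_in_cset_fast_test.
    #include <cassert>
    #include <cstdint>
    #include <vector>

    enum State : int8_t { Humongous = -1, NotInCSet = 0, Young = 1, Old = 2 };

    class FastTestTable {
      std::vector<int8_t> _state;
    public:
      explicit FastTestTable(size_t regions) : _state(regions, NotInCSet) {}
      // Registration must not overwrite an existing cset entry.
      void set_in_young(size_t i) { assert(_state[i] == NotInCSet); _state[i] = Young; }
      void set_in_old(size_t i)   { assert(_state[i] == NotInCSet); _state[i] = Old; }
      bool is_in_cset(size_t i) const { return _state[i] > NotInCSet; }
    };

    int main() {
      FastTestTable t(16);
      t.set_in_young(3);   // e.g. a young region added to the incremental cset
      t.set_in_old(7);     // e.g. an old region chosen for a mixed collection
      return (t.is_in_cset(3) && t.is_in_cset(7)) ? 0 : 1;
    }
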
@@ -35,6 +35,41 @@
#include "runtime/orderAccess.inline.hpp"
#include "utilities/taskqueue.hpp"

+ PLABStats* G1CollectedHeap::alloc_buffer_stats(InCSetState dest) {
+ switch (dest.value()) {
+ case InCSetState::Young:
+ return &_survivor_plab_stats;
+ case InCSetState::Old:
+ return &_old_plab_stats;
+ default:
+ ShouldNotReachHere();
+ return NULL; // Keep some compilers happy
+ }
+ }
+
+ size_t G1CollectedHeap::desired_plab_sz(InCSetState dest) {
+ size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_sz();
+ // Prevent humongous PLAB sizes for two reasons:
+ // * PLABs are allocated using similar paths as oops, but should
+ // never be in a humongous region
+ // * Allowing humongous PLABs needlessly churns the region free lists
+ return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
+ }
+
+ HeapWord* G1CollectedHeap::par_allocate_during_gc(InCSetState dest,
+ size_t word_size,
+ AllocationContext_t context) {
+ switch (dest.value()) {
+ case InCSetState::Young:
+ return survivor_attempt_allocation(word_size, context);
+ case InCSetState::Old:
+ return old_attempt_allocation(word_size, context);
+ default:
+ ShouldNotReachHere();
+ return NULL; // Keep some compilers happy
+ }
+ }
+
// Inline functions for G1CollectedHeap

inline AllocationContextStats& G1CollectedHeap::allocation_context_stats() {
@@ -203,7 +238,7 @@ bool G1CollectedHeap::is_in_cset_or_humongous(const oop obj) {
return _in_cset_fast_test.is_in_cset_or_humongous((HeapWord*)obj);
}

- G1CollectedHeap::in_cset_state_t G1CollectedHeap::in_cset_state(const oop obj) {
+ InCSetState G1CollectedHeap::in_cset_state(const oop obj) {
return _in_cset_fast_test.at((HeapWord*)obj);
}

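The desired_plab_sz added above clamps whatever size the PLAB statistics suggest so that a PLAB itself can never become a humongous allocation. A tiny runnable illustration of that clamp follows; the region size and statistics value are invented numbers, not VM constants.

    // Sketch: cap the statistics-driven PLAB size below the humongous
    // threshold, as in G1CollectedHeap::desired_plab_sz above.
    #include <algorithm>
    #include <cstdio>

    int main() {
      const size_t region_words = 1u << 18;                 // assume a 2MB region of 8-byte words
      const size_t humongous_threshold_words = region_words / 2;
      const size_t desired_from_stats = 3u << 17;           // stats may ask for more than fits
      const size_t plab_words = std::min(humongous_threshold_words, desired_from_stats);
      std::printf("PLAB clamped to %zu words\n", plab_words);
      return 0;
    }

Besides keeping PLABs out of humongous regions, the comment in the diff notes the second motivation: oversized PLABs would repeatedly pull regions off and push them back onto the free lists.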
@@ -1437,18 +1437,6 @@ bool G1CollectorPolicy::can_expand_young_list() {
return young_list_length < young_list_max_length;
}

- uint G1CollectorPolicy::max_regions(int purpose) {
- switch (purpose) {
- case GCAllocForSurvived:
- return _max_survivor_regions;
- case GCAllocForTenured:
- return REGIONS_UNLIMITED;
- default:
- ShouldNotReachHere();
- return REGIONS_UNLIMITED;
- };
- }
-
void G1CollectorPolicy::update_max_gc_locker_expansion() {
uint expansion_region_num = 0;
if (GCLockerEdenExpansionPercent > 0) {
@@ -1634,7 +1622,7 @@ void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
hr->set_next_in_collection_set(_collection_set);
_collection_set = hr;
_collection_set_bytes_used_before += hr->used();
- _g1->register_region_with_in_cset_fast_test(hr);
+ _g1->register_old_region_with_in_cset_fast_test(hr);
size_t rs_length = hr->rem_set()->occupied();
_recorded_rs_lengths += rs_length;
_old_cset_region_length += 1;
@@ -1767,7 +1755,7 @@ void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
hr->set_in_collection_set(true);
assert( hr->next_in_collection_set() == NULL, "invariant");

- _g1->register_region_with_in_cset_fast_test(hr);
+ _g1->register_young_region_with_in_cset_fast_test(hr);
}

// Add the region at the RHS of the incremental cset

@@ -881,28 +881,20 @@ private:
public:
uint tenuring_threshold() const { return _tenuring_threshold; }

- inline GCAllocPurpose
- evacuation_destination(HeapRegion* src_region, uint age, size_t word_sz) {
- if (age < _tenuring_threshold && src_region->is_young()) {
- return GCAllocForSurvived;
- } else {
- return GCAllocForTenured;
- }
- }
-
- inline bool track_object_age(GCAllocPurpose purpose) {
- return purpose == GCAllocForSurvived;
- }
-
static const uint REGIONS_UNLIMITED = (uint) -1;

- uint max_regions(int purpose);
-
- // The limit on regions for a particular purpose is reached.
- void note_alloc_region_limit_reached(int purpose) {
- if (purpose == GCAllocForSurvived) {
- _tenuring_threshold = 0;
+ uint max_regions(InCSetState dest) {
+ switch (dest.value()) {
+ case InCSetState::Young:
+ return _max_survivor_regions;
+ case InCSetState::Old:
+ return REGIONS_UNLIMITED;
+ default:
+ assert(false, err_msg("Unknown dest state: " CSETSTATE_FORMAT, dest.value()));
+ break;
}
+ // keep some compilers happy
+ return 0;
}

void note_start_adding_survivor_regions() {

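The policy hunk above folds the old evacuation_destination tenuring decision and the per-purpose region cap into one max_regions(InCSetState) lookup: survivor allocation stays bounded while old allocation is unbounded. A compilable stand-alone model of that dispatch follows, with invented names standing in for the VM's types.

    // Sketch: survivor regions are capped by policy, old regions are not.
    #include <cstdint>
    #include <cstdio>

    const unsigned REGIONS_UNLIMITED = (unsigned)-1;

    unsigned max_regions(int8_t dest_value, unsigned max_survivor_regions) {
      switch (dest_value) {
        case 1:  return max_survivor_regions;  // Young / survivor destination
        case 2:  return REGIONS_UNLIMITED;     // Old destination
        default: return 0;                     // invalid state, would assert in the VM
      }
    }

    int main() {
      std::printf("survivor cap: %u\n", max_regions(1, 32));
      std::printf("old cap: %u\n", max_regions(2, 32));
      return 0;
    }
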
hotspot/src/share/vm/gc_implementation/g1/g1InCSetState.hpp (new file, 132 lines)
@@ -0,0 +1,132 @@
+ /*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+ #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1INCSETSTATE_HPP
+ #define SHARE_VM_GC_IMPLEMENTATION_G1_G1INCSETSTATE_HPP
+
+ #include "gc_implementation/g1/g1BiasedArray.hpp"
+ #include "memory/allocation.hpp"
+
+ // Per-region state during garbage collection.
+ struct InCSetState {
+ public:
+ // We use different types to represent the state value. Particularly SPARC puts
+ // values in structs from "left to right", i.e. MSB to LSB. This results in many
+ // unnecessary shift operations when loading and storing values of this type.
+ // This degrades performance significantly (>10%) on that platform.
+ // Other tested ABIs do not seem to have this problem, and actually tend to
+ // favor smaller types, so we use the smallest usable type there.
+ #ifdef SPARC
+ #define CSETSTATE_FORMAT INTPTR_FORMAT
+ typedef intptr_t in_cset_state_t;
+ #else
+ #define CSETSTATE_FORMAT "%d"
+ typedef int8_t in_cset_state_t;
+ #endif
+ private:
+ in_cset_state_t _value;
+ public:
+ enum {
+ // Selection of the values was driven to micro-optimize the encoding and
+ // frequency of the checks.
+ // The most common check is whether the region is in the collection set or not.
+ // This encoding allows us to use an != 0 check which in some architectures
+ // (x86*) can be encoded slightly more efficiently than a normal comparison
+ // against zero.
+ // The same situation occurs when checking whether the region is humongous
+ // or not, which is encoded by values < 0.
+ // The other values are simply encoded in increasing generation order, which
+ // makes getting the next generation fast by a simple increment.
+ Humongous = -1, // The region is humongous - note that actually any value < 0 would be possible here.
+ NotInCSet = 0, // The region is not in the collection set.
+ Young = 1, // The region is in the collection set and a young region.
+ Old = 2, // The region is in the collection set and an old region.
+ Num
+ };
+
+ InCSetState(in_cset_state_t value = NotInCSet) : _value(value) {
+ assert(is_valid(), err_msg("Invalid state %d", _value));
+ }
+
+ in_cset_state_t value() const { return _value; }
+
+ void set_old() { _value = Old; }
+
+ bool is_in_cset_or_humongous() const { return _value != NotInCSet; }
+ bool is_in_cset() const { return _value > NotInCSet; }
+ bool is_humongous() const { return _value < NotInCSet; }
+ bool is_young() const { return _value == Young; }
+ bool is_old() const { return _value == Old; }
+
+ #ifdef ASSERT
+ bool is_default() const { return !is_in_cset_or_humongous(); }
+ bool is_valid() const { return (_value >= Humongous) && (_value < Num); }
+ bool is_valid_gen() const { return (_value >= Young && _value <= Old); }
+ #endif
+ };
+
+ // Instances of this class are used for quick tests on whether a reference points
+ // into the collection set and into which generation or is a humongous object
+ //
+ // Each of the array's elements indicates whether the corresponding region is in
+ // the collection set and if so in which generation, or a humongous region.
+ //
+ // We use this to speed up reference processing during young collection and
+ // quickly reclaim humongous objects. For the latter, by making a humongous region
+ // succeed this test, we sort-of add it to the collection set. During the reference
+ // iteration closures, when we see a humongous region, we then simply mark it as
+ // referenced, i.e. live.
+ class G1InCSetStateFastTestBiasedMappedArray : public G1BiasedMappedArray<InCSetState> {
+ protected:
+ InCSetState default_value() const { return InCSetState::NotInCSet; }
+ public:
+ void set_humongous(uintptr_t index) {
+ assert(get_by_index(index).is_default(),
+ err_msg("State at index " INTPTR_FORMAT " should be default but is " CSETSTATE_FORMAT, index, get_by_index(index).value()));
+ set_by_index(index, InCSetState::Humongous);
+ }
+
+ void clear_humongous(uintptr_t index) {
+ set_by_index(index, InCSetState::NotInCSet);
+ }
+
+ void set_in_young(uintptr_t index) {
+ assert(get_by_index(index).is_default(),
+ err_msg("State at index " INTPTR_FORMAT " should be default but is " CSETSTATE_FORMAT, index, get_by_index(index).value()));
+ set_by_index(index, InCSetState::Young);
+ }
+
+ void set_in_old(uintptr_t index) {
+ assert(get_by_index(index).is_default(),
+ err_msg("State at index " INTPTR_FORMAT " should be default but is " CSETSTATE_FORMAT, index, get_by_index(index).value()));
+ set_by_index(index, InCSetState::Old);
+ }
+
+ bool is_in_cset_or_humongous(HeapWord* addr) const { return at(addr).is_in_cset_or_humongous(); }
+ bool is_in_cset(HeapWord* addr) const { return at(addr).is_in_cset(); }
+ InCSetState at(HeapWord* addr) const { return get_by_address(addr); }
+ void clear() { G1BiasedMappedArray<InCSetState>::clear(); }
+ };
+
+ #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1INCSETSTATE_HPP
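The new header's comment explains the value encoding: not-in-cset is 0 so the hottest check is a plain test against zero, humongous is any negative value, and the in-cset generations grow upward so "next generation" is a simple increment. The following stand-alone sketch demonstrates exactly those properties; it is a model of the header above, not the VM type itself.

    // Sketch: the InCSetState encoding and its three cheap predicates.
    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    struct State {
      enum { Humongous = -1, NotInCSet = 0, Young = 1, Old = 2, Num };
      int8_t v;
      bool in_cset_or_humongous() const { return v != NotInCSet; } // hottest test: != 0
      bool in_cset() const   { return v > NotInCSet; }             // positive values
      bool humongous() const { return v < NotInCSet; }             // negative values
      State next_generation() const {                              // Young(1) -> Old(2)
        assert(v == Young);
        return State{ (int8_t)(v + 1) };                           // "a simple increment"
      }
    };

    int main() {
      State young{State::Young};
      assert(young.in_cset_or_humongous() && young.in_cset());
      assert(young.next_generation().v == State::Old);

      State hum{State::Humongous};
      assert(hum.in_cset_or_humongous() && hum.humongous() && !hum.in_cset());

      std::puts("encoding behaves as described");
      return 0;
    }
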
@@ -26,6 +26,7 @@
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP

#include "memory/iterator.hpp"
+ #include "oops/markOop.hpp"

class HeapRegion;
class G1CollectedHeap;
@@ -239,14 +240,14 @@ class G1UpdateRSOrPushRefOopClosure: public ExtendedOopClosure {
G1CollectedHeap* _g1;
G1RemSet* _g1_rem_set;
HeapRegion* _from;
- OopsInHeapRegionClosure* _push_ref_cl;
+ G1ParPushHeapRSClosure* _push_ref_cl;
bool _record_refs_into_cset;
uint _worker_i;

public:
G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
G1RemSet* rs,
- OopsInHeapRegionClosure* push_ref_cl,
+ G1ParPushHeapRSClosure* push_ref_cl,
bool record_refs_into_cset,
uint worker_i = 0);

@@ -256,7 +257,8 @@ public:
}

bool self_forwarded(oop obj) {
- bool result = (obj->is_forwarded() && (obj->forwardee() == obj));
+ markOop m = obj->mark();
+ bool result = (m->is_marked() && ((oop)m->decode_pointer() == obj));
return result;
}

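The self_forwarded change above reads the mark word once and decodes the pointer itself, instead of asking the object whether it is forwarded and then loading the forwardee in a second step. A toy model of that single-load check follows; HotSpot's markOop encodes far more than one bit, so this layout is purely illustrative.

    // Sketch: detect a forward-to-self mark with one load of the mark word.
    #include <cstdint>
    #include <cstdio>

    struct Obj;
    struct Mark {
      uintptr_t bits;                                      // low bit = "marked/forwarded"
      bool is_marked() const { return (bits & 1u) != 0; }
      Obj* decode_pointer() const { return reinterpret_cast<Obj*>(bits & ~uintptr_t(1)); }
    };
    struct Obj { Mark mark; };

    bool self_forwarded(Obj* obj) {
      Mark m = obj->mark;                                  // single load
      return m.is_marked() && m.decode_pointer() == obj;
    }

    int main() {
      Obj o = { { 0 } };
      o.mark.bits = reinterpret_cast<uintptr_t>(&o) | 1u;  // forward-to-self, as on evacuation failure
      std::printf("self-forwarded: %s\n", self_forwarded(&o) ? "yes" : "no");
      return 0;
    }
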
@@ -67,8 +67,8 @@ inline void G1ParScanClosure::do_oop_nv(T* p) {

if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
- G1CollectedHeap::in_cset_state_t state = _g1->in_cset_state(obj);
- if (state == G1CollectedHeap::InCSet) {
+ const InCSetState state = _g1->in_cset_state(obj);
+ if (state.is_in_cset()) {
// We're not going to even bother checking whether the object is
// already forwarded or not, as this usually causes an immediate
// stall. We'll try to prefetch the object (for write, given that
@@ -87,7 +87,7 @@ inline void G1ParScanClosure::do_oop_nv(T* p) {

_par_scan_state->push_on_queue(p);
} else {
- if (state == G1CollectedHeap::IsHumongous) {
+ if (state.is_humongous()) {
_g1->set_humongous_is_live(obj);
}
_par_scan_state->update_rs(_from, p, _worker_id);

@@ -131,6 +131,9 @@ MemRegion G1PageBasedVirtualSpace::commit(uintptr_t start, size_t size_in_pages)
_committed.set_range(start, start + size_in_pages);

MemRegion result((HeapWord*)page_start(start), byte_size_for_pages(size_in_pages) / HeapWordSize);
+ if (AlwaysPreTouch) {
+ os::pretouch_memory((char*)result.start(), (char*)result.end());
+ }
return result;
}

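The hunk above pretouches newly committed pages when AlwaysPreTouch is set, so the OS backs the whole range before the GC first writes into it. The following portable sketch shows what a pretouch amounts to: one read-write touch per page. It assumes a POSIX sysconf for the page size; os::pretouch_memory itself is platform code.

    // Sketch: fault in every page of a committed range up front.
    #include <cstdio>
    #include <cstdlib>
    #include <unistd.h>

    static void pretouch_memory(char* start, char* end) {
      const long page = sysconf(_SC_PAGESIZE);
      for (volatile char* p = start; p < end; p += page) {
        *p = *p;   // read-write touch forces the kernel to back the page
      }
    }

    int main() {
      const size_t sz = 1 << 20;
      char* mem = static_cast<char*>(std::malloc(sz));
      if (mem == NULL) return 1;
      pretouch_memory(mem, mem + sz);
      std::puts("range pretouched");
      std::free(mem);
      return 0;
    }

The payoff is predictability: the page faults happen at commit time rather than inside a GC pause.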
@@ -38,6 +38,7 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num,
_g1_rem(g1h->g1_rem_set()),
_hash_seed(17), _queue_num(queue_num),
_term_attempts(0),
+ _tenuring_threshold(g1h->g1_policy()->tenuring_threshold()),
_age_table(false), _scanner(g1h, rp),
_strong_roots_time(0), _term_time(0) {
_scanner.set_par_scan_thread_state(this);
@@ -59,6 +60,12 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num,

_g1_par_allocator = G1ParGCAllocator::create_allocator(_g1h);

+ _dest[InCSetState::NotInCSet] = InCSetState::NotInCSet;
+ // The dest for Young is used when the objects are aged enough to
+ // need to be moved to the next space.
+ _dest[InCSetState::Young] = InCSetState::Old;
+ _dest[InCSetState::Old] = InCSetState::Old;
+
_start = os::elapsedTime();
}
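The constructor hunk above builds a per-thread _dest[] table that maps each source cset state to the destination an evacuated object should move to: a young object that ages out goes to old, and old stays old. A minimal model of that table follows; the enum values mirror the new header, everything else is illustrative.

    // Sketch: the source-state -> destination-state table set up per GC thread.
    #include <cstdio>

    enum { NotInCSet = 0, Young = 1, Old = 2, Num = 3 };

    int main() {
      int dest[Num];
      dest[NotInCSet] = NotInCSet;
      dest[Young] = Old;   // consulted once an object's age passes the tenuring threshold
      dest[Old] = Old;

      const int src = Young;
      std::printf("aged-out young object copies to state %d\n", dest[src]);
      return 0;
    }

Caching the tenuring threshold in the thread state (also added above) means the hot copy path consults a local field instead of the shared policy object.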
@@ -150,52 +157,94 @@ void G1ParScanThreadState::trim_queue() {
} while (!_refs->is_empty());
}

- oop G1ParScanThreadState::copy_to_survivor_space(oop const old,
+ HeapWord* G1ParScanThreadState::allocate_in_next_plab(InCSetState const state,
+ InCSetState* dest,
+ size_t word_sz,
+ AllocationContext_t const context) {
+ assert(state.is_in_cset_or_humongous(), err_msg("Unexpected state: " CSETSTATE_FORMAT, state.value()));
+ assert(dest->is_in_cset_or_humongous(), err_msg("Unexpected dest: " CSETSTATE_FORMAT, dest->value()));
+
+ // Right now we only have two types of regions (young / old) so
+ // let's keep the logic here simple. We can generalize it when necessary.
+ if (dest->is_young()) {
+ HeapWord* const obj_ptr = _g1_par_allocator->allocate(InCSetState::Old,
+ word_sz, context);
+ if (obj_ptr == NULL) {
+ return NULL;
+ }
+ // Make sure that we won't attempt to copy any other objects out
+ // of a survivor region (given that apparently we cannot allocate
+ // any new ones) to avoid coming into this slow path.
+ _tenuring_threshold = 0;
+ dest->set_old();
+ return obj_ptr;
+ } else {
+ assert(dest->is_old(), err_msg("Unexpected dest: " CSETSTATE_FORMAT, dest->value()));
+ // no other space to try.
+ return NULL;
+ }
+ }
+
+ InCSetState G1ParScanThreadState::next_state(InCSetState const state, markOop const m, uint& age) {
+ if (state.is_young()) {
+ age = !m->has_displaced_mark_helper() ? m->age()
+ : m->displaced_mark_helper()->age();
+ if (age < _tenuring_threshold) {
+ return state;
+ }
+ }
+ return dest(state);
+ }
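copy_to_survivor_space, shown next, now works through a fixed fallback chain: fast in-PLAB bump allocation, then a direct-or-new-PLAB attempt, then the next "generation's" PLAB via allocate_in_next_plab, and only then the evacuation-failure path. The sketch below models that chain with stub allocators; every function here is a stand-in, and the buffer sizes are arbitrary.

    // Sketch: the three-step allocation fallback before evacuation failure.
    #include <cstdio>

    void* plab_allocate(size_t)               { return NULL; } // assume the PLAB is full
    void* allocate_direct_or_new_plab(size_t) { return NULL; } // assume that space is exhausted too
    void* allocate_in_next_plab(size_t sz) {                   // assume old gen still has room
      static char buf[4096];
      return sz <= sizeof(buf) ? buf : NULL;
    }

    void* evacuate(size_t word_sz) {
      void* p = plab_allocate(word_sz);
      if (p == NULL) {
        p = allocate_direct_or_new_plab(word_sz);
        if (p == NULL) {
          p = allocate_in_next_plab(word_sz);
          // if still NULL, the caller takes the evacuation-failure path
        }
      }
      return p;
    }

    int main() {
      std::printf("allocation %s\n", evacuate(64) ? "succeeded in next PLAB" : "failed");
      return 0;
    }
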
+ oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
+ oop const old,
markOop const old_mark) {
- size_t word_sz = old->size();
- HeapRegion* from_region = _g1h->heap_region_containing_raw(old);
+ const size_t word_sz = old->size();
+ HeapRegion* const from_region = _g1h->heap_region_containing_raw(old);
// +1 to make the -1 indexes valid...
- int young_index = from_region->young_index_in_cset()+1;
+ const int young_index = from_region->young_index_in_cset()+1;
assert( (from_region->is_young() && young_index > 0) ||
(!from_region->is_young() && young_index == 0), "invariant" );
- G1CollectorPolicy* g1p = _g1h->g1_policy();
- uint age = old_mark->has_displaced_mark_helper() ? old_mark->displaced_mark_helper()->age()
- : old_mark->age();
- GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
- word_sz);
- AllocationContext_t context = from_region->allocation_context();
- HeapWord* obj_ptr = _g1_par_allocator->allocate(alloc_purpose, word_sz, context);
+ const AllocationContext_t context = from_region->allocation_context();
+
+ uint age = 0;
+ InCSetState dest_state = next_state(state, old_mark, age);
+ HeapWord* obj_ptr = _g1_par_allocator->plab_allocate(dest_state, word_sz, context);

// PLAB allocations should succeed most of the time, so we'll
// normally check against NULL once and that's it.
if (obj_ptr == NULL) {
+ obj_ptr = _g1_par_allocator->allocate_direct_or_new_plab(dest_state, word_sz, context);
+ if (obj_ptr == NULL) {
+ obj_ptr = allocate_in_next_plab(state, &dest_state, word_sz, context);
+ if (obj_ptr == NULL) {
// This will either forward-to-self, or detect that someone else has
// installed a forwarding pointer.
return _g1h->handle_evacuation_failure_par(this, old);
+ }
+ }
}

+ assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");
#ifndef PRODUCT
// Should this evacuation fail?
if (_g1h->evacuation_should_fail()) {
- if (obj_ptr != NULL) {
- _g1_par_allocator->undo_allocation(alloc_purpose, obj_ptr, word_sz, context);
- obj_ptr = NULL;
- }
- }
- #endif // !PRODUCT
-
- if (obj_ptr == NULL) {
- // This will either forward-to-self, or detect that someone else has
- // installed a forwarding pointer.
+ // Doing this after all the allocation attempts also tests the
+ // undo_allocation() method too.
+ _g1_par_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
return _g1h->handle_evacuation_failure_par(this, old);
}
-
- oop obj = oop(obj_ptr);
+ #endif // !PRODUCT

// We're going to allocate linearly, so might as well prefetch ahead.
Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);

- oop forward_ptr = old->forward_to_atomic(obj);
+ const oop obj = oop(obj_ptr);
+ const oop forward_ptr = old->forward_to_atomic(obj);
if (forward_ptr == NULL) {
Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);

- // alloc_purpose is just a hint to allocate() above, recheck the type of region
- // we actually allocated from and update alloc_purpose accordingly
- HeapRegion* to_region = _g1h->heap_region_containing_raw(obj_ptr);
- alloc_purpose = to_region->is_young() ? GCAllocForSurvived : GCAllocForTenured;
-
- if (g1p->track_object_age(alloc_purpose)) {
+ if (dest_state.is_young()) {
if (age < markOopDesc::max_age) {
age++;
}
@@ -215,13 +264,19 @@ oop G1ParScanThreadState::copy_to_survivor_space(oop const old,
}

if (G1StringDedup::is_enabled()) {
- G1StringDedup::enqueue_from_evacuation(from_region->is_young(),
- to_region->is_young(),
+ const bool is_from_young = state.is_young();
+ const bool is_to_young = dest_state.is_young();
+ assert(is_from_young == _g1h->heap_region_containing_raw(old)->is_young(),
+ "sanity");
+ assert(is_to_young == _g1h->heap_region_containing_raw(obj)->is_young(),
+ "sanity");
+ G1StringDedup::enqueue_from_evacuation(is_from_young,
+ is_to_young,
queue_num(),
obj);
}

- size_t* surv_young_words = surviving_young_words();
+ size_t* const surv_young_words = surviving_young_words();
surv_young_words[young_index] += word_sz;

if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
@@ -232,14 +287,13 @@ oop G1ParScanThreadState::copy_to_survivor_space(oop const old,
oop* old_p = set_partial_array_mask(old);
push_on_queue(old_p);
} else {
// No point in using the slower heap_region_containing() method,
// given that we know obj is in the heap.
- _scanner.set_region(_g1h->heap_region_containing_raw(obj));
+ HeapRegion* const to_region = _g1h->heap_region_containing_raw(obj_ptr);
+ _scanner.set_region(to_region);
obj->oop_iterate_backwards(&_scanner);
}
+ return obj;
} else {
- _g1_par_allocator->undo_allocation(alloc_purpose, obj_ptr, word_sz, context);
- obj = forward_ptr;
+ _g1_par_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
+ return forward_ptr;
}
- return obj;
}

@@ -46,14 +46,16 @@ class G1ParScanThreadState : public StackObj {
G1SATBCardTableModRefBS* _ct_bs;
G1RemSet* _g1_rem;

G1ParGCAllocator* _g1_par_allocator;

ageTable _age_table;
+ InCSetState _dest[InCSetState::Num];
+ // Local tenuring threshold.
+ uint _tenuring_threshold;
G1ParScanClosure _scanner;

size_t _alloc_buffer_waste;
size_t _undo_waste;

OopsInHeapRegionClosure* _evac_failure_cl;

@@ -82,6 +84,14 @@ class G1ParScanThreadState : public StackObj {
DirtyCardQueue& dirty_card_queue() { return _dcq; }
G1SATBCardTableModRefBS* ctbs() { return _ct_bs; }

+ InCSetState dest(InCSetState original) const {
+ assert(original.is_valid(),
+ err_msg("Original state invalid: " CSETSTATE_FORMAT, original.value()));
+ assert(_dest[original.value()].is_valid_gen(),
+ err_msg("Dest state is invalid: " CSETSTATE_FORMAT, _dest[original.value()].value()));
+ return _dest[original.value()];
+ }
+
public:
G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
~G1ParScanThreadState();
@@ -112,7 +122,6 @@ class G1ParScanThreadState : public StackObj {
}
}
}
- public:

void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
_evac_failure_cl = evac_failure_cl;
@@ -193,9 +202,20 @@ class G1ParScanThreadState : public StackObj {
template <class T> inline void deal_with_reference(T* ref_to_scan);

inline void dispatch_reference(StarTask ref);
+
+ // Tries to allocate word_sz in the PLAB of the next "generation" after trying to
+ // allocate into dest. State is the original (source) cset state for the object
+ // that is allocated for.
+ // Returns a non-NULL pointer if successful, and updates dest if required.
+ HeapWord* allocate_in_next_plab(InCSetState const state,
+ InCSetState* dest,
+ size_t word_sz,
+ AllocationContext_t const context);
+
+ inline InCSetState next_state(InCSetState const state, markOop const m, uint& age);
public:

- oop copy_to_survivor_space(oop const obj, markOop const old_mark);
+ oop copy_to_survivor_space(InCSetState const state, oop const obj, markOop const old_mark);

void trim_queue();

@@ -38,21 +38,21 @@ template <class T> void G1ParScanThreadState::do_oop_evac(T* p, HeapRegion* from
// set, due to (benign) races in the claim mechanism during RSet scanning more
// than one thread might claim the same card. So the same card may be
// processed multiple times. So redo this check.
- G1CollectedHeap::in_cset_state_t in_cset_state = _g1h->in_cset_state(obj);
- if (in_cset_state == G1CollectedHeap::InCSet) {
+ const InCSetState in_cset_state = _g1h->in_cset_state(obj);
+ if (in_cset_state.is_in_cset()) {
oop forwardee;
markOop m = obj->mark();
if (m->is_marked()) {
forwardee = (oop) m->decode_pointer();
} else {
- forwardee = copy_to_survivor_space(obj, m);
+ forwardee = copy_to_survivor_space(in_cset_state, obj, m);
}
oopDesc::encode_store_heap_oop(p, forwardee);
- } else if (in_cset_state == G1CollectedHeap::IsHumongous) {
+ } else if (in_cset_state.is_humongous()) {
_g1h->set_humongous_is_live(obj);
} else {
- assert(in_cset_state == G1CollectedHeap::InNeither,
- err_msg("In_cset_state must be InNeither here, but is %d", in_cset_state));
+ assert(!in_cset_state.is_in_cset_or_humongous(),
+ err_msg("In_cset_state must be NotInCSet here, but is " CSETSTATE_FORMAT, in_cset_state.value()));
}

assert(obj != NULL, "Must be");

@@ -80,7 +80,7 @@ G1RemSet::G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
_prev_period_summary()
{
_seq_task = new SubTasksDone(NumSeqTasks);
- _cset_rs_update_cl = NEW_C_HEAP_ARRAY(OopsInHeapRegionClosure*, n_workers(), mtGC);
+ _cset_rs_update_cl = NEW_C_HEAP_ARRAY(G1ParPushHeapRSClosure*, n_workers(), mtGC);
for (uint i = 0; i < n_workers(); i++) {
_cset_rs_update_cl[i] = NULL;
}
@@ -94,14 +94,14 @@ G1RemSet::~G1RemSet() {
for (uint i = 0; i < n_workers(); i++) {
assert(_cset_rs_update_cl[i] == NULL, "it should be");
}
- FREE_C_HEAP_ARRAY(OopsInHeapRegionClosure*, _cset_rs_update_cl);
+ FREE_C_HEAP_ARRAY(G1ParPushHeapRSClosure*, _cset_rs_update_cl);
}

class ScanRSClosure : public HeapRegionClosure {
size_t _cards_done, _cards;
G1CollectedHeap* _g1h;

- OopsInHeapRegionClosure* _oc;
+ G1ParPushHeapRSClosure* _oc;
CodeBlobClosure* _code_root_cl;

G1BlockOffsetSharedArray* _bot_shared;
@@ -113,7 +113,7 @@ class ScanRSClosure : public HeapRegionClosure {
bool _try_claimed;

public:
- ScanRSClosure(OopsInHeapRegionClosure* oc,
+ ScanRSClosure(G1ParPushHeapRSClosure* oc,
CodeBlobClosure* code_root_cl,
uint worker_i) :
_oc(oc),
@@ -135,8 +135,7 @@ public:
void scanCard(size_t index, HeapRegion *r) {
// Stack allocate the DirtyCardToOopClosure instance
HeapRegionDCTOC cl(_g1h, r, _oc,
- CardTableModRefBS::Precise,
- HeapRegionDCTOC::IntoCSFilterKind);
+ CardTableModRefBS::Precise);

// Set the "from" region in the closure.
_oc->set_region(r);
@@ -231,7 +230,7 @@ public:
size_t cards_looked_up() { return _cards;}
};

- void G1RemSet::scanRS(OopsInHeapRegionClosure* oc,
+ void G1RemSet::scanRS(G1ParPushHeapRSClosure* oc,
CodeBlobClosure* code_root_cl,
uint worker_i) {
double rs_time_start = os::elapsedTime();
@@ -301,7 +300,7 @@ void G1RemSet::cleanupHRRS() {
HeapRegionRemSet::cleanup();
}

- void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
+ void G1RemSet::oops_into_collection_set_do(G1ParPushHeapRSClosure* oc,
CodeBlobClosure* code_root_cl,
uint worker_i) {
#if CARD_REPEAT_HISTO
@@ -417,7 +416,7 @@ G1Mux2Closure::G1Mux2Closure(OopClosure *c1, OopClosure *c2) :
G1UpdateRSOrPushRefOopClosure::
G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
G1RemSet* rs,
- OopsInHeapRegionClosure* push_ref_cl,
+ G1ParPushHeapRSClosure* push_ref_cl,
bool record_refs_into_cset,
uint worker_i) :
_g1(g1h), _g1_rem_set(rs), _from(NULL),
@@ -518,7 +517,7 @@ bool G1RemSet::refine_card(jbyte* card_ptr, uint worker_i,
ct_freq_note_card(_ct_bs->index_for(start));
#endif

- OopsInHeapRegionClosure* oops_in_heap_closure = NULL;
+ G1ParPushHeapRSClosure* oops_in_heap_closure = NULL;
if (check_for_refs_into_cset) {
// ConcurrentG1RefineThreads have worker numbers larger than what
// _cset_rs_update_cl[] is set up to handle. But those threads should

@@ -33,6 +33,7 @@
class G1CollectedHeap;
class CardTableModRefBarrierSet;
class ConcurrentG1Refine;
+ class G1ParPushHeapRSClosure;

// A G1RemSet in which each heap region has a rem set that records the
// external heap references into it. Uses a mod ref bs to track updates,
@@ -68,7 +69,7 @@ protected:

// Used for caching the closure that is responsible for scanning
// references into the collection set.
- OopsInHeapRegionClosure** _cset_rs_update_cl;
+ G1ParPushHeapRSClosure** _cset_rs_update_cl;

// Print the given summary info
virtual void print_summary_info(G1RemSetSummary * summary, const char * header = NULL);
@@ -95,7 +96,7 @@ public:
// partitioning the work to be done. It should be the same as
// the "i" passed to the calling thread's work(i) function.
// In the sequential case this param will be ignored.
- void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
+ void oops_into_collection_set_do(G1ParPushHeapRSClosure* blk,
CodeBlobClosure* code_root_cl,
uint worker_i);

@@ -107,7 +108,7 @@ public:
void prepare_for_oops_into_collection_set_do();
void cleanup_after_oops_into_collection_set_do();

- void scanRS(OopsInHeapRegionClosure* oc,
+ void scanRS(G1ParPushHeapRSClosure* oc,
CodeBlobClosure* code_root_cl,
uint worker_i);

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,7 +33,7 @@
#include "runtime/thread.inline.hpp"

G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(MemRegion whole_heap) :
- CardTableModRefBSForCTRS(whole_heap)
+ CardTableModRefBS(whole_heap)
{
_kind = G1SATBCT;
}

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -37,7 +37,7 @@ class G1SATBCardTableLoggingModRefBS;
// This barrier is specialized to use a logging barrier to support
// snapshot-at-the-beginning marking.

- class G1SATBCardTableModRefBS: public CardTableModRefBSForCTRS {
+ class G1SATBCardTableModRefBS: public CardTableModRefBS {
protected:
enum G1CardValues {
g1_young_gen = CT_MR_BS_last_reserved << 1

@@ -48,93 +48,55 @@ size_t HeapRegion::GrainWords = 0;
size_t HeapRegion::CardsPerRegion = 0;

HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
- HeapRegion* hr, ExtendedOopClosure* cl,
- CardTableModRefBS::PrecisionStyle precision,
- FilterKind fk) :
+ HeapRegion* hr,
+ G1ParPushHeapRSClosure* cl,
+ CardTableModRefBS::PrecisionStyle precision) :
DirtyCardToOopClosure(hr, cl, precision, NULL),
- _hr(hr), _fk(fk), _g1(g1) { }
+ _hr(hr), _rs_scan(cl), _g1(g1) { }

FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
OopClosure* oc) :
_r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }

- template<class ClosureType>
- HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
- HeapRegion* hr,
- HeapWord* cur, HeapWord* top) {
- oop cur_oop = oop(cur);
- size_t oop_size = hr->block_size(cur);
- HeapWord* next_obj = cur + oop_size;
- while (next_obj < top) {
- // Keep filtering the remembered set.
- if (!g1h->is_obj_dead(cur_oop, hr)) {
- // Bottom lies entirely below top, so we can call the
- // non-memRegion version of oop_iterate below.
- cur_oop->oop_iterate(cl);
- }
- cur = next_obj;
- cur_oop = oop(cur);
- oop_size = hr->block_size(cur);
- next_obj = cur + oop_size;
- }
- return cur;
- }
-
void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
HeapWord* bottom,
HeapWord* top) {
G1CollectedHeap* g1h = _g1;
size_t oop_size;
- ExtendedOopClosure* cl2 = NULL;
-
- FilterIntoCSClosure intoCSFilt(this, g1h, _cl);
- FilterOutOfRegionClosure outOfRegionFilt(_hr, _cl);
-
- switch (_fk) {
- case NoFilterKind: cl2 = _cl; break;
- case IntoCSFilterKind: cl2 = &intoCSFilt; break;
- case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
- default: ShouldNotReachHere();
- }
+ HeapWord* cur = bottom;

// Start filtering what we add to the remembered set. If the object is
// not considered dead, either because it is marked (in the mark bitmap)
// or it was allocated after marking finished, then we add it. Otherwise
// we can safely ignore the object.
- if (!g1h->is_obj_dead(oop(bottom), _hr)) {
- oop_size = oop(bottom)->oop_iterate(cl2, mr);
+ if (!g1h->is_obj_dead(oop(cur), _hr)) {
+ oop_size = oop(cur)->oop_iterate(_rs_scan, mr);
} else {
- oop_size = _hr->block_size(bottom);
+ oop_size = _hr->block_size(cur);
}

- bottom += oop_size;
+ cur += oop_size;

- if (bottom < top) {
- // We replicate the loop below for several kinds of possible filters.
- switch (_fk) {
- case NoFilterKind:
- bottom = walk_mem_region_loop(_cl, g1h, _hr, bottom, top);
- break;
-
- case IntoCSFilterKind: {
- FilterIntoCSClosure filt(this, g1h, _cl);
- bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
- break;
- }
-
- case OutOfRegionFilterKind: {
- FilterOutOfRegionClosure filt(_hr, _cl);
- bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
- break;
- }
-
- default:
- ShouldNotReachHere();
+ if (cur < top) {
+ oop cur_oop = oop(cur);
+ oop_size = _hr->block_size(cur);
+ HeapWord* next_obj = cur + oop_size;
+ while (next_obj < top) {
+ // Keep filtering the remembered set.
+ if (!g1h->is_obj_dead(cur_oop, _hr)) {
+ // Bottom lies entirely below top, so we can call the
+ // non-memRegion version of oop_iterate below.
+ cur_oop->oop_iterate(_rs_scan);
+ }
+ cur = next_obj;
+ cur_oop = oop(cur);
+ oop_size = _hr->block_size(cur);
+ next_obj = cur + oop_size;
+ }

// Last object. Need to do dead-obj filtering here too.
- if (!g1h->is_obj_dead(oop(bottom), _hr)) {
- oop(bottom)->oop_iterate(cl2, mr);
+ if (!g1h->is_obj_dead(oop(cur), _hr)) {
+ oop(cur)->oop_iterate(_rs_scan, mr);
}
}
}

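With the filter kinds gone, walk_mem_region is a single loop: advance object by object using each object's block size, apply the closure only to live objects, and treat the last object specially because it may straddle the top of the dirty card range. The toy model below reproduces that shape on a flat array of length-prefixed cells; the liveness predicate stands in for is_obj_dead consulting the mark bitmap, and everything else is invented for the example.

    // Sketch: size-driven object walk with dead-object filtering and a
    // separately handled last object, as in HeapRegionDCTOC::walk_mem_region.
    #include <cstdio>

    static bool is_dead(size_t offset) { return offset == 3; } // assume one dead object

    void walk_mem_region(const size_t* words, size_t bottom, size_t top) {
      size_t cur = bottom;
      size_t next = cur + words[cur];
      while (next < top) {                  // interior objects
        if (!is_dead(cur)) {
          std::printf("iterate oops of object at %zu\n", cur);
        }
        cur = next;
        next = cur + words[cur];
      }
      if (!is_dead(cur)) {                  // last object: filter here too
        std::printf("iterate last object at %zu (bounded by top)\n", cur);
      }
    }

    int main() {
      size_t heap[8] = {3, 0, 0, 2, 0, 3, 0, 0}; // objects at 0 (3 words), 3 (2), 5 (3)
      walk_mem_region(heap, 0, 8);
      return 0;
    }

The design note in the removed code explains why the loop used to be replicated per filter kind; fixing the closure type to G1ParPushHeapRSClosure makes that replication unnecessary.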
@@ -67,17 +67,9 @@ class nmethod;
// sets.

class HeapRegionDCTOC : public DirtyCardToOopClosure {
- public:
- // Specification of possible DirtyCardToOopClosure filtering.
- enum FilterKind {
- NoFilterKind,
- IntoCSFilterKind,
- OutOfRegionFilterKind
- };
-
- protected:
+ private:
HeapRegion* _hr;
- FilterKind _fk;
+ G1ParPushHeapRSClosure* _rs_scan;
G1CollectedHeap* _g1;

// Walk the given memory region from bottom to (actual) top
@@ -90,9 +82,9 @@ protected:

public:
HeapRegionDCTOC(G1CollectedHeap* g1,
- HeapRegion* hr, ExtendedOopClosure* cl,
- CardTableModRefBS::PrecisionStyle precision,
- FilterKind fk);
+ HeapRegion* hr,
+ G1ParPushHeapRSClosure* cl,
+ CardTableModRefBS::PrecisionStyle precision);
};

// The complicating factor is that BlockOffsetTable diverged

@@ -805,7 +805,7 @@ uint HeapRegionRemSet::num_par_rem_sets() {
HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
HeapRegion* hr)
: _bosa(bosa),
- _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrm_index()), true),
+ _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrm_index()), true, Monitor::_safepoint_check_never),
_code_roots(), _other_regions(hr, &_m), _iter_state(Unclaimed), _iter_claimed(0) {
reset_for_par_iteration();
}

@@ -400,7 +400,8 @@ void GCTaskManager::initialize() {
assert(workers() != 0, "no workers");
_monitor = new Monitor(Mutex::barrier, // rank
"GCTaskManager monitor", // name
- Mutex::_allow_vm_block_flag); // allow_vm_block
+ Mutex::_allow_vm_block_flag, // allow_vm_block
+ Monitor::_safepoint_check_never);
// The queue for the GCTaskManager must be a CHeapObj.
GCTaskQueue* unsynchronized_queue = GCTaskQueue::create_on_c_heap();
_queue = SynchronizedGCTaskQueue::create(unsynchronized_queue, lock());
@@ -1125,7 +1126,8 @@ Monitor* MonitorSupply::reserve() {
} else {
result = new Monitor(Mutex::barrier, // rank
"MonitorSupply monitor", // name
- Mutex::_allow_vm_block_flag); // allow_vm_block
+ Mutex::_allow_vm_block_flag, // allow_vm_block
+ Monitor::_safepoint_check_never);
}
guarantee(result != NULL, "shouldn't return NULL");
assert(!result->is_locked(), "shouldn't be locked");

@@ -195,7 +195,7 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {

COMPILER2_PRESENT(DerivedPointerTable::clear());

- ref_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
+ ref_processor()->enable_discovery();
ref_processor()->setup_policy(clear_all_softrefs);

mark_sweep_phase1(clear_all_softrefs);

@@ -41,31 +41,24 @@ class PSMarkSweep : public MarkSweep {

// Closure accessors
static OopClosure* mark_and_push_closure() { return &MarkSweep::mark_and_push_closure; }
- static VoidClosure* follow_stack_closure() { return (VoidClosure*)&MarkSweep::follow_stack_closure; }
+ static VoidClosure* follow_stack_closure() { return &MarkSweep::follow_stack_closure; }
static CLDClosure* follow_cld_closure() { return &MarkSweep::follow_cld_closure; }
- static OopClosure* adjust_pointer_closure() { return (OopClosure*)&MarkSweep::adjust_pointer_closure; }
+ static OopClosure* adjust_pointer_closure() { return &MarkSweep::adjust_pointer_closure; }
static CLDClosure* adjust_cld_closure() { return &MarkSweep::adjust_cld_closure; }
- static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&MarkSweep::is_alive; }
+ static BoolObjectClosure* is_alive_closure() { return &MarkSweep::is_alive; }

debug_only(public:) // Used for PSParallelCompact debugging
// Mark live objects
static void mark_sweep_phase1(bool clear_all_softrefs);
// Calculate new addresses
static void mark_sweep_phase2();
debug_only(private:) // End used for PSParallelCompact debugging
// Update pointers
static void mark_sweep_phase3();
// Move objects to new positions
static void mark_sweep_phase4();

debug_only(public:) // Used for PSParallelCompact debugging
// Temporary data structures for traversal and storing/restoring marks
static void allocate_stacks();
static void deallocate_stacks();
- static void set_ref_processor(ReferenceProcessor* rp) { // delete this method
- _ref_processor = rp;
- }
debug_only(private:) // End used for PSParallelCompact debugging

// If objects are left in eden after a collection, try to move the boundary
// and absorb them into the old gen. Returns true if eden was emptied.

@@ -2069,7 +2069,7 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {

COMPILER2_PRESENT(DerivedPointerTable::clear());

- ref_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
+ ref_processor()->enable_discovery();
ref_processor()->setup_policy(maximum_heap_compaction);

bool marked_for_unloading = false;

@@ -147,6 +147,10 @@ class PSPromotionManager VALUE_OBJ_CLASS_SPEC {
claimed_stack_depth()->push(p);
}

+ inline void promotion_trace_event(oop new_obj, oop old_obj, size_t obj_size,
+ uint age, bool tenured,
+ const PSPromotionLAB* lab);
+
protected:
static OopStarTaskQueueSet* stack_array_depth() { return _stack_array_depth; }
public:

@@ -64,6 +64,33 @@ inline void PSPromotionManager::claim_or_forward_depth(T* p) {
claim_or_forward_internal_depth(p);
}

+ inline void PSPromotionManager::promotion_trace_event(oop new_obj, oop old_obj,
+ size_t obj_size,
+ uint age, bool tenured,
+ const PSPromotionLAB* lab) {
+ // Skip if memory allocation failed
+ if (new_obj != NULL) {
+ const ParallelScavengeTracer* gc_tracer = PSScavenge::gc_tracer();
+
+ if (lab != NULL) {
+ // Promotion of object through newly allocated PLAB
+ if (gc_tracer->should_report_promotion_in_new_plab_event()) {
+ size_t obj_bytes = obj_size * HeapWordSize;
+ size_t lab_size = lab->capacity();
+ gc_tracer->report_promotion_in_new_plab_event(old_obj->klass(), obj_bytes,
+ age, tenured, lab_size);
+ }
+ } else {
+ // Promotion of object directly to heap
+ if (gc_tracer->should_report_promotion_outside_plab_event()) {
+ size_t obj_bytes = obj_size * HeapWordSize;
+ gc_tracer->report_promotion_outside_plab_event(old_obj->klass(), obj_bytes,
+ age, tenured);
+ }
+ }
+ }
+ }
+
//
// This method is pretty bulky. It would be nice to split it up
// into smaller submethods, but we need to be careful not to hurt
@@ -85,11 +112,11 @@ oop PSPromotionManager::copy_to_survivor_space(oop o) {
bool new_obj_is_tenured = false;
size_t new_obj_size = o->size();

- if (!promote_immediately) {
- // Find the objects age, MT safe.
- uint age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
- test_mark->displaced_mark_helper()->age() : test_mark->age();
+ // Find the objects age, MT safe.
+ uint age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
+ test_mark->displaced_mark_helper()->age() : test_mark->age();

+ if (!promote_immediately) {
// Try allocating obj in to-space (unless too old)
if (age < PSScavenge::tenuring_threshold()) {
new_obj = (oop) _young_lab.allocate(new_obj_size);
@@ -98,6 +125,7 @@ oop PSPromotionManager::copy_to_survivor_space(oop o) {
if (new_obj_size > (YoungPLABSize / 2)) {
// Allocate this object directly
new_obj = (oop)young_space()->cas_allocate(new_obj_size);
+ promotion_trace_event(new_obj, o, new_obj_size, age, false, NULL);
} else {
// Flush and fill
_young_lab.flush();
@@ -107,6 +135,7 @@ oop PSPromotionManager::copy_to_survivor_space(oop o) {
_young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
// Try the young lab allocation again.
new_obj = (oop) _young_lab.allocate(new_obj_size);
+ promotion_trace_event(new_obj, o, new_obj_size, age, false, &_young_lab);
} else {
_young_gen_is_full = true;
}
@@ -132,6 +161,7 @@ oop PSPromotionManager::copy_to_survivor_space(oop o) {
if (new_obj_size > (OldPLABSize / 2)) {
// Allocate this object directly
new_obj = (oop)old_gen()->cas_allocate(new_obj_size);
+ promotion_trace_event(new_obj, o, new_obj_size, age, true, NULL);
} else {
// Flush and fill
_old_lab.flush();
@@ -148,6 +178,7 @@ oop PSPromotionManager::copy_to_survivor_space(oop o) {
_old_lab.initialize(MemRegion(lab_base, OldPLABSize));
// Try the old lab allocation again.
new_obj = (oop) _old_lab.allocate(new_obj_size);
+ promotion_trace_event(new_obj, o, new_obj_size, age, true, &_old_lab);
}
}
}

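The promotion_trace_event added above is gated twice: it reports nothing when the copy failed (new_obj is NULL), and the PLAB/direct distinction selects between two event kinds that each consult their own should-report switch. A compact stand-alone model of that gating follows; the tracer field names are stand-ins for the real accessor methods.

    // Sketch: gating of promotion trace events, as in the hunk above.
    #include <cstdio>

    struct Tracer {
      bool report_in_plab;      // stands in for should_report_promotion_in_new_plab_event()
      bool report_outside_plab; // stands in for should_report_promotion_outside_plab_event()
    };

    void promotion_trace_event(const void* new_obj, size_t obj_bytes,
                               bool via_plab, const Tracer& t) {
      if (new_obj == NULL) return;                   // skip if allocation failed
      if (via_plab) {
        if (t.report_in_plab)
          std::printf("promotion in new PLAB: %zu bytes\n", obj_bytes);
      } else {
        if (t.report_outside_plab)
          std::printf("promotion outside PLAB: %zu bytes\n", obj_bytes);
      }
    }

    int main() {
      Tracer t = { true, false };
      int obj = 0;
      promotion_trace_event(&obj, 64, true, t);   // reported
      promotion_trace_event(&obj, 64, false, t);  // gated off by the tracer
      promotion_trace_event(NULL, 64, true, t);   // skipped: failed allocation
      return 0;
    }
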
@@ -362,7 +362,7 @@ bool PSScavenge::invoke_no_policy() {

COMPILER2_PRESENT(DerivedPointerTable::clear());

- reference_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
+ reference_processor()->enable_discovery();
reference_processor()->setup_policy(false);

// We track how much was promoted to the next generation for

@@ -92,6 +92,7 @@ class PSScavenge: AllStatic {

// Private accessors
static CardTableExtension* const card_table() { assert(_card_table != NULL, "Sanity"); return _card_table; }
+ static const ParallelScavengeTracer* gc_tracer() { return &_gc_tracer; }

public:
// Accessors

@@ -88,7 +88,8 @@ static void _sltLoop(JavaThread* thread, TRAPS) {

SurrogateLockerThread::SurrogateLockerThread() :
JavaThread(&_sltLoop),
- _monitor(Mutex::nonleaf, "SLTMonitor"),
+ _monitor(Mutex::nonleaf, "SLTMonitor", false,
+ Monitor::_safepoint_check_sometimes),
_buffer(empty)
{}

@@ -63,9 +63,7 @@ void MutableSpace::numa_setup_pages(MemRegion mr, bool clear_space) {
}

void MutableSpace::pretouch_pages(MemRegion mr) {
- for (volatile char *p = (char*)mr.start(); p < (char*)mr.end(); p += os::vm_page_size()) {
- char t = *p; *p = t;
- }
+ os::pretouch_memory((char*)mr.start(), (char*)mr.end());
}

void MutableSpace::initialize(MemRegion mr,

@@ -394,7 +394,11 @@ IRT_ENTRY(address, InterpreterRuntime::exception_handler_for_exception(JavaThrea
// during deoptimization so the interpreter needs to skip it when
// the frame is popped.
thread->set_do_not_unlock_if_synchronized(true);
+ #ifdef CC_INTERP
+ return (address) -1;
+ #else
return Interpreter::remove_activation_entry();
+ #endif
}

// Need to do this check first since when _do_not_unlock_if_synchronized

@@ -33,24 +33,13 @@
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "utilities/macros.hpp"
- #if INCLUDE_ALL_GCS
- #include "gc_implementation/g1/concurrentMark.hpp"
- #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
- #endif // INCLUDE_ALL_GCS

CardTableRS::CardTableRS(MemRegion whole_heap) :
GenRemSet(),
_cur_youngergen_card_val(youngergenP1_card)
{
- #if INCLUDE_ALL_GCS
- if (UseG1GC) {
- _ct_bs = new G1SATBCardTableLoggingModRefBS(whole_heap);
- } else {
- _ct_bs = new CardTableModRefBSForCTRS(whole_heap);
- }
- #else
guarantee(Universe::heap()->kind() == CollectedHeap::GenCollectedHeap, "sanity");
_ct_bs = new CardTableModRefBSForCTRS(whole_heap);
- #endif
_ct_bs->initialize();
set_bs(_ct_bs);
_last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, GenCollectedHeap::max_gens + 1,

@@ -70,6 +70,7 @@ enum GCH_strong_roots_tasks {

GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
SharedHeap(policy),
+ _rem_set(NULL),
_gen_policy(policy),
_gen_process_roots_tasks(new SubTasksDone(GCH_PS_NumElements)),
_full_collections_completed(0)
@@ -465,7 +466,7 @@ void GenCollectedHeap::do_collection(bool full,
// atomic wrt other collectors in this configuration, we
// are guaranteed to have empty discovered ref lists.
if (rp->discovery_is_atomic()) {
- rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
+ rp->enable_discovery();
rp->setup_policy(do_clear_all_soft_refs);
} else {
// collect() below will enable discovery as appropriate

@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -66,6 +66,9 @@ public:
|
||||
Generation* _gens[max_gens];
|
||||
GenerationSpec** _gen_specs;
|
||||
|
||||
// The singleton Gen Remembered Set.
|
||||
GenRemSet* _rem_set;
|
||||
|
||||
// The generational collector policy.
|
||||
GenCollectorPolicy* _gen_policy;
|
||||
|
||||
@ -383,6 +386,10 @@ public:
|
||||
return _n_gens;
|
||||
}
|
||||
|
||||
// This function returns the "GenRemSet" object that allows us to scan
|
||||
// generations in a fully generational heap.
|
||||
GenRemSet* rem_set() { return _rem_set; }
|
||||
|
||||
// Convenience function to be used in situations where the heap type can be
|
||||
// asserted to be this type.
|
||||
static GenCollectedHeap* heap();
|
||||
|
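Taken together, the genCollectedHeap.hpp hunks above and the sharedHeap.hpp removals later in this diff move the _rem_set field and its rem_set() accessor from SharedHeap down to GenCollectedHeap, the subclass that actually uses them. The shape of that refactoring, in schematic form (class names abbreviated, void* used as a stand-in for GenRemSet*):

// Before: the base class owns state that only one subclass needs.
struct SharedHeapBefore              { void* _rem_set; /* ... */ };
struct GenCollectedHeapBefore : SharedHeapBefore {};

// After: the field and accessor live where they are used.
struct SharedHeapAfter               { /* no _rem_set */ };
struct GenCollectedHeapAfter : SharedHeapAfter {
  void* _rem_set;
  void* rem_set() { return _rem_set; }
};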
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@ -44,7 +44,7 @@ inline void OopsInGenClosure::set_generation(Generation* gen) {
  _gen_boundary = _gen->reserved().start();
  // Barrier set for the heap, must be set after heap is initialized
  if (_rs == NULL) {
    GenRemSet* rs = SharedHeap::heap()->rem_set();
    GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
    _rs = (CardTableRS*)rs;
  }
}

@ -297,7 +297,7 @@ void Generation::oop_iterate(ExtendedOopClosure* cl) {

void Generation::younger_refs_in_space_iterate(Space* sp,
                                               OopsInGenClosure* cl) {
  GenRemSet* rs = SharedHeap::heap()->rem_set();
  GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
  rs->younger_refs_in_space_iterate(sp, cl);
}

@ -792,7 +792,8 @@ const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
Mutex* const SpaceManager::_expand_lock =
  new Mutex(SpaceManager::_expand_lock_rank,
            SpaceManager::_expand_lock_name,
            Mutex::_allow_vm_block_flag);
            Mutex::_allow_vm_block_flag,
            Monitor::_safepoint_check_never);

void VirtualSpaceNode::inc_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());

@ -68,10 +68,10 @@ void ReferenceProcessor::init_statics() {
  _pending_list_uses_discovered_field = JDK_Version::current().pending_list_uses_discovered_field();
}

void ReferenceProcessor::enable_discovery(bool verify_disabled, bool check_no_refs) {
void ReferenceProcessor::enable_discovery(bool check_no_refs) {
#ifdef ASSERT
  // Verify that we're not currently discovering refs
  assert(!verify_disabled || !_discovering_refs, "nested call?");
  assert(!_discovering_refs, "nested call?");

  if (check_no_refs) {
    // Verify that the discovered lists are empty
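The signature change above drops the verify_disabled flag entirely: the "not already discovering" assert becomes unconditional, and check_no_refs gains a default in the header (declared further down as enable_discovery(bool check_no_refs = true)). A toy version of the same API simplification, with a hypothetical class that is not the real ReferenceProcessor:

// Sketch of the simplified enable/disable API on a toy processor.
#include <cassert>

class RefProcSketch {
  bool _discovering;
 public:
  RefProcSketch() : _discovering(false) {}
  void enable_discovery(bool check_no_refs = true) {
    assert(!_discovering);  // the old verify_disabled guard, now unconditional
    if (check_no_refs) { /* verify the discovered lists are empty */ }
    _discovering = true;
  }
  void disable_discovery()       { _discovering = false; }
  bool discovery_enabled() const { return _discovering; }
};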
@ -963,52 +963,6 @@ ReferenceProcessor::process_discovered_reflist(
  return total_list_count;
}

void ReferenceProcessor::clean_up_discovered_references() {
  // loop over the lists
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
      gclog_or_tty->print_cr(
        "\nScrubbing %s discovered list of Null referents",
        list_name(i));
    }
    clean_up_discovered_reflist(_discovered_refs[i]);
  }
}

void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) {
  assert(!discovery_is_atomic(), "Else why call this method?");
  DiscoveredListIterator iter(refs_list, NULL, NULL);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop next = java_lang_ref_Reference::next(iter.obj());
    assert(next->is_oop_or_null(), err_msg("Expected an oop or NULL for next field at " PTR_FORMAT, p2i(next)));
    // If referent has been cleared or Reference is not active,
    // drop it.
    if (iter.referent() == NULL || next != NULL) {
      debug_only(
        if (PrintGCDetails && TraceReferenceGC) {
          gclog_or_tty->print_cr("clean_up_discovered_list: Dropping Reference: "
            INTPTR_FORMAT " with next field: " INTPTR_FORMAT
            " and referent: " INTPTR_FORMAT,
            (void *)iter.obj(), (void *)next, (void *)iter.referent());
        }
      )
      // Remove Reference object from list
      iter.remove();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print(
        " Removed %d Refs with NULL referents out of %d discovered Refs",
        iter.removed(), iter.processed());
    }
  )
}

inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) {
  uint id = 0;
  // Determine the queue index to use for this object.

@ -353,19 +353,6 @@ class ReferenceProcessor : public CHeapObj<mtGC> {
                               GCTimer* gc_timer,
                               GCId gc_id);

  // Delete entries in the discovered lists that have
  // either a null referent or are not active. Such
  // Reference objects can result from the clearing
  // or enqueueing of Reference objects concurrent
  // with their discovery by a (concurrent) collector.
  // For a definition of "active" see java.lang.ref.Reference;
  // Refs are born active, become inactive when enqueued,
  // and never become active again. The state of being
  // active is encoded as follows: A Ref is active
  // if and only if its "next" field is NULL.
  void clean_up_discovered_references();
  void clean_up_discovered_reflist(DiscoveredList& refs_list);

  // Returns the name of the discovered reference list
  // occupying the i / _num_q slot.
  const char* list_name(uint i);

@ -439,7 +426,7 @@ class ReferenceProcessor : public CHeapObj<mtGC> {
  void set_span(MemRegion span) { _span = span; }

  // start and stop weak ref discovery
  void enable_discovery(bool verify_disabled, bool check_no_refs);
  void enable_discovery(bool check_no_refs = true);
  void disable_discovery()  { _discovering_refs = false; }
  bool discovery_enabled()  { return _discovering_refs;  }
@ -517,7 +504,7 @@ class NoRefDiscovery: StackObj {

  ~NoRefDiscovery() {
    if (_was_discovering_refs) {
      _rp->enable_discovery(true /*verify_disabled*/, false /*check_no_refs*/);
      _rp->enable_discovery(false /*check_no_refs*/);
    }
  }
};
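NoRefDiscovery above is a stack-allocated RAII guard: it suspends discovery on entry and restores it on destruction, which is why the destructor passes false for check_no_refs (references may legitimately have been discovered before the guard was entered). A hedged sketch of the same guard shape, reusing the toy RefProcSketch from the earlier sketch:

// Illustrative RAII guard mirroring the NoRefDiscovery pattern.
class NoDiscoveryGuardSketch {
  RefProcSketch* _rp;
  bool _was_discovering;
 public:
  explicit NoDiscoveryGuardSketch(RefProcSketch* rp)
    : _rp(rp), _was_discovering(rp->discovery_enabled()) {
    if (_was_discovering) _rp->disable_discovery();
  }
  ~NoDiscoveryGuardSketch() {
    // Lists may be non-empty here, so skip the emptiness check on re-enable.
    if (_was_discovering) _rp->enable_discovery(false /*check_no_refs*/);
  }
};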
@ -58,7 +58,6 @@ enum SH_process_roots_tasks {
SharedHeap::SharedHeap(CollectorPolicy* policy_) :
  CollectedHeap(),
  _collector_policy(policy_),
  _rem_set(NULL),
  _strong_roots_scope(NULL),
  _strong_roots_parity(0),
  _process_strong_tasks(new SubTasksDone(SH_PS_NumElements)),

@ -152,7 +151,7 @@ SharedHeap::StrongRootsScope::~StrongRootsScope() {
  }
}

Monitor* SharedHeap::StrongRootsScope::_lock = new Monitor(Mutex::leaf, "StrongRootsScope lock", false);
Monitor* SharedHeap::StrongRootsScope::_lock = new Monitor(Mutex::leaf, "StrongRootsScope lock", false, Monitor::_safepoint_check_never);

void SharedHeap::StrongRootsScope::mark_worker_done_with_threads(uint n_workers) {
  // The Thread work barrier is only needed by G1 Class Unloading.

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@ -114,10 +114,6 @@
  // set the static pointer "_sh" to that instance.
  static SharedHeap* _sh;

  // and the Gen Remembered Set, at least one good enough to scan the perm
  // gen.
  GenRemSet* _rem_set;

  // A gc policy, controls global gc resource issues
  CollectorPolicy *_collector_policy;

@ -152,10 +148,6 @@ public:
  // Initialization of ("weak") reference processing support
  virtual void ref_processing_init();

  // This function returns the "GenRemSet" object that allows us to scan
  // generations in a fully generational heap.
  GenRemSet* rem_set() { return _rem_set; }

  // Iteration functions.
  void oop_iterate(ExtendedOopClosure* cl) = 0;

@ -28,6 +28,7 @@
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/cardGeneration.inline.hpp"
#include "memory/generationSpec.hpp"
#include "memory/genMarkSweep.hpp"
#include "memory/genOopClosures.inline.hpp"
@ -658,7 +658,7 @@ void CallNode::dump_req(outputStream *st) const {

void CallNode::dump_spec(outputStream *st) const {
  st->print(" ");
  tf()->dump_on(st);
  if (tf() != NULL)  tf()->dump_on(st);
  if (_cnt != COUNT_UNKNOWN)  st->print(" C=%f",_cnt);
  if (jvms() != NULL)  jvms()->dump_spec(st);
}

@ -417,8 +417,15 @@ void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allo
  for (DUIterator_Last i2min, i2 = old_tst->last_outs(i2min); i2 >= i2min; --i2)
    old_tst->last_out(i2)->set_req(0, nul_chk);
  // Clean-up any dead code
  for (uint i3 = 0; i3 < old_tst->req(); i3++)
  for (uint i3 = 0; i3 < old_tst->req(); i3++) {
    Node* in = old_tst->in(i3);
    old_tst->set_req(i3, NULL);
    if (in->outcnt() == 0) {
      // Remove dead input node
      in->disconnect_inputs(NULL, C);
      block->find_remove(in);
    }
  }

  latency_from_uses(nul_chk);
  latency_from_uses(best);
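The widened loop body above no longer just clears the old test's inputs: it also reaps any input whose use count drops to zero once the edge is removed. The general shape of that idiom on a toy use-counted graph node (all names hypothetical, not the C2 Node API):

// Sketch: detach inputs, then reap those that became unreachable.
#include <cstddef>

struct NodeSketch {
  static const int kMaxIn = 4;
  NodeSketch* in_[kMaxIn];
  int outcnt;                        // number of uses of this node
  NodeSketch() : outcnt(0) { for (int i = 0; i < kMaxIn; i++) in_[i] = NULL; }
  void set_in(int i, NodeSketch* n) {
    if (in_[i] != NULL) in_[i]->outcnt--;  // drop the old edge's use
    in_[i] = n;
    if (n != NULL) n->outcnt++;
  }
};

static void reap_dead_inputs(NodeSketch* dead) {
  // Mirrors the hunk above: detach each input first, then reap any
  // input whose use count has fallen to zero.
  for (int i = 0; i < NodeSketch::kMaxIn; i++) {
    NodeSketch* input = dead->in_[i];
    dead->set_in(i, NULL);
    if (input != NULL && input->outcnt == 0) {
      reap_dead_inputs(input);  // stand-in for disconnect_inputs + find_remove
    }
  }
}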
@ -1147,7 +1147,7 @@ const TypeInt* PhaseIdealLoop::filtered_type_from_dominators( Node* val, Node *u
// Dump special per-node info
#ifndef PRODUCT
void CountedLoopEndNode::dump_spec(outputStream *st) const {
  if( in(TestValue)->is_Bool() ) {
  if( in(TestValue) != NULL && in(TestValue)->is_Bool() ) {
    BoolTest bt( test_trip()); // Added this for g++.

    st->print("[");

@ -473,8 +473,13 @@ bool MachNode::rematerialize() const {
// Print any per-operand special info
void MachNode::dump_spec(outputStream *st) const {
  uint cnt = num_opnds();
  for( uint i=0; i<cnt; i++ )
    _opnds[i]->dump_spec(st);
  for( uint i=0; i<cnt; i++ ) {
    if (_opnds[i] != NULL) {
      _opnds[i]->dump_spec(st);
    } else {
      st->print(" _");
    }
  }
  const TypePtr *t = adr_type();
  if( t ) {
    Compile* C = Compile::current();

@ -493,7 +498,11 @@ void MachNode::dump_format(PhaseRegAlloc *ra, outputStream *st) const {
//=============================================================================
#ifndef PRODUCT
void MachTypeNode::dump_spec(outputStream *st) const {
  _bottom_type->dump_on(st);
  if (_bottom_type != NULL) {
    _bottom_type->dump_on(st);
  } else {
    st->print(" NULL");
  }
}
#endif

@ -635,7 +644,7 @@ const Type *MachCallNode::Value(PhaseTransform *phase) const { return tf()->rang
#ifndef PRODUCT
void MachCallNode::dump_spec(outputStream *st) const {
  st->print("# ");
  tf()->dump_on(st);
  if (tf() != NULL)  tf()->dump_on(st);
  if (_cnt != COUNT_UNKNOWN)  st->print(" C=%f",_cnt);
  if (jvms() != NULL)  jvms()->dump_spec(st);
}
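All four printer hunks above share one theme: dump_spec() can run on partially constructed or mid-mutation nodes, so every pointer it touches gets a NULL guard that prints a visible placeholder instead of crashing. The pattern, distilled into a self-contained sketch (types hypothetical):

// Sketch of a NULL-tolerant debug printer.
#include <cstdio>

struct TypeSketch { void dump() const { std::printf("<type>"); } };

static void dump_field(const TypeSketch* t) {
  if (t != NULL) {
    t->dump();            // normal path
  } else {
    std::printf(" _");    // placeholder keeps the dump readable
  }
}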
@ -4370,7 +4370,7 @@ void MergeMemNode::dump_spec(outputStream *st) const {
  st->print(" {");
  Node* base_mem = base_memory();
  for( uint i = Compile::AliasIdxRaw; i < req(); i++ ) {
    Node* mem = memory_at(i);
    Node* mem = (in(i) != NULL) ? memory_at(i) : base_mem;
    if (mem == base_mem) { st->print(" -"); continue; }
    st->print( " N%d:", mem->_idx );
    Compile::current()->get_adr_type(i)->dump_on(st);

@ -700,23 +700,7 @@ JVM_END

// Returns a class loaded by the bootstrap class loader; or null
// if not found.  ClassNotFoundException is not thrown.
//
// Rationale behind JVM_FindClassFromBootLoader
// a> JVM_FindClassFromClassLoader was never exported in the export tables.
// b> because of (a) java.dll has a direct dependecy on the unexported
//    private symbol "_JVM_FindClassFromClassLoader@20".
// c> the launcher cannot use the private symbol as it dynamically opens
//    the entry point, so if something changes, the launcher will fail
//    unexpectedly at runtime, it is safest for the launcher to dlopen a
//    stable exported interface.
// d> re-exporting JVM_FindClassFromClassLoader as public, will cause its
//    signature to change from _JVM_FindClassFromClassLoader@20 to
//    JVM_FindClassFromClassLoader and will not be backward compatible
//    with older JDKs.
// Thus a public/stable exported entry point is the right solution,
// public here means public in linker semantics, and is exported only
// to the JDK, and is not intended to be a public API.

// FindClassFromBootLoader is exported to the launcher for windows.
JVM_ENTRY(jclass, JVM_FindClassFromBootLoader(JNIEnv* env,
                                              const char* name))
  JVMWrapper2("JVM_FindClassFromBootLoader %s", name);

@ -740,33 +724,6 @@ JVM_ENTRY(jclass, JVM_FindClassFromBootLoader(JNIEnv* env,
  return (jclass) JNIHandles::make_local(env, k->java_mirror());
JVM_END

// Not used; JVM_FindClassFromCaller replaces this.
JVM_ENTRY(jclass, JVM_FindClassFromClassLoader(JNIEnv* env, const char* name,
                                               jboolean init, jobject loader,
                                               jboolean throwError))
  JVMWrapper3("JVM_FindClassFromClassLoader %s throw %s", name,
              throwError ? "error" : "exception");
  // Java libraries should ensure that name is never null...
  if (name == NULL || (int)strlen(name) > Symbol::max_length()) {
    // It's impossible to create this class; the name cannot fit
    // into the constant pool.
    if (throwError) {
      THROW_MSG_0(vmSymbols::java_lang_NoClassDefFoundError(), name);
    } else {
      THROW_MSG_0(vmSymbols::java_lang_ClassNotFoundException(), name);
    }
  }
  TempNewSymbol h_name = SymbolTable::new_symbol(name, CHECK_NULL);
  Handle h_loader(THREAD, JNIHandles::resolve(loader));
  jclass result = find_class_from_class_loader(env, h_name, init, h_loader,
                                               Handle(), throwError, THREAD);

  if (TraceClassResolution && result != NULL) {
    trace_class_resolution(java_lang_Class::as_Klass(JNIHandles::resolve_non_null(result)));
  }
  return result;
JVM_END

// Find a class with this name in this loader, using the caller's protection domain.
JVM_ENTRY(jclass, JVM_FindClassFromCaller(JNIEnv* env, const char* name,
                                          jboolean init, jobject loader,

@ -334,15 +334,6 @@ JVM_GetCallerClass(JNIEnv *env, int n);
JNIEXPORT jclass JNICALL
JVM_FindPrimitiveClass(JNIEnv *env, const char *utf);

/*
 * Find a class from a given class loader. Throw ClassNotFoundException
 * or NoClassDefFoundError depending on the value of the last
 * argument.
 */
JNIEXPORT jclass JNICALL
JVM_FindClassFromClassLoader(JNIEnv *env, const char *name, jboolean init,
                             jobject loader, jboolean throwError);

/*
 * Find a class from a boot class loader. Returns NULL if class not found.
 */
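The rationale comment deleted above explains why the launcher resolves a stable exported entry point dynamically rather than linking against a private decorated symbol. A hedged sketch of that resolution step on POSIX (the symbol name comes from the diff; the handle, signature, and surrounding code are illustrative stand-ins, with JNI types reduced to void*):

// Illustrative only: resolving the exported entry point at runtime.
#include <dlfcn.h>
#include <cstdio>

typedef void* (*FindClassFromBootLoader_t)(void* env, const char* name);

static FindClassFromBootLoader_t resolve_entry(void* jvm_handle) {
  void* sym = dlsym(jvm_handle, "JVM_FindClassFromBootLoader");
  if (sym == NULL) {
    std::fprintf(stderr, "entry point not found\n");
  }
  return (FindClassFromBootLoader_t)sym;
}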
@ -69,6 +69,14 @@ PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
bool WhiteBox::_used = false;
volatile bool WhiteBox::compilation_locked = false;

class VM_WhiteBoxOperation : public VM_Operation {
 public:
  VM_WhiteBoxOperation()                         { }
  VMOp_Type type()                  const        { return VMOp_WhiteBoxOperation; }
  bool allow_nested_vm_operations() const        { return true; }
};


WB_ENTRY(jlong, WB_GetObjectAddress(JNIEnv* env, jobject o, jobject obj))
  return (jlong)(void*)JNIHandles::resolve(obj);
WB_END

@ -404,6 +412,43 @@ static jmethodID reflected_method_to_jmid(JavaThread* thread, JNIEnv* env, jobje
  return env->FromReflectedMethod(method);
}

// Deoptimizes all compiled frames and makes nmethods not entrant if it's requested
class VM_WhiteBoxDeoptimizeFrames : public VM_WhiteBoxOperation {
 private:
  int _result;
  const bool _make_not_entrant;
 public:
  VM_WhiteBoxDeoptimizeFrames(bool make_not_entrant) :
        _result(0), _make_not_entrant(make_not_entrant) { }
  int  result() const { return _result; }

  void doit() {
    for (JavaThread* t = Threads::first(); t != NULL; t = t->next()) {
      if (t->has_last_Java_frame()) {
        for (StackFrameStream fst(t, UseBiasedLocking); !fst.is_done(); fst.next()) {
          frame* f = fst.current();
          if (f->can_be_deoptimized() && !f->is_deoptimized_frame()) {
            RegisterMap* reg_map = fst.register_map();
            Deoptimization::deoptimize(t, *f, reg_map);
            if (_make_not_entrant) {
              nmethod* nm = CodeCache::find_nmethod(f->pc());
              assert(nm != NULL, "sanity check");
              nm->make_not_entrant();
            }
            ++_result;
          }
        }
      }
    }
  }
};

WB_ENTRY(jint, WB_DeoptimizeFrames(JNIEnv* env, jobject o, jboolean make_not_entrant))
  VM_WhiteBoxDeoptimizeFrames op(make_not_entrant == JNI_TRUE);
  VMThread::execute(&op);
  return op.result();
WB_END

WB_ENTRY(void, WB_DeoptimizeAll(JNIEnv* env, jobject o))
  MutexLockerEx mu(Compile_lock);
  CodeCache::mark_all_nmethods_for_deoptimization();
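WB_DeoptimizeFrames above shows the standard way a WhiteBox hook does work that must not race with running Java threads: package it as a VM_Operation, hand it to VMThread::execute(), and read the result once the operation has run. Schematically, with toy types that are not the HotSpot classes:

// Sketch of the "package work for a privileged thread" pattern.
class VMOperationSketch {
 public:
  virtual void doit() = 0;          // runs on the VM thread, world stopped
  virtual ~VMOperationSketch() {}
};

class CountFramesOp : public VMOperationSketch {
  int _result;
 public:
  CountFramesOp() : _result(0) {}
  virtual void doit() { /* walk threads and frames; ++_result per hit */ }
  int result() const { return _result; }  // safe to read after execution
};

// Caller side: execute(&op) blocks until doit() completes, so op.result()
// is stable afterwards, mirroring how WB_DeoptimizeFrames uses op.result().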
@ -526,13 +571,6 @@ WB_ENTRY(jboolean, WB_EnqueueMethodForCompilation(JNIEnv* env, jobject o, jobjec
  return (mh->queued_for_compilation() || nm != NULL);
WB_END

class VM_WhiteBoxOperation : public VM_Operation {
 public:
  VM_WhiteBoxOperation()                         { }
  VMOp_Type type()                  const        { return VMOp_WhiteBoxOperation; }
  bool allow_nested_vm_operations() const        { return true; }
};

class AlwaysFalseClosure : public BoolObjectClosure {
 public:
  bool do_object_b(oop p) { return false; }

@ -761,7 +799,6 @@ WB_ENTRY(void, WB_SetStringVMFlag(JNIEnv* env, jobject o, jstring name, jstring
  }
WB_END


WB_ENTRY(void, WB_LockCompilation(JNIEnv* env, jobject o, jlong timeout))
  WhiteBox::compilation_locked = true;
WB_END

@ -1078,6 +1115,14 @@ WB_ENTRY(jlong, WB_MetaspaceCapacityUntilGC(JNIEnv* env, jobject wb))
  return (jlong) MetaspaceGC::capacity_until_GC();
WB_END

WB_ENTRY(void, WB_AssertMatchingSafepointCalls(JNIEnv* env, jobject o, jboolean mutexSafepointValue, jboolean attemptedNoSafepointValue))
  Monitor::SafepointCheckRequired sfpt_check_required = mutexSafepointValue ?
                                           Monitor::_safepoint_check_always :
                                           Monitor::_safepoint_check_never;
  MutexLockerEx ml(new Mutex(Mutex::leaf, "SFPT_Test_lock", true, sfpt_check_required),
                   attemptedNoSafepointValue == JNI_TRUE);
WB_END

//Some convenience methods to deal with objects from java
int WhiteBox::offset_for_field(const char* field_name, oop object,
                               Symbol* signature_symbol) {
@ -1201,6 +1246,7 @@ static JNINativeMethod methods[] = {
  {CC"NMTChangeTrackingLevel", CC"()Z",    (void*)&WB_NMTChangeTrackingLevel},
  {CC"NMTGetHashSize",     CC"()I",        (void*)&WB_NMTGetHashSize     },
#endif // INCLUDE_NMT
  {CC"deoptimizeFrames",   CC"(Z)I",       (void*)&WB_DeoptimizeFrames  },
  {CC"deoptimizeAll",      CC"()V",        (void*)&WB_DeoptimizeAll     },
  {CC"deoptimizeMethod",   CC"(Ljava/lang/reflect/Executable;Z)I",
                                           (void*)&WB_DeoptimizeMethod  },

@ -1274,6 +1320,7 @@ static JNINativeMethod methods[] = {
  {CC"getCodeBlob",        CC"(J)[Ljava/lang/Object;", (void*)&WB_GetCodeBlob },
  {CC"getThreadStackSize", CC"()J",        (void*)&WB_GetThreadStackSize },
  {CC"getThreadRemainingStackSize", CC"()J", (void*)&WB_GetThreadRemainingStackSize },
  {CC"assertMatchingSafepointCalls", CC"(ZZ)V", (void*)&WB_AssertMatchingSafepointCalls },
};

#undef CC
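Each new WB_ entry above only becomes callable from Java once it appears in this methods[] table, which is registered through JNI's RegisterNatives. A minimal hedged example of the same registration mechanism (the class name and method here are hypothetical, not part of the diff):

// Sketch: registering a native method explicitly via RegisterNatives.
#include <jni.h>

static jint JNICALL MyNative_answer(JNIEnv* env, jobject self) {
  return 42;
}

static int register_natives(JNIEnv* env) {
  static const JNINativeMethod methods[] = {
    { (char*)"answer", (char*)"()I", (void*)&MyNative_answer },
  };
  jclass cls = env->FindClass("example/MyNative");  // hypothetical class
  if (cls == NULL) return -1;
  return env->RegisterNatives(cls, methods, 1);
}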
@ -925,9 +925,9 @@ bool Arguments::process_argument(const char* arg,
            "Warning: support for %s was removed in %s\n",
            fuzzy_matched->_name,
            version);
        }
      }
    }
  }

  // allow for commandline "commenting out" options like -XX:#+Verbose
  return arg[0] == '#';
@ -1382,41 +1382,24 @@ void Arguments::set_cms_and_parnew_gc_flags() {
  if (FLAG_IS_DEFAULT(SurvivorRatio) && MaxTenuringThreshold == 0) {
    FLAG_SET_ERGO(uintx, SurvivorRatio, MAX2((uintx)1024, SurvivorRatio));
  }
  // If OldPLABSize is set and CMSParPromoteBlocksToClaim is not,
  // set CMSParPromoteBlocksToClaim equal to OldPLABSize.
  // This is done in order to make ParNew+CMS configuration to work
  // with YoungPLABSize and OldPLABSize options.
  // See CR 6362902.
  if (!FLAG_IS_DEFAULT(OldPLABSize)) {
    if (FLAG_IS_DEFAULT(CMSParPromoteBlocksToClaim)) {
      // OldPLABSize is not the default value but CMSParPromoteBlocksToClaim
      // is.  In this situation let CMSParPromoteBlocksToClaim follow
      // the value (either from the command line or ergonomics) of
      // OldPLABSize.  Following OldPLABSize is an ergonomics decision.
      FLAG_SET_ERGO(uintx, CMSParPromoteBlocksToClaim, OldPLABSize);

  // OldPLABSize is interpreted in CMS as not the size of the PLAB in words,
  // but rather the number of free blocks of a given size that are used when
  // replenishing the local per-worker free list caches.
  if (FLAG_IS_DEFAULT(OldPLABSize)) {
    if (!FLAG_IS_DEFAULT(ResizeOldPLAB) && !ResizeOldPLAB) {
      // OldPLAB sizing manually turned off: Use a larger default setting,
      // unless it was manually specified. This is because a too-low value
      // will slow down scavenges.
      FLAG_SET_ERGO(uintx, OldPLABSize, CFLS_LAB::_default_static_old_plab_size); // default value before 6631166
    } else {
      // OldPLABSize and CMSParPromoteBlocksToClaim are both set.
      // CMSParPromoteBlocksToClaim is a collector-specific flag, so
      // we'll let it to take precedence.
      jio_fprintf(defaultStream::error_stream(),
                  "Both OldPLABSize and CMSParPromoteBlocksToClaim"
                  " options are specified for the CMS collector."
                  " CMSParPromoteBlocksToClaim will take precedence.\n");
      FLAG_SET_DEFAULT(OldPLABSize, CFLS_LAB::_default_dynamic_old_plab_size); // old CMSParPromoteBlocksToClaim default
    }
  }
  if (!FLAG_IS_DEFAULT(ResizeOldPLAB) && !ResizeOldPLAB) {
    // OldPLAB sizing manually turned off: Use a larger default setting,
    // unless it was manually specified. This is because a too-low value
    // will slow down scavenges.
    if (FLAG_IS_DEFAULT(CMSParPromoteBlocksToClaim)) {
      FLAG_SET_ERGO(uintx, CMSParPromoteBlocksToClaim, 50); // default value before 6631166
    }
  }
  // Overwrite OldPLABSize which is the variable we will internally use everywhere.
  FLAG_SET_ERGO(uintx, OldPLABSize, CMSParPromoteBlocksToClaim);

  // If either of the static initialization defaults have changed, note this
  // modification.
  if (!FLAG_IS_DEFAULT(CMSParPromoteBlocksToClaim) || !FLAG_IS_DEFAULT(OldPLABWeight)) {
  if (!FLAG_IS_DEFAULT(OldPLABSize) || !FLAG_IS_DEFAULT(OldPLABWeight)) {
    CFLS_LAB::modify_initialization(OldPLABSize, OldPLABWeight);
  }
  if (PrintGCDetails && Verbose) {
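The rewritten block above follows the usual ergonomics discipline visible throughout this hunk: only adjust a flag when FLAG_IS_DEFAULT says the user did not set it, and route any adjustment through FLAG_SET_ERGO so the origin of the value stays recorded. The decision structure, reduced to a sketch with toy flag storage rather than the real macros:

// Sketch of "respect the user's explicit choice" flag ergonomics.
struct FlagSketch {
  unsigned long value;
  bool is_default;  // false once set on the command line
  void set_ergo(unsigned long v) { value = v; /* origin: ergonomic */ }
};

static void size_old_plab_sketch(FlagSketch& old_plab_size, bool resize_old_plab) {
  if (old_plab_size.is_default) {     // user said nothing: we may choose
    if (!resize_old_plab) {
      old_plab_size.set_ergo(1024);   // illustrative value, not the real default
    }
  }
  // If the user set it, leave it alone and only reconcile related flags.
}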
@ -2997,17 +2980,20 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
#endif
    // -D
    } else if (match_option(option, "-D", &tail)) {
      if (match_option(option, "-Djava.endorsed.dirs=", &tail)) {
      const char* value;
      if (match_option(option, "-Djava.endorsed.dirs=", &value) &&
            *value!= '\0' && strcmp(value, "\"\"") != 0) {
        // abort if -Djava.endorsed.dirs is set
        jio_fprintf(defaultStream::output_stream(),
          "-Djava.endorsed.dirs is not supported. Endorsed standards and standalone APIs\n"
          "in modular form will be supported via the concept of upgradeable modules.\n");
          "-Djava.endorsed.dirs=%s is not supported. Endorsed standards and standalone APIs\n"
          "in modular form will be supported via the concept of upgradeable modules.\n", value);
        return JNI_EINVAL;
      }
      if (match_option(option, "-Djava.ext.dirs=", &tail)) {
      if (match_option(option, "-Djava.ext.dirs=", &value) &&
            *value != '\0' && strcmp(value, "\"\"") != 0) {
        // abort if -Djava.ext.dirs is set
        jio_fprintf(defaultStream::output_stream(),
          "-Djava.ext.dirs is not supported. Use -classpath instead.\n");
          "-Djava.ext.dirs=%s is not supported. Use -classpath instead.\n", value);
        return JNI_EINVAL;
      }

@ -3222,52 +3208,6 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
        FLAG_SET_CMDLINE(bool, NeverTenure, false);
        FLAG_SET_CMDLINE(bool, AlwaysTenure, false);
      }
    } else if (match_option(option, "-XX:+CMSPermGenSweepingEnabled") ||
               match_option(option, "-XX:-CMSPermGenSweepingEnabled")) {
      jio_fprintf(defaultStream::error_stream(),
        "Please use CMSClassUnloadingEnabled in place of "
        "CMSPermGenSweepingEnabled in the future\n");
    } else if (match_option(option, "-XX:+UseGCTimeLimit")) {
      FLAG_SET_CMDLINE(bool, UseGCOverheadLimit, true);
      jio_fprintf(defaultStream::error_stream(),
        "Please use -XX:+UseGCOverheadLimit in place of "
        "-XX:+UseGCTimeLimit in the future\n");
    } else if (match_option(option, "-XX:-UseGCTimeLimit")) {
      FLAG_SET_CMDLINE(bool, UseGCOverheadLimit, false);
      jio_fprintf(defaultStream::error_stream(),
        "Please use -XX:-UseGCOverheadLimit in place of "
        "-XX:-UseGCTimeLimit in the future\n");
    // The TLE options are for compatibility with 1.3 and will be
    // removed without notice in a future release.  These options
    // are not to be documented.
    } else if (match_option(option, "-XX:MaxTLERatio=", &tail)) {
      // No longer used.
    } else if (match_option(option, "-XX:+ResizeTLE")) {
      FLAG_SET_CMDLINE(bool, ResizeTLAB, true);
    } else if (match_option(option, "-XX:-ResizeTLE")) {
      FLAG_SET_CMDLINE(bool, ResizeTLAB, false);
    } else if (match_option(option, "-XX:+PrintTLE")) {
      FLAG_SET_CMDLINE(bool, PrintTLAB, true);
    } else if (match_option(option, "-XX:-PrintTLE")) {
      FLAG_SET_CMDLINE(bool, PrintTLAB, false);
    } else if (match_option(option, "-XX:TLEFragmentationRatio=", &tail)) {
      // No longer used.
    } else if (match_option(option, "-XX:TLESize=", &tail)) {
      julong long_tlab_size = 0;
      ArgsRange errcode = parse_memory_size(tail, &long_tlab_size, 1);
      if (errcode != arg_in_range) {
        jio_fprintf(defaultStream::error_stream(),
                    "Invalid TLAB size: %s\n", option->optionString);
        describe_range_error(errcode);
        return JNI_EINVAL;
      }
      FLAG_SET_CMDLINE(uintx, TLABSize, long_tlab_size);
    } else if (match_option(option, "-XX:TLEThreadRatio=", &tail)) {
      // No longer used.
    } else if (match_option(option, "-XX:+UseTLE")) {
      FLAG_SET_CMDLINE(bool, UseTLAB, true);
    } else if (match_option(option, "-XX:-UseTLE")) {
      FLAG_SET_CMDLINE(bool, UseTLAB, false);
    } else if (match_option(option, "-XX:+DisplayVMOutputToStderr")) {
      FLAG_SET_CMDLINE(bool, DisplayVMOutputToStdout, false);
      FLAG_SET_CMDLINE(bool, DisplayVMOutputToStderr, true);

@ -3291,44 +3231,6 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
      // disable scavenge before parallel mark-compact
      FLAG_SET_CMDLINE(bool, ScavengeBeforeFullGC, false);
#endif
    } else if (match_option(option, "-XX:CMSParPromoteBlocksToClaim=", &tail)) {
      julong cms_blocks_to_claim = (julong)atol(tail);
      FLAG_SET_CMDLINE(uintx, CMSParPromoteBlocksToClaim, cms_blocks_to_claim);
      jio_fprintf(defaultStream::error_stream(),
        "Please use -XX:OldPLABSize in place of "
        "-XX:CMSParPromoteBlocksToClaim in the future\n");
    } else if (match_option(option, "-XX:ParCMSPromoteBlocksToClaim=", &tail)) {
      julong cms_blocks_to_claim = (julong)atol(tail);
      FLAG_SET_CMDLINE(uintx, CMSParPromoteBlocksToClaim, cms_blocks_to_claim);
      jio_fprintf(defaultStream::error_stream(),
        "Please use -XX:OldPLABSize in place of "
        "-XX:ParCMSPromoteBlocksToClaim in the future\n");
    } else if (match_option(option, "-XX:ParallelGCOldGenAllocBufferSize=", &tail)) {
      julong old_plab_size = 0;
      ArgsRange errcode = parse_memory_size(tail, &old_plab_size, 1);
      if (errcode != arg_in_range) {
        jio_fprintf(defaultStream::error_stream(),
                    "Invalid old PLAB size: %s\n", option->optionString);
        describe_range_error(errcode);
        return JNI_EINVAL;
      }
      FLAG_SET_CMDLINE(uintx, OldPLABSize, old_plab_size);
      jio_fprintf(defaultStream::error_stream(),
                  "Please use -XX:OldPLABSize in place of "
                  "-XX:ParallelGCOldGenAllocBufferSize in the future\n");
    } else if (match_option(option, "-XX:ParallelGCToSpaceAllocBufferSize=", &tail)) {
      julong young_plab_size = 0;
      ArgsRange errcode = parse_memory_size(tail, &young_plab_size, 1);
      if (errcode != arg_in_range) {
        jio_fprintf(defaultStream::error_stream(),
                    "Invalid young PLAB size: %s\n", option->optionString);
        describe_range_error(errcode);
        return JNI_EINVAL;
      }
      FLAG_SET_CMDLINE(uintx, YoungPLABSize, young_plab_size);
      jio_fprintf(defaultStream::error_stream(),
                  "Please use -XX:YoungPLABSize in place of "
                  "-XX:ParallelGCToSpaceAllocBufferSize in the future\n");
    } else if (match_option(option, "-XX:CMSMarkStackSize=", &tail) ||
               match_option(option, "-XX:G1MarkStackSize=", &tail)) {
      julong stack_size = 0;

@ -3339,6 +3241,9 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
        describe_range_error(errcode);
        return JNI_EINVAL;
      }
      jio_fprintf(defaultStream::error_stream(),
        "Please use -XX:MarkStackSize in place of "
        "-XX:CMSMarkStackSize or -XX:G1MarkStackSize in the future\n");
      FLAG_SET_CMDLINE(uintx, MarkStackSize, stack_size);
    } else if (match_option(option, "-XX:CMSMarkStackSizeMax=", &tail)) {
      julong max_stack_size = 0;

@ -3350,6 +3255,9 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
        describe_range_error(errcode);
        return JNI_EINVAL;
      }
      jio_fprintf(defaultStream::error_stream(),
        "Please use -XX:MarkStackSizeMax in place of "
        "-XX:CMSMarkStackSizeMax in the future\n");
      FLAG_SET_CMDLINE(uintx, MarkStackSizeMax, max_stack_size);
    } else if (match_option(option, "-XX:ParallelMarkingThreads=", &tail) ||
               match_option(option, "-XX:ParallelCMSThreads=", &tail)) {

@ -3359,6 +3267,9 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
                    "Invalid concurrent threads: %s\n", option->optionString);
        return JNI_EINVAL;
      }
      jio_fprintf(defaultStream::error_stream(),
        "Please use -XX:ConcGCThreads in place of "
        "-XX:ParallelMarkingThreads or -XX:ParallelCMSThreads in the future\n");
      FLAG_SET_CMDLINE(uintx, ConcGCThreads, conc_threads);
    } else if (match_option(option, "-XX:MaxDirectMemorySize=", &tail)) {
      julong max_direct_memory_size = 0;

@ -1341,7 +1341,7 @@ class CommandLineFlags {
  develop(bool, TraceClassInitialization, false,                            \
          "Trace class initialization")                                     \
                                                                            \
  develop(bool, TraceExceptions, false,                                     \
  product(bool, TraceExceptions, false,                                     \
          "Trace exceptions")                                               \
                                                                            \
  develop(bool, TraceICs, false,                                            \

@ -1472,7 +1472,8 @@ class CommandLineFlags {
          "Size of young gen promotion LAB's (in HeapWords)")               \
                                                                            \
  product(uintx, OldPLABSize, 1024,                                         \
          "Size of old gen promotion LAB's (in HeapWords)")                 \
          "Size of old gen promotion LAB's (in HeapWords), or Number        \
          of blocks to attempt to claim when refilling CMS LAB's")          \
                                                                            \
  product(uintx, GCTaskTimeStampEntries, 200,                               \
          "Number of time stamp entries per gc worker thread")              \

@ -1583,14 +1584,10 @@ class CommandLineFlags {
          "The number of cards in each chunk of the parallel chunks used "  \
          "during card table scanning")                                     \
                                                                            \
  product(uintx, CMSParPromoteBlocksToClaim, 16,                            \
          "Number of blocks to attempt to claim when refilling CMS LAB's "  \
          "for parallel GC")                                                \
                                                                            \
  product(uintx, OldPLABWeight, 50,                                         \
          "Percentage (0-100) used to weight the current sample when "      \
          "computing exponentially decaying average for resizing "          \
          "CMSParPromoteBlocksToClaim")                                     \
          "OldPLABSize")                                                    \
                                                                            \
  product(bool, ResizeOldPLAB, true,                                        \
          "Dynamically resize (old gen) promotion LAB's")                   \
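The closing globals.hpp hunks show two recurring moves: promoting TraceExceptions from a develop flag to a product flag, so it is available in release builds, and rewording the OldPLABSize/OldPLABWeight descriptions as CMSParPromoteBlocksToClaim is folded away. The develop/product distinction is a tiering of flag definitions; a hedged sketch of the idea (macro names and the RELEASE_BUILD guard are illustrative, not HotSpot's actual flag machinery):

// Sketch: "product" flags exist in all builds, while "develop" flags
// compile down to constants in release builds, so they cost nothing there.
#ifdef RELEASE_BUILD
  #define DEVELOP_FLAG(type, name, value) static const type name = value;
#else
  #define DEVELOP_FLAG(type, name, value) static type name = value;
#endif
#define PRODUCT_FLAG(type, name, value)   static type name = value;

DEVELOP_FLAG(bool, TraceICsSketch, false)        // debug-only tuning knob
PRODUCT_FLAG(bool, TraceExceptionsSketch, false) // available in release builds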