Merge

commit 438e92ebe0
@@ -55,3 +55,7 @@ ce74bd35ce948d629a356e168797f44b593b1578 jdk7-b73
4061c66ba1af1a2e27c2c839ba887407dd3ce050 jdk7-b78
e9c98378f6b9256c0595ef2985ca5899f0c0e274 jdk7-b79
e6abd38682d237306d6c147c17538ec9e7f8e3a7 jdk7-b80
dcc938ac40cc45f1ef454d76020b5db5d943001c jdk7-b81
a30062be6d9ca1d48579826f870f85974300004e jdk7-b82
34c8199936a1682aa8587857f44cfaf37c2b6381 jdk7-b83
b1e55627a6980b9508854ed0c0f21d4f981b4494 jdk7-b84
@@ -55,3 +55,7 @@ c8b63075403d53a208104a8a6ea5072c1cb66aab jdk7-b76
ab4ae8f4514693a9fe17ca2fec0239d8f8450d2c jdk7-b78
20aeeb51713990dbea6929a2e100a8bbf5df70d4 jdk7-b79
a3242906c7747b5d9bcc3d118c7c3c69aa40f4b7 jdk7-b80
8403096d1fe7ff5318df9708cfec84a3fd3e1cf9 jdk7-b81
e1176f86805fe07fd9fb9da065dc51b47712ce76 jdk7-b82
6880a3af9addb41541e80ebe8cde6f79ec402a58 jdk7-b83
2f3ea057d1ad56cf3b269cdc4de2741411151982 jdk7-b84
@@ -55,3 +55,7 @@ b751c528c55560cf2adeaeef24b39ca1f4d1cbf7 jdk7-b73
a7f7276b48cd74d8eb1baa83fbf3d1ef4a2603c8 jdk7-b78
ec0421b5703b677e2226cf4bf7ae4eaafd8061c5 jdk7-b79
0336e70ca0aeabc783cc01658f36cb6e27ea7934 jdk7-b80
e08a42a2a94d97ea8eedb187a94dbff822c8fbba jdk7-b81
1e8c1bfad1abb4b81407a0f2645e0fb85764ca48 jdk7-b82
fde0df7a2384f7fe33204a79678989807d9c2b98 jdk7-b83
68c8961a82e4a3ad2a67991e5d834192a81eb4cd jdk7-b84
@@ -55,3 +55,29 @@ d8dd291a362acb656026a9c0a9da48501505a1e7 jdk7-b75
e703499b4b51e3af756ae77c3d5e8b3058a14e4e jdk7-b78
a5a6adfca6ecefb5894a848debabfe442ff50e25 jdk7-b79
3003ddd1d4330b06cb4691ae74d600d3685899eb jdk7-b80
1f9b07674480c224828852ffe137beea36b3cab5 jdk7-b81
1999f5b12482d66c8b0daf6709daea4f51893a04 jdk7-b82
a94714c550658fd6741793ef036cb9625dc2ab1a hs17-b01
faf94d94786b621f8e13cbcc941ca69c6d967c3f hs17-b02
f4b900403d6e4b0af51447bd13bbe23fe3a1dac7 hs17-b03
d8dd291a362acb656026a9c0a9da48501505a1e7 hs17-b04
9174bb32e934965288121f75394874eeb1fcb649 hs17-b05
a5a6adfca6ecefb5894a848debabfe442ff50e25 hs17-b06
3003ddd1d4330b06cb4691ae74d600d3685899eb hs17-b07
1f9b07674480c224828852ffe137beea36b3cab5 hs17-b08
ff3232b68fbb35185b338d7ff4695b52460243f3 hs17-b09
981375ca07b7f0605f92f57aad95122e8c385a4d hs16-b01
f4cbf78110c726919f46b59a3b054c54c7e889b4 hs16-b02
07c1c01e031513bfe6a7d17c6cf30d2752824ae9 hs16-b03
08f86fa55a31113df626a75c8a626e66a543a1bd hs16-b04
32c83fb84370a35344676991a48440378e6b6c8a hs16-b05
ba313800759b678979434d6da8ed3bf49eb8bea4 hs16-b06
3c0f729815607e1678bd0c41ae68494c700dcc71 hs16-b07
ac59d4e6dae51ac5fc31a9a4940d1857f91161b1 hs16-b08
3f844a28c5f4912bd04043b44f21b25b0805ffc2 hs15-b01
1605bb4eb5a7a1703b13d5b077a22cc665fe45f7 hs15-b02
2581d90c6c9b2012da930eb4742add94a03069a0 hs15-b03
9ab385cb0c42997e16a7761ebcd25c90560a2714 hs15-b04
fafab5d5349c7c066d677538db67a1ee0fb33bd2 hs15-b05
3f370a32906eb5ba993fabd7b4279be7f31052b9 jdk7-b83
ffc8d176b84bcfb5ac21302b4feb3b0c0d69b97c jdk7-b84
@@ -1,5 +1,5 @@
#
# Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved.
# Copyright 2005-2010 Sun Microsystems, Inc. All Rights Reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it

@@ -312,10 +312,13 @@ endif
$(EXPORT_LIB_DIR)/%.jar: $(GEN_DIR)/%.jar
$(install-file)

# Include files (jvmti.h, jni.h, $(JDK_INCLUDE_SUBDIR)/jni_md.h, jmm.h)
# Include files (jvmti.h, jvmticmlr.h, jni.h, $(JDK_INCLUDE_SUBDIR)/jni_md.h, jmm.h)
$(EXPORT_INCLUDE_DIR)/%: $(GEN_DIR)/jvmtifiles/%
$(install-file)

$(EXPORT_INCLUDE_DIR)/%: $(HS_SRC_DIR)/share/vm/code/%
$(install-file)

$(EXPORT_INCLUDE_DIR)/%: $(HS_SRC_DIR)/share/vm/prims/%
$(install-file)
@@ -1,5 +1,5 @@
#
# Copyright 2006-2008 Sun Microsystems, Inc. All Rights Reserved.
# Copyright 2006-2010 Sun Microsystems, Inc. All Rights Reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it

@@ -261,6 +261,7 @@ EXPORT_JRE_LIB_ARCH_DIR = $(EXPORT_JRE_LIB_DIR)/$(LIBARCH)

# Common export list of files
EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jvmti.h
EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jvmticmlr.h
EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jni.h
EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/$(JDK_INCLUDE_SUBDIR)/jni_md.h
EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jmm.h

@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2009

HS_MAJOR_VER=17
HS_MINOR_VER=0
HS_BUILD_NUMBER=08
HS_BUILD_NUMBER=10

JDK_MAJOR_VER=1
JDK_MINOR_VER=7
@@ -1,5 +1,5 @@
/*
* Copyright 2000-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -357,7 +357,7 @@ void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register hdr,
}

void LIR_Assembler::emit_exception_handler() {
int LIR_Assembler::emit_exception_handler() {
// if the last instruction is a call (typically to do a throw which
// is coming at the end after block reordering) the return address
// must still point into the code area in order to avoid assertion

@@ -373,15 +373,12 @@ void LIR_Assembler::emit_exception_handler() {
if (handler_base == NULL) {
// not enough space left for the handler
bailout("exception handler overflow");
return;
return -1;
}
#ifdef ASSERT

int offset = code_offset();
#endif // ASSERT
compilation()->offsets()->set_value(CodeOffsets::Exceptions, code_offset());

if (compilation()->has_exception_handlers() || compilation()->env()->jvmti_can_post_exceptions()) {
if (compilation()->has_exception_handlers() || compilation()->env()->jvmti_can_post_on_exceptions()) {
__ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
__ delayed()->nop();
}

@@ -390,11 +387,13 @@ void LIR_Assembler::emit_exception_handler() {
__ delayed()->nop();
debug_only(__ stop("should have gone to the caller");)
assert(code_offset() - offset <= exception_handler_size, "overflow");

__ end_a_stub();

return offset;
}

void LIR_Assembler::emit_deopt_handler() {

int LIR_Assembler::emit_deopt_handler() {
// if the last instruction is a call (typically to do a throw which
// is coming at the end after block reordering) the return address
// must still point into the code area in order to avoid assertion

@@ -408,23 +407,18 @@ void LIR_Assembler::emit_deopt_handler() {
if (handler_base == NULL) {
// not enough space left for the handler
bailout("deopt handler overflow");
return;
return -1;
}
#ifdef ASSERT

int offset = code_offset();
#endif // ASSERT
compilation()->offsets()->set_value(CodeOffsets::Deopt, code_offset());

AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack());

__ JUMP(deopt_blob, G3_scratch, 0); // sethi;jmp
__ delayed()->nop();

assert(code_offset() - offset <= deopt_handler_size, "overflow");

debug_only(__ stop("should have gone to the caller");)

__ end_a_stub();

return offset;
}

@@ -2736,9 +2730,6 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
}

Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
__ lduw(counter_addr, tmp1);
__ add(tmp1, DataLayout::counter_increment, tmp1);
__ stw(tmp1, counter_addr);
Bytecodes::Code bc = method->java_code_at_bci(bci);
// Perform additional virtual call profiling for invokevirtual and
// invokeinterface bytecodes

@@ -2828,15 +2819,23 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
__ set(DataLayout::counter_increment, tmp1);
__ st_ptr(tmp1, mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
mdo_offset_bias);
if (i < (VirtualCallData::row_limit() - 1)) {
__ br(Assembler::always, false, Assembler::pt, update_done);
__ delayed()->nop();
}
__ br(Assembler::always, false, Assembler::pt, update_done);
__ delayed()->nop();
__ bind(next_test);
}
// Receiver did not match any saved receiver and there is no empty row for it.
// Increment total counter to indicate polymorphic case.
__ lduw(counter_addr, tmp1);
__ add(tmp1, DataLayout::counter_increment, tmp1);
__ stw(tmp1, counter_addr);

__ bind(update_done);
}
} else {
// Static call
__ lduw(counter_addr, tmp1);
__ add(tmp1, DataLayout::counter_increment, tmp1);
__ stw(tmp1, counter_addr);
}
}
@@ -1,5 +1,5 @@
/*
* Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -366,8 +366,9 @@ frame::frame(intptr_t* sp, intptr_t* younger_sp, bool younger_frame_adjusted_sta
// as get_original_pc() needs correct value for unextended_sp()
if (_pc != NULL) {
_cb = CodeCache::find_blob(_pc);
if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
_pc = ((nmethod*)_cb)->get_original_pc(this);
address original_pc = nmethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
_pc = original_pc;
_deopt_state = is_deoptimized;
} else {
_deopt_state = not_deoptimized;

@@ -519,9 +520,9 @@ void frame::patch_pc(Thread* thread, address pc) {
_cb = CodeCache::find_blob(pc);
*O7_addr() = pc - pc_return_offset;
_cb = CodeCache::find_blob(_pc);
if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
address orig = ((nmethod*)_cb)->get_original_pc(this);
assert(orig == _pc, "expected original to be stored before patching");
address original_pc = nmethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
assert(original_pc == _pc, "expected original to be stored before patching");
_deopt_state = is_deoptimized;
} else {
_deopt_state = not_deoptimized;
@@ -1,5 +1,5 @@
/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -1681,11 +1681,8 @@ void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
// If no method data exists, go to profile_continue.
test_method_data_pointer(profile_continue);

// We are making a call. Increment the count.
increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);

// Record the receiver type.
record_klass_in_profile(receiver, scratch);
record_klass_in_profile(receiver, scratch, true);

// The method data pointer needs to be updated to reflect the new target.
update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));

@@ -1695,9 +1692,13 @@ void InterpreterMacroAssembler::profile_virtual_call(Register receiver,

void InterpreterMacroAssembler::record_klass_in_profile_helper(
Register receiver, Register scratch,
int start_row, Label& done) {
if (TypeProfileWidth == 0)
int start_row, Label& done, bool is_virtual_call) {
if (TypeProfileWidth == 0) {
if (is_virtual_call) {
increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
}
return;
}

int last_row = VirtualCallData::row_limit() - 1;
assert(start_row <= last_row, "must be work left to do");

@@ -1714,6 +1715,7 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
// See if the receiver is receiver[n].
int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row));
test_mdp_data_at(recvr_offset, receiver, next_test, scratch);
// delayed()->tst(scratch);

// The receiver is receiver[n]. Increment count[n].
int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));

@@ -1723,20 +1725,31 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
bind(next_test);

if (test_for_null_also) {
Label found_null;
// Failed the equality check on receiver[n]... Test for null.
if (start_row == last_row) {
// The only thing left to do is handle the null case.
brx(Assembler::notZero, false, Assembler::pt, done);
delayed()->nop();
if (is_virtual_call) {
brx(Assembler::zero, false, Assembler::pn, found_null);
delayed()->nop();
// Receiver did not match any saved receiver and there is no empty row for it.
// Increment total counter to indicate polymorphic case.
increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
ba(false, done);
delayed()->nop();
bind(found_null);
} else {
brx(Assembler::notZero, false, Assembler::pt, done);
delayed()->nop();
}
break;
}
// Since null is rare, make it be the branch-taken case.
Label found_null;
brx(Assembler::zero, false, Assembler::pn, found_null);
delayed()->nop();

// Put all the "Case 3" tests here.
record_klass_in_profile_helper(receiver, scratch, start_row + 1, done);
record_klass_in_profile_helper(receiver, scratch, start_row + 1, done, is_virtual_call);

// Found a null. Keep searching for a matching receiver,
// but remember that this is an empty (unused) slot.

@@ -1753,16 +1766,18 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
mov(DataLayout::counter_increment, scratch);
set_mdp_data_at(count_offset, scratch);
ba(false, done);
delayed()->nop();
if (start_row > 0) {
ba(false, done);
delayed()->nop();
}
}

void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
Register scratch) {
Register scratch, bool is_virtual_call) {
assert(ProfileInterpreter, "must be profiling");
Label done;

record_klass_in_profile_helper(receiver, scratch, 0, done);
record_klass_in_profile_helper(receiver, scratch, 0, done, is_virtual_call);

bind (done);
}

@@ -1840,7 +1855,7 @@ void InterpreterMacroAssembler::profile_typecheck(Register klass,
mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());

// Record the object type.
record_klass_in_profile(klass, scratch);
record_klass_in_profile(klass, scratch, false);
}

// The method data pointer needs to be updated.

@@ -290,9 +290,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
void test_mdp_data_at(int offset, Register value, Label& not_equal_continue,
Register scratch);

void record_klass_in_profile(Register receiver, Register scratch);
void record_klass_in_profile(Register receiver, Register scratch, bool is_virtual_call);
void record_klass_in_profile_helper(Register receiver, Register scratch,
int start_row, Label& done);
int start_row, Label& done, bool is_virtual_call);

void update_mdp_by_offset(int offset_of_disp, Register scratch);
void update_mdp_by_offset(Register reg, int offset_of_disp,
@@ -1,5 +1,5 @@
/*
* Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -851,10 +851,10 @@ void AdapterGenerator::gen_c2i_adapter(
__ set(reg2offset(r_1) + extraspace + bias, ld_off);
#else
int ld_off = reg2offset(r_1) + extraspace + bias;
#endif // _LP64
#ifdef ASSERT
G1_forced = true;
#endif // ASSERT
#endif // _LP64
r_1 = G1_scratch->as_VMReg();// as part of the load/store shuffle
if (!r_2->is_valid()) __ ld (base, ld_off, G1_scratch);
else __ ldx(base, ld_off, G1_scratch);

@@ -865,9 +865,11 @@ void AdapterGenerator::gen_c2i_adapter(
if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
store_c2i_object(r, base, st_off);
} else if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
#ifndef _LP64
if (TieredCompilation) {
assert(G1_forced || sig_bt[i] != T_LONG, "should not use register args for longs");
}
#endif // _LP64
store_c2i_long(r, base, st_off, r_2->is_stack());
} else {
store_c2i_int(r, base, st_off);

@@ -1189,7 +1191,8 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
// VMReg max_arg,
int comp_args_on_stack, // VMRegStackSlots
const BasicType *sig_bt,
const VMRegPair *regs) {
const VMRegPair *regs,
AdapterFingerPrint* fingerprint) {
address i2c_entry = __ pc();

AdapterGenerator agen(masm);

@@ -1258,7 +1261,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
agen.gen_c2i_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

__ flush();
return new AdapterHandlerEntry(i2c_entry, c2i_entry, c2i_unverified_entry);
return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);

}
@@ -1,5 +1,5 @@
/*
* Copyright 2000-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -418,13 +418,12 @@ int LIR_Assembler::initial_frame_size_in_bytes() {
}

void LIR_Assembler::emit_exception_handler() {
int LIR_Assembler::emit_exception_handler() {
// if the last instruction is a call (typically to do a throw which
// is coming at the end after block reordering) the return address
// must still point into the code area in order to avoid assertion
// failures when searching for the corresponding bci => add a nop
// (was bug 5/14/1999 - gri)

__ nop();

// generate code for exception handler

@@ -432,17 +431,14 @@ void LIR_Assembler::emit_exception_handler() {
if (handler_base == NULL) {
// not enough space left for the handler
bailout("exception handler overflow");
return;
return -1;
}
#ifdef ASSERT
int offset = code_offset();
#endif // ASSERT

compilation()->offsets()->set_value(CodeOffsets::Exceptions, code_offset());
int offset = code_offset();

// if the method does not have an exception handler, then there is
// no reason to search for one
if (compilation()->has_exception_handlers() || compilation()->env()->jvmti_can_post_exceptions()) {
if (compilation()->has_exception_handlers() || compilation()->env()->jvmti_can_post_on_exceptions()) {
// the exception oop and pc are in rax, and rdx
// no other registers need to be preserved, so invalidate them
__ invalidate_registers(false, true, true, false, true, true);

@@ -474,19 +470,19 @@ void LIR_Assembler::emit_exception_handler() {
// unwind activation and forward exception to caller
// rax,: exception
__ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));

assert(code_offset() - offset <= exception_handler_size, "overflow");

__ end_a_stub();

return offset;
}

void LIR_Assembler::emit_deopt_handler() {

int LIR_Assembler::emit_deopt_handler() {
// if the last instruction is a call (typically to do a throw which
// is coming at the end after block reordering) the return address
// must still point into the code area in order to avoid assertion
// failures when searching for the corresponding bci => add a nop
// (was bug 5/14/1999 - gri)

__ nop();

// generate code for exception handler

@@ -494,23 +490,17 @@ void LIR_Assembler::emit_deopt_handler() {
if (handler_base == NULL) {
// not enough space left for the handler
bailout("deopt handler overflow");
return;
return -1;
}
#ifdef ASSERT

int offset = code_offset();
#endif // ASSERT

compilation()->offsets()->set_value(CodeOffsets::Deopt, code_offset());

InternalAddress here(__ pc());
__ pushptr(here.addr());

__ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

assert(code_offset() - offset <= deopt_handler_size, "overflow");

__ end_a_stub();

return offset;
}

@@ -3219,7 +3209,6 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
Register mdo = op->mdo()->as_register();
__ movoop(mdo, md->constant_encoding());
Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
__ addl(counter_addr, DataLayout::counter_increment);
Bytecodes::Code bc = method->java_code_at_bci(bci);
// Perform additional virtual call profiling for invokevirtual and
// invokeinterface bytecodes

@@ -3286,14 +3275,18 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
__ jcc(Assembler::notEqual, next_test);
__ movptr(recv_addr, recv);
__ movl(Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))), DataLayout::counter_increment);
if (i < (VirtualCallData::row_limit() - 1)) {
__ jmp(update_done);
}
__ jmp(update_done);
__ bind(next_test);
}
// Receiver did not match any saved receiver and there is no empty row for it.
// Increment total counter to indicate polymorphic case.
__ addl(counter_addr, DataLayout::counter_increment);

__ bind(update_done);
}
} else {
// Static call
__ addl(counter_addr, DataLayout::counter_increment);
}
}
@@ -1,5 +1,5 @@
/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -222,9 +222,9 @@ void frame::patch_pc(Thread* thread, address pc) {
}
((address *)sp())[-1] = pc;
_cb = CodeCache::find_blob(pc);
if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
address orig = (((nmethod*)_cb)->get_original_pc(this));
assert(orig == _pc, "expected original to be stored before patching");
address original_pc = nmethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
assert(original_pc == _pc, "expected original PC to be stored before patching");
_deopt_state = is_deoptimized;
// leave _pc as is
} else {

@@ -323,19 +323,61 @@ frame frame::sender_for_entry_frame(RegisterMap* map) const {
return fr;
}

//------------------------------------------------------------------------------
// frame::verify_deopt_original_pc
//
// Verifies the calculated original PC of a deoptimization PC for the
// given unextended SP. The unextended SP might also be the saved SP
// for MethodHandle call sites.
#if ASSERT
void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp, bool is_method_handle_return) {
frame fr;

// This is ugly but it's better than to change {get,set}_original_pc
// to take an SP value as argument. And it's only a debugging
// method anyway.
fr._unextended_sp = unextended_sp;

address original_pc = nm->get_original_pc(&fr);
assert(nm->code_contains(original_pc), "original PC must be in nmethod");
assert(nm->is_method_handle_return(original_pc) == is_method_handle_return, "must be");
}
#endif

//------------------------------------------------------------------------------
// frame::sender_for_interpreter_frame
frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
// sp is the raw sp from the sender after adapter or interpreter extension
intptr_t* sp = (intptr_t*) addr_at(sender_sp_offset);
// SP is the raw SP from the sender after adapter or interpreter
// extension.
intptr_t* sender_sp = this->sender_sp();

// This is the sp before any possible extension (adapter/locals).
intptr_t* unextended_sp = interpreter_frame_sender_sp();

// Stored FP.
intptr_t* saved_fp = link();

address sender_pc = this->sender_pc();
CodeBlob* sender_cb = CodeCache::find_blob_unsafe(sender_pc);
assert(sender_cb, "sanity");
nmethod* sender_nm = sender_cb->as_nmethod_or_null();
if (sender_nm != NULL && sender_nm->is_method_handle_return(sender_pc)) {
unextended_sp = (intptr_t*) at(link_offset);

if (sender_nm != NULL) {
// If the sender PC is a deoptimization point, get the original
// PC. For MethodHandle call site the unextended_sp is stored in
// saved_fp.
if (sender_nm->is_deopt_mh_entry(sender_pc)) {
DEBUG_ONLY(verify_deopt_mh_original_pc(sender_nm, saved_fp));
unextended_sp = saved_fp;
}
else if (sender_nm->is_deopt_entry(sender_pc)) {
DEBUG_ONLY(verify_deopt_original_pc(sender_nm, unextended_sp));
}
else if (sender_nm->is_method_handle_return(sender_pc)) {
unextended_sp = saved_fp;
}
}

// The interpreter and compiler(s) always save EBP/RBP in a known

@@ -359,40 +401,51 @@ frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
}
#endif // AMD64
}
#endif /* COMPILER2 */
return frame(sp, unextended_sp, link(), sender_pc);
#endif // COMPILER2

return frame(sender_sp, unextended_sp, saved_fp, sender_pc);
}

//------------------------------sender_for_compiled_frame-----------------------
//------------------------------------------------------------------------------
// frame::sender_for_compiled_frame
frame frame::sender_for_compiled_frame(RegisterMap* map) const {
assert(map != NULL, "map must be set");
const bool c1_compiled = _cb->is_compiled_by_c1();

// frame owned by optimizing compiler
intptr_t* sender_sp = NULL;

assert(_cb->frame_size() >= 0, "must have non-zero frame size");
sender_sp = unextended_sp() + _cb->frame_size();
intptr_t* sender_sp = unextended_sp() + _cb->frame_size();
intptr_t* unextended_sp = sender_sp;

// On Intel the return_address is always the word on the stack
address sender_pc = (address) *(sender_sp-1);

// This is the saved value of ebp which may or may not really be an fp.
// it is only an fp if the sender is an interpreter frame (or c1?)
// This is the saved value of EBP which may or may not really be an FP.
// It is only an FP if the sender is an interpreter frame (or C1?).
intptr_t* saved_fp = (intptr_t*) *(sender_sp - frame::sender_sp_offset);

intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset);

intptr_t* unextended_sp = sender_sp;
// If we are returning to a compiled method handle call site,
// the saved_fp will in fact be a saved value of the unextended SP.
// The simplest way to tell whether we are returning to such a call
// site is as follows:
// If we are returning to a compiled MethodHandle call site, the
// saved_fp will in fact be a saved value of the unextended SP. The
// simplest way to tell whether we are returning to such a call site
// is as follows:
CodeBlob* sender_cb = CodeCache::find_blob_unsafe(sender_pc);
assert(sender_cb, "sanity");
nmethod* sender_nm = sender_cb->as_nmethod_or_null();
if (sender_nm != NULL && sender_nm->is_method_handle_return(sender_pc)) {
unextended_sp = saved_fp;

if (sender_nm != NULL) {
// If the sender PC is a deoptimization point, get the original
// PC. For MethodHandle call site the unextended_sp is stored in
// saved_fp.
if (sender_nm->is_deopt_mh_entry(sender_pc)) {
DEBUG_ONLY(verify_deopt_mh_original_pc(sender_nm, saved_fp));
unextended_sp = saved_fp;
}
else if (sender_nm->is_deopt_entry(sender_pc)) {
DEBUG_ONLY(verify_deopt_original_pc(sender_nm, unextended_sp));
}
else if (sender_nm->is_method_handle_return(sender_pc)) {
unextended_sp = saved_fp;
}
}

if (map->update_map()) {

@@ -403,7 +456,7 @@ frame frame::sender_for_compiled_frame(RegisterMap* map) const {
if (_cb->oop_maps() != NULL) {
OopMapSet::update_register_map(this, map);
}
// Since the prolog does the save and restore of epb there is no oopmap
// Since the prolog does the save and restore of EBP there is no oopmap
// for it so we must fill in its location as if there was an oopmap entry
// since if our caller was compiled code there could be live jvm state in it.
map->set_location(rbp->as_VMReg(), (address) (sender_sp - frame::sender_sp_offset));

@@ -422,6 +475,9 @@ frame frame::sender_for_compiled_frame(RegisterMap* map) const {
return frame(sender_sp, unextended_sp, saved_fp, sender_pc);
}

//------------------------------------------------------------------------------
// frame::sender
frame frame::sender(RegisterMap* map) const {
// Default is we done have to follow them. The sender_for_xxx will
// update it accordingly
@@ -163,6 +163,14 @@
return (intptr_t*) addr_at(offset);
}

#if ASSERT
// Used in frame::sender_for_{interpreter,compiled}_frame
static void verify_deopt_original_pc( nmethod* nm, intptr_t* unextended_sp, bool is_method_handle_return = false);
static void verify_deopt_mh_original_pc(nmethod* nm, intptr_t* unextended_sp) {
verify_deopt_original_pc(nm, unextended_sp, true);
}
#endif

public:
// Constructors
@@ -1,5 +1,5 @@
/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -35,32 +35,35 @@ inline frame::frame() {
_deopt_state = unknown;
}

inline frame:: frame(intptr_t* sp, intptr_t* fp, address pc) {
inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
_sp = sp;
_unextended_sp = sp;
_fp = fp;
_pc = pc;
assert(pc != NULL, "no pc?");
_cb = CodeCache::find_blob(pc);
_deopt_state = not_deoptimized;
if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
_pc = (((nmethod*)_cb)->get_original_pc(this));

address original_pc = nmethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
_pc = original_pc;
_deopt_state = is_deoptimized;
} else {
_deopt_state = not_deoptimized;
}
}

inline frame:: frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
_sp = sp;
_unextended_sp = unextended_sp;
_fp = fp;
_pc = pc;
assert(pc != NULL, "no pc?");
_cb = CodeCache::find_blob(pc);
_deopt_state = not_deoptimized;
if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
_pc = (((nmethod*)_cb)->get_original_pc(this));

address original_pc = nmethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
_pc = original_pc;
assert(((nmethod*)_cb)->code_contains(_pc), "original PC must be in nmethod");
_deopt_state = is_deoptimized;
} else {
_deopt_state = not_deoptimized;

@@ -86,9 +89,9 @@ inline frame::frame(intptr_t* sp, intptr_t* fp) {

_cb = CodeCache::find_blob(_pc);

_deopt_state = not_deoptimized;
if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
_pc = (((nmethod*)_cb)->get_original_pc(this));
address original_pc = nmethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
_pc = original_pc;
_deopt_state = is_deoptimized;
} else {
_deopt_state = not_deoptimized;

@@ -230,7 +233,8 @@ inline intptr_t* frame::interpreter_frame_tos_address() const {
} else {
// sp() may have been extended or shrunk by an adapter. At least
// check that we don't fall behind the legal region.
assert(last_sp < (intptr_t*) interpreter_frame_monitor_begin(), "bad tos");
// For top deoptimized frame last_sp == interpreter_frame_monitor_end.
assert(last_sp <= (intptr_t*) interpreter_frame_monitor_end(), "bad tos");
return last_sp;
}
}
@@ -1,5 +1,5 @@
/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -1239,17 +1239,19 @@ void InterpreterMacroAssembler::profile_virtual_call(Register receiver, Register
// If no method data exists, go to profile_continue.
test_method_data_pointer(mdp, profile_continue);

// We are making a call. Increment the count.
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

Label skip_receiver_profile;
if (receiver_can_be_null) {
Label not_null;
testptr(receiver, receiver);
jcc(Assembler::zero, skip_receiver_profile);
jccb(Assembler::notZero, not_null);
// We are making a call. Increment the count for null receiver.
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
jmp(skip_receiver_profile);
bind(not_null);
}

// Record the receiver type.
record_klass_in_profile(receiver, mdp, reg2);
record_klass_in_profile(receiver, mdp, reg2, true);
bind(skip_receiver_profile);

// The method data pointer needs to be updated to reflect the new target.

@@ -1263,10 +1265,14 @@ void InterpreterMacroAssembler::profile_virtual_call(Register receiver, Register

void InterpreterMacroAssembler::record_klass_in_profile_helper(
Register receiver, Register mdp,
Register reg2,
int start_row, Label& done) {
if (TypeProfileWidth == 0)
Register reg2, int start_row,
Label& done, bool is_virtual_call) {
if (TypeProfileWidth == 0) {
if (is_virtual_call) {
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
}
return;
}

int last_row = VirtualCallData::row_limit() - 1;
assert(start_row <= last_row, "must be work left to do");

@@ -1294,19 +1300,28 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
bind(next_test);

if (row == start_row) {
Label found_null;
// Failed the equality check on receiver[n]... Test for null.
testptr(reg2, reg2);
if (start_row == last_row) {
// The only thing left to do is handle the null case.
jcc(Assembler::notZero, done);
if (is_virtual_call) {
jccb(Assembler::zero, found_null);
// Receiver did not match any saved receiver and there is no empty row for it.
// Increment total counter to indicate polymorphic case.
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
jmp(done);
bind(found_null);
} else {
jcc(Assembler::notZero, done);
}
break;
}
// Since null is rare, make it be the branch-taken case.
Label found_null;
jcc(Assembler::zero, found_null);

// Put all the "Case 3" tests here.
record_klass_in_profile_helper(receiver, mdp, reg2, start_row + 1, done);
record_klass_in_profile_helper(receiver, mdp, reg2, start_row + 1, done, is_virtual_call);

// Found a null. Keep searching for a matching receiver,
// but remember that this is an empty (unused) slot.

@@ -1323,16 +1338,18 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
movptr(reg2, (int32_t)DataLayout::counter_increment);
set_mdp_data_at(mdp, count_offset, reg2);
jmp(done);
if (start_row > 0) {
jmp(done);
}
}

void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
Register mdp,
Register reg2) {
Register mdp, Register reg2,
bool is_virtual_call) {
assert(ProfileInterpreter, "must be profiling");
Label done;

record_klass_in_profile_helper(receiver, mdp, reg2, 0, done);
record_klass_in_profile_helper(receiver, mdp, reg2, 0, done, is_virtual_call);

bind (done);
}

@@ -1425,7 +1442,7 @@ void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass,
mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());

// Record the object type.
record_klass_in_profile(klass, mdp, reg2);
record_klass_in_profile(klass, mdp, reg2, false);
assert(reg2 == rdi, "we know how to fix this blown reg");
restore_locals(); // Restore EDI
}

@@ -213,10 +213,10 @@ class InterpreterMacroAssembler: public MacroAssembler {
Label& not_equal_continue);

void record_klass_in_profile(Register receiver, Register mdp,
Register reg2);
Register reg2, bool is_virtual_call);
void record_klass_in_profile_helper(Register receiver, Register mdp,
Register reg2,
int start_row, Label& done);
Register reg2, int start_row,
Label& done, bool is_virtual_call);

void update_mdp_by_offset(Register mdp_in, int offset_of_offset);
void update_mdp_by_offset(Register mdp_in, Register reg, int offset_of_disp);
@@ -1,5 +1,5 @@
/*
* Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -1262,17 +1262,19 @@ void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
// If no method data exists, go to profile_continue.
test_method_data_pointer(mdp, profile_continue);

// We are making a call. Increment the count.
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

Label skip_receiver_profile;
if (receiver_can_be_null) {
Label not_null;
testptr(receiver, receiver);
jcc(Assembler::zero, skip_receiver_profile);
jccb(Assembler::notZero, not_null);
// We are making a call. Increment the count for null receiver.
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
jmp(skip_receiver_profile);
bind(not_null);
}

// Record the receiver type.
record_klass_in_profile(receiver, mdp, reg2);
record_klass_in_profile(receiver, mdp, reg2, true);
bind(skip_receiver_profile);

// The method data pointer needs to be updated to reflect the new target.

@@ -1296,10 +1298,14 @@ void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
// See below for example code.
void InterpreterMacroAssembler::record_klass_in_profile_helper(
Register receiver, Register mdp,
Register reg2,
int start_row, Label& done) {
if (TypeProfileWidth == 0)
Register reg2, int start_row,
Label& done, bool is_virtual_call) {
if (TypeProfileWidth == 0) {
if (is_virtual_call) {
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
}
return;
}

int last_row = VirtualCallData::row_limit() - 1;
assert(start_row <= last_row, "must be work left to do");

@@ -1327,19 +1333,28 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
bind(next_test);

if (test_for_null_also) {
Label found_null;
// Failed the equality check on receiver[n]... Test for null.
testptr(reg2, reg2);
if (start_row == last_row) {
// The only thing left to do is handle the null case.
jcc(Assembler::notZero, done);
if (is_virtual_call) {
jccb(Assembler::zero, found_null);
// Receiver did not match any saved receiver and there is no empty row for it.
// Increment total counter to indicate polymorphic case.
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
jmp(done);
bind(found_null);
} else {
jcc(Assembler::notZero, done);
}
break;
}
// Since null is rare, make it be the branch-taken case.
Label found_null;
jcc(Assembler::zero, found_null);

// Put all the "Case 3" tests here.
record_klass_in_profile_helper(receiver, mdp, reg2, start_row + 1, done);
record_klass_in_profile_helper(receiver, mdp, reg2, start_row + 1, done, is_virtual_call);

// Found a null. Keep searching for a matching receiver,
// but remember that this is an empty (unused) slot.

@@ -1356,7 +1371,9 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
movl(reg2, DataLayout::counter_increment);
set_mdp_data_at(mdp, count_offset, reg2);
jmp(done);
if (start_row > 0) {
jmp(done);
}
}

// Example state machine code for three profile rows:

@@ -1368,7 +1385,7 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
// if (row[1].rec != NULL) {
// // degenerate decision tree, rooted at row[2]
// if (row[2].rec == rec) { row[2].incr(); goto done; }
// if (row[2].rec != NULL) { goto done; } // overflow
// if (row[2].rec != NULL) { count.incr(); goto done; } // overflow
// row[2].init(rec); goto done;
// } else {
// // remember row[1] is empty

@@ -1381,14 +1398,15 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
// if (row[2].rec == rec) { row[2].incr(); goto done; }
// row[0].init(rec); goto done;
// }
// done:

void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
Register mdp,
Register reg2) {
Register mdp, Register reg2,
bool is_virtual_call) {
assert(ProfileInterpreter, "must be profiling");
Label done;

record_klass_in_profile_helper(receiver, mdp, reg2, 0, done);
record_klass_in_profile_helper(receiver, mdp, reg2, 0, done, is_virtual_call);

bind (done);
}

@@ -1484,7 +1502,7 @@ void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass,
mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());

// Record the object type.
record_klass_in_profile(klass, mdp, reg2);
record_klass_in_profile(klass, mdp, reg2, false);
}
update_mdp_by_constant(mdp, mdp_delta);

@@ -222,10 +222,10 @@ class InterpreterMacroAssembler: public MacroAssembler {
Label& not_equal_continue);

void record_klass_in_profile(Register receiver, Register mdp,
Register reg2);
Register reg2, bool is_virtual_call);
void record_klass_in_profile_helper(Register receiver, Register mdp,
Register reg2,
int start_row, Label& done);
Register reg2, int start_row,
Label& done, bool is_virtual_call);

void update_mdp_by_offset(Register mdp_in, int offset_of_offset);
void update_mdp_by_offset(Register mdp_in, Register reg, int offset_of_disp);
@@ -1,5 +1,5 @@
/*
* Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -907,7 +907,8 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
int total_args_passed,
int comp_args_on_stack,
const BasicType *sig_bt,
const VMRegPair *regs) {
const VMRegPair *regs,
AdapterFingerPrint* fingerprint) {
address i2c_entry = __ pc();

gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);

@@ -954,7 +955,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

__ flush();
return new AdapterHandlerEntry(i2c_entry, c2i_entry, c2i_unverified_entry);
return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
}

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
@@ -1,5 +1,5 @@
/*
* Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -778,7 +778,8 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
int total_args_passed,
int comp_args_on_stack,
const BasicType *sig_bt,
const VMRegPair *regs) {
const VMRegPair *regs,
AdapterFingerPrint* fingerprint) {
address i2c_entry = __ pc();

gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);

@@ -824,7 +825,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

__ flush();
return new AdapterHandlerEntry(i2c_entry, c2i_entry, c2i_unverified_entry);
return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
}

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
@@ -718,10 +718,8 @@ class StubGenerator: public StubCodeGenerator {
case BarrierSet::G1SATBCTLogging:
{
__ pusha(); // push registers
__ push(count);
__ push(start);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre)));
__ addptr(rsp, 2*wordSize);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre),
start, count);
__ popa();
}
break;

@@ -752,10 +750,8 @@ class StubGenerator: public StubCodeGenerator {
case BarrierSet::G1SATBCTLogging:
{
__ pusha(); // push registers
__ push(count);
__ push(start);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post)));
__ addptr(rsp, 2*wordSize);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post),
start, count);
__ popa();
}
break;
@@ -1172,7 +1172,7 @@ class StubGenerator: public StubCodeGenerator {
__ movptr(c_rarg0, addr);
__ movptr(c_rarg1, count);
}
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre)));
__ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
__ popa();
}
break;

@@ -1212,7 +1212,7 @@ class StubGenerator: public StubCodeGenerator {
__ shrptr(scratch, LogBytesPerHeapOop); // convert to element count
__ mov(c_rarg0, start);
__ mov(c_rarg1, scratch);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post)));
__ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 2);
__ popa();
}
break;
@@ -3238,17 +3238,19 @@ void TemplateTable::_new() {
__ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
__ store_klass_gap(rax, rcx); // zero klass gap for compressed oops
__ store_klass(rax, rsi); // store klass last

{
SkipIfEqual skip(_masm, &DTraceAllocProbes, false);
// Trigger dtrace event for fastpath
__ push(atos); // save the return value
__ call_VM_leaf(
CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
__ pop(atos); // restore the return value

}
__ jmp(done);
}

{
SkipIfEqual skip(_masm, &DTraceAllocProbes, false);
// Trigger dtrace event for fastpath
__ push(atos); // save the return value
__ call_VM_leaf(
CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
__ pop(atos); // restore the return value
}

// slow case
__ bind(slow_case);
@@ -235,6 +235,11 @@ reg_class xdb_reg7( XMM7a,XMM7b );
//----------SOURCE BLOCK-------------------------------------------------------
// This is a block of C++ code which provides values, functions, and
// definitions necessary in the rest of the architecture description
source_hpp %{
// Must be visible to the DFA in dfa_x86_32.cpp
extern bool is_operand_hi32_zero(Node* n);
%}

source %{
#define RELOC_IMM32 Assembler::imm_operand
#define RELOC_DISP32 Assembler::disp32_operand

@@ -1485,6 +1490,21 @@ const RegMask Matcher::method_handle_invoke_SP_save_mask() {
return EBP_REG_mask;
}

// Returns true if the high 32 bits of the value is known to be zero.
bool is_operand_hi32_zero(Node* n) {
int opc = n->Opcode();
if (opc == Op_LoadUI2L) {
return true;
}
if (opc == Op_AndL) {
Node* o2 = n->in(2);
if (o2->is_Con() && (o2->get_long() & 0xFFFFFFFF00000000LL) == 0LL) {
return true;
}
}
return false;
}

%}

//----------ENCODING BLOCK-----------------------------------------------------

@@ -8599,6 +8619,63 @@ instruct mulL_eReg(eADXRegL dst, eRegL src, eRegI tmp, eFlagsReg cr) %{
ins_pipe( pipe_slow );
%}

// Multiply Register Long where the left operand's high 32 bits are zero
instruct mulL_eReg_lhi0(eADXRegL dst, eRegL src, eRegI tmp, eFlagsReg cr) %{
predicate(is_operand_hi32_zero(n->in(1)));
match(Set dst (MulL dst src));
effect(KILL cr, TEMP tmp);
ins_cost(2*100+2*400);
// Basic idea: lo(result) = lo(x_lo * y_lo)
// hi(result) = hi(x_lo * y_lo) + lo(x_lo * y_hi) where lo(x_hi * y_lo) = 0 because x_hi = 0
format %{ "MOV $tmp,$src.hi\n\t"
"IMUL $tmp,EAX\n\t"
"MUL EDX:EAX,$src.lo\n\t"
"ADD EDX,$tmp" %}
ins_encode %{
__ movl($tmp$$Register, HIGH_FROM_LOW($src$$Register));
__ imull($tmp$$Register, rax);
__ mull($src$$Register);
__ addl(rdx, $tmp$$Register);
%}
ins_pipe( pipe_slow );
%}

// Multiply Register Long where the right operand's high 32 bits are zero
instruct mulL_eReg_rhi0(eADXRegL dst, eRegL src, eRegI tmp, eFlagsReg cr) %{
predicate(is_operand_hi32_zero(n->in(2)));
match(Set dst (MulL dst src));
effect(KILL cr, TEMP tmp);
ins_cost(2*100+2*400);
// Basic idea: lo(result) = lo(x_lo * y_lo)
// hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) where lo(x_lo * y_hi) = 0 because y_hi = 0
format %{ "MOV $tmp,$src.lo\n\t"
"IMUL $tmp,EDX\n\t"
"MUL EDX:EAX,$src.lo\n\t"
"ADD EDX,$tmp" %}
ins_encode %{
__ movl($tmp$$Register, $src$$Register);
__ imull($tmp$$Register, rdx);
__ mull($src$$Register);
__ addl(rdx, $tmp$$Register);
%}
ins_pipe( pipe_slow );
%}

// Multiply Register Long where the left and the right operands' high 32 bits are zero
instruct mulL_eReg_hi0(eADXRegL dst, eRegL src, eFlagsReg cr) %{
predicate(is_operand_hi32_zero(n->in(1)) && is_operand_hi32_zero(n->in(2)));
match(Set dst (MulL dst src));
effect(KILL cr);
ins_cost(1*400);
// Basic idea: lo(result) = lo(x_lo * y_lo)
// hi(result) = hi(x_lo * y_lo) where lo(x_hi * y_lo) = 0 and lo(x_lo * y_hi) = 0 because x_hi = 0 and y_hi = 0
format %{ "MUL EDX:EAX,$src.lo\n\t" %}
ins_encode %{
__ mull($src$$Register);
%}
ins_pipe( pipe_slow );
%}

// Multiply Register Long by small constant
instruct mulL_eReg_con(eADXRegL dst, immL_127 src, eRegI tmp, eFlagsReg cr) %{
match(Set dst (MulL dst src));
@@ -1,6 +1,6 @@
/*
* Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2007, 2008, 2009 Red Hat, Inc.
* Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -145,7 +145,7 @@ void CppInterpreter::main_loop(int recurse, TRAPS) {
}
else if (istate->msg() == BytecodeInterpreter::return_from_method) {
// Copy the result into the caller's frame
result_slots = type2size[method->result_type()];
result_slots = type2size[result_type_of(method)];
assert(result_slots >= 0 && result_slots <= 2, "what?");
result = istate->stack() + result_slots;
break;

@@ -394,9 +394,10 @@ void CppInterpreter::native_entry(methodOop method, intptr_t UNUSED, TRAPS) {

// Push our result
if (!HAS_PENDING_EXCEPTION) {
stack->set_sp(stack->sp() - type2size[method->result_type()]);
BasicType type = result_type_of(method);
stack->set_sp(stack->sp() - type2size[type]);

switch (method->result_type()) {
switch (type) {
case T_VOID:
break;

@@ -707,6 +708,26 @@ int AbstractInterpreter::BasicType_as_index(BasicType type) {
return i;
}

BasicType CppInterpreter::result_type_of(methodOop method) {
BasicType t;
switch (method->result_index()) {
case 0 : t = T_BOOLEAN; break;
case 1 : t = T_CHAR; break;
case 2 : t = T_BYTE; break;
case 3 : t = T_SHORT; break;
case 4 : t = T_INT; break;
case 5 : t = T_LONG; break;
case 6 : t = T_VOID; break;
case 7 : t = T_FLOAT; break;
case 8 : t = T_DOUBLE; break;
case 9 : t = T_OBJECT; break;
default: ShouldNotReachHere();
}
assert(AbstractInterpreter::BasicType_as_index(t) == method->result_index(),
"out of step with AbstractInterpreter::BasicType_as_index");
return t;
}

address InterpreterGenerator::generate_empty_entry() {
if (!UseFastEmptyMethods)
return NULL;
@ -1,6 +1,6 @@
/*
 * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
 * Copyright 2007, 2008 Red Hat, Inc.
 * Copyright 2007, 2008, 2010 Red Hat, Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -41,3 +41,7 @@
 private:
  // Stack overflow checks
  static bool stack_overflow_imminent(JavaThread *thread);

 private:
  // Fast result type determination
  static BasicType result_type_of(methodOop method);

@ -1,6 +1,6 @@
/*
 * Copyright 2000-2006 Sun Microsystems, Inc. All Rights Reserved.
 * Copyright 2007, 2008, 2009 Red Hat, Inc.
 * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -40,7 +40,7 @@ define_pd_global(intx, PreInflateSpin, 10);

define_pd_global(intx, StackYellowPages, 2);
define_pd_global(intx, StackRedPages, 1);
define_pd_global(intx, StackShadowPages, 3 LP64_ONLY(+3) DEBUG_ONLY(+3));
define_pd_global(intx, StackShadowPages, 5 LP64_ONLY(+1) DEBUG_ONLY(+3));

define_pd_global(bool, RewriteBytecodes, true);
define_pd_global(bool, RewriteFrequentPairs, true);

@ -1,6 +1,6 @@
/*
 * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
 * Copyright 2007, 2008 Red Hat, Inc.
 * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -47,6 +47,10 @@ address InterpreterGenerator::generate_method_handle_entry() {
  return ShouldNotCallThisEntry();
}

bool AbstractInterpreter::can_be_compiled(methodHandle m) {
  return true;
}

int AbstractInterpreter::size_activation(methodOop method,
                                         int tempcount,
                                         int popframe_extra_args,

@ -1,6 +1,6 @@
/*
 * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
 * Copyright 2007, 2008, 2009 Red Hat, Inc.
 * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -47,8 +47,10 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(
                        int total_args_passed,
                        int comp_args_on_stack,
                        const BasicType *sig_bt,
                        const VMRegPair *regs) {
  return new AdapterHandlerEntry(
                        const VMRegPair *regs,
                        AdapterFingerPrint *fingerprint) {
  return AdapterHandlerLibrary::new_entry(
    fingerprint,
    ShouldNotCallThisStub(),
    ShouldNotCallThisStub(),
    ShouldNotCallThisStub());

@ -1,5 +1,5 @@
/*
 * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
 * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -142,6 +142,9 @@ void os::run_periodic_checks() {
}

#ifndef _WIN64
// previous UnhandledExceptionFilter, if there is one
static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;

LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
#endif
void os::init_system_properties_values() {
@ -260,7 +263,8 @@ void os::init_system_properties_values() {
  }

#ifndef _WIN64
  SetUnhandledExceptionFilter(Handle_FLT_Exception);
  // set our UnhandledExceptionFilter and save any previous one
  prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
#endif

  // Done
@ -1969,7 +1973,7 @@ LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
#ifndef _WIN64
//-----------------------------------------------------------------------------
LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
  // handle exception caused by native mothod modifying control word
  // handle exception caused by native method modifying control word
  PCONTEXT ctx = exceptionInfo->ContextRecord;
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;

@ -1990,6 +1994,13 @@ LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
      return EXCEPTION_CONTINUE_EXECUTION;
    }
  }

  if (prev_uef_handler != NULL) {
    // We didn't handle this exception so pass it to the previous
    // UnhandledExceptionFilter.
    return (prev_uef_handler)(exceptionInfo);
  }

  return EXCEPTION_CONTINUE_SEARCH;
}
#else //_WIN64
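The hunk above makes the VM remember whatever top-level exception filter was installed before Handle_FLT_Exception and fall back to it when the floating-point fault is not one it handles itself. Below is a standalone Win32 sketch of that chaining pattern, offered only as an illustration; the names are invented and nothing in it is HotSpot code.

#include <windows.h>

static LPTOP_LEVEL_EXCEPTION_FILTER prev_filter = NULL;

static LONG WINAPI my_filter(EXCEPTION_POINTERS* info) {
  if (info->ExceptionRecord->ExceptionCode == EXCEPTION_FLT_INVALID_OPERATION) {
    // ... repair the FPU control word here before resuming ...
    return EXCEPTION_CONTINUE_EXECUTION;
  }
  if (prev_filter != NULL) {
    return prev_filter(info);        // defer to whoever was installed before us
  }
  return EXCEPTION_CONTINUE_SEARCH;  // otherwise let default handling take over
}

int main() {
  // SetUnhandledExceptionFilter returns the previously installed filter.
  prev_filter = SetUnhandledExceptionFilter(my_filter);
  return 0;
}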
@ -1,6 +1,6 @@
/*
 * Copyright 2003-2004 Sun Microsystems, Inc. All Rights Reserved.
 * Copyright 2007, 2008 Red Hat, Inc.
 * Copyright 2007, 2008, 2010 Red Hat, Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -39,7 +39,13 @@
                  "stfd %0, 0(%2)\n"
                  : "=f"(tmp)
                  : "b"(src), "b"(dst));
#elif defined(S390) && !defined(_LP64)
  double tmp;
  asm volatile ("ld %0, 0(%1)\n"
                "std %0, 0(%2)\n"
                : "=r"(tmp)
                : "a"(src), "a"(dst));
#else
  *(jlong *) dst = *(jlong *) src;
#endif // PPC && !_LP64
#endif
}

@ -1,5 +1,5 @@
/*
 * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
 * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -730,11 +730,12 @@ void os::print_context(outputStream *st, void *context) {
  st->print(", RSI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RSI]);
  st->print(", RDI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RDI]);
  st->cr();
  st->print(", R8=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R8]);
  st->print( "R8=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R8]);
  st->print(", R9=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R9]);
  st->print(", R10=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R10]);
  st->print(", R11=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R11]);
  st->print(", R12=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R12]);
  st->cr();
  st->print( "R12=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R12]);
  st->print(", R13=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R13]);
  st->print(", R14=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R14]);
  st->print(", R15=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R15]);

@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 1998-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -1496,7 +1496,7 @@ void ArchDesc::defineExpand(FILE *fp, InstructForm *node) {
|
||||
unsigned i;
|
||||
|
||||
// Generate Expand function header
|
||||
fprintf(fp,"MachNode *%sNode::Expand(State *state, Node_List &proj_list) {\n", node->_ident);
|
||||
fprintf(fp,"MachNode *%sNode::Expand(State *state, Node_List &proj_list, Node* mem) {\n", node->_ident);
|
||||
fprintf(fp,"Compile* C = Compile::current();\n");
|
||||
// Generate expand code
|
||||
if( node->expands() ) {
|
||||
@ -1546,15 +1546,16 @@ void ArchDesc::defineExpand(FILE *fp, InstructForm *node) {
|
||||
// Build a mapping from operand index to input edges
|
||||
fprintf(fp," unsigned idx0 = oper_input_base();\n");
|
||||
|
||||
// The order in which inputs are added to a node is very
|
||||
// The order in which the memory input is added to a node is very
|
||||
// strange. Store nodes get a memory input before Expand is
|
||||
// called and all other nodes get it afterwards so
|
||||
// oper_input_base is wrong during expansion. This code adjusts
|
||||
// is so that expansion will work correctly.
|
||||
bool missing_memory_edge = node->_matrule->needs_ideal_memory_edge(_globalNames) &&
|
||||
node->is_ideal_store() == Form::none;
|
||||
if (missing_memory_edge) {
|
||||
fprintf(fp," idx0--; // Adjust base because memory edge hasn't been inserted yet\n");
|
||||
// called and other nodes get it afterwards or before depending on
|
||||
// match order so oper_input_base is wrong during expansion. This
|
||||
// code adjusts it so that expansion will work correctly.
|
||||
int has_memory_edge = node->_matrule->needs_ideal_memory_edge(_globalNames);
|
||||
if (has_memory_edge) {
|
||||
fprintf(fp," if (mem == (Node*)1) {\n");
|
||||
fprintf(fp," idx0--; // Adjust base because memory edge hasn't been inserted yet\n");
|
||||
fprintf(fp," }\n");
|
||||
}
|
||||
|
||||
for( i = 0; i < node->num_opnds(); i++ ) {
|
||||
@ -1611,9 +1612,11 @@ void ArchDesc::defineExpand(FILE *fp, InstructForm *node) {
|
||||
int node_mem_op = node->memory_operand(_globalNames);
|
||||
assert( node_mem_op != InstructForm::NO_MEMORY_OPERAND,
|
||||
"expand rule member needs memory but top-level inst doesn't have any" );
|
||||
if (!missing_memory_edge) {
|
||||
if (has_memory_edge) {
|
||||
// Copy memory edge
|
||||
fprintf(fp," n%d->add_req(_in[1]);\t// Add memory edge\n", cnt);
|
||||
fprintf(fp," if (mem != (Node*)1) {\n");
|
||||
fprintf(fp," n%d->add_req(_in[1]);\t// Add memory edge\n", cnt);
|
||||
fprintf(fp," }\n");
|
||||
}
|
||||
}
|
||||
|
||||
@ -1689,7 +1692,7 @@ void ArchDesc::defineExpand(FILE *fp, InstructForm *node) {
|
||||
} // done iterating over a new instruction's operands
|
||||
|
||||
// Invoke Expand() for the newly created instruction.
|
||||
fprintf(fp," result = n%d->Expand( state, proj_list );\n", cnt);
|
||||
fprintf(fp," result = n%d->Expand( state, proj_list, mem );\n", cnt);
|
||||
assert( !new_inst->expands(), "Do not have complete support for recursive expansion");
|
||||
} // done iterating over new instructions
|
||||
fprintf(fp,"\n");
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 1998-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -1754,7 +1754,7 @@ void ArchDesc::declareClasses(FILE *fp) {
|
||||
instr->has_temps() ||
|
||||
instr->_matrule != NULL &&
|
||||
instr->num_opnds() != instr->num_unique_opnds() ) {
|
||||
fprintf(fp," virtual MachNode *Expand(State *state, Node_List &proj_list);\n");
|
||||
fprintf(fp," virtual MachNode *Expand(State *state, Node_List &proj_list, Node* mem);\n");
|
||||
}
|
||||
|
||||
if (instr->is_pinned(_globalNames)) {
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -39,6 +39,7 @@ public:
|
||||
Dtrace_trap = OSR_Entry, // dtrace probes can never have an OSR entry so reuse it
|
||||
Exceptions, // Offset where exception handler lives
|
||||
Deopt, // Offset where deopt handler lives
|
||||
DeoptMH, // Offset where MethodHandle deopt handler lives
|
||||
max_Entries };
|
||||
|
||||
// special value to note codeBlobs where profile (forte) stack walking is
|
||||
@ -51,12 +52,13 @@ private:
|
||||
|
||||
public:
|
||||
CodeOffsets() {
|
||||
_values[Entry] = 0;
|
||||
_values[Entry ] = 0;
|
||||
_values[Verified_Entry] = 0;
|
||||
_values[Frame_Complete] = frame_never_safe;
|
||||
_values[OSR_Entry] = 0;
|
||||
_values[Exceptions] = -1;
|
||||
_values[Deopt] = -1;
|
||||
_values[OSR_Entry ] = 0;
|
||||
_values[Exceptions ] = -1;
|
||||
_values[Deopt ] = -1;
|
||||
_values[DeoptMH ] = -1;
|
||||
}
|
||||
|
||||
int value(Entries e) { return _values[e]; }
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -205,6 +205,8 @@ void Compilation::emit_lir() {
|
||||
void Compilation::emit_code_epilog(LIR_Assembler* assembler) {
|
||||
CHECK_BAILOUT();
|
||||
|
||||
CodeOffsets* code_offsets = assembler->offsets();
|
||||
|
||||
// generate code or slow cases
|
||||
assembler->emit_slow_case_stubs();
|
||||
CHECK_BAILOUT();
|
||||
@ -213,10 +215,18 @@ void Compilation::emit_code_epilog(LIR_Assembler* assembler) {
|
||||
assembler->emit_exception_entries(exception_info_list());
|
||||
CHECK_BAILOUT();
|
||||
|
||||
// generate code for exception handler
|
||||
assembler->emit_exception_handler();
|
||||
// Generate code for exception handler.
|
||||
code_offsets->set_value(CodeOffsets::Exceptions, assembler->emit_exception_handler());
|
||||
CHECK_BAILOUT();
|
||||
assembler->emit_deopt_handler();
|
||||
|
||||
// Generate code for deopt handler.
|
||||
code_offsets->set_value(CodeOffsets::Deopt, assembler->emit_deopt_handler());
|
||||
CHECK_BAILOUT();
|
||||
|
||||
// Generate code for MethodHandle deopt handler. We can use the
|
||||
// same code as for the normal deopt handler, we just need a
|
||||
// different entry point address.
|
||||
code_offsets->set_value(CodeOffsets::DeoptMH, assembler->emit_deopt_handler());
|
||||
CHECK_BAILOUT();
|
||||
|
||||
// done
|
||||
|
@ -253,7 +253,8 @@ class IRScopeDebugInfo: public CompilationResourceObj {
|
||||
// reexecute allowed only for the topmost frame
|
||||
bool reexecute = topmost ? should_reexecute() : false;
|
||||
bool is_method_handle_invoke = false;
|
||||
recorder->describe_scope(pc_offset, scope()->method(), bci(), reexecute, is_method_handle_invoke, locvals, expvals, monvals);
|
||||
bool return_oop = false; // This flag will be ignored since it used only for C2 with escape analysis.
|
||||
recorder->describe_scope(pc_offset, scope()->method(), bci(), reexecute, is_method_handle_invoke, return_oop, locvals, expvals, monvals);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -133,9 +133,9 @@ class LIR_Assembler: public CompilationResourceObj {
|
||||
void add_call_info_here(CodeEmitInfo* info) { add_call_info(code_offset(), info); }
|
||||
|
||||
// code patterns
|
||||
void emit_exception_handler();
|
||||
int emit_exception_handler();
|
||||
void emit_exception_entries(ExceptionInfoList* info_list);
|
||||
void emit_deopt_handler();
|
||||
int emit_deopt_handler();
|
||||
|
||||
void emit_code(BlockList* hir);
|
||||
void emit_block(BlockBegin* block);
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2005-2009 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 2005-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -1765,7 +1765,7 @@ void LIRGenerator::do_Throw(Throw* x) {
|
||||
__ null_check(exception_opr, new CodeEmitInfo(info, true));
|
||||
}
|
||||
|
||||
if (compilation()->env()->jvmti_can_post_exceptions() &&
|
||||
if (compilation()->env()->jvmti_can_post_on_exceptions() &&
|
||||
!block()->is_set(BlockBegin::default_exception_handler_flag)) {
|
||||
// we need to go through the exception lookup path to get JVMTI
|
||||
// notification done
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -110,8 +110,8 @@ static void deopt_caller() {
|
||||
RegisterMap reg_map(thread, false);
|
||||
frame runtime_frame = thread->last_frame();
|
||||
frame caller_frame = runtime_frame.sender(®_map);
|
||||
VM_DeoptimizeFrame deopt(thread, caller_frame.id());
|
||||
VMThread::execute(&deopt);
|
||||
// bypass VM_DeoptimizeFrame and deoptimize the frame directly
|
||||
Deoptimization::deoptimize_frame(thread, caller_frame.id());
|
||||
assert(caller_is_deopted(), "Must be deoptimized");
|
||||
}
|
||||
}
|
||||
@ -354,7 +354,7 @@ JRT_END
|
||||
|
||||
|
||||
JRT_ENTRY(void, Runtime1::post_jvmti_exception_throw(JavaThread* thread))
|
||||
if (JvmtiExport::can_post_exceptions()) {
|
||||
if (JvmtiExport::can_post_on_exceptions()) {
|
||||
vframeStream vfst(thread, true);
|
||||
address bcp = vfst.method()->bcp_from(vfst.bci());
|
||||
JvmtiExport::post_exception_throw(thread, vfst.method(), bcp, thread->exception_oop());
|
||||
@ -437,7 +437,7 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* t
|
||||
bool guard_pages_enabled = thread->stack_yellow_zone_enabled();
|
||||
if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();
|
||||
|
||||
if (JvmtiExport::can_post_exceptions()) {
|
||||
if (JvmtiExport::can_post_on_exceptions()) {
|
||||
// To ensure correct notification of exception catches and throws
|
||||
// we have to deoptimize here. If we attempted to notify the
|
||||
// catches and throws during this exception lookup it's possible
|
||||
@ -1075,6 +1075,7 @@ enum {
|
||||
};
|
||||
|
||||
|
||||
// Below length is the # elements copied.
|
||||
template <class T> int obj_arraycopy_work(oopDesc* src, T* src_addr,
|
||||
oopDesc* dst, T* dst_addr,
|
||||
int length) {
|
||||
@ -1083,22 +1084,22 @@ template <class T> int obj_arraycopy_work(oopDesc* src, T* src_addr,
|
||||
// barrier. The assert will fail if this is not the case.
|
||||
// Note that we use the non-virtual inlineable variant of write_ref_array.
|
||||
BarrierSet* bs = Universe::heap()->barrier_set();
|
||||
assert(bs->has_write_ref_array_opt(),
|
||||
"Barrier set must have ref array opt");
|
||||
assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
|
||||
assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
|
||||
if (src == dst) {
|
||||
// same object, no check
|
||||
bs->write_ref_array_pre(dst_addr, length);
|
||||
Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
|
||||
bs->write_ref_array(MemRegion((HeapWord*)dst_addr,
|
||||
(HeapWord*)(dst_addr + length)));
|
||||
bs->write_ref_array((HeapWord*)dst_addr, length);
|
||||
return ac_ok;
|
||||
} else {
|
||||
klassOop bound = objArrayKlass::cast(dst->klass())->element_klass();
|
||||
klassOop stype = objArrayKlass::cast(src->klass())->element_klass();
|
||||
if (stype == bound || Klass::cast(stype)->is_subtype_of(bound)) {
|
||||
// Elements are guaranteed to be subtypes, so no check necessary
|
||||
bs->write_ref_array_pre(dst_addr, length);
|
||||
Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
|
||||
bs->write_ref_array(MemRegion((HeapWord*)dst_addr,
|
||||
(HeapWord*)(dst_addr + length)));
|
||||
bs->write_ref_array((HeapWord*)dst_addr, length);
|
||||
return ac_ok;
|
||||
}
|
||||
}
|
||||
@ -1162,9 +1163,16 @@ JRT_LEAF(void, Runtime1::oop_arraycopy(HeapWord* src, HeapWord* dst, int num))
|
||||
#endif
|
||||
|
||||
if (num == 0) return;
|
||||
Copy::conjoint_oops_atomic((oop*) src, (oop*) dst, num);
|
||||
BarrierSet* bs = Universe::heap()->barrier_set();
|
||||
bs->write_ref_array(MemRegion(dst, dst + num));
|
||||
assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
|
||||
assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
|
||||
if (UseCompressedOops) {
|
||||
bs->write_ref_array_pre((narrowOop*)dst, num);
|
||||
} else {
|
||||
bs->write_ref_array_pre((oop*)dst, num);
|
||||
}
|
||||
Copy::conjoint_oops_atomic((oop*) src, (oop*) dst, num);
|
||||
bs->write_ref_array(dst, num);
|
||||
JRT_END
|
||||
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -178,7 +178,7 @@ void ciEnv::cache_jvmti_state() {
|
||||
_jvmti_can_hotswap_or_post_breakpoint = JvmtiExport::can_hotswap_or_post_breakpoint();
|
||||
_jvmti_can_examine_or_deopt_anywhere = JvmtiExport::can_examine_or_deopt_anywhere();
|
||||
_jvmti_can_access_local_variables = JvmtiExport::can_access_local_variables();
|
||||
_jvmti_can_post_exceptions = JvmtiExport::can_post_exceptions();
|
||||
_jvmti_can_post_on_exceptions = JvmtiExport::can_post_on_exceptions();
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
@ -891,8 +891,8 @@ void ciEnv::register_method(ciMethod* target,
|
||||
JvmtiExport::can_examine_or_deopt_anywhere()) ||
|
||||
(!jvmti_can_access_local_variables() &&
|
||||
JvmtiExport::can_access_local_variables()) ||
|
||||
(!jvmti_can_post_exceptions() &&
|
||||
JvmtiExport::can_post_exceptions()) )) {
|
||||
(!jvmti_can_post_on_exceptions() &&
|
||||
JvmtiExport::can_post_on_exceptions()) )) {
|
||||
record_failure("Jvmti state change invalidated dependencies");
|
||||
}
|
||||
|
||||
@ -962,18 +962,10 @@ void ciEnv::register_method(ciMethod* target,
|
||||
if (nm == NULL) {
|
||||
// The CodeCache is full. Print out warning and disable compilation.
|
||||
record_failure("code cache is full");
|
||||
UseInterpreter = true;
|
||||
if (UseCompiler || AlwaysCompileLoopMethods ) {
|
||||
#ifndef PRODUCT
|
||||
warning("CodeCache is full. Compiler has been disabled");
|
||||
if (CompileTheWorld || ExitOnFullCodeCache) {
|
||||
before_exit(JavaThread::current());
|
||||
exit_globals(); // will delete tty
|
||||
vm_direct_exit(CompileTheWorld ? 0 : 1);
|
||||
}
|
||||
#endif
|
||||
UseCompiler = false;
|
||||
AlwaysCompileLoopMethods = false;
|
||||
{
|
||||
MutexUnlocker ml(Compile_lock);
|
||||
MutexUnlocker locker(MethodCompileQueue_lock);
|
||||
CompileBroker::handle_full_code_cache();
|
||||
}
|
||||
} else {
|
||||
NOT_PRODUCT(nm->set_has_debug_info(has_debug_info); )
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -57,7 +57,7 @@ private:
|
||||
bool _jvmti_can_hotswap_or_post_breakpoint;
|
||||
bool _jvmti_can_examine_or_deopt_anywhere;
|
||||
bool _jvmti_can_access_local_variables;
|
||||
bool _jvmti_can_post_exceptions;
|
||||
bool _jvmti_can_post_on_exceptions;
|
||||
|
||||
// Cache DTrace flags
|
||||
bool _dtrace_extended_probes;
|
||||
@ -259,7 +259,7 @@ public:
|
||||
bool jvmti_can_hotswap_or_post_breakpoint() const { return _jvmti_can_hotswap_or_post_breakpoint; }
|
||||
bool jvmti_can_examine_or_deopt_anywhere() const { return _jvmti_can_examine_or_deopt_anywhere; }
|
||||
bool jvmti_can_access_local_variables() const { return _jvmti_can_access_local_variables; }
|
||||
bool jvmti_can_post_exceptions() const { return _jvmti_can_post_exceptions; }
|
||||
bool jvmti_can_post_on_exceptions() const { return _jvmti_can_post_on_exceptions; }
|
||||
|
||||
// Cache DTrace flags
|
||||
void cache_dtrace_flags();
|
||||
|
@ -436,15 +436,21 @@ ciCallProfile ciMethod::call_profile_at_bci(int bci) {
|
||||
// we will set result._method also.
|
||||
}
|
||||
// Determine call site's morphism.
|
||||
// The call site count could be == (receivers_count_total + 1)
|
||||
// not only in the case of a polymorphic call but also in the case
|
||||
// when a method data snapshot is taken after the site count was updated
|
||||
// but before receivers counters were updated.
|
||||
if (morphism == result._limit) {
|
||||
// There were no array klasses and morphism <= MorphismLimit.
|
||||
if (morphism < ciCallProfile::MorphismLimit ||
|
||||
morphism == ciCallProfile::MorphismLimit &&
|
||||
(receivers_count_total+1) >= count) {
|
||||
// The call site count is 0 with known morphism (onlt 1 or 2 receivers)
|
||||
// or < 0 in the case of a type check failured for checkcast, aastore, instanceof.
|
||||
// The call site count is > 0 in the case of a polymorphic virtual call.
|
||||
if (morphism > 0 && morphism == result._limit) {
|
||||
// The morphism <= MorphismLimit.
|
||||
if ((morphism < ciCallProfile::MorphismLimit) ||
|
||||
(morphism == ciCallProfile::MorphismLimit && count == 0)) {
|
||||
#ifdef ASSERT
|
||||
if (count > 0) {
|
||||
this->print_short_name(tty);
|
||||
tty->print_cr(" @ bci:%d", bci);
|
||||
this->print_codes();
|
||||
assert(false, "this call site should not be polymorphic");
|
||||
}
|
||||
#endif
|
||||
result._morphism = morphism;
|
||||
}
|
||||
}
|
||||
@ -452,10 +458,8 @@ ciCallProfile ciMethod::call_profile_at_bci(int bci) {
|
||||
// zero or less, presume that this is a typecheck profile and
|
||||
// do nothing. Otherwise, increase count to be the sum of all
|
||||
// receiver's counts.
|
||||
if (count > 0) {
|
||||
if (count < receivers_count_total) {
|
||||
count = receivers_count_total;
|
||||
}
|
||||
if (count >= 0) {
|
||||
count += receivers_count_total;
|
||||
}
|
||||
}
|
||||
result._count = count;
|
||||
|
@ -1249,6 +1249,7 @@ void ClassLoader::compile_the_world() {
|
||||
}
|
||||
|
||||
int ClassLoader::_compile_the_world_counter = 0;
|
||||
static int _codecache_sweep_counter = 0;
|
||||
|
||||
void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
|
||||
int len = (int)strlen(name);
|
||||
@ -1293,6 +1294,13 @@ void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
|
||||
for (int n = 0; n < k->methods()->length(); n++) {
|
||||
methodHandle m (THREAD, methodOop(k->methods()->obj_at(n)));
|
||||
if (CompilationPolicy::canBeCompiled(m)) {
|
||||
|
||||
if (++_codecache_sweep_counter == CompileTheWorldSafepointInterval) {
|
||||
// Give sweeper a chance to keep up with CTW
|
||||
VM_ForceSafepoint op;
|
||||
VMThread::execute(&op);
|
||||
_codecache_sweep_counter = 0;
|
||||
}
|
||||
// Force compilation
|
||||
CompileBroker::compile_method(m, InvocationEntryBci,
|
||||
methodHandle(), 0, "CTW", THREAD);
|
||||
|
@ -1121,10 +1121,23 @@ class BacktraceBuilder: public StackObj {
|
||||
}
|
||||
|
||||
void flush() {
|
||||
// The following appears to have been an optimization to save from
|
||||
// doing a barrier for each individual store into the _methods array,
|
||||
// but rather to do it for the entire array after the series of writes.
|
||||
// That optimization seems to have been lost when compressed oops was
|
||||
// implemented. However, the extra card-marks below was left in place,
|
||||
// but is now redundant because the individual stores into the
|
||||
// _methods array already execute the barrier code. CR 6918185 has
|
||||
// been filed so the original code may be restored by deferring the
|
||||
// barriers until after the entire sequence of stores, thus re-enabling
|
||||
// the intent of the original optimization. In the meantime the redundant
|
||||
// card mark below is now disabled.
|
||||
if (_dirty && _methods != NULL) {
|
||||
#if 0
|
||||
BarrierSet* bs = Universe::heap()->barrier_set();
|
||||
assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
|
||||
bs->write_ref_array((HeapWord*)_methods->base(), _methods->length());
|
||||
#endif
|
||||
_dirty = false;
|
||||
}
|
||||
}
|
||||
@ -1168,9 +1181,7 @@ class BacktraceBuilder: public StackObj {
|
||||
method = mhandle();
|
||||
}
|
||||
|
||||
_methods->obj_at_put(_index, method);
|
||||
// bad for UseCompressedOops
|
||||
// *_methods->obj_at_addr(_index) = method;
|
||||
_methods->obj_at_put(_index, method);
|
||||
_bcis->ushort_at_put(_index, bci);
|
||||
_index++;
|
||||
_dirty = true;
|
||||
|
@ -457,7 +457,8 @@ void LoaderConstraintTable::merge_loader_constraints(
|
||||
}
|
||||
|
||||
|
||||
void LoaderConstraintTable::verify(Dictionary* dictionary) {
|
||||
void LoaderConstraintTable::verify(Dictionary* dictionary,
|
||||
PlaceholderTable* placeholders) {
|
||||
Thread *thread = Thread::current();
|
||||
for (int cindex = 0; cindex < _loader_constraint_size; cindex++) {
|
||||
for (LoaderConstraintEntry* probe = bucket(cindex);
|
||||
@ -472,7 +473,23 @@ void LoaderConstraintTable::verify(Dictionary* dictionary) {
|
||||
unsigned int d_hash = dictionary->compute_hash(name, loader);
|
||||
int d_index = dictionary->hash_to_index(d_hash);
|
||||
klassOop k = dictionary->find_class(d_index, d_hash, name, loader);
|
||||
guarantee(k == probe->klass(), "klass should be in dictionary");
|
||||
if (k != NULL) {
|
||||
// We found the class in the system dictionary, so we should
|
||||
// make sure that the klassOop matches what we already have.
|
||||
guarantee(k == probe->klass(), "klass should be in dictionary");
|
||||
} else {
|
||||
// If we don't find the class in the system dictionary, it
|
||||
// has to be in the placeholders table.
|
||||
unsigned int p_hash = placeholders->compute_hash(name, loader);
|
||||
int p_index = placeholders->hash_to_index(p_hash);
|
||||
PlaceholderEntry* entry = placeholders->get_entry(p_index, p_hash,
|
||||
name, loader);
|
||||
|
||||
// The instanceKlass might not be on the entry, so the only
|
||||
// thing we can check here is whether we were successful in
|
||||
// finding the class in the placeholders table.
|
||||
guarantee(entry != NULL, "klass should be in the placeholders");
|
||||
}
|
||||
}
|
||||
for (int n = 0; n< probe->num_loaders(); n++) {
|
||||
guarantee(probe->loader(n)->is_oop_or_null(), "should be oop");
|
||||
|
@ -84,7 +84,7 @@ public:
|
||||
|
||||
void purge_loader_constraints(BoolObjectClosure* is_alive);
|
||||
|
||||
void verify(Dictionary* dictionary);
|
||||
void verify(Dictionary* dictionary, PlaceholderTable* placeholders);
|
||||
#ifndef PRODUCT
|
||||
void print();
|
||||
#endif
|
||||
|
@ -2573,7 +2573,7 @@ void SystemDictionary::verify() {
|
||||
|
||||
// Verify constraint table
|
||||
guarantee(constraints() != NULL, "Verify of loader constraints failed");
|
||||
constraints()->verify(dictionary());
|
||||
constraints()->verify(dictionary(), placeholders());
|
||||
}
|
||||
|
||||
|
||||
|
@ -96,6 +96,7 @@ int CodeCache::_number_of_blobs = 0;
|
||||
int CodeCache::_number_of_nmethods_with_dependencies = 0;
|
||||
bool CodeCache::_needs_cache_clean = false;
|
||||
nmethod* CodeCache::_scavenge_root_nmethods = NULL;
|
||||
nmethod* CodeCache::_saved_nmethods = NULL;
|
||||
|
||||
|
||||
CodeBlob* CodeCache::first() {
|
||||
@ -395,6 +396,85 @@ void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
|
||||
}
|
||||
#endif //PRODUCT
|
||||
|
||||
|
||||
nmethod* CodeCache::find_and_remove_saved_code(methodOop m) {
|
||||
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
|
||||
nmethod* saved = _saved_nmethods;
|
||||
nmethod* prev = NULL;
|
||||
while (saved != NULL) {
|
||||
if (saved->is_in_use() && saved->method() == m) {
|
||||
if (prev != NULL) {
|
||||
prev->set_saved_nmethod_link(saved->saved_nmethod_link());
|
||||
} else {
|
||||
_saved_nmethods = saved->saved_nmethod_link();
|
||||
}
|
||||
assert(saved->is_speculatively_disconnected(), "shouldn't call for other nmethods");
|
||||
saved->set_speculatively_disconnected(false);
|
||||
saved->set_saved_nmethod_link(NULL);
|
||||
if (PrintMethodFlushing) {
|
||||
saved->print_on(tty, " ### nmethod is reconnected");
|
||||
}
|
||||
if (LogCompilation && (xtty != NULL)) {
|
||||
ttyLocker ttyl;
|
||||
xtty->begin_elem("nmethod_reconnected compile_id='%3d'", saved->compile_id());
|
||||
xtty->method(methodOop(m));
|
||||
xtty->stamp();
|
||||
xtty->end_elem();
|
||||
}
|
||||
return saved;
|
||||
}
|
||||
prev = saved;
|
||||
saved = saved->saved_nmethod_link();
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void CodeCache::remove_saved_code(nmethod* nm) {
|
||||
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
|
||||
assert(nm->is_speculatively_disconnected(), "shouldn't call for other nmethods");
|
||||
nmethod* saved = _saved_nmethods;
|
||||
nmethod* prev = NULL;
|
||||
while (saved != NULL) {
|
||||
if (saved == nm) {
|
||||
if (prev != NULL) {
|
||||
prev->set_saved_nmethod_link(saved->saved_nmethod_link());
|
||||
} else {
|
||||
_saved_nmethods = saved->saved_nmethod_link();
|
||||
}
|
||||
if (LogCompilation && (xtty != NULL)) {
|
||||
ttyLocker ttyl;
|
||||
xtty->begin_elem("nmethod_removed compile_id='%3d'", nm->compile_id());
|
||||
xtty->stamp();
|
||||
xtty->end_elem();
|
||||
}
|
||||
return;
|
||||
}
|
||||
prev = saved;
|
||||
saved = saved->saved_nmethod_link();
|
||||
}
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
|
||||
void CodeCache::speculatively_disconnect(nmethod* nm) {
|
||||
assert_locked_or_safepoint(CodeCache_lock);
|
||||
assert(nm->is_in_use() && !nm->is_speculatively_disconnected(), "should only disconnect live nmethods");
|
||||
nm->set_saved_nmethod_link(_saved_nmethods);
|
||||
_saved_nmethods = nm;
|
||||
if (PrintMethodFlushing) {
|
||||
nm->print_on(tty, " ### nmethod is speculatively disconnected");
|
||||
}
|
||||
if (LogCompilation && (xtty != NULL)) {
|
||||
ttyLocker ttyl;
|
||||
xtty->begin_elem("nmethod_disconnected compile_id='%3d'", nm->compile_id());
|
||||
xtty->method(methodOop(nm->method()));
|
||||
xtty->stamp();
|
||||
xtty->end_elem();
|
||||
}
|
||||
nm->method()->clear_code();
|
||||
nm->set_speculatively_disconnected(true);
|
||||
}
|
||||
|
||||
|
||||
void CodeCache::gc_prologue() {
|
||||
assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
|
||||
}
|
||||
|
@ -46,6 +46,7 @@ class CodeCache : AllStatic {
|
||||
static int _number_of_nmethods_with_dependencies;
|
||||
static bool _needs_cache_clean;
|
||||
static nmethod* _scavenge_root_nmethods; // linked via nm->scavenge_root_link()
|
||||
static nmethod* _saved_nmethods; // linked via nm->saved_nmethod_look()
|
||||
|
||||
static void verify_if_often() PRODUCT_RETURN;
|
||||
|
||||
@ -141,11 +142,16 @@ class CodeCache : AllStatic {
|
||||
static size_t capacity() { return _heap->capacity(); }
|
||||
static size_t max_capacity() { return _heap->max_capacity(); }
|
||||
static size_t unallocated_capacity() { return _heap->unallocated_capacity(); }
|
||||
static bool needs_flushing() { return unallocated_capacity() < CodeCacheFlushingMinimumFreeSpace; }
|
||||
|
||||
static bool needs_cache_clean() { return _needs_cache_clean; }
|
||||
static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; }
|
||||
static void clear_inline_caches(); // clear all inline caches
|
||||
|
||||
static nmethod* find_and_remove_saved_code(methodOop m);
|
||||
static void remove_saved_code(nmethod* nm);
|
||||
static void speculatively_disconnect(nmethod* nm);
|
||||
|
||||
// Deoptimization
|
||||
static int mark_for_deoptimization(DepChange& changes);
|
||||
#ifdef HOTSWAP
|
||||
|
@ -282,6 +282,7 @@ void DebugInformationRecorder::describe_scope(int pc_offset,
|
||||
int bci,
|
||||
bool reexecute,
|
||||
bool is_method_handle_invoke,
|
||||
bool return_oop,
|
||||
DebugToken* locals,
|
||||
DebugToken* expressions,
|
||||
DebugToken* monitors) {
|
||||
@ -296,6 +297,7 @@ void DebugInformationRecorder::describe_scope(int pc_offset,
|
||||
// Record flags into pcDesc.
|
||||
last_pd->set_should_reexecute(reexecute);
|
||||
last_pd->set_is_method_handle_invoke(is_method_handle_invoke);
|
||||
last_pd->set_return_oop(return_oop);
|
||||
|
||||
// serialize sender stream offest
|
||||
stream()->write_int(sender_stream_offset);
|
||||
|
@ -89,6 +89,7 @@ class DebugInformationRecorder: public ResourceObj {
|
||||
int bci,
|
||||
bool reexecute,
|
||||
bool is_method_handle_invoke = false,
|
||||
bool return_oop = false,
|
||||
DebugToken* locals = NULL,
|
||||
DebugToken* expressions = NULL,
|
||||
DebugToken* monitors = NULL);
|
||||
|
@ -843,13 +843,15 @@ static bool count_find_witness_calls() {
|
||||
if (occasional_print || final_stats) {
|
||||
// Every now and then dump a little info about dependency searching.
|
||||
if (xtty != NULL) {
|
||||
xtty->elem("deps_find_witness calls='%d' steps='%d' recursions='%d' singles='%d'",
|
||||
ttyLocker ttyl;
|
||||
xtty->elem("deps_find_witness calls='%d' steps='%d' recursions='%d' singles='%d'",
|
||||
deps_find_witness_calls,
|
||||
deps_find_witness_steps,
|
||||
deps_find_witness_recursions,
|
||||
deps_find_witness_singles);
|
||||
}
|
||||
if (final_stats || (TraceDependencies && WizardMode)) {
|
||||
ttyLocker ttyl;
|
||||
tty->print_cr("Dependency check (find_witness) "
|
||||
"calls=%d, steps=%d (avg=%.1f), recursions=%d, singles=%d",
|
||||
deps_find_witness_calls,
|
||||
|
115
hotspot/src/share/vm/code/jvmticmlr.h
Normal file
@ -0,0 +1,115 @@
/*
 * Copyright 2010 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation. Sun designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Sun in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 */

/*
 * This header file defines the data structures sent by the VM
 * through the JVMTI CompiledMethodLoad callback function via the
 * "void * compile_info" parameter. The memory pointed to by the
 * compile_info parameter may not be referenced after returning from
 * the CompiledMethodLoad callback. These are VM implementation
 * specific data structures that may evolve in future releases. A
 * JVMTI agent should interpret a non-NULL compile_info as a pointer
 * to a region of memory containing a list of records. In a typical
 * usage scenario, a JVMTI agent would cast each record to a
 * jvmtiCompiledMethodLoadRecordHeader, a struct that represents
 * arbitrary information. This struct contains a kind field to indicate
 * the kind of information being passed, and a pointer to the next
 * record. If the kind field indicates inlining information, then the
 * agent would cast the record to a jvmtiCompiledMethodLoadInlineRecord.
 * This record contains an array of PCStackInfo structs, which indicate
 * for every pc address what are the methods on the invocation stack.
 * The "methods" and "bcis" fields in each PCStackInfo struct specify a
 * 1-1 mapping between these inlined methods and their bytecode indices.
 * This can be used to derive the proper source lines of the inlined
 * methods.
 */

#ifndef _JVMTI_CMLR_H_
#define _JVMTI_CMLR_H_

enum {
    JVMTI_CMLR_MAJOR_VERSION_1 = 0x00000001,
    JVMTI_CMLR_MINOR_VERSION_0 = 0x00000000,

    JVMTI_CMLR_MAJOR_VERSION = 0x00000001,
    JVMTI_CMLR_MINOR_VERSION = 0x00000000

    /*
     * This comment is for the "JDK import from HotSpot" sanity check:
     * version: 1.0.0
     */
};

typedef enum {
    JVMTI_CMLR_DUMMY = 1,
    JVMTI_CMLR_INLINE_INFO = 2
} jvmtiCMLRKind;

/*
 * Record that represents arbitrary information passed through JVMTI
 * CompiledMethodLoadEvent void pointer.
 */
typedef struct _jvmtiCompiledMethodLoadRecordHeader {
  jvmtiCMLRKind kind;     /* id for the kind of info passed in the record */
  jint majorinfoversion;  /* major and minor info version values. Init'ed */
  jint minorinfoversion;  /* to current version value in jvmtiExport.cpp. */

  struct _jvmtiCompiledMethodLoadRecordHeader* next;
} jvmtiCompiledMethodLoadRecordHeader;

/*
 * Record that gives information about the methods on the compile-time
 * stack at a specific pc address of a compiled method. Each element in
 * the methods array maps to same element in the bcis array.
 */
typedef struct _PCStackInfo {
  void* pc;             /* the pc address for this compiled method */
  jint numstackframes;  /* number of methods on the stack */
  jmethodID* methods;   /* array of numstackframes method ids */
  jint* bcis;           /* array of numstackframes bytecode indices */
} PCStackInfo;

/*
 * Record that contains inlining information for each pc address of
 * an nmethod.
 */
typedef struct _jvmtiCompiledMethodLoadInlineRecord {
  jvmtiCompiledMethodLoadRecordHeader header;  /* common header for casting */
  jint numpcs;          /* number of pc descriptors in this nmethod */
  PCStackInfo* pcinfo;  /* array of numpcs pc descriptors */
} jvmtiCompiledMethodLoadInlineRecord;

/*
 * Dummy record used to test that we can pass records with different
 * information through the void pointer provided that they can be cast
 * to a jvmtiCompiledMethodLoadRecordHeader.
 */

typedef struct _jvmtiCompiledMethodLoadDummyRecord {
  jvmtiCompiledMethodLoadRecordHeader header;  /* common header for casting */
  char message[50];
} jvmtiCompiledMethodLoadDummyRecord;

#endif
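For reference, here is a minimal agent-side sketch of how the record list described in the header comment above might be walked from a CompiledMethodLoad callback. It is an assumption-laden illustration, not part of this commit; the callback name is invented, and it presumes jvmti.h and this new jvmticmlr.h are on the include path.

#include <stdio.h>
#include <jvmti.h>
#include "jvmticmlr.h"

/* Hypothetical callback: walk the list of records passed via compile_info and
 * report, for each pc in an inline-info record, how many frames were inlined. */
static void JNICALL
compiled_method_load(jvmtiEnv* jvmti, jmethodID method, jint code_size,
                     const void* code_addr, jint map_length,
                     const jvmtiAddrLocationMap* map, const void* compile_info) {
  const jvmtiCompiledMethodLoadRecordHeader* record =
      (const jvmtiCompiledMethodLoadRecordHeader*) compile_info;
  for (; record != NULL; record = record->next) {
    if (record->kind != JVMTI_CMLR_INLINE_INFO) continue;  /* e.g. dummy records */
    const jvmtiCompiledMethodLoadInlineRecord* inline_rec =
        (const jvmtiCompiledMethodLoadInlineRecord*) record;
    for (jint i = 0; i < inline_rec->numpcs; i++) {
      const PCStackInfo* info = &inline_rec->pcinfo[i];
      /* methods[j] and bcis[j] describe the j-th frame on the inlined stack. */
      printf("pc=%p inlined frames=%d\n", info->pc, (int) info->numstackframes);
    }
  }
}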
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -587,11 +587,13 @@ nmethod::nmethod(
|
||||
_osr_link = NULL;
|
||||
_scavenge_root_link = NULL;
|
||||
_scavenge_root_state = 0;
|
||||
_saved_nmethod_link = NULL;
|
||||
_compiler = NULL;
|
||||
// We have no exception handler or deopt handler make the
|
||||
// values something that will never match a pc like the nmethod vtable entry
|
||||
_exception_offset = 0;
|
||||
_deoptimize_offset = 0;
|
||||
_deoptimize_mh_offset = 0;
|
||||
_orig_pc_offset = 0;
|
||||
#ifdef HAVE_DTRACE_H
|
||||
_trap_offset = 0;
|
||||
@ -682,6 +684,7 @@ nmethod::nmethod(
|
||||
// values something that will never match a pc like the nmethod vtable entry
|
||||
_exception_offset = 0;
|
||||
_deoptimize_offset = 0;
|
||||
_deoptimize_mh_offset = 0;
|
||||
_trap_offset = offsets->value(CodeOffsets::Dtrace_trap);
|
||||
_orig_pc_offset = 0;
|
||||
_stub_offset = data_offset();
|
||||
@ -794,6 +797,7 @@ nmethod::nmethod(
|
||||
// Exception handler and deopt handler are in the stub section
|
||||
_exception_offset = _stub_offset + offsets->value(CodeOffsets::Exceptions);
|
||||
_deoptimize_offset = _stub_offset + offsets->value(CodeOffsets::Deopt);
|
||||
_deoptimize_mh_offset = _stub_offset + offsets->value(CodeOffsets::DeoptMH);
|
||||
_consts_offset = instructions_offset() + code_buffer->total_offset_of(code_buffer->consts()->start());
|
||||
_scopes_data_offset = data_offset();
|
||||
_scopes_pcs_offset = _scopes_data_offset + round_to(debug_info->data_size (), oopSize);
|
||||
@ -984,7 +988,8 @@ ScopeDesc* nmethod::scope_desc_at(address pc) {
|
||||
PcDesc* pd = pc_desc_at(pc);
|
||||
guarantee(pd != NULL, "scope must be present");
|
||||
return new ScopeDesc(this, pd->scope_decode_offset(),
|
||||
pd->obj_decode_offset(), pd->should_reexecute());
|
||||
pd->obj_decode_offset(), pd->should_reexecute(),
|
||||
pd->return_oop());
|
||||
}
|
||||
|
||||
|
||||
@ -1033,7 +1038,7 @@ void nmethod::cleanup_inline_caches() {
|
||||
if( cb != NULL && cb->is_nmethod() ) {
|
||||
nmethod* nm = (nmethod*)cb;
|
||||
// Clean inline caches pointing to both zombie and not_entrant methods
|
||||
if (!nm->is_in_use()) ic->set_to_clean();
|
||||
if (!nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean();
|
||||
}
|
||||
break;
|
||||
}
|
||||
@ -1043,7 +1048,7 @@ void nmethod::cleanup_inline_caches() {
|
||||
if( cb != NULL && cb->is_nmethod() ) {
|
||||
nmethod* nm = (nmethod*)cb;
|
||||
// Clean inline caches pointing to both zombie and not_entrant methods
|
||||
if (!nm->is_in_use()) csc->set_to_clean();
|
||||
if (!nm->is_in_use() || (nm->method()->code() != nm)) csc->set_to_clean();
|
||||
}
|
||||
break;
|
||||
}
|
||||
@ -1113,7 +1118,6 @@ void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
|
||||
if (_method->code() == this) {
|
||||
_method->clear_code(); // Break a cycle
|
||||
}
|
||||
inc_decompile_count(); // Last chance to make a mark on the MDO
|
||||
_method = NULL; // Clear the method of this dead nmethod
|
||||
}
|
||||
// Make the class unloaded - i.e., change state and notify sweeper
|
||||
@ -1173,15 +1177,17 @@ void nmethod::log_state_change() const {
|
||||
bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
|
||||
assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
|
||||
|
||||
// If the method is already zombie there is nothing to do
|
||||
if (is_zombie()) {
|
||||
return false;
|
||||
}
|
||||
bool was_alive = false;
|
||||
|
||||
// Make sure the nmethod is not flushed in case of a safepoint in code below.
|
||||
nmethodLocker nml(this);
|
||||
|
||||
{
|
||||
// If the method is already zombie there is nothing to do
|
||||
if (is_zombie()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// invalidate osr nmethod before acquiring the patching lock since
|
||||
// they both acquire leaf locks and we don't want a deadlock.
|
||||
// This logic is equivalent to the logic below for patching the
|
||||
@ -1219,6 +1225,8 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
|
||||
assert(state == not_entrant, "other cases may need to be handled differently");
|
||||
}
|
||||
|
||||
was_alive = is_in_use(); // Read state under lock
|
||||
|
||||
// Change state
|
||||
flags.state = state;
|
||||
|
||||
@ -1245,8 +1253,11 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
|
||||
mark_as_seen_on_stack();
|
||||
}
|
||||
|
||||
// It's a true state change, so mark the method as decompiled.
|
||||
inc_decompile_count();
|
||||
if (was_alive) {
|
||||
// It's a true state change, so mark the method as decompiled.
|
||||
// Do it only for transition from alive.
|
||||
inc_decompile_count();
|
||||
}
|
||||
|
||||
// zombie only - if a JVMTI agent has enabled the CompiledMethodUnload event
|
||||
// and it hasn't already been reported for this nmethod then report it now.
|
||||
@ -1312,7 +1323,8 @@ void nmethod::flush() {
|
||||
// completely deallocate this method
|
||||
EventMark m("flushing nmethod " INTPTR_FORMAT " %s", this, "");
|
||||
if (PrintMethodFlushing) {
|
||||
tty->print_cr("*flushing nmethod " INTPTR_FORMAT ". Live blobs: %d", this, CodeCache::nof_blobs());
|
||||
tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb",
|
||||
_compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity()/1024);
|
||||
}
|
||||
|
||||
// We need to deallocate any ExceptionCache data.
|
||||
@ -1330,6 +1342,10 @@ void nmethod::flush() {
|
||||
CodeCache::drop_scavenge_root_nmethod(this);
|
||||
}
|
||||
|
||||
if (is_speculatively_disconnected()) {
|
||||
CodeCache::remove_saved_code(this);
|
||||
}
|
||||
|
||||
((CodeBlob*)(this))->flush();
|
||||
|
||||
CodeCache::free(this);
|
||||
@ -1995,7 +2011,10 @@ address nmethod::continuation_for_implicit_exception(address pc) {
|
||||
print_pcs();
|
||||
}
|
||||
#endif
|
||||
guarantee(cont_offset != 0, "unhandled implicit exception in compiled code");
|
||||
if (cont_offset == 0) {
|
||||
// Let the normal error handling report the exception
|
||||
return NULL;
|
||||
}
|
||||
return instructions_begin() + cont_offset;
|
||||
}
|
||||
|
||||
@ -2031,9 +2050,21 @@ void nmethodLocker::unlock_nmethod(nmethod* nm) {
|
||||
guarantee(nm->_lock_count >= 0, "unmatched nmethod lock/unlock");
|
||||
}
|
||||
|
||||
bool nmethod::is_deopt_pc(address pc) {
|
||||
bool ret = pc == deopt_handler_begin();
|
||||
return ret;
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// nmethod::get_deopt_original_pc
|
||||
//
|
||||
// Return the original PC for the given PC if:
|
||||
// (a) the given PC belongs to a nmethod and
|
||||
// (b) it is a deopt PC
|
||||
address nmethod::get_deopt_original_pc(const frame* fr) {
|
||||
if (fr->cb() == NULL) return NULL;
|
||||
|
||||
nmethod* nm = fr->cb()->as_nmethod_or_null();
|
||||
if (nm != NULL && nm->is_deopt_pc(fr->pc()))
|
||||
return nm->get_original_pc(fr);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
||||
@ -2129,7 +2160,8 @@ void nmethod::verify_interrupt_point(address call_site) {
|
||||
PcDesc* pd = pc_desc_at(ic->end_of_call());
|
||||
assert(pd != NULL, "PcDesc must exist");
|
||||
for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset(),
|
||||
pd->obj_decode_offset(), pd->should_reexecute());
|
||||
pd->obj_decode_offset(), pd->should_reexecute(),
|
||||
pd->return_oop());
|
||||
!sd->is_top(); sd = sd->sender()) {
|
||||
sd->verify();
|
||||
}
|
||||
@ -2394,7 +2426,8 @@ ScopeDesc* nmethod::scope_desc_in(address begin, address end) {
|
||||
PcDesc* p = pc_desc_near(begin+1);
|
||||
if (p != NULL && p->real_pc(this) <= end) {
|
||||
return new ScopeDesc(this, p->scope_decode_offset(),
|
||||
p->obj_decode_offset(), p->should_reexecute());
|
||||
p->obj_decode_offset(), p->should_reexecute(),
|
||||
p->return_oop());
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
@ -2404,6 +2437,8 @@ void nmethod::print_nmethod_labels(outputStream* stream, address block_begin) {
|
||||
if (block_begin == verified_entry_point()) stream->print_cr("[Verified Entry Point]");
|
||||
if (block_begin == exception_begin()) stream->print_cr("[Exception Handler]");
|
||||
if (block_begin == stub_begin()) stream->print_cr("[Stub Code]");
|
||||
if (block_begin == deopt_handler_begin()) stream->print_cr("[Deopt Handler Code]");
|
||||
if (block_begin == deopt_mh_handler_begin()) stream->print_cr("[Deopt MH Handler Code]");
|
||||
if (block_begin == consts_begin()) stream->print_cr("[Constants]");
|
||||
if (block_begin == entry_point()) {
|
||||
methodHandle m = method();
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -95,6 +95,8 @@ struct nmFlags {
|
||||
unsigned int has_unsafe_access:1; // May fault due to unsafe access.
|
||||
unsigned int has_method_handle_invokes:1; // Has this method MethodHandle invokes?
|
||||
|
||||
unsigned int speculatively_disconnected:1; // Marked for potential unload
|
||||
|
||||
void clear();
|
||||
};
|
||||
|
||||
@ -137,6 +139,7 @@ class nmethod : public CodeBlob {
|
||||
// To support simple linked-list chaining of nmethods:
|
||||
nmethod* _osr_link; // from instanceKlass::osr_nmethods_head
|
||||
nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
|
||||
nmethod* _saved_nmethod_link; // from CodeCache::speculatively_disconnect
|
||||
|
||||
static nmethod* volatile _oops_do_mark_nmethods;
|
||||
nmethod* volatile _oops_do_mark_link;
|
||||
@ -145,8 +148,12 @@ class nmethod : public CodeBlob {
|
||||
|
||||
// Offsets for different nmethod parts
|
||||
int _exception_offset;
|
||||
// All deoptee's will resume execution at this location described by this offset
|
||||
// All deoptee's will resume execution at this location described by
|
||||
// this offset.
|
||||
int _deoptimize_offset;
|
||||
// All deoptee's at a MethodHandle call site will resume execution
|
||||
// at this location described by this offset.
|
||||
int _deoptimize_mh_offset;
|
||||
#ifdef HAVE_DTRACE_H
|
||||
int _trap_offset;
|
||||
#endif // def HAVE_DTRACE_H
|
||||
@ -329,24 +336,25 @@ class nmethod : public CodeBlob {
|
||||
bool is_compiled_by_c2() const;
|
||||
|
||||
// boundaries for different parts
|
||||
address code_begin () const { return _entry_point; }
|
||||
address code_end () const { return header_begin() + _stub_offset ; }
|
||||
address exception_begin () const { return header_begin() + _exception_offset ; }
|
||||
address deopt_handler_begin() const { return header_begin() + _deoptimize_offset ; }
|
||||
address stub_begin () const { return header_begin() + _stub_offset ; }
|
||||
address stub_end () const { return header_begin() + _consts_offset ; }
|
||||
address consts_begin () const { return header_begin() + _consts_offset ; }
|
||||
address consts_end () const { return header_begin() + _scopes_data_offset ; }
|
||||
address scopes_data_begin () const { return header_begin() + _scopes_data_offset ; }
|
||||
address scopes_data_end () const { return header_begin() + _scopes_pcs_offset ; }
|
||||
PcDesc* scopes_pcs_begin () const { return (PcDesc*)(header_begin() + _scopes_pcs_offset ); }
|
||||
PcDesc* scopes_pcs_end () const { return (PcDesc*)(header_begin() + _dependencies_offset); }
|
||||
address dependencies_begin () const { return header_begin() + _dependencies_offset ; }
|
||||
address dependencies_end () const { return header_begin() + _handler_table_offset ; }
|
||||
address handler_table_begin() const { return header_begin() + _handler_table_offset ; }
|
||||
address handler_table_end () const { return header_begin() + _nul_chk_table_offset ; }
|
||||
address nul_chk_table_begin() const { return header_begin() + _nul_chk_table_offset ; }
|
||||
address nul_chk_table_end () const { return header_begin() + _nmethod_end_offset ; }
|
||||
address code_begin () const { return _entry_point; }
|
||||
address code_end () const { return header_begin() + _stub_offset ; }
|
||||
address exception_begin () const { return header_begin() + _exception_offset ; }
|
||||
address deopt_handler_begin () const { return header_begin() + _deoptimize_offset ; }
|
||||
address deopt_mh_handler_begin() const { return header_begin() + _deoptimize_mh_offset ; }
|
||||
address stub_begin () const { return header_begin() + _stub_offset ; }
|
||||
address stub_end () const { return header_begin() + _consts_offset ; }
|
||||
address consts_begin () const { return header_begin() + _consts_offset ; }
|
||||
address consts_end () const { return header_begin() + _scopes_data_offset ; }
|
||||
address scopes_data_begin () const { return header_begin() + _scopes_data_offset ; }
|
||||
address scopes_data_end () const { return header_begin() + _scopes_pcs_offset ; }
|
||||
PcDesc* scopes_pcs_begin () const { return (PcDesc*)(header_begin() + _scopes_pcs_offset ); }
|
||||
PcDesc* scopes_pcs_end () const { return (PcDesc*)(header_begin() + _dependencies_offset) ; }
|
||||
address dependencies_begin () const { return header_begin() + _dependencies_offset ; }
|
||||
address dependencies_end () const { return header_begin() + _handler_table_offset ; }
|
||||
address handler_table_begin () const { return header_begin() + _handler_table_offset ; }
|
||||
address handler_table_end () const { return header_begin() + _nul_chk_table_offset ; }
|
||||
address nul_chk_table_begin () const { return header_begin() + _nul_chk_table_offset ; }
|
||||
address nul_chk_table_end () const { return header_begin() + _nmethod_end_offset ; }
|
||||
|
||||
int code_size () const { return code_end () - code_begin (); }
|
||||
int stub_size () const { return stub_end () - stub_begin (); }
|
||||
@ -413,6 +421,9 @@ class nmethod : public CodeBlob {
|
||||
bool has_method_handle_invokes() const { return flags.has_method_handle_invokes; }
|
||||
void set_has_method_handle_invokes(bool z) { flags.has_method_handle_invokes = z; }
|
||||
|
||||
bool is_speculatively_disconnected() const { return flags.speculatively_disconnected; }
|
||||
void set_speculatively_disconnected(bool z) { flags.speculatively_disconnected = z; }
|
||||
|
||||
int level() const { return flags.level; }
|
||||
void set_level(int newLevel) { check_safepoint(); flags.level = newLevel; }
|
||||
|
||||
@ -437,6 +448,9 @@ class nmethod : public CodeBlob {
|
||||
nmethod* scavenge_root_link() const { return _scavenge_root_link; }
|
||||
void set_scavenge_root_link(nmethod *n) { _scavenge_root_link = n; }
|
||||
|
||||
nmethod* saved_nmethod_link() const { return _saved_nmethod_link; }
|
||||
void set_saved_nmethod_link(nmethod *n) { _saved_nmethod_link = n; }
|
||||
|
||||
public:
|
||||
|
||||
// Sweeper support
|
||||
@ -515,7 +529,7 @@ class nmethod : public CodeBlob {
|
||||
private:
|
||||
ScopeDesc* scope_desc_in(address begin, address end);
|
||||
|
||||
address* orig_pc_addr(const frame* fr ) { return (address*) ((address)fr->unextended_sp() + _orig_pc_offset); }
|
||||
address* orig_pc_addr(const frame* fr) { return (address*) ((address)fr->unextended_sp() + _orig_pc_offset); }
|
||||
|
||||
PcDesc* find_pc_desc_internal(address pc, bool approximate);
|
||||
|
||||
@ -538,13 +552,17 @@ class nmethod : public CodeBlob {
void copy_scopes_pcs(PcDesc* pcs, int count);
void copy_scopes_data(address buffer, int size);

// deopt
// return true is the pc is one would expect if the frame is being deopted.
bool is_deopt_pc(address pc);
// Deopt
// Return true is the PC is one would expect if the frame is being deopted.
bool is_deopt_pc (address pc) { return is_deopt_entry(pc) || is_deopt_mh_entry(pc); }
bool is_deopt_entry (address pc) { return pc == deopt_handler_begin(); }
bool is_deopt_mh_entry(address pc) { return pc == deopt_mh_handler_begin(); }
// Accessor/mutator for the original pc of a frame before a frame was deopted.
address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
void set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }

static address get_deopt_original_pc(const frame* fr);

// MethodHandle
bool is_method_handle_return(address return_pc);

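The split of is_deopt_pc() into is_deopt_entry() and is_deopt_mh_entry() lets a stack walker treat ordinary and MethodHandle-site deoptimizations uniformly while still telling the two stub entries apart. A minimal sketch of how a caller might combine these accessors; the helper name resume_pc_for is illustrative only and not part of this change:

  // Illustrative only. If the saved return address points at either deopt stub
  // entry, the frame is mid-deoptimization and the interesting PC is the one the
  // runtime stashed via set_original_pc() before patching the return address.
  static address resume_pc_for(nmethod* nm, const frame* fr, address return_pc) {
    if (nm->is_deopt_pc(return_pc)) {     // covers both deopt and deopt-MH entries
      return nm->get_original_pc(fr);     // the pre-deoptimization PC
    }
    return return_pc;                     // normal case
  }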
@ -1,5 +1,5 @@
/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -52,7 +52,8 @@ void PcDesc::print(nmethod* code) {
tty->print(" ");
sd->method()->print_short_name(tty);
tty->print(" @%d", sd->bci());
tty->print(" reexecute=%s", sd->should_reexecute()?"true":"false");
if (sd->should_reexecute())
tty->print(" reexecute=true");
tty->cr();
}
#endif

@ -39,6 +39,7 @@ class PcDesc VALUE_OBJ_CLASS_SPEC {
struct {
unsigned int reexecute: 1;
unsigned int is_method_handle_invoke: 1;
unsigned int return_oop: 1;
} bits;
bool operator ==(const PcDescFlags& other) { return word == other.word; }
} _flags;
@ -76,6 +77,9 @@ class PcDesc VALUE_OBJ_CLASS_SPEC {
bool is_method_handle_invoke() const { return _flags.bits.is_method_handle_invoke; }
void set_is_method_handle_invoke(bool z) { _flags.bits.is_method_handle_invoke = z; }

bool return_oop() const { return _flags.bits.return_oop; }
void set_return_oop(bool z) { _flags.bits.return_oop = z; }

// Returns the real pc
address real_pc(const nmethod* code) const;

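These bits live in a flags word that can also be compared as a single integer (the operator== above compares word), so the new return_oop flag costs one more bit rather than a new PcDesc field. A rough sketch of the implied layout, as an approximation rather than the exact HotSpot declaration:

  // Approximate layout only; the real type is the PcDescFlags used above.
  union PcDescFlagsSketch {
    int word;                                   // compared/copied as one word
    struct {
      unsigned int reexecute:               1;
      unsigned int is_method_handle_invoke: 1;
      unsigned int return_oop:              1;  // bit added by this change
    } bits;
  };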
@ -26,19 +26,21 @@
# include "incls/_scopeDesc.cpp.incl"


ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offset, bool reexecute) {
ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offset, bool reexecute, bool return_oop) {
_code = code;
_decode_offset = decode_offset;
_objects = decode_object_values(obj_decode_offset);
_reexecute = reexecute;
_return_oop = return_oop;
decode_body();
}

ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset, bool reexecute) {
ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset, bool reexecute, bool return_oop) {
_code = code;
_decode_offset = decode_offset;
_objects = decode_object_values(DebugInformationRecorder::serialized_null);
_reexecute = reexecute;
_return_oop = return_oop;
decode_body();
}

@ -48,6 +50,7 @@ ScopeDesc::ScopeDesc(const ScopeDesc* parent) {
_decode_offset = parent->_sender_decode_offset;
_objects = parent->_objects;
_reexecute = false; //reexecute only applies to the first scope
_return_oop = false;
decode_body();
}

@ -52,17 +52,18 @@ class SimpleScopeDesc : public StackObj {
class ScopeDesc : public ResourceObj {
public:
// Constructor
ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offset, bool reexecute);
ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offset, bool reexecute, bool return_oop);

// Calls above, giving default value of "serialized_null" to the
// "obj_decode_offset" argument. (We don't use a default argument to
// avoid a .hpp-.hpp dependency.)
ScopeDesc(const nmethod* code, int decode_offset, bool reexecute);
ScopeDesc(const nmethod* code, int decode_offset, bool reexecute, bool return_oop);

// JVM state
methodHandle method() const { return _method; }
int bci() const { return _bci; }
bool should_reexecute() const { return _reexecute; }
bool return_oop() const { return _return_oop; }

GrowableArray<ScopeValue*>* locals();
GrowableArray<ScopeValue*>* expressions();
@ -88,6 +89,7 @@ class ScopeDesc : public ResourceObj {
methodHandle _method;
int _bci;
bool _reexecute;
bool _return_oop;

// Decoding offsets
int _decode_offset;
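Together with the nmethod.cpp hunk at the top of this section, the extra constructor argument threads the per-safepoint bit from the PcDesc into the ScopeDesc built for that PC, so GC and frame-walking code can ask the scope whether the stopped call returns an oop. A condensed sketch of that hand-off; the wrapper name scope_at is illustrative only:

  // Illustrative wrapper showing how a PcDesc becomes a ScopeDesc after this change.
  static ScopeDesc* scope_at(const nmethod* nm, PcDesc* p) {
    return new ScopeDesc(nm, p->scope_decode_offset(),
                         p->obj_decode_offset(),
                         p->should_reexecute(),   // existing flag
                         p->return_oop());        // new flag carried along
  }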
@ -1,5 +1,5 @@
/*
* Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -69,6 +69,7 @@ HS_DTRACE_PROBE_DECL9(hotspot, method__compile__end,

bool CompileBroker::_initialized = false;
volatile bool CompileBroker::_should_block = false;
volatile jint CompileBroker::_should_compile_new_jobs = run_compilation;

// The installed compiler(s)
AbstractCompiler* CompileBroker::_compilers[2];
@ -986,6 +987,13 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
return method_code;
}
if (method->is_not_compilable(comp_level)) return NULL;

nmethod* saved = CodeCache::find_and_remove_saved_code(method());
if (saved != NULL) {
method->set_code(method, saved);
return saved;
}

} else {
// osr compilation
#ifndef TIERED
@ -1037,6 +1045,14 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
method->jmethod_id();
}

// If the compiler is shut off due to code cache flushing or otherwise,
// fail out now so blocking compiles dont hang the java thread
if (!should_compile_new_jobs() || (UseCodeCacheFlushing && CodeCache::needs_flushing())) {
method->invocation_counter()->decay();
method->backedge_counter()->decay();
return NULL;
}

// do the compilation
if (method->is_native()) {
if (!PreferInterpreterNativeStubs) {
@ -1116,7 +1132,7 @@ bool CompileBroker::compilation_is_prohibited(methodHandle method, int osr_bci,
|
||||
// the specified level
|
||||
if (is_native &&
|
||||
(!CICompileNatives || !compiler(comp_level)->supports_native())) {
|
||||
method->set_not_compilable();
|
||||
method->set_not_compilable_quietly();
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1140,7 +1156,7 @@ bool CompileBroker::compilation_is_prohibited(methodHandle method, int osr_bci,
|
||||
method->print_short_name(tty);
|
||||
tty->cr();
|
||||
}
|
||||
method->set_not_compilable();
|
||||
method->set_not_compilable_quietly();
|
||||
}
|
||||
|
||||
return false;
|
||||
@ -1173,7 +1189,7 @@ uint CompileBroker::assign_compile_id(methodHandle method, int osr_bci) {
|
||||
}
|
||||
|
||||
// Method was not in the appropriate compilation range.
|
||||
method->set_not_compilable();
|
||||
method->set_not_compilable_quietly();
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1325,26 +1341,13 @@ void CompileBroker::compiler_thread_loop() {
|
||||
{
|
||||
// We need this HandleMark to avoid leaking VM handles.
|
||||
HandleMark hm(thread);
|
||||
|
||||
if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) {
|
||||
// The CodeCache is full. Print out warning and disable compilation.
|
||||
UseInterpreter = true;
|
||||
if (UseCompiler || AlwaysCompileLoopMethods ) {
|
||||
if (log != NULL) {
|
||||
log->begin_elem("code_cache_full");
|
||||
log->stamp();
|
||||
log->end_elem();
|
||||
}
|
||||
#ifndef PRODUCT
|
||||
warning("CodeCache is full. Compiler has been disabled");
|
||||
if (CompileTheWorld || ExitOnFullCodeCache) {
|
||||
before_exit(thread);
|
||||
exit_globals(); // will delete tty
|
||||
vm_direct_exit(CompileTheWorld ? 0 : 1);
|
||||
}
|
||||
#endif
|
||||
UseCompiler = false;
|
||||
AlwaysCompileLoopMethods = false;
|
||||
}
|
||||
// the code cache is really full
|
||||
handle_full_code_cache();
|
||||
} else if (UseCodeCacheFlushing && CodeCache::needs_flushing()) {
|
||||
// Attempt to start cleaning the code cache while there is still a little headroom
|
||||
NMethodSweeper::handle_full_code_cache(false);
|
||||
}
|
||||
|
||||
CompileTask* task = queue->get();
|
||||
@ -1369,7 +1372,7 @@ void CompileBroker::compiler_thread_loop() {
|
||||
// Never compile a method if breakpoints are present in it
|
||||
if (method()->number_of_breakpoints() == 0) {
|
||||
// Compile the method.
|
||||
if (UseCompiler || AlwaysCompileLoopMethods) {
|
||||
if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) {
|
||||
#ifdef COMPILER1
|
||||
// Allow repeating compilations for the purpose of benchmarking
|
||||
// compile speed. This is not useful for customers.
|
||||
@ -1587,10 +1590,10 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
|
||||
if (is_osr) {
|
||||
method->set_not_osr_compilable();
|
||||
} else {
|
||||
method->set_not_compilable();
|
||||
method->set_not_compilable_quietly();
|
||||
}
|
||||
} else if (compilable == ciEnv::MethodCompilable_not_at_tier) {
|
||||
method->set_not_compilable(task->comp_level());
|
||||
method->set_not_compilable_quietly(task->comp_level());
|
||||
}
|
||||
|
||||
// Note that the queued_for_compilation bits are cleared without
|
||||
@ -1613,6 +1616,38 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
}


// ------------------------------------------------------------------
// CompileBroker::handle_full_code_cache
//
// The CodeCache is full. Print out warning and disable compilation or
// try code cache cleaning so compilation can continue later.
void CompileBroker::handle_full_code_cache() {
UseInterpreter = true;
if (UseCompiler || AlwaysCompileLoopMethods ) {
CompilerThread* thread = CompilerThread::current();
CompileLog* log = thread->log();
if (log != NULL) {
log->begin_elem("code_cache_full");
log->stamp();
log->end_elem();
}
#ifndef PRODUCT
warning("CodeCache is full. Compiler has been disabled");
if (CompileTheWorld || ExitOnFullCodeCache) {
before_exit(JavaThread::current());
exit_globals(); // will delete tty
vm_direct_exit(CompileTheWorld ? 0 : 1);
}
#endif
if (UseCodeCacheFlushing) {
NMethodSweeper::handle_full_code_cache(true);
} else {
UseCompiler = false;
AlwaysCompileLoopMethods = false;
}
}
}

// ------------------------------------------------------------------
// CompileBroker::set_last_compile
//
@ -193,6 +193,9 @@ class CompileBroker: AllStatic {
static bool _initialized;
static volatile bool _should_block;

// This flag can be used to stop compilation or turn it back on
static volatile jint _should_compile_new_jobs;

// The installed compiler(s)
static AbstractCompiler* _compilers[2];

@ -319,6 +322,7 @@ class CompileBroker: AllStatic {

static void compiler_thread_loop();

static uint get_compilation_id() { return _compilation_id; }
static bool is_idle();

// Set _should_block.
@ -328,6 +332,20 @@ class CompileBroker: AllStatic {
// Call this from the compiler at convenient points, to poll for _should_block.
static void maybe_block();

enum {
// Flags for toggling compiler activity
stop_compilation = 0,
run_compilation = 1
};

static bool should_compile_new_jobs() { return UseCompiler && (_should_compile_new_jobs == run_compilation); }
static bool set_should_compile_new_jobs(jint new_state) {
// Return success if the current caller set it
jint old = Atomic::cmpxchg(new_state, &_should_compile_new_jobs, 1-new_state);
return (old == (1-new_state));
}
static void handle_full_code_cache();

// Return total compilation ticks
static jlong total_compilation_ticks() {
return _perf_total_compilation != NULL ? _perf_total_compilation->get_value() : 0;
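The toggle above works because the two states are 0 and 1, so 1-new_state is always the expected previous value and only one caller can win each transition. A small stand-alone illustration of the same semantics using std::atomic (not HotSpot code; note that Atomic::cmpxchg takes its arguments in a different order than compare_exchange_strong):

  #include <atomic>

  // Mirrors set_should_compile_new_jobs(): the swap succeeds only when the flag
  // currently holds the *other* state, so stop/restart requests apply exactly once.
  static std::atomic<int> should_compile(1);        // 1 == run_compilation

  static bool set_state(int new_state) {
    int expected = 1 - new_state;                   // the only state we may leave
    return should_compile.compare_exchange_strong(expected, new_state);
  }

  // set_state(0) -> true  (stops compilation); a second set_state(0) -> false
  // set_state(1) -> true  (re-enables it), again only once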
@ -3655,9 +3655,7 @@ bool CMSCollector::markFromRootsWork(bool asynch) {
|
||||
verify_work_stacks_empty();
|
||||
verify_overflow_empty();
|
||||
assert(_revisitStack.isEmpty(), "tabula rasa");
|
||||
|
||||
DEBUG_ONLY(RememberKlassesChecker cmx(CMSClassUnloadingEnabled);)
|
||||
|
||||
DEBUG_ONLY(RememberKlassesChecker cmx(should_unload_classes());)
|
||||
bool result = false;
|
||||
if (CMSConcurrentMTEnabled && ParallelCMSThreads > 0) {
|
||||
result = do_marking_mt(asynch);
|
||||
@ -4124,7 +4122,6 @@ void CMSConcMarkingTask::do_work_steal(int i) {
|
||||
void CMSConcMarkingTask::coordinator_yield() {
|
||||
assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
|
||||
"CMS thread should hold CMS token");
|
||||
|
||||
DEBUG_ONLY(RememberKlassesChecker mux(false);)
|
||||
// First give up the locks, then yield, then re-lock
|
||||
// We should probably use a constructor/destructor idiom to
|
||||
@ -4201,9 +4198,7 @@ bool CMSCollector::do_marking_mt(bool asynch) {
|
||||
// Mutate the Refs discovery so it is MT during the
|
||||
// multi-threaded marking phase.
|
||||
ReferenceProcessorMTMutator mt(ref_processor(), num_workers > 1);
|
||||
|
||||
DEBUG_ONLY(RememberKlassesChecker cmx(CMSClassUnloadingEnabled);)
|
||||
|
||||
DEBUG_ONLY(RememberKlassesChecker cmx(should_unload_classes());)
|
||||
conc_workers()->start_task(&tsk);
|
||||
while (tsk.yielded()) {
|
||||
tsk.coordinator_yield();
|
||||
@ -4472,7 +4467,7 @@ size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
|
||||
// for cleaner interfaces.
|
||||
rp->preclean_discovered_references(
|
||||
rp->is_alive_non_header(), &keep_alive, &complete_trace,
|
||||
&yield_cl);
|
||||
&yield_cl, should_unload_classes());
|
||||
}
|
||||
|
||||
if (clean_survivor) { // preclean the active survivor space(s)
|
||||
@ -4494,7 +4489,7 @@ size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
|
||||
SurvivorSpacePrecleanClosure
|
||||
sss_cl(this, _span, &_markBitMap, &_markStack,
|
||||
&pam_cl, before_count, CMSYield);
|
||||
DEBUG_ONLY(RememberKlassesChecker mx(CMSClassUnloadingEnabled);)
|
||||
DEBUG_ONLY(RememberKlassesChecker mx(should_unload_classes());)
|
||||
dng->from()->object_iterate_careful(&sss_cl);
|
||||
dng->to()->object_iterate_careful(&sss_cl);
|
||||
}
|
||||
@ -4665,7 +4660,7 @@ size_t CMSCollector::preclean_mod_union_table(
|
||||
verify_work_stacks_empty();
|
||||
verify_overflow_empty();
|
||||
sample_eden();
|
||||
DEBUG_ONLY(RememberKlassesChecker mx(CMSClassUnloadingEnabled);)
|
||||
DEBUG_ONLY(RememberKlassesChecker mx(should_unload_classes());)
|
||||
stop_point =
|
||||
gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
|
||||
}
|
||||
@ -4753,7 +4748,7 @@ size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
|
||||
sample_eden();
|
||||
verify_work_stacks_empty();
|
||||
verify_overflow_empty();
|
||||
DEBUG_ONLY(RememberKlassesChecker mx(CMSClassUnloadingEnabled);)
|
||||
DEBUG_ONLY(RememberKlassesChecker mx(should_unload_classes());)
|
||||
HeapWord* stop_point =
|
||||
gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
|
||||
if (stop_point != NULL) {
|
||||
@ -4853,7 +4848,7 @@ void CMSCollector::checkpointRootsFinalWork(bool asynch,
|
||||
assert(haveFreelistLocks(), "must have free list locks");
|
||||
assert_lock_strong(bitMapLock());
|
||||
|
||||
DEBUG_ONLY(RememberKlassesChecker fmx(CMSClassUnloadingEnabled);)
|
||||
DEBUG_ONLY(RememberKlassesChecker fmx(should_unload_classes());)
|
||||
if (!init_mark_was_synchronous) {
|
||||
// We might assume that we need not fill TLAB's when
|
||||
// CMSScavengeBeforeRemark is set, because we may have just done
|
||||
|
@ -300,7 +300,23 @@ jbyte* ConcurrentG1Refine::cache_insert(jbyte* card_ptr, bool* defer) {
int count;
jbyte* cached_ptr = add_card_count(card_ptr, &count, defer);
assert(cached_ptr != NULL, "bad cached card ptr");
assert(!is_young_card(cached_ptr), "shouldn't get a card in young region");

if (is_young_card(cached_ptr)) {
// The region containing cached_ptr has been freed during a clean up
// pause, reallocated, and tagged as young.
assert(cached_ptr != card_ptr, "shouldn't be");

// We've just inserted a new old-gen card pointer into the card count
// cache and evicted the previous contents of that count slot.
// The evicted card pointer has been determined to be in a young region
// and so cannot be the newly inserted card pointer (that will be
// in an old region).
// The count for newly inserted card will be set to zero during the
// insertion, so we don't want to defer the cleaning of the newly
// inserted card pointer.
assert(*defer == false, "deferring non-hot card");
return NULL;
}

// The card pointer we obtained from card count cache is not hot
// so do not store it in the cache; return it for immediate
@ -2505,6 +2505,7 @@ G1CollectedHeap* G1CollectedHeap::heap() {
|
||||
}
|
||||
|
||||
void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
|
||||
// always_do_update_barrier = false;
|
||||
assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
|
||||
// Call allocation profiler
|
||||
AllocationProfiler::iterate_since_last_gc();
|
||||
@ -2518,6 +2519,7 @@ void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
|
||||
// is set.
|
||||
COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
|
||||
"derived pointer present"));
|
||||
// always_do_update_barrier = true;
|
||||
}
|
||||
|
||||
void G1CollectedHeap::do_collection_pause() {
|
||||
@ -2644,6 +2646,13 @@ G1CollectedHeap::cleanup_surviving_young_words() {
|
||||
|
||||
// </NEW PREDICTION>
|
||||
|
||||
struct PrepareForRSScanningClosure : public HeapRegionClosure {
|
||||
bool doHeapRegion(HeapRegion *r) {
|
||||
r->rem_set()->set_iter_claimed(0);
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
void
|
||||
G1CollectedHeap::do_collection_pause_at_safepoint() {
|
||||
if (PrintHeapAtGC) {
|
||||
@ -2782,6 +2791,8 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
|
||||
gclog_or_tty->print_cr("\nAfter pause, heap:");
|
||||
print();
|
||||
#endif
|
||||
PrepareForRSScanningClosure prepare_for_rs_scan;
|
||||
collection_set_iterate(&prepare_for_rs_scan);
|
||||
|
||||
setup_surviving_young_words();
|
||||
|
||||
@ -3779,22 +3790,16 @@ oop G1ParCopyHelper::copy_to_survivor_space(oop old) {
|
||||
return obj;
|
||||
}
|
||||
|
||||
template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee, bool skip_cset_test>
|
||||
template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee>
|
||||
template <class T>
|
||||
void G1ParCopyClosure <do_gen_barrier, barrier, do_mark_forwardee, skip_cset_test>
|
||||
void G1ParCopyClosure <do_gen_barrier, barrier, do_mark_forwardee>
|
||||
::do_oop_work(T* p) {
|
||||
oop obj = oopDesc::load_decode_heap_oop(p);
|
||||
assert(barrier != G1BarrierRS || obj != NULL,
|
||||
"Precondition: G1BarrierRS implies obj is nonNull");
|
||||
|
||||
// The only time we skip the cset test is when we're scanning
|
||||
// references popped from the queue. And we only push on the queue
|
||||
// references that we know point into the cset, so no point in
|
||||
// checking again. But we'll leave an assert here for peace of mind.
|
||||
assert(!skip_cset_test || _g1->obj_in_cs(obj), "invariant");
|
||||
|
||||
// here the null check is implicit in the cset_fast_test() test
|
||||
if (skip_cset_test || _g1->in_cset_fast_test(obj)) {
|
||||
if (_g1->in_cset_fast_test(obj)) {
|
||||
#if G1_REM_SET_LOGGING
|
||||
gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" "
|
||||
"into CS.", p, (void*) obj);
|
||||
@ -3811,7 +3816,6 @@ void G1ParCopyClosure <do_gen_barrier, barrier, do_mark_forwardee, skip_cset_tes
|
||||
}
|
||||
}
|
||||
|
||||
// When scanning moved objs, must look at all oops.
|
||||
if (barrier == G1BarrierEvac && obj != NULL) {
|
||||
_par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
|
||||
}
|
||||
@ -3821,8 +3825,8 @@ void G1ParCopyClosure <do_gen_barrier, barrier, do_mark_forwardee, skip_cset_tes
|
||||
}
|
||||
}
|
||||
|
||||
template void G1ParCopyClosure<false, G1BarrierEvac, false, true>::do_oop_work(oop* p);
|
||||
template void G1ParCopyClosure<false, G1BarrierEvac, false, true>::do_oop_work(narrowOop* p);
|
||||
template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p);
|
||||
template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(narrowOop* p);
|
||||
|
||||
template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) {
|
||||
assert(has_partial_array_mask(p), "invariant");
|
||||
@ -3894,11 +3898,11 @@ public:
|
||||
assert(UseCompressedOops, "Error");
|
||||
narrowOop* p = (narrowOop*) stolen_task;
|
||||
assert(has_partial_array_mask(p) ||
|
||||
_g1h->obj_in_cs(oopDesc::load_decode_heap_oop(p)), "Error");
|
||||
_g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "Error");
|
||||
pss->push_on_queue(p);
|
||||
} else {
|
||||
oop* p = (oop*) stolen_task;
|
||||
assert(has_partial_array_mask(p) || _g1h->obj_in_cs(*p), "Error");
|
||||
assert(has_partial_array_mask(p) || _g1h->is_in_g1_reserved(*p), "Error");
|
||||
pss->push_on_queue(p);
|
||||
}
|
||||
continue;
|
||||
@ -3960,6 +3964,7 @@ public:
|
||||
G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss);
|
||||
G1ParScanPermClosure only_scan_perm_cl(_g1h, &pss);
|
||||
G1ParScanHeapRSClosure only_scan_heap_rs_cl(_g1h, &pss);
|
||||
G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss);
|
||||
|
||||
G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss);
|
||||
G1ParScanAndMarkPermClosure scan_mark_perm_cl(_g1h, &pss);
|
||||
@ -3983,7 +3988,7 @@ public:
|
||||
_g1h->g1_process_strong_roots(/* not collecting perm */ false,
|
||||
SharedHeap::SO_AllClasses,
|
||||
scan_root_cl,
|
||||
&only_scan_heap_rs_cl,
|
||||
&push_heap_rs_cl,
|
||||
scan_so_cl,
|
||||
scan_perm_cl,
|
||||
i);
|
||||
|
@ -1004,7 +1004,12 @@ public:
|
||||
// storage in the heap comes from a young region or not.
|
||||
// See ReduceInitialCardMarks.
|
||||
virtual bool can_elide_tlab_store_barriers() const {
|
||||
return true;
|
||||
// 6920090: Temporarily disabled, because of lingering
|
||||
// instabilities related to RICM with G1. In the
|
||||
// interim, the option ReduceInitialCardMarksForG1
|
||||
// below is left solely as a debugging device at least
|
||||
// until 6920109 fixes the instabilities.
|
||||
return ReduceInitialCardMarksForG1;
|
||||
}
|
||||
|
||||
virtual bool card_mark_must_follow_store() const {
|
||||
@ -1026,6 +1031,8 @@ public:
|
||||
// However, non-generational G1 (-XX:-G1Gen) appears to have
|
||||
// bit-rotted so was not tested below.
|
||||
virtual bool can_elide_initializing_store_barrier(oop new_obj) {
|
||||
// Re 6920090, 6920109 above.
|
||||
assert(ReduceInitialCardMarksForG1, "Else cannot be here");
|
||||
assert(G1Gen || !is_in_young(new_obj),
|
||||
"Non-generational G1 should never return true below");
|
||||
return is_in_young(new_obj);
|
||||
@ -1616,7 +1623,7 @@ public:
|
||||
template <class T> void push_on_queue(T* ref) {
|
||||
assert(ref != NULL, "invariant");
|
||||
assert(has_partial_array_mask(ref) ||
|
||||
_g1h->obj_in_cs(oopDesc::load_decode_heap_oop(ref)), "invariant");
|
||||
_g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(ref)), "invariant");
|
||||
#ifdef ASSERT
|
||||
if (has_partial_array_mask(ref)) {
|
||||
oop p = clear_partial_array_mask(ref);
|
||||
@ -1637,9 +1644,9 @@ public:
|
||||
assert((oop*)ref != NULL, "pop_local() returned true");
|
||||
assert(UseCompressedOops || !ref.is_narrow(), "Error");
|
||||
assert(has_partial_array_mask((oop*)ref) ||
|
||||
_g1h->obj_in_cs(ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)ref)
|
||||
: oopDesc::load_decode_heap_oop((oop*)ref)),
|
||||
"invariant");
|
||||
_g1h->is_in_g1_reserved(ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)ref)
|
||||
: oopDesc::load_decode_heap_oop((oop*)ref)),
|
||||
"invariant");
|
||||
IF_G1_DETAILED_STATS(note_pop());
|
||||
} else {
|
||||
StarTask null_task;
|
||||
@ -1652,9 +1659,9 @@ public:
|
||||
assert((oop*)new_ref != NULL, "pop() from a local non-empty stack");
|
||||
assert(UseCompressedOops || !new_ref.is_narrow(), "Error");
|
||||
assert(has_partial_array_mask((oop*)new_ref) ||
|
||||
_g1h->obj_in_cs(new_ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)new_ref)
|
||||
: oopDesc::load_decode_heap_oop((oop*)new_ref)),
|
||||
"invariant");
|
||||
_g1h->is_in_g1_reserved(new_ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)new_ref)
|
||||
: oopDesc::load_decode_heap_oop((oop*)new_ref)),
|
||||
"invariant");
|
||||
ref = new_ref;
|
||||
}
|
||||
|
||||
@ -1818,12 +1825,12 @@ public:
|
||||
assert(UseCompressedOops, "Error");
|
||||
narrowOop* p = (narrowOop*)ref_to_scan;
|
||||
assert(!has_partial_array_mask(p) &&
|
||||
_g1h->obj_in_cs(oopDesc::load_decode_heap_oop(p)), "sanity");
|
||||
_g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
|
||||
deal_with_reference(p);
|
||||
} else {
|
||||
oop* p = (oop*)ref_to_scan;
|
||||
assert((has_partial_array_mask(p) && _g1h->obj_in_cs(clear_partial_array_mask(p))) ||
|
||||
_g1h->obj_in_cs(oopDesc::load_decode_heap_oop(p)), "sanity");
|
||||
assert((has_partial_array_mask(p) && _g1h->is_in_g1_reserved(clear_partial_array_mask(p))) ||
|
||||
_g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
|
||||
deal_with_reference(p);
|
||||
}
|
||||
}
|
||||
@ -1837,12 +1844,12 @@ public:
|
||||
assert(UseCompressedOops, "Error");
|
||||
narrowOop* p = (narrowOop*)ref_to_scan;
|
||||
assert(!has_partial_array_mask(p) &&
|
||||
_g1h->obj_in_cs(oopDesc::load_decode_heap_oop(p)), "sanity");
|
||||
_g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
|
||||
deal_with_reference(p);
|
||||
} else {
|
||||
oop* p = (oop*)ref_to_scan;
|
||||
assert((has_partial_array_mask(p) && _g1h->obj_in_cs(clear_partial_array_mask(p))) ||
|
||||
_g1h->obj_in_cs(oopDesc::load_decode_heap_oop(p)), "sanity");
|
||||
_g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
|
||||
deal_with_reference(p);
|
||||
}
|
||||
}
|
||||
|
@ -205,6 +205,7 @@ G1CollectorPolicy::G1CollectorPolicy() :
// policy is created before the heap, we have to set this up here,
// so it's done as soon as possible.
HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
HeapRegionRemSet::setup_remset_size();

_recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
_prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;

@ -53,6 +53,15 @@ public:
bool apply_to_weak_ref_discovered_field() { return true; }
};

class G1ParPushHeapRSClosure : public G1ParClosureSuper {
public:
G1ParPushHeapRSClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
G1ParClosureSuper(g1, par_scan_state) { }
template <class T> void do_oop_nv(T* p);
virtual void do_oop(oop* p) { do_oop_nv(p); }
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};

class G1ParScanClosure : public G1ParClosureSuper {
public:
G1ParScanClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
@ -100,7 +109,7 @@ public:
|
||||
};
|
||||
|
||||
template<bool do_gen_barrier, G1Barrier barrier,
|
||||
bool do_mark_forwardee, bool skip_cset_test>
|
||||
bool do_mark_forwardee>
|
||||
class G1ParCopyClosure : public G1ParCopyHelper {
|
||||
G1ParScanClosure _scanner;
|
||||
template <class T> void do_oop_work(T* p);
|
||||
@ -116,12 +125,13 @@ public:
|
||||
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
|
||||
};
|
||||
|
||||
typedef G1ParCopyClosure<false, G1BarrierNone, false, false> G1ParScanExtRootClosure;
|
||||
typedef G1ParCopyClosure<true, G1BarrierNone, false, false> G1ParScanPermClosure;
|
||||
typedef G1ParCopyClosure<false, G1BarrierRS, false, false> G1ParScanHeapRSClosure;
|
||||
typedef G1ParCopyClosure<false, G1BarrierNone, true, false> G1ParScanAndMarkExtRootClosure;
|
||||
typedef G1ParCopyClosure<true, G1BarrierNone, true, false> G1ParScanAndMarkPermClosure;
|
||||
typedef G1ParCopyClosure<false, G1BarrierRS, true, false> G1ParScanAndMarkHeapRSClosure;
|
||||
typedef G1ParCopyClosure<false, G1BarrierNone, false> G1ParScanExtRootClosure;
|
||||
typedef G1ParCopyClosure<true, G1BarrierNone, false> G1ParScanPermClosure;
|
||||
typedef G1ParCopyClosure<false, G1BarrierRS, false> G1ParScanHeapRSClosure;
|
||||
typedef G1ParCopyClosure<false, G1BarrierNone, true> G1ParScanAndMarkExtRootClosure;
|
||||
typedef G1ParCopyClosure<true, G1BarrierNone, true> G1ParScanAndMarkPermClosure;
|
||||
typedef G1ParCopyClosure<false, G1BarrierRS, true> G1ParScanAndMarkHeapRSClosure;
|
||||
|
||||
// This is the only case when we set skip_cset_test. Basically, this
|
||||
// closure is (should?) only be called directly while we're draining
|
||||
// the overflow and task queues. In that case we know that the
|
||||
@ -132,7 +142,7 @@ typedef G1ParCopyClosure<false, G1BarrierRS, true, false> G1ParScanAndMarkHea
|
||||
// We need a separate closure to handle references during evacuation
|
||||
// failure processing, as we cannot asume that the reference already
|
||||
// points into the collection set (like G1ParScanHeapEvacClosure does).
|
||||
typedef G1ParCopyClosure<false, G1BarrierEvac, false, false> G1ParScanHeapEvacFailureClosure;
|
||||
typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacFailureClosure;
|
||||
|
||||
class FilterIntoCSClosure: public OopClosure {
|
||||
G1CollectedHeap* _g1;
|
||||
|
@ -104,3 +104,16 @@ template <class T> inline void G1ParScanClosure::do_oop_nv(T* p) {
}
}
}

template <class T> inline void G1ParPushHeapRSClosure::do_oop_nv(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);

if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
if (_g1->in_cset_fast_test(obj)) {
Prefetch::write(obj->mark_addr(), 0);
Prefetch::read(obj->mark_addr(), (HeapWordSize*2));
_par_scan_state->push_on_queue(p);
}
}
}

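Note that this closure never copies an object itself: it only enqueues references whose target the fast test places in the collection set, and the per-thread queues are drained later by the existing evacuation closures (e.g. G1ParScanHeapEvacClosure). A compact paraphrase of that division of labour, as illustrative pseudocode rather than a quote of the patch:

  // Scan phase (this closure): test cheaply, enqueue, move on.
  //   if (_g1->in_cset_fast_test(obj)) _par_scan_state->push_on_queue(p);
  // Drain phase (existing evac closures): pop each reference, evacuate the object,
  //   then update the slot it was loaded from.
  // The scan side stays cheap; copying is deferred to the drain loop.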
@ -155,8 +155,8 @@ class ScanRSClosure : public HeapRegionClosure {
|
||||
G1BlockOffsetSharedArray* _bot_shared;
|
||||
CardTableModRefBS *_ct_bs;
|
||||
int _worker_i;
|
||||
int _block_size;
|
||||
bool _try_claimed;
|
||||
size_t _min_skip_distance, _max_skip_distance;
|
||||
public:
|
||||
ScanRSClosure(OopsInHeapRegionClosure* oc, int worker_i) :
|
||||
_oc(oc),
|
||||
@ -168,8 +168,7 @@ public:
|
||||
_g1h = G1CollectedHeap::heap();
|
||||
_bot_shared = _g1h->bot_shared();
|
||||
_ct_bs = (CardTableModRefBS*) (_g1h->barrier_set());
|
||||
_min_skip_distance = 16;
|
||||
_max_skip_distance = 2 * _g1h->n_par_threads() * _min_skip_distance;
|
||||
_block_size = MAX2<int>(G1RSetScanBlockSize, 1);
|
||||
}
|
||||
|
||||
void set_try_claimed() { _try_claimed = true; }
|
||||
@ -225,12 +224,15 @@ public:
|
||||
HeapRegionRemSetIterator* iter = _g1h->rem_set_iterator(_worker_i);
|
||||
hrrs->init_iterator(iter);
|
||||
size_t card_index;
|
||||
size_t skip_distance = 0, current_card = 0, jump_to_card = 0;
|
||||
while (iter->has_next(card_index)) {
|
||||
if (current_card < jump_to_card) {
|
||||
++current_card;
|
||||
continue;
|
||||
|
||||
// We claim cards in block so as to recude the contention. The block size is determined by
|
||||
// the G1RSetScanBlockSize parameter.
|
||||
size_t jump_to_card = hrrs->iter_claimed_next(_block_size);
|
||||
for (size_t current_card = 0; iter->has_next(card_index); current_card++) {
|
||||
if (current_card >= jump_to_card + _block_size) {
|
||||
jump_to_card = hrrs->iter_claimed_next(_block_size);
|
||||
}
|
||||
if (current_card < jump_to_card) continue;
|
||||
HeapWord* card_start = _g1h->bot_shared()->address_for_index(card_index);
|
||||
#if 0
|
||||
gclog_or_tty->print("Rem set iteration yielded card [" PTR_FORMAT ", " PTR_FORMAT ").\n",
|
||||
@ -247,22 +249,14 @@ public:
|
||||
|
||||
// If the card is dirty, then we will scan it during updateRS.
|
||||
if (!card_region->in_collection_set() && !_ct_bs->is_card_dirty(card_index)) {
|
||||
if (!_ct_bs->is_card_claimed(card_index) && _ct_bs->claim_card(card_index)) {
|
||||
scanCard(card_index, card_region);
|
||||
} else if (_try_claimed) {
|
||||
if (jump_to_card == 0 || jump_to_card != current_card) {
|
||||
// We did some useful work in the previous iteration.
|
||||
// Decrease the distance.
|
||||
skip_distance = MAX2(skip_distance >> 1, _min_skip_distance);
|
||||
} else {
|
||||
// Previous iteration resulted in a claim failure.
|
||||
// Increase the distance.
|
||||
skip_distance = MIN2(skip_distance << 1, _max_skip_distance);
|
||||
}
|
||||
jump_to_card = current_card + skip_distance;
|
||||
}
|
||||
// We make the card as "claimed" lazily (so races are possible but they're benign),
|
||||
// which reduces the number of duplicate scans (the rsets of the regions in the cset
|
||||
// can intersect).
|
||||
if (!_ct_bs->is_card_claimed(card_index)) {
|
||||
_ct_bs->set_card_claimed(card_index);
|
||||
scanCard(card_index, card_region);
|
||||
}
|
||||
}
|
||||
++current_card;
|
||||
}
|
||||
if (!_try_claimed) {
|
||||
hrrs->set_iter_complete();
|
||||
@ -299,30 +293,18 @@ void HRInto_G1RemSet::scanRS(OopsInHeapRegionClosure* oc, int worker_i) {
|
||||
double rs_time_start = os::elapsedTime();
|
||||
HeapRegion *startRegion = calculateStartRegion(worker_i);
|
||||
|
||||
BufferingOopsInHeapRegionClosure boc(oc);
|
||||
ScanRSClosure scanRScl(&boc, worker_i);
|
||||
ScanRSClosure scanRScl(oc, worker_i);
|
||||
_g1->collection_set_iterate_from(startRegion, &scanRScl);
|
||||
scanRScl.set_try_claimed();
|
||||
_g1->collection_set_iterate_from(startRegion, &scanRScl);
|
||||
|
||||
boc.done();
|
||||
double closure_app_time_sec = boc.closure_app_seconds();
|
||||
double scan_rs_time_sec = (os::elapsedTime() - rs_time_start) -
|
||||
closure_app_time_sec;
|
||||
double closure_app_time_ms = closure_app_time_sec * 1000.0;
|
||||
double scan_rs_time_sec = os::elapsedTime() - rs_time_start;
|
||||
|
||||
assert( _cards_scanned != NULL, "invariant" );
|
||||
_cards_scanned[worker_i] = scanRScl.cards_done();
|
||||
|
||||
_g1p->record_scan_rs_start_time(worker_i, rs_time_start * 1000.0);
|
||||
_g1p->record_scan_rs_time(worker_i, scan_rs_time_sec * 1000.0);
|
||||
|
||||
double scan_new_refs_time_ms = _g1p->get_scan_new_refs_time(worker_i);
|
||||
if (scan_new_refs_time_ms > 0.0) {
|
||||
closure_app_time_ms += scan_new_refs_time_ms;
|
||||
}
|
||||
|
||||
_g1p->record_obj_copy_time(worker_i, closure_app_time_ms);
|
||||
}
|
||||
|
||||
void HRInto_G1RemSet::updateRS(int worker_i) {
|
||||
@ -449,9 +431,8 @@ HRInto_G1RemSet::scanNewRefsRS_work(OopsInHeapRegionClosure* oc,
|
||||
oc->do_oop(p);
|
||||
}
|
||||
}
|
||||
_g1p->record_scan_new_refs_time(worker_i,
|
||||
(os::elapsedTime() - scan_new_refs_start_sec)
|
||||
* 1000.0);
|
||||
double scan_new_refs_time_ms = (os::elapsedTime() - scan_new_refs_start_sec) * 1000.0;
|
||||
_g1p->record_scan_new_refs_time(worker_i, scan_new_refs_time_ms);
|
||||
}
|
||||
|
||||
void HRInto_G1RemSet::cleanupHRRS() {
|
||||
|
@ -207,8 +207,20 @@
develop(bool, G1PrintOopAppls, false, \
"When true, print applications of closures to external locs.") \
\
develop(intx, G1LogRSRegionEntries, 7, \
"Log_2 of max number of regions for which we keep bitmaps.") \
develop(intx, G1RSetRegionEntriesBase, 256, \
"Max number of regions in a fine-grain table per MB.") \
\
product(intx, G1RSetRegionEntries, 0, \
"Max number of regions for which we keep bitmaps." \
"Will be set ergonomically by default") \
\
develop(intx, G1RSetSparseRegionEntriesBase, 4, \
"Max number of entries per region in a sparse table " \
"per MB.") \
\
product(intx, G1RSetSparseRegionEntries, 0, \
"Max number of entries per region in a sparse table." \
"Will be set ergonomically by default.") \
\
develop(bool, G1RecordHRRSOops, false, \
"When true, record recent calls to rem set operations.") \
@ -291,6 +303,14 @@
"a particular entry exceeds this value.") \
\
develop(bool, G1VerifyCTCleanup, false, \
"Verify card table cleanup.")
"Verify card table cleanup.") \
\
product(uintx, G1RSetScanBlockSize, 64, \
"Size of a work unit of cards claimed by a worker thread" \
"during RSet scanning.") \
\
develop(bool, ReduceInitialCardMarksForG1, false, \
"When ReduceInitialCardMarks is true, this flag setting " \
" controls whether G1 allows the RICM optimization")

G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
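Since both new product flags default to 0 and are filled in ergonomically by HeapRegionRemSet::setup_remset_size() (later in this diff), a command line only needs to mention them to override the computed sizes. An illustrative invocation for the experimental G1 of this era; the values are placeholders, not recommendations:

  java -XX:+UnlockExperimentalVMOptions -XX:+UseG1GC \
       -XX:G1RSetRegionEntries=512 -XX:G1RSetSparseRegionEntries=8 MyApp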
@ -33,11 +33,12 @@ enum G1Barrier {
};

template<bool do_gen_barrier, G1Barrier barrier,
bool do_mark_forwardee, bool skip_cset_test>
bool do_mark_forwardee>
class G1ParCopyClosure;
class G1ParScanClosure;
class G1ParPushHeapRSClosure;

typedef G1ParCopyClosure<false, G1BarrierEvac, false, true> G1ParScanHeapEvacClosure;
typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacClosure;

class FilterIntoCSClosure;
class FilterOutOfRegionClosure;
@ -51,6 +52,7 @@ class FilterAndMarkInHeapRegionAndIntoCSClosure;
#define FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES(f) \
f(G1ParScanHeapEvacClosure,_nv) \
f(G1ParScanClosure,_nv) \
f(G1ParPushHeapRSClosure,_nv) \
f(FilterIntoCSClosure,_nv) \
f(FilterOutOfRegionClosure,_nv) \
f(FilterInHeapRegionAndIntoCSClosure,_nv) \
@ -258,42 +258,6 @@ class PosParPRT: public PerRegionTable {
|
||||
ReserveParTableExpansion = 1
|
||||
};
|
||||
|
||||
void par_expand() {
|
||||
int n = HeapRegionRemSet::num_par_rem_sets()-1;
|
||||
if (n <= 0) return;
|
||||
if (_par_tables == NULL) {
|
||||
PerRegionTable* res =
|
||||
(PerRegionTable*)
|
||||
Atomic::cmpxchg_ptr((PerRegionTable*)ReserveParTableExpansion,
|
||||
&_par_tables, NULL);
|
||||
if (res != NULL) return;
|
||||
// Otherwise, we reserved the right to do the expansion.
|
||||
|
||||
PerRegionTable** ptables = NEW_C_HEAP_ARRAY(PerRegionTable*, n);
|
||||
for (int i = 0; i < n; i++) {
|
||||
PerRegionTable* ptable = PerRegionTable::alloc(hr());
|
||||
ptables[i] = ptable;
|
||||
}
|
||||
// Here we do not need an atomic.
|
||||
_par_tables = ptables;
|
||||
#if COUNT_PAR_EXPANDS
|
||||
print_par_expand();
|
||||
#endif
|
||||
// We must put this table on the expanded list.
|
||||
PosParPRT* exp_head = _par_expanded_list;
|
||||
while (true) {
|
||||
set_next_par_expanded(exp_head);
|
||||
PosParPRT* res =
|
||||
(PosParPRT*)
|
||||
Atomic::cmpxchg_ptr(this, &_par_expanded_list, exp_head);
|
||||
if (res == exp_head) return;
|
||||
// Otherwise.
|
||||
exp_head = res;
|
||||
}
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
}
|
||||
|
||||
void par_contract() {
|
||||
assert(_par_tables != NULL, "Precondition.");
|
||||
int n = HeapRegionRemSet::num_par_rem_sets()-1;
|
||||
@ -391,13 +355,49 @@ public:
|
||||
void set_next(PosParPRT* nxt) { _next = nxt; }
|
||||
PosParPRT** next_addr() { return &_next; }
|
||||
|
||||
bool should_expand(int tid) {
|
||||
return par_tables() == NULL && tid > 0 && hr()->is_gc_alloc_region();
|
||||
}
|
||||
|
||||
void par_expand() {
|
||||
int n = HeapRegionRemSet::num_par_rem_sets()-1;
|
||||
if (n <= 0) return;
|
||||
if (_par_tables == NULL) {
|
||||
PerRegionTable* res =
|
||||
(PerRegionTable*)
|
||||
Atomic::cmpxchg_ptr((PerRegionTable*)ReserveParTableExpansion,
|
||||
&_par_tables, NULL);
|
||||
if (res != NULL) return;
|
||||
// Otherwise, we reserved the right to do the expansion.
|
||||
|
||||
PerRegionTable** ptables = NEW_C_HEAP_ARRAY(PerRegionTable*, n);
|
||||
for (int i = 0; i < n; i++) {
|
||||
PerRegionTable* ptable = PerRegionTable::alloc(hr());
|
||||
ptables[i] = ptable;
|
||||
}
|
||||
// Here we do not need an atomic.
|
||||
_par_tables = ptables;
|
||||
#if COUNT_PAR_EXPANDS
|
||||
print_par_expand();
|
||||
#endif
|
||||
// We must put this table on the expanded list.
|
||||
PosParPRT* exp_head = _par_expanded_list;
|
||||
while (true) {
|
||||
set_next_par_expanded(exp_head);
|
||||
PosParPRT* res =
|
||||
(PosParPRT*)
|
||||
Atomic::cmpxchg_ptr(this, &_par_expanded_list, exp_head);
|
||||
if (res == exp_head) return;
|
||||
// Otherwise.
|
||||
exp_head = res;
|
||||
}
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
}
|
||||
|
||||
void add_reference(OopOrNarrowOopStar from, int tid) {
|
||||
// Expand if necessary.
|
||||
PerRegionTable** pt = par_tables();
|
||||
if (par_tables() == NULL && tid > 0 && hr()->is_gc_alloc_region()) {
|
||||
par_expand();
|
||||
pt = par_tables();
|
||||
}
|
||||
if (pt != NULL) {
|
||||
// We always have to assume that mods to table 0 are in parallel,
|
||||
// because of the claiming scheme in parallel expansion. A thread
|
||||
@ -505,12 +505,13 @@ OtherRegionsTable::OtherRegionsTable(HeapRegion* hr) :
|
||||
typedef PosParPRT* PosParPRTPtr;
|
||||
if (_max_fine_entries == 0) {
|
||||
assert(_mod_max_fine_entries_mask == 0, "Both or none.");
|
||||
_max_fine_entries = (size_t)(1 << G1LogRSRegionEntries);
|
||||
size_t max_entries_log = (size_t)log2_long((jlong)G1RSetRegionEntries);
|
||||
_max_fine_entries = (size_t)(1 << max_entries_log);
|
||||
_mod_max_fine_entries_mask = _max_fine_entries - 1;
|
||||
#if SAMPLE_FOR_EVICTION
|
||||
assert(_fine_eviction_sample_size == 0
|
||||
&& _fine_eviction_stride == 0, "All init at same time.");
|
||||
_fine_eviction_sample_size = MAX2((size_t)4, (size_t)G1LogRSRegionEntries);
|
||||
_fine_eviction_sample_size = MAX2((size_t)4, max_entries_log);
|
||||
_fine_eviction_stride = _max_fine_entries / _fine_eviction_sample_size;
|
||||
#endif
|
||||
}
|
||||
@ -655,13 +656,6 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
|
||||
#endif
|
||||
}
|
||||
|
||||
// Otherwise, transfer from sparse to fine-grain.
|
||||
CardIdx_t cards[SparsePRTEntry::CardsPerEntry];
|
||||
if (G1HRRSUseSparseTable) {
|
||||
bool res = _sparse_table.get_cards(from_hrs_ind, &cards[0]);
|
||||
assert(res, "There should have been an entry");
|
||||
}
|
||||
|
||||
if (_n_fine_entries == _max_fine_entries) {
|
||||
prt = delete_region_table();
|
||||
} else {
|
||||
@ -676,10 +670,12 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
|
||||
_fine_grain_regions[ind] = prt;
|
||||
_n_fine_entries++;
|
||||
|
||||
// Add in the cards from the sparse table.
|
||||
if (G1HRRSUseSparseTable) {
|
||||
for (int i = 0; i < SparsePRTEntry::CardsPerEntry; i++) {
|
||||
CardIdx_t c = cards[i];
|
||||
// Transfer from sparse to fine-grain.
|
||||
SparsePRTEntry *sprt_entry = _sparse_table.get_entry(from_hrs_ind);
|
||||
assert(sprt_entry != NULL, "There should have been an entry");
|
||||
for (int i = 0; i < SparsePRTEntry::cards_num(); i++) {
|
||||
CardIdx_t c = sprt_entry->card(i);
|
||||
if (c != SparsePRTEntry::NullEntry) {
|
||||
prt->add_card(c);
|
||||
}
|
||||
@ -696,7 +692,21 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
|
||||
// OtherRegionsTable for why this is OK.
|
||||
assert(prt != NULL, "Inv");
|
||||
|
||||
prt->add_reference(from, tid);
|
||||
if (prt->should_expand(tid)) {
|
||||
MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
|
||||
HeapRegion* prt_hr = prt->hr();
|
||||
if (prt_hr == from_hr) {
|
||||
// Make sure the table still corresponds to the same region
|
||||
prt->par_expand();
|
||||
prt->add_reference(from, tid);
|
||||
}
|
||||
// else: The table has been concurrently coarsened, evicted, and
|
||||
// the table data structure re-used for another table. So, we
|
||||
// don't need to add the reference any more given that the table
|
||||
// has been coarsened and the whole region will be scanned anyway.
|
||||
} else {
|
||||
prt->add_reference(from, tid);
|
||||
}
|
||||
if (G1RecordHRRSOops) {
|
||||
HeapRegionRemSet::record(hr(), from);
|
||||
#if HRRS_VERBOSE
|
||||
@ -1070,6 +1080,19 @@ HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
{}


void HeapRegionRemSet::setup_remset_size() {
// Setup sparse and fine-grain tables sizes.
// table_size = base * (log(region_size / 1M) + 1)
int region_size_log_mb = MAX2((int)HeapRegion::LogOfHRGrainBytes - (int)LOG_M, 0);
if (FLAG_IS_DEFAULT(G1RSetSparseRegionEntries)) {
G1RSetSparseRegionEntries = G1RSetSparseRegionEntriesBase * (region_size_log_mb + 1);
}
if (FLAG_IS_DEFAULT(G1RSetRegionEntries)) {
G1RSetRegionEntries = G1RSetRegionEntriesBase * (region_size_log_mb + 1);
}
guarantee(G1RSetSparseRegionEntries > 0 && G1RSetRegionEntries > 0 , "Sanity");
}

void HeapRegionRemSet::init_for_par_iteration() {
_iter_state = Unclaimed;
}
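A quick worked example of the sizing rule above, using the base values introduced in g1_globals.hpp earlier in this diff (256 fine-grain and 4 sparse entries per MB of region size); the arithmetic is illustrative, the formula is the one in the comment:

  // region size 1 MB: LogOfHRGrainBytes - LOG_M = 0  -> factor = 0 + 1 = 1
  //   G1RSetRegionEntries       = 256 * 1 = 256
  //   G1RSetSparseRegionEntries =   4 * 1 = 4
  // region size 4 MB: log2(4M) - log2(1M) = 2        -> factor = 2 + 1 = 3
  //   G1RSetRegionEntries       = 256 * 3 = 768
  //   G1RSetSparseRegionEntries =   4 * 3 = 12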
@ -1385,7 +1408,7 @@ void HeapRegionRemSet::test() {
os::sleep(Thread::current(), (jlong)5000, false);
G1CollectedHeap* g1h = G1CollectedHeap::heap();

// Run with "-XX:G1LogRSRegionEntries=2", so that 1 and 5 end up in same
// Run with "-XX:G1LogRSetRegionEntries=2", so that 1 and 5 end up in same
// hash bucket.
HeapRegion* hr0 = g1h->region_at(0);
HeapRegion* hr1 = g1h->region_at(1);
@ -187,7 +187,8 @@ private:
void clear_outgoing_entries();

enum ParIterState { Unclaimed, Claimed, Complete };
ParIterState _iter_state;
volatile ParIterState _iter_state;
volatile jlong _iter_claimed;

// Unused unless G1RecordHRRSOops is true.

@ -209,6 +210,7 @@ public:
HeapRegion* hr);

static int num_par_rem_sets();
static void setup_remset_size();

HeapRegion* hr() const {
return _other_regions.hr();
@ -272,6 +274,19 @@ public:
// Returns "true" iff the region's iteration is complete.
bool iter_is_complete();

// Support for claiming blocks of cards during iteration
void set_iter_claimed(size_t x) { _iter_claimed = (jlong)x; }
size_t iter_claimed() const { return (size_t)_iter_claimed; }
// Claim the next block of cards
size_t iter_claimed_next(size_t step) {
size_t current, next;
do {
current = iter_claimed();
next = current + step;
} while (Atomic::cmpxchg((jlong)next, &_iter_claimed, (jlong)current) != (jlong)current);
return current;
}

// Initialize the given iterator to iterate over this rem set.
void init_iterator(HeapRegionRemSetIterator* iter) const;

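iter_claimed_next() is the primitive the reworked ScanRSClosure in g1RemSet.cpp (earlier in this diff) uses to hand out card blocks: each worker CAS-bumps the shared cursor by G1RSetScanBlockSize and scans only the block it claimed. A condensed sketch of that consumer loop, paraphrasing the g1RemSet.cpp hunk:

  // Workers race on iter_claimed_next(); each winner owns [jump_to_card, jump_to_card + block).
  size_t jump_to_card = hrrs->iter_claimed_next(_block_size);
  for (size_t current_card = 0; iter->has_next(card_index); current_card++) {
    if (current_card >= jump_to_card + _block_size) {
      jump_to_card = hrrs->iter_claimed_next(_block_size);  // claim the next block
    }
    if (current_card < jump_to_card) continue;               // not ours; skip cheaply
    // ... scan card_index ...
  }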
@ -27,7 +27,7 @@
|
||||
|
||||
#define SPARSE_PRT_VERBOSE 0
|
||||
|
||||
#define UNROLL_CARD_LOOPS 1
|
||||
#define UNROLL_CARD_LOOPS 1
|
||||
|
||||
void SparsePRT::init_iterator(SparsePRTIter* sprt_iter) {
|
||||
sprt_iter->init(this);
|
||||
@ -36,27 +36,32 @@ void SparsePRT::init_iterator(SparsePRTIter* sprt_iter) {
|
||||
void SparsePRTEntry::init(RegionIdx_t region_ind) {
|
||||
_region_ind = region_ind;
|
||||
_next_index = NullEntry;
|
||||
|
||||
#if UNROLL_CARD_LOOPS
|
||||
assert(CardsPerEntry == 4, "Assumption. If changes, un-unroll.");
|
||||
_cards[0] = NullEntry;
|
||||
_cards[1] = NullEntry;
|
||||
_cards[2] = NullEntry;
|
||||
_cards[3] = NullEntry;
|
||||
assert((cards_num() & (UnrollFactor - 1)) == 0, "Invalid number of cards in the entry");
|
||||
for (int i = 0; i < cards_num(); i += UnrollFactor) {
|
||||
_cards[i] = NullEntry;
|
||||
_cards[i + 1] = NullEntry;
|
||||
_cards[i + 2] = NullEntry;
|
||||
_cards[i + 3] = NullEntry;
|
||||
}
|
||||
#else
|
||||
for (int i = 0; i < CardsPerEntry; i++)
|
||||
for (int i = 0; i < cards_num(); i++)
|
||||
_cards[i] = NullEntry;
|
||||
#endif
|
||||
}
|
||||
|
||||
bool SparsePRTEntry::contains_card(CardIdx_t card_index) const {
|
||||
#if UNROLL_CARD_LOOPS
|
||||
assert(CardsPerEntry == 4, "Assumption. If changes, un-unroll.");
|
||||
if (_cards[0] == card_index) return true;
|
||||
if (_cards[1] == card_index) return true;
|
||||
if (_cards[2] == card_index) return true;
|
||||
if (_cards[3] == card_index) return true;
|
||||
assert((cards_num() & (UnrollFactor - 1)) == 0, "Invalid number of cards in the entry");
|
||||
for (int i = 0; i < cards_num(); i += UnrollFactor) {
|
||||
if (_cards[i] == card_index ||
|
||||
_cards[i + 1] == card_index ||
|
||||
_cards[i + 2] == card_index ||
|
||||
_cards[i + 3] == card_index) return true;
|
||||
}
|
||||
#else
|
||||
for (int i = 0; i < CardsPerEntry; i++) {
|
||||
for (int i = 0; i < cards_num(); i++) {
|
||||
if (_cards[i] == card_index) return true;
|
||||
}
|
||||
#endif
|
||||
@ -67,14 +72,16 @@ bool SparsePRTEntry::contains_card(CardIdx_t card_index) const {
|
||||
int SparsePRTEntry::num_valid_cards() const {
|
||||
int sum = 0;
|
||||
#if UNROLL_CARD_LOOPS
|
||||
assert(CardsPerEntry == 4, "Assumption. If changes, un-unroll.");
|
||||
if (_cards[0] != NullEntry) sum++;
|
||||
if (_cards[1] != NullEntry) sum++;
|
||||
if (_cards[2] != NullEntry) sum++;
|
||||
if (_cards[3] != NullEntry) sum++;
|
||||
assert((cards_num() & (UnrollFactor - 1)) == 0, "Invalid number of cards in the entry");
|
||||
for (int i = 0; i < cards_num(); i += UnrollFactor) {
|
||||
sum += (_cards[i] != NullEntry);
|
||||
sum += (_cards[i + 1] != NullEntry);
|
||||
sum += (_cards[i + 2] != NullEntry);
|
||||
sum += (_cards[i + 3] != NullEntry);
|
||||
}
|
||||
#else
|
||||
for (int i = 0; i < CardsPerEntry; i++) {
|
||||
if (_cards[i] != NulLEntry) sum++;
|
||||
for (int i = 0; i < cards_num(); i++) {
|
||||
sum += (_cards[i] != NullEntry);
|
||||
}
|
||||
#endif
|
||||
// Otherwise, we're full.
|
||||
@ -83,27 +90,27 @@ int SparsePRTEntry::num_valid_cards() const {

 SparsePRTEntry::AddCardResult SparsePRTEntry::add_card(CardIdx_t card_index) {
 #if UNROLL_CARD_LOOPS
-  assert(CardsPerEntry == 4, "Assumption. If changes, un-unroll.");
-  CardIdx_t c = _cards[0];
-  if (c == card_index) return found;
-  if (c == NullEntry) { _cards[0] = card_index; return added; }
-  c = _cards[1];
-  if (c == card_index) return found;
-  if (c == NullEntry) { _cards[1] = card_index; return added; }
-  c = _cards[2];
-  if (c == card_index) return found;
-  if (c == NullEntry) { _cards[2] = card_index; return added; }
-  c = _cards[3];
-  if (c == card_index) return found;
-  if (c == NullEntry) { _cards[3] = card_index; return added; }
+  assert((cards_num() & (UnrollFactor - 1)) == 0, "Invalid number of cards in the entry");
+  CardIdx_t c;
+  for (int i = 0; i < cards_num(); i += UnrollFactor) {
+    c = _cards[i];
+    if (c == card_index) return found;
+    if (c == NullEntry) { _cards[i] = card_index; return added; }
+    c = _cards[i + 1];
+    if (c == card_index) return found;
+    if (c == NullEntry) { _cards[i + 1] = card_index; return added; }
+    c = _cards[i + 2];
+    if (c == card_index) return found;
+    if (c == NullEntry) { _cards[i + 2] = card_index; return added; }
+    c = _cards[i + 3];
+    if (c == card_index) return found;
+    if (c == NullEntry) { _cards[i + 3] = card_index; return added; }
+  }
 #else
-  for (int i = 0; i < CardsPerEntry; i++) {
+  for (int i = 0; i < cards_num(); i++) {
     CardIdx_t c = _cards[i];
     if (c == card_index) return found;
-    if (c == NullEntry) {
-      _cards[i] = card_index;
-      return added;
-    }
+    if (c == NullEntry) { _cards[i] = card_index; return added; }
   }
 #endif
   // Otherwise, we're full.
@ -112,13 +119,15 @@ SparsePRTEntry::AddCardResult SparsePRTEntry::add_card(CardIdx_t card_index) {

 void SparsePRTEntry::copy_cards(CardIdx_t* cards) const {
 #if UNROLL_CARD_LOOPS
-  assert(CardsPerEntry == 4, "Assumption. If changes, un-unroll.");
-  cards[0] = _cards[0];
-  cards[1] = _cards[1];
-  cards[2] = _cards[2];
-  cards[3] = _cards[3];
+  assert((cards_num() & (UnrollFactor - 1)) == 0, "Invalid number of cards in the entry");
+  for (int i = 0; i < cards_num(); i += UnrollFactor) {
+    cards[i] = _cards[i];
+    cards[i + 1] = _cards[i + 1];
+    cards[i + 2] = _cards[i + 2];
+    cards[i + 3] = _cards[i + 3];
+  }
 #else
-  for (int i = 0; i < CardsPerEntry; i++) {
+  for (int i = 0; i < cards_num(); i++) {
     cards[i] = _cards[i];
   }
 #endif
@ -133,7 +142,7 @@ void SparsePRTEntry::copy_cards(SparsePRTEntry* e) const {
 RSHashTable::RSHashTable(size_t capacity) :
   _capacity(capacity), _capacity_mask(capacity-1),
   _occupied_entries(0), _occupied_cards(0),
-  _entries(NEW_C_HEAP_ARRAY(SparsePRTEntry, capacity)),
+  _entries((SparsePRTEntry*)NEW_C_HEAP_ARRAY(char, SparsePRTEntry::size() * capacity)),
   _buckets(NEW_C_HEAP_ARRAY(int, capacity)),
   _free_list(NullEntry), _free_region(0)
 {
@ -161,8 +170,8 @@ void RSHashTable::clear() {
          "_capacity too large");

   // This will put -1 == NullEntry in the key field of all entries.
-  memset(_entries, -1, _capacity * sizeof(SparsePRTEntry));
-  memset(_buckets, -1, _capacity * sizeof(int));
+  memset(_entries, NullEntry, _capacity * SparsePRTEntry::size());
+  memset(_buckets, NullEntry, _capacity * sizeof(int));
   _free_list = NullEntry;
   _free_region = 0;
 }
@ -175,8 +184,8 @@ bool RSHashTable::add_card(RegionIdx_t region_ind, CardIdx_t card_index) {
   if (res == SparsePRTEntry::added) _occupied_cards++;
 #if SPARSE_PRT_VERBOSE
   gclog_or_tty->print_cr(" after add_card[%d]: valid-cards = %d.",
-                         pointer_delta(e, _entries, sizeof(SparsePRTEntry)),
-                         e->num_valid_cards());
+                         pointer_delta(e, _entries, SparsePRTEntry::size()),
+                         e->num_valid_cards());
 #endif
   assert(e->num_valid_cards() > 0, "Postcondition");
   return res != SparsePRTEntry::overflow;
@ -199,6 +208,22 @@ bool RSHashTable::get_cards(RegionIdx_t region_ind, CardIdx_t* cards) {
   return true;
 }

+SparsePRTEntry* RSHashTable::get_entry(RegionIdx_t region_ind) {
+  int ind = (int) (region_ind & capacity_mask());
+  int cur_ind = _buckets[ind];
+  SparsePRTEntry* cur;
+  while (cur_ind != NullEntry &&
+         (cur = entry(cur_ind))->r_ind() != region_ind) {
+    cur_ind = cur->next_index();
+  }
+
+  if (cur_ind == NullEntry) return NULL;
+  // Otherwise...
+  assert(cur->r_ind() == region_ind, "Postcondition of loop + test above.");
+  assert(cur->num_valid_cards() > 0, "Inv");
+  return cur;
+}
+
 bool RSHashTable::delete_entry(RegionIdx_t region_ind) {
   int ind = (int) (region_ind & capacity_mask());
   int* prev_loc = &_buckets[ind];
@ -225,20 +250,8 @@ RSHashTable::entry_for_region_ind(RegionIdx_t region_ind) const {
   int ind = (int) (region_ind & capacity_mask());
   int cur_ind = _buckets[ind];
   SparsePRTEntry* cur;
-  // XXX
-  // int k = 0;
   while (cur_ind != NullEntry &&
          (cur = entry(cur_ind))->r_ind() != region_ind) {
-    /*
-    k++;
-    if (k > 10) {
-      gclog_or_tty->print_cr("RSHashTable::entry_for_region_ind(%d): "
-                             "k = %d, cur_ind = %d.", region_ind, k, cur_ind);
-      if (k >= 1000) {
-        while (1) ;
-      }
-    }
-    */
     cur_ind = cur->next_index();
   }

@ -319,7 +332,7 @@ size_t /* RSHashTable:: */ RSHashTableIter::compute_card_ind(CardIdx_t ci) {
 bool /* RSHashTable:: */ RSHashTableIter::has_next(size_t& card_index) {
   _card_ind++;
   CardIdx_t ci;
-  if (_card_ind < SparsePRTEntry::CardsPerEntry &&
+  if (_card_ind < SparsePRTEntry::cards_num() &&
       ((ci = _rsht->entry(_bl_ind)->card(_card_ind)) !=
        SparsePRTEntry::NullEntry)) {
     card_index = compute_card_ind(ci);
@ -359,7 +372,7 @@ bool RSHashTable::contains_card(RegionIdx_t region_index, CardIdx_t card_index)

 size_t RSHashTable::mem_size() const {
   return sizeof(this) +
-    capacity() * (sizeof(SparsePRTEntry) + sizeof(int));
+    capacity() * (SparsePRTEntry::size() + sizeof(int));
 }

 // ----------------------------------------------------------------------
@ -446,6 +459,10 @@ bool SparsePRT::get_cards(RegionIdx_t region_id, CardIdx_t* cards) {
   return _next->get_cards(region_id, cards);
 }

+SparsePRTEntry* SparsePRT::get_entry(RegionIdx_t region_id) {
+  return _next->get_entry(region_id);
+}
+
 bool SparsePRT::delete_entry(RegionIdx_t region_id) {
   return _next->delete_entry(region_id);
 }
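The get_entry lookup added above and the cleaned-up entry_for_region_ind share one scheme; here is a simplified sketch (assumed names, not the HotSpot sources). Buckets hold entry indices rather than pointers, and collisions are chained through a next-index field, so the whole table lives in two flat arrays.

struct EntrySketch {
  int region;      // key; -1 (NullEntry) when unused
  int next_index;  // index of the next entry in this bucket's chain, or -1
};

// Returns the index of the entry for 'region', or -1 if absent.
// 'mask' is capacity - 1, with capacity a power of two.
static int lookup(const int* buckets, const EntrySketch* entries,
                  int mask, int region) {
  int cur = buckets[region & mask];  // head of this bucket's chain
  while (cur != -1 && entries[cur].region != region) {
    cur = entries[cur].next_index;   // walk the chain by index, not pointer
  }
  return cur;
}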
@ -32,21 +32,28 @@
 // insertions only enqueue old versions for deletions, but do not delete
 // old versions synchronously.


 class SparsePRTEntry: public CHeapObj {
 public:

   enum SomePublicConstants {
-    CardsPerEntry = 4,
-    NullEntry = -1
+    NullEntry = -1,
+    UnrollFactor = 4
   };

 private:
   RegionIdx_t _region_ind;
   int         _next_index;
-  CardIdx_t   _cards[CardsPerEntry];
-
+  CardIdx_t   _cards[1];
+  // WARNING: Don't put any data members beyond this line. Card array has, in fact, variable length.
+  // It should always be the last data member.
 public:
+  // Returns the size of the entry, used for entry allocation.
+  static size_t size() { return sizeof(SparsePRTEntry) + sizeof(CardIdx_t) * (cards_num() - 1); }
+  // Returns the size of the card array.
+  static int cards_num() {
+    // The number of cards should be a multiple of 4, because that's our current
+    // unrolling factor.
+    static const int s = MAX2<int>(G1RSetSparseRegionEntries & ~(UnrollFactor - 1), UnrollFactor);
+    return s;
+  }

   // Set the region_ind to the given value, and delete all cards.
   inline void init(RegionIdx_t region_ind);
@ -134,12 +141,15 @@ public:
   bool add_card(RegionIdx_t region_id, CardIdx_t card_index);

   bool get_cards(RegionIdx_t region_id, CardIdx_t* cards);

   bool delete_entry(RegionIdx_t region_id);

   bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const;

   void add_entry(SparsePRTEntry* e);

+  SparsePRTEntry* get_entry(RegionIdx_t region_id);
+
   void clear();

   size_t capacity() const { return _capacity; }
@ -148,7 +158,7 @@ public:
   size_t occupied_cards() const { return _occupied_cards; }
   size_t mem_size() const;

-  SparsePRTEntry* entry(int i) const { return &_entries[i]; }
+  SparsePRTEntry* entry(int i) const { return (SparsePRTEntry*)((char*)_entries + SparsePRTEntry::size() * i); }

   void print();
 };
@ -157,7 +167,7 @@ public:
 class RSHashTableIter VALUE_OBJ_CLASS_SPEC {
   int _tbl_ind;    // [-1, 0.._rsht->_capacity)
   int _bl_ind;     // [-1, 0.._rsht->_capacity)
-  short _card_ind; // [0..CardsPerEntry)
+  short _card_ind; // [0..SparsePRTEntry::cards_num())
   RSHashTable* _rsht;
   size_t _heap_bot_card_ind;

@ -176,7 +186,7 @@ public:
   RSHashTableIter(size_t heap_bot_card_ind) :
     _tbl_ind(RSHashTable::NullEntry),
     _bl_ind(RSHashTable::NullEntry),
-    _card_ind((SparsePRTEntry::CardsPerEntry-1)),
+    _card_ind((SparsePRTEntry::cards_num() - 1)),
     _rsht(NULL),
     _heap_bot_card_ind(heap_bot_card_ind)
   {}
@ -185,7 +195,7 @@ public:
     _rsht = rsht;
     _tbl_ind = -1; // So that first increment gets to 0.
     _bl_ind = RSHashTable::NullEntry;
-    _card_ind = (SparsePRTEntry::CardsPerEntry-1);
+    _card_ind = (SparsePRTEntry::cards_num() - 1);
   }

   bool has_next(size_t& card_index);
@ -241,9 +251,13 @@ public:

   // If the table holds an entry for "region_ind", copies its
   // cards into "cards", which must be an array of length at least
-  // "CardsPerEntry", and returns "true"; otherwise, returns "false".
+  // "SparsePRTEntry::cards_num()", and returns "true"; otherwise,
+  // returns "false".
   bool get_cards(RegionIdx_t region_ind, CardIdx_t* cards);

+  // Return the pointer to the entry associated with the given region.
+  SparsePRTEntry* get_entry(RegionIdx_t region_ind);
+
   // If there is an entry for "region_ind", removes it and return "true";
   // otherwise returns "false."
   bool delete_entry(RegionIdx_t region_ind);
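A minimal sketch (assumed layout, not the HotSpot sources) of the variable-length entry trick introduced above: the struct declares a one-element trailing array, each entry is allocated with room for n elements, and entries are addressed by byte offset -- which is why entry(i) in the diff computes (char*)_entries + SparsePRTEntry::size() * i instead of &_entries[i].

#include <cstdlib>

struct VarEntry {
  int key;
  int cards[1];  // really n cards; must remain the last data member

  // Bytes needed for one entry holding n cards (the first card is
  // already counted inside sizeof(VarEntry)).
  static size_t size(int n) {
    return sizeof(VarEntry) + sizeof(int) * (n - 1);
  }
};

// Index into a flat allocation of variable-size entries by byte offset.
static VarEntry* entry_at(void* base, int n, int i) {
  return (VarEntry*)((char*)base + VarEntry::size(n) * i);
}

// Usage sketch: one flat block holds 'count' entries of n cards each.
// VarEntry* table = (VarEntry*)malloc(VarEntry::size(n) * count);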
@ -66,7 +66,8 @@ void CollectedHeap::pre_initialize() {
   // Used for ReduceInitialCardMarks (when COMPILER2 is used);
   // otherwise remains unused.
 #ifdef COMPILER2
-  _defer_initial_card_mark = ReduceInitialCardMarks && (DeferInitialCardMark || card_mark_must_follow_store());
+  _defer_initial_card_mark = ReduceInitialCardMarks && can_elide_tlab_store_barriers()
+                             && (DeferInitialCardMark || card_mark_must_follow_store());
 #else
   assert(_defer_initial_card_mark == false, "Who would set it?");
 #endif
@ -775,6 +775,7 @@ output.cpp allocation.inline.hpp
 output.cpp assembler.inline.hpp
 output.cpp callnode.hpp
 output.cpp cfgnode.hpp
+output.cpp compileBroker.hpp
 output.cpp debugInfo.hpp
 output.cpp debugInfoRec.hpp
 output.cpp handles.inline.hpp

@ -175,6 +175,7 @@ arguments.cpp jvmtiExport.hpp
 arguments.cpp management.hpp
 arguments.cpp oop.inline.hpp
 arguments.cpp os_<os_family>.inline.hpp
+arguments.cpp referenceProcessor.hpp
 arguments.cpp universe.inline.hpp
 arguments.cpp vm_version_<arch>.hpp

@ -921,6 +922,7 @@ classFileStream.hpp top.hpp

 classLoader.cpp allocation.inline.hpp
+classLoader.cpp arguments.hpp
 classLoader.cpp bytecodeStream.hpp
 classLoader.cpp classFileParser.hpp
 classLoader.cpp classFileStream.hpp
 classLoader.cpp classLoader.hpp
@ -948,6 +950,7 @@ classLoader.cpp jvm_misc.hpp
 classLoader.cpp management.hpp
 classLoader.cpp oop.inline.hpp
 classLoader.cpp oopFactory.hpp
+classLoader.cpp oopMapCache.hpp
 classLoader.cpp os_<os_family>.inline.hpp
 classLoader.cpp symbolOop.hpp
 classLoader.cpp systemDictionary.hpp
@ -1030,6 +1033,7 @@ codeCache.cpp objArrayOop.hpp
 codeCache.cpp oop.inline.hpp
 codeCache.cpp pcDesc.hpp
 codeCache.cpp resourceArea.hpp
+codeCache.cpp xmlstream.hpp

 codeCache.hpp allocation.hpp
 codeCache.hpp codeBlob.hpp
@ -1118,6 +1122,7 @@ compileBroker.cpp nativeLookup.hpp
 compileBroker.cpp oop.inline.hpp
 compileBroker.cpp os.hpp
+compileBroker.cpp sharedRuntime.hpp
 compileBroker.cpp sweeper.hpp
 compileBroker.cpp systemDictionary.hpp
 compileBroker.cpp vmSymbols.hpp

@ -1479,6 +1484,7 @@ deoptimization.cpp thread.hpp
 deoptimization.cpp vframe.hpp
 deoptimization.cpp vframeArray.hpp
 deoptimization.cpp vframe_hp.hpp
+deoptimization.cpp vmreg_<arch>.inline.hpp
 deoptimization.cpp xmlstream.hpp

 deoptimization.hpp allocation.hpp
@ -2530,6 +2536,7 @@ jvmtiExport.hpp growableArray.hpp
 jvmtiExport.hpp handles.hpp
 jvmtiExport.hpp iterator.hpp
 jvmtiExport.hpp jvmti.h
+jvmtiExport.hpp jvmticmlr.h
 jvmtiExport.hpp oop.hpp
 jvmtiExport.hpp oopsHierarchy.hpp

@ -2648,6 +2655,7 @@ loaderConstraints.cpp resourceArea.hpp
 loaderConstraints.cpp safepoint.hpp

 loaderConstraints.hpp dictionary.hpp
+loaderConstraints.hpp placeholders.hpp
 loaderConstraints.hpp hashtable.hpp

 location.cpp debugInfo.hpp
@ -3716,6 +3724,7 @@ sharedHeap.hpp permGen.hpp
 sharedRuntime.cpp abstractCompiler.hpp
 sharedRuntime.cpp arguments.hpp
 sharedRuntime.cpp biasedLocking.hpp
+sharedRuntime.cpp compileBroker.hpp
 sharedRuntime.cpp compiledIC.hpp
 sharedRuntime.cpp compilerOracle.hpp
 sharedRuntime.cpp copy.hpp
@ -3724,6 +3733,7 @@ sharedRuntime.cpp events.hpp
 sharedRuntime.cpp forte.hpp
 sharedRuntime.cpp gcLocker.inline.hpp
 sharedRuntime.cpp handles.inline.hpp
+sharedRuntime.cpp hashtable.inline.hpp
 sharedRuntime.cpp init.hpp
 sharedRuntime.cpp interfaceSupport.hpp
 sharedRuntime.cpp interpreterRuntime.hpp
@ -3751,6 +3761,7 @@ sharedRuntime.cpp xmlstream.hpp
 sharedRuntime.hpp allocation.hpp
 sharedRuntime.hpp bytecodeHistogram.hpp
 sharedRuntime.hpp bytecodeTracer.hpp
+sharedRuntime.hpp hashtable.hpp
 sharedRuntime.hpp linkResolver.hpp
 sharedRuntime.hpp resourceArea.hpp
 sharedRuntime.hpp threadLocalStorage.hpp
@ -3968,6 +3979,7 @@ stubs.hpp os_<os_family>.inline.hpp

 sweeper.cpp atomic.hpp
 sweeper.cpp codeCache.hpp
+sweeper.cpp compileBroker.hpp
 sweeper.cpp events.hpp
 sweeper.cpp methodOop.hpp
 sweeper.cpp mutexLocker.hpp
@ -3975,6 +3987,8 @@ sweeper.cpp nmethod.hpp
 sweeper.cpp os.hpp
 sweeper.cpp resourceArea.hpp
 sweeper.cpp sweeper.hpp
+sweeper.cpp vm_operations.hpp
+sweeper.cpp xmlstream.hpp

 symbolKlass.cpp gcLocker.hpp
 symbolKlass.cpp handles.inline.hpp
@ -4628,6 +4642,7 @@ vm_operations.cpp deoptimization.hpp
 vm_operations.cpp interfaceSupport.hpp
 vm_operations.cpp isGCActiveMark.hpp
 vm_operations.cpp resourceArea.hpp
+vm_operations.cpp sweeper.hpp
 vm_operations.cpp threadService.hpp
 vm_operations.cpp thread_<os_family>.inline.hpp
 vm_operations.cpp vmSymbols.hpp
@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@ -397,7 +397,7 @@ IRT_ENTRY(address, InterpreterRuntime::exception_handler_for_exception(JavaThrea

   // notify JVMTI of an exception throw; JVMTI will detect if this is a first
   // time throw or a stack unwinding throw and accordingly notify the debugger
-  if (JvmtiExport::can_post_exceptions()) {
+  if (JvmtiExport::can_post_on_exceptions()) {
     JvmtiExport::post_exception_throw(thread, h_method(), bcp(thread), h_exception());
   }

@ -426,7 +426,7 @@ IRT_ENTRY(address, InterpreterRuntime::exception_handler_for_exception(JavaThrea
   }
   // notify debugger of an exception catch
   // (this is good for exceptions caught in native methods as well)
-  if (JvmtiExport::can_post_exceptions()) {
+  if (JvmtiExport::can_post_on_exceptions()) {
     JvmtiExport::notice_unwind_due_to_exception(thread, h_method(), handler_pc, h_exception(), (handler_pc != NULL));
   }
@ -124,8 +124,6 @@ public:
   // Below length is the # array elements being written
   virtual void write_ref_array_pre( oop* dst, int length) {}
   virtual void write_ref_array_pre(narrowOop* dst, int length) {}
-  // Below MemRegion mr is expected to be HeapWord-aligned
-  inline void write_ref_array(MemRegion mr);
   // Below count is the # array elements being written, starting
   // at the address "start", which may not necessarily be HeapWord-aligned
   inline void write_ref_array(HeapWord* start, size_t count);
@ -42,16 +42,6 @@ void BarrierSet::write_ref_field(void* field, oop new_val) {
   }
 }

-void BarrierSet::write_ref_array(MemRegion mr) {
-  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start() , "Unaligned start");
-  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end" );
-  if (kind() == CardTableModRef) {
-    ((CardTableModRefBS*)this)->inline_write_ref_array(mr);
-  } else {
-    write_ref_array_work(mr);
-  }
-}
-
 // count is number of array elements being written
 void BarrierSet::write_ref_array(HeapWord* start, size_t count) {
   assert(count <= (size_t)max_intx, "count too large");
@ -61,12 +51,12 @@ void BarrierSet::write_ref_array(HeapWord* start, size_t count) {
   // strictly necessary for current uses, but a case of good hygiene and,
   // if you will, aesthetics) and the second upward (this is essential for
   // current uses) to a HeapWord boundary, so we mark all cards overlapping
-  // this write. In the event that this evolves in the future to calling a
+  // this write. If this evolves in the future to calling a
   // logging barrier of narrow oop granularity, like the pre-barrier for G1
   // (mentioned here merely by way of example), we will need to change this
-  // interface, much like the pre-barrier one above, so it is "exactly precise"
-  // (if i may be allowed the adverbial redundancy for emphasis) and does not
-  // include narrow oop slots not included in the original write interval.
+  // interface, so it is "exactly precise" (if i may be allowed the adverbial
+  // redundancy for emphasis) and does not include narrow oop slots not
+  // included in the original write interval.
   HeapWord* aligned_start = (HeapWord*)align_size_down((uintptr_t)start, HeapWordSize);
   HeapWord* aligned_end   = (HeapWord*)align_size_up  ((uintptr_t)end,   HeapWordSize);
   // If compressed oops were not being used, these should already be aligned

@ -339,6 +339,16 @@ public:
     return (val & (clean_card_mask_val() | claimed_card_val())) == claimed_card_val();
   }

+  void set_card_claimed(size_t card_index) {
+    jbyte val = _byte_map[card_index];
+    if (val == clean_card_val()) {
+      val = (jbyte)claimed_card_val();
+    } else {
+      val |= (jbyte)claimed_card_val();
+    }
+    _byte_map[card_index] = val;
+  }
+
   bool claim_card(size_t card_index);

   bool is_card_clean(size_t card_index) {
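A small standalone sketch (simplified constants, not the HotSpot sources) of the claiming logic in set_card_claimed above: a clean card is overwritten with the claimed value, while a dirty card keeps its dirty bits and only gains the claimed bit, so both facts survive in the same byte.

#include <cstddef>

typedef signed char jbyte_t;
static const jbyte_t kCleanCard  = -1;  // stands in for clean_card_val()
static const jbyte_t kClaimedBit =  2;  // stands in for claimed_card_val()

static void set_claimed(jbyte_t* byte_map, size_t card_index) {
  jbyte_t val = byte_map[card_index];
  if (val == kCleanCard) {
    val = kClaimedBit;   // clean card: the claimed value replaces the byte
  } else {
    val |= kClaimedBit;  // dirty card: OR in the claimed bit, keep the rest
  }
  byte_map[card_index] = val;
}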
@ -296,23 +296,32 @@ public:
 // RememberKlassesChecker can be passed "false" to turn off checking.
 // It is used by CMS when CMS yields to a different collector.
 class RememberKlassesChecker: StackObj {
-  bool _state;
-  bool _skip;
+  bool _saved_state;
+  bool _do_check;
  public:
-  RememberKlassesChecker(bool checking_on) : _state(false), _skip(false) {
-    _skip = !(ClassUnloading && !UseConcMarkSweepGC ||
-              CMSClassUnloadingEnabled && UseConcMarkSweepGC);
-    if (_skip) {
-      return;
+  RememberKlassesChecker(bool checking_on) : _saved_state(false),
+    _do_check(true) {
+    // The ClassUnloading unloading flag affects the collectors except
+    // for CMS.
+    // CMS unloads classes if CMSClassUnloadingEnabled is true or
+    // if ExplicitGCInvokesConcurrentAndUnloadsClasses is true and
+    // the current collection is an explicit collection. Turning
+    // on the checking in general for
+    // ExplicitGCInvokesConcurrentAndUnloadsClasses and
+    // UseConcMarkSweepGC should not lead to false positives.
+    _do_check =
+      ClassUnloading && !UseConcMarkSweepGC ||
+      CMSClassUnloadingEnabled && UseConcMarkSweepGC ||
+      ExplicitGCInvokesConcurrentAndUnloadsClasses && UseConcMarkSweepGC;
+    if (_do_check) {
+      _saved_state = OopClosure::must_remember_klasses();
+      OopClosure::set_must_remember_klasses(checking_on);
     }
-    _state = OopClosure::must_remember_klasses();
-    OopClosure::set_must_remember_klasses(checking_on);
   }
   ~RememberKlassesChecker() {
-    if (_skip) {
-      return;
+    if (_do_check) {
+      OopClosure::set_must_remember_klasses(_saved_state);
     }
-    OopClosure::set_must_remember_klasses(_state);
   }
 };
 #endif // ASSERT
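The rewritten checker above follows a classic RAII save/restore shape; here is a generic sketch (assumed flag, not the HotSpot sources). The guard saves the current value only when checking applies and restores that saved value -- rather than forcing a fixed one -- in its destructor, so skipped or nested uses cannot clobber an outer setting.

static bool g_flag = false;  // stands in for OopClosure::must_remember_klasses()

class FlagSaver {
  bool _saved_state;
  bool _do_check;
 public:
  FlagSaver(bool enable, bool applies) : _saved_state(false), _do_check(applies) {
    if (_do_check) {
      _saved_state = g_flag;  // remember whatever was set before us
      g_flag = enable;
    }
  }
  ~FlagSaver() {
    if (_do_check) {
      g_flag = _saved_state;  // restore the previous value, don't just clear it
    }
  }
};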
@ -1227,13 +1227,16 @@ void ReferenceProcessor::preclean_discovered_references(
   BoolObjectClosure* is_alive,
   OopClosure* keep_alive,
   VoidClosure* complete_gc,
-  YieldClosure* yield) {
+  YieldClosure* yield,
+  bool should_unload_classes) {

   NOT_PRODUCT(verify_ok_to_handle_reflists());

 #ifdef ASSERT
   bool must_remember_klasses = ClassUnloading && !UseConcMarkSweepGC ||
-                               CMSClassUnloadingEnabled && UseConcMarkSweepGC;
+                               CMSClassUnloadingEnabled && UseConcMarkSweepGC ||
+                               ExplicitGCInvokesConcurrentAndUnloadsClasses &&
+                               UseConcMarkSweepGC && should_unload_classes;
   RememberKlassesChecker mx(must_remember_klasses);
 #endif
   // Soft references

@ -170,11 +170,13 @@ class ReferenceProcessor : public CHeapObj {
   // The caller is responsible for taking care of potential
   // interference with concurrent operations on these lists
   // (or predicates involved) by other threads. Currently
-  // only used by the CMS collector.
+  // only used by the CMS collector. should_unload_classes is
+  // used to aid assertion checking when classes are collected.
   void preclean_discovered_references(BoolObjectClosure* is_alive,
                                       OopClosure* keep_alive,
                                       VoidClosure* complete_gc,
-                                      YieldClosure* yield);
+                                      YieldClosure* yield,
+                                      bool should_unload_classes);

   // Delete entries in the discovered lists that have
   // either a null referent or are not active. Such
@ -261,10 +263,13 @@ class ReferenceProcessor : public CHeapObj {
                      int parallel_gc_threads = 1,
                      bool mt_processing = false,
                      bool discovered_list_needs_barrier = false);

   // RefDiscoveryPolicy values
-  enum {
+  enum DiscoveryPolicy {
     ReferenceBasedDiscovery = 0,
-    ReferentBasedDiscovery = 1
+    ReferentBasedDiscovery = 1,
+    DiscoveryPolicyMin = ReferenceBasedDiscovery,
+    DiscoveryPolicyMax = ReferentBasedDiscovery
   };

   static void init_statics();
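One likely motivation for naming the enum and adding the Min/Max members above is range checking of the RefDiscoveryPolicy flag; a brief sketch (hypothetical validation code, not the HotSpot sources):

enum DiscoveryPolicySketch {
  kReferenceBased = 0,
  kReferentBased  = 1,
  kPolicyMin      = kReferenceBased,
  kPolicyMax      = kReferentBased
};

static bool policy_in_range(int flag_value) {
  // Stays correct if a new policy is appended and kPolicyMax is updated.
  return flag_value >= kPolicyMin && flag_value <= kPolicyMax;
}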
@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@ -545,6 +545,10 @@ public:
     return cell_offset(counter_cell_count);
   }

+  void set_count(uint count) {
+    set_uint_at(count_off, count);
+  }
+
 #ifndef PRODUCT
   void print_data_on(outputStream* st);
 #endif
@ -692,6 +696,23 @@ public:

   void clear_row(uint row) {
     assert(row < row_limit(), "oob");
+    // Clear total count - indicator of polymorphic call site.
+    // The site may look monomorphic after that, but
+    // it allows more accurate profiling information because
+    // there was an execution phase change since klasses were unloaded.
+    // If the site is still polymorphic then the MDO will be updated
+    // to reflect it. But it could be the case that the site becomes
+    // only bimorphic. Then keeping total count not 0 will be wrong.
+    // Even if we use monomorphic (when it is not) for compilation
+    // we will only have a trap, deoptimization and recompile again
+    // with an updated MDO after executing the method in the Interpreter.
+    // An additional receiver will be recorded in the cleaned row
+    // during the next call execution.
+    //
+    // Note: our profiling logic works with empty rows in any slot.
+    // We do sort profiling info (ciCallProfile) for compilation.
+    //
+    set_count(0);
     set_receiver(row, NULL);
     set_receiver_count(row, 0);
   }
@ -1391,6 +1412,9 @@ public:
   }
   void inc_decompile_count() {
     _nof_decompiles += 1;
+    if (decompile_count() > (uint)PerMethodRecompilationCutoff) {
+      method()->set_not_compilable();
+    }
   }

   // Support for code generation
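A condensed sketch (assumed names and default, not the HotSpot sources) of the inc_decompile_count() change above: the recompilation cutoff moves into the counter increment itself, so a method is marked not-compilable the moment its decompile count passes the limit, instead of being re-checked by every compilation-policy query.

static const unsigned kRecompilationCutoff = 400;  // assumed flag default

struct MethodCountersSketch {
  unsigned nof_decompiles;
  bool     not_compilable;

  void inc_decompile_count() {
    nof_decompiles += 1;
    if (nof_decompiles > kRecompilationCutoff) {
      not_compilable = true;  // give up re-optimizing this method
    }
  }
};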
@ -575,12 +575,6 @@ bool methodOopDesc::is_not_compilable(int comp_level) const {
     return true;
   }

-  methodDataOop mdo = method_data();
-  if (mdo != NULL
-      && (uint)mdo->decompile_count() > (uint)PerMethodRecompilationCutoff) {
-    // Since (uint)-1 is large, -1 really means 'no cutoff'.
-    return true;
-  }
 #ifdef COMPILER2
   if (is_tier1_compile(comp_level)) {
     if (is_not_tier1_compilable()) {
@ -593,7 +587,16 @@ bool methodOopDesc::is_not_compilable(int comp_level) const {
 }

 // call this when compiler finds that this method is not compilable
-void methodOopDesc::set_not_compilable(int comp_level) {
+void methodOopDesc::set_not_compilable(int comp_level, bool report) {
+  if (PrintCompilation && report) {
+    ttyLocker ttyl;
+    tty->print("made not compilable ");
+    this->print_short_name(tty);
+    int size = this->code_size();
+    if (size > 0)
+      tty->print(" (%d bytes)", size);
+    tty->cr();
+  }
   if ((TraceDeoptimization || LogCompilation) && (xtty != NULL)) {
     ttyLocker ttyl;
     xtty->begin_elem("make_not_compilable thread='%d'", (int) os::current_thread_id());
@ -688,7 +691,7 @@ address methodOopDesc::make_adapters(methodHandle mh, TRAPS) {
   // so making them eagerly shouldn't be too expensive.
   AdapterHandlerEntry* adapter = AdapterHandlerLibrary::get_adapter(mh);
   if (adapter == NULL ) {
-    THROW_0(vmSymbols::java_lang_OutOfMemoryError());
+    THROW_MSG_NULL(vmSymbols::java_lang_VirtualMachineError(), "out of space in CodeCache for adapters");
   }

   mh->set_adapter_entry(adapter);
@ -705,6 +708,16 @@ address methodOopDesc::make_adapters(methodHandle mh, TRAPS) {
 // This function must not hit a safepoint!
 address methodOopDesc::verified_code_entry() {
   debug_only(No_Safepoint_Verifier nsv;)
+  nmethod *code = (nmethod *)OrderAccess::load_ptr_acquire(&_code);
+  if (code == NULL && UseCodeCacheFlushing) {
+    nmethod *saved_code = CodeCache::find_and_remove_saved_code(this);
+    if (saved_code != NULL) {
+      methodHandle method(this);
+      assert( ! saved_code->is_osr_method(), "should not get here for osr" );
+      set_code( method, saved_code );
+    }
+  }
+
   assert(_from_compiled_entry != NULL, "must be set");
   return _from_compiled_entry;
 }
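The load-acquire in verified_code_entry() above pairs with a release store when compiled code is installed; here is a portable sketch using std::atomic (this HotSpot code predates std::atomic and uses the OrderAccess primitives instead):

#include <atomic>

struct NMethodSketch { int entry_point; };

static std::atomic<NMethodSketch*> g_code{nullptr};

// Writer: publish a fully constructed nmethod.
static void publish(NMethodSketch* code) {
  g_code.store(code, std::memory_order_release);
}

// Reader: an acquire load guarantees that if we see the pointer,
// we also see the initialized object behind it.
static NMethodSketch* read_code() {
  return g_code.load(std::memory_order_acquire);
}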
@ -733,8 +746,8 @@ void methodOopDesc::set_code(methodHandle mh, nmethod *code) {
   int comp_level = code->comp_level();
   // In theory there could be a race here. In practice it is unlikely
   // and not worth worrying about.
-  if (comp_level > highest_tier_compile()) {
-    set_highest_tier_compile(comp_level);
+  if (comp_level > mh->highest_tier_compile()) {
+    mh->set_highest_tier_compile(comp_level);
   }

   OrderAccess::storestore();

@ -303,7 +303,7 @@ class methodOopDesc : public oopDesc {
   bool check_code() const; // Not inline to avoid circular ref
   nmethod* volatile code() const { assert( check_code(), "" ); return (nmethod *)OrderAccess::load_ptr_acquire(&_code); }
   void clear_code(); // Clear out any compiled code
-  void set_code(methodHandle mh, nmethod* code);
+  static void set_code(methodHandle mh, nmethod* code);
   void set_adapter_entry(AdapterHandlerEntry* adapter) { _adapter = adapter; }
   address get_i2c_entry();
   address get_c2i_entry();
@ -596,7 +596,10 @@ class methodOopDesc : public oopDesc {
   // whether it is not compilable for another reason like having a
   // breakpoint set in it.
   bool is_not_compilable(int comp_level = CompLevel_highest_tier) const;
-  void set_not_compilable(int comp_level = CompLevel_highest_tier);
+  void set_not_compilable(int comp_level = CompLevel_highest_tier, bool report = true);
+  void set_not_compilable_quietly(int comp_level = CompLevel_highest_tier) {
+    set_not_compilable(comp_level, false);
+  }

   bool is_not_osr_compilable() const { return is_not_compilable() || access_flags().is_not_osr_compilable(); }
   void set_not_osr_compilable()      { _access_flags.set_not_osr_compilable(); }
@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@ -136,8 +136,10 @@ JVMState* DirectCallGenerator::generate(JVMState* jvms) {
     }
     // Mark the call node as virtual, sort of:
     call->set_optimized_virtual(true);
-    if (method()->is_method_handle_invoke())
+    if (method()->is_method_handle_invoke()) {
       call->set_method_handle_invoke(true);
+      kit.C->set_has_method_handle_invokes(true);
+    }
   }
   kit.set_arguments_for_java_call(call);
   kit.set_edges_for_java_call(call, false, _separate_io_proj);
@ -194,6 +196,7 @@ JVMState* DynamicCallGenerator::generate(JVMState* jvms) {
   call->set_optimized_virtual(true);
   // Take extra care (in the presence of argument motion) not to trash the SP:
   call->set_method_handle_invoke(true);
+  kit.C->set_has_method_handle_invokes(true);

   // Pass the target MethodHandle as first argument and shift the
   // other arguments.
@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@ -465,6 +465,7 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
   _code_buffer("Compile::Fill_buffer"),
   _orig_pc_slot(0),
   _orig_pc_slot_offset_in_bytes(0),
+  _has_method_handle_invokes(false),
   _node_bundling_limit(0),
   _node_bundling_base(NULL),
   _java_calls(0),
@ -759,6 +760,7 @@ Compile::Compile( ciEnv* ci_env,
   _do_escape_analysis(false),
   _failure_reason(NULL),
   _code_buffer("Compile::Fill_buffer"),
+  _has_method_handle_invokes(false),
   _node_bundling_limit(0),
   _node_bundling_base(NULL),
   _java_calls(0),

@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@ -166,6 +166,9 @@ class Compile : public Phase {
   bool _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
 #endif

+  // JSR 292
+  bool _has_method_handle_invokes; // True if this method has MethodHandle invokes.
+
   // Compilation environment.
   Arena _comp_arena; // Arena with lifetime equivalent to Compile
   ciEnv* _env;       // CI interface
@ -336,6 +339,10 @@ class Compile : public Phase {
   void set_parsed_irreducible_loop(bool z) { _parsed_irreducible_loop = z; }
 #endif

+  // JSR 292
+  bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
+  void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }
+
   void begin_method() {
 #ifndef PRODUCT
     if (_printer) _printer->begin_method(this);
@ -1,5 +1,5 @@
 /*
- * Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1998-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@ -70,7 +70,7 @@ CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index,
   CompileLog* log = this->log();
   if (log != NULL) {
     int rid = (receiver_count >= 0)? log->identify(profile.receiver(0)): -1;
-    int r2id = (profile.morphism() == 2)? log->identify(profile.receiver(1)):-1;
+    int r2id = (rid != -1 && profile.has_receiver(1))? log->identify(profile.receiver(1)):-1;
     log->begin_elem("call method='%d' count='%d' prof_factor='%g'",
                     log->identify(call_method), site_count, prof_factor);
     if (call_is_virtual) log->print(" virtual='1'");
@ -182,26 +182,16 @@ CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index,
         }
       }
       CallGenerator* miss_cg;
+      Deoptimization::DeoptReason reason = (profile.morphism() == 2) ?
+                                    Deoptimization::Reason_bimorphic :
+                                    Deoptimization::Reason_class_check;
       if (( profile.morphism() == 1 ||
            (profile.morphism() == 2 && next_hit_cg != NULL) ) &&
-
-          !too_many_traps(Deoptimization::Reason_class_check)
-
-          // Check only the total number of traps per method to allow
-          // the transition from the monomorphic to the bimorphic case between
-          // compilations without falling into a virtual call.
-          // A monomorphic case may have the class_check trap flag set
-          // due to the time gap between the uncommon trap processing
-          // when flags are set in the MDO and the call site bytecode execution
-          // in the Interpreter when MDO counters are updated.
-          // There was also a class_check trap in the monomorphic case due to
-          // the bug 6225440.
-
+          !too_many_traps(jvms->method(), jvms->bci(), reason)
          ) {
         // Generate uncommon trap for class check failure path
         // in case of monomorphic or bimorphic virtual call site.
-        miss_cg = CallGenerator::for_uncommon_trap(call_method,
-                    Deoptimization::Reason_class_check,
+        miss_cg = CallGenerator::for_uncommon_trap(call_method, reason,
                     Deoptimization::Action_maybe_recompile);
       } else {
         // Generate virtual call for class check failure path
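A schematic sketch (assumed types, not the HotSpot sources) of the dispatch introduced above: a bimorphic call site now gets its own deoptimization reason, so per-reason trap counting no longer lumps it together with ordinary class-check failures when deciding whether optimistic inlining still pays off.

enum DeoptReasonSketch { kReasonClassCheck, kReasonBimorphic };

static DeoptReasonSketch reason_for(int morphism) {
  // morphism == 2 means two receiver types were profiled at the site.
  return (morphism == 2) ? kReasonBimorphic : kReasonClassCheck;
}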
@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@ -455,16 +455,44 @@ Bytecodes::Code GraphKit::java_bc() const {
   return Bytecodes::_illegal;
 }

+void GraphKit::uncommon_trap_if_should_post_on_exceptions(Deoptimization::DeoptReason reason,
+                                                          bool must_throw) {
+  // if the exception capability is set, then we will generate code
+  // to check the JavaThread.should_post_on_exceptions flag to see
+  // if we actually need to report exception events (for this
+  // thread). If we don't need to report exception events, we will
+  // take the normal fast path provided by add_exception_events. If
+  // exception event reporting is enabled for this thread, we will
+  // take the uncommon_trap in the BuildCutout below.
+
+  // first must access the should_post_on_exceptions_flag in this thread's JavaThread
+  Node* jthread = _gvn.transform(new (C, 1) ThreadLocalNode());
+  Node* adr = basic_plus_adr(top(), jthread, in_bytes(JavaThread::should_post_on_exceptions_flag_offset()));
+  Node* should_post_flag = make_load(control(), adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw, false);
+
+  // Test the should_post_on_exceptions_flag vs. 0
+  Node* chk = _gvn.transform( new (C, 3) CmpINode(should_post_flag, intcon(0)) );
+  Node* tst = _gvn.transform( new (C, 2) BoolNode(chk, BoolTest::eq) );
+
+  // Branch to slow_path if should_post_on_exceptions_flag was true
+  { BuildCutout unless(this, tst, PROB_MAX);
+    // Do not try anything fancy if we're notifying the VM on every throw.
+    // Cf. case Bytecodes::_athrow in parse2.cpp.
+    uncommon_trap(reason, Deoptimization::Action_none,
+                  (ciKlass*)NULL, (char*)NULL, must_throw);
+  }
+
+}
+
 //------------------------------builtin_throw----------------------------------
 void GraphKit::builtin_throw(Deoptimization::DeoptReason reason, Node* arg) {
   bool must_throw = true;

-  if (env()->jvmti_can_post_exceptions()) {
-    // Do not try anything fancy if we're notifying the VM on every throw.
-    // Cf. case Bytecodes::_athrow in parse2.cpp.
-    uncommon_trap(reason, Deoptimization::Action_none,
-                  (ciKlass*)NULL, (char*)NULL, must_throw);
-    return;
+  if (env()->jvmti_can_post_on_exceptions()) {
+    // check if we must post exception events, take uncommon trap if so
+    uncommon_trap_if_should_post_on_exceptions(reason, must_throw);
+    // here if should_post_on_exceptions is false
+    // continue on with the normal codegen
   }

   // If this particular condition has not yet happened at this
@ -752,12 +780,20 @@ bool GraphKit::dead_locals_are_killed() {

 // Helper function for enforcing certain bytecodes to reexecute if
 // deoptimization happens
-static bool should_reexecute_implied_by_bytecode(JVMState *jvms) {
+static bool should_reexecute_implied_by_bytecode(JVMState *jvms, bool is_anewarray) {
   ciMethod* cur_method = jvms->method();
   int cur_bci = jvms->bci();
   if (cur_method != NULL && cur_bci != InvocationEntryBci) {
     Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
-    return Interpreter::bytecode_should_reexecute(code);
+    return Interpreter::bytecode_should_reexecute(code) ||
+           is_anewarray && code == Bytecodes::_multianewarray;
+    // Reexecute _multianewarray bytecode which was replaced with
+    // sequence of [a]newarray. See Parse::do_multianewarray().
+    //
+    // Note: interpreter should not have it set since this optimization
+    // is limited by dimensions and guarded by flag so in some cases
+    // multianewarray() runtime calls will be generated and
+    // the bytecode should not be reexecuted (stack will not be reset).
   } else
     return false;
 }
@ -808,7 +844,7 @@ void GraphKit::add_safepoint_edges(SafePointNode* call, bool must_throw) {
   // For a known set of bytecodes, the interpreter should reexecute them if
   // deoptimization happens. We set the reexecute state for them here
   if (out_jvms->is_reexecute_undefined() && //don't change if already specified
-      should_reexecute_implied_by_bytecode(out_jvms)) {
+      should_reexecute_implied_by_bytecode(out_jvms, call->is_AllocateArray())) {
     out_jvms->set_should_reexecute(true); //NOTE: youngest_jvms not changed
   }

@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@ -251,6 +251,11 @@ class GraphKit : public Phase {
   // via an uncommon trap.
   void builtin_throw(Deoptimization::DeoptReason reason, Node* arg = NULL);

+  // Helper to check the JavaThread::_should_post_on_exceptions flag
+  // and branch to an uncommon_trap if it is true (with the specified reason and must_throw)
+  void uncommon_trap_if_should_post_on_exceptions(Deoptimization::DeoptReason reason,
+                                                  bool must_throw);
+
   // Helper Functions for adding debug information
   void kill_dead_locals();
 #ifdef ASSERT
@ -1,5 +1,5 @@
 /*
- * Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1998-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@ -47,7 +47,7 @@ Node *PhaseIdealLoop::split_thru_phi( Node *n, Node *region, int policy ) {
     int offset = t_oop->offset();
     phi = new (C,region->req()) PhiNode(region, type, NULL, iid, index, offset);
   } else {
-    phi = new (C,region->req()) PhiNode(region, type);
+    phi = PhiNode::make_blank(region, n);
   }
   uint old_unique = C->unique();
   for( uint i = 1; i < region->req(); i++ ) {
@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@ -232,7 +232,7 @@ public:
   // Expand method for MachNode, replaces nodes representing pseudo
   // instructions with a set of nodes which represent real machine
   // instructions and compute the same value.
-  virtual MachNode *Expand( State *, Node_List &proj_list ) { return this; }
+  virtual MachNode *Expand( State *, Node_List &proj_list, Node* mem ) { return this; }

   // Bottom_type call; value comes from operand0
   virtual const class Type *bottom_type() const { return _opnds[0]->type(); }
Some files were not shown because too many files have changed in this diff.