Merge
commit 409201ce7b
@@ -18,3 +18,8 @@ a2879b2837f5a4c87e9542efe69ef138194af8ff jdk7-b38
3cb2a607c347934f8e7e86f840a094c28b08d9ea jdk7-b41
caf58ffa084568990cbb3441f9ae188e36b31770 jdk7-b42
41bd0a702bc8ec6feebd725a63e7c3227f82ab11 jdk7-b43
5843778bda89b1d5ac8e1aa05e26930ac90b3145 jdk7-b44
54dffad0bf066791a2793305875250c395011d5f jdk7-b45
04b2620edc72de93671646e4720c5992c74ac8b5 jdk7-b46
0c4657194eec95c08ba478aee9cfc3c295e41657 jdk7-b47
1bf51a4c2627c2f0e0cbcc2cf0421bdb37f1f2b2 jdk7-b48

@@ -18,3 +18,8 @@ ab523b49de1fc73fefe6855ce1e0349bdbd7af29 jdk7-b39
541bdc5ad32fc33255944d0a044ad992f3d915e8 jdk7-b41
94052b87287303527125026fe4b2698cf867ea83 jdk7-b42
848e684279d2ba42577d9621d5b2e5af3823d12d jdk7-b43
a395e3aac4744cc9033fcd819fad1239a45add52 jdk7-b44
99846f001ca214015578d593802d26e27246a802 jdk7-b45
e8a2a4d187773a62f3309b0fa265c13425bc2258 jdk7-b46
d7744e86dedc21a8ecf6bdb73eb191b8eaf5b0da jdk7-b47
4ae9f4bfdb98f65bd957e3fe72471b320150b38e jdk7-b48

@@ -18,3 +18,8 @@ ef6af34d75a7b44e77083f1d4ee47631fa09d3b4 jdk7-b31
c90eeda9594ed2983403e2049aed8d503126c62e jdk7-b41
ccd6a16502e0650d91d85c4b86be05cbcd461a87 jdk7-b42
9cd740d48a4855321d69f137a7109c00bcda76be jdk7-b43
9803dac7254041b30ca65e3852d4c566b9757c3b jdk7-b44
68814aa5b44b1f16931a97e7cd4028c70eb9586b jdk7-b45
1691dbfc08f8ee3f4e23a1ff30cdff920718696c jdk7-b46
167ad0164301f318b069a947e1c9c07ed667748a jdk7-b47
0be222241fd405e48915647facfaa176621b39b9 jdk7-b48

@@ -18,3 +18,8 @@ d9bc824aa078573829bb66572af847e26e1bd12e jdk7-b38
f9d938ede1960d18cb7cf23c645b026519c1a678 jdk7-b41
ad8c8ca4ab0f4c86e74c061958f44a8f4a930f2c jdk7-b42
fc6a5ae3fef5ebacfa896dbb3ae37715e388e282 jdk7-b43
809e899c638bd9b21836abf9d09ab2a30ff3900b jdk7-b44
945bf754069766e76873c53102fae48abf04cf5b jdk7-b45
16bb38eeda35b46268eefa4c1f829eb086e0ca46 jdk7-b46
fcb923bad68e2b10380a030ea83a723f4dc3d4d6 jdk7-b47
bcb33806d186561c781992e5f4d8a90bb033f9f0 jdk7-b48
@@ -238,8 +238,8 @@ struct FileMapHeader {
// Ignore the rest of the FileMapHeader. We don't need those fields here.
};

static bool read_int(struct ps_prochandle* ph, uintptr_t addr, int* pvalue) {
int i;
static bool read_jboolean(struct ps_prochandle* ph, uintptr_t addr, jboolean* pvalue) {
jboolean i;
if (ps_pdread(ph, (psaddr_t) addr, &i, sizeof(i)) == PS_OK) {
*pvalue = i;
return true;

@@ -295,7 +295,7 @@ static bool init_classsharing_workaround(struct ps_prochandle* ph) {
int fd = -1, m = 0;
uintptr_t base = 0, useSharedSpacesAddr = 0;
uintptr_t sharedArchivePathAddrAddr = 0, sharedArchivePathAddr = 0;
int useSharedSpaces = 0;
jboolean useSharedSpaces = 0;
map_info* mi = 0;

memset(classes_jsa, 0, sizeof(classes_jsa));

@@ -306,12 +306,15 @@ static bool init_classsharing_workaround(struct ps_prochandle* ph) {
return false;
}

if (read_int(ph, useSharedSpacesAddr, &useSharedSpaces) != true) {
// Hotspot vm types are not exported to build this library. So
// using equivalent type jboolean to read the value of
// UseSharedSpaces which is same as hotspot type "bool".
if (read_jboolean(ph, useSharedSpacesAddr, &useSharedSpaces) != true) {
print_debug("can't read the value of 'UseSharedSpaces' flag\n");
return false;
}

if (useSharedSpaces == 0) {
if ((int)useSharedSpaces == 0) {
print_debug("UseSharedSpaces is false, assuming -Xshare:off!\n");
return true;
}

@@ -502,8 +502,8 @@ struct FileMapHeader {
};

static bool
read_int(struct ps_prochandle* ph, psaddr_t addr, int* pvalue) {
int i;
read_jboolean(struct ps_prochandle* ph, psaddr_t addr, jboolean* pvalue) {
jboolean i;
if (ps_pread(ph, addr, &i, sizeof(i)) == PS_OK) {
*pvalue = i;
return true;

@@ -575,10 +575,13 @@ init_classsharing_workaround(void *cd, const prmap_t* pmap, const char* obj_name
}

// read the value of the flag "UseSharedSpaces"
int value = 0;
if (read_int(ph, useSharedSpacesAddr, &value) != true) {
// Since hotspot types are not available to build this library. So
// equivalent type "jboolean" is used to read the value of "UseSharedSpaces"
// which is same as hotspot type "bool".
jboolean value = 0;
if (read_jboolean(ph, useSharedSpacesAddr, &value) != true) {
THROW_NEW_DEBUGGER_EXCEPTION_("can't read 'UseSharedSpaces' flag", 1);
} else if (value == 0) {
} else if ((int)value == 0) {
print_debug("UseSharedSpaces is false, assuming -Xshare:off!\n");
return 1;
}
@@ -33,9 +33,9 @@
# Don't put quotes (fail windows build).
HOTSPOT_VM_COPYRIGHT=Copyright 2008

HS_MAJOR_VER=14
HS_MAJOR_VER=15
HS_MINOR_VER=0
HS_BUILD_NUMBER=10
HS_BUILD_NUMBER=02

JDK_MAJOR_VER=1
JDK_MINOR_VER=7
@@ -28,5 +28,11 @@
#define JNICALL

typedef int jint;
typedef long long jlong;

#ifdef _LP64
typedef long jlong;
#else
typedef long long jlong;
#endif

typedef signed char jbyte;
@@ -762,7 +762,7 @@ void emit_form3_mem_reg(CodeBuffer &cbuf, const MachNode* n, int primary, int te
case Assembler::stdf_op3: st_op = Op_StoreD; break;

case Assembler::ldsb_op3: ld_op = Op_LoadB; break;
case Assembler::lduh_op3: ld_op = Op_LoadC; break;
case Assembler::lduh_op3: ld_op = Op_LoadUS; break;
case Assembler::ldsh_op3: ld_op = Op_LoadS; break;
case Assembler::ldx_op3: // may become LoadP or stay LoadI
case Assembler::ldsw_op3: // may become LoadP or stay LoadI

@@ -3869,6 +3869,8 @@ operand regD() %{
constraint(ALLOC_IN_RC(dflt_reg));
match(RegD);

match(regD_low);

format %{ %}
interface(REG_INTER);
%}

@@ -3883,7 +3885,7 @@ operand regF() %{

operand regD_low() %{
constraint(ALLOC_IN_RC(dflt_low_reg));
match(RegD);
match(regD);

format %{ %}
interface(REG_INTER);

@@ -5314,9 +5316,9 @@ instruct loadUBL(iRegL dst, memory mem, immL_FF bytemask) %{
ins_pipe(iload_mask_mem);
%}

// Load Char (16bit UNsigned) into a Long Register
instruct loadUCL(iRegL dst, memory mem, immL_FFFF bytemask) %{
match(Set dst (AndL (ConvI2L (LoadC mem)) bytemask));
// Load Unsigned Short/Char (16bit UNsigned) into a Long Register
instruct loadUS2L(iRegL dst, memory mem, immL_FFFF bytemask) %{
match(Set dst (AndL (ConvI2L (LoadUS mem)) bytemask));
ins_cost(MEMORY_REF_COST);

size(4);

@@ -5326,9 +5328,9 @@ instruct loadUCL(iRegL dst, memory mem, immL_FFFF bytemask) %{
ins_pipe(iload_mask_mem);
%}

// Load Char (16bit unsigned)
instruct loadC(iRegI dst, memory mem) %{
match(Set dst (LoadC mem));
// Load Unsigned Short/Char (16bit unsigned)
instruct loadUS(iRegI dst, memory mem) %{
match(Set dst (LoadUS mem));
ins_cost(MEMORY_REF_COST);

size(4);
@@ -5212,15 +5212,15 @@ void MacroAssembler::pushptr(AddressLiteral src) {
void MacroAssembler::reset_last_Java_frame(bool clear_fp,
bool clear_pc) {
// we must set sp to zero to clear frame
movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), (int32_t)NULL_WORD);
movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
// must clear fp, so that compiled frames are not confused; it is
// possible that we need it only for debugging
if (clear_fp) {
movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), (int32_t)NULL_WORD);
movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
}

if (clear_pc) {
movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), (int32_t)NULL_WORD);
movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
}
}

@@ -5670,7 +5670,7 @@ void MacroAssembler::call_VM_base(Register oop_result,
// get oop result if there is one and reset the value in the thread
if (oop_result->is_valid()) {
movptr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
movptr(Address(java_thread, JavaThread::vm_result_offset()), (int32_t)NULL_WORD);
movptr(Address(java_thread, JavaThread::vm_result_offset()), NULL_WORD);
verify_oop(oop_result, "broken oop in call_VM_base");
}
}

@@ -6426,13 +6426,13 @@ void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp,
get_thread(java_thread);
}
// we must set sp to zero to clear frame
movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), (int32_t)NULL_WORD);
movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
if (clear_fp) {
movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), (int32_t)NULL_WORD);
movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
}

if (clear_pc)
movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), (int32_t)NULL_WORD);
movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);

}
@@ -6943,29 +6943,32 @@ void MacroAssembler::trigfunc(char trig, int num_fpu_regs_in_use) {

Label slow_case, done;

// x ?<= pi/4
fld_d(ExternalAddress((address)&pi_4));
fld_s(1); // Stack: X PI/4 X
fabs(); // Stack: |X| PI/4 X
fcmp(tmp);
jcc(Assembler::above, slow_case);
ExternalAddress pi4_adr = (address)&pi_4;
if (reachable(pi4_adr)) {
// x ?<= pi/4
fld_d(pi4_adr);
fld_s(1); // Stack: X PI/4 X
fabs(); // Stack: |X| PI/4 X
fcmp(tmp);
jcc(Assembler::above, slow_case);

// fastest case: -pi/4 <= x <= pi/4
switch(trig) {
case 's':
fsin();
break;
case 'c':
fcos();
break;
case 't':
ftan();
break;
default:
assert(false, "bad intrinsic");
break;
// fastest case: -pi/4 <= x <= pi/4
switch(trig) {
case 's':
fsin();
break;
case 'c':
fcos();
break;
case 't':
ftan();
break;
default:
assert(false, "bad intrinsic");
break;
}
jmp(done);
}
jmp(done);

// slow case: runtime call
bind(slow_case);
@@ -213,7 +213,7 @@ inline jint BytecodeInterpreter::VMintAnd(jint op1, jint op2) {

inline jint BytecodeInterpreter::VMintDiv(jint op1, jint op2) {
/* it's possible we could catch this special case implicitly */
if (op1 == 0x80000000 && op2 == -1) return op1;
if ((juint)op1 == 0x80000000 && op2 == -1) return op1;
else return op1 / op2;
}

@@ -231,7 +231,7 @@ inline jint BytecodeInterpreter::VMintOr(jint op1, jint op2) {

inline jint BytecodeInterpreter::VMintRem(jint op1, jint op2) {
/* it's possible we could catch this special case implicitly */
if (op1 == 0x80000000 && op2 == -1) return 0;
if ((juint)op1 == 0x80000000 && op2 == -1) return 0;
else return op1 % op2;
}
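The two hunks above cover the one jint division that can overflow: dividing the most negative 32-bit value by -1, which the interpreter must answer explicitly before issuing the hardware divide; the added (juint) cast only affects how the 0x80000000 comparison is evaluated. A minimal standalone sketch of the same guard, using int32_t/uint32_t in place of HotSpot's jint/juint (those substitutions are ours, not from the commit):

#include <cstdint>
#include <iostream>

// INT32_MIN / -1 overflows a 32-bit signed divide, so it is special-cased,
// matching the Java-level definition of integer division and remainder.
int32_t vm_int_div(int32_t op1, int32_t op2) {
  if ((uint32_t)op1 == 0x80000000u && op2 == -1) return op1;  // INT32_MIN
  return op1 / op2;
}

int32_t vm_int_rem(int32_t op1, int32_t op2) {
  if ((uint32_t)op1 == 0x80000000u && op2 == -1) return 0;    // remainder is 0
  return op1 % op2;
}

int main() {
  std::cout << vm_int_div(INT32_MIN, -1) << " " << vm_int_rem(INT32_MIN, -1) << "\n";
}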
@@ -779,7 +779,7 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
case T_OBJECT: // fall through
case T_ARRAY:
if (c->as_jobject() == NULL) {
__ movptr(as_Address(addr), (int32_t)NULL_WORD);
__ movptr(as_Address(addr), NULL_WORD);
} else {
if (is_literal_address(addr)) {
ShouldNotReachHere();
@@ -78,10 +78,10 @@ int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address e
movptr(rax, Address(thread, Thread::pending_exception_offset()));
// make sure that the vm_results are cleared
if (oop_result1->is_valid()) {
movptr(Address(thread, JavaThread::vm_result_offset()), (int32_t)NULL_WORD);
movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
}
if (oop_result2->is_valid()) {
movptr(Address(thread, JavaThread::vm_result_2_offset()), (int32_t)NULL_WORD);
movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
}
if (frame_size() == no_frame_size) {
leave();

@@ -96,12 +96,12 @@ int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address e
// get oop results if there are any and reset the values in the thread
if (oop_result1->is_valid()) {
movptr(oop_result1, Address(thread, JavaThread::vm_result_offset()));
movptr(Address(thread, JavaThread::vm_result_offset()), (int32_t)NULL_WORD);
movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
verify_oop(oop_result1);
}
if (oop_result2->is_valid()) {
movptr(oop_result2, Address(thread, JavaThread::vm_result_2_offset()));
movptr(Address(thread, JavaThread::vm_result_2_offset()), (int32_t)NULL_WORD);
movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
verify_oop(oop_result2);
}
return call_offset;

@@ -728,8 +728,8 @@ void Runtime1::generate_handle_exception(StubAssembler *sasm, OopMapSet* oop_map

// clear exception fields in JavaThread because they are no longer needed
// (fields must be cleared because they are processed by GC otherwise)
__ movptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
__ movptr(Address(thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
__ movptr(Address(thread, JavaThread::exception_oop_offset()), NULL_WORD);
__ movptr(Address(thread, JavaThread::exception_pc_offset()), NULL_WORD);

// pop the stub frame off
__ leave();

@@ -878,7 +878,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {

// load and clear pending exception
__ movptr(rax, Address(thread, Thread::pending_exception_offset()));
__ movptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
__ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);

// check that there is really a valid exception
__ verify_not_null_oop(rax);

@@ -971,14 +971,14 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
// load pending exception oop into rax,
__ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
// clear pending exception
__ movptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
__ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);

// load issuing PC (the return address for this stub) into rdx
__ movptr(exception_pc, Address(rbp, 1*BytesPerWord));

// make sure that the vm_results are cleared (may be unnecessary)
__ movptr(Address(thread, JavaThread::vm_result_offset()), (int32_t)NULL_WORD);
__ movptr(Address(thread, JavaThread::vm_result_2_offset()), (int32_t)NULL_WORD);
__ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
__ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);

// verify that that there is really a valid exception in rax,
__ verify_not_null_oop(exception_oop);

@@ -1393,7 +1393,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ ret(0);

__ bind(miss);
__ movptr(Address(rsp, (super_off) * VMRegImpl::stack_slot_size), 0); // result
__ movptr(Address(rsp, (super_off) * VMRegImpl::stack_slot_size), NULL_WORD); // result
__ pop(rax);
__ pop(rcx);
__ pop(rsi);
@@ -594,7 +594,7 @@ void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rax);

// for c++ interpreter can rsi really be munged?
__ lea(state, Address(rbp, -sizeof(BytecodeInterpreter))); // restore state
__ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter))); // restore state
__ movptr(rbx, Address(state, byte_offset_of(BytecodeInterpreter, _method))); // restore method
__ movptr(rdi, Address(state, byte_offset_of(BytecodeInterpreter, _locals))); // get locals pointer

@@ -658,7 +658,7 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
const Address size_of_stack (rbx, methodOopDesc::max_stack_offset());
// Always give one monitor to allow us to start interp if sync method.
// Any additional monitors need a check when moving the expression stack
const one_monitor = frame::interpreter_frame_monitor_size() * wordSize;
const int one_monitor = frame::interpreter_frame_monitor_size() * wordSize;
__ load_unsigned_word(rax, size_of_stack); // get size of expression stack in words
__ lea(rax, Address(noreg, rax, Interpreter::stackElementScale(), one_monitor));
__ lea(rax, Address(rax, rdx, Interpreter::stackElementScale(), overhead_size));

@@ -1829,7 +1829,7 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
Label unwind_and_forward;

// restore state pointer.
__ lea(state, Address(rbp, -sizeof(BytecodeInterpreter)));
__ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter)));

__ movptr(rbx, STATE(_method)); // get method
#ifdef _LP64

@@ -1877,14 +1877,14 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {

// The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
if (UseSSE < 2) {
__ lea(state, Address(rbp, -sizeof(BytecodeInterpreter)));
__ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter)));
__ movptr(rbx, STATE(_result._to_call._callee)); // get method just executed
__ movl(rcx, Address(rbx, methodOopDesc::result_index_offset()));
__ cmpl(rcx, AbstractInterpreter::BasicType_as_index(T_FLOAT)); // Result stub address array index
__ jcc(Assembler::equal, do_float);
__ cmpl(rcx, AbstractInterpreter::BasicType_as_index(T_DOUBLE)); // Result stub address array index
__ jcc(Assembler::equal, do_double);
#ifdef COMPILER2
#if !defined(_LP64) || defined(COMPILER1) || !defined(COMPILER2)
__ empty_FPU_stack();
#endif // COMPILER2
__ jmp(done_conv);

@@ -1928,7 +1928,7 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {

// Restore rsi/r13 as compiled code may not preserve it

__ lea(state, Address(rbp, -sizeof(BytecodeInterpreter)));
__ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter)));

// restore stack to what we had when we left (in case i2c extended it)

@@ -1942,7 +1942,7 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
#else
__ movptr(rcx, STATE(_thread)); // get thread
__ cmpptr(Address(rcx, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
#endif / __LP64
#endif // _LP64
__ jcc(Assembler::notZero, return_with_exception);

// get method just executed
@@ -139,7 +139,7 @@ inline address* frame::native_param_addr(int idx) const { return (address*) addr
#ifdef CC_INTERP

inline interpreterState frame::get_interpreterState() const {
return ((interpreterState)addr_at( -sizeof(BytecodeInterpreter)/wordSize ));
return ((interpreterState)addr_at( -((int)sizeof(BytecodeInterpreter))/wordSize ));
}

inline intptr_t* frame::sender_sp() const {
@@ -133,7 +133,7 @@ void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
+ in_ByteSize(wordSize));
switch (state) {
case atos: movptr(rax, oop_addr);
movptr(oop_addr, (int32_t)NULL_WORD);
movptr(oop_addr, NULL_WORD);
verify_oop(rax, state); break;
case ltos:
movl(rdx, val_addr1); // fall through

@@ -148,8 +148,8 @@ void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
}
// Clean up tos value in the thread object
movl(tos_addr, (int32_t) ilgl);
movptr(val_addr, (int32_t)NULL_WORD);
NOT_LP64(movl(val_addr1, (int32_t)NULL_WORD));
movptr(val_addr, NULL_WORD);
NOT_LP64(movptr(val_addr1, NULL_WORD));
}

@@ -944,7 +944,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
movptr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes ()));

// Free entry
movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD);
movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), NULL_WORD);

if (UseBiasedLocking) {
biased_locking_exit(obj_reg, header_reg, done);
@@ -120,7 +120,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
void empty_expression_stack() {
movptr(rsp, Address(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize));
// NULL last_sp until next java call
movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
}

// Tagged stack helpers for swap and dup

@@ -30,7 +30,7 @@

#ifdef CC_INTERP
void InterpreterMacroAssembler::get_method(Register reg) {
movptr(reg, Address(rbp, -(sizeof(BytecodeInterpreter) + 2 * wordSize)));
movptr(reg, Address(rbp, -((int)sizeof(BytecodeInterpreter) + 2 * wordSize)));
movptr(reg, Address(reg, byte_offset_of(BytecodeInterpreter, _method)));
}
#endif // CC_INTERP
@@ -54,7 +54,7 @@ void InterpreterRuntime::SignatureHandlerGenerator::box(int from_offset, int to_
__ cmpptr(Address(from(), Interpreter::local_offset_in_bytes(from_offset)), (int32_t)NULL_WORD); // do not use temp() to avoid AGI
Label L;
__ jcc(Assembler::notZero, L);
__ movptr(temp(), ((int32_t)NULL_WORD));
__ movptr(temp(), NULL_WORD);
__ bind(L);
__ movptr(Address(to(), to_offset * wordSize), temp());
}

@@ -110,7 +110,7 @@ class SlowSignatureHandler: public NativeSignatureIterator {
virtual void pass_object() {
// pass address of from
intptr_t from_addr = (intptr_t)(_from + Interpreter::local_offset_in_bytes(0));
*_to++ = (*(intptr_t*)from_addr == 0) ? NULL : from_addr;
*_to++ = (*(intptr_t*)from_addr == 0) ? NULL_WORD : from_addr;
debug_only(verify_tag(frame::TagReference));
_from -= Interpreter::stackElementSize();
}
@@ -32,7 +32,13 @@
#define JNICALL

typedef int jint;

#ifdef _LP64
typedef long jlong;
#else
typedef long long jlong;
#endif

#else
#define JNIEXPORT __declspec(dllexport)
#define JNIIMPORT __declspec(dllimport)
@@ -129,11 +129,11 @@ void OptoRuntime::generate_exception_blob() {
// Get the exception pc in case we are deoptimized
__ movptr(rdx, Address(rcx, JavaThread::exception_pc_offset()));
#ifdef ASSERT
__ movptr(Address(rcx, JavaThread::exception_handler_pc_offset()), (int32_t)NULL_WORD);
__ movptr(Address(rcx, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
__ movptr(Address(rcx, JavaThread::exception_handler_pc_offset()), NULL_WORD);
__ movptr(Address(rcx, JavaThread::exception_pc_offset()), NULL_WORD);
#endif
// Clear the exception oop so GC no longer processes it as a root.
__ movptr(Address(rcx, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
__ movptr(Address(rcx, JavaThread::exception_oop_offset()), NULL_WORD);

__ pop(rcx);
@@ -39,6 +39,8 @@ RuntimeStub* SharedRuntime::_resolve_opt_virtual_call_blob;
RuntimeStub* SharedRuntime::_resolve_virtual_call_blob;
RuntimeStub* SharedRuntime::_resolve_static_call_blob;

const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;

class RegisterSaver {
enum { FPU_regs_live = 8 /*for the FPU stack*/+8/*eight more for XMM registers*/ };
// Capture info about frame layout

@@ -1299,7 +1301,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,

// Now compute actual number of stack words we need rounding to make
// stack properly aligned.
stack_slots = round_to(stack_slots, 2 * VMRegImpl::slots_per_word);
stack_slots = round_to(stack_slots, StackAlignmentInSlots);

int stack_size = stack_slots * VMRegImpl::stack_slot_size;

@@ -1793,7 +1795,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// reset handle block
__ movptr(rcx, Address(thread, JavaThread::active_handles_offset()));

__ movptr(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
__ movptr(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);

// Any exception pending?
__ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);

@@ -1865,7 +1867,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// Save pending exception around call to VM (which contains an EXCEPTION_MARK)

__ pushptr(Address(thread, in_bytes(Thread::pending_exception_offset())));
__ movptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
__ movptr(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);

// should be a peal

@@ -2431,7 +2433,7 @@ void SharedRuntime::generate_deopt_blob() {
__ get_thread(rdi);
__ movptr(rdx, Address(rdi, JavaThread::exception_pc_offset()));
__ movptr(Address(rbp, wordSize), rdx);
__ movptr(Address(rdi, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
__ movptr(Address(rdi, JavaThread::exception_pc_offset()), NULL_WORD);

#ifdef ASSERT
// verify that there is really an exception oop in JavaThread
@@ -2489,8 +2491,8 @@ void SharedRuntime::generate_deopt_blob() {
__ jcc(Assembler::notEqual, noException);
__ movptr(rax, Address(rcx, JavaThread::exception_oop_offset()));
__ movptr(rdx, Address(rcx, JavaThread::exception_pc_offset()));
__ movptr(Address(rcx, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
__ movptr(Address(rcx, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
__ movptr(Address(rcx, JavaThread::exception_oop_offset()), NULL_WORD);
__ movptr(Address(rcx, JavaThread::exception_pc_offset()), NULL_WORD);

__ verify_oop(rax);

@@ -2582,7 +2584,7 @@ void SharedRuntime::generate_deopt_blob() {
rbx); // Make it walkable
#else /* CC_INTERP */
// This value is corrected by layout_activation_impl
__ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
__ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
__ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable
#endif /* CC_INTERP */
__ movptr(sp_temp, rsp); // pass to next frame

@@ -2802,7 +2804,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
rbx); // Make it walkable
#else /* CC_INTERP */
// This value is corrected by layout_activation_impl
__ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
__ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD );
__ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable
#endif /* CC_INTERP */
__ movptr(sp_temp, rsp); // pass to next frame

@@ -3020,7 +3022,7 @@ static RuntimeStub* generate_resolve_blob(address destination, const char* name)
// exception pending => remove activation and forward to exception handler

__ get_thread(thread);
__ movptr(Address(thread, JavaThread::vm_result_offset()), (int32_t)NULL_WORD);
__ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
__ movptr(rax, Address(thread, Thread::pending_exception_offset()));
__ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
@@ -39,6 +39,8 @@ RuntimeStub* SharedRuntime::_resolve_opt_virtual_call_blob;
RuntimeStub* SharedRuntime::_resolve_virtual_call_blob;
RuntimeStub* SharedRuntime::_resolve_static_call_blob;

const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;

#define __ masm->

class SimpleRuntimeFrame {

@@ -1286,7 +1288,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,

// Now compute actual number of stack words we need rounding to make
// stack properly aligned.
stack_slots = round_to(stack_slots, 4 * VMRegImpl::slots_per_word);
stack_slots = round_to(stack_slots, StackAlignmentInSlots);

int stack_size = stack_slots * VMRegImpl::stack_slot_size;

@@ -2954,10 +2956,16 @@ void SharedRuntime::generate_uncommon_trap_blob() {
__ pushptr(Address(rcx, 0)); // Save return address
__ enter(); // Save old & set new rbp
__ subptr(rsp, rbx); // Prolog
#ifdef CC_INTERP
__ movptr(Address(rbp,
-(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))),
sender_sp); // Make it walkable
#else // CC_INTERP
__ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize),
sender_sp); // Make it walkable
// This value is corrected by layout_activation_impl
__ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
#endif // CC_INTERP
__ mov(sender_sp, rsp); // Pass sender_sp to next frame
__ addptr(rsi, wordSize); // Bump array pointer (sizes)
__ addptr(rcx, wordSize); // Bump array pointer (pcs)
@@ -407,7 +407,7 @@ class StubGenerator: public StubCodeGenerator {
__ get_thread(rcx);
__ pop(rdx);
__ movptr(rax, Address(rcx, Thread::pending_exception_offset()));
__ movptr(Address(rcx, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
__ movptr(Address(rcx, Thread::pending_exception_offset()), NULL_WORD);

#ifdef ASSERT
// make sure exception is set

@@ -472,7 +472,7 @@ class StubGenerator: public StubCodeGenerator {
// setup rax & rdx, remove return address & clear pending exception
__ pop(rdx);
__ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
__ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int)NULL_WORD);
__ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);

#ifdef ASSERT
// make sure exception is set

@@ -954,9 +954,9 @@ class StubGenerator: public StubCodeGenerator {
__ jcc(Assembler::zero, exit); // if obj is NULL it is OK
// Check if the oop is in the right area of memory
__ movptr(c_rarg2, rax);
__ movptr(c_rarg3, (int64_t) Universe::verify_oop_mask());
__ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask());
__ andptr(c_rarg2, c_rarg3);
__ movptr(c_rarg3, (int64_t) Universe::verify_oop_bits());
__ movptr(c_rarg3, (intptr_t) Universe::verify_oop_bits());
__ cmpptr(c_rarg2, c_rarg3);
__ jcc(Assembler::notZero, error);

@@ -969,9 +969,9 @@ class StubGenerator: public StubCodeGenerator {
__ jcc(Assembler::zero, error); // if klass is NULL it is broken
// Check if the klass is in the right area of memory
__ mov(c_rarg2, rax);
__ movptr(c_rarg3, (int64_t) Universe::verify_klass_mask());
__ movptr(c_rarg3, (intptr_t) Universe::verify_klass_mask());
__ andptr(c_rarg2, c_rarg3);
__ movptr(c_rarg3, (int64_t) Universe::verify_klass_bits());
__ movptr(c_rarg3, (intptr_t) Universe::verify_klass_bits());
__ cmpptr(c_rarg2, c_rarg3);
__ jcc(Assembler::notZero, error);

@@ -980,9 +980,9 @@ class StubGenerator: public StubCodeGenerator {
__ testptr(rax, rax);
__ jcc(Assembler::zero, error); // if klass' klass is NULL it is broken
// Check if the klass' klass is in the right area of memory
__ movptr(c_rarg3, (int64_t) Universe::verify_klass_mask());
__ movptr(c_rarg3, (intptr_t) Universe::verify_klass_mask());
__ andptr(rax, c_rarg3);
__ movptr(c_rarg3, (int64_t) Universe::verify_klass_bits());
__ movptr(c_rarg3, (intptr_t) Universe::verify_klass_bits());
__ cmpptr(rax, c_rarg3);
__ jcc(Assembler::notZero, error);
@@ -110,7 +110,7 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(const ch
if (message != NULL) {
__ lea(rbx, ExternalAddress((address)message));
} else {
__ movptr(rbx, (int32_t)NULL_WORD);
__ movptr(rbx, NULL_WORD);
}
__ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), rax, rbx);
}

@@ -123,7 +123,7 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(const ch
address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
address entry = __ pc();
// NULL last_sp until next java call
__ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
__ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
__ dispatch_next(state);
return entry;
}

@@ -176,7 +176,7 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
// Restore stack bottom in case i2c adjusted stack
__ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
// and NULL it as marker that rsp is now tos until next java call
__ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
__ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);

__ restore_bcp();
__ restore_locals();

@@ -211,7 +211,7 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, i

// The stack is not extended by deopt but we must NULL last_sp as this
// entry is like a "return".
__ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
__ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
__ restore_bcp();
__ restore_locals();
// handle exceptions

@@ -382,7 +382,7 @@ void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
// indicating if the counter overflow occurs at a backwards branch (non-NULL bcp).
// The call returns the address of the verified entry point for the method or NULL
// if the compilation did not complete (either went background or bailed out).
__ movptr(rax, (int32_t)false);
__ movptr(rax, (intptr_t)false);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rax);

__ movptr(rbx, Address(rbp, method_offset)); // restore methodOop

@@ -1028,7 +1028,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {

// reset handle block
__ movptr(t, Address(thread, JavaThread::active_handles_offset()));
__ movptr(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
__ movptr(Address(t, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);

// If result was an oop then unbox and save it in the frame
{ Label L;

@@ -1488,7 +1488,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {

// Restore sp to interpreter_frame_last_sp even though we are going
// to empty the expression stack for the exception processing.
__ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
__ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
// rax,: exception
// rdx: return address/pc that threw exception
__ restore_bcp(); // rsi points to call/send

@@ -1608,7 +1608,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
__ reset_last_Java_frame(rcx, true, true);
// Restore the last_sp and null it out
__ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
__ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
__ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);

__ restore_bcp();
__ restore_locals();

@@ -1636,7 +1636,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// restore exception
__ get_thread(rcx);
__ movptr(rax, Address(rcx, JavaThread::vm_result_offset()));
__ movptr(Address(rcx, JavaThread::vm_result_offset()), (int32_t)NULL_WORD);
__ movptr(Address(rcx, JavaThread::vm_result_offset()), NULL_WORD);
__ verify_oop(rax);

// Inbetween activations - previous activation type unknown yet
@@ -137,7 +137,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
// Do the actual store
// noreg means NULL
if (val == noreg) {
__ movl(Address(rdx, 0), NULL_WORD);
__ movptr(Address(rdx, 0), NULL_WORD);
// No post barrier for NULL
} else {
__ movl(Address(rdx, 0), val);

@@ -152,7 +152,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
case BarrierSet::CardTableExtension:
{
if (val == noreg) {
__ movl(obj, NULL_WORD);
__ movptr(obj, NULL_WORD);
} else {
__ movl(obj, val);
// flatten object address if needed

@@ -168,7 +168,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
case BarrierSet::ModRef:
case BarrierSet::Other:
if (val == noreg) {
__ movl(obj, NULL_WORD);
__ movptr(obj, NULL_WORD);
} else {
__ movl(obj, val);
}
@@ -3371,7 +3371,7 @@ encode %{
masm.movptr(Address(boxReg, 0), 3) ; // results in ST-before-CAS penalty
masm.get_thread (scrReg) ;
masm.movptr(boxReg, tmpReg); // consider: LEA box, [tmp-2]
masm.movptr(tmpReg, 0); // consider: xor vs mov
masm.movptr(tmpReg, NULL_WORD); // consider: xor vs mov
if (os::is_MP()) { masm.lock(); }
masm.cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
} else

@@ -3387,7 +3387,7 @@ encode %{

if ((EmitSync & 64) == 0) {
// Optimistic form: consider XORL tmpReg,tmpReg
masm.movptr(tmpReg, 0 ) ;
masm.movptr(tmpReg, NULL_WORD) ;
} else {
// Can suffer RTS->RTO upgrades on shared or cold $ lines
// Test-And-CAS instead of CAS

@@ -3587,7 +3587,7 @@ encode %{
masm.orptr(boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ;
masm.orptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ;
masm.jccb (Assembler::notZero, DONE_LABEL) ;
masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), 0) ;
masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), NULL_WORD) ;
masm.jmpb (DONE_LABEL) ;
} else {
masm.xorptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;

@@ -3596,7 +3596,7 @@ encode %{
masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ;
masm.orptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ;
masm.jccb (Assembler::notZero, CheckSucc) ;
masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), 0) ;
masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), NULL_WORD) ;
masm.jmpb (DONE_LABEL) ;
}

@@ -3644,7 +3644,7 @@ encode %{
// We currently use (3), although it's likely that switching to (2)
// is correct for the future.

masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), 0) ;
masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), NULL_WORD) ;
if (os::is_MP()) {
if (VM_Version::supports_sse2() && 1 == FenceInstruction) {
masm.mfence();
@@ -6413,9 +6413,9 @@ instruct loadUB(xRegI dst, memory mem, immI_255 bytemask) %{
ins_pipe( ialu_reg_mem );
%}

// Load Char (16bit unsigned)
instruct loadC(eRegI dst, memory mem) %{
match(Set dst (LoadC mem));
// Load Unsigned Short/Char (16bit unsigned)
instruct loadUS(eRegI dst, memory mem) %{
match(Set dst (LoadUS mem));

ins_cost(125);
format %{ "MOVZX $dst,$mem" %}
@@ -6096,25 +6096,25 @@ instruct loadS(rRegI dst, memory mem)
// ins_pipe(ialu_reg_mem);
// %}

// Load Char (16 bit UNsigned)
instruct loadC(rRegI dst, memory mem)
// Load Unsigned Short/Char (16 bit UNsigned)
instruct loadUS(rRegI dst, memory mem)
%{
match(Set dst (LoadC mem));
match(Set dst (LoadUS mem));

ins_cost(125);
format %{ "movzwl $dst, $mem\t# char" %}
format %{ "movzwl $dst, $mem\t# ushort/char" %}
opcode(0x0F, 0xB7);
ins_encode(REX_reg_mem(dst, mem), OpcP, OpcS, reg_mem(dst, mem));
ins_pipe(ialu_reg_mem);
%}

// Load Char (16 bit UNsigned) into long
// instruct loadC2L(rRegL dst, memory mem)
// Load Unsigned Short/Char (16 bit UNsigned) into long
// instruct loadUS2L(rRegL dst, memory mem)
// %{
// match(Set dst (ConvI2L (LoadC mem)));
// match(Set dst (ConvI2L (LoadUS mem)));

// ins_cost(125);
// format %{ "movzwl $dst, $mem\t# char -> long" %}
// format %{ "movzwl $dst, $mem\t# ushort/char -> long" %}
// opcode(0x0F, 0xB7);
// ins_encode(REX_reg_mem(dst, mem), OpcP, OpcS, reg_mem(dst, mem));
// ins_pipe(ialu_reg_mem);

@@ -9490,14 +9490,14 @@ instruct andL_rReg_imm255(rRegL dst, immL_255 src)
%{
match(Set dst (AndL dst src));

format %{ "movzbq $dst, $src\t# long & 0xFF" %}
format %{ "movzbq $dst, $dst\t# long & 0xFF" %}
opcode(0x0F, 0xB6);
ins_encode(REX_reg_reg_wide(dst, dst), OpcP, OpcS, reg_reg(dst, dst));
ins_pipe(ialu_reg);
%}

// And Register with Immediate 65535
instruct andL_rReg_imm65535(rRegI dst, immL_65535 src)
instruct andL_rReg_imm65535(rRegL dst, immL_65535 src)
%{
match(Set dst (AndL dst src));
@@ -279,7 +279,11 @@ void os::init_system_properties_values() {
* ...
* 7: The default directories, normally /lib and /usr/lib.
*/
#if defined(AMD64) || defined(_LP64) && (defined(SPARC) || defined(PPC) || defined(S390))
#define DEFAULT_LIBPATH "/usr/lib64:/lib64:/lib:/usr/lib"
#else
#define DEFAULT_LIBPATH "/lib:/usr/lib"
#endif

#define EXTENSIONS_DIR "/lib/ext"
#define ENDORSED_DIR "/lib/endorsed"

@@ -1160,7 +1164,10 @@ void os::Linux::capture_initial_stack(size_t max_size) {

/* 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 */
/* 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 */
i = sscanf(s, "%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld %lu %lu %ld %lu %lu %lu %lu",
i = sscanf(s, "%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld "
UINTX_FORMAT UINTX_FORMAT UINTX_FORMAT
" %lu "
UINTX_FORMAT UINTX_FORMAT UINTX_FORMAT,
&state, /* 3 %c */
&ppid, /* 4 %d */
&pgrp, /* 5 %d */

@@ -1180,13 +1187,13 @@ void os::Linux::capture_initial_stack(size_t max_size) {
&nice, /* 19 %ld */
&junk, /* 20 %ld */
&it_real, /* 21 %ld */
&start, /* 22 %lu */
&vsize, /* 23 %lu */
&rss, /* 24 %ld */
&start, /* 22 UINTX_FORMAT */
&vsize, /* 23 UINTX_FORMAT */
&rss, /* 24 UINTX_FORMAT */
&rsslim, /* 25 %lu */
&scodes, /* 26 %lu */
&ecode, /* 27 %lu */
&stack_start); /* 28 %lu */
&scodes, /* 26 UINTX_FORMAT */
&ecode, /* 27 UINTX_FORMAT */
&stack_start); /* 28 UINTX_FORMAT */
}

if (i != 28 - 2) {
@@ -1425,6 +1432,10 @@ char * os::local_time_string(char *buf, size_t buflen) {
return buf;
}

struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
return localtime_r(clock, res);
}

////////////////////////////////////////////////////////////////////////////////
// runtime exit support

@@ -2024,7 +2035,8 @@ void os::jvm_path(char *buf, jint len) {
CAST_FROM_FN_PTR(address, os::jvm_path),
dli_fname, sizeof(dli_fname), NULL);
assert(ret != 0, "cannot locate libjvm");
realpath(dli_fname, buf);
if (realpath(dli_fname, buf) == NULL)
return;

if (strcmp(Arguments::sun_java_launcher(), "gamma") == 0) {
// Support for the gamma launcher. Typical value for buf is

@@ -2048,7 +2060,8 @@ void os::jvm_path(char *buf, jint len) {
assert(strstr(p, "/libjvm") == p, "invalid library name");
p = strstr(p, "_g") ? "_g" : "";

realpath(java_home_var, buf);
if (realpath(java_home_var, buf) == NULL)
return;
sprintf(buf + strlen(buf), "/jre/lib/%s", cpu_arch);
if (0 == access(buf, F_OK)) {
// Use current module name "libjvm[_g].so" instead of

@@ -2059,7 +2072,8 @@ void os::jvm_path(char *buf, jint len) {
sprintf(buf + strlen(buf), "/hotspot/libjvm%s.so", p);
} else {
// Go back to path of .so
realpath(dli_fname, buf);
if (realpath(dli_fname, buf) == NULL)
return;
}
}
}

@@ -4184,11 +4198,11 @@ static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
// Skip blank chars
do s++; while (isspace(*s));

count = sscanf(s,"%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu",
&idummy, &idummy, &idummy, &idummy, &idummy, &idummy,
count = sscanf(s,"%*c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu",
&idummy, &idummy, &idummy, &idummy, &idummy,
&ldummy, &ldummy, &ldummy, &ldummy, &ldummy,
&user_time, &sys_time);
if ( count != 13 ) return -1;
if ( count != 12 ) return -1;
if (user_sys_cpu_time) {
return ((jlong)sys_time + (jlong)user_time) * (1000000000 / clock_tics_per_sec);
} else {
@@ -323,6 +323,10 @@ size_t os::current_stack_size() {
return (size_t)(base - bottom);
}

struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
return localtime_r(clock, res);
}

// interruptible infrastructure

// setup_interruptible saves the thread state before going into an
@@ -327,6 +327,14 @@ size_t os::current_stack_size() {
return sz;
}

struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
const struct tm* time_struct_ptr = localtime(clock);
if (time_struct_ptr != NULL) {
*res = *time_struct_ptr;
return res;
}
return NULL;
}

LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
@@ -316,9 +316,12 @@ int cmpstr(const void *k1, const void *k2) {
return strcmp((const char *)k1,(const char *)k2);
}

// Slimey cheap key comparator.
// Cheap key comparator.
int cmpkey(const void *key1, const void *key2) {
return (int)((intptr_t)key1 - (intptr_t)key2);
if (key1 == key2) return 0;
intptr_t delta = (intptr_t)key1 - (intptr_t)key2;
if (delta > 0) return 1;
return -1;
}

//=============================================================================
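The hunk above drops a subtraction-based comparator: truncating a 64-bit pointer difference to int can zero it out or flip its sign, so the rewritten cmpkey returns -1/0/1 from an explicit comparison. A minimal standalone illustration of the failure mode (plain C++; the helper names are ours, not HotSpot's, and the exact output of the truncating version is implementation-defined):

#include <cstdint>
#include <cstdio>

// Old style: the sign of a truncated 64-bit difference is unreliable.
int cmp_truncating(const void* a, const void* b) {
  return (int)((intptr_t)a - (intptr_t)b);
}

// New style: derive the result from a comparison, never from truncation.
int cmp_safe(const void* a, const void* b) {
  if (a == b) return 0;
  intptr_t delta = (intptr_t)a - (intptr_t)b;
  return (delta > 0) ? 1 : -1;
}

int main() {
  const char* lo = (const char*)0x100000000ULL;  // two keys that differ by 2^32,
  const char* hi = lo + 0x100000000ULL;          // so the truncated diff is 0
  std::printf("%d vs %d\n", cmp_truncating(hi, lo), cmp_safe(hi, lo));
  return 0;                                      // typically prints "0 vs 1" on LP64
}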
@@ -248,7 +248,7 @@ Form::DataType Form::ideal_to_Reg_type(const char *name) const {
// True if 'opType', an ideal name, loads or stores.
Form::DataType Form::is_load_from_memory(const char *opType) const {
if( strcmp(opType,"LoadB")==0 ) return Form::idealB;
if( strcmp(opType,"LoadC")==0 ) return Form::idealC;
if( strcmp(opType,"LoadUS")==0 ) return Form::idealC;
if( strcmp(opType,"LoadD")==0 ) return Form::idealD;
if( strcmp(opType,"LoadD_unaligned")==0 ) return Form::idealD;
if( strcmp(opType,"LoadF")==0 ) return Form::idealF;

@@ -3314,7 +3314,7 @@ int MatchNode::needs_ideal_memory_edge(FormDict &globals) const {
"StoreI","StoreL","StoreP","StoreN","StoreD","StoreF" ,
"StoreB","StoreC","Store" ,"StoreFP",
"LoadI" ,"LoadL", "LoadP" ,"LoadN", "LoadD" ,"LoadF" ,
"LoadB" ,"LoadC" ,"LoadS" ,"Load" ,
"LoadB" ,"LoadUS" ,"LoadS" ,"Load" ,
"Store4I","Store2I","Store2L","Store2D","Store4F","Store2F","Store16B",
"Store8B","Store4B","Store8C","Store4C","Store2C",
"Load4I" ,"Load2I" ,"Load2L" ,"Load2D" ,"Load4F" ,"Load2F" ,"Load16B" ,
@@ -123,6 +123,10 @@ CodeBuffer::~CodeBuffer() {
// addresses constructed before expansions will not be confused.
cb->free_blob();
}

// free any overflow storage
delete _overflow_arena;

#ifdef ASSERT
Copy::fill_to_bytes(this, sizeof(*this), badResourceValue);
#endif
@@ -232,7 +232,9 @@ constantPoolHandle ClassFileParser::parse_constant_pool(TRAPS) {
length >= 1, "Illegal constant pool size %u in class file %s",
length, CHECK_(nullHandle));
constantPoolOop constant_pool =
oopFactory::new_constantPool(length, CHECK_(nullHandle));
oopFactory::new_constantPool(length,
methodOopDesc::IsSafeConc,
CHECK_(nullHandle));
constantPoolHandle cp (THREAD, constant_pool);

cp->set_partially_loaded(); // Enables heap verify to work on partial constantPoolOops

@@ -1675,7 +1677,8 @@ methodHandle ClassFileParser::parse_method(constantPoolHandle cp, bool is_interf
// All sizing information for a methodOop is finally available, now create it
methodOop m_oop = oopFactory::new_method(
code_length, access_flags, linenumber_table_length,
total_lvt_length, checked_exceptions_length, CHECK_(nullHandle));
total_lvt_length, checked_exceptions_length,
methodOopDesc::IsSafeConc, CHECK_(nullHandle));
methodHandle m (THREAD, m_oop);

ClassLoadingService::add_class_method_size(m_oop->size()*HeapWordSize);
@@ -441,6 +441,7 @@ oop java_lang_Class::primitive_mirror(BasicType t) {

bool java_lang_Class::offsets_computed = false;
int java_lang_Class::classRedefinedCount_offset = -1;
int java_lang_Class::parallelCapable_offset = -1;

void java_lang_Class::compute_offsets() {
assert(!offsets_computed, "offsets should be initialized only once");

@@ -451,6 +452,23 @@ void java_lang_Class::compute_offsets() {
// so don't go fatal.
compute_optional_offset(classRedefinedCount_offset,
k, vmSymbols::classRedefinedCount_name(), vmSymbols::int_signature());

// The field indicating parallelCapable (parallelLockMap) is only present starting in 7,
klassOop k1 = SystemDictionary::classloader_klass();
compute_optional_offset(parallelCapable_offset,
k1, vmSymbols::parallelCapable_name(), vmSymbols::concurrenthashmap_signature());
}

// For class loader classes, parallelCapable defined
// based on non-null field
// Written to by java.lang.ClassLoader, vm only reads this field, doesn't set it
bool java_lang_Class::parallelCapable(oop class_loader) {
if (!JDK_Version::is_gte_jdk17x_version()
|| parallelCapable_offset == -1) {
// Default for backward compatibility is false
return false;
}
return (class_loader->obj_field(parallelCapable_offset) != NULL);
}

int java_lang_Class::classRedefinedCount(oop the_class_mirror) {

@@ -866,7 +884,7 @@ char* java_lang_Throwable::print_stack_element_to_buffer(methodOop method, int b
}
nmethod* nm = method->code();
if (WizardMode && nm != NULL) {
sprintf(buf + (int)strlen(buf), "(nmethod %#x)", nm);
sprintf(buf + (int)strlen(buf), "(nmethod " PTR_FORMAT ")", (intptr_t)nm);
}
}

@@ -141,6 +141,7 @@ class java_lang_Class : AllStatic {
static void compute_offsets();
static bool offsets_computed;
static int classRedefinedCount_offset;
static int parallelCapable_offset;

public:
// Instance creation

@@ -168,6 +169,8 @@ class java_lang_Class : AllStatic {
// Support for classRedefinedCount field
static int classRedefinedCount(oop the_class_mirror);
static void set_classRedefinedCount(oop the_class_mirror, int value);
// Support for parallelCapable field
static bool parallelCapable(oop the_class_mirror);
// Debugging
friend class JavaClasses;
friend class instanceKlass; // verification code accesses offsets
@ -89,6 +89,14 @@ bool SystemDictionary::is_internal_format(symbolHandle class_name) {
|
||||
|
||||
#endif
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Parallel class loading check
|
||||
|
||||
bool SystemDictionary::is_parallelCapable(Handle class_loader) {
|
||||
if (UnsyncloadClass || class_loader.is_null()) return true;
|
||||
if (AlwaysLockClassLoader) return false;
|
||||
return java_lang_Class::parallelCapable(class_loader());
|
||||
}
|
||||
// ----------------------------------------------------------------------------
|
||||
// Resolving of classes
|
||||
|
||||
@ -196,7 +204,8 @@ klassOop SystemDictionary::resolve_array_class_or_null(symbolHandle class_name,
|
||||
// super-class callers:
|
||||
// ClassFileParser - for defineClass & jvmtiRedefineClasses
|
||||
// load_shared_class - while loading a class from shared archive
|
||||
// resolve_instance_class_or_fail:
|
||||
// resolve_instance_class_or_null:
|
||||
// via: handle_parallel_super_load
|
||||
// when resolving a class that has an existing placeholder with
|
||||
// a saved superclass [i.e. a defineClass is currently in progress]
|
||||
// if another thread is trying to resolve the class, it must do
|
||||
@ -283,12 +292,9 @@ klassOop SystemDictionary::resolve_super_or_fail(symbolHandle child_name,
|
||||
if (probe && probe->check_seen_thread(THREAD, PlaceholderTable::LOAD_SUPER)) {
|
||||
throw_circularity_error = true;
|
||||
}
|
||||
|
||||
// add placeholder entry even if error - callers will remove on error
|
||||
}
|
||||
if (!throw_circularity_error) {
|
||||
PlaceholderEntry* newprobe = placeholders()->find_and_add(p_index, p_hash, child_name, class_loader, PlaceholderTable::LOAD_SUPER, class_name, THREAD);
|
||||
if (throw_circularity_error) {
|
||||
newprobe->remove_seen_thread(THREAD, PlaceholderTable::LOAD_SUPER);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (throw_circularity_error) {
|
||||
@ -325,7 +331,6 @@ klassOop SystemDictionary::resolve_super_or_fail(symbolHandle child_name,
|
||||
return superk_h();
|
||||
}
|
||||
|
||||
|
||||
void SystemDictionary::validate_protection_domain(instanceKlassHandle klass,
|
||||
Handle class_loader,
|
||||
Handle protection_domain,
|
||||
@ -421,7 +426,7 @@ void SystemDictionary::double_lock_wait(Handle lockObject, TRAPS) {
|
||||
bool calledholdinglock
|
||||
= ObjectSynchronizer::current_thread_holds_lock((JavaThread*)THREAD, lockObject);
|
||||
assert(calledholdinglock,"must hold lock for notify");
|
||||
assert(!UnsyncloadClass, "unexpected double_lock_wait");
|
||||
assert((!(lockObject() == _system_loader_lock_obj) && !is_parallelCapable(lockObject)), "unexpected double_lock_wait");
|
||||
ObjectSynchronizer::notifyall(lockObject, THREAD);
|
||||
intptr_t recursions = ObjectSynchronizer::complete_exit(lockObject, THREAD);
|
||||
SystemDictionary_lock->wait();
|
||||
@ -439,7 +444,7 @@ void SystemDictionary::double_lock_wait(Handle lockObject, TRAPS) {
|
||||
// even in non-circularity situations.
|
||||
// Note: only one thread can define the class, but multiple can resolve
|
||||
// Note: must call resolve_super_or_fail even if null super -
|
||||
// to force placeholder entry creation for this class
|
||||
// to force placeholder entry creation for this class for circularity detection
|
||||
// Caller must check for pending exception
|
||||
// Returns non-null klassOop if other thread has completed load
|
||||
// and we are done,
|
||||
@ -477,9 +482,9 @@ instanceKlassHandle SystemDictionary::handle_parallel_super_load(
|
||||
SystemDictionary_lock->notify_all();
|
||||
}
|
||||
|
||||
// UnsyncloadClass does NOT wait for parallel superclass loads to complete
|
||||
// Bootstrap classloader does wait for parallel superclass loads
|
||||
if (UnsyncloadClass) {
|
||||
// parallelCapable class loaders do NOT wait for parallel superclass loads to complete
|
||||
// Serial class loaders and bootstrap classloader do wait for superclass loads
|
||||
if (!class_loader.is_null() && is_parallelCapable(class_loader)) {
|
||||
MutexLocker mu(SystemDictionary_lock, THREAD);
|
||||
// Check if classloading completed while we were loading superclass or waiting
|
||||
klassOop check = find_class(d_index, d_hash, name, class_loader);
|
||||
@ -566,10 +571,10 @@ klassOop SystemDictionary::resolve_instance_class_or_null(symbolHandle class_nam
// This lock must be acquired here so the waiter will find
// any successful result in the SystemDictionary and not attempt
// the define
// Classloaders that support parallelism, e.g. bootstrap classloader,
// ParallelCapable Classloaders and the bootstrap classloader,
// or all classloaders with UnsyncloadClass do not acquire lock here
bool DoObjectLock = true;
if (UnsyncloadClass || (class_loader.is_null())) {
if (is_parallelCapable(class_loader)) {
DoObjectLock = false;
}

@ -627,6 +632,9 @@ klassOop SystemDictionary::resolve_instance_class_or_null(symbolHandle class_nam
// Five cases:
// All cases need to prevent modifying bootclasssearchpath
// in parallel with a classload of same classname
// Redefineclasses uses existence of the placeholder for the duration
// of the class load to prevent concurrent redefinition of not completely
// defined classes.
// case 1. traditional classloaders that rely on the classloader object lock
// - no other need for LOAD_INSTANCE
// case 2. traditional classloaders that break the classloader object lock

@ -642,12 +650,13 @@ klassOop SystemDictionary::resolve_instance_class_or_null(symbolHandle class_nam
// This classloader supports parallelism at the classloader level,
// but only allows a single load of a class/classloader pair.
// No performance benefit and no deadlock issues.
// case 5. Future: parallel user level classloaders - without objectLocker
// case 5. parallelCapable user level classloaders - without objectLocker
// Allow parallel classloading of a class/classloader pair
symbolHandle nullsymbolHandle;
bool throw_circularity_error = false;
{
MutexLocker mu(SystemDictionary_lock, THREAD);
if (!UnsyncloadClass) {
if (class_loader.is_null() || !is_parallelCapable(class_loader)) {
PlaceholderEntry* oldprobe = placeholders()->get_entry(p_index, p_hash, name, class_loader);
if (oldprobe) {
// only need check_seen_thread once, not on each loop
@ -681,25 +690,25 @@ klassOop SystemDictionary::resolve_instance_class_or_null(symbolHandle class_nam
|
||||
}
|
||||
}
|
||||
// All cases: add LOAD_INSTANCE
|
||||
// case 3: UnsyncloadClass: allow competing threads to try
|
||||
// case 3: UnsyncloadClass || case 5: parallelCapable: allow competing threads to try
|
||||
// LOAD_INSTANCE in parallel
|
||||
// add placeholder entry even if error - callers will remove on error
|
||||
if (!class_has_been_loaded) {
|
||||
if (!throw_circularity_error && !class_has_been_loaded) {
|
||||
PlaceholderEntry* newprobe = placeholders()->find_and_add(p_index, p_hash, name, class_loader, PlaceholderTable::LOAD_INSTANCE, nullsymbolHandle, THREAD);
|
||||
if (throw_circularity_error) {
|
||||
newprobe->remove_seen_thread(THREAD, PlaceholderTable::LOAD_INSTANCE);
|
||||
}
|
||||
// For class loaders that do not acquire the classloader object lock,
|
||||
// if they did not catch another thread holding LOAD_INSTANCE,
|
||||
// need a check analogous to the acquire ObjectLocker/find_class
|
||||
// i.e. now that we hold the LOAD_INSTANCE token on loading this class/CL
|
||||
// one final check if the load has already completed
|
||||
// class loaders holding the ObjectLock shouldn't find the class here
|
||||
klassOop check = find_class(d_index, d_hash, name, class_loader);
|
||||
if (check != NULL) {
|
||||
// Klass is already loaded, so just return it
|
||||
k = instanceKlassHandle(THREAD, check);
|
||||
class_has_been_loaded = true;
|
||||
newprobe->remove_seen_thread(THREAD, PlaceholderTable::LOAD_INSTANCE);
|
||||
placeholders()->find_and_remove(p_index, p_hash, name, class_loader, THREAD);
|
||||
SystemDictionary_lock->notify_all();
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -714,18 +723,14 @@ klassOop SystemDictionary::resolve_instance_class_or_null(symbolHandle class_nam
|
||||
// Do actual loading
|
||||
k = load_instance_class(name, class_loader, THREAD);
|
||||
|
||||
// In custom class loaders, the usual findClass calls
|
||||
// findLoadedClass, which directly searches the SystemDictionary, then
|
||||
// defineClass. If these are not atomic with respect to other threads,
|
||||
// the findLoadedClass can fail, but the defineClass can get a
|
||||
// LinkageError:: duplicate class definition.
|
||||
// For UnsyncloadClass and AllowParallelDefineClass only:
|
||||
// If they got a linkageError, check if a parallel class load succeeded.
|
||||
// If it did, then for bytecode resolution the specification requires
|
||||
// that we return the same result we did for the other thread, i.e. the
|
||||
// successfully loaded instanceKlass
|
||||
// Note: Class can not be unloaded as long as any classloader refs exist
|
||||
// Should not get here for classloaders that support parallelism
|
||||
// with the new cleaner mechanism, e.g. bootstrap classloader
|
||||
// with the new cleaner mechanism
|
||||
// Bootstrap goes through here to allow for an extra guarantee check
|
||||
if (UnsyncloadClass || (class_loader.is_null())) {
|
||||
if (k.is_null() && HAS_PENDING_EXCEPTION
|
||||
&& PENDING_EXCEPTION->is_a(SystemDictionary::linkageError_klass())) {
|
||||
@ -841,6 +846,12 @@ klassOop SystemDictionary::find(symbolHandle class_name,
|
||||
Handle protection_domain,
|
||||
TRAPS) {
|
||||
|
||||
// UseNewReflection
|
||||
// The result of this call should be consistent with the result
|
||||
// of the call to resolve_instance_class_or_null().
|
||||
// See evaluation 6790209 and 4474172 for more details.
|
||||
class_loader = Handle(THREAD, java_lang_ClassLoader::non_reflection_class_loader(class_loader()));
|
||||
|
||||
unsigned int d_hash = dictionary()->compute_hash(class_name, class_loader);
|
||||
int d_index = dictionary()->hash_to_index(d_hash);
|
||||
|
||||
@ -955,10 +966,10 @@ klassOop SystemDictionary::parse_stream(symbolHandle class_name,
|
||||
instanceKlassHandle k = ClassFileParser(st).parseClassFile(class_name,
|
||||
class_loader,
|
||||
protection_domain,
|
||||
cp_patches,
|
||||
parsed_name,
|
||||
THREAD);
|
||||
|
||||
|
||||
// We don't redefine the class, so we just need to clean up whether there
|
||||
// was an error or not (don't want to modify any system dictionary
|
||||
// data structures).
|
||||
@ -1013,11 +1024,17 @@ klassOop SystemDictionary::resolve_from_stream(symbolHandle class_name,
ClassFileStream* st,
TRAPS) {

// Make sure we are synchronized on the class loader before we initiate
// loading.
// Classloaders that support parallelism, e.g. bootstrap classloader,
// or all classloaders with UnsyncloadClass do not acquire lock here
bool DoObjectLock = true;
if (is_parallelCapable(class_loader)) {
DoObjectLock = false;
}

// Make sure we are synchronized on the class loader before we proceed
Handle lockObject = compute_loader_lock_object(class_loader, THREAD);
check_loader_lock_contention(lockObject, THREAD);
ObjectLocker ol(lockObject, THREAD);
ObjectLocker ol(lockObject, THREAD, DoObjectLock);

symbolHandle parsed_name;

@ -1069,7 +1086,13 @@ klassOop SystemDictionary::resolve_from_stream(symbolHandle class_name,
"external class name format used internally");

// Add class just loaded
define_instance_class(k, THREAD);
// If a class loader supports parallel classloading handle parallel define requests
// find_or_define_instance_class may return a different instanceKlass
if (is_parallelCapable(class_loader)) {
k = find_or_define_instance_class(class_name, class_loader, k, THREAD);
} else {
define_instance_class(k, THREAD);
}
}

// If parsing the class file or define_instance_class failed, we
@ -1299,7 +1322,7 @@ instanceKlassHandle SystemDictionary::load_instance_class(symbolHandle class_nam
|
||||
}
|
||||
#endif // KERNEL
|
||||
|
||||
// find_or_define_instance_class may return a different k
|
||||
// find_or_define_instance_class may return a different instanceKlass
|
||||
if (!k.is_null()) {
|
||||
k = find_or_define_instance_class(class_name, class_loader, k, CHECK_(nh));
|
||||
}
|
||||
@ -1316,14 +1339,24 @@ instanceKlassHandle SystemDictionary::load_instance_class(symbolHandle class_nam

KlassHandle spec_klass (THREAD, SystemDictionary::classloader_klass());

// UnsyncloadClass option means don't synchronize loadClass() calls.
// loadClassInternal() is synchronized and public loadClass(String) is not.
// This flag is for diagnostic purposes only. It is risky to call
// Call public unsynchronized loadClass(String) directly for all class loaders
// for parallelCapable class loaders. JDK >=7, loadClass(String, boolean) will
// acquire a class-name based lock rather than the class loader object lock.
// JDK < 7 already acquire the class loader lock in loadClass(String, boolean),
// so the call to loadClassInternal() was not required.
//
// UnsyncloadClass flag means both call loadClass(String) and do
// not acquire the class loader lock even for class loaders that are
// not parallelCapable. This was a risky transitional
// flag for diagnostic purposes only. It is risky to call
// custom class loaders without synchronization.
// WARNING If a custom class loader does NOT synchronizer findClass, or callers of
// findClass, this flag risks unexpected timing bugs in the field.
// findClass, the UnsyncloadClass flag risks unexpected timing bugs in the field.
// Do NOT assume this will be supported in future releases.
if (!UnsyncloadClass && has_loadClassInternal()) {
//
// Added MustCallLoadClassInternal in case we discover in the field
// a customer that counts on this call
if (MustCallLoadClassInternal && has_loadClassInternal()) {
JavaCalls::call_special(&result,
class_loader,
spec_klass,
@ -1365,14 +1398,17 @@ void SystemDictionary::define_instance_class(instanceKlassHandle k, TRAPS) {
|
||||
|
||||
Handle class_loader_h(THREAD, k->class_loader());
|
||||
|
||||
// for bootstrap classloader don't acquire lock
|
||||
if (!class_loader_h.is_null()) {
|
||||
// for bootstrap and other parallel classloaders don't acquire lock,
|
||||
// use placeholder token
|
||||
// If a parallelCapable class loader calls define_instance_class instead of
|
||||
// find_or_define_instance_class to get here, we have a timing
|
||||
// hole with systemDictionary updates and check_constraints
|
||||
if (!class_loader_h.is_null() && !is_parallelCapable(class_loader_h)) {
|
||||
assert(ObjectSynchronizer::current_thread_holds_lock((JavaThread*)THREAD,
|
||||
compute_loader_lock_object(class_loader_h, THREAD)),
|
||||
"define called without lock");
|
||||
}
|
||||
|
||||
|
||||
// Check class-loading constraints. Throw exception if violation is detected.
|
||||
// Grabs and releases SystemDictionary_lock
|
||||
// The check_constraints/find_class call and update_dictionary sequence
|
||||
@ -1427,59 +1463,63 @@ void SystemDictionary::define_instance_class(instanceKlassHandle k, TRAPS) {

// Support parallel classloading
// Initial implementation for bootstrap classloader
// For future:
// For custom class loaders that support parallel classloading,
// in case they do not synchronize around
// FindLoadedClass/DefineClass calls, we check for parallel
// With AllowParallelDefine flag==true, in case they do not synchronize around
// FindLoadedClass/DefineClass, calls, we check for parallel
// loading for them, wait if a defineClass is in progress
// and return the initial requestor's results
// With AllowParallelDefine flag==false, call through to define_instance_class
// which will throw LinkageError: duplicate class definition.
// For better performance, the class loaders should synchronize
// findClass(), i.e. FindLoadedClass/DefineClass or they
// findClass(), i.e. FindLoadedClass/DefineClassIfAbsent or they
// potentially waste time reading and parsing the bytestream.
// Note: VM callers should ensure consistency of k/class_name,class_loader
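The DEFINE_CLASS placeholder protocol outlined in the comment above (one thread claims the define token, parallel requestors wait and reuse the published result) can be modeled outside the VM. A minimal sketch using std::mutex/std::condition_variable in place of SystemDictionary_lock and the PlaceholderTable; DefineTable, DefineToken and Klass are hypothetical stand-ins, not HotSpot types:

```cpp
#include <condition_variable>
#include <map>
#include <memory>
#include <mutex>
#include <string>

struct Klass { std::string name; };           // hypothetical stand-in for instanceKlass

struct DefineToken {                          // models a DEFINE_CLASS placeholder entry
  bool defining = true;                       // a thread currently owns the define
  std::shared_ptr<Klass> result;              // result published by the definer
};

class DefineTable {
  std::mutex mu_;                             // plays the role of SystemDictionary_lock
  std::condition_variable cv_;
  std::map<std::string, DefineToken> tokens_;

 public:
  std::shared_ptr<Klass> find_or_define(const std::string& name) {
    std::unique_lock<std::mutex> lk(mu_);
    auto it = tokens_.find(name);
    if (it != tokens_.end()) {
      // Another thread is (or was) the definer: wait, then reuse its result.
      cv_.wait(lk, [&] { return !it->second.defining; });
      if (it->second.result != nullptr) return it->second.result;
      // The definer failed; fall through and try the define ourselves.
    }
    DefineToken& tok = tokens_[name];
    tok.defining = true;                      // claim the define token under the lock
    lk.unlock();

    auto k = std::make_shared<Klass>(Klass{name});   // "parse and define" outside the lock

    lk.lock();
    tok.result = k;
    tok.defining = false;
    cv_.notify_all();                         // wake parallel requestors of the same name
    return k;
  }
};
```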
instanceKlassHandle SystemDictionary::find_or_define_instance_class(symbolHandle class_name, Handle class_loader, instanceKlassHandle k, TRAPS) {
|
||||
|
||||
instanceKlassHandle nh = instanceKlassHandle(); // null Handle
|
||||
symbolHandle name_h(THREAD, k->name()); // passed in class_name may be null
|
||||
|
||||
unsigned int d_hash = dictionary()->compute_hash(class_name, class_loader);
|
||||
unsigned int d_hash = dictionary()->compute_hash(name_h, class_loader);
|
||||
int d_index = dictionary()->hash_to_index(d_hash);
|
||||
|
||||
// Hold SD lock around find_class and placeholder creation for DEFINE_CLASS
|
||||
unsigned int p_hash = placeholders()->compute_hash(class_name, class_loader);
|
||||
unsigned int p_hash = placeholders()->compute_hash(name_h, class_loader);
|
||||
int p_index = placeholders()->hash_to_index(p_hash);
|
||||
PlaceholderEntry* probe;
|
||||
|
||||
{
|
||||
MutexLocker mu(SystemDictionary_lock, THREAD);
|
||||
// First check if class already defined
|
||||
klassOop check = find_class(d_index, d_hash, class_name, class_loader);
|
||||
klassOop check = find_class(d_index, d_hash, name_h, class_loader);
|
||||
if (check != NULL) {
|
||||
return(instanceKlassHandle(THREAD, check));
|
||||
}
|
||||
|
||||
// Acquire define token for this class/classloader
|
||||
symbolHandle nullsymbolHandle;
|
||||
probe = placeholders()->find_and_add(p_index, p_hash, class_name, class_loader, PlaceholderTable::DEFINE_CLASS, nullsymbolHandle, THREAD);
|
||||
// Check if another thread defining in parallel
|
||||
if (probe->definer() == NULL) {
|
||||
// Thread will define the class
|
||||
probe->set_definer(THREAD);
|
||||
} else {
|
||||
// Wait for defining thread to finish and return results
|
||||
while (probe->definer() != NULL) {
|
||||
SystemDictionary_lock->wait();
|
||||
}
|
||||
if (probe->instanceKlass() != NULL) {
|
||||
probe = placeholders()->find_and_add(p_index, p_hash, name_h, class_loader, PlaceholderTable::DEFINE_CLASS, nullsymbolHandle, THREAD);
|
||||
// Wait if another thread defining in parallel
|
||||
// All threads wait - even those that will throw duplicate class: otherwise
|
||||
// caller is surprised by LinkageError: duplicate, but findLoadedClass fails
|
||||
// if other thread has not finished updating dictionary
|
||||
while (probe->definer() != NULL) {
|
||||
SystemDictionary_lock->wait();
|
||||
}
|
||||
// Only special cases allow parallel defines and can use other thread's results
|
||||
// Other cases fall through, and may run into duplicate defines
|
||||
// caught by finding an entry in the SystemDictionary
|
||||
if ((UnsyncloadClass || AllowParallelDefineClass) && (probe->instanceKlass() != NULL)) {
|
||||
probe->remove_seen_thread(THREAD, PlaceholderTable::DEFINE_CLASS);
|
||||
return(instanceKlassHandle(THREAD, probe->instanceKlass()));
|
||||
} else {
|
||||
// If definer had an error, try again as any new thread would
|
||||
probe->set_definer(THREAD);
|
||||
placeholders()->find_and_remove(p_index, p_hash, name_h, class_loader, THREAD);
|
||||
SystemDictionary_lock->notify_all();
|
||||
#ifdef ASSERT
|
||||
klassOop check = find_class(d_index, d_hash, class_name, class_loader);
|
||||
assert(check == NULL, "definer missed recording success");
|
||||
klassOop check = find_class(d_index, d_hash, name_h, class_loader);
|
||||
assert(check != NULL, "definer missed recording success");
|
||||
#endif
|
||||
}
|
||||
return(instanceKlassHandle(THREAD, probe->instanceKlass()));
|
||||
} else {
|
||||
// This thread will define the class (even if earlier thread tried and had an error)
|
||||
probe->set_definer(THREAD);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1490,7 +1530,7 @@ instanceKlassHandle SystemDictionary::find_or_define_instance_class(symbolHandle
|
||||
// definer must notify any waiting threads
|
||||
{
|
||||
MutexLocker mu(SystemDictionary_lock, THREAD);
|
||||
PlaceholderEntry* probe = placeholders()->get_entry(p_index, p_hash, class_name, class_loader);
|
||||
PlaceholderEntry* probe = placeholders()->get_entry(p_index, p_hash, name_h, class_loader);
|
||||
assert(probe != NULL, "DEFINE_CLASS placeholder lost?");
|
||||
if (probe != NULL) {
|
||||
if (HAS_PENDING_EXCEPTION) {
|
||||
@ -1501,6 +1541,7 @@ instanceKlassHandle SystemDictionary::find_or_define_instance_class(symbolHandle
|
||||
}
|
||||
probe->set_definer(NULL);
|
||||
probe->remove_seen_thread(THREAD, PlaceholderTable::DEFINE_CLASS);
|
||||
placeholders()->find_and_remove(p_index, p_hash, name_h, class_loader, THREAD);
|
||||
SystemDictionary_lock->notify_all();
|
||||
}
|
||||
}
|
||||
@ -1512,7 +1553,6 @@ instanceKlassHandle SystemDictionary::find_or_define_instance_class(symbolHandle
|
||||
|
||||
return k;
|
||||
}
|
||||
|
||||
Handle SystemDictionary::compute_loader_lock_object(Handle class_loader, TRAPS) {
|
||||
// If class_loader is NULL we synchronize on _system_loader_lock_obj
|
||||
if (class_loader.is_null()) {
|
||||
@ -1902,11 +1942,11 @@ void SystemDictionary::initialize_preloaded_classes(TRAPS) {
|
||||
warning("Cannot find sun/jkernel/DownloadManager");
|
||||
}
|
||||
#endif // KERNEL
|
||||
|
||||
{ // Compute whether we should use loadClass or loadClassInternal when loading classes.
|
||||
methodOop method = instanceKlass::cast(classloader_klass())->find_method(vmSymbols::loadClassInternal_name(), vmSymbols::string_class_signature());
|
||||
_has_loadClassInternal = (method != NULL);
|
||||
}
|
||||
|
||||
{ // Compute whether we should use checkPackageAccess or NOT
|
||||
methodOop method = instanceKlass::cast(classloader_klass())->find_method(vmSymbols::checkPackageAccess_name(), vmSymbols::class_protectiondomain_signature());
|
||||
_has_checkPackageAccess = (method != NULL);
|
||||
|
@ -526,6 +526,7 @@ private:
static instanceKlassHandle load_instance_class(symbolHandle class_name, Handle class_loader, TRAPS);
static Handle compute_loader_lock_object(Handle class_loader, TRAPS);
static void check_loader_lock_contention(Handle loader_lock, TRAPS);
static bool is_parallelCapable(Handle class_loader);

static klassOop find_shared_class(symbolHandle class_name);

@ -362,6 +362,7 @@
template(class_signature, "Ljava/lang/Class;") \
template(string_signature, "Ljava/lang/String;") \
template(reference_signature, "Ljava/lang/ref/Reference;") \
template(concurrenthashmap_signature, "Ljava/util/concurrent/ConcurrentHashMap;") \
/* signature symbols needed by intrinsics */ \
VM_INTRINSICS_DO(VM_INTRINSIC_IGNORE, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, template, VM_ALIAS_IGNORE) \
\

@ -374,6 +375,9 @@
/* used by ClassFormatError when class name is not known yet */ \
template(unknown_class_name, "<Unknown>") \
\
/* used to identify class loaders handling parallel class loading */ \
template(parallelCapable_name, "parallelLockMap;") \
\
/* JVM monitoring and management support */ \
template(java_lang_StackTraceElement_array, "[Ljava/lang/StackTraceElement;") \
template(java_lang_management_ThreadState, "java/lang/management/ThreadState") \
@ -706,6 +706,30 @@ void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
}
}

// Apply the given closure to each live object in the space
// The usage of CompactibleFreeListSpace
// by the ConcurrentMarkSweepGeneration for concurrent GC's allows
// objects in the space with references to objects that are no longer
// valid. For example, an object may reference another object
// that has already been sweep up (collected). This method uses
// obj_is_alive() to determine whether it is safe to apply the closure to
// an object. See obj_is_alive() for details on how liveness of an
// object is decided.

void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
assert_lock_strong(freelistLock());
NOT_PRODUCT(verify_objects_initialized());
HeapWord *cur, *limit;
size_t curSize;
for (cur = bottom(), limit = end(); cur < limit;
cur += curSize) {
curSize = block_size(cur);
if (block_is_obj(cur) && obj_is_alive(cur)) {
blk->do_object(oop(cur));
}
}
}
void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
|
||||
UpwardsObjectClosure* cl) {
|
||||
assert_locked();
|
||||
@ -861,7 +885,9 @@ const {
|
||||
} else {
|
||||
// must read from what 'p' points to in each loop.
|
||||
klassOop k = ((volatile oopDesc*)p)->klass_or_null();
|
||||
if (k != NULL && ((oopDesc*)p)->is_parsable()) {
|
||||
if (k != NULL &&
|
||||
((oopDesc*)p)->is_parsable() &&
|
||||
((oopDesc*)p)->is_conc_safe()) {
|
||||
assert(k->is_oop(), "Should really be klass oop.");
|
||||
oop o = (oop)p;
|
||||
assert(o->is_oop(), "Should be an oop");
|
||||
|
@ -481,6 +481,15 @@ class CompactibleFreeListSpace: public CompactibleSpace {
|
||||
void oop_iterate(OopClosure* cl);
|
||||
|
||||
void object_iterate(ObjectClosure* blk);
|
||||
// Apply the closure to each object in the space whose references
|
||||
// point to objects in the heap. The usage of CompactibleFreeListSpace
|
||||
// by the ConcurrentMarkSweepGeneration for concurrent GC's allows
|
||||
// objects in the space with references to objects that are no longer
|
||||
// valid. For example, an object may reference another object
|
||||
// that has already been sweep up (collected). This method uses
|
||||
// obj_is_alive() to determine whether it is safe to iterate of
|
||||
// an object.
|
||||
void safe_object_iterate(ObjectClosure* blk);
|
||||
void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
|
||||
|
||||
// Requires that "mr" be entirely within the space.
|
||||
|
@ -3017,6 +3017,16 @@ ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
|
||||
if (freelistLock()->owned_by_self()) {
|
||||
Generation::safe_object_iterate(cl);
|
||||
} else {
|
||||
MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
|
||||
Generation::safe_object_iterate(cl);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
ConcurrentMarkSweepGeneration::pre_adjust_pointers() {
|
||||
}
|
||||
@ -6623,7 +6633,11 @@ size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
|
||||
if (_bitMap->isMarked(addr)) {
|
||||
// it's marked; is it potentially uninitialized?
|
||||
if (p->klass_or_null() != NULL) {
|
||||
if (CMSPermGenPrecleaningEnabled && !p->is_parsable()) {
|
||||
// If is_conc_safe is false, the object may be undergoing
|
||||
// change by the VM outside a safepoint. Don't try to
|
||||
// scan it, but rather leave it for the remark phase.
|
||||
if (CMSPermGenPrecleaningEnabled &&
|
||||
(!p->is_conc_safe() || !p->is_parsable())) {
|
||||
// Signal precleaning to redirty the card since
|
||||
// the klass pointer is already installed.
|
||||
assert(size == 0, "Initial value");
|
||||
@ -7001,7 +7015,6 @@ void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
|
||||
_mut->clear_range(mr);
|
||||
}
|
||||
DEBUG_ONLY(})
|
||||
|
||||
// Note: the finger doesn't advance while we drain
|
||||
// the stack below.
|
||||
PushOrMarkClosure pushOrMarkClosure(_collector,
|
||||
@ -8062,9 +8075,13 @@ size_t SweepClosure::doLiveChunk(FreeChunk* fc) {
|
||||
#ifdef DEBUG
|
||||
if (oop(addr)->klass_or_null() != NULL &&
|
||||
( !_collector->should_unload_classes()
|
||||
|| oop(addr)->is_parsable())) {
|
||||
|| (oop(addr)->is_parsable()) &&
|
||||
oop(addr)->is_conc_safe())) {
|
||||
// Ignore mark word because we are running concurrent with mutators
|
||||
assert(oop(addr)->is_oop(true), "live block should be an oop");
|
||||
// is_conc_safe is checked before performing this assertion
|
||||
// because an object that is not is_conc_safe may yet have
|
||||
// the return from size() correct.
|
||||
assert(size ==
|
||||
CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
|
||||
"P-mark and computed size do not agree");
|
||||
@ -8077,6 +8094,13 @@ size_t SweepClosure::doLiveChunk(FreeChunk* fc) {
|
||||
(!_collector->should_unload_classes()
|
||||
|| oop(addr)->is_parsable()),
|
||||
"Should be an initialized object");
|
||||
// Note that there are objects used during class redefinition
|
||||
// (e.g., merge_cp in VM_RedefineClasses::merge_cp_and_rewrite()
|
||||
// which are discarded with their is_conc_safe state still
|
||||
// false. These object may be floating garbage so may be
|
||||
// seen here. If they are floating garbage their size
|
||||
// should be attainable from their klass. Do not that
|
||||
// is_conc_safe() is true for oop(addr).
|
||||
// Ignore mark word because we are running concurrent with mutators
|
||||
assert(oop(addr)->is_oop(true), "live block should be an oop");
|
||||
// Verify that the bit map has no bits marked between
|
||||
@ -8484,7 +8508,7 @@ bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
|
||||
size_t i = num;
|
||||
oop cur = _overflow_list;
|
||||
const markOop proto = markOopDesc::prototype();
|
||||
NOT_PRODUCT(size_t n = 0;)
|
||||
NOT_PRODUCT(ssize_t n = 0;)
|
||||
for (oop next; i > 0 && cur != NULL; cur = next, i--) {
|
||||
next = oop(cur->mark());
|
||||
cur->set_mark(proto); // until proven otherwise
|
||||
@ -8501,45 +8525,131 @@ bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
|
||||
return !stack->isEmpty();
|
||||
}
|
||||
|
||||
// Multi-threaded; use CAS to break off a prefix
#define BUSY (oop(0x1aff1aff))
// (MT-safe) Get a prefix of at most "num" from the list.
// The overflow list is chained through the mark word of
// each object in the list. We fetch the entire list,
// break off a prefix of the right size and return the
// remainder. If other threads try to take objects from
// the overflow list at that time, they will wait for
// some time to see if data becomes available. If (and
// only if) another thread places one or more object(s)
// on the global list before we have returned the suffix
// to the global list, we will walk down our local list
// to find its end and append the global list to
// our suffix before returning it. This suffix walk can
// prove to be expensive (quadratic in the amount of traffic)
// when there are many objects in the overflow list and
// there is much producer-consumer contention on the list.
// *NOTE*: The overflow list manipulation code here and
// in ParNewGeneration:: are very similar in shape,
// except that in the ParNew case we use the old (from/eden)
// copy of the object to thread the list via its klass word.
// Because of the common code, if you make any changes in
// the code below, please check the ParNew version to see if
// similar changes might be needed.
// CR 6797058 has been filed to consolidate the common code.
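As a standalone model of the protocol described above: a minimal sketch using std::atomic and a plain intrusive node in place of the mark-word chaining and Atomic::xchg_ptr/cmpxchg_ptr calls used by the real code (the spin-and-sleep backoff on a BUSY list is omitted here):

```cpp
#include <atomic>
#include <cstddef>

struct Node { Node* next; };                              // stand-in for an overflow-list entry

static Node* const BUSY = reinterpret_cast<Node*>(0x1);   // sentinel: "list is being carved up"
static std::atomic<Node*> overflow_list{nullptr};

// Detach the whole list, keep at most 'num' nodes, publish the remainder back.
Node* take_prefix(std::size_t num) {
  Node* prefix = overflow_list.exchange(BUSY);            // claim the list, leave the BUSY marker
  if (prefix == nullptr || prefix == BUSY) {
    Node* expected = BUSY;
    if (prefix == nullptr)                                // restore NULL if we clobbered it with BUSY
      overflow_list.compare_exchange_strong(expected, nullptr);
    return nullptr;                                       // nothing to take (or another thread holds it)
  }
  Node* cur = prefix;                                     // walk down the first 'num' nodes
  for (std::size_t i = num; i > 1 && cur->next != nullptr; i--) cur = cur->next;
  Node* suffix = cur->next;
  cur->next = nullptr;                                    // break off the prefix
  if (suffix == nullptr) {                                // list had <= num nodes: clear BUSY if still ours
    Node* expected = BUSY;
    overflow_list.compare_exchange_strong(expected, nullptr);
    return prefix;
  }
  Node* tail = suffix;                                    // find the suffix tail, then splice the suffix
  while (tail->next != nullptr) tail = tail->next;        // back in front of anything pushed meanwhile
  Node* observed = overflow_list.load();
  do {
    tail->next = (observed == BUSY) ? nullptr : observed;
  } while (!overflow_list.compare_exchange_weak(observed, suffix));
  return prefix;
}
```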
bool CMSCollector::par_take_from_overflow_list(size_t num,
|
||||
OopTaskQueue* work_q) {
|
||||
assert(work_q->size() == 0, "That's the current policy");
|
||||
assert(work_q->size() == 0, "First empty local work queue");
|
||||
assert(num < work_q->max_elems(), "Can't bite more than we can chew");
|
||||
if (_overflow_list == NULL) {
|
||||
return false;
|
||||
}
|
||||
// Grab the entire list; we'll put back a suffix
|
||||
oop prefix = (oop)Atomic::xchg_ptr(NULL, &_overflow_list);
|
||||
if (prefix == NULL) { // someone grabbed it before we did ...
|
||||
// ... we could spin for a short while, but for now we don't
|
||||
return false;
|
||||
oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
|
||||
Thread* tid = Thread::current();
|
||||
size_t CMSOverflowSpinCount = (size_t)ParallelGCThreads;
|
||||
size_t sleep_time_millis = MAX2((size_t)1, num/100);
|
||||
// If the list is busy, we spin for a short while,
|
||||
// sleeping between attempts to get the list.
|
||||
for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
|
||||
os::sleep(tid, sleep_time_millis, false);
|
||||
if (_overflow_list == NULL) {
|
||||
// Nothing left to take
|
||||
return false;
|
||||
} else if (_overflow_list != BUSY) {
|
||||
// Try and grab the prefix
|
||||
prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
|
||||
}
|
||||
}
|
||||
// If the list was found to be empty, or we spun long
|
||||
// enough, we give up and return empty-handed. If we leave
|
||||
// the list in the BUSY state below, it must be the case that
|
||||
// some other thread holds the overflow list and will set it
|
||||
// to a non-BUSY state in the future.
|
||||
if (prefix == NULL || prefix == BUSY) {
|
||||
// Nothing to take or waited long enough
|
||||
if (prefix == NULL) {
|
||||
// Write back the NULL in case we overwrote it with BUSY above
|
||||
// and it is still the same value.
|
||||
(void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
assert(prefix != NULL && prefix != BUSY, "Error");
|
||||
size_t i = num;
|
||||
oop cur = prefix;
|
||||
// Walk down the first "num" objects, unless we reach the end.
|
||||
for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
|
||||
if (cur->mark() != NULL) {
|
||||
if (cur->mark() == NULL) {
|
||||
// We have "num" or fewer elements in the list, so there
|
||||
// is nothing to return to the global list.
|
||||
// Write back the NULL in lieu of the BUSY we wrote
|
||||
// above, if it is still the same value.
|
||||
if (_overflow_list == BUSY) {
|
||||
(void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
|
||||
}
|
||||
} else {
|
||||
// Chop off the suffix and rerturn it to the global list.
|
||||
assert(cur->mark() != BUSY, "Error");
|
||||
oop suffix_head = cur->mark(); // suffix will be put back on global list
|
||||
cur->set_mark(NULL); // break off suffix
|
||||
// Find tail of suffix so we can prepend suffix to global list
|
||||
for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
|
||||
oop suffix_tail = cur;
|
||||
assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
|
||||
"Tautology");
|
||||
// It's possible that the list is still in the empty(busy) state
|
||||
// we left it in a short while ago; in that case we may be
|
||||
// able to place back the suffix without incurring the cost
|
||||
// of a walk down the list.
|
||||
oop observed_overflow_list = _overflow_list;
|
||||
do {
|
||||
cur = observed_overflow_list;
|
||||
suffix_tail->set_mark(markOop(cur));
|
||||
oop cur_overflow_list = observed_overflow_list;
|
||||
bool attached = false;
|
||||
while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
|
||||
observed_overflow_list =
|
||||
(oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur);
|
||||
} while (cur != observed_overflow_list);
|
||||
(oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
|
||||
if (cur_overflow_list == observed_overflow_list) {
|
||||
attached = true;
|
||||
break;
|
||||
} else cur_overflow_list = observed_overflow_list;
|
||||
}
|
||||
if (!attached) {
|
||||
// Too bad, someone else sneaked in (at least) an element; we'll need
|
||||
// to do a splice. Find tail of suffix so we can prepend suffix to global
|
||||
// list.
|
||||
for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
|
||||
oop suffix_tail = cur;
|
||||
assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
|
||||
"Tautology");
|
||||
observed_overflow_list = _overflow_list;
|
||||
do {
|
||||
cur_overflow_list = observed_overflow_list;
|
||||
if (cur_overflow_list != BUSY) {
|
||||
// Do the splice ...
|
||||
suffix_tail->set_mark(markOop(cur_overflow_list));
|
||||
} else { // cur_overflow_list == BUSY
|
||||
suffix_tail->set_mark(NULL);
|
||||
}
|
||||
// ... and try to place spliced list back on overflow_list ...
|
||||
observed_overflow_list =
|
||||
(oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
|
||||
} while (cur_overflow_list != observed_overflow_list);
|
||||
// ... until we have succeeded in doing so.
|
||||
}
|
||||
}
|
||||
|
||||
// Push the prefix elements on work_q
|
||||
assert(prefix != NULL, "control point invariant");
|
||||
const markOop proto = markOopDesc::prototype();
|
||||
oop next;
|
||||
NOT_PRODUCT(size_t n = 0;)
|
||||
NOT_PRODUCT(ssize_t n = 0;)
|
||||
for (cur = prefix; cur != NULL; cur = next) {
|
||||
next = oop(cur->mark());
|
||||
cur->set_mark(proto); // until proven otherwise
|
||||
@ -8573,11 +8683,16 @@ void CMSCollector::par_push_on_overflow_list(oop p) {
|
||||
oop cur_overflow_list;
|
||||
do {
|
||||
cur_overflow_list = observed_overflow_list;
|
||||
p->set_mark(markOop(cur_overflow_list));
|
||||
if (cur_overflow_list != BUSY) {
|
||||
p->set_mark(markOop(cur_overflow_list));
|
||||
} else {
|
||||
p->set_mark(NULL);
|
||||
}
|
||||
observed_overflow_list =
|
||||
(oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
|
||||
} while (cur_overflow_list != observed_overflow_list);
|
||||
}
|
||||
#undef BUSY
|
||||
|
||||
// Single threaded
|
||||
// General Note on GrowableArray: pushes may silently fail
|
||||
@ -8586,7 +8701,7 @@ void CMSCollector::par_push_on_overflow_list(oop p) {
|
||||
// a lot of code in the JVM. The prudent thing for GrowableArray
|
||||
// to do (for now) is to exit with an error. However, that may
|
||||
// be too draconian in some cases because the caller may be
|
||||
// able to recover without much harm. For suych cases, we
|
||||
// able to recover without much harm. For such cases, we
|
||||
// should probably introduce a "soft_push" method which returns
|
||||
// an indication of success or failure with the assumption that
|
||||
// the caller may be able to recover from a failure; code in
|
||||
@ -8594,8 +8709,6 @@ void CMSCollector::par_push_on_overflow_list(oop p) {
|
||||
// failures where possible, thus, incrementally hardening the VM
|
||||
// in such low resource situations.
|
||||
void CMSCollector::preserve_mark_work(oop p, markOop m) {
|
||||
int PreserveMarkStackSize = 128;
|
||||
|
||||
if (_preserved_oop_stack == NULL) {
|
||||
assert(_preserved_mark_stack == NULL,
|
||||
"bijection with preserved_oop_stack");
|
||||
|
@ -595,7 +595,7 @@ class CMSCollector: public CHeapObj {
|
||||
size_t _ser_kac_preclean_ovflw;
|
||||
size_t _ser_kac_ovflw;
|
||||
size_t _par_kac_ovflw;
|
||||
NOT_PRODUCT(size_t _num_par_pushes;)
|
||||
NOT_PRODUCT(ssize_t _num_par_pushes;)
|
||||
|
||||
// ("Weak") Reference processing support
|
||||
ReferenceProcessor* _ref_processor;
|
||||
@ -1212,6 +1212,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
|
||||
// More iteration support
|
||||
virtual void oop_iterate(MemRegion mr, OopClosure* cl);
|
||||
virtual void oop_iterate(OopClosure* cl);
|
||||
virtual void safe_object_iterate(ObjectClosure* cl);
|
||||
virtual void object_iterate(ObjectClosure* cl);
|
||||
|
||||
// Need to declare the full complement of closures, whether we'll
|
||||
|
@ -24,7 +24,7 @@
|
||||
|
||||
// We need to sort heap regions by collection desirability.
|
||||
|
||||
class CSetChooserCache {
|
||||
class CSetChooserCache VALUE_OBJ_CLASS_SPEC {
|
||||
private:
|
||||
enum {
|
||||
CacheLength = 16
|
||||
|
@ -33,7 +33,7 @@ enum PostYieldAction {
|
||||
PYA_cancel // It's been completed by somebody else: cancel.
|
||||
};
|
||||
|
||||
class ConcurrentG1Refine {
|
||||
class ConcurrentG1Refine: public CHeapObj {
|
||||
ConcurrentG1RefineThread* _cg1rThread;
|
||||
|
||||
volatile jint _pya;
|
||||
|
@ -30,7 +30,7 @@ typedef GenericTaskQueueSet<oop> CMTaskQueueSet;
|
||||
// A generic CM bit map. This is essentially a wrapper around the BitMap
|
||||
// class, with one bit per (1<<_shifter) HeapWords.
|
||||
|
||||
class CMBitMapRO {
|
||||
class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
|
||||
protected:
|
||||
HeapWord* _bmStartWord; // base address of range covered by map
|
||||
size_t _bmWordSize; // map size (in #HeapWords covered)
|
||||
@ -139,7 +139,7 @@ class CMBitMap : public CMBitMapRO {
|
||||
|
||||
// Represents a marking stack used by the CM collector.
|
||||
// Ideally this should be GrowableArray<> just like MSC's marking stack(s).
|
||||
class CMMarkStack {
|
||||
class CMMarkStack VALUE_OBJ_CLASS_SPEC {
|
||||
ConcurrentMark* _cm;
|
||||
oop* _base; // bottom of stack
|
||||
jint _index; // one more than last occupied index
|
||||
@ -237,7 +237,7 @@ class CMMarkStack {
|
||||
void oops_do(OopClosure* f);
|
||||
};
|
||||
|
||||
class CMRegionStack {
|
||||
class CMRegionStack VALUE_OBJ_CLASS_SPEC {
|
||||
MemRegion* _base;
|
||||
jint _capacity;
|
||||
jint _index;
|
||||
@ -312,7 +312,7 @@ typedef enum {
|
||||
|
||||
class ConcurrentMarkThread;
|
||||
|
||||
class ConcurrentMark {
|
||||
class ConcurrentMark: public CHeapObj {
|
||||
friend class ConcurrentMarkThread;
|
||||
friend class CMTask;
|
||||
friend class CMBitMapClosure;
|
||||
|
@ -141,7 +141,7 @@ YoungList::YoungList(G1CollectedHeap* g1h)
|
||||
_scan_only_head(NULL), _scan_only_tail(NULL), _curr_scan_only(NULL),
|
||||
_length(0), _scan_only_length(0),
|
||||
_last_sampled_rs_lengths(0),
|
||||
_survivor_head(NULL), _survivors_tail(NULL), _survivor_length(0)
|
||||
_survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0)
|
||||
{
|
||||
guarantee( check_list_empty(false), "just making sure..." );
|
||||
}
|
||||
@ -159,16 +159,15 @@ void YoungList::push_region(HeapRegion *hr) {
|
||||
}
|
||||
|
||||
void YoungList::add_survivor_region(HeapRegion* hr) {
|
||||
assert(!hr->is_survivor(), "should not already be for survived");
|
||||
assert(hr->is_survivor(), "should be flagged as survivor region");
|
||||
assert(hr->get_next_young_region() == NULL, "cause it should!");
|
||||
|
||||
hr->set_next_young_region(_survivor_head);
|
||||
if (_survivor_head == NULL) {
|
||||
_survivors_tail = hr;
|
||||
_survivor_tail = hr;
|
||||
}
|
||||
_survivor_head = hr;
|
||||
|
||||
hr->set_survivor();
|
||||
++_survivor_length;
|
||||
}
|
||||
|
||||
@ -239,7 +238,7 @@ void YoungList::empty_list() {
|
||||
|
||||
empty_list(_survivor_head);
|
||||
_survivor_head = NULL;
|
||||
_survivors_tail = NULL;
|
||||
_survivor_tail = NULL;
|
||||
_survivor_length = 0;
|
||||
|
||||
_last_sampled_rs_lengths = 0;
|
||||
@ -391,6 +390,7 @@ YoungList::reset_auxilary_lists() {
|
||||
|
||||
// Add survivor regions to SurvRateGroup.
|
||||
_g1h->g1_policy()->note_start_adding_survivor_regions();
|
||||
_g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);
|
||||
for (HeapRegion* curr = _survivor_head;
|
||||
curr != NULL;
|
||||
curr = curr->get_next_young_region()) {
|
||||
@ -401,7 +401,7 @@ YoungList::reset_auxilary_lists() {
|
||||
if (_survivor_head != NULL) {
|
||||
_head = _survivor_head;
|
||||
_length = _survivor_length + _scan_only_length;
|
||||
_survivors_tail->set_next_young_region(_scan_only_head);
|
||||
_survivor_tail->set_next_young_region(_scan_only_head);
|
||||
} else {
|
||||
_head = _scan_only_head;
|
||||
_length = _scan_only_length;
|
||||
@ -418,9 +418,9 @@ YoungList::reset_auxilary_lists() {
|
||||
_curr_scan_only = NULL;
|
||||
|
||||
_survivor_head = NULL;
|
||||
_survivors_tail = NULL;
|
||||
_survivor_tail = NULL;
|
||||
_survivor_length = 0;
|
||||
_g1h->g1_policy()->finished_recalculating_age_indexes();
|
||||
_g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);
|
||||
|
||||
assert(check_list_well_formed(), "young list should be well formed");
|
||||
}
|
||||
@ -553,7 +553,7 @@ HeapRegion* G1CollectedHeap::newAllocRegionWithExpansion(int purpose,
|
||||
if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) {
|
||||
alloc_region = newAllocRegion_work(word_size, true, zero_filled);
|
||||
if (purpose == GCAllocForSurvived && alloc_region != NULL) {
|
||||
_young_list->add_survivor_region(alloc_region);
|
||||
alloc_region->set_survivor();
|
||||
}
|
||||
++_gc_alloc_region_counts[purpose];
|
||||
} else {
|
||||
@ -949,6 +949,10 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
|
||||
GCOverheadReporter::recordSTWEnd(end);
|
||||
g1_policy()->record_full_collection_end();
|
||||
|
||||
#ifdef TRACESPINNING
|
||||
ParallelTaskTerminator::print_termination_counts();
|
||||
#endif
|
||||
|
||||
gc_epilogue(true);
|
||||
|
||||
// Abandon concurrent refinement. This must happen last: in the
|
||||
@ -1285,7 +1289,9 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
|
||||
_unclean_regions_coming(false),
|
||||
_young_list(new YoungList(this)),
|
||||
_gc_time_stamp(0),
|
||||
_surviving_young_words(NULL)
|
||||
_surviving_young_words(NULL),
|
||||
_in_cset_fast_test(NULL),
|
||||
_in_cset_fast_test_base(NULL)
|
||||
{
|
||||
_g1h = this; // To catch bugs.
|
||||
if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
|
||||
@ -2485,6 +2491,19 @@ G1CollectedHeap::do_collection_pause_at_safepoint(HeapRegion* popular_region) {
g1_policy()->record_collection_pause_start(start_time_sec,
start_used_bytes);

guarantee(_in_cset_fast_test == NULL, "invariant");
guarantee(_in_cset_fast_test_base == NULL, "invariant");
_in_cset_fast_test_length = n_regions();
_in_cset_fast_test_base =
NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length);
memset(_in_cset_fast_test_base, false,
_in_cset_fast_test_length * sizeof(bool));
// We're biasing _in_cset_fast_test to avoid subtracting the
// beginning of the heap every time we want to index; basically
// it's the same with what we do with the card table.
_in_cset_fast_test = _in_cset_fast_test_base -
((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);

#if SCAN_ONLY_VERBOSE
_young_list->print();
#endif // SCAN_ONLY_VERBOSE
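The biasing trick the comment above refers to (the same one used for the card table) in isolation: pre-offset the base pointer by heap_start >> shift so membership queries are a shift plus a load, with no subtraction of the heap base. Names below are made up for illustration; the real fields are _in_cset_fast_test_base and _in_cset_fast_test:

```cpp
#include <cstddef>
#include <cstdint>

struct BiasedRegionTable {
  bool* base;    // start of the allocated array, one entry per region
  bool* biased;  // base - (heap_start >> shift); mirrors the VM's biased pointer
  int   shift;   // log2 of the region size in bytes

  void init(std::uintptr_t heap_start, std::size_t num_regions, int region_shift) {
    shift  = region_shift;
    base   = new bool[num_regions]();              // zero-initialized: nothing in the cset yet
    biased = base - (heap_start >> shift);         // out-of-bounds arithmetic, same trick as the VM
  }
  // One shift and one load per query.
  bool in_collection_set(std::uintptr_t addr) const { return biased[addr >> shift]; }
  void add_to_collection_set(std::uintptr_t addr)   { biased[addr >> shift] = true; }
};
```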
@ -2553,6 +2572,12 @@ G1CollectedHeap::do_collection_pause_at_safepoint(HeapRegion* popular_region) {
|
||||
free_collection_set(g1_policy()->collection_set());
|
||||
g1_policy()->clear_collection_set();
|
||||
|
||||
FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base);
|
||||
// this is more for peace of mind; we're nulling them here and
|
||||
// we're expecting them to be null at the beginning of the next GC
|
||||
_in_cset_fast_test = NULL;
|
||||
_in_cset_fast_test_base = NULL;
|
||||
|
||||
if (popular_region != NULL) {
|
||||
// We have to wait until now, because we don't want the region to
|
||||
// be rescheduled for pop-evac during RS update.
|
||||
@ -2572,6 +2597,9 @@ G1CollectedHeap::do_collection_pause_at_safepoint(HeapRegion* popular_region) {
|
||||
_young_list->print();
|
||||
#endif // SCAN_ONLY_VERBOSE
|
||||
|
||||
g1_policy()->record_survivor_regions(_young_list->survivor_length(),
|
||||
_young_list->first_survivor_region(),
|
||||
_young_list->last_survivor_region());
|
||||
_young_list->reset_auxilary_lists();
|
||||
}
|
||||
} else {
|
||||
@ -2598,7 +2626,9 @@ G1CollectedHeap::do_collection_pause_at_safepoint(HeapRegion* popular_region) {
|
||||
#endif // SCAN_ONLY_VERBOSE
|
||||
|
||||
double end_time_sec = os::elapsedTime();
|
||||
g1_policy()->record_pause_time((end_time_sec - start_time_sec)*1000.0);
|
||||
if (!evacuation_failed()) {
|
||||
g1_policy()->record_pause_time((end_time_sec - start_time_sec)*1000.0);
|
||||
}
|
||||
GCOverheadReporter::recordSTWEnd(end_time_sec);
|
||||
g1_policy()->record_collection_pause_end(popular_region != NULL,
|
||||
abandoned);
|
||||
@ -2621,8 +2651,13 @@ G1CollectedHeap::do_collection_pause_at_safepoint(HeapRegion* popular_region) {
|
||||
}
|
||||
}
|
||||
|
||||
if (mark_in_progress())
|
||||
if (mark_in_progress()) {
|
||||
concurrent_mark()->update_g1_committed();
|
||||
}
|
||||
|
||||
#ifdef TRACESPINNING
|
||||
ParallelTaskTerminator::print_termination_counts();
|
||||
#endif
|
||||
|
||||
gc_epilogue(false);
|
||||
}
|
||||
@ -2733,6 +2768,13 @@ void G1CollectedHeap::forget_alloc_region_list() {
|
||||
_gc_alloc_region_list = r->next_gc_alloc_region();
|
||||
r->set_next_gc_alloc_region(NULL);
|
||||
r->set_is_gc_alloc_region(false);
|
||||
if (r->is_survivor()) {
|
||||
if (r->is_empty()) {
|
||||
r->set_not_young();
|
||||
} else {
|
||||
_young_list->add_survivor_region(r);
|
||||
}
|
||||
}
|
||||
if (r->is_empty()) {
|
||||
++_free_regions;
|
||||
}
|
||||
@ -3129,6 +3171,20 @@ HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
|
||||
return block;
|
||||
}
|
||||
|
||||
void G1CollectedHeap::retire_alloc_region(HeapRegion* alloc_region,
|
||||
bool par) {
|
||||
// Another thread might have obtained alloc_region for the given
|
||||
// purpose, and might be attempting to allocate in it, and might
|
||||
// succeed. Therefore, we can't do the "finalization" stuff on the
|
||||
// region below until we're sure the last allocation has happened.
|
||||
// We ensure this by allocating the remaining space with a garbage
|
||||
// object.
|
||||
if (par) par_allocate_remaining_space(alloc_region);
|
||||
// Now we can do the post-GC stuff on the region.
|
||||
alloc_region->note_end_of_copying();
|
||||
g1_policy()->record_after_bytes(alloc_region->used());
|
||||
}
|
||||
|
||||
HeapWord*
|
||||
G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose,
|
||||
HeapRegion* alloc_region,
|
||||
@ -3146,16 +3202,7 @@ G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose,
|
||||
// Otherwise, continue; this new region is empty, too.
|
||||
}
|
||||
assert(alloc_region != NULL, "We better have an allocation region");
|
||||
// Another thread might have obtained alloc_region for the given
|
||||
// purpose, and might be attempting to allocate in it, and might
|
||||
// succeed. Therefore, we can't do the "finalization" stuff on the
|
||||
// region below until we're sure the last allocation has happened.
|
||||
// We ensure this by allocating the remaining space with a garbage
|
||||
// object.
|
||||
if (par) par_allocate_remaining_space(alloc_region);
|
||||
// Now we can do the post-GC stuff on the region.
|
||||
alloc_region->note_end_of_copying();
|
||||
g1_policy()->record_after_bytes(alloc_region->used());
|
||||
retire_alloc_region(alloc_region, par);
|
||||
|
||||
if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) {
|
||||
// Cannot allocate more regions for the given purpose.
|
||||
@ -3164,7 +3211,7 @@ G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose,
|
||||
if (purpose != alt_purpose) {
|
||||
HeapRegion* alt_region = _gc_alloc_regions[alt_purpose];
|
||||
// Has not the alternative region been aliased?
|
||||
if (alloc_region != alt_region) {
|
||||
if (alloc_region != alt_region && alt_region != NULL) {
|
||||
// Try to allocate in the alternative region.
|
||||
if (par) {
|
||||
block = alt_region->par_allocate(word_size);
|
||||
@ -3173,9 +3220,10 @@ G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose,
|
||||
}
|
||||
// Make an alias.
|
||||
_gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose];
|
||||
}
|
||||
if (block != NULL) {
|
||||
return block;
|
||||
if (block != NULL) {
|
||||
return block;
|
||||
}
|
||||
retire_alloc_region(alt_region, par);
|
||||
}
|
||||
// Both the allocation region and the alternative one are full
|
||||
// and aliased, replace them with a new allocation region.
|
||||
@ -3476,6 +3524,7 @@ protected:
|
||||
OverflowQueue* _overflowed_refs;
|
||||
|
||||
G1ParGCAllocBuffer _alloc_buffers[GCAllocPurposeCount];
|
||||
ageTable _age_table;
|
||||
|
||||
size_t _alloc_buffer_waste;
|
||||
size_t _undo_waste;
|
||||
@ -3517,6 +3566,7 @@ public:
|
||||
_refs(g1h->task_queue(queue_num)),
|
||||
_hash_seed(17), _queue_num(queue_num),
|
||||
_term_attempts(0),
|
||||
_age_table(false),
|
||||
#if G1_DETAILED_STATS
|
||||
_pushes(0), _pops(0), _steals(0),
|
||||
_steal_attempts(0), _overflow_pushes(0),
|
||||
@ -3551,8 +3601,9 @@ public:
|
||||
|
||||
RefToScanQueue* refs() { return _refs; }
|
||||
OverflowQueue* overflowed_refs() { return _overflowed_refs; }
|
||||
ageTable* age_table() { return &_age_table; }
|
||||
|
||||
inline G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
|
||||
G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
|
||||
return &_alloc_buffers[purpose];
|
||||
}
|
||||
|
||||
@ -3560,6 +3611,9 @@ public:
|
||||
size_t undo_waste() { return _undo_waste; }
|
||||
|
||||
void push_on_queue(oop* ref) {
|
||||
assert(ref != NULL, "invariant");
|
||||
assert(has_partial_array_mask(ref) || _g1h->obj_in_cs(*ref), "invariant");
|
||||
|
||||
if (!refs()->push(ref)) {
|
||||
overflowed_refs()->push(ref);
|
||||
IF_G1_DETAILED_STATS(note_overflow_push());
|
||||
@ -3572,6 +3626,10 @@ public:
|
||||
if (!refs()->pop_local(ref)) {
|
||||
ref = NULL;
|
||||
} else {
|
||||
assert(ref != NULL, "invariant");
|
||||
assert(has_partial_array_mask(ref) || _g1h->obj_in_cs(*ref),
|
||||
"invariant");
|
||||
|
||||
IF_G1_DETAILED_STATS(note_pop());
|
||||
}
|
||||
}
|
||||
@ -3601,8 +3659,7 @@ public:
|
||||
|
||||
obj = alloc_buf->allocate(word_sz);
|
||||
assert(obj != NULL, "buffer was definitely big enough...");
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
obj = _g1h->par_allocate_during_gc(purpose, word_sz);
|
||||
}
|
||||
return obj;
|
||||
@ -3695,24 +3752,57 @@ public:
|
||||
}
|
||||
}
|
||||
|
||||
private:
void deal_with_reference(oop* ref_to_scan) {
if (has_partial_array_mask(ref_to_scan)) {
_partial_scan_cl->do_oop_nv(ref_to_scan);
} else {
// Note: we can use "raw" versions of "region_containing" because
// "obj_to_scan" is definitely in the heap, and is not in a
// humongous region.
HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
_evac_cl->set_region(r);
_evac_cl->do_oop_nv(ref_to_scan);
}
}

public:
void trim_queue() {
// I've replicated the loop twice, first to drain the overflow
// queue, second to drain the task queue. This is better than
// having a single loop, which checks both conditions and, inside
// it, either pops the overflow queue or the task queue, as each
// loop is tighter. Also, the decision to drain the overflow queue
// first is not arbitrary, as the overflow queue is not visible
// to the other workers, whereas the task queue is. So, we want to
// drain the "invisible" entries first, while allowing the other
// workers to potentially steal the "visible" entries.
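The drain order argued for above, as a self-contained sketch; Task, overflow and queue are illustrative stand-ins rather than G1's OverflowQueue/RefToScanQueue types:

```cpp
#include <deque>
#include <vector>

struct Task {};                        // whatever a queued reference would carry

struct Worker {
  std::vector<Task> overflow;          // private to this worker: invisible to stealers
  std::deque<Task>  queue;             // shared work-stealing deque (greatly simplified)

  void process(const Task&) { /* scan the reference; may push more tasks */ }

  void trim_queue() {
    // Processing an entry can push new work onto either container,
    // so keep looping until both are empty.
    while (!overflow.empty() || !queue.empty()) {
      // First: drain the invisible overflow entries, leaving the visible
      // queue entries available for other workers to steal meanwhile.
      while (!overflow.empty()) {
        Task t = overflow.back();
        overflow.pop_back();
        process(t);
      }
      // Second: drain the local end of the shared queue.
      while (!queue.empty()) {
        Task t = queue.back();
        queue.pop_back();
        process(t);
      }
    }
  }
};
```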
while (refs_to_scan() > 0 || overflowed_refs_to_scan() > 0) {
|
||||
oop *ref_to_scan = NULL;
|
||||
if (overflowed_refs_to_scan() == 0) {
|
||||
pop_from_queue(ref_to_scan);
|
||||
} else {
|
||||
while (overflowed_refs_to_scan() > 0) {
|
||||
oop *ref_to_scan = NULL;
|
||||
pop_from_overflow_queue(ref_to_scan);
|
||||
assert(ref_to_scan != NULL, "invariant");
|
||||
// We shouldn't have pushed it on the queue if it was not
|
||||
// pointing into the CSet.
|
||||
assert(ref_to_scan != NULL, "sanity");
|
||||
assert(has_partial_array_mask(ref_to_scan) ||
|
||||
_g1h->obj_in_cs(*ref_to_scan), "sanity");
|
||||
|
||||
deal_with_reference(ref_to_scan);
|
||||
}
|
||||
if (ref_to_scan != NULL) {
|
||||
if ((intptr_t)ref_to_scan & G1_PARTIAL_ARRAY_MASK) {
|
||||
_partial_scan_cl->do_oop_nv(ref_to_scan);
|
||||
} else {
|
||||
// Note: we can use "raw" versions of "region_containing" because
|
||||
// "obj_to_scan" is definitely in the heap, and is not in a
|
||||
// humongous region.
|
||||
HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
|
||||
_evac_cl->set_region(r);
|
||||
_evac_cl->do_oop_nv(ref_to_scan);
|
||||
|
||||
while (refs_to_scan() > 0) {
|
||||
oop *ref_to_scan = NULL;
|
||||
pop_from_queue(ref_to_scan);
|
||||
|
||||
if (ref_to_scan != NULL) {
|
||||
// We shouldn't have pushed it on the queue if it was not
|
||||
// pointing into the CSet.
|
||||
assert(has_partial_array_mask(ref_to_scan) ||
|
||||
_g1h->obj_in_cs(*ref_to_scan), "sanity");
|
||||
|
||||
deal_with_reference(ref_to_scan);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -3728,16 +3818,25 @@ G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState*

// Should probably be made inline and moved in g1OopClosures.inline.hpp.
void G1ParScanClosure::do_oop_nv(oop* p) {
oop obj = *p;

if (obj != NULL) {
if (_g1->obj_in_cs(obj)) {
if (obj->is_forwarded()) {
*p = obj->forwardee();
} else {
_par_scan_state->push_on_queue(p);
return;
}
if (_g1->in_cset_fast_test(obj)) {
// We're not going to even bother checking whether the object is
// already forwarded or not, as this usually causes an immediate
// stall. We'll try to prefetch the object (for write, given that
// we might need to install the forwarding reference) and we'll
// get back to it when pop it from the queue
Prefetch::write(obj->mark_addr(), 0);
Prefetch::read(obj->mark_addr(), (HeapWordSize*2));

// slightly paranoid test; I'm trying to catch potential
// problems before we go into push_on_queue to know where the
// problem is coming from
assert(obj == *p, "the value of *p should not have changed");
_par_scan_state->push_on_queue(p);
} else {
_g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num());
}
_g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num());
}
}
@ -3765,7 +3864,9 @@ oop G1ParCopyHelper::copy_to_survivor_space(oop old) {
|
||||
(!from_region->is_young() && young_index == 0), "invariant" );
|
||||
G1CollectorPolicy* g1p = _g1->g1_policy();
|
||||
markOop m = old->mark();
|
||||
GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, m->age(),
|
||||
int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
|
||||
: m->age();
|
||||
GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
|
||||
word_sz);
|
||||
HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz);
|
||||
oop obj = oop(obj_ptr);
|
||||
@ -3777,13 +3878,39 @@ oop G1ParCopyHelper::copy_to_survivor_space(oop old) {
|
||||
return _g1->handle_evacuation_failure_par(cl, old);
|
||||
}
|
||||
|
||||
// We're going to allocate linearly, so might as well prefetch ahead.
|
||||
Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
|
||||
|
||||
oop forward_ptr = old->forward_to_atomic(obj);
|
||||
if (forward_ptr == NULL) {
|
||||
Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
|
||||
obj->set_mark(m);
|
||||
if (g1p->track_object_age(alloc_purpose)) {
|
||||
obj->incr_age();
|
||||
// We could simply do obj->incr_age(). However, this causes a
// performance issue. obj->incr_age() will first check whether
// the object has a displaced mark by checking its mark word;
// getting the mark word from the new location of the object
// stalls. So, given that we already have the mark word and we
// are about to install it anyway, it's better to increase the
// age on the mark word, when the object does not have a
// displaced mark word. We're not expecting many objects to have
// a displaced mark word, so that case is not optimized
// further (it could be...) and we simply call obj->incr_age().

if (m->has_displaced_mark_helper()) {
// in this case, we have to install the mark word first,
// otherwise obj looks to be forwarded (the old mark word,
// which contains the forward pointer, was copied)
obj->set_mark(m);
obj->incr_age();
} else {
m = m->incr_age();
obj->set_mark(m);
}
_par_scan_state->age_table()->add(obj, word_sz);
} else {
obj->set_mark(m);
}

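For illustration only, the trade-off above can be sketched with a toy mark word: assume a 64-bit word whose low four bits hold the age and a flag standing in for the displaced-mark case (this is not HotSpot's markOop layout).

#include <cstdint>
#include <cassert>

typedef uint64_t MarkWord;
const MarkWord kAgeMask = 0xF;           // assume: age lives in the low 4 bits

MarkWord incr_age(MarkWord m) {
  uint64_t age = m & kAgeMask;
  if (age < kAgeMask) age++;             // saturate at the maximum age
  return (m & ~kAgeMask) | age;
}

struct Obj { MarkWord mark; };

// Install the (possibly aged) mark into the copied object. When the mark is
// displaced we install it first and then age via the object, mirroring the
// two branches in the patch; otherwise we age the word we already hold.
void finish_copy(Obj* obj, MarkWord m, bool displaced) {
  if (displaced) {
    obj->mark = m;                       // install first, then age indirectly
    obj->mark = incr_age(obj->mark);
  } else {
    obj->mark = incr_age(m);             // age the word in hand, one store
  }
}

int main() {
  Obj o = { 0 };
  finish_copy(&o, 0x3, false);
  assert((o.mark & kAgeMask) == 4);
  return 0;
}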
// preserve "next" mark bit
|
||||
if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) {
|
||||
if (!use_local_bitmaps ||
|
||||
@ -3805,9 +3932,11 @@ oop G1ParCopyHelper::copy_to_survivor_space(oop old) {
|
||||
|
||||
if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
|
||||
arrayOop(old)->set_length(0);
|
||||
_par_scan_state->push_on_queue((oop*) ((intptr_t)old | G1_PARTIAL_ARRAY_MASK));
|
||||
_par_scan_state->push_on_queue(set_partial_array_mask(old));
|
||||
} else {
|
||||
_scanner->set_region(_g1->heap_region_containing(obj));
|
||||
// No point in using the slower heap_region_containing() method,
|
||||
// given that we know obj is in the heap.
|
||||
_scanner->set_region(_g1->heap_region_containing_raw(obj));
|
||||
obj->oop_iterate_backwards(_scanner);
|
||||
}
|
||||
} else {
|
||||
@ -3817,47 +3946,55 @@ oop G1ParCopyHelper::copy_to_survivor_space(oop old) {
|
||||
return obj;
|
||||
}
|
||||
|
||||
template<bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee>
|
||||
void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_forwardee>::do_oop_work(oop* p) {
|
||||
template<bool do_gen_barrier, G1Barrier barrier,
|
||||
bool do_mark_forwardee, bool skip_cset_test>
|
||||
void G1ParCopyClosure<do_gen_barrier, barrier,
|
||||
do_mark_forwardee, skip_cset_test>::do_oop_work(oop* p) {
|
||||
oop obj = *p;
|
||||
assert(barrier != G1BarrierRS || obj != NULL,
|
||||
"Precondition: G1BarrierRS implies obj is nonNull");
|
||||
|
||||
if (obj != NULL) {
|
||||
if (_g1->obj_in_cs(obj)) {
|
||||
// The only time we skip the cset test is when we're scanning
|
||||
// references popped from the queue. And we only push on the queue
|
||||
// references that we know point into the cset, so no point in
|
||||
// checking again. But we'll leave an assert here for peace of mind.
|
||||
assert(!skip_cset_test || _g1->obj_in_cs(obj), "invariant");
|
||||
|
||||
// here the null check is implicit in the cset_fast_test() test
|
||||
if (skip_cset_test || _g1->in_cset_fast_test(obj)) {
|
||||
#if G1_REM_SET_LOGGING
|
||||
gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" into CS.",
|
||||
p, (void*) obj);
|
||||
gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" "
|
||||
"into CS.", p, (void*) obj);
|
||||
#endif
|
||||
if (obj->is_forwarded()) {
|
||||
*p = obj->forwardee();
|
||||
} else {
|
||||
*p = copy_to_survivor_space(obj);
|
||||
}
|
||||
// When scanning the RS, we only care about objs in CS.
|
||||
if (barrier == G1BarrierRS) {
|
||||
_g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num());
|
||||
}
|
||||
if (obj->is_forwarded()) {
|
||||
*p = obj->forwardee();
|
||||
} else {
|
||||
*p = copy_to_survivor_space(obj);
|
||||
}
|
||||
// When scanning moved objs, must look at all oops.
|
||||
if (barrier == G1BarrierEvac) {
|
||||
// When scanning the RS, we only care about objs in CS.
|
||||
if (barrier == G1BarrierRS) {
|
||||
_g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num());
|
||||
}
|
||||
}
|
||||
|
||||
if (do_gen_barrier) {
|
||||
par_do_barrier(p);
|
||||
}
|
||||
// When scanning moved objs, must look at all oops.
|
||||
if (barrier == G1BarrierEvac && obj != NULL) {
|
||||
_g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num());
|
||||
}
|
||||
|
||||
if (do_gen_barrier && obj != NULL) {
|
||||
par_do_barrier(p);
|
||||
}
|
||||
}
|
||||
|
||||
template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p);
|
||||
template void G1ParCopyClosure<false, G1BarrierEvac, false, true>::do_oop_work(oop* p);
|
||||
|
||||
template <class T> void G1ParScanPartialArrayClosure::process_array_chunk(
|
||||
template<class T> void G1ParScanPartialArrayClosure::process_array_chunk(
|
||||
oop obj, int start, int end) {
|
||||
// process our set of indices (include header in first chunk)
|
||||
assert(start < end, "invariant");
|
||||
T* const base = (T*)objArrayOop(obj)->base();
|
||||
T* const start_addr = base + start;
|
||||
T* const start_addr = (start == 0) ? (T*) obj : base + start;
|
||||
T* const end_addr = base + end;
|
||||
MemRegion mr((HeapWord*)start_addr, (HeapWord*)end_addr);
|
||||
_scanner.set_region(_g1->heap_region_containing(obj));
|
||||
@ -3866,7 +4003,8 @@ template <class T> void G1ParScanPartialArrayClosure::process_array_chunk(
|
||||
|
||||
void G1ParScanPartialArrayClosure::do_oop_nv(oop* p) {
|
||||
assert(!UseCompressedOops, "Needs to be fixed to work with compressed oops");
|
||||
oop old = oop((intptr_t)p & ~G1_PARTIAL_ARRAY_MASK);
|
||||
assert(has_partial_array_mask(p), "invariant");
|
||||
oop old = clear_partial_array_mask(p);
|
||||
assert(old->is_objArray(), "must be obj array");
|
||||
assert(old->is_forwarded(), "must be forwarded");
|
||||
assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
|
||||
@ -3884,7 +4022,7 @@ void G1ParScanPartialArrayClosure::do_oop_nv(oop* p) {
|
||||
end = start + ParGCArrayScanChunk;
|
||||
arrayOop(old)->set_length(end);
|
||||
// Push remainder.
|
||||
_par_scan_state->push_on_queue((oop*) ((intptr_t) old | G1_PARTIAL_ARRAY_MASK));
|
||||
_par_scan_state->push_on_queue(set_partial_array_mask(old));
|
||||
} else {
|
||||
// Restore length so that the heap remains parsable in
|
||||
// case of evacuation failure.
|
||||
@ -3893,11 +4031,6 @@ void G1ParScanPartialArrayClosure::do_oop_nv(oop* p) {
|
||||
|
||||
// process our set of indices (include header in first chunk)
|
||||
process_array_chunk<oop>(obj, start, end);
|
||||
oop* start_addr = start == 0 ? (oop*)obj : obj->obj_at_addr<oop>(start);
|
||||
oop* end_addr = (oop*)(obj->base()) + end; // obj_at_addr(end) asserts end < length
|
||||
MemRegion mr((HeapWord*)start_addr, (HeapWord*)end_addr);
|
||||
_scanner.set_region(_g1->heap_region_containing(obj));
|
||||
obj->oop_iterate(&_scanner, mr);
|
||||
}
|
||||
|
||||
int G1ScanAndBalanceClosure::_nq = 0;
|
||||
@ -3931,6 +4064,13 @@ public:
|
||||
pss->hash_seed(),
|
||||
ref_to_scan)) {
|
||||
IF_G1_DETAILED_STATS(pss->note_steal());
|
||||
|
||||
// slightly paranoid tests; I'm trying to catch potential
|
||||
// problems before we go into push_on_queue to know where the
|
||||
// problem is coming from
|
||||
assert(ref_to_scan != NULL, "invariant");
|
||||
assert(has_partial_array_mask(ref_to_scan) ||
|
||||
_g1h->obj_in_cs(*ref_to_scan), "invariant");
|
||||
pss->push_on_queue(ref_to_scan);
|
||||
continue;
|
||||
}
|
||||
@ -3976,10 +4116,10 @@ public:
|
||||
ResourceMark rm;
|
||||
HandleMark hm;
|
||||
|
||||
G1ParScanThreadState pss(_g1h, i);
|
||||
G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss);
|
||||
G1ParScanHeapEvacClosure evac_failure_cl(_g1h, &pss);
|
||||
G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss);
|
||||
G1ParScanThreadState pss(_g1h, i);
|
||||
G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss);
|
||||
G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss);
|
||||
G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss);
|
||||
|
||||
pss.set_evac_closure(&scan_evac_cl);
|
||||
pss.set_evac_failure_closure(&evac_failure_cl);
|
||||
@ -4024,6 +4164,9 @@ public:
|
||||
_g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms);
|
||||
_g1h->g1_policy()->record_termination_time(i, term_ms);
|
||||
}
|
||||
if (G1UseSurvivorSpace) {
|
||||
_g1h->g1_policy()->record_thread_age_table(pss.age_table());
|
||||
}
|
||||
_g1h->update_surviving_young_words(pss.surviving_young_words()+1);
|
||||
|
||||
// Clean up any par-expanded rem sets.
|
||||
@ -4263,7 +4406,7 @@ void G1CollectedHeap::evacuate_collection_set() {
|
||||
// Is this the right thing to do here? We don't save marks
|
||||
// on individual heap regions when we allocate from
|
||||
// them in parallel, so this seems like the correct place for this.
|
||||
all_alloc_regions_note_end_of_copying();
|
||||
retire_all_alloc_regions();
|
||||
{
|
||||
G1IsAliveClosure is_alive(this);
|
||||
G1KeepAliveClosure keep_alive(this);
|
||||
@ -4903,7 +5046,7 @@ bool G1CollectedHeap::all_alloc_regions_no_allocs_since_save_marks() {
|
||||
return no_allocs;
|
||||
}
|
||||
|
||||
void G1CollectedHeap::all_alloc_regions_note_end_of_copying() {
|
||||
void G1CollectedHeap::retire_all_alloc_regions() {
|
||||
for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
|
||||
HeapRegion* r = _gc_alloc_regions[ap];
|
||||
if (r != NULL) {
|
||||
@ -4916,8 +5059,7 @@ void G1CollectedHeap::all_alloc_regions_note_end_of_copying() {
|
||||
}
|
||||
}
|
||||
if (!has_processed_alias) {
|
||||
r->note_end_of_copying();
|
||||
g1_policy()->record_after_bytes(r->used());
|
||||
retire_alloc_region(r, false /* par */);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -90,7 +90,7 @@ private:
|
||||
HeapRegion* _curr_scan_only;
|
||||
|
||||
HeapRegion* _survivor_head;
|
||||
HeapRegion* _survivors_tail;
|
||||
HeapRegion* _survivor_tail;
|
||||
size_t _survivor_length;
|
||||
|
||||
void empty_list(HeapRegion* list);
|
||||
@ -105,6 +105,7 @@ public:
|
||||
bool is_empty() { return _length == 0; }
|
||||
size_t length() { return _length; }
|
||||
size_t scan_only_length() { return _scan_only_length; }
|
||||
size_t survivor_length() { return _survivor_length; }
|
||||
|
||||
void rs_length_sampling_init();
|
||||
bool rs_length_sampling_more();
|
||||
@ -120,6 +121,7 @@ public:
|
||||
HeapRegion* first_region() { return _head; }
|
||||
HeapRegion* first_scan_only_region() { return _scan_only_head; }
|
||||
HeapRegion* first_survivor_region() { return _survivor_head; }
|
||||
HeapRegion* last_survivor_region() { return _survivor_tail; }
|
||||
HeapRegion* par_get_next_scan_only_region() {
|
||||
MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
|
||||
HeapRegion* ret = _curr_scan_only;
|
||||
@ -219,7 +221,7 @@ private:
|
||||
// The to-space memory regions into which objects are being copied during
|
||||
// a GC.
|
||||
HeapRegion* _gc_alloc_regions[GCAllocPurposeCount];
|
||||
uint _gc_alloc_region_counts[GCAllocPurposeCount];
|
||||
size_t _gc_alloc_region_counts[GCAllocPurposeCount];
|
||||
|
||||
// A list of the regions that have been set to be alloc regions in the
|
||||
// current collection.
|
||||
@ -247,6 +249,27 @@ private:
|
||||
NumberSeq _pop_obj_rc_at_copy;
|
||||
void print_popularity_summary_info() const;
|
||||
|
||||
// This is used for a quick test on whether a reference points into
// the collection set or not. Basically, we have an array, with one
// byte per region, and that byte denotes whether the corresponding
// region is in the collection set or not. The entry corresponding to
// the bottom of the heap, i.e., region 0, is pointed to by
// _in_cset_fast_test_base. The _in_cset_fast_test field has been
// biased so that it actually points to address 0 of the address
// space, to make the test as fast as possible (we can simply shift
// the address to index into it, instead of having to subtract the
// bottom of the heap from the address before shifting it; basically
// it works in the same way the card table works).
bool* _in_cset_fast_test;

// The allocated array used for the fast test on whether a reference
// points into the collection set or not. This field is also used to
// free the array.
bool* _in_cset_fast_test_base;

// The length of the _in_cset_fast_test_base array.
size_t _in_cset_fast_test_length;
|
||||
volatile unsigned _gc_time_stamp;
|
||||
|
||||
size_t* _surviving_young_words;
|
||||
@ -260,8 +283,8 @@ protected:
|
||||
// Returns "true" iff none of the gc alloc regions have any allocations
|
||||
// since the last call to "save_marks".
|
||||
bool all_alloc_regions_no_allocs_since_save_marks();
|
||||
// Calls "note_end_of_copying on all gc alloc_regions.
|
||||
void all_alloc_regions_note_end_of_copying();
|
||||
// Perform finalization stuff on all allocation regions.
|
||||
void retire_all_alloc_regions();
|
||||
|
||||
// The number of regions allocated to hold humongous objects.
|
||||
int _num_humongous_regions;
|
||||
@ -330,6 +353,10 @@ protected:
|
||||
// that parallel threads might be attempting allocations.
|
||||
void par_allocate_remaining_space(HeapRegion* r);
|
||||
|
||||
// Retires an allocation region when it is full or at the end of a
|
||||
// GC pause.
|
||||
void retire_alloc_region(HeapRegion* alloc_region, bool par);
|
||||
|
||||
// Helper function for two callbacks below.
|
||||
// "full", if true, indicates that the GC is for a System.gc() request,
|
||||
// and should collect the entire heap. If "clear_all_soft_refs" is true,
|
||||
@ -368,6 +395,38 @@ public:
|
||||
virtual void gc_prologue(bool full);
|
||||
virtual void gc_epilogue(bool full);
|
||||
|
||||
// We register a region with the fast "in collection set" test. We
// simply set to true the array slot corresponding to this region.
void register_region_with_in_cset_fast_test(HeapRegion* r) {
assert(_in_cset_fast_test_base != NULL, "sanity");
assert(r->in_collection_set(), "invariant");
int index = r->hrs_index();
assert(0 <= (size_t) index && (size_t) index < _in_cset_fast_test_length,
"invariant");
assert(!_in_cset_fast_test_base[index], "invariant");
_in_cset_fast_test_base[index] = true;
}

// This is a fast test on whether a reference points into the
// collection set or not. It does not assume that the reference
// points into the heap; if it doesn't, it will return false.
bool in_cset_fast_test(oop obj) {
assert(_in_cset_fast_test != NULL, "sanity");
if (_g1_committed.contains((HeapWord*) obj)) {
// no need to subtract the bottom of the heap from obj,
// _in_cset_fast_test is biased
size_t index = ((size_t) obj) >> HeapRegion::LogOfHRGrainBytes;
bool ret = _in_cset_fast_test[index];
// let's make sure the result is consistent with what the slower
// test returns
assert( ret || !obj_in_cs(obj), "sanity");
assert(!ret || obj_in_cs(obj), "sanity");
return ret;
} else {
return false;
}
}
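To make the biasing concrete, here is a small self-contained C++ sketch of the same trick with plain integers; region size, heap base and names are invented. HotSpot folds the bias into the table pointer itself so the query is shift-plus-load; the sketch keeps it as an explicit index offset to stay within standard C++.

#include <cstdint>
#include <cstdio>
#include <vector>

const int       kLogRegionBytes = 20;          // assume 1 MB regions
const uintptr_t kHeapBase       = 0x40000000;  // assumed heap base address
const size_t    kNumRegions     = 64;

std::vector<bool> in_cset_base(kNumRegions, false);

// The "bias" is the heap base expressed in region units; subtracting it maps
// a shifted address straight onto the table slot for its region.
const uintptr_t kBias = kHeapBase >> kLogRegionBytes;

bool in_cset_fast_test(uintptr_t addr) {
  if (addr < kHeapBase ||
      addr >= kHeapBase + (uintptr_t(kNumRegions) << kLogRegionBytes)) {
    return false;                              // not in the committed heap
  }
  return in_cset_base[(addr >> kLogRegionBytes) - kBias];
}

int main() {
  in_cset_base[3] = true;                      // pretend region 3 is in the cset
  uintptr_t p = kHeapBase + (uintptr_t(3) << kLogRegionBytes) + 128;
  std::printf("%d %d\n", (int) in_cset_fast_test(p),
                         (int) in_cset_fast_test(kHeapBase));
  return 0;
}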
|
||||
protected:
|
||||
|
||||
// Shrink the garbage-first heap by at most the given size (in bytes!).
|
||||
@ -850,6 +909,7 @@ public:
|
||||
|
||||
// Iterate over all objects, calling "cl.do_object" on each.
|
||||
virtual void object_iterate(ObjectClosure* cl);
|
||||
virtual void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }
|
||||
|
||||
// Iterate over all objects allocated since the last collection, calling
|
||||
// "cl.do_object" on each. The heap must have been initialized properly
|
||||
|
@ -36,8 +36,11 @@ G1CollectedHeap::heap_region_containing(const void* addr) const {
|
||||
|
||||
inline HeapRegion*
|
||||
G1CollectedHeap::heap_region_containing_raw(const void* addr) const {
|
||||
HeapRegion* res = _hrs->addr_to_region(addr);
|
||||
assert(res != NULL, "addr outside of heap?");
|
||||
assert(_g1_reserved.contains(addr), "invariant");
|
||||
size_t index = ((intptr_t) addr - (intptr_t) _g1_reserved.start())
|
||||
>> HeapRegion::LogOfHRGrainBytes;
|
||||
HeapRegion* res = _hrs->at(index);
|
||||
assert(res == _hrs->addr_to_region(addr), "sanity");
|
||||
return res;
|
||||
}
|
||||
|
||||
|
@ -196,8 +196,13 @@ G1CollectorPolicy::G1CollectorPolicy() :
|
||||
_short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
|
||||
G1YoungSurvRateNumRegionsSummary)),
|
||||
_survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
|
||||
G1YoungSurvRateNumRegionsSummary))
|
||||
G1YoungSurvRateNumRegionsSummary)),
|
||||
// add here any more surv rate groups
|
||||
_recorded_survivor_regions(0),
|
||||
_recorded_survivor_head(NULL),
|
||||
_recorded_survivor_tail(NULL),
|
||||
_survivors_age_table(true)
|
||||
|
||||
{
|
||||
_recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
|
||||
_prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
|
||||
@ -272,6 +277,15 @@ G1CollectorPolicy::G1CollectorPolicy() :
|
||||
_concurrent_mark_cleanup_times_ms->add(0.20);
|
||||
_tenuring_threshold = MaxTenuringThreshold;
|
||||
|
||||
if (G1UseSurvivorSpace) {
|
||||
// If G1FixedSurvivorSpaceSize is 0, the survivor space size is not
// fixed and _max_survivor_regions will be calculated by
// calculate_young_list_target_config during initialization.
_max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;
|
||||
} else {
|
||||
_max_survivor_regions = 0;
|
||||
}
|
||||
|
||||
initialize_all();
|
||||
}
|
||||
|
||||
@ -283,6 +297,9 @@ static void inc_mod(int& i, int len) {
|
||||
void G1CollectorPolicy::initialize_flags() {
|
||||
set_min_alignment(HeapRegion::GrainBytes);
|
||||
set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
|
||||
if (SurvivorRatio < 1) {
|
||||
vm_exit_during_initialization("Invalid survivor ratio specified");
|
||||
}
|
||||
CollectorPolicy::initialize_flags();
|
||||
}
|
||||
|
||||
@ -301,6 +318,8 @@ void G1CollectorPolicy::init() {
|
||||
"-XX:+UseConcMarkSweepGC.");
|
||||
}
|
||||
|
||||
initialize_gc_policy_counters();
|
||||
|
||||
if (G1Gen) {
|
||||
_in_young_gc_mode = true;
|
||||
|
||||
@ -322,6 +341,12 @@ void G1CollectorPolicy::init() {
|
||||
}
|
||||
}
|
||||
|
||||
// Create the jstat counters for the policy.
|
||||
void G1CollectorPolicy::initialize_gc_policy_counters()
|
||||
{
|
||||
_gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 2 + G1Gen);
|
||||
}
|
||||
|
||||
void G1CollectorPolicy::calculate_young_list_min_length() {
|
||||
_young_list_min_length = 0;
|
||||
|
||||
@ -352,6 +377,7 @@ void G1CollectorPolicy::calculate_young_list_target_config() {
|
||||
guarantee( so_length < _young_list_target_length, "invariant" );
|
||||
_young_list_so_prefix_length = so_length;
|
||||
}
|
||||
calculate_survivors_policy();
|
||||
}
|
||||
|
||||
// This method calculate the optimal scan-only set for a fixed young
|
||||
@ -448,6 +474,9 @@ void G1CollectorPolicy::calculate_young_list_target_config(size_t rs_lengths) {
|
||||
if (full_young_gcs() && _free_regions_at_end_of_collection > 0) {
|
||||
// we are in fully-young mode and there are free regions in the heap
|
||||
|
||||
double survivor_regions_evac_time =
|
||||
predict_survivor_regions_evac_time();
|
||||
|
||||
size_t min_so_length = 0;
|
||||
size_t max_so_length = 0;
|
||||
|
||||
@ -497,9 +526,8 @@ void G1CollectorPolicy::calculate_young_list_target_config(size_t rs_lengths) {
|
||||
scanned_cards = predict_non_young_card_num(adj_rs_lengths);
|
||||
// calculate this once, so that we don't have to recalculate it in
|
||||
// the innermost loop
|
||||
double base_time_ms = predict_base_elapsed_time_ms(pending_cards,
|
||||
scanned_cards);
|
||||
|
||||
double base_time_ms = predict_base_elapsed_time_ms(pending_cards, scanned_cards)
|
||||
+ survivor_regions_evac_time;
|
||||
// the result
|
||||
size_t final_young_length = 0;
|
||||
size_t final_so_length = 0;
|
||||
@ -548,14 +576,14 @@ void G1CollectorPolicy::calculate_young_list_target_config(size_t rs_lengths) {
|
||||
bool done = false;
|
||||
// this is the outermost loop
|
||||
while (!done) {
|
||||
#if 0
|
||||
#ifdef TRACE_CALC_YOUNG_CONFIG
|
||||
// leave this in for debugging, just in case
|
||||
gclog_or_tty->print_cr("searching between " SIZE_FORMAT " and " SIZE_FORMAT
|
||||
", incr " SIZE_FORMAT ", pass %s",
|
||||
from_so_length, to_so_length, so_length_incr,
|
||||
(pass == pass_type_coarse) ? "coarse" :
|
||||
(pass == pass_type_fine) ? "fine" : "final");
|
||||
#endif // 0
|
||||
#endif // TRACE_CALC_YOUNG_CONFIG
|
||||
|
||||
size_t so_length = from_so_length;
|
||||
size_t init_free_regions =
|
||||
@ -651,11 +679,11 @@ void G1CollectorPolicy::calculate_young_list_target_config(size_t rs_lengths) {
|
||||
guarantee( so_length_incr == so_coarse_increments, "invariant" );
|
||||
guarantee( final_so_length >= min_so_length, "invariant" );
|
||||
|
||||
#if 0
|
||||
#ifdef TRACE_CALC_YOUNG_CONFIG
|
||||
// leave this in for debugging, just in case
|
||||
gclog_or_tty->print_cr(" coarse pass: SO length " SIZE_FORMAT,
|
||||
final_so_length);
|
||||
#endif // 0
|
||||
#endif // TRACE_CALC_YOUNG_CONFIG
|
||||
|
||||
from_so_length =
|
||||
(final_so_length - min_so_length > so_coarse_increments) ?
|
||||
@ -687,12 +715,12 @@ void G1CollectorPolicy::calculate_young_list_target_config(size_t rs_lengths) {
|
||||
// of the optimal
|
||||
size_t new_so_length = 950 * final_so_length / 1000;
|
||||
|
||||
#if 0
|
||||
#ifdef TRACE_CALC_YOUNG_CONFIG
|
||||
// leave this in for debugging, just in case
|
||||
gclog_or_tty->print_cr(" fine pass: SO length " SIZE_FORMAT
|
||||
", setting it to " SIZE_FORMAT,
|
||||
final_so_length, new_so_length);
|
||||
#endif // 0
|
||||
#endif // TRACE_CALC_YOUNG_CONFIG
|
||||
|
||||
from_so_length = new_so_length;
|
||||
to_so_length = new_so_length;
|
||||
@ -719,7 +747,8 @@ void G1CollectorPolicy::calculate_young_list_target_config(size_t rs_lengths) {
|
||||
}
|
||||
|
||||
// we should have at least one region in the target young length
|
||||
_young_list_target_length = MAX2((size_t) 1, final_young_length);
|
||||
_young_list_target_length =
|
||||
MAX2((size_t) 1, final_young_length + _recorded_survivor_regions);
|
||||
if (final_so_length >= final_young_length)
|
||||
// and we need to ensure that the S-O length is not greater than
|
||||
// the target young length (this is being a bit careful)
|
||||
@ -734,7 +763,7 @@ void G1CollectorPolicy::calculate_young_list_target_config(size_t rs_lengths) {
|
||||
double end_time_sec = os::elapsedTime();
|
||||
double elapsed_time_ms = (end_time_sec - start_time_sec) * 1000.0;
|
||||
|
||||
#if 0
|
||||
#ifdef TRACE_CALC_YOUNG_CONFIG
|
||||
// leave this in for debugging, just in case
|
||||
gclog_or_tty->print_cr("target = %1.1lf ms, young = " SIZE_FORMAT
|
||||
", SO = " SIZE_FORMAT ", "
|
||||
@ -747,9 +776,9 @@ void G1CollectorPolicy::calculate_young_list_target_config(size_t rs_lengths) {
|
||||
calculations,
|
||||
full_young_gcs() ? "full" : "partial",
|
||||
should_initiate_conc_mark() ? " i-m" : "",
|
||||
in_marking_window(),
|
||||
in_marking_window_im());
|
||||
#endif // 0
|
||||
_in_marking_window,
|
||||
_in_marking_window_im);
|
||||
#endif // TRACE_CALC_YOUNG_CONFIG
|
||||
|
||||
if (_young_list_target_length < _young_list_min_length) {
|
||||
// bummer; this means that, if we do a pause when the optimal
|
||||
@ -768,14 +797,14 @@ void G1CollectorPolicy::calculate_young_list_target_config(size_t rs_lengths) {
|
||||
// S-O length
|
||||
so_length = calculate_optimal_so_length(_young_list_min_length);
|
||||
|
||||
#if 0
|
||||
#ifdef TRACE_CALC_YOUNG_CONFIG
|
||||
// leave this in for debugging, just in case
|
||||
gclog_or_tty->print_cr("adjusted target length from "
|
||||
SIZE_FORMAT " to " SIZE_FORMAT
|
||||
", SO " SIZE_FORMAT,
|
||||
_young_list_target_length, _young_list_min_length,
|
||||
so_length);
|
||||
#endif // 0
|
||||
#endif // TRACE_CALC_YOUNG_CONFIG
|
||||
|
||||
_young_list_target_length =
|
||||
MAX2(_young_list_min_length, (size_t)1);
|
||||
@ -785,12 +814,12 @@ void G1CollectorPolicy::calculate_young_list_target_config(size_t rs_lengths) {
|
||||
// we are in a partially-young mode or we've run out of regions (due
|
||||
// to evacuation failure)
|
||||
|
||||
#if 0
|
||||
#ifdef TRACE_CALC_YOUNG_CONFIG
|
||||
// leave this in for debugging, just in case
|
||||
gclog_or_tty->print_cr("(partial) setting target to " SIZE_FORMAT
|
||||
", SO " SIZE_FORMAT,
|
||||
_young_list_min_length, 0);
|
||||
#endif // 0
|
||||
#endif // TRACE_CALC_YOUNG_CONFIG
|
||||
|
||||
// we'll do the pause as soon as possible and with no S-O prefix
|
||||
// (see above for the reasons behind the latter)
|
||||
@ -884,6 +913,16 @@ G1CollectorPolicy::predict_gc_eff(size_t young_length,
|
||||
return true;
|
||||
}
|
||||
|
||||
double G1CollectorPolicy::predict_survivor_regions_evac_time() {
|
||||
double survivor_regions_evac_time = 0.0;
|
||||
for (HeapRegion * r = _recorded_survivor_head;
|
||||
r != NULL && r != _recorded_survivor_tail->get_next_young_region();
|
||||
r = r->get_next_young_region()) {
|
||||
survivor_regions_evac_time += predict_region_elapsed_time_ms(r, true);
|
||||
}
|
||||
return survivor_regions_evac_time;
|
||||
}
|
||||
|
||||
void G1CollectorPolicy::check_prediction_validity() {
|
||||
guarantee( adaptive_young_list_length(), "should not call this otherwise" );
|
||||
|
||||
@ -995,11 +1034,15 @@ void G1CollectorPolicy::record_full_collection_end() {
|
||||
_short_lived_surv_rate_group->start_adding_regions();
|
||||
// also call this on any additional surv rate groups
|
||||
|
||||
record_survivor_regions(0, NULL, NULL);
|
||||
|
||||
_prev_region_num_young = _region_num_young;
|
||||
_prev_region_num_tenured = _region_num_tenured;
|
||||
|
||||
_free_regions_at_end_of_collection = _g1->free_regions();
|
||||
_scan_only_regions_at_end_of_collection = 0;
|
||||
// Reset survivors SurvRateGroup.
|
||||
_survivor_surv_rate_group->reset();
|
||||
calculate_young_list_min_length();
|
||||
calculate_young_list_target_config();
|
||||
}
|
||||
@ -1104,6 +1147,10 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
|
||||
_short_lived_surv_rate_group->record_scan_only_prefix(short_lived_so_length);
|
||||
tag_scan_only(short_lived_so_length);
|
||||
|
||||
if (G1UseSurvivorSpace) {
|
||||
_survivors_age_table.clear();
|
||||
}
|
||||
|
||||
assert( verify_young_ages(), "region age verification" );
|
||||
}
|
||||
|
||||
@ -1965,9 +2012,6 @@ void G1CollectorPolicy::record_collection_pause_end(bool popular,
|
||||
// </NEW PREDICTION>
|
||||
|
||||
_target_pause_time_ms = -1.0;
|
||||
|
||||
// TODO: calculate tenuring threshold
|
||||
_tenuring_threshold = MaxTenuringThreshold;
|
||||
}
|
||||
|
||||
// <NEW PREDICTION>
|
||||
@ -2058,7 +2102,7 @@ G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
|
||||
guarantee( hr->is_young() && hr->age_in_surv_rate_group() != -1,
|
||||
"invariant" );
|
||||
int age = hr->age_in_surv_rate_group();
|
||||
double yg_surv_rate = predict_yg_surv_rate(age);
|
||||
double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
|
||||
bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
|
||||
}
|
||||
|
||||
@ -2091,7 +2135,7 @@ G1CollectorPolicy::record_cset_region(HeapRegion* hr, bool young) {
|
||||
}
|
||||
#if PREDICTIONS_VERBOSE
|
||||
if (young) {
|
||||
_recorded_young_bytes += hr->asSpace()->used();
|
||||
_recorded_young_bytes += hr->used();
|
||||
} else {
|
||||
_recorded_marked_bytes += hr->max_live_bytes();
|
||||
}
|
||||
@ -2119,11 +2163,6 @@ G1CollectorPolicy::end_recording_regions() {
|
||||
predict_non_young_card_num(_predicted_rs_lengths);
|
||||
_recorded_region_num = _recorded_young_regions + _recorded_non_young_regions;
|
||||
|
||||
_predicted_young_survival_ratio = 0.0;
|
||||
for (int i = 0; i < _recorded_young_regions; ++i)
|
||||
_predicted_young_survival_ratio += predict_yg_surv_rate(i);
|
||||
_predicted_young_survival_ratio /= (double) _recorded_young_regions;
|
||||
|
||||
_predicted_scan_only_scan_time_ms =
|
||||
predict_scan_only_time_ms(_recorded_scan_only_regions);
|
||||
_predicted_rs_update_time_ms =
|
||||
@ -2673,8 +2712,11 @@ G1CollectorPolicy::should_add_next_region_to_young_list() {
|
||||
assert(in_young_gc_mode(), "should be in young GC mode");
|
||||
bool ret;
|
||||
size_t young_list_length = _g1->young_list_length();
|
||||
|
||||
if (young_list_length < _young_list_target_length) {
|
||||
size_t young_list_max_length = _young_list_target_length;
|
||||
if (G1FixedEdenSize) {
|
||||
young_list_max_length -= _max_survivor_regions;
|
||||
}
|
||||
if (young_list_length < young_list_max_length) {
|
||||
ret = true;
|
||||
++_region_num_young;
|
||||
} else {
|
||||
@ -2710,17 +2752,39 @@ G1CollectorPolicy::checkpoint_conc_overhead() {
|
||||
}
|
||||
|
||||
|
||||
uint G1CollectorPolicy::max_regions(int purpose) {
|
||||
size_t G1CollectorPolicy::max_regions(int purpose) {
|
||||
switch (purpose) {
|
||||
case GCAllocForSurvived:
|
||||
return G1MaxSurvivorRegions;
|
||||
return _max_survivor_regions;
|
||||
case GCAllocForTenured:
|
||||
return UINT_MAX;
|
||||
return REGIONS_UNLIMITED;
|
||||
default:
|
||||
return UINT_MAX;
|
||||
ShouldNotReachHere();
|
||||
return REGIONS_UNLIMITED;
|
||||
};
|
||||
}
|
||||
|
||||
// Calculates survivor space parameters.
void G1CollectorPolicy::calculate_survivors_policy()
{
if (!G1UseSurvivorSpace) {
return;
}
if (G1FixedSurvivorSpaceSize == 0) {
_max_survivor_regions = _young_list_target_length / SurvivorRatio;
} else {
_max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;
}

if (G1FixedTenuringThreshold) {
_tenuring_threshold = MaxTenuringThreshold;
} else {
_tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
HeapRegion::GrainWords * _max_survivor_regions);
}
}
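compute_tenuring_threshold() is not shown in this patch, so the following is only a guessed-at sketch of the usual age-table policy it refers to: choose the smallest age at which the accumulated survivor bytes would overflow the desired survivor size. All names and numbers are illustrative.

#include <cstddef>
#include <cstdio>

const int kMaxAge = 16;                  // assumed table depth, not MaxTenuringThreshold

// sizes[a] = bytes of live objects currently at age a
int compute_tenuring_threshold(const size_t sizes[kMaxAge],
                               size_t desired_survivor_size) {
  size_t total = 0;
  int age = 1;
  while (age < kMaxAge) {
    total += sizes[age];
    if (total > desired_survivor_size) break;  // this age no longer fits
    age++;
  }
  return age;                                  // objects at or above this age get tenured
}

int main() {
  size_t sizes[kMaxAge] = { 0, 400, 300, 200, 100 };  // ages 5..15 empty
  // With a survivor budget of 800 bytes, ages 1 and 2 fit (700), age 3 does not.
  std::printf("threshold = %d\n", compute_tenuring_threshold(sizes, 800));
  return 0;
}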
|
||||
|
||||
void
|
||||
G1CollectorPolicy_BestRegionsFirst::
|
||||
set_single_region_collection_set(HeapRegion* hr) {
|
||||
@ -2743,7 +2807,11 @@ G1CollectorPolicy_BestRegionsFirst::should_do_collection_pause(size_t
|
||||
double max_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
|
||||
|
||||
size_t young_list_length = _g1->young_list_length();
|
||||
bool reached_target_length = young_list_length >= _young_list_target_length;
|
||||
size_t young_list_max_length = _young_list_target_length;
|
||||
if (G1FixedEdenSize) {
|
||||
young_list_max_length -= _max_survivor_regions;
|
||||
}
|
||||
bool reached_target_length = young_list_length >= young_list_max_length;
|
||||
|
||||
if (in_young_gc_mode()) {
|
||||
if (reached_target_length) {
|
||||
@ -2985,6 +3053,7 @@ add_to_collection_set(HeapRegion* hr) {
|
||||
_collection_set = hr;
|
||||
_collection_set_size++;
|
||||
_collection_set_bytes_used_before += hr->used();
|
||||
_g1->register_region_with_in_cset_fast_test(hr);
|
||||
}
|
||||
|
||||
void
|
||||
|
@ -49,7 +49,7 @@ public: \
|
||||
class MainBodySummary;
|
||||
class PopPreambleSummary;
|
||||
|
||||
class PauseSummary {
|
||||
class PauseSummary: public CHeapObj {
|
||||
define_num_seq(total)
|
||||
define_num_seq(other)
|
||||
|
||||
@ -58,7 +58,7 @@ public:
|
||||
virtual PopPreambleSummary* pop_preamble_summary() { return NULL; }
|
||||
};
|
||||
|
||||
class MainBodySummary {
|
||||
class MainBodySummary: public CHeapObj {
|
||||
define_num_seq(satb_drain) // optional
|
||||
define_num_seq(parallel) // parallel only
|
||||
define_num_seq(ext_root_scan)
|
||||
@ -75,7 +75,7 @@ class MainBodySummary {
|
||||
define_num_seq(clear_ct) // parallel only
|
||||
};
|
||||
|
||||
class PopPreambleSummary {
|
||||
class PopPreambleSummary: public CHeapObj {
|
||||
define_num_seq(pop_preamble)
|
||||
define_num_seq(pop_update_rs)
|
||||
define_num_seq(pop_scan_rs)
|
||||
@ -557,6 +557,8 @@ public:
|
||||
return get_new_neg_prediction(_young_gc_eff_seq);
|
||||
}
|
||||
|
||||
double predict_survivor_regions_evac_time();
|
||||
|
||||
// </NEW PREDICTION>
|
||||
|
||||
public:
|
||||
@ -599,8 +601,8 @@ public:
|
||||
|
||||
// Returns an estimate of the survival rate of the region at yg-age
|
||||
// "yg_age".
|
||||
double predict_yg_surv_rate(int age) {
|
||||
TruncatedSeq* seq = _short_lived_surv_rate_group->get_seq(age);
|
||||
double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) {
|
||||
TruncatedSeq* seq = surv_rate_group->get_seq(age);
|
||||
if (seq->num() == 0)
|
||||
gclog_or_tty->print("BARF! age is %d", age);
|
||||
guarantee( seq->num() > 0, "invariant" );
|
||||
@ -610,6 +612,10 @@ public:
|
||||
return pred;
|
||||
}
|
||||
|
||||
double predict_yg_surv_rate(int age) {
|
||||
return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
|
||||
}
|
||||
|
||||
double accum_yg_surv_rate_pred(int age) {
|
||||
return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
|
||||
}
|
||||
@ -822,6 +828,9 @@ public:
|
||||
|
||||
virtual void init();
|
||||
|
||||
// Create jstat counters for the policy.
|
||||
virtual void initialize_gc_policy_counters();
|
||||
|
||||
virtual HeapWord* mem_allocate_work(size_t size,
|
||||
bool is_tlab,
|
||||
bool* gc_overhead_limit_was_exceeded);
|
||||
@ -1047,8 +1056,12 @@ public:
|
||||
// Print stats on young survival ratio
|
||||
void print_yg_surv_rate_info() const;
|
||||
|
||||
void finished_recalculating_age_indexes() {
|
||||
_short_lived_surv_rate_group->finished_recalculating_age_indexes();
|
||||
void finished_recalculating_age_indexes(bool is_survivors) {
|
||||
if (is_survivors) {
|
||||
_survivor_surv_rate_group->finished_recalculating_age_indexes();
|
||||
} else {
|
||||
_short_lived_surv_rate_group->finished_recalculating_age_indexes();
|
||||
}
|
||||
// do that for any other surv rate groups
|
||||
}
|
||||
|
||||
@ -1097,6 +1110,17 @@ protected:
|
||||
// maximum amount of suvivors regions.
|
||||
int _tenuring_threshold;
|
||||
|
||||
// The limit on the number of regions allocated for survivors.
|
||||
size_t _max_survivor_regions;
|
||||
|
||||
// The amount of survor regions after a collection.
|
||||
size_t _recorded_survivor_regions;
|
||||
// List of survivor regions.
|
||||
HeapRegion* _recorded_survivor_head;
|
||||
HeapRegion* _recorded_survivor_tail;
|
||||
|
||||
ageTable _survivors_age_table;
|
||||
|
||||
public:
|
||||
|
||||
inline GCAllocPurpose
|
||||
@ -1116,7 +1140,9 @@ public:
|
||||
return GCAllocForTenured;
|
||||
}
|
||||
|
||||
uint max_regions(int purpose);
|
||||
static const size_t REGIONS_UNLIMITED = ~(size_t)0;
|
||||
|
||||
size_t max_regions(int purpose);
|
||||
|
||||
// The limit on regions for a particular purpose is reached.
|
||||
void note_alloc_region_limit_reached(int purpose) {
|
||||
@ -1132,6 +1158,23 @@ public:
|
||||
void note_stop_adding_survivor_regions() {
|
||||
_survivor_surv_rate_group->stop_adding_regions();
|
||||
}
|
||||
|
||||
void record_survivor_regions(size_t regions,
|
||||
HeapRegion* head,
|
||||
HeapRegion* tail) {
|
||||
_recorded_survivor_regions = regions;
|
||||
_recorded_survivor_head = head;
|
||||
_recorded_survivor_tail = tail;
|
||||
}
|
||||
|
||||
void record_thread_age_table(ageTable* age_table)
|
||||
{
|
||||
_survivors_age_table.merge_par(age_table);
|
||||
}
|
||||
|
||||
// Calculates survivor space parameters.
|
||||
void calculate_survivors_policy();
|
||||
|
||||
};
|
||||
|
||||
// This encapsulates a particular strategy for a g1 Collector.
|
||||
|
@ -28,7 +28,7 @@
|
||||
/***** ALL TIMES ARE IN SECS!!!!!!! *****/
|
||||
|
||||
// this is the "interface"
|
||||
class G1MMUTracker {
|
||||
class G1MMUTracker: public CHeapObj {
|
||||
protected:
|
||||
double _time_slice;
|
||||
double _max_gc_time; // this is per time slice
|
||||
@ -67,7 +67,7 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
class G1MMUTrackerQueueElem {
|
||||
class G1MMUTrackerQueueElem VALUE_OBJ_CLASS_SPEC {
|
||||
private:
|
||||
double _start_time;
|
||||
double _end_time;
|
||||
|
@ -77,6 +77,18 @@ public:
|
||||
|
||||
#define G1_PARTIAL_ARRAY_MASK 1

inline bool has_partial_array_mask(oop* ref) {
return (intptr_t) ref & G1_PARTIAL_ARRAY_MASK;
}

inline oop* set_partial_array_mask(oop obj) {
return (oop*) ((intptr_t) obj | G1_PARTIAL_ARRAY_MASK);
}

inline oop clear_partial_array_mask(oop* ref) {
return oop((intptr_t) ref & ~G1_PARTIAL_ARRAY_MASK);
}
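The mask above is ordinary low-bit pointer tagging: heap addresses are at least word aligned, so bit 0 is free to mark a queue entry as a partial-array chunk. A minimal stand-alone C++ version with an invented Obj type:

#include <cstdint>
#include <cassert>

const uintptr_t kPartialArrayMask = 1;

struct Obj { int payload; };   // any word-aligned object will do

bool has_partial_array_mask(Obj* ref) {
  return ((uintptr_t) ref & kPartialArrayMask) != 0;
}

Obj* set_partial_array_mask(Obj* obj) {
  assert(!has_partial_array_mask(obj));            // must start untagged
  return (Obj*) ((uintptr_t) obj | kPartialArrayMask);
}

Obj* clear_partial_array_mask(Obj* ref) {
  return (Obj*) ((uintptr_t) ref & ~kPartialArrayMask);
}

int main() {
  Obj o = { 42 };
  Obj* tagged = set_partial_array_mask(&o);
  assert(has_partial_array_mask(tagged));
  assert(clear_partial_array_mask(tagged) == &o);  // round-trips to the object
  return 0;
}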
|
||||
class G1ParScanPartialArrayClosure : public G1ParClosureSuper {
|
||||
G1ParScanClosure _scanner;
|
||||
template <class T> void process_array_chunk(oop obj, int start, int end);
|
||||
@ -101,7 +113,8 @@ public:
|
||||
G1ParClosureSuper(g1, par_scan_state), _scanner(scanner) { }
|
||||
};
|
||||
|
||||
template<bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee>
|
||||
template<bool do_gen_barrier, G1Barrier barrier,
|
||||
bool do_mark_forwardee, bool skip_cset_test>
|
||||
class G1ParCopyClosure : public G1ParCopyHelper {
|
||||
G1ParScanClosure _scanner;
|
||||
void do_oop_work(oop* p);
|
||||
@ -119,14 +132,22 @@ public:
|
||||
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
|
||||
};
|
||||
|
||||
typedef G1ParCopyClosure<false, G1BarrierNone, false> G1ParScanExtRootClosure;
|
||||
typedef G1ParCopyClosure<true, G1BarrierNone, false> G1ParScanPermClosure;
|
||||
typedef G1ParCopyClosure<false, G1BarrierNone, true> G1ParScanAndMarkExtRootClosure;
|
||||
typedef G1ParCopyClosure<true, G1BarrierNone, true> G1ParScanAndMarkPermClosure;
|
||||
typedef G1ParCopyClosure<false, G1BarrierRS, false> G1ParScanHeapRSClosure;
|
||||
typedef G1ParCopyClosure<false, G1BarrierRS, true> G1ParScanAndMarkHeapRSClosure;
|
||||
typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacClosure;
|
||||
|
||||
typedef G1ParCopyClosure<false, G1BarrierNone, false, false> G1ParScanExtRootClosure;
|
||||
typedef G1ParCopyClosure<true, G1BarrierNone, false, false> G1ParScanPermClosure;
|
||||
typedef G1ParCopyClosure<false, G1BarrierNone, true, false> G1ParScanAndMarkExtRootClosure;
|
||||
typedef G1ParCopyClosure<true, G1BarrierNone, true, false> G1ParScanAndMarkPermClosure;
|
||||
typedef G1ParCopyClosure<false, G1BarrierRS, false, false> G1ParScanHeapRSClosure;
|
||||
typedef G1ParCopyClosure<false, G1BarrierRS, true, false> G1ParScanAndMarkHeapRSClosure;
|
||||
// This is the only case when we set skip_cset_test. Basically, this
// closure is (should?) only be called directly while we're draining
// the overflow and task queues. In that case we know that the
// reference in question points into the collection set, otherwise we
// would not have pushed it on the queue.
typedef G1ParCopyClosure<false, G1BarrierEvac, false, true> G1ParScanHeapEvacClosure;
// We need a separate closure to handle references during evacuation
// failure processing, as it cannot assume that the reference already
// points to the collection set (like G1ParScanHeapEvacClosure does).
typedef G1ParCopyClosure<false, G1BarrierEvac, false, false> G1ParScanHeapEvacFailureClosure;
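The new skip_cset_test parameter is a compile-time switch, so each instantiation either keeps or drops the in-cset check at no runtime cost. A hedged, stand-alone illustration of the pattern (the closure, check and payload here are invented):

#include <cstdio>

bool in_collection_set(int ref) { return ref % 2 == 0; }  // stand-in check

template<bool skip_cset_test>
struct CopyClosure {
  void do_ref(int ref) {
    // With skip_cset_test == true the condition folds to 'true' and the
    // compiler can remove the call to in_collection_set() entirely.
    if (skip_cset_test || in_collection_set(ref)) {
      std::printf("copying %d\n", ref);
    }
  }
};

// Mirrors the two typedefs above: one instantiation trusts its caller,
// the other re-checks every reference it sees.
typedef CopyClosure<true>  EvacClosure;          // queue-drain path
typedef CopyClosure<false> EvacFailureClosure;   // may see arbitrary refs

int main() {
  EvacClosure trusted;
  EvacFailureClosure careful;
  trusted.do_ref(3);   // copied even though the stand-in check would say no
  careful.do_ref(3);   // filtered out by the check
  return 0;
}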
|
||||
class FilterIntoCSClosure: public OopClosure {
|
||||
G1CollectedHeap* _g1;
|
||||
|
@ -572,6 +572,9 @@ prepare_for_oops_into_collection_set_do() {
|
||||
}
|
||||
guarantee( _cards_scanned == NULL, "invariant" );
|
||||
_cards_scanned = NEW_C_HEAP_ARRAY(size_t, n_workers());
|
||||
for (uint i = 0; i < n_workers(); ++i) {
|
||||
_cards_scanned[i] = 0;
|
||||
}
|
||||
_total_cards_scanned = 0;
|
||||
}
|
||||
|
||||
|
@ -30,7 +30,7 @@ class CardTableModRefBarrierSet;
|
||||
class HRInto_G1RemSet;
|
||||
class ConcurrentG1Refine;
|
||||
|
||||
class G1RemSet {
|
||||
class G1RemSet: public CHeapObj {
|
||||
protected:
|
||||
G1CollectedHeap* _g1;
|
||||
|
||||
|
@ -28,7 +28,7 @@
|
||||
|
||||
#define G1_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct, manageable, product_rw) \
|
||||
\
|
||||
product(intx, ParallelGCG1AllocBufferSize, 4*K, \
|
||||
product(intx, ParallelGCG1AllocBufferSize, 8*K, \
|
||||
"Size of parallel G1 allocation buffers in to-space.") \
|
||||
\
|
||||
product(intx, G1TimeSliceMS, 500, \
|
||||
@ -281,7 +281,17 @@
|
||||
develop(bool, G1HRRSFlushLogBuffersOnVerify, false, \
|
||||
"Forces flushing of log buffers before verification.") \
|
||||
\
|
||||
product(intx, G1MaxSurvivorRegions, 0, \
|
||||
"The maximum number of survivor regions")
|
||||
product(bool, G1UseSurvivorSpace, true, \
|
||||
"When true, use survivor space.") \
|
||||
\
|
||||
product(bool, G1FixedTenuringThreshold, false, \
|
||||
"When set, G1 will not adjust the tenuring threshold") \
|
||||
\
|
||||
product(bool, G1FixedEdenSize, false, \
|
||||
"When set, G1 will not allocate unused survivor space regions") \
|
||||
\
|
||||
product(uintx, G1FixedSurvivorSpaceSize, 0, \
|
||||
"If non-0 is the size of the G1 survivor space, " \
|
||||
"otherwise SurvivorRatio is used to determine the size")
|
||||
|
||||
G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
|
||||
|
@ -32,11 +32,13 @@ enum G1Barrier {
|
||||
G1BarrierNone, G1BarrierRS, G1BarrierEvac
|
||||
};
|
||||
|
||||
template<bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee>
|
||||
template<bool do_gen_barrier, G1Barrier barrier,
|
||||
bool do_mark_forwardee, bool skip_cset_test>
|
||||
class G1ParCopyClosure;
|
||||
class G1ParScanClosure;
|
||||
|
||||
typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacClosure;
|
||||
typedef G1ParCopyClosure<false, G1BarrierEvac, false, true>
|
||||
G1ParScanHeapEvacClosure;
|
||||
|
||||
class FilterIntoCSClosure;
|
||||
class FilterOutOfRegionClosure;
|
||||
|
@ -566,7 +566,11 @@ class HeapRegion: public G1OffsetTableContigSpace {
|
||||
void note_end_of_copying() {
|
||||
assert(top() >= _next_top_at_mark_start,
|
||||
"Increase only");
|
||||
_next_top_at_mark_start = top();
|
||||
// Survivor regions will be scanned on the start of concurrent
|
||||
// marking.
|
||||
if (!is_survivor()) {
|
||||
_next_top_at_mark_start = top();
|
||||
}
|
||||
}
|
||||
|
||||
// Returns "false" iff no object in the region was allocated when the
|
||||
@ -829,7 +833,7 @@ class HeapRegionClosure : public StackObj {
|
||||
|
||||
// A linked list of heap regions. It leaves the "next" field
|
||||
// unspecified; that's up to subtypes.
|
||||
class RegionList {
|
||||
class RegionList VALUE_OBJ_CLASS_SPEC {
|
||||
protected:
|
||||
virtual HeapRegion* get_next(HeapRegion* chr) = 0;
|
||||
virtual void set_next(HeapRegion* chr,
|
||||
|
@ -65,9 +65,11 @@ protected:
|
||||
// We need access in order to union things into the base table.
|
||||
BitMap* bm() { return &_bm; }
|
||||
|
||||
#if PRT_COUNT_OCCUPIED
void recount_occupied() {
_occupied = (jint) bm()->count_one_bits();
}
#endif
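For context, recount_occupied() is just a population count over the PRT bitmap. A stand-alone equivalent over an array of 64-bit words, using std::bitset for the per-word count (the data is made up):

#include <bitset>
#include <cstdint>
#include <cstdio>

// Count the set bits across a small bitmap stored as 64-bit words,
// the same bookkeeping recount_occupied() performs via count_one_bits().
size_t count_one_bits(const uint64_t* words, size_t n_words) {
  size_t total = 0;
  for (size_t i = 0; i < n_words; ++i) {
    total += std::bitset<64>(words[i]).count();
  }
  return total;
}

int main() {
  uint64_t bm[2] = { 0xF0, 0x3 };          // 4 + 2 cards marked
  std::printf("occupied = %zu\n", count_one_bits(bm, 2));
  return 0;
}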
|
||||
PerRegionTable(HeapRegion* hr) :
|
||||
_hr(hr),
|
||||
@ -1144,7 +1146,9 @@ void HeapRegionRemSet::clear_outgoing_entries() {
|
||||
size_t i = _outgoing_region_map.get_next_one_offset(0);
|
||||
while (i < _outgoing_region_map.size()) {
|
||||
HeapRegion* to_region = g1h->region_at(i);
|
||||
to_region->rem_set()->clear_incoming_entry(hr());
|
||||
if (!to_region->in_collection_set()) {
|
||||
to_region->rem_set()->clear_incoming_entry(hr());
|
||||
}
|
||||
i = _outgoing_region_map.get_next_one_offset(i+1);
|
||||
}
|
||||
}
|
||||
|
@ -58,7 +58,7 @@ class SparsePRT;
|
||||
// is represented. If a deleted PRT is re-used, a thread adding a bit,
|
||||
// thinking the PRT is for a different region, does no harm.
|
||||
|
||||
class OtherRegionsTable: public CHeapObj {
|
||||
class OtherRegionsTable VALUE_OBJ_CLASS_SPEC {
|
||||
friend class HeapRegionRemSetIterator;
|
||||
|
||||
G1CollectedHeap* _g1h;
|
||||
|
@ -29,7 +29,7 @@
|
||||
|
||||
class PtrQueueSet;
|
||||
|
||||
class PtrQueue: public CHeapObj {
|
||||
class PtrQueue VALUE_OBJ_CLASS_SPEC {
|
||||
|
||||
protected:
|
||||
// The ptr queue set to which this queue belongs.
|
||||
@ -130,7 +130,7 @@ public:
|
||||
// In particular, the individual queues allocate buffers from this shared
|
||||
// set, and return completed buffers to the set.
|
||||
// All these variables are protected by the TLOQ_CBL_mon. XXX ???
|
||||
class PtrQueueSet: public CHeapObj {
|
||||
class PtrQueueSet VALUE_OBJ_CLASS_SPEC {
|
||||
|
||||
protected:
|
||||
|
||||
|
@ -33,7 +33,7 @@
|
||||
// old versions synchronously.
|
||||
|
||||
|
||||
class SparsePRTEntry {
|
||||
class SparsePRTEntry: public CHeapObj {
|
||||
public:
|
||||
enum SomePublicConstants {
|
||||
CardsPerEntry = (short)4,
|
||||
@ -167,7 +167,7 @@ public:
|
||||
};
|
||||
|
||||
// ValueObj because it will be embedded in HRRS iterator.
|
||||
class RSHashTableIter: public CHeapObj {
|
||||
class RSHashTableIter VALUE_OBJ_CLASS_SPEC {
|
||||
short _tbl_ind;
|
||||
short _bl_ind;
|
||||
short _card_ind;
|
||||
@ -213,7 +213,7 @@ class RSHashTableIter: public CHeapObj {
|
||||
|
||||
class SparsePRTIter;
|
||||
|
||||
class SparsePRT : public CHeapObj {
|
||||
class SparsePRT VALUE_OBJ_CLASS_SPEC {
|
||||
// Iterations are done on the _cur hash table, since they only need to
|
||||
// see entries visible at the start of a collection pause.
|
||||
// All other operations are done using the _next hash table.
|
||||
|
@ -29,23 +29,14 @@ SurvRateGroup::SurvRateGroup(G1CollectorPolicy* g1p,
|
||||
const char* name,
|
||||
size_t summary_surv_rates_len) :
|
||||
_g1p(g1p), _name(name),
|
||||
_all_regions_allocated(0),
|
||||
_curr_length(0), _scan_only_prefix(0), _setup_seq_num(0),
|
||||
_array_length(0), _surv_rate(NULL), _accum_surv_rate_pred(NULL),
|
||||
_accum_surv_rate(0.0), _surv_rate_pred(NULL), _last_pred(0.0),
|
||||
_summary_surv_rates_len(summary_surv_rates_len),
|
||||
_summary_surv_rates_max_len(0),
|
||||
_summary_surv_rates(NULL) {
|
||||
|
||||
// the following will set up the arrays with length 1
|
||||
_curr_length = 1;
|
||||
stop_adding_regions();
|
||||
guarantee( _array_length == 1, "invariant" );
|
||||
guarantee( _surv_rate_pred[0] != NULL, "invariant" );
|
||||
_surv_rate_pred[0]->add(0.4);
|
||||
all_surviving_words_recorded(false);
|
||||
_curr_length = 0;
|
||||
|
||||
_summary_surv_rates(NULL),
|
||||
_surv_rate(NULL),
|
||||
_accum_surv_rate_pred(NULL),
|
||||
_surv_rate_pred(NULL)
|
||||
{
|
||||
reset();
|
||||
if (summary_surv_rates_len > 0) {
|
||||
size_t length = summary_surv_rates_len;
|
||||
_summary_surv_rates = NEW_C_HEAP_ARRAY(NumberSeq*, length);
|
||||
@ -60,61 +51,80 @@ SurvRateGroup::SurvRateGroup(G1CollectorPolicy* g1p,
|
||||
start_adding_regions();
|
||||
}
|
||||
|
||||
|
||||
void SurvRateGroup::reset()
|
||||
{
|
||||
_all_regions_allocated = 0;
|
||||
_scan_only_prefix = 0;
|
||||
_setup_seq_num = 0;
|
||||
_stats_arrays_length = 0;
|
||||
_accum_surv_rate = 0.0;
|
||||
_last_pred = 0.0;
|
||||
// the following will set up the arrays with length 1
|
||||
_region_num = 1;
|
||||
stop_adding_regions();
|
||||
guarantee( _stats_arrays_length == 1, "invariant" );
|
||||
guarantee( _surv_rate_pred[0] != NULL, "invariant" );
|
||||
_surv_rate_pred[0]->add(0.4);
|
||||
all_surviving_words_recorded(false);
|
||||
_region_num = 0;
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
SurvRateGroup::start_adding_regions() {
|
||||
_setup_seq_num = _array_length;
|
||||
_curr_length = _scan_only_prefix;
|
||||
_setup_seq_num = _stats_arrays_length;
|
||||
_region_num = _scan_only_prefix;
|
||||
_accum_surv_rate = 0.0;
|
||||
|
||||
#if 0
|
||||
gclog_or_tty->print_cr("start adding regions, seq num %d, length %d",
|
||||
_setup_seq_num, _curr_length);
|
||||
gclog_or_tty->print_cr("[%s] start adding regions, seq num %d, length %d",
|
||||
_name, _setup_seq_num, _region_num);
|
||||
#endif // 0
|
||||
}
|
||||
|
||||
void
|
||||
SurvRateGroup::stop_adding_regions() {
|
||||
size_t length = _curr_length;
|
||||
|
||||
#if 0
|
||||
gclog_or_tty->print_cr("stop adding regions, length %d", length);
|
||||
gclog_or_tty->print_cr("[%s] stop adding regions, length %d", _name, _region_num);
|
||||
#endif // 0
|
||||
|
||||
if (length > _array_length) {
|
||||
if (_region_num > _stats_arrays_length) {
|
||||
double* old_surv_rate = _surv_rate;
|
||||
double* old_accum_surv_rate_pred = _accum_surv_rate_pred;
|
||||
TruncatedSeq** old_surv_rate_pred = _surv_rate_pred;
|
||||
|
||||
_surv_rate = NEW_C_HEAP_ARRAY(double, length);
|
||||
_surv_rate = NEW_C_HEAP_ARRAY(double, _region_num);
|
||||
if (_surv_rate == NULL) {
|
||||
vm_exit_out_of_memory(sizeof(double) * length,
|
||||
vm_exit_out_of_memory(sizeof(double) * _region_num,
|
||||
"Not enough space for surv rate array.");
|
||||
}
|
||||
_accum_surv_rate_pred = NEW_C_HEAP_ARRAY(double, length);
|
||||
_accum_surv_rate_pred = NEW_C_HEAP_ARRAY(double, _region_num);
|
||||
if (_accum_surv_rate_pred == NULL) {
|
||||
vm_exit_out_of_memory(sizeof(double) * length,
|
||||
vm_exit_out_of_memory(sizeof(double) * _region_num,
|
||||
"Not enough space for accum surv rate pred array.");
|
||||
}
|
||||
_surv_rate_pred = NEW_C_HEAP_ARRAY(TruncatedSeq*, length);
|
||||
_surv_rate_pred = NEW_C_HEAP_ARRAY(TruncatedSeq*, _region_num);
|
||||
if (_surv_rate == NULL) {
|
||||
vm_exit_out_of_memory(sizeof(TruncatedSeq*) * length,
|
||||
vm_exit_out_of_memory(sizeof(TruncatedSeq*) * _region_num,
|
||||
"Not enough space for surv rate pred array.");
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < _array_length; ++i)
|
||||
for (size_t i = 0; i < _stats_arrays_length; ++i)
|
||||
_surv_rate_pred[i] = old_surv_rate_pred[i];
|
||||
|
||||
#if 0
|
||||
gclog_or_tty->print_cr("stop adding regions, new seqs %d to %d",
|
||||
_array_length, length - 1);
|
||||
gclog_or_tty->print_cr("[%s] stop adding regions, new seqs %d to %d",
|
||||
_name, _array_length, _region_num - 1);
|
||||
#endif // 0
|
||||
|
||||
for (size_t i = _array_length; i < length; ++i) {
|
||||
for (size_t i = _stats_arrays_length; i < _region_num; ++i) {
|
||||
_surv_rate_pred[i] = new TruncatedSeq(10);
|
||||
// _surv_rate_pred[i]->add(last_pred);
|
||||
}
|
||||
|
||||
_array_length = length;
|
||||
_stats_arrays_length = _region_num;
|
||||
|
||||
if (old_surv_rate != NULL)
|
||||
FREE_C_HEAP_ARRAY(double, old_surv_rate);
|
||||
@ -124,7 +134,7 @@ SurvRateGroup::stop_adding_regions() {
|
||||
FREE_C_HEAP_ARRAY(NumberSeq*, old_surv_rate_pred);
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < _array_length; ++i)
|
||||
for (size_t i = 0; i < _stats_arrays_length; ++i)
|
||||
_surv_rate[i] = 0.0;
|
||||
}
|
||||
|
||||
@ -135,7 +145,7 @@ SurvRateGroup::accum_surv_rate(size_t adjustment) {
|
||||
|
||||
double ret = _accum_surv_rate;
|
||||
if (adjustment > 0) {
|
||||
TruncatedSeq* seq = get_seq(_curr_length+1);
|
||||
TruncatedSeq* seq = get_seq(_region_num+1);
|
||||
double surv_rate = _g1p->get_new_prediction(seq);
|
||||
ret += surv_rate;
|
||||
}
|
||||
@ -145,23 +155,23 @@ SurvRateGroup::accum_surv_rate(size_t adjustment) {
|
||||
|
||||
int
|
||||
SurvRateGroup::next_age_index() {
|
||||
TruncatedSeq* seq = get_seq(_curr_length);
|
||||
TruncatedSeq* seq = get_seq(_region_num);
|
||||
double surv_rate = _g1p->get_new_prediction(seq);
_accum_surv_rate += surv_rate;

++_curr_length;
++_region_num;
return (int) ++_all_regions_allocated;
}

void
SurvRateGroup::record_scan_only_prefix(size_t scan_only_prefix) {
guarantee( scan_only_prefix <= _curr_length, "pre-condition" );
guarantee( scan_only_prefix <= _region_num, "pre-condition" );
_scan_only_prefix = scan_only_prefix;
}

void
SurvRateGroup::record_surviving_words(int age_in_group, size_t surv_words) {
guarantee( 0 <= age_in_group && (size_t) age_in_group < _curr_length,
guarantee( 0 <= age_in_group && (size_t) age_in_group < _region_num,
"pre-condition" );
guarantee( _surv_rate[age_in_group] <= 0.00001,
"should only update each slot once" );
@ -178,15 +188,15 @@ SurvRateGroup::record_surviving_words(int age_in_group, size_t surv_words) {

void
SurvRateGroup::all_surviving_words_recorded(bool propagate) {
if (propagate && _curr_length > 0) { // conservative
double surv_rate = _surv_rate_pred[_curr_length-1]->last();
if (propagate && _region_num > 0) { // conservative
double surv_rate = _surv_rate_pred[_region_num-1]->last();

#if 0
gclog_or_tty->print_cr("propagating %1.2lf from %d to %d",
surv_rate, _curr_length, _array_length - 1);
#endif // 0

for (size_t i = _curr_length; i < _array_length; ++i) {
for (size_t i = _region_num; i < _stats_arrays_length; ++i) {
guarantee( _surv_rate[i] <= 0.00001,
"the slot should not have been updated" );
_surv_rate_pred[i]->add(surv_rate);
@ -195,7 +205,7 @@ SurvRateGroup::all_surviving_words_recorded(bool propagate) {

double accum = 0.0;
double pred = 0.0;
for (size_t i = 0; i < _array_length; ++i) {
for (size_t i = 0; i < _stats_arrays_length; ++i) {
pred = _g1p->get_new_prediction(_surv_rate_pred[i]);
if (pred > 1.0) pred = 1.0;
accum += pred;
@ -209,8 +219,8 @@ SurvRateGroup::all_surviving_words_recorded(bool propagate) {
void
SurvRateGroup::print() {
gclog_or_tty->print_cr("Surv Rate Group: %s (%d entries, %d scan-only)",
_name, _curr_length, _scan_only_prefix);
for (size_t i = 0; i < _curr_length; ++i) {
_name, _region_num, _scan_only_prefix);
for (size_t i = 0; i < _region_num; ++i) {
gclog_or_tty->print_cr(" age %4d surv rate %6.2lf %% pred %6.2lf %%%s",
i, _surv_rate[i] * 100.0,
_g1p->get_new_prediction(_surv_rate_pred[i]) * 100.0,

@ -29,7 +29,7 @@ private:
G1CollectorPolicy* _g1p;
const char* _name;

size_t _array_length;
size_t _stats_arrays_length;
double* _surv_rate;
double* _accum_surv_rate_pred;
double _last_pred;
@ -40,7 +40,7 @@ private:
size_t _summary_surv_rates_max_len;

int _all_regions_allocated;
size_t _curr_length;
size_t _region_num;
size_t _scan_only_prefix;
size_t _setup_seq_num;

@ -48,6 +48,7 @@ public:
SurvRateGroup(G1CollectorPolicy* g1p,
const char* name,
size_t summary_surv_rates_len);
void reset();
void start_adding_regions();
void stop_adding_regions();
void record_scan_only_prefix(size_t scan_only_prefix);
@ -55,22 +56,21 @@ public:
void all_surviving_words_recorded(bool propagate);
const char* name() { return _name; }

size_t region_num() { return _curr_length; }
size_t region_num() { return _region_num; }
size_t scan_only_length() { return _scan_only_prefix; }
double accum_surv_rate_pred(int age) {
assert(age >= 0, "must be");
if ((size_t)age < _array_length)
if ((size_t)age < _stats_arrays_length)
return _accum_surv_rate_pred[age];
else {
double diff = (double) (age - _array_length + 1);
return _accum_surv_rate_pred[_array_length-1] + diff * _last_pred;
double diff = (double) (age - _stats_arrays_length + 1);
return _accum_surv_rate_pred[_stats_arrays_length-1] + diff * _last_pred;
}
}

double accum_surv_rate(size_t adjustment);

TruncatedSeq* get_seq(size_t age) {
guarantee( 0 <= age, "pre-condition" );
if (age >= _setup_seq_num) {
guarantee( _setup_seq_num > 0, "invariant" );
age = _setup_seq_num-1;
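The accum_surv_rate_pred() accessor in the hunk above reads the accumulated prediction directly while the age is still covered by the statistics arrays, and past their end it extrapolates linearly by the last per-region prediction. A minimal standalone sketch of that arithmetic, with hypothetical parameter names rather than the HotSpot fields:

#include <cstddef>

// accum[0..stats_len-1] holds accumulated predictions; last_pred is the
// prediction for the last tracked age. Beyond the arrays the accumulated
// value is assumed to keep growing by last_pred per extra region.
double accum_surv_rate_pred(const double* accum, size_t stats_len,
                            double last_pred, size_t age) {
  if (age < stats_len) {
    return accum[age];
  }
  double extra = (double)(age - stats_len + 1);  // regions past the arrays
  return accum[stats_len - 1] + extra * last_pred;
}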
@ -28,6 +28,7 @@ binaryTreeDictionary.cpp allocationStats.hpp
binaryTreeDictionary.cpp binaryTreeDictionary.hpp
binaryTreeDictionary.cpp globals.hpp
binaryTreeDictionary.cpp ostream.hpp
binaryTreeDictionary.cpp space.inline.hpp
binaryTreeDictionary.cpp spaceDecorator.hpp

binaryTreeDictionary.hpp freeBlockDictionary.hpp

@ -31,9 +31,10 @@ bufferingOopClosure.hpp os.hpp
cardTableRS.cpp concurrentMark.hpp
cardTableRS.cpp g1SATBCardTableModRefBS.hpp

collectionSetChooser.cpp g1CollectedHeap.hpp
collectionSetChooser.cpp g1CollectedHeap.inline.hpp
collectionSetChooser.cpp g1CollectorPolicy.hpp
collectionSetChooser.cpp collectionSetChooser.hpp
collectionSetChooser.cpp space.inline.hpp

collectionSetChooser.hpp heapRegion.hpp
collectionSetChooser.hpp growableArray.hpp
@ -42,14 +43,16 @@ concurrentG1Refine.cpp atomic.hpp
concurrentG1Refine.cpp concurrentG1Refine.hpp
concurrentG1Refine.cpp concurrentG1RefineThread.hpp
concurrentG1Refine.cpp copy.hpp
concurrentG1Refine.cpp g1CollectedHeap.hpp
concurrentG1Refine.cpp g1CollectedHeap.inline.hpp
concurrentG1Refine.cpp g1RemSet.hpp
concurrentG1Refine.cpp space.inline.hpp

concurrentG1Refine.hpp globalDefinitions.hpp
concurrentG1Refine.hpp allocation.hpp

concurrentG1RefineThread.cpp concurrentG1Refine.hpp
concurrentG1RefineThread.cpp concurrentG1RefineThread.hpp
concurrentG1RefineThread.cpp g1CollectedHeap.hpp
concurrentG1RefineThread.cpp g1CollectedHeap.inline.hpp
concurrentG1RefineThread.cpp g1CollectorPolicy.hpp
concurrentG1RefineThread.cpp handles.inline.hpp
concurrentG1RefineThread.cpp mutexLocker.hpp
@ -166,10 +169,11 @@ g1CollectorPolicy.cpp concurrentMark.hpp
g1CollectorPolicy.cpp concurrentMarkThread.inline.hpp
g1CollectorPolicy.cpp debug.hpp
g1CollectorPolicy.cpp java.hpp
g1CollectorPolicy.cpp g1CollectedHeap.hpp
g1CollectorPolicy.cpp g1CollectedHeap.inline.hpp
g1CollectorPolicy.cpp g1CollectorPolicy.hpp
g1CollectorPolicy.cpp heapRegionRemSet.hpp
g1CollectorPolicy.cpp mutexLocker.hpp
g1CollectorPolicy.cpp gcPolicyCounters.hpp

g1CollectorPolicy.hpp collectorPolicy.hpp
g1CollectorPolicy.hpp collectionSetChooser.hpp
@ -187,7 +191,7 @@ g1MarkSweep.cpp biasedLocking.hpp
g1MarkSweep.cpp codeCache.hpp
g1MarkSweep.cpp events.hpp
g1MarkSweep.cpp fprofiler.hpp
g1MarkSweep.hpp g1CollectedHeap.hpp
g1MarkSweep.hpp g1CollectedHeap.inline.hpp
g1MarkSweep.cpp g1MarkSweep.hpp
g1MarkSweep.cpp gcLocker.hpp
g1MarkSweep.cpp genCollectedHeap.hpp
@ -226,7 +230,7 @@ g1MMUTracker.cpp ostream.hpp
g1MMUTracker.cpp mutexLocker.hpp

g1MMUTracker.hpp debug.hpp

g1MMUTracker.hpp allocation.hpp
g1RemSet.cpp bufferingOopClosure.hpp
g1RemSet.cpp concurrentG1Refine.hpp
g1RemSet.cpp concurrentG1RefineThread.hpp
@ -264,12 +268,13 @@ heapRegion.cpp heapRegionSeq.inline.hpp
heapRegion.cpp iterator.hpp
heapRegion.cpp oop.inline.hpp

heapRegion.hpp space.hpp
heapRegion.hpp space.inline.hpp
heapRegion.hpp spaceDecorator.hpp
heapRegion.hpp g1BlockOffsetTable.inline.hpp
heapRegion.hpp watermark.hpp
heapRegion.hpp g1_specialized_oop_closures.hpp
heapRegion.hpp survRateGroup.hpp
heapRegion.hpp ageTable.hpp

heapRegionRemSet.hpp sparsePRT.hpp

@ -283,7 +288,7 @@ heapRegionRemSet.cpp globalDefinitions.hpp
heapRegionRemSet.cpp space.inline.hpp

heapRegionSeq.cpp allocation.hpp
heapRegionSeq.cpp g1CollectedHeap.hpp
heapRegionSeq.cpp g1CollectedHeap.inline.hpp
heapRegionSeq.cpp heapRegionSeq.hpp

heapRegionSeq.hpp growableArray.hpp
@ -334,18 +339,18 @@ specialized_oop_closures.hpp g1_specialized_oop_closures.hpp
survRateGroup.hpp numberSeq.hpp

survRateGroup.cpp allocation.hpp
survRateGroup.cpp g1CollectedHeap.hpp
survRateGroup.cpp g1CollectedHeap.inline.hpp
survRateGroup.cpp g1CollectorPolicy.hpp
survRateGroup.cpp heapRegion.hpp
survRateGroup.cpp survRateGroup.hpp

thread.cpp concurrentMarkThread.inline.hpp

universe.cpp g1CollectedHeap.hpp
universe.cpp g1CollectedHeap.inline.hpp
universe.cpp g1CollectorPolicy.hpp

vm_operations_g1.hpp vmGCOperations.hpp

vm_operations_g1.cpp vm_operations_g1.hpp
vm_operations_g1.cpp g1CollectedHeap.hpp
vm_operations_g1.cpp g1CollectedHeap.inline.hpp
vm_operations_g1.cpp isGCActiveMark.hpp
@ -29,6 +29,8 @@ asParNewGeneration.cpp asParNewGeneration.hpp
asParNewGeneration.cpp cmsAdaptiveSizePolicy.hpp
asParNewGeneration.cpp cmsGCAdaptivePolicyCounters.hpp
asParNewGeneration.cpp defNewGeneration.inline.hpp
asParNewGeneration.cpp markOop.inline.hpp
asParNewGeneration.cpp markSweep.inline.hpp
asParNewGeneration.cpp oop.pcgc.inline.hpp
asParNewGeneration.cpp parNewGeneration.hpp
asParNewGeneration.cpp referencePolicy.hpp
@ -40,7 +42,7 @@ parCardTableModRefBS.cpp cardTableRS.hpp
parCardTableModRefBS.cpp java.hpp
parCardTableModRefBS.cpp mutexLocker.hpp
parCardTableModRefBS.cpp sharedHeap.hpp
parCardTableModRefBS.cpp space.hpp
parCardTableModRefBS.cpp space.inline.hpp
parCardTableModRefBS.cpp universe.hpp
parCardTableModRefBS.cpp virtualspace.hpp

@ -77,6 +79,7 @@ parNewGeneration.cpp resourceArea.hpp
parNewGeneration.cpp sharedHeap.hpp
parNewGeneration.cpp space.hpp
parNewGeneration.cpp spaceDecorator.hpp
parNewGeneration.cpp thread.hpp
parNewGeneration.cpp workgroup.hpp

parNewGeneration.hpp defNewGeneration.hpp

@ -302,6 +302,8 @@ psOldGen.hpp safepoint.hpp
psOldGen.hpp spaceCounters.hpp

psPermGen.cpp gcUtil.hpp
psPermGen.cpp markOop.inline.hpp
psPermGen.cpp markSweep.inline.hpp
psPermGen.cpp parallelScavengeHeap.hpp
psPermGen.cpp psMarkSweepDecorator.hpp
psPermGen.cpp psParallelCompact.hpp

@ -100,4 +100,4 @@ spaceCounters.hpp mutableSpace.hpp
spaceCounters.hpp perfData.hpp
spaceCounters.hpp generationCounters.hpp

vmGCOperations.cpp g1CollectedHeap.hpp
vmGCOperations.cpp g1CollectedHeap.inline.hpp
@ -404,6 +404,8 @@ void ParEvacuateFollowersClosure::do_void() {
if (terminator()->offer_termination()) break;
par_scan_state()->end_term_time();
}
assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
"Broken overflow list?");
// Finish the last termination pause.
par_scan_state()->end_term_time();
}
@ -456,6 +458,8 @@ ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level)
_is_alive_closure(this),
_plab_stats(YoungPLABSize, PLABWeight)
{
NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
NOT_PRODUCT(_num_par_pushes = 0;)
_task_queues = new ObjToScanQueueSet(ParallelGCThreads);
guarantee(_task_queues != NULL, "task_queues allocation failure.");

@ -993,12 +997,19 @@ oop ParNewGeneration::copy_to_survivor_space_avoiding_promotion_undo(
"push forwarded object");
}
// Push it on one of the queues of to-be-scanned objects.
if (!par_scan_state->work_queue()->push(obj_to_push)) {
bool simulate_overflow = false;
NOT_PRODUCT(
if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
// simulate a stack overflow
simulate_overflow = true;
}
)
if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
// Add stats for overflow pushes.
if (Verbose && PrintGCDetails) {
gclog_or_tty->print("queue overflow!\n");
}
push_on_overflow_list(old);
push_on_overflow_list(old, par_scan_state);
par_scan_state->note_overflow_push();
}
par_scan_state->note_push();
@ -1110,9 +1121,16 @@ oop ParNewGeneration::copy_to_survivor_space_with_undo(
"push forwarded object");
}
// Push it on one of the queues of to-be-scanned objects.
if (!par_scan_state->work_queue()->push(obj_to_push)) {
bool simulate_overflow = false;
NOT_PRODUCT(
if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
// simulate a stack overflow
simulate_overflow = true;
}
)
if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
// Add stats for overflow pushes.
push_on_overflow_list(old);
push_on_overflow_list(old, par_scan_state);
par_scan_state->note_overflow_push();
}
par_scan_state->note_push();
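Both copy_to_survivor_space variants above gain the same testing hook: under ParGCWorkQueueOverflowALot a non-product counter periodically pretends the work-queue push failed, so the overflow-list path gets exercised even when the queues never actually fill up. A self-contained sketch of that counter idea, using std::atomic and hypothetical names (FakeOverflow, interval) rather than the NOT_PRODUCT machinery:

#include <atomic>

class FakeOverflow {
 public:
  explicit FakeOverflow(int interval) : _interval(interval), _countdown(interval) {}

  // Fires roughly once every `interval` calls. Safe to call from several
  // threads; the worst case is a few closely spaced simulated overflows,
  // which only exercises the overflow code harder.
  bool should_fire() {
    if (_countdown.fetch_sub(1, std::memory_order_relaxed) <= 1) {
      _countdown.store(_interval, std::memory_order_relaxed);
      return true;
    }
    return false;
  }

 private:
  const int _interval;
  std::atomic<int> _countdown;
};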
@ -1135,89 +1153,190 @@ oop ParNewGeneration::copy_to_survivor_space_with_undo(
return forward_ptr;
}

void ParNewGeneration::push_on_overflow_list(oop from_space_obj) {
oop cur_overflow_list = _overflow_list;
#ifndef PRODUCT
// It's OK to call this multi-threaded; the worst thing
// that can happen is that we'll get a bunch of closely
// spaced simulated oveflows, but that's OK, in fact
// probably good as it would exercise the overflow code
// under contention.
bool ParNewGeneration::should_simulate_overflow() {
if (_overflow_counter-- <= 0) { // just being defensive
_overflow_counter = ParGCWorkQueueOverflowInterval;
return true;
} else {
return false;
}
}
#endif

#define BUSY (oop(0x1aff1aff))
void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) {
// if the object has been forwarded to itself, then we cannot
// use the klass pointer for the linked list. Instead we have
// to allocate an oopDesc in the C-Heap and use that for the linked list.
// XXX This is horribly inefficient when a promotion failure occurs
// and should be fixed. XXX FIX ME !!!
#ifndef PRODUCT
Atomic::inc_ptr(&_num_par_pushes);
assert(_num_par_pushes > 0, "Tautology");
#endif
if (from_space_obj->forwardee() == from_space_obj) {
oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1);
listhead->forward_to(from_space_obj);
from_space_obj = listhead;
}
while (true) {
from_space_obj->set_klass_to_list_ptr(cur_overflow_list);
oop observed_overflow_list =
(oop)Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur_overflow_list);
if (observed_overflow_list == cur_overflow_list) break;
// Otherwise...
oop observed_overflow_list = _overflow_list;
oop cur_overflow_list;
do {
cur_overflow_list = observed_overflow_list;
}
if (cur_overflow_list != BUSY) {
from_space_obj->set_klass_to_list_ptr(cur_overflow_list);
} else {
from_space_obj->set_klass_to_list_ptr(NULL);
}
observed_overflow_list =
(oop)Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur_overflow_list);
} while (cur_overflow_list != observed_overflow_list);
}

// *NOTE*: The overflow list manipulation code here and
// in CMSCollector:: are very similar in shape,
// except that in the CMS case we thread the objects
// directly into the list via their mark word, and do
// not need to deal with special cases below related
// to chunking of object arrays and promotion failure
// handling.
// CR 6797058 has been filed to attempt consolidation of
// the common code.
// Because of the common code, if you make any changes in
// the code below, please check the CMS version to see if
// similar changes might be needed.
// See CMSCollector::par_take_from_overflow_list() for
// more extensive documentation comments.
bool
ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) {
ObjToScanQueue* work_q = par_scan_state->work_queue();
assert(work_q->size() == 0, "Should first empty local work queue");
// How many to take?
int objsFromOverflow = MIN2(work_q->max_elems()/4,
(juint)ParGCDesiredObjsFromOverflowList);
size_t objsFromOverflow = MIN2((size_t)work_q->max_elems()/4,
(size_t)ParGCDesiredObjsFromOverflowList);

if (_overflow_list == NULL) return false;

// Otherwise, there was something there; try claiming the list.
oop prefix = (oop)Atomic::xchg_ptr(NULL, &_overflow_list);

if (prefix == NULL) {
return false;
}
oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
// Trim off a prefix of at most objsFromOverflow items
int i = 1;
Thread* tid = Thread::current();
size_t spin_count = (size_t)ParallelGCThreads;
size_t sleep_time_millis = MAX2((size_t)1, objsFromOverflow/100);
for (size_t spin = 0; prefix == BUSY && spin < spin_count; spin++) {
// someone grabbed it before we did ...
// ... we spin for a short while...
os::sleep(tid, sleep_time_millis, false);
if (_overflow_list == NULL) {
// nothing left to take
return false;
} else if (_overflow_list != BUSY) {
// try and grab the prefix
prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
}
}
if (prefix == NULL || prefix == BUSY) {
// Nothing to take or waited long enough
if (prefix == NULL) {
// Write back the NULL in case we overwrote it with BUSY above
// and it is still the same value.
(void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
}
return false;
}
assert(prefix != NULL && prefix != BUSY, "Error");
size_t i = 1;
oop cur = prefix;
while (i < objsFromOverflow && cur->klass_or_null() != NULL) {
i++; cur = oop(cur->klass());
}

// Reattach remaining (suffix) to overflow list
if (cur->klass_or_null() != NULL) {
oop suffix = oop(cur->klass());
cur->set_klass_to_list_ptr(NULL);

// Find last item of suffix list
oop last = suffix;
while (last->klass_or_null() != NULL) {
last = oop(last->klass());
if (cur->klass_or_null() == NULL) {
// Write back the NULL in lieu of the BUSY we wrote
// above and it is still the same value.
if (_overflow_list == BUSY) {
(void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
}
// Atomically prepend suffix to current overflow list
oop cur_overflow_list = _overflow_list;
while (true) {
last->set_klass_to_list_ptr(cur_overflow_list);
oop observed_overflow_list =
(oop)Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
if (observed_overflow_list == cur_overflow_list) break;
// Otherwise...
cur_overflow_list = observed_overflow_list;
} else {
assert(cur->klass_or_null() != BUSY, "Error");
oop suffix = oop(cur->klass()); // suffix will be put back on global list
cur->set_klass_to_list_ptr(NULL); // break off suffix
// It's possible that the list is still in the empty(busy) state
// we left it in a short while ago; in that case we may be
// able to place back the suffix.
oop observed_overflow_list = _overflow_list;
oop cur_overflow_list = observed_overflow_list;
bool attached = false;
while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
observed_overflow_list =
(oop) Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
if (cur_overflow_list == observed_overflow_list) {
attached = true;
break;
} else cur_overflow_list = observed_overflow_list;
}
if (!attached) {
// Too bad, someone else got in in between; we'll need to do a splice.
// Find the last item of suffix list
oop last = suffix;
while (last->klass_or_null() != NULL) {
last = oop(last->klass());
}
// Atomically prepend suffix to current overflow list
observed_overflow_list = _overflow_list;
do {
cur_overflow_list = observed_overflow_list;
if (cur_overflow_list != BUSY) {
// Do the splice ...
last->set_klass_to_list_ptr(cur_overflow_list);
} else { // cur_overflow_list == BUSY
last->set_klass_to_list_ptr(NULL);
}
observed_overflow_list =
(oop)Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
} while (cur_overflow_list != observed_overflow_list);
}
}

// Push objects on prefix list onto this thread's work queue
assert(cur != NULL, "program logic");
assert(prefix != NULL && prefix != BUSY, "program logic");
cur = prefix;
int n = 0;
ssize_t n = 0;
while (cur != NULL) {
oop obj_to_push = cur->forwardee();
oop next = oop(cur->klass_or_null());
cur->set_klass(obj_to_push->klass());
if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
obj_to_push = cur;
// This may be an array object that is self-forwarded. In that case, the list pointer
// space, cur, is not in the Java heap, but rather in the C-heap and should be freed.
if (!is_in_reserved(cur)) {
// This can become a scaling bottleneck when there is work queue overflow coincident
// with promotion failure.
oopDesc* f = cur;
FREE_C_HEAP_ARRAY(oopDesc, f);
} else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
obj_to_push = cur;
}
work_q->push(obj_to_push);
bool ok = work_q->push(obj_to_push);
assert(ok, "Should have succeeded");
cur = next;
n++;
}
par_scan_state->note_overflow_refill(n);
#ifndef PRODUCT
assert(_num_par_pushes >= n, "Too many pops?");
Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
#endif
return true;
}
#undef BUSY
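The push_on_overflow_list()/take_from_overflow_list() pair above implements a lock-free global list with a BUSY sentinel: pushers CAS a new head in (treating BUSY as "empty"), while a taker atomically exchanges the head with BUSY so concurrent pushers never observe a half-detached chain, and writes NULL back when it is done. A minimal, self-contained sketch of that claim-with-BUSY protocol using std::atomic and a plain linked node; unlike the HotSpot code it takes the whole list rather than a bounded prefix, and it does not thread the links through klass words:

#include <atomic>

struct Node { Node* next; };

static Node* const BUSY = reinterpret_cast<Node*>(0x1);

class OverflowStack {
 public:
  void push(Node* n) {
    Node* cur = _head.load();
    while (true) {
      // Never link to the sentinel; treat a BUSY head as an empty list.
      n->next = (cur == BUSY) ? nullptr : cur;
      if (_head.compare_exchange_weak(cur, n)) break;  // cur reloaded on failure
    }
  }

  // Detach the whole chain, or return nullptr if the list is empty or another
  // taker currently holds it.
  Node* take_all() {
    Node* prefix = _head.exchange(BUSY);
    if (prefix == nullptr || prefix == BUSY) {
      if (prefix == nullptr) {
        // We overwrote a genuine empty list with BUSY; put the NULL back
        // unless a pusher has already installed a new head.
        Node* expected = BUSY;
        _head.compare_exchange_strong(expected, nullptr);
      }
      return nullptr;
    }
    // We own the detached chain; publish "empty" again for everyone else.
    Node* expected = BUSY;
    _head.compare_exchange_strong(expected, nullptr);
    return prefix;
  }

 private:
  std::atomic<Node*> _head{nullptr};
};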
void ParNewGeneration::ref_processor_init()
{

@ -278,6 +278,7 @@ class ParNewGeneration: public DefNewGeneration {
friend class ParNewRefProcTask;
friend class ParNewRefProcTaskExecutor;
friend class ParScanThreadStateSet;
friend class ParEvacuateFollowersClosure;

private:
// XXX use a global constant instead of 64!
@ -296,6 +297,7 @@ class ParNewGeneration: public DefNewGeneration {
// klass-pointers (klass information already copied to the forwarded
// image.) Manipulated with CAS.
oop _overflow_list;
NOT_PRODUCT(ssize_t _num_par_pushes;)

// If true, older generation does not support promotion undo, so avoid.
static bool _avoid_promotion_undo;
@ -372,8 +374,12 @@ class ParNewGeneration: public DefNewGeneration {
oop copy_to_survivor_space_with_undo(ParScanThreadState* par_scan_state,
oop obj, size_t obj_sz, markOop m);

// in support of testing overflow code
NOT_PRODUCT(int _overflow_counter;)
NOT_PRODUCT(bool should_simulate_overflow();)

// Push the given (from-space) object on the global overflow list.
void push_on_overflow_list(oop from_space_obj);
void push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state);

// If the global overflow list is non-empty, move some tasks from it
// onto "work_q" (which must be empty). No more than 1/4 of the

@ -200,6 +200,7 @@ class ParallelScavengeHeap : public CollectedHeap {

void oop_iterate(OopClosure* cl);
void object_iterate(ObjectClosure* cl);
void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }
void permanent_oop_iterate(OopClosure* cl);
void permanent_object_iterate(ObjectClosure* cl);

@ -362,6 +362,10 @@ void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
if (PrintHeapAtGC) {
Universe::print_heap_after_gc();
}

#ifdef TRACESPINNING
ParallelTaskTerminator::print_termination_counts();
#endif
}

bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,

@ -116,7 +116,7 @@ void PSOldGen::initialize_work(const char* perf_data_name, int level) {
// ObjectSpace stuff
//

_object_space = new MutableSpace();
_object_space = new MutableSpace(virtual_space()->alignment());

if (_object_space == NULL)
vm_exit_during_initialization("Could not allocate an old gen space");
@ -385,10 +385,10 @@ void PSOldGen::post_resize() {
start_array()->set_covered_region(new_memregion);
Universe::heap()->barrier_set()->resize_covered_region(new_memregion);

HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();

// ALWAYS do this last!!
object_space()->set_end(virtual_space_high);
object_space()->initialize(new_memregion,
SpaceDecorator::DontClear,
SpaceDecorator::DontMangle);

assert(new_word_size == heap_word_size(object_space()->capacity_in_bytes()),
"Sanity");

@ -2203,6 +2203,10 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
collection_exit.ticks());
gc_task_manager()->print_task_time_stamps();
}

#ifdef TRACESPINNING
ParallelTaskTerminator::print_termination_counts();
#endif
}

bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,

@ -615,6 +615,10 @@ bool PSScavenge::invoke_no_policy() {
gc_task_manager()->print_task_time_stamps();
}

#ifdef TRACESPINNING
ParallelTaskTerminator::print_termination_counts();
#endif

return !promotion_failure_occurred;
}

@ -78,7 +78,7 @@ void PSVirtualSpace::release() {
_special = false;
}

bool PSVirtualSpace::expand_by(size_t bytes, bool pre_touch) {
bool PSVirtualSpace::expand_by(size_t bytes) {
assert(is_aligned(bytes), "arg not aligned");
DEBUG_ONLY(PSVirtualSpaceVerifier this_verifier(this));

@ -92,15 +92,6 @@ bool PSVirtualSpace::expand_by(size_t bytes, bool pre_touch) {
_committed_high_addr += bytes;
}

if (pre_touch || AlwaysPreTouch) {
for (char* curr = base_addr;
curr < _committed_high_addr;
curr += os::vm_page_size()) {
char tmp = *curr;
*curr = 0;
}
}

return result;
}

@ -255,7 +246,7 @@ PSVirtualSpaceHighToLow::PSVirtualSpaceHighToLow(ReservedSpace rs) {
DEBUG_ONLY(verify());
}

bool PSVirtualSpaceHighToLow::expand_by(size_t bytes, bool pre_touch) {
bool PSVirtualSpaceHighToLow::expand_by(size_t bytes) {
assert(is_aligned(bytes), "arg not aligned");
DEBUG_ONLY(PSVirtualSpaceVerifier this_verifier(this));

@ -269,15 +260,6 @@ bool PSVirtualSpaceHighToLow::expand_by(size_t bytes, bool pre_touch) {
_committed_low_addr -= bytes;
}

if (pre_touch || AlwaysPreTouch) {
for (char* curr = base_addr;
curr < _committed_high_addr;
curr += os::vm_page_size()) {
char tmp = *curr;
*curr = 0;
}
}

return result;
}

@ -80,7 +80,7 @@ class PSVirtualSpace : public CHeapObj {
inline void set_reserved(char* low_addr, char* high_addr, bool special);
inline void set_reserved(ReservedSpace rs);
inline void set_committed(char* low_addr, char* high_addr);
virtual bool expand_by(size_t bytes, bool pre_touch = false);
virtual bool expand_by(size_t bytes);
virtual bool shrink_by(size_t bytes);
virtual size_t expand_into(PSVirtualSpace* space, size_t bytes);
void release();
@ -127,7 +127,7 @@ class PSVirtualSpaceHighToLow : public PSVirtualSpace {
PSVirtualSpaceHighToLow(ReservedSpace rs, size_t alignment);
PSVirtualSpaceHighToLow(ReservedSpace rs);

virtual bool expand_by(size_t bytes, bool pre_touch = false);
virtual bool expand_by(size_t bytes);
virtual bool shrink_by(size_t bytes);
virtual size_t expand_into(PSVirtualSpace* space, size_t bytes);

@ -64,12 +64,12 @@ void PSYoungGen::initialize_work() {
}

if (UseNUMA) {
_eden_space = new MutableNUMASpace();
_eden_space = new MutableNUMASpace(virtual_space()->alignment());
} else {
_eden_space = new MutableSpace();
_eden_space = new MutableSpace(virtual_space()->alignment());
}
_from_space = new MutableSpace();
_to_space = new MutableSpace();
_from_space = new MutableSpace(virtual_space()->alignment());
_to_space = new MutableSpace(virtual_space()->alignment());

if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) {
vm_exit_during_initialization("Could not allocate a young gen space");
@ -67,6 +67,12 @@ void ageTable::merge(ageTable* subTable) {
}
}

void ageTable::merge_par(ageTable* subTable) {
for (int i = 0; i < table_size; i++) {
Atomic::add_ptr(subTable->sizes[i], &sizes[i]);
}
}

int ageTable::compute_tenuring_threshold(size_t survivor_capacity) {
size_t desired_survivor_size = (size_t)((((double) survivor_capacity)*TargetSurvivorRatio)/100);
size_t total = 0;

@ -56,6 +56,7 @@ class ageTable VALUE_OBJ_CLASS_SPEC {
// Merge another age table with the current one. Used
// for parallel young generation gc.
void merge(ageTable* subTable);
void merge_par(ageTable* subTable);

// calculate new tenuring threshold based on age information
int compute_tenuring_threshold(size_t survivor_capacity);
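merge_par() above folds a per-worker age table into the shared one with atomic adds, so several GC threads can merge concurrently without a lock. A sketch of the same pattern with std::atomic standing in for Atomic::add_ptr (table size and type names here are assumptions, not HotSpot definitions):

#include <atomic>
#include <cstddef>

const int kTableSize = 16;           // stand-in for the real table_size

struct SharedAgeTable {
  std::atomic<size_t> sizes[kTableSize];
  SharedAgeTable() { for (int i = 0; i < kTableSize; i++) sizes[i].store(0); }
};

struct LocalAgeTable {
  size_t sizes[kTableSize] = {};     // filled privately by one worker
};

void merge_par(SharedAgeTable& global, const LocalAgeTable& local) {
  for (int i = 0; i < kTableSize; i++) {
    // The atomic add is what lets workers merge without serializing.
    global.sizes[i].fetch_add(local.sizes[i], std::memory_order_relaxed);
  }
}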
@ -27,7 +27,7 @@
# include "incls/_mutableNUMASpace.cpp.incl"

MutableNUMASpace::MutableNUMASpace() {
MutableNUMASpace::MutableNUMASpace(size_t alignment) : MutableSpace(alignment) {
_lgrp_spaces = new (ResourceObj::C_HEAP) GrowableArray<LGRPSpace*>(0, true);
_page_size = os::vm_page_size();
_adaptation_cycles = 0;
@ -221,7 +221,7 @@ bool MutableNUMASpace::update_layout(bool force) {
}
}
if (!found) {
lgrp_spaces()->append(new LGRPSpace(lgrp_ids[i]));
lgrp_spaces()->append(new LGRPSpace(lgrp_ids[i], alignment()));
}
}

@ -443,10 +443,10 @@ void MutableNUMASpace::select_tails(MemRegion new_region, MemRegion intersection
// Is there bottom?
if (new_region.start() < intersection.start()) { // Yes
// Try to coalesce small pages into a large one.
if (UseLargePages && page_size() >= os::large_page_size()) {
HeapWord* p = (HeapWord*)round_to((intptr_t) intersection.start(), os::large_page_size());
if (UseLargePages && page_size() >= alignment()) {
HeapWord* p = (HeapWord*)round_to((intptr_t) intersection.start(), alignment());
if (new_region.contains(p)
&& pointer_delta(p, new_region.start(), sizeof(char)) >= os::large_page_size()) {
&& pointer_delta(p, new_region.start(), sizeof(char)) >= alignment()) {
if (intersection.contains(p)) {
intersection = MemRegion(p, intersection.end());
} else {
@ -462,10 +462,10 @@ void MutableNUMASpace::select_tails(MemRegion new_region, MemRegion intersection
// Is there top?
if (intersection.end() < new_region.end()) { // Yes
// Try to coalesce small pages into a large one.
if (UseLargePages && page_size() >= os::large_page_size()) {
HeapWord* p = (HeapWord*)round_down((intptr_t) intersection.end(), os::large_page_size());
if (UseLargePages && page_size() >= alignment()) {
HeapWord* p = (HeapWord*)round_down((intptr_t) intersection.end(), alignment());
if (new_region.contains(p)
&& pointer_delta(new_region.end(), p, sizeof(char)) >= os::large_page_size()) {
&& pointer_delta(new_region.end(), p, sizeof(char)) >= alignment()) {
if (intersection.contains(p)) {
intersection = MemRegion(intersection.start(), p);
} else {
@ -504,12 +504,12 @@ void MutableNUMASpace::merge_regions(MemRegion new_region, MemRegion* intersecti
// That's the only case we have to make an additional bias_region() call.
HeapWord* start = invalid_region->start();
HeapWord* end = invalid_region->end();
if (UseLargePages && page_size() >= os::large_page_size()) {
HeapWord *p = (HeapWord*)round_down((intptr_t) start, os::large_page_size());
if (UseLargePages && page_size() >= alignment()) {
HeapWord *p = (HeapWord*)round_down((intptr_t) start, alignment());
if (new_region.contains(p)) {
start = p;
}
p = (HeapWord*)round_to((intptr_t) end, os::large_page_size());
p = (HeapWord*)round_to((intptr_t) end, alignment());
if (new_region.contains(end)) {
end = p;
}
@ -526,7 +526,8 @@ void MutableNUMASpace::merge_regions(MemRegion new_region, MemRegion* intersecti

void MutableNUMASpace::initialize(MemRegion mr,
bool clear_space,
bool mangle_space) {
bool mangle_space,
bool setup_pages) {
assert(clear_space, "Reallocation will destory data!");
assert(lgrp_spaces()->length() > 0, "There should be at least one space");

@ -538,7 +539,7 @@ void MutableNUMASpace::initialize(MemRegion mr,

// Compute chunk sizes
size_t prev_page_size = page_size();
set_page_size(UseLargePages ? os::large_page_size() : os::vm_page_size());
set_page_size(UseLargePages ? alignment() : os::vm_page_size());
HeapWord* rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size());
HeapWord* rounded_end = (HeapWord*)round_down((intptr_t) end(), page_size());
size_t base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();
@ -666,7 +667,7 @@ void MutableNUMASpace::initialize(MemRegion mr,
}

// Clear space (set top = bottom) but never mangle.
s->initialize(new_region, SpaceDecorator::Clear, SpaceDecorator::DontMangle);
s->initialize(new_region, SpaceDecorator::Clear, SpaceDecorator::DontMangle, MutableSpace::DontSetupPages);

set_adaptation_cycles(samples_count());
}

@ -82,8 +82,8 @@ class MutableNUMASpace : public MutableSpace {
char* last_page_scanned() { return _last_page_scanned; }
void set_last_page_scanned(char* p) { _last_page_scanned = p; }
public:
LGRPSpace(int l) : _lgrp_id(l), _last_page_scanned(NULL), _allocation_failed(false) {
_space = new MutableSpace();
LGRPSpace(int l, size_t alignment) : _lgrp_id(l), _last_page_scanned(NULL), _allocation_failed(false) {
_space = new MutableSpace(alignment);
_alloc_rate = new AdaptiveWeightedAverage(NUMAChunkResizeWeight);
}
~LGRPSpace() {
@ -183,10 +183,10 @@ class MutableNUMASpace : public MutableSpace {

public:
GrowableArray<LGRPSpace*>* lgrp_spaces() const { return _lgrp_spaces; }
MutableNUMASpace();
MutableNUMASpace(size_t alignment);
virtual ~MutableNUMASpace();
// Space initialization.
virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space, bool setup_pages = SetupPages);
// Update space layout if necessary. Do all adaptive resizing job.
virtual void update();
// Update allocation rate averages.

@ -25,7 +25,10 @@
# include "incls/_precompiled.incl"
# include "incls/_mutableSpace.cpp.incl"

MutableSpace::MutableSpace(): ImmutableSpace(), _top(NULL) {
MutableSpace::MutableSpace(size_t alignment): ImmutableSpace(), _top(NULL), _alignment(alignment) {
assert(MutableSpace::alignment() >= 0 &&
MutableSpace::alignment() % os::vm_page_size() == 0,
"Space should be aligned");
_mangler = new MutableSpaceMangler(this);
}

@ -33,16 +36,88 @@ MutableSpace::~MutableSpace() {
delete _mangler;
}

void MutableSpace::numa_setup_pages(MemRegion mr, bool clear_space) {
if (!mr.is_empty()) {
size_t page_size = UseLargePages ? alignment() : os::vm_page_size();
HeapWord *start = (HeapWord*)round_to((intptr_t) mr.start(), page_size);
HeapWord *end = (HeapWord*)round_down((intptr_t) mr.end(), page_size);
if (end > start) {
size_t size = pointer_delta(end, start, sizeof(char));
if (clear_space) {
// Prefer page reallocation to migration.
os::free_memory((char*)start, size);
}
os::numa_make_global((char*)start, size);
}
}
}

void MutableSpace::pretouch_pages(MemRegion mr) {
for (volatile char *p = (char*)mr.start(); p < (char*)mr.end(); p += os::vm_page_size()) {
char t = *p; *p = t;
}
}

void MutableSpace::initialize(MemRegion mr,
bool clear_space,
bool mangle_space) {
HeapWord* bottom = mr.start();
HeapWord* end = mr.end();
bool mangle_space,
bool setup_pages) {

assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
assert(Universe::on_page_boundary(mr.start()) && Universe::on_page_boundary(mr.end()),
"invalid space boundaries");
set_bottom(bottom);
set_end(end);

if (setup_pages && (UseNUMA || AlwaysPreTouch)) {
// The space may move left and right or expand/shrink.
// We'd like to enforce the desired page placement.
MemRegion head, tail;
if (last_setup_region().is_empty()) {
// If it's the first initialization don't limit the amount of work.
head = mr;
tail = MemRegion(mr.end(), mr.end());
} else {
// Is there an intersection with the address space?
MemRegion intersection = last_setup_region().intersection(mr);
if (intersection.is_empty()) {
intersection = MemRegion(mr.end(), mr.end());
}
// All the sizes below are in words.
size_t head_size = 0, tail_size = 0;
if (mr.start() <= intersection.start()) {
head_size = pointer_delta(intersection.start(), mr.start());
}
if(intersection.end() <= mr.end()) {
tail_size = pointer_delta(mr.end(), intersection.end());
}
// Limit the amount of page manipulation if necessary.
if (NUMASpaceResizeRate > 0 && !AlwaysPreTouch) {
const size_t change_size = head_size + tail_size;
const float setup_rate_words = NUMASpaceResizeRate >> LogBytesPerWord;
head_size = MIN2((size_t)(setup_rate_words * head_size / change_size),
head_size);
tail_size = MIN2((size_t)(setup_rate_words * tail_size / change_size),
tail_size);
}
head = MemRegion(intersection.start() - head_size, intersection.start());
tail = MemRegion(intersection.end(), intersection.end() + tail_size);
}
assert(mr.contains(head) && mr.contains(tail), "Sanity");

if (UseNUMA) {
numa_setup_pages(head, clear_space);
numa_setup_pages(tail, clear_space);
}

if (AlwaysPreTouch) {
pretouch_pages(head);
pretouch_pages(tail);
}

// Remember where we stopped so that we can continue later.
set_last_setup_region(MemRegion(head.start(), tail.end()));
}

set_bottom(mr.start());
set_end(mr.end());

if (clear_space) {
clear(mangle_space);
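The new MutableSpace::numa_setup_pages()/pretouch_pages() helpers above do two kinds of page preparation: on NUMA systems the backing pages are freed and re-made interleaved so placement follows the space, and with AlwaysPreTouch every page is touched once so the OS materializes it before mutators allocate. A small sketch of the pretouch half only, using POSIX mmap/sysconf in place of the os:: wrappers (Linux-flavoured flags; illustrative, not the HotSpot implementation):

#include <sys/mman.h>
#include <unistd.h>
#include <cstddef>

// Touch one byte per OS page so the kernel backs the whole range now,
// instead of taking page faults on first mutator access.
static void pretouch(char* start, size_t bytes) {
  const size_t page = (size_t)sysconf(_SC_PAGESIZE);
  for (volatile char* p = start; p < start + bytes; p += page) {
    char t = *p;
    *p = t;
  }
}

int main() {
  const size_t len = 16 * 1024 * 1024;
  char* mem = (char*)mmap(nullptr, len, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (mem == MAP_FAILED) return 1;
  pretouch(mem, len);       // all pages resident before they are used
  munmap(mem, len);
  return 0;
}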
@ -25,7 +25,10 @@
// A MutableSpace is a subtype of ImmutableSpace that supports the
// concept of allocation. This includes the concepts that a space may
// be only partially full, and the querry methods that go with such
// an assumption.
// an assumption. MutableSpace is also responsible for minimizing the
// page allocation time by having the memory pretouched (with
// AlwaysPretouch) and for optimizing page placement on NUMA systems
// by make the underlying region interleaved (with UseNUMA).
//
// Invariant: (ImmutableSpace +) bottom() <= top() <= end()
// top() is inclusive and end() is exclusive.
@ -37,15 +40,23 @@ class MutableSpace: public ImmutableSpace {

// Helper for mangling unused space in debug builds
MutableSpaceMangler* _mangler;

// The last region which page had been setup to be interleaved.
MemRegion _last_setup_region;
size_t _alignment;
protected:
HeapWord* _top;

MutableSpaceMangler* mangler() { return _mangler; }

void numa_setup_pages(MemRegion mr, bool clear_space);
void pretouch_pages(MemRegion mr);

void set_last_setup_region(MemRegion mr) { _last_setup_region = mr; }
MemRegion last_setup_region() const { return _last_setup_region; }

public:
virtual ~MutableSpace();
MutableSpace();
MutableSpace(size_t page_size);

// Accessors
HeapWord* top() const { return _top; }
@ -57,13 +68,20 @@ class MutableSpace: public ImmutableSpace {
virtual void set_bottom(HeapWord* value) { _bottom = value; }
virtual void set_end(HeapWord* value) { _end = value; }

size_t alignment() { return _alignment; }

// Returns a subregion containing all objects in this space.
MemRegion used_region() { return MemRegion(bottom(), top()); }

static const bool SetupPages = true;
static const bool DontSetupPages = false;

// Initialization
virtual void initialize(MemRegion mr,
bool clear_space,
bool mangle_space);
bool mangle_space,
bool setup_pages = SetupPages);

virtual void clear(bool mangle_space);
// Does the usual initialization but optionally resets top to bottom.
#if 0 // MANGLE_SPACE

@ -42,6 +42,7 @@ class Thread;
class CollectedHeap : public CHeapObj {
friend class VMStructs;
friend class IsGCActiveMark; // Block structured external access to _is_gc_active
friend class constantPoolCacheKlass; // allocate() method inserts is_conc_safe

#ifdef ASSERT
static int _fire_out_of_memory_count;
@ -82,8 +83,6 @@ class CollectedHeap : public CHeapObj {
// Reinitialize tlabs before resuming mutators.
virtual void resize_all_tlabs();

debug_only(static void check_for_valid_allocation_state();)

protected:
// Allocate from the current thread's TLAB, with broken-out slow path.
inline static HeapWord* allocate_from_tlab(Thread* thread, size_t size);
@ -142,6 +141,7 @@ class CollectedHeap : public CHeapObj {
PRODUCT_RETURN;
virtual void check_for_non_bad_heap_word_value(HeapWord* addr, size_t size)
PRODUCT_RETURN;
debug_only(static void check_for_valid_allocation_state();)

public:
enum Name {
@ -466,6 +466,10 @@ class CollectedHeap : public CHeapObj {
// This includes objects in permanent memory.
virtual void object_iterate(ObjectClosure* cl) = 0;

// Similar to object_iterate() except iterates only
// over live objects.
virtual void safe_object_iterate(ObjectClosure* cl) = 0;

// Behaves the same as oop_iterate, except only traverses
// interior pointers contained in permanent memory. If there
// is no permanent memory, does nothing.

@ -140,6 +140,7 @@ c2_globals_<os_family>.hpp globalDefinitions.hpp
c2_globals_<os_family>.hpp macros.hpp

c2_init_<arch>.cpp compile.hpp
c2_init_<arch>.cpp node.hpp

c2compiler.cpp ad_<arch_model>.hpp
c2compiler.cpp c2compiler.hpp
@ -839,6 +840,7 @@ parseHelper.cpp systemDictionary.hpp
phase.cpp compile.hpp
phase.cpp compileBroker.hpp
phase.cpp nmethod.hpp
phase.cpp node.hpp
phase.cpp phase.hpp

phase.hpp port.hpp

@ -1311,6 +1311,7 @@ cppInterpreter_<arch>.cpp bytecodeHistogram.hpp
cppInterpreter_<arch>.cpp debug.hpp
cppInterpreter_<arch>.cpp deoptimization.hpp
cppInterpreter_<arch>.cpp frame.inline.hpp
cppInterpreter_<arch>.cpp interfaceSupport.hpp
cppInterpreter_<arch>.cpp interpreterRuntime.hpp
cppInterpreter_<arch>.cpp interpreter.hpp
cppInterpreter_<arch>.cpp interpreterGenerator.hpp
@ -2014,7 +2015,7 @@ instanceKlass.cpp verifier.hpp
instanceKlass.cpp vmSymbols.hpp

instanceKlass.hpp accessFlags.hpp
instanceKlass.hpp bitMap.hpp
instanceKlass.hpp bitMap.inline.hpp
instanceKlass.hpp constMethodOop.hpp
instanceKlass.hpp constantPoolOop.hpp
instanceKlass.hpp handles.hpp
@ -3771,6 +3772,7 @@ spaceDecorator.hpp space.hpp

spaceDecorator.cpp copy.hpp
spaceDecorator.cpp spaceDecorator.hpp
spaceDecorator.cpp space.inline.hpp

specialized_oop_closures.cpp ostream.hpp
specialized_oop_closures.cpp specialized_oop_closures.hpp

@ -59,6 +59,8 @@ dump.cpp vm_operations.hpp

dump_<arch_model>.cpp assembler_<arch>.inline.hpp
dump_<arch_model>.cpp compactingPermGenGen.hpp
dump_<arch_model>.cpp generation.inline.hpp
dump_<arch_model>.cpp space.inline.hpp

forte.cpp collectedHeap.inline.hpp
forte.cpp debugInfoRec.hpp

@ -163,7 +163,7 @@
#ifdef USELABELS
// Have to do this dispatch this way in C++ because otherwise gcc complains about crossing an
// initialization (which is is the initialization of the table pointer...)
#define DISPATCH(opcode) goto *dispatch_table[opcode]
#define DISPATCH(opcode) goto *(void*)dispatch_table[opcode]
#define CONTINUE { \
opcode = *pc; \
DO_UPDATE_INSTRUCTION_COUNT(opcode); \
@ -341,7 +341,7 @@
*/
#undef CHECK_NULL
#define CHECK_NULL(obj_) \
if ((obj_) == 0) { \
if ((obj_) == NULL) { \
VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), ""); \
}

@ -1362,7 +1362,7 @@ run:

#define NULL_COMPARISON_NOT_OP(name) \
CASE(_if##name): { \
int skip = (!(STACK_OBJECT(-1) == 0)) \
int skip = (!(STACK_OBJECT(-1) == NULL)) \
? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
address branch_pc = pc; \
UPDATE_PC_AND_TOS(skip, -1); \
@ -1372,7 +1372,7 @@ run:

#define NULL_COMPARISON_OP(name) \
CASE(_if##name): { \
int skip = ((STACK_OBJECT(-1) == 0)) \
int skip = ((STACK_OBJECT(-1) == NULL)) \
? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
address branch_pc = pc; \
UPDATE_PC_AND_TOS(skip, -1); \

@ -66,7 +66,6 @@ friend class CppInterpreterGenerator;
friend class InterpreterGenerator;
friend class InterpreterMacroAssembler;
friend class frame;
friend class SharedRuntime;
friend class VMStructs;

public:

@ -48,9 +48,14 @@ void Rewriter::compute_index_maps(constantPoolHandle pool, intArray*& index_map,

// Creates a constant pool cache given an inverse_index_map
// This creates the constant pool cache initially in a state
// that is unsafe for concurrent GC processing but sets it to
// a safe mode before the constant pool cache is returned.
constantPoolCacheHandle Rewriter::new_constant_pool_cache(intArray& inverse_index_map, TRAPS) {
const int length = inverse_index_map.length();
constantPoolCacheOop cache = oopFactory::new_constantPoolCache(length, CHECK_(constantPoolCacheHandle()));
constantPoolCacheOop cache = oopFactory::new_constantPoolCache(length,
methodOopDesc::IsUnsafeConc,
CHECK_(constantPoolCacheHandle()));
cache->initialize(inverse_index_map);
return constantPoolCacheHandle(THREAD, cache);
}

@ -346,9 +346,12 @@ int32 cmpstr(const void *k1, const void *k2) {
return strcmp((const char *)k1,(const char *)k2);
}

// Slimey cheap key comparator.
// Cheap key comparator.
int32 cmpkey(const void *key1, const void *key2) {
return (int32)((intptr_t)key1 - (intptr_t)key2);
if (key1 == key2) return 0;
intptr_t delta = (intptr_t)key1 - (intptr_t)key2;
if (delta > 0) return 1;
return -1;
}

//=============================================================================
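The cmpkey() change above is a correctness fix: casting the pointer difference to int32 can truncate on 64-bit platforms, so two distinct keys that happen to lie a multiple of 2^32 apart compare as equal (or with the wrong sign). The patched version decides the ordering on the full-width difference. A small demonstration, assuming 64-bit pointers (the addresses below are made up for illustration):

#include <cstdint>
#include <cstdio>

static int cmp_old(const void* k1, const void* k2) {
  return (int32_t)((intptr_t)k1 - (intptr_t)k2);       // truncates to 32 bits
}

static int cmp_new(const void* k1, const void* k2) {
  if (k1 == k2) return 0;
  intptr_t delta = (intptr_t)k1 - (intptr_t)k2;
  return delta > 0 ? 1 : -1;                           // sign taken at full width
}

int main() {
  const void* lo = (const void*)0x100000000ULL;        // exactly 2^32 apart
  const void* hi = (const void*)0x200000000ULL;
  printf("old: %d  new: %d\n", cmp_old(hi, lo), cmp_new(hi, lo));
  // old prints 0, wrongly reporting the keys equal; new prints 1.
  return 0;
}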
@ -34,17 +34,6 @@
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#undef bzero
inline void bzero(void *b, int len) { memset(b,0,len); }
#undef bcopy
inline void bcopy(const void *s, void *d, size_t len) { memmove(d,s,len); }
#undef bcmp
inline int bcmp(const void *s,const void *t,int len) { return memcmp(s,t,len);}
extern "C" unsigned long strtoul(const char *s, char **end, int base);

// Definition for sys_errlist varies from Sun 4.1 & Solaris.
// We use the new Solaris definition.
#include <string.h>

// Access to the C++ class virtual function pointer
// Put the class in the macro

@ -610,6 +610,10 @@ void GenCollectedHeap::do_collection(bool full,
Universe::print_heap_after_gc();
}

#ifdef TRACESPINNING
ParallelTaskTerminator::print_termination_counts();
#endif

if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) {
tty->print_cr("Stopping after GC #%d", ExitAfterGCNum);
vm_exit(-1);
@ -910,6 +914,13 @@ void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
perm_gen()->object_iterate(cl);
}

void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
for (int i = 0; i < _n_gens; i++) {
_gens[i]->safe_object_iterate(cl);
}
perm_gen()->safe_object_iterate(cl);
}

void GenCollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
for (int i = 0; i < _n_gens; i++) {
_gens[i]->object_iterate_since_last_GC(cl);

@ -215,6 +215,7 @@ public:
void oop_iterate(OopClosure* cl);
void oop_iterate(MemRegion mr, OopClosure* cl);
void object_iterate(ObjectClosure* cl);
void safe_object_iterate(ObjectClosure* cl);
void object_iterate_since_last_GC(ObjectClosure* cl);
Space* space_containing(const void* addr) const;

@ -319,6 +319,21 @@ void Generation::object_iterate(ObjectClosure* cl) {
space_iterate(&blk);
}

class GenerationSafeObjIterateClosure : public SpaceClosure {
private:
ObjectClosure* _cl;
public:
virtual void do_space(Space* s) {
s->safe_object_iterate(_cl);
}
GenerationSafeObjIterateClosure(ObjectClosure* cl) : _cl(cl) {}
};

void Generation::safe_object_iterate(ObjectClosure* cl) {
GenerationSafeObjIterateClosure blk(cl);
space_iterate(&blk);
}

void Generation::prepare_for_compaction(CompactPoint* cp) {
// Generic implementation, can be specialized
CompactibleSpace* space = first_compaction_space();