Merge
commit 6ac5f30765
@@ -4208,6 +4208,7 @@ void MacroAssembler::g1_write_barrier_pre(Register obj, Register index, int offs
                      PtrQueue::byte_offset_of_active()),
          tmp);
   }
+
   // Check on whether to annul.
   br_on_reg_cond(rc_z, /*annul*/false, Assembler::pt, tmp, filtered);
   delayed() -> nop();
@@ -4215,13 +4216,13 @@ void MacroAssembler::g1_write_barrier_pre(Register obj, Register index, int offs
   // satb_log_barrier_work1(tmp, offset);
   if (index == noreg) {
     if (Assembler::is_simm13(offset)) {
-      ld_ptr(obj, offset, tmp);
+      load_heap_oop(obj, offset, tmp);
     } else {
       set(offset, tmp);
-      ld_ptr(obj, tmp, tmp);
+      load_heap_oop(obj, tmp, tmp);
     }
   } else {
-    ld_ptr(obj, index, tmp);
+    load_heap_oop(obj, index, tmp);
   }

   // satb_log_barrier_work2(obj, tmp, offset);
@@ -6805,14 +6805,18 @@ void MacroAssembler::g1_write_barrier_pre(Register obj,
   jcc(Assembler::equal, done);

   // if (x.f == NULL) goto done;
-  cmpptr(Address(obj, 0), NULL_WORD);
+#ifdef _LP64
+  load_heap_oop(tmp2, Address(obj, 0));
+#else
+  movptr(tmp2, Address(obj, 0));
+#endif
+  cmpptr(tmp2, (int32_t) NULL_WORD);
   jcc(Assembler::equal, done);

   // Can we store original value in the thread's buffer?

-  LP64_ONLY(movslq(tmp, index);)
-  movptr(tmp2, Address(obj, 0));
 #ifdef _LP64
+  movslq(tmp, index);
   cmpq(tmp, 0);
 #else
   cmpl(index, 0);
@@ -6834,8 +6838,7 @@ void MacroAssembler::g1_write_barrier_pre(Register obj,
   if(tosca_live) push(rax);
   push(obj);
 #ifdef _LP64
-  movq(c_rarg0, Address(obj, 0));
-  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), c_rarg0, r15_thread);
+  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), tmp2, r15_thread);
 #else
   push(thread);
   call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), tmp2, thread);
@@ -269,11 +269,11 @@ void MethodHandles::remove_arg_slots(MacroAssembler* _masm,

 #ifndef PRODUCT
 void trace_method_handle_stub(const char* adaptername,
-                              oop mh,
+                              oopDesc* mh,
                               intptr_t* entry_sp,
                               intptr_t* saved_sp) {
   // called as a leaf from native code: do not block the JVM!
-  printf("MH %s "PTR_FORMAT" "PTR_FORMAT" "INTX_FORMAT"\n", adaptername, mh, entry_sp, entry_sp - saved_sp);
+  printf("MH %s "PTR_FORMAT" "PTR_FORMAT" "INTX_FORMAT"\n", adaptername, (void*)mh, entry_sp, entry_sp - saved_sp);
 }
 #endif //PRODUCT
@@ -1302,22 +1302,19 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,

     const Register ic_reg = rax;
     const Register receiver = j_rarg0;
-    const Register tmp = rdx;

     Label ok;
     Label exception_pending;

+    assert_different_registers(ic_reg, receiver, rscratch1);
     __ verify_oop(receiver);
-    __ push(tmp); // spill (any other registers free here???)
-    __ load_klass(tmp, receiver);
-    __ cmpq(ic_reg, tmp);
+    __ load_klass(rscratch1, receiver);
+    __ cmpq(ic_reg, rscratch1);

     __ jcc(Assembler::equal, ok);

-    __ pop(tmp);
     __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

     __ bind(ok);
-    __ pop(tmp);

     // Verified entry point must be aligned
     __ align(8);
@@ -709,7 +709,7 @@ class StubGenerator: public StubCodeGenerator {
   //
   //  Input:
   //     start   - starting address
-  //     end     - element count
+  //     count   - element count
   void gen_write_ref_array_pre_barrier(Register start, Register count) {
     assert_different_registers(start, count);
     BarrierSet* bs = Universe::heap()->barrier_set();
@@ -757,7 +757,6 @@ class StubGenerator: public StubCodeGenerator {
     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post)));
     __ addptr(rsp, 2*wordSize);
     __ popa();
-
   }
   break;

@@ -1207,9 +1207,9 @@ class StubGenerator: public StubCodeGenerator {
     __ pusha();             // push registers (overkill)
     // must compute element count unless barrier set interface is changed (other platforms supply count)
     assert_different_registers(start, end, scratch);
-    __ lea(scratch, Address(end, wordSize));
-    __ subptr(scratch, start);
-    __ shrptr(scratch, LogBytesPerWord);
+    __ lea(scratch, Address(end, BytesPerHeapOop));
+    __ subptr(scratch, start);               // subtract start to get #bytes
+    __ shrptr(scratch, LogBytesPerHeapOop);  // convert to element count
     __ mov(c_rarg0, start);
     __ mov(c_rarg1, scratch);
     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post)));
@@ -1225,6 +1225,7 @@ class StubGenerator: public StubCodeGenerator {
       Label L_loop;

       __ shrptr(start, CardTableModRefBS::card_shift);
+      __ addptr(end, BytesPerHeapOop);
       __ shrptr(end, CardTableModRefBS::card_shift);
       __ subptr(end, start); // number of bytes to copy

@@ -2251,6 +2252,7 @@ class StubGenerator: public StubCodeGenerator {
     // and report their number to the caller.
     assert_different_registers(rax, r14_length, count, to, end_to, rcx);
     __ lea(end_to, to_element_addr);
+    __ addptr(end_to, -heapOopSize);      // make an inclusive end pointer
     gen_write_ref_array_post_barrier(to, end_to, rscratch1);
     __ movptr(rax, r14_length); // original oops
     __ addptr(rax, count);      // K = (original - remaining) oops
@@ -2259,7 +2261,7 @@ class StubGenerator: public StubCodeGenerator {

     // Come here on success only.
     __ BIND(L_do_card_marks);
-    __ addptr(end_to, -wordSize);         // make an inclusive end pointer
+    __ addptr(end_to, -heapOopSize);      // make an inclusive end pointer
     gen_write_ref_array_post_barrier(to, end_to, rscratch1);
     __ xorptr(rax, rax);                  // return 0 on success

@@ -420,6 +420,13 @@ Form::DataType InstructForm::is_ideal_load() const {
   return _matrule->is_ideal_load();
 }

+// Return 'true' if this instruction matches an ideal 'LoadKlass' node
+bool InstructForm::skip_antidep_check() const {
+  if( _matrule == NULL )  return false;
+
+  return _matrule->skip_antidep_check();
+}
+
 // Return 'true' if this instruction matches an ideal 'Load?' node
 Form::DataType InstructForm::is_ideal_store() const {
   if( _matrule == NULL ) return Form::none;
@@ -567,6 +574,8 @@ bool InstructForm::rematerialize(FormDict &globals, RegisterForm *registers ) {

 // loads from memory, so must check for anti-dependence
 bool InstructForm::needs_anti_dependence_check(FormDict &globals) const {
+  if ( skip_antidep_check() ) return false;
+
   // Machine independent loads must be checked for anti-dependences
   if( is_ideal_load() != Form::none )  return true;

@@ -3957,6 +3966,28 @@ Form::DataType MatchRule::is_ideal_load() const {
 }


+bool MatchRule::skip_antidep_check() const {
+  // Some loads operate on what is effectively immutable memory so we
+  // should skip the anti dep computations. For some of these nodes
+  // the rewritable field keeps the anti dep logic from triggering but
+  // for certain kinds of LoadKlass it does not since they are
+  // actually reading memory which could be rewritten by the runtime,
+  // though never by generated code. This disables it uniformly for
+  // the nodes that behave like this: LoadKlass, LoadNKlass and
+  // LoadRange.
+  if ( _opType && (strcmp(_opType,"Set") == 0) && _rChild ) {
+    const char *opType = _rChild->_opType;
+    if (strcmp("LoadKlass", opType) == 0 ||
+        strcmp("LoadNKlass", opType) == 0 ||
+        strcmp("LoadRange", opType) == 0) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
+
 Form::DataType MatchRule::is_ideal_store() const {
   Form::DataType ideal_store = Form::none;

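Note (not part of the patch): the new skip_antidep_check() hook simply pattern-matches the right-hand side of the ADLC match rule against a small whitelist of load opcodes that read effectively immutable memory. The standalone sketch below restates that test in isolation; the function name and the opcode-string argument are illustrative, not from the patch.

    #include <cstring>

    // Sketch: whitelist test equivalent to the check in MatchRule::skip_antidep_check().
    // LoadKlass/LoadNKlass/LoadRange read memory that generated code never rewrites,
    // so anti-dependence checks can safely be skipped for them.
    static bool loads_immutable_memory(const char* opType) {
      return strcmp(opType, "LoadKlass")  == 0 ||
             strcmp(opType, "LoadNKlass") == 0 ||
             strcmp(opType, "LoadRange")  == 0;
    }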
@@ -158,6 +158,9 @@ public:

   virtual Form::CallType is_ideal_call() const; // matches ideal 'Call'
   virtual Form::DataType is_ideal_load() const; // node matches ideal 'LoadXNode'
+  // Should antidep checks be disabled for this Instruct
+  // See definition of MatchRule::skip_antidep_check
+  bool skip_antidep_check() const;
   virtual Form::DataType is_ideal_store() const;// node matches ideal 'StoreXNode'
   bool is_ideal_mem() const { return is_ideal_load() != Form::none || is_ideal_store() != Form::none; }
   virtual uint two_address(FormDict &globals); // output reg must match input reg
@@ -1003,6 +1006,9 @@ public:
   bool is_ideal_loopEnd() const; // node matches ideal 'LoopEnd'
   bool is_ideal_bool() const;    // node matches ideal 'Bool'
   Form::DataType is_ideal_load() const;// node matches ideal 'LoadXNode'
+  // Should antidep checks be disabled for this rule
+  // See definition of MatchRule::skip_antidep_check
+  bool skip_antidep_check() const;
   Form::DataType is_ideal_store() const;// node matches ideal 'StoreXNode'

   // Check if 'mRule2' is a cisc-spill variant of this MatchRule
@@ -3231,6 +3231,16 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
     this_klass->set_minor_version(minor_version);
     this_klass->set_major_version(major_version);

+    // Set up methodOop::intrinsic_id as soon as we know the names of methods.
+    // (We used to do this lazily, but now we query it in Rewriter,
+    // which is eagerly done for every method, so we might as well do it now,
+    // when everything is fresh in memory.)
+    if (methodOopDesc::klass_id_for_intrinsics(this_klass->as_klassOop()) != vmSymbols::NO_SID) {
+      for (int j = 0; j < methods->length(); j++) {
+        ((methodOop)methods->obj_at(j))->init_intrinsic_id();
+      }
+    }
+
     if (cached_class_file_bytes != NULL) {
       // JVMTI: we have an instanceKlass now, tell it about the cached bytes
       this_klass->set_cached_class_file(cached_class_file_bytes,
@@ -513,9 +513,6 @@
 //
 // for Emacs: (let ((c-backslash-column 120) (c-backslash-max-column 120)) (c-backslash-region (point) (point-max) nil t))
 #define VM_INTRINSICS_DO(do_intrinsic, do_class, do_name, do_signature, do_alias) \
-  do_intrinsic(_Object_init, java_lang_Object, object_initializer_name, void_method_signature, F_R) \
-  /* (symbol object_initializer_name defined above) */ \
-  \
   do_intrinsic(_hashCode, java_lang_Object, hashCode_name, void_int_signature, F_R) \
   do_name( hashCode_name, "hashCode") \
   do_intrinsic(_getClass, java_lang_Object, getClass_name, void_class_signature, F_R) \
@@ -635,9 +632,6 @@
   do_intrinsic(_equalsC, java_util_Arrays, equals_name, equalsC_signature, F_S) \
   do_signature(equalsC_signature, "([C[C)Z") \
   \
-  do_intrinsic(_invoke, java_lang_reflect_Method, invoke_name, object_array_object_object_signature, F_R) \
-  /* (symbols invoke_name and invoke_signature defined above) */ \
-  \
   do_intrinsic(_compareTo, java_lang_String, compareTo_name, string_int_signature, F_R) \
   do_name( compareTo_name, "compareTo") \
   do_intrinsic(_indexOf, java_lang_String, indexOf_name, string_int_signature, F_R) \
@@ -656,8 +650,6 @@
   do_name( attemptUpdate_name, "attemptUpdate") \
   do_signature(attemptUpdate_signature, "(JJ)Z") \
   \
-  do_intrinsic(_fillInStackTrace, java_lang_Throwable, fillInStackTrace_name, void_throwable_signature, F_RNY) \
-  \
   /* support for sun.misc.Unsafe */ \
   do_class(sun_misc_Unsafe, "sun/misc/Unsafe") \
   \
@@ -819,10 +811,22 @@
   do_name( prefetchReadStatic_name, "prefetchReadStatic") \
   do_intrinsic(_prefetchWriteStatic, sun_misc_Unsafe, prefetchWriteStatic_name, prefetch_signature, F_SN) \
   do_name( prefetchWriteStatic_name, "prefetchWriteStatic") \
+    /*== LAST_COMPILER_INLINE*/ \
+    /*the compiler does have special inlining code for these; bytecode inline is just fine */ \
+  \
+  do_intrinsic(_fillInStackTrace, java_lang_Throwable, fillInStackTrace_name, void_throwable_signature, F_RNY) \
+  \
+  do_intrinsic(_Object_init, java_lang_Object, object_initializer_name, void_method_signature, F_R) \
+  /* (symbol object_initializer_name defined above) */ \
+  \
+  do_intrinsic(_invoke, java_lang_reflect_Method, invoke_name, object_array_object_object_signature, F_R) \
+  /* (symbols invoke_name and invoke_signature defined above) */ \
+  \
   /*end*/
+



 // Class vmSymbols

 class vmSymbols: AllStatic {
@@ -935,6 +939,7 @@ class vmIntrinsics: AllStatic {
     #undef VM_INTRINSIC_ENUM

     ID_LIMIT,
+    LAST_COMPILER_INLINE = _prefetchWriteStatic,
     FIRST_ID = _none + 1
   };

@@ -972,4 +977,7 @@ public:
   static Flags flags_for(ID id);

   static const char* short_name_as_C_string(ID id, char* buf, int size);
+
+  // Access to intrinsic methods:
+  static methodOop method_for(ID id);
 };
@@ -379,6 +379,14 @@ void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
       if ( loc != NULL ) {
         oop *base_loc = fr->oopmapreg_to_location(omv.content_reg(), reg_map);
         oop *derived_loc = loc;
+        oop val = *base_loc;
+        if (val == (oop)NULL || Universe::is_narrow_oop_base(val)) {
+          // Ignore NULL oops and decoded NULL narrow oops which
+          // equal to Universe::narrow_oop_base when a narrow oop
+          // implicit null check is used in compiled code.
+          // The narrow_oop_base could be NULL or be the address
+          // of the page below heap depending on compressed oops mode.
+        } else
           derived_oop_fn(base_loc, derived_loc);
       }
       oms.next();
@@ -394,6 +402,15 @@ void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
     oop* loc = fr->oopmapreg_to_location(omv.reg(),reg_map);
     if ( loc != NULL ) {
       if ( omv.type() == OopMapValue::oop_value ) {
+        oop val = *loc;
+        if (val == (oop)NULL || Universe::is_narrow_oop_base(val)) {
+          // Ignore NULL oops and decoded NULL narrow oops which
+          // equal to Universe::narrow_oop_base when a narrow oop
+          // implicit null check is used in compiled code.
+          // The narrow_oop_base could be NULL or be the address
+          // of the page below heap depending on compressed oops mode.
+          continue;
+        }
 #ifdef ASSERT
         if ((((uintptr_t)loc & (sizeof(*loc)-1)) != 0) ||
             !Universe::heap()->is_in_or_null(*loc)) {
|
|||||||
#endif // ASSERT
|
#endif // ASSERT
|
||||||
oop_fn->do_oop(loc);
|
oop_fn->do_oop(loc);
|
||||||
} else if ( omv.type() == OopMapValue::value_value ) {
|
} else if ( omv.type() == OopMapValue::value_value ) {
|
||||||
|
assert((*loc) == (oop)NULL || !Universe::is_narrow_oop_base(*loc),
|
||||||
|
"found invalid value pointer");
|
||||||
value_fn->do_oop(loc);
|
value_fn->do_oop(loc);
|
||||||
} else if ( omv.type() == OopMapValue::narrowoop_value ) {
|
} else if ( omv.type() == OopMapValue::narrowoop_value ) {
|
||||||
narrowOop *nl = (narrowOop*)loc;
|
narrowOop *nl = (narrowOop*)loc;
|
||||||
|
@@ -233,6 +233,10 @@ class OopMapSet : public ResourceObj {
   int heap_size() const;
   void copy_to(address addr);

+  // Methods oops_do() and all_do() filter out NULL oops and
+  // oop == Universe::narrow_oop_base() before passing oops
+  // to closures.
+
   // Iterates through frame for a compiled method
   static void oops_do (const frame* fr,
                        const RegisterMap* reg_map, OopClosure* f);
@@ -42,35 +42,40 @@ protected:
     BufferLength = 1024
   };

-  oop  *_buffer[BufferLength];
-  oop **_buffer_top;
-  oop **_buffer_curr;
+  StarTask  _buffer[BufferLength];
+  StarTask* _buffer_top;
+  StarTask* _buffer_curr;

   OopClosure* _oc;
   double      _closure_app_seconds;

   void process_buffer () {

     double start = os::elapsedTime();
-    for (oop **curr = _buffer; curr < _buffer_curr; ++curr) {
-      _oc->do_oop(*curr);
+    for (StarTask* curr = _buffer; curr < _buffer_curr; ++curr) {
+      if (curr->is_narrow()) {
+        assert(UseCompressedOops, "Error");
+        _oc->do_oop((narrowOop*)(*curr));
+      } else {
+        _oc->do_oop((oop*)(*curr));
+      }
     }
     _buffer_curr = _buffer;
     _closure_app_seconds += (os::elapsedTime() - start);
   }

-public:
-  virtual void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
-  virtual void do_oop(oop *p) {
+  template <class T> inline void do_oop_work(T* p) {
     if (_buffer_curr == _buffer_top) {
       process_buffer();
     }
-    *_buffer_curr = p;
+    StarTask new_ref(p);
+    *_buffer_curr = new_ref;
     ++_buffer_curr;
   }

+public:
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+
   void done () {
     if (_buffer_curr > _buffer) {
       process_buffer();
|
|||||||
class BufferingOopsInGenClosure: public OopsInGenClosure {
|
class BufferingOopsInGenClosure: public OopsInGenClosure {
|
||||||
BufferingOopClosure _boc;
|
BufferingOopClosure _boc;
|
||||||
OopsInGenClosure* _oc;
|
OopsInGenClosure* _oc;
|
||||||
|
protected:
|
||||||
|
template <class T> inline void do_oop_work(T* p) {
|
||||||
|
assert(generation()->is_in_reserved((void*)p), "Must be in!");
|
||||||
|
_boc.do_oop(p);
|
||||||
|
}
|
||||||
public:
|
public:
|
||||||
BufferingOopsInGenClosure(OopsInGenClosure *oc) :
|
BufferingOopsInGenClosure(OopsInGenClosure *oc) :
|
||||||
_boc(oc), _oc(oc) {}
|
_boc(oc), _oc(oc) {}
|
||||||
|
|
||||||
virtual void do_oop(narrowOop* p) {
|
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
|
||||||
guarantee(false, "NYI");
|
virtual void do_oop(oop* p) { do_oop_work(p); }
|
||||||
}
|
|
||||||
|
|
||||||
virtual void do_oop(oop* p) {
|
|
||||||
assert(generation()->is_in_reserved(p), "Must be in!");
|
|
||||||
_boc.do_oop(p);
|
|
||||||
}
|
|
||||||
|
|
||||||
void done() {
|
void done() {
|
||||||
_boc.done();
|
_boc.done();
|
||||||
@ -130,9 +134,9 @@ private:
|
|||||||
BufferLength = 1024
|
BufferLength = 1024
|
||||||
};
|
};
|
||||||
|
|
||||||
oop *_buffer[BufferLength];
|
StarTask _buffer[BufferLength];
|
||||||
oop **_buffer_top;
|
StarTask* _buffer_top;
|
||||||
oop **_buffer_curr;
|
StarTask* _buffer_curr;
|
||||||
|
|
||||||
HeapRegion* _hr_buffer[BufferLength];
|
HeapRegion* _hr_buffer[BufferLength];
|
||||||
HeapRegion** _hr_curr;
|
HeapRegion** _hr_curr;
|
||||||
@ -148,13 +152,18 @@ private:
|
|||||||
double start = os::elapsedTime();
|
double start = os::elapsedTime();
|
||||||
HeapRegion** hr_curr = _hr_buffer;
|
HeapRegion** hr_curr = _hr_buffer;
|
||||||
HeapRegion* hr_prev = NULL;
|
HeapRegion* hr_prev = NULL;
|
||||||
for (oop **curr = _buffer; curr < _buffer_curr; ++curr) {
|
for (StarTask* curr = _buffer; curr < _buffer_curr; ++curr) {
|
||||||
HeapRegion* region = *hr_curr;
|
HeapRegion* region = *hr_curr;
|
||||||
if (region != hr_prev) {
|
if (region != hr_prev) {
|
||||||
_oc->set_region(region);
|
_oc->set_region(region);
|
||||||
hr_prev = region;
|
hr_prev = region;
|
||||||
}
|
}
|
||||||
_oc->do_oop(*curr);
|
if (curr->is_narrow()) {
|
||||||
|
assert(UseCompressedOops, "Error");
|
||||||
|
_oc->do_oop((narrowOop*)(*curr));
|
||||||
|
} else {
|
||||||
|
_oc->do_oop((oop*)(*curr));
|
||||||
|
}
|
||||||
++hr_curr;
|
++hr_curr;
|
||||||
}
|
}
|
||||||
_buffer_curr = _buffer;
|
_buffer_curr = _buffer;
|
||||||
@ -163,17 +172,16 @@ private:
|
|||||||
}
|
}
|
||||||
|
|
||||||
public:
|
public:
|
||||||
virtual void do_oop(narrowOop *p) {
|
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
|
||||||
guarantee(false, "NYI");
|
virtual void do_oop( oop* p) { do_oop_work(p); }
|
||||||
}
|
|
||||||
|
|
||||||
virtual void do_oop(oop *p) {
|
template <class T> void do_oop_work(T* p) {
|
||||||
if (_buffer_curr == _buffer_top) {
|
if (_buffer_curr == _buffer_top) {
|
||||||
assert(_hr_curr > _hr_buffer, "_hr_curr should be consistent with _buffer_curr");
|
assert(_hr_curr > _hr_buffer, "_hr_curr should be consistent with _buffer_curr");
|
||||||
process_buffer();
|
process_buffer();
|
||||||
}
|
}
|
||||||
|
StarTask new_ref(p);
|
||||||
*_buffer_curr = p;
|
*_buffer_curr = new_ref;
|
||||||
++_buffer_curr;
|
++_buffer_curr;
|
||||||
*_hr_curr = _from;
|
*_hr_curr = _from;
|
||||||
++_hr_curr;
|
++_hr_curr;
|
||||||
|
@@ -452,13 +452,10 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
   _regionStack.allocate(G1MarkRegionStackSize);

   // Create & start a ConcurrentMark thread.
-  if (G1ConcMark) {
-    _cmThread = new ConcurrentMarkThread(this);
-    assert(cmThread() != NULL, "CM Thread should have been created");
-    assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
-  } else {
-    _cmThread = NULL;
-  }
+  _cmThread = new ConcurrentMarkThread(this);
+  assert(cmThread() != NULL, "CM Thread should have been created");
+  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
   _g1h = G1CollectedHeap::heap();
   assert(CGC_lock != NULL, "Where's the CGC_lock?");
   assert(_markBitMap1.covers(rs), "_markBitMap1 inconsistency");
@@ -783,18 +780,18 @@ public:
                 bool do_barrier) : _cm(cm), _g1h(g1h),
                                    _do_barrier(do_barrier) { }

-  virtual void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }

-  virtual void do_oop(oop* p) {
-    oop thisOop = *p;
-    if (thisOop != NULL) {
-      assert(thisOop->is_oop() || thisOop->mark() == NULL,
+  template <class T> void do_oop_work(T* p) {
+    T heap_oop = oopDesc::load_heap_oop(p);
+    if (!oopDesc::is_null(heap_oop)) {
+      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+      assert(obj->is_oop() || obj->mark() == NULL,
              "expected an oop, possibly with mark word displaced");
-      HeapWord* addr = (HeapWord*)thisOop;
+      HeapWord* addr = (HeapWord*)obj;
       if (_g1h->is_in_g1_reserved(addr)) {
-        _cm->grayRoot(thisOop);
+        _cm->grayRoot(obj);
       }
     }
     if (_do_barrier) {
|
|||||||
double start = os::elapsedTime();
|
double start = os::elapsedTime();
|
||||||
GCOverheadReporter::recordSTWStart(start);
|
GCOverheadReporter::recordSTWStart(start);
|
||||||
|
|
||||||
// If there has not been a GC[n-1] since last GC[n] cycle completed,
|
|
||||||
// precede our marking with a collection of all
|
|
||||||
// younger generations to keep floating garbage to a minimum.
|
|
||||||
// YSR: we won't do this for now -- it's an optimization to be
|
|
||||||
// done post-beta.
|
|
||||||
|
|
||||||
// YSR: ignoring weak refs for now; will do at bug fixing stage
|
|
||||||
// EVM: assert(discoveredRefsAreClear());
|
|
||||||
|
|
||||||
|
|
||||||
G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
|
G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
|
||||||
g1p->record_concurrent_mark_init_start();
|
g1p->record_concurrent_mark_init_start();
|
||||||
checkpointRootsInitialPre();
|
checkpointRootsInitialPre();
|
||||||
@ -1135,6 +1122,13 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (VerifyDuringGC) {
|
||||||
|
HandleMark hm; // handle scope
|
||||||
|
gclog_or_tty->print(" VerifyDuringGC:(before)");
|
||||||
|
Universe::heap()->prepare_for_verify();
|
||||||
|
Universe::verify(true, false, true);
|
||||||
|
}
|
||||||
|
|
||||||
G1CollectorPolicy* g1p = g1h->g1_policy();
|
G1CollectorPolicy* g1p = g1h->g1_policy();
|
||||||
g1p->record_concurrent_mark_remark_start();
|
g1p->record_concurrent_mark_remark_start();
|
||||||
|
|
||||||
@@ -1159,8 +1153,10 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
   JavaThread::satb_mark_queue_set().set_active_all_threads(false);

   if (VerifyDuringGC) {
-    g1h->prepare_for_verify();
-    g1h->verify(/* allow_dirty */      true,
+    HandleMark hm;  // handle scope
+    gclog_or_tty->print(" VerifyDuringGC:(after)");
+    Universe::heap()->prepare_for_verify();
+    Universe::heap()->verify(/* allow_dirty */ true,
                 /* silent */           false,
                 /* use_prev_marking */ false);
   }
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (VerifyDuringGC) {
|
||||||
|
HandleMark hm; // handle scope
|
||||||
|
gclog_or_tty->print(" VerifyDuringGC:(before)");
|
||||||
|
Universe::heap()->prepare_for_verify();
|
||||||
|
Universe::verify(/* allow dirty */ true,
|
||||||
|
/* silent */ false,
|
||||||
|
/* prev marking */ true);
|
||||||
|
}
|
||||||
|
|
||||||
_cleanup_co_tracker.disable();
|
_cleanup_co_tracker.disable();
|
||||||
|
|
||||||
G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
|
G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
|
||||||
@ -1790,10 +1795,12 @@ void ConcurrentMark::cleanup() {
|
|||||||
g1h->increment_total_collections();
|
g1h->increment_total_collections();
|
||||||
|
|
||||||
if (VerifyDuringGC) {
|
if (VerifyDuringGC) {
|
||||||
g1h->prepare_for_verify();
|
HandleMark hm; // handle scope
|
||||||
g1h->verify(/* allow_dirty */ true,
|
gclog_or_tty->print(" VerifyDuringGC:(after)");
|
||||||
|
Universe::heap()->prepare_for_verify();
|
||||||
|
Universe::verify(/* allow dirty */ true,
|
||||||
/* silent */ false,
|
/* silent */ false,
|
||||||
/* use_prev_marking */ true);
|
/* prev marking */ true);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1852,12 +1859,11 @@ class G1CMKeepAliveClosure: public OopClosure {
     _g1(g1), _cm(cm),
     _bitMap(bitMap) {}

-  void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }

-  void do_oop(oop* p) {
-    oop thisOop = *p;
+  template <class T> void do_oop_work(T* p) {
+    oop thisOop = oopDesc::load_decode_heap_oop(p);
     HeapWord* addr = (HeapWord*)thisOop;
     if (_g1->is_in_g1_reserved(addr) && _g1->is_obj_ill(thisOop)) {
       _bitMap->mark(addr);
@@ -2016,12 +2022,11 @@ public:
   ReachablePrinterOopClosure(CMBitMapRO* bitmap, outputStream* out) :
     _bitmap(bitmap), _g1h(G1CollectedHeap::heap()), _out(out) { }

-  void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
+  void do_oop(narrowOop* p) { do_oop_work(p); }
+  void do_oop(oop* p)       { do_oop_work(p); }

-  void do_oop(oop* p) {
-    oop obj = *p;
+  template <class T> void do_oop_work(T* p) {
+    oop obj = oopDesc::load_decode_heap_oop(p);
     const char* str = NULL;
     const char* str2 = "";

|
|||||||
|
|
||||||
|
|
||||||
HeapWord* objAddr = (HeapWord*) obj;
|
HeapWord* objAddr = (HeapWord*) obj;
|
||||||
|
assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
|
||||||
if (_g1h->is_in_g1_reserved(objAddr)) {
|
if (_g1h->is_in_g1_reserved(objAddr)) {
|
||||||
tmp_guarantee_CM( obj != NULL, "is_in_g1_reserved should ensure this" );
|
tmp_guarantee_CM( obj != NULL, "is_in_g1_reserved should ensure this" );
|
||||||
HeapRegion* hr = _g1h->heap_region_containing(obj);
|
HeapRegion* hr = _g1h->heap_region_containing(obj);
|
||||||
@@ -2380,7 +2386,7 @@ class CSMarkOopClosure: public OopClosure {
     }
   }

-  bool drain() {
+  template <class T> bool drain() {
     while (_ms_ind > 0) {
       oop obj = pop();
       assert(obj != NULL, "Since index was non-zero.");
@@ -2394,9 +2400,8 @@ class CSMarkOopClosure: public OopClosure {
       }
       // Now process this portion of this one.
       int lim = MIN2(next_arr_ind, len);
-      assert(!UseCompressedOops, "This needs to be fixed");
       for (int j = arr_ind; j < lim; j++) {
-        do_oop(aobj->obj_at_addr<oop>(j));
+        do_oop(aobj->obj_at_addr<T>(j));
       }

     } else {
@@ -2423,13 +2428,13 @@ public:
     FREE_C_HEAP_ARRAY(jint, _array_ind_stack);
   }

-  void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }

-  void do_oop(oop* p) {
-    oop obj = *p;
-    if (obj == NULL) return;
+  template <class T> void do_oop_work(T* p) {
+    T heap_oop = oopDesc::load_heap_oop(p);
+    if (oopDesc::is_null(heap_oop)) return;
+    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
     if (obj->is_forwarded()) {
       // If the object has already been forwarded, we have to make sure
       // that it's marked. So follow the forwarding pointer. Note that
@@ -2478,7 +2483,11 @@ public:
     oop obj = oop(addr);
     if (!obj->is_forwarded()) {
       if (!_oop_cl.push(obj)) return false;
-      if (!_oop_cl.drain()) return false;
+      if (UseCompressedOops) {
+        if (!_oop_cl.drain<narrowOop>()) return false;
+      } else {
+        if (!_oop_cl.drain<oop>()) return false;
+      }
     }
     // Otherwise...
     return true;
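Note (not part of the patch): the hunk above shows the recurring idiom in this change set of making the worker a template over the slot type and letting the runtime UseCompressedOops flag pick which instantiation runs. A self-contained sketch of that dispatch, with illustrative types and names:

    #include <cstdio>

    // One templated worker is compiled for both pointer widths; a runtime flag
    // chooses which instantiation to call (types below are stand-ins, not HotSpot's).
    template <class T> bool drain_all() {
      std::printf("draining slots of %zu bytes\n", sizeof(T));
      return true;
    }

    bool drain(bool use_compressed_oops) {
      if (use_compressed_oops)
        return drain_all<unsigned int>();   // narrow, 32-bit handles
      else
        return drain_all<unsigned long>();  // full-width pointers
    }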
@@ -2636,9 +2645,6 @@ void ConcurrentMark::disable_co_trackers() {

 // abandon current marking iteration due to a Full GC
 void ConcurrentMark::abort() {
-  // If we're not marking, nothing to do.
-  if (!G1ConcMark) return;
-
   // Clear all marks to force marking thread to do nothing
   _nextMarkBitMap->clearAll();
   // Empty mark stack
@@ -2814,14 +2820,14 @@ private:
   CMTask* _task;

 public:
-  void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }

-  void do_oop(oop* p) {
+  template <class T> void do_oop_work(T* p) {
     tmp_guarantee_CM( _g1h->is_in_g1_reserved((HeapWord*) p), "invariant" );
+    tmp_guarantee_CM( !_g1h->heap_region_containing((HeapWord*) p)->is_on_free_list(), "invariant" );

-    oop obj = *p;
+    oop obj = oopDesc::load_decode_heap_oop(p);
     if (_cm->verbose_high())
       gclog_or_tty->print_cr("[%d] we're looking at location "
                              "*"PTR_FORMAT" = "PTR_FORMAT,
@@ -2967,6 +2973,7 @@ void CMTask::deal_with_reference(oop obj) {
   ++_refs_reached;

   HeapWord* objAddr = (HeapWord*) obj;
+  assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
   if (_g1h->is_in_g1_reserved(objAddr)) {
     tmp_guarantee_CM( obj != NULL, "is_in_g1_reserved should ensure this" );
     HeapRegion* hr = _g1h->heap_region_containing(obj);
@@ -3030,6 +3037,7 @@ void CMTask::deal_with_reference(oop obj) {
 void CMTask::push(oop obj) {
   HeapWord* objAddr = (HeapWord*) obj;
   tmp_guarantee_CM( _g1h->is_in_g1_reserved(objAddr), "invariant" );
+  tmp_guarantee_CM( !_g1h->heap_region_containing(objAddr)->is_on_free_list(), "invariant" );
   tmp_guarantee_CM( !_g1h->is_obj_ill(obj), "invariant" );
   tmp_guarantee_CM( _nextMarkBitMap->isMarked(objAddr), "invariant" );

@@ -3275,6 +3283,8 @@ void CMTask::drain_local_queue(bool partially) {

       tmp_guarantee_CM( _g1h->is_in_g1_reserved((HeapWord*) obj),
                         "invariant" );
+      tmp_guarantee_CM( !_g1h->heap_region_containing(obj)->is_on_free_list(),
+                        "invariant" );

       scan_object(obj);

@@ -763,6 +763,7 @@ private:
   CMBitMap*        _nextMarkBitMap;
   // the task queue of this task
   CMTaskQueue*     _task_queue;
+private:
   // the task queue set---needed for stealing
   CMTaskQueueSet*  _task_queues;
   // indicates whether the task has been claimed---this is only for
@@ -424,7 +424,7 @@ G1BlockOffsetArray::forward_to_block_containing_addr_slow(HeapWord* q,
     while (n <= next_boundary) {
       q = n;
       oop obj = oop(q);
-      if (obj->klass() == NULL) return q;
+      if (obj->klass_or_null() == NULL) return q;
       n += obj->size();
     }
     assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
@@ -436,7 +436,7 @@ G1BlockOffsetArray::forward_to_block_containing_addr_slow(HeapWord* q,
     while (n <= next_boundary) {
      q = n;
       oop obj = oop(q);
-      if (obj->klass() == NULL) return q;
+      if (obj->klass_or_null() == NULL) return q;
       n += _sp->block_size(q);
     }
     assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
@@ -96,14 +96,14 @@ forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
     while (n <= addr) {
       q = n;
       oop obj = oop(q);
-      if (obj->klass() == NULL) return q;
+      if (obj->klass_or_null() == NULL) return q;
       n += obj->size();
     }
   } else {
     while (n <= addr) {
       q = n;
       oop obj = oop(q);
-      if (obj->klass() == NULL) return q;
+      if (obj->klass_or_null() == NULL) return q;
       n += _sp->block_size(q);
     }
   }
@@ -115,7 +115,7 @@ forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
 inline HeapWord*
 G1BlockOffsetArray::forward_to_block_containing_addr(HeapWord* q,
                                                      const void* addr) {
-  if (oop(q)->klass() == NULL) return q;
+  if (oop(q)->klass_or_null() == NULL) return q;
   HeapWord* n = q + _sp->block_size(q);
   // In the normal case, where the query "addr" is a card boundary, and the
   // offset table chunks are the same size as cards, the block starting at
@@ -1658,8 +1658,15 @@ size_t G1CollectedHeap::used() const {
   assert(Heap_lock->owner() != NULL,
          "Should be owned on this thread's behalf.");
   size_t result = _summary_bytes_used;
-  if (_cur_alloc_region != NULL)
-    result += _cur_alloc_region->used();
+  // Read only once in case it is set to NULL concurrently
+  HeapRegion* hr = _cur_alloc_region;
+  if (hr != NULL)
+    result += hr->used();
+  return result;
+}
+
+size_t G1CollectedHeap::used_unlocked() const {
+  size_t result = _summary_bytes_used;
   return result;
 }

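Note (not part of the patch): the "Read only once in case it is set to NULL concurrently" comment above is the standard guard against re-reading a field that another thread may clear between the NULL test and the dereference. A generic, self-contained illustration of the same pattern, with illustrative names and an explicit atomic to make the single load visible:

    #include <atomic>
    #include <cstddef>

    struct Region { size_t used() const { return 64; } };

    // Load the possibly-concurrently-cleared pointer exactly once, then test and
    // use the local copy, so the NULL check and the use see the same value.
    size_t used_bytes(const std::atomic<Region*>& cur_alloc_region, size_t summary_bytes) {
      size_t result = summary_bytes;
      Region* hr = cur_alloc_region.load(std::memory_order_relaxed);  // single read
      if (hr != nullptr)
        result += hr->used();
      return result;
    }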
@@ -2133,12 +2140,12 @@ public:
   VerifyLivenessOopClosure(G1CollectedHeap* _g1h) {
     g1h = _g1h;
   }
-  void do_oop(narrowOop *p) {
-    guarantee(false, "NYI");
-  }
-  void do_oop(oop *p) {
-    oop obj = *p;
-    assert(obj == NULL || !g1h->is_obj_dead(obj),
+  void do_oop(narrowOop *p) { do_oop_work(p); }
+  void do_oop(oop *p)       { do_oop_work(p); }
+
+  template <class T> void do_oop_work(T *p) {
+    oop obj = oopDesc::load_decode_heap_oop(p);
+    guarantee(obj == NULL || !g1h->is_obj_dead(obj),
               "Dead object referenced by a not dead object");
   }
 };
@@ -2206,8 +2213,10 @@ public:
   // use_prev_marking == true  -> use "prev" marking information,
   // use_prev_marking == false -> use "next" marking information
   VerifyRegionClosure(bool allow_dirty, bool par, bool use_prev_marking)
-    : _allow_dirty(allow_dirty), _par(par),
+    : _allow_dirty(allow_dirty),
+      _par(par),
       _use_prev_marking(use_prev_marking) {}

   bool doHeapRegion(HeapRegion* r) {
     guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue,
               "Should be unclaimed at verify points.");
@@ -2231,18 +2240,16 @@ public:
   // use_prev_marking == true  -> use "prev" marking information,
   // use_prev_marking == false -> use "next" marking information
   VerifyRootsClosure(bool use_prev_marking) :
-    _g1h(G1CollectedHeap::heap()), _failures(false),
+    _g1h(G1CollectedHeap::heap()),
+    _failures(false),
     _use_prev_marking(use_prev_marking) { }

   bool failures() { return _failures; }

-  void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
-
-  void do_oop(oop* p) {
-    oop obj = *p;
-    if (obj != NULL) {
+  template <class T> void do_oop_nv(T* p) {
+    T heap_oop = oopDesc::load_heap_oop(p);
+    if (!oopDesc::is_null(heap_oop)) {
+      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
       if (_g1h->is_obj_dead_cond(obj, _use_prev_marking)) {
         gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
                                "points to dead obj "PTR_FORMAT, p, (void*) obj);
@@ -2251,6 +2258,9 @@ public:
       }
     }
   }
+
+  void do_oop(oop* p)       { do_oop_nv(p); }
+  void do_oop(narrowOop* p) { do_oop_nv(p); }
 };

 // This is the task used for parallel heap verification.
|
|||||||
G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty,
|
G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty,
|
||||||
bool use_prev_marking) :
|
bool use_prev_marking) :
|
||||||
AbstractGangTask("Parallel verify task"),
|
AbstractGangTask("Parallel verify task"),
|
||||||
_g1h(g1h), _allow_dirty(allow_dirty),
|
_g1h(g1h),
|
||||||
|
_allow_dirty(allow_dirty),
|
||||||
_use_prev_marking(use_prev_marking) { }
|
_use_prev_marking(use_prev_marking) { }
|
||||||
|
|
||||||
void work(int worker_i) {
|
void work(int worker_i) {
|
||||||
@@ -2342,7 +2353,7 @@ void G1CollectedHeap::print_on(outputStream* st) const {
 void G1CollectedHeap::print_on(outputStream* st, bool extended) const {
   st->print(" %-20s", "garbage-first heap");
   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
-            capacity()/K, used()/K);
+            capacity()/K, used_unlocked()/K);
   st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
             _g1_storage.low_boundary(),
             _g1_storage.high(),
|
|||||||
|
|
||||||
void
|
void
|
||||||
G1CollectedHeap::doConcurrentMark() {
|
G1CollectedHeap::doConcurrentMark() {
|
||||||
if (G1ConcMark) {
|
|
||||||
MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
|
MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
|
||||||
if (!_cmThread->in_progress()) {
|
if (!_cmThread->in_progress()) {
|
||||||
_cmThread->set_started();
|
_cmThread->set_started();
|
||||||
CGC_lock->notify();
|
CGC_lock->notify();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
class VerifyMarkedObjsClosure: public ObjectClosure {
|
class VerifyMarkedObjsClosure: public ObjectClosure {
|
||||||
G1CollectedHeap* _g1h;
|
G1CollectedHeap* _g1h;
|
||||||
@@ -2561,9 +2570,11 @@ G1CollectedHeap::setup_surviving_young_words() {
             "Not enough space for young surv words summary.");
   }
   memset(_surviving_young_words, 0, array_length * sizeof(size_t));
+#ifdef ASSERT
   for (size_t i = 0;  i < array_length; ++i) {
-    guarantee( _surviving_young_words[i] == 0, "invariant" );
+    assert( _surviving_young_words[i] == 0, "memset above" );
   }
+#endif // !ASSERT
 }

 void
@@ -2649,7 +2660,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
     COMPILER2_PRESENT(DerivedPointerTable::clear());

     // We want to turn off ref discovery, if necessary, and turn it back on
-    // on again later if we do.
+    // on again later if we do. XXX Dubious: why is discovery disabled?
     bool was_enabled = ref_processor()->discovery_enabled();
     if (was_enabled) ref_processor()->disable_discovery();

@@ -2662,9 +2673,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
     double start_time_sec = os::elapsedTime();
     GCOverheadReporter::recordSTWStart(start_time_sec);
     size_t start_used_bytes = used();
-    if (!G1ConcMark) {
-      do_sync_mark();
-    }

     g1_policy()->record_collection_pause_start(start_time_sec,
                                                start_used_bytes);
|
|||||||
g1_policy()->should_initiate_conc_mark()) {
|
g1_policy()->should_initiate_conc_mark()) {
|
||||||
concurrent_mark()->checkpointRootsInitialPost();
|
concurrent_mark()->checkpointRootsInitialPost();
|
||||||
set_marking_started();
|
set_marking_started();
|
||||||
|
// CAUTION: after the doConcurrentMark() call below,
|
||||||
|
// the concurrent marking thread(s) could be running
|
||||||
|
// concurrently with us. Make sure that anything after
|
||||||
|
// this point does not assume that we are the only GC thread
|
||||||
|
// running. Note: of course, the actual marking work will
|
||||||
|
// not start until the safepoint itself is released in
|
||||||
|
// ConcurrentGCThread::safepoint_desynchronize().
|
||||||
doConcurrentMark();
|
doConcurrentMark();
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -2965,6 +2980,7 @@ void G1CollectedHeap::get_gc_alloc_regions() {

   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
     assert(_gc_alloc_regions[ap] == NULL, "invariant");
+    assert(_gc_alloc_region_counts[ap] == 0, "invariant");

     // Create new GC alloc regions.
     HeapRegion* alloc_region = _retained_gc_alloc_regions[ap];
@ -2993,6 +3009,9 @@ void G1CollectedHeap::get_gc_alloc_regions() {
     if (alloc_region == NULL) {
       // we will get a new GC alloc region
       alloc_region = newAllocRegionWithExpansion(ap, 0);
+    } else {
+      // the region was retained from the last collection
+      ++_gc_alloc_region_counts[ap];
     }

     if (alloc_region != NULL) {
@ -3031,11 +3050,11 @@ void G1CollectedHeap::release_gc_alloc_regions(bool totally) {
   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
     HeapRegion* r = _gc_alloc_regions[ap];
     _retained_gc_alloc_regions[ap] = NULL;
+    _gc_alloc_region_counts[ap] = 0;

     if (r != NULL) {
       // we retain nothing on _gc_alloc_regions between GCs
       set_gc_alloc_region(ap, NULL);
-      _gc_alloc_region_counts[ap] = 0;

       if (r->is_empty()) {
         // we didn't actually allocate anything in it; let's just put
@ -3123,9 +3142,7 @@ class G1KeepAliveClosure: public OopClosure {
   G1CollectedHeap* _g1;
 public:
   G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
-  void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
+  void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
   void do_oop( oop* p) {
     oop obj = *p;
 #ifdef G1_DEBUG
@ -3138,7 +3155,6 @@ public:
     if (_g1->obj_in_cs(obj)) {
       assert( obj->is_forwarded(), "invariant" );
       *p = obj->forwardee();
-
 #ifdef G1_DEBUG
       gclog_or_tty->print_cr(" in CSet: moved "PTR_FORMAT" -> "PTR_FORMAT,
                              (void*) obj, (void*) *p);
@ -3155,12 +3171,12 @@ public:
   UpdateRSetImmediate(G1CollectedHeap* g1) :
     _g1(g1), _g1_rem_set(g1->g1_rem_set()) {}

-  void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
-  void do_oop(oop* p) {
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop( oop* p) { do_oop_work(p); }
+  template <class T> void do_oop_work(T* p) {
     assert(_from->is_in_reserved(p), "paranoia");
-    if (*p != NULL && !_from->is_survivor()) {
+    T heap_oop = oopDesc::load_heap_oop(p);
+    if (!oopDesc::is_null(heap_oop) && !_from->is_survivor()) {
       _g1_rem_set->par_write_ref(_from, p, 0);
     }
   }
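Editor's note: the UpdateRSetImmediate rewrite above follows a pattern used throughout this change: the two virtual do_oop overloads (one for oop*, one for narrowOop*) both forward to a single templated do_oop_work, so the per-slot logic is written once and works for either slot width. A minimal standalone sketch of that shape, with placeholder types instead of the real HotSpot ones:

#include <cstdio>

// Placeholder "heap slot" types standing in for oop and narrowOop.
typedef void*        FullRef;
typedef unsigned int NarrowRef;

class RefClosure {
public:
  virtual void do_oop(FullRef* p)   = 0;
  virtual void do_oop(NarrowRef* p) = 0;
  virtual ~RefClosure() {}
};

class PrintClosure : public RefClosure {
public:
  // Both virtual overloads funnel into one templated worker.
  virtual void do_oop(FullRef* p)   { do_oop_work(p); }
  virtual void do_oop(NarrowRef* p) { do_oop_work(p); }

  template <class T> void do_oop_work(T* p) {
    std::printf("visiting a slot of %zu bytes\n", sizeof(*p));
  }
};

int main() {
  FullRef f = 0; NarrowRef n = 0;
  PrintClosure cl;
  cl.do_oop(&f);   // full-width slot
  cl.do_oop(&n);   // compressed slot
  return 0;
}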
@ -3176,12 +3192,12 @@ public:
   UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
     _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {}

-  void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
-  void do_oop(oop* p) {
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop( oop* p) { do_oop_work(p); }
+  template <class T> void do_oop_work(T* p) {
     assert(_from->is_in_reserved(p), "paranoia");
-    if (!_from->is_in_reserved(*p) && !_from->is_survivor()) {
+    if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) &&
+        !_from->is_survivor()) {
       size_t card_index = _ct_bs->index_for(p);
       if (_ct_bs->mark_card_deferred(card_index)) {
         _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
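Editor's note: UpdateRSetDeferred does not touch the remembered set directly; it marks the card covering the updated field as deferred and enqueues that card at most once, to be processed later. A rough standalone sketch of that card-table bookkeeping (card size and names are illustrative, not the HotSpot values):

#include <cstdint>
#include <cstdio>
#include <queue>
#include <vector>

static const size_t kCardShift = 9;          // 512-byte cards, an example value

struct CardTable {
  uintptr_t heap_base;
  std::vector<uint8_t> cards;                // 0 = clean, 1 = deferred

  CardTable(uintptr_t base, size_t n) : heap_base(base), cards(n, 0) {}

  size_t index_for(const void* field) const {
    return ((uintptr_t)field - heap_base) >> kCardShift;
  }
  // True only the first time a card is marked, so each card is enqueued once.
  bool mark_card_deferred(size_t i) {
    if (cards[i] != 0) return false;
    cards[i] = 1;
    return true;
  }
};

int main() {
  std::vector<char> fake_heap(4096);
  CardTable ct((uintptr_t)fake_heap.data(), fake_heap.size() >> kCardShift);
  std::queue<size_t> dirty_cards;

  // Two updates in the same card enqueue it only once.
  for (int off : {40, 48, 1500}) {
    size_t i = ct.index_for(fake_heap.data() + off);
    if (ct.mark_card_deferred(i)) dirty_cards.push(i);
  }
  std::printf("cards queued: %zu\n", dirty_cards.size());  // prints 2
  return 0;
}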
@ -3536,316 +3552,15 @@ void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) {
     fill_with_object(block, free_words);
   }

#define use_local_bitmaps 1
|
|
||||||
#define verify_local_bitmaps 0
|
|
||||||
|
|
||||||
#ifndef PRODUCT
|
#ifndef PRODUCT
|
||||||
|
|
||||||
class GCLabBitMap;
|
|
||||||
class GCLabBitMapClosure: public BitMapClosure {
|
|
||||||
private:
|
|
||||||
ConcurrentMark* _cm;
|
|
||||||
GCLabBitMap* _bitmap;
|
|
||||||
|
|
||||||
public:
|
|
||||||
GCLabBitMapClosure(ConcurrentMark* cm,
|
|
||||||
GCLabBitMap* bitmap) {
|
|
||||||
_cm = cm;
|
|
||||||
_bitmap = bitmap;
|
|
||||||
}
|
|
||||||
|
|
||||||
virtual bool do_bit(size_t offset);
|
|
||||||
};
|
|
||||||
|
|
||||||
#endif // PRODUCT
|
|
||||||
|
|
||||||
#define oop_buffer_length 256
|
|
||||||
|
|
||||||
class GCLabBitMap: public BitMap {
|
|
||||||
private:
|
|
||||||
ConcurrentMark* _cm;
|
|
||||||
|
|
||||||
int _shifter;
|
|
||||||
size_t _bitmap_word_covers_words;
|
|
||||||
|
|
||||||
// beginning of the heap
|
|
||||||
HeapWord* _heap_start;
|
|
||||||
|
|
||||||
// this is the actual start of the GCLab
|
|
||||||
HeapWord* _real_start_word;
|
|
||||||
|
|
||||||
// this is the actual end of the GCLab
|
|
||||||
HeapWord* _real_end_word;
|
|
||||||
|
|
||||||
// this is the first word, possibly located before the actual start
|
|
||||||
// of the GCLab, that corresponds to the first bit of the bitmap
|
|
||||||
HeapWord* _start_word;
|
|
||||||
|
|
||||||
// size of a GCLab in words
|
|
||||||
size_t _gclab_word_size;
|
|
||||||
|
|
||||||
static int shifter() {
|
|
||||||
return MinObjAlignment - 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
// how many heap words does a single bitmap word corresponds to?
|
|
||||||
static size_t bitmap_word_covers_words() {
|
|
||||||
return BitsPerWord << shifter();
|
|
||||||
}
|
|
||||||
|
|
||||||
static size_t gclab_word_size() {
|
|
||||||
return G1ParallelGCAllocBufferSize / HeapWordSize;
|
|
||||||
}
|
|
||||||
|
|
||||||
static size_t bitmap_size_in_bits() {
|
|
||||||
size_t bits_in_bitmap = gclab_word_size() >> shifter();
|
|
||||||
// We are going to ensure that the beginning of a word in this
|
|
||||||
// bitmap also corresponds to the beginning of a word in the
|
|
||||||
// global marking bitmap. To handle the case where a GCLab
|
|
||||||
// starts from the middle of the bitmap, we need to add enough
|
|
||||||
// space (i.e. up to a bitmap word) to ensure that we have
|
|
||||||
// enough bits in the bitmap.
|
|
||||||
return bits_in_bitmap + BitsPerWord - 1;
|
|
||||||
}
|
|
||||||
public:
|
|
||||||
GCLabBitMap(HeapWord* heap_start)
|
|
||||||
: BitMap(bitmap_size_in_bits()),
|
|
||||||
_cm(G1CollectedHeap::heap()->concurrent_mark()),
|
|
||||||
_shifter(shifter()),
|
|
||||||
_bitmap_word_covers_words(bitmap_word_covers_words()),
|
|
||||||
_heap_start(heap_start),
|
|
||||||
_gclab_word_size(gclab_word_size()),
|
|
||||||
_real_start_word(NULL),
|
|
||||||
_real_end_word(NULL),
|
|
||||||
_start_word(NULL)
|
|
||||||
{
|
|
||||||
guarantee( size_in_words() >= bitmap_size_in_words(),
|
|
||||||
"just making sure");
|
|
||||||
}
|
|
||||||
|
|
||||||
inline unsigned heapWordToOffset(HeapWord* addr) {
|
|
||||||
unsigned offset = (unsigned) pointer_delta(addr, _start_word) >> _shifter;
|
|
||||||
assert(offset < size(), "offset should be within bounds");
|
|
||||||
return offset;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline HeapWord* offsetToHeapWord(size_t offset) {
|
|
||||||
HeapWord* addr = _start_word + (offset << _shifter);
|
|
||||||
assert(_real_start_word <= addr && addr < _real_end_word, "invariant");
|
|
||||||
return addr;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool fields_well_formed() {
|
|
||||||
bool ret1 = (_real_start_word == NULL) &&
|
|
||||||
(_real_end_word == NULL) &&
|
|
||||||
(_start_word == NULL);
|
|
||||||
if (ret1)
|
|
||||||
return true;
|
|
||||||
|
|
||||||
bool ret2 = _real_start_word >= _start_word &&
|
|
||||||
_start_word < _real_end_word &&
|
|
||||||
(_real_start_word + _gclab_word_size) == _real_end_word &&
|
|
||||||
(_start_word + _gclab_word_size + _bitmap_word_covers_words)
|
|
||||||
> _real_end_word;
|
|
||||||
return ret2;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline bool mark(HeapWord* addr) {
|
|
||||||
guarantee(use_local_bitmaps, "invariant");
|
|
||||||
assert(fields_well_formed(), "invariant");
|
|
||||||
|
|
||||||
if (addr >= _real_start_word && addr < _real_end_word) {
|
|
||||||
assert(!isMarked(addr), "should not have already been marked");
|
|
||||||
|
|
||||||
// first mark it on the bitmap
|
|
||||||
at_put(heapWordToOffset(addr), true);
|
|
||||||
|
|
||||||
return true;
|
|
||||||
} else {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
inline bool isMarked(HeapWord* addr) {
|
|
||||||
guarantee(use_local_bitmaps, "invariant");
|
|
||||||
assert(fields_well_formed(), "invariant");
|
|
||||||
|
|
||||||
return at(heapWordToOffset(addr));
|
|
||||||
}
|
|
||||||
|
|
||||||
void set_buffer(HeapWord* start) {
|
|
||||||
guarantee(use_local_bitmaps, "invariant");
|
|
||||||
clear();
|
|
||||||
|
|
||||||
assert(start != NULL, "invariant");
|
|
||||||
_real_start_word = start;
|
|
||||||
_real_end_word = start + _gclab_word_size;
|
|
||||||
|
|
||||||
size_t diff =
|
|
||||||
pointer_delta(start, _heap_start) % _bitmap_word_covers_words;
|
|
||||||
_start_word = start - diff;
|
|
||||||
|
|
||||||
assert(fields_well_formed(), "invariant");
|
|
||||||
}
|
|
||||||
|
|
||||||
#ifndef PRODUCT
|
|
||||||
void verify() {
|
|
||||||
// verify that the marks have been propagated
|
|
||||||
GCLabBitMapClosure cl(_cm, this);
|
|
||||||
iterate(&cl);
|
|
||||||
}
|
|
||||||
#endif // PRODUCT
|
|
||||||
|
|
||||||
void retire() {
|
|
||||||
guarantee(use_local_bitmaps, "invariant");
|
|
||||||
assert(fields_well_formed(), "invariant");
|
|
||||||
|
|
||||||
if (_start_word != NULL) {
|
|
||||||
CMBitMap* mark_bitmap = _cm->nextMarkBitMap();
|
|
||||||
|
|
||||||
// this means that the bitmap was set up for the GCLab
|
|
||||||
assert(_real_start_word != NULL && _real_end_word != NULL, "invariant");
|
|
||||||
|
|
||||||
mark_bitmap->mostly_disjoint_range_union(this,
|
|
||||||
0, // always start from the start of the bitmap
|
|
||||||
_start_word,
|
|
||||||
size_in_words());
|
|
||||||
_cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word));
|
|
||||||
|
|
||||||
#ifndef PRODUCT
|
|
||||||
if (use_local_bitmaps && verify_local_bitmaps)
|
|
||||||
verify();
|
|
||||||
#endif // PRODUCT
|
|
||||||
} else {
|
|
||||||
assert(_real_start_word == NULL && _real_end_word == NULL, "invariant");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
static size_t bitmap_size_in_words() {
|
|
||||||
return (bitmap_size_in_bits() + BitsPerWord - 1) / BitsPerWord;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
#ifndef PRODUCT
|
|
||||||
|
|
||||||
bool GCLabBitMapClosure::do_bit(size_t offset) {
|
bool GCLabBitMapClosure::do_bit(size_t offset) {
|
||||||
HeapWord* addr = _bitmap->offsetToHeapWord(offset);
|
HeapWord* addr = _bitmap->offsetToHeapWord(offset);
|
||||||
guarantee(_cm->isMarked(oop(addr)), "it should be!");
|
guarantee(_cm->isMarked(oop(addr)), "it should be!");
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
#endif // PRODUCT
|
#endif // PRODUCT
|
||||||
|
|
||||||
class G1ParGCAllocBuffer: public ParGCAllocBuffer {
|
G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num)
|
||||||
private:
|
|
||||||
bool _retired;
|
|
||||||
bool _during_marking;
|
|
||||||
GCLabBitMap _bitmap;
|
|
||||||
|
|
||||||
public:
|
|
||||||
G1ParGCAllocBuffer() :
|
|
||||||
ParGCAllocBuffer(G1ParallelGCAllocBufferSize / HeapWordSize),
|
|
||||||
_during_marking(G1CollectedHeap::heap()->mark_in_progress()),
|
|
||||||
_bitmap(G1CollectedHeap::heap()->reserved_region().start()),
|
|
||||||
_retired(false)
|
|
||||||
{ }
|
|
||||||
|
|
||||||
inline bool mark(HeapWord* addr) {
|
|
||||||
guarantee(use_local_bitmaps, "invariant");
|
|
||||||
assert(_during_marking, "invariant");
|
|
||||||
return _bitmap.mark(addr);
|
|
||||||
}
|
|
||||||
|
|
||||||
inline void set_buf(HeapWord* buf) {
|
|
||||||
if (use_local_bitmaps && _during_marking)
|
|
||||||
_bitmap.set_buffer(buf);
|
|
||||||
ParGCAllocBuffer::set_buf(buf);
|
|
||||||
_retired = false;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline void retire(bool end_of_gc, bool retain) {
|
|
||||||
if (_retired)
|
|
||||||
return;
|
|
||||||
if (use_local_bitmaps && _during_marking) {
|
|
||||||
_bitmap.retire();
|
|
||||||
}
|
|
||||||
ParGCAllocBuffer::retire(end_of_gc, retain);
|
|
||||||
_retired = true;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
|
|
||||||
class G1ParScanThreadState : public StackObj {
|
|
||||||
protected:
|
|
||||||
G1CollectedHeap* _g1h;
|
|
||||||
RefToScanQueue* _refs;
|
|
||||||
DirtyCardQueue _dcq;
|
|
||||||
CardTableModRefBS* _ct_bs;
|
|
||||||
G1RemSet* _g1_rem;
|
|
||||||
|
|
||||||
typedef GrowableArray<oop*> OverflowQueue;
|
|
||||||
OverflowQueue* _overflowed_refs;
|
|
||||||
|
|
||||||
G1ParGCAllocBuffer _alloc_buffers[GCAllocPurposeCount];
|
|
||||||
ageTable _age_table;
|
|
||||||
|
|
||||||
size_t _alloc_buffer_waste;
|
|
||||||
size_t _undo_waste;
|
|
||||||
|
|
||||||
OopsInHeapRegionClosure* _evac_failure_cl;
|
|
||||||
G1ParScanHeapEvacClosure* _evac_cl;
|
|
||||||
G1ParScanPartialArrayClosure* _partial_scan_cl;
|
|
||||||
|
|
||||||
int _hash_seed;
|
|
||||||
int _queue_num;
|
|
||||||
|
|
||||||
int _term_attempts;
|
|
||||||
#if G1_DETAILED_STATS
|
|
||||||
int _pushes, _pops, _steals, _steal_attempts;
|
|
||||||
int _overflow_pushes;
|
|
||||||
#endif
|
|
||||||
|
|
||||||
double _start;
|
|
||||||
double _start_strong_roots;
|
|
||||||
double _strong_roots_time;
|
|
||||||
double _start_term;
|
|
||||||
double _term_time;
|
|
||||||
|
|
||||||
// Map from young-age-index (0 == not young, 1 is youngest) to
|
|
||||||
// surviving words. base is what we get back from the malloc call
|
|
||||||
size_t* _surviving_young_words_base;
|
|
||||||
// this points into the array, as we use the first few entries for padding
|
|
||||||
size_t* _surviving_young_words;
|
|
||||||
|
|
||||||
#define PADDING_ELEM_NUM (64 / sizeof(size_t))
|
|
||||||
|
|
||||||
void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
|
|
||||||
|
|
||||||
void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
|
|
||||||
|
|
||||||
DirtyCardQueue& dirty_card_queue() { return _dcq; }
|
|
||||||
CardTableModRefBS* ctbs() { return _ct_bs; }
|
|
||||||
|
|
||||||
void immediate_rs_update(HeapRegion* from, oop* p, int tid) {
|
|
||||||
if (!from->is_survivor()) {
|
|
||||||
_g1_rem->par_write_ref(from, p, tid);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void deferred_rs_update(HeapRegion* from, oop* p, int tid) {
|
|
||||||
// If the new value of the field points to the same region or
|
|
||||||
// is the to-space, we don't need to include it in the Rset updates.
|
|
||||||
if (!from->is_in_reserved(*p) && !from->is_survivor()) {
|
|
||||||
size_t card_index = ctbs()->index_for(p);
|
|
||||||
// If the card hasn't been added to the buffer, do it.
|
|
||||||
if (ctbs()->mark_card_deferred(card_index)) {
|
|
||||||
dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
public:
|
|
||||||
G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num)
|
|
||||||
: _g1h(g1h),
|
: _g1h(g1h),
|
||||||
_refs(g1h->task_queue(queue_num)),
|
_refs(g1h->task_queue(queue_num)),
|
||||||
_dcq(&g1h->dirty_card_queue_set()),
|
_dcq(&g1h->dirty_card_queue_set()),
|
||||||
@ -3882,268 +3597,21 @@ public:
|
|||||||
_start = os::elapsedTime();
|
_start = os::elapsedTime();
|
||||||
}
|
}
|
||||||
|
|
||||||
~G1ParScanThreadState() {
|
|
||||||
FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
|
|
||||||
}
|
|
||||||
|
|
||||||
RefToScanQueue* refs() { return _refs; }
|
|
||||||
OverflowQueue* overflowed_refs() { return _overflowed_refs; }
|
|
||||||
ageTable* age_table() { return &_age_table; }
|
|
||||||
|
|
||||||
G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
|
|
||||||
return &_alloc_buffers[purpose];
|
|
||||||
}
|
|
||||||
|
|
||||||
size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
|
|
||||||
size_t undo_waste() { return _undo_waste; }
|
|
||||||
|
|
||||||
void push_on_queue(oop* ref) {
|
|
||||||
assert(ref != NULL, "invariant");
|
|
||||||
assert(has_partial_array_mask(ref) || _g1h->obj_in_cs(*ref), "invariant");
|
|
||||||
|
|
||||||
if (!refs()->push(ref)) {
|
|
||||||
overflowed_refs()->push(ref);
|
|
||||||
IF_G1_DETAILED_STATS(note_overflow_push());
|
|
||||||
} else {
|
|
||||||
IF_G1_DETAILED_STATS(note_push());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void pop_from_queue(oop*& ref) {
|
|
||||||
if (!refs()->pop_local(ref)) {
|
|
||||||
ref = NULL;
|
|
||||||
} else {
|
|
||||||
assert(ref != NULL, "invariant");
|
|
||||||
assert(has_partial_array_mask(ref) || _g1h->obj_in_cs(*ref),
|
|
||||||
"invariant");
|
|
||||||
|
|
||||||
IF_G1_DETAILED_STATS(note_pop());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void pop_from_overflow_queue(oop*& ref) {
|
|
||||||
ref = overflowed_refs()->pop();
|
|
||||||
}
|
|
||||||
|
|
||||||
int refs_to_scan() { return refs()->size(); }
|
|
||||||
int overflowed_refs_to_scan() { return overflowed_refs()->length(); }
|
|
||||||
|
|
||||||
void update_rs(HeapRegion* from, oop* p, int tid) {
|
|
||||||
if (G1DeferredRSUpdate) {
|
|
||||||
deferred_rs_update(from, p, tid);
|
|
||||||
} else {
|
|
||||||
immediate_rs_update(from, p, tid);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
|
|
||||||
|
|
||||||
HeapWord* obj = NULL;
|
|
||||||
if (word_sz * 100 <
|
|
||||||
(size_t)(G1ParallelGCAllocBufferSize / HeapWordSize) *
|
|
||||||
ParallelGCBufferWastePct) {
|
|
||||||
G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
|
|
||||||
add_to_alloc_buffer_waste(alloc_buf->words_remaining());
|
|
||||||
alloc_buf->retire(false, false);
|
|
||||||
|
|
||||||
HeapWord* buf =
|
|
||||||
_g1h->par_allocate_during_gc(purpose, G1ParallelGCAllocBufferSize / HeapWordSize);
|
|
||||||
if (buf == NULL) return NULL; // Let caller handle allocation failure.
|
|
||||||
// Otherwise.
|
|
||||||
alloc_buf->set_buf(buf);
|
|
||||||
|
|
||||||
obj = alloc_buf->allocate(word_sz);
|
|
||||||
assert(obj != NULL, "buffer was definitely big enough...");
|
|
||||||
} else {
|
|
||||||
obj = _g1h->par_allocate_during_gc(purpose, word_sz);
|
|
||||||
}
|
|
||||||
return obj;
|
|
||||||
}
|
|
||||||
|
|
||||||
HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
|
|
||||||
HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
|
|
||||||
if (obj != NULL) return obj;
|
|
||||||
return allocate_slow(purpose, word_sz);
|
|
||||||
}
|
|
||||||
|
|
||||||
void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
|
|
||||||
if (alloc_buffer(purpose)->contains(obj)) {
|
|
||||||
guarantee(alloc_buffer(purpose)->contains(obj + word_sz - 1),
|
|
||||||
"should contain whole object");
|
|
||||||
alloc_buffer(purpose)->undo_allocation(obj, word_sz);
|
|
||||||
} else {
|
|
||||||
CollectedHeap::fill_with_object(obj, word_sz);
|
|
||||||
add_to_undo_waste(word_sz);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
|
|
||||||
_evac_failure_cl = evac_failure_cl;
|
|
||||||
}
|
|
||||||
OopsInHeapRegionClosure* evac_failure_closure() {
|
|
||||||
return _evac_failure_cl;
|
|
||||||
}
|
|
||||||
|
|
||||||
void set_evac_closure(G1ParScanHeapEvacClosure* evac_cl) {
|
|
||||||
_evac_cl = evac_cl;
|
|
||||||
}
|
|
||||||
|
|
||||||
void set_partial_scan_closure(G1ParScanPartialArrayClosure* partial_scan_cl) {
|
|
||||||
_partial_scan_cl = partial_scan_cl;
|
|
||||||
}
|
|
||||||
|
|
||||||
int* hash_seed() { return &_hash_seed; }
|
|
||||||
int queue_num() { return _queue_num; }
|
|
||||||
|
|
||||||
int term_attempts() { return _term_attempts; }
|
|
||||||
void note_term_attempt() { _term_attempts++; }
|
|
||||||
|
|
||||||
#if G1_DETAILED_STATS
|
|
||||||
int pushes() { return _pushes; }
|
|
||||||
int pops() { return _pops; }
|
|
||||||
int steals() { return _steals; }
|
|
||||||
int steal_attempts() { return _steal_attempts; }
|
|
||||||
int overflow_pushes() { return _overflow_pushes; }
|
|
||||||
|
|
||||||
void note_push() { _pushes++; }
|
|
||||||
void note_pop() { _pops++; }
|
|
||||||
void note_steal() { _steals++; }
|
|
||||||
void note_steal_attempt() { _steal_attempts++; }
|
|
||||||
void note_overflow_push() { _overflow_pushes++; }
|
|
||||||
#endif
|
|
||||||
|
|
||||||
void start_strong_roots() {
|
|
||||||
_start_strong_roots = os::elapsedTime();
|
|
||||||
}
|
|
||||||
void end_strong_roots() {
|
|
||||||
_strong_roots_time += (os::elapsedTime() - _start_strong_roots);
|
|
||||||
}
|
|
||||||
double strong_roots_time() { return _strong_roots_time; }
|
|
||||||
|
|
||||||
void start_term_time() {
|
|
||||||
note_term_attempt();
|
|
||||||
_start_term = os::elapsedTime();
|
|
||||||
}
|
|
||||||
void end_term_time() {
|
|
||||||
_term_time += (os::elapsedTime() - _start_term);
|
|
||||||
}
|
|
||||||
double term_time() { return _term_time; }
|
|
||||||
|
|
||||||
double elapsed() {
|
|
||||||
return os::elapsedTime() - _start;
|
|
||||||
}
|
|
||||||
|
|
||||||
size_t* surviving_young_words() {
|
|
||||||
// We add on to hide entry 0 which accumulates surviving words for
|
|
||||||
// age -1 regions (i.e. non-young ones)
|
|
||||||
return _surviving_young_words;
|
|
||||||
}
|
|
||||||
|
|
||||||
void retire_alloc_buffers() {
|
|
||||||
for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
|
|
||||||
size_t waste = _alloc_buffers[ap].words_remaining();
|
|
||||||
add_to_alloc_buffer_waste(waste);
|
|
||||||
_alloc_buffers[ap].retire(true, false);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
private:
|
|
||||||
void deal_with_reference(oop* ref_to_scan) {
|
|
||||||
if (has_partial_array_mask(ref_to_scan)) {
|
|
||||||
_partial_scan_cl->do_oop_nv(ref_to_scan);
|
|
||||||
} else {
|
|
||||||
// Note: we can use "raw" versions of "region_containing" because
|
|
||||||
// "obj_to_scan" is definitely in the heap, and is not in a
|
|
||||||
// humongous region.
|
|
||||||
HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
|
|
||||||
_evac_cl->set_region(r);
|
|
||||||
_evac_cl->do_oop_nv(ref_to_scan);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
public:
|
|
||||||
void trim_queue() {
|
|
||||||
// I've replicated the loop twice, first to drain the overflow
|
|
||||||
// queue, second to drain the task queue. This is better than
|
|
||||||
// having a single loop, which checks both conditions and, inside
|
|
||||||
// it, either pops the overflow queue or the task queue, as each
|
|
||||||
// loop is tighter. Also, the decision to drain the overflow queue
|
|
||||||
// first is not arbitrary, as the overflow queue is not visible
|
|
||||||
// to the other workers, whereas the task queue is. So, we want to
|
|
||||||
// drain the "invisible" entries first, while allowing the other
|
|
||||||
// workers to potentially steal the "visible" entries.
|
|
||||||
|
|
||||||
while (refs_to_scan() > 0 || overflowed_refs_to_scan() > 0) {
|
|
||||||
while (overflowed_refs_to_scan() > 0) {
|
|
||||||
oop *ref_to_scan = NULL;
|
|
||||||
pop_from_overflow_queue(ref_to_scan);
|
|
||||||
assert(ref_to_scan != NULL, "invariant");
|
|
||||||
// We shouldn't have pushed it on the queue if it was not
|
|
||||||
// pointing into the CSet.
|
|
||||||
assert(ref_to_scan != NULL, "sanity");
|
|
||||||
assert(has_partial_array_mask(ref_to_scan) ||
|
|
||||||
_g1h->obj_in_cs(*ref_to_scan), "sanity");
|
|
||||||
|
|
||||||
deal_with_reference(ref_to_scan);
|
|
||||||
}
|
|
||||||
|
|
||||||
while (refs_to_scan() > 0) {
|
|
||||||
oop *ref_to_scan = NULL;
|
|
||||||
pop_from_queue(ref_to_scan);
|
|
||||||
|
|
||||||
if (ref_to_scan != NULL) {
|
|
||||||
// We shouldn't have pushed it on the queue if it was not
|
|
||||||
// pointing into the CSet.
|
|
||||||
assert(has_partial_array_mask(ref_to_scan) ||
|
|
||||||
_g1h->obj_in_cs(*ref_to_scan), "sanity");
|
|
||||||
|
|
||||||
deal_with_reference(ref_to_scan);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
   _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
   _par_scan_state(par_scan_state) { }

-// This closure is applied to the fields of the objects that have just been copied.
-// Should probably be made inline and moved in g1OopClosures.inline.hpp.
-void G1ParScanClosure::do_oop_nv(oop* p) {
-  oop obj = *p;
-
-  if (obj != NULL) {
-    if (_g1->in_cset_fast_test(obj)) {
-      // We're not going to even bother checking whether the object is
-      // already forwarded or not, as this usually causes an immediate
-      // stall. We'll try to prefetch the object (for write, given that
-      // we might need to install the forwarding reference) and we'll
-      // get back to it when pop it from the queue
-      Prefetch::write(obj->mark_addr(), 0);
-      Prefetch::read(obj->mark_addr(), (HeapWordSize*2));
-
-      // slightly paranoid test; I'm trying to catch potential
-      // problems before we go into push_on_queue to know where the
-      // problem is coming from
-      assert(obj == *p, "the value of *p should not have changed");
-      _par_scan_state->push_on_queue(p);
-    } else {
-      _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
-    }
-  }
-}
-
-void G1ParCopyHelper::mark_forwardee(oop* p) {
+template <class T> void G1ParCopyHelper::mark_forwardee(T* p) {
   // This is called _after_ do_oop_work has been called, hence after
   // the object has been relocated to its new location and *p points
   // to its new location.

-  oop thisOop = *p;
-  if (thisOop != NULL) {
-    assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(thisOop)),
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop)) {
+    oop obj = oopDesc::decode_heap_oop(heap_oop);
+    assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj)),
            "shouldn't still be in the CSet if evacuation didn't fail.");
-    HeapWord* addr = (HeapWord*)thisOop;
+    HeapWord* addr = (HeapWord*)obj;
     if (_g1->is_in_g1_reserved(addr))
       _cm->grayRoot(oop(addr));
   }
@ -4226,7 +3694,8 @@ oop G1ParCopyHelper::copy_to_survivor_space(oop old) {

     if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
       arrayOop(old)->set_length(0);
-      _par_scan_state->push_on_queue(set_partial_array_mask(old));
+      oop* old_p = set_partial_array_mask(old);
+      _par_scan_state->push_on_queue(old_p);
     } else {
       // No point in using the slower heap_region_containing() method,
       // given that we know obj is in the heap.
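Editor's note: set_partial_array_mask and the has_partial_array_mask / clear_partial_array_mask checks seen elsewhere in this change tag a queue entry so workers can tell "scan the rest of this object array" apart from an ordinary reference, by setting a low bit that real, aligned pointers never have. A small hedged sketch of that tagging trick (the constant and helper names are illustrative, not the HotSpot definitions):

#include <cassert>
#include <cstdint>

// Aligned pointers have their low bits clear, so bit 0 is free to use as a tag.
static const uintptr_t kPartialArrayMask = 0x1;

inline void* set_partial_array_mask(void* obj) {
  return (void*)((uintptr_t)obj | kPartialArrayMask);
}
inline bool has_partial_array_mask(void* entry) {
  return ((uintptr_t)entry & kPartialArrayMask) != 0;
}
inline void* clear_partial_array_mask(void* entry) {
  return (void*)((uintptr_t)entry & ~kPartialArrayMask);
}

int main() {
  alignas(8) static int fake_object[4];         // stands in for an objArray
  void* task = set_partial_array_mask(fake_object);

  assert(has_partial_array_mask(task));         // recognized as a partial-array task
  assert(clear_partial_array_mask(task) == fake_object);
  assert(!has_partial_array_mask(fake_object)); // plain references stay untagged
  return 0;
}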
@ -4240,11 +3709,11 @@ oop G1ParCopyHelper::copy_to_survivor_space(oop old) {
   return obj;
 }

-template<bool do_gen_barrier, G1Barrier barrier,
-         bool do_mark_forwardee, bool skip_cset_test>
-void G1ParCopyClosure<do_gen_barrier, barrier,
-                      do_mark_forwardee, skip_cset_test>::do_oop_work(oop* p) {
-  oop obj = *p;
+template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee, bool skip_cset_test>
+template <class T>
+void G1ParCopyClosure <do_gen_barrier, barrier, do_mark_forwardee, skip_cset_test>
+::do_oop_work(T* p) {
+  oop obj = oopDesc::load_decode_heap_oop(p);
   assert(barrier != G1BarrierRS || obj != NULL,
          "Precondition: G1BarrierRS implies obj is nonNull");

@ -4261,9 +3730,10 @@ void G1ParCopyClosure<do_gen_barrier, barrier,
          "into CS.", p, (void*) obj);
 #endif
   if (obj->is_forwarded()) {
-    *p = obj->forwardee();
+    oopDesc::encode_store_heap_oop(p, obj->forwardee());
   } else {
-    *p = copy_to_survivor_space(obj);
+    oop copy_oop = copy_to_survivor_space(obj);
+    oopDesc::encode_store_heap_oop(p, copy_oop);
   }
   // When scanning the RS, we only care about objs in CS.
   if (barrier == G1BarrierRS) {
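Editor's note: the direct *p reads and writes above are replaced with oopDesc::load_decode_heap_oop / encode_store_heap_oop so the same closure body works whether the slot holds a full pointer or a 32-bit compressed oop. A simplified standalone model of that encode/decode step (the base, shift, and names are chosen for illustration only, not HotSpot's exact encoding):

#include <cassert>
#include <cstdint>

// Illustrative compressed-pointer scheme: 32-bit offsets from a heap base,
// shifted by 3 because objects are 8-byte aligned.
static const uintptr_t kHeapBase = 0x100000000ULL;
static const int       kShift    = 3;

typedef uint32_t NarrowRef;

inline NarrowRef encode(void* p) {
  if (p == 0) return 0;
  return (NarrowRef)(((uintptr_t)p - kHeapBase) >> kShift);
}

inline void* decode(NarrowRef v) {
  if (v == 0) return 0;
  return (void*)(kHeapBase + ((uintptr_t)v << kShift));
}

int main() {
  void* obj = (void*)(kHeapBase + 0x4A8);   // some 8-byte aligned address
  NarrowRef slot = encode(obj);             // what a narrowOop field would hold
  assert(decode(slot) == obj);              // round-trips back to the address
  return 0;
}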
@ -4282,21 +3752,9 @@ void G1ParCopyClosure<do_gen_barrier, barrier,
 }

 template void G1ParCopyClosure<false, G1BarrierEvac, false, true>::do_oop_work(oop* p);
+template void G1ParCopyClosure<false, G1BarrierEvac, false, true>::do_oop_work(narrowOop* p);

-template<class T> void G1ParScanPartialArrayClosure::process_array_chunk(
-  oop obj, int start, int end) {
-  // process our set of indices (include header in first chunk)
-  assert(start < end, "invariant");
-  T* const base = (T*)objArrayOop(obj)->base();
-  T* const start_addr = (start == 0) ? (T*) obj : base + start;
-  T* const end_addr = base + end;
-  MemRegion mr((HeapWord*)start_addr, (HeapWord*)end_addr);
-  _scanner.set_region(_g1->heap_region_containing(obj));
-  obj->oop_iterate(&_scanner, mr);
-}
-
-void G1ParScanPartialArrayClosure::do_oop_nv(oop* p) {
-  assert(!UseCompressedOops, "Needs to be fixed to work with compressed oops");
+template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) {
   assert(has_partial_array_mask(p), "invariant");
   oop old = clear_partial_array_mask(p);
   assert(old->is_objArray(), "must be obj array");
@ -4316,19 +3774,19 @@ void G1ParScanPartialArrayClosure::do_oop_nv(oop* p) {
     end = start + ParGCArrayScanChunk;
     arrayOop(old)->set_length(end);
     // Push remainder.
-    _par_scan_state->push_on_queue(set_partial_array_mask(old));
+    oop* old_p = set_partial_array_mask(old);
+    assert(arrayOop(old)->length() < obj->length(), "Empty push?");
+    _par_scan_state->push_on_queue(old_p);
   } else {
     // Restore length so that the heap remains parsable in
     // case of evacuation failure.
     arrayOop(old)->set_length(end);
   }
+  _scanner.set_region(_g1->heap_region_containing_raw(obj));
   // process our set of indices (include header in first chunk)
-  process_array_chunk<oop>(obj, start, end);
+  obj->oop_iterate_range(&_scanner, start, end);
 }

-int G1ScanAndBalanceClosure::_nq = 0;

 class G1ParEvacuateFollowersClosure : public VoidClosure {
 protected:
   G1CollectedHeap* _g1h;
@ -4351,21 +3809,28 @@ public:
   void do_void() {
     G1ParScanThreadState* pss = par_scan_state();
     while (true) {
-      oop* ref_to_scan;
       pss->trim_queue();
       IF_G1_DETAILED_STATS(pss->note_steal_attempt());
-      if (queues()->steal(pss->queue_num(),
-                          pss->hash_seed(),
-                          ref_to_scan)) {
+      StarTask stolen_task;
+      if (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
         IF_G1_DETAILED_STATS(pss->note_steal());

         // slightly paranoid tests; I'm trying to catch potential
         // problems before we go into push_on_queue to know where the
         // problem is coming from
-        assert(ref_to_scan != NULL, "invariant");
-        assert(has_partial_array_mask(ref_to_scan) ||
-               _g1h->obj_in_cs(*ref_to_scan), "invariant");
-        pss->push_on_queue(ref_to_scan);
+        assert((oop*)stolen_task != NULL, "Error");
+        if (stolen_task.is_narrow()) {
+          assert(UseCompressedOops, "Error");
+          narrowOop* p = (narrowOop*) stolen_task;
+          assert(has_partial_array_mask(p) ||
+                 _g1h->obj_in_cs(oopDesc::load_decode_heap_oop(p)), "Error");
+          pss->push_on_queue(p);
+        } else {
+          oop* p = (oop*) stolen_task;
+          assert(has_partial_array_mask(p) || _g1h->obj_in_cs(*p), "Error");
+          pss->push_on_queue(p);
+        }
         continue;
       }
       pss->start_term_time();
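Editor's note: StarTask (also used in the RefToScanQueue typedef change later in this diff) lets one work-stealing queue carry both oop* and narrowOop* slots; the entry remembers which kind it holds (is_narrow()) so the stealing worker can cast it back correctly. A rough standalone sketch of such a tagged task, using bit 0 of the stored pointer as the width flag (an assumed layout, not HotSpot's exact one):

#include <cassert>
#include <cstdint>

typedef long FullRef;     // stands in for oop
typedef int  NarrowRef;   // stands in for narrowOop

// One queue entry that can point at either slot width; the low bit of the
// stored value records which constructor was used.
class StarLikeTask {
  uintptr_t _holder;
public:
  StarLikeTask() : _holder(0) {}
  explicit StarLikeTask(FullRef* p)   : _holder((uintptr_t)p) {}
  explicit StarLikeTask(NarrowRef* p) : _holder((uintptr_t)p | 1) {}

  bool is_narrow() const { return (_holder & 1) != 0; }

  FullRef*   as_full()   const { assert(!is_narrow()); return (FullRef*)_holder; }
  NarrowRef* as_narrow() const { assert(is_narrow());  return (NarrowRef*)(_holder & ~(uintptr_t)1); }
};

int main() {
  FullRef f = 0; NarrowRef n = 0;
  StarLikeTask a(&f), b(&n);
  assert(!a.is_narrow() && a.as_full() == &f);
  assert( b.is_narrow() && b.as_narrow() == &n);
  return 0;
}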
@ -4382,6 +3847,7 @@ protected:
   G1CollectedHeap* _g1h;
   RefToScanQueueSet *_queues;
   ParallelTaskTerminator _terminator;
+  int _n_workers;

   Mutex _stats_lock;
   Mutex* stats_lock() { return &_stats_lock; }
@ -4397,7 +3863,8 @@ public:
     _g1h(g1h),
     _queues(task_queues),
     _terminator(workers, _queues),
-    _stats_lock(Mutex::leaf, "parallel G1 stats lock", true)
+    _stats_lock(Mutex::leaf, "parallel G1 stats lock", true),
+    _n_workers(workers)
   {}

   RefToScanQueueSet* queues() { return _queues; }
@ -4407,6 +3874,7 @@ public:
   }

   void work(int i) {
+    if (i >= _n_workers) return; // no work needed this round
     ResourceMark rm;
     HandleMark hm;

@ -4504,23 +3972,6 @@ public:

 // *** Common G1 Evacuation Stuff

-class G1CountClosure: public OopsInHeapRegionClosure {
-public:
-  int n;
-  G1CountClosure() : n(0) {}
-  void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
-  void do_oop(oop* p) {
-    oop obj = *p;
-    assert(obj != NULL && G1CollectedHeap::heap()->obj_in_cs(obj),
-           "Rem set closure called on non-rem-set pointer.");
-    n++;
-  }
-};
-
-static G1CountClosure count_closure;
-
 void
 G1CollectedHeap::
 g1_process_strong_roots(bool collecting_perm_gen,
@ -5570,8 +5021,3 @@ bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
 void G1CollectedHeap::g1_unimplemented() {
   // Unimplemented();
 }
-
-
-// Local Variables: ***
-// c-indentation-style: gnu ***
-// End: ***

@ -56,8 +56,8 @@ class ConcurrentZFThread;
 # define IF_G1_DETAILED_STATS(code)
 #endif

-typedef GenericTaskQueue<oop*> RefToScanQueue;
-typedef GenericTaskQueueSet<oop*> RefToScanQueueSet;
+typedef GenericTaskQueue<StarTask> RefToScanQueue;
+typedef GenericTaskQueueSet<StarTask> RefToScanQueueSet;

 typedef int RegionIdx_t; // needs to hold [ 0..max_regions() )
 typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion )
@ -700,6 +700,9 @@ public:
   size_t g1_reserved_obj_bytes() { return _g1_reserved.byte_size(); }
   virtual size_t capacity() const;
   virtual size_t used() const;
+  // This should be called when we're not holding the heap lock. The
+  // result might be a bit inaccurate.
+  size_t used_unlocked() const;
   size_t recalculate_used() const;
 #ifndef PRODUCT
   size_t recalculate_used_regions() const;
@ -1271,6 +1274,552 @@ public:
|
|||||||
|
|
||||||
};
|
};
|
||||||
|
|
||||||
// Local Variables: ***
|
#define use_local_bitmaps 1
|
||||||
// c-indentation-style: gnu ***
|
#define verify_local_bitmaps 0
|
||||||
// End: ***
|
#define oop_buffer_length 256
|
||||||
|
|
||||||
|
#ifndef PRODUCT
|
||||||
|
class GCLabBitMap;
|
||||||
|
class GCLabBitMapClosure: public BitMapClosure {
|
||||||
|
private:
|
||||||
|
ConcurrentMark* _cm;
|
||||||
|
GCLabBitMap* _bitmap;
|
||||||
|
|
||||||
|
public:
|
||||||
|
GCLabBitMapClosure(ConcurrentMark* cm,
|
||||||
|
GCLabBitMap* bitmap) {
|
||||||
|
_cm = cm;
|
||||||
|
_bitmap = bitmap;
|
||||||
|
}
|
||||||
|
|
||||||
|
virtual bool do_bit(size_t offset);
|
||||||
|
};
|
||||||
|
#endif // !PRODUCT
|
||||||
|
|
||||||
|
class GCLabBitMap: public BitMap {
|
||||||
|
private:
|
||||||
|
ConcurrentMark* _cm;
|
||||||
|
|
||||||
|
int _shifter;
|
||||||
|
size_t _bitmap_word_covers_words;
|
||||||
|
|
||||||
|
// beginning of the heap
|
||||||
|
HeapWord* _heap_start;
|
||||||
|
|
||||||
|
// this is the actual start of the GCLab
|
||||||
|
HeapWord* _real_start_word;
|
||||||
|
|
||||||
|
// this is the actual end of the GCLab
|
||||||
|
HeapWord* _real_end_word;
|
||||||
|
|
||||||
|
// this is the first word, possibly located before the actual start
|
||||||
|
// of the GCLab, that corresponds to the first bit of the bitmap
|
||||||
|
HeapWord* _start_word;
|
||||||
|
|
||||||
|
// size of a GCLab in words
|
||||||
|
size_t _gclab_word_size;
|
||||||
|
|
||||||
|
static int shifter() {
|
||||||
|
return MinObjAlignment - 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// how many heap words does a single bitmap word corresponds to?
|
||||||
|
static size_t bitmap_word_covers_words() {
|
||||||
|
return BitsPerWord << shifter();
|
||||||
|
}
|
||||||
|
|
||||||
|
static size_t gclab_word_size() {
|
||||||
|
return G1ParallelGCAllocBufferSize / HeapWordSize;
|
||||||
|
}
|
||||||
|
|
||||||
|
static size_t bitmap_size_in_bits() {
|
||||||
|
size_t bits_in_bitmap = gclab_word_size() >> shifter();
|
||||||
|
// We are going to ensure that the beginning of a word in this
|
||||||
|
// bitmap also corresponds to the beginning of a word in the
|
||||||
|
// global marking bitmap. To handle the case where a GCLab
|
||||||
|
// starts from the middle of the bitmap, we need to add enough
|
||||||
|
// space (i.e. up to a bitmap word) to ensure that we have
|
||||||
|
// enough bits in the bitmap.
|
||||||
|
return bits_in_bitmap + BitsPerWord - 1;
|
||||||
|
}
|
||||||
|
public:
|
||||||
|
GCLabBitMap(HeapWord* heap_start)
|
||||||
|
: BitMap(bitmap_size_in_bits()),
|
||||||
|
_cm(G1CollectedHeap::heap()->concurrent_mark()),
|
||||||
|
_shifter(shifter()),
|
||||||
|
_bitmap_word_covers_words(bitmap_word_covers_words()),
|
||||||
|
_heap_start(heap_start),
|
||||||
|
_gclab_word_size(gclab_word_size()),
|
||||||
|
_real_start_word(NULL),
|
||||||
|
_real_end_word(NULL),
|
||||||
|
_start_word(NULL)
|
||||||
|
{
|
||||||
|
guarantee( size_in_words() >= bitmap_size_in_words(),
|
||||||
|
"just making sure");
|
||||||
|
}
|
||||||
|
|
||||||
|
inline unsigned heapWordToOffset(HeapWord* addr) {
|
||||||
|
unsigned offset = (unsigned) pointer_delta(addr, _start_word) >> _shifter;
|
||||||
|
assert(offset < size(), "offset should be within bounds");
|
||||||
|
return offset;
|
||||||
|
}
|
||||||
|
|
||||||
|
inline HeapWord* offsetToHeapWord(size_t offset) {
|
||||||
|
HeapWord* addr = _start_word + (offset << _shifter);
|
||||||
|
assert(_real_start_word <= addr && addr < _real_end_word, "invariant");
|
||||||
|
return addr;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool fields_well_formed() {
|
||||||
|
bool ret1 = (_real_start_word == NULL) &&
|
||||||
|
(_real_end_word == NULL) &&
|
||||||
|
(_start_word == NULL);
|
||||||
|
if (ret1)
|
||||||
|
return true;
|
||||||
|
|
||||||
|
bool ret2 = _real_start_word >= _start_word &&
|
||||||
|
_start_word < _real_end_word &&
|
||||||
|
(_real_start_word + _gclab_word_size) == _real_end_word &&
|
||||||
|
(_start_word + _gclab_word_size + _bitmap_word_covers_words)
|
||||||
|
> _real_end_word;
|
||||||
|
return ret2;
|
||||||
|
}
|
||||||
|
|
||||||
|
inline bool mark(HeapWord* addr) {
|
||||||
|
guarantee(use_local_bitmaps, "invariant");
|
||||||
|
assert(fields_well_formed(), "invariant");
|
||||||
|
|
||||||
|
if (addr >= _real_start_word && addr < _real_end_word) {
|
||||||
|
assert(!isMarked(addr), "should not have already been marked");
|
||||||
|
|
||||||
|
// first mark it on the bitmap
|
||||||
|
at_put(heapWordToOffset(addr), true);
|
||||||
|
|
||||||
|
return true;
|
||||||
|
} else {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
inline bool isMarked(HeapWord* addr) {
|
||||||
|
guarantee(use_local_bitmaps, "invariant");
|
||||||
|
assert(fields_well_formed(), "invariant");
|
||||||
|
|
||||||
|
return at(heapWordToOffset(addr));
|
||||||
|
}
|
||||||
|
|
||||||
|
void set_buffer(HeapWord* start) {
|
||||||
|
guarantee(use_local_bitmaps, "invariant");
|
||||||
|
clear();
|
||||||
|
|
||||||
|
assert(start != NULL, "invariant");
|
||||||
|
_real_start_word = start;
|
||||||
|
_real_end_word = start + _gclab_word_size;
|
||||||
|
|
||||||
|
size_t diff =
|
||||||
|
pointer_delta(start, _heap_start) % _bitmap_word_covers_words;
|
||||||
|
_start_word = start - diff;
|
||||||
|
|
||||||
|
assert(fields_well_formed(), "invariant");
|
||||||
|
}
|
||||||
|
|
||||||
|
#ifndef PRODUCT
|
||||||
|
void verify() {
|
||||||
|
// verify that the marks have been propagated
|
||||||
|
GCLabBitMapClosure cl(_cm, this);
|
||||||
|
iterate(&cl);
|
||||||
|
}
|
||||||
|
#endif // PRODUCT
|
||||||
|
|
||||||
|
void retire() {
|
||||||
|
guarantee(use_local_bitmaps, "invariant");
|
||||||
|
assert(fields_well_formed(), "invariant");
|
||||||
|
|
||||||
|
if (_start_word != NULL) {
|
||||||
|
CMBitMap* mark_bitmap = _cm->nextMarkBitMap();
|
||||||
|
|
||||||
|
// this means that the bitmap was set up for the GCLab
|
||||||
|
assert(_real_start_word != NULL && _real_end_word != NULL, "invariant");
|
||||||
|
|
||||||
|
mark_bitmap->mostly_disjoint_range_union(this,
|
||||||
|
0, // always start from the start of the bitmap
|
||||||
|
_start_word,
|
||||||
|
size_in_words());
|
||||||
|
_cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word));
|
||||||
|
|
||||||
|
#ifndef PRODUCT
|
||||||
|
if (use_local_bitmaps && verify_local_bitmaps)
|
||||||
|
verify();
|
||||||
|
#endif // PRODUCT
|
||||||
|
} else {
|
||||||
|
assert(_real_start_word == NULL && _real_end_word == NULL, "invariant");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static size_t bitmap_size_in_words() {
|
||||||
|
return (bitmap_size_in_bits() + BitsPerWord - 1) / BitsPerWord;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
class G1ParGCAllocBuffer: public ParGCAllocBuffer {
|
||||||
|
private:
|
||||||
|
bool _retired;
|
||||||
|
bool _during_marking;
|
||||||
|
GCLabBitMap _bitmap;
|
||||||
|
|
||||||
|
public:
|
||||||
|
G1ParGCAllocBuffer() :
|
||||||
|
ParGCAllocBuffer(G1ParallelGCAllocBufferSize / HeapWordSize),
|
||||||
|
_during_marking(G1CollectedHeap::heap()->mark_in_progress()),
|
||||||
|
_bitmap(G1CollectedHeap::heap()->reserved_region().start()),
|
||||||
|
_retired(false)
|
||||||
|
{ }
|
||||||
|
|
||||||
|
inline bool mark(HeapWord* addr) {
|
||||||
|
guarantee(use_local_bitmaps, "invariant");
|
||||||
|
assert(_during_marking, "invariant");
|
||||||
|
return _bitmap.mark(addr);
|
||||||
|
}
|
||||||
|
|
||||||
|
inline void set_buf(HeapWord* buf) {
|
||||||
|
if (use_local_bitmaps && _during_marking)
|
||||||
|
_bitmap.set_buffer(buf);
|
||||||
|
ParGCAllocBuffer::set_buf(buf);
|
||||||
|
_retired = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
inline void retire(bool end_of_gc, bool retain) {
|
||||||
|
if (_retired)
|
||||||
|
return;
|
||||||
|
if (use_local_bitmaps && _during_marking) {
|
||||||
|
_bitmap.retire();
|
||||||
|
}
|
||||||
|
ParGCAllocBuffer::retire(end_of_gc, retain);
|
||||||
|
_retired = true;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
class G1ParScanThreadState : public StackObj {
|
||||||
|
protected:
|
||||||
|
G1CollectedHeap* _g1h;
|
||||||
|
RefToScanQueue* _refs;
|
||||||
|
DirtyCardQueue _dcq;
|
||||||
|
CardTableModRefBS* _ct_bs;
|
||||||
|
G1RemSet* _g1_rem;
|
||||||
|
|
||||||
|
typedef GrowableArray<StarTask> OverflowQueue;
|
||||||
|
OverflowQueue* _overflowed_refs;
|
||||||
|
|
||||||
|
G1ParGCAllocBuffer _alloc_buffers[GCAllocPurposeCount];
|
||||||
|
ageTable _age_table;
|
||||||
|
|
||||||
|
size_t _alloc_buffer_waste;
|
||||||
|
size_t _undo_waste;
|
||||||
|
|
||||||
|
OopsInHeapRegionClosure* _evac_failure_cl;
|
||||||
|
G1ParScanHeapEvacClosure* _evac_cl;
|
||||||
|
G1ParScanPartialArrayClosure* _partial_scan_cl;
|
||||||
|
|
||||||
|
int _hash_seed;
|
||||||
|
int _queue_num;
|
||||||
|
|
||||||
|
int _term_attempts;
|
||||||
|
#if G1_DETAILED_STATS
|
||||||
|
int _pushes, _pops, _steals, _steal_attempts;
|
||||||
|
int _overflow_pushes;
|
||||||
|
#endif
|
||||||
|
|
||||||
|
double _start;
|
||||||
|
double _start_strong_roots;
|
||||||
|
double _strong_roots_time;
|
||||||
|
double _start_term;
|
||||||
|
double _term_time;
|
||||||
|
|
||||||
|
// Map from young-age-index (0 == not young, 1 is youngest) to
|
||||||
|
// surviving words. base is what we get back from the malloc call
|
||||||
|
size_t* _surviving_young_words_base;
|
||||||
|
// this points into the array, as we use the first few entries for padding
|
||||||
|
size_t* _surviving_young_words;
|
||||||
|
|
||||||
|
#define PADDING_ELEM_NUM (64 / sizeof(size_t))
|
||||||
|
|
||||||
|
void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
|
||||||
|
|
||||||
|
void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
|
||||||
|
|
||||||
|
DirtyCardQueue& dirty_card_queue() { return _dcq; }
|
||||||
|
CardTableModRefBS* ctbs() { return _ct_bs; }
|
||||||
|
|
||||||
|
template <class T> void immediate_rs_update(HeapRegion* from, T* p, int tid) {
|
||||||
|
if (!from->is_survivor()) {
|
||||||
|
_g1_rem->par_write_ref(from, p, tid);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
|
||||||
|
// If the new value of the field points to the same region or
|
||||||
|
// is the to-space, we don't need to include it in the Rset updates.
|
||||||
|
if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
|
||||||
|
size_t card_index = ctbs()->index_for(p);
|
||||||
|
// If the card hasn't been added to the buffer, do it.
|
||||||
|
if (ctbs()->mark_card_deferred(card_index)) {
|
||||||
|
dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public:
|
||||||
|
G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num);
|
||||||
|
|
||||||
|
~G1ParScanThreadState() {
|
||||||
|
FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
|
||||||
|
}
|
||||||
|
|
||||||
|
RefToScanQueue* refs() { return _refs; }
|
||||||
|
OverflowQueue* overflowed_refs() { return _overflowed_refs; }
|
||||||
|
ageTable* age_table() { return &_age_table; }
|
||||||
|
|
||||||
|
G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
|
||||||
|
return &_alloc_buffers[purpose];
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
|
||||||
|
size_t undo_waste() { return _undo_waste; }
|
||||||
|
|
||||||
|
template <class T> void push_on_queue(T* ref) {
|
||||||
|
assert(ref != NULL, "invariant");
|
||||||
|
assert(has_partial_array_mask(ref) ||
|
||||||
|
_g1h->obj_in_cs(oopDesc::load_decode_heap_oop(ref)), "invariant");
|
||||||
|
#ifdef ASSERT
|
||||||
|
if (has_partial_array_mask(ref)) {
|
||||||
|
oop p = clear_partial_array_mask(ref);
|
||||||
|
// Verify that we point into the CS
|
||||||
|
assert(_g1h->obj_in_cs(p), "Should be in CS");
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
if (!refs()->push(ref)) {
|
||||||
|
overflowed_refs()->push(ref);
|
||||||
|
IF_G1_DETAILED_STATS(note_overflow_push());
|
||||||
|
} else {
|
||||||
|
IF_G1_DETAILED_STATS(note_push());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void pop_from_queue(StarTask& ref) {
|
||||||
|
if (refs()->pop_local(ref)) {
|
||||||
|
assert((oop*)ref != NULL, "pop_local() returned true");
|
||||||
|
assert(UseCompressedOops || !ref.is_narrow(), "Error");
|
||||||
|
assert(has_partial_array_mask((oop*)ref) ||
|
||||||
|
_g1h->obj_in_cs(ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)ref)
|
||||||
|
: oopDesc::load_decode_heap_oop((oop*)ref)),
|
||||||
|
"invariant");
|
||||||
|
IF_G1_DETAILED_STATS(note_pop());
|
||||||
|
} else {
|
||||||
|
StarTask null_task;
|
||||||
|
ref = null_task;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void pop_from_overflow_queue(StarTask& ref) {
|
||||||
|
StarTask new_ref = overflowed_refs()->pop();
|
||||||
|
assert((oop*)new_ref != NULL, "pop() from a local non-empty stack");
|
||||||
|
assert(UseCompressedOops || !new_ref.is_narrow(), "Error");
|
||||||
|
assert(has_partial_array_mask((oop*)new_ref) ||
|
||||||
|
_g1h->obj_in_cs(new_ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)new_ref)
|
||||||
|
: oopDesc::load_decode_heap_oop((oop*)new_ref)),
|
||||||
|
"invariant");
|
||||||
|
ref = new_ref;
|
||||||
|
}
|
||||||
|
|
||||||
|
int refs_to_scan() { return refs()->size(); }
|
||||||
|
int overflowed_refs_to_scan() { return overflowed_refs()->length(); }
|
||||||
|
|
||||||
|
template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
|
||||||
|
if (G1DeferredRSUpdate) {
|
||||||
|
deferred_rs_update(from, p, tid);
|
||||||
|
} else {
|
||||||
|
immediate_rs_update(from, p, tid);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
|
||||||
|
|
||||||
|
HeapWord* obj = NULL;
|
||||||
|
if (word_sz * 100 <
|
||||||
|
(size_t)(G1ParallelGCAllocBufferSize / HeapWordSize) *
|
||||||
|
ParallelGCBufferWastePct) {
|
||||||
|
G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
|
||||||
|
add_to_alloc_buffer_waste(alloc_buf->words_remaining());
|
||||||
|
alloc_buf->retire(false, false);
|
||||||
|
|
||||||
|
HeapWord* buf =
|
||||||
|
_g1h->par_allocate_during_gc(purpose, G1ParallelGCAllocBufferSize / HeapWordSize);
|
||||||
|
+      if (buf == NULL) return NULL; // Let caller handle allocation failure.
+      // Otherwise.
+      alloc_buf->set_buf(buf);
+
+      obj = alloc_buf->allocate(word_sz);
+      assert(obj != NULL, "buffer was definitely big enough...");
+    } else {
+      obj = _g1h->par_allocate_during_gc(purpose, word_sz);
+    }
+    return obj;
+  }
+
+  HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
+    HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
+    if (obj != NULL) return obj;
+    return allocate_slow(purpose, word_sz);
+  }
+
+  void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
+    if (alloc_buffer(purpose)->contains(obj)) {
+      assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
+             "should contain whole object");
+      alloc_buffer(purpose)->undo_allocation(obj, word_sz);
+    } else {
+      CollectedHeap::fill_with_object(obj, word_sz);
+      add_to_undo_waste(word_sz);
+    }
+  }
+
+  void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
+    _evac_failure_cl = evac_failure_cl;
+  }
+  OopsInHeapRegionClosure* evac_failure_closure() {
+    return _evac_failure_cl;
+  }
+
+  void set_evac_closure(G1ParScanHeapEvacClosure* evac_cl) {
+    _evac_cl = evac_cl;
+  }
+
+  void set_partial_scan_closure(G1ParScanPartialArrayClosure* partial_scan_cl) {
+    _partial_scan_cl = partial_scan_cl;
+  }
+
+  int* hash_seed() { return &_hash_seed; }
+  int  queue_num() { return _queue_num; }
+
+  int  term_attempts()     { return _term_attempts; }
+  void note_term_attempt() { _term_attempts++; }
+
+#if G1_DETAILED_STATS
+  int pushes()          { return _pushes; }
+  int pops()            { return _pops; }
+  int steals()          { return _steals; }
+  int steal_attempts()  { return _steal_attempts; }
+  int overflow_pushes() { return _overflow_pushes; }
+
+  void note_push()          { _pushes++; }
+  void note_pop()           { _pops++; }
+  void note_steal()         { _steals++; }
+  void note_steal_attempt() { _steal_attempts++; }
+  void note_overflow_push() { _overflow_pushes++; }
+#endif
+
+  void start_strong_roots() {
+    _start_strong_roots = os::elapsedTime();
+  }
+  void end_strong_roots() {
+    _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
+  }
+  double strong_roots_time() { return _strong_roots_time; }
+
+  void start_term_time() {
+    note_term_attempt();
+    _start_term = os::elapsedTime();
+  }
+  void end_term_time() {
+    _term_time += (os::elapsedTime() - _start_term);
+  }
+  double term_time() { return _term_time; }
+
+  double elapsed() {
+    return os::elapsedTime() - _start;
+  }
+
+  size_t* surviving_young_words() {
+    // We add on to hide entry 0 which accumulates surviving words for
+    // age -1 regions (i.e. non-young ones)
+    return _surviving_young_words;
+  }
+
+  void retire_alloc_buffers() {
+    for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
+      size_t waste = _alloc_buffers[ap].words_remaining();
+      add_to_alloc_buffer_waste(waste);
+      _alloc_buffers[ap].retire(true, false);
+    }
+  }
+
+private:
+  template <class T> void deal_with_reference(T* ref_to_scan) {
+    if (has_partial_array_mask(ref_to_scan)) {
+      _partial_scan_cl->do_oop_nv(ref_to_scan);
+    } else {
+      // Note: we can use "raw" versions of "region_containing" because
+      // "obj_to_scan" is definitely in the heap, and is not in a
+      // humongous region.
+      HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
+      _evac_cl->set_region(r);
+      _evac_cl->do_oop_nv(ref_to_scan);
+    }
+  }
+
+public:
+  void trim_queue() {
+    // I've replicated the loop twice, first to drain the overflow
+    // queue, second to drain the task queue. This is better than
+    // having a single loop, which checks both conditions and, inside
+    // it, either pops the overflow queue or the task queue, as each
+    // loop is tighter. Also, the decision to drain the overflow queue
+    // first is not arbitrary, as the overflow queue is not visible
+    // to the other workers, whereas the task queue is. So, we want to
+    // drain the "invisible" entries first, while allowing the other
+    // workers to potentially steal the "visible" entries.
+
+    while (refs_to_scan() > 0 || overflowed_refs_to_scan() > 0) {
+      while (overflowed_refs_to_scan() > 0) {
+        StarTask ref_to_scan;
+        assert((oop*)ref_to_scan == NULL, "Constructed above");
+        pop_from_overflow_queue(ref_to_scan);
+        // We shouldn't have pushed it on the queue if it was not
+        // pointing into the CSet.
+        assert((oop*)ref_to_scan != NULL, "Follows from inner loop invariant");
+        if (ref_to_scan.is_narrow()) {
+          assert(UseCompressedOops, "Error");
+          narrowOop* p = (narrowOop*)ref_to_scan;
+          assert(!has_partial_array_mask(p) &&
+                 _g1h->obj_in_cs(oopDesc::load_decode_heap_oop(p)), "sanity");
+          deal_with_reference(p);
+        } else {
+          oop* p = (oop*)ref_to_scan;
+          assert((has_partial_array_mask(p) && _g1h->obj_in_cs(clear_partial_array_mask(p))) ||
+                 _g1h->obj_in_cs(oopDesc::load_decode_heap_oop(p)), "sanity");
+          deal_with_reference(p);
+        }
+      }
+
+      while (refs_to_scan() > 0) {
+        StarTask ref_to_scan;
+        assert((oop*)ref_to_scan == NULL, "Constructed above");
+        pop_from_queue(ref_to_scan);
+        if ((oop*)ref_to_scan != NULL) {
+          if (ref_to_scan.is_narrow()) {
+            assert(UseCompressedOops, "Error");
+            narrowOop* p = (narrowOop*)ref_to_scan;
+            assert(!has_partial_array_mask(p) &&
+                   _g1h->obj_in_cs(oopDesc::load_decode_heap_oop(p)), "sanity");
+            deal_with_reference(p);
+          } else {
+            oop* p = (oop*)ref_to_scan;
+            assert((has_partial_array_mask(p) && _g1h->obj_in_cs(clear_partial_array_mask(p))) ||
+                   _g1h->obj_in_cs(oopDesc::load_decode_heap_oop(p)), "sanity");
+            deal_with_reference(p);
+          }
+        }
+      }
+    }
+  }
+};
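The two nested drain loops in trim_queue() above implement the ordering spelled out in its comment: a worker first empties its private overflow list, which no other worker can see, and only then works on the shared task queue that others may steal from. The following is a minimal standalone sketch of that control flow, assuming invented container types (std::deque/std::queue) in place of the VM's queues; it is an illustration, not the VM's implementation.

#include <deque>
#include <queue>

// Toy model of the drain order in trim_queue(): private (unstealable)
// overflow entries are processed before entries on the shared task queue.
struct ToyScanState {
  std::deque<int> overflow;    // visible only to this worker
  std::queue<int> task_queue;  // other workers could steal from the real one

  void deal_with(int /*ref*/) { /* scan the reference, possibly push more work */ }

  void trim() {
    while (!task_queue.empty() || !overflow.empty()) {
      while (!overflow.empty()) {        // drain the "invisible" entries first...
        int ref = overflow.front();
        overflow.pop_front();
        deal_with(ref);
      }
      while (!task_queue.empty()) {      // ...then the stealable ones
        int ref = task_queue.front();
        task_queue.pop();
        deal_with(ref);
      }
    }
  }
};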
@ -293,10 +293,6 @@ void G1CollectorPolicy::init() {
  if (G1SteadyStateUsed < 50) {
    vm_exit_during_initialization("G1SteadyStateUsed must be at least 50%.");
  }
-  if (UseConcMarkSweepGC) {
-    vm_exit_during_initialization("-XX:+UseG1GC is incompatible with "
-                                  "-XX:+UseConcMarkSweepGC.");
-  }

  initialize_gc_policy_counters();

@ -42,18 +42,6 @@ public:
  virtual void set_region(HeapRegion* from) { _from = from; }
};

-class G1ScanAndBalanceClosure : public OopClosure {
-  G1CollectedHeap* _g1;
-  static int _nq;
-public:
-  G1ScanAndBalanceClosure(G1CollectedHeap* g1) : _g1(g1) { }
-  inline void do_oop_nv(oop* p);
-  inline void do_oop_nv(narrowOop* p) { guarantee(false, "NYI"); }
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p) { guarantee(false, "NYI"); }
-};
-
class G1ParClosureSuper : public OopsInHeapRegionClosure {
protected:
  G1CollectedHeap* _g1;
@ -69,34 +57,32 @@ class G1ParScanClosure : public G1ParClosureSuper {
public:
  G1ParScanClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
    G1ParClosureSuper(g1, par_scan_state) { }
-  void do_oop_nv(oop* p);   // should be made inline
-  inline void do_oop_nv(narrowOop* p) { guarantee(false, "NYI"); }
+  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p)          { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p)    { do_oop_nv(p); }
};

-#define G1_PARTIAL_ARRAY_MASK 1
+#define G1_PARTIAL_ARRAY_MASK 0x2

-inline bool has_partial_array_mask(oop* ref) {
-  return (intptr_t) ref & G1_PARTIAL_ARRAY_MASK;
+template <class T> inline bool has_partial_array_mask(T* ref) {
+  return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
}

-inline oop* set_partial_array_mask(oop obj) {
-  return (oop*) ((intptr_t) obj | G1_PARTIAL_ARRAY_MASK);
+template <class T> inline T* set_partial_array_mask(T obj) {
+  assert(((uintptr_t)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
+  return (T*) ((uintptr_t)obj | G1_PARTIAL_ARRAY_MASK);
}

-inline oop clear_partial_array_mask(oop* ref) {
+template <class T> inline oop clear_partial_array_mask(T* ref) {
  return oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
}
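The three helpers above tag a queue entry that stands for a partially scanned object array by setting a low bit inside the pointer value itself; has/set/clear form a round trip. The sketch below shows that round trip in isolation, modelling the entry as a plain uintptr_t and assuming only that entries are at least 4-byte aligned so bit 0x2 is free; all names here are invented for the example.

#include <cassert>
#include <cstdint>
#include <cstdio>

static const uintptr_t PARTIAL_ARRAY_MASK = 0x2;  // mirrors G1_PARTIAL_ARRAY_MASK

static uintptr_t set_mask(uintptr_t p)   { assert((p & PARTIAL_ARRAY_MASK) == 0); return p | PARTIAL_ARRAY_MASK; }
static bool      has_mask(uintptr_t p)   { return (p & PARTIAL_ARRAY_MASK) == PARTIAL_ARRAY_MASK; }
static uintptr_t clear_mask(uintptr_t p) { return p & ~PARTIAL_ARRAY_MASK; }

int main() {
  uintptr_t obj = 0x10a000;            // stand-in for an aligned heap address
  uintptr_t tagged = set_mask(obj);    // what push_on_queue() would store
  assert(has_mask(tagged) && !has_mask(obj));
  assert(clear_mask(tagged) == obj);   // deal_with_reference() recovers the address
  printf("tag round-trip ok\n");
  return 0;
}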

class G1ParScanPartialArrayClosure : public G1ParClosureSuper {
  G1ParScanClosure _scanner;
-  template <class T> void process_array_chunk(oop obj, int start, int end);
public:
  G1ParScanPartialArrayClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
    G1ParClosureSuper(g1, par_scan_state), _scanner(g1, par_scan_state) { }
-  void do_oop_nv(oop* p);
-  void do_oop_nv(narrowOop* p) { guarantee(false, "NYI"); }
+  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p)       { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};
@ -105,7 +91,7 @@ public:
class G1ParCopyHelper : public G1ParClosureSuper {
  G1ParScanClosure *_scanner;
protected:
-  void mark_forwardee(oop* p);
+  template <class T> void mark_forwardee(T* p);
  oop copy_to_survivor_space(oop obj);
public:
  G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,
@ -117,36 +103,35 @@ template<bool do_gen_barrier, G1Barrier barrier,
         bool do_mark_forwardee, bool skip_cset_test>
class G1ParCopyClosure : public G1ParCopyHelper {
  G1ParScanClosure _scanner;
-  void do_oop_work(oop* p);
-  void do_oop_work(narrowOop* p) { guarantee(false, "NYI"); }
+  template <class T> void do_oop_work(T* p);
public:
  G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
    _scanner(g1, par_scan_state), G1ParCopyHelper(g1, par_scan_state, &_scanner) { }
-  inline void do_oop_nv(oop* p) {
+  template <class T> void do_oop_nv(T* p) {
    do_oop_work(p);
    if (do_mark_forwardee)
      mark_forwardee(p);
  }
-  inline void do_oop_nv(narrowOop* p) { guarantee(false, "NYI"); }
  virtual void do_oop(oop* p)       { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};

typedef G1ParCopyClosure<false, G1BarrierNone, false, false> G1ParScanExtRootClosure;
typedef G1ParCopyClosure<true,  G1BarrierNone, false, false> G1ParScanPermClosure;
+typedef G1ParCopyClosure<false, G1BarrierRS,   false, false> G1ParScanHeapRSClosure;
typedef G1ParCopyClosure<false, G1BarrierNone, true, false> G1ParScanAndMarkExtRootClosure;
typedef G1ParCopyClosure<true,  G1BarrierNone, true, false> G1ParScanAndMarkPermClosure;
-typedef G1ParCopyClosure<false, G1BarrierRS, false, false> G1ParScanHeapRSClosure;
typedef G1ParCopyClosure<false, G1BarrierRS, true, false> G1ParScanAndMarkHeapRSClosure;
// This is the only case when we set skip_cset_test. Basically, this
// closure is (should?) only be called directly while we're draining
// the overflow and task queues. In that case we know that the
// reference in question points into the collection set, otherwise we
-// would not have pushed it on the queue.
-typedef G1ParCopyClosure<false, G1BarrierEvac, false, true> G1ParScanHeapEvacClosure;
+// would not have pushed it on the queue. The following is defined in
+// g1_specialized_oop_closures.hpp.
+// typedef G1ParCopyClosure<false, G1BarrierEvac, false, true> G1ParScanHeapEvacClosure;
// We need a separate closure to handle references during evacuation
-// failure processing, as it cannot assume that the reference already
-// points to the collection set (like G1ParScanHeapEvacClosure does).
+// failure processing, as we cannot assume that the reference already
+// points into the collection set (like G1ParScanHeapEvacClosure does).
typedef G1ParCopyClosure<false, G1BarrierEvac, false, false> G1ParScanHeapEvacFailureClosure;

class FilterIntoCSClosure: public OopClosure {
@ -158,10 +143,9 @@ public:
                      G1CollectedHeap* g1, OopClosure* oc) :
    _dcto_cl(dcto_cl), _g1(g1), _oc(oc)
  {}
-  inline void do_oop_nv(oop* p);
-  inline void do_oop_nv(narrowOop* p) { guarantee(false, "NYI"); }
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p) { guarantee(false, "NYI"); }
+  template <class T> void do_oop_nv(T* p);
+  virtual void do_oop(oop* p)        { do_oop_nv(p); }
+  virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
  bool apply_to_weak_ref_discovered_field() { return true; }
  bool do_header() { return false; }
};
@ -174,10 +158,9 @@ public:
                                    OopsInHeapRegionClosure* oc) :
    _g1(g1), _oc(oc)
  {}
-  inline void do_oop_nv(oop* p);
-  inline void do_oop_nv(narrowOop* p) { guarantee(false, "NYI"); }
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p) { guarantee(false, "NYI"); }
+  template <class T> void do_oop_nv(T* p);
+  virtual void do_oop(oop* p)        { do_oop_nv(p); }
+  virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
  bool apply_to_weak_ref_discovered_field() { return true; }
  bool do_header() { return false; }
  void set_region(HeapRegion* from) {
@ -195,10 +178,9 @@ public:
                                         ConcurrentMark* cm)
  : _g1(g1), _oc(oc), _cm(cm) { }

-  inline void do_oop_nv(oop* p);
-  inline void do_oop_nv(narrowOop* p) { guarantee(false, "NYI"); }
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p) { guarantee(false, "NYI"); }
+  template <class T> void do_oop_nv(T* p);
+  virtual void do_oop(oop* p)        { do_oop_nv(p); }
+  virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
  bool apply_to_weak_ref_discovered_field() { return true; }
  bool do_header() { return false; }
  void set_region(HeapRegion* from) {
@ -213,10 +195,9 @@ class FilterOutOfRegionClosure: public OopClosure {
  int _out_of_region;
public:
  FilterOutOfRegionClosure(HeapRegion* r, OopClosure* oc);
-  inline void do_oop_nv(oop* p);
-  inline void do_oop_nv(narrowOop* p) { guarantee(false, "NYI"); }
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p) { guarantee(false, "NYI"); }
+  template <class T> void do_oop_nv(T* p);
+  virtual void do_oop(oop* p)        { do_oop_nv(p); }
+  virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
  bool apply_to_weak_ref_discovered_field() { return true; }
  bool do_header() { return false; }
  int out_of_region() { return _out_of_region; }
@ -31,9 +31,10 @@
// perf-critical inner loop.
#define FILTERINTOCSCLOSURE_DOHISTOGRAMCOUNT 0

-inline void FilterIntoCSClosure::do_oop_nv(oop* p) {
-  oop obj = *p;
-  if (obj != NULL && _g1->obj_in_cs(obj)) {
+template <class T> inline void FilterIntoCSClosure::do_oop_nv(T* p) {
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop) &&
+      _g1->obj_in_cs(oopDesc::decode_heap_oop_not_null(heap_oop))) {
    _oc->do_oop(p);
#if FILTERINTOCSCLOSURE_DOHISTOGRAMCOUNT
    _dcto_cl->incr_count();
@ -41,44 +42,32 @@ inline void FilterIntoCSClosure::do_oop_nv(oop* p) {
  }
}
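Each templated do_oop_nv above follows the same load/is_null/decode idiom: read the (possibly narrow) cell once with load_heap_oop, test it for null while it is still in its encoded form, and only then decode it to a full pointer. The sketch below is a toy model of why the two steps stay separate under compressed oops; heap_base, the shift and the type names are invented values, not the VM's actual encoding.

#include <cassert>
#include <cstdint>

typedef uint32_t toyNarrowOop;                  // stand-in for narrowOop
static const uintptr_t heap_base = 0x70000000UL;

static bool      is_null(toyNarrowOop n)         { return n == 0; }
static uintptr_t decode_not_null(toyNarrowOop n) { assert(n != 0); return heap_base + (uintptr_t(n) << 3); }

int main() {
  toyNarrowOop cell = 0;                        // a narrow field that is currently NULL
  if (!is_null(cell)) {
    uintptr_t obj = decode_not_null(cell);      // decode only after the null check,
    (void)obj;                                  // since decoding 0 would not yield NULL
  }
  cell = 42;                                    // now "points" at heap_base + 42*8
  assert(decode_not_null(cell) == heap_base + (42u << 3));
  return 0;
}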

-inline void FilterIntoCSClosure::do_oop(oop* p)
-{
-  do_oop_nv(p);
-}
-
#define FILTEROUTOFREGIONCLOSURE_DOHISTOGRAMCOUNT 0

-inline void FilterOutOfRegionClosure::do_oop_nv(oop* p) {
-  oop obj = *p;
-  HeapWord* obj_hw = (HeapWord*)obj;
-  if (obj_hw != NULL && (obj_hw < _r_bottom || obj_hw >= _r_end)) {
+template <class T> inline void FilterOutOfRegionClosure::do_oop_nv(T* p) {
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop)) {
+    HeapWord* obj_hw = (HeapWord*)oopDesc::decode_heap_oop_not_null(heap_oop);
+    if (obj_hw < _r_bottom || obj_hw >= _r_end) {
      _oc->do_oop(p);
#if FILTEROUTOFREGIONCLOSURE_DOHISTOGRAMCOUNT
      _out_of_region++;
#endif
    }
  }

-inline void FilterOutOfRegionClosure::do_oop(oop* p)
-{
-  do_oop_nv(p);
}

-inline void FilterInHeapRegionAndIntoCSClosure::do_oop_nv(oop* p) {
-  oop obj = *p;
-  if (obj != NULL && _g1->obj_in_cs(obj))
+template <class T> inline void FilterInHeapRegionAndIntoCSClosure::do_oop_nv(T* p) {
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop) &&
+      _g1->obj_in_cs(oopDesc::decode_heap_oop_not_null(heap_oop)))
    _oc->do_oop(p);
}

-inline void FilterInHeapRegionAndIntoCSClosure::do_oop(oop* p)
-{
-  do_oop_nv(p);
-}
-
-inline void FilterAndMarkInHeapRegionAndIntoCSClosure::do_oop_nv(oop* p) {
-  oop obj = *p;
-  if (obj != NULL) {
+template <class T> inline void FilterAndMarkInHeapRegionAndIntoCSClosure::do_oop_nv(T* p) {
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop)) {
+    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    HeapRegion* hr = _g1->heap_region_containing((HeapWord*) obj);
    if (hr != NULL) {
      if (hr->in_collection_set())
@ -89,24 +78,29 @@ inline void FilterAndMarkInHeapRegionAndIntoCSClosure::do_oop_nv(oop* p) {
  }
}

-inline void FilterAndMarkInHeapRegionAndIntoCSClosure::do_oop(oop* p)
-{
-  do_oop_nv(p);
-}
+// This closure is applied to the fields of the objects that have just been copied.
+template <class T> inline void G1ParScanClosure::do_oop_nv(T* p) {
+  T heap_oop = oopDesc::load_heap_oop(p);

-inline void G1ScanAndBalanceClosure::do_oop_nv(oop* p) {
-  RefToScanQueue* q;
-  if (ParallelGCThreads > 0) {
-    // Deal the work out equally.
-    _nq = (_nq + 1) % ParallelGCThreads;
-    q = _g1->task_queue(_nq);
+  if (!oopDesc::is_null(heap_oop)) {
+    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    if (_g1->in_cset_fast_test(obj)) {
+      // We're not going to even bother checking whether the object is
+      // already forwarded or not, as this usually causes an immediate
+      // stall. We'll try to prefetch the object (for write, given that
+      // we might need to install the forwarding reference) and we'll
+      // get back to it when we pop it from the queue
+      Prefetch::write(obj->mark_addr(), 0);
+      Prefetch::read(obj->mark_addr(), (HeapWordSize*2));
+
+      // slightly paranoid test; I'm trying to catch potential
+      // problems before we go into push_on_queue to know where the
+      // problem is coming from
+      assert(obj == oopDesc::load_decode_heap_oop(p),
+             "p should still be pointing to obj");
+      _par_scan_state->push_on_queue(p);
    } else {
-      q = _g1->task_queue(0);
+      _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
    }
-  bool nooverflow = q->push(p);
-  guarantee(nooverflow, "Overflow during poplularity region processing");
  }
}

-inline void G1ScanAndBalanceClosure::do_oop(oop* p) {
-  do_oop_nv(p);
}
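The new G1ParScanClosure::do_oop_nv above chooses between two cheap actions instead of copying right away: if the fast collection-set test says the object will move, it prefetches the mark word and pushes the reference on the worker's queue for later; otherwise it only updates the remembered set. Below is a hedged sketch of that decision in isolation; every name is invented (the real test is _g1->in_cset_fast_test and the queue lives in G1ParScanThreadState), and __builtin_prefetch is a GCC/Clang builtin standing in for Prefetch::write.

#include <cstdint>
#include <deque>

// Toy illustration of the "defer instead of stall" idea.
struct ToyWorker {
  uintptr_t cset_lo, cset_hi;          // pretend collection-set address range
  std::deque<uintptr_t*> queue;        // deferred fields, scanned later

  bool in_cset(uintptr_t obj) const { return obj >= cset_lo && obj < cset_hi; }
  void update_rs(uintptr_t* /*field*/) { /* record a cross-region reference */ }

  void visit(uintptr_t* field) {
    uintptr_t obj = *field;
    if (obj == 0) return;
    if (in_cset(obj)) {
      __builtin_prefetch((void*)obj, 1); // warm the line we will need when copying
      queue.push_back(field);            // copy the object later, when popped
    } else {
      update_rs(field);                  // object will not move; just note the ref
    }
  }
};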
@ -65,11 +65,10 @@ public:
  void set_region(HeapRegion* from) {
    _blk->set_region(from);
  }
-  virtual void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
-  virtual void do_oop(oop* p) {
-    oop obj = *p;
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(      oop* p) { do_oop_work(p); }
+  template <class T> void do_oop_work(T* p) {
+    oop obj = oopDesc::load_decode_heap_oop(p);
    if (_g1->obj_in_cs(obj)) _blk->do_oop(p);
  }
  bool apply_to_weak_ref_discovered_field() { return true; }
@ -110,11 +109,10 @@ class VerifyRSCleanCardOopClosure: public OopClosure {
public:
  VerifyRSCleanCardOopClosure(G1CollectedHeap* g1) : _g1(g1) {}

-  virtual void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
-  virtual void do_oop(oop* p) {
-    oop obj = *p;
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(      oop* p) { do_oop_work(p); }
+  template <class T> void do_oop_work(T* p) {
+    oop obj = oopDesc::load_decode_heap_oop(p);
    HeapRegion* to = _g1->heap_region_containing(obj);
    guarantee(to == NULL || !to->in_collection_set(),
              "Missed a rem set member.");
@ -129,9 +127,9 @@ HRInto_G1RemSet::HRInto_G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
{
  _seq_task = new SubTasksDone(NumSeqTasks);
  guarantee(n_workers() > 0, "There should be some workers");
-  _new_refs = NEW_C_HEAP_ARRAY(GrowableArray<oop*>*, n_workers());
+  _new_refs = NEW_C_HEAP_ARRAY(GrowableArray<OopOrNarrowOopStar>*, n_workers());
  for (uint i = 0; i < n_workers(); i++) {
-    _new_refs[i] = new (ResourceObj::C_HEAP) GrowableArray<oop*>(8192,true);
+    _new_refs[i] = new (ResourceObj::C_HEAP) GrowableArray<OopOrNarrowOopStar>(8192,true);
  }
}

@ -140,7 +138,7 @@ HRInto_G1RemSet::~HRInto_G1RemSet() {
  for (uint i = 0; i < n_workers(); i++) {
    delete _new_refs[i];
  }
-  FREE_C_HEAP_ARRAY(GrowableArray<oop*>*, _new_refs);
+  FREE_C_HEAP_ARRAY(GrowableArray<OopOrNarrowOopStar>*, _new_refs);
}

void CountNonCleanMemRegionClosure::do_MemRegion(MemRegion mr) {
@ -428,15 +426,15 @@ public:
  }
};

-void
-HRInto_G1RemSet::scanNewRefsRS(OopsInHeapRegionClosure* oc,
+template <class T> void
+HRInto_G1RemSet::scanNewRefsRS_work(OopsInHeapRegionClosure* oc,
                               int worker_i) {
  double scan_new_refs_start_sec = os::elapsedTime();
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  CardTableModRefBS* ct_bs = (CardTableModRefBS*) (g1h->barrier_set());
  for (int i = 0; i < _new_refs[worker_i]->length(); i++) {
-    oop* p = _new_refs[worker_i]->at(i);
-    oop obj = *p;
+    T* p = (T*) _new_refs[worker_i]->at(i);
+    oop obj = oopDesc::load_decode_heap_oop(p);
    // *p was in the collection set when p was pushed on "_new_refs", but
    // another thread may have processed this location from an RS, so it
    // might not point into the CS any longer. If so, it's obviously been
@ -549,11 +547,10 @@ class UpdateRSetOopsIntoCSImmediate : public OopClosure {
  G1CollectedHeap* _g1;
public:
  UpdateRSetOopsIntoCSImmediate(G1CollectedHeap* g1) : _g1(g1) { }
-  virtual void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
-  virtual void do_oop(oop* p) {
-    HeapRegion* to = _g1->heap_region_containing(*p);
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(      oop* p) { do_oop_work(p); }
+  template <class T> void do_oop_work(T* p) {
+    HeapRegion* to = _g1->heap_region_containing(oopDesc::load_decode_heap_oop(p));
    if (to->in_collection_set()) {
      to->rem_set()->add_reference(p, 0);
    }
@ -567,11 +564,10 @@ class UpdateRSetOopsIntoCSDeferred : public OopClosure {
public:
  UpdateRSetOopsIntoCSDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
    _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) { }
-  virtual void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
-  virtual void do_oop(oop* p) {
-    oop obj = *p;
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(      oop* p) { do_oop_work(p); }
+  template <class T> void do_oop_work(T* p) {
+    oop obj = oopDesc::load_decode_heap_oop(p);
    if (_g1->obj_in_cs(obj)) {
      size_t card_index = _ct_bs->index_for(p);
      if (_ct_bs->mark_card_deferred(card_index)) {
@ -581,10 +577,10 @@ public:
  }
};

-void HRInto_G1RemSet::new_refs_iterate(OopClosure* cl) {
+template <class T> void HRInto_G1RemSet::new_refs_iterate_work(OopClosure* cl) {
  for (size_t i = 0; i < n_workers(); i++) {
    for (int j = 0; j < _new_refs[i]->length(); j++) {
-      oop* p = _new_refs[i]->at(j);
+      T* p = (T*) _new_refs[i]->at(j);
      cl->do_oop(p);
    }
  }
@ -62,10 +62,12 @@ public:
  // If "this" is of the given subtype, return "this", else "NULL".
  virtual HRInto_G1RemSet* as_HRInto_G1RemSet() { return NULL; }

-  // Record, if necessary, the fact that *p (where "p" is in region "from")
-  // has changed to its new value.
+  // Record, if necessary, the fact that *p (where "p" is in region "from",
+  // and is, a fortiori, required to be non-NULL) has changed to its new value.
  virtual void write_ref(HeapRegion* from, oop* p) = 0;
+  virtual void write_ref(HeapRegion* from, narrowOop* p) = 0;
  virtual void par_write_ref(HeapRegion* from, oop* p, int tid) = 0;
+  virtual void par_write_ref(HeapRegion* from, narrowOop* p, int tid) = 0;

  // Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region
  // or card, respectively, such that a region or card with a corresponding
@ -105,7 +107,9 @@ public:

  // Nothing is necessary in the version below.
  void write_ref(HeapRegion* from, oop* p) {}
+  void write_ref(HeapRegion* from, narrowOop* p) {}
  void par_write_ref(HeapRegion* from, oop* p, int tid) {}
+  void par_write_ref(HeapRegion* from, narrowOop* p, int tid) {}

  void scrub(BitMap* region_bm, BitMap* card_bm) {}
  void scrub_par(BitMap* region_bm, BitMap* card_bm,
@ -143,8 +147,19 @@ protected:
  // their references into the collection summarized in "_new_refs".
  bool _par_traversal_in_progress;
  void set_par_traversal(bool b) { _par_traversal_in_progress = b; }
-  GrowableArray<oop*>** _new_refs;
-  void new_refs_iterate(OopClosure* cl);
+  GrowableArray<OopOrNarrowOopStar>** _new_refs;
+  template <class T> void new_refs_iterate_work(OopClosure* cl);
+  void new_refs_iterate(OopClosure* cl) {
+    if (UseCompressedOops) {
+      new_refs_iterate_work<narrowOop>(cl);
+    } else {
+      new_refs_iterate_work<oop>(cl);
+    }
+  }
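The new_refs_iterate() wrapper just added above shows the dispatch pattern used throughout this change: _new_refs stores untyped OopOrNarrowOopStar entries, and a non-template entry point picks the template instantiation at run time from UseCompressedOops. The following is a small self-contained model of the same pattern with invented names (ToySlot32/ToySlot64 stand in for narrowOop/oop); it is a sketch, not the VM's code.

#include <cstdio>
#include <vector>

static bool UseToyCompressed = true;
typedef unsigned int  ToySlot32;   // stands in for narrowOop
typedef unsigned long ToySlot64;   // stands in for oop

static std::vector<void*> slots;   // stands in for GrowableArray<OopOrNarrowOopStar>

template <class T> void visit_slots_work() {
  for (size_t i = 0; i < slots.size(); i++) {
    T* p = (T*) slots[i];          // reinterpret the untyped entry at its real width
    printf("slot %zu holds %lu\n", i, (unsigned long)*p);
  }
}

void visit_slots() {               // the non-template entry point
  if (UseToyCompressed) {
    visit_slots_work<ToySlot32>();
  } else {
    visit_slots_work<ToySlot64>();
  }
}

int main() {
  ToySlot32 a = 7, b = 9;
  slots.push_back(&a);
  slots.push_back(&b);
  visit_slots();
  return 0;
}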

+protected:
+  template <class T> void write_ref_nv(HeapRegion* from, T* p);
+  template <class T> void par_write_ref_nv(HeapRegion* from, T* p, int tid);
+
public:
  // This is called to reset dual hash tables after the gc pause
@ -161,7 +176,14 @@ public:
  void prepare_for_oops_into_collection_set_do();
  void cleanup_after_oops_into_collection_set_do();
  void scanRS(OopsInHeapRegionClosure* oc, int worker_i);
-  void scanNewRefsRS(OopsInHeapRegionClosure* oc, int worker_i);
+  template <class T> void scanNewRefsRS_work(OopsInHeapRegionClosure* oc, int worker_i);
+  void scanNewRefsRS(OopsInHeapRegionClosure* oc, int worker_i) {
+    if (UseCompressedOops) {
+      scanNewRefsRS_work<narrowOop>(oc, worker_i);
+    } else {
+      scanNewRefsRS_work<oop>(oc, worker_i);
+    }
+  }
  void updateRS(int worker_i);
  HeapRegion* calculateStartRegion(int i);

@ -172,12 +194,22 @@ public:

  // Record, if necessary, the fact that *p (where "p" is in region "from",
  // which is required to be non-NULL) has changed to a new non-NULL value.
-  inline void write_ref(HeapRegion* from, oop* p);
-  // The "_nv" version is the same; it exists just so that it is not virtual.
-  inline void write_ref_nv(HeapRegion* from, oop* p);
+  // [Below the virtual version calls a non-virtual protected
+  // workhorse that is templatified for narrow vs wide oop.]
+  inline void write_ref(HeapRegion* from, oop* p) {
+    write_ref_nv(from, p);
+  }
+  inline void write_ref(HeapRegion* from, narrowOop* p) {
+    write_ref_nv(from, p);
+  }
+  inline void par_write_ref(HeapRegion* from, oop* p, int tid) {
+    par_write_ref_nv(from, p, tid);
+  }
+  inline void par_write_ref(HeapRegion* from, narrowOop* p, int tid) {
+    par_write_ref_nv(from, p, tid);
+  }

-  inline bool self_forwarded(oop obj);
-  inline void par_write_ref(HeapRegion* from, oop* p, int tid);
+  bool self_forwarded(oop obj);

  void scrub(BitMap* region_bm, BitMap* card_bm);
  void scrub_par(BitMap* region_bm, BitMap* card_bm,
@ -208,6 +240,9 @@ class UpdateRSOopClosure: public OopClosure {
  HeapRegion* _from;
  HRInto_G1RemSet* _rs;
  int _worker_i;

+  template <class T> void do_oop_work(T* p);
+
public:
  UpdateRSOopClosure(HRInto_G1RemSet* rs, int worker_i = 0) :
    _from(NULL), _rs(rs), _worker_i(worker_i) {
@ -219,11 +254,10 @@ public:
    _from = from;
  }

-  virtual void do_oop(narrowOop* p);
-  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }

  // Override: this closure is idempotent.
  // bool idempotent() { return true; }
  bool apply_to_weak_ref_discovered_field() { return true; }
};

@ -30,12 +30,8 @@ inline size_t G1RemSet::n_workers() {
  }
}

-inline void HRInto_G1RemSet::write_ref_nv(HeapRegion* from, oop* p) {
-  par_write_ref(from, p, 0);
-}
-
-inline void HRInto_G1RemSet::write_ref(HeapRegion* from, oop* p) {
-  write_ref_nv(from, p);
+template <class T> inline void HRInto_G1RemSet::write_ref_nv(HeapRegion* from, T* p) {
+  par_write_ref_nv(from, p, 0);
}

inline bool HRInto_G1RemSet::self_forwarded(oop obj) {
@ -43,8 +39,8 @@ inline bool HRInto_G1RemSet::self_forwarded(oop obj) {
  return result;
}

-inline void HRInto_G1RemSet::par_write_ref(HeapRegion* from, oop* p, int tid) {
-  oop obj = *p;
+template <class T> inline void HRInto_G1RemSet::par_write_ref_nv(HeapRegion* from, T* p, int tid) {
+  oop obj = oopDesc::load_decode_heap_oop(p);
#ifdef ASSERT
  // can't do because of races
  // assert(obj == NULL || obj->is_oop(), "expected an oop");
@ -71,7 +67,7 @@ inline void HRInto_G1RemSet::par_write_ref(HeapRegion* from, oop* p, int tid) {
    // false during the evacuation failure handing.
    if (_par_traversal_in_progress &&
        to->in_collection_set() && !self_forwarded(obj)) {
-      _new_refs[tid]->push(p);
+      _new_refs[tid]->push((void*)p);
      // Deferred updates to the Cset are either discarded (in the normal case),
      // or processed (if an evacuation failure occurs) at the end
      // of the collection.
@ -89,11 +85,7 @@ inline void HRInto_G1RemSet::par_write_ref(HeapRegion* from, oop* p, int tid) {
  }
}

-inline void UpdateRSOopClosure::do_oop(narrowOop* p) {
-  guarantee(false, "NYI");
-}
-
-inline void UpdateRSOopClosure::do_oop(oop* p) {
+template <class T> inline void UpdateRSOopClosure::do_oop_work(T* p) {
  assert(_from != NULL, "from region must be non-NULL");
  _rs->par_write_ref(_from, p, _worker_i);
}

@ -34,6 +34,7 @@ G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(MemRegion whole_heap,

void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
+  assert(pre_val->is_oop_or_null(true), "Error");
  if (!JavaThread::satb_mark_queue_set().active()) return;
  Thread* thr = Thread::current();
  if (thr->is_Java_thread()) {
@ -46,31 +47,30 @@ void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
}

// When we know the current java thread:
-void
-G1SATBCardTableModRefBS::write_ref_field_pre_static(void* field,
-                                                    oop newVal,
+template <class T> void
+G1SATBCardTableModRefBS::write_ref_field_pre_static(T* field,
+                                                    oop new_val,
                                                    JavaThread* jt) {
  if (!JavaThread::satb_mark_queue_set().active()) return;
-  assert(!UseCompressedOops, "Else will need to modify this to deal with narrowOop");
-  oop preVal = *(oop*)field;
-  if (preVal != NULL) {
-    jt->satb_mark_queue().enqueue(preVal);
+  T heap_oop = oopDesc::load_heap_oop(field);
+  if (!oopDesc::is_null(heap_oop)) {
+    oop pre_val = oopDesc::decode_heap_oop_not_null(heap_oop);
+    assert(pre_val->is_oop(true /* ignore mark word */), "Error");
+    jt->satb_mark_queue().enqueue(pre_val);
  }
}

-void
-G1SATBCardTableModRefBS::write_ref_array_pre(MemRegion mr) {
+template <class T> void
+G1SATBCardTableModRefBS::write_ref_array_pre_work(T* dst, int count) {
  if (!JavaThread::satb_mark_queue_set().active()) return;
-  assert(!UseCompressedOops, "Else will need to modify this to deal with narrowOop");
-  oop* elem_ptr = (oop*)mr.start();
-  while ((HeapWord*)elem_ptr < mr.end()) {
-    oop elem = *elem_ptr;
-    if (elem != NULL) enqueue(elem);
-    elem_ptr++;
+  T* elem_ptr = dst;
+  for (int i = 0; i < count; i++, elem_ptr++) {
+    T heap_oop = oopDesc::load_heap_oop(elem_ptr);
+    if (!oopDesc::is_null(heap_oop)) {
+      enqueue(oopDesc::decode_heap_oop_not_null(heap_oop));
+    }
  }
}
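write_ref_field_pre_static above is G1's SATB (snapshot-at-the-beginning) pre-write barrier: before a reference field is overwritten, the value it still holds is pushed onto the thread's SATB queue so concurrent marking keeps seeing the snapshot taken when marking started. The sketch below models just that logging step; satb_queue, Obj and barrier_active are invented stand-ins, not HotSpot APIs.

#include <cstddef>
#include <vector>

struct Obj;
static std::vector<Obj*> satb_queue;   // per-thread in the real VM
static bool barrier_active = true;     // only while concurrent marking runs

static void write_ref_field(Obj** field, Obj* new_val) {
  if (barrier_active) {
    Obj* pre_val = *field;             // the value the marker may still need
    if (pre_val != NULL) {
      satb_queue.push_back(pre_val);   // log it before it becomes unreachable
    }
  }
  *field = new_val;                    // the actual store happens after logging
}

int main() {
  Obj* a = reinterpret_cast<Obj*>(0x1000);  // fake addresses for the sketch
  Obj* b = reinterpret_cast<Obj*>(0x2000);
  Obj* field = a;
  write_ref_field(&field, b);
  return satb_queue.size() == 1 ? 0 : 1;
}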

G1SATBCardTableLoggingModRefBS::
G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
@ -47,31 +47,41 @@ public:

  // This notes that we don't need to access any BarrierSet data
  // structures, so this can be called from a static context.
-  static void write_ref_field_pre_static(void* field, oop newVal) {
-    assert(!UseCompressedOops, "Else needs to be templatized");
-    oop preVal = *((oop*)field);
-    if (preVal != NULL) {
-      enqueue(preVal);
+  template <class T> static void write_ref_field_pre_static(T* field, oop newVal) {
+    T heap_oop = oopDesc::load_heap_oop(field);
+    if (!oopDesc::is_null(heap_oop)) {
+      enqueue(oopDesc::decode_heap_oop(heap_oop));
    }
  }

  // When we know the current java thread:
-  static void write_ref_field_pre_static(void* field, oop newVal,
+  template <class T> static void write_ref_field_pre_static(T* field, oop newVal,
                                         JavaThread* jt);

  // We export this to make it available in cases where the static
  // type of the barrier set is known. Note that it is non-virtual.
-  inline void inline_write_ref_field_pre(void* field, oop newVal) {
+  template <class T> inline void inline_write_ref_field_pre(T* field, oop newVal) {
    write_ref_field_pre_static(field, newVal);
  }

-  // This is the more general virtual version.
-  void write_ref_field_pre_work(void* field, oop new_val) {
+  // These are the more general virtual versions.
+  virtual void write_ref_field_pre_work(oop* field, oop new_val) {
    inline_write_ref_field_pre(field, new_val);
  }
+  virtual void write_ref_field_pre_work(narrowOop* field, oop new_val) {
+    inline_write_ref_field_pre(field, new_val);
+  }
+  virtual void write_ref_field_pre_work(void* field, oop new_val) {
+    guarantee(false, "Not needed");
+  }

-  virtual void write_ref_array_pre(MemRegion mr);
+  template <class T> void write_ref_array_pre_work(T* dst, int count);
+  virtual void write_ref_array_pre(oop* dst, int count) {
+    write_ref_array_pre_work(dst, count);
+  }
+  virtual void write_ref_array_pre(narrowOop* dst, int count) {
+    write_ref_array_pre_work(dst, count);
+  }
};

// Adds card-table logging to the post-barrier.
@ -80,9 +80,6 @@
  develop(bool, G1TraceConcurrentRefinement, false,                         \
          "Trace G1 concurrent refinement")                                 \
                                                                            \
-  develop(bool, G1ConcMark, true,                                          \
-          "If true, run concurrent marking for G1")                        \
-                                                                           \
  product(intx, G1MarkStackSize, 2 * 1024 * 1024,                           \
          "Size of the mark stack for concurrent marking.")                 \
                                                                            \
@ -37,14 +37,12 @@ template<bool do_gen_barrier, G1Barrier barrier,
class G1ParCopyClosure;
class G1ParScanClosure;

-typedef G1ParCopyClosure<false, G1BarrierEvac, false, true>
-                                              G1ParScanHeapEvacClosure;
+typedef G1ParCopyClosure<false, G1BarrierEvac, false, true> G1ParScanHeapEvacClosure;

class FilterIntoCSClosure;
class FilterOutOfRegionClosure;
class FilterInHeapRegionAndIntoCSClosure;
class FilterAndMarkInHeapRegionAndIntoCSClosure;
-class G1ScanAndBalanceClosure;

#ifdef FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES
#error "FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES already defined."
@ -56,8 +54,7 @@ class G1ScanAndBalanceClosure;
      f(FilterIntoCSClosure,_nv)                        \
      f(FilterOutOfRegionClosure,_nv)                   \
      f(FilterInHeapRegionAndIntoCSClosure,_nv)         \
-      f(FilterAndMarkInHeapRegionAndIntoCSClosure,_nv)  \
-      f(G1ScanAndBalanceClosure,_nv)
+      f(FilterAndMarkInHeapRegionAndIntoCSClosure,_nv)

#ifdef FURTHER_SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES
#error "FURTHER_SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES already defined."
@ -66,16 +66,16 @@ public:
  bool failures() { return _failures; }
  int n_failures() { return _n_failures; }

-  virtual void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(      oop* p) { do_oop_work(p); }

-  void do_oop(oop* p) {
+  template <class T> void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _use_prev_marking),
           "Precondition");
-    oop obj = *p;
-    if (obj != NULL) {
+    T heap_oop = oopDesc::load_heap_oop(p);
+    if (!oopDesc::is_null(heap_oop)) {
+      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      bool failed = false;
      if (!_g1h->is_in_closed_subset(obj) ||
          _g1h->is_obj_dead_cond(obj, _use_prev_marking)) {
@ -106,8 +106,8 @@ public:
      }

      if (!_g1h->full_collection()) {
-        HeapRegion* from = _g1h->heap_region_containing(p);
-        HeapRegion* to   = _g1h->heap_region_containing(*p);
+        HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
+        HeapRegion* to   = _g1h->heap_region_containing(obj);
        if (from != NULL && to != NULL &&
            from != to &&
            !to->isHumongous()) {
@ -534,13 +534,13 @@ HeapRegion::object_iterate_mem_careful(MemRegion mr,
  // Otherwise, find the obj that extends onto mr.start().

  assert(cur <= mr.start()
-         && (oop(cur)->klass() == NULL ||
+         && (oop(cur)->klass_or_null() == NULL ||
             cur + oop(cur)->size() > mr.start()),
         "postcondition of block_start");
  oop obj;
  while (cur < mr.end()) {
    obj = oop(cur);
-    if (obj->klass() == NULL) {
+    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    } else if (!g1h->is_obj_dead(obj)) {
@ -577,7 +577,7 @@ oops_on_card_seq_iterate_careful(MemRegion mr,
  assert(cur <= mr.start(), "Postcondition");

  while (cur <= mr.start()) {
-    if (oop(cur)->klass() == NULL) {
+    if (oop(cur)->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
@ -591,7 +591,7 @@ oops_on_card_seq_iterate_careful(MemRegion mr,
    obj = oop(cur);
    // If we finish this loop...
    assert(cur <= mr.start()
-           && obj->klass() != NULL
+           && obj->klass_or_null() != NULL
           && cur + obj->size() > mr.start(),
           "Loop postcondition");
    if (!g1h->is_obj_dead(obj)) {
@ -601,7 +601,7 @@ oops_on_card_seq_iterate_careful(MemRegion mr,
    HeapWord* next;
    while (cur < mr.end()) {
      obj = oop(cur);
-      if (obj->klass() == NULL) {
+      if (obj->klass_or_null() == NULL) {
        // Ran into an unparseable point.
        return cur;
      };
@ -781,8 +781,13 @@ void G1OffsetTableContigSpace::set_saved_mark() {
    // will pick up the right saved_mark_word() as the high water mark
    // of the region. Either way, the behaviour will be correct.
    ContiguousSpace::set_saved_mark();
+    OrderAccess::storestore();
    _gc_time_stamp = curr_gc_time_stamp;
-    OrderAccess::fence();
+    // The following fence is to force a flush of the writes above, but
+    // is strictly not needed because when an allocating worker thread
+    // calls set_saved_mark() it does so under the ParGCRareEvent_lock;
+    // when the lock is released, the write will be flushed.
+    // OrderAccess::fence();
  }
}
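The edit above narrows a full OrderAccess::fence() to a storestore barrier: the saved mark only has to become visible before the new _gc_time_stamp, because readers use the time stamp to decide whether the saved mark is current. Below is a sketch of that publish pattern using C++11 atomics, with release/acquire standing in for the storestore barrier and the reader's ordered load; the struct and names are invented, not HotSpot's OrderAccess API.

#include <atomic>

struct ToySpace {
  unsigned long saved_mark;
  std::atomic<unsigned> gc_time_stamp;

  void set_saved_mark(unsigned long top, unsigned curr_stamp) {
    saved_mark = top;                                        // plain store of the data
    // release ordering plays the role of storestore: the stamp cannot be
    // observed before the saved_mark store above
    gc_time_stamp.store(curr_stamp, std::memory_order_release);
  }

  bool mark_is_current(unsigned curr_stamp) const {
    return gc_time_stamp.load(std::memory_order_acquire) == curr_stamp;
  }
};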
|
|
||||||
|
@ -126,7 +126,7 @@ protected:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void add_reference_work(oop* from, bool par) {
|
void add_reference_work(OopOrNarrowOopStar from, bool par) {
|
||||||
// Must make this robust in case "from" is not in "_hr", because of
|
// Must make this robust in case "from" is not in "_hr", because of
|
||||||
// concurrency.
|
// concurrency.
|
||||||
|
|
||||||
@ -173,11 +173,11 @@ public:
|
|||||||
_bm.clear();
|
_bm.clear();
|
||||||
}
|
}
|
||||||
|
|
||||||
void add_reference(oop* from) {
|
void add_reference(OopOrNarrowOopStar from) {
|
||||||
add_reference_work(from, /*parallel*/ true);
|
add_reference_work(from, /*parallel*/ true);
|
||||||
}
|
}
|
||||||
|
|
||||||
void seq_add_reference(oop* from) {
|
void seq_add_reference(OopOrNarrowOopStar from) {
|
||||||
add_reference_work(from, /*parallel*/ false);
|
add_reference_work(from, /*parallel*/ false);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -220,7 +220,7 @@ public:
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Requires "from" to be in "hr()".
|
// Requires "from" to be in "hr()".
|
||||||
bool contains_reference(oop* from) const {
|
bool contains_reference(OopOrNarrowOopStar from) const {
|
||||||
assert(hr()->is_in_reserved(from), "Precondition.");
|
assert(hr()->is_in_reserved(from), "Precondition.");
|
||||||
size_t card_ind = pointer_delta(from, hr()->bottom(),
|
size_t card_ind = pointer_delta(from, hr()->bottom(),
|
||||||
CardTableModRefBS::card_size);
|
CardTableModRefBS::card_size);
|
||||||
@ -394,7 +394,7 @@ public:
|
|||||||
void set_next(PosParPRT* nxt) { _next = nxt; }
|
void set_next(PosParPRT* nxt) { _next = nxt; }
|
||||||
PosParPRT** next_addr() { return &_next; }
|
PosParPRT** next_addr() { return &_next; }
|
||||||
|
|
||||||
void add_reference(oop* from, int tid) {
|
void add_reference(OopOrNarrowOopStar from, int tid) {
|
||||||
// Expand if necessary.
|
// Expand if necessary.
|
||||||
PerRegionTable** pt = par_tables();
|
PerRegionTable** pt = par_tables();
|
||||||
if (par_tables() == NULL && tid > 0 && hr()->is_gc_alloc_region()) {
|
if (par_tables() == NULL && tid > 0 && hr()->is_gc_alloc_region()) {

@@ -447,7 +447,7 @@ public:
return res;
}

-bool contains_reference(oop* from) const {
+bool contains_reference(OopOrNarrowOopStar from) const {
if (PerRegionTable::contains_reference(from)) return true;
if (_par_tables != NULL) {
for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) {

@@ -564,12 +564,15 @@ void OtherRegionsTable::print_from_card_cache() {
}
#endif

-void OtherRegionsTable::add_reference(oop* from, int tid) {
+void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
size_t cur_hrs_ind = hr()->hrs_index();

#if HRRS_VERBOSE
gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
-from, *from);
+from,
+UseCompressedOops
+? oopDesc::load_decode_heap_oop((narrowOop*)from)
+: oopDesc::load_decode_heap_oop((oop*)from));
#endif

int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);

@@ -1021,13 +1024,13 @@ bool OtherRegionsTable::del_single_region_table(size_t ind,
}
}

-bool OtherRegionsTable::contains_reference(oop* from) const {
+bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const {
// Cast away const in this case.
MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag);
return contains_reference_locked(from);
}

-bool OtherRegionsTable::contains_reference_locked(oop* from) const {
+bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
HeapRegion* hr = _g1h->heap_region_containing_raw(from);
if (hr == NULL) return false;
RegionIdx_t hr_ind = (RegionIdx_t) hr->hrs_index();

@@ -1288,7 +1291,7 @@ bool HeapRegionRemSetIterator::has_next(size_t& card_index) {



-oop** HeapRegionRemSet::_recorded_oops = NULL;
+OopOrNarrowOopStar* HeapRegionRemSet::_recorded_oops = NULL;
HeapWord** HeapRegionRemSet::_recorded_cards = NULL;
HeapRegion** HeapRegionRemSet::_recorded_regions = NULL;
int HeapRegionRemSet::_n_recorded = 0;

@@ -1297,13 +1300,13 @@ HeapRegionRemSet::Event* HeapRegionRemSet::_recorded_events = NULL;
int* HeapRegionRemSet::_recorded_event_index = NULL;
int HeapRegionRemSet::_n_recorded_events = 0;

-void HeapRegionRemSet::record(HeapRegion* hr, oop* f) {
+void HeapRegionRemSet::record(HeapRegion* hr, OopOrNarrowOopStar f) {
if (_recorded_oops == NULL) {
assert(_n_recorded == 0
&& _recorded_cards == NULL
&& _recorded_regions == NULL,
"Inv");
-_recorded_oops = NEW_C_HEAP_ARRAY(oop*, MaxRecorded);
+_recorded_oops = NEW_C_HEAP_ARRAY(OopOrNarrowOopStar, MaxRecorded);
_recorded_cards = NEW_C_HEAP_ARRAY(HeapWord*, MaxRecorded);
_recorded_regions = NEW_C_HEAP_ARRAY(HeapRegion*, MaxRecorded);
}

@@ -1408,21 +1411,21 @@ void HeapRegionRemSet::test() {
HeapRegionRemSet* hrrs = hr0->rem_set();

// Make three references from region 0x101...
-hrrs->add_reference((oop*)hr1_start);
+hrrs->add_reference((OopOrNarrowOopStar)hr1_start);
-hrrs->add_reference((oop*)hr1_mid);
+hrrs->add_reference((OopOrNarrowOopStar)hr1_mid);
-hrrs->add_reference((oop*)hr1_last);
+hrrs->add_reference((OopOrNarrowOopStar)hr1_last);

-hrrs->add_reference((oop*)hr2_start);
+hrrs->add_reference((OopOrNarrowOopStar)hr2_start);
-hrrs->add_reference((oop*)hr2_mid);
+hrrs->add_reference((OopOrNarrowOopStar)hr2_mid);
-hrrs->add_reference((oop*)hr2_last);
+hrrs->add_reference((OopOrNarrowOopStar)hr2_last);

-hrrs->add_reference((oop*)hr3_start);
+hrrs->add_reference((OopOrNarrowOopStar)hr3_start);
-hrrs->add_reference((oop*)hr3_mid);
+hrrs->add_reference((OopOrNarrowOopStar)hr3_mid);
-hrrs->add_reference((oop*)hr3_last);
+hrrs->add_reference((OopOrNarrowOopStar)hr3_last);

// Now cause a coarsening.
-hrrs->add_reference((oop*)hr4->bottom());
+hrrs->add_reference((OopOrNarrowOopStar)hr4->bottom());
-hrrs->add_reference((oop*)hr5->bottom());
+hrrs->add_reference((OopOrNarrowOopStar)hr5->bottom());

// Now, does iteration yield these three?
HeapRegionRemSetIterator iter;
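The hunks above widen the remembered-set API from oop* to the type-erased OopOrNarrowOopStar (a void*), and add_reference now decodes the slot at run time with the UseCompressedOops ? ... : ... expression. Below is a minimal standalone sketch of that decode pattern; the types, base/shift constants and the kUseCompressedOops flag are simplified stand-ins, not the real HotSpot declarations.

#include <cstdint>
#include <cstdio>

// Simplified stand-ins for HotSpot types (illustration only).
typedef uint32_t  narrowOop;           // compressed reference: offset into the heap
typedef uintptr_t oopval;              // full-width reference value
typedef void*     OopOrNarrowOopStar;  // type-erased pointer to either slot kind

static const uintptr_t kHeapBase = 0x10000000;  // assumed encoding base
static const int       kShift    = 3;           // assumed encoding shift
static bool kUseCompressedOops   = true;        // stand-in for UseCompressedOops

// Decode whatever the slot holds into a full-width value, picking the slot
// width at run time, as the patched add_reference does for its trace output.
static oopval load_decode(OopOrNarrowOopStar slot) {
  if (kUseCompressedOops) {
    narrowOop n = *(narrowOop*)slot;
    return n == 0 ? 0 : kHeapBase + ((uintptr_t)n << kShift);
  } else {
    return *(oopval*)slot;
  }
}

int main() {
  narrowOop compressed = 0x10;           // some encoded reference
  oopval    wide = kHeapBase + 0x80;     // some full-width reference

  printf("compressed slot decodes to %#zx\n", (size_t)load_decode(&compressed));
  kUseCompressedOops = false;
  printf("wide slot decodes to %#zx\n", (size_t)load_decode(&wide));
  return 0;
}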
@@ -116,9 +116,9 @@ public:

// For now. Could "expand" some tables in the future, so that this made
// sense.
-void add_reference(oop* from, int tid);
+void add_reference(OopOrNarrowOopStar from, int tid);

-void add_reference(oop* from) {
+void add_reference(OopOrNarrowOopStar from) {
return add_reference(from, 0);
}

@@ -140,8 +140,8 @@ public:
static size_t static_mem_size();
static size_t fl_mem_size();

-bool contains_reference(oop* from) const;
+bool contains_reference(OopOrNarrowOopStar from) const;
-bool contains_reference_locked(oop* from) const;
+bool contains_reference_locked(OopOrNarrowOopStar from) const;

void clear();

@@ -192,7 +192,7 @@ private:
// Unused unless G1RecordHRRSOops is true.

static const int MaxRecorded = 1000000;
-static oop** _recorded_oops;
+static OopOrNarrowOopStar* _recorded_oops;
static HeapWord** _recorded_cards;
static HeapRegion** _recorded_regions;
static int _n_recorded;

@@ -231,13 +231,13 @@ public:

/* Used in the sequential case. Returns "true" iff this addition causes
the size limit to be reached. */
-void add_reference(oop* from) {
+void add_reference(OopOrNarrowOopStar from) {
_other_regions.add_reference(from);
}

/* Used in the parallel case. Returns "true" iff this addition causes
the size limit to be reached. */
-void add_reference(oop* from, int tid) {
+void add_reference(OopOrNarrowOopStar from, int tid) {
_other_regions.add_reference(from, tid);
}

@@ -301,7 +301,7 @@ public:
return OtherRegionsTable::fl_mem_size();
}

-bool contains_reference(oop* from) const {
+bool contains_reference(OopOrNarrowOopStar from) const {
return _other_regions.contains_reference(from);
}
void print() const;

@@ -329,7 +329,7 @@ public:
}
#endif

-static void record(HeapRegion* hr, oop* f);
+static void record(HeapRegion* hr, OopOrNarrowOopStar f);
static void print_recorded();
static void record_event(Event evnt);
@@ -43,6 +43,18 @@ void ObjPtrQueue::apply_closure_to_buffer(ObjectClosure* cl,
}
}
}

+#ifdef ASSERT
+void ObjPtrQueue::verify_oops_in_buffer() {
+if (_buf == NULL) return;
+for (size_t i = _index; i < _sz; i += oopSize) {
+oop obj = (oop)_buf[byte_index_to_index((int)i)];
+assert(obj != NULL && obj->is_oop(true /* ignore mark word */),
+"Not an oop");
+}
+}
+#endif

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

@@ -66,6 +78,7 @@ void SATBMarkQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,


void SATBMarkQueueSet::handle_zero_index_for_thread(JavaThread* t) {
+DEBUG_ONLY(t->satb_mark_queue().verify_oops_in_buffer();)
t->satb_mark_queue().handle_zero_index();
}

@@ -143,7 +156,7 @@ void SATBMarkQueueSet::abandon_partial_marking() {
}
_completed_buffers_tail = NULL;
_n_completed_buffers = 0;
-debug_only(assert_completed_buffer_list_len_correct_locked());
+DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
}
while (buffers_to_delete != NULL) {
CompletedBufferNode* nd = buffers_to_delete;

@@ -39,6 +39,7 @@ public:
static void apply_closure_to_buffer(ObjectClosure* cl,
void** buf, size_t index, size_t sz);

+void verify_oops_in_buffer() NOT_DEBUG_RETURN;
};
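The new verify_oops_in_buffer above walks the live portion of a SATB buffer and asserts every entry looks like an object. Here is a toy model of that walk, assuming live entries occupy [index, size); the struct and names are illustrative only and use element indices, whereas the real ObjPtrQueue indexes in bytes.

#include <cassert>
#include <cstddef>

// Toy model of a SATB-style buffer: entries are filled from the top down,
// so live entries occupy [index, size). Names are illustrative, not HotSpot's.
struct ToyObj { int header; };

struct ToyBuffer {
  ToyObj** buf;
  size_t   index;   // first live slot, in elements
  size_t   size;    // capacity, in elements

  // Mirrors the shape of ObjPtrQueue::verify_oops_in_buffer: every live
  // slot must hold a plausible (non-null) object pointer.
  void verify_entries() const {
    if (buf == NULL) return;
    for (size_t i = index; i < size; i++) {
      assert(buf[i] != NULL && "not a valid object");
    }
  }
};

int main() {
  ToyObj a, b;
  ToyObj* slots[4] = { NULL, NULL, &a, &b };
  ToyBuffer q = { slots, 2, 4 };
  q.verify_entries();   // passes: slots 2..3 are live and non-null
  return 0;
}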
@@ -27,6 +27,7 @@
bufferingOopClosure.hpp genOopClosures.hpp
bufferingOopClosure.hpp generation.hpp
bufferingOopClosure.hpp os.hpp
+bufferingOopClosure.hpp taskqueue.hpp

cardTableRS.cpp concurrentMark.hpp
cardTableRS.cpp g1SATBCardTableModRefBS.hpp

@@ -139,7 +140,7 @@ g1CollectedHeap.cpp concurrentZFThread.hpp
g1CollectedHeap.cpp g1CollectedHeap.inline.hpp
g1CollectedHeap.cpp g1CollectorPolicy.hpp
g1CollectedHeap.cpp g1MarkSweep.hpp
-g1CollectedHeap.cpp g1RemSet.hpp
+g1CollectedHeap.cpp g1RemSet.inline.hpp
g1CollectedHeap.cpp g1OopClosures.inline.hpp
g1CollectedHeap.cpp genOopClosures.inline.hpp
g1CollectedHeap.cpp gcLocker.inline.hpp

@@ -151,13 +152,14 @@ g1CollectedHeap.cpp icBuffer.hpp
g1CollectedHeap.cpp isGCActiveMark.hpp
g1CollectedHeap.cpp oop.inline.hpp
g1CollectedHeap.cpp oop.pcgc.inline.hpp
-g1CollectedHeap.cpp parGCAllocBuffer.hpp
g1CollectedHeap.cpp vm_operations_g1.hpp
g1CollectedHeap.cpp vmThread.hpp

g1CollectedHeap.hpp barrierSet.hpp
+g1CollectedHeap.hpp g1RemSet.hpp
g1CollectedHeap.hpp heapRegion.hpp
g1CollectedHeap.hpp memRegion.hpp
+g1CollectedHeap.hpp parGCAllocBuffer.hpp
g1CollectedHeap.hpp sharedHeap.hpp

g1CollectedHeap.inline.hpp concurrentMark.hpp

@@ -245,6 +247,7 @@ g1RemSet.cpp intHisto.hpp
g1RemSet.cpp iterator.hpp
g1RemSet.cpp oop.inline.hpp

+g1RemSet.inline.hpp oop.inline.hpp
g1RemSet.inline.hpp g1RemSet.hpp
g1RemSet.inline.hpp heapRegionRemSet.hpp

@@ -255,6 +258,7 @@ g1SATBCardTableModRefBS.cpp thread.hpp
g1SATBCardTableModRefBS.cpp thread_<os_family>.inline.hpp
g1SATBCardTableModRefBS.cpp satbQueue.hpp

+g1SATBCardTableModRefBS.hpp oop.inline.hpp
g1SATBCardTableModRefBS.hpp cardTableModRefBS.hpp
g1SATBCardTableModRefBS.hpp memRegion.hpp

@@ -31,8 +31,9 @@ void CardTableModRefBS::par_non_clean_card_iterate_work(Space* sp, MemRegion mr,
bool clear,
int n_threads) {
if (n_threads > 0) {
-assert(n_threads == (int)ParallelGCThreads, "# worker threads != # requested!");
+assert((n_threads == 1 && ParallelGCThreads == 0) ||
+n_threads <= (int)ParallelGCThreads,
+"# worker threads != # requested!");
// Make sure the LNC array is valid for the space.
jbyte** lowest_non_clean;
uintptr_t lowest_non_clean_base_chunk_index;

@@ -885,7 +885,7 @@ void ParallelScavengeHeap::print_tracing_info() const {
}


-void ParallelScavengeHeap::verify(bool allow_dirty, bool silent) {
+void ParallelScavengeHeap::verify(bool allow_dirty, bool silent, bool option /* ignored */) {
// Why do we need the total_collections()-filter below?
if (total_collections() > 0) {
if (!silent) {

@@ -217,7 +217,7 @@ class ParallelScavengeHeap : public CollectedHeap {
virtual void gc_threads_do(ThreadClosure* tc) const;
virtual void print_tracing_info() const;

-void verify(bool allow_dirty, bool silent);
+void verify(bool allow_dirty, bool silent, bool /* option */);

void print_heap_change(size_t prev_used);

@@ -117,6 +117,7 @@ inline void PSPromotionManager::process_popped_location_depth(StarTask p) {
process_array_chunk(old);
} else {
if (p.is_narrow()) {
+assert(UseCompressedOops, "Error");
PSScavenge::copy_and_push_safe_barrier(this, (narrowOop*)p);
} else {
PSScavenge::copy_and_push_safe_barrier(this, (oop*)p);

@@ -533,7 +533,7 @@ class CollectedHeap : public CHeapObj {
virtual void print_tracing_info() const = 0;

// Heap verification
-virtual void verify(bool allow_dirty, bool silent) = 0;
+virtual void verify(bool allow_dirty, bool silent, bool option) = 0;

// Non product verification and debugging.
#ifndef PRODUCT

@@ -554,7 +554,6 @@ ciEnv.cpp jvmtiExport.hpp
ciEnv.cpp linkResolver.hpp
ciEnv.cpp methodDataOop.hpp
ciEnv.cpp objArrayKlass.hpp
-ciEnv.cpp oop.hpp
ciEnv.cpp oop.inline.hpp
ciEnv.cpp oop.inline2.hpp
ciEnv.cpp oopFactory.hpp

@@ -785,7 +784,6 @@ ciObjectFactory.hpp growableArray.hpp
ciSignature.cpp allocation.inline.hpp
ciSignature.cpp ciSignature.hpp
ciSignature.cpp ciUtilities.hpp
-ciSignature.cpp oop.hpp
ciSignature.cpp oop.inline.hpp
ciSignature.cpp signature.hpp

@@ -950,7 +948,6 @@ classLoadingService.hpp perfData.hpp
classify.cpp classify.hpp
classify.cpp systemDictionary.hpp

-classify.hpp oop.hpp
classify.hpp oop.inline.hpp

codeBlob.cpp allocation.inline.hpp

@@ -1185,7 +1182,6 @@ compilerOracle.cpp handles.inline.hpp
compilerOracle.cpp jniHandles.hpp
compilerOracle.cpp klass.hpp
compilerOracle.cpp methodOop.hpp
-compilerOracle.cpp oop.hpp
compilerOracle.cpp oop.inline.hpp
compilerOracle.cpp oopFactory.hpp
compilerOracle.cpp resourceArea.hpp

@@ -1629,7 +1625,6 @@ frame.cpp methodDataOop.hpp
frame.cpp methodOop.hpp
frame.cpp monitorChunk.hpp
frame.cpp nativeInst_<arch>.hpp
-frame.cpp oop.hpp
frame.cpp oop.inline.hpp
frame.cpp oop.inline2.hpp
frame.cpp oopMapCache.hpp

@@ -1797,7 +1792,6 @@ generation.cpp genOopClosures.inline.hpp
generation.cpp generation.hpp
generation.cpp generation.inline.hpp
generation.cpp java.hpp
-generation.cpp oop.hpp
generation.cpp oop.inline.hpp
generation.cpp spaceDecorator.hpp
generation.cpp space.inline.hpp

@@ -2270,7 +2264,6 @@ java.cpp jvmtiExport.hpp
java.cpp memprofiler.hpp
java.cpp methodOop.hpp
java.cpp objArrayOop.hpp
-java.cpp oop.hpp
java.cpp oop.inline.hpp
java.cpp oopFactory.hpp
java.cpp sharedRuntime.hpp

@@ -2947,7 +2940,7 @@ mutex_<os_family>.inline.hpp thread_<os_family>.inline.hpp
nativeInst_<arch>.cpp assembler_<arch>.inline.hpp
nativeInst_<arch>.cpp handles.hpp
nativeInst_<arch>.cpp nativeInst_<arch>.hpp
-nativeInst_<arch>.cpp oop.hpp
+nativeInst_<arch>.cpp oop.inline.hpp
nativeInst_<arch>.cpp ostream.hpp
nativeInst_<arch>.cpp resourceArea.hpp
nativeInst_<arch>.cpp sharedRuntime.hpp

@@ -3842,7 +3835,7 @@ stackMapTable.hpp stackMapFrame.hpp
stackValue.cpp debugInfo.hpp
stackValue.cpp frame.inline.hpp
stackValue.cpp handles.inline.hpp
-stackValue.cpp oop.hpp
+stackValue.cpp oop.inline.hpp
stackValue.cpp stackValue.hpp

stackValue.hpp handles.hpp

@@ -4329,7 +4322,6 @@ typeArrayOop.hpp typeArrayKlass.hpp
unhandledOops.cpp collectedHeap.hpp
unhandledOops.cpp gcLocker.inline.hpp
unhandledOops.cpp globalDefinitions.hpp
-unhandledOops.cpp oop.hpp
unhandledOops.cpp oop.inline.hpp
unhandledOops.cpp thread.hpp
unhandledOops.cpp unhandledOops.hpp

@@ -4465,7 +4457,6 @@ vframe.cpp javaClasses.hpp
vframe.cpp nmethod.hpp
vframe.cpp objectMonitor.hpp
vframe.cpp objectMonitor.inline.hpp
-vframe.cpp oop.hpp
vframe.cpp oop.inline.hpp
vframe.cpp oopMapCache.hpp
vframe.cpp pcDesc.hpp

@@ -4577,7 +4568,6 @@ vmThread.cpp events.hpp
vmThread.cpp interfaceSupport.hpp
vmThread.cpp methodOop.hpp
vmThread.cpp mutexLocker.hpp
-vmThread.cpp oop.hpp
vmThread.cpp oop.inline.hpp
vmThread.cpp os.hpp
vmThread.cpp resourceArea.hpp

@@ -47,7 +47,7 @@ dump.cpp javaCalls.hpp
dump.cpp javaClasses.hpp
dump.cpp loaderConstraints.hpp
dump.cpp methodDataOop.hpp
-dump.cpp oop.hpp
+dump.cpp oop.inline.hpp
dump.cpp oopFactory.hpp
dump.cpp resourceArea.hpp
dump.cpp signature.hpp

@@ -237,7 +237,7 @@ serialize.cpp compactingPermGenGen.hpp
serialize.cpp compiledICHolderOop.hpp
serialize.cpp methodDataOop.hpp
serialize.cpp objArrayOop.hpp
-serialize.cpp oop.hpp
+serialize.cpp oop.inline.hpp
serialize.cpp symbolTable.hpp
serialize.cpp systemDictionary.hpp

@@ -295,7 +295,7 @@ vmStructs.cpp nmethod.hpp
vmStructs.cpp objArrayKlass.hpp
vmStructs.cpp objArrayKlassKlass.hpp
vmStructs.cpp objArrayOop.hpp
-vmStructs.cpp oop.hpp
+vmStructs.cpp oop.inline.hpp
vmStructs.cpp oopMap.hpp
vmStructs.cpp pcDesc.hpp
vmStructs.cpp perfMemory.hpp
@@ -273,6 +273,7 @@ Rewriter::Rewriter(instanceKlassHandle klass, TRAPS)
compute_index_maps();

if (RegisterFinalizersAtInit && _klass->name() == vmSymbols::java_lang_Object()) {
+bool did_rewrite = false;
int i = _methods->length();
while (i-- > 0) {
methodOop method = (methodOop)_methods->obj_at(i);

@@ -281,9 +282,11 @@ Rewriter::Rewriter(instanceKlassHandle klass, TRAPS)
// object for finalization if needed.
methodHandle m(THREAD, method);
rewrite_Object_init(m, CHECK);
+did_rewrite = true;
break;
}
}
+assert(did_rewrite, "must find Object::<init> to rewrite it");
}

// rewrite methods, in two passes
@@ -25,12 +25,27 @@
# include "incls/_precompiled.incl"
# include "incls/_barrierSet.cpp.incl"

-// count is in HeapWord's
+// count is number of array elements being written
void BarrierSet::static_write_ref_array_pre(HeapWord* start, size_t count) {
-Universe::heap()->barrier_set()->write_ref_array_pre(MemRegion(start, start + count));
+assert(count <= (size_t)max_intx, "count too large");
+#if 0
+warning("Pre: \t" INTPTR_FORMAT "[" SIZE_FORMAT "]\t",
+start, count);
+#endif
+if (UseCompressedOops) {
+Universe::heap()->barrier_set()->write_ref_array_pre((narrowOop*)start, (int)count);
+} else {
+Universe::heap()->barrier_set()->write_ref_array_pre( (oop*)start, (int)count);
+}
}

-// count is in HeapWord's
+// count is number of array elements being written
void BarrierSet::static_write_ref_array_post(HeapWord* start, size_t count) {
-Universe::heap()->barrier_set()->write_ref_array_work(MemRegion(start, start + count));
+assert(count <= (size_t)max_intx, "count too large");
+HeapWord* end = start + objArrayOopDesc::array_size((int)count);
+#if 0
+warning("Post:\t" INTPTR_FORMAT "[" SIZE_FORMAT "] : [" INTPTR_FORMAT","INTPTR_FORMAT")\t",
+start, count, start, end);
+#endif
+Universe::heap()->barrier_set()->write_ref_array_work(MemRegion(start, end));
}
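static_write_ref_array_pre now takes an element count rather than a HeapWord region and picks the slot width at run time, since a narrowOop array covers half as many words as an oop array of the same length. A self-contained sketch of that dispatch follows; ToyBarrierSet, the flag and the slot typedefs are invented stand-ins for illustration, not the real BarrierSet API.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Illustrative stand-ins; not the real BarrierSet API.
typedef uint32_t narrowOop;
typedef uint64_t wideOop;

struct ToyBarrierSet {
  // Two overloads, one per slot width, as in the patched BarrierSet.
  void write_ref_array_pre(narrowOop* dst, int length) {
    printf("pre-barrier over %d narrow slots at %p\n", length, (void*)dst);
  }
  void write_ref_array_pre(wideOop* dst, int length) {
    printf("pre-barrier over %d wide slots at %p\n", length, (void*)dst);
  }
};

static bool kUseCompressedOops = true;  // stand-in for UseCompressedOops

// Shape of the patched static entry point: 'start' is the first slot,
// 'count' is the number of array *elements*, not HeapWords.
void static_write_ref_array_pre(ToyBarrierSet* bs, void* start, size_t count) {
  if (kUseCompressedOops) {
    bs->write_ref_array_pre((narrowOop*)start, (int)count);
  } else {
    bs->write_ref_array_pre((wideOop*)start, (int)count);
  }
}

int main() {
  ToyBarrierSet bs;
  narrowOop arr[8] = {0};
  static_write_ref_array_pre(&bs, arr, 8);
  return 0;
}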
@@ -81,9 +81,13 @@ public:
// barrier types. Semantically, it should be thought of as a call to the
// virtual "_work" function below, which must implement the barrier.)
// First the pre-write versions...
-inline void write_ref_field_pre(void* field, oop new_val);
+template <class T> inline void write_ref_field_pre(T* field, oop new_val);
+private:
+// Keep this private so as to catch violations at build time.
+virtual void write_ref_field_pre_work( void* field, oop new_val) { guarantee(false, "Not needed"); };
protected:
-virtual void write_ref_field_pre_work(void* field, oop new_val) {};
+virtual void write_ref_field_pre_work( oop* field, oop new_val) {};
+virtual void write_ref_field_pre_work(narrowOop* field, oop new_val) {};
public:

// ...then the post-write version.

@@ -117,12 +121,17 @@ public:
virtual void read_ref_array(MemRegion mr) = 0;
virtual void read_prim_array(MemRegion mr) = 0;

-virtual void write_ref_array_pre(MemRegion mr) {}
+virtual void write_ref_array_pre( oop* dst, int length) {}
+virtual void write_ref_array_pre(narrowOop* dst, int length) {}
inline void write_ref_array(MemRegion mr);

// Static versions, suitable for calling from generated code.
static void static_write_ref_array_pre(HeapWord* start, size_t count);
static void static_write_ref_array_post(HeapWord* start, size_t count);
+// Narrow oop versions of the above; count is # of array elements being written,
+// starting with "start", which is HeapWord-aligned.
+static void static_write_ref_array_pre_narrow(HeapWord* start, size_t count);
+static void static_write_ref_array_post_narrow(HeapWord* start, size_t count);

protected:
virtual void write_ref_array_work(MemRegion mr) = 0;
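The header change above turns the pre-barrier entry point into a template over the slot type, with one _work virtual per width and the old void* virtual kept private so stray callers fail at build time. Below is a compilable sketch of that overload-resolution pattern; it omits the kind() de-virtualization branch that the real code has, and all names are stand-ins.

#include <cstdint>
#include <cstdio>

typedef uint32_t narrowOop;   // stand-in for the compressed slot type
struct Obj {};                // stand-in for oopDesc
typedef Obj* oop;

// Pattern from the patched BarrierSet: the public entry point is a template,
// so each call site is compiled against the real slot type, and a virtual
// "_work" hook exists once per width.
class ToyBarrierSet {
 public:
  template <class T> void write_ref_field_pre(T* field, oop new_val) {
    write_ref_field_pre_work(field, new_val);   // overload chosen by T
  }
  virtual ~ToyBarrierSet() {}
 protected:
  virtual void write_ref_field_pre_work(oop* field, oop new_val) {
    printf("wide pre-barrier at %p\n", (void*)field);
  }
  virtual void write_ref_field_pre_work(narrowOop* field, oop new_val) {
    printf("narrow pre-barrier at %p\n", (void*)field);
  }
};

int main() {
  ToyBarrierSet bs;
  Obj o;
  oop slot = &o;
  narrowOop nslot = 0;
  bs.write_ref_field_pre(&slot, &o);    // resolves to the oop* hook
  bs.write_ref_field_pre(&nslot, &o);   // resolves to the narrowOop* hook
  return 0;
}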
@@ -23,10 +23,10 @@
*/

// Inline functions of BarrierSet, which de-virtualize certain
-// performance-critical calls when when the barrier is the most common
+// performance-critical calls when the barrier is the most common
// card-table kind.

-void BarrierSet::write_ref_field_pre(void* field, oop new_val) {
+template <class T> void BarrierSet::write_ref_field_pre(T* field, oop new_val) {
if (kind() == CardTableModRef) {
((CardTableModRefBS*)this)->inline_write_ref_field_pre(field, new_val);
} else {

@@ -287,7 +287,7 @@ public:
// these functions here for performance.
protected:
void write_ref_field_work(oop obj, size_t offset, oop newVal);
-void write_ref_field_work(void* field, oop newVal);
+virtual void write_ref_field_work(void* field, oop newVal);
public:

bool has_write_ref_array_opt() { return true; }

@@ -317,10 +317,10 @@ public:

// *** Card-table-barrier-specific things.

-inline void inline_write_ref_field_pre(void* field, oop newVal) {}
+template <class T> inline void inline_write_ref_field_pre(T* field, oop newVal) {}

-inline void inline_write_ref_field(void* field, oop newVal) {
+template <class T> inline void inline_write_ref_field(T* field, oop newVal) {
-jbyte* byte = byte_for(field);
+jbyte* byte = byte_for((void*)field);
*byte = dirty_card;
}
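The templated inline_write_ref_field above works for either slot width because card marking only needs the slot's address: compute the card byte for the field and mark it dirty. A toy card table showing that arithmetic follows; the card size, marker values and struct are assumptions for the demo, not HotSpot's actual constants.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Toy card table: one byte per 512-byte "card" of a fake heap.
// Constants and names are illustrative, not HotSpot's actual values/API.
static const int     kCardShift = 9;     // 512-byte cards
static const uint8_t kDirty = 0;         // dirty_card marker
static const uint8_t kClean = 1;         // clean_card marker

struct ToyCardTable {
  uint8_t*  bytes;
  uintptr_t heap_base;

  uint8_t* byte_for(const void* field) const {
    return &bytes[((uintptr_t)field - heap_base) >> kCardShift];
  }

  // Shape of the patched CardTableModRefBS::inline_write_ref_field:
  // works for any slot width because only the slot's address matters.
  template <class T> void inline_write_ref_field(T* field) {
    *byte_for((void*)field) = kDirty;
  }
};

int main() {
  uint8_t heap[4096];                       // fake heap
  uint8_t cards[4096 >> kCardShift];
  memset(cards, kClean, sizeof(cards));

  ToyCardTable ct = { cards, (uintptr_t)heap };
  uint32_t* narrow_slot = (uint32_t*)(heap + 1500);
  ct.inline_write_ref_field(narrow_slot);   // dirties card 1500 >> 9 == 2

  printf("card 2 is %s\n", cards[2] == kDirty ? "dirty" : "clean");
  return 0;
}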
@@ -1194,7 +1194,7 @@ GCStats* GenCollectedHeap::gc_stats(int level) const {
return _gens[level]->gc_stats();
}

-void GenCollectedHeap::verify(bool allow_dirty, bool silent) {
+void GenCollectedHeap::verify(bool allow_dirty, bool silent, bool option /* ignored */) {
if (!silent) {
gclog_or_tty->print("permgen ");
}

@@ -325,7 +325,7 @@ public:
void prepare_for_verify();

// Override.
-void verify(bool allow_dirty, bool silent);
+void verify(bool allow_dirty, bool silent, bool /* option */);

// Override.
void print() const;

@@ -57,7 +57,7 @@ class OopsInGenClosure : public OopClosure {
template <class T> void do_barrier(T* p);

// Version for use by closures that may be called in parallel code.
-void par_do_barrier(oop* p);
+template <class T> void par_do_barrier(T* p);

public:
OopsInGenClosure() : OopClosure(NULL),

@@ -40,18 +40,20 @@ inline void OopsInGenClosure::set_generation(Generation* gen) {

template <class T> inline void OopsInGenClosure::do_barrier(T* p) {
assert(generation()->is_in_reserved(p), "expected ref in generation");
-assert(!oopDesc::is_null(*p), "expected non-null object");
+T heap_oop = oopDesc::load_heap_oop(p);
-oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+assert(!oopDesc::is_null(heap_oop), "expected non-null oop");
+oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
// If p points to a younger generation, mark the card.
if ((HeapWord*)obj < _gen_boundary) {
_rs->inline_write_ref_field_gc(p, obj);
}
}

-inline void OopsInGenClosure::par_do_barrier(oop* p) {
+template <class T> inline void OopsInGenClosure::par_do_barrier(T* p) {
assert(generation()->is_in_reserved(p), "expected ref in generation");
-oop obj = *p;
+T heap_oop = oopDesc::load_heap_oop(p);
-assert(obj != NULL, "expected non-null object");
+assert(!oopDesc::is_null(heap_oop), "expected non-null oop");
+oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
// If p points to a younger generation, mark the card.
if ((HeapWord*)obj < gen_boundary()) {
rs()->write_ref_field_gc_par(p, obj);
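The rewritten do_barrier/par_do_barrier read the slot once at its real width, assert on the still-encoded value, decode once, and only then compare against the generation boundary. A standalone sketch of that sequence follows; the types, the decode rule and the boundary value are made up for illustration.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Illustrative model of the patched do_barrier: types and constants are
// stand-ins, not HotSpot's.
typedef uint32_t narrowOop;
typedef uint64_t wideOop;
static const uint64_t kHeapBase = 0x1000;
static const int kShift = 3;

static uint64_t decode_not_null(narrowOop v) { return kHeapBase + ((uint64_t)v << kShift); }
static uint64_t decode_not_null(wideOop v)   { return v; }
static bool is_null(narrowOop v) { return v == 0; }
static bool is_null(wideOop v)   { return v == 0; }

struct ToyRemSet {
  void mark_card_for(const void* slot) { printf("card marked for slot %p\n", slot); }
};

template <class T>
void do_barrier(ToyRemSet* rs, T* p, uint64_t gen_boundary) {
  T heap_oop = *p;                              // single load at slot width
  assert(!is_null(heap_oop) && "expected non-null oop");
  uint64_t obj = decode_not_null(heap_oop);     // single decode
  if (obj < gen_boundary) {                     // points into a younger generation
    rs->mark_card_for(p);
  }
}

int main() {
  ToyRemSet rs;
  narrowOop young_ref = 1;                      // decodes below the boundary
  wideOop old_ref = 0x20000;                    // decodes above the boundary
  do_barrier(&rs, &young_ref, 0x10000);         // marks a card
  do_barrier(&rs, &old_ref, 0x10000);           // no card marked
  return 0;
}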
@@ -1013,12 +1013,19 @@ ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
// discovered_addr.
oop current_head = refs_list.head();

-// Note: In the case of G1, this pre-barrier is strictly
+// Note: In the case of G1, this specific pre-barrier is strictly
// not necessary because the only case we are interested in
-// here is when *discovered_addr is NULL, so this will expand to
+// here is when *discovered_addr is NULL (see the CAS further below),
-// nothing. As a result, I am just manually eliding this out for G1.
+// so this will expand to nothing. As a result, we have manually
+// elided this out for G1, but left in the test for some future
+// collector that might have need for a pre-barrier here.
if (_discovered_list_needs_barrier && !UseG1GC) {
-_bs->write_ref_field_pre((void*)discovered_addr, current_head); guarantee(false, "Needs to be fixed: YSR");
+if (UseCompressedOops) {
+_bs->write_ref_field_pre((narrowOop*)discovered_addr, current_head);
+} else {
+_bs->write_ref_field_pre((oop*)discovered_addr, current_head);
+}
+guarantee(false, "Need to check non-G1 collector");
}
oop retest = oopDesc::atomic_compare_exchange_oop(current_head, discovered_addr,
NULL);

@@ -1029,9 +1036,8 @@ ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
refs_list.set_head(obj);
refs_list.inc_length(1);
if (_discovered_list_needs_barrier) {
-_bs->write_ref_field((void*)discovered_addr, current_head); guarantee(false, "Needs to be fixed: YSR");
+_bs->write_ref_field((void*)discovered_addr, current_head);
}

} else {
// If retest was non NULL, another thread beat us to it:
// The reference has already been discovered...

@@ -1177,11 +1183,16 @@ bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
// pre-value, we can safely elide the pre-barrier here for the case of G1.
assert(discovered == NULL, "control point invariant");
if (_discovered_list_needs_barrier && !UseG1GC) { // safe to elide for G1
+if (UseCompressedOops) {
+_bs->write_ref_field_pre((narrowOop*)discovered_addr, current_head);
+} else {
_bs->write_ref_field_pre((oop*)discovered_addr, current_head);
}
+guarantee(false, "Need to check non-G1 collector");
+}
oop_store_raw(discovered_addr, current_head);
if (_discovered_list_needs_barrier) {
-_bs->write_ref_field((oop*)discovered_addr, current_head);
+_bs->write_ref_field((void*)discovered_addr, current_head);
}
list->set_head(obj);
list->inc_length(1);
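For context on the multi-threaded discovery path patched above: a thread links a Reference onto the discovered list only if it wins the CAS of the discovered field from NULL, and only the winner updates the list head and applies the write barrier. Here is a toy model of that race; all names are invented, the self-link is just this toy's end-of-list convention, and it is not the ReferenceProcessor API.

#include <atomic>
#include <cstdio>

// Toy model of CAS-based discovery: not the real ReferenceProcessor.
struct ToyRef {
  std::atomic<ToyRef*> discovered{nullptr};
};

struct ToyList {
  ToyRef* head = nullptr;
  int     length = 0;
};

static void post_write_barrier(void* field) { printf("post barrier on %p\n", field); }

bool try_discover(ToyList* list, ToyRef* obj) {
  ToyRef* current_head = list->head ? list->head : obj;  // self-link ends this toy list
  ToyRef* expected = nullptr;
  // Analogous to the atomic_compare_exchange_oop(current_head, discovered_addr, NULL) above.
  if (obj->discovered.compare_exchange_strong(expected, current_head)) {
    list->head = obj;
    list->length += 1;
    post_write_barrier(&obj->discovered);   // only the winner dirties the card
    return true;
  }
  return false;   // another thread discovered this reference first
}

int main() {
  ToyList list;
  ToyRef r;
  printf("first attempt: %s\n", try_discover(&list, &r) ? "won" : "lost");
  printf("second attempt: %s\n", try_discover(&list, &r) ? "won" : "lost");
  return 0;
}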
@@ -106,6 +106,7 @@ class Space: public CHeapObj {
virtual void set_end(HeapWord* value) { _end = value; }

virtual HeapWord* saved_mark_word() const { return _saved_mark_word; }

void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }

MemRegionClosure* preconsumptionDirtyCardClosure() const {

@@ -1170,7 +1170,7 @@ void Universe::print_heap_after_gc(outputStream* st) {
st->print_cr("}");
}

-void Universe::verify(bool allow_dirty, bool silent) {
+void Universe::verify(bool allow_dirty, bool silent, bool option) {
if (SharedSkipVerify) {
return;
}

@@ -1194,7 +1194,7 @@ void Universe::verify(bool allow_dirty, bool silent) {
if (!silent) gclog_or_tty->print("[Verifying ");
if (!silent) gclog_or_tty->print("threads ");
Threads::verify();
-heap()->verify(allow_dirty, silent);
+heap()->verify(allow_dirty, silent, option);

if (!silent) gclog_or_tty->print("syms ");
SymbolTable::verify();

@@ -343,6 +343,7 @@ class Universe: AllStatic {
// For UseCompressedOops
static address* narrow_oop_base_addr() { return &_narrow_oop._base; }
static address narrow_oop_base() { return _narrow_oop._base; }
+static bool is_narrow_oop_base(void* addr) { return (narrow_oop_base() == (address)addr); }
static int narrow_oop_shift() { return _narrow_oop._shift; }
static void set_narrow_oop_base(address base) { _narrow_oop._base = base; }
static void set_narrow_oop_shift(int shift) { _narrow_oop._shift = shift; }

@@ -398,7 +399,7 @@ class Universe: AllStatic {

// Debugging
static bool verify_in_progress() { return _verify_in_progress; }
-static void verify(bool allow_dirty = true, bool silent = false);
+static void verify(bool allow_dirty = true, bool silent = false, bool option = true);
static int verify_count() { return _verify_count; }
static void print();
static void print_on(outputStream* st);
@@ -28,13 +28,14 @@
template <class T>
static void specialized_oop_follow_contents(instanceRefKlass* ref, oop obj) {
T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
-oop referent = oopDesc::load_decode_heap_oop(referent_addr);
+T heap_oop = oopDesc::load_heap_oop(referent_addr);
debug_only(
if(TraceReferenceGC && PrintGCDetails) {
gclog_or_tty->print_cr("instanceRefKlass::oop_follow_contents " INTPTR_FORMAT, obj);
}
)
-if (referent != NULL) {
+if (!oopDesc::is_null(heap_oop)) {
+oop referent = oopDesc::decode_heap_oop_not_null(heap_oop);
if (!referent->is_gc_marked() &&
MarkSweep::ref_processor()->
discover_reference(obj, ref->reference_type())) {

@@ -81,13 +82,14 @@ static void specialized_oop_follow_contents(instanceRefKlass* ref,
ParCompactionManager* cm,
oop obj) {
T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
-oop referent = oopDesc::load_decode_heap_oop(referent_addr);
+T heap_oop = oopDesc::load_heap_oop(referent_addr);
debug_only(
if(TraceReferenceGC && PrintGCDetails) {
gclog_or_tty->print_cr("instanceRefKlass::oop_follow_contents " INTPTR_FORMAT, obj);
}
)
-if (referent != NULL) {
+if (!oopDesc::is_null(heap_oop)) {
+oop referent = oopDesc::decode_heap_oop_not_null(heap_oop);
if (PSParallelCompact::mark_bitmap()->is_unmarked(referent) &&
PSParallelCompact::ref_processor()->
discover_reference(obj, ref->reference_type())) {

@@ -182,9 +184,10 @@ int instanceRefKlass::oop_adjust_pointers(oop obj) {
} \
\
T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj); \
-oop referent = oopDesc::load_decode_heap_oop(referent_addr); \
+T heap_oop = oopDesc::load_heap_oop(referent_addr); \
-if (referent != NULL && contains(referent_addr)) { \
+if (!oopDesc::is_null(heap_oop) && contains(referent_addr)) { \
ReferenceProcessor* rp = closure->_ref_processor; \
+oop referent = oopDesc::decode_heap_oop_not_null(heap_oop); \
if (!referent->is_gc_marked() && (rp != NULL) && \
rp->discover_reference(obj, reference_type())) { \
return size; \
@@ -68,7 +68,7 @@ methodOop methodKlass::allocate(constMethodHandle xconst,
m->set_constants(NULL);
m->set_max_stack(0);
m->set_max_locals(0);
-m->clear_intrinsic_id_cache();
+m->set_intrinsic_id(vmIntrinsics::_none);
m->set_method_data(NULL);
m->set_interpreter_throwout_count(0);
m->set_vtable_index(methodOopDesc::garbage_vtable_index);

@@ -962,26 +962,39 @@ methodHandle methodOopDesc:: clone_with_new_data(methodHandle m, u_char* new_cod
return newm;
}

-vmIntrinsics::ID methodOopDesc::compute_intrinsic_id() const {
+vmSymbols::SID methodOopDesc::klass_id_for_intrinsics(klassOop holder) {
-assert(vmIntrinsics::_none == 0, "correct coding of default case");
-const uintptr_t max_cache_uint = right_n_bits((int)(sizeof(_intrinsic_id_cache) * BitsPerByte));
-assert((uintptr_t)vmIntrinsics::ID_LIMIT <= max_cache_uint, "else fix cache size");
// if loader is not the default loader (i.e., != NULL), we can't know the intrinsics
// because we are not loading from core libraries
-if (instanceKlass::cast(method_holder())->class_loader() != NULL) return vmIntrinsics::_none;
+if (instanceKlass::cast(holder)->class_loader() != NULL)
+return vmSymbols::NO_SID; // regardless of name, no intrinsics here

// see if the klass name is well-known:
-symbolOop klass_name = instanceKlass::cast(method_holder())->name();
+symbolOop klass_name = instanceKlass::cast(holder)->name();
-vmSymbols::SID klass_id = vmSymbols::find_sid(klass_name);
+return vmSymbols::find_sid(klass_name);
-if (klass_id == vmSymbols::NO_SID) return vmIntrinsics::_none;
+}

+void methodOopDesc::init_intrinsic_id() {
+assert(_intrinsic_id == vmIntrinsics::_none, "do this just once");
+const uintptr_t max_id_uint = right_n_bits((int)(sizeof(_intrinsic_id) * BitsPerByte));
+assert((uintptr_t)vmIntrinsics::ID_LIMIT <= max_id_uint, "else fix size");

+// the klass name is well-known:
+vmSymbols::SID klass_id = klass_id_for_intrinsics(method_holder());
+assert(klass_id != vmSymbols::NO_SID, "caller responsibility");

// ditto for method and signature:
vmSymbols::SID name_id = vmSymbols::find_sid(name());
-if (name_id == vmSymbols::NO_SID) return vmIntrinsics::_none;
+if (name_id == vmSymbols::NO_SID) return;
vmSymbols::SID sig_id = vmSymbols::find_sid(signature());
-if (sig_id == vmSymbols::NO_SID) return vmIntrinsics::_none;
+if (sig_id == vmSymbols::NO_SID) return;
jshort flags = access_flags().as_short();

+vmIntrinsics::ID id = vmIntrinsics::find_id(klass_id, name_id, sig_id, flags);
+if (id != vmIntrinsics::_none) {
+set_intrinsic_id(id);
+return;
+}

// A few slightly irregular cases:
switch (klass_id) {
case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_StrictMath):

@@ -992,14 +1005,17 @@ vmIntrinsics::ID methodOopDesc::compute_intrinsic_id() const {
case vmSymbols::VM_SYMBOL_ENUM_NAME(sqrt_name):
// pretend it is the corresponding method in the non-strict class:
klass_id = vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_Math);
+id = vmIntrinsics::find_id(klass_id, name_id, sig_id, flags);
break;
}
}

-// return intrinsic id if any
+if (id != vmIntrinsics::_none) {
-return vmIntrinsics::find_id(klass_id, name_id, sig_id, flags);
+// Set up its iid. It is an alias method.
+set_intrinsic_id(id);
+return;
+}
}


// These two methods are static since a GC may move the methodOopDesc
bool methodOopDesc::load_signature_classes(methodHandle m, TRAPS) {

@@ -104,7 +104,7 @@ class methodOopDesc : public oopDesc {
u2 _max_stack; // Maximum number of entries on the expression stack
u2 _max_locals; // Number of local variables used by this method
u2 _size_of_parameters; // size of the parameter block (receiver + arguments) in words
-u1 _intrinsic_id_cache; // Cache for intrinsic_id; 0 or 1+vmInt::ID
+u1 _intrinsic_id; // vmSymbols::intrinsic_id (0 == _none)
u1 _highest_tier_compile; // Highest compile level this method has ever seen.
u2 _interpreter_throwout_count; // Count of times method was exited via exception while interpreting
u2 _number_of_breakpoints; // fullspeed debugging support

@@ -224,8 +224,6 @@ class methodOopDesc : public oopDesc {
int highest_tier_compile() { return _highest_tier_compile;}
void set_highest_tier_compile(int level) { _highest_tier_compile = level;}

-void clear_intrinsic_id_cache() { _intrinsic_id_cache = 0; }
-
// Count of times method was exited via exception while interpreting
void interpreter_throwout_increment() {
if (_interpreter_throwout_count < 65534) {

@@ -571,18 +569,12 @@ class methodOopDesc : public oopDesc {
void set_cached_itable_index(int index) { instanceKlass::cast(method_holder())->set_cached_itable_index(method_idnum(), index); }

// Support for inlining of intrinsic methods
-vmIntrinsics::ID intrinsic_id() const { // returns zero if not an intrinsic
+vmIntrinsics::ID intrinsic_id() const { return (vmIntrinsics::ID) _intrinsic_id; }
-const u1& cache = _intrinsic_id_cache;
+void set_intrinsic_id(vmIntrinsics::ID id) { _intrinsic_id = (u1) id; }
-if (cache != 0) {
-return (vmIntrinsics::ID)(cache - 1);
+// Helper routines for intrinsic_id() and vmIntrinsics::method().
-} else {
+void init_intrinsic_id(); // updates from _none if a match
-vmIntrinsics::ID id = compute_intrinsic_id();
+static vmSymbols::SID klass_id_for_intrinsics(klassOop holder);
-*(u1*)&cache = ((u1) id) + 1; // force the cache to be non-const
-vmIntrinsics::verify_method(id, (methodOop) this);
-assert((vmIntrinsics::ID)(cache - 1) == id, "proper conversion");
-return id;
-}
-}

// On-stack replacement support
bool has_osr_nmethod() { return instanceKlass::cast(method_holder())->lookup_osr_nmethod(this, InvocationEntryBci) != NULL; }

@@ -635,9 +627,6 @@ class methodOopDesc : public oopDesc {
void set_size_of_parameters(int size) { _size_of_parameters = size; }
private:

-// Helper routine for intrinsic_id().
-vmIntrinsics::ID compute_intrinsic_id() const;

// Inlined elements
address* native_function_addr() const { assert(is_native(), "must be native"); return (address*) (this+1); }
address* signature_handler_addr() const { return native_function_addr() + 1; }
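The intrinsic-id rework above drops the lazily computed one-byte cache (0 or 1+id) in favour of a u1 _intrinsic_id that init_intrinsic_id() fills exactly once, with an assert that the enum still fits in a byte. A small sketch of that storage scheme follows; the enum values and the lookup are fabricated for the demo.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Sketch of the new scheme: the intrinsic id is computed once and stored
// directly in a one-byte field. Enum values here are made up.
enum IntrinsicID { _none = 0, _dsin, _dcos, _dsqrt, ID_LIMIT };

struct ToyMethod {
  uint8_t _intrinsic_id = _none;   // replaces the old 1+id cache encoding

  IntrinsicID intrinsic_id() const { return (IntrinsicID)_intrinsic_id; }
  void set_intrinsic_id(IntrinsicID id) { _intrinsic_id = (uint8_t)id; }

  void init_intrinsic_id(IntrinsicID looked_up) {
    assert(_intrinsic_id == _none && "do this just once");
    // Make sure every possible id fits in the byte-sized field.
    assert((unsigned)ID_LIMIT <= 0xFFu && "else fix field size");
    if (looked_up != _none) set_intrinsic_id(looked_up);
  }
};

int main() {
  ToyMethod m;
  m.init_intrinsic_id(_dsqrt);        // e.g. resolved from class/name/signature
  printf("intrinsic id = %d\n", (int)m.intrinsic_id());
  return 0;
}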
@@ -84,8 +84,6 @@ oop objArrayKlass::multi_allocate(int rank, jint* sizes, TRAPS) {
template <class T> void objArrayKlass::do_copy(arrayOop s, T* src,
arrayOop d, T* dst, int length, TRAPS) {

-const size_t word_len = objArrayOopDesc::array_size(length);
-
BarrierSet* bs = Universe::heap()->barrier_set();
// For performance reasons, we assume we are that the write barrier we
// are using has optimized modes for arrays of references. At least one

@@ -93,11 +91,10 @@ template <class T> void objArrayKlass::do_copy(arrayOop s, T* src,
assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");

-MemRegion dst_mr = MemRegion((HeapWord*)dst, word_len);
if (s == d) {
// since source and destination are equal we do not need conversion checks.
assert(length > 0, "sanity check");
-bs->write_ref_array_pre(dst_mr);
+bs->write_ref_array_pre(dst, length);
Copy::conjoint_oops_atomic(src, dst, length);
} else {
// We have to make sure all elements conform to the destination array

@@ -105,7 +102,7 @@ template <class T> void objArrayKlass::do_copy(arrayOop s, T* src,
klassOop stype = objArrayKlass::cast(s->klass())->element_klass();
if (stype == bound || Klass::cast(stype)->is_subtype_of(bound)) {
// elements are guaranteed to be subtypes, so no check necessary
-bs->write_ref_array_pre(dst_mr);
+bs->write_ref_array_pre(dst, length);
Copy::conjoint_oops_atomic(src, dst, length);
} else {
// slow case: need individual subtype checks

@@ -137,6 +134,7 @@ template <class T> void objArrayKlass::do_copy(arrayOop s, T* src,
}
}
}
+const size_t word_len = objArrayOopDesc::array_size(length);
bs->write_ref_array(MemRegion((HeapWord*)dst, word_len));
}
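The do_copy change passes the element count to the pre-barrier but still converts to a HeapWord count for the post-barrier region, because with compressed oops the two are no longer equal. A few lines showing that arithmetic, assuming the usual 64-bit HeapWord and 32-bit narrowOop configuration:

#include <cstddef>
#include <cstdio>

// N elements no longer occupy N HeapWords once compressed oops are on.
static const size_t kHeapWordSize = 8;   // assumed 64-bit HeapWord

static size_t array_size_in_words(int length, bool use_compressed_oops) {
  size_t bytes = (size_t)length * (use_compressed_oops ? 4 : 8);
  return (bytes + kHeapWordSize - 1) / kHeapWordSize;   // round up to words
}

int main() {
  int length = 10;
  printf("%d elements = %zu words (compressed), %zu words (wide)\n",
         length,
         array_size_in_words(length, true),
         array_size_in_words(length, false));
  return 0;
}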
@@ -148,12 +148,14 @@ inline bool oopDesc::is_null(narrowOop obj) { return obj == 0; }

inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
assert(!is_null(v), "oop value can never be zero");
+assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
address base = Universe::narrow_oop_base();
int shift = Universe::narrow_oop_shift();
uint64_t pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
uint64_t result = pd >> shift;
assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
+assert(decode_heap_oop(result) == v, "reversibility");
return (narrowOop)result;
}

@@ -449,7 +451,7 @@ inline void update_barrier_set(void* p, oop v) {
oopDesc::bs()->write_ref_field(p, v);
}

-inline void update_barrier_set_pre(void* p, oop v) {
+template <class T> inline void update_barrier_set_pre(T* p, oop v) {
oopDesc::bs()->write_ref_field_pre(p, v);
}

@@ -459,15 +461,15 @@ template <class T> inline void oop_store(T* p, oop v) {
} else {
update_barrier_set_pre(p, v);
oopDesc::encode_store_heap_oop(p, v);
-update_barrier_set(p, v);
+update_barrier_set((void*)p, v); // cast away type
}
}

template <class T> inline void oop_store(volatile T* p, oop v) {
-update_barrier_set_pre((void*)p, v);
+update_barrier_set_pre((T*)p, v); // cast away volatile
// Used by release_obj_field_put, so use release_store_ptr.
oopDesc::release_encode_store_heap_oop(p, v);
-update_barrier_set((void*)p, v);
+update_barrier_set((void*)p, v); // cast away type
}

template <class T> inline void oop_store_without_check(T* p, oop v) {
@@ -29,6 +29,7 @@
 typedef juint narrowOop; // Offset instead of address for an oop within a java object
 typedef class klassOopDesc* wideKlassOop; // to keep SA happy and unhandled oop
 // detector happy.
+typedef void* OopOrNarrowOopStar;
 
 #ifndef CHECK_UNHANDLED_OOPS
 
@@ -1796,8 +1796,12 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
 for (uint i=1; i<req(); ++i) {// For all paths in
 Node *ii = in(i);
 if (ii->is_DecodeN() && ii->bottom_type() == bottom_type()) {
+// Note: in_decodeN is used only to define the type of new phi.
+// Find a non dead path otherwise phi type will be wrong.
+if (ii->in(1)->bottom_type() != Type::TOP) {
 has_decodeN = true;
 in_decodeN = ii->in(1);
+}
 } else if (!ii->is_Phi()) {
 may_push = false;
 }
@@ -1805,7 +1809,6 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
 
 if (has_decodeN && may_push) {
 PhaseIterGVN *igvn = phase->is_IterGVN();
-// Note: in_decodeN is used only to define the type of new phi here.
 PhiNode *new_phi = PhiNode::make_blank(in(0), in_decodeN);
 uint orig_cnt = req();
 for (uint i=1; i<req(); ++i) {// For all paths in
@@ -101,7 +101,8 @@ CallGenerator* Compile::find_intrinsic(ciMethod* m, bool is_virtual) {
 }
 }
 // Lazily create intrinsics for intrinsic IDs well-known in the runtime.
-if (m->intrinsic_id() != vmIntrinsics::_none) {
+if (m->intrinsic_id() != vmIntrinsics::_none &&
+m->intrinsic_id() <= vmIntrinsics::LAST_COMPILER_INLINE) {
 CallGenerator* cg = make_vm_intrinsic(m, is_virtual);
 if (cg != NULL) {
 // Save it for next time:
@@ -440,6 +441,8 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
 _orig_pc_slot_offset_in_bytes(0),
 _node_bundling_limit(0),
 _node_bundling_base(NULL),
+_java_calls(0),
+_inner_loops(0),
 #ifndef PRODUCT
 _trace_opto_output(TraceOptoOutput || method()->has_option("TraceOptoOutput")),
 _printer(IdealGraphPrinter::printer()),
@@ -710,6 +713,8 @@ Compile::Compile( ciEnv* ci_env,
 _code_buffer("Compile::Fill_buffer"),
 _node_bundling_limit(0),
 _node_bundling_base(NULL),
+_java_calls(0),
+_inner_loops(0),
 #ifndef PRODUCT
 _trace_opto_output(TraceOptoOutput),
 _printer(NULL),
@@ -1850,22 +1855,26 @@ struct Final_Reshape_Counts : public StackObj {
 int _float_count; // count float ops requiring 24-bit precision
 int _double_count; // count double ops requiring more precision
 int _java_call_count; // count non-inlined 'java' calls
+int _inner_loop_count; // count loops which need alignment
 VectorSet _visited; // Visitation flags
 Node_List _tests; // Set of IfNodes & PCTableNodes
 
 Final_Reshape_Counts() :
-_call_count(0), _float_count(0), _double_count(0), _java_call_count(0),
+_call_count(0), _float_count(0), _double_count(0),
+_java_call_count(0), _inner_loop_count(0),
 _visited( Thread::current()->resource_area() ) { }
 
 void inc_call_count () { _call_count ++; }
 void inc_float_count () { _float_count ++; }
 void inc_double_count() { _double_count++; }
 void inc_java_call_count() { _java_call_count++; }
+void inc_inner_loop_count() { _inner_loop_count++; }
 
 int get_call_count () const { return _call_count ; }
 int get_float_count () const { return _float_count ; }
 int get_double_count() const { return _double_count; }
 int get_java_call_count() const { return _java_call_count; }
+int get_inner_loop_count() const { return _inner_loop_count; }
 };
 
 static bool oop_offset_is_sane(const TypeInstPtr* tp) {
@@ -1877,7 +1886,7 @@ static bool oop_offset_is_sane(const TypeInstPtr* tp) {
 
 //------------------------------final_graph_reshaping_impl----------------------
 // Implement items 1-5 from final_graph_reshaping below.
-static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &fpu ) {
+static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc ) {
 
 if ( n->outcnt() == 0 ) return; // dead node
 uint nop = n->Opcode();
@@ -1919,13 +1928,13 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &fpu ) {
 case Op_CmpF:
 case Op_CmpF3:
 // case Op_ConvL2F: // longs are split into 32-bit halves
-fpu.inc_float_count();
+frc.inc_float_count();
 break;
 
 case Op_ConvF2D:
 case Op_ConvD2F:
-fpu.inc_float_count();
-fpu.inc_double_count();
+frc.inc_float_count();
+frc.inc_double_count();
 break;
 
 // Count all double operations that may use FPU
@@ -1942,7 +1951,7 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &fpu ) {
 case Op_ConD:
 case Op_CmpD:
 case Op_CmpD3:
-fpu.inc_double_count();
+frc.inc_double_count();
 break;
 case Op_Opaque1: // Remove Opaque Nodes before matching
 case Op_Opaque2: // Remove Opaque Nodes before matching
@@ -1951,7 +1960,7 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &fpu ) {
 case Op_CallStaticJava:
 case Op_CallJava:
 case Op_CallDynamicJava:
-fpu.inc_java_call_count(); // Count java call site;
+frc.inc_java_call_count(); // Count java call site;
 case Op_CallRuntime:
 case Op_CallLeaf:
 case Op_CallLeafNoFP: {
@@ -1962,7 +1971,7 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &fpu ) {
 // uncommon_trap, _complete_monitor_locking, _complete_monitor_unlocking,
 // _new_Java, _new_typeArray, _new_objArray, _rethrow_Java, ...
 if( !call->is_CallStaticJava() || !call->as_CallStaticJava()->_name ) {
-fpu.inc_call_count(); // Count the call site
+frc.inc_call_count(); // Count the call site
 } else { // See if uncommon argument is shared
 Node *n = call->in(TypeFunc::Parms);
 int nop = n->Opcode();
@@ -1983,11 +1992,11 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &fpu ) {
 case Op_StoreD:
 case Op_LoadD:
 case Op_LoadD_unaligned:
-fpu.inc_double_count();
+frc.inc_double_count();
 goto handle_mem;
 case Op_StoreF:
 case Op_LoadF:
-fpu.inc_float_count();
+frc.inc_float_count();
 goto handle_mem;
 
 case Op_StoreB:
@@ -2324,6 +2333,12 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &fpu ) {
 n->subsume_by(btp);
 }
 break;
+case Op_Loop:
+case Op_CountedLoop:
+if (n->as_Loop()->is_inner_loop()) {
+frc.inc_inner_loop_count();
+}
+break;
 default:
 assert( !n->is_Call(), "" );
 assert( !n->is_Mem(), "" );
@@ -2332,17 +2347,17 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &fpu ) {
 
 // Collect CFG split points
 if (n->is_MultiBranch())
-fpu._tests.push(n);
+frc._tests.push(n);
 }
 
 //------------------------------final_graph_reshaping_walk---------------------
 // Replacing Opaque nodes with their input in final_graph_reshaping_impl(),
 // requires that the walk visits a node's inputs before visiting the node.
-static void final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &fpu ) {
+static void final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &frc ) {
 ResourceArea *area = Thread::current()->resource_area();
 Unique_Node_List sfpt(area);
 
-fpu._visited.set(root->_idx); // first, mark node as visited
+frc._visited.set(root->_idx); // first, mark node as visited
 uint cnt = root->req();
 Node *n = root;
 uint i = 0;
@@ -2351,7 +2366,7 @@ static void final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Re
 // Place all non-visited non-null inputs onto stack
 Node* m = n->in(i);
 ++i;
-if (m != NULL && !fpu._visited.test_set(m->_idx)) {
+if (m != NULL && !frc._visited.test_set(m->_idx)) {
 if (m->is_SafePoint() && m->as_SafePoint()->jvms() != NULL)
 sfpt.push(m);
 cnt = m->req();
@@ -2361,7 +2376,7 @@ static void final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Re
 }
 } else {
 // Now do post-visit work
-final_graph_reshaping_impl( n, fpu );
+final_graph_reshaping_impl( n, frc );
 if (nstack.is_empty())
 break; // finished
 n = nstack.node(); // Get node from stack
@@ -2442,16 +2457,16 @@ bool Compile::final_graph_reshaping() {
 return true;
 }
 
-Final_Reshape_Counts fpu;
+Final_Reshape_Counts frc;
 
 // Visit everybody reachable!
 // Allocate stack of size C->unique()/2 to avoid frequent realloc
 Node_Stack nstack(unique() >> 1);
-final_graph_reshaping_walk(nstack, root(), fpu);
+final_graph_reshaping_walk(nstack, root(), frc);
 
 // Check for unreachable (from below) code (i.e., infinite loops).
-for( uint i = 0; i < fpu._tests.size(); i++ ) {
-MultiBranchNode *n = fpu._tests[i]->as_MultiBranch();
+for( uint i = 0; i < frc._tests.size(); i++ ) {
+MultiBranchNode *n = frc._tests[i]->as_MultiBranch();
 // Get number of CFG targets.
 // Note that PCTables include exception targets after calls.
 uint required_outcnt = n->required_outcnt();
@@ -2497,7 +2512,7 @@ bool Compile::final_graph_reshaping() {
 // Check that I actually visited all kids. Unreached kids
 // must be infinite loops.
 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++)
-if (!fpu._visited.test(n->fast_out(j)->_idx)) {
+if (!frc._visited.test(n->fast_out(j)->_idx)) {
 record_method_not_compilable("infinite loop");
 return true; // Found unvisited kid; must be unreach
 }
@@ -2506,13 +2521,14 @@ bool Compile::final_graph_reshaping() {
 // If original bytecodes contained a mixture of floats and doubles
 // check if the optimizer has made it homogenous, item (3).
 if( Use24BitFPMode && Use24BitFP &&
-fpu.get_float_count() > 32 &&
-fpu.get_double_count() == 0 &&
-(10 * fpu.get_call_count() < fpu.get_float_count()) ) {
+frc.get_float_count() > 32 &&
+frc.get_double_count() == 0 &&
+(10 * frc.get_call_count() < frc.get_float_count()) ) {
 set_24_bit_selection_and_mode( false, true );
 }
 
-set_has_java_calls(fpu.get_java_call_count() > 0);
+set_java_calls(frc.get_java_call_count());
+set_inner_loops(frc.get_inner_loop_count());
 
 // No infinite loops, no reason to bail out.
 return false;
@@ -223,7 +223,8 @@ class Compile : public Phase {
 PhaseCFG* _cfg; // Results of CFG finding
 bool _select_24_bit_instr; // We selected an instruction with a 24-bit result
 bool _in_24_bit_fp_mode; // We are emitting instructions with 24-bit results
-bool _has_java_calls; // True if the method has java calls
+int _java_calls; // Number of java calls in the method
+int _inner_loops; // Number of inner loops in the method
 Matcher* _matcher; // Engine to map ideal to machine instructions
 PhaseRegAlloc* _regalloc; // Results of register allocation.
 int _frame_slots; // Size of total frame in stack slots
@@ -505,7 +506,9 @@ class Compile : public Phase {
 PhaseCFG* cfg() { return _cfg; }
 bool select_24_bit_instr() const { return _select_24_bit_instr; }
 bool in_24_bit_fp_mode() const { return _in_24_bit_fp_mode; }
-bool has_java_calls() const { return _has_java_calls; }
+bool has_java_calls() const { return _java_calls > 0; }
+int java_calls() const { return _java_calls; }
+int inner_loops() const { return _inner_loops; }
 Matcher* matcher() { return _matcher; }
 PhaseRegAlloc* regalloc() { return _regalloc; }
 int frame_slots() const { return _frame_slots; }
@@ -532,7 +535,8 @@ class Compile : public Phase {
 _in_24_bit_fp_mode = mode;
 }
 
-void set_has_java_calls(bool z) { _has_java_calls = z; }
+void set_java_calls(int z) { _java_calls = z; }
+void set_inner_loops(int z) { _inner_loops = z; }
 
 // Instruction bits passed off to the VM
 int code_size() { return _method_size; }
@@ -578,11 +578,24 @@ PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, Gro
 if (phi_alias_idx == alias_idx) {
 return orig_phi;
 }
-// have we already created a Phi for this alias index?
+// Have we recently created a Phi for this alias index?
 PhiNode *result = get_map_phi(orig_phi->_idx);
 if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) {
 return result;
 }
+// Previous check may fail when the same wide memory Phi was split into Phis
+// for different memory slices. Search all Phis for this region.
+if (result != NULL) {
+Node* region = orig_phi->in(0);
+for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
+Node* phi = region->fast_out(i);
+if (phi->is_Phi() &&
+C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) {
+assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice");
+return phi->as_Phi();
+}
+}
+}
 if ((int)C->unique() + 2*NodeLimitFudgeFactor > MaxNodeLimit) {
 if (C->do_escape_analysis() == true && !C->failing()) {
 // Retry compilation without escape analysis.
@@ -595,6 +608,7 @@ PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, Gro
 orig_phi_worklist.append_if_missing(orig_phi);
 const TypePtr *atype = C->get_adr_type(alias_idx);
 result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype);
+C->copy_node_notes_to(result, orig_phi);
 set_map_phi(orig_phi->_idx, result);
 igvn->set_type(result, result->bottom_type());
 record_for_optimizer(result);
@@ -1373,6 +1373,7 @@ Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
 return st;
 }
 
+
 void GraphKit::pre_barrier(Node* ctl,
 Node* obj,
 Node* adr,
@@ -1431,39 +1432,33 @@ void GraphKit::post_barrier(Node* ctl,
 }
 }
 
-Node* GraphKit::store_oop_to_object(Node* ctl,
+Node* GraphKit::store_oop(Node* ctl,
 Node* obj,
 Node* adr,
 const TypePtr* adr_type,
 Node* val,
 const TypeOopPtr* val_type,
-BasicType bt) {
+BasicType bt,
+bool use_precise) {
+
+set_control(ctl);
+if (stopped()) return top(); // Dead path ?
+
+assert(bt == T_OBJECT, "sanity");
+assert(val != NULL, "not dead path");
 uint adr_idx = C->get_alias_index(adr_type);
-Node* store;
-pre_barrier(ctl, obj, adr, adr_idx, val, val_type, bt);
-store = store_to_memory(control(), adr, val, bt, adr_idx);
-post_barrier(control(), store, obj, adr, adr_idx, val, bt, false);
-return store;
-}
-
-Node* GraphKit::store_oop_to_array(Node* ctl,
-Node* obj,
-Node* adr,
-const TypePtr* adr_type,
-Node *val,
-const TypeOopPtr* val_type,
-BasicType bt) {
-uint adr_idx = C->get_alias_index(adr_type);
-Node* store;
-pre_barrier(ctl, obj, adr, adr_idx, val, val_type, bt);
-store = store_to_memory(control(), adr, val, bt, adr_idx);
-post_barrier(control(), store, obj, adr, adr_idx, val, bt, true);
+assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
+
+pre_barrier(control(), obj, adr, adr_idx, val, val_type, bt);
+Node* store = store_to_memory(control(), adr, val, bt, adr_idx);
+post_barrier(control(), store, obj, adr, adr_idx, val, bt, use_precise);
 return store;
 }
 
+// Could be an array or object we don't know at compile time (unsafe ref.)
 Node* GraphKit::store_oop_to_unknown(Node* ctl,
-Node* obj,
-Node* adr,
+Node* obj, // containing obj
+Node* adr, // actual adress to store val at
 const TypePtr* adr_type,
 Node* val,
 BasicType bt) {
@@ -1485,12 +1480,7 @@ Node* GraphKit::store_oop_to_unknown(Node* ctl,
 if (val_type == NULL) {
 val_type = TypeInstPtr::BOTTOM;
 }
-
-uint adr_idx = at->index();
-pre_barrier(ctl, obj, adr, adr_idx, val, val_type, bt);
-Node* store = store_to_memory(control(), adr, val, bt, adr_idx);
-post_barrier(control(), store, obj, adr, adr_idx, val, bt, true);
-return store;
+return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true);
 }
 
 
@@ -1804,93 +1794,6 @@ Node* GraphKit::just_allocated_object(Node* current_control) {
 }
 
 
-//------------------------------store_barrier----------------------------------
-// Insert a write-barrier store. This is to let generational GC work; we have
-// to flag all oop-stores before the next GC point.
-void GraphKit::write_barrier_post(Node* oop_store, Node* obj, Node* adr,
-Node* val, bool use_precise) {
-// No store check needed if we're storing a NULL or an old object
-// (latter case is probably a string constant). The concurrent
-// mark sweep garbage collector, however, needs to have all nonNull
-// oop updates flagged via card-marks.
-if (val != NULL && val->is_Con()) {
-// must be either an oop or NULL
-const Type* t = val->bottom_type();
-if (t == TypePtr::NULL_PTR || t == Type::TOP)
-// stores of null never (?) need barriers
-return;
-ciObject* con = t->is_oopptr()->const_oop();
-if (con != NULL
-&& con->is_perm()
-&& Universe::heap()->can_elide_permanent_oop_store_barriers())
-// no store barrier needed, because no old-to-new ref created
-return;
-}
-
-if (use_ReduceInitialCardMarks()
-&& obj == just_allocated_object(control())) {
-// We can skip marks on a freshly-allocated object.
-// Keep this code in sync with do_eager_card_mark in runtime.cpp.
-// That routine eagerly marks the occasional object which is produced
-// by the slow path, so that we don't have to do it here.
-return;
-}
-
-if (!use_precise) {
-// All card marks for a (non-array) instance are in one place:
-adr = obj;
-}
-// (Else it's an array (or unknown), and we want more precise card marks.)
-assert(adr != NULL, "");
-
-// Get the alias_index for raw card-mark memory
-int adr_type = Compile::AliasIdxRaw;
-// Convert the pointer to an int prior to doing math on it
-Node* cast = _gvn.transform(new (C, 2) CastP2XNode(control(), adr));
-// Divide by card size
-assert(Universe::heap()->barrier_set()->kind() == BarrierSet::CardTableModRef,
-"Only one we handle so far.");
-CardTableModRefBS* ct =
-(CardTableModRefBS*)(Universe::heap()->barrier_set());
-Node *b = _gvn.transform(new (C, 3) URShiftXNode( cast, _gvn.intcon(CardTableModRefBS::card_shift) ));
-// We store into a byte array, so do not bother to left-shift by zero
-Node *c = byte_map_base_node();
-// Combine
-Node *sb_ctl = control();
-Node *sb_adr = _gvn.transform(new (C, 4) AddPNode( top()/*no base ptr*/, c, b ));
-Node *sb_val = _gvn.intcon(0);
-// Smash zero into card
-if( !UseConcMarkSweepGC ) {
-BasicType bt = T_BYTE;
-store_to_memory(sb_ctl, sb_adr, sb_val, bt, adr_type);
-} else {
-// Specialized path for CM store barrier
-cms_card_mark( sb_ctl, sb_adr, sb_val, oop_store);
-}
-}
-
-// Specialized path for CMS store barrier
-void GraphKit::cms_card_mark(Node* ctl, Node* adr, Node* val, Node *oop_store) {
-BasicType bt = T_BYTE;
-int adr_idx = Compile::AliasIdxRaw;
-Node* mem = memory(adr_idx);
-
-// The type input is NULL in PRODUCT builds
-const TypePtr* type = NULL;
-debug_only(type = C->get_adr_type(adr_idx));
-
-// Add required edge to oop_store, optimizer does not support precedence edges.
-// Convert required edge to precedence edge before allocation.
-Node *store = _gvn.transform( new (C, 5) StoreCMNode(ctl, mem, adr, type, val, oop_store) );
-set_memory(store, adr_idx);
-
-// For CMS, back-to-back card-marks can only remove the first one
-// and this requires DU info. Push on worklist for optimizer.
-if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
-record_for_igvn(store);
-}
-
-
 void GraphKit::round_double_arguments(ciMethod* dest_method) {
 // (Note: TypeFunc::make has a cache that makes this fast.)
 const TypeFunc* tf = TypeFunc::make(dest_method);
@@ -3215,6 +3118,79 @@ InitializeNode* AllocateNode::initialization() {
 return NULL;
 }
 
+//----------------------------- store barriers ----------------------------
+#define __ ideal.
+
+void GraphKit::sync_kit(IdealKit& ideal) {
+// Final sync IdealKit and graphKit.
+__ drain_delay_transform();
+set_all_memory(__ merged_memory());
+set_control(__ ctrl());
+}
+
+// vanilla/CMS post barrier
+// Insert a write-barrier store. This is to let generational GC work; we have
+// to flag all oop-stores before the next GC point.
+void GraphKit::write_barrier_post(Node* oop_store,
+Node* obj,
+Node* adr,
+Node* val,
+bool use_precise) {
+// No store check needed if we're storing a NULL or an old object
+// (latter case is probably a string constant). The concurrent
+// mark sweep garbage collector, however, needs to have all nonNull
+// oop updates flagged via card-marks.
+if (val != NULL && val->is_Con()) {
+// must be either an oop or NULL
+const Type* t = val->bottom_type();
+if (t == TypePtr::NULL_PTR || t == Type::TOP)
+// stores of null never (?) need barriers
+return;
+ciObject* con = t->is_oopptr()->const_oop();
+if (con != NULL
+&& con->is_perm()
+&& Universe::heap()->can_elide_permanent_oop_store_barriers())
+// no store barrier needed, because no old-to-new ref created
+return;
+}
+
+if (!use_precise) {
+// All card marks for a (non-array) instance are in one place:
+adr = obj;
+}
+// (Else it's an array (or unknown), and we want more precise card marks.)
+assert(adr != NULL, "");
+
+IdealKit ideal(gvn(), control(), merged_memory(), true);
+
+// Convert the pointer to an int prior to doing math on it
+Node* cast = __ CastPX(__ ctrl(), adr);
+
+// Divide by card size
+assert(Universe::heap()->barrier_set()->kind() == BarrierSet::CardTableModRef,
+"Only one we handle so far.");
+Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) );
+
+// Combine card table base and card offset
+Node* card_adr = __ AddP(__ top(), byte_map_base_node(), card_offset );
+
+// Get the alias_index for raw card-mark memory
+int adr_type = Compile::AliasIdxRaw;
+// Smash zero into card
+Node* zero = __ ConI(0);
+BasicType bt = T_BYTE;
+if( !UseConcMarkSweepGC ) {
+__ store(__ ctrl(), card_adr, zero, bt, adr_type);
+} else {
+// Specialized path for CM store barrier
+__ storeCM(__ ctrl(), card_adr, zero, oop_store, bt, adr_type);
+}
+
+// Final sync IdealKit and GraphKit.
+sync_kit(ideal);
+}
+
+// G1 pre/post barriers
 void GraphKit::g1_write_barrier_pre(Node* obj,
 Node* adr,
 uint alias_idx,
@@ -3222,10 +3198,8 @@ void GraphKit::g1_write_barrier_pre(Node* obj,
 const TypeOopPtr* val_type,
 BasicType bt) {
 IdealKit ideal(gvn(), control(), merged_memory(), true);
-#define __ ideal.
-__ declares_done();
 
-Node* thread = __ thread();
+Node* tls = __ thread(); // ThreadLocalStorage
 
 Node* no_ctrl = NULL;
 Node* no_base = __ top();
@@ -3248,9 +3222,9 @@ void GraphKit::g1_write_barrier_pre(Node* obj,
 
 // set_control( ctl);
 
-Node* marking_adr = __ AddP(no_base, thread, __ ConX(marking_offset));
-Node* buffer_adr = __ AddP(no_base, thread, __ ConX(buffer_offset));
-Node* index_adr = __ AddP(no_base, thread, __ ConX(index_offset));
+Node* marking_adr = __ AddP(no_base, tls, __ ConX(marking_offset));
+Node* buffer_adr = __ AddP(no_base, tls, __ ConX(buffer_offset));
+Node* index_adr = __ AddP(no_base, tls, __ ConX(index_offset));
 
 // Now some of the values
 
@@ -3281,47 +3255,44 @@ void GraphKit::g1_write_barrier_pre(Node* obj,
 // We could refine the type for what it's worth
 // const TypeLong* lidxtype = TypeLong::make(CONST64(0), get_size_from_queue);
 next_indexX = _gvn.transform( new (C, 2) ConvI2LNode(next_index, TypeLong::make(0, max_jlong, Type::WidenMax)) );
-#endif // _LP64
+#endif
 
 // Now get the buffer location we will log the original value into and store it
 
 Node *log_addr = __ AddP(no_base, buffer, next_indexX);
-// __ store(__ ctrl(), log_addr, orig, T_OBJECT, C->get_alias_index(TypeOopPtr::BOTTOM));
 __ store(__ ctrl(), log_addr, orig, T_OBJECT, Compile::AliasIdxRaw);
 
 // update the index
-// __ store(__ ctrl(), index_adr, next_index, T_INT, Compile::AliasIdxRaw);
-// This is a hack to force this store to occur before the oop store that is coming up
-__ store(__ ctrl(), index_adr, next_index, T_INT, C->get_alias_index(TypeOopPtr::BOTTOM));
+__ store(__ ctrl(), index_adr, next_index, T_INT, Compile::AliasIdxRaw);
 
 } __ else_(); {
 
 // logging buffer is full, call the runtime
 const TypeFunc *tf = OptoRuntime::g1_wb_pre_Type();
-// __ make_leaf_call(tf, OptoRuntime::g1_wb_pre_Java(), "g1_wb_pre", orig, thread);
-__ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", orig, thread);
-} __ end_if();
-} __ end_if();
-} __ end_if();
+__ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", orig, tls);
+} __ end_if(); // (!index)
+} __ end_if(); // (orig != NULL)
+} __ end_if(); // (!marking)
 
-__ drain_delay_transform();
-set_control( __ ctrl());
-set_all_memory( __ merged_memory());
-
-#undef __
+// Final sync IdealKit and GraphKit.
+sync_kit(ideal);
 }
 
 //
 // Update the card table and add card address to the queue
 //
-void GraphKit::g1_mark_card(IdealKit* ideal, Node* card_adr, Node* store, Node* index, Node* index_adr, Node* buffer, const TypeFunc* tf) {
-#define __ ideal->
+void GraphKit::g1_mark_card(IdealKit& ideal,
+Node* card_adr,
+Node* oop_store,
+Node* index,
+Node* index_adr,
+Node* buffer,
+const TypeFunc* tf) {
 
 Node* zero = __ ConI(0);
 Node* no_base = __ top();
 BasicType card_bt = T_BYTE;
 // Smash zero into card. MUST BE ORDERED WRT TO STORE
-__ storeCM(__ ctrl(), card_adr, zero, store, card_bt, Compile::AliasIdxRaw);
+__ storeCM(__ ctrl(), card_adr, zero, oop_store, card_bt, Compile::AliasIdxRaw);
 
 // Now do the queue work
 __ if_then(index, BoolTest::ne, zero); {
@@ -3341,10 +3312,10 @@ void GraphKit::g1_mark_card(IdealKit* ideal, Node* card_adr, Node* store, Node*
 } __ else_(); {
 __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), "g1_wb_post", card_adr, __ thread());
 } __ end_if();
-#undef __
+
 }
 
-void GraphKit::g1_write_barrier_post(Node* store,
+void GraphKit::g1_write_barrier_post(Node* oop_store,
 Node* obj,
 Node* adr,
 uint alias_idx,
@@ -3369,10 +3340,8 @@ void GraphKit::g1_write_barrier_post(Node* store,
 assert(adr != NULL, "");
 
 IdealKit ideal(gvn(), control(), merged_memory(), true);
-#define __ ideal.
-__ declares_done();
 
-Node* thread = __ thread();
+Node* tls = __ thread(); // ThreadLocalStorage
 
 Node* no_ctrl = NULL;
 Node* no_base = __ top();
@@ -3394,8 +3363,8 @@ void GraphKit::g1_write_barrier_post(Node* store,
 
 // Pointers into the thread
 
-Node* buffer_adr = __ AddP(no_base, thread, __ ConX(buffer_offset));
-Node* index_adr = __ AddP(no_base, thread, __ ConX(index_offset));
+Node* buffer_adr = __ AddP(no_base, tls, __ ConX(buffer_offset));
+Node* index_adr = __ AddP(no_base, tls, __ ConX(index_offset));
 
 // Now some values
 
@@ -3404,12 +3373,8 @@ void GraphKit::g1_write_barrier_post(Node* store,
 
 
 // Convert the store obj pointer to an int prior to doing math on it
-// Use addr not obj gets accurate card marks
-
-// Node* cast = __ CastPX(no_ctrl, adr /* obj */);
-
 // Must use ctrl to prevent "integerized oop" existing across safepoint
-Node* cast = __ CastPX(__ ctrl(), ( use_precise ? adr : obj ));
+Node* cast = __ CastPX(__ ctrl(), adr);
 
 // Divide pointer by card size
 Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) );
@ -3439,18 +3404,17 @@ void GraphKit::g1_write_barrier_post(Node* store,
|
|||||||
Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
|
Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
|
||||||
|
|
||||||
__ if_then(card_val, BoolTest::ne, zero); {
|
__ if_then(card_val, BoolTest::ne, zero); {
|
||||||
g1_mark_card(&ideal, card_adr, store, index, index_adr, buffer, tf);
|
g1_mark_card(ideal, card_adr, oop_store, index, index_adr, buffer, tf);
|
||||||
} __ end_if();
|
} __ end_if();
|
||||||
} __ end_if();
|
} __ end_if();
|
||||||
} __ end_if();
|
} __ end_if();
|
||||||
} else {
|
} else {
|
||||||
g1_mark_card(&ideal, card_adr, store, index, index_adr, buffer, tf);
|
// Object.clone() instrinsic uses this path.
|
||||||
|
g1_mark_card(ideal, card_adr, oop_store, index, index_adr, buffer, tf);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Final sync IdealKit and GraphKit.
|
||||||
__ drain_delay_transform();
|
sync_kit(ideal);
|
||||||
set_control( __ ctrl());
|
}
|
||||||
set_all_memory( __ merged_memory());
|
|
||||||
#undef __
|
#undef __
|
||||||
|
|
||||||
}
|
|
||||||
|
@ -449,13 +449,24 @@ class GraphKit : public Phase {
|
|||||||
//
|
//
|
||||||
// If val==NULL, it is taken to be a completely unknown value. QQQ
|
// If val==NULL, it is taken to be a completely unknown value. QQQ
|
||||||
|
|
||||||
|
Node* store_oop(Node* ctl,
|
||||||
|
Node* obj, // containing obj
|
||||||
|
Node* adr, // actual adress to store val at
|
||||||
|
const TypePtr* adr_type,
|
||||||
|
Node* val,
|
||||||
|
const TypeOopPtr* val_type,
|
||||||
|
BasicType bt,
|
||||||
|
bool use_precise);
|
||||||
|
|
||||||
Node* store_oop_to_object(Node* ctl,
|
Node* store_oop_to_object(Node* ctl,
|
||||||
Node* obj, // containing obj
|
Node* obj, // containing obj
|
||||||
Node* adr, // actual adress to store val at
|
Node* adr, // actual adress to store val at
|
||||||
const TypePtr* adr_type,
|
const TypePtr* adr_type,
|
||||||
Node* val,
|
Node* val,
|
||||||
const TypeOopPtr* val_type,
|
const TypeOopPtr* val_type,
|
||||||
BasicType bt);
|
BasicType bt) {
|
||||||
|
return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, false);
|
||||||
|
}
|
||||||
|
|
||||||
Node* store_oop_to_array(Node* ctl,
|
Node* store_oop_to_array(Node* ctl,
|
||||||
Node* obj, // containing obj
|
Node* obj, // containing obj
|
||||||
@ -463,7 +474,9 @@ class GraphKit : public Phase {
|
|||||||
const TypePtr* adr_type,
|
const TypePtr* adr_type,
|
||||||
Node* val,
|
Node* val,
|
||||||
const TypeOopPtr* val_type,
|
const TypeOopPtr* val_type,
|
||||||
BasicType bt);
|
BasicType bt) {
|
||||||
|
return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true);
|
||||||
|
}
|
||||||
|
|
||||||
// Could be an array or object we don't know at compile time (unsafe ref.)
|
// Could be an array or object we don't know at compile time (unsafe ref.)
|
||||||
Node* store_oop_to_unknown(Node* ctl,
|
Node* store_oop_to_unknown(Node* ctl,
|
||||||
@ -488,9 +501,6 @@ class GraphKit : public Phase {
|
|||||||
// Return a load of array element at idx.
|
// Return a load of array element at idx.
|
||||||
Node* load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype);
|
Node* load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype);
|
||||||
|
|
||||||
// CMS card-marks have an input from the corresponding oop_store
|
|
||||||
void cms_card_mark(Node* ctl, Node* adr, Node* val, Node* oop_store);
|
|
||||||
|
|
||||||
//---------------- Dtrace support --------------------
|
//---------------- Dtrace support --------------------
|
||||||
void make_dtrace_method_entry_exit(ciMethod* method, bool is_entry);
|
void make_dtrace_method_entry_exit(ciMethod* method, bool is_entry);
|
||||||
void make_dtrace_method_entry(ciMethod* method) {
|
void make_dtrace_method_entry(ciMethod* method) {
|
||||||
@ -582,9 +592,6 @@ class GraphKit : public Phase {
|
|||||||
return C->too_many_recompiles(method(), bci(), reason);
|
return C->too_many_recompiles(method(), bci(), reason);
|
||||||
}
|
}
|
||||||
|
|
||||||
// vanilla/CMS post barrier
|
|
||||||
void write_barrier_post(Node *store, Node* obj, Node* adr, Node* val, bool use_precise);
|
|
||||||
|
|
||||||
// Returns the object (if any) which was created the moment before.
|
// Returns the object (if any) which was created the moment before.
|
||||||
Node* just_allocated_object(Node* current_control);
|
Node* just_allocated_object(Node* current_control);
|
||||||
|
|
||||||
@ -593,6 +600,11 @@ class GraphKit : public Phase {
|
|||||||
&& Universe::heap()->can_elide_tlab_store_barriers());
|
&& Universe::heap()->can_elide_tlab_store_barriers());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void sync_kit(IdealKit& ideal);
|
||||||
|
|
||||||
|
// vanilla/CMS post barrier
|
||||||
|
void write_barrier_post(Node *store, Node* obj, Node* adr, Node* val, bool use_precise);
|
||||||
|
|
||||||
// G1 pre/post barriers
|
// G1 pre/post barriers
|
||||||
void g1_write_barrier_pre(Node* obj,
|
void g1_write_barrier_pre(Node* obj,
|
||||||
Node* adr,
|
Node* adr,
|
||||||
@ -610,7 +622,7 @@ class GraphKit : public Phase {
|
|||||||
bool use_precise);
|
bool use_precise);
|
||||||
// Helper function for g1
|
// Helper function for g1
|
||||||
private:
|
private:
|
||||||
void g1_mark_card(IdealKit* ideal, Node* card_adr, Node* store, Node* index, Node* index_adr,
|
void g1_mark_card(IdealKit& ideal, Node* card_adr, Node* store, Node* index, Node* index_adr,
|
||||||
Node* buffer, const TypeFunc* tf);
|
Node* buffer, const TypeFunc* tf);
|
||||||
|
|
||||||
public:
|
public:
|
||||||
|
@ -34,7 +34,7 @@
|
|||||||
const uint IdealKit::first_var = TypeFunc::Parms + 1;
|
const uint IdealKit::first_var = TypeFunc::Parms + 1;
|
||||||
|
|
||||||
//----------------------------IdealKit-----------------------------------------
|
//----------------------------IdealKit-----------------------------------------
|
||||||
IdealKit::IdealKit(PhaseGVN &gvn, Node* control, Node* mem, bool delay_all_transforms) :
|
IdealKit::IdealKit(PhaseGVN &gvn, Node* control, Node* mem, bool delay_all_transforms, bool has_declarations) :
|
||||||
_gvn(gvn), C(gvn.C) {
|
_gvn(gvn), C(gvn.C) {
|
||||||
_initial_ctrl = control;
|
_initial_ctrl = control;
|
||||||
_initial_memory = mem;
|
_initial_memory = mem;
|
||||||
@ -47,6 +47,9 @@ IdealKit::IdealKit(PhaseGVN &gvn, Node* control, Node* mem, bool delay_all_trans
|
|||||||
_pending_cvstates = new (C->node_arena()) GrowableArray<Node*>(C->node_arena(), init_size, 0, 0);
|
_pending_cvstates = new (C->node_arena()) GrowableArray<Node*>(C->node_arena(), init_size, 0, 0);
|
||||||
_delay_transform = new (C->node_arena()) GrowableArray<Node*>(C->node_arena(), init_size, 0, 0);
|
_delay_transform = new (C->node_arena()) GrowableArray<Node*>(C->node_arena(), init_size, 0, 0);
|
||||||
DEBUG_ONLY(_state = new (C->node_arena()) GrowableArray<int>(C->node_arena(), init_size, 0, 0));
|
DEBUG_ONLY(_state = new (C->node_arena()) GrowableArray<int>(C->node_arena(), init_size, 0, 0));
|
||||||
|
if (!has_declarations) {
|
||||||
|
declarations_done();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
//-------------------------------if_then-------------------------------------
|
//-------------------------------if_then-------------------------------------
|
||||||
@ -97,7 +100,7 @@ void IdealKit::else_() {
|
|||||||
//-------------------------------end_if-------------------------------------
|
//-------------------------------end_if-------------------------------------
|
||||||
// Merge the "then" and "else" cvstates.
|
// Merge the "then" and "else" cvstates.
|
||||||
//
|
//
|
||||||
// The if_then() pushed the current state for later use
|
// The if_then() pushed a copy of the current state for later use
|
||||||
// as the initial state for a future "else" clause. The
|
// as the initial state for a future "else" clause. The
|
||||||
// current state then became the initial state for the
|
// current state then became the initial state for the
|
||||||
// then clause. If an "else" clause was encountered, it will
|
// then clause. If an "else" clause was encountered, it will
|
||||||
@ -258,8 +261,8 @@ Node* IdealKit::promote_to_phi(Node* n, Node* reg) {
|
|||||||
return delay_transform(PhiNode::make(reg, n, ct));
|
return delay_transform(PhiNode::make(reg, n, ct));
|
||||||
}
|
}
|
||||||
|
|
||||||
//-----------------------------declares_done-----------------------------------
|
//-----------------------------declarations_done-------------------------------
|
||||||
void IdealKit::declares_done() {
|
void IdealKit::declarations_done() {
|
||||||
_cvstate = new_cvstate(); // initialize current cvstate
|
_cvstate = new_cvstate(); // initialize current cvstate
|
||||||
set_ctrl(_initial_ctrl); // initialize control in current cvstate
|
set_ctrl(_initial_ctrl); // initialize control in current cvstate
|
||||||
set_all_memory(_initial_memory);// initialize memory in current cvstate
|
set_all_memory(_initial_memory);// initialize memory in current cvstate
|
||||||
@ -277,7 +280,9 @@ Node* IdealKit::transform(Node* n) {
|
|||||||
|
|
||||||
//-----------------------------delay_transform-----------------------------------
|
//-----------------------------delay_transform-----------------------------------
|
||||||
Node* IdealKit::delay_transform(Node* n) {
|
Node* IdealKit::delay_transform(Node* n) {
|
||||||
|
if (!gvn().is_IterGVN() || !gvn().is_IterGVN()->delay_transform()) {
|
||||||
gvn().set_type(n, n->bottom_type());
|
gvn().set_type(n, n->bottom_type());
|
||||||
|
}
|
||||||
_delay_transform->push(n);
|
_delay_transform->push(n);
|
||||||
return n;
|
return n;
|
||||||
}
|
}
|
||||||
@ -321,7 +326,9 @@ IdealVariable::IdealVariable(IdealKit &k) {
|
|||||||
Node* IdealKit::memory(uint alias_idx) {
|
Node* IdealKit::memory(uint alias_idx) {
|
||||||
MergeMemNode* mem = merged_memory();
|
MergeMemNode* mem = merged_memory();
|
||||||
Node* p = mem->memory_at(alias_idx);
|
Node* p = mem->memory_at(alias_idx);
|
||||||
|
if (!gvn().is_IterGVN() || !gvn().is_IterGVN()->delay_transform()) {
|
||||||
_gvn.set_type(p, Type::MEMORY); // must be mapped
|
_gvn.set_type(p, Type::MEMORY); // must be mapped
|
||||||
|
}
|
||||||
return p;
|
return p;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -462,9 +469,6 @@ void IdealKit::make_leaf_call(const TypeFunc *slow_call_type,
|
|||||||
const TypePtr* adr_type = TypeRawPtr::BOTTOM;
|
const TypePtr* adr_type = TypeRawPtr::BOTTOM;
|
||||||
uint adr_idx = C->get_alias_index(adr_type);
|
uint adr_idx = C->get_alias_index(adr_type);
|
||||||
|
|
||||||
// Clone initial memory
|
|
||||||
MergeMemNode* cloned_mem = MergeMemNode::make(C, merged_memory());
|
|
||||||
|
|
||||||
// Slow-path leaf call
|
// Slow-path leaf call
|
||||||
int size = slow_call_type->domain()->cnt();
|
int size = slow_call_type->domain()->cnt();
|
||||||
CallNode *call = (CallNode*)new (C, size) CallLeafNode( slow_call_type, slow_call, leaf_name, adr_type);
|
CallNode *call = (CallNode*)new (C, size) CallLeafNode( slow_call_type, slow_call, leaf_name, adr_type);
|
||||||
@ -489,9 +493,6 @@ void IdealKit::make_leaf_call(const TypeFunc *slow_call_type,
|
|||||||
|
|
||||||
set_ctrl(transform( new (C, 1) ProjNode(call,TypeFunc::Control) ));
|
set_ctrl(transform( new (C, 1) ProjNode(call,TypeFunc::Control) ));
|
||||||
|
|
||||||
// Set the incoming clone of memory as current memory
|
|
||||||
set_all_memory(cloned_mem);
|
|
||||||
|
|
||||||
// Make memory for the call
|
// Make memory for the call
|
||||||
Node* mem = _gvn.transform( new (C, 1) ProjNode(call, TypeFunc::Memory) );
|
Node* mem = _gvn.transform( new (C, 1) ProjNode(call, TypeFunc::Memory) );
|
||||||
|
|
||||||
|
@@ -49,7 +49,7 @@
 //   Example:
 //     Node* limit = ??
 //     IdealVariable i(kit), j(kit);
-//     declares_done();
+//     declarations_done();
 //     Node* exit = make_label(1); // 1 goto
 //     set(j, ConI(0));
 //     loop(i, ConI(0), BoolTest::lt, limit); {

@@ -101,10 +101,7 @@ class IdealKit: public StackObj {
   Node* new_cvstate();                     // Create a new cvstate
   Node* cvstate() { return _cvstate; }     // current cvstate
   Node* copy_cvstate();                    // copy current cvstate
-  void set_ctrl(Node* ctrl) { _cvstate->set_req(TypeFunc::Control, ctrl); }

-  // Should this assert this is a MergeMem???
-  void set_all_memory(Node* mem){ _cvstate->set_req(TypeFunc::Memory, mem); }
   void set_memory(Node* mem, uint alias_idx );
   void do_memory_merge(Node* merging, Node* join);
   void clear(Node* m);                     // clear a cvstate

@@ -132,15 +129,17 @@ class IdealKit: public StackObj {
   Node* memory(uint alias_idx);

 public:
-  IdealKit(PhaseGVN &gvn, Node* control, Node* memory, bool delay_all_transforms = false);
+  IdealKit(PhaseGVN &gvn, Node* control, Node* memory, bool delay_all_transforms = false, bool has_declarations = false);
   ~IdealKit() {
     stop();
     drain_delay_transform();
   }
   // Control
   Node* ctrl() { return _cvstate->in(TypeFunc::Control); }
+  void set_ctrl(Node* ctrl) { _cvstate->set_req(TypeFunc::Control, ctrl); }
   Node* top() { return C->top(); }
   MergeMemNode* merged_memory() { return _cvstate->in(TypeFunc::Memory)->as_MergeMem(); }
+  void set_all_memory(Node* mem) { _cvstate->set_req(TypeFunc::Memory, mem); }
   void set(IdealVariable& v, Node* rhs) { _cvstate->set_req(first_var + v.id(), rhs); }
   Node* value(IdealVariable& v) { return _cvstate->in(first_var + v.id()); }
   void dead(IdealVariable& v) { set(v, (Node*)NULL); }

@@ -155,7 +154,7 @@ class IdealKit: public StackObj {
   Node* make_label(int goto_ct);
   void bind(Node* lab);
   void goto_(Node* lab, bool bind = false);
-  void declares_done();
+  void declarations_done();
   void drain_delay_transform();

   Node* IfTrue(IfNode* iff)  { return transform(new (C,1) IfTrueNode(iff)); }
@@ -378,7 +378,18 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {

   // Force the original merge dead
   igvn->hash_delete(r);
-  r->set_req_X(0,NULL,igvn);
+  // First, remove region's dead users.
+  for (DUIterator_Last lmin, l = r->last_outs(lmin); l >= lmin;) {
+    Node* u = r->last_out(l);
+    if( u == r ) {
+      r->set_req(0, NULL);
+    } else {
+      assert(u->outcnt() == 0, "only dead users");
+      igvn->remove_dead_node(u);
+    }
+    l -= 1;
+  }
+  igvn->remove_dead_node(r);

   // Now remove the bogus extra edges used to keep things alive
   igvn->remove_dead_node( hook );
@@ -310,11 +310,6 @@ CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
     if (!InlineAtomicLong) return NULL;
     break;

-  case vmIntrinsics::_Object_init:
-  case vmIntrinsics::_invoke:
-    // We do not intrinsify these; they are marked for other purposes.
-    return NULL;
-
   case vmIntrinsics::_getCallerClass:
     if (!UseNewReflection) return NULL;
     if (!InlineReflectionGetCallerClass) return NULL;

@@ -327,6 +322,8 @@ CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
     break;

   default:
+    assert(id <= vmIntrinsics::LAST_COMPILER_INLINE, "caller responsibility");
+    assert(id != vmIntrinsics::_Object_init && id != vmIntrinsics::_invoke, "enum out of order?");
     break;
   }

@@ -394,19 +391,12 @@ JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
   }

   if (PrintIntrinsics) {
-    switch (intrinsic_id()) {
-    case vmIntrinsics::_invoke:
-    case vmIntrinsics::_Object_init:
-      // We do not expect to inline these, so do not produce any noise about them.
-      break;
-    default:
       tty->print("Did not inline intrinsic %s%s at bci:%d in",
                  vmIntrinsics::name_at(intrinsic_id()),
                  (is_virtual() ? " (virtual)" : ""), kit.bci());
       kit.caller()->print_short_name(tty);
       tty->print_cr(" (%d bytes)", kit.caller()->code_size());
     }
-  }
   C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
   return NULL;
 }

@@ -1030,7 +1020,7 @@ Node* LibraryCallKit::string_indexOf(Node* string_object, ciTypeArray* target_ar
   const TypeAry* target_array_type = TypeAry::make(TypeInt::CHAR, TypeInt::make(0, target_length, Type::WidenMin));
   const TypeAryPtr* target_type = TypeAryPtr::make(TypePtr::BotPTR, target_array_type, target_array->klass(), true, Type::OffsetBot);

-  IdealKit kit(gvn(), control(), merged_memory());
+  IdealKit kit(gvn(), control(), merged_memory(), false, true);
 #define __ kit.
   Node* zero = __ ConI(0);
   Node* one  = __ ConI(1);

@@ -1042,7 +1032,7 @@ Node* LibraryCallKit::string_indexOf(Node* string_object, ciTypeArray* target_ar
   Node* targetOffset = __ ConI(targetOffset_i);
   Node* sourceEnd    = __ SubI(__ AddI(sourceOffset, sourceCount), targetCountLess1);

-  IdealVariable rtn(kit), i(kit), j(kit); __ declares_done();
+  IdealVariable rtn(kit), i(kit), j(kit); __ declarations_done();
   Node* outer_loop = __ make_label(2 /* goto */);
   Node* return_    = __ make_label(1);

@@ -1079,9 +1069,9 @@ Node* LibraryCallKit::string_indexOf(Node* string_object, ciTypeArray* target_ar
        __ bind(outer_loop);
   }__ end_loop(); __ dead(i);
   __ bind(return_);
-  __ drain_delay_transform();

-  set_control(__ ctrl());
+  // Final sync IdealKit and GraphKit.
+  sync_kit(kit);
   Node* result = __ value(rtn);
 #undef __
   C->set_has_loops(true);

@@ -2183,14 +2173,23 @@ bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, Bas
       // of it.  So we need to emit code to conditionally do the proper type of
      // store.

-      IdealKit kit(gvn(), control(), merged_memory());
-      kit.declares_done();
+      IdealKit ideal(gvn(), control(), merged_memory());
+#define __ ideal.
       // QQQ who knows what probability is here??
-      kit.if_then(heap_base_oop, BoolTest::ne, null(), PROB_UNLIKELY(0.999)); {
-        (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type);
-      } kit.else_(); {
-        (void) store_to_memory(control(), adr, val, type, adr_type, is_volatile);
-      } kit.end_if();
+      __ if_then(heap_base_oop, BoolTest::ne, null(), PROB_UNLIKELY(0.999)); {
+        // Sync IdealKit and graphKit.
+        set_all_memory( __ merged_memory());
+        set_control(__ ctrl());
+        Node* st = store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type);
+        // Update IdealKit memory.
+        __ set_all_memory(merged_memory());
+        __ set_ctrl(control());
+      } __ else_(); {
+        __ store(__ ctrl(), adr, val, type, alias_type->index(), is_volatile);
+      } __ end_if();
+      // Final sync IdealKit and GraphKit.
+      sync_kit(ideal);
+#undef __
     }
   }
 }
@@ -346,7 +346,10 @@ Node *PhaseIdealLoop::remix_address_expressions( Node *n ) {

     // Yes!  Reshape address expression!
     Node *inv_scale = new (C, 3) LShiftINode( add_invar, scale );
-    register_new_node( inv_scale, add_invar_ctrl );
+    Node *inv_scale_ctrl =
+      dom_depth(add_invar_ctrl) > dom_depth(scale_ctrl) ?
+      add_invar_ctrl : scale_ctrl;
+    register_new_node( inv_scale, inv_scale_ctrl );
     Node *var_scale = new (C, 3) LShiftINode( add_var, scale );
     register_new_node( var_scale, n_ctrl );
     Node *var_add = new (C, 3) AddINode( var_scale, inv_scale );
@@ -300,6 +300,12 @@ const Node* MachNode::get_base_and_disp(intptr_t &offset, const TypePtr* &adr_ty
         }
       }
       adr_type = t_disp->add_offset(offset);
+    } else if( base == NULL && offset != 0 && offset != Type::OffsetBot ) {
+      // Use ideal type if it is oop ptr.
+      const TypePtr *tp = oper->type()->isa_ptr();
+      if( tp != NULL) {
+        adr_type = tp;
+      }
     }
   }

@@ -200,6 +200,8 @@ void PhaseMacroExpand::extract_call_projections(CallNode *call) {
 // Eliminate a card mark sequence.  p2x is a ConvP2XNode
 void PhaseMacroExpand::eliminate_card_mark(Node* p2x) {
   assert(p2x->Opcode() == Op_CastP2X, "ConvP2XNode required");
+  if (!UseG1GC) {
+    // vanilla/CMS post barrier
   Node *shift = p2x->unique_out();
   Node *addp = shift->unique_out();
   for (DUIterator_Last jmin, j = addp->last_outs(jmin); j >= jmin; --j) {

@@ -207,6 +209,69 @@ void PhaseMacroExpand::eliminate_card_mark(Node *p2x) {
     assert(st->is_Store(), "store required");
     _igvn.replace_node(st, st->in(MemNode::Memory));
   }
+  } else {
+    // G1 pre/post barriers
+    assert(p2x->outcnt() == 2, "expects 2 users: Xor and URShift nodes");
+    // It could be only one user, URShift node, in Object.clone() instrinsic
+    // but the new allocation is passed to arraycopy stub and it could not
+    // be scalar replaced. So we don't check the case.
+
+    // Remove G1 post barrier.
+
+    // Search for CastP2X->Xor->URShift->Cmp path which
+    // checks if the store done to a different from the value's region.
+    // And replace Cmp with #0 (false) to collapse G1 post barrier.
+    Node* xorx = NULL;
+    for (DUIterator_Fast imax, i = p2x->fast_outs(imax); i < imax; i++) {
+      Node* u = p2x->fast_out(i);
+      if (u->Opcode() == Op_XorX) {
+        xorx = u;
+        break;
+      }
+    }
+    assert(xorx != NULL, "missing G1 post barrier");
+    Node* shift = xorx->unique_out();
+    Node* cmpx = shift->unique_out();
+    assert(cmpx->is_Cmp() && cmpx->unique_out()->is_Bool() &&
+           cmpx->unique_out()->as_Bool()->_test._test == BoolTest::ne,
+           "missing region check in G1 post barrier");
+    _igvn.replace_node(cmpx, makecon(TypeInt::CC_EQ));
+
+    // Remove G1 pre barrier.
+
+    // Search "if (marking != 0)" check and set it to "false".
+    Node* this_region = p2x->in(0);
+    assert(this_region != NULL, "");
+    // There is no G1 pre barrier if previous stored value is NULL
+    // (for example, after initialization).
+    if (this_region->is_Region() && this_region->req() == 3) {
+      int ind = 1;
+      if (!this_region->in(ind)->is_IfFalse()) {
+        ind = 2;
+      }
+      if (this_region->in(ind)->is_IfFalse()) {
+        Node* bol = this_region->in(ind)->in(0)->in(1);
+        assert(bol->is_Bool(), "");
+        cmpx = bol->in(1);
+        if (bol->as_Bool()->_test._test == BoolTest::ne &&
+            cmpx->is_Cmp() && cmpx->in(2) == intcon(0) &&
+            cmpx->in(1)->is_Load()) {
+          Node* adr = cmpx->in(1)->as_Load()->in(MemNode::Address);
+          const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() +
+                                              PtrQueue::byte_offset_of_active());
+          if (adr->is_AddP() && adr->in(AddPNode::Base) == top() &&
+              adr->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
+              adr->in(AddPNode::Offset) == MakeConX(marking_offset)) {
+            _igvn.replace_node(cmpx, makecon(TypeInt::CC_EQ));
+          }
+        }
+      }
+    }
+    // Now CastP2X can be removed since it is used only on dead path
+    // which currently still alive until igvn optimize it.
+    assert(p2x->unique_out()->Opcode() == Op_URShiftX, "");
+    _igvn.replace_node(p2x, top());
+  }
 }

 // Search for a memory operation for the specified memory slice.
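For orientation, the IR shape collapsed above is the compiled form of the conceptual G1 SATB pre-barrier: just before an oop field is overwritten, and only while concurrent marking is active, the previous value is enqueued; a NULL previous value needs no record, which is why the "marking != 0" check can be constant-folded to false for a freshly initialized object. Below is a minimal, self-contained C++ sketch of that logic; the stub types and names are illustrative only and are not HotSpot's interface.

#include <vector>

struct Oop { void* raw; };                    // stand-in for an object reference

struct SatbQueue {
  std::vector<Oop> buf;
  void enqueue(Oop o) { buf.push_back(o); }   // remember the overwritten value
};

struct ThreadStub {
  bool      satb_marking_active;              // the "if (marking != 0)" flag
  SatbQueue satb_queue;
};

// Conceptual pre-barrier executed before "*field = new_value".
inline void satb_pre_barrier(ThreadStub* t, Oop* field) {
  if (t->satb_marking_active) {               // barrier is a no-op when marking is off
    Oop prev = *field;
    if (prev.raw != nullptr) {                // nothing to record for a NULL previous value
      t->satb_queue.enqueue(prev);
    }
  }
}

int main() {
  ThreadStub t;
  t.satb_marking_active = true;
  Oop field = { &t };                         // the field currently points at some object
  satb_pre_barrier(&t, &field);               // would run just before the real store
  return t.satb_queue.buf.size() == 1 ? 0 : 1;
}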
@@ -760,14 +825,11 @@ void PhaseMacroExpand::process_users_of_allocation(AllocateNode *alloc) {
           if (n->is_Store()) {
             _igvn.replace_node(n, n->in(MemNode::Memory));
           } else {
-            assert( n->Opcode() == Op_CastP2X, "CastP2X required");
             eliminate_card_mark(n);
           }
           k -= (oc2 - use->outcnt());
         }
       } else {
-        assert( !use->is_SafePoint(), "safepoint uses must have been already elimiated");
-        assert( use->Opcode() == Op_CastP2X, "CastP2X required");
         eliminate_card_mark(use);
       }
       j -= (oc1 - res->outcnt());
@@ -1489,8 +1489,7 @@ MachNode *Matcher::ReduceInst( State *s, int rule, Node *&mem ) {
 #ifdef ASSERT
   // Verify adr type after matching memory operation
   const MachOper* oper = mach->memory_operand();
-  if (oper != NULL && oper != (MachOper*)-1 &&
-      mach->adr_type() != TypeRawPtr::BOTTOM) { // non-direct addressing mode
+  if (oper != NULL && oper != (MachOper*)-1) {
     // It has a unique memory operand.  Find corresponding ideal mem node.
     Node* m = NULL;
     if (leaf->is_Mem()) {
@@ -50,6 +50,13 @@ void Compile::Output() {
   init_scratch_buffer_blob();
   if (failing())  return; // Out of memory

+  // The number of new nodes (mostly MachNop) is proportional to
+  // the number of java calls and inner loops which are aligned.
+  if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
+                            C->inner_loops()*(OptoLoopAlignment-1)),
+                            "out of nodes before code generation" ) ) {
+    return;
+  }
   // Make sure I can find the Start Node
   Block_Array& bbs = _cfg->_bbs;
   Block *entry = _cfg->_blocks[1];

@@ -1105,7 +1112,7 @@ void Compile::Fill_buffer() {
   uint *call_returns = NEW_RESOURCE_ARRAY(uint, _cfg->_num_blocks+1);

   uint return_offset = 0;
-  MachNode *nop = new (this) MachNopNode();
+  int nop_size = (new (this) MachNopNode())->size(_regalloc);

   int previous_offset = 0;
   int current_offset  = 0;

@@ -1188,7 +1195,6 @@ void Compile::Fill_buffer() {
       }

       // align the instruction if necessary
-      int nop_size = nop->size(_regalloc);
       int padding = mach->compute_padding(current_offset);
       // Make sure safepoint node for polling is distinct from a call's
       // return by adding a nop if needed.

@@ -1372,7 +1378,6 @@ void Compile::Fill_buffer() {

     // If the next block is the top of a loop, pad this block out to align
     // the loop top a little. Helps prevent pipe stalls at loop back branches.
-    int nop_size = (new (this) MachNopNode())->size(_regalloc);
     if( i<_cfg->_num_blocks-1 ) {
       Block *nb = _cfg->_blocks[i+1];
       uint padding = nb->alignment_padding(current_offset);
@@ -450,6 +450,8 @@ public:
     subsume_node(old, nn);
   }

+  bool   delay_transform() const { return _delay_transform; }
+
   void   set_delay_transform(bool delay) {
     _delay_transform = delay;
   }
@@ -1216,6 +1216,8 @@ inline bool Type::is_floatingpoint() const {
 #define Op_AndX       Op_AndL
 #define Op_AddX       Op_AddL
 #define Op_SubX       Op_SubL
+#define Op_XorX       Op_XorL
+#define Op_URShiftX   Op_URShiftL
 // conversions
 #define ConvI2X(x)    ConvI2L(x)
 #define ConvL2X(x)    (x)

@@ -1258,6 +1260,8 @@ inline bool Type::is_floatingpoint() const {
 #define Op_AndX       Op_AndI
 #define Op_AddX       Op_AddI
 #define Op_SubX       Op_SubI
+#define Op_XorX       Op_XorI
+#define Op_URShiftX   Op_URShiftI
 // conversions
 #define ConvI2X(x)    (x)
 #define ConvL2X(x)    ConvL2I(x)
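The Op_XorX / Op_URShiftX additions extend the existing word-size macro scheme: one "X" name resolves to the long (L) opcode on 64-bit builds and to the int (I) opcode on 32-bit builds, so shared IR-construction code such as the barrier elimination above stays word-size agnostic. A small self-contained sketch of the same selection pattern is below; the enum values and the _LP64 test are illustrative only.

#include <cstdio>

enum Opcode { Op_XorI, Op_XorL, Op_URShiftI, Op_URShiftL };

// Pick the machine-word variant once, use the X name everywhere else.
#ifdef _LP64
#define Op_XorX      Op_XorL
#define Op_URShiftX  Op_URShiftL
#else
#define Op_XorX      Op_XorI
#define Op_URShiftX  Op_URShiftI
#endif

int main() {
  // Code written against the X names compiles unchanged on both word sizes.
  printf("xor opcode = %d, urshift opcode = %d\n", (int)Op_XorX, (int)Op_URShiftX);
  return 0;
}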
@@ -1048,7 +1048,11 @@ UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSwapObject(JNIEnv *env, jobject unsafe,
   oop e = JNIHandles::resolve(e_h);
   oop p = JNIHandles::resolve(obj);
   HeapWord* addr = (HeapWord *)index_oop_from_field_offset_long(p, offset);
-  update_barrier_set_pre((void*)addr, e);
+  if (UseCompressedOops) {
+    update_barrier_set_pre((narrowOop*)addr, e);
+  } else {
+    update_barrier_set_pre((oop*)addr, e);
+  }
   oop res = oopDesc::atomic_compare_exchange_oop(x, addr, e);
   jboolean success = (res == e);
   if (success)
@@ -1202,18 +1202,13 @@ void Arguments::set_ergonomics_flags() {
   }

 #ifdef _LP64
-  // Compressed Headers do not work with CMS, which uses a bit in the klass
-  // field offset to determine free list chunk markers.
   // Check that UseCompressedOops can be set with the max heap size allocated
   // by ergonomics.
   if (MaxHeapSize <= max_heap_for_compressed_oops()) {
-    if (FLAG_IS_DEFAULT(UseCompressedOops) && !UseG1GC) {
+    if (FLAG_IS_DEFAULT(UseCompressedOops)) {
       // Turn off until bug is fixed.
       // the following line to return it to default status.
       // FLAG_SET_ERGO(bool, UseCompressedOops, true);
-    } else if (UseCompressedOops && UseG1GC) {
-      warning(" UseCompressedOops does not currently work with UseG1GC; switching off UseCompressedOops. ");
-      FLAG_SET_DEFAULT(UseCompressedOops, false);
     }
 #ifdef _WIN64
     if (UseLargePages && UseCompressedOops) {

@@ -1454,6 +1449,7 @@ bool Arguments::check_gc_consistency() {
   if (UseSerialGC)                       i++;
   if (UseConcMarkSweepGC || UseParNewGC) i++;
   if (UseParallelGC || UseParallelOldGC) i++;
+  if (UseG1GC)                           i++;
   if (i > 1) {
     jio_fprintf(defaultStream::error_stream(),
                 "Conflicting collector combinations in option list; "

@@ -2603,22 +2599,6 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
     return result;
   }

-  // These are hacks until G1 is fully supported and tested
-  // but lets you force -XX:+UseG1GC in PRT and get it where it (mostly) works
-  if (UseG1GC) {
-    if (UseConcMarkSweepGC || UseParNewGC || UseParallelGC || UseParallelOldGC || UseSerialGC) {
-#ifndef PRODUCT
-      tty->print_cr("-XX:+UseG1GC is incompatible with other collectors, using UseG1GC");
-#endif // PRODUCT
-      UseConcMarkSweepGC = false;
-      UseParNewGC        = false;
-      UseParallelGC      = false;
-      UseParallelOldGC   = false;
-      UseSerialGC        = false;
-    }
-    no_shared_spaces();
-  }
-
 #ifndef PRODUCT
   if (TraceBytecodesAt != 0) {
     TraceBytecodes = true;

@@ -2676,10 +2656,7 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
   } else if (UseParNewGC) {
     // Set some flags for ParNew
     set_parnew_gc_flags();
-  }
-  // Temporary; make the "if" an "else-if" before
-  // we integrate G1. XXX
-  if (UseG1GC) {
+  } else if (UseG1GC) {
     // Set some flags for garbage-first, if needed.
     set_g1_gc_flags();
   }
@@ -49,7 +49,7 @@ void SafepointSynchronize::begin() {
     // In the future we should investigate whether CMS can use the
     // more-general mechanism below.  DLD (01/05).
     ConcurrentMarkSweepThread::synchronize(false);
-  } else {
+  } else if (UseG1GC) {
     ConcurrentGCThread::safepoint_synchronize();
   }
 #endif // SERIALGC

@@ -400,7 +400,7 @@ void SafepointSynchronize::end() {
   // If there are any concurrent GC threads resume them.
   if (UseConcMarkSweepGC) {
     ConcurrentMarkSweepThread::desynchronize(false);
-  } else {
+  } else if (UseG1GC) {
     ConcurrentGCThread::safepoint_desynchronize();
   }
 #endif // SERIALGC
@@ -119,6 +119,7 @@ JRT_LEAF(void, SharedRuntime::g1_wb_pre(oopDesc* orig, JavaThread *thread))
     assert(false, "should be optimized out");
     return;
   }
+  assert(orig->is_oop(true /* ignore mark word */), "Error");
   // store the original value that was in the field reference
   thread->satb_mark_queue().enqueue(orig);
 JRT_END
@@ -104,7 +104,17 @@ StackValue* StackValue::create_stack_value(const frame* fr, const RegisterMap* r
     }
 #endif
     case Location::oop: {
-      Handle h(*(oop *)value_addr); // Wrap a handle around the oop
+      oop val = *(oop *)value_addr;
+#ifdef _LP64
+      if (Universe::is_narrow_oop_base(val)) {
+        // Compiled code may produce decoded oop = narrow_oop_base
+        // when a narrow oop implicit null check is used.
+        // The narrow_oop_base could be NULL or be the address
+        // of the page below heap. Use NULL value for both cases.
+        val = (oop)NULL;
+      }
+#endif
+      Handle h(val); // Wrap a handle around the oop
       return new StackValue(h);
     }
     case Location::addr: {
@@ -64,15 +64,18 @@ bool ParallelTaskTerminator::peek_in_queue_set() {
 }

 void ParallelTaskTerminator::yield() {
+  assert(_offered_termination <= _n_threads, "Invariant");
   os::yield();
 }

 void ParallelTaskTerminator::sleep(uint millis) {
+  assert(_offered_termination <= _n_threads, "Invariant");
   os::sleep(Thread::current(), millis, false);
 }

 bool
 ParallelTaskTerminator::offer_termination(TerminatorTerminator* terminator) {
+  assert(_offered_termination < _n_threads, "Invariant");
   Atomic::inc(&_offered_termination);

   uint yield_count = 0;

@@ -96,6 +99,7 @@ ParallelTaskTerminator::offer_termination(TerminatorTerminator* terminator) {
   // Loop waiting for all threads to offer termination or
   // more work.
   while (true) {
+    assert(_offered_termination <= _n_threads, "Invariant");
     // Are all threads offering termination?
     if (_offered_termination == _n_threads) {
       return true;

@@ -151,6 +155,7 @@ ParallelTaskTerminator::offer_termination(TerminatorTerminator* terminator) {
       if (peek_in_queue_set() ||
           (terminator != NULL && terminator->should_exit_termination())) {
         Atomic::dec(&_offered_termination);
+        assert(_offered_termination < _n_threads, "Invariant");
         return false;
       }
     }
@@ -560,8 +560,14 @@ typedef GenericTaskQueueSet<Task> OopTaskQueueSet;
 class StarTask {
   void*  _holder;        // either union oop* or narrowOop*
  public:
-  StarTask(narrowOop *p) { _holder = (void *)((uintptr_t)p | COMPRESSED_OOP_MASK); }
-  StarTask(oop *p)       { _holder = (void*)p; }
+  StarTask(narrowOop* p) {
+    assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");
+    _holder = (void *)((uintptr_t)p | COMPRESSED_OOP_MASK);
+  }
+  StarTask(oop* p) {
+    assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");
+    _holder = (void*)p;
+  }
   StarTask()             { _holder = NULL; }
   operator oop*()        { return (oop*)_holder; }
   operator narrowOop*() {
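The new asserts guard the tagged-pointer trick StarTask relies on: an oop* or narrowOop* is stored in a single void* field and the low COMPRESSED_OOP_MASK bit distinguishes the two kinds, which is only safe if incoming pointers are aligned so that bit is free. A small self-contained sketch of the same idea follows; the types and mask value are illustrative, not HotSpot's definitions.

#include <cassert>
#include <cstdint>

typedef uint32_t narrow_ref;   // stand-in for narrowOop
typedef void*    wide_ref;     // stand-in for oop

class TaggedSlot {
  static const uintptr_t NARROW_MASK = 1;   // low tag bit, assumed free on aligned pointers
  void* _holder;
 public:
  explicit TaggedSlot(narrow_ref* p) {
    assert(((uintptr_t)p & NARROW_MASK) == 0 && "information loss");
    _holder = (void*)((uintptr_t)p | NARROW_MASK);   // tag narrow pointers
  }
  explicit TaggedSlot(wide_ref* p) {
    assert(((uintptr_t)p & NARROW_MASK) == 0 && "information loss");
    _holder = (void*)p;                              // wide pointers stay untagged
  }
  bool is_narrow() const { return ((uintptr_t)_holder & NARROW_MASK) != 0; }
  narrow_ref* as_narrow() const { return (narrow_ref*)((uintptr_t)_holder & ~NARROW_MASK); }
  wide_ref*   as_wide()   const { return (wide_ref*)_holder; }
};

int main() {
  narrow_ref n = 0;
  TaggedSlot s(&n);
  assert(s.is_narrow() && s.as_narrow() == &n);
  return 0;
}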
hotspot/test/compiler/6826736/Test.java (new file)
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6826736
+ * @summary CMS: core dump with -XX:+UseCompressedOops
+ *
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -Xbatch -XX:+ScavengeALot -XX:+UseCompressedOops -XX:HeapBaseMinAddress=32g -XX:CompileThreshold=100 -XX:CompileOnly=Test.test -XX:-BlockLayoutRotateLoops -XX:LoopUnrollLimit=0 Test
+ */
+
+public class Test {
+  int[] arr;
+  int[] arr2;
+  int test(int r) {
+    for (int i = 0; i < 100; i++) {
+      for (int j = i; j < 100; j++) {
+        int a = 0;
+        for (long k = 0; k < 100; k++) {
+          a += k;
+        }
+        if (arr != null)
+          a = arr[j];
+        r += a;
+      }
+    }
+    return r;
+  }
+
+  public static void main(String[] args) {
+    int r = 0;
+    Test t = new Test();
+    for (int i = 0; i < 100; i++) {
+      t.arr = new int[100];
+      r = t.test(r);
+    }
+    System.out.println("Warmup 1 is done.");
+    for (int i = 0; i < 100; i++) {
+      t.arr = null;
+      r = t.test(r);
+    }
+    System.out.println("Warmup 2 is done.");
+    for (int i = 0; i < 100; i++) {
+      t.arr = new int[100];
+      r = t.test(r);
+    }
+    System.out.println("Warmup is done.");
+    for (int i = 0; i < 100; i++) {
+      t.arr = new int[1000000];
+      t.arr = null;
+      r = t.test(r);
+    }
+  }
+}
@@ -1,5 +1,5 @@
 /*
- * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2009 Google Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
hotspot/test/compiler/6851282/Test.java (new file)
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6851282
+ * @summary JIT miscompilation results in null entry in array when using CompressedOops
+ *
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops Test
+ */
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class Test {
+  void foo(A a, A[] as) {
+    for (A a1 : as) {
+      B[] filtered = a.c(a1);
+      for (B b : filtered) {
+        if (b == null) {
+          System.out.println("bug: b == null");
+          System.exit(97);
+        }
+      }
+    }
+  }
+
+  public static void main(String[] args) {
+    List<A> as = new ArrayList<A>();
+    for (int i = 0; i < 5000; i++) {
+      List<B> bs = new ArrayList<B>();
+      for (int j = i; j < i + 1000; j++)
+        bs.add(new B(j));
+      as.add(new A(bs.toArray(new B[0])));
+    }
+    new Test().foo(as.get(0), as.subList(1, as.size()).toArray(new A[0]));
+  }
+}
+
+class A {
+  final B[] bs;
+
+  public A(B[] bs) {
+    this.bs = bs;
+  }
+
+  final B[] c(final A a) {
+    return new BoxedArray<B>(bs).filter(new Function<B, Boolean>() {
+      public Boolean apply(B arg) {
+        for (B b : a.bs) {
+          if (b.d == arg.d)
+            return true;
+        }
+        return false;
+      }
+    });
+  }
+}
+
+class BoxedArray<T> {
+
+  private final T[] array;
+
+  BoxedArray(T[] array) {
+    this.array = array;
+  }
+
+  public T[] filter(Function<T, Boolean> function) {
+    boolean[] include = new boolean[array.length];
+    int len = 0;
+    int i = 0;
+    while (i < array.length) {
+      if (function.apply(array[i])) {
+        include[i] = true;
+        len += 1;
+      }
+      i += 1;
+    }
+    T[] result = (T[]) java.lang.reflect.Array.newInstance(array.getClass().getComponentType(), len);
+    len = 0;
+    i = 0;
+    while (len < result.length) {
+      if (include[i]) {
+        result[len] = array[i];
+        len += 1;
+      }
+      i += 1;
+    }
+    return result;
+  }
+}
+
+interface Function<T, R> {
+  R apply(T arg);
+}
+
+class B {
+  final int d;
+  public B(int d) {
+    this.d = d;
+  }
+}
hotspot/test/compiler/6857159/Test6857159.java (new file)
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6857159
+ * @summary local schedule failed with checkcast of Thread.currentThread()
+ *
+ * @run shell Test6857159.sh
+ */
+
+public class Test6857159 extends Thread {
+  static class ct0 extends Test6857159 {
+    public void message() {
+      // System.out.println("message");
+    }
+
+    public void run() {
+      message();
+      ct0 ct = (ct0) Thread.currentThread();
+      ct.message();
+    }
+  }
+  static class ct1 extends ct0 {
+    public void message() {
+      // System.out.println("message");
+    }
+  }
+  static class ct2 extends ct0 {
+    public void message() {
+      // System.out.println("message");
+    }
+  }
+
+  public static void main(String[] args) throws Exception {
+    for (int i = 0; i < 100000; i++) {
+      Thread t = null;
+      switch (i % 3) {
+        case 0: t = new ct0(); break;
+        case 1: t = new ct1(); break;
+        case 2: t = new ct2(); break;
+      }
+      t.start();
+      t.join();
+    }
+  }
+}
hotspot/test/compiler/6857159/Test6857159.sh (new file)
@@ -0,0 +1,65 @@
+#!/bin/sh
+#
+# Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+# CA 95054 USA or visit www.sun.com if you need additional information or
+# have any questions.
+#
+#
+
+if [ "${TESTSRC}" = "" ]
+then
+  echo "TESTSRC not set.  Test cannot execute.  Failed."
+  exit 1
+fi
+echo "TESTSRC=${TESTSRC}"
+if [ "${TESTJAVA}" = "" ]
+then
+  echo "TESTJAVA not set.  Test cannot execute.  Failed."
+  exit 1
+fi
+echo "TESTJAVA=${TESTJAVA}"
+if [ "${TESTCLASSES}" = "" ]
+then
+  echo "TESTCLASSES not set.  Test cannot execute.  Failed."
+  exit 1
+fi
+echo "TESTCLASSES=${TESTCLASSES}"
+echo "CLASSPATH=${CLASSPATH}"
+
+set -x
+
+cp ${TESTSRC}/Test6857159.java .
+cp ${TESTSRC}/Test6857159.sh .
+
+${TESTJAVA}/bin/javac -d . Test6857159.java
+
+${TESTJAVA}/bin/java ${TESTVMOPTS} -Xbatch -XX:+PrintCompilation -XX:CompileOnly=Test6857159\$ct.run Test6857159 > test.out 2>&1
+
+grep "COMPILE SKIPPED" test.out
+
+result=$?
+if [ $result -eq 1 ]
+then
+  echo "Passed"
+  exit 0
+else
+  echo "Failed"
+  exit 1
+fi
hotspot/test/compiler/6859338/Test6859338.java (new file)
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6859338
+ * @summary Assertion failure in sharedRuntime.cpp
+ *
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-InlineObjectHash -Xbatch -XX:-ProfileInterpreter Test6859338
+ */
+
+public class Test6859338 {
+  static Object[] o = new Object[] { new Object(), null };
+  public static void main(String[] args) {
+    int total = 0;
+    try {
+      // Exercise the implicit null check in the unverified entry point
+      for (int i = 0; i < 40000; i++) {
+        int limit = o.length;
+        if (i < 20000) limit = 1;
+        for (int j = 0; j < limit; j++) {
+          total += o[j].hashCode();
+        }
+      }
+
+    } catch (NullPointerException e) {
+      // this is expected.  A true failure causes a crash
+    }
+  }
+}
hotspot/test/compiler/6860469/Test.java (new file)
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2009 Google Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6860469
+ * @summary remix_address_expressions reshapes address expression with bad control
+ *
+ * @run main/othervm -Xcomp -XX:CompileOnly=Test.C Test
+ */
+
+public class Test {
+
+  private static final int H = 16;
+  private static final int F = 9;
+
+  static int[] fl = new int[1 << F];
+
+  static int C(int ll, int f) {
+    int max = -1;
+    int min = H + 1;
+
+    if (ll != 0) {
+      if (ll < min) {
+        min = ll;
+      }
+      if (ll > max) {
+        max = ll;
+      }
+    }
+
+    if (f > max) {
+      f = max;
+    }
+    if (min > f) {
+      min = f;
+    }
+
+    for (int mc = 1 >> max - f; mc <= 0; mc++) {
+      int i = mc << (32 - f);
+      fl[i] = max;
+    }
+
+    return min;
+  }
+
+  public static void main(String argv[]) {
+    C(0, 10);
+  }
+}