Merge
commit e003616106
@@ -1075,6 +1075,7 @@ enum {
 };
 
+// Below length is the # elements copied.
 template <class T> int obj_arraycopy_work(oopDesc* src, T* src_addr,
                                           oopDesc* dst, T* dst_addr,
                                           int length) {
 
@@ -1083,22 +1084,22 @@ template <class T> int obj_arraycopy_work(oopDesc* src, T* src_addr,
   // barrier. The assert will fail if this is not the case.
   // Note that we use the non-virtual inlineable variant of write_ref_array.
   BarrierSet* bs = Universe::heap()->barrier_set();
-  assert(bs->has_write_ref_array_opt(),
-         "Barrier set must have ref array opt");
+  assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
+  assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
   if (src == dst) {
     // same object, no check
+    bs->write_ref_array_pre(dst_addr, length);
     Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
-    bs->write_ref_array(MemRegion((HeapWord*)dst_addr,
-                                  (HeapWord*)(dst_addr + length)));
+    bs->write_ref_array((HeapWord*)dst_addr, length);
     return ac_ok;
   } else {
     klassOop bound = objArrayKlass::cast(dst->klass())->element_klass();
     klassOop stype = objArrayKlass::cast(src->klass())->element_klass();
     if (stype == bound || Klass::cast(stype)->is_subtype_of(bound)) {
       // Elements are guaranteed to be subtypes, so no check necessary
+      bs->write_ref_array_pre(dst_addr, length);
       Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
-      bs->write_ref_array(MemRegion((HeapWord*)dst_addr,
-                                    (HeapWord*)(dst_addr + length)));
+      bs->write_ref_array((HeapWord*)dst_addr, length);
       return ac_ok;
     }
   }
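Note: the shape this hunk establishes — a pre-barrier over the destination slots, then the copy, then a post-barrier over the same range — recurs in every arraycopy path touched below. A minimal sketch, using only the names visible in the hunk (the wrapper function itself is hypothetical):

    // Hypothetical wrapper; BarrierSet, Copy and the barrier calls are the
    // ones shown in the hunk above.
    template <class T>
    static void copy_oop_array(BarrierSet* bs, T* src_addr, T* dst_addr, int length) {
      bs->write_ref_array_pre(dst_addr, length);              // announce old values (e.g. for a G1-style pre-barrier)
      Copy::conjoint_oops_atomic(src_addr, dst_addr, length); // the actual element copy
      bs->write_ref_array((HeapWord*)dst_addr, length);       // dirty cards for [dst_addr, dst_addr + length)
    }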
@@ -1162,9 +1163,16 @@ JRT_LEAF(void, Runtime1::oop_arraycopy(HeapWord* src, HeapWord* dst, int num))
 #endif
 
   if (num == 0) return;
-  Copy::conjoint_oops_atomic((oop*) src, (oop*) dst, num);
   BarrierSet* bs = Universe::heap()->barrier_set();
-  bs->write_ref_array(MemRegion(dst, dst + num));
+  assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
+  assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
+  if (UseCompressedOops) {
+    bs->write_ref_array_pre((narrowOop*)dst, num);
+  } else {
+    bs->write_ref_array_pre((oop*)dst, num);
+  }
+  Copy::conjoint_oops_atomic((oop*) src, (oop*) dst, num);
+  bs->write_ref_array(dst, num);
 JRT_END
 
 
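Note: the pre-barrier is typed on the slot layout, which is why the hunk dispatches on UseCompressedOops. A standalone illustration (not HotSpot code) of the underlying point — the same element count covers different byte ranges depending on slot width:

    #include <cstddef>
    #include <cstdint>

    typedef uint32_t narrowOopSlot;  // 4-byte compressed reference slot (illustrative)
    typedef void*    fullOopSlot;    // full-width reference slot on LP64 (illustrative)

    // Byte span of num destination slots, by slot width.
    static size_t dest_bytes(bool compressed, size_t num) {
      return num * (compressed ? sizeof(narrowOopSlot) : sizeof(fullOopSlot));
    }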
@@ -1121,10 +1121,23 @@ class BacktraceBuilder: public StackObj {
   }
 
   void flush() {
+    // The following appears to have been an optimization to save from
+    // doing a barrier for each individual store into the _methods array,
+    // but rather to do it for the entire array after the series of writes.
+    // That optimization seems to have been lost when compressed oops was
+    // implemented. However, the extra card-marks below was left in place,
+    // but is now redundant because the individual stores into the
+    // _methods array already execute the barrier code. CR 6918185 has
+    // been filed so the original code may be restored by deferring the
+    // barriers until after the entire sequence of stores, thus re-enabling
+    // the intent of the original optimization. In the meantime the redundant
+    // card mark below is now disabled.
     if (_dirty && _methods != NULL) {
+#if 0
       BarrierSet* bs = Universe::heap()->barrier_set();
       assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
       bs->write_ref_array((HeapWord*)_methods->base(), _methods->length());
+#endif
       _dirty = false;
     }
   }
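Note: a sketch of the optimization CR 6918185 asks to restore (hypothetical shape, reusing names from the hunk above; not the eventual fix): perform the element stores without per-store barriers, then cover the whole array with one post-barrier.

    // Hypothetical helper illustrating the deferred-barrier intent.
    void flush_deferred(BarrierSet* bs, objArrayOop methods, int length) {
      // ... a series of raw stores into methods happens before this point ...
      bs->write_ref_array((HeapWord*)methods->base(), length);  // one barrier for the whole series
    }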
@@ -1168,9 +1181,7 @@ class BacktraceBuilder: public StackObj {
       method = mhandle();
     }
 
-    _methods->obj_at_put(_index, method);
-    // bad for UseCompressedOops
-    // *_methods->obj_at_addr(_index) = method;
+    _methods->obj_at_put(_index, method);
     _bcis->ushort_at_put(_index, bci);
     _index++;
     _dirty = true;
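Note: a toy illustration (not HotSpot code) of the "bad for UseCompressedOops" comment being deleted above: with compressed oops an objArray slot holds a 32-bit encoded value, so a raw full-width pointer store both overruns the slot and skips the encoding that obj_at_put performs.

    #include <cstdint>

    typedef uint32_t narrowOop;  // illustrative

    // Base-plus-shift encoding, as typically used for compressed references.
    static narrowOop encode(void* p, uintptr_t heap_base) {
      return (narrowOop)(((uintptr_t)p - heap_base) >> 3);
    }

    static void store_slot(narrowOop* slot, void* p, uintptr_t heap_base) {
      *slot = encode(p, heap_base);  // correct: encode, then 32-bit store
      // *(void**)slot = p;          // wrong: raw 64-bit store, no encoding
    }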
@@ -300,7 +300,23 @@ jbyte* ConcurrentG1Refine::cache_insert(jbyte* card_ptr, bool* defer) {
   int count;
   jbyte* cached_ptr = add_card_count(card_ptr, &count, defer);
   assert(cached_ptr != NULL, "bad cached card ptr");
-  assert(!is_young_card(cached_ptr), "shouldn't get a card in young region");
+
+  if (is_young_card(cached_ptr)) {
+    // The region containing cached_ptr has been freed during a clean up
+    // pause, reallocated, and tagged as young.
+    assert(cached_ptr != card_ptr, "shouldn't be");
+
+    // We've just inserted a new old-gen card pointer into the card count
+    // cache and evicted the previous contents of that count slot.
+    // The evicted card pointer has been determined to be in a young region
+    // and so cannot be the newly inserted card pointer (that will be
+    // in an old region).
+    // The count for newly inserted card will be set to zero during the
+    // insertion, so we don't want to defer the cleaning of the newly
+    // inserted card pointer.
+    assert(*defer == false, "deferring non-hot card");
+    return NULL;
+  }
 
   // The card pointer we obtained from card count cache is not hot
   // so do not store it in the cache; return it for immediate
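Note: a hypothetical caller shape for the contract the hunk above describes; cache_insert is the function from the diff, everything else is illustrative scaffolding.

    #include <cstddef>

    typedef signed char jbyte;
    jbyte* cache_insert(jbyte* card_ptr, bool* defer);  // ConcurrentG1Refine::cache_insert

    void handle_card(jbyte* card_ptr) {
      bool defer = false;
      jbyte* to_clean = cache_insert(card_ptr, &defer);
      if (to_clean != NULL && !defer) {
        // the returned (evicted, non-hot) card is cleaned immediately
      }
      // NULL return (e.g. the young-card case above) or defer == true:
      // nothing to clean right now
    }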
@@ -2505,6 +2505,7 @@ G1CollectedHeap* G1CollectedHeap::heap() {
 }
 
 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
+  // always_do_update_barrier = false;
   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
   // Call allocation profiler
   AllocationProfiler::iterate_since_last_gc();
@@ -2518,6 +2519,7 @@ void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
   // is set.
   COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
                            "derived pointer present"));
+  // always_do_update_barrier = true;
 }
 
 void G1CollectedHeap::do_collection_pause() {
@@ -175,6 +175,7 @@ arguments.cpp jvmtiExport.hpp
 arguments.cpp management.hpp
 arguments.cpp oop.inline.hpp
 arguments.cpp os_<os_family>.inline.hpp
+arguments.cpp referenceProcessor.hpp
 arguments.cpp universe.inline.hpp
 arguments.cpp vm_version_<arch>.hpp
 
@@ -124,8 +124,6 @@ public:
   // Below length is the # array elements being written
   virtual void write_ref_array_pre( oop* dst, int length) {}
   virtual void write_ref_array_pre(narrowOop* dst, int length) {}
-  // Below MemRegion mr is expected to be HeapWord-aligned
-  inline void write_ref_array(MemRegion mr);
   // Below count is the # array elements being written, starting
   // at the address "start", which may not necessarily be HeapWord-aligned
   inline void write_ref_array(HeapWord* start, size_t count);
@@ -42,16 +42,6 @@ void BarrierSet::write_ref_field(void* field, oop new_val) {
   }
 }
 
-void BarrierSet::write_ref_array(MemRegion mr) {
-  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start() , "Unaligned start");
-  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end" );
-  if (kind() == CardTableModRef) {
-    ((CardTableModRefBS*)this)->inline_write_ref_array(mr);
-  } else {
-    write_ref_array_work(mr);
-  }
-}
-
 // count is number of array elements being written
 void BarrierSet::write_ref_array(HeapWord* start, size_t count) {
   assert(count <= (size_t)max_intx, "count too large");
@@ -61,12 +51,12 @@ void BarrierSet::write_ref_array(HeapWord* start, size_t count) {
   // strictly necessary for current uses, but a case of good hygiene and,
   // if you will, aesthetics) and the second upward (this is essential for
   // current uses) to a HeapWord boundary, so we mark all cards overlapping
-  // this write. In the event that this evolves in the future to calling a
+  // this write. If this evolves in the future to calling a
   // logging barrier of narrow oop granularity, like the pre-barrier for G1
   // (mentioned here merely by way of example), we will need to change this
-  // interface, much like the pre-barrier one above, so it is "exactly precise"
-  // (if i may be allowed the adverbial redundancy for emphasis) and does not
-  // include narrow oop slots not included in the original write interval.
+  // interface, so it is "exactly precise" (if i may be allowed the adverbial
+  // redundancy for emphasis) and does not include narrow oop slots not
+  // included in the original write interval.
   HeapWord* aligned_start = (HeapWord*)align_size_down((uintptr_t)start, HeapWordSize);
   HeapWord* aligned_end   = (HeapWord*)align_size_up  ((uintptr_t)end,   HeapWordSize);
   // If compressed oops were not being used, these should already be aligned
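Note: toy numbers for the alignment step above (standalone arithmetic, not HotSpot code): a narrowOop array need not start or end on a HeapWord boundary, so the marked range is widened to cover the whole write.

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uintptr_t HeapWordSize = 8;                  // LP64
      uintptr_t start = 0x1004;                          // narrowOop slot, only 4-byte aligned
      uintptr_t end   = start + 3 * sizeof(uint32_t);    // 3 compressed slots -> 0x1010
      uintptr_t aligned_start = start & ~(HeapWordSize - 1);                    // down -> 0x1000
      uintptr_t aligned_end   = (end + HeapWordSize - 1) & ~(HeapWordSize - 1); // up   -> 0x1010
      printf("[%#lx, %#lx) -> [%#lx, %#lx)\n",
             (unsigned long)start, (unsigned long)end,
             (unsigned long)aligned_start, (unsigned long)aligned_end);
      return 0;
    }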
@@ -263,10 +263,13 @@ class ReferenceProcessor : public CHeapObj {
                      int parallel_gc_threads = 1,
                      bool mt_processing = false,
                      bool discovered_list_needs_barrier = false);
 
   // RefDiscoveryPolicy values
-  enum {
+  enum DiscoveryPolicy {
     ReferenceBasedDiscovery = 0,
-    ReferentBasedDiscovery = 1
+    ReferentBasedDiscovery = 1,
+    DiscoveryPolicyMin     = ReferenceBasedDiscovery,
+    DiscoveryPolicyMax     = ReferentBasedDiscovery
   };
 
   static void init_statics();
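Note: the Min/Max aliases let range checks track the enum automatically if a policy is ever appended. A minimal standalone illustration of the bracketing pattern:

    enum DiscoveryPolicy {
      ReferenceBasedDiscovery = 0,
      ReferentBasedDiscovery  = 1,
      DiscoveryPolicyMin      = ReferenceBasedDiscovery,  // tracks first value
      DiscoveryPolicyMax      = ReferentBasedDiscovery    // tracks last value
    };

    static bool valid_policy(int v) {
      return v >= DiscoveryPolicyMin && v <= DiscoveryPolicyMax;
    }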
@@ -1487,6 +1487,20 @@ bool Arguments::created_by_java_launcher() {
 //===========================================================================================================
 // Parsing of main arguments
 
+bool Arguments::verify_interval(uintx val, uintx min,
+                                uintx max, const char* name) {
+  // Returns true iff value is in the inclusive interval [min..max]
+  // false, otherwise.
+  if (val >= min && val <= max) {
+    return true;
+  }
+  jio_fprintf(defaultStream::error_stream(),
+              "%s of " UINTX_FORMAT " is invalid; must be between " UINTX_FORMAT
+              " and " UINTX_FORMAT "\n",
+              name, val, min, max);
+  return false;
+}
+
 bool Arguments::verify_percentage(uintx value, const char* name) {
   if (value <= 100) {
     return true;
@@ -1723,6 +1737,16 @@ bool Arguments::check_vm_args_consistency() {
     status = false;
   }
 
+  status = status && verify_interval(RefDiscoveryPolicy,
+                                     ReferenceProcessor::DiscoveryPolicyMin,
+                                     ReferenceProcessor::DiscoveryPolicyMax,
+                                     "RefDiscoveryPolicy");
+
+  // Limit the lower bound of this flag to 1 as it is used in a division
+  // expression.
+  status = status && verify_interval(TLABWasteTargetPercent,
+                                     1, 100, "TLABWasteTargetPercent");
+
   return status;
 }
 
@@ -336,6 +336,8 @@ class Arguments : AllStatic {
   static bool is_bad_option(const JavaVMOption* option, jboolean ignore) {
     return is_bad_option(option, ignore, NULL);
   }
+  static bool verify_interval(uintx val, uintx min,
+                              uintx max, const char* name);
   static bool verify_percentage(uintx value, const char* name);
   static void describe_range_error(ArgsRange errcode);
   static ArgsRange check_memory_size(julong size, julong min_size);
@@ -196,11 +196,19 @@ void stubRoutines_init2() { StubRoutines::initialize2(); }
 // Default versions of arraycopy functions
 //
 
+static void gen_arraycopy_barrier_pre(oop* dest, size_t count) {
+  assert(count != 0, "count should be non-zero");
+  assert(count <= (size_t)max_intx, "count too large");
+  BarrierSet* bs = Universe::heap()->barrier_set();
+  assert(bs->has_write_ref_array_pre_opt(), "Must have pre-barrier opt");
+  bs->write_ref_array_pre(dest, (int)count);
+}
+
 static void gen_arraycopy_barrier(oop* dest, size_t count) {
   assert(count != 0, "count should be non-zero");
   BarrierSet* bs = Universe::heap()->barrier_set();
   assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
-  bs->write_ref_array(MemRegion((HeapWord*)dest, (HeapWord*)(dest + count)));
+  bs->write_ref_array((HeapWord*)dest, count);
 }
 
 JRT_LEAF(void, StubRoutines::jbyte_copy(jbyte* src, jbyte* dest, size_t count))
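Note: write_ref_array_pre takes an int, so gen_arraycopy_barrier_pre checks the size_t count before narrowing it. A standalone analogue of that guard (using INT_MAX rather than HotSpot's max_intx):

    #include <cassert>
    #include <climits>
    #include <cstddef>

    // Narrow a size_t element count to int, asserting it fits.
    static int checked_count(size_t count) {
      assert(count <= (size_t)INT_MAX && "count too large");
      return (int)count;
    }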
@@ -240,6 +248,7 @@ JRT_LEAF(void, StubRoutines::oop_copy(oop* src, oop* dest, size_t count))
   SharedRuntime::_oop_array_copy_ctr++; // Slow-path oop array copy
 #endif // !PRODUCT
   assert(count != 0, "count should be non-zero");
+  gen_arraycopy_barrier_pre(dest, count);
   Copy::conjoint_oops_atomic(src, dest, count);
   gen_arraycopy_barrier(dest, count);
 JRT_END
@@ -281,6 +290,7 @@ JRT_LEAF(void, StubRoutines::arrayof_oop_copy(HeapWord* src, HeapWord* dest, siz
   SharedRuntime::_oop_array_copy_ctr++; // Slow-path oop array copy
 #endif // !PRODUCT
   assert(count != 0, "count should be non-zero");
+  gen_arraycopy_barrier_pre((oop *) dest, count);
   Copy::arrayof_conjoint_oops(src, dest, count);
   gen_arraycopy_barrier((oop *) dest, count);
 JRT_END