Merge

commit b7251c4eb4
@@ -101,36 +101,6 @@ bool ShenandoahBarrierSet::need_keep_alive_barrier(DecoratorSet decorators, BasicType type) {
   return (on_weak_ref || unknown) && (keep_alive || is_traversal_mode);
 }
 
-template <class T>
-inline void ShenandoahBarrierSet::inline_write_ref_field_pre(T* field, oop new_val) {
-  shenandoah_assert_not_in_cset_loc_except(field, _heap->cancelled_gc());
-  if (_heap->is_concurrent_mark_in_progress()) {
-    T heap_oop = RawAccess<>::oop_load(field);
-    if (!CompressedOops::is_null(heap_oop)) {
-      enqueue(CompressedOops::decode(heap_oop));
-    }
-  }
-}
-
-// These are the more general virtual versions.
-void ShenandoahBarrierSet::write_ref_field_pre_work(oop* field, oop new_val) {
-  inline_write_ref_field_pre(field, new_val);
-}
-
-void ShenandoahBarrierSet::write_ref_field_pre_work(narrowOop* field, oop new_val) {
-  inline_write_ref_field_pre(field, new_val);
-}
-
-void ShenandoahBarrierSet::write_ref_field_pre_work(void* field, oop new_val) {
-  guarantee(false, "Not needed");
-}
-
-void ShenandoahBarrierSet::write_ref_field_work(void* v, oop o, bool release) {
-  shenandoah_assert_not_in_cset_loc_except(v, _heap->cancelled_gc());
-  shenandoah_assert_not_forwarded_except (v, o, o == NULL || _heap->cancelled_gc() || !_heap->is_concurrent_mark_in_progress());
-  shenandoah_assert_not_in_cset_except (v, o, o == NULL || _heap->cancelled_gc() || !_heap->is_concurrent_mark_in_progress());
-}
-
 oop ShenandoahBarrierSet::load_reference_barrier_not_null(oop obj) {
   if (ShenandoahLoadRefBarrier && _heap->has_forwarded_objects()) {
     return load_reference_barrier_impl(obj);
@@ -234,30 +204,6 @@ oop ShenandoahBarrierSet::load_reference_barrier_impl(oop obj) {
   }
 }
 
-void ShenandoahBarrierSet::storeval_barrier(oop obj) {
-  if (ShenandoahStoreValEnqueueBarrier && !CompressedOops::is_null(obj) && _heap->is_concurrent_traversal_in_progress()) {
-    enqueue(obj);
-  }
-}
-
-void ShenandoahBarrierSet::keep_alive_barrier(oop obj) {
-  if (ShenandoahKeepAliveBarrier && _heap->is_concurrent_mark_in_progress()) {
-    enqueue(obj);
-  }
-}
-
-void ShenandoahBarrierSet::enqueue(oop obj) {
-  shenandoah_assert_not_forwarded_if(NULL, obj, _heap->is_concurrent_traversal_in_progress());
-  assert(_satb_mark_queue_set.is_active(), "only get here when SATB active");
-
-  // Filter marked objects before hitting the SATB queues. The same predicate would
-  // be used by SATBMQ::filter to eliminate already marked objects downstream, but
-  // filtering here helps to avoid wasteful SATB queueing work to begin with.
-  if (!_heap->requires_marking<false>(obj)) return;
-
-  ShenandoahThreadLocalData::satb_mark_queue(Thread::current()).enqueue_known_active(obj);
-}
-
 void ShenandoahBarrierSet::on_thread_create(Thread* thread) {
   // Create thread local data
   ShenandoahThreadLocalData::create(thread);
@@ -77,17 +77,6 @@ public:
   inline void clone_barrier(oop src);
   void clone_barrier_runtime(oop src);
 
-  // We export this to make it available in cases where the static
-  // type of the barrier set is known. Note that it is non-virtual.
-  template <class T> inline void inline_write_ref_field_pre(T* field, oop new_val);
-
-  // These are the more general virtual versions.
-  void write_ref_field_pre_work(oop* field, oop new_val);
-  void write_ref_field_pre_work(narrowOop* field, oop new_val);
-  void write_ref_field_pre_work(void* field, oop new_val);
-
-  void write_ref_field_work(void* v, oop o, bool release = false);
-
   virtual void on_thread_create(Thread* thread);
   virtual void on_thread_destroy(Thread* thread);
   virtual void on_thread_attach(Thread* thread);
@@ -96,8 +85,17 @@ public:
   static inline oop resolve_forwarded_not_null(oop p);
   static inline oop resolve_forwarded(oop p);
 
-  void storeval_barrier(oop obj);
-  void keep_alive_barrier(oop obj);
+  template <DecoratorSet decorators, typename T>
+  inline void satb_barrier(T* field);
+  inline void satb_enqueue(oop value);
+  inline void storeval_barrier(oop obj);
+
+  template <DecoratorSet decorators>
+  inline void keep_alive_if_weak(oop value);
+  inline void keep_alive_if_weak(DecoratorSet decorators, oop value);
+  inline void keep_alive_barrier(oop value);
+
+  inline void enqueue(oop obj);
 
   oop load_reference_barrier(oop obj);
   oop load_reference_barrier_not_null(oop obj);
@@ -111,8 +109,6 @@ public:
   oop load_reference_barrier_native(oop obj, oop* load_addr);
   oop load_reference_barrier_native(oop obj, narrowOop* load_addr);
 
-  void enqueue(oop obj);
-
 private:
   template <class T>
   inline void arraycopy_pre_work(T* src, T* dst, size_t count);
@@ -126,27 +122,12 @@ private:
   template <class T>
   oop load_reference_barrier_native_impl(oop obj, T* load_addr);
 
-  static void keep_alive_if_weak(DecoratorSet decorators, oop value) {
-    assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Reference strength must be known");
-    const bool on_strong_oop_ref = (decorators & ON_STRONG_OOP_REF) != 0;
-    const bool peek = (decorators & AS_NO_KEEPALIVE) != 0;
-    if (!peek && !on_strong_oop_ref && value != NULL) {
-      ShenandoahBarrierSet::barrier_set()->keep_alive_barrier(value);
-    }
-  }
-
 public:
   // Callbacks for runtime accesses.
   template <DecoratorSet decorators, typename BarrierSetT = ShenandoahBarrierSet>
   class AccessBarrier: public BarrierSet::AccessBarrier<decorators, BarrierSetT> {
     typedef BarrierSet::AccessBarrier<decorators, BarrierSetT> Raw;
 
-    template <typename T>
-    static oop oop_atomic_cmpxchg_in_heap_impl(T* addr, oop compare_value, oop new_value);
-
-    template <typename T>
-    static oop oop_atomic_xchg_in_heap_impl(T* addr, oop new_value);
-
   public:
     // Heap oop accesses. These accessors get resolved when
     // IN_HEAP is set (e.g. when using the HeapAccess API), it is
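The two header hunks above replace the virtual pre-write hooks (write_ref_field_pre_work and the private static keep_alive_if_weak helper) with decorator-templated inline members, so the barrier choice is resolved at compile time. A minimal sketch of the dispatch idea, assuming the real Access-API types DecoratorSet and HasDecorator; the function name is illustrative, not JDK code:

template <DecoratorSet decorators, typename T>
inline void satb_pre_write_sketch(T* field) {
  // decorators is a compile-time constant, so this test costs nothing at
  // runtime: barriers that can never apply are folded away entirely.
  if (HasDecorator<decorators, AS_NO_KEEPALIVE>::value) {
    return;
  }
  // ... otherwise load the previous value and enqueue it for SATB marking ...
}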
@@ -48,41 +48,124 @@ inline oop ShenandoahBarrierSet::resolve_forwarded(oop p) {
   }
 }
 
-template <DecoratorSet decorators, typename BarrierSetT>
-template <typename T>
-inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_load_in_heap(T* addr) {
-  oop value = Raw::oop_load_in_heap(addr);
-  value = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(value);
-  keep_alive_if_weak(decorators, value);
-  return value;
+inline void ShenandoahBarrierSet::enqueue(oop obj) {
+  shenandoah_assert_not_forwarded_if(NULL, obj, _heap->is_concurrent_traversal_in_progress());
+  assert(_satb_mark_queue_set.is_active(), "only get here when SATB active");
+
+  // Filter marked objects before hitting the SATB queues. The same predicate would
+  // be used by SATBMQ::filter to eliminate already marked objects downstream, but
+  // filtering here helps to avoid wasteful SATB queueing work to begin with.
+  if (!_heap->requires_marking<false>(obj)) return;
+
+  ShenandoahThreadLocalData::satb_mark_queue(Thread::current()).enqueue_known_active(obj);
 }
 
-template <DecoratorSet decorators, typename BarrierSetT>
-inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_load_in_heap_at(oop base, ptrdiff_t offset) {
-  oop value = Raw::oop_load_in_heap_at(base, offset);
-  value = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(value);
-  keep_alive_if_weak(AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength<decorators>(base, offset), value);
-  return value;
+template <DecoratorSet decorators, typename T>
+inline void ShenandoahBarrierSet::satb_barrier(T *field) {
+  if (HasDecorator<decorators, IS_DEST_UNINITIALIZED>::value ||
+      HasDecorator<decorators, AS_NO_KEEPALIVE>::value) {
+    return;
+  }
+  if (ShenandoahSATBBarrier && _heap->is_concurrent_mark_in_progress()) {
+    T heap_oop = RawAccess<>::oop_load(field);
+    if (!CompressedOops::is_null(heap_oop)) {
+      enqueue(CompressedOops::decode(heap_oop));
+    }
+  }
 }
 
+inline void ShenandoahBarrierSet::satb_enqueue(oop value) {
+  assert(value != NULL, "checked before");
+  if (ShenandoahSATBBarrier && _heap->is_concurrent_mark_in_progress()) {
+    enqueue(value);
+  }
+}
+
+inline void ShenandoahBarrierSet::storeval_barrier(oop obj) {
+  if (obj != NULL && ShenandoahStoreValEnqueueBarrier && _heap->is_concurrent_traversal_in_progress()) {
+    enqueue(obj);
+  }
+}
+
+inline void ShenandoahBarrierSet::keep_alive_barrier(oop value) {
+  assert(value != NULL, "checked before");
+  if (ShenandoahKeepAliveBarrier && _heap->is_concurrent_mark_in_progress()) {
+    enqueue(value);
+  }
+}
+
+inline void ShenandoahBarrierSet::keep_alive_if_weak(DecoratorSet decorators, oop value) {
+  assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Reference strength must be known");
+  const bool on_strong_oop_ref = (decorators & ON_STRONG_OOP_REF) != 0;
+  const bool peek = (decorators & AS_NO_KEEPALIVE) != 0;
+  if (!peek && !on_strong_oop_ref) {
+    keep_alive_barrier(value);
+  }
+}
+
+template <DecoratorSet decorators>
+inline void ShenandoahBarrierSet::keep_alive_if_weak(oop value) {
+  assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Reference strength must be known");
+  if (!HasDecorator<decorators, ON_STRONG_OOP_REF>::value &&
+      !HasDecorator<decorators, AS_NO_KEEPALIVE>::value) {
+    keep_alive_barrier(value);
+  }
+}
+
 template <DecoratorSet decorators, typename BarrierSetT>
 template <typename T>
 inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_load_not_in_heap(T* addr) {
   oop value = Raw::oop_load_not_in_heap(addr);
-  value = ShenandoahBarrierSet::barrier_set()->load_reference_barrier_native(value, addr);
-  keep_alive_if_weak(decorators, value);
+  if (value != NULL) {
+    ShenandoahBarrierSet *const bs = ShenandoahBarrierSet::barrier_set();
+    value = bs->load_reference_barrier_native(value, addr);
+    bs->keep_alive_if_weak<decorators>(value);
+  }
   return value;
 }
 
 template <DecoratorSet decorators, typename BarrierSetT>
 template <typename T>
-inline void ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_store_in_heap(T* addr, oop value) {
-  ShenandoahBarrierSet::barrier_set()->storeval_barrier(value);
-  const bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;
-  if (keep_alive) {
-    ShenandoahBarrierSet::barrier_set()->write_ref_field_pre_work(addr, value);
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_load_in_heap(T* addr) {
+  oop value = Raw::oop_load_in_heap(addr);
+  if (value != NULL) {
+    ShenandoahBarrierSet *const bs = ShenandoahBarrierSet::barrier_set();
+    value = bs->load_reference_barrier_not_null(value);
+    bs->keep_alive_if_weak<decorators>(value);
   }
-  Raw::oop_store_in_heap(addr, value);
+  return value;
 }
 
 template <DecoratorSet decorators, typename BarrierSetT>
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_load_in_heap_at(oop base, ptrdiff_t offset) {
+  oop value = Raw::oop_load_in_heap_at(base, offset);
+  if (value != NULL) {
+    ShenandoahBarrierSet *const bs = ShenandoahBarrierSet::barrier_set();
+    value = bs->load_reference_barrier_not_null(value);
+    bs->keep_alive_if_weak(AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength<decorators>(base, offset),
+                           value);
+  }
+  return value;
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline void ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_store_not_in_heap(T* addr, oop value) {
+  shenandoah_assert_marked_if(NULL, value, !CompressedOops::is_null(value) && ShenandoahHeap::heap()->is_evacuation_in_progress());
+  ShenandoahBarrierSet* const bs = ShenandoahBarrierSet::barrier_set();
+  bs->storeval_barrier(value);
+  bs->satb_barrier<decorators>(addr);
+  Raw::oop_store(addr, value);
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline void ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_store_in_heap(T* addr, oop value) {
+  shenandoah_assert_not_in_cset_loc_except(addr, ShenandoahHeap::heap()->cancelled_gc());
+  shenandoah_assert_not_forwarded_except (addr, value, value == NULL || ShenandoahHeap::heap()->cancelled_gc() || !ShenandoahHeap::heap()->is_concurrent_mark_in_progress());
+  shenandoah_assert_not_in_cset_except (addr, value, value == NULL || ShenandoahHeap::heap()->cancelled_gc() || !ShenandoahHeap::heap()->is_concurrent_mark_in_progress());
+
+  oop_store_not_in_heap(addr, value);
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
@@ -90,16 +173,12 @@ inline void ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_store_in_heap_at(oop base, ptrdiff_t offset, oop value) {
   oop_store_in_heap(AccessInternal::oop_field_addr<decorators>(base, offset), value);
 }
 
-template <DecoratorSet decorators, typename BarrierSetT>
-template <typename T>
-inline void ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_store_not_in_heap(T* addr, oop value) {
-  shenandoah_assert_marked_if(NULL, value, !CompressedOops::is_null(value) && ShenandoahHeap::heap()->is_evacuation_in_progress());
-  Raw::oop_store(addr, value);
-}
-
 template <DecoratorSet decorators, typename BarrierSetT>
 template <typename T>
 inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_not_in_heap(T* addr, oop compare_value, oop new_value) {
+  ShenandoahBarrierSet* bs = ShenandoahBarrierSet::barrier_set();
+  bs->storeval_barrier(new_value);
+
   oop res;
   oop expected = compare_value;
   do {
@@ -107,79 +186,53 @@ inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_not_in_heap(T* addr, oop compare_value, oop new_value) {
     res = Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
     expected = res;
   } while ((compare_value != expected) && (resolve_forwarded(compare_value) == resolve_forwarded(expected)));
-  if (res != NULL) {
-    return ShenandoahBarrierSet::barrier_set()->load_reference_barrier_not_null(res);
-  } else {
-    return res;
-  }
-}
 
-template <DecoratorSet decorators, typename BarrierSetT>
-template <typename T>
-inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap_impl(T* addr, oop compare_value, oop new_value) {
-  ShenandoahBarrierSet::barrier_set()->storeval_barrier(new_value);
-  oop result = oop_atomic_cmpxchg_not_in_heap(addr, compare_value, new_value);
-  const bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;
-  if (keep_alive && ShenandoahSATBBarrier && !CompressedOops::is_null(result) &&
-      (result == compare_value) &&
-      ShenandoahHeap::heap()->is_concurrent_mark_in_progress()) {
-    ShenandoahBarrierSet::barrier_set()->enqueue(result);
+  // Note: We don't need a keep-alive-barrier here. We already enqueue any loaded reference for SATB anyway,
+  // because it must be the previous value.
+  if (res != NULL) {
+    res = ShenandoahBarrierSet::barrier_set()->load_reference_barrier_not_null(res);
+    bs->satb_enqueue(res);
   }
-  return result;
+  return res;
 }
 
 template <DecoratorSet decorators, typename BarrierSetT>
 template <typename T>
 inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value) {
-  oop result = oop_atomic_cmpxchg_in_heap_impl(addr, compare_value, new_value);
-  keep_alive_if_weak(decorators, result);
-  return result;
+  return oop_atomic_cmpxchg_not_in_heap(addr, compare_value, new_value);
 }
 
 template <DecoratorSet decorators, typename BarrierSetT>
 inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, oop compare_value, oop new_value) {
-  oop result = oop_atomic_cmpxchg_in_heap_impl(AccessInternal::oop_field_addr<decorators>(base, offset), compare_value, new_value);
-  keep_alive_if_weak(AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength<decorators>(base, offset), result);
-  return result;
+  return oop_atomic_cmpxchg_in_heap(AccessInternal::oop_field_addr<decorators>(base, offset), compare_value, new_value);
 }
 
 template <DecoratorSet decorators, typename BarrierSetT>
 template <typename T>
 inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_not_in_heap(T* addr, oop new_value) {
-  oop previous = Raw::oop_atomic_xchg(addr, new_value);
-  if (previous != NULL) {
-    return ShenandoahBarrierSet::barrier_set()->load_reference_barrier_not_null(previous);
-  } else {
-    return previous;
-  }
-}
+  ShenandoahBarrierSet* bs = ShenandoahBarrierSet::barrier_set();
+  bs->storeval_barrier(new_value);
 
-template <DecoratorSet decorators, typename BarrierSetT>
-template <typename T>
-inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap_impl(T* addr, oop new_value) {
-  ShenandoahBarrierSet::barrier_set()->storeval_barrier(new_value);
-  oop result = oop_atomic_xchg_not_in_heap(addr, new_value);
-  const bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;
-  if (keep_alive && ShenandoahSATBBarrier && !CompressedOops::is_null(result) &&
-      ShenandoahHeap::heap()->is_concurrent_mark_in_progress()) {
-    ShenandoahBarrierSet::barrier_set()->enqueue(result);
+  oop previous = Raw::oop_atomic_xchg(addr, new_value);
+
+  // Note: We don't need a keep-alive-barrier here. We already enqueue any loaded reference for SATB anyway,
+  // because it must be the previous value.
+  if (previous != NULL) {
+    previous = ShenandoahBarrierSet::barrier_set()->load_reference_barrier_not_null(previous);
+    bs->satb_enqueue(previous);
   }
-  return result;
+  return previous;
 }
 
 template <DecoratorSet decorators, typename BarrierSetT>
 template <typename T>
 inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap(T* addr, oop new_value) {
-  oop result = oop_atomic_xchg_in_heap_impl(addr, new_value);
-  keep_alive_if_weak(addr, result);
-  return result;
+  return oop_atomic_xchg_not_in_heap(addr, new_value);
}
 
 template <DecoratorSet decorators, typename BarrierSetT>
 inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, oop new_value) {
-  oop result = oop_atomic_xchg_in_heap_impl(AccessInternal::oop_field_addr<decorators>(base, offset), new_value);
-  keep_alive_if_weak(AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength<decorators>(base, offset), result);
-  return result;
+  return oop_atomic_xchg_in_heap(AccessInternal::oop_field_addr<decorators>(base, offset), new_value);
 }
 
 // Clone barrier support
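Taken together, these inline.hpp hunks give every reference load one null-checked shape: raw load, then load-reference barrier, then keep-alive only for non-strong references. Condensed to a sketch of the pattern (not the JDK code; maybe_forward and maybe_keep_alive are placeholder names):

oop load_barriered_sketch(oop* addr) {
  oop value = *addr;                  // raw load
  if (value != NULL) {
    value = maybe_forward(value);     // load-reference barrier: use the to-space copy
    maybe_keep_alive(value);          // SATB enqueue for weak refs during marking
  }
  return value;
}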
@@ -325,7 +325,7 @@ void Arena::destruct_contents() {
 // change the size
 void Arena::set_size_in_bytes(size_t size) {
   if (_size_in_bytes != size) {
-    long delta = (long)(size - size_in_bytes());
+    ssize_t delta = size - size_in_bytes();
     _size_in_bytes = size;
     MemTracker::record_arena_size_change(delta, _flags);
   }
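The delta type change above (and the int-to-ssize_t changes in the tracker hunks below) is the heart of the fix: the old path carried the arena delta as long and, further down, as int, which are 32 bits on common targets, so a size change beyond 2GB silently truncated. ssize_t is pointer-width. A standalone illustration of the truncation, assuming a 64-bit target (ssize_t is POSIX; HotSpot supplies it on Windows):

#include <cstdio>
#include <sys/types.h>

int main() {
  size_t old_size = 0;
  size_t new_size = 3ULL * 1024 * 1024 * 1024;    // arena grew by 3GB
  int     bad  = (int)(new_size - old_size);      // truncates: int is 32-bit
  ssize_t good = (ssize_t)(new_size - old_size);  // keeps the full delta
  printf("int: %d  ssize_t: %zd\n", bad, good);
  return 0;
}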
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,8 +31,11 @@
 
 void ResourceArea::bias_to(MEMFLAGS new_flags) {
   if (new_flags != _flags) {
+    size_t size = size_in_bytes();
+    MemTracker::record_arena_size_change(-ssize_t(size), _flags);
     MemTracker::record_arena_free(_flags);
     MemTracker::record_new_arena(new_flags);
+    MemTracker::record_arena_size_change(ssize_t(size), new_flags);
     _flags = new_flags;
   }
 }
@@ -797,6 +797,21 @@ WB_ENTRY(jint, WB_NMTGetHashSize(JNIEnv* env, jobject o))
   assert(hash_size > 0, "NMT hash_size should be > 0");
   return (jint)hash_size;
 WB_END
 
+WB_ENTRY(jlong, WB_NMTNewArena(JNIEnv* env, jobject o, jlong init_size))
+  Arena* arena = new (mtTest) Arena(mtTest, size_t(init_size));
+  return (jlong)arena;
+WB_END
+
+WB_ENTRY(void, WB_NMTFreeArena(JNIEnv* env, jobject o, jlong arena))
+  Arena* a = (Arena*)arena;
+  delete a;
+WB_END
+
+WB_ENTRY(void, WB_NMTArenaMalloc(JNIEnv* env, jobject o, jlong arena, jlong size))
+  Arena* a = (Arena*)arena;
+  a->Amalloc(size_t(size));
+WB_END
 #endif // INCLUDE_NMT
 
 static jmethodID reflected_method_to_jmid(JavaThread* thread, JNIEnv* env, jobject method) {
@@ -2244,6 +2259,9 @@ static JNINativeMethod methods[] = {
   {CC"NMTReleaseMemory",       CC"(JJ)V", (void*)&WB_NMTReleaseMemory },
   {CC"NMTChangeTrackingLevel", CC"()Z",   (void*)&WB_NMTChangeTrackingLevel},
   {CC"NMTGetHashSize",         CC"()I",   (void*)&WB_NMTGetHashSize },
+  {CC"NMTNewArena",            CC"(J)J",  (void*)&WB_NMTNewArena },
+  {CC"NMTFreeArena",           CC"(J)V",  (void*)&WB_NMTFreeArena },
+  {CC"NMTArenaMalloc",         CC"(JJ)V", (void*)&WB_NMTArenaMalloc },
 #endif // INCLUDE_NMT
   {CC"deoptimizeFrames",       CC"(Z)I",  (void*)&WB_DeoptimizeFrames },
   {CC"deoptimizeAll",          CC"()V",   (void*)&WB_DeoptimizeAll },
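The descriptor strings in the table follow standard JNI encoding (J is jlong, V is void), matching the WB_ENTRY signatures registered above:

// "(J)J"  : jlong -> jlong         -- NMTNewArena(initSize) returns the arena address
// "(J)V"  : jlong -> void          -- NMTFreeArena(arena)
// "(JJ)V" : jlong, jlong -> void   -- NMTArenaMalloc(arena, size)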
@@ -289,20 +289,24 @@ void HandshakeState::clear_handshake(JavaThread* target) {
 void HandshakeState::process_self_inner(JavaThread* thread) {
   assert(Thread::current() == thread, "should call from thread");
   assert(!thread->is_terminated(), "should not be a terminated thread");
+  assert(thread->thread_state() != _thread_blocked, "should not be in a blocked state");
+  assert(thread->thread_state() != _thread_in_native, "should not be in native");
 
-  ThreadInVMForHandshake tivm(thread);
-  if (!_semaphore.trywait()) {
-    _semaphore.wait_with_safepoint_check(thread);
-  }
-  HandshakeOperation* op = Atomic::load_acquire(&_operation);
-  if (op != NULL) {
-    HandleMark hm(thread);
-    CautiouslyPreserveExceptionMark pem(thread);
-    // Disarm before execute the operation
-    clear_handshake(thread);
-    op->do_handshake(thread);
-  }
-  _semaphore.signal();
+  do {
+    ThreadInVMForHandshake tivm(thread);
+    if (!_semaphore.trywait()) {
+      _semaphore.wait_with_safepoint_check(thread);
+    }
+    HandshakeOperation* op = Atomic::load_acquire(&_operation);
+    if (op != NULL) {
+      HandleMark hm(thread);
+      CautiouslyPreserveExceptionMark pem(thread);
+      // Disarm before execute the operation
+      clear_handshake(thread);
+      op->do_handshake(thread);
+    }
+    _semaphore.signal();
+  } while (has_operation());
 }
 
 bool HandshakeState::vmthread_can_process_handshake(JavaThread* target) {
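The handshake change wraps the old body in a do/while: without the loop, an operation installed while the previous one was executing could be missed once the thread returned from process_self_inner. The shape of the retry, reduced to a compilable sketch (not the JDK code; the atomic stands in for the installed operation):

#include <atomic>

static std::atomic<void*> _operation{nullptr};  // set by the requesting thread

static bool has_operation() { return _operation.load() != nullptr; }

static void process_self_inner_sketch() {
  do {
    void* op = _operation.exchange(nullptr);  // "disarm before execute"
    if (op != nullptr) {
      // ... execute the handshake operation ...
    }
  } while (has_operation());  // an operation installed meanwhile is not missed
}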
@@ -70,8 +70,9 @@ class MemoryCounter {
     }
   }
 
-  inline void resize(long sz) {
+  inline void resize(ssize_t sz) {
     if (sz != 0) {
+      assert(sz >= 0 || _size >= size_t(-sz), "Must be");
       Atomic::add(&_size, size_t(sz));
       DEBUG_ONLY(_peak_size = MAX2(_size, _peak_size);)
     }
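The new assert pins down the trick resize() depends on: _size is an unsigned size_t, and a negative delta is applied through unsigned wraparound, which yields the exact subtraction as long as the counter is at least as large as the decrement. A tiny standalone check (assumes a 64-bit size_t; unsigned conversion is guaranteed modular in C++):

#include <cassert>
#include <cstddef>
#include <sys/types.h>

int main() {
  size_t size = 64;
  ssize_t sz = -16;                          // a shrink request
  assert(sz >= 0 || size >= size_t(-sz));    // the guard added above
  size += size_t(sz);                        // adds 2^64 - 16, i.e. subtracts 16
  assert(size == 48);
  return 0;
}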
@@ -113,7 +114,7 @@ class MallocMemory {
     _arena.deallocate(0);
   }
 
-  inline void record_arena_size_change(long sz) {
+  inline void record_arena_size_change(ssize_t sz) {
     _arena.resize(sz);
   }
 
@@ -207,7 +208,7 @@ class MallocMemorySummary : AllStatic {
     as_snapshot()->by_type(flag)->record_arena_free();
   }
 
-  static inline void record_arena_size_change(long size, MEMFLAGS flag) {
+  static inline void record_arena_size_change(ssize_t size, MEMFLAGS flag) {
     as_snapshot()->by_type(flag)->record_arena_size_change(size);
   }
 
@@ -361,7 +362,7 @@ class MallocTracker : AllStatic {
     MallocMemorySummary::record_arena_free(flags);
   }
 
-  static inline void record_arena_size_change(int size, MEMFLAGS flags) {
+  static inline void record_arena_size_change(ssize_t size, MEMFLAGS flags) {
     MallocMemorySummary::record_arena_size_change(size, flags);
   }
  private:
@@ -63,7 +63,7 @@ class MemTracker : AllStatic {
 
   static inline void record_new_arena(MEMFLAGS flag) { }
   static inline void record_arena_free(MEMFLAGS flag) { }
-  static inline void record_arena_size_change(int diff, MEMFLAGS flag) { }
+  static inline void record_arena_size_change(ssize_t diff, MEMFLAGS flag) { }
   static inline void record_virtual_memory_reserve(void* addr, size_t size, const NativeCallStack& stack,
                                                    MEMFLAGS flag = mtNone) { }
   static inline void record_virtual_memory_reserve_and_commit(void* addr, size_t size,
@@ -203,7 +203,7 @@ class MemTracker : AllStatic {
 
   // Record arena size change. Arena size is the size of all arena
   // chunks that back up the arena.
-  static inline void record_arena_size_change(int diff, MEMFLAGS flag) {
+  static inline void record_arena_size_change(ssize_t diff, MEMFLAGS flag) {
     if (tracking_level() < NMT_summary) return;
     MallocTracker::record_arena_size_change(diff, flag);
   }
@@ -292,4 +292,8 @@ public final class ChunkHeader {
     static long headerSize() {
         return HEADER_SIZE;
     }
+
+    public long getLastNanos() {
+        return getStartNanos() + getDurationNanos();
+    }
 }
@@ -202,7 +202,7 @@ public final class ChunkParser {
         long lastValid = absoluteChunkEnd;
         long metadataPoistion = chunkHeader.getMetataPosition();
         long contantPosition = chunkHeader.getConstantPoolPosition();
-        chunkFinished = awaitUpdatedHeader(absoluteChunkEnd);
+        chunkFinished = awaitUpdatedHeader(absoluteChunkEnd, configuration.filterEnd);
         if (chunkFinished) {
             Logger.log(LogTag.JFR_SYSTEM_PARSER, LogLevel.INFO, "At chunk end");
             return null;
@@ -279,11 +279,14 @@ public final class ChunkParser {
         }
     }
 
-    private boolean awaitUpdatedHeader(long absoluteChunkEnd) throws IOException {
+    private boolean awaitUpdatedHeader(long absoluteChunkEnd, long filterEnd) throws IOException {
         if (Logger.shouldLog(LogTag.JFR_SYSTEM_PARSER, LogLevel.INFO)) {
             Logger.log(LogTag.JFR_SYSTEM_PARSER, LogLevel.INFO, "Waiting for more data (streaming). Read so far: " + chunkHeader.getChunkSize() + " bytes");
         }
         while (true) {
+            if (chunkHeader.getLastNanos() > filterEnd) {
+                return true;
+            }
             chunkHeader.refresh();
             if (absoluteChunkEnd != chunkHeader.getEnd()) {
                 return false;
test/hotspot/jtreg/runtime/NMT/HugeArenaTracking.java (new file)
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2019, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test
+ * @key nmt jcmd
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ * @build sun.hotspot.WhiteBox
+ * @run driver ClassFileInstaller sun.hotspot.WhiteBox
+ *                                sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail HugeArenaTracking
+ */
+
+import java.util.Random;
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.JDKToolFinder;
+import sun.hotspot.WhiteBox;
+
+public class HugeArenaTracking {
+  private static final long GB = 1024 * 1024 * 1024;
+
+  public static void main(String args[]) throws Exception {
+    OutputAnalyzer output;
+    final WhiteBox wb = WhiteBox.getWhiteBox();
+
+    // Grab my own PID
+    String pid = Long.toString(ProcessTools.getProcessId());
+    ProcessBuilder pb = new ProcessBuilder();
+
+    long arena1 = wb.NMTNewArena(1024);
+    long arena2 = wb.NMTNewArena(1024);
+
+    // Run 'jcmd <pid> VM.native_memory summary'
+    pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary"});
+    output = new OutputAnalyzer(pb.start());
+    output.shouldContain("Test (reserved=2KB, committed=2KB)");
+
+    Random rand = new Random();
+
+    // Allocate 2GB+ from arena
+    long total = 0;
+    while (total < 2 * GB) {
+      // Cap to 10M
+      long inc = rand.nextInt(10 * 1024 * 1024);
+      wb.NMTArenaMalloc(arena1, inc);
+      total += inc;
+    }
+
+    ProcessBuilder pb2 = new ProcessBuilder();
+    // Run 'jcmd <pid> VM.native_memory summary'
+    pb2.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary", "scale=GB"});
+    output = new OutputAnalyzer(pb2.start());
+    output.shouldContain("Test (reserved=2GB, committed=2GB)");
+
+    wb.NMTFreeArena(arena1);
+
+    output = new OutputAnalyzer(pb.start());
+    output.shouldContain("Test (reserved=1KB, committed=1KB)");
+    wb.NMTFreeArena(arena2);
+
+    output = new OutputAnalyzer(pb.start());
+    output.shouldNotContain("Test (reserved");
+  }
+}
@@ -63,6 +63,14 @@ public final class TestSetEndTime {
     public static void main(String... args) throws Exception {
         testEventStream();
         testRecordingStream();
+        testEmptyStream();
     }
 
+    private static void testEmptyStream() {
+        try (RecordingStream rs = new RecordingStream()) {
+            rs.setEndTime(Instant.now().plusMillis(1100));
+            rs.start();
+        }
+    }
+
     private static void testRecordingStream() throws Exception {
@@ -89,10 +97,10 @@ public final class TestSetEndTime {
             }
             closed.await();
             System.out.println("Found events: " + count.get());
-            if (count.get() < 50) {
+            if (count.get() > 0 && count.get() < 50) {
                 return;
             }
-            System.out.println("Found 50 events. Retrying");
+            System.out.println("Retrying");
             System.out.println();
         }
     }
@@ -222,6 +222,9 @@ public class WhiteBox {
   public native long NMTMallocWithPseudoStackAndType(long size, int index, int type);
   public native boolean NMTChangeTrackingLevel();
   public native int NMTGetHashSize();
+  public native long NMTNewArena(long initSize);
+  public native void NMTFreeArena(long arena);
+  public native void NMTArenaMalloc(long arena, long size);
 
   // Compiler
   public native int matchesMethod(Executable method, String pattern);