8252105: Parallel heap inspection for ZCollectedHeap

Reviewed-by: ayang, eosterlund
This commit is contained in:
Per Liden 2020-10-12 07:04:59 +00:00
parent 45b09a3f25
commit c73a0fffaa
8 changed files with 298 additions and 103 deletions

@ -237,6 +237,10 @@ void ZCollectedHeap::object_iterate(ObjectClosure* cl) {
_heap.object_iterate(cl, true /* visit_weaks */);
}
// Creates a parallel heap iterator to be driven by nworkers threads.
// Weak references are always visited here, mirroring the behavior of the
// single-threaded object_iterate() above. The caller owns the returned
// iterator.
ParallelObjectIterator* ZCollectedHeap::parallel_object_iterator(uint nworkers) {
  const bool visit_weaks = true;
  return _heap.parallel_object_iterator(nworkers, visit_weaks);
}
// CollectedHeap::keep_alive() override; delegates to ZHeap. Per the base
// class contract this keeps obj strongly reachable for the current cycle.
void ZCollectedHeap::keep_alive(oop obj) {
_heap.keep_alive(obj);
}

@ -94,6 +94,7 @@ public:
virtual GrowableArray<MemoryPool*> memory_pools();
virtual void object_iterate(ObjectClosure* cl);
virtual ParallelObjectIterator* parallel_object_iterator(uint nworkers);
virtual void keep_alive(oop obj);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -47,6 +47,9 @@ public:
T get(uintptr_t offset) const;
void put(uintptr_t offset, T value);
void put(uintptr_t offset, size_t size, T value);
T get_acquire(uintptr_t offset) const;
void release_put(uintptr_t offset, T value);
};
template <typename T>

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,6 +27,7 @@
#include "gc/z/zGlobals.hpp"
#include "gc/z/zGranuleMap.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/atomic.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
@ -46,7 +47,6 @@ template <typename T>
// Translates a heap offset into its slot index in the map. All offsets
// inside the same granule map to the same slot (the offset is scaled down
// by the granule size via the shift).
inline size_t ZGranuleMap<T>::index_for_offset(uintptr_t offset) const {
const size_t index = offset >> ZGranuleSizeShift;
assert(index < _size, "Invalid index");
return index;
}
@ -73,6 +73,18 @@ inline void ZGranuleMap<T>::put(uintptr_t offset, size_t size, T value) {
}
}
// Reads the slot for offset with acquire semantics. Pairs with
// release_put(): a value observed here was fully published by the
// releasing store, so anything initialized before that store is visible.
template <typename T>
inline T ZGranuleMap<T>::get_acquire(uintptr_t offset) const {
const size_t index = index_for_offset(offset);
return Atomic::load_acquire(_map + index);
}
// Stores value into the slot for offset with release semantics, publishing
// it to concurrent readers using get_acquire().
template <typename T>
inline void ZGranuleMap<T>::release_put(uintptr_t offset, T value) {
const size_t index = index_for_offset(offset);
Atomic::release_store(_map + index, value);
}
template <typename T>
inline ZGranuleMapIterator<T>::ZGranuleMapIterator(const ZGranuleMap<T>* map) :
_map(map),

@ -436,9 +436,13 @@ void ZHeap::relocate() {
// Single-threaded object iteration: runs the parallel iterator inline with
// a single worker. Must be called at a safepoint.
void ZHeap::object_iterate(ObjectClosure* cl, bool visit_weaks) {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  ZHeapIterator iter(1 /* nworkers */, visit_weaks);
  iter.object_iterate(cl, 0 /* worker_id */);
}

// Creates a parallel object iterator to be driven by nworkers threads.
// Must be called at a safepoint. The caller owns the returned iterator.
ParallelObjectIterator* ZHeap::parallel_object_iterator(uint nworkers, bool visit_weaks) {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  return new ZHeapIterator(nworkers, visit_weaks);
}
void ZHeap::pages_do(ZPageClosure* cl) {

@ -141,6 +141,7 @@ public:
// Iteration
void object_iterate(ObjectClosure* cl, bool visit_weaks);
ParallelObjectIterator* parallel_object_iterator(uint nworkers, bool visit_weaks);
void pages_do(ZPageClosure* cl);
// Serviceability

@ -23,42 +23,80 @@
#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zGranuleMap.inline.hpp"
#include "gc/z/zHeapIterator.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zOop.inline.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStat.hpp"
#include "memory/iterator.inline.hpp"
#include "runtime/stackWatermarkSet.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/stack.inline.hpp"
// Visited-object bitmap covering one granule: one bit per potential object
// start. par_set_bit() makes marking safe when multiple iteration workers
// race to mark the same object.
class ZHeapIteratorBitMap : public CHeapObj<mtGC> {
private:
  CHeapBitMap _bitmap;

public:
  ZHeapIteratorBitMap(size_t size_in_bits) :
      _bitmap(size_in_bits, mtGC) {}

  // Atomically sets the bit at index. Returns true if this call set the
  // bit, false if it was already set (object already marked by someone).
  bool try_set_bit(size_t index) {
    return _bitmap.par_set_bit(index);
  }
};
// Per-worker context for parallel heap iteration. Bundles the shared
// ZHeapIterator with this worker's own oop queue and array-chunk queue,
// and exposes the operations a worker performs: mark-and-push, pop its
// own work, and steal from other workers.
class ZHeapIteratorContext {
private:
ZHeapIterator* const _iter;
ZHeapIteratorQueue* const _queue;
ZHeapIteratorArrayQueue* const _array_queue;
const uint _worker_id;
ZStatTimerDisable _timer_disable;
public:
ZHeapIteratorContext(ZHeapIterator* iter, uint worker_id) :
_iter(iter),
_queue(_iter->_queues.queue(worker_id)),
_array_queue(_iter->_array_queues.queue(worker_id)),
_worker_id(worker_id) {}
// Marks obj in the shared bitmaps; only the thread whose mark_object()
// returns true pushes it, so each object is enqueued exactly once.
void mark_and_push(oop obj) const {
if (_iter->mark_object(obj)) {
_queue->push(obj);
}
}
void push_array(const ObjArrayTask& array) const {
_array_queue->push(array);
}
// Pops from this worker's own oop queue (overflow stack first).
bool pop(oop& obj) const {
return _queue->pop_overflow(obj) || _queue->pop_local(obj);
}
bool pop_array(ObjArrayTask& array) const {
return _array_queue->pop_overflow(array) || _array_queue->pop_local(array);
}
// Steals an oop from some other worker's queue.
bool steal(oop& obj) const {
return _iter->_queues.steal(_worker_id, obj);
}
bool steal_array(ObjArrayTask& array) const {
return _iter->_array_queues.steal(_worker_id, array);
}
// True when this worker has no local work left (both queues empty).
bool is_drained() const {
return _queue->is_empty() && _array_queue->is_empty();
}
};
template <bool Concurrent, bool Weak>
class ZHeapIteratorRootOopClosure : public ZRootsIteratorClosure {
private:
ZHeapIterator* const _iter;
const ZHeapIteratorContext& _context;
oop load_oop(oop* p) {
if (Weak) {
@ -73,12 +111,12 @@ private:
}
public:
ZHeapIteratorRootOopClosure(ZHeapIterator* iter) :
_iter(iter) {}
ZHeapIteratorRootOopClosure(const ZHeapIteratorContext& context) :
_context(context) {}
virtual void do_oop(oop* p) {
const oop obj = load_oop(p);
_iter->push(obj);
_context.mark_and_push(obj);
}
virtual void do_oop(narrowOop* p) {
@ -94,8 +132,8 @@ public:
template <bool VisitReferents>
class ZHeapIteratorOopClosure : public ClaimMetadataVisitingOopIterateClosure {
private:
ZHeapIterator* const _iter;
const oop _base;
const ZHeapIteratorContext& _context;
const oop _base;
oop load_oop(oop* p) {
if (VisitReferents) {
@ -106,9 +144,9 @@ private:
}
public:
ZHeapIteratorOopClosure(ZHeapIterator* iter, oop base) :
ZHeapIteratorOopClosure(const ZHeapIteratorContext& context, oop base) :
ClaimMetadataVisitingOopIterateClosure(ClassLoaderData::_claim_other),
_iter(iter),
_context(context),
_base(base) {}
virtual ReferenceIterationMode reference_iteration_mode() {
@ -117,7 +155,7 @@ public:
virtual void do_oop(oop* p) {
const oop obj = load_oop(p);
_iter->push(obj);
_context.mark_and_push(obj);
}
virtual void do_oop(narrowOop* p) {
@ -131,16 +169,50 @@ public:
#endif
};
// Sets up the shared state for nworkers iteration threads: per-granule
// visited bitmaps (lazily populated), one oop queue and one array-chunk
// queue per worker, the root iterators, and the termination protocol.
ZHeapIterator::ZHeapIterator(uint nworkers, bool visit_weaks) :
    _visit_weaks(visit_weaks),
    _timer_disable(),
    _bitmaps(ZAddressOffsetMax),
    _bitmaps_lock(),
    _queues(nworkers),
    _array_queues(nworkers),
    _roots(),
    _concurrent_roots(),
    _weak_roots(),
    _concurrent_weak_roots(),
    _terminator(nworkers, &_queues) {

  // Create queues
  for (uint i = 0; i < _queues.size(); i++) {
    ZHeapIteratorQueue* const queue = new ZHeapIteratorQueue();
    queue->initialize();
    _queues.register_queue(i, queue);
  }

  // Create array queues
  for (uint i = 0; i < _array_queues.size(); i++) {
    ZHeapIteratorArrayQueue* const array_queue = new ZHeapIteratorArrayQueue();
    array_queue->initialize();
    _array_queues.register_queue(i, array_queue);
  }
}
// Tears down all per-iteration state and resets the CLD claim marks that
// the iteration used to visit each class loader only once.
ZHeapIterator::~ZHeapIterator() {
  // Destroy bitmaps
  ZHeapIteratorBitMapsIterator iter(&_bitmaps);
  for (ZHeapIteratorBitMap* bitmap; iter.next(&bitmap);) {
    delete bitmap;
  }

  // Destroy array queues
  for (uint i = 0; i < _array_queues.size(); i++) {
    delete _array_queues.queue(i);
  }

  // Destroy queues
  for (uint i = 0; i < _queues.size(); i++) {
    delete _queues.queue(i);
  }

  ClassLoaderDataGraph::clear_claimed_marks(ClassLoaderData::_claim_other);
}
static size_t object_index_max() {
@ -154,75 +226,137 @@ static size_t object_index(oop obj) {
return (offset & mask) >> ZObjectAlignmentSmallShift;
}
// Returns the visited-bitmap for the granule containing obj, installing a
// new one on first use. Lock-free fast path via an acquire load; slow path
// re-checks under the lock so only one thread installs a bitmap per
// granule, published with a releasing store.
ZHeapIteratorBitMap* ZHeapIterator::object_bitmap(oop obj) {
  const uintptr_t offset = ZAddress::offset(ZOop::to_address(obj));
  ZHeapIteratorBitMap* bitmap = _bitmaps.get_acquire(offset);
  if (bitmap == NULL) {
    ZLocker<ZLock> locker(&_bitmaps_lock);
    bitmap = _bitmaps.get(offset);
    if (bitmap == NULL) {
      // Install new bitmap
      bitmap = new ZHeapIteratorBitMap(object_index_max());
      _bitmaps.release_put(offset, bitmap);
    }
  }

  return bitmap;
}
// Marks obj as visited. Returns true only for the caller that actually set
// the bit, so exactly one worker goes on to queue/visit each object.
// NULL is silently ignored.
bool ZHeapIterator::mark_object(oop obj) {
  if (obj == NULL) {
    // Ignore
    return false;
  }

  ZHeapIteratorBitMap* const bitmap = object_bitmap(obj);
  const size_t index = object_index(obj);
  return bitmap->try_set_bit(index);
}
template <typename RootsIterator, bool Concurrent, bool Weak>
void ZHeapIterator::push_roots() {
ZHeapIteratorRootOopClosure<Concurrent, Weak> cl(this);
RootsIterator roots;
roots.oops_do(&cl);
template <bool Concurrent, bool Weak, typename RootsIterator>
void ZHeapIterator::push_roots(const ZHeapIteratorContext& context, RootsIterator& iter) {
ZHeapIteratorRootOopClosure<Concurrent, Weak> cl(context);
iter.oops_do(&cl);
}
template <bool VisitReferents>
void ZHeapIterator::push_fields(oop obj) {
ZHeapIteratorOopClosure<VisitReferents> cl(this, obj);
void ZHeapIterator::follow_object(const ZHeapIteratorContext& context, oop obj) {
ZHeapIteratorOopClosure<VisitReferents> cl(context, obj);
obj->oop_iterate(&cl);
}
// Handles an object array: visits its klass immediately, but defers the
// element scan by enqueuing the array as a chunk task starting at index 0,
// so large arrays can be processed (and stolen) in stripes.
void ZHeapIterator::follow_array(const ZHeapIteratorContext& context, oop obj) {
// Follow klass
ZHeapIteratorOopClosure<false /* VisitReferents */> cl(context, obj);
cl.do_klass(obj->klass());
// Push array chunk
context.push_array(ObjArrayTask(obj, 0 /* index */));
}
// Processes one stripe of an object array: scans at most
// ObjArrayMarkingStride elements starting at array.index(). The remainder
// is re-queued before scanning, so other workers can steal it while this
// worker processes the current stripe.
void ZHeapIterator::follow_array_chunk(const ZHeapIteratorContext& context, const ObjArrayTask& array) {
const objArrayOop obj = objArrayOop(array.obj());
const int length = obj->length();
const int start = array.index();
const int stride = MIN2<int>(length - start, ObjArrayMarkingStride);
const int end = start + stride;
// Push remaining array chunk first
if (end < length) {
context.push_array(ObjArrayTask(obj, end));
}
// Follow array chunk
ZHeapIteratorOopClosure<false /* VisitReferents */> cl(context, obj);
obj->oop_iterate_range(&cl, start, end);
}
template <bool VisitWeaks>
void ZHeapIterator::objects_do(ObjectClosure* cl) {
ZStatTimerDisable disable;
void ZHeapIterator::visit_and_follow(const ZHeapIteratorContext& context, ObjectClosure* cl, oop obj) {
// Visit
cl->do_object(obj);
// Push roots to visit
push_roots<ZRootsIterator, false /* Concurrent */, false /* Weak */>();
push_roots<ZConcurrentRootsIteratorClaimOther, true /* Concurrent */, false /* Weak */>();
if (VisitWeaks) {
push_roots<ZWeakRootsIterator, false /* Concurrent */, true /* Weak */>();
push_roots<ZConcurrentWeakRootsIterator, true /* Concurrent */, true /* Weak */>();
}
// Drain stack
while (!_visit_stack.is_empty()) {
const oop obj = _visit_stack.pop();
// Visit object
cl->do_object(obj);
// Push fields to visit
push_fields<VisitWeaks>(obj);
}
}
void ZHeapIterator::objects_do(ObjectClosure* cl, bool visit_weaks) {
if (visit_weaks) {
objects_do<true /* VisitWeaks */>(cl);
// Follow
if (obj->is_objArray()) {
follow_array(context, obj);
} else {
objects_do<false /* VisitWeaks */>(cl);
follow_object<VisitWeaks>(context, obj);
}
}
// Processes this worker's own queues until both are empty. The outer loop
// re-checks is_drained() because visiting oops can enqueue array chunks
// and following an array chunk can enqueue new oops.
template <bool VisitWeaks>
void ZHeapIterator::drain(const ZHeapIteratorContext& context, ObjectClosure* cl) {
ObjArrayTask array;
oop obj;
do {
while (context.pop(obj)) {
visit_and_follow<VisitWeaks>(context, cl, obj);
}
if (context.pop_array(array)) {
follow_array_chunk(context, array);
}
} while (!context.is_drained());
}
// Attempts to take at most one unit of work from another worker's queues,
// trying the array-chunk queues before the oop queues.
template <bool VisitWeaks>
void ZHeapIterator::steal(const ZHeapIteratorContext& context, ObjectClosure* cl) {
ObjArrayTask array;
oop obj;
if (context.steal_array(array)) {
follow_array_chunk(context, array);
} else if (context.steal(obj)) {
visit_and_follow<VisitWeaks>(context, cl, obj);
}
}
// Worker main loop: alternates between draining local work and stealing
// from others, until this worker is out of work and the terminator
// confirms that all workers have finished.
template <bool VisitWeaks>
void ZHeapIterator::drain_and_steal(const ZHeapIteratorContext& context, ObjectClosure* cl) {
do {
drain<VisitWeaks>(context, cl);
steal<VisitWeaks>(context, cl);
} while (!context.is_drained() || !_terminator.offer_termination());
}
// Seeds this worker's queues from the strong root sets (and, when
// VisitWeaks, the weak root sets as well), then processes work until
// global termination.
template <bool VisitWeaks>
void ZHeapIterator::object_iterate_inner(const ZHeapIteratorContext& context, ObjectClosure* cl) {
push_roots<false /* Concurrent */, false /* Weak */>(context, _roots);
push_roots<true /* Concurrent */, false /* Weak */>(context, _concurrent_roots);
if (VisitWeaks) {
push_roots<false /* Concurrent */, true /* Weak */>(context, _weak_roots);
push_roots<true /* Concurrent */, true /* Weak */>(context, _concurrent_weak_roots);
}
drain_and_steal<VisitWeaks>(context, cl);
}
// ParallelObjectIterator entry point, called once per worker thread.
// Builds this worker's context and dispatches to the template
// instantiation selected by _visit_weaks.
void ZHeapIterator::object_iterate(ObjectClosure* cl, uint worker_id) {
ZHeapIteratorContext context(this, worker_id);
if (_visit_weaks) {
object_iterate_inner<true /* VisitWeaks */>(context, cl);
} else {
object_iterate_inner<false /* VisitWeaks */>(context, cl);
}
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,37 +24,73 @@
#ifndef SHARE_GC_Z_ZHEAPITERATOR_HPP
#define SHARE_GC_Z_ZHEAPITERATOR_HPP
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/taskTerminator.hpp"
#include "gc/shared/taskqueue.hpp"
#include "gc/z/zGranuleMap.hpp"
#include "memory/allocation.hpp"
#include "utilities/stack.hpp"
#include "gc/z/zLock.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStat.hpp"
class ObjectClosure;
class ZHeapIteratorBitMap;
class ZHeapIteratorContext;
class ZHeapIterator : public StackObj {
template<bool Concurrent, bool Weak> friend class ZHeapIteratorRootOopClosure;
template<bool VisitReferents> friend class ZHeapIteratorOopClosure;
using ZHeapIteratorBitMaps = ZGranuleMap<ZHeapIteratorBitMap*>;
using ZHeapIteratorBitMapsIterator = ZGranuleMapIterator<ZHeapIteratorBitMap*>;
using ZHeapIteratorQueue = OverflowTaskQueue<oop, mtGC>;
using ZHeapIteratorQueues = GenericTaskQueueSet<ZHeapIteratorQueue, mtGC>;
using ZHeapIteratorArrayQueue = OverflowTaskQueue<ObjArrayTask, mtGC>;
using ZHeapIteratorArrayQueues = GenericTaskQueueSet<ZHeapIteratorArrayQueue, mtGC>;
class ZHeapIterator : public ParallelObjectIterator {
friend class ZHeapIteratorContext;
private:
typedef ZGranuleMap<ZHeapIteratorBitMap*> ZVisitMap;
typedef ZGranuleMapIterator<ZHeapIteratorBitMap*> ZVisitMapIterator;
typedef Stack<oop, mtGC> ZVisitStack;
const bool _visit_weaks;
ZStatTimerDisable _timer_disable;
ZHeapIteratorBitMaps _bitmaps;
ZLock _bitmaps_lock;
ZHeapIteratorQueues _queues;
ZHeapIteratorArrayQueues _array_queues;
ZRootsIterator _roots;
ZConcurrentRootsIteratorClaimOther _concurrent_roots;
ZWeakRootsIterator _weak_roots;
ZConcurrentWeakRootsIterator _concurrent_weak_roots;
TaskTerminator _terminator;
ZVisitStack _visit_stack;
ZVisitMap _visit_map;
ZHeapIteratorBitMap* object_bitmap(oop obj);
ZHeapIteratorBitMap* object_map(oop obj);
void push(oop obj);
bool mark_object(oop obj);
template <typename RootsIterator, bool Concurrent, bool Weak> void push_roots();
template <bool VisitReferents> void push_fields(oop obj);
template <bool VisitReferents> void objects_do(ObjectClosure* cl);
template <bool Concurrent, bool Weak, typename RootsIterator>
void push_roots(const ZHeapIteratorContext& context, RootsIterator& iter);
template <bool VisitReferents>
void follow_object(const ZHeapIteratorContext& context, oop obj);
void follow_array(const ZHeapIteratorContext& context, oop obj);
void follow_array_chunk(const ZHeapIteratorContext& context, const ObjArrayTask& array);
template <bool VisitWeaks>
void visit_and_follow(const ZHeapIteratorContext& context, ObjectClosure* cl, oop obj);
template <bool VisitWeaks>
void drain(const ZHeapIteratorContext& context, ObjectClosure* cl);
template <bool VisitWeaks>
void steal(const ZHeapIteratorContext& context, ObjectClosure* cl);
template <bool VisitWeaks>
void drain_and_steal(const ZHeapIteratorContext& context, ObjectClosure* cl);
template <bool VisitWeaks>
void object_iterate_inner(const ZHeapIteratorContext& context, ObjectClosure* cl);
public:
ZHeapIterator();
~ZHeapIterator();
ZHeapIterator(uint nworkers, bool visit_weaks);
virtual ~ZHeapIterator();
void objects_do(ObjectClosure* cl, bool visit_weaks);
virtual void object_iterate(ObjectClosure* cl, uint worker_id);
};
#endif // SHARE_GC_Z_ZHEAPITERATOR_HPP