8229189: Improve JFR leak profiler tracing to deal with discontiguous heaps
Reviewed-by: mgronlun, egahlin
commit d19e6eae9e (parent 655cf14138)
src/hotspot/share/jfr/leakprofiler/chains/bfsClosure.cpp
@@ -22,7 +22,7 @@
  *
  */
 #include "precompiled.hpp"
-#include "jfr/leakprofiler/chains/bitset.hpp"
+#include "jfr/leakprofiler/chains/bitset.inline.hpp"
 #include "jfr/leakprofiler/chains/bfsClosure.hpp"
 #include "jfr/leakprofiler/chains/dfsClosure.hpp"
 #include "jfr/leakprofiler/chains/edge.hpp"
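Note: this include swap (and the identical swaps in dfsClosure.cpp and pathToGcRootsOperation.cpp below) follows HotSpot's .inline.hpp convention: once BitSet::mark_obj and BitSet::is_marked are declared in bitset.hpp but defined as inline functions in bitset.inline.hpp, every translation unit that calls them must include the .inline.hpp file. A minimal sketch of the pattern, with illustrative names that are not part of the JDK sources:

    // counter.hpp -- declarations only; cheap to include from other headers.
    class Counter {
      int _value;
     public:
      Counter() : _value(0) {}
      void bump();                  // defined in counter.inline.hpp
    };

    // counter.inline.hpp -- inline definitions; included only by the
    // .cpp files that actually call bump(), keeping header dependencies small.
    inline void Counter::bump() {
      _value++;
    }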
src/hotspot/share/jfr/leakprofiler/chains/bitset.cpp
@@ -22,37 +22,25 @@
  *
  */
 #include "precompiled.hpp"
-#include "jfr/leakprofiler/chains/bitset.hpp"
-#include "jfr/recorder/storage/jfrVirtualMemory.hpp"
-#include "memory/memRegion.hpp"
+#include "jfr/leakprofiler/chains/bitset.inline.hpp"
 
-BitSet::BitSet(const MemRegion& covered_region) :
-  _vmm(NULL),
-  _region_start(covered_region.start()),
-  _region_size(covered_region.word_size()) {
+BitSet::BitMapFragment::BitMapFragment(uintptr_t granule, BitMapFragment* next) :
+  _bits(_bitmap_granularity_size >> LogMinObjAlignmentInBytes, mtTracing, true /* clear */),
+  _next(next) {
+}
+
+BitSet::BitSet() :
+  _bitmap_fragments(32),
+  _fragment_list(NULL),
+  _last_fragment_bits(NULL),
+  _last_fragment_granule(0) {
 }
 
 BitSet::~BitSet() {
-  delete _vmm;
-}
-
-bool BitSet::initialize() {
-  assert(_vmm == NULL, "invariant");
-  _vmm = new JfrVirtualMemory();
-  if (_vmm == NULL) {
-    return false;
+  BitMapFragment* current = _fragment_list;
+  while (current != NULL) {
+    BitMapFragment* next = current->next();
+    delete current;
+    current = next;
   }
-
-  const BitMap::idx_t bits = _region_size >> LogMinObjAlignment;
-  const size_t words = bits / BitsPerWord;
-  const size_t raw_bytes = words * sizeof(BitMap::idx_t);
-
-  // the virtual memory invocation will reserve and commit the entire space
-  BitMap::bm_word_t* map = (BitMap::bm_word_t*)_vmm->initialize(raw_bytes, raw_bytes);
-  if (map == NULL) {
-    return false;
-  }
-  _bits = BitMapView(map, bits);
-  return true;
 }
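The rewrite replaces the single JfrVirtualMemory-backed bitmap, sized from the heap's reserved region, with lazily allocated 64M-granule fragments. That matters for collectors whose reserved address range is large and sparsely populated — the discontiguous heaps of the bug title. A back-of-the-envelope comparison, assuming 8-byte minimum object alignment (LogMinObjAlignmentInBytes == 3); the 16 TB figure is illustrative, not taken from the commit:

    #include <cstddef>
    #include <cstdio>

    int main() {
      // Old scheme: one bitmap spanning the entire reserved region.
      const size_t reserved = (size_t)16 << 40;            // hypothetical sparse 16 TB reservation
      const size_t old_bitmap_bytes = (reserved >> 3) / 8; // one bit per 8-byte-aligned word
      printf("contiguous bitmap: %zu MB\n", old_bitmap_bytes / (1024 * 1024));  // 262144 MB

      // New scheme: a 1 MB bitmap per 64 MB granule, allocated only for
      // granules that actually contain sampled objects.
      const size_t granule_bytes = (size_t)1 << 26;        // 64M
      const size_t fragment_bitmap_bytes = (granule_bytes >> 3) / 8;
      printf("per-granule bitmap: %zu KB\n", fragment_bitmap_bytes / 1024);     // 1024 KB
      return 0;
    }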
src/hotspot/share/jfr/leakprofiler/chains/bitset.hpp
@@ -26,53 +26,91 @@
 #define SHARE_JFR_LEAKPROFILER_CHAINS_BITSET_HPP
 
 #include "memory/allocation.hpp"
-#include "oops/oop.hpp"
 #include "oops/oopsHierarchy.hpp"
-#include "utilities/bitMap.inline.hpp"
+#include "utilities/bitMap.hpp"
+#include "utilities/hashtable.hpp"
 
 class JfrVirtualMemory;
 class MemRegion;
 
 class BitSet : public CHeapObj<mtTracing> {
- private:
-  JfrVirtualMemory* _vmm;
-  const HeapWord* const _region_start;
-  BitMapView _bits;
-  const size_t _region_size;
+  const static size_t _bitmap_granularity_shift = 26; // 64M
+  const static size_t _bitmap_granularity_size = (size_t)1 << _bitmap_granularity_shift;
+  const static size_t _bitmap_granularity_mask = _bitmap_granularity_size - 1;
+
+  class BitMapFragment;
+
+  class BitMapFragmentTable : public BasicHashtable<mtTracing> {
+    class Entry : public BasicHashtableEntry<mtTracing> {
+    public:
+      uintptr_t _key;
+      CHeapBitMap* _value;
+
+      Entry* next() {
+        return (Entry*)BasicHashtableEntry<mtTracing>::next();
+      }
+    };
+
+   protected:
+    Entry* bucket(int i) const;
+
+    Entry* new_entry(unsigned int hashValue, uintptr_t key, CHeapBitMap* value);
+
+    unsigned hash_segment(uintptr_t key) {
+      unsigned hash = (unsigned)key;
+      return hash ^ (hash >> 3);
+    }
+
+    unsigned hash_to_index(unsigned hash) {
+      return hash & (BasicHashtable<mtTracing>::table_size() - 1);
+    }
+
+   public:
+    BitMapFragmentTable(int table_size) : BasicHashtable<mtTracing>(table_size, sizeof(Entry)) {}
+    void add(uintptr_t key, CHeapBitMap* value);
+    CHeapBitMap** lookup(uintptr_t key);
+  };
+
+  CHeapBitMap* get_fragment_bits(uintptr_t addr);
+
+  BitMapFragmentTable _bitmap_fragments;
+  BitMapFragment* _fragment_list;
+  CHeapBitMap* _last_fragment_bits;
+  uintptr_t _last_fragment_granule;
 
  public:
-  BitSet(const MemRegion& covered_region);
+  BitSet();
   ~BitSet();
 
-  bool initialize();
+  BitMap::idx_t addr_to_bit(uintptr_t addr) const;
 
-  BitMap::idx_t mark_obj(const HeapWord* addr) {
-    const BitMap::idx_t bit = addr_to_bit(addr);
-    _bits.set_bit(bit);
-    return bit;
-  }
+  void mark_obj(uintptr_t addr);
 
-  BitMap::idx_t mark_obj(oop obj) {
-    return mark_obj((HeapWord*)obj);
+  void mark_obj(oop obj) {
+    return mark_obj(cast_from_oop<uintptr_t>(obj));
   }
 
-  bool is_marked(const HeapWord* addr) const {
-    return is_marked(addr_to_bit(addr));
-  }
+  bool is_marked(uintptr_t addr);
 
-  bool is_marked(oop obj) const {
-    return is_marked((HeapWord*)obj);
+  bool is_marked(oop obj) {
+    return is_marked(cast_from_oop<uintptr_t>(obj));
   }
+};
 
-  BitMap::idx_t size() const {
-    return _bits.size();
-  }
+class BitSet::BitMapFragment : public CHeapObj<mtTracing> {
+  CHeapBitMap _bits;
+  BitMapFragment* _next;
 
-  BitMap::idx_t addr_to_bit(const HeapWord* addr) const {
-    return pointer_delta(addr, _region_start) >> LogMinObjAlignment;
-  }
+ public:
+  BitMapFragment(uintptr_t granule, BitMapFragment* next);
 
-  bool is_marked(const BitMap::idx_t bit) const {
-    return _bits.at(bit);
+  BitMapFragment* next() const {
+    return _next;
+  }
+
+  CHeapBitMap* bits() {
+    return &_bits;
   }
 };
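The three granularity constants drive all of the address arithmetic: the upper bits of an address select a 64M granule (the key into BitMapFragmentTable, mixed by hash_segment), and the low 26 bits, scaled by the minimum object alignment, select a bit inside that granule's CHeapBitMap. A standalone sketch of the split, assuming 8-byte alignment; the address is made up:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uintptr_t granularity_shift = 26;                       // 64M granules
      const uintptr_t granularity_mask = ((uintptr_t)1 << granularity_shift) - 1;

      const uintptr_t addr = 0x7f3a1c2b3d40;     // hypothetical 8-byte-aligned oop address
      const uintptr_t granule = addr >> granularity_shift;          // hashtable key
      const uintptr_t bit = (addr & granularity_mask) >> 3;         // LogMinObjAlignmentInBytes == 3

      printf("granule %#llx, bit %llu\n",
             (unsigned long long)granule, (unsigned long long)bit);
      return 0;
    }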
src/hotspot/share/jfr/leakprofiler/chains/bitset.inline.hpp (new file, 106 lines)
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_JFR_LEAKPROFILER_CHAINS_BITSET_INLINE_HPP
+#define SHARE_JFR_LEAKPROFILER_CHAINS_BITSET_INLINE_HPP
+
+#include "jfr/leakprofiler/chains/bitset.hpp"
+#include "jfr/recorder/storage/jfrVirtualMemory.hpp"
+#include "memory/memRegion.hpp"
+#include "utilities/bitMap.inline.hpp"
+#include "utilities/hashtable.inline.hpp"
+
+inline BitSet::BitMapFragmentTable::Entry* BitSet::BitMapFragmentTable::bucket(int i) const {
+  return (Entry*)BasicHashtable<mtTracing>::bucket(i);
+}
+
+inline BitSet::BitMapFragmentTable::Entry* BitSet::BitMapFragmentTable::new_entry(unsigned int hash,
+                                                                                  uintptr_t key,
+                                                                                  CHeapBitMap* value) {
+  Entry* entry = (Entry*)BasicHashtable<mtTracing>::new_entry(hash);
+  entry->_key = key;
+  entry->_value = value;
+  return entry;
+}
+
+inline void BitSet::BitMapFragmentTable::add(uintptr_t key, CHeapBitMap* value) {
+  unsigned hash = hash_segment(key);
+  Entry* entry = new_entry(hash, key, value);
+  BasicHashtable<mtTracing>::add_entry(hash_to_index(hash), entry);
+}
+
+inline CHeapBitMap** BitSet::BitMapFragmentTable::lookup(uintptr_t key) {
+  unsigned hash = hash_segment(key);
+  int index = hash_to_index(hash);
+  for (Entry* e = bucket(index); e != NULL; e = e->next()) {
+    if (e->hash() == hash && e->_key == key) {
+      return &(e->_value);
+    }
+  }
+  return NULL;
+}
+
+inline BitMap::idx_t BitSet::addr_to_bit(uintptr_t addr) const {
+  return (addr & _bitmap_granularity_mask) >> LogMinObjAlignmentInBytes;
+}
+
+inline CHeapBitMap* BitSet::get_fragment_bits(uintptr_t addr) {
+  uintptr_t granule = addr >> _bitmap_granularity_shift;
+  if (granule == _last_fragment_granule) {
+    return _last_fragment_bits;
+  }
+  CHeapBitMap* bits = NULL;
+
+  CHeapBitMap** found = _bitmap_fragments.lookup(granule);
+  if (found != NULL) {
+    bits = *found;
+  } else {
+    BitMapFragment* fragment = new BitMapFragment(granule, _fragment_list);
+    bits = fragment->bits();
+    _fragment_list = fragment;
+    if (_bitmap_fragments.number_of_entries() * 100 / _bitmap_fragments.table_size() > 25) {
+      _bitmap_fragments.resize(_bitmap_fragments.table_size() * 2);
+    }
+    _bitmap_fragments.add(granule, bits);
+  }
+
+  _last_fragment_bits = bits;
+  _last_fragment_granule = granule;
+
+  return bits;
+}
+
+inline void BitSet::mark_obj(uintptr_t addr) {
+  CHeapBitMap* bits = get_fragment_bits(addr);
+  const BitMap::idx_t bit = addr_to_bit(addr);
+  bits->set_bit(bit);
+}
+
+inline bool BitSet::is_marked(uintptr_t addr) {
+  CHeapBitMap* bits = get_fragment_bits(addr);
+  const BitMap::idx_t bit = addr_to_bit(addr);
+  return bits->at(bit);
+}
+
+#endif // SHARE_JFR_LEAKPROFILER_CHAINS_BITSET_INLINE_HPP
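get_fragment_bits is the heart of the change: a one-entry cache in front of a hashtable of lazily created fragments, with the table resized once its load passes 25%. A self-contained toy version using standard containers in place of BasicHashtable and CHeapBitMap — same structure, none of the JDK types:

    #include <cstdint>
    #include <cstdio>
    #include <unordered_map>
    #include <vector>

    class FragmentedBitSet {
      static const uintptr_t kGranuleShift = 26;                  // 64M granules
      static const uintptr_t kGranuleMask = ((uintptr_t)1 << kGranuleShift) - 1;
      static const uintptr_t kAlignShift = 3;                     // 8-byte object alignment

      std::unordered_map<uintptr_t, std::vector<uint64_t> > _fragments;
      uintptr_t _last_granule = (uintptr_t)-1;
      std::vector<uint64_t>* _last_bits = nullptr;

      // Analogue of BitSet::get_fragment_bits: one-entry cache, then the map.
      std::vector<uint64_t>* fragment_for(uintptr_t addr) {
        const uintptr_t granule = addr >> kGranuleShift;
        if (granule == _last_granule) {
          return _last_bits;
        }
        std::vector<uint64_t>& bits = _fragments[granule];        // created lazily
        if (bits.empty()) {
          bits.resize(((kGranuleMask + 1) >> kAlignShift) / 64);  // one bit per aligned word
        }
        _last_granule = granule;
        _last_bits = &bits;
        return &bits;
      }

      static uintptr_t bit_index(uintptr_t addr) {                // analogue of addr_to_bit
        return (addr & kGranuleMask) >> kAlignShift;
      }

     public:
      void mark(uintptr_t addr) {
        const uintptr_t bit = bit_index(addr);
        (*fragment_for(addr))[bit / 64] |= (uint64_t)1 << (bit % 64);
      }
      bool is_marked(uintptr_t addr) {
        const uintptr_t bit = bit_index(addr);
        return ((*fragment_for(addr))[bit / 64] >> (bit % 64)) & 1;
      }
    };

    int main() {
      FragmentedBitSet set;
      set.mark(0x7f0000001000);
      printf("%d %d\n", (int)set.is_marked(0x7f0000001000),      // prints: 1 0
                        (int)set.is_marked(0x7f0000001008));
      return 0;
    }

The single-entry cache pays off because tracing tends to visit runs of objects in the same 64M granule, so most lookups never touch the hashtable at all.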
src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.cpp
@@ -23,7 +23,7 @@
  */
 
 #include "precompiled.hpp"
-#include "jfr/leakprofiler/chains/bitset.hpp"
+#include "jfr/leakprofiler/chains/bitset.inline.hpp"
 #include "jfr/leakprofiler/chains/dfsClosure.hpp"
 #include "jfr/leakprofiler/chains/edge.hpp"
 #include "jfr/leakprofiler/chains/edgeStore.hpp"
src/hotspot/share/jfr/leakprofiler/chains/pathToGcRootsOperation.cpp
@@ -26,7 +26,7 @@
 #include "gc/shared/collectedHeap.hpp"
 #include "jfr/leakprofiler/leakProfiler.hpp"
 #include "jfr/leakprofiler/chains/bfsClosure.hpp"
-#include "jfr/leakprofiler/chains/bitset.hpp"
+#include "jfr/leakprofiler/chains/bitset.inline.hpp"
 #include "jfr/leakprofiler/chains/dfsClosure.hpp"
 #include "jfr/leakprofiler/chains/edge.hpp"
 #include "jfr/leakprofiler/chains/edgeQueue.hpp"
@@ -57,8 +57,8 @@ PathToGcRootsOperation::PathToGcRootsOperation(ObjectSampler* sampler, EdgeStore
  * Initial memory reservation: 5% of the heap OR at least 32 Mb
  * Commit ratio: 1 : 10 (subject to allocation granularties)
  */
-static size_t edge_queue_memory_reservation(const MemRegion& heap_region) {
-  const size_t memory_reservation_bytes = MAX2(heap_region.byte_size() / 20, 32*M);
+static size_t edge_queue_memory_reservation() {
+  const size_t memory_reservation_bytes = MAX2(MaxHeapSize / 20, 32*M);
   assert(memory_reservation_bytes >= (size_t)32*M, "invariant");
   return memory_reservation_bytes;
 }
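With no single reserved region to measure on a discontiguous heap, the reservation is now derived from MaxHeapSize. The rule itself is unchanged: 5% of the (maximum) heap, floored at 32M. A quick check of the arithmetic with a hypothetical -Xmx8g:

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t M = 1024 * 1024;
      const size_t max_heap = (size_t)8 * 1024 * M;                // -Xmx8g (illustrative)
      const size_t five_percent = max_heap / 20;
      const size_t reservation = five_percent > 32 * M ? five_percent : 32 * M;
      printf("%zu MB reserved\n", reservation / M);                // prints: 409 MB
      return 0;
    }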
@@ -84,17 +84,16 @@ void PathToGcRootsOperation::doit() {
   assert(_cutoff_ticks > 0, "invariant");
 
   // The bitset used for marking is dimensioned as a function of the heap size
-  const MemRegion heap_region = Universe::heap()->reserved_region();
-  BitSet mark_bits(heap_region);
+  BitSet mark_bits;
 
   // The edge queue is dimensioned as a fraction of the heap size
-  const size_t edge_queue_reservation_size = edge_queue_memory_reservation(heap_region);
+  const size_t edge_queue_reservation_size = edge_queue_memory_reservation();
   EdgeQueue edge_queue(edge_queue_reservation_size, edge_queue_memory_commit_size(edge_queue_reservation_size));
 
   // The initialize() routines will attempt to reserve and allocate backing storage memory.
   // Failure to accommodate will render root chain processing impossible.
   // As a fallback on failure, just write out the existing samples, flat, without chains.
-  if (!(mark_bits.initialize() && edge_queue.initialize())) {
+  if (!edge_queue.initialize()) {
     log_warning(jfr)("Unable to allocate memory for root chain processing");
     return;
   }
src/hotspot/share/utilities/hashtable.cpp
@@ -306,6 +306,7 @@ template class BasicHashtable<mtCode>;
 template class BasicHashtable<mtInternal>;
 template class BasicHashtable<mtModule>;
 template class BasicHashtable<mtCompiler>;
+template class BasicHashtable<mtTracing>;
 
 template void BasicHashtable<mtClass>::verify_table<DictionaryEntry>(char const*);
 template void BasicHashtable<mtModule>::verify_table<ModuleEntry>(char const*);
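The added line is needed because BitMapFragmentTable derives from BasicHashtable<mtTracing>, whose non-inline member functions are defined in hashtable.cpp; without an explicit instantiation for mtTracing, the new leak profiler code would fail to link. A generic single-file illustration of the pattern (toy names, not JDK code — in the real tree the pieces live in separate .hpp/.cpp files):

    #include <cstdio>

    template <int Tag>
    class Table {                     // stands in for BasicHashtable<MEMFLAGS>
     public:
      void add(int v);                // defined out of line below
    };

    // Out-of-line definition, normally hidden in the .cpp file. Clients in
    // other translation units only link if their Tag is instantiated there.
    template <int Tag>
    void Table<Tag>::add(int v) {
      printf("tag %d: add %d\n", Tag, v);
    }

    template class Table<1>;          // pre-existing instantiation
    template class Table<2>;          // the new one, mirroring BasicHashtable<mtTracing>

    int main() {
      Table<2> t;                     // links thanks to the explicit instantiation
      t.add(42);
      return 0;
    }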