8215624: Add parallel heap iteration for jmap -histo

Chunk and parallelize the heap scan.

Reviewed-by: sspitsyn, stefank, phh
Parent: e44575ad3e
Commit: 9f090cb6f8
Changed paths:
  src/hotspot/share/gc/{epsilon, g1, parallel, serial, shared, shenandoah, z}
  src/hotspot/share/memory
  src/hotspot/share/runtime
  src/hotspot/share/services
  src/jdk.jcmd/share/classes/sun/tools/jmap
  test/jdk/sun/tools/jmap
src/hotspot/share/gc/epsilon/epsilonHeap.hpp
@@ -119,6 +119,10 @@ public:
   // No GC threads
   virtual void gc_threads_do(ThreadClosure* tc) const {}

+  // Runs the given AbstractGangTask with the current active workers
+  // No workGang for EpsilonHeap, work serially with thread 0
+  virtual void run_task(AbstractGangTask* task) { task->work(0); }
+
   // No nmethod handling
   virtual void register_nmethod(nmethod* nm) {}
   virtual void unregister_nmethod(nmethod* nm) {}
src/hotspot/share/gc/g1/g1CollectedHeap.cpp
@@ -89,6 +89,7 @@
 #include "logging/log.hpp"
 #include "memory/allocation.hpp"
 #include "memory/iterator.hpp"
+#include "memory/heapInspection.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
 #include "oops/access.inline.hpp"
@@ -161,9 +162,13 @@ void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions
   reset_from_card_cache(start_idx, num_regions);
 }

-Tickspan G1CollectedHeap::run_task(AbstractGangTask* task) {
-  Ticks start = Ticks::now();
+void G1CollectedHeap::run_task(AbstractGangTask* task) {
   workers()->run_task(task, workers()->active_workers());
+}
+
+Tickspan G1CollectedHeap::run_task_timed(AbstractGangTask* task) {
+  Ticks start = Ticks::now();
+  run_task(task);
   return Ticks::now() - start;
 }
@@ -2301,6 +2306,30 @@ void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
   heap_region_iterate(&blk);
 }

+class G1ParallelObjectIterator : public ParallelObjectIterator {
+private:
+  G1CollectedHeap*  _heap;
+  HeapRegionClaimer _claimer;
+
+public:
+  G1ParallelObjectIterator(uint thread_num) :
+      _heap(G1CollectedHeap::heap()),
+      _claimer(thread_num == 0 ? G1CollectedHeap::heap()->workers()->active_workers() : thread_num) {}
+
+  virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
+    _heap->object_iterate_parallel(cl, worker_id, &_claimer);
+  }
+};
+
+ParallelObjectIterator* G1CollectedHeap::parallel_object_iterator(uint thread_num) {
+  return new G1ParallelObjectIterator(thread_num);
+}
+
+void G1CollectedHeap::object_iterate_parallel(ObjectClosure* cl, uint worker_id, HeapRegionClaimer* claimer) {
+  IterateObjectClosureRegionClosure blk(cl);
+  heap_region_par_iterate_from_worker_offset(&blk, claimer, worker_id);
+}
+
 void G1CollectedHeap::keep_alive(oop obj) {
   G1BarrierSet::enqueue(obj);
 }
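Note: G1ParallelObjectIterator delegates the actual carving-up of work to HeapRegionClaimer: each worker starts scanning at its own region offset and claims each region atomically, so every region is visited by exactly one worker no matter how many workers race. Below is a minimal sketch of that claiming scheme using standard C++ atomics rather than HotSpot internals; the class and method names here are illustrative, not the real implementation.

#include <atomic>
#include <cstdint>

// Illustrative region claimer: each of N workers starts at a distinct
// offset and wraps around, claiming each region at most once via CAS.
class RegionClaimer {
  std::atomic<bool>* _claims;   // one "claimed" flag per region
  uint32_t _n_regions;
  uint32_t _n_workers;          // must be >= 1
public:
  RegionClaimer(uint32_t n_regions, uint32_t n_workers)
    : _claims(new std::atomic<bool>[n_regions]()),
      _n_regions(n_regions), _n_workers(n_workers) {}
  ~RegionClaimer() { delete[] _claims; }

  // Visit every region exactly once across all workers combined.
  template <typename Visitor>
  void iterate_from_worker_offset(uint32_t worker_id, Visitor visit) {
    // Spread start points so workers begin in disjoint parts of the heap.
    uint32_t start = (uint32_t)((uint64_t)_n_regions * worker_id / _n_workers);
    for (uint32_t i = 0; i < _n_regions; i++) {
      uint32_t idx = (start + i) % _n_regions;
      bool expected = false;
      if (_claims[idx].compare_exchange_strong(expected, true)) {
        visit(idx);  // this worker won the claim on region idx
      }
    }
  }
};

Starting each worker at a distinct offset keeps contention on the CAS low: workers only collide near the boundaries of their initial ranges, when faster workers start stealing regions from slower ones.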
@@ -3694,7 +3723,7 @@ void G1CollectedHeap::pre_evacuate_collection_set(G1EvacuationInfo& evacuation_i

   {
     G1PrepareEvacuationTask g1_prep_task(this);
-    Tickspan task_time = run_task(&g1_prep_task);
+    Tickspan task_time = run_task_timed(&g1_prep_task);

     phase_times()->record_register_regions(task_time.seconds() * 1000.0,
                                            g1_prep_task.humongous_total(),
@@ -3843,7 +3872,7 @@ void G1CollectedHeap::evacuate_initial_collection_set(G1ParScanThreadStateSet* p
   {
     G1RootProcessor root_processor(this, num_workers);
     G1EvacuateRegionsTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, num_workers);
-    task_time = run_task(&g1_par_task);
+    task_time = run_task_timed(&g1_par_task);
     // Closing the inner scope will execute the destructor for the G1RootProcessor object.
     // To extract its code root fixup time we measure total time of this scope and
     // subtract from the time the WorkGang task took.
@@ -3882,7 +3911,7 @@ void G1CollectedHeap::evacuate_next_optional_regions(G1ParScanThreadStateSet* pe
   {
     G1MarkScope code_mark_scope;
     G1EvacuateOptionalRegionsTask task(per_thread_states, _task_queues, workers()->active_workers());
-    task_time = run_task(&task);
+    task_time = run_task_timed(&task);
     // See comment in evacuate_collection_set() for the reason of the scope.
   }
   Tickspan total_processing = Ticks::now() - start_processing;
src/hotspot/share/gc/g1/g1CollectedHeap.hpp
@@ -551,9 +551,12 @@ public:

   WorkGang* workers() const { return _workers; }

-  // Runs the given AbstractGangTask with the current active workers, returning the
-  // total time taken.
-  Tickspan run_task(AbstractGangTask* task);
+  // Runs the given AbstractGangTask with the current active workers.
+  virtual void run_task(AbstractGangTask* task);
+
+  // Runs the given AbstractGangTask with the current active workers,
+  // returning the total time taken.
+  Tickspan run_task_timed(AbstractGangTask* task);

   G1Allocator* allocator() {
     return _allocator;
@@ -1173,9 +1176,13 @@ public:

   // Iteration functions.

+  void object_iterate_parallel(ObjectClosure* cl, uint worker_id, HeapRegionClaimer* claimer);
+
   // Iterate over all objects, calling "cl.do_object" on each.
   virtual void object_iterate(ObjectClosure* cl);

+  virtual ParallelObjectIterator* parallel_object_iterator(uint thread_num);
+
   // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
   virtual void keep_alive(oop obj);
src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
@@ -539,7 +539,6 @@ void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
   old_gen()->object_iterate(cl);
 }

-
 HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
   if (young_gen()->is_in_reserved(addr)) {
     assert(young_gen()->is_in(addr),
@@ -611,6 +610,10 @@ void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
   ParallelScavengeHeap::heap()->workers().threads_do(tc);
 }

+void ParallelScavengeHeap::run_task(AbstractGangTask* task) {
+  _workers.run_task(task);
+}
+
 void ParallelScavengeHeap::print_tracing_info() const {
   AdaptiveSizePolicyOutput::print();
   log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
@@ -218,6 +218,8 @@ class ParallelScavengeHeap : public CollectedHeap {
   virtual void print_on(outputStream* st) const;
   virtual void print_on_error(outputStream* st) const;
   virtual void gc_threads_do(ThreadClosure* tc) const;
+  // Runs the given AbstractGangTask with the current active workers.
+  virtual void run_task(AbstractGangTask* task);
   virtual void print_tracing_info() const;

   virtual WorkGang* get_safepoint_workers() { return &_workers; }
src/hotspot/share/gc/serial/serialHeap.cpp
@@ -87,3 +87,8 @@ GrowableArray<MemoryPool*> SerialHeap::memory_pools() {
   memory_pools.append(_old_pool);
   return memory_pools;
 }
+
+// No workGang for SerialHeap, work serially with thread 0.
+void SerialHeap::run_task(AbstractGangTask* task) {
+  task->work(0);
+}
src/hotspot/share/gc/serial/serialHeap.hpp
@@ -75,6 +75,10 @@ public:
   template <typename OopClosureType1, typename OopClosureType2>
   void oop_since_save_marks_iterate(OopClosureType1* cur,
                                     OopClosureType2* older);
+
+  // Runs the given AbstractGangTask with the current active workers.
+  // No workGang for SerialHeap, work serially with thread 0.
+  virtual void run_task(AbstractGangTask* task);
 };

 #endif // SHARE_GC_SERIAL_SERIALHEAP_HPP
src/hotspot/share/gc/shared/collectedHeap.hpp
@@ -29,6 +29,7 @@
 #include "gc/shared/gcWhen.hpp"
 #include "gc/shared/verifyOption.hpp"
 #include "memory/allocation.hpp"
+#include "memory/heapInspection.hpp"
 #include "memory/universe.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/perfData.hpp"
@@ -44,6 +45,7 @@
 // class defines the functions that a heap must implement, and contains
 // infrastructure common to all heaps.

+class AbstractGangTask;
 class AdaptiveSizePolicy;
 class BarrierSet;
 class GCHeapSummary;
@@ -85,6 +87,11 @@ class GCHeapLog : public EventLogBase<GCMessage> {
   }
 };

+class ParallelObjectIterator : public CHeapObj<mtGC> {
+public:
+  virtual void object_iterate(ObjectClosure* cl, uint worker_id) = 0;
+};
+
 //
 // CollectedHeap
 //   GenCollectedHeap
@@ -407,6 +414,10 @@ class CollectedHeap : public CHeapObj<mtInternal> {
   // Iterate over all objects, calling "cl.do_object" on each.
   virtual void object_iterate(ObjectClosure* cl) = 0;

+  virtual ParallelObjectIterator* parallel_object_iterator(uint thread_num) {
+    return NULL;
+  }
+
   // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
   virtual void keep_alive(oop obj) {}

@@ -456,6 +467,9 @@ class CollectedHeap : public CHeapObj<mtInternal> {
   // Iterator for all GC threads (other than VM thread)
   virtual void gc_threads_do(ThreadClosure* tc) const = 0;

+  // Run given task. Possibly in parallel if the GC supports it.
+  virtual void run_task(AbstractGangTask* task) = 0;
+
   // Print any relevant tracing info that flags imply.
   // Default implementation does nothing.
   virtual void print_tracing_info() const = 0;
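Note: the CollectedHeap contract keeps parallel iteration strictly opt-in. parallel_object_iterator() returns NULL by default, and the caller (populate_table() in heapInspection.cpp, below) treats NULL as "fall back to serial object_iterate()". run_task(), by contrast, is pure virtual, so every collector must state how it executes a gang task: G1, Parallel, Shenandoah, and Z dispatch to their worker threads, while Serial and Epsilon simply run task->work(0) on the calling thread.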
src/hotspot/share/gc/shared/gcVMOperations.cpp
@@ -149,7 +149,7 @@ void VM_GC_HeapInspection::doit() {
     }
   }
   HeapInspection inspect;
-  inspect.heap_inspection(_out);
+  inspect.heap_inspection(_out, _parallel_thread_num);
 }

src/hotspot/share/gc/shared/gcVMOperations.hpp
@@ -125,12 +125,15 @@ class VM_GC_HeapInspection: public VM_GC_Operation {
  private:
   outputStream* _out;
   bool _full_gc;
+  uint _parallel_thread_num;
  public:
-  VM_GC_HeapInspection(outputStream* out, bool request_full_gc) :
+  VM_GC_HeapInspection(outputStream* out, bool request_full_gc,
+                       uint parallel_thread_num = 1) :
     VM_GC_Operation(0 /* total collections, dummy, ignored */,
                     GCCause::_heap_inspection /* GC Cause */,
                     0 /* total full collections, dummy, ignored */,
-                    request_full_gc), _out(out), _full_gc(request_full_gc) {}
+                    request_full_gc), _out(out), _full_gc(request_full_gc),
+                    _parallel_thread_num(parallel_thread_num) {}

   ~VM_GC_HeapInspection() {}
   virtual VMOp_Type type() const { return VMOp_GC_HeapInspection; }
src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
@@ -1195,6 +1195,10 @@ void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
   }
 }

+void ShenandoahHeap::run_task(AbstractGangTask* task) {
+  workers()->run_task(task, workers()->active_workers());
+}
+
 void ShenandoahHeap::print_tracing_info() const {
   LogTarget(Info, gc, stats) lt;
   if (lt.is_enabled()) {
src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
@@ -198,6 +198,8 @@ public:
   WorkGang* get_safepoint_workers();

   void gc_threads_do(ThreadClosure* tcl) const;
+  // Runs the given AbstractGangTask with the current active workers.
+  virtual void run_task(AbstractGangTask* task);

 // ---------- Heap regions handling machinery
 //
src/hotspot/share/gc/z/zCollectedHeap.cpp
@@ -253,6 +253,10 @@ void ZCollectedHeap::object_iterate(ObjectClosure* cl) {
   _heap.object_iterate(cl, true /* visit_weaks */);
 }

+void ZCollectedHeap::run_task(AbstractGangTask* task) {
+  return _heap.run_task(task);
+}
+
 void ZCollectedHeap::keep_alive(oop obj) {
   _heap.keep_alive(obj);
 }
src/hotspot/share/gc/z/zCollectedHeap.hpp
@@ -98,6 +98,8 @@ public:

   virtual void object_iterate(ObjectClosure* cl);

+  virtual void run_task(AbstractGangTask* task);
+
   virtual void keep_alive(oop obj);

   virtual void register_nmethod(nmethod* nm);
src/hotspot/share/gc/z/zHeap.cpp
@@ -35,6 +35,7 @@
 #include "gc/z/zRelocationSetSelector.inline.hpp"
 #include "gc/z/zResurrection.hpp"
 #include "gc/z/zStat.hpp"
+#include "gc/z/zTask.hpp"
 #include "gc/z/zThread.inline.hpp"
 #include "gc/z/zVerify.hpp"
 #include "gc/z/zWorkers.inline.hpp"
@@ -185,6 +186,26 @@ void ZHeap::threads_do(ThreadClosure* tc) const {
   _workers.threads_do(tc);
 }

+// Adapter class from AbstractGangTask to ZTask
+class ZAbstractGangTaskAdapter : public ZTask {
+private:
+  AbstractGangTask* _task;
+
+public:
+  ZAbstractGangTaskAdapter(AbstractGangTask* task) :
+      ZTask(task->name()),
+      _task(task) { }
+
+  virtual void work() {
+    _task->work(ZThread::worker_id());
+  }
+};
+
+void ZHeap::run_task(AbstractGangTask* task) {
+  ZAbstractGangTaskAdapter ztask(task);
+  _workers.run_parallel(&ztask);
+}
+
 void ZHeap::out_of_memory() {
   ResourceMark rm;
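Note: ZGC has no WorkGang; its workers run ZTasks. Rather than change that framework, the patch bridges the two task types: ZAbstractGangTaskAdapter wraps the incoming AbstractGangTask in a ZTask, forwards name() for logging, and supplies the worker id from ZThread::worker_id() when work() is invoked. This keeps the shared run_task(AbstractGangTask*) interface uniform across collectors at the cost of a thin, stack-allocated adapter per call.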
src/hotspot/share/gc/z/zHeap.hpp
@@ -98,6 +98,7 @@ public:
   uint nconcurrent_no_boost_worker_threads() const;
   void set_boost_worker_threads(bool boost);
   void threads_do(ThreadClosure* tc) const;
+  void run_task(AbstractGangTask* task);

   // Reference processing
   ReferenceDiscoverer* reference_discoverer();
src/hotspot/share/memory/heapInspection.cpp
@@ -35,6 +35,7 @@
 #include "memory/universe.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/reflectionAccessorImplKlassHelper.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/os.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/macros.hpp"
@@ -237,6 +238,41 @@ size_t KlassInfoTable::size_of_instances_in_words() const {
   return _size_of_instances_in_words;
 }

+// Return false if the entry could not be recorded on account
+// of running out of space required to create a new entry.
+bool KlassInfoTable::merge_entry(const KlassInfoEntry* cie) {
+  Klass* k = cie->klass();
+  KlassInfoEntry* elt = lookup(k);
+  // elt may be NULL if it's a new klass for which we
+  // could not allocate space for a new entry in the hashtable.
+  if (elt != NULL) {
+    elt->set_count(elt->count() + cie->count());
+    elt->set_words(elt->words() + cie->words());
+    _size_of_instances_in_words += cie->words();
+    return true;
+  }
+  return false;
+}
+
+class KlassInfoTableMergeClosure : public KlassInfoClosure {
+ private:
+  KlassInfoTable* _dest;
+  bool _success;
+ public:
+  KlassInfoTableMergeClosure(KlassInfoTable* table) : _dest(table), _success(true) {}
+  void do_cinfo(KlassInfoEntry* cie) {
+    _success &= _dest->merge_entry(cie);
+  }
+  bool success() { return _success; }
+};
+
+// merge from table
+bool KlassInfoTable::merge(KlassInfoTable* table) {
+  KlassInfoTableMergeClosure closure(this);
+  table->iterate(&closure);
+  return closure.success();
+}
+
 int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) {
   return (*e1)->compare(*e1,*e2);
 }
@@ -482,7 +518,7 @@ class HistoClosure : public KlassInfoClosure {
 class RecordInstanceClosure : public ObjectClosure {
  private:
   KlassInfoTable* _cit;
-  size_t _missed_count;
+  uintx _missed_count;
   BoolObjectClosure* _filter;
  public:
   RecordInstanceClosure(KlassInfoTable* cit, BoolObjectClosure* filter) :
@@ -496,7 +532,7 @@ class RecordInstanceClosure : public ObjectClosure {
     }
   }

-  size_t missed_count() { return _missed_count; }
+  uintx missed_count() { return _missed_count; }

  private:
   bool should_visit(oop obj) {
@@ -504,23 +540,68 @@ class RecordInstanceClosure : public ObjectClosure {
   }
 };

-size_t HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter) {
-  ResourceMark rm;
+// Heap inspection for every worker.
+// When native OOM happens for KlassInfoTable, set _success to false.
+void ParHeapInspectTask::work(uint worker_id) {
+  uintx missed_count = 0;
+  bool merge_success = true;
+  if (!Atomic::load(&_success)) {
+    // other worker has failed on parallel iteration.
+    return;
+  }
+
+  KlassInfoTable cit(false);
+  if (cit.allocation_failed()) {
+    // fail to allocate memory, stop parallel mode
+    Atomic::store(&_success, false);
+    return;
+  }
+  RecordInstanceClosure ric(&cit, _filter);
+  _poi->object_iterate(&ric, worker_id);
+  missed_count = ric.missed_count();
+  {
+    MutexLocker x(&_mutex);
+    merge_success = _shared_cit->merge(&cit);
+  }
+  if (merge_success) {
+    Atomic::add(&_missed_count, missed_count);
+  } else {
+    Atomic::store(&_success, false);
+  }
+}
+
+uintx HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter, uint parallel_thread_num) {
+
+  // Try parallel first.
+  if (parallel_thread_num > 1) {
+    ResourceMark rm;
+    ParallelObjectIterator* poi = Universe::heap()->parallel_object_iterator(parallel_thread_num);
+    if (poi != NULL) {
+      ParHeapInspectTask task(poi, cit, filter);
+      Universe::heap()->run_task(&task);
+      delete poi;
+      if (task.success()) {
+        return task.missed_count();
+      }
+    }
+  }
+
+  ResourceMark rm;
+  // If no parallel iteration available, run serially.
   RecordInstanceClosure ric(cit, filter);
   Universe::heap()->object_iterate(&ric);
   return ric.missed_count();
 }

-void HeapInspection::heap_inspection(outputStream* st) {
+void HeapInspection::heap_inspection(outputStream* st, uint parallel_thread_num) {
   ResourceMark rm;

   KlassInfoTable cit(false);
   if (!cit.allocation_failed()) {
     // populate table with object allocation info
-    size_t missed_count = populate_table(&cit);
+    uintx missed_count = populate_table(&cit, NULL, parallel_thread_num);
     if (missed_count != 0) {
-      log_info(gc, classhisto)("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT
+      log_info(gc, classhisto)("WARNING: Ran out of C-heap; undercounted " UINTX_FORMAT
                                " total instances in data below",
                                missed_count);
     }
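Note: ParHeapInspectTask::work() is the classic "thread-local accumulate, then merge under a lock" pattern. The hot counting loop runs against a private KlassInfoTable with no synchronization; the single shared table is touched only briefly under _mutex; and failure (a C-heap allocation failure in any worker) is broadcast through the _success flag with Atomic loads and stores so that populate_table() can retry serially. A standalone sketch of the same pattern in portable C++, for illustration only — the names and the std::thread scheduling are not HotSpot's:

#include <map>
#include <mutex>
#include <thread>
#include <vector>

// Illustrative skeleton of the merge pattern used by ParHeapInspectTask:
// each worker fills a private histogram without locking, then merges it
// into the shared one under a single lock. (The real task additionally
// short-circuits through an atomic _success flag on allocation failure.)
static void parallel_histogram(unsigned n_workers,
                               const std::vector<int>& items,
                               std::map<int, long>& shared) {
  std::mutex merge_lock;
  std::vector<std::thread> workers;
  for (unsigned w = 0; w < n_workers; w++) {
    workers.emplace_back([&, w] {
      std::map<int, long> local;                     // thread-local table
      for (std::size_t i = w; i < items.size(); i += n_workers) {
        local[items[i]]++;                           // count without locking
      }
      std::lock_guard<std::mutex> guard(merge_lock); // brief critical section
      for (const auto& e : local) {
        shared[e.first] += e.second;
      }
    });
  }
  for (auto& t : workers) {
    t.join();
  }
}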
src/hotspot/share/memory/heapInspection.hpp
@@ -30,6 +30,9 @@
 #include "oops/oop.hpp"
 #include "oops/annotations.hpp"
 #include "utilities/macros.hpp"
+#include "gc/shared/workgroup.hpp"
+
+class ParallelObjectIterator;

 #if INCLUDE_SERVICES

@@ -122,6 +125,8 @@ class KlassInfoTable: public StackObj {
   void iterate(KlassInfoClosure* cic);
   bool allocation_failed() { return _buckets == NULL; }
   size_t size_of_instances_in_words() const;
+  bool merge(KlassInfoTable* table);
+  bool merge_entry(const KlassInfoEntry* cie);

   friend class KlassInfoHisto;
   friend class KlassHierarchy;
@@ -211,11 +216,46 @@ class KlassInfoClosure;

 class HeapInspection : public StackObj {
  public:
-  void heap_inspection(outputStream* st) NOT_SERVICES_RETURN;
-  size_t populate_table(KlassInfoTable* cit, BoolObjectClosure* filter = NULL) NOT_SERVICES_RETURN_(0);
+  void heap_inspection(outputStream* st, uint parallel_thread_num = 1) NOT_SERVICES_RETURN;
+  uintx populate_table(KlassInfoTable* cit, BoolObjectClosure* filter = NULL, uint parallel_thread_num = 1) NOT_SERVICES_RETURN_(0);
   static void find_instances_at_safepoint(Klass* k, GrowableArray<oop>* result) NOT_SERVICES_RETURN;
  private:
   void iterate_over_heap(KlassInfoTable* cit, BoolObjectClosure* filter = NULL);
 };

+// Parallel heap inspection task. Parallel inspection can fail due to
+// a native OOM when allocating memory for TL-KlassInfoTable.
+// _success will be set false on an OOM, and serial inspection tried.
+class ParHeapInspectTask : public AbstractGangTask {
+ private:
+  ParallelObjectIterator* _poi;
+  KlassInfoTable* _shared_cit;
+  BoolObjectClosure* _filter;
+  uintx _missed_count;
+  bool _success;
+  Mutex _mutex;
+
+ public:
+  ParHeapInspectTask(ParallelObjectIterator* poi,
+                     KlassInfoTable* shared_cit,
+                     BoolObjectClosure* filter) :
+      AbstractGangTask("Iterating heap"),
+      _poi(poi),
+      _shared_cit(shared_cit),
+      _filter(filter),
+      _missed_count(0),
+      _success(true),
+      _mutex(Mutex::leaf, "Parallel heap iteration data merge lock") {}
+
+  uintx missed_count() const {
+    return _missed_count;
+  }
+
+  bool success() {
+    return _success;
+  }
+
+  virtual void work(uint worker_id);
+};
+
 #endif // SHARE_MEMORY_HEAPINSPECTION_HPP
src/hotspot/share/runtime/arguments.hpp
@@ -449,12 +449,6 @@ class Arguments : AllStatic {
   static ArgsRange check_memory_size(julong size, julong min_size, julong max_size);
   static ArgsRange parse_memory_size(const char* s, julong* long_arg,
                                      julong min_size, julong max_size = max_uintx);
-  // Parse a string for a unsigned integer. Returns true if value
-  // is an unsigned integer greater than or equal to the minimum
-  // parameter passed and returns the value in uintx_arg. Returns
-  // false otherwise, with uintx_arg undefined.
-  static bool parse_uintx(const char* value, uintx* uintx_arg,
-                          uintx min_size);

   // methods to build strings from individual args
   static void build_jvm_args(const char* arg);
@@ -498,6 +492,12 @@ class Arguments : AllStatic {
  public:
   // Parses the arguments, first phase
   static jint parse(const JavaVMInitArgs* args);
+  // Parse a string for a unsigned integer. Returns true if value
+  // is an unsigned integer greater than or equal to the minimum
+  // parameter passed and returns the value in uintx_arg. Returns
+  // false otherwise, with uintx_arg undefined.
+  static bool parse_uintx(const char* value, uintx* uintx_arg,
+                          uintx min_size);
   // Apply ergonomics
   static jint apply_ergo();
   // Adjusts the arguments after the OS have adjusted the arguments
src/hotspot/share/services/attachListener.cpp
@@ -248,11 +248,13 @@ jint dump_heap(AttachOperation* op, outputStream* out) {
 // Input arguments :-
 //   arg0: "-live" or "-all"
 //   arg1: Name of the dump file or NULL
+//   arg2: parallel thread number
 static jint heap_inspection(AttachOperation* op, outputStream* out) {
   bool live_objects_only = true;   // default is true to retain the behavior before this change is made
   outputStream* os = out;   // if path not specified or path is NULL, use out
   fileStream* fs = NULL;
   const char* arg0 = op->arg(0);
+  uint parallel_thread_num = MAX2<uint>(1, (uint)os::initial_active_processor_count() * 3 / 8);
   if (arg0 != NULL && (strlen(arg0) > 0)) {
     if (strcmp(arg0, "-all") != 0 && strcmp(arg0, "-live") != 0) {
       out->print_cr("Invalid argument to inspectheap operation: %s", arg0);
@@ -262,21 +264,26 @@ static jint heap_inspection(AttachOperation* op, outputStream* out) {
   }

   const char* path = op->arg(1);
-  if (path != NULL) {
-    if (path[0] == '\0') {
-      out->print_cr("No dump file specified");
-    } else {
-      // create file
-      fs = new (ResourceObj::C_HEAP, mtInternal) fileStream(path);
-      if (fs == NULL) {
-        out->print_cr("Failed to allocate space for file: %s", path);
-        return JNI_ERR;
-      }
-      os = fs;
+  if (path != NULL && path[0] != '\0') {
+    // create file
+    fs = new (ResourceObj::C_HEAP, mtInternal) fileStream(path);
+    if (fs == NULL) {
+      out->print_cr("Failed to allocate space for file: %s", path);
     }
+    os = fs;
   }

-  VM_GC_HeapInspection heapop(os, live_objects_only /* request full gc */);
+  const char* num_str = op->arg(2);
+  if (num_str != NULL && num_str[0] != '\0') {
+    uintx num;
+    if (!Arguments::parse_uintx(num_str, &num, 0)) {
+      out->print_cr("Invalid parallel thread number: [%s]", num_str);
+      return JNI_ERR;
+    }
+    parallel_thread_num = num == 0 ? parallel_thread_num : (uint)num;
+  }
+
+  VM_GC_HeapInspection heapop(os, live_objects_only /* request full gc */, parallel_thread_num);
   VMThread::execute(&heapop);
   if (os != NULL && os != out) {
     out->print_cr("Heap inspection file created: %s", path);
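Note: the default worker count computed here is MAX2(1, initial_active_processor_count() * 3 / 8), so a machine reporting 8 active processors gets 3 inspection threads and one reporting 16 gets 6. A client-supplied arg2 of 0 keeps that default, any other value overrides it, and a value that fails Arguments::parse_uintx() is rejected with JNI_ERR.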
src/jdk.jcmd/share/classes/sun/tools/jmap/JMap.java
@@ -169,6 +169,7 @@ public class JMap {
                                               UnsupportedEncodingException {
         String liveopt = "-all";
         String filename = null;
+        String parallel = null;
         String subopts[] = options.split(",");

         for (int i = 0; i < subopts.length; i++) {
@@ -180,9 +181,17 @@ public class JMap {
             } else if (subopt.startsWith("file=")) {
                 filename = parseFileName(subopt);
                 if (filename == null) {
-                    usage(1); // invalid options or no filename
+                    System.err.println("Fail: invalid option or no file name '" + subopt + "'");
+                    usage(1);
                 }
+            } else if (subopt.startsWith("parallel=")) {
+                parallel = subopt.substring("parallel=".length());
+                if (parallel == null) {
+                    System.err.println("Fail: no number provided in option: '" + subopt + "'");
+                    usage(1);
+                }
+            } else {
+                System.err.println("Fail: invalid option: '" + subopt + "'");
+                usage(1);
             }
         }
@@ -190,7 +199,7 @@ public class JMap {
         System.out.flush();

         // inspectHeap is not the same as jcmd GC.class_histogram
-        executeCommandForPid(pid, "inspectheap", liveopt, filename);
+        executeCommandForPid(pid, "inspectheap", liveopt, filename, parallel);
     }

     private static void dump(String pid, String options)
@@ -211,7 +220,8 @@ public class JMap {
         }

         if (filename == null) {
-            usage(1); // invalid options or no filename
+            System.err.println("Fail: invalid option or no file name");
+            usage(1);
         }

         // dumpHeap is not the same as jcmd GC.heap_dump
@@ -287,6 +297,10 @@ public class JMap {
         System.err.println("    live         count only live objects");
         System.err.println("    all          count all objects in the heap (default if one of \"live\" or \"all\" is not specified)");
         System.err.println("    file=<file>  dump data to <file>");
+        System.err.println("    parallel=<number>  parallel threads number for heap iteration:");
+        System.err.println("                  parallel=0 default behavior, use predefined number of threads");
+        System.err.println("                  parallel=1 disable parallel heap iteration");
+        System.err.println("                  parallel=<N> use N threads for parallel heap iteration");
         System.err.println("");
         System.err.println("    Example: jmap -histo:live,file=/tmp/histo.data <pid>");
         System.exit(exit);
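Note: with the new sub-option the histogram can be requested, for example, as jmap -histo:parallel=4 &lt;pid&gt; (four iteration threads), jmap -histo:parallel=1 &lt;pid&gt; (force the old serial scan), or combined with the existing sub-options as jmap -histo:live,file=/tmp/histo.data,parallel=0 &lt;pid&gt;, where 0 selects the VM-side default described above.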
test/jdk/sun/tools/jmap/BasicJMapTest.java
@@ -79,40 +79,68 @@ public class BasicJMapTest {
         output.shouldHaveExitValue(0);
     }

+    private static void testHistoParallelZero() throws Exception {
+        OutputAnalyzer output = jmap("-histo:parallel=0");
+        output.shouldHaveExitValue(0);
+    }
+
+    private static void testHistoParallel() throws Exception {
+        OutputAnalyzer output = jmap("-histo:parallel=2");
+        output.shouldHaveExitValue(0);
+    }
+
+    private static void testHistoNonParallel() throws Exception {
+        OutputAnalyzer output = jmap("-histo:parallel=1");
+        output.shouldHaveExitValue(0);
+    }
+
     private static void testHistoToFile() throws Exception {
-        histoToFile(false);
+        histoToFile(false, false, 1);
     }

     private static void testHistoLiveToFile() throws Exception {
-        histoToFile(true);
+        histoToFile(true, false, 1);
     }

     private static void testHistoAllToFile() throws Exception {
-        boolean explicitAll = true;
-        histoToFile(false, explicitAll);
+        histoToFile(false, true, 1);
     }

-    private static void histoToFile(boolean live) throws Exception {
-        boolean explicitAll = false;
-        histoToFile(live, explicitAll);
+    private static void testHistoFileParallelZero() throws Exception {
+        histoToFile(false, false, 0);
     }

-    private static void histoToFile(boolean live, boolean explicitAll) throws Exception {
-        if (live == true && explicitAll == true) {
+    private static void testHistoFileParallel() throws Exception {
+        histoToFile(false, false, 2);
+    }
+
+    private static void histoToFile(boolean live,
+                                    boolean explicitAll,
+                                    int parallelThreadNum) throws Exception {
+        String liveArg = "";
+        String fileArg = "";
+        String parArg = "parallel=" + parallelThreadNum;
+        String allArgs = "-histo:";
+
+        if (live && explicitAll) {
            fail("Illegal argument setting for jmap -histo");
        }
+        if (live) {
+            liveArg = "live,";
+        }
+        if (explicitAll) {
+            liveArg = "all,";
+        }
+
        File file = new File("jmap.histo.file" + System.currentTimeMillis() + ".histo");
        if (file.exists()) {
            file.delete();
        }
+        fileArg = "file=" + file.getName();

        OutputAnalyzer output;
-        if (live) {
-            output = jmap("-histo:live,file=" + file.getName());
-        } else if (explicitAll == true) {
-            output = jmap("-histo:all,file=" + file.getName());
-        } else {
-            output = jmap("-histo:file=" + file.getName());
-        }
+        allArgs = allArgs + liveArg + fileArg + ',' + parArg;
+        output = jmap(allArgs);
        output.shouldHaveExitValue(0);
        output.shouldContain("Heap inspection file created");
        file.delete();
@@ -129,43 +157,45 @@ public class BasicJMapTest {
     }

     private static void testDump() throws Exception {
-        dump(false);
+        dump(false, false);
     }

     private static void testDumpLive() throws Exception {
-        dump(true);
+        dump(true, false);
     }

     private static void testDumpAll() throws Exception {
-        boolean explicitAll = true;
-        dump(false, explicitAll);
-    }
-
-    private static void dump(boolean live) throws Exception {
-        boolean explicitAll = false;
-        dump(live, explicitAll);
+        dump(false, true);
     }

     private static void dump(boolean live, boolean explicitAll) throws Exception {
-        if (live == true && explicitAll == true) {
-            fail("Illegal argument setting for jmap -dump");
+        String liveArg = "";
+        String fileArg = "";
+        String allArgs = "-dump:";
+
+        if (live && explicitAll) {
+            fail("Illegal argument setting for jmap -dump");
         }
-        File dump = new File("jmap.dump." + System.currentTimeMillis() + ".hprof");
-        if (dump.exists()) {
-            dump.delete();
+        if (live) {
+            liveArg = "live,";
         }
-        OutputAnalyzer output;
-        if (live) {
-            output = jmap("-dump:live,format=b,file=" + dump.getName());
-        } else if (explicitAll == true) {
-            output = jmap("-dump:all,format=b,file=" + dump.getName());
-        } else {
-            output = jmap("-dump:format=b,file=" + dump.getName());
+        if (explicitAll) {
+            liveArg = "all,";
         }
+
+        File file = new File("jmap.dump" + System.currentTimeMillis() + ".hprof");
+        if (file.exists()) {
+            file.delete();
+        }
+        fileArg = "file=" + file.getName();
+
+        OutputAnalyzer output;
+        allArgs = allArgs + liveArg + "format=b," + fileArg;
+        output = jmap(allArgs);
         output.shouldHaveExitValue(0);
         output.shouldContain("Heap dump file created");
-        verifyDumpFile(dump);
-        dump.delete();
+        verifyDumpFile(file);
+        file.delete();
     }

     private static void verifyDumpFile(File dump) {
@@ -195,5 +225,4 @@ public class BasicJMapTest {

         return output;
     }
-
 }