8299426: Heap dump does not contain virtual Thread stack references

Reviewed-by: cjplummer, sspitsyn, lmesnik
This commit is contained in:
Alex Menkov 2023-12-07 23:18:23 +00:00
parent 959a443a9e
commit 354ea4c28f
3 changed files with 238 additions and 164 deletions
src/hotspot/share/services
test/hotspot/jtreg/serviceability

@ -1473,6 +1473,25 @@ void SymbolTableDumper::do_symbol(Symbol** p) {
}
}
// Support class used to generate HPROF_GC_CLASS_DUMP records.
// Visits every Klass in the class loader data graph and serializes it
// with the appropriate DumperSupport helper (instance vs. array class).
class ClassDumper : public KlassClosure {
 private:
  AbstractDumpWriter* _writer;
  AbstractDumpWriter* writer() const { return _writer; }

 public:
  // 'explicit' guards against accidental implicit conversion from a
  // writer pointer; all visible call sites construct ClassDumper directly.
  explicit ClassDumper(AbstractDumpWriter* writer) : _writer(writer) {}

  // KlassClosure callback: writes one HPROF_GC_CLASS_DUMP record for k.
  void do_klass(Klass* k) {
    if (k->is_instance_klass()) {
      DumperSupport::dump_instance_class(writer(), k);
    } else {
      DumperSupport::dump_array_class(writer(), k);
    }
  }
};
// Support class used to generate HPROF_GC_ROOT_JNI_LOCAL records
class JNILocalsDumper : public OopClosure {
@ -1860,21 +1879,25 @@ vframe* ThreadDumper::get_top_frame() const {
return nullptr;
}
// Callback to dump thread-related data for unmounted virtual threads;
// implemented by VM_HeapDumper.
class UnmountedVThreadDumper {
 public:
  // Dumps records for the unmounted virtual thread object 'vt'.
  // 'segment_writer' receives the heap-dump-segment subrecords (GC roots);
  // implementations may route other records (e.g. stack traces) to a
  // different, global writer.
  virtual void dump_vthread(oop vt, AbstractDumpWriter* segment_writer) = 0;
};
class VM_HeapDumper;
// Support class using when iterating over the heap.
// Support class used when iterating over the heap.
class HeapObjectDumper : public ObjectClosure {
private:
AbstractDumpWriter* _writer;
AbstractDumpWriter* writer() { return _writer; }
UnmountedVThreadDumper* _vthread_dumper;
DumperClassCacheTable _class_cache;
public:
HeapObjectDumper(AbstractDumpWriter* writer) {
_writer = writer;
}
HeapObjectDumper(AbstractDumpWriter* writer, UnmountedVThreadDumper* vthread_dumper)
: _writer(writer), _vthread_dumper(vthread_dumper) {}
// called for each object in the heap
void do_object(oop o);
@ -1895,6 +1918,9 @@ void HeapObjectDumper::do_object(oop o) {
if (o->is_instance()) {
// create a HPROF_GC_INSTANCE record for each object
DumperSupport::dump_instance(writer(), o, &_class_cache);
if (java_lang_VirtualThread::is_instance(o) && ThreadDumper::should_dump_vthread(o)) {
_vthread_dumper->dump_vthread(o, writer());
}
} else if (o->is_objArray()) {
// create a HPROF_GC_OBJ_ARRAY_DUMP record for each object array
DumperSupport::dump_object_array(writer(), objArrayOop(o));
@ -1908,16 +1934,52 @@ void HeapObjectDumper::do_object(oop o) {
class DumperController : public CHeapObj<mtInternal> {
private:
Monitor* _lock;
Mutex* _global_writer_lock;
const uint _dumper_number;
uint _complete_number;
bool _started; // VM dumper started and acquired global writer lock
public:
DumperController(uint number) :
_lock(new (std::nothrow) PaddedMonitor(Mutex::safepoint, "DumperController_lock")),
// _lock and _global_writer_lock are used for synchronization between GC worker threads inside safepoint,
// so we lock with _no_safepoint_check_flag.
// signal_start() acquires _lock when global writer is locked,
// its rank must be less than _global_writer_lock rank.
_lock(new (std::nothrow) PaddedMonitor(Mutex::nosafepoint - 1, "DumperController_lock")),
_global_writer_lock(new (std::nothrow) Mutex(Mutex::nosafepoint, "DumpWriter_lock")),
_dumper_number(number),
_complete_number(0) { }
_complete_number(0),
_started(false)
{}
~DumperController() { delete _lock; }
~DumperController() {
delete _lock;
delete _global_writer_lock;
}
// parallel (non VM) dumpers must wait until VM dumper acquires global writer lock
// Blocks the calling dumper on _lock until signal_start() sets _started.
// The condition is re-checked in a loop so spurious wakeups are harmless.
// No safepoint check: callers are GC worker threads already inside a safepoint.
void wait_for_start_signal() {
  MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
  while (_started == false) {
    ml.wait();
  }
}
// Called by the VM dumper (after it has taken the global writer lock — see
// the lock-rank comment at the _lock constructor) to release all parallel
// dumpers blocked in wait_for_start_signal().
void signal_start() {
  MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
  _started = true;
  ml.notify_all();
}
// Acquires the mutex serializing access to the global DumpWriter.
// No safepoint check: used by dumper threads running inside a safepoint.
void lock_global_writer() {
  _global_writer_lock->lock_without_safepoint_check();
}
// Releases the global DumpWriter mutex taken by lock_global_writer().
void unlock_global_writer() {
  _global_writer_lock->unlock();
}
void dumper_complete(DumpWriter* local_writer, DumpWriter* global_writer) {
MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
@ -1946,7 +2008,7 @@ private:
int _dump_seq;
private:
void merge_file(char* path);
void merge_file(const char* path);
void merge_done();
void set_error(const char* msg);
@ -1958,8 +2020,28 @@ public:
_dump_seq(dump_seq) {}
void do_merge();
// returns path for the parallel DumpWriter (resource allocated)
static char* get_writer_path(const char* base_path, int seq);
};
// Builds the segmented heap file path "<base_path>.p<seq>" in a
// resource-allocated buffer (caller provides the ResourceMark).
char* DumpMerger::get_writer_path(const char* base_path, int seq) {
  // Upper bound on the required length: the base path, the ".p" suffix,
  // up to 10 decimal digits (enough for 2^32 parallel dumpers), and the
  // trailing '\0'.
  const size_t capacity = strlen(base_path) + 2 + 10 + 1;
  char* result = NEW_RESOURCE_ARRAY(char, capacity);
  memset(result, 0, capacity);
  os::snprintf(result, capacity, "%s.p%d", base_path, seq);
  return result;
}
void DumpMerger::merge_done() {
// Writes the HPROF_HEAP_DUMP_END record.
if (!_has_error) {
@ -1980,7 +2062,7 @@ void DumpMerger::set_error(const char* msg) {
// Merge segmented heap files via sendfile, it's more efficient than the
// read+write combination, which would require transferring data to and from
// user space.
void DumpMerger::merge_file(char* path) {
void DumpMerger::merge_file(const char* path) {
assert(!SafepointSynchronize::is_at_safepoint(), "merging happens outside safepoint");
TraceTime timer("Merge segmented heap file directly", TRACETIME_LOG(Info, heapdump));
@ -2018,7 +2100,7 @@ void DumpMerger::merge_file(char* path) {
}
#else
// Generic implementation using read+write
void DumpMerger::merge_file(char* path) {
void DumpMerger::merge_file(const char* path) {
assert(!SafepointSynchronize::is_at_safepoint(), "merging happens outside safepoint");
TraceTime timer("Merge segmented heap file", TRACETIME_LOG(Info, heapdump));
@ -2054,10 +2136,9 @@ void DumpMerger::do_merge() {
// Merge the content of the remaining files into base file. Regardless of whether
// the merge process is successful or not, these segmented files will be deleted.
char path[JVM_MAXPATHLEN];
for (int i = 0; i < _dump_seq; i++) {
memset(path, 0, JVM_MAXPATHLEN);
os::snprintf(path, JVM_MAXPATHLEN, "%s.p%d", _path, i);
ResourceMark rm;
const char* path = get_writer_path(_path, i);
if (!_has_error) {
merge_file(path);
}
@ -2087,7 +2168,7 @@ public:
};
// The VM operation that performs the heap dump
class VM_HeapDumper : public VM_GC_Operation, public WorkerTask {
class VM_HeapDumper : public VM_GC_Operation, public WorkerTask, public UnmountedVThreadDumper {
private:
static VM_HeapDumper* _global_dumper;
static DumpWriter* _global_writer;
@ -2107,10 +2188,15 @@ class VM_HeapDumper : public VM_GC_Operation, public WorkerTask {
uint _num_dumper_threads;
DumperController* _dumper_controller;
ParallelObjectIterator* _poi;
// worker id of VMDumper thread.
static const size_t VMDumperWorkerId = 0;
// Dumper id of VMDumper thread.
static const int VMDumperId = 0;
// VM dumper dumps both heap and non-heap data, other dumpers dump heap-only data.
static bool is_vm_dumper(uint worker_id) { return worker_id == VMDumperWorkerId; }
static bool is_vm_dumper(int dumper_id) { return dumper_id == VMDumperId; }
// the 1st dumper calling get_next_dumper_id becomes VM dumper
int get_next_dumper_id() {
return Atomic::fetch_then_add(&_dump_seq, 1);
}
// accessors and setters
static VM_HeapDumper* dumper() { assert(_global_dumper != nullptr, "Error"); return _global_dumper; }
@ -2129,17 +2215,11 @@ class VM_HeapDumper : public VM_GC_Operation, public WorkerTask {
bool skip_operation() const;
// create dump writer for every parallel dump thread
DumpWriter* create_local_writer();
// writes a HPROF_LOAD_CLASS record
// writes a HPROF_LOAD_CLASS record to global writer
static void do_load_class(Klass* k);
// writes a HPROF_GC_CLASS_DUMP record for the given class
static void do_class_dump(Klass* k);
// HPROF_GC_ROOT_THREAD_OBJ records for platform and mounted virtual threads
void dump_threads();
void dump_threads(AbstractDumpWriter* writer);
void add_class_serial_number(Klass* k, int serial_num) {
_klass_map->at_put_grow(serial_num, k);
@ -2150,7 +2230,7 @@ class VM_HeapDumper : public VM_GC_Operation, public WorkerTask {
}
// HPROF_TRACE and HPROF_FRAME records for platform and mounted virtual threads
void dump_stack_traces();
void dump_stack_traces(AbstractDumpWriter* writer);
public:
VM_HeapDumper(DumpWriter* writer, bool gc_before_heap_dump, bool oome, uint num_dump_threads) :
@ -2168,7 +2248,7 @@ class VM_HeapDumper : public VM_GC_Operation, public WorkerTask {
_thread_serial_num = 1;
_frame_serial_num = 1;
_dump_seq = 0;
_dump_seq = VMDumperId;
_num_dumper_threads = num_dump_threads;
_dumper_controller = nullptr;
_poi = nullptr;
@ -2202,12 +2282,15 @@ class VM_HeapDumper : public VM_GC_Operation, public WorkerTask {
}
int dump_seq() { return _dump_seq; }
bool is_parallel_dump() { return _num_dumper_threads > 1; }
bool can_parallel_dump(WorkerThreads* workers);
void prepare_parallel_dump(WorkerThreads* workers);
VMOp_Type type() const { return VMOp_HeapDumper; }
virtual bool doit_prologue();
void doit();
void work(uint worker_id);
// UnmountedVThreadDumper implementation
void dump_vthread(oop vt, AbstractDumpWriter* segment_writer);
};
VM_HeapDumper* VM_HeapDumper::_global_dumper = nullptr;
@ -2251,22 +2334,13 @@ void VM_HeapDumper::do_load_class(Klass* k) {
writer()->write_symbolID(name);
}
// writes a HPROF_GC_CLASS_DUMP record for the given class
void VM_HeapDumper::do_class_dump(Klass* k) {
if (k->is_instance_klass()) {
DumperSupport::dump_instance_class(writer(), k);
} else {
DumperSupport::dump_array_class(writer(), k);
}
}
// Write a HPROF_GC_ROOT_THREAD_OBJ record for platform/carrier and mounted virtual threads.
// Then walk the stack so that locals and JNI locals are dumped.
void VM_HeapDumper::dump_threads() {
for (int i = 0; i < _thread_dumpers_count; i++) {
_thread_dumpers[i]->dump_thread_obj(writer());
_thread_dumpers[i]->dump_stack_refs(writer());
}
// For every collected thread (platform/carrier and mounted virtual threads),
// writes its HPROF_GC_ROOT_THREAD_OBJ record followed by the stack-reference
// subrecords (java frames / JNI locals) to the given writer.
void VM_HeapDumper::dump_threads(AbstractDumpWriter* writer) {
  for (int i = 0; i < _thread_dumpers_count; i++) {
    _thread_dumpers[i]->dump_thread_obj(writer);
    _thread_dumpers[i]->dump_stack_refs(writer);
  }
}
bool VM_HeapDumper::doit_prologue() {
@ -2280,31 +2354,21 @@ bool VM_HeapDumper::doit_prologue() {
return VM_GC_Operation::doit_prologue();
}
bool VM_HeapDumper::can_parallel_dump(WorkerThreads* workers) {
bool can_parallel = true;
void VM_HeapDumper::prepare_parallel_dump(WorkerThreads* workers) {
uint num_active_workers = workers != nullptr ? workers->active_workers() : 0;
uint num_requested_dump_threads = _num_dumper_threads;
// check if we can dump in parallel based on requested and active threads
if (num_active_workers <= 1 || num_requested_dump_threads <= 1) {
_num_dumper_threads = 1;
can_parallel = false;
} else {
// check if we have extra path room to accommodate segmented heap files
const char* base_path = writer()->get_file_path();
assert(base_path != nullptr, "sanity check");
if ((strlen(base_path) + 7/*.p\d\d\d\d\0*/) >= JVM_MAXPATHLEN) {
_num_dumper_threads = 1;
can_parallel = false;
} else {
_num_dumper_threads = clamp(num_requested_dump_threads, 2U, num_active_workers);
}
_num_dumper_threads = clamp(num_requested_dump_threads, 2U, num_active_workers);
}
_dumper_controller = new (std::nothrow) DumperController(_num_dumper_threads);
bool can_parallel = _num_dumper_threads > 1;
log_info(heapdump)("Requested dump threads %u, active dump threads %u, "
"actual dump threads %u, parallelism %s",
num_requested_dump_threads, num_active_workers,
_num_dumper_threads, can_parallel ? "true" : "false");
return can_parallel;
}
// The VM operation that dumps the heap. The dump consists of the following
@ -2352,11 +2416,11 @@ void VM_HeapDumper::doit() {
set_global_writer();
WorkerThreads* workers = ch->safepoint_workers();
if (!can_parallel_dump(workers)) {
work(VMDumperWorkerId);
prepare_parallel_dump(workers);
if (!is_parallel_dump()) {
work(VMDumperId);
} else {
uint heap_only_dumper_threads = _num_dumper_threads - 1 /* VMDumper thread */;
_dumper_controller = new (std::nothrow) DumperController(heap_only_dumper_threads);
ParallelObjectIterator poi(_num_dumper_threads);
_poi = &poi;
workers->run_task(this, _num_dumper_threads);
@ -2368,26 +2432,19 @@ void VM_HeapDumper::doit() {
clear_global_writer();
}
// prepare DumpWriter for every parallel dump thread
DumpWriter* VM_HeapDumper::create_local_writer() {
char* path = NEW_RESOURCE_ARRAY(char, JVM_MAXPATHLEN);
memset(path, 0, JVM_MAXPATHLEN);
// generate segmented heap file path
const char* base_path = writer()->get_file_path();
// share global compressor, local DumpWriter is not responsible for its life cycle
AbstractCompressor* compressor = writer()->compressor();
int seq = Atomic::fetch_then_add(&_dump_seq, 1);
os::snprintf(path, JVM_MAXPATHLEN, "%s.p%d", base_path, seq);
// create corresponding writer for that
DumpWriter* local_writer = new DumpWriter(path, writer()->is_overwrite(), compressor);
return local_writer;
}
void VM_HeapDumper::work(uint worker_id) {
// VM Dumper works on all non-heap data dumping and part of heap iteration.
if (is_vm_dumper(worker_id)) {
int dumper_id = get_next_dumper_id();
if (is_vm_dumper(dumper_id)) {
// lock global writer, it will be unlocked after VM Dumper finishes with non-heap data
_dumper_controller->lock_global_writer();
_dumper_controller->signal_start();
} else {
_dumper_controller->wait_for_start_signal();
}
if (is_vm_dumper(dumper_id)) {
TraceTime timer("Dump non-objects", TRACETIME_LOG(Info, heapdump));
// Write the file header - we always use 1.0.2
const char* header = "JAVA PROFILE 1.0.2";
@ -2409,79 +2466,82 @@ void VM_HeapDumper::work(uint worker_id) {
// write HPROF_FRAME and HPROF_TRACE records
// this must be called after _klass_map is built when iterating the classes above.
dump_stack_traces();
dump_stack_traces(writer());
// HPROF_HEAP_DUMP/HPROF_HEAP_DUMP_SEGMENT starts here
// Writes HPROF_GC_CLASS_DUMP records
{
LockedClassesDo locked_dump_class(&do_class_dump);
ClassLoaderDataGraph::classes_do(&locked_dump_class);
}
// HPROF_GC_ROOT_THREAD_OBJ + frames + jni locals
dump_threads();
// HPROF_GC_ROOT_JNI_GLOBAL
JNIGlobalsDumper jni_dumper(writer());
JNIHandles::oops_do(&jni_dumper);
// technically not jni roots, but global roots
// for things like preallocated throwable backtraces
Universe::vm_global()->oops_do(&jni_dumper);
// HPROF_GC_ROOT_STICKY_CLASS
// These should be classes in the null class loader data, and not all classes
// if !ClassUnloading
StickyClassDumper class_dumper(writer());
ClassLoaderData::the_null_class_loader_data()->classes_do(&class_dumper);
// unlock global writer, so parallel dumpers can dump stack traces of unmounted virtual threads
_dumper_controller->unlock_global_writer();
}
// Heap iteration.
// writes HPROF_GC_INSTANCE_DUMP records.
// After each sub-record is written check_segment_length will be invoked
// to check if the current segment exceeds a threshold. If so, a new
// segment is started.
// The HPROF_GC_CLASS_DUMP and HPROF_GC_INSTANCE_DUMP are the vast bulk
// of the heap dump.
if (!is_parallel_dump()) {
assert(is_vm_dumper(worker_id), "must be");
// == Serial dump
ResourceMark rm;
TraceTime timer("Dump heap objects", TRACETIME_LOG(Info, heapdump));
HeapObjectDumper obj_dumper(writer());
Universe::heap()->object_iterate(&obj_dumper);
writer()->finish_dump_segment();
// Writes the HPROF_HEAP_DUMP_END record because merge does not happen in serial dump
DumperSupport::end_of_dump(writer());
writer()->flush();
} else {
// == Parallel dump
ResourceMark rm;
TraceTime timer("Dump heap objects in parallel", TRACETIME_LOG(Info, heapdump));
DumpWriter* local_writer = is_vm_dumper(worker_id) ? writer() : create_local_writer();
if (!local_writer->has_error()) {
HeapObjectDumper obj_dumper(local_writer);
_poi->object_iterate(&obj_dumper, worker_id);
local_writer->finish_dump_segment();
local_writer->flush();
// HPROF_HEAP_DUMP/HPROF_HEAP_DUMP_SEGMENT starts here
ResourceMark rm;
// share global compressor, local DumpWriter is not responsible for its life cycle
DumpWriter segment_writer(DumpMerger::get_writer_path(writer()->get_file_path(), dumper_id),
writer()->is_overwrite(), writer()->compressor());
if (!segment_writer.has_error()) {
if (is_vm_dumper(dumper_id)) {
// dump some non-heap subrecords to heap dump segment
TraceTime timer("Dump non-objects (part 2)", TRACETIME_LOG(Info, heapdump));
// Writes HPROF_GC_CLASS_DUMP records
ClassDumper class_dumper(&segment_writer);
ClassLoaderDataGraph::classes_do(&class_dumper);
// HPROF_GC_ROOT_THREAD_OBJ + frames + jni locals
dump_threads(&segment_writer);
// HPROF_GC_ROOT_JNI_GLOBAL
JNIGlobalsDumper jni_dumper(&segment_writer);
JNIHandles::oops_do(&jni_dumper);
// technically not jni roots, but global roots
// for things like preallocated throwable backtraces
Universe::vm_global()->oops_do(&jni_dumper);
// HPROF_GC_ROOT_STICKY_CLASS
// These should be classes in the null class loader data, and not all classes
// if !ClassUnloading
StickyClassDumper stiky_class_dumper(&segment_writer);
ClassLoaderData::the_null_class_loader_data()->classes_do(&stiky_class_dumper);
}
if (is_vm_dumper(worker_id)) {
_dumper_controller->wait_all_dumpers_complete();
// Heap iteration.
// writes HPROF_GC_INSTANCE_DUMP records.
// After each sub-record is written check_segment_length will be invoked
// to check if the current segment exceeds a threshold. If so, a new
// segment is started.
// The HPROF_GC_CLASS_DUMP and HPROF_GC_INSTANCE_DUMP are the vast bulk
// of the heap dump.
TraceTime timer(is_parallel_dump() ? "Dump heap objects in parallel" : "Dump heap objects", TRACETIME_LOG(Info, heapdump));
HeapObjectDumper obj_dumper(&segment_writer, this);
if (!is_parallel_dump()) {
Universe::heap()->object_iterate(&obj_dumper);
} else {
_dumper_controller->dumper_complete(local_writer, writer());
delete local_writer;
return;
// == Parallel dump
_poi->object_iterate(&obj_dumper, worker_id);
}
segment_writer.finish_dump_segment();
segment_writer.flush();
}
_dumper_controller->dumper_complete(&segment_writer, writer());
if (is_vm_dumper(dumper_id)) {
_dumper_controller->wait_all_dumpers_complete();
// flush global writer
writer()->flush();
// At this point, all fragments of the heapdump have been written to separate files.
// We need to merge them into a complete heapdump and write HPROF_HEAP_DUMP_END at that time.
}
// At this point, all fragments of the heapdump have been written to separate files.
// We need to merge them into a complete heapdump and write HPROF_HEAP_DUMP_END at that time.
}
void VM_HeapDumper::dump_stack_traces() {
void VM_HeapDumper::dump_stack_traces(AbstractDumpWriter* writer) {
// write a HPROF_TRACE record without any frames to be referenced as object alloc sites
DumperSupport::write_header(writer(), HPROF_TRACE, 3 * sizeof(u4));
writer()->write_u4((u4)STACK_TRACE_ID);
writer()->write_u4(0); // thread number
writer()->write_u4(0); // frame count
DumperSupport::write_header(writer, HPROF_TRACE, 3 * sizeof(u4));
writer->write_u4((u4)STACK_TRACE_ID);
writer->write_u4(0); // thread number
writer->write_u4(0); // frame count
// max number if every platform thread is carrier with mounted virtual thread
_thread_dumpers = NEW_C_HEAP_ARRAY(ThreadDumper*, Threads::number_of_threads() * 2, mtInternal);
@ -2505,7 +2565,7 @@ void VM_HeapDumper::dump_stack_traces() {
add_oom_frame = false;
}
thread_dumper->init_serial_nums(&_thread_serial_num, &_frame_serial_num);
thread_dumper->dump_stack_traces(writer(), _klass_map);
thread_dumper->dump_stack_traces(writer, _klass_map);
}
// platform or carrier thread
@ -2515,11 +2575,27 @@ void VM_HeapDumper::dump_stack_traces() {
thread_dumper->add_oom_frame(_oome_constructor);
}
thread_dumper->init_serial_nums(&_thread_serial_num, &_frame_serial_num);
thread_dumper->dump_stack_traces(writer(), _klass_map);
thread_dumper->dump_stack_traces(writer, _klass_map);
}
}
}
// UnmountedVThreadDumper implementation. Invoked from HeapObjectDumper when
// heap iteration (possibly on a parallel dumper thread) encounters an
// unmounted virtual thread object.
void VM_HeapDumper::dump_vthread(oop vt, AbstractDumpWriter* segment_writer) {
  // unmounted vthread has no JavaThread
  ThreadDumper thread_dumper(ThreadDumper::ThreadType::UnmountedVirtual, nullptr, vt);
  thread_dumper.init_serial_nums(&_thread_serial_num, &_frame_serial_num);

  // write HPROF_TRACE/HPROF_FRAME records to global writer
  // (serialized via the global writer lock, since multiple dumper threads
  // can reach this callback concurrently)
  _dumper_controller->lock_global_writer();
  thread_dumper.dump_stack_traces(writer(), _klass_map);
  _dumper_controller->unlock_global_writer();

  // write HPROF_GC_ROOT_THREAD_OBJ/HPROF_GC_ROOT_JAVA_FRAME/HPROF_GC_ROOT_JNI_LOCAL subrecord
  // to segment writer (this dumper's own segment, so no lock is needed here)
  thread_dumper.dump_thread_obj(segment_writer);
  thread_dumper.dump_stack_refs(segment_writer);
}
// dump the heap to given path.
int HeapDumper::dump(const char* path, outputStream* out, int compression, bool overwrite, uint num_dump_threads) {
assert(path != nullptr && strlen(path) > 0, "path missing");
@ -2561,28 +2637,27 @@ int HeapDumper::dump(const char* path, outputStream* out, int compression, bool
// record any error that the writer may have encountered
set_error(writer.error());
// For serial dump, once VM_HeapDumper completes, the whole heap dump process
// is done, no further phases needed. For parallel dump, the whole heap dump
// process is done in two phases
// Heap dump process is done in two phases
//
// Phase 1: Concurrent threads directly write heap data to multiple heap files.
// This is done by VM_HeapDumper, which is performed within safepoint.
//
// Phase 2: Merge multiple heap files into one complete heap dump file.
// This is done by DumpMerger, which is performed outside safepoint
if (dumper.is_parallel_dump()) {
DumpMerger merger(path, &writer, dumper.dump_seq());
Thread* current_thread = Thread::current();
if (current_thread->is_AttachListener_thread()) {
// perform heapdump file merge operation in the current thread prevents us
// from occupying the VM Thread, which in turn affects the occurrence of
// GC and other VM operations.
merger.do_merge();
} else {
// otherwise, performs it by VM thread
VM_HeapDumpMerge op(&merger);
VMThread::execute(&op);
}
DumpMerger merger(path, &writer, dumper.dump_seq());
Thread* current_thread = Thread::current();
if (current_thread->is_AttachListener_thread()) {
// perform heapdump file merge operation in the current thread prevents us
// from occupying the VM Thread, which in turn affects the occurrence of
// GC and other VM operations.
merger.do_merge();
} else {
// otherwise, performs it by VM thread
VM_HeapDumpMerge op(&merger);
VMThread::execute(&op);
}
if (writer.error() != nullptr) {
set_error(writer.error());
}

@ -66,7 +66,6 @@ public class HeapDumpParallelTest {
appOut.shouldContain("Merge heap files complete");
} else {
appOut.shouldNotContain("Dump heap objects in parallel");
appOut.shouldNotContain("Merge heap files complete");
}
HprofParser.parseAndVerify(heapDumpFile);

@ -183,6 +183,7 @@ public class VThreadInHeapDump {
List<String> extraVMArgs = new ArrayList<>();
extraVMArgs.add("-Djdk.virtualThreadScheduler.parallelism=1");
extraVMArgs.add("-Xlog:heapdump");
extraVMArgs.addAll(Arrays.asList(extraOptions));
LingeredApp.startApp(theApp, extraVMArgs.toArray(new String[0]));
@ -252,8 +253,7 @@ public class VThreadInHeapDump {
// Verify objects from thread stacks are dumped.
test(snapshot, VThreadInHeapDumpTarg.VThreadMountedReferenced.class);
test(snapshot, VThreadInHeapDumpTarg.PThreadReferenced.class);
// Dumping of unmounted vthreads is not implemented yet
//test(snapshot, VThreadInHeapDumpTarg.VThreadUnmountedReferenced.class);
test(snapshot, VThreadInHeapDumpTarg.VThreadUnmountedReferenced.class);
}
}