8256811: Delayed/missed jdwp class unloading events

Co-authored-by: Chris Plummer <cjplummer@openjdk.org>
Reviewed-by: cjplummer, coleenp, sspitsyn
This commit is contained in:
Zhengyu Gu 2022-07-22 12:27:04 +00:00
parent 75c1e99968
commit 54854d9300
12 changed files with 345 additions and 241 deletions

@ -1690,10 +1690,11 @@ void JvmtiExport::continuation_yield_cleanup(JavaThread* thread, jint continuati
}
}
void JvmtiExport::post_object_free(JvmtiEnv* env, jlong tag) {
Thread *thread = Thread::current();
void JvmtiExport::post_object_free(JvmtiEnv* env, GrowableArray<jlong>* objects) {
assert(objects != NULL, "Nothing to post");
if (thread->is_Java_thread() && JavaThread::cast(thread)->is_in_VTMS_transition()) {
JavaThread *javaThread = JavaThread::current();
if (javaThread->is_in_VTMS_transition()) {
return; // no events should be posted if thread is in a VTMS transition
}
assert(env->is_enabled(JVMTI_EVENT_OBJECT_FREE), "checking");
@ -1701,9 +1702,13 @@ void JvmtiExport::post_object_free(JvmtiEnv* env, jlong tag) {
EVT_TRIG_TRACE(JVMTI_EVENT_OBJECT_FREE, ("[?] Trg Object Free triggered" ));
EVT_TRACE(JVMTI_EVENT_OBJECT_FREE, ("[?] Evt Object Free sent"));
JvmtiThreadEventMark jem(javaThread);
JvmtiJavaThreadEventTransition jet(javaThread);
jvmtiEventObjectFree callback = env->callbacks()->ObjectFree;
if (callback != NULL) {
(*callback)(env->jvmti_external(), tag);
for (int index = 0; index < objects->length(); index++) {
(*callback)(env->jvmti_external(), objects->at(index));
}
}
}

@ -386,7 +386,7 @@ class JvmtiExport : public AllStatic {
static void post_monitor_contended_entered(JavaThread *thread, ObjectMonitor *obj_mntr) NOT_JVMTI_RETURN;
static void post_monitor_wait(JavaThread *thread, oop obj, jlong timeout) NOT_JVMTI_RETURN;
static void post_monitor_waited(JavaThread *thread, ObjectMonitor *obj_mntr, jboolean timed_out) NOT_JVMTI_RETURN;
static void post_object_free(JvmtiEnv* env, jlong tag) NOT_JVMTI_RETURN;
static void post_object_free(JvmtiEnv* env, GrowableArray<jlong>* objects) NOT_JVMTI_RETURN;
static void post_resource_exhausted(jint resource_exhausted_flags, const char* detail) NOT_JVMTI_RETURN;
static void record_vm_internal_object_allocation(oop object) NOT_JVMTI_RETURN;
// Post objects collected by vm_object_alloc_event_collector.

@ -77,7 +77,8 @@ JvmtiTagMap::JvmtiTagMap(JvmtiEnv* env) :
_env(env),
_lock(Mutex::nosafepoint, "JvmtiTagMap_lock"),
_needs_rehashing(false),
_needs_cleaning(false) {
_needs_cleaning(false),
_posting_events(false) {
assert(JvmtiThreadState_lock->is_locked(), "sanity check");
assert(((JvmtiEnvBase *)env)->tag_map() == NULL, "tag map already exists for environment");
@ -136,18 +137,16 @@ bool JvmtiTagMap::is_empty() {
}
// This checks for posting and rehashing before operations that
// this tagmap table. The calls from a JavaThread only rehash, posting is
// only done before heap walks.
void JvmtiTagMap::check_hashmap(bool post_events) {
assert(!post_events || SafepointSynchronize::is_at_safepoint(), "precondition");
// this tagmap table.
void JvmtiTagMap::check_hashmap(GrowableArray<jlong>* objects) {
assert(is_locked(), "checking");
if (is_empty()) { return; }
if (_needs_cleaning &&
post_events &&
objects != NULL &&
env()->is_enabled(JVMTI_EVENT_OBJECT_FREE)) {
remove_dead_entries_locked(true /* post_object_free */);
remove_dead_entries_locked(objects);
}
if (_needs_rehashing) {
log_info(jvmti, table)("TagMap table needs rehashing");
@ -157,7 +156,7 @@ void JvmtiTagMap::check_hashmap(bool post_events) {
}
// This checks for posting and rehashing and is called from the heap walks.
void JvmtiTagMap::check_hashmaps_for_heapwalk() {
void JvmtiTagMap::check_hashmaps_for_heapwalk(GrowableArray<jlong>* objects) {
assert(SafepointSynchronize::is_at_safepoint(), "called from safepoints");
// Verify that the tag map tables are valid and unconditionally post events
@ -168,7 +167,7 @@ void JvmtiTagMap::check_hashmaps_for_heapwalk() {
if (tag_map != NULL) {
// The ZDriver may be walking the hashmaps concurrently so this lock is needed.
MutexLocker ml(tag_map->lock(), Mutex::_no_safepoint_check_flag);
tag_map->check_hashmap(/*post_events*/ true);
tag_map->check_hashmap(objects);
}
}
}
@ -359,7 +358,7 @@ void JvmtiTagMap::set_tag(jobject object, jlong tag) {
// SetTag should not post events because the JavaThread has to
// transition to native for the callback and this cannot stop for
// safepoints with the hashmap lock held.
check_hashmap(/*post_events*/ false);
check_hashmap(NULL); /* don't collect dead objects */
// resolve the object
oop o = JNIHandles::resolve_non_null(object);
@ -394,7 +393,7 @@ jlong JvmtiTagMap::get_tag(jobject object) {
// GetTag should not post events because the JavaThread has to
// transition to native for the callback and this cannot stop for
// safepoints with the hashmap lock held.
check_hashmap(/*post_events*/ false);
check_hashmap(NULL); /* don't collect dead objects */
// resolve the object
oop o = JNIHandles::resolve_non_null(object);
@ -889,15 +888,17 @@ static jint invoke_primitive_field_callback_for_instance_fields(
class VM_HeapIterateOperation: public VM_Operation {
private:
ObjectClosure* _blk;
GrowableArray<jlong>* const _dead_objects;
public:
VM_HeapIterateOperation(ObjectClosure* blk) { _blk = blk; }
VM_HeapIterateOperation(ObjectClosure* blk, GrowableArray<jlong>* objects) :
_blk(blk), _dead_objects(objects) { }
VMOp_Type type() const { return VMOp_HeapIterateOperation; }
void doit() {
// allows class files maps to be cached during iteration
ClassFieldMapCacheMark cm;
JvmtiTagMap::check_hashmaps_for_heapwalk();
JvmtiTagMap::check_hashmaps_for_heapwalk(_dead_objects);
// make sure that heap is parsable (fills TLABs with filler objects)
Universe::heap()->ensure_parsability(false); // no need to retire TLABs
@ -911,7 +912,6 @@ class VM_HeapIterateOperation: public VM_Operation {
// do the iteration
Universe::heap()->object_iterate(_blk);
}
};
@ -1136,14 +1136,20 @@ void JvmtiTagMap::iterate_over_heap(jvmtiHeapObjectFilter object_filter,
object_filter == JVMTI_HEAP_OBJECT_EITHER,
JavaThread::current());
eb.deoptimize_objects_all_threads();
MutexLocker ml(Heap_lock);
IterateOverHeapObjectClosure blk(this,
klass,
object_filter,
heap_object_callback,
user_data);
VM_HeapIterateOperation op(&blk);
VMThread::execute(&op);
Arena dead_object_arena(mtServiceability);
GrowableArray <jlong> dead_objects(&dead_object_arena, 10, 0, 0);
{
MutexLocker ml(Heap_lock);
IterateOverHeapObjectClosure blk(this,
klass,
object_filter,
heap_object_callback,
user_data);
VM_HeapIterateOperation op(&blk, &dead_objects);
VMThread::execute(&op);
}
// Post events outside of Heap_lock
post_dead_objects(&dead_objects);
}
@ -1157,67 +1163,83 @@ void JvmtiTagMap::iterate_through_heap(jint heap_filter,
// disabled if virtual threads are enabled with --enable-preview
EscapeBarrier eb(!(heap_filter & JVMTI_HEAP_FILTER_UNTAGGED), JavaThread::current());
eb.deoptimize_objects_all_threads();
MutexLocker ml(Heap_lock);
IterateThroughHeapObjectClosure blk(this,
klass,
heap_filter,
callbacks,
user_data);
VM_HeapIterateOperation op(&blk);
VMThread::execute(&op);
Arena dead_object_arena(mtServiceability);
GrowableArray<jlong> dead_objects(&dead_object_arena, 10, 0, 0);
{
MutexLocker ml(Heap_lock);
IterateThroughHeapObjectClosure blk(this,
klass,
heap_filter,
callbacks,
user_data);
VM_HeapIterateOperation op(&blk, &dead_objects);
VMThread::execute(&op);
}
// Post events outside of Heap_lock
post_dead_objects(&dead_objects);
}
void JvmtiTagMap::remove_dead_entries_locked(bool post_object_free) {
void JvmtiTagMap::remove_dead_entries_locked(GrowableArray<jlong>* objects) {
assert(is_locked(), "precondition");
if (_needs_cleaning) {
// Recheck whether to post object free events under the lock.
post_object_free = post_object_free && env()->is_enabled(JVMTI_EVENT_OBJECT_FREE);
if (!env()->is_enabled(JVMTI_EVENT_OBJECT_FREE)) {
objects = NULL;
}
log_info(jvmti, table)("TagMap table needs cleaning%s",
(post_object_free ? " and posting" : ""));
hashmap()->remove_dead_entries(env(), post_object_free);
((objects != NULL) ? " and posting" : ""));
hashmap()->remove_dead_entries(objects);
_needs_cleaning = false;
}
}
void JvmtiTagMap::remove_dead_entries(bool post_object_free) {
void JvmtiTagMap::remove_dead_entries(GrowableArray<jlong>* objects) {
MutexLocker ml(lock(), Mutex::_no_safepoint_check_flag);
remove_dead_entries_locked(post_object_free);
remove_dead_entries_locked(objects);
}
class VM_JvmtiPostObjectFree: public VM_Operation {
JvmtiTagMap* _tag_map;
public:
VM_JvmtiPostObjectFree(JvmtiTagMap* tag_map) : _tag_map(tag_map) {}
VMOp_Type type() const { return VMOp_Cleanup; }
void doit() {
_tag_map->remove_dead_entries(true /* post_object_free */);
void JvmtiTagMap::post_dead_objects(GrowableArray<jlong>* const objects) {
assert(Thread::current()->is_Java_thread(), "Must post from JavaThread");
if (objects != NULL && objects->length() > 0) {
JvmtiExport::post_object_free(env(), objects);
log_info(jvmti)("%d free object posted", objects->length());
}
}
// Doesn't need a safepoint, just the VM thread
virtual bool evaluate_at_safepoint() const { return false; }
};
// PostObjectFree can't be called by JavaThread, so call it from the VM thread.
void JvmtiTagMap::post_dead_objects_on_vm_thread() {
VM_JvmtiPostObjectFree op(this);
VMThread::execute(&op);
void JvmtiTagMap::remove_and_post_dead_objects() {
ResourceMark rm;
GrowableArray<jlong> objects;
remove_dead_entries(&objects);
post_dead_objects(&objects);
}
void JvmtiTagMap::flush_object_free_events() {
assert_not_at_safepoint();
if (env()->is_enabled(JVMTI_EVENT_OBJECT_FREE)) {
{
MutexLocker ml(lock(), Mutex::_no_safepoint_check_flag);
MonitorLocker ml(lock(), Mutex::_no_safepoint_check_flag);
// If another thread is posting events, let it finish
while (_posting_events) {
ml.wait();
}
if (!_needs_cleaning || is_empty()) {
_needs_cleaning = false;
return;
}
_posting_events = true;
} // Drop the lock so we can do the cleaning on the VM thread.
// Needs both cleaning and event posting (up to some other thread
// getting there first after we dropped the lock).
post_dead_objects_on_vm_thread();
remove_and_post_dead_objects();
{
MonitorLocker ml(lock(), Mutex::_no_safepoint_check_flag);
_posting_events = false;
ml.notify_all();
}
} else {
remove_dead_entries(false);
remove_dead_entries(NULL);
}
}
@ -1329,9 +1351,6 @@ jvmtiError JvmtiTagMap::get_objects_with_tags(const jlong* tags,
// it is collected yet.
entry_iterate(&collector);
}
if (collector.some_dead_found() && env()->is_enabled(JVMTI_EVENT_OBJECT_FREE)) {
post_dead_objects_on_vm_thread();
}
return collector.result(count_ptr, object_result_ptr, tag_result_ptr);
}
@ -2252,6 +2271,9 @@ class VM_HeapWalkOperation: public VM_Operation {
JVMTIBitSet _bitset;
// Dead object tags in JvmtiTagMap
GrowableArray<jlong>* _dead_objects;
bool _following_object_refs; // are we following object references
bool _reporting_primitive_fields; // optional reporting
@ -2293,12 +2315,14 @@ class VM_HeapWalkOperation: public VM_Operation {
VM_HeapWalkOperation(JvmtiTagMap* tag_map,
Handle initial_object,
BasicHeapWalkContext callbacks,
const void* user_data);
const void* user_data,
GrowableArray<jlong>* objects);
VM_HeapWalkOperation(JvmtiTagMap* tag_map,
Handle initial_object,
AdvancedHeapWalkContext callbacks,
const void* user_data);
const void* user_data,
GrowableArray<jlong>* objects);
~VM_HeapWalkOperation();
@ -2310,7 +2334,8 @@ class VM_HeapWalkOperation: public VM_Operation {
VM_HeapWalkOperation::VM_HeapWalkOperation(JvmtiTagMap* tag_map,
Handle initial_object,
BasicHeapWalkContext callbacks,
const void* user_data) {
const void* user_data,
GrowableArray<jlong>* objects) {
_is_advanced_heap_walk = false;
_tag_map = tag_map;
_initial_object = initial_object;
@ -2319,6 +2344,7 @@ VM_HeapWalkOperation::VM_HeapWalkOperation(JvmtiTagMap* tag_map,
_reporting_primitive_array_values = false;
_reporting_string_values = false;
_visit_stack = create_visit_stack();
_dead_objects = objects;
CallbackInvoker::initialize_for_basic_heap_walk(tag_map, _visit_stack, user_data, callbacks, &_bitset);
}
@ -2326,7 +2352,8 @@ VM_HeapWalkOperation::VM_HeapWalkOperation(JvmtiTagMap* tag_map,
VM_HeapWalkOperation::VM_HeapWalkOperation(JvmtiTagMap* tag_map,
Handle initial_object,
AdvancedHeapWalkContext callbacks,
const void* user_data) {
const void* user_data,
GrowableArray<jlong>* objects) {
_is_advanced_heap_walk = true;
_tag_map = tag_map;
_initial_object = initial_object;
@ -2335,6 +2362,7 @@ VM_HeapWalkOperation::VM_HeapWalkOperation(JvmtiTagMap* tag_map,
_reporting_primitive_array_values = (callbacks.array_primitive_value_callback() != NULL);;
_reporting_string_values = (callbacks.string_primitive_value_callback() != NULL);;
_visit_stack = create_visit_stack();
_dead_objects = objects;
CallbackInvoker::initialize_for_advanced_heap_walk(tag_map, _visit_stack, user_data, callbacks, &_bitset);
}
@ -2803,7 +2831,7 @@ void VM_HeapWalkOperation::doit() {
ResourceMark rm;
ClassFieldMapCacheMark cm;
JvmtiTagMap::check_hashmaps_for_heapwalk();
JvmtiTagMap::check_hashmaps_for_heapwalk(_dead_objects);
assert(visit_stack()->is_empty(), "visit stack must be empty");
@ -2842,10 +2870,16 @@ void JvmtiTagMap::iterate_over_reachable_objects(jvmtiHeapRootCallback heap_root
JavaThread* jt = JavaThread::current();
EscapeBarrier eb(true, jt);
eb.deoptimize_objects_all_threads();
MutexLocker ml(Heap_lock);
BasicHeapWalkContext context(heap_root_callback, stack_ref_callback, object_ref_callback);
VM_HeapWalkOperation op(this, Handle(), context, user_data);
VMThread::execute(&op);
Arena dead_object_arena(mtServiceability);
GrowableArray<jlong> dead_objects(&dead_object_arena, 10, 0, 0);
{
MutexLocker ml(Heap_lock);
BasicHeapWalkContext context(heap_root_callback, stack_ref_callback, object_ref_callback);
VM_HeapWalkOperation op(this, Handle(), context, user_data, &dead_objects);
VMThread::execute(&op);
}
// Post events outside of Heap_lock
post_dead_objects(&dead_objects);
}
// iterate over all objects that are reachable from a given object
@ -2855,10 +2889,16 @@ void JvmtiTagMap::iterate_over_objects_reachable_from_object(jobject object,
oop obj = JNIHandles::resolve(object);
Handle initial_object(Thread::current(), obj);
MutexLocker ml(Heap_lock);
BasicHeapWalkContext context(NULL, NULL, object_ref_callback);
VM_HeapWalkOperation op(this, initial_object, context, user_data);
VMThread::execute(&op);
Arena dead_object_arena(mtServiceability);
GrowableArray<jlong> dead_objects(&dead_object_arena, 10, 0, 0);
{
MutexLocker ml(Heap_lock);
BasicHeapWalkContext context(NULL, NULL, object_ref_callback);
VM_HeapWalkOperation op(this, initial_object, context, user_data, &dead_objects);
VMThread::execute(&op);
}
// Post events outside of Heap_lock
post_dead_objects(&dead_objects);
}
// follow references from an initial object or the GC roots
@ -2876,10 +2916,17 @@ void JvmtiTagMap::follow_references(jint heap_filter,
!(heap_filter & JVMTI_HEAP_FILTER_UNTAGGED),
jt);
eb.deoptimize_objects_all_threads();
MutexLocker ml(Heap_lock);
AdvancedHeapWalkContext context(heap_filter, klass, callbacks);
VM_HeapWalkOperation op(this, initial_object, context, user_data);
VMThread::execute(&op);
Arena dead_object_arena(mtServiceability);
GrowableArray<jlong> dead_objects(&dead_object_arena, 10, 0, 0);
{
MutexLocker ml(Heap_lock);
AdvancedHeapWalkContext context(heap_filter, klass, callbacks);
VM_HeapWalkOperation op(this, initial_object, context, user_data, &dead_objects);
VMThread::execute(&op);
}
// Post events outside of Heap_lock
post_dead_objects(&dead_objects);
}
// Concurrent GC needs to call this in relocation pause, so after the objects are moved

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,10 +38,11 @@ class JvmtiTagMap : public CHeapObj<mtInternal> {
private:
JvmtiEnv* _env; // the jvmti environment
Mutex _lock; // lock for this tag map
Monitor _lock; // lock for this tag map
JvmtiTagMapTable* _hashmap; // the hashmap for tags
bool _needs_rehashing;
bool _needs_cleaning;
bool _posting_events;
static bool _has_object_free_events;
@ -51,15 +52,14 @@ class JvmtiTagMap : public CHeapObj<mtInternal> {
// accessors
inline JvmtiEnv* env() const { return _env; }
void check_hashmap(bool post_events);
void check_hashmap(GrowableArray<jlong>* objects);
void entry_iterate(JvmtiTagMapEntryClosure* closure);
void post_dead_objects_on_vm_thread();
public:
// indicates if this tag map is locked
bool is_locked() { return lock()->is_locked(); }
inline Mutex* lock() { return &_lock; }
inline Monitor* lock() { return &_lock; }
JvmtiTagMapTable* hashmap() { return _hashmap; }
@ -109,11 +109,12 @@ class JvmtiTagMap : public CHeapObj<mtInternal> {
jint* count_ptr, jobject** object_result_ptr,
jlong** tag_result_ptr);
void remove_and_post_dead_objects();
void remove_dead_entries(GrowableArray<jlong>* objects);
void remove_dead_entries_locked(GrowableArray<jlong>* objects);
void post_dead_objects(GrowableArray<jlong>* const objects);
void remove_dead_entries(bool post_object_free);
void remove_dead_entries_locked(bool post_object_free);
static void check_hashmaps_for_heapwalk();
static void check_hashmaps_for_heapwalk(GrowableArray<jlong>* objects);
static void set_needs_rehashing() NOT_JVMTI_RETURN;
static void set_needs_cleaning() NOT_JVMTI_RETURN;
static void gc_notification(size_t num_dead_entries) NOT_JVMTI_RETURN;

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -186,8 +186,9 @@ void JvmtiTagMapTable::resize_if_needed() {
}
}
// Serially remove entries for dead oops from the table, and notify jvmti.
void JvmtiTagMapTable::remove_dead_entries(JvmtiEnv* env, bool post_object_free) {
// Serially remove entries for dead oops from the table and store the dead
// oops' tags in the objects array if provided.
void JvmtiTagMapTable::remove_dead_entries(GrowableArray<jlong>* objects) {
int oops_removed = 0;
int oops_counted = 0;
for (int i = 0; i < table_size(); ++i) {
@ -206,19 +207,18 @@ void JvmtiTagMapTable::remove_dead_entries(JvmtiEnv* env, bool post_object_free)
*p = entry->next();
free_entry(entry);
// post the event to the profiler
if (post_object_free) {
JvmtiExport::post_object_free(env, tag);
// collect object tags for posting JVMTI events later
if (objects != NULL) {
objects->append(tag);
}
}
// get next entry
entry = *p;
}
}
log_info(jvmti, table) ("JvmtiTagMap entries counted %d removed %d; %s",
oops_counted, oops_removed, post_object_free ? "free object posted" : "no posting");
log_info(jvmti, table) ("JvmtiTagMap entries counted %d removed %d",
oops_counted, oops_removed);
}
// Rehash oops in the table

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -87,8 +87,8 @@ public:
bool is_empty() const { return number_of_entries() == 0; }
// Cleanup cleared entries and post
void remove_dead_entries(JvmtiEnv* env, bool post_object_free);
// Cleanup cleared entries and store dead object tags in objects array
void remove_dead_entries(GrowableArray<jlong>* objects);
void rehash();
void clear();
};

@ -37,6 +37,7 @@
#include "util.h"
#include "bag.h"
#include "classTrack.h"
#include "eventHandler.h"
#define NOT_TAGGED 0
@ -46,64 +47,14 @@
static jvmtiEnv* trackingEnv;
/*
* A bag containing all the deleted classes' signatures. Must be accessed under
* classTrackLock.
* Invoke the callback when classes are freed.
*/
struct bag* deletedSignatures;
/*
* Lock to keep integrity of deletedSignatures.
*/
static jrawMonitorID classTrackLock;
/*
* Invoke the callback when classes are freed, find and record the signature
* in deletedSignatures. Those are only used in addPreparedClass() by the
* same thread.
*/
static void JNICALL
void JNICALL
cbTrackingObjectFree(jvmtiEnv* jvmti_env, jlong tag)
{
debugMonitorEnter(classTrackLock);
if (deletedSignatures == NULL) {
debugMonitorExit(classTrackLock);
return;
}
*(char**)bagAdd(deletedSignatures) = (char*)jlong_to_ptr(tag);
debugMonitorExit(classTrackLock);
eventHandler_synthesizeUnloadEvent((char*)jlong_to_ptr(tag), getEnv());
}
/*
* Called after class unloads have occurred.
* The signatures of classes which were unloaded are returned.
*/
struct bag *
classTrack_processUnloads(JNIEnv *env)
{
if (deletedSignatures == NULL) {
return NULL;
}
/* Allocate new bag outside classTrackLock lock to avoid deadlock.
*
* Note: jvmtiAllocate/jvmtiDeallocate() may be blocked by ongoing safepoints.
* It is dangerous to call them (via bagCreateBag/bagDestroyBag()) while holding monitor(s),
* because jvmti may post events, e.g. JVMTI_EVENT_OBJECT_FREE at safepoints and event processing
* code may acquire the same monitor(s), e.g. classTrackLock in cbTrackingObjectFree(),
* which can lead to deadlock.
*/
struct bag* new_bag = bagCreateBag(sizeof(char*), 10);
debugMonitorEnter(classTrackLock);
struct bag* deleted = deletedSignatures;
deletedSignatures = new_bag;
debugMonitorExit(classTrackLock);
return deleted;
}
/*
* Add a class to the prepared class table.
*/
void
classTrack_addPreparedClass(JNIEnv *env_unused, jclass klass)
{
@ -169,8 +120,6 @@ setupEvents()
void
classTrack_initialize(JNIEnv *env)
{
deletedSignatures = NULL;
classTrackLock = debugMonitorCreate("Deleted class tag lock");
trackingEnv = getSpecialJvmti();
if (trackingEnv == NULL) {
EXIT_ERROR(AGENT_ERROR_INTERNAL, "Failed to allocate tag-tracking jvmtiEnv");
@ -202,44 +151,3 @@ classTrack_initialize(JNIEnv *env)
EXIT_ERROR(error,"loaded classes array");
}
}
/*
* Called to activate class-tracking when a listener registers for EI_GC_FINISH.
*/
void
classTrack_activate(JNIEnv *env)
{
// Allocate bag outside classTrackLock lock to avoid deadlock.
// See comments in classTrack_processUnloads() for details.
struct bag* new_bag = bagCreateBag(sizeof(char*), 1000);
debugMonitorEnter(classTrackLock);
deletedSignatures = new_bag;
debugMonitorExit(classTrackLock);
}
static jboolean
cleanDeleted(void *signatureVoid, void *arg)
{
char* sig = *(char**)signatureVoid;
jvmtiDeallocate(sig);
return JNI_TRUE;
}
/*
* Called when agent detaches.
*/
void
classTrack_reset(void)
{
debugMonitorEnter(classTrackLock);
struct bag* to_delete = deletedSignatures;
deletedSignatures = NULL;
debugMonitorExit(classTrackLock);
// Deallocate bag outside classTrackLock to avoid deadlock.
// See comments in classTrack_processUnloads() for details.
if (to_delete != NULL) {
bagEnumerateOver(to_delete, cleanDeleted, NULL);
bagDestroyBag(to_delete);
}
}

@ -795,7 +795,6 @@ debugInit_reset(JNIEnv *env)
threadControl_reset();
util_reset();
commonRef_reset(env);
classTrack_reset();
/*
* If this is a server, we are now ready to accept another connection.

@ -457,16 +457,10 @@ reportEvents(JNIEnv *env, jbyte sessionID, jthread thread, EventIndex ei,
}
}
/* A bagEnumerateFunction. Create a synthetic class unload event
* for every class no longer present. Analogous to event_callback
* combined with a handler in a unload specific (no event
* structure) kind of way.
*/
static jboolean
synthesizeUnloadEvent(void *signatureVoid, void *envVoid)
/* Create a synthetic class unload event for the specified signature. */
jboolean
eventHandler_synthesizeUnloadEvent(char *signature, JNIEnv *env)
{
JNIEnv *env = (JNIEnv *)envVoid;
char *signature = *(char **)signatureVoid;
char *classname;
HandlerNode *node;
jbyte eventSessionID = currentSessionID;
@ -620,39 +614,10 @@ event_callback(JNIEnv *env, EventInfo *evinfo)
currentException = JNI_FUNC_PTR(env,ExceptionOccurred)(env);
JNI_FUNC_PTR(env,ExceptionClear)(env);
/* See if a garbage collection finish event happened earlier.
*
* Note: The "if" is an optimization to avoid entering the lock on every
* event; garbageCollected may be zapped before we enter
* the lock but then this just becomes one big no-op.
*/
if ( garbageCollected > 0 ) {
struct bag *unloadedSignatures = NULL;
/* We want to compact the hash table of all
* objects sent to the front end by removing objects that have
* been collected.
*/
/* See if a garbage collection finish event happened earlier. */
if ( garbageCollected > 0) {
commonRef_compact();
/* We also need to simulate the class unload events. */
debugMonitorEnter(handlerLock);
/* Clear garbage collection counter */
garbageCollected = 0;
/* Analyze which class unloads occurred */
unloadedSignatures = classTrack_processUnloads(env);
debugMonitorExit(handlerLock);
/* Generate the synthetic class unload events and/or just cleanup. */
if ( unloadedSignatures != NULL ) {
(void)bagEnumerateOver(unloadedSignatures, synthesizeUnloadEvent,
(void *)env);
bagDestroyBag(unloadedSignatures);
}
}
thread = evinfo->thread;
@ -1709,9 +1674,6 @@ installHandler(HandlerNode *node,
node->handlerID = external? ++requestIdCounter : 0;
error = eventFilterRestricted_install(node);
if (node->ei == EI_GC_FINISH) {
classTrack_activate(getEnv());
}
if (error == JVMTI_ERROR_NONE) {
insert(getHandlerChain(node->ei), node);
}

@ -76,6 +76,7 @@ void eventHandler_reset(jbyte sessionID);
void eventHandler_lock(void);
void eventHandler_unlock(void);
jboolean eventHandler_synthesizeUnloadEvent(char *signature, JNIEnv *env);
jclass getMethodClass(jvmtiEnv *jvmti_env, jmethodID method);

@ -136,7 +136,6 @@ serviceability/sa/TestJmapCoreMetaspace.java 8269982,8267433 macosx-aarch64,maco
#############################################################################
vmTestbase/nsk/jdi/HiddenClass/events/events001.java 8257705 generic-all
vmTestbase/nsk/jdi/ThreadReference/stop/stop001/TestDescription.java 7034630 generic-all
vmTestbase/metaspace/gc/firstGC_10m/TestDescription.java 8208250 generic-all

@ -0,0 +1,182 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8256811
* @modules java.base/jdk.internal.org.objectweb.asm
* java.base/jdk.internal.misc
* @library /test/lib
* @build jdk.test.whitebox.WhiteBox
* @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox
* @run main/othervm/native ClassUnloadEventTest run
*/
import jdk.internal.org.objectweb.asm.ClassWriter;
import jdk.internal.org.objectweb.asm.Label;
import jdk.internal.org.objectweb.asm.MethodVisitor;
import jdk.internal.org.objectweb.asm.Opcodes;
import jdk.test.lib.classloader.ClassUnloadCommon;
import com.sun.jdi.*;
import com.sun.jdi.connect.*;
import com.sun.jdi.event.*;
import com.sun.jdi.request.*;
import java.util.*;
import java.io.*;
/**
 * Regression test for delayed/missed JDWP class unloading events (8256811).
 *
 * Runs in two modes from the same class:
 *  - debuggee (no args): loads NUM_CLASSES dynamically generated classes
 *    (the first NUM_ALT_CLASSES with the ALT prefix), drops the loader,
 *    and forces class unloading.
 *  - debugger (any arg): launches the debuggee over JDI with two
 *    ClassUnloadRequests (a broad filter and an ALT-only filter) and
 *    verifies every expected ClassUnloadEvent arrives, on the correct
 *    request, with the correct EventSet size.
 */
public class ClassUnloadEventTest {
    static final String CLASS_NAME_PREFIX = "SampleClass__";
    static final String CLASS_NAME_ALT_PREFIX = CLASS_NAME_PREFIX + "Alt__";
    static final int NUM_CLASSES = 10;
    static final int NUM_ALT_CLASSES = NUM_CLASSES / 2;

    public static void main(String[] args) throws Exception {
        if (args.length == 0) {
            runDebuggee();
        } else {
            runDebugger();
        }
    }

    /**
     * Class loader that synthesizes a trivial class for any name starting
     * with CLASS_NAME_PREFIX. The generated classes become unreachable
     * (and therefore unloadable) as soon as this loader is discarded.
     */
    private static class TestClassLoader extends ClassLoader implements Opcodes {
        /** Builds bytecode for a minimal class with one empty static method. */
        private static byte[] generateSampleClass(String name) {
            ClassWriter cw = new ClassWriter(0);
            cw.visit(52, ACC_SUPER | ACC_PUBLIC, name, null, "java/lang/Object", null);
            MethodVisitor mv = cw.visitMethod(ACC_PUBLIC | ACC_STATIC, "m", "()V", null, null);
            mv.visitCode();
            mv.visitInsn(RETURN);
            mv.visitMaxs(0, 0);
            mv.visitEnd();
            cw.visitEnd();
            return cw.toByteArray();
        }

        @Override
        protected Class<?> findClass(String name) throws ClassNotFoundException {
            if (name.startsWith(CLASS_NAME_PREFIX)) {
                byte[] bytecode = generateSampleClass(name);
                return defineClass(name, bytecode, 0, bytecode.length);
            } else {
                return super.findClass(name);
            }
        }
    }

    /** Debuggee side: load the sample classes, then trigger their unloading. */
    private static void runDebuggee() {
        System.out.println("Running debuggee");
        ClassLoader loader = new TestClassLoader();
        for (int index = 0; index < NUM_CLASSES; index++) {
            try {
                if (index < NUM_ALT_CLASSES) {
                    Class.forName(CLASS_NAME_ALT_PREFIX + index, true, loader);
                } else {
                    Class.forName(CLASS_NAME_PREFIX + index, true, loader);
                }
            } catch (Exception e) {
                // Chain the original exception so the real failure is not lost.
                throw new RuntimeException("Failed to create Sample class", e);
            }
        }
        loader = null;

        // Trigger class unloading
        ClassUnloadCommon.triggerUnloading();
    }

    /** Debugger side: launch the debuggee and verify all unload events. */
    private static void runDebugger() throws Exception {
        System.out.println("Running debugger");
        HashSet<String> unloadedSampleClasses = new HashSet<>();
        HashSet<String> unloadedSampleClasses_alt = new HashSet<>();
        VirtualMachine vm = connectAndLaunchVM();

        // Broad request: matches every generated sample class.
        ClassUnloadRequest classUnloadRequest = vm.eventRequestManager().createClassUnloadRequest();
        classUnloadRequest.addClassFilter(CLASS_NAME_PREFIX + "*");
        classUnloadRequest.enable();

        // Narrow request: matches only the ALT-prefixed sample classes.
        ClassUnloadRequest classUnloadRequest_alt = vm.eventRequestManager().createClassUnloadRequest();
        classUnloadRequest_alt.addClassFilter(CLASS_NAME_ALT_PREFIX + "*");
        classUnloadRequest_alt.enable();

        EventSet eventSet = null;
        boolean exited = false;
        while (!exited && (eventSet = vm.eventQueue().remove()) != null) {
            System.out.println("EventSet: " + eventSet);
            for (Event event : eventSet) {
                if (event instanceof ClassUnloadEvent) {
                    String className = ((ClassUnloadEvent)event).className();

                    // The unloaded class should always match CLASS_NAME_PREFIX.
                    if (className.indexOf(CLASS_NAME_PREFIX) == -1) {
                        throw new RuntimeException("FAILED: Unexpected unloaded class: " + className);
                    }

                    // Unloaded classes with ALT names should only occur on the classUnloadRequest_alt.
                    if (event.request() == classUnloadRequest_alt) {
                        unloadedSampleClasses_alt.add(className);
                        if (className.indexOf(CLASS_NAME_ALT_PREFIX) == -1) {
                            throw new RuntimeException("FAILED: non-alt class unload event for classUnloadRequest_alt.");
                        }
                    } else {
                        unloadedSampleClasses.add(className);
                    }

                    // If the unloaded class matches the ALT prefix, then we should have
                    // unload events in this EventSet for each of the two ClassUnloadRequests.
                    int expectedEventSetSize;
                    if (className.indexOf(CLASS_NAME_ALT_PREFIX) != -1) {
                        expectedEventSetSize = 2;
                    } else {
                        expectedEventSetSize = 1;
                    }
                    if (eventSet.size() != expectedEventSetSize) {
                        throw new RuntimeException("FAILED: Unexpected eventSet size: " + eventSet.size());
                    }
                }
                if (event instanceof VMDeathEvent) {
                    exited = true;
                    break;
                }
            }
            eventSet.resume();
        }

        if (unloadedSampleClasses.size() != NUM_CLASSES) {
            throw new RuntimeException("Wrong number of class unload events: expected " + NUM_CLASSES + " got " + unloadedSampleClasses.size());
        }
        if (unloadedSampleClasses_alt.size() != NUM_ALT_CLASSES) {
            throw new RuntimeException("Wrong number of alt class unload events: expected " + NUM_ALT_CLASSES + " got " + unloadedSampleClasses_alt.size());
        }
    }

    /** Launches the debuggee JVM via the default JDI launching connector. */
    private static VirtualMachine connectAndLaunchVM() throws IOException,
                                                              IllegalConnectorArgumentsException,
                                                              VMStartException {
        LaunchingConnector launchingConnector = Bootstrap.virtualMachineManager().defaultConnector();
        Map<String, Connector.Argument> arguments = launchingConnector.defaultArguments();
        arguments.get("main").setValue(ClassUnloadEventTest.class.getName());
        arguments.get("options").setValue("--add-exports java.base/jdk.internal.org.objectweb.asm=ALL-UNNAMED -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI");
        return launchingConnector.launch(arguments);
    }
}