8212879: Make JVMTI TagMap table concurrent

Co-authored-by: Kim Barrett <kbarrett@openjdk.org>
Co-authored-by: Coleen Phillimore <coleenp@openjdk.org>
Reviewed-by: stefank, ihse, zgu, eosterlund, sspitsyn, kbarrett
Coleen Phillimore 2020-11-19 14:30:02 +00:00
parent 3a4b90f086
commit ba721f5f2f
49 changed files with 904 additions and 1127 deletions

View File

@ -84,7 +84,7 @@ ifneq ($(call check-jvm-feature, jvmti), true)
jvmtiImpl.cpp jvmtiManageCapabilities.cpp jvmtiRawMonitor.cpp jvmtiUtil.cpp jvmtiTrace.cpp \
jvmtiCodeBlobEvents.cpp jvmtiEnv.cpp jvmtiRedefineClasses.cpp jvmtiEnvBase.cpp jvmtiEnvThreadState.cpp \
jvmtiTagMap.cpp jvmtiEventController.cpp evmCompat.cpp jvmtiEnter.xsl jvmtiExport.cpp \
jvmtiClassFileReconstituter.cpp
jvmtiClassFileReconstituter.cpp jvmtiTagMapTable.cpp
endif
ifneq ($(call check-jvm-feature, jvmci), true)

View File

@ -96,23 +96,6 @@ void Dictionary::free_entry(DictionaryEntry* entry) {
}
const int _resize_load_trigger = 5; // load factor that will trigger the resize
const double _resize_factor = 2.0; // by how much we will resize using current number of entries
const int _resize_max_size = 40423; // the max dictionary size allowed
const int _primelist[] = {107, 1009, 2017, 4049, 5051, 10103, 20201, _resize_max_size};
const int _prime_array_size = sizeof(_primelist)/sizeof(int);
// Calculate next "good" dictionary size based on requested count
static int calculate_dictionary_size(int requested) {
int newsize = _primelist[0];
int index = 0;
for (newsize = _primelist[index]; index < (_prime_array_size - 1);
newsize = _primelist[++index]) {
if (requested <= newsize) {
break;
}
}
return newsize;
}
bool Dictionary::does_any_dictionary_needs_resizing() {
return Dictionary::_some_dictionary_needs_resizing;
@ -128,15 +111,14 @@ void Dictionary::check_if_needs_resize() {
}
bool Dictionary::resize_if_needed() {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
int desired_size = 0;
if (_needs_resizing == true) {
desired_size = calculate_dictionary_size((int)(_resize_factor*number_of_entries()));
if (desired_size >= _resize_max_size) {
desired_size = _resize_max_size;
// We have reached the limit, turn resizing off
_resizable = false;
}
if ((desired_size != 0) && (desired_size != table_size())) {
desired_size = calculate_resize(false);
assert(desired_size != 0, "bug in calculate_resize");
if (desired_size == table_size()) {
_resizable = false; // hit max
} else {
if (!resize(desired_size)) {
// Something went wrong, turn resizing off
_resizable = false;
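The refactor above replaces Dictionary's private prime-list logic with the shared BasicHashtable::calculate_resize() (its implementation appears in the hashtable.cpp hunk at the end of this diff). A compact standalone sketch of the new gating, with hypothetical names (calculate_resize_model, DictModel): calculate_resize() returning the current size is the signal that the largest allowed size was reached, so resizing is switched off rather than retried.

#include <algorithm>

static int calculate_resize_model(int requested, int max_size) {
  // stand-in for the prime-table lookup; the real version rounds up to a prime
  return std::min(requested, max_size);
}

struct DictModel {
  int size;
  bool resizable = true;
  void resize_if_needed(int entries) {
    int desired = calculate_resize_model(2 * entries, /*max_size=*/40423);
    if (desired == size) {
      resizable = false;   // hit max: stop asking
    } else {
      size = desired;      // model of a successful resize(desired)
    }
  }
};

int main() {
  DictModel d{40423};
  d.resize_if_needed(50000);
  return d.resizable ? 1 : 0;  // cap was hit, so resizing is now off
}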

View File

@ -38,7 +38,7 @@ class OopStorageSet : public AllStatic {
public:
// Must be updated when new OopStorages are introduced
static const uint strong_count = 4 JVMTI_ONLY(+ 1);
static const uint weak_count = 5 JFR_ONLY(+ 1);
static const uint weak_count = 5 JVMTI_ONLY(+ 1) JFR_ONLY(+ 1);
static const uint all_count = strong_count + weak_count;
private:
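The new JVMTI_ONLY(+ 1) term accounts for the tag map's weak OopStorage. A minimal standalone sketch of how these conditional-compilation macros shape the compile-time count; the macro shapes follow HotSpot's macros.hpp, everything else here is illustrative.

#define INCLUDE_JVMTI 1
#define INCLUDE_JFR 0

#if INCLUDE_JVMTI
#define JVMTI_ONLY(code) code
#else
#define JVMTI_ONLY(code)
#endif

#if INCLUDE_JFR
#define JFR_ONLY(code) code
#else
#define JFR_ONLY(code)
#endif

// With JVMTI built in, the JVMTI tag map's weak OopStorage adds one weak slot.
static const unsigned weak_count = 5 JVMTI_ONLY(+ 1) JFR_ONLY(+ 1);
static_assert(weak_count == 6, "JVMTI adds exactly one weak storage slot");

int main() { return 0; }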

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,16 +37,30 @@
#include "runtime/globals.hpp"
#include "utilities/macros.hpp"
void WeakProcessor::do_serial_parts(BoolObjectClosure* is_alive,
OopClosure* keep_alive) {
WeakProcessorPhases::Iterator it = WeakProcessorPhases::serial_iterator();
for ( ; !it.is_end(); ++it) {
WeakProcessorPhases::processor(*it)(is_alive, keep_alive);
}
#if INCLUDE_JVMTI
#include "prims/jvmtiTagMap.hpp"
#endif // INCLUDE_JVMTI
void notify_jvmti_tagmaps() {
#if INCLUDE_JVMTI
// Notify JVMTI tagmaps that a STW weak reference processing might be
// clearing entries, so the tagmaps need cleaning. Doing this here allows
// the tagmap's oopstorage notification handler to not care whether it's
// invoked by STW or concurrent reference processing.
JvmtiTagMap::set_needs_cleaning();
// Notify JVMTI tagmaps that a STW collection may have moved objects, so
// the tagmaps need rehashing. This isn't the right place for this, but
// is convenient because all the STW collectors use WeakProcessor. One
// problem is that the end of a G1 concurrent collection also comes here,
// possibly triggering unnecessary rehashes.
JvmtiTagMap::set_needs_rehashing();
#endif // INCLUDE_JVMTI
}
void WeakProcessor::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive) {
do_serial_parts(is_alive, keep_alive);
notify_jvmti_tagmaps();
OopStorageSet::Iterator it = OopStorageSet::weak_iterator();
for ( ; !it.is_end(); ++it) {
@ -61,8 +75,6 @@ void WeakProcessor::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* keep_a
}
void WeakProcessor::oops_do(OopClosure* closure) {
AlwaysTrueClosure always_true;
do_serial_parts(&always_true, closure);
OopStorageSet::Iterator it = OopStorageSet::weak_iterator();
for ( ; !it.is_end(); ++it) {
@ -106,12 +118,12 @@ void WeakProcessor::Task::initialize() {
if (_phase_times) {
_phase_times->set_active_workers(_nworkers);
}
notify_jvmti_tagmaps();
}
WeakProcessor::Task::Task(uint nworkers) :
_phase_times(NULL),
_nworkers(nworkers),
_serial_phases_done(WeakProcessorPhases::serial_phase_count),
_storage_states()
{
initialize();
@ -120,7 +132,6 @@ WeakProcessor::Task::Task(uint nworkers) :
WeakProcessor::Task::Task(WeakProcessorPhaseTimes* phase_times, uint nworkers) :
_phase_times(phase_times),
_nworkers(nworkers),
_serial_phases_done(WeakProcessorPhases::serial_phase_count),
_storage_states()
{
initialize();
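The pattern above is the heart of the change: GC pauses no longer walk the tag maps directly; they only set flags, and each tag map cleans and rehashes itself lazily on next use. A standalone model with hypothetical types (TagMapModel), not HotSpot code:

#include <atomic>

class TagMapModel {
  std::atomic<bool> _needs_cleaning{false};
  std::atomic<bool> _needs_rehashing{false};
public:
  // Called from GC code, e.g. the equivalent of notify_jvmti_tagmaps().
  void set_needs_cleaning()  { _needs_cleaning.store(true, std::memory_order_release); }
  void set_needs_rehashing() { _needs_rehashing.store(true, std::memory_order_release); }

  // Called with the tag map lock held, before any lookup or update.
  void check_before_use() {
    if (_needs_cleaning.exchange(false, std::memory_order_acq_rel)) {
      // drop entries whose weakly referenced objects died
    }
    if (_needs_rehashing.exchange(false, std::memory_order_acq_rel)) {
      // recompute hashes: a moving collector may have relocated objects
    }
  }
};

int main() {
  TagMapModel m;
  m.set_needs_cleaning();
  m.check_before_use();
  return 0;
}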

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -72,8 +72,6 @@ public:
private:
class GangTask;
static void do_serial_parts(BoolObjectClosure* is_alive, OopClosure* keep_alive);
};
class WeakProcessor::Task {
@ -81,7 +79,6 @@ class WeakProcessor::Task {
WeakProcessorPhaseTimes* _phase_times;
uint _nworkers;
SubTasksDone _serial_phases_done;
OopStorageSetWeakParState<false, false> _storage_states;
void initialize();

View File

@ -96,33 +96,17 @@ void WeakProcessor::Task::work(uint worker_id,
typedef WeakProcessorPhases::Iterator Iterator;
for (Iterator it = WeakProcessorPhases::serial_iterator(); !it.is_end(); ++it) {
WeakProcessorPhase phase = *it;
CountingIsAliveClosure<IsAlive> cl(is_alive);
uint serial_index = WeakProcessorPhases::serial_index(phase);
if (_serial_phases_done.try_claim_task(serial_index)) {
WeakProcessorPhaseTimeTracker pt(_phase_times, phase);
WeakProcessorPhases::processor(phase)(&cl, keep_alive);
if (_phase_times != NULL) {
_phase_times->record_phase_items(phase, cl.num_dead(), cl.num_total());
}
}
}
for (Iterator it = WeakProcessorPhases::oopstorage_iterator(); !it.is_end(); ++it) {
WeakProcessorPhase phase = *it;
CountingSkippedIsAliveClosure<IsAlive, KeepAlive> cl(is_alive, keep_alive);
WeakProcessorPhaseTimeTracker pt(_phase_times, phase, worker_id);
uint oopstorage_index = WeakProcessorPhases::oopstorage_index(phase);
StorageState* cur_state = _storage_states.par_state(oopstorage_index);
StorageState* cur_state = _storage_states.par_state(phase);
cur_state->oops_do(&cl);
cur_state->increment_num_dead(cl.num_skipped() + cl.num_dead());
if (_phase_times != NULL) {
_phase_times->record_worker_items(worker_id, phase, cl.num_dead(), cl.num_total());
}
}
_serial_phases_done.all_tasks_completed(_nworkers);
}
class WeakProcessor::GangTask : public AbstractGangTask {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -33,48 +33,12 @@
#include "utilities/globalDefinitions.hpp"
#include "utilities/ticks.hpp"
static uint serial_phase_index(WeakProcessorPhase phase) {
return WeakProcessorPhases::serial_index(phase);
}
static bool is_serial_phase(WeakProcessorPhase phase) {
return WeakProcessorPhases::is_serial(phase);
}
static void assert_serial_phase(WeakProcessorPhase phase) {
assert(is_serial_phase(phase),
"Not a serial phase %u", static_cast<uint>(phase));
}
static void assert_oopstorage_phase(WeakProcessorPhase phase) {
assert(WeakProcessorPhases::is_oopstorage(phase),
"Not an oopstorage phase %u", static_cast<uint>(phase));
}
const double uninitialized_time = -1.0;
#ifdef ASSERT
static bool is_initialized_time(double t) { return t >= 0.0; }
static bool is_initialized_items(size_t i) { return i != 0; }
#endif // ASSERT
static void reset_times(double* times, size_t ntimes) {
for (size_t i = 0; i < ntimes; ++i) {
times[i] = uninitialized_time;
}
}
static void reset_items(size_t* items, size_t nitems) {
for (size_t i = 0; i < nitems; ++i) {
items[i] = 0;
}
}
void WeakProcessorPhaseTimes::reset_phase_data() {
reset_times(_phase_times_sec, ARRAY_SIZE(_phase_times_sec));
reset_items(_phase_dead_items, ARRAY_SIZE(_phase_dead_items));
reset_items(_phase_total_items, ARRAY_SIZE(_phase_total_items));
}
WeakProcessorPhaseTimes::WeakProcessorPhaseTimes(uint max_threads) :
_max_threads(max_threads),
@ -84,8 +48,6 @@ WeakProcessorPhaseTimes::WeakProcessorPhaseTimes(uint max_threads) :
{
assert(_max_threads > 0, "max_threads must not be zero");
reset_phase_data();
WorkerDataArray<double>** wpt = _worker_data;
OopStorageSet::Iterator it = OopStorageSet::weak_iterator();
for ( ; !it.is_end(); ++it) {
@ -122,7 +84,6 @@ void WeakProcessorPhaseTimes::set_active_workers(uint n) {
void WeakProcessorPhaseTimes::reset() {
_active_workers = 0;
_total_time_sec = uninitialized_time;
reset_phase_data();
for (size_t i = 0; i < ARRAY_SIZE(_worker_data); ++i) {
_worker_data[i]->reset();
}
@ -138,34 +99,8 @@ void WeakProcessorPhaseTimes::record_total_time_sec(double time_sec) {
_total_time_sec = time_sec;
}
double WeakProcessorPhaseTimes::phase_time_sec(WeakProcessorPhase phase) const {
assert_serial_phase(phase);
assert(is_initialized_time(_phase_times_sec[serial_phase_index(phase)]),
"phase time not set %u", serial_phase_index(phase));
return _phase_times_sec[serial_phase_index(phase)];
}
void WeakProcessorPhaseTimes::record_phase_time_sec(WeakProcessorPhase phase, double time_sec) {
assert_serial_phase(phase);
assert(!is_initialized_time(_phase_times_sec[serial_phase_index(phase)]),
"Already set time for phase %u", serial_phase_index(phase));
_phase_times_sec[serial_phase_index(phase)] = time_sec;
}
void WeakProcessorPhaseTimes::record_phase_items(WeakProcessorPhase phase, size_t num_dead, size_t num_total) {
assert_serial_phase(phase);
uint p = serial_phase_index(phase);
assert(!is_initialized_items(_phase_dead_items[p]),
"Already set dead items for phase %u", p);
assert(!is_initialized_items(_phase_total_items[p]),
"Already set total items for phase %u", p);
_phase_dead_items[p] = num_dead;
_phase_total_items[p] = num_total;
}
WorkerDataArray<double>* WeakProcessorPhaseTimes::worker_data(WeakProcessorPhase phase) const {
assert_oopstorage_phase(phase);
return _worker_data[WeakProcessorPhases::oopstorage_index(phase)];
return _worker_data[phase];
}
double WeakProcessorPhaseTimes::worker_time_sec(uint worker_id, WeakProcessorPhase phase) const {
@ -213,29 +148,15 @@ WeakProcessorPhaseTimeTracker::WeakProcessorPhaseTimeTracker(WeakProcessorPhaseT
_worker_id(worker_id),
_start_time(Ticks::now())
{
assert_oopstorage_phase(_phase);
assert(_times == NULL || worker_id < _times->active_workers(),
"Invalid worker_id %u", worker_id);
}
WeakProcessorPhaseTimeTracker::WeakProcessorPhaseTimeTracker(WeakProcessorPhaseTimes* times,
WeakProcessorPhase phase) :
_times(times),
_phase(phase),
_worker_id(0),
_start_time(Ticks::now())
{
assert_serial_phase(phase);
}
WeakProcessorPhaseTimeTracker::~WeakProcessorPhaseTimeTracker() {
if (_times != NULL) {
double time_sec = elapsed_time_sec(_start_time, Ticks::now());
if (is_serial_phase(_phase)) {
_times->record_phase_time_sec(_phase, time_sec);
} else {
_times->record_worker_time_sec(_worker_id, _phase, time_sec);
}
_times->record_worker_time_sec(_worker_id, _phase, time_sec);
}
}
@ -251,25 +172,6 @@ static const char* indent_str(size_t i) {
#define TIME_FORMAT "%.1lfms"
void WeakProcessorPhaseTimes::log_st_phase(WeakProcessorPhase phase,
uint indent) const {
assert_serial_phase(phase);
log_debug(gc, phases)("%s%s: " TIME_FORMAT,
indent_str(indent),
WeakProcessorPhases::description(phase),
phase_time_sec(phase) * MILLIUNITS);
log_debug(gc, phases)("%s%s: " SIZE_FORMAT,
indent_str(indent + 1),
"Dead",
_phase_dead_items[serial_phase_index(phase)]);
log_debug(gc, phases)("%s%s: " SIZE_FORMAT,
indent_str(indent + 1),
"Total",
_phase_total_items[serial_phase_index(phase)]);
}
void WeakProcessorPhaseTimes::log_mt_phase_summary(WeakProcessorPhase phase,
uint indent) const {
LogTarget(Debug, gc, phases) lt;
@ -302,9 +204,6 @@ void WeakProcessorPhaseTimes::log_mt_phase_details(WorkerDataArray<T>* data,
void WeakProcessorPhaseTimes::log_print_phases(uint indent) const {
if (log_is_enabled(Debug, gc, phases)) {
typedef WeakProcessorPhases::Iterator Iterator;
for (Iterator it = WeakProcessorPhases::serial_iterator(); !it.is_end(); ++it) {
log_st_phase(*it, indent);
}
for (Iterator it = WeakProcessorPhases::oopstorage_iterator(); !it.is_end(); ++it) {
log_mt_phase_summary(*it, indent);
}

View File

@ -43,15 +43,6 @@ class WeakProcessorPhaseTimes : public CHeapObj<mtGC> {
// Total time for weak processor.
double _total_time_sec;
// Total time and associated items for each serially processed phase.
static const uint phase_data_count = WeakProcessorPhases::serial_phase_count;
// +1 because serial_phase_count == 0 in some build configurations.
// Simpler to always allocate extra space than conditionalize.
double _phase_times_sec[phase_data_count + 1];
size_t _phase_dead_items[phase_data_count + 1];
size_t _phase_total_items[phase_data_count + 1];
void reset_phase_data();
// Per-worker times and linked items.
static const uint worker_data_count = WeakProcessorPhases::oopstorage_phase_count;
WorkerDataArray<double>* _worker_data[worker_data_count];
@ -108,14 +99,8 @@ private:
Ticks _start_time;
public:
// For tracking serial phase times.
// Precondition: WeakProcessorPhases::is_serial(phase)
WeakProcessorPhaseTimeTracker(WeakProcessorPhaseTimes* times,
WeakProcessorPhase phase);
// For tracking possibly parallel phase times (even if processed by
// only one thread).
// Precondition: WeakProcessorPhases::is_oopstorage(phase)
// Precondition: worker_id < times->max_threads().
WeakProcessorPhaseTimeTracker(WeakProcessorPhaseTimes* times,
WeakProcessorPhase phase,

View File

@ -27,63 +27,6 @@
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_JVMTI
#include "prims/jvmtiExport.hpp"
#endif // INCLUDE_JVMTI
// serial_phase_count is 0 if JVMTI is not built,
// requiring some code to be careful to avoid tautological checks
// that some compilers warn about.
#define HAVE_SERIAL_PHASES INCLUDE_JVMTI
WeakProcessorPhases::Phase WeakProcessorPhases::serial_phase(uint value) {
#if HAVE_SERIAL_PHASES
assert(value < serial_phase_count, "Invalid serial phase value %u", value);
return static_cast<Phase>(value + serial_phase_start);
#else
STATIC_ASSERT(serial_phase_count == 0);
fatal("invalid serial phase value %u", value);
return static_cast<Phase>(serial_phase_start);
#endif // HAVE_SERIAL_PHASES
}
WeakProcessorPhases::Phase WeakProcessorPhases::oopstorage_phase(uint value) {
assert(value < oopstorage_phase_count, "Invalid oopstorage phase value %u", value);
return static_cast<Phase>(value + oopstorage_phase_start);
}
static uint raw_phase_index(WeakProcessorPhases::Phase phase) {
return static_cast<uint>(phase);
}
uint WeakProcessorPhases::serial_index(Phase phase) {
assert(is_serial(phase), "not serial phase %u", raw_phase_index(phase));
return raw_phase_index(phase) - serial_phase_start;
}
uint WeakProcessorPhases::oopstorage_index(Phase phase) {
assert(is_oopstorage(phase), "not oopstorage phase %u", raw_phase_index(phase));
return raw_phase_index(phase) - oopstorage_phase_start;
}
static bool is_phase(WeakProcessorPhases::Phase phase, uint start, uint count) {
return (raw_phase_index(phase) - start) < count;
}
bool WeakProcessorPhases::is_serial(Phase phase) {
#if HAVE_SERIAL_PHASES
return is_phase(phase, serial_phase_start, serial_phase_count);
#else
STATIC_ASSERT(serial_phase_count == 0);
return false;
#endif // HAVE_SERIAL_PHASES
}
bool WeakProcessorPhases::is_oopstorage(Phase phase) {
return is_phase(phase, oopstorage_phase_start, oopstorage_phase_count);
}
#ifdef ASSERT
void WeakProcessorPhases::Iterator::verify_nonsingular() const {
@ -101,21 +44,3 @@ void WeakProcessorPhases::Iterator::verify_dereferenceable() const {
}
#endif // ASSERT
const char* WeakProcessorPhases::description(Phase phase) {
switch (phase) {
JVMTI_ONLY(case jvmti: return "JVMTI weak processing";)
default:
ShouldNotReachHere();
return "Invalid serial weak processing phase";
}
}
WeakProcessorPhases::Processor WeakProcessorPhases::processor(Phase phase) {
switch (phase) {
JVMTI_ONLY(case jvmti: return &JvmtiExport::weak_oops_do;)
default:
ShouldNotReachHere();
return NULL;
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,43 +38,15 @@ class WeakProcessorPhases : AllStatic {
public:
class Iterator;
typedef void (*Processor)(BoolObjectClosure*, OopClosure*);
enum Phase {
// Serial phase.
JVMTI_ONLY(jvmti)
// Additional implicit phase values follow for oopstorages.
// Implicit phase values for oopstorages.
};
static const uint serial_phase_start = 0;
static const uint serial_phase_count = 0 JVMTI_ONLY(+ 1);
static const uint oopstorage_phase_start = serial_phase_count;
static const uint oopstorage_phase_start = 0;
static const uint oopstorage_phase_count = OopStorageSet::weak_count;
static const uint phase_count = serial_phase_count + oopstorage_phase_count;
static const uint phase_count = oopstorage_phase_count;
// Precondition: value < serial_phase_count
static Phase serial_phase(uint value);
// Precondition: value < oopstorage_phase_count
static Phase oopstorage_phase(uint value);
// Indexes relative to the corresponding phase_start constant.
// Precondition: is_serial(phase) or is_oopstorage(phase) accordingly
static uint serial_index(Phase phase);
static uint oopstorage_index(Phase phase);
static bool is_serial(Phase phase);
static bool is_oopstorage(Phase phase);
static Iterator serial_iterator();
static Iterator oopstorage_iterator();
// Precondition: is_serial(phase)
static const char* description(Phase phase);
// Precondition: is_serial(phase)
static Processor processor(Phase phase);
};
typedef WeakProcessorPhases::Phase WeakProcessorPhase;
@ -111,13 +83,12 @@ public:
return !operator==(other);
}
Phase operator*() const {
WeakProcessorPhase operator*() const {
verify_dereferenceable();
return static_cast<Phase>(_index);
return static_cast<WeakProcessorPhase>(_index);
}
// Phase doesn't have members, so no operator->().
Iterator& operator++() {
verify_dereferenceable();
++_index;
@ -140,10 +111,6 @@ public:
}
};
inline WeakProcessorPhases::Iterator WeakProcessorPhases::serial_iterator() {
return Iterator(serial_phase_start, serial_phase_start + serial_phase_count);
}
inline WeakProcessorPhases::Iterator WeakProcessorPhases::oopstorage_iterator() {
return Iterator(oopstorage_phase_start, oopstorage_phase_start + oopstorage_phase_count);
}
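With the serial JVMTI phase gone, a WeakProcessorPhase is effectively just an index into the weak OopStorage array. A simplified, self-contained sketch of that post-change model (WeakPhase and PhaseIterator are stand-ins, not the HotSpot declarations):

#include <cassert>

enum WeakPhase : unsigned {};  // implicit values only, one per weak OopStorage

static const unsigned oopstorage_phase_count = 6;  // stand-in for OopStorageSet::weak_count

struct PhaseIterator {
  unsigned index;
  unsigned limit;
  bool is_end() const { return index == limit; }
  WeakPhase operator*() const { assert(!is_end()); return static_cast<WeakPhase>(index); }
  PhaseIterator& operator++() { assert(!is_end()); ++index; return *this; }
};

static PhaseIterator oopstorage_iterator() {
  return PhaseIterator{0, oopstorage_phase_count};
}

int main() {
  unsigned visited = 0;
  for (PhaseIterator it = oopstorage_iterator(); !it.is_end(); ++it) {
    visited++;
  }
  return visited == oopstorage_phase_count ? 0 : 1;
}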

View File

@ -74,6 +74,7 @@
#include "memory/classLoaderMetaspace.hpp"
#include "oops/compressedOops.inline.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.inline.hpp"
@ -1686,6 +1687,9 @@ void ShenandoahHeap::op_final_mark() {
set_concurrent_mark_in_progress(false);
mark_complete_marking_context();
// Notify JVMTI that the tagmap table will need cleaning.
JvmtiTagMap::set_needs_cleaning();
parallel_cleaning(false /* full gc*/);
if (ShenandoahVerify) {
@ -1747,6 +1751,9 @@ void ShenandoahHeap::op_final_mark() {
evacuate_and_update_roots();
}
// Notify JVMTI that oops are changed.
JvmtiTagMap::set_needs_rehashing();
if (ShenandoahPacing) {
pacer()->setup_for_evac();
}

View File

@ -38,7 +38,6 @@ class ShenandoahParallelWeakRootsCleaningTask : public AbstractGangTask {
protected:
ShenandoahPhaseTimings::Phase _phase;
WeakProcessor::Task _weak_processing_task;
ShenandoahSerialWeakRoots _serial_weak_roots;
IsAlive* _is_alive;
KeepAlive* _keep_alive;
bool _include_concurrent_roots;

View File

@ -39,7 +39,7 @@ ShenandoahParallelWeakRootsCleaningTask<IsAlive, KeepAlive>::ShenandoahParallelW
uint num_workers,
bool include_concurrent_roots) :
AbstractGangTask("Shenandoah Weak Root Cleaning"),
_phase(phase), _weak_processing_task(num_workers), _serial_weak_roots(phase),
_phase(phase), _weak_processing_task(num_workers),
_is_alive(is_alive), _keep_alive(keep_alive), _include_concurrent_roots(include_concurrent_roots) {
assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
@ -62,8 +62,6 @@ template<typename IsAlive, typename KeepAlive>
void ShenandoahParallelWeakRootsCleaningTask<IsAlive, KeepAlive>::work(uint worker_id) {
if (_include_concurrent_roots) {
_weak_processing_task.work<IsAlive, KeepAlive>(worker_id, _is_alive, _keep_alive);
} else {
_serial_weak_roots.weak_oops_do(_is_alive, _keep_alive, worker_id);
}
if (ShenandoahStringDedup::is_enabled()) {

View File

@ -40,7 +40,6 @@ class outputStream;
f(CNT_PREFIX ## VMStrongRoots, DESC_PREFIX "VM Strong Roots") \
f(CNT_PREFIX ## VMWeakRoots, DESC_PREFIX "VM Weak Roots") \
f(CNT_PREFIX ## CLDGRoots, DESC_PREFIX "CLDG Roots") \
f(CNT_PREFIX ## JVMTIWeakRoots, DESC_PREFIX "JVMTI Weak Roots") \
f(CNT_PREFIX ## StringDedupTableRoots, DESC_PREFIX "Dedup Table Roots") \
f(CNT_PREFIX ## StringDedupQueueRoots, DESC_PREFIX "Dedup Queue Roots") \
f(CNT_PREFIX ## FinishQueues, DESC_PREFIX "Finish Queues") \

View File

@ -37,33 +37,6 @@
#include "prims/jvmtiExport.hpp"
#include "runtime/thread.hpp"
ShenandoahWeakSerialRoot::ShenandoahWeakSerialRoot(ShenandoahWeakSerialRoot::WeakOopsDo weak_oops_do,
ShenandoahPhaseTimings::Phase phase, ShenandoahPhaseTimings::ParPhase par_phase) :
_weak_oops_do(weak_oops_do), _phase(phase), _par_phase(par_phase) {
}
void ShenandoahWeakSerialRoot::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive, uint worker_id) {
if (_claimed.try_set()) {
ShenandoahWorkerTimingsTracker timer(_phase, _par_phase, worker_id);
_weak_oops_do(is_alive, keep_alive);
}
}
#if INCLUDE_JVMTI
ShenandoahJVMTIWeakRoot::ShenandoahJVMTIWeakRoot(ShenandoahPhaseTimings::Phase phase) :
ShenandoahWeakSerialRoot(&JvmtiExport::weak_oops_do, phase, ShenandoahPhaseTimings::JVMTIWeakRoots) {
}
#endif // INCLUDE_JVMTI
void ShenandoahSerialWeakRoots::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive, uint worker_id) {
JVMTI_ONLY(_jvmti_weak_roots.weak_oops_do(is_alive, keep_alive, worker_id);)
}
void ShenandoahSerialWeakRoots::weak_oops_do(OopClosure* cl, uint worker_id) {
AlwaysTrueClosure always_true;
weak_oops_do(&always_true, cl, worker_id);
}
ShenandoahThreadRoots::ShenandoahThreadRoots(ShenandoahPhaseTimings::Phase phase, bool is_par) :
_phase(phase), _is_par(is_par) {
Threads::change_thread_claim_token();
@ -184,8 +157,7 @@ void ShenandoahRootScanner::roots_do(uint worker_id, OopClosure* oops, CodeBlobC
ShenandoahRootEvacuator::ShenandoahRootEvacuator(uint n_workers,
ShenandoahPhaseTimings::Phase phase) :
ShenandoahRootProcessor(phase),
_thread_roots(phase, n_workers > 1),
_serial_weak_roots(phase) {
_thread_roots(phase, n_workers > 1) {
nmethod::oops_do_marking_prologue();
}
@ -197,8 +169,6 @@ void ShenandoahRootEvacuator::roots_do(uint worker_id, OopClosure* oops) {
// Always disarm on-stack nmethods, because we are evacuating/updating them
// here
ShenandoahCodeBlobAndDisarmClosure codeblob_cl(oops);
// Process serial-claiming roots first
_serial_weak_roots.weak_oops_do(oops, worker_id);
// Process light-weight/limited parallel roots then
_thread_roots.oops_do(oops, &codeblob_cl, worker_id);
@ -209,7 +179,6 @@ ShenandoahRootUpdater::ShenandoahRootUpdater(uint n_workers, ShenandoahPhaseTimi
_vm_roots(phase),
_cld_roots(phase, n_workers),
_thread_roots(phase, n_workers > 1),
_serial_weak_roots(phase),
_weak_roots(phase),
_dedup_roots(phase),
_code_roots(phase) {
@ -220,7 +189,6 @@ ShenandoahRootAdjuster::ShenandoahRootAdjuster(uint n_workers, ShenandoahPhaseTi
_vm_roots(phase),
_cld_roots(phase, n_workers),
_thread_roots(phase, n_workers > 1),
_serial_weak_roots(phase),
_weak_roots(phase),
_dedup_roots(phase),
_code_roots(phase) {
@ -236,9 +204,6 @@ void ShenandoahRootAdjuster::roots_do(uint worker_id, OopClosure* oops) {
CLDToOopClosure adjust_cld_closure(oops, ClassLoaderData::_claim_strong);
AlwaysTrueClosure always_true;
// Process serial-claiming roots first
_serial_weak_roots.weak_oops_do(oops, worker_id);
// Process light-weight/limited parallel roots then
_vm_roots.oops_do(oops, worker_id);
_weak_roots.oops_do<OopClosure>(oops, worker_id);
@ -255,7 +220,6 @@ ShenandoahHeapIterationRootScanner::ShenandoahHeapIterationRootScanner() :
_thread_roots(ShenandoahPhaseTimings::heap_iteration_roots, false /*is par*/),
_vm_roots(ShenandoahPhaseTimings::heap_iteration_roots),
_cld_roots(ShenandoahPhaseTimings::heap_iteration_roots, 1),
_serial_weak_roots(ShenandoahPhaseTimings::heap_iteration_roots),
_weak_roots(ShenandoahPhaseTimings::heap_iteration_roots),
_dedup_roots(ShenandoahPhaseTimings::heap_iteration_roots),
_code_roots(ShenandoahPhaseTimings::heap_iteration_roots) {
@ -271,9 +235,6 @@ ShenandoahHeapIterationRootScanner::ShenandoahHeapIterationRootScanner() :
ResourceMark rm;
// Process serial-claiming roots first
_serial_weak_roots.weak_oops_do(oops, 0);
// Process light-weight/limited parallel roots then
_vm_roots.oops_do(oops, 0);
_weak_roots.oops_do<OopClosure>(oops, 0);

View File

@ -34,39 +34,6 @@
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "memory/iterator.hpp"
class ShenandoahWeakSerialRoot {
typedef void (*WeakOopsDo)(BoolObjectClosure*, OopClosure*);
private:
ShenandoahSharedFlag _claimed;
const WeakOopsDo _weak_oops_do;
const ShenandoahPhaseTimings::Phase _phase;
const ShenandoahPhaseTimings::ParPhase _par_phase;
public:
ShenandoahWeakSerialRoot(WeakOopsDo oops_do,
ShenandoahPhaseTimings::Phase phase, ShenandoahPhaseTimings::ParPhase par_phase);
void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive, uint worker_id);
};
#if INCLUDE_JVMTI
class ShenandoahJVMTIWeakRoot : public ShenandoahWeakSerialRoot {
public:
ShenandoahJVMTIWeakRoot(ShenandoahPhaseTimings::Phase phase);
};
#endif // INCLUDE_JVMTI
class ShenandoahSerialWeakRoots {
private:
JVMTI_ONLY(ShenandoahJVMTIWeakRoot _jvmti_weak_roots;)
public:
ShenandoahSerialWeakRoots(ShenandoahPhaseTimings::Phase phase)
JVMTI_ONLY(: _jvmti_weak_roots(phase))
{};
void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive, uint worker_id);
void weak_oops_do(OopClosure* cl, uint worker_id);
};
template <bool CONCURRENT>
class ShenandoahVMWeakRoots {
private:
@ -218,7 +185,6 @@ private:
ShenandoahVMRoots<false /*concurrent*/> _vm_roots;
ShenandoahClassLoaderDataRoots<false /*concurrent*/, true /*single threaded*/>
_cld_roots;
ShenandoahSerialWeakRoots _serial_weak_roots;
ShenandoahVMWeakRoots<false /*concurrent*/> _weak_roots;
ShenandoahConcurrentStringDedupRoots _dedup_roots;
ShenandoahCodeCacheRoots _code_roots;
@ -233,7 +199,6 @@ public:
class ShenandoahRootEvacuator : public ShenandoahRootProcessor {
private:
ShenandoahThreadRoots _thread_roots;
ShenandoahSerialWeakRoots _serial_weak_roots;
public:
ShenandoahRootEvacuator(uint n_workers, ShenandoahPhaseTimings::Phase phase);
~ShenandoahRootEvacuator();
@ -248,7 +213,6 @@ private:
ShenandoahClassLoaderDataRoots<false /*concurrent*/, false /*single threaded*/>
_cld_roots;
ShenandoahThreadRoots _thread_roots;
ShenandoahSerialWeakRoots _serial_weak_roots;
ShenandoahVMWeakRoots<false /*concurrent*/> _weak_roots;
ShenandoahStringDedupRoots _dedup_roots;
ShenandoahCodeCacheRoots _code_roots;
@ -267,7 +231,6 @@ private:
ShenandoahClassLoaderDataRoots<false /*concurrent*/, false /*single threaded*/>
_cld_roots;
ShenandoahThreadRoots _thread_roots;
ShenandoahSerialWeakRoots _serial_weak_roots;
ShenandoahVMWeakRoots<false /*concurrent*/> _weak_roots;
ShenandoahStringDedupRoots _dedup_roots;
ShenandoahCodeCacheRoots _code_roots;

View File

@ -198,9 +198,6 @@ void ShenandoahRootUpdater::roots_do(uint worker_id, IsAlive* is_alive, KeepAliv
CLDToOopClosure clds(keep_alive, ClassLoaderData::_claim_strong);
// Process serial-claiming roots first
_serial_weak_roots.weak_oops_do(is_alive, keep_alive, worker_id);
// Process light-weight/limited parallel roots then
_vm_roots.oops_do(keep_alive, worker_id);
_weak_roots.weak_oops_do<IsAlive, KeepAlive>(is_alive, keep_alive, worker_id);

View File

@ -99,11 +99,7 @@ void ShenandoahRootVerifier::oops_do(OopClosure* oops) {
if (verify(WeakRoots)) {
shenandoah_assert_safepoint();
serial_weak_roots_do(oops);
concurrent_weak_roots_do(oops);
} else if (verify(SerialWeakRoots)) {
shenandoah_assert_safepoint();
serial_weak_roots_do(oops);
} else if (verify(ConcurrentWeakRoots)) {
concurrent_weak_roots_do(oops);
}
@ -159,14 +155,6 @@ void ShenandoahRootVerifier::strong_roots_do(OopClosure* oops) {
Threads::possibly_parallel_oops_do(true, oops, &blobs);
}
void ShenandoahRootVerifier::serial_weak_roots_do(OopClosure* cl) {
WeakProcessorPhases::Iterator itr = WeakProcessorPhases::serial_iterator();
AlwaysTrueClosure always_true;
for ( ; !itr.is_end(); ++itr) {
WeakProcessorPhases::processor(*itr)(&always_true, cl);
}
}
void ShenandoahRootVerifier::concurrent_weak_roots_do(OopClosure* cl) {
for (OopStorageSet::Iterator it = OopStorageSet::weak_iterator(); !it.is_end(); ++it) {
OopStorage* storage = *it;

View File

@ -72,7 +72,6 @@ public:
private:
bool verify(RootTypes type) const;
void serial_weak_roots_do(OopClosure* cl);
void concurrent_weak_roots_do(OopClosure* cl);
};

View File

@ -42,6 +42,7 @@
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/handshake.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
@ -300,12 +301,12 @@ bool ZHeap::mark_end() {
// Block resurrection of weak/phantom references
ZResurrection::block();
// Process weak roots
_weak_roots_processor.process_weak_roots();
// Prepare to unload stale metadata and nmethods
_unload.prepare();
// Notify JVMTI that some tagmap entry objects may have died.
JvmtiTagMap::set_needs_cleaning();
return true;
}
@ -446,8 +447,8 @@ void ZHeap::relocate_start() {
ZStatSample(ZSamplerHeapUsedBeforeRelocation, used());
ZStatHeap::set_at_relocate_start(capacity(), allocated(), used());
// Remap/Relocate roots
_relocate.start();
// Notify JVMTI
JvmtiTagMap::set_needs_rehashing();
}
void ZHeap::relocate() {

View File

@ -163,7 +163,6 @@ ZHeapIterator::ZHeapIterator(uint nworkers, bool visit_weaks) :
_queues(nworkers),
_array_queues(nworkers),
_concurrent_roots(ClassLoaderData::_claim_other),
_weak_roots(),
_concurrent_weak_roots(),
_terminator(nworkers, &_queues) {
@ -290,9 +289,6 @@ void ZHeapIterator::push_strong_roots(const ZHeapIteratorContext& context) {
void ZHeapIterator::push_weak_roots(const ZHeapIteratorContext& context) {
ZHeapIteratorRootOopClosure<true /* Weak */> cl(context);
_concurrent_weak_roots.apply(&cl);
AlwaysTrueClosure is_alive;
_weak_roots.apply(&is_alive, &cl);
}
template <bool VisitWeaks>

View File

@ -53,7 +53,6 @@ private:
ZHeapIteratorQueues _queues;
ZHeapIteratorArrayQueues _array_queues;
ZConcurrentRootsIterator _concurrent_roots;
ZWeakRootsIterator _weak_roots;
ZConcurrentWeakRootsIterator _concurrent_weak_roots;
TaskTerminator _terminator;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View File

@ -55,32 +55,6 @@ public:
}
};
class ZRelocateRootsTask : public ZTask {
private:
ZRelocateRootsIteratorClosure _cl;
public:
ZRelocateRootsTask() :
ZTask("ZRelocateRootsTask") {}
virtual void work() {
// Allocation path assumes that relocating GC threads are ZWorkers
assert(ZThread::is_worker(), "Relocation code needs to be run as a worker");
assert(ZThread::worker_id() == 0, "No multi-thread support");
// During relocation we need to visit the JVMTI
// tag map to rehash the entries with the new oop addresses.
ZStatTimer timer(ZSubPhasePauseRootsJVMTITagMap);
AlwaysTrueClosure always_alive;
JvmtiTagMap::weak_oops_do(&always_alive, &_cl);
}
};
void ZRelocate::start() {
ZRelocateRootsTask task;
_workers->run_serial(&task);
}
uintptr_t ZRelocate::relocate_object_inner(ZForwarding* forwarding, uintptr_t from_index, uintptr_t from_offset) const {
ZForwardingCursor cursor;

View File

@ -39,7 +39,6 @@ static const ZStatSubPhase ZSubPhaseConcurrentRootsOopStorageSet("Concurrent Roo
static const ZStatSubPhase ZSubPhaseConcurrentRootsClassLoaderDataGraph("Concurrent Roots ClassLoaderDataGraph");
static const ZStatSubPhase ZSubPhaseConcurrentRootsJavaThreads("Concurrent Roots JavaThreads");
static const ZStatSubPhase ZSubPhaseConcurrentRootsCodeCache("Concurrent Roots CodeCache");
static const ZStatSubPhase ZSubPhasePauseWeakRootsJVMTITagMap("Pause Weak Roots JVMTITagMap");
static const ZStatSubPhase ZSubPhaseConcurrentWeakRootsOopStorageSet("Concurrent Weak Roots OopStorageSet");
template <typename Iterator>
@ -53,12 +52,6 @@ void ZParallelApply<Iterator>::apply(ClosureType* cl) {
}
}
template <typename Iterator>
void ZSerialWeakApply<Iterator>::apply(BoolObjectClosure* is_alive, OopClosure* cl) {
if (!Atomic::load(&_claimed) && Atomic::cmpxchg(&_claimed, false, true) == false) {
_iter.apply(is_alive, cl);
}
}
ZStrongOopStorageSetIterator::ZStrongOopStorageSetIterator() :
_iter() {}
@ -129,20 +122,6 @@ void ZConcurrentRootsIterator::apply(OopClosure* cl,
}
}
ZWeakRootsIterator::ZWeakRootsIterator() :
_jvmti_tag_map() {
assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
}
void ZWeakRootsIterator::apply(BoolObjectClosure* is_alive, OopClosure* cl) {
_jvmti_tag_map.apply(is_alive, cl);
}
void ZJVMTITagMapIterator::apply(BoolObjectClosure* is_alive, OopClosure* cl) {
ZStatTimer timer(ZSubPhasePauseWeakRootsJVMTITagMap);
JvmtiTagMap::weak_oops_do(is_alive, cl);
}
ZWeakOopStorageSetIterator::ZWeakOopStorageSetIterator() :
_iter() {}

View File

@ -48,20 +48,6 @@ public:
}
};
template <typename Iterator>
class ZSerialWeakApply {
private:
Iterator _iter;
volatile bool _claimed;
public:
ZSerialWeakApply() :
_iter(),
_claimed(false) {}
void apply(BoolObjectClosure* is_alive, OopClosure* cl);
};
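The deleted ZSerialWeakApply existed so that exactly one worker ran the serial JVMTI pass, via a load-then-cmpxchg claim. A standalone sketch of that one-shot claim idiom, with std::atomic standing in for HotSpot's Atomic:

#include <atomic>

class OneShotClaim {
  std::atomic<bool> _claimed{false};
public:
  // Returns true for exactly one caller, even under contention.
  bool try_claim() {
    bool expected = false;
    return !_claimed.load(std::memory_order_relaxed) &&
           _claimed.compare_exchange_strong(expected, true);
  }
};

int main() {
  OneShotClaim c;
  return (c.try_claim() && !c.try_claim()) ? 0 : 1;
}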
class ZStrongOopStorageSetIterator {
OopStorageSetStrongParState<true /* concurrent */, false /* is_const */> _iter;
@ -125,21 +111,6 @@ public:
void report_num_dead();
};
class ZJVMTITagMapIterator {
public:
void apply(BoolObjectClosure* is_alive, OopClosure* cl);
};
class ZWeakRootsIterator {
private:
ZSerialWeakApply<ZJVMTITagMapIterator> _jvmti_tag_map;
public:
ZWeakRootsIterator();
void apply(BoolObjectClosure* is_alive, OopClosure* cl);
};
class ZConcurrentWeakRootsIterator {
private:
ZParallelApply<ZWeakOopStorageSetIterator> _oop_storage_set;

View File

@ -31,7 +31,7 @@
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zNMethod.hpp"
#include "gc/z/zOopClosures.hpp"
#include "gc/z/zOopClosures.inline.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zUnload.hpp"
#include "oops/access.inline.hpp"

View File

@ -264,13 +264,6 @@ void ZVerify::roots_concurrent_strong(bool verify_fixed) {
&nm_cl);
}
void ZVerify::roots_weak() {
AlwaysTrueClosure is_alive;
ZVerifyRootClosure cl(true /* verify_fixed */);
ZWeakRootsIterator iter;
iter.apply(&is_alive, &cl);
}
void ZVerify::roots_concurrent_weak() {
ZVerifyRootClosure cl(true /* verify_fixed */);
ZConcurrentWeakRootsIterator iter;
@ -284,7 +277,6 @@ void ZVerify::roots(bool verify_concurrent_strong, bool verify_weaks) {
if (ZVerifyRoots) {
roots_concurrent_strong(verify_concurrent_strong);
if (verify_weaks) {
roots_weak();
roots_concurrent_weak();
}
}

View File

@ -32,7 +32,6 @@ class ZPageAllocator;
class ZVerify : public AllStatic {
private:
static void roots_concurrent_strong(bool verify_fixed);
static void roots_weak();
static void roots_concurrent_weak();
static void roots(bool verify_concurrent_strong, bool verify_weaks);

View File

@ -29,27 +29,6 @@
ZWeakRootsProcessor::ZWeakRootsProcessor(ZWorkers* workers) :
_workers(workers) {}
class ZProcessWeakRootsTask : public ZTask {
private:
ZWeakRootsIterator _weak_roots;
public:
ZProcessWeakRootsTask() :
ZTask("ZProcessWeakRootsTask"),
_weak_roots() {}
virtual void work() {
ZPhantomIsAliveObjectClosure is_alive;
ZPhantomKeepAliveOopClosure keep_alive;
_weak_roots.apply(&is_alive, &keep_alive);
}
};
void ZWeakRootsProcessor::process_weak_roots() {
ZProcessWeakRootsTask task;
_workers->run_serial(&task);
}
class ZProcessConcurrentWeakRootsTask : public ZTask {
private:
ZConcurrentWeakRootsIterator _concurrent_weak_roots;

View File

@ -31,6 +31,7 @@
#include "prims/jvmtiEventController.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiImpl.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "prims/jvmtiThreadState.inline.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
@ -304,6 +305,8 @@ public:
static void trace_changed(JvmtiThreadState *state, jlong now_enabled, jlong changed);
static void trace_changed(jlong now_enabled, jlong changed);
static void flush_object_free_events(JvmtiEnvBase *env);
};
bool JvmtiEventControllerPrivate::_initialized = false;
@ -394,6 +397,18 @@ JvmtiEventControllerPrivate::trace_changed(jlong now_enabled, jlong changed) {
}
void
JvmtiEventControllerPrivate::flush_object_free_events(JvmtiEnvBase* env) {
// Some of the objects recorded by this env may have died. If we're
// (potentially) changing the enable state for ObjectFree events, we
// need to ensure the env is cleaned up and any events that should
// be posted are posted.
JvmtiTagMap* tag_map = env->tag_map_acquire();
if (tag_map != NULL) {
tag_map->flush_object_free_events();
}
}
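Flushing before the enable state or callbacks change ensures deaths already recorded are posted exactly once, under the settings in force when they were observed. A sketch of that ordering with hypothetical types (EnvModel), not the JVMTI implementation:

#include <vector>

struct EnvModel {
  bool object_free_enabled = false;
  std::vector<long long> dead_tags;   // recorded by the tag map cleaner

  void flush_object_free_events() {
    if (object_free_enabled) {
      for (long long tag : dead_tags) {
        (void)tag;                    // model of posting ObjectFree(tag)
      }
    }
    dead_tags.clear();
  }

  void set_object_free_enabled(bool enabled) {
    flush_object_free_events();       // old state governs recorded deaths
    object_free_enabled = enabled;
  }
};

int main() {
  EnvModel e;
  e.set_object_free_enabled(true);
  return 0;
}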
// For the specified env: compute the currently truly enabled events
// set external state accordingly.
// Return value and set value must include all events.
@ -685,6 +700,9 @@ void JvmtiEventControllerPrivate::set_event_callbacks(JvmtiEnvBase *env,
assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(), "sanity check");
EC_TRACE(("[*] # set event callbacks"));
// May be changing the event handler for ObjectFree.
flush_object_free_events(env);
env->set_event_callbacks(callbacks, size_of_callbacks);
jlong enabled_bits = 0;
for (int ei = JVMTI_MIN_EVENT_TYPE_VAL; ei <= JVMTI_MAX_EVENT_TYPE_VAL; ++ei) {
@ -797,6 +815,10 @@ JvmtiEventControllerPrivate::set_user_enabled(JvmtiEnvBase *env, JavaThread *thr
thread==NULL? "ALL": JvmtiTrace::safe_get_thread_name(thread),
enabled? "enabled" : "disabled", JvmtiTrace::event_name(event_type)));
if (event_type == JVMTI_EVENT_OBJECT_FREE) {
flush_object_free_events(env);
}
if (thread == NULL) {
env->env_event_enable()->set_user_enabled(event_type, enabled);
} else {

View File

@ -680,16 +680,24 @@ void JvmtiExport::post_vm_start() {
}
static OopStorage* _jvmti_oop_storage = NULL;
static OopStorage* _weak_tag_storage = NULL;
OopStorage* JvmtiExport::jvmti_oop_storage() {
assert(_jvmti_oop_storage != NULL, "not yet initialized");
return _jvmti_oop_storage;
}
OopStorage* JvmtiExport::weak_tag_storage() {
assert(_weak_tag_storage != NULL, "not yet initialized");
return _weak_tag_storage;
}
void JvmtiExport::initialize_oop_storage() {
// OopStorage needs to be created early in startup and unconditionally
// because of OopStorageSet static array indices.
_jvmti_oop_storage = OopStorageSet::create_strong("JVMTI OopStorage");
_weak_tag_storage = OopStorageSet::create_weak("JVMTI Tag Weak OopStorage");
_weak_tag_storage->register_num_dead_callback(&JvmtiTagMap::gc_notification);
}
void JvmtiExport::post_vm_initialized() {
@ -1479,7 +1487,6 @@ void JvmtiExport::post_thread_end(JavaThread *thread) {
}
void JvmtiExport::post_object_free(JvmtiEnv* env, jlong tag) {
assert(SafepointSynchronize::is_at_safepoint(), "must be executed at safepoint");
assert(env->is_enabled(JVMTI_EVENT_OBJECT_FREE), "checking");
EVT_TRIG_TRACE(JVMTI_EVENT_OBJECT_FREE, ("[?] Trg Object Free triggered" ));
@ -2636,10 +2643,6 @@ void JvmtiExport::clear_detected_exception(JavaThread* thread) {
}
}
void JvmtiExport::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
JvmtiTagMap::weak_oops_do(is_alive, f);
}
// Onload raw monitor transition.
void JvmtiExport::transition_pending_onload_raw_monitors() {
JvmtiPendingMonitors::transition_raw_monitors();
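register_num_dead_callback() above is how JvmtiTagMap::gc_notification learns about dead entries regardless of which collector cleared them. A standalone model of that hookup (WeakStorageModel and friends are hypothetical, not the OopStorage API):

#include <cstddef>

typedef void (*NumDeadCallback)(size_t num_dead);

class WeakStorageModel {
  NumDeadCallback _callback = nullptr;
public:
  void register_num_dead_callback(NumDeadCallback cb) { _callback = cb; }
  // Called by the GC after it has cleared dead weak referents.
  void report_num_dead(size_t n) {
    if (_callback != nullptr && n > 0) _callback(n);
  }
};

static size_t observed = 0;
static void gc_notification_model(size_t num_dead) { observed += num_dead; }

int main() {
  WeakStorageModel storage;
  storage.register_num_dead_callback(&gc_notification_model);
  storage.report_num_dead(3);
  return observed == 3 ? 0 : 1;
}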

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -167,6 +167,7 @@ class JvmtiExport : public AllStatic {
static void initialize_oop_storage() NOT_JVMTI_RETURN;
static OopStorage* jvmti_oop_storage();
static OopStorage* weak_tag_storage();
private:
// GenerateEvents support to allow posting of CompiledMethodLoad and
@ -407,8 +408,6 @@ class JvmtiExport : public AllStatic {
static void cleanup_thread (JavaThread* thread) NOT_JVMTI_RETURN;
static void clear_detected_exception (JavaThread* thread) NOT_JVMTI_RETURN;
static void weak_oops_do(BoolObjectClosure* b, OopClosure* f) NOT_JVMTI_RETURN;
static void transition_pending_onload_raw_monitors() NOT_JVMTI_RETURN;
#if INCLUDE_SERVICES

File diff suppressed because it is too large

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,29 +27,23 @@
#ifndef SHARE_PRIMS_JVMTITAGMAP_HPP
#define SHARE_PRIMS_JVMTITAGMAP_HPP
#include "gc/shared/collectedHeap.hpp"
#include "jvmtifiles/jvmti.h"
#include "jvmtifiles/jvmtiEnv.hpp"
#include "memory/allocation.hpp"
// forward references
class JvmtiTagHashmap;
class JvmtiTagHashmapEntry;
class JvmtiTagHashmapEntryClosure;
class JvmtiEnv;
class JvmtiTagMapTable;
class JvmtiTagMapEntryClosure;
class JvmtiTagMap : public CHeapObj<mtInternal> {
private:
enum{
max_free_entries = 4096 // maximum number of free entries per env
};
JvmtiEnv* _env; // the jvmti environment
Mutex _lock; // lock for this tag map
JvmtiTagHashmap* _hashmap; // the hashmap
JvmtiTagMapTable* _hashmap; // the hashmap for tags
bool _needs_rehashing;
bool _needs_cleaning;
JvmtiTagHashmapEntry* _free_entries; // free list for this environment
int _free_entries_count; // number of entries on the free list
static bool _has_object_free_events;
// create a tag map
JvmtiTagMap(JvmtiEnv* env);
@ -58,21 +52,16 @@ class JvmtiTagMap : public CHeapObj<mtInternal> {
inline Mutex* lock() { return &_lock; }
inline JvmtiEnv* env() const { return _env; }
void do_weak_oops(BoolObjectClosure* is_alive, OopClosure* f);
void check_hashmap(bool post_events);
// iterate over all entries in this tag map
void entry_iterate(JvmtiTagHashmapEntryClosure* closure);
void entry_iterate(JvmtiTagMapEntryClosure* closure);
void post_dead_objects_on_vm_thread();
public:
// indicates if this tag map is locked
bool is_locked() { return lock()->is_locked(); }
JvmtiTagHashmap* hashmap() { return _hashmap; }
// create/destroy entries
JvmtiTagHashmapEntry* create_entry(oop ref, jlong tag);
void destroy_entry(JvmtiTagHashmapEntry* entry);
JvmtiTagMapTable* hashmap() { return _hashmap; }
// returns true if the hashmaps are empty
bool is_empty();
@ -120,8 +109,20 @@ class JvmtiTagMap : public CHeapObj<mtInternal> {
jint* count_ptr, jobject** object_result_ptr,
jlong** tag_result_ptr);
static void weak_oops_do(
BoolObjectClosure* is_alive, OopClosure* f) NOT_JVMTI_RETURN;
void remove_dead_entries(bool post_object_free);
void remove_dead_entries_locked(bool post_object_free);
static void check_hashmaps_for_heapwalk();
static void set_needs_rehashing() NOT_JVMTI_RETURN;
static void set_needs_cleaning() NOT_JVMTI_RETURN;
static void gc_notification(size_t num_dead_entries) NOT_JVMTI_RETURN;
void flush_object_free_events();
// For ServiceThread
static void flush_all_object_free_events() NOT_JVMTI_RETURN;
static bool has_object_free_events_and_reset() NOT_JVMTI_RETURN_(false);
};
#endif // SHARE_PRIMS_JVMTITAGMAP_HPP

View File

@ -0,0 +1,261 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/shared/oopStorage.hpp"
#include "jvmtifiles/jvmtiEnv.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "oops/weakHandle.inline.hpp"
#include "prims/jvmtiEventController.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiTagMapTable.hpp"
#include "utilities/hashtable.inline.hpp"
#include "utilities/macros.hpp"
oop JvmtiTagMapEntry::object() {
return literal().resolve();
}
oop JvmtiTagMapEntry::object_no_keepalive() {
// Just peek at the object without keeping it alive.
return literal().peek();
}
JvmtiTagMapTable::JvmtiTagMapTable()
: Hashtable<WeakHandle, mtServiceability>(_table_size, sizeof(JvmtiTagMapEntry)) {}
JvmtiTagMapTable::~JvmtiTagMapTable() {
// Delete this table
log_debug(jvmti, table)("JvmtiTagMapTable deleted");
for (int i = 0; i < table_size(); ++i) {
for (JvmtiTagMapEntry* m = bucket(i); m != NULL;) {
JvmtiTagMapEntry* entry = m;
// read next before freeing.
m = m->next();
free_entry(entry);
}
}
assert(number_of_entries() == 0, "should have removed all entries");
assert(new_entry_free_list() == NULL, "entry present on JvmtiTagMapTable's free list");
}
// Entries are C_Heap allocated
JvmtiTagMapEntry* JvmtiTagMapTable::new_entry(unsigned int hash, WeakHandle w, jlong tag) {
JvmtiTagMapEntry* entry = (JvmtiTagMapEntry*)Hashtable<WeakHandle, mtServiceability>::allocate_new_entry(hash, w);
entry->set_tag(tag);
return entry;
}
void JvmtiTagMapTable::free_entry(JvmtiTagMapEntry* entry) {
unlink_entry(entry);
entry->literal().release(JvmtiExport::weak_tag_storage()); // release to OopStorage
FREE_C_HEAP_ARRAY(char, entry);
}
unsigned int JvmtiTagMapTable::compute_hash(oop obj) {
assert(obj != NULL, "obj is null");
return Universe::heap()->hash_oop(obj);
}
JvmtiTagMapEntry* JvmtiTagMapTable::find(int index, unsigned int hash, oop obj) {
assert(obj != NULL, "Cannot search for a NULL object");
for (JvmtiTagMapEntry* p = bucket(index); p != NULL; p = p->next()) {
if (p->hash() == hash) {
// Peek the object to check if it is the right target.
oop target = p->object_no_keepalive();
// The obj is in the table as a target already
if (target == obj) {
ResourceMark rm;
log_trace(jvmti, table)("JvmtiTagMap entry found for %s index %d",
obj->print_value_string(), index);
// The object() accessor makes sure the target object is kept alive before
// leaking out.
(void)p->object();
return p;
}
}
}
return NULL;
}
JvmtiTagMapEntry* JvmtiTagMapTable::find(oop obj) {
unsigned int hash = compute_hash(obj);
int index = hash_to_index(hash);
return find(index, hash, obj);
}
JvmtiTagMapEntry* JvmtiTagMapTable::add(oop obj, jlong tag) {
unsigned int hash = compute_hash(obj);
int index = hash_to_index(hash);
// One was added while acquiring the lock
assert(find(index, hash, obj) == NULL, "shouldn't already be present");
// obj was read with AS_NO_KEEPALIVE, or equivalent.
// The object needs to be kept alive when it is published.
Universe::heap()->keep_alive(obj);
WeakHandle w(JvmtiExport::weak_tag_storage(), obj);
JvmtiTagMapEntry* p = new_entry(hash, w, tag);
Hashtable<WeakHandle, mtServiceability>::add_entry(index, p);
ResourceMark rm;
log_trace(jvmti, table)("JvmtiTagMap entry added for %s index %d",
obj->print_value_string(), index);
// Resize if the table is getting too big.
resize_if_needed();
return p;
}
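add() reads the object without keep-alive semantics and only strengthens it (Universe::heap()->keep_alive) at the point of publication. A standalone analogy using std::weak_ptr in place of WeakHandle; all types here are stand-ins, not HotSpot code:

#include <memory>
#include <unordered_map>

struct TagEntry {
  std::weak_ptr<const void> obj;   // ~ WeakHandle in weak_tag_storage
  long long tag;
};

class TagTableModel {
  std::unordered_map<const void*, TagEntry> _map;  // keyed by object identity
public:
  // Caller passes a strong reference, playing the role of keep_alive(obj).
  void add(const std::shared_ptr<const void>& obj, long long tag) {
    _map[obj.get()] = TagEntry{obj, tag};
  }
  // "no keepalive" lookup: lock() fails once the referent has died.
  long long find(const void* raw) {
    auto it = _map.find(raw);
    if (it == _map.end()) return 0;
    std::shared_ptr<const void> alive = it->second.obj.lock();
    return alive ? it->second.tag : 0;
  }
};

int main() {
  auto obj = std::make_shared<int>(42);
  TagTableModel t;
  t.add(obj, 7);
  return t.find(obj.get()) == 7 ? 0 : 1;
}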
void JvmtiTagMapTable::remove(oop obj) {
unsigned int hash = compute_hash(obj);
int index = hash_to_index(hash);
JvmtiTagMapEntry** p = bucket_addr(index);
JvmtiTagMapEntry* entry = bucket(index);
while (entry != NULL) {
oop target = entry->object_no_keepalive();
if (target != NULL && target == obj) {
log_trace(jvmti, table)("JvmtiTagMap entry removed for index %d", index);
*p = entry->next();
free_entry(entry);
return; // done
}
// get next entry and address
p = entry->next_addr();
entry = entry->next();
}
}
void JvmtiTagMapTable::entry_iterate(JvmtiTagMapEntryClosure* closure) {
for (int i = 0; i < table_size(); ++i) {
for (JvmtiTagMapEntry* p = bucket(i); p != NULL; p = p->next()) {
closure->do_entry(p);
}
}
}
const int _resize_load_trigger = 5; // load factor that will trigger the resize
static bool _resizable = true;
void JvmtiTagMapTable::resize_if_needed() {
if (_resizable && number_of_entries() > (_resize_load_trigger*table_size())) {
int desired_size = calculate_resize(true);
if (desired_size == table_size()) {
_resizable = false; // hit max
} else {
if (!resize(desired_size)) {
// Something went wrong, turn resizing off
_resizable = false;
}
log_info(jvmti, table) ("JvmtiTagMap table resized to %d", table_size());
}
}
}
// Serially remove entries for dead oops from the table, and notify jvmti.
void JvmtiTagMapTable::remove_dead_entries(JvmtiEnv* env, bool post_object_free) {
int oops_removed = 0;
int oops_counted = 0;
for (int i = 0; i < table_size(); ++i) {
JvmtiTagMapEntry** p = bucket_addr(i);
JvmtiTagMapEntry* entry = bucket(i);
while (entry != NULL) {
oops_counted++;
oop l = entry->object_no_keepalive();
if (l != NULL) {
p = entry->next_addr();
} else {
// Entry has been removed.
oops_removed++;
log_trace(jvmti, table)("JvmtiTagMap entry removed for index %d", i);
jlong tag = entry->tag();
*p = entry->next();
free_entry(entry);
// post the event to the profiler
if (post_object_free) {
JvmtiExport::post_object_free(env, tag);
}
}
// get next entry
entry = (JvmtiTagMapEntry*)HashtableEntry<WeakHandle, mtServiceability>::make_ptr(*p);
}
}
log_info(jvmti, table) ("JvmtiTagMap entries counted %d removed %d; %s",
oops_counted, oops_removed, post_object_free ? "free object posted" : "no posting");
}
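remove_dead_entries() splices dead entries out of a bucket chain without restarting the scan by carrying the address of the previous link. A standalone sketch of that unlink-while-walking idiom (types illustrative):

struct Entry {
  Entry* next;
  bool dead;
};

static int remove_dead(Entry** bucket) {
  int removed = 0;
  Entry** p = bucket;        // address of the link we may rewrite
  Entry* e = *bucket;
  while (e != nullptr) {
    if (e->dead) {
      *p = e->next;          // splice out; p still points at the same link
      delete e;
      removed++;
    } else {
      p = &e->next;          // advance the link pointer past a live entry
    }
    e = *p;                  // mirrors make_ptr(*p) in the code above
  }
  return removed;
}

int main() {
  Entry* c = new Entry{nullptr, false};
  Entry* b = new Entry{c, true};
  Entry* a = new Entry{b, false};
  Entry* bucket = a;
  int n = remove_dead(&bucket);
  delete bucket->next;       // free survivors c and a
  delete bucket;
  return n == 1 ? 0 : 1;
}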
// Rehash oops in the table
void JvmtiTagMapTable::rehash() {
ResourceMark rm;
GrowableArray<JvmtiTagMapEntry*> moved_entries;
int oops_counted = 0;
for (int i = 0; i < table_size(); ++i) {
JvmtiTagMapEntry** p = bucket_addr(i);
JvmtiTagMapEntry* entry = bucket(i);
while (entry != NULL) {
oops_counted++;
oop l = entry->object_no_keepalive();
if (l != NULL) {
// Check if oop has moved, ie its hashcode is different
// than the one entered in the table.
unsigned int new_hash = compute_hash(l);
if (entry->hash() != new_hash) {
*p = entry->next();
entry->set_hash(new_hash);
unlink_entry(entry);
moved_entries.push(entry);
} else {
p = entry->next_addr();
}
} else {
// Skip removed oops. They may still have to be posted.
p = entry->next_addr();
}
// get next entry
entry = (JvmtiTagMapEntry*)HashtableEntry<WeakHandle, mtServiceability>::make_ptr(*p);
}
}
int rehash_len = moved_entries.length();
// Now add back in the entries that were removed.
for (int i = 0; i < rehash_len; i++) {
JvmtiTagMapEntry* moved_entry = moved_entries.at(i);
int index = hash_to_index(moved_entry->hash());
Hashtable<WeakHandle, mtServiceability>::add_entry(index, moved_entry);
}
log_info(jvmti, table) ("JvmtiTagMap entries counted %d rehashed %d",
oops_counted, rehash_len);
}
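Because compute_hash() is address-based (Universe::heap()->hash_oop), a copying GC silently invalidates bucket placement; rehash() collects entries whose recomputed hash changed and reinserts them. A simplified self-contained model of that two-pass scheme, with an integer "address" as the key:

#include <vector>

struct Node { unsigned addr; long long tag; };

static unsigned hash_of(unsigned addr) { return addr; }  // address-based hash

static void rehash(std::vector<std::vector<Node>>& buckets) {
  std::vector<Node> moved;
  const unsigned n = (unsigned)buckets.size();
  for (unsigned i = 0; i < n; i++) {
    std::vector<Node>& b = buckets[i];
    for (size_t j = 0; j < b.size(); ) {
      if (hash_of(b[j].addr) % n != i) {   // GC updated addr; bucket is stale
        moved.push_back(b[j]);
        b.erase(b.begin() + j);            // unlink from the stale bucket
      } else {
        j++;
      }
    }
  }
  for (const Node& m : moved) {            // add back under the new hash
    buckets[hash_of(m.addr) % n].push_back(m);
  }
}

int main() {
  std::vector<std::vector<Node>> buckets(7);
  buckets[3].push_back(Node{11, 1});  // stale placement: 11 % 7 == 4
  rehash(buckets);
  return buckets[4].size() == 1 ? 0 : 1;
}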

View File

@ -0,0 +1,101 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_PRIMS_TAGMAPTABLE_HPP
#define SHARE_VM_PRIMS_TAGMAPTABLE_HPP
#include "oops/weakHandle.hpp"
#include "utilities/hashtable.hpp"
class JvmtiEnv;
// Hashtable to record oops used for JvmtiTagMap
class JvmtiTagMapEntryClosure;
class JvmtiTagMapEntry : public HashtableEntry<WeakHandle, mtServiceability> {
jlong _tag; // the tag
public:
JvmtiTagMapEntry* next() const {
return (JvmtiTagMapEntry*)HashtableEntry<WeakHandle, mtServiceability>::next();
}
JvmtiTagMapEntry** next_addr() {
return (JvmtiTagMapEntry**)HashtableEntry<WeakHandle, mtServiceability>::next_addr();
}
oop object();
oop object_no_keepalive();
jlong tag() const { return _tag; }
void set_tag(jlong tag) { _tag = tag; }
};
class JvmtiTagMapTable : public Hashtable<WeakHandle, mtServiceability> {
enum Constants {
_table_size = 1007
};
private:
JvmtiTagMapEntry* bucket(int i) {
return (JvmtiTagMapEntry*) Hashtable<WeakHandle, mtServiceability>::bucket(i);
}
JvmtiTagMapEntry** bucket_addr(int i) {
return (JvmtiTagMapEntry**) Hashtable<WeakHandle, mtServiceability>::bucket_addr(i);
}
JvmtiTagMapEntry* new_entry(unsigned int hash, WeakHandle w, jlong tag);
void free_entry(JvmtiTagMapEntry* entry);
unsigned int compute_hash(oop obj);
JvmtiTagMapEntry* find(int index, unsigned int hash, oop obj);
void resize_if_needed();
public:
JvmtiTagMapTable();
~JvmtiTagMapTable();
JvmtiTagMapEntry* find(oop obj);
JvmtiTagMapEntry* add(oop obj, jlong tag);
void remove(oop obj);
// iterate over all entries in the hashmap
void entry_iterate(JvmtiTagMapEntryClosure* closure);
bool is_empty() const { return number_of_entries() == 0; }
// Cleanup cleared entries and post
void remove_dead_entries(JvmtiEnv* env, bool post_object_free);
void rehash();
};
// A supporting class for iterating over all entries in Hashmap
class JvmtiTagMapEntryClosure {
public:
virtual void do_entry(JvmtiTagMapEntry* entry) = 0;
};
#endif // SHARE_VM_PRIMS_TAGMAPTABLE_HPP
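A small self-contained sketch of the closure-style iteration declared above: callers package per-entry work as a visitor with a virtual do_entry(), which entry_iterate() applies to every entry. All types here are stand-ins for the HotSpot ones:

#include <vector>

struct EntryModel { long long tag; };

struct EntryClosureModel {
  virtual void do_entry(EntryModel* entry) = 0;
  virtual ~EntryClosureModel() {}
};

static void entry_iterate(std::vector<EntryModel>& table, EntryClosureModel* cl) {
  for (EntryModel& e : table) cl->do_entry(&e);
}

struct CountTagged : EntryClosureModel {
  int count = 0;
  void do_entry(EntryModel* entry) override { if (entry->tag != 0) count++; }
};

int main() {
  std::vector<EntryModel> table = {{0}, {5}, {9}};
  CountTagged cl;
  entry_iterate(table, &cl);
  return cl.count == 2 ? 0 : 1;
}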

View File

@ -41,6 +41,7 @@
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "prims/jvmtiImpl.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "prims/resolvedMethodTable.hpp"
#include "services/diagnosticArgument.hpp"
#include "services/diagnosticFramework.hpp"
@ -146,6 +147,7 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
    JvmtiDeferredEvent jvmti_event;
    bool oop_handles_to_release = false;
    bool cldg_cleanup_work = false;
    bool jvmti_tagmap_work = false;
    {
      // Need state transition ThreadBlockInVM so that this thread
      // will be handled by safepoint correctly when this thread is
@ -173,7 +175,8 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
             (protection_domain_table_work = SystemDictionary::pd_cache_table()->has_work()) |
             (oopstorage_work = OopStorage::has_cleanup_work_and_reset()) |
             (oop_handles_to_release = (_oop_handle_list != NULL)) |
             (cldg_cleanup_work = ClassLoaderDataGraph::should_clean_metaspaces_and_reset())
             (cldg_cleanup_work = ClassLoaderDataGraph::should_clean_metaspaces_and_reset()) |
             (jvmti_tagmap_work = JvmtiTagMap::has_object_free_events_and_reset())
            ) == 0) {
        // Wait until notified that there is some work to do.
        ml.wait();
@ -236,6 +239,10 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
    if (cldg_cleanup_work) {
      ClassLoaderDataGraph::safepoint_and_clean_metaspaces();
    }

    if (jvmti_tagmap_work) {
      JvmtiTagMap::flush_all_object_free_events();
    }
  }
}
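The wait-loop condition above relies on a check-and-reset handshake: GC raises a flag when tagged objects die, and the ServiceThread consumes it while holding the Service_lock, then posts the events after releasing it. A sketch of that shape, assuming a static _has_object_free_events flag inside JvmtiTagMap (the field name is an assumption for illustration):

// Sketch: the check-and-reset handshake assumed by the wait loop.
bool JvmtiTagMap::has_object_free_events_and_reset() {
  assert(Service_lock->owned_by_self(), "Must own Service_lock");
  bool result = _has_object_free_events;  // raised by GC when weak entries are cleared
  _has_object_free_events = false;        // consume the notification
  return result;
}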

View File

@ -111,6 +111,7 @@
  template(PrintMetadata)                         \
  template(GTestExecuteAtSafepoint)               \
  template(JFROldObject)                          \
  template(JvmtiPostObjectFree)
class VM_Operation : public StackObj {
public:
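The new template entry generates a VMOp_JvmtiPostObjectFree id. A minimal sketch of a VM operation that could carry it, built on the JvmtiTagMapTable API added in this change (a hypothetical shape; the committed operation may differ):

// Sketch: posting ObjectFree events for dead tag map entries at a safepoint.
class VM_JvmtiPostObjectFree : public VM_Operation {
  JvmtiTagMapTable* _table;
  JvmtiEnv* _env;
 public:
  VM_JvmtiPostObjectFree(JvmtiTagMapTable* table, JvmtiEnv* env)
    : _table(table), _env(env) {}
  VMOp_Type type() const { return VMOp_JvmtiPostObjectFree; }
  void doit() {
    // At a safepoint: drop entries whose weak referent was collected and
    // post an ObjectFree event for each dead tag.
    _table->remove_dead_entries(_env, /*post_object_free*/ true);
  }
};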

View File

@ -37,6 +37,7 @@
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "oops/weakHandle.inline.hpp"
#include "prims/jvmtiTagMapTable.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/hashtable.hpp"
@ -136,8 +137,32 @@ static int literal_size(WeakHandle v) {
  return literal_size(v.peek());
}
const double _resize_factor = 2.0; // by how much we will resize using current number of entries
const int _small_table_sizes[] = { 107, 1009, 2017, 4049, 5051, 10103, 20201, 40423 };
const int _small_array_size = sizeof(_small_table_sizes)/sizeof(int);

// possible hashmap sizes - odd primes that roughly double in size.
// To avoid excessive resizing, the odd primes between 4801 and 76831 and
// between 76831 and 307261 have been omitted.
const int _large_table_sizes[] = { 4801, 76831, 307261, 614563, 1228891,
                                   2457733, 4915219, 9830479, 19660831, 39321619, 78643219 };
const int _large_array_size = sizeof(_large_table_sizes)/sizeof(int);

// Calculate next "good" hashtable size based on requested count
template <MEMFLAGS F> int BasicHashtable<F>::calculate_resize(bool use_large_table_sizes) const {
  int requested = (int)(_resize_factor*number_of_entries());
  const int* primelist = use_large_table_sizes ? _large_table_sizes : _small_table_sizes;
  int arraysize = use_large_table_sizes ? _large_array_size : _small_array_size;
  int newsize;
  for (int i = 0; i < arraysize; i++) {
    newsize = primelist[i];
    if (newsize >= requested) {
      break;
    }
  }
  return newsize;
}
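A worked example of the selection loop: with 3000 entries, requested = (int)(2.0 * 3000) = 6000, and the small list yields 10103; any request beyond the last prime saturates at it. A standalone demo of the same logic (illustrative, outside the JVM):

// Standalone demo of the size-selection loop above.
#include <cstdio>

static const int small_sizes[] = { 107, 1009, 2017, 4049, 5051, 10103, 20201, 40423 };
static const int small_len = sizeof(small_sizes) / sizeof(int);

static int next_size(int requested) {
  int newsize = small_sizes[0];
  for (int i = 0; i < small_len; i++) {
    newsize = small_sizes[i];
    if (newsize >= requested) break;
  }
  return newsize;  // saturates at 40423 for very large requests
}

int main() {
  printf("%d\n", next_size(6000));    // prints 10103
  printf("%d\n", next_size(100000));  // prints 40423 (saturated)
  return 0;
}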
template <MEMFLAGS F> bool BasicHashtable<F>::resize(int new_size) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  // Allocate new buckets
  HashtableBucket<F>* buckets_new = NEW_C_HEAP_ARRAY2_RETURN_NULL(HashtableBucket<F>, new_size, F, CURRENT_PC);
@ -292,6 +317,7 @@ template class Hashtable<Symbol*, mtSymbol>;
template class Hashtable<Klass*, mtClass>;
template class Hashtable<InstanceKlass*, mtClass>;
template class Hashtable<WeakHandle, mtClass>;
template class Hashtable<WeakHandle, mtServiceability>;
template class Hashtable<Symbol*, mtModule>;
template class Hashtable<oop, mtSymbol>;
template class Hashtable<Symbol*, mtClass>;
@ -309,6 +335,7 @@ template class BasicHashtable<mtInternal>;
template class BasicHashtable<mtModule>;
template class BasicHashtable<mtCompiler>;
template class BasicHashtable<mtTracing>;
template class BasicHashtable<mtServiceability>;
template void BasicHashtable<mtClass>::verify_table<DictionaryEntry>(char const*);
template void BasicHashtable<mtModule>::verify_table<ModuleEntry>(char const*);

View File

@ -217,6 +217,7 @@ public:
  int number_of_entries() const { return _number_of_entries; }

  int calculate_resize(bool use_large_table_sizes) const;
  bool resize(int new_size);

  // Grow the number of buckets if the average entries per bucket is over the load_factor
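calculate_resize() and resize() are the two halves of on-demand growth; a subclass decides when to combine them. A sketch in the style of the new JvmtiTagMapTable::resize_if_needed (the threshold and logging are assumptions, not the committed body):

// Sketch: how a subclass might combine the two declarations above.
void JvmtiTagMapTable::resize_if_needed() {
  // Grow once there is more than one entry per bucket on average; the tag
  // map uses the large prime list since agents can tag millions of objects.
  if (number_of_entries() > table_size()) {
    int desired = calculate_resize(/*use_large_table_sizes*/ true);
    if (desired != table_size() && !resize(desired)) {
      log_warning(jvmti, table)("JvmtiTagMap table resize failed");
    }
  }
}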

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -43,6 +43,11 @@ public class disablecollection002a {
    //-------------------------------------------------- log procedures

    // preallocated so the message can still be logged after an OutOfMemoryError
    static String oom_message = "**> debuggee: caught: OutOfMemoryError";

    private static void log_oom() {
        log.display(oom_message);
    }

    private static void log1(String message) {
        log.display("**> debuggee: " + message);
    }
@ -126,13 +131,16 @@ public class disablecollection002a {
                    arr2[k] = new array2();
                }
            } catch (OutOfMemoryError e) {
                log1("caught: OutOfMemoryError");
                // release memory first so that logging cannot fail with another OutOfMemoryError
                for (int k = 0; k < 100; k++) {
                    arr2[k] = null;
                }
                log_oom();
            }
            methodForCommunication();
            break ;

        case 2:
            log1("runTime.gc();");
            log1("runTime.gc(); called");
            runTime.gc();
            methodForCommunication();
            break ;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -69,7 +69,16 @@ Java_nsk_jvmti_AttachOnDemand_attach021_attach021Target_setTagFor(JNIEnv * jni,
JNIEXPORT void JNICALL
Java_nsk_jvmti_AttachOnDemand_attach021_attach021Target_shutdownAgent(JNIEnv * jni,
        jclass klass) {
    nsk_jvmti_aod_disableEventAndFinish(agentName, JVMTI_EVENT_OBJECT_FREE, success, jvmti, jni);
    /* Disabling OBJECT_FREE events flushes any that are pending; the ObjectFree
       handler sets the global success variable to 1 for each pending event. */
    if (jvmti->SetEventNotificationMode(JVMTI_DISABLE,
                                        JVMTI_EVENT_OBJECT_FREE,
                                        NULL) != JVMTI_ERROR_NONE) {
        success = 0;
    }

    nsk_aod_agentFinished(jni, agentName, success);
}
void JNICALL objectFreeHandler(jvmtiEnv *jvmti, jlong tag) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -65,6 +65,11 @@ void shutdownAgent(JNIEnv* jni) {
JNIEXPORT jboolean JNICALL
Java_nsk_jvmti_AttachOnDemand_attach022_attach022Target_shutdownAgent(JNIEnv * jni,
        jclass klass, jint expectedTaggedObjectsCounter) {
    // Flush any pending ObjectFree events.
    if (!nsk_jvmti_aod_disableEvents(jvmti, testEvents, testEventsNumber))
        success = 0;

    if (taggedObjectsCounter != expectedTaggedObjectsCounter) {
        success = 0;
        NSK_COMPLAIN2("ERROR: unexpected taggedObjectsCounter: %d (expected value is %d)\n", taggedObjectsCounter, expectedTaggedObjectsCounter);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2004, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2004, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -54,6 +54,8 @@ public class ap01t001 extends DebugeeClass implements Cloneable {
    // this method returns a new ap01t001 instance using the JNI AllocObject function
    private static native Object allocObject();

    private native void flushObjectFreeEvents();

    private ap01t001[] ap01t001arr = new ap01t001[6];

    /* scaffold objects */
@ -104,6 +106,8 @@ public class ap01t001 extends DebugeeClass implements Cloneable {
log.display("Sync: GC called");
flushObjectFreeEvents();
status = checkStatus(status);
return status;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2004, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2004, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -191,6 +191,16 @@ Java_nsk_jvmti_scenarios_allocation_AP01_ap01t001_allocObject(JNIEnv* jni, jclas
return result;
}
JNIEXPORT void JNICALL
Java_nsk_jvmti_scenarios_allocation_AP01_ap01t001_flushObjectFreeEvents(JNIEnv* jni, jobject obj) {
    // OBJECT_FREE events are already enabled; enabling them again triggers
    // a flush of any pending events.
    if (!NSK_JVMTI_VERIFY(jvmti->SetEventNotificationMode(JVMTI_ENABLE,
                                                          JVMTI_EVENT_OBJECT_FREE,
                                                          NULL))) {
        nsk_jvmti_setFailStatus();
    }
}
static void JNICALL
agentProc(jvmtiEnv* jvmti, JNIEnv* jni, void* arg) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2004, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2004, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -45,6 +45,7 @@ public class ap12t001 extends DebugeeClass {
}
    private native void setTag(long tag);
    private native void flushObjectFreeEvents();

    private static ap12t001[] ap12t001arr = { new ap12t001(), new ap12t001() };
@ -73,6 +74,7 @@ public class ap12t001 extends DebugeeClass {
        for (int i = 0; i < GC_TRYS; i++)
            ClassUnloader.eatMemory();
        log.display("GC called");

        flushObjectFreeEvents();

        status = checkStatus(status);
        return status;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2004, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2004, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -84,6 +84,16 @@ Java_nsk_jvmti_scenarios_allocation_AP12_ap12t001_setTag(JNIEnv* jni, jobject ob
}
}
JNIEXPORT void JNICALL
Java_nsk_jvmti_scenarios_allocation_AP12_ap12t001_flushObjectFreeEvents(JNIEnv* jni, jobject obj) {
    // OBJECT_FREE events are already enabled; enabling them again triggers
    // a flush of any pending events.
    if (!NSK_JVMTI_VERIFY(jvmti->SetEventNotificationMode(JVMTI_ENABLE,
                                                          JVMTI_EVENT_OBJECT_FREE,
                                                          NULL))) {
        nsk_jvmti_setFailStatus();
    }
}
static void JNICALL
agentProc(jvmtiEnv* jvmti, JNIEnv* jni, void* arg) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2004, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2004, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -60,6 +60,10 @@ static jlong timeout = 0;
/* test objects */
static jthread thread = NULL;
static jclass klass = NULL;
static jobject testedObject = NULL;
const jlong TESTED_TAG_VALUE = 5555555L;
static bool testedObjectNotified = false;
/* event counts */
static int ClassFileLoadHookEventsCount = 0;
@ -259,6 +263,10 @@ ObjectFree(jvmtiEnv *jvmti_env, jlong tag) {
    ObjectFreeEventsCount++;
    NSK_DISPLAY1("ObjectFree event: tag=%s\n", jlong_to_string(tag, buffer));

    if (tag == TESTED_TAG_VALUE) {
        testedObjectNotified = true;
    }
}
/* ========================================================================== */
@ -416,6 +424,71 @@ ThreadObjectReference(jvmtiObjectReferenceKind reference_kind, jlong class_tag,
    return JVMTI_ITERATION_CONTINUE;
}
// Create the jni local ref in a new frame so it doesn't keep
// the tested object alive after the frame is popped.
class NewFrame {
    JNIEnv* _jni;

public:
    NewFrame(JNIEnv* jni) : _jni(jni) {
        _jni->PushLocalFrame(16);
    }

    ~NewFrame() {
        _jni->PopLocalFrame(NULL);
    }
};
static int checkObjectTagEvent(jvmtiEnv* jvmti, JNIEnv* jni) {
    jlong tag = TESTED_TAG_VALUE;
    jint count;
    jobject *res_objects = NULL;
    jlong *res_tags = NULL;

    NewFrame local_frame(jni);

    // Create a tested object to tag.
    if (!NSK_JNI_VERIFY(jni, (testedObject = jni->NewStringUTF("abcde")) != NULL))
        return NSK_FALSE;

    NSK_DISPLAY0("Checking positive: SetTag\n");
    if (!NSK_JVMTI_VERIFY(jvmti->SetTag(testedObject, TESTED_TAG_VALUE)))
        return NSK_FALSE;

    NSK_DISPLAY0("Checking positive: GetObjectsWithTags\n");
    if (!NSK_JVMTI_VERIFY(jvmti->GetObjectsWithTags(1, &tag, &count, &res_objects, &res_tags)))
        return NSK_FALSE;

    if (!NSK_VERIFY(count == 1))
        return NSK_FALSE;

    return NSK_TRUE;
}
// Test that after GC, the object was removed from the tag map table.
static int checkObjectFreeEvent(jvmtiEnv* jvmti) {
    jlong tag = TESTED_TAG_VALUE;
    jint count;
    jobject *res_objects = NULL;
    jlong *res_tags = NULL;

    // Make some GCs happen
    for (int i = 0; i < 5; i++) {
        if (!NSK_JVMTI_VERIFY(jvmti->ForceGarbageCollection()))
            return NSK_FALSE;
    }

    if (!NSK_JVMTI_VERIFY(jvmti->GetObjectsWithTags(1, &tag, &count, &res_objects, &res_tags)))
        return NSK_FALSE;

    if (!NSK_VERIFY(count == 0))
        return NSK_FALSE;

    if (!NSK_VERIFY(testedObjectNotified))
        return NSK_FALSE;

    return NSK_TRUE;
}

static int checkHeapFunctions(jvmtiEnv* jvmti) {
    const jlong TAG_VALUE = (123456789L);
    jlong tag;
@ -622,6 +695,9 @@ agentProc(jvmtiEnv* jvmti, JNIEnv* jni, void* arg) {
    if (!checkGetThreadCpuTime(jvmti))
        nsk_jvmti_setFailStatus();

    if (!checkObjectTagEvent(jvmti, jni))
        nsk_jvmti_setFailStatus();

    NSK_TRACE(jni->DeleteGlobalRef(thread));
/* resume debugee and wait for sync */
@ -630,7 +706,13 @@ agentProc(jvmtiEnv* jvmti, JNIEnv* jni, void* arg) {
    if (!nsk_jvmti_waitForSync(timeout))
        return;

    NSK_DISPLAY0("Testcase #3: check if the events are generated\n");
    /* this will also flush any pending ObjectFree events for the event check */
    NSK_DISPLAY0("Testcase #3: check if the object is freed in the tag map\n");
    if (!checkObjectFreeEvent(jvmti)) {
        nsk_jvmti_setFailStatus();
    }

    NSK_DISPLAY0("Testcase #4: check if the events are generated\n");
    if (!checkGeneratedEvents()) {
        nsk_jvmti_setFailStatus();
    }