8317535: Shenandoah: Remove unused code

Reviewed-by: rkennke, ysr
This commit is contained in:
William Kemper 2023-10-13 21:55:27 +00:00 committed by Y. Srinivas Ramakrishna
parent 7cb2e6d65b
commit e942f368c3
20 changed files with 6 additions and 185 deletions

View File

@@ -530,7 +530,7 @@ Node* ShenandoahBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue&
Node* ShenandoahBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
// 1: non-reference load, no additional barrier is needed
if (!access.is_oop()) {
return BarrierSetC2::load_at_resolved(access, val_type);;
return BarrierSetC2::load_at_resolved(access, val_type);
}
Node* load = BarrierSetC2::load_at_resolved(access, val_type);

View File

@@ -353,10 +353,6 @@ bool ShenandoahAllocationRate::is_spiking(double rate, double threshold) const {
return false;
}
// Convenience overload: instantaneous allocation rate measured at "now",
// delegating to the two-argument overload with the current elapsed time.
// NOTE(review): this overload appears to be the code deleted by this commit
// ("Remove unused code", JDK-8317535).
double ShenandoahAllocationRate::instantaneous_rate(size_t allocated) const {
return instantaneous_rate(os::elapsedTime(), allocated);
}
double ShenandoahAllocationRate::instantaneous_rate(double time, size_t allocated) const {
size_t last_value = _last_sample_value;
double last_time = _last_sample_time;

View File

@@ -37,7 +37,6 @@ class ShenandoahAllocationRate : public CHeapObj<mtGC> {
double sample(size_t allocated);
double instantaneous_rate(size_t allocated) const;
double upper_bound(double sds) const;
bool is_spiking(double rate, double threshold) const;

View File

@@ -220,4 +220,4 @@ void ShenandoahAssertNotForwardedClosure::do_oop(narrowOop* p) { do_oop_work(p);
void ShenandoahAssertNotForwardedClosure::do_oop(oop* p) { do_oop_work(p); }
#endif
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHCLOSURES_HPP
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHCLOSURES_INLINE_HPP

View File

@@ -36,70 +36,6 @@
#include "runtime/atomic.hpp"
#include "utilities/powerOfTwo.hpp"
// Builds one ShenandoahParallelCodeHeapIterator per code heap so all heaps
// can be walked in parallel. The iterator array is allocated on the C heap
// (mtGC) and released by the destructor.
ShenandoahParallelCodeCacheIterator::ShenandoahParallelCodeCacheIterator(const GrowableArray<CodeHeap*>* heaps) {
_length = heaps->length();
_iters = NEW_C_HEAP_ARRAY(ShenandoahParallelCodeHeapIterator, _length, mtGC);
for (int h = 0; h < _length; h++) {
_iters[h] = ShenandoahParallelCodeHeapIterator(heaps->at(h));
}
}
// Releases the per-heap iterator array allocated in the constructor.
// NOTE(review): the macro's type argument reads "ParallelCodeHeapIterator"
// while the array was allocated as ShenandoahParallelCodeHeapIterator —
// presumably benign if FREE_C_HEAP_ARRAY does not use the type for
// deallocation, but verify against the macro definition in allocation.hpp.
ShenandoahParallelCodeCacheIterator::~ShenandoahParallelCodeCacheIterator() {
FREE_C_HEAP_ARRAY(ParallelCodeHeapIterator, _iters);
}
// Applies the closure to every blob in every code heap. The actual
// splitting of work between concurrently calling threads happens inside
// each per-heap iterator's claiming scheme.
void ShenandoahParallelCodeCacheIterator::parallel_blobs_do(CodeBlobClosure* f) {
for (int c = 0; c < _length; c++) {
_iters[c].parallel_blobs_do(f);
}
}
// Starts a fresh walk over the given code heap: nothing claimed yet,
// walk not finished.
ShenandoahParallelCodeHeapIterator::ShenandoahParallelCodeHeapIterator(CodeHeap* heap) :
_heap(heap), _claimed_idx(0), _finished(false) {
}
// Walks all blobs of _heap, invoking the closure exactly once per blob even
// when called concurrently from many threads; see the block comment below
// for the claiming scheme. Must run at a safepoint.
void ShenandoahParallelCodeHeapIterator::parallel_blobs_do(CodeBlobClosure* f) {
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
/*
 * Parallel code heap walk.
 *
 * This code makes all threads scan all code heaps, but only one thread would execute the
 * closure on given blob. This is achieved by recording the "claimed" blocks: if a thread
 * had claimed the block, it can process all blobs in it. Others have to fast-forward to
 * next attempt without processing.
 *
 * Late threads would return immediately if iterator is finished.
 */
// Another thread already completed the full walk; nothing left to claim.
if (_finished) {
return;
}
int stride = 256; // educated guess
int stride_mask = stride - 1;
assert (is_power_of_2(stride), "sanity");
int count = 0;
bool process_block = true;
for (CodeBlob *cb = CodeCache::first_blob(_heap); cb != nullptr; cb = CodeCache::next_blob(_heap, cb)) {
int current = count++;
// At each stride boundary, try to claim the next `stride` blobs with a
// relaxed CAS on _claimed_idx; the winner processes the block, losers
// fast-forward until the next boundary.
if ((current & stride_mask) == 0) {
process_block = (current >= _claimed_idx) &&
(Atomic::cmpxchg(&_claimed_idx, current, current + stride, memory_order_relaxed) == current);
}
if (process_block) {
f->do_code_blob(cb);
#ifdef ASSERT
// Debug-only: cross-check each visited nmethod with the heap's verifier.
if (cb->is_nmethod())
Universe::heap()->verify_nmethod((nmethod*)cb);
#endif
}
}
_finished = true;
}
// Static storage for ShenandoahCodeRoots: the global nmethod table and the
// initial nmethod "disarmed" value.
ShenandoahNMethodTable* ShenandoahCodeRoots::_nmethod_table;
int ShenandoahCodeRoots::_disarmed_value = 1;
@@ -303,7 +239,6 @@ void ShenandoahCodeRoots::purge() {
}
ShenandoahCodeRootsIterator::ShenandoahCodeRootsIterator() :
_par_iterator(CodeCache::heaps()),
_table_snapshot(nullptr) {
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
MutexLocker locker(CodeCache_lock, Mutex::_no_safepoint_check_flag);

View File

@@ -39,38 +39,9 @@ class ShenandoahNMethodTable;
class ShenandoahNMethodTableSnapshot;
class WorkerThreads;
// Cooperative, multi-thread-safe walker over the blobs of one CodeHeap;
// threads claim blocks of blobs via CAS (see shenandoahCodeRoots.cpp).
// NOTE(review): this declaration appears to be deleted by this commit
// ("Remove unused code").
class ShenandoahParallelCodeHeapIterator {
friend class CodeCache;
private:
CodeHeap* _heap;
shenandoah_padding(0);
volatile int _claimed_idx;   // next unclaimed blob index, CAS-advanced in strides
volatile bool _finished;     // set once any thread completes the walk
shenandoah_padding(1);
public:
ShenandoahParallelCodeHeapIterator(CodeHeap* heap);
void parallel_blobs_do(CodeBlobClosure* f);
};
// Aggregates one per-heap iterator for every code heap so the whole code
// cache can be scanned in parallel. Non-copyable: owns a C-heap array.
class ShenandoahParallelCodeCacheIterator {
friend class CodeCache;
private:
ShenandoahParallelCodeHeapIterator* _iters;  // C-heap array, one entry per code heap
int _length;
NONCOPYABLE(ShenandoahParallelCodeCacheIterator);
public:
ShenandoahParallelCodeCacheIterator(const GrowableArray<CodeHeap*>* heaps);
~ShenandoahParallelCodeCacheIterator();
void parallel_blobs_do(CodeBlobClosure* f);
};
class ShenandoahCodeRootsIterator {
friend class ShenandoahCodeRoots;
protected:
ShenandoahParallelCodeCacheIterator _par_iterator;
ShenandoahSharedFlag _seq_claimed;
ShenandoahNMethodTableSnapshot* _table_snapshot;
public:
@@ -88,7 +59,6 @@ public:
static void initialize();
static void register_nmethod(nmethod* nm);
static void unregister_nmethod(nmethod* nm);
static void flush_nmethod(nmethod* nm);
static ShenandoahNMethodTable* table() {
return _nmethod_table;

View File

@@ -58,7 +58,6 @@ public:
ShenandoahHeap* heap = ShenandoahHeap::heap();
ShenandoahConcurrentWorkerSession worker_session(worker_id);
ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
ShenandoahReferenceProcessor* rp = heap->ref_processor();
assert(rp != nullptr, "need reference processor");
StringDedup::Requests requests;

View File

@@ -279,7 +279,7 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
}
end++;
};
}
size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();

View File

@@ -66,7 +66,6 @@ public:
assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
ShenandoahParallelWorkerSession worker_session(worker_id);
ShenandoahHeap* heap = ShenandoahHeap::heap();
ShenandoahUpdateRefsClosure cl;
if (_check_alive) {
ShenandoahForwardedIsAliveClosure is_alive;

View File

@@ -1893,14 +1893,6 @@ address ShenandoahHeap::in_cset_fast_test_addr() {
return (address) heap->collection_set()->biased_map_address();
}
// Raw address of the heap's _cancelled_gc flag — presumably for code that
// reads the flag directly rather than via the accessor. NOTE(review):
// deleted by this commit as unused.
address ShenandoahHeap::cancelled_gc_addr() {
return (address) ShenandoahHeap::heap()->_cancelled_gc.addr_of();
}
// Raw address of the heap's _gc_state flag word — presumably for code that
// reads the state directly rather than via the accessor. NOTE(review):
// deleted by this commit as unused.
address ShenandoahHeap::gc_state_addr() {
return (address) ShenandoahHeap::heap()->_gc_state.addr_of();
}
// Atomically reads the running count of bytes allocated since the current
// GC cycle started.
size_t ShenandoahHeap::bytes_allocated_since_gc_start() const {
return Atomic::load(&_bytes_allocated_since_gc_start);
}

View File

@@ -295,7 +295,6 @@ private:
public:
char gc_state() const;
static address gc_state_addr();
void set_concurrent_mark_in_progress(bool in_progress);
void set_evacuation_in_progress(bool in_progress);
@@ -316,7 +315,7 @@ public:
inline bool is_full_gc_in_progress() const;
inline bool is_full_gc_move_in_progress() const;
inline bool has_forwarded_objects() const;
inline bool is_gc_in_progress_mask(uint mask) const;
inline bool is_stw_gc_in_progress() const;
inline bool is_concurrent_strong_root_in_progress() const;
inline bool is_concurrent_weak_root_in_progress() const;
@@ -336,7 +335,6 @@ private:
bool try_cancel_gc();
public:
static address cancelled_gc_addr();
inline bool cancelled_gc() const;
inline bool check_cancelled_gc_and_yield(bool sts_active = true);
@@ -356,7 +354,6 @@ private:
void prepare_gc();
void prepare_regions_and_collection_set(bool concurrent);
// Evacuation
void prepare_evacuation(bool concurrent);
void evacuate_collection_set(bool concurrent);
// Concurrent root processing
void prepare_concurrent_roots();

View File

@@ -383,10 +383,6 @@ inline bool ShenandoahHeap::is_evacuation_in_progress() const {
return _gc_state.is_set(EVACUATION);
}
// Tests the gc-state flag word against `mask` (exact any-bit vs all-bits
// semantics depend on _gc_state.is_set — verify against its definition).
// NOTE(review): deleted by this commit as unused.
inline bool ShenandoahHeap::is_gc_in_progress_mask(uint mask) const {
return _gc_state.is_set(mask);
}
// True while a degenerated GC cycle is in progress.
inline bool ShenandoahHeap::is_degenerated_gc_in_progress() const {
return _degenerated_gc_in_progress.is_set();
}

View File

@@ -38,4 +38,4 @@ public:
static void register_jfr_type_serializers();
};
#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHJFRSUPPORT_HPP
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHJFRSUPPORT_HPP

View File

@@ -30,14 +30,10 @@
#include "gc/shenandoah/shenandoahOopClosures.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.hpp"
class ShenandoahCMDrainMarkingStackClosure;
// Base class for mark
// Mark class does not maintain states. Instead, mark states are
// maintained by task queues, mark bitmap and SATB buffers (concurrent mark)
class ShenandoahMark: public StackObj {
friend class ShenandoahCMDrainMarkingStackClosure;
protected:
ShenandoahObjToScanQueueSet* const _task_queues;

View File

@@ -52,39 +52,6 @@ ShenandoahNMethod::~ShenandoahNMethod() {
}
}
// OopClosure that scans oops and records whether any of them point into the
// current collection set. NOTE(review): deleted by this commit as unused.
class ShenandoahHasCSetOopClosure : public OopClosure {
private:
ShenandoahHeap* const _heap;
bool _has_cset_oops;   // sticky: set true on the first cset oop seen
public:
ShenandoahHasCSetOopClosure(ShenandoahHeap *heap) :
_heap(heap),
_has_cset_oops(false) {
}
bool has_cset_oops() const {
return _has_cset_oops;
}
void do_oop(oop* p) {
// Once a cset oop has been found, the flag is never updated again.
oop value = RawAccess<>::oop_load(p);
if (!_has_cset_oops && _heap->in_collection_set(value)) {
_has_cset_oops = true;
}
}
void do_oop(narrowOop* p) {
// Narrow oops are not expected through this closure.
ShouldNotReachHere();
}
};
// Returns true if any oop reachable via this entry's oops_do points into
// the current collection set. NOTE(review): deleted by this commit as unused.
bool ShenandoahNMethod::has_cset_oops(ShenandoahHeap *heap) {
ShenandoahHasCSetOopClosure cl(heap);
oops_do(&cl);
return cl.has_cset_oops();
}
void ShenandoahNMethod::update() {
ResourceMark rm;
bool non_immediate_oops = false;
@@ -209,10 +176,6 @@ public:
GrowableArray<oop*>* oops() {
return &_oops;
}
bool has_oops() {
return !_oops.is_empty();
}
};
void ShenandoahNMethod::assert_same_oops(bool allow_dead) {

View File

@@ -55,12 +55,6 @@ public:
// Update oops when the nmethod is re-registered
void update();
bool has_cset_oops(ShenandoahHeap* heap);
inline int oop_count() const;
inline bool has_oops() const;
inline void mark_unregistered();
inline bool is_unregistered() const;
static ShenandoahNMethod* for_nmethod(nmethod* nm);
@@ -77,7 +71,6 @@ public:
void assert_same_oops(bool allow_dead = false) NOT_DEBUG_RETURN;
private:
bool has_non_immed_oops() const { return _has_non_immed_oops; }
static void detect_reloc_oops(nmethod* nm, GrowableArray<oop*>& oops, bool& _has_non_immed_oops);
};

View File

@@ -39,18 +39,6 @@ ShenandoahReentrantLock* ShenandoahNMethod::lock() {
return &_lock;
}
// Total tracked oops for this nmethod: the separately recorded oops
// (_oops_count) plus the oops embedded in the nmethod's own oop section.
int ShenandoahNMethod::oop_count() const {
return _oops_count + static_cast<int>(nm()->oops_end() - nm()->oops_begin());
}
// True if this nmethod tracks at least one oop.
bool ShenandoahNMethod::has_oops() const {
return oop_count() > 0;
}
// Flags this entry as unregistered (no longer live in the nmethod table).
void ShenandoahNMethod::mark_unregistered() {
_unregistered = true;
}
// True once mark_unregistered() has been called on this entry.
bool ShenandoahNMethod::is_unregistered() const {
return _unregistered;
}

View File

@@ -402,7 +402,7 @@ T* ShenandoahReferenceProcessor::keep(oop reference, ReferenceType type, uint wo
}
template <typename T>
void ShenandoahReferenceProcessor::process_references(ShenandoahRefProcThreadLocal& refproc_data, uint worker_id) {;
void ShenandoahReferenceProcessor::process_references(ShenandoahRefProcThreadLocal& refproc_data, uint worker_id) {
log_trace(gc, ref)("Processing discovered list #%u : " PTR_FORMAT, worker_id, p2i(refproc_data.discovered_list_head<T>()));
T* list = refproc_data.discovered_list_addr<T>();
// The list head is basically a GC root, we need to resolve and update it,

View File

@@ -89,7 +89,6 @@ void ShenandoahCodeCacheRoots::code_blobs_do(CodeBlobClosure* blob_cl, uint work
// Caches the heap and timing phase, and begins worker-phase timing for
// `phase` via _worker_phase. NOTE(review): the hunk header (-89,7 +89,6)
// says one initializer line was deleted here but the rendered diff carries
// no +/- markers, so old and new lines may be interleaved — kept byte-
// identical; verify against the upstream commit.
ShenandoahRootProcessor::ShenandoahRootProcessor(ShenandoahPhaseTimings::Phase phase) :
_heap(ShenandoahHeap::heap()),
_phase(phase),
_worker_phase(phase) {
}

View File

@@ -134,7 +134,6 @@ private:
class ShenandoahRootProcessor : public StackObj {
private:
ShenandoahHeap* const _heap;
const ShenandoahPhaseTimings::Phase _phase;
const ShenandoahGCWorkerPhase _worker_phase;
public:
ShenandoahRootProcessor(ShenandoahPhaseTimings::Phase phase);