8329629: GC interfaces should work directly against nmethod instead of CodeBlob

Reviewed-by: ayang, eosterlund
Stefan Karlsson 2024-04-09 12:27:55 +00:00
parent 5ea21c3a61
commit 87131fb2f7
72 changed files with 344 additions and 424 deletions

@@ -685,6 +685,14 @@ void CodeCache::nmethods_do(void f(nmethod* nm)) {
}
}
void CodeCache::nmethods_do(NMethodClosure* cl) {
assert_locked_or_safepoint(CodeCache_lock);
NMethodIterator iter(NMethodIterator::all_blobs);
while(iter.next()) {
cl->do_nmethod(iter.method());
}
}
void CodeCache::metadata_do(MetadataClosure* f) {
assert_locked_or_safepoint(CodeCache_lock);
NMethodIterator iter(NMethodIterator::all_blobs);
@@ -883,20 +891,6 @@ void CodeCache::do_unloading(bool unloading_occurred) {
}
}
void CodeCache::blobs_do(CodeBlobClosure* f) {
assert_locked_or_safepoint(CodeCache_lock);
FOR_ALL_ALLOCABLE_HEAPS(heap) {
FOR_ALL_BLOBS(cb, *heap) {
f->do_code_blob(cb);
#ifdef ASSERT
if (cb->is_nmethod()) {
Universe::heap()->verify_nmethod((nmethod*)cb);
}
#endif //ASSERT
}
}
}
void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
NMethodIterator iter(NMethodIterator::only_not_unloading);

@@ -153,8 +153,8 @@ class CodeCache : AllStatic {
static bool contains(void *p); // returns whether p is included
static bool contains(nmethod* nm); // returns whether nm is included
static void blobs_do(void f(CodeBlob* cb)); // iterates over all CodeBlobs
static void blobs_do(CodeBlobClosure* f); // iterates over all CodeBlobs
static void nmethods_do(void f(nmethod* nm)); // iterates over all nmethods
static void nmethods_do(NMethodClosure* cl); // iterates over all nmethods
static void metadata_do(MetadataClosure* f); // iterates over metadata in alive nmethods
// Lookup

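For orientation, a minimal sketch of a caller of the new entry point. NMethodClosure and CodeCache::nmethods_do(NMethodClosure*) are the interfaces touched above; the counting closure and the helper function are made up for illustration:

#include "code/codeCache.hpp"
#include "memory/iterator.hpp"

// Hypothetical diagnostic closure: counts nmethods in the code cache.
class CountNMethodsClosure : public NMethodClosure {
  size_t _count;
public:
  CountNMethodsClosure() : _count(0) {}
  void do_nmethod(nmethod* nm) override {
    assert(nm != nullptr, "the iterator only hands out nmethods");
    _count++;
  }
  size_t count() const { return _count; }
};

// Caller must hold CodeCache_lock or be at a safepoint, matching the
// assert_locked_or_safepoint(CodeCache_lock) in nmethods_do above.
static size_t count_code_cache_nmethods() {
  CountNMethodsClosure cl;
  CodeCache::nmethods_do(&cl);
  return cl.count();
}
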
@@ -152,7 +152,7 @@ public:
clean(always_true);
}
void iterate_at_safepoint(CodeBlobClosure* blk) {
void iterate_at_safepoint(NMethodClosure* blk) {
assert_at_safepoint();
// A lot of code root sets are typically empty.
if (is_empty()) {
@@ -161,7 +161,7 @@ public:
auto do_value =
[&] (nmethod** value) {
blk->do_code_blob(*value);
blk->do_nmethod(*value);
return true;
};
_table_scanner.do_safepoint_scan(do_value);
@@ -288,7 +288,7 @@ void G1CodeRootSet::reset_table_scanner() {
_table->reset_table_scanner();
}
void G1CodeRootSet::nmethods_do(CodeBlobClosure* blk) const {
void G1CodeRootSet::nmethods_do(NMethodClosure* blk) const {
DEBUG_ONLY(_is_iterating = true;)
_table->iterate_at_safepoint(blk);
DEBUG_ONLY(_is_iterating = false;)
@@ -317,14 +317,14 @@ class CleanCallback : public StackObj {
};
PointsIntoHRDetectionClosure _detector;
CodeBlobToOopClosure _blobs;
NMethodToOopClosure _nmethod_cl;
public:
CleanCallback(HeapRegion* hr) : _detector(hr), _blobs(&_detector, !CodeBlobToOopClosure::FixRelocations) {}
CleanCallback(HeapRegion* hr) : _detector(hr), _nmethod_cl(&_detector, !NMethodToOopClosure::FixRelocations) {}
bool operator()(nmethod** value) {
_detector._points_into = false;
_blobs.do_code_blob(*value);
_nmethod_cl.do_nmethod(*value);
return !_detector._points_into;
}
};

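A note on the !NMethodToOopClosure::FixRelocations idiom used by CleanCallback above: FixRelocations is a named boolean constant, so negating it spells out "read-only scan, do not patch oop relocations". A sketch of both modes, with a hypothetical no-op oop closure:

// Hypothetical read-only oop closure, for illustration only.
class TouchOopClosure : public OopClosure {
public:
  void do_oop(oop* p)       override { /* inspect *p, mutate nothing */ }
  void do_oop(narrowOop* p) override { /* same for compressed oops */ }
};

void scan_one_nmethod(nmethod* nm, bool moving_gc) {
  TouchOopClosure touch;
  NMethodToOopClosure scan_only(&touch, !NMethodToOopClosure::FixRelocations); // e.g. CleanCallback
  NMethodToOopClosure fixing(&touch, NMethodToOopClosure::FixRelocations);     // e.g. full-GC adjust phases
  (moving_gc ? &fixing : &scan_only)->do_nmethod(nm);
}
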
@@ -50,7 +50,7 @@ class G1CodeRootSet {
// Prepare for MT iteration. Must be called before nmethods_do.
void reset_table_scanner();
void nmethods_do(CodeBlobClosure* blk) const;
void nmethods_do(NMethodClosure* blk) const;
// Remove all nmethods which no longer contain pointers into our "owner" region.
void clean(HeapRegion* owner);

@@ -2518,7 +2518,7 @@ void G1CollectedHeap::unload_classes_and_code(const char* description, BoolObjec
ClassUnloadingContext ctx(workers()->active_workers(),
false /* unregister_nmethods_during_purge */,
false /* lock_codeblob_free_separately */);
false /* lock_nmethod_free_separately */);
{
CodeCache::UnlinkingScope scope(is_alive);
bool unloading_occurred = SystemDictionary::do_unloading(timer);
@@ -2535,7 +2535,7 @@ void G1CollectedHeap::unload_classes_and_code(const char* description, BoolObjec
}
{
GCTraceTime(Debug, gc, phases) t("Free Code Blobs", timer);
ctx.free_code_blobs();
ctx.free_nmethods();
}
{
GCTraceTime(Debug, gc, phases) t("Purge Class Loader Data", timer);
@@ -3016,24 +3016,22 @@ void G1CollectedHeap::update_used_after_gc(bool evacuation_failed) {
}
}
class RebuildCodeRootClosure: public CodeBlobClosure {
class RebuildCodeRootClosure: public NMethodClosure {
G1CollectedHeap* _g1h;
public:
RebuildCodeRootClosure(G1CollectedHeap* g1h) :
_g1h(g1h) {}
void do_code_blob(CodeBlob* cb) {
nmethod* nm = cb->as_nmethod_or_null();
if (nm != nullptr) {
_g1h->register_nmethod(nm);
}
void do_nmethod(nmethod* nm) {
assert(nm != nullptr, "Sanity");
_g1h->register_nmethod(nm);
}
};
void G1CollectedHeap::rebuild_code_roots() {
RebuildCodeRootClosure blob_cl(this);
CodeCache::blobs_do(&blob_cl);
RebuildCodeRootClosure nmethod_cl(this);
CodeCache::nmethods_do(&nmethod_cl);
}
void G1CollectedHeap::initialize_serviceability() {

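The RebuildCodeRootClosure change above is the shape this commit repeats across all collectors: per-closure filtering through as_nmethod_or_null() disappears because the iteration sources now promise to deliver only nmethods. Condensed before/after sketch (process() stands in for the closure's real work):

// Before: every CodeBlobClosure had to filter out non-nmethod blobs.
void do_code_blob(CodeBlob* cb) {
  nmethod* nm = cb->as_nmethod_or_null();
  if (nm == nullptr) {
    return; // adapters, stubs and buffer blobs silently skipped here
  }
  process(nm);
}

// After: an NMethodClosure receives an nmethod by contract.
void do_nmethod(nmethod* nm) {
  assert(nm != nullptr, "Sanity");
  process(nm);
}
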
@@ -101,7 +101,7 @@ void G1FullGCAdjustTask::work(uint worker_id) {
}
CLDToOopClosure adjust_cld(&_adjust, ClassLoaderData::_claim_stw_fullgc_adjust);
CodeBlobToOopClosure adjust_code(&_adjust, CodeBlobToOopClosure::FixRelocations);
NMethodToOopClosure adjust_code(&_adjust, NMethodToOopClosure::FixRelocations);
_root_processor.process_all_roots(&_adjust, &adjust_cld, &adjust_code);
// Now adjust pointers region by region

@@ -42,7 +42,7 @@ void G1FullGCMarkTask::work(uint worker_id) {
Ticks start = Ticks::now();
ResourceMark rm;
G1FullGCMarker* marker = collector()->marker(worker_id);
MarkingCodeBlobClosure code_closure(marker->mark_closure(), !CodeBlobToOopClosure::FixRelocations, true /* keepalive nmethods */);
MarkingNMethodClosure code_closure(marker->mark_closure(), !NMethodToOopClosure::FixRelocations, true /* keepalive nmethods */);
if (ClassUnloading) {
_root_processor.process_strong_roots(marker->mark_closure(),

@@ -286,7 +286,7 @@ void HeapRegion::remove_code_root(nmethod* nm) {
rem_set()->remove_code_root(nm);
}
void HeapRegion::code_roots_do(CodeBlobClosure* blk) const {
void HeapRegion::code_roots_do(NMethodClosure* blk) const {
rem_set()->code_roots_do(blk);
}
@@ -328,28 +328,27 @@ public:
bool has_oops_in_region() { return _has_oops_in_region; }
};
class VerifyCodeRootCodeBlobClosure: public CodeBlobClosure {
class VerifyCodeRootNMethodClosure: public NMethodClosure {
const HeapRegion* _hr;
bool _failures;
public:
VerifyCodeRootCodeBlobClosure(const HeapRegion* hr) :
VerifyCodeRootNMethodClosure(const HeapRegion* hr) :
_hr(hr), _failures(false) {}
void do_code_blob(CodeBlob* cb) {
nmethod* nm = (cb == nullptr) ? nullptr : cb->as_nmethod_or_null();
if (nm != nullptr) {
// Verify that the nemthod is live
VerifyCodeRootOopClosure oop_cl(_hr);
nm->oops_do(&oop_cl);
if (!oop_cl.has_oops_in_region()) {
log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has nmethod " PTR_FORMAT " in its code roots with no pointers into region",
p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
_failures = true;
} else if (oop_cl.failures()) {
log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has other failures for nmethod " PTR_FORMAT,
p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
_failures = true;
}
void do_nmethod(nmethod* nm) {
assert(nm != nullptr, "Sanity");
// Verify that the nmethod is live
VerifyCodeRootOopClosure oop_cl(_hr);
nm->oops_do(&oop_cl);
if (!oop_cl.has_oops_in_region()) {
log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has nmethod " PTR_FORMAT " in its code roots with no pointers into region",
p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
_failures = true;
} else if (oop_cl.failures()) {
log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has other failures for nmethod " PTR_FORMAT,
p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
_failures = true;
}
}
@@ -395,10 +394,10 @@ bool HeapRegion::verify_code_roots(VerifyOption vo) const {
return has_code_roots;
}
VerifyCodeRootCodeBlobClosure cb_cl(this);
code_roots_do(&cb_cl);
VerifyCodeRootNMethodClosure nm_cl(this);
code_roots_do(&nm_cl);
return cb_cl.failures();
return nm_cl.failures();
}
void HeapRegion::print() const { print_on(tty); }

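Putting the verification pieces above together, a sketch of how one region's code roots are checked (hr stands in for the HeapRegion* under verification):

VerifyCodeRootNMethodClosure nm_cl(hr);
hr->code_roots_do(&nm_cl); // applies do_nmethod() to each registered code root
if (nm_cl.failures()) {
  // some nmethod had no pointer into hr, or its oops failed verification
}
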
@@ -540,9 +540,9 @@ public:
void add_code_root(nmethod* nm);
void remove_code_root(nmethod* nm);
// Applies blk->do_code_blob() to each of the entries in
// Applies blk->do_nmethod() to each of the entries in
// the code roots list for this region
void code_roots_do(CodeBlobClosure* blk) const;
void code_roots_do(NMethodClosure* blk) const;
uint node_index() const { return _node_index; }
void set_node_index(uint node_index) { _node_index = node_index; }

@@ -119,7 +119,7 @@ void HeapRegionRemSet::bulk_remove_code_roots() {
_code_roots.bulk_remove();
}
void HeapRegionRemSet::code_roots_do(CodeBlobClosure* blk) const {
void HeapRegionRemSet::code_roots_do(NMethodClosure* blk) const {
_code_roots.nmethods_do(blk);
}

@@ -40,7 +40,7 @@ class outputStream;
class HeapRegionRemSet : public CHeapObj<mtGC> {
friend class VMStructs;
// A set of code blobs (nmethods) whose code contains pointers into
// A set of nmethods whose code contains pointers into
// the region that owns this RSet.
G1CodeRootSet _code_roots;
@@ -152,8 +152,8 @@ public:
void remove_code_root(nmethod* nm);
void bulk_remove_code_roots();
// Applies blk->do_code_blob() to each of the entries in _code_roots
void code_roots_do(CodeBlobClosure* blk) const;
// Applies blk->do_nmethod() to each of the entries in _code_roots
void code_roots_do(NMethodClosure* blk) const;
// Clean out code roots not having an oop pointing into this region any more.
void clean_code_roots(HeapRegion* hr);

@@ -135,19 +135,17 @@ public:
bool failures() { return _failures; }
};
class G1VerifyCodeRootBlobClosure: public CodeBlobClosure {
class G1VerifyCodeRootNMethodClosure: public NMethodClosure {
G1VerifyCodeRootOopClosure* _oop_cl;
public:
G1VerifyCodeRootBlobClosure(G1VerifyCodeRootOopClosure* oop_cl):
G1VerifyCodeRootNMethodClosure(G1VerifyCodeRootOopClosure* oop_cl):
_oop_cl(oop_cl) {}
void do_code_blob(CodeBlob* cb) {
nmethod* nm = cb->as_nmethod_or_null();
if (nm != nullptr) {
_oop_cl->set_nmethod(nm);
nm->oops_do(_oop_cl);
}
void do_nmethod(nmethod* nm) {
assert(nm != nullptr, "Sanity");
_oop_cl->set_nmethod(nm);
nm->oops_do(_oop_cl);
}
};
@@ -340,7 +338,7 @@ void G1HeapVerifier::verify(VerifyOption vo) {
// system dictionary, class loader data graph, the string table
// and the nmethods in the code cache.
G1VerifyCodeRootOopClosure codeRootsCl(_g1h, &rootsCl, vo);
G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
G1VerifyCodeRootNMethodClosure blobsCl(&codeRootsCl);
{
G1RootProcessor root_processor(_g1h, 1);

@@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "gc/g1/g1CodeBlobClosure.hpp"
#include "gc/g1/g1NMethodClosure.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1HeapRegion.hpp"
@@ -35,7 +35,7 @@
#include "oops/oop.inline.hpp"
template <typename T>
void G1CodeBlobClosure::HeapRegionGatheringOopClosure::do_oop_work(T* p) {
void G1NMethodClosure::HeapRegionGatheringOopClosure::do_oop_work(T* p) {
_work->do_oop(p);
T oop_or_narrowoop = RawAccess<>::oop_load(p);
if (!CompressedOops::is_null(oop_or_narrowoop)) {
@@ -46,16 +46,16 @@ void G1CodeBlobClosure::HeapRegionGatheringOopClosure::do_oop_work(T* p) {
}
}
void G1CodeBlobClosure::HeapRegionGatheringOopClosure::do_oop(oop* o) {
void G1NMethodClosure::HeapRegionGatheringOopClosure::do_oop(oop* o) {
do_oop_work(o);
}
void G1CodeBlobClosure::HeapRegionGatheringOopClosure::do_oop(narrowOop* o) {
void G1NMethodClosure::HeapRegionGatheringOopClosure::do_oop(narrowOop* o) {
do_oop_work(o);
}
template<typename T>
void G1CodeBlobClosure::MarkingOopClosure::do_oop_work(T* p) {
void G1NMethodClosure::MarkingOopClosure::do_oop_work(T* p) {
T oop_or_narrowoop = RawAccess<>::oop_load(p);
if (!CompressedOops::is_null(oop_or_narrowoop)) {
oop o = CompressedOops::decode_not_null(oop_or_narrowoop);
@@ -63,18 +63,18 @@ void G1CodeBlobClosure::MarkingOopClosure::do_oop_work(T* p) {
}
}
G1CodeBlobClosure::MarkingOopClosure::MarkingOopClosure(uint worker_id) :
G1NMethodClosure::MarkingOopClosure::MarkingOopClosure(uint worker_id) :
_cm(G1CollectedHeap::heap()->concurrent_mark()), _worker_id(worker_id) { }
void G1CodeBlobClosure::MarkingOopClosure::do_oop(oop* o) {
void G1NMethodClosure::MarkingOopClosure::do_oop(oop* o) {
do_oop_work(o);
}
void G1CodeBlobClosure::MarkingOopClosure::do_oop(narrowOop* o) {
void G1NMethodClosure::MarkingOopClosure::do_oop(narrowOop* o) {
do_oop_work(o);
}
void G1CodeBlobClosure::do_evacuation_and_fixup(nmethod* nm) {
void G1NMethodClosure::do_evacuation_and_fixup(nmethod* nm) {
_oc.set_nm(nm);
// Evacuate objects pointed to by the nmethod
@@ -93,7 +93,7 @@ void G1CodeBlobClosure::do_evacuation_and_fixup(nmethod* nm) {
nm->fix_oop_relocations();
}
void G1CodeBlobClosure::do_marking(nmethod* nm) {
void G1NMethodClosure::do_marking(nmethod* nm) {
// Mark through oops in the nmethod
nm->oops_do(&_marking_oc);
@@ -109,10 +109,10 @@ }
}
class G1NmethodProcessor : public nmethod::OopsDoProcessor {
G1CodeBlobClosure* _cl;
G1NMethodClosure* _cl;
public:
G1NmethodProcessor(G1CodeBlobClosure* cl) : _cl(cl) { }
G1NmethodProcessor(G1NMethodClosure* cl) : _cl(cl) { }
void do_regular_processing(nmethod* nm) {
_cl->do_evacuation_and_fixup(nm);
@@ -123,11 +123,8 @@ public:
}
};
void G1CodeBlobClosure::do_code_blob(CodeBlob* cb) {
nmethod* nm = cb->as_nmethod_or_null();
if (nm == nullptr) {
return;
}
void G1NMethodClosure::do_nmethod(nmethod* nm) {
assert(nm != nullptr, "Sanity");
G1NmethodProcessor cl(this);

@@ -22,8 +22,8 @@
*
*/
#ifndef SHARE_GC_G1_G1CODEBLOBCLOSURE_HPP
#define SHARE_GC_G1_G1CODEBLOBCLOSURE_HPP
#ifndef SHARE_GC_G1_G1NMETHODCLOSURE_HPP
#define SHARE_GC_G1_G1NMETHODCLOSURE_HPP
#include "gc/g1/g1CollectedHeap.hpp"
#include "memory/iterator.hpp"
@@ -31,7 +31,7 @@
class G1ConcurrentMark;
class nmethod;
class G1CodeBlobClosure : public CodeBlobClosure {
class G1NMethodClosure : public NMethodClosure {
// Gather nmethod remembered set entries.
class HeapRegionGatheringOopClosure : public OopClosure {
G1CollectedHeap* _g1h;
@@ -72,13 +72,13 @@ class G1CodeBlobClosure : public CodeBlobClosure {
bool _strong;
public:
G1CodeBlobClosure(uint worker_id, OopClosure* oc, bool strong) :
G1NMethodClosure(uint worker_id, OopClosure* oc, bool strong) :
_oc(oc), _marking_oc(worker_id), _strong(strong) { }
void do_evacuation_and_fixup(nmethod* nm);
void do_marking(nmethod* nm);
void do_code_blob(CodeBlob* cb);
void do_nmethod(nmethod* nm);
};
#endif // SHARE_GC_G1_G1CODEBLOBCLOSURE_HPP
#endif // SHARE_GC_G1_G1NMETHODCLOSURE_HPP

@@ -734,17 +734,17 @@ void G1RemSet::scan_heap_roots(G1ParScanThreadState* pss,
p->record_or_add_thread_work_item(scan_phase, worker_id, cl.heap_roots_found(), G1GCPhaseTimes::ScanHRFoundRoots);
}
// Wrapper around a CodeBlobClosure to count the number of code blobs scanned.
class G1ScanAndCountCodeBlobClosure : public CodeBlobClosure {
CodeBlobClosure* _cl;
// Wrapper around a NMethodClosure to count the number of nmethods scanned.
class G1ScanAndCountNMethodClosure : public NMethodClosure {
NMethodClosure* _cl;
size_t _count;
public:
G1ScanAndCountCodeBlobClosure(CodeBlobClosure* cl) : _cl(cl), _count(0) {
G1ScanAndCountNMethodClosure(NMethodClosure* cl) : _cl(cl), _count(0) {
}
void do_code_blob(CodeBlob* cb) override {
_cl->do_code_blob(cb);
void do_nmethod(nmethod* nm) override {
_cl->do_nmethod(nm);
_count++;
}
@@ -820,7 +820,7 @@ public:
{
EventGCPhaseParallel event;
G1EvacPhaseWithTrimTimeTracker timer(_pss, _code_root_scan_time, _code_trim_partially_time);
G1ScanAndCountCodeBlobClosure cl(_pss->closures()->weak_codeblobs());
G1ScanAndCountNMethodClosure cl(_pss->closures()->weak_nmethods());
// Scan the code root list attached to the current region
r->code_roots_do(&cl);

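The wrapper above is a small decorator: it forwards each nmethod to the wrapped NMethodClosure and counts the calls for the phase-times report. Usage sketch (inner is any NMethodClosure; r is the region being scanned):

G1ScanAndCountNMethodClosure counting(&inner);
r->code_roots_do(&counting);
// _count now holds the number of code roots scanned, to be recorded
// into the worker's phase times by the caller
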
@@ -39,7 +39,6 @@
class BitMap;
class CardTableBarrierSet;
class CodeBlobClosure;
class G1AbstractSubTask;
class G1CollectedHeap;
class G1CMBitMap;

@@ -42,8 +42,8 @@ public:
CLDClosure* weak_clds() { return &_closures._clds; }
CLDClosure* strong_clds() { return &_closures._clds; }
CodeBlobClosure* strong_codeblobs() { return &_closures._codeblobs; }
CodeBlobClosure* weak_codeblobs() { return &_closures._codeblobs; }
NMethodClosure* strong_nmethods() { return &_closures._nmethods; }
NMethodClosure* weak_nmethods() { return &_closures._nmethods; }
};
// Closures used during concurrent start.
@@ -65,8 +65,8 @@ public:
CLDClosure* weak_clds() { return &_weak._clds; }
CLDClosure* strong_clds() { return &_strong._clds; }
CodeBlobClosure* strong_codeblobs() { return &_strong._codeblobs; }
CodeBlobClosure* weak_codeblobs() { return &_weak._codeblobs; }
NMethodClosure* strong_nmethods() { return &_strong._nmethods; }
NMethodClosure* weak_nmethods() { return &_weak._nmethods; }
};
G1EvacuationRootClosures* G1EvacuationRootClosures::create_root_closures(G1CollectedHeap* g1h,

@@ -40,14 +40,14 @@ public:
virtual CLDClosure* weak_clds() = 0;
virtual CLDClosure* strong_clds() = 0;
// Applied to code blobs reachable as strong roots.
virtual CodeBlobClosure* strong_codeblobs() = 0;
// Applied to nmethods reachable as strong roots.
virtual NMethodClosure* strong_nmethods() = 0;
};
class G1EvacuationRootClosures : public G1RootClosures {
public:
// Applied to code blobs treated as weak roots.
virtual CodeBlobClosure* weak_codeblobs() = 0;
// Applied to nmethods treated as weak roots.
virtual NMethodClosure* weak_nmethods() = 0;
static G1EvacuationRootClosures* create_root_closures(G1CollectedHeap* g1h,
G1ParScanThreadState* pss,

@@ -27,12 +27,12 @@
#include "classfile/stringTable.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CodeBlobClosure.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1GCParPhaseTimesTracker.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HeapRegion.inline.hpp"
#include "gc/g1/g1NMethodClosure.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RootClosures.hpp"
@@ -80,23 +80,23 @@ void G1RootProcessor::evacuate_roots(G1ParScanThreadState* pss, uint worker_id)
class StrongRootsClosures : public G1RootClosures {
OopClosure* _roots;
CLDClosure* _clds;
CodeBlobClosure* _blobs;
NMethodClosure* _nmethods;
public:
StrongRootsClosures(OopClosure* roots, CLDClosure* clds, CodeBlobClosure* blobs) :
_roots(roots), _clds(clds), _blobs(blobs) {}
StrongRootsClosures(OopClosure* roots, CLDClosure* clds, NMethodClosure* nmethods) :
_roots(roots), _clds(clds), _nmethods(nmethods) {}
OopClosure* strong_oops() { return _roots; }
CLDClosure* weak_clds() { return nullptr; }
CLDClosure* strong_clds() { return _clds; }
CodeBlobClosure* strong_codeblobs() { return _blobs; }
NMethodClosure* strong_nmethods() { return _nmethods; }
};
void G1RootProcessor::process_strong_roots(OopClosure* oops,
CLDClosure* clds,
CodeBlobClosure* blobs) {
StrongRootsClosures closures(oops, clds, blobs);
NMethodClosure* nmethods) {
StrongRootsClosures closures(oops, clds, nmethods);
process_java_roots(&closures, nullptr, 0);
process_vm_roots(&closures, nullptr, 0);
@@ -123,20 +123,20 @@ public:
CLDClosure* weak_clds() { return _clds; }
CLDClosure* strong_clds() { return _clds; }
// We don't want to visit code blobs more than once, so we return null for the
// We don't want to visit nmethods more than once, so we return null for the
// strong case and walk the entire code cache as a separate step.
CodeBlobClosure* strong_codeblobs() { return nullptr; }
NMethodClosure* strong_nmethods() { return nullptr; }
};
void G1RootProcessor::process_all_roots(OopClosure* oops,
CLDClosure* clds,
CodeBlobClosure* blobs) {
NMethodClosure* nmethods) {
AllRootsClosures closures(oops, clds);
process_java_roots(&closures, nullptr, 0);
process_vm_roots(&closures, nullptr, 0);
process_code_cache_roots(blobs, nullptr, 0);
process_code_cache_roots(nmethods, nullptr, 0);
// refProcessor is not needed since we are inside a safe point
_process_strong_tasks.all_tasks_claimed(G1RP_PS_refProcessor_oops_do);
@@ -149,7 +149,7 @@ void G1RootProcessor::process_java_roots(G1RootClosures* closures,
// processes nmethods in two ways, as "strong" and "weak" nmethods.
//
// 1) Strong nmethods are reachable from the thread stack frames. G1 applies
// the G1RootClosures::strong_codeblobs() closure on them. The closure
// the G1RootClosures::strong_nmethods() closure on them. The closure
// iterates over all oops embedded inside each nmethod, and performs 3
// operations:
// a) evacuates; relocate objects outside of collection set
@@ -159,7 +159,7 @@ void G1RootProcessor::process_java_roots(G1RootClosures* closures,
// classes will not be unloaded.
//
// 2) Weak nmethods are reachable only from the code root remembered set (see
// G1CodeRootSet). G1 applies the G1RootClosures::weak_codeblobs() closure on
// G1CodeRootSet). G1 applies the G1RootClosures::weak_nmethods() closure on
// them. The closure iterates over all oops embedded inside each nmethod, and
// performs 2 operations: a) and b).
// Since these oops are *not* marked, their classes can potentially be
@@ -179,7 +179,7 @@ void G1RootProcessor::process_java_roots(G1RootClosures* closures,
bool is_par = n_workers() > 1;
Threads::possibly_parallel_oops_do(is_par,
closures->strong_oops(),
closures->strong_codeblobs());
closures->strong_nmethods());
}
if (_process_strong_tasks.try_claim_task(G1RP_PS_ClassLoaderDataGraph_oops_do)) {
@@ -200,14 +200,14 @@ void G1RootProcessor::process_vm_roots(G1RootClosures* closures,
}
}
void G1RootProcessor::process_code_cache_roots(CodeBlobClosure* code_closure,
void G1RootProcessor::process_code_cache_roots(NMethodClosure* nmethod_closure,
G1GCPhaseTimes* phase_times,
uint worker_id) {
// We do not track timing of this phase. It is only required with class unloading
// disabled, which is an extremely uncommon use case and would otherwise only ever
// show up as "skipped" in the logs.
if (_process_strong_tasks.try_claim_task(G1RP_PS_CodeCache_oops_do)) {
CodeCache::blobs_do(code_closure);
CodeCache::nmethods_do(nmethod_closure);
}
}

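The long comment above boils down to two application sites for the renamed accessors. Sketched here, with pieces lifted from the surrounding hunks and the glue purely illustrative:

// 1) Strong nmethods, found by walking thread stacks:
Threads::possibly_parallel_oops_do(is_par,
                                   closures->strong_oops(),
                                   closures->strong_nmethods()); // evacuate + fix + mark

// 2) Weak nmethods, found via each region's code root remembered set:
G1ScanAndCountNMethodClosure cl(pss->closures()->weak_nmethods());
r->code_roots_do(&cl);                                           // evacuate + fix only
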
@@ -31,7 +31,6 @@
#include "runtime/mutex.hpp"
class CLDClosure;
class CodeBlobClosure;
class G1CollectedHeap;
class G1EvacuationRootClosures;
class G1GCPhaseTimes;
@@ -68,7 +67,7 @@ class G1RootProcessor : public StackObj {
G1GCPhaseTimes* phase_times,
uint worker_id);
void process_code_cache_roots(CodeBlobClosure* code_closure,
void process_code_cache_roots(NMethodClosure* nmethods_closure,
G1GCPhaseTimes* phase_times,
uint worker_id);
@@ -83,12 +82,12 @@ public:
// Apply oops, clds and blobs to all strongly reachable roots in the system
void process_strong_roots(OopClosure* oops,
CLDClosure* clds,
CodeBlobClosure* blobs);
NMethodClosure* nmethods);
// Apply oops, clds and blobs to strongly and weakly reachable roots in the system
void process_all_roots(OopClosure* oops,
CLDClosure* clds,
CodeBlobClosure* blobs);
NMethodClosure* nmethods);
// Number of worker threads used by the root processor.
uint n_workers() const;

@@ -22,7 +22,7 @@
*
*/
#include "gc/g1/g1CodeBlobClosure.hpp"
#include "gc/g1/g1NMethodClosure.hpp"
#include "gc/g1/g1OopClosures.hpp"
#include "memory/iterator.hpp"
@@ -45,12 +45,12 @@ public:
G1ParCopyClosure<G1BarrierNoOptRoots, should_mark> _oops_in_nmethod;
G1CLDScanClosure _clds;
G1CodeBlobClosure _codeblobs;
G1NMethodClosure _nmethods;
G1SharedClosures(G1CollectedHeap* g1h, G1ParScanThreadState* pss, bool process_only_dirty) :
_oops(g1h, pss),
_oops_in_cld(g1h, pss),
_oops_in_nmethod(g1h, pss),
_clds(&_oops_in_cld, process_only_dirty),
_codeblobs(pss->worker_id(), &_oops_in_nmethod, should_mark) {}
_nmethods(pss->worker_id(), &_oops_in_nmethod, should_mark) {}
};

@@ -1408,7 +1408,7 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
ClassUnloadingContext ctx(1 /* num_nmethod_unlink_workers */,
false /* unregister_nmethods_during_purge */,
false /* lock_codeblob_free_separately */);
false /* lock_nmethod_free_separately */);
marking_phase(&_gc_tracer);
@@ -1560,7 +1560,7 @@ public:
ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(_worker_id);
PCMarkAndPushClosure mark_and_push_closure(cm);
MarkingCodeBlobClosure mark_and_push_in_blobs(&mark_and_push_closure, !CodeBlobToOopClosure::FixRelocations, true /* keepalive nmethods */);
MarkingNMethodClosure mark_and_push_in_blobs(&mark_and_push_closure, !NMethodToOopClosure::FixRelocations, true /* keepalive nmethods */);
thread->oops_do(&mark_and_push_closure, &mark_and_push_in_blobs);
@@ -1731,7 +1731,7 @@ void PSParallelCompact::marking_phase(ParallelOldTracer *gc_tracer) {
}
{
GCTraceTime(Debug, gc, phases) t("Free Code Blobs", gc_timer());
ctx->free_code_blobs();
ctx->free_nmethods();
}
// Prune dead klasses from subklass/sibling/implementor lists.
@@ -1797,8 +1797,8 @@ public:
_weak_proc_task.work(worker_id, &always_alive, &adjust);
}
if (_sub_tasks.try_claim_task(PSAdjustSubTask_code_cache)) {
CodeBlobToOopClosure adjust_code(&adjust, CodeBlobToOopClosure::FixRelocations);
CodeCache::blobs_do(&adjust_code);
NMethodToOopClosure adjust_code(&adjust, NMethodToOopClosure::FixRelocations);
CodeCache::nmethods_do(&adjust_code);
}
_sub_tasks.all_tasks_claimed();
}

@@ -99,7 +99,7 @@ static void scavenge_roots_work(ParallelRootType::Value root_type, uint worker_i
case ParallelRootType::code_cache:
{
MarkingCodeBlobClosure code_closure(&roots_to_old_closure, CodeBlobToOopClosure::FixRelocations, false /* keepalive nmethods */);
MarkingNMethodClosure code_closure(&roots_to_old_closure, NMethodToOopClosure::FixRelocations, false /* keepalive nmethods */);
ScavengableNMethods::nmethods_do(&code_closure);
}
break;
@@ -268,9 +268,9 @@ public:
PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(_worker_id);
PSScavengeRootsClosure roots_closure(pm);
MarkingCodeBlobClosure roots_in_blobs(&roots_closure, CodeBlobToOopClosure::FixRelocations, false /* keepalive nmethods */);
MarkingNMethodClosure roots_in_nmethods(&roots_closure, NMethodToOopClosure::FixRelocations, false /* keepalive nmethods */);
thread->oops_do(&roots_closure, &roots_in_blobs);
thread->oops_do(&roots_closure, &roots_in_nmethods);
// Do the real work
pm->drain_stacks(false);

@@ -702,9 +702,9 @@ void DefNewGeneration::collect(bool full,
RootScanClosure root_cl{this};
CLDScanClosure cld_cl{this};
MarkingCodeBlobClosure code_cl(&root_cl,
CodeBlobToOopClosure::FixRelocations,
false /* keepalive_nmethods */);
MarkingNMethodClosure code_cl(&root_cl,
NMethodToOopClosure::FixRelocations,
false /* keepalive_nmethods */);
heap->process_roots(SerialHeap::SO_ScavengeCodeCache,
&root_cl,

@@ -475,7 +475,7 @@ void SerialFullGC::phase1_mark(bool clear_all_softrefs) {
StrongRootsScope srs(0);
CLDClosure* weak_cld_closure = ClassUnloading ? nullptr : &follow_cld_closure;
MarkingCodeBlobClosure mark_code_closure(&follow_root_closure, !CodeBlobToOopClosure::FixRelocations, true);
MarkingNMethodClosure mark_code_closure(&follow_root_closure, !NMethodToOopClosure::FixRelocations, true);
gch->process_roots(SerialHeap::SO_None,
&follow_root_closure,
&follow_cld_closure,
@@ -529,7 +529,7 @@ void SerialFullGC::phase1_mark(bool clear_all_softrefs) {
}
{
GCTraceTime(Debug, gc, phases) t("Free Code Blobs", gc_timer());
ctx->free_code_blobs();
ctx->free_nmethods();
}
// Prune dead klasses from subklass/sibling/implementor lists.
@@ -697,7 +697,7 @@ void SerialFullGC::invoke_at_safepoint(bool clear_all_softrefs) {
ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);
CodeBlobToOopClosure code_closure(&adjust_pointer_closure, CodeBlobToOopClosure::FixRelocations);
NMethodToOopClosure code_closure(&adjust_pointer_closure, NMethodToOopClosure::FixRelocations);
gch->process_roots(SerialHeap::SO_AllCodeCache,
&adjust_pointer_closure,
&adjust_cld_closure,

@@ -585,7 +585,7 @@ void SerialHeap::do_collection(bool full,
ClassUnloadingContext ctx(1 /* num_nmethod_unlink_workers */,
false /* unregister_nmethods_during_purge */,
false /* lock_codeblob_free_separately */);
false /* lock_nmethod_free_separately */);
collect_generation(_old_gen,
full,
@@ -729,14 +729,14 @@ void SerialHeap::process_roots(ScanningOption so,
OopClosure* strong_roots,
CLDClosure* strong_cld_closure,
CLDClosure* weak_cld_closure,
CodeBlobToOopClosure* code_roots) {
NMethodToOopClosure* code_roots) {
// General roots.
assert(code_roots != nullptr, "code root closure should always be set");
ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);
// Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
CodeBlobToOopClosure* roots_from_code_p = (so & SO_AllCodeCache) ? nullptr : code_roots;
NMethodToOopClosure* roots_from_code_p = (so & SO_AllCodeCache) ? nullptr : code_roots;
Threads::oops_do(strong_roots, roots_from_code_p);
@@ -753,7 +753,7 @@ void SerialHeap::process_roots(ScanningOption so,
// CMSCollector uses this to do intermediate-strength collections.
// We scan the entire code cache, since CodeCache::do_unloading is not called.
CodeCache::blobs_do(code_roots);
CodeCache::nmethods_do(code_roots);
}
}

@@ -272,7 +272,7 @@ public:
OopClosure* strong_roots,
CLDClosure* strong_cld_closure,
CLDClosure* weak_cld_closure,
CodeBlobToOopClosure* code_roots);
NMethodToOopClosure* code_roots);
// Set the saved marks of generations, if that makes sense.
// In particular, if any generation might iterate over the oops

@@ -34,12 +34,12 @@ ClassUnloadingContext* ClassUnloadingContext::_context = nullptr;
ClassUnloadingContext::ClassUnloadingContext(uint num_workers,
bool unregister_nmethods_during_purge,
bool lock_codeblob_free_separately) :
bool lock_nmethod_free_separately) :
_cld_head(nullptr),
_num_nmethod_unlink_workers(num_workers),
_unlinked_nmethods(nullptr),
_unregister_nmethods_during_purge(unregister_nmethods_during_purge),
_lock_codeblob_free_separately(lock_codeblob_free_separately) {
_lock_nmethod_free_separately(lock_nmethod_free_separately) {
assert(_context == nullptr, "context already set");
_context = this;
@@ -123,7 +123,7 @@ void ClassUnloadingContext::purge_nmethods() {
CodeCache::maybe_restart_compiler(freed_memory);
}
void ClassUnloadingContext::free_code_blobs() {
void ClassUnloadingContext::free_nmethods() {
assert(_context != nullptr, "no context set");
// Sort nmethods before freeing to benefit from optimizations. If Nmethods were
@@ -159,7 +159,7 @@ void ClassUnloadingContext::free_code_blobs() {
nmethod_set->sort(sort_nmethods);
// And free. Duplicate loop for clarity depending on where we want the locking.
if (_lock_codeblob_free_separately) {
if (_lock_nmethod_free_separately) {
for (nmethod* nm : *nmethod_set) {
MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);
CodeCache::free(nm);

@@ -43,7 +43,7 @@ class ClassUnloadingContext : public CHeapObj<mtGC> {
NMethodSet** _unlinked_nmethods;
bool _unregister_nmethods_during_purge;
bool _lock_codeblob_free_separately;
bool _lock_nmethod_free_separately;
public:
static ClassUnloadingContext* context() { assert(_context != nullptr, "context not set"); return _context; }
@@ -53,12 +53,12 @@ public:
// unregister_nmethods_during_purge determines whether unloaded nmethods should
// be unregistered from the garbage collector during purge. If not, the caller
// is responsible to do that later.
// lock_codeblob_free_separately determines whether freeing the code blobs takes
// the CodeCache_lock during the whole operation (=false) or per code blob
// lock_nmethod_free_separately determines whether freeing the nmethods takes
// the CodeCache_lock during the whole operation (=false) or per nmethod
// free operation (=true).
ClassUnloadingContext(uint num_nmethod_unlink_workers,
bool unregister_nmethods_during_purge,
bool lock_codeblob_free_separately);
bool lock_nmethod_free_separately);
~ClassUnloadingContext();
bool has_unloaded_classes() const;
@@ -71,11 +71,11 @@ public:
// Register unloading nmethods, potentially in parallel.
void register_unlinked_nmethod(nmethod* nm);
void purge_nmethods();
void free_code_blobs();
void free_nmethods();
void purge_and_free_nmethods() {
purge_nmethods();
free_code_blobs();
free_nmethods();
}
};

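For the renamed lock_nmethod_free_separately flag, the two locking shapes look roughly as follows; the true branch appears verbatim in the free_nmethods() hunk above, the false branch is the assumed counterpart:

if (_lock_nmethod_free_separately) {
  // Lock once per nmethod: keeps CodeCache_lock available to concurrent
  // users in between (ZGC and Shenandoah pass true).
  for (nmethod* nm : *nmethod_set) {
    MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    CodeCache::free(nm);
  }
} else {
  // Hold the lock across the whole batch: cheaper when freeing inside a
  // safepoint (Serial, Parallel and G1 pass false).
  MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  for (nmethod* nm : *nmethod_set) {
    CodeCache::free(nm);
  }
}
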
@@ -129,7 +129,7 @@ bool ScavengableNMethods::has_scavengable_oops(nmethod* nm) {
}
// Walk the list of methods which might contain oops to the java heap.
void ScavengableNMethods::nmethods_do_and_prune(CodeBlobToOopClosure* cl) {
void ScavengableNMethods::nmethods_do_and_prune(NMethodToOopClosure* cl) {
assert_locked_or_safepoint(CodeCache_lock);
debug_only(mark_on_list_nmethods());
@@ -142,7 +142,7 @@ void ScavengableNMethods::nmethods_do_and_prune(CodeBlobToOopClosure* cl) {
assert(data.on_list(), "else shouldn't be on this list");
if (cl != nullptr) {
cl->do_code_blob(cur);
cl->do_nmethod(cur);
}
nmethod* const next = data.next();
@@ -192,7 +192,7 @@ void ScavengableNMethods::prune_unlinked_nmethods() {
}
// Walk the list of methods which might contain oops to the java heap.
void ScavengableNMethods::nmethods_do(CodeBlobToOopClosure* cl) {
void ScavengableNMethods::nmethods_do(NMethodToOopClosure* cl) {
nmethods_do_and_prune(cl);
}

@@ -29,9 +29,8 @@
#include "utilities/macros.hpp"
class BoolObjectClosure;
class CodeBlobClosure;
class CodeBlobToOopClosure;
class nmethod;
class NMethodToOopClosure;
class ScavengableNMethods : public AllStatic {
friend class VMStructs;
@@ -53,10 +52,10 @@ public:
// Apply closure to every scavengable nmethod.
// Remove nmethods that no longer have scavengable oops.
static void nmethods_do(CodeBlobToOopClosure* cl);
static void nmethods_do(NMethodToOopClosure* cl);
private:
static void nmethods_do_and_prune(CodeBlobToOopClosure* cl);
static void nmethods_do_and_prune(NMethodToOopClosure* cl);
static void unlist_nmethod(nmethod* nm, nmethod* prev);
static bool has_scavengable_oops(nmethod* nm);

@@ -119,13 +119,13 @@ public:
inline void do_oop(narrowOop* p);
};
class ShenandoahCodeBlobAndDisarmClosure: public CodeBlobToOopClosure {
class ShenandoahNMethodAndDisarmClosure: public NMethodToOopClosure {
private:
BarrierSetNMethod* const _bs;
public:
inline ShenandoahCodeBlobAndDisarmClosure(OopClosure* cl);
inline void do_code_blob(CodeBlob* cb);
inline ShenandoahNMethodAndDisarmClosure(OopClosure* cl);
inline void do_nmethod(nmethod* nm);
};
#ifdef ASSERT

@@ -192,18 +192,16 @@ void ShenandoahCleanUpdateWeakOopsClosure<CONCURRENT, IsAlive, KeepAlive>::do_oo
ShouldNotReachHere();
}
ShenandoahCodeBlobAndDisarmClosure::ShenandoahCodeBlobAndDisarmClosure(OopClosure* cl) :
CodeBlobToOopClosure(cl, true /* fix_relocations */),
ShenandoahNMethodAndDisarmClosure::ShenandoahNMethodAndDisarmClosure(OopClosure* cl) :
NMethodToOopClosure(cl, true /* fix_relocations */),
_bs(BarrierSet::barrier_set()->barrier_set_nmethod()) {
}
void ShenandoahCodeBlobAndDisarmClosure::do_code_blob(CodeBlob* cb) {
nmethod* const nm = cb->as_nmethod_or_null();
if (nm != nullptr) {
assert(!ShenandoahNMethod::gc_data(nm)->is_unregistered(), "Should not be here");
CodeBlobToOopClosure::do_code_blob(cb);
_bs->disarm(nm);
}
void ShenandoahNMethodAndDisarmClosure::do_nmethod(nmethod* nm) {
assert(nm != nullptr, "Sanity");
assert(!ShenandoahNMethod::gc_data(nm)->is_unregistered(), "Should not be here");
NMethodToOopClosure::do_nmethod(nm);
_bs->disarm(nm);
}
#ifdef ASSERT

@@ -215,8 +215,8 @@ ShenandoahCodeRootsIterator::~ShenandoahCodeRootsIterator() {
locker.notify_all();
}
void ShenandoahCodeRootsIterator::possibly_parallel_blobs_do(CodeBlobClosure *f) {
void ShenandoahCodeRootsIterator::possibly_parallel_nmethods_do(NMethodClosure *f) {
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
assert(_table_snapshot != nullptr, "Sanity");
_table_snapshot->parallel_blobs_do(f);
_table_snapshot->parallel_nmethods_do(f);
}

@@ -48,7 +48,7 @@ public:
ShenandoahCodeRootsIterator();
~ShenandoahCodeRootsIterator();
void possibly_parallel_blobs_do(CodeBlobClosure *f);
void possibly_parallel_nmethods_do(NMethodClosure *f);
};
class ShenandoahCodeRoots : public AllStatic {

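Usage sketch for the renamed iterator (oops is a placeholder OopClosure; the stride detail comes from the snapshot code below):

ShenandoahCodeRootsIterator iter;        // snapshots the nmethod table
NMethodToOopClosure cl(&oops, !NMethodToOopClosure::FixRelocations);
iter.possibly_parallel_nmethods_do(&cl); // workers claim 256-entry strides of the snapshot
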
@@ -1890,7 +1890,7 @@ void ShenandoahHeap::stw_unload_classes(bool full_gc) {
if (!unload_classes()) return;
ClassUnloadingContext ctx(_workers->active_workers(),
true /* unregister_nmethods_during_purge */,
false /* lock_codeblob_free_separately */);
false /* lock_nmethod_free_separately */);
// Unload classes and purge SystemDictionary.
{

@@ -427,7 +427,7 @@ ShenandoahNMethodTableSnapshot::~ShenandoahNMethodTableSnapshot() {
_list->release();
}
void ShenandoahNMethodTableSnapshot::parallel_blobs_do(CodeBlobClosure *f) {
void ShenandoahNMethodTableSnapshot::parallel_nmethods_do(NMethodClosure *f) {
size_t stride = 256; // educated guess
ShenandoahNMethod** const list = _list->list();
@@ -447,7 +447,7 @@ void ShenandoahNMethodTableSnapshot::parallel_blobs_do(CodeBlobClosure *f) {
}
nmr->assert_correct();
f->do_code_blob(nmr->nm());
f->do_nmethod(nmr->nm());
}
}
}

@@ -119,7 +119,7 @@ public:
ShenandoahNMethodTableSnapshot(ShenandoahNMethodTable* table);
~ShenandoahNMethodTableSnapshot();
void parallel_blobs_do(CodeBlobClosure *f);
void parallel_nmethods_do(NMethodClosure *f);
void concurrent_nmethods_do(NMethodClosure* cl);
};

@@ -63,7 +63,7 @@ ShenandoahThreadRoots::ShenandoahThreadRoots(ShenandoahPhaseTimings::Phase phase
Threads::change_thread_claim_token();
}
void ShenandoahThreadRoots::oops_do(OopClosure* oops_cl, CodeBlobClosure* code_cl, uint worker_id) {
void ShenandoahThreadRoots::oops_do(OopClosure* oops_cl, NMethodClosure* code_cl, uint worker_id) {
ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::ThreadRoots, worker_id);
ResourceMark rm;
Threads::possibly_parallel_oops_do(_is_par, oops_cl, code_cl);
@@ -82,9 +82,9 @@ ShenandoahThreadRoots::~ShenandoahThreadRoots() {
ShenandoahCodeCacheRoots::ShenandoahCodeCacheRoots(ShenandoahPhaseTimings::Phase phase) : _phase(phase) {
}
void ShenandoahCodeCacheRoots::code_blobs_do(CodeBlobClosure* blob_cl, uint worker_id) {
void ShenandoahCodeCacheRoots::nmethods_do(NMethodClosure* nmethod_cl, uint worker_id) {
ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
_coderoots_iterator.possibly_parallel_blobs_do(blob_cl);
_coderoots_iterator.possibly_parallel_nmethods_do(nmethod_cl);
}
ShenandoahRootProcessor::ShenandoahRootProcessor(ShenandoahPhaseTimings::Phase phase) :
@@ -159,8 +159,8 @@ void ShenandoahConcurrentRootScanner::roots_do(OopClosure* oops, uint worker_id)
{
ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
CodeBlobToOopClosure blobs(oops, !CodeBlobToOopClosure::FixRelocations);
_codecache_snapshot->parallel_blobs_do(&blobs);
NMethodToOopClosure nmethods(oops, !NMethodToOopClosure::FixRelocations);
_codecache_snapshot->parallel_nmethods_do(&nmethods);
}
}
@@ -203,11 +203,11 @@ ShenandoahRootAdjuster::ShenandoahRootAdjuster(uint n_workers, ShenandoahPhaseTi
}
void ShenandoahRootAdjuster::roots_do(uint worker_id, OopClosure* oops) {
CodeBlobToOopClosure code_blob_cl(oops, CodeBlobToOopClosure::FixRelocations);
ShenandoahCodeBlobAndDisarmClosure blobs_and_disarm_Cl(oops);
CodeBlobToOopClosure* adjust_code_closure = ShenandoahCodeRoots::use_nmethod_barriers_for_mark() ?
static_cast<CodeBlobToOopClosure*>(&blobs_and_disarm_Cl) :
static_cast<CodeBlobToOopClosure*>(&code_blob_cl);
NMethodToOopClosure code_blob_cl(oops, NMethodToOopClosure::FixRelocations);
ShenandoahNMethodAndDisarmClosure nmethods_and_disarm_Cl(oops);
NMethodToOopClosure* adjust_code_closure = ShenandoahCodeRoots::use_nmethod_barriers_for_mark() ?
static_cast<NMethodToOopClosure*>(&nmethods_and_disarm_Cl) :
static_cast<NMethodToOopClosure*>(&code_blob_cl);
CLDToOopClosure adjust_cld_closure(oops, ClassLoaderData::_claim_strong);
// Process light-weight/limited parallel roots then
@@ -216,7 +216,7 @@ void ShenandoahRootAdjuster::roots_do(uint worker_id, OopClosure* oops) {
_cld_roots.cld_do(&adjust_cld_closure, worker_id);
// Process heavy-weight/fully parallel roots the last
_code_roots.code_blobs_do(adjust_code_closure, worker_id);
_code_roots.nmethods_do(adjust_code_closure, worker_id);
_thread_roots.oops_do(oops, nullptr, worker_id);
}
@@ -229,34 +229,32 @@ ShenandoahHeapIterationRootScanner::ShenandoahHeapIterationRootScanner(uint n_wo
_code_roots(ShenandoahPhaseTimings::heap_iteration_roots) {
}
class ShenandoahMarkCodeBlobClosure : public CodeBlobClosure {
class ShenandoahMarkNMethodClosure : public NMethodClosure {
private:
OopClosure* const _oops;
BarrierSetNMethod* const _bs_nm;
public:
ShenandoahMarkCodeBlobClosure(OopClosure* oops) :
ShenandoahMarkNMethodClosure(OopClosure* oops) :
_oops(oops),
_bs_nm(BarrierSet::barrier_set()->barrier_set_nmethod()) {}
virtual void do_code_blob(CodeBlob* cb) {
nmethod* const nm = cb->as_nmethod_or_null();
if (nm != nullptr) {
if (_bs_nm != nullptr) {
// Make sure it only sees to-space objects
_bs_nm->nmethod_entry_barrier(nm);
}
ShenandoahNMethod* const snm = ShenandoahNMethod::gc_data(nm);
assert(snm != nullptr, "Sanity");
snm->oops_do(_oops, false /*fix_relocations*/);
virtual void do_nmethod(nmethod* nm) {
assert(nm != nullptr, "Sanity");
if (_bs_nm != nullptr) {
// Make sure it only sees to-space objects
_bs_nm->nmethod_entry_barrier(nm);
}
ShenandoahNMethod* const snm = ShenandoahNMethod::gc_data(nm);
assert(snm != nullptr, "Sanity");
snm->oops_do(_oops, false /*fix_relocations*/);
}
};
void ShenandoahHeapIterationRootScanner::roots_do(OopClosure* oops) {
// Must use _claim_other to avoid interfering with concurrent CLDG iteration
CLDToOopClosure clds(oops, ClassLoaderData::_claim_other);
ShenandoahMarkCodeBlobClosure code(oops);
ShenandoahMarkNMethodClosure code(oops);
ShenandoahParallelOopsDoThreadClosure tc_cl(oops, &code, nullptr);
ResourceMark rm;
@ -267,6 +265,6 @@ void ShenandoahHeapIterationRootScanner::roots_do(OopClosure* oops) {
_cld_roots.cld_do(&clds, 0);
// Process heavy-weight/fully parallel roots the last
_code_roots.code_blobs_do(&code, 0);
_code_roots.nmethods_do(&code, 0);
_thread_roots.threads_do(&tc_cl, 0);
}

@@ -92,7 +92,7 @@ public:
ShenandoahThreadRoots(ShenandoahPhaseTimings::Phase phase, bool is_par);
~ShenandoahThreadRoots();
void oops_do(OopClosure* oops_cl, CodeBlobClosure* code_cl, uint worker_id);
void oops_do(OopClosure* oops_cl, NMethodClosure* code_cl, uint worker_id);
void threads_do(ThreadClosure* tc, uint worker_id);
};
@@ -103,7 +103,7 @@ private:
public:
ShenandoahCodeCacheRoots(ShenandoahPhaseTimings::Phase phase);
void code_blobs_do(CodeBlobClosure* blob_cl, uint worker_id);
void nmethods_do(NMethodClosure* nmethod_cl, uint worker_id);
};
template <bool CONCURRENT>

@@ -127,10 +127,10 @@ void ShenandoahClassLoaderDataRoots<CONCURRENT>::cld_do(CLDClosure* clds, uint w
class ShenandoahParallelOopsDoThreadClosure : public ThreadClosure {
private:
OopClosure* _f;
CodeBlobClosure* _cf;
NMethodClosure* _cf;
ThreadClosure* _thread_cl;
public:
ShenandoahParallelOopsDoThreadClosure(OopClosure* f, CodeBlobClosure* cf, ThreadClosure* thread_cl) :
ShenandoahParallelOopsDoThreadClosure(OopClosure* f, NMethodClosure* cf, ThreadClosure* thread_cl) :
_f(f), _cf(cf), _thread_cl(thread_cl) {}
void do_thread(Thread* t) {
@@ -152,16 +152,16 @@ public:
// we risk executing that code cache blob, and crashing.
template <typename T>
void ShenandoahSTWRootScanner::roots_do(T* oops, uint worker_id) {
MarkingCodeBlobClosure blobs_cl(oops, !CodeBlobToOopClosure::FixRelocations, true /*FIXME*/);
MarkingNMethodClosure nmethods_cl(oops, !NMethodToOopClosure::FixRelocations, true /*FIXME*/);
CLDToOopClosure clds(oops, ClassLoaderData::_claim_strong);
ResourceMark rm;
if (_unload_classes) {
_thread_roots.oops_do(oops, &blobs_cl, worker_id);
_thread_roots.oops_do(oops, &nmethods_cl, worker_id);
_cld_roots.always_strong_cld_do(&clds, worker_id);
} else {
_thread_roots.oops_do(oops, nullptr, worker_id);
_code_roots.code_blobs_do(&blobs_cl, worker_id);
_code_roots.nmethods_do(&nmethods_cl, worker_id);
_cld_roots.cld_do(&clds, worker_id);
}
@@ -170,11 +170,11 @@ void ShenandoahSTWRootScanner::roots_do(T* oops, uint worker_id) {
template <typename IsAlive, typename KeepAlive>
void ShenandoahRootUpdater::roots_do(uint worker_id, IsAlive* is_alive, KeepAlive* keep_alive) {
CodeBlobToOopClosure update_blobs(keep_alive, CodeBlobToOopClosure::FixRelocations);
ShenandoahCodeBlobAndDisarmClosure blobs_and_disarm_Cl(keep_alive);
CodeBlobToOopClosure* codes_cl = ShenandoahCodeRoots::use_nmethod_barriers_for_mark() ?
static_cast<CodeBlobToOopClosure*>(&blobs_and_disarm_Cl) :
static_cast<CodeBlobToOopClosure*>(&update_blobs);
NMethodToOopClosure update_nmethods(keep_alive, NMethodToOopClosure::FixRelocations);
ShenandoahNMethodAndDisarmClosure nmethods_and_disarm_Cl(keep_alive);
NMethodToOopClosure* codes_cl = ShenandoahCodeRoots::use_nmethod_barriers_for_mark() ?
static_cast<NMethodToOopClosure*>(&nmethods_and_disarm_Cl) :
static_cast<NMethodToOopClosure*>(&update_nmethods);
CLDToOopClosure clds(keep_alive, ClassLoaderData::_claim_strong);
@@ -184,7 +184,7 @@ void ShenandoahRootUpdater::roots_do(uint worker_id, IsAlive* is_alive, KeepAliv
_cld_roots.cld_do(&clds, worker_id);
// Process heavy-weight/fully parallel roots the last
_code_roots.code_blobs_do(codes_cl, worker_id);
_code_roots.nmethods_do(codes_cl, worker_id);
_thread_roots.oops_do(keep_alive, nullptr, worker_id);
}

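The static_casts above only select between two siblings through their shared base; condensed sketch (use_barriers stands in for ShenandoahCodeRoots::use_nmethod_barriers_for_mark()):

NMethodToOopClosure update(keep_alive, NMethodToOopClosure::FixRelocations);
ShenandoahNMethodAndDisarmClosure update_and_disarm(keep_alive); // also derives from NMethodToOopClosure
NMethodToOopClosure* codes_cl = use_barriers
    ? static_cast<NMethodToOopClosure*>(&update_and_disarm)      // fix oops, then disarm entry barrier
    : &update;                                                   // fix oops only
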
@@ -57,8 +57,8 @@ void ShenandoahRootVerifier::roots_do(OopClosure* oops) {
ShenandoahGCStateResetter resetter;
shenandoah_assert_safepoint();
CodeBlobToOopClosure blobs(oops, !CodeBlobToOopClosure::FixRelocations);
CodeCache::blobs_do(&blobs);
NMethodToOopClosure blobs(oops, !NMethodToOopClosure::FixRelocations);
CodeCache::nmethods_do(&blobs);
CLDToOopClosure clds(oops, ClassLoaderData::_claim_none);
ClassLoaderDataGraph::cld_do(&clds);
@@ -86,6 +86,6 @@ void ShenandoahRootVerifier::strong_roots_do(OopClosure* oops) {
// Do thread roots the last. This allows verification code to find
// any broken objects from those special roots first, not the accidental
// dangling reference from the thread root.
CodeBlobToOopClosure blobs(oops, !CodeBlobToOopClosure::FixRelocations);
Threads::possibly_parallel_oops_do(true, oops, &blobs);
NMethodToOopClosure nmethods(oops, !NMethodToOopClosure::FixRelocations);
Threads::possibly_parallel_oops_do(true, oops, &nmethods);
}

@@ -35,15 +35,13 @@
uint32_t ShenandoahStackWatermark::_epoch_id = 1;
ShenandoahOnStackCodeBlobClosure::ShenandoahOnStackCodeBlobClosure() :
ShenandoahOnStackNMethodClosure::ShenandoahOnStackNMethodClosure() :
_bs_nm(BarrierSet::barrier_set()->barrier_set_nmethod()) {}
void ShenandoahOnStackCodeBlobClosure::do_code_blob(CodeBlob* cb) {
nmethod* const nm = cb->as_nmethod_or_null();
if (nm != nullptr) {
const bool result = _bs_nm->nmethod_entry_barrier(nm);
assert(result, "NMethod on-stack must be alive");
}
void ShenandoahOnStackNMethodClosure::do_nmethod(nmethod* nm) {
assert(nm != nullptr, "Sanity");
const bool result = _bs_nm->nmethod_entry_barrier(nm);
assert(result, "NMethod on-stack must be alive");
}
ThreadLocalAllocStats& ShenandoahStackWatermark::stats() {
@@ -65,7 +63,7 @@ ShenandoahStackWatermark::ShenandoahStackWatermark(JavaThread* jt) :
_stats(),
_keep_alive_cl(),
_evac_update_oop_cl(),
_cb_cl() {}
_nm_cl() {}
OopClosure* ShenandoahStackWatermark::closure_from_context(void* context) {
if (context != nullptr) {
@@ -98,7 +96,7 @@ void ShenandoahStackWatermark::start_processing_impl(void* context) {
// It is also a good place to resize the TLAB sizes for future allocations.
retire_tlab();
_jt->oops_do_no_frames(closure_from_context(context), &_cb_cl);
_jt->oops_do_no_frames(closure_from_context(context), &_nm_cl);
} else if (heap->is_concurrent_weak_root_in_progress()) {
assert(heap->is_evacuation_in_progress(), "Should not be armed");
// Retire the TLABs, which will force threads to reacquire their TLABs.
@@ -108,7 +106,7 @@ void ShenandoahStackWatermark::start_processing_impl(void* context) {
// be needed for reference updates (would update the large filler instead).
retire_tlab();
_jt->oops_do_no_frames(closure_from_context(context), &_cb_cl);
_jt->oops_do_no_frames(closure_from_context(context), &_nm_cl);
} else {
ShouldNotReachHere();
}
@@ -135,5 +133,5 @@ void ShenandoahStackWatermark::process(const frame& fr, RegisterMap& register_ma
assert((heap->is_concurrent_weak_root_in_progress() && heap->is_evacuation_in_progress()) ||
heap->is_concurrent_mark_in_progress(),
"Only these two phases");
fr.oops_do(oops, &_cb_cl, &register_map, DerivedPointerIterationMode::_directly);
fr.oops_do(oops, &_nm_cl, &register_map, DerivedPointerIterationMode::_directly);
}

@@ -38,13 +38,13 @@
class frame;
class JavaThread;
class ShenandoahOnStackCodeBlobClosure : public CodeBlobClosure {
class ShenandoahOnStackNMethodClosure : public NMethodClosure {
private:
BarrierSetNMethod* _bs_nm;
void do_code_blob(CodeBlob* cb);
void do_nmethod(nmethod* nm);
public:
ShenandoahOnStackCodeBlobClosure();
ShenandoahOnStackNMethodClosure();
};
class ShenandoahStackWatermark : public StackWatermark {
@@ -56,7 +56,7 @@ private:
// Closures
ShenandoahKeepAliveClosure _keep_alive_cl;
ShenandoahEvacuateUpdateRootsClosure _evac_update_oop_cl;
ShenandoahOnStackCodeBlobClosure _cb_cl;
ShenandoahOnStackNMethodClosure _nm_cl;
public:
ShenandoahStackWatermark(JavaThread* jt);
ThreadLocalAllocStats& stats();

@@ -138,7 +138,7 @@ void ShenandoahUnload::unload() {
ClassUnloadingContext ctx(heap->workers()->active_workers(),
true /* unregister_nmethods_during_purge */,
true /* lock_codeblob_free_separately */);
true /* lock_nmethod_free_separately */);
// Unlink stale metadata and nmethods
{

@@ -323,7 +323,7 @@ void XHeap::process_non_strong_references() {
ClassUnloadingContext ctx(_workers.active_workers(),
true /* unregister_nmethods_during_purge */,
true /* lock_codeblob_free_separately */);
true /* lock_nmethod_free_separately */);
// Unlink stale metadata and nmethods
_unload.unlink();

@@ -303,16 +303,16 @@ public:
class XHeapIteratorThreadClosure : public ThreadClosure {
private:
OopClosure* const _cl;
CodeBlobToNMethodClosure _cb_cl;
OopClosure* const _cl;
NMethodClosure* const _nm_cl;
public:
XHeapIteratorThreadClosure(OopClosure* cl, NMethodClosure* nm_cl) :
_cl(cl),
_cb_cl(nm_cl) {}
_nm_cl(nm_cl) {}
void do_thread(Thread* thread) {
thread->oops_do(_cl, &_cb_cl);
thread->oops_do(_cl, _nm_cl);
}
};

@@ -33,15 +33,12 @@
#include "runtime/frame.inline.hpp"
#include "utilities/preserveException.hpp"
XOnStackCodeBlobClosure::XOnStackCodeBlobClosure() :
XOnStackNMethodClosure::XOnStackNMethodClosure() :
_bs_nm(BarrierSet::barrier_set()->barrier_set_nmethod()) {}
void XOnStackCodeBlobClosure::do_code_blob(CodeBlob* cb) {
nmethod* const nm = cb->as_nmethod_or_null();
if (nm != nullptr) {
const bool result = _bs_nm->nmethod_entry_barrier(nm);
assert(result, "NMethod on-stack must be alive");
}
void XOnStackNMethodClosure::do_nmethod(nmethod* nm) {
const bool result = _bs_nm->nmethod_entry_barrier(nm);
assert(result, "NMethod on-stack must be alive");
}
ThreadLocalAllocStats& XStackWatermark::stats() {
@@ -55,7 +52,7 @@ uint32_t XStackWatermark::epoch_id() const {
XStackWatermark::XStackWatermark(JavaThread* jt) :
StackWatermark(jt, StackWatermarkKind::gc, *XAddressBadMaskHighOrderBitsAddr),
_jt_cl(),
_cb_cl(),
_nm_cl(),
_stats() {}
OopClosure* XStackWatermark::closure_from_context(void* context) {
@@ -72,7 +69,7 @@ void XStackWatermark::start_processing_impl(void* context) {
XVerify::verify_thread_head_bad(_jt);
// Process the non-frame part of the thread
_jt->oops_do_no_frames(closure_from_context(context), &_cb_cl);
_jt->oops_do_no_frames(closure_from_context(context), &_nm_cl);
XThreadLocalData::do_invisible_root(_jt, XBarrier::load_barrier_on_invisible_root_oop_field);
// Verification of frames is done after processing of the "head" (no_frames).
@@ -95,5 +92,5 @@ void XStackWatermark::start_processing_impl(void* context) {
void XStackWatermark::process(const frame& fr, RegisterMap& register_map, void* context) {
XVerify::verify_frame_bad(fr, register_map);
fr.oops_do(closure_from_context(context), &_cb_cl, &register_map, DerivedPointerIterationMode::_directly);
fr.oops_do(closure_from_context(context), &_nm_cl, &register_map, DerivedPointerIterationMode::_directly);
}

@@ -37,21 +37,21 @@
class frame;
class JavaThread;
class XOnStackCodeBlobClosure : public CodeBlobClosure {
class XOnStackNMethodClosure : public NMethodClosure {
private:
BarrierSetNMethod* _bs_nm;
virtual void do_code_blob(CodeBlob* cb);
virtual void do_nmethod(nmethod* nm);
public:
XOnStackCodeBlobClosure();
XOnStackNMethodClosure();
};
class XStackWatermark : public StackWatermark {
private:
XLoadBarrierOopClosure _jt_cl;
XOnStackCodeBlobClosure _cb_cl;
ThreadLocalAllocStats _stats;
XLoadBarrierOopClosure _jt_cl;
XOnStackNMethodClosure _nm_cl;
ThreadLocalAllocStats _stats;
OopClosure* closure_from_context(void* context);

@@ -97,16 +97,6 @@ public:
}
};
class XVerifyCodeBlobClosure : public CodeBlobToOopClosure {
public:
XVerifyCodeBlobClosure(XVerifyRootClosure* _cl) :
CodeBlobToOopClosure(_cl, false /* fix_relocations */) {}
virtual void do_code_blob(CodeBlob* cb) {
CodeBlobToOopClosure::do_code_blob(cb);
}
};
class XVerifyStack : public OopClosure {
private:
XVerifyRootClosure* const _cl;
@@ -166,12 +156,12 @@ public:
}
void verify_frames() {
XVerifyCodeBlobClosure cb_cl(_cl);
NMethodToOopClosure nm_cl(_cl, false /* fix_relocations */);
for (StackFrameStream frames(_jt, true /* update */, false /* process_frames */);
!frames.is_done();
frames.next()) {
frame& frame = *frames.current();
frame.oops_do(this, &cb_cl, frames.register_map(), DerivedPointerIterationMode::_ignore);
frame.oops_do(this, &nm_cl, frames.register_map(), DerivedPointerIterationMode::_ignore);
prepare_next_frame(frame);
}
}

@@ -1323,7 +1323,7 @@ void ZGenerationOld::process_non_strong_references() {
ClassUnloadingContext ctx(_workers.active_workers(),
true /* unregister_nmethods_during_purge */,
true /* lock_codeblob_free_separately */);
true /* lock_nmethod_free_separately */);
// Unlink stale metadata and nmethods
_unload.unlink();

@@ -375,16 +375,16 @@ public:
class ZHeapIteratorThreadClosure : public ThreadClosure {
private:
OopClosure* const _cl;
CodeBlobToNMethodClosure _cb_cl;
OopClosure* const _cl;
NMethodClosure* const _nm_cl;
public:
ZHeapIteratorThreadClosure(OopClosure* cl, NMethodClosure* nm_cl)
: _cl(cl),
_cb_cl(nm_cl) {}
_nm_cl(nm_cl) {}
void do_thread(Thread* thread) {
thread->oops_do(_cl, &_cb_cl);
thread->oops_do(_cl, _nm_cl);
}
};

@@ -37,15 +37,13 @@
#include "runtime/thread.hpp"
#include "utilities/preserveException.hpp"
ZOnStackCodeBlobClosure::ZOnStackCodeBlobClosure()
ZOnStackNMethodClosure::ZOnStackNMethodClosure()
: _bs_nm(BarrierSet::barrier_set()->barrier_set_nmethod()) {}
void ZOnStackCodeBlobClosure::do_code_blob(CodeBlob* cb) {
nmethod* const nm = cb->as_nmethod_or_null();
if (nm != nullptr) {
const bool result = _bs_nm->nmethod_entry_barrier(nm);
assert(result, "NMethod on-stack must be alive");
}
void ZOnStackNMethodClosure::do_nmethod(nmethod* nm) {
assert(nm != nullptr, "Sanity");
const bool result = _bs_nm->nmethod_entry_barrier(nm);
assert(result, "NMethod on-stack must be alive");
}
ThreadLocalAllocStats& ZStackWatermark::stats() {
@@ -166,9 +164,9 @@ void ZStackWatermark::process_head(void* context) {
const uintptr_t color = prev_head_color();
ZStackWatermarkProcessOopClosure cl(context, color);
ZOnStackCodeBlobClosure cb_cl;
ZOnStackNMethodClosure nm_cl;
_jt->oops_do_no_frames(&cl, &cb_cl);
_jt->oops_do_no_frames(&cl, &nm_cl);
zaddress_unsafe* const invisible_root = ZThreadLocalData::invisible_root(_jt);
if (invisible_root != nullptr) {
@@ -209,7 +207,7 @@ void ZStackWatermark::start_processing_impl(void* context) {
void ZStackWatermark::process(const frame& fr, RegisterMap& register_map, void* context) {
const uintptr_t color = prev_frame_color(fr);
ZStackWatermarkProcessOopClosure cl(context, color);
ZOnStackCodeBlobClosure cb_cl;
ZOnStackNMethodClosure nm_cl;
fr.oops_do(&cl, &cb_cl, &register_map, DerivedPointerIterationMode::_directly);
fr.oops_do(&cl, &nm_cl, &register_map, DerivedPointerIterationMode::_directly);
}

@@ -38,14 +38,14 @@
class frame;
class JavaThread;
class ZOnStackCodeBlobClosure : public CodeBlobClosure {
class ZOnStackNMethodClosure : public NMethodClosure {
private:
BarrierSetNMethod* _bs_nm;
virtual void do_code_blob(CodeBlob* cb);
virtual void do_nmethod(nmethod* nm);
public:
ZOnStackCodeBlobClosure();
ZOnStackNMethodClosure();
};
struct ZColorWatermark {

@@ -240,16 +240,6 @@ public:
}
};
class ZVerifyCodeBlobClosure : public CodeBlobToOopClosure {
public:
ZVerifyCodeBlobClosure(OopClosure* cl)
: CodeBlobToOopClosure(cl, false /* fix_relocations */) {}
virtual void do_code_blob(CodeBlob* cb) {
CodeBlobToOopClosure::do_code_blob(cb);
}
};
class ZVerifyOldOopClosure : public BasicOopIterateClosure {
private:
const bool _verify_weaks;

@ -41,23 +41,16 @@ void ObjectToOopClosure::do_object(oop obj) {
obj->oop_iterate(_cl);
}
void CodeBlobToOopClosure::do_nmethod(nmethod* nm) {
void NMethodToOopClosure::do_nmethod(nmethod* nm) {
nm->oops_do(_cl);
if (_fix_relocations) {
nm->fix_oop_relocations();
}
}
void CodeBlobToOopClosure::do_code_blob(CodeBlob* cb) {
nmethod* nm = cb->as_nmethod_or_null();
if (nm != nullptr) {
do_nmethod(nm);
}
}
void MarkingCodeBlobClosure::do_code_blob(CodeBlob* cb) {
nmethod* nm = cb->as_nmethod_or_null();
if (nm != nullptr && nm->oops_do_try_claim()) {
void MarkingNMethodClosure::do_nmethod(nmethod* nm) {
assert(nm != nullptr, "Unexpected nullptr");
if (nm->oops_do_try_claim()) {
// Process the oops in the nmethod
nm->oops_do(_cl);
@ -76,10 +69,3 @@ void MarkingCodeBlobClosure::do_code_blob(CodeBlob* cb) {
}
}
}
void CodeBlobToNMethodClosure::do_code_blob(CodeBlob* cb) {
nmethod* nm = cb->as_nmethod_or_null();
if (nm != nullptr) {
_nm_cl->do_nmethod(nm);
}
}
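The fix_relocations handling above deserves a note: when the flag is set, the oop closure must install final addresses immediately, because fix_oop_relocations() re-patches the nmethod's embedded oops right after the visit. A sketch under that assumption (AdjustPointerClosure and forwardee() are hypothetical stand-ins for a moving collector's adjust phase):

#include "memory/iterator.hpp"
#include "oops/oopsHierarchy.hpp"

// Hypothetical: would return obj's post-move address; identity here.
static oop forwardee(oop obj) { return obj; }

// Hypothetical adjust-pointers closure for a moving collector.
class AdjustPointerClosure : public OopClosure {
public:
  virtual void do_oop(oop* p) {
    oop obj = *p;
    if (obj != nullptr) {
      *p = forwardee(obj); // write the new address eagerly
    }
  }
  virtual void do_oop(narrowOop* p) { /* analogous for compressed oops */ }
};

// Because new addresses are written eagerly, FixRelocations is safe:
//   AdjustPointerClosure adjust;
//   NMethodToOopClosure nm_cl(&adjust, NMethodToOopClosure::FixRelocations);
//   nm_cl.do_nmethod(nm);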

@ -232,56 +232,40 @@ public:
ObjectToOopClosure(OopIterateClosure* cl) : _cl(cl) {}
};
// CodeBlobClosure is used for iterating through code blobs
// NMethodClosure is used for iterating through nmethods
// in the code cache or on thread stacks
class CodeBlobClosure : public Closure {
public:
// Called for each code blob.
virtual void do_code_blob(CodeBlob* cb) = 0;
};
// Applies an oop closure to all ref fields in code blobs
// iterated over in an object iteration.
class CodeBlobToOopClosure : public CodeBlobClosure {
protected:
OopClosure* _cl;
bool _fix_relocations;
void do_nmethod(nmethod* nm);
public:
// If fix_relocations(), then cl must copy objects to their new location immediately to avoid
// patching nmethods with the old locations.
CodeBlobToOopClosure(OopClosure* cl, bool fix_relocations) : _cl(cl), _fix_relocations(fix_relocations) {}
virtual void do_code_blob(CodeBlob* cb);
bool fix_relocations() const { return _fix_relocations; }
const static bool FixRelocations = true;
};
class MarkingCodeBlobClosure : public CodeBlobToOopClosure {
bool _keepalive_nmethods;
public:
MarkingCodeBlobClosure(OopClosure* cl, bool fix_relocations, bool keepalive_nmethods) :
CodeBlobToOopClosure(cl, fix_relocations),
_keepalive_nmethods(keepalive_nmethods) {}
// Called for each code blob, but at most once per unique blob.
virtual void do_code_blob(CodeBlob* cb);
};
class NMethodClosure : public Closure {
public:
virtual void do_nmethod(nmethod* n) = 0;
};
class CodeBlobToNMethodClosure : public CodeBlobClosure {
NMethodClosure* const _nm_cl;
// Applies an oop closure to all ref fields in nmethods
// iterated over in an object iteration.
class NMethodToOopClosure : public NMethodClosure {
protected:
OopClosure* _cl;
bool _fix_relocations;
public:
// If fix_relocations(), then cl must copy objects to their new location immediately to avoid
// patching nmethods with the old locations.
NMethodToOopClosure(OopClosure* cl, bool fix_relocations) : _cl(cl), _fix_relocations(fix_relocations) {}
void do_nmethod(nmethod* nm) override;
bool fix_relocations() const { return _fix_relocations; }
const static bool FixRelocations = true;
};
class MarkingNMethodClosure : public NMethodToOopClosure {
bool _keepalive_nmethods;
public:
CodeBlobToNMethodClosure(NMethodClosure* nm_cl) : _nm_cl(nm_cl) {}
MarkingNMethodClosure(OopClosure* cl, bool fix_relocations, bool keepalive_nmethods) :
NMethodToOopClosure(cl, fix_relocations),
_keepalive_nmethods(keepalive_nmethods) {}
virtual void do_code_blob(CodeBlob* cb);
// Called for each nmethod.
virtual void do_nmethod(nmethod* nm);
};
// MonitorClosure is used for iterating over monitors in the monitors cache
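To make the new contract concrete, a minimal hypothetical client (CountingNMethodClosure is not in this patch; JavaThread::nmethods_do is changed later in it):

#include "memory/iterator.hpp"

// Hypothetical: counts the nmethods visited by any nmethods_do-style
// iteration (a thread's stack, the JVMTI deferred event queue, ...).
class CountingNMethodClosure : public NMethodClosure {
  int _count;

public:
  CountingNMethodClosure() : _count(0) {}
  virtual void do_nmethod(nmethod* nm) { _count++; }
  int count() const { return _count; }
};

// Usage sketch, at a safepoint, for some JavaThread* jt:
//   CountingNMethodClosure cl;
//   jt->nmethods_do(&cl);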

@ -1014,17 +1014,17 @@ void JvmtiDeferredEvent::run_nmethod_entry_barriers() {
// Keep the nmethod for compiled_method_load from being unloaded.
void JvmtiDeferredEvent::oops_do(OopClosure* f, CodeBlobClosure* cf) {
void JvmtiDeferredEvent::oops_do(OopClosure* f, NMethodClosure* cf) {
if (cf != nullptr && _type == TYPE_COMPILED_METHOD_LOAD) {
cf->do_code_blob(_event_data.compiled_method_load);
cf->do_nmethod(_event_data.compiled_method_load);
}
}
// The GC calls this and marks the nmethods here on the stack so that
// they cannot be unloaded while in the queue.
void JvmtiDeferredEvent::nmethods_do(CodeBlobClosure* cf) {
void JvmtiDeferredEvent::nmethods_do(NMethodClosure* cf) {
if (cf != nullptr && _type == TYPE_COMPILED_METHOD_LOAD) {
cf->do_code_blob(_event_data.compiled_method_load);
cf->do_nmethod(_event_data.compiled_method_load);
}
}
@ -1092,13 +1092,13 @@ void JvmtiDeferredEventQueue::run_nmethod_entry_barriers() {
}
void JvmtiDeferredEventQueue::oops_do(OopClosure* f, CodeBlobClosure* cf) {
void JvmtiDeferredEventQueue::oops_do(OopClosure* f, NMethodClosure* cf) {
for(QueueNode* node = _queue_head; node != nullptr; node = node->next()) {
node->event().oops_do(f, cf);
}
}
void JvmtiDeferredEventQueue::nmethods_do(CodeBlobClosure* cf) {
void JvmtiDeferredEventQueue::nmethods_do(NMethodClosure* cf) {
for(QueueNode* node = _queue_head; node != nullptr; node = node->next()) {
node->event().nmethods_do(cf);
}

@ -499,9 +499,9 @@ class JvmtiDeferredEvent {
void post_compiled_method_load_event(JvmtiEnv* env) NOT_JVMTI_RETURN;
void run_nmethod_entry_barriers() NOT_JVMTI_RETURN;
// GC support to keep nmethods from unloading while in the queue.
void nmethods_do(CodeBlobClosure* cf) NOT_JVMTI_RETURN;
void nmethods_do(NMethodClosure* cf) NOT_JVMTI_RETURN;
// GC support to keep nmethod from being unloaded while in the queue.
void oops_do(OopClosure* f, CodeBlobClosure* cf) NOT_JVMTI_RETURN;
void oops_do(OopClosure* f, NMethodClosure* cf) NOT_JVMTI_RETURN;
};
/**
@ -542,9 +542,9 @@ class JvmtiDeferredEventQueue : public CHeapObj<mtInternal> {
void run_nmethod_entry_barriers();
// GC support to keep nmethods from unloading while in the queue.
void nmethods_do(CodeBlobClosure* cf) NOT_JVMTI_RETURN;
void nmethods_do(NMethodClosure* cf) NOT_JVMTI_RETURN;
// GC support to keep nmethod from being unloaded while in the queue.
void oops_do(OopClosure* f, CodeBlobClosure* cf) NOT_JVMTI_RETURN;
void oops_do(OopClosure* f, NMethodClosure* cf) NOT_JVMTI_RETURN;
};
// Utility macro that checks for null pointers:

@ -984,7 +984,7 @@ void JvmtiThreadState::process_pending_step_for_earlyret() {
}
}
void JvmtiThreadState::oops_do(OopClosure* f, CodeBlobClosure* cf) {
void JvmtiThreadState::oops_do(OopClosure* f, NMethodClosure* cf) {
f->do_oop((oop*) &_earlyret_oop);
// Keep nmethods from unloading on the event queue
@ -993,7 +993,7 @@ void JvmtiThreadState::oops_do(OopClosure* f, CodeBlobClosure* cf) {
}
}
void JvmtiThreadState::nmethods_do(CodeBlobClosure* cf) {
void JvmtiThreadState::nmethods_do(NMethodClosure* cf) {
// Keep nmethods from unloading on the event queue
if (_jvmti_event_queue != nullptr) {
_jvmti_event_queue->nmethods_do(cf);

@ -525,8 +525,8 @@ class JvmtiThreadState : public CHeapObj<mtInternal> {
static ByteSize earlyret_oop_offset() { return byte_offset_of(JvmtiThreadState, _earlyret_oop); }
static ByteSize earlyret_value_offset() { return byte_offset_of(JvmtiThreadState, _earlyret_value); }
void oops_do(OopClosure* f, CodeBlobClosure* cf) NOT_JVMTI_RETURN; // GC support
void nmethods_do(CodeBlobClosure* cf) NOT_JVMTI_RETURN;
void oops_do(OopClosure* f, NMethodClosure* cf) NOT_JVMTI_RETURN; // GC support
void nmethods_do(NMethodClosure* cf) NOT_JVMTI_RETURN;
public:
void set_should_post_on_exceptions(bool val);
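Putting the JVMTI pieces together, a usage sketch (not from the patch; assumes the entry point is reachable from the caller's context and that mark_cl is the collector's marking OopClosure):

#include "memory/iterator.hpp"
#include "prims/jvmtiThreadState.hpp"

// Keep nmethods queued for compiled_method_load events alive during
// marking; MarkingNMethodClosure claims each nmethod once per cycle.
void keep_jvmti_nmethods_alive(JvmtiThreadState* state, OopClosure* mark_cl) {
  MarkingNMethodClosure keepalive(mark_cl,
                                  !NMethodToOopClosure::FixRelocations,
                                  true /* keepalive_nmethods */);
  state->nmethods_do(&keepalive);
}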

@ -2534,7 +2534,7 @@ static bool do_verify_after_thaw(JavaThread* thread, stackChunkOop chunk, output
ResourceMark rm;
ThawVerifyOopsClosure cl(st);
CodeBlobToOopClosure cf(&cl, false);
NMethodToOopClosure cf(&cl, false);
StackFrameStream fst(thread, true, false);
fst.register_map()->set_include_argument_oops(false);

@ -962,7 +962,7 @@ void frame::oops_interpreted_arguments_do(Symbol* signature, bool has_receiver,
finder.oops_do();
}
void frame::oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, DerivedOopClosure* df, DerivedPointerIterationMode derived_mode, const RegisterMap* reg_map) const {
void frame::oops_nmethod_do(OopClosure* f, NMethodClosure* cf, DerivedOopClosure* df, DerivedPointerIterationMode derived_mode, const RegisterMap* reg_map) const {
assert(_cb != nullptr, "sanity check");
assert((oop_map() == nullptr) == (_cb->oop_maps() == nullptr), "frame and _cb must agree that oopmap is set or not");
if (oop_map() != nullptr) {
@ -983,8 +983,8 @@ void frame::oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, DerivedOopClos
// prevent them from being collected. However, this visit should be
// restricted to certain phases of the collection only. The
// closure decides how it wants nmethods to be traced.
if (cf != nullptr)
cf->do_code_blob(_cb);
if (cf != nullptr && _cb->is_nmethod())
cf->do_nmethod(_cb->as_nmethod());
}
class CompiledArgumentOopFinder: public SignatureIterator {
@ -1131,7 +1131,7 @@ bool frame::is_deoptimized_frame() const {
return false;
}
void frame::oops_do_internal(OopClosure* f, CodeBlobClosure* cf,
void frame::oops_do_internal(OopClosure* f, NMethodClosure* cf,
DerivedOopClosure* df, DerivedPointerIterationMode derived_mode,
const RegisterMap* map, bool use_interpreter_oop_map_cache) const {
#ifndef PRODUCT
@ -1148,15 +1148,15 @@ void frame::oops_do_internal(OopClosure* f, CodeBlobClosure* cf,
} else if (is_upcall_stub_frame()) {
_cb->as_upcall_stub()->oops_do(f, *this);
} else if (CodeCache::contains(pc())) {
oops_code_blob_do(f, cf, df, derived_mode, map);
oops_nmethod_do(f, cf, df, derived_mode, map);
} else {
ShouldNotReachHere();
}
}
void frame::nmethods_do(CodeBlobClosure* cf) const {
void frame::nmethod_do(NMethodClosure* cf) const {
if (_cb != nullptr && _cb->is_nmethod()) {
cf->do_code_blob(_cb);
cf->do_nmethod(_cb->as_nmethod());
}
}
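A consolidated sketch of the frame-walking pattern above (visit_stack_roots is a hypothetical helper; assumes a safepoint and a caller-supplied OopClosure):

#include "memory/iterator.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/stackFrameStream.inline.hpp"

// Read-only root scan: visit every frame's oops and the owning nmethods,
// without fixing relocations.
void visit_stack_roots(JavaThread* jt, OopClosure* cl) {
  NMethodToOopClosure nm_cl(cl, !NMethodToOopClosure::FixRelocations);
  for (StackFrameStream fst(jt, true /* update */, false /* process_frames */);
       !fst.is_done();
       fst.next()) {
    fst.current()->oops_do(cl, &nm_cl, fst.register_map());
  }
}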

@ -450,17 +450,17 @@ class frame {
void oops_interpreted_arguments_do(Symbol* signature, bool has_receiver, OopClosure* f) const;
// Iteration of oops
void oops_do_internal(OopClosure* f, CodeBlobClosure* cf,
void oops_do_internal(OopClosure* f, NMethodClosure* cf,
DerivedOopClosure* df, DerivedPointerIterationMode derived_mode,
const RegisterMap* map, bool use_interpreter_oop_map_cache) const;
void oops_entry_do(OopClosure* f, const RegisterMap* map) const;
void oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf,
DerivedOopClosure* df, DerivedPointerIterationMode derived_mode,
const RegisterMap* map) const;
void oops_nmethod_do(OopClosure* f, NMethodClosure* cf,
DerivedOopClosure* df, DerivedPointerIterationMode derived_mode,
const RegisterMap* map) const;
public:
// Memory management
void oops_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* map) {
void oops_do(OopClosure* f, NMethodClosure* cf, const RegisterMap* map) {
#if COMPILER2_OR_JVMCI
DerivedPointerIterationMode dpim = DerivedPointerTable::is_active() ?
DerivedPointerIterationMode::_with_table :
@ -471,16 +471,16 @@ class frame {
oops_do_internal(f, cf, nullptr, dpim, map, true);
}
void oops_do(OopClosure* f, CodeBlobClosure* cf, DerivedOopClosure* df, const RegisterMap* map) {
void oops_do(OopClosure* f, NMethodClosure* cf, DerivedOopClosure* df, const RegisterMap* map) {
oops_do_internal(f, cf, df, DerivedPointerIterationMode::_ignore, map, true);
}
void oops_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* map,
void oops_do(OopClosure* f, NMethodClosure* cf, const RegisterMap* map,
DerivedPointerIterationMode derived_mode) const {
oops_do_internal(f, cf, nullptr, derived_mode, map, true);
}
void nmethods_do(CodeBlobClosure* cf) const;
void nmethod_do(NMethodClosure* cf) const;
// RedefineClasses support for finding live interpreted methods on the stack
void metadata_do(MetadataClosure* f) const;

@ -1381,7 +1381,7 @@ void JavaThread::pop_jni_handle_block() {
JNIHandleBlock::release_block(old_handles, this);
}
void JavaThread::oops_do_no_frames(OopClosure* f, CodeBlobClosure* cf) {
void JavaThread::oops_do_no_frames(OopClosure* f, NMethodClosure* cf) {
// Verify that the deferred card marks have been flushed.
assert(deferred_card_mark().is_empty(), "Should be empty during GC");
@ -1439,7 +1439,7 @@ void JavaThread::oops_do_no_frames(OopClosure* f, CodeBlobClosure* cf) {
}
}
void JavaThread::oops_do_frames(OopClosure* f, CodeBlobClosure* cf) {
void JavaThread::oops_do_frames(OopClosure* f, NMethodClosure* cf) {
if (!has_last_Java_frame()) {
return;
}
@ -1458,14 +1458,14 @@ void JavaThread::verify_states_for_handshake() {
}
#endif
void JavaThread::nmethods_do(CodeBlobClosure* cf) {
void JavaThread::nmethods_do(NMethodClosure* cf) {
DEBUG_ONLY(verify_frame_info();)
MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, Thread::current());)
if (has_last_Java_frame()) {
// Traverse the execution stack
for (StackFrameStream fst(this, true /* update */, true /* process_frames */); !fst.is_done(); fst.next()) {
fst.current()->nmethods_do(cf);
fst.current()->nmethod_do(cf);
}
}

@ -891,11 +891,11 @@ private:
void frames_do(void f(frame*, const RegisterMap*));
// Memory operations
void oops_do_frames(OopClosure* f, CodeBlobClosure* cf);
void oops_do_no_frames(OopClosure* f, CodeBlobClosure* cf);
void oops_do_frames(OopClosure* f, NMethodClosure* cf);
void oops_do_no_frames(OopClosure* f, NMethodClosure* cf);
// GC operations
virtual void nmethods_do(CodeBlobClosure* cf);
virtual void nmethods_do(NMethodClosure* cf);
// RedefineClasses Support
void metadata_do(MetadataClosure* f);

@ -208,7 +208,7 @@ void ServiceThread::enqueue_deferred_event(JvmtiDeferredEvent* event) {
Service_lock->notify_all();
}
void ServiceThread::oops_do_no_frames(OopClosure* f, CodeBlobClosure* cf) {
void ServiceThread::oops_do_no_frames(OopClosure* f, NMethodClosure* cf) {
JavaThread::oops_do_no_frames(f, cf);
// The ServiceThread "owns" the JVMTI Deferred events, scan them here
// to keep them alive until they are processed.
@ -220,7 +220,7 @@ void ServiceThread::oops_do_no_frames(OopClosure* f, CodeBlobClosure* cf) {
_jvmti_service_queue.oops_do(f, cf);
}
void ServiceThread::nmethods_do(CodeBlobClosure* cf) {
void ServiceThread::nmethods_do(NMethodClosure* cf) {
JavaThread::nmethods_do(cf);
if (cf != nullptr) {
if (_jvmti_event != nullptr) {

@ -54,8 +54,8 @@ class ServiceThread : public JavaThread {
static void enqueue_deferred_event(JvmtiDeferredEvent* event);
// GC support
void oops_do_no_frames(OopClosure* f, CodeBlobClosure* cf);
void nmethods_do(CodeBlobClosure* cf);
void oops_do_no_frames(OopClosure* f, NMethodClosure* cf);
void nmethods_do(NMethodClosure* cf);
};
#endif // SHARE_RUNTIME_SERVICETHREAD_HPP

@ -399,7 +399,7 @@ bool Thread::claim_par_threads_do(uintx claim_token) {
return false;
}
void Thread::oops_do_no_frames(OopClosure* f, CodeBlobClosure* cf) {
void Thread::oops_do_no_frames(OopClosure* f, NMethodClosure* cf) {
// Do oop for ThreadShadow
f->do_oop((oop*)&_pending_exception);
handle_area()->oops_do(f);
@ -429,7 +429,7 @@ public:
}
};
void Thread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
void Thread::oops_do(OopClosure* f, NMethodClosure* cf) {
// Record JavaThread to GC thread
RememberProcessedThread rpt(this);
oops_do_no_frames(f, cf);

@ -48,7 +48,9 @@ class HandleArea;
class HandleMark;
class ICRefillVerifier;
class JvmtiRawMonitor;
class NMethodClosure;
class Metadata;
class OopClosure;
class OSThread;
class ParkEvent;
class ResourceArea;
@ -58,8 +60,6 @@ class ThreadsList;
class ThreadsSMRSupport;
class VMErrorCallback;
class OopClosure;
class CodeBlobClosure;
DEBUG_ONLY(class ResourceMark;)
@ -443,10 +443,10 @@ class Thread: public ThreadShadow {
// GC support
// Apply "f->do_oop" to all root oops in "this".
// Used by JavaThread::oops_do.
// Apply "cf->do_code_blob" (if !nullptr) to all code blobs active in frames
virtual void oops_do_no_frames(OopClosure* f, CodeBlobClosure* cf);
virtual void oops_do_frames(OopClosure* f, CodeBlobClosure* cf) {}
void oops_do(OopClosure* f, CodeBlobClosure* cf);
// Apply "cf->do_nmethod" (if !nullptr) to all nmethods active in frames
virtual void oops_do_no_frames(OopClosure* f, NMethodClosure* cf);
virtual void oops_do_frames(OopClosure* f, NMethodClosure* cf) {}
void oops_do(OopClosure* f, NMethodClosure* cf);
// Handles the parallel case for claim_threads_do.
private:
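As the updated comment says, cf may be null. A short sketch of both modes (the helper names are hypothetical):

#include "memory/iterator.hpp"
#include "runtime/thread.hpp"

// Oops-only scan: passing nullptr skips nmethod processing entirely.
void scan_oops_only(Thread* t, OopClosure* cl) {
  t->oops_do(cl, nullptr /* cf */);
}

// Full scan: also apply an NMethodToOopClosure to active nmethods.
void scan_oops_and_nmethods(Thread* t, OopClosure* cl) {
  NMethodToOopClosure nm_cl(cl, !NMethodToOopClosure::FixRelocations);
  t->oops_do(cl, &nm_cl);
}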

@ -1091,7 +1091,7 @@ void Threads::remove(JavaThread* p, bool is_daemon) {
// uses the Threads_lock to guarantee this property. It also makes sure that
// all threads gets blocked when exiting or starting).
void Threads::oops_do(OopClosure* f, CodeBlobClosure* cf) {
void Threads::oops_do(OopClosure* f, NMethodClosure* cf) {
ALL_JAVA_THREADS(p) {
p->oops_do(f, cf);
}
@ -1148,15 +1148,15 @@ void Threads::assert_all_threads_claimed() {
class ParallelOopsDoThreadClosure : public ThreadClosure {
private:
OopClosure* _f;
CodeBlobClosure* _cf;
NMethodClosure* _cf;
public:
ParallelOopsDoThreadClosure(OopClosure* f, CodeBlobClosure* cf) : _f(f), _cf(cf) {}
ParallelOopsDoThreadClosure(OopClosure* f, NMethodClosure* cf) : _f(f), _cf(cf) {}
void do_thread(Thread* t) {
t->oops_do(_f, _cf);
}
};
void Threads::possibly_parallel_oops_do(bool is_par, OopClosure* f, CodeBlobClosure* cf) {
void Threads::possibly_parallel_oops_do(bool is_par, OopClosure* f, NMethodClosure* cf) {
ParallelOopsDoThreadClosure tc(f, cf);
possibly_parallel_threads_do(is_par, &tc);
}

@ -33,15 +33,13 @@
class JavaThread;
class Metadata;
class MetadataClosure;
class OopClosure;
class Thread;
class ThreadClosure;
class ThreadsList;
class outputStream;
class CodeBlobClosure;
class MetadataClosure;
class OopClosure;
// The active thread queue. It also keeps track of the current used
// thread priorities.
class Threads: AllStatic {
@ -106,9 +104,9 @@ public:
// Apply "f->do_oop" to all root oops in all threads.
// This version may only be called by sequential code.
static void oops_do(OopClosure* f, CodeBlobClosure* cf);
static void oops_do(OopClosure* f, NMethodClosure* cf);
// This version may be called by sequential or parallel code.
static void possibly_parallel_oops_do(bool is_par, OopClosure* f, CodeBlobClosure* cf);
static void possibly_parallel_oops_do(bool is_par, OopClosure* f, NMethodClosure* cf);
// RedefineClasses support
static void metadata_do(MetadataClosure* f);
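
Finally, a hypothetical worker-side sketch of the parallel variant; the thread-claiming protocol behind possibly_parallel_oops_do ensures each JavaThread is scanned by exactly one worker per claim token:

#include "memory/iterator.hpp"
#include "runtime/threads.hpp"

// Hypothetical GC worker body: every worker calls this with the same
// closures; thread claiming divides the work automatically.
void scan_thread_roots_in_worker(OopClosure* mark_cl) {
  MarkingNMethodClosure nmethods(mark_cl,
                                 !NMethodToOopClosure::FixRelocations,
                                 true /* keepalive_nmethods */);
  Threads::possibly_parallel_oops_do(true /* is_par */, mark_cl, &nmethods);
}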