8049421: G1 Class Unloading after completing a concurrent mark cycle

Co-authored-by: Mikael Gerdin <mikael.gerdin@oracle.com>
Reviewed-by: tschatzl, ehelin, brutisso, coleenp, roland, iveresov

commit 1b001a2afd (parent ef1e9b3c80)
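Note (annotation, not part of the patch): this change lets G1 unload classes at the end of a concurrent mark cycle instead of requiring a full GC. The recurring pattern in the hunks below is that roots which used to be scanned strongly (the StringTable, the system dictionary, compiler-held metadata) become weak, and any value handed out from such a table is announced to G1's SATB marking so a concurrently running mark cannot miss a resurrected object. A minimal sketch of that barrier, using only names that appear in the patch:

    // Sketch of the "resurrection barrier" applied at every weak-table lookup:
    // enqueueing on the SATB queue makes concurrent marking treat the object
    // as live for the current cycle.
    static void ensure_alive(oop obj) {
    #if INCLUDE_ALL_GCS
      if (UseG1GC && obj != NULL) {
        G1SATBCardTableModRefBS::enqueue(obj);
      }
    #endif
    }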
@@ -1050,6 +1050,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
        n_copy->set_data((intx) (load_klass()));
      } else {
        assert(mirror() != NULL, "klass not set");
        // Don't need a G1 pre-barrier here since we assert above that data isn't an oop.
        n_copy->set_data(cast_from_oop<intx>(mirror()));
      }

@@ -185,6 +185,10 @@ private:
    }
  }

  void ensure_metadata_alive(ciMetadata* m) {
    _factory->ensure_metadata_alive(m);
  }

  ciInstance* get_instance(oop o) {
    if (o == NULL) return NULL;
    return get_object(o)->as_instance();

@@ -43,6 +43,7 @@ class ciKlass : public ciType {
  friend class ciMethod;
  friend class ciMethodData;
  friend class ciObjArrayKlass;
  friend class ciReceiverTypeData;

private:
  ciSymbol* _name;

@@ -170,6 +170,7 @@ void ciReceiverTypeData::translate_receiver_data_from(const ProfileData* data) {
    Klass* k = data->as_ReceiverTypeData()->receiver(row);
    if (k != NULL) {
      ciKlass* klass = CURRENT_ENV->get_klass(k);
      CURRENT_ENV->ensure_metadata_alive(klass);
      set_receiver(row, klass);
    }
  }

@@ -191,6 +192,7 @@ void ciReturnTypeEntry::translate_type_data_from(const ReturnTypeEntry* ret) {
void ciSpeculativeTrapData::translate_from(const ProfileData* data) {
  Method* m = data->as_SpeculativeTrapData()->method();
  ciMethod* ci_m = CURRENT_ENV->get_method(m);
  CURRENT_ENV->ensure_metadata_alive(ci_m);
  set_method(ci_m);
}

@@ -70,6 +70,7 @@ protected:
    Klass* v = TypeEntries::valid_klass(k);
    if (v != NULL) {
      ciKlass* klass = CURRENT_ENV->get_klass(v);
      CURRENT_ENV->ensure_metadata_alive(klass);
      return with_status(klass, k);
    }
    return with_status(NULL, k);

@@ -46,6 +46,9 @@
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "runtime/fieldType.hpp"
#if INCLUDE_ALL_GCS
# include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#endif

// ciObjectFactory
//

@@ -374,6 +377,37 @@ ciMetadata* ciObjectFactory::create_new_object(Metadata* o) {
  return NULL;
}

// ------------------------------------------------------------------
// ciObjectFactory::ensure_metadata_alive
//
// Ensure that the metadata wrapped by the ciMetadata is kept alive by GC.
// This is primarily useful for metadata which is considered as weak roots
// by the GC but need to be strong roots if reachable from a current compilation.
//
void ciObjectFactory::ensure_metadata_alive(ciMetadata* m) {
  ASSERT_IN_VM; // We're handling raw oops here.

#if INCLUDE_ALL_GCS
  if (!UseG1GC) {
    return;
  }
  Klass* metadata_owner_klass;
  if (m->is_klass()) {
    metadata_owner_klass = m->as_klass()->get_Klass();
  } else if (m->is_method()) {
    metadata_owner_klass = m->as_method()->get_Method()->constants()->pool_holder();
  } else {
    fatal("Not implemented for other types of metadata");
  }

  oop metadata_holder = metadata_owner_klass->klass_holder();
  if (metadata_holder != NULL) {
    G1SATBCardTableModRefBS::enqueue(metadata_holder);
  }

#endif
}

//------------------------------------------------------------------
// ciObjectFactory::get_unloaded_method
//
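Annotation (not part of the patch): enqueueing klass_holder() works because metadata lifetime is tied to its ClassLoaderData, and a CLD is kept alive by its holder oop (the class loader, or the java mirror for anonymous classes). Keeping that one oop alive therefore keeps the loader's entire metaspace alive for the current marking cycle. A condensed illustration, with a hypothetical helper name:

    // Keep-alive chain: ciMetadata -> owning Klass -> klass_holder() oop -> CLD.
    void keep_metadata_alive(Klass* owner) {
      oop holder = owner->klass_holder();
      if (holder != NULL) {
        G1SATBCardTableModRefBS::enqueue(holder); // SATB: live this cycle
      }
    }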
@@ -75,6 +75,8 @@ private:
  ciObject* create_new_object(oop o);
  ciMetadata* create_new_object(Metadata* o);

  void ensure_metadata_alive(ciMetadata* m);

  static bool is_equal(NonPermObject* p, oop key) {
    return p->object()->get_oop() == key;
  }

@@ -332,6 +332,27 @@ void ClassLoaderData::unload() {
    }
  }

#ifdef ASSERT
class AllAliveClosure : public OopClosure {
  BoolObjectClosure* _is_alive_closure;
  bool _found_dead;
 public:
  AllAliveClosure(BoolObjectClosure* is_alive_closure) : _is_alive_closure(is_alive_closure), _found_dead(false) {}
  template <typename T> void do_oop_work(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      if (!_is_alive_closure->do_object_b(obj)) {
        _found_dead = true;
      }
    }
  }
  void do_oop(oop* p) { do_oop_work<oop>(p); }
  void do_oop(narrowOop* p) { do_oop_work<narrowOop>(p); }
  bool found_dead() { return _found_dead; }
};
#endif

oop ClassLoaderData::keep_alive_object() const {
  assert(!keep_alive(), "Don't use with CLDs that are artificially kept alive");
  return is_anonymous() ? _klasses->java_mirror() : class_loader();

@@ -341,7 +362,15 @@ bool ClassLoaderData::is_alive(BoolObjectClosure* is_alive_closure) const {
  bool alive = keep_alive() // null class loader and incomplete anonymous klasses.
      || is_alive_closure->do_object_b(keep_alive_object());

  assert(!alive || claimed(), "must be claimed");
#ifdef ASSERT
  if (alive) {
    AllAliveClosure all_alive_closure(is_alive_closure);
    KlassToOopClosure klass_closure(&all_alive_closure);
    const_cast<ClassLoaderData*>(this)->oops_do(&all_alive_closure, &klass_closure, false);
    assert(!all_alive_closure.found_dead(), err_msg("Found dead oop in alive cld: " PTR_FORMAT, p2i(this)));
  }
#endif

  return alive;
}

@@ -619,9 +648,9 @@ void ClassLoaderDataGraph::keep_alive_oops_do(OopClosure* f, KlassClosure* klass

void ClassLoaderDataGraph::always_strong_oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim) {
  if (ClassUnloading) {
    ClassLoaderDataGraph::keep_alive_oops_do(f, klass_closure, must_claim);
    keep_alive_oops_do(f, klass_closure, must_claim);
  } else {
    ClassLoaderDataGraph::oops_do(f, klass_closure, must_claim);
    oops_do(f, klass_closure, must_claim);
  }
}

@@ -631,6 +660,27 @@ void ClassLoaderDataGraph::cld_do(CLDClosure* cl) {
  }
}

void ClassLoaderDataGraph::roots_cld_do(CLDClosure* strong, CLDClosure* weak) {
  for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->_next) {
    CLDClosure* closure = cld->keep_alive() ? strong : weak;
    if (closure != NULL) {
      closure->do_cld(cld);
    }
  }
}

void ClassLoaderDataGraph::keep_alive_cld_do(CLDClosure* cl) {
  roots_cld_do(cl, NULL);
}

void ClassLoaderDataGraph::always_strong_cld_do(CLDClosure* cl) {
  if (ClassUnloading) {
    keep_alive_cld_do(cl);
  } else {
    cld_do(cl);
  }
}
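Annotation (not part of the patch): roots_cld_do() is the primitive the other two walkers reduce to. CLDs that must survive regardless (keep_alive(): the null class loader and not-yet-complete anonymous classes) go to the strong closure; all others go to the weak closure, which may be NULL. A usage sketch with hypothetical closure variables:

    // Initial-mark style root scan (sketch): strong CLDs are always followed,
    // weak CLDs only if their keep-alive object turns out to be live.
    ClassLoaderDataGraph::roots_cld_do(&strong_cld_closure, &weak_cld_closure);
    // Equivalent of the strong-only walk:
    ClassLoaderDataGraph::keep_alive_cld_do(&strong_cld_closure); // weak == NULL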
void ClassLoaderDataGraph::classes_do(KlassClosure* klass_closure) {
  for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
    cld->classes_do(klass_closure);

@@ -686,6 +736,16 @@ GrowableArray<ClassLoaderData*>* ClassLoaderDataGraph::new_clds() {
  return array;
}

bool ClassLoaderDataGraph::unload_list_contains(const void* x) {
  assert(SafepointSynchronize::is_at_safepoint(), "only safe to call at safepoint");
  for (ClassLoaderData* cld = _unloading; cld != NULL; cld = cld->next()) {
    if (cld->metaspace_or_null() != NULL && cld->metaspace_or_null()->contains(x)) {
      return true;
    }
  }
  return false;
}

#ifndef PRODUCT
bool ClassLoaderDataGraph::contains_loader_data(ClassLoaderData* loader_data) {
  for (ClassLoaderData* data = _head; data != NULL; data = data->next()) {

@@ -801,6 +861,60 @@ Metaspace* ClassLoaderData::rw_metaspace() {
  return _rw_metaspace;
}

ClassLoaderDataGraphKlassIteratorAtomic::ClassLoaderDataGraphKlassIteratorAtomic()
    : _next_klass(NULL) {
  ClassLoaderData* cld = ClassLoaderDataGraph::_head;
  Klass* klass = NULL;

  // Find the first klass in the CLDG.
  while (cld != NULL) {
    klass = cld->_klasses;
    if (klass != NULL) {
      _next_klass = klass;
      return;
    }
    cld = cld->next();
  }
}

Klass* ClassLoaderDataGraphKlassIteratorAtomic::next_klass_in_cldg(Klass* klass) {
  Klass* next = klass->next_link();
  if (next != NULL) {
    return next;
  }

  // No more klasses in the current CLD. Time to find a new CLD.
  ClassLoaderData* cld = klass->class_loader_data();
  while (next == NULL) {
    cld = cld->next();
    if (cld == NULL) {
      break;
    }
    next = cld->_klasses;
  }

  return next;
}

Klass* ClassLoaderDataGraphKlassIteratorAtomic::next_klass() {
  Klass* head = (Klass*)_next_klass;

  while (head != NULL) {
    Klass* next = next_klass_in_cldg(head);

    Klass* old_head = (Klass*)Atomic::cmpxchg_ptr(next, &_next_klass, head);

    if (old_head == head) {
      return head; // Won the CAS.
    }

    head = old_head;
  }

  // Nothing more for the iterator to hand out.
  assert(head == NULL, err_msg("head is " PTR_FORMAT ", expected not null:", p2i(head)));
  return NULL;
}

ClassLoaderDataGraphMetaspaceIterator::ClassLoaderDataGraphMetaspaceIterator() {
  _data = ClassLoaderDataGraph::_head;

@@ -31,7 +31,6 @@
#include "memory/metaspaceCounters.hpp"
#include "runtime/mutex.hpp"
#include "utilities/growableArray.hpp"

#if INCLUDE_TRACE
# include "utilities/ticks.hpp"
#endif

@@ -59,6 +58,7 @@ class Metadebug;
class ClassLoaderDataGraph : public AllStatic {
  friend class ClassLoaderData;
  friend class ClassLoaderDataGraphMetaspaceIterator;
  friend class ClassLoaderDataGraphKlassIteratorAtomic;
  friend class VMStructs;
 private:
  // All CLDs (except the null CLD) can be reached by walking _head->_next->...

@@ -74,10 +74,16 @@ class ClassLoaderDataGraph : public AllStatic {
  static ClassLoaderData* find_or_create(Handle class_loader, TRAPS);
  static void purge();
  static void clear_claimed_marks();
  // oops do
  static void oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim);
  static void always_strong_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
  static void keep_alive_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
  static void always_strong_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
  // cld do
  static void cld_do(CLDClosure* cl);
  static void roots_cld_do(CLDClosure* strong, CLDClosure* weak);
  static void keep_alive_cld_do(CLDClosure* cl);
  static void always_strong_cld_do(CLDClosure* cl);
  // klass do
  static void classes_do(KlassClosure* klass_closure);
  static void classes_do(void f(Klass* const));
  static void methods_do(void f(Method*));

@@ -103,6 +109,7 @@ class ClassLoaderDataGraph : public AllStatic {
  static void dump() { dump_on(tty); }
  static void verify();

  static bool unload_list_contains(const void* x);
#ifndef PRODUCT
  static bool contains_loader_data(ClassLoaderData* loader_data);
#endif

@@ -135,6 +142,7 @@ class ClassLoaderData : public CHeapObj<mtClass> {
  };

  friend class ClassLoaderDataGraph;
  friend class ClassLoaderDataGraphKlassIteratorAtomic;
  friend class ClassLoaderDataGraphMetaspaceIterator;
  friend class MetaDataFactory;
  friend class Method;

@@ -194,7 +202,6 @@ class ClassLoaderData : public CHeapObj<mtClass> {

  void unload();
  bool keep_alive() const { return _keep_alive; }
  bool is_alive(BoolObjectClosure* is_alive_closure) const;
  void classes_do(void f(Klass*));
  void loaded_classes_do(KlassClosure* klass_closure);
  void classes_do(void f(InstanceKlass*));

@@ -207,6 +214,9 @@ class ClassLoaderData : public CHeapObj<mtClass> {
  MetaWord* allocate(size_t size);

 public:

  bool is_alive(BoolObjectClosure* is_alive_closure) const;

  // Accessors
  Metaspace* metaspace_or_null() const { return _metaspace; }

@@ -292,6 +302,16 @@ class ClassLoaderData : public CHeapObj<mtClass> {
  void initialize_shared_metaspaces();
};

// An iterator that distributes Klasses to parallel worker threads.
class ClassLoaderDataGraphKlassIteratorAtomic : public StackObj {
  volatile Klass* _next_klass;
 public:
  ClassLoaderDataGraphKlassIteratorAtomic();
  Klass* next_klass();
 private:
  static Klass* next_klass_in_cldg(Klass* klass);
};

class ClassLoaderDataGraphMetaspaceIterator : public StackObj {
  ClassLoaderData* _data;
 public:

@@ -199,6 +199,26 @@ bool Dictionary::do_unloading() {
  return class_was_unloaded;
}

void Dictionary::roots_oops_do(OopClosure* strong, OopClosure* weak) {
  // Skip the strong roots probe marking if the closures are the same.
  if (strong == weak) {
    oops_do(strong);
    return;
  }

  for (int index = 0; index < table_size(); index++) {
    for (DictionaryEntry *probe = bucket(index);
                          probe != NULL;
                          probe = probe->next()) {
      Klass* e = probe->klass();
      ClassLoaderData* loader_data = probe->loader_data();
      if (is_strongly_reachable(loader_data, e)) {
        probe->set_strongly_reachable();
      }
    }
  }
  _pd_cache_table->roots_oops_do(strong, weak);
}

void Dictionary::always_strong_oops_do(OopClosure* blk) {
  // Follow all system classes and temporary placeholders in dictionary; only

@@ -490,6 +510,23 @@ void ProtectionDomainCacheTable::oops_do(OopClosure* f) {
  }
}

void ProtectionDomainCacheTable::roots_oops_do(OopClosure* strong, OopClosure* weak) {
  for (int index = 0; index < table_size(); index++) {
    for (ProtectionDomainCacheEntry* probe = bucket(index);
                                     probe != NULL;
                                     probe = probe->next()) {
      if (probe->is_strongly_reachable()) {
        probe->reset_strongly_reachable();
        probe->oops_do(strong);
      } else {
        if (weak != NULL) {
          probe->oops_do(weak);
        }
      }
    }
  }
}

uint ProtectionDomainCacheTable::bucket_size() {
  return sizeof(ProtectionDomainCacheEntry);
}

@@ -89,6 +89,7 @@ public:
  // GC support
  void oops_do(OopClosure* f);
  void always_strong_oops_do(OopClosure* blk);
  void roots_oops_do(OopClosure* strong, OopClosure* weak);

  void always_strong_classes_do(KlassClosure* closure);

@@ -218,6 +219,7 @@ public:
  // GC support
  void oops_do(OopClosure* f);
  void always_strong_oops_do(OopClosure* f);
  void roots_oops_do(OopClosure* strong, OopClosure* weak);

  static uint bucket_size();

@@ -47,8 +47,11 @@ MetadataOnStackMark::MetadataOnStackMark() {
  if (_marked_objects == NULL) {
    _marked_objects = new (ResourceObj::C_HEAP, mtClass) GrowableArray<Metadata*>(1000, true);
  }

  Threads::metadata_do(Metadata::mark_on_stack);
  CodeCache::alive_nmethods_do(nmethod::mark_on_stack);
  if (JvmtiExport::has_redefined_a_class()) {
    CodeCache::alive_nmethods_do(nmethod::mark_on_stack);
  }
  CompileBroker::mark_on_stack();
  JvmtiCurrentBreakpoints::metadata_do(Metadata::mark_on_stack);
  ThreadService::metadata_do(Metadata::mark_on_stack);

@@ -37,6 +37,7 @@
#include "runtime/mutexLocker.hpp"
#include "utilities/hashtable.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/g1StringDedup.hpp"
#endif

@@ -157,11 +158,26 @@ oop StringTable::lookup(Symbol* symbol) {
  return lookup(chars, length);
}

// Tell the GC that this string was looked up in the StringTable.
static void ensure_string_alive(oop string) {
  // A lookup in the StringTable could return an object that was previously
  // considered dead. The SATB part of G1 needs to get notified about this
  // potential resurrection, otherwise the marking might not find the object.
#if INCLUDE_ALL_GCS
  if (UseG1GC && string != NULL) {
    G1SATBCardTableModRefBS::enqueue(string);
  }
#endif
}

oop StringTable::lookup(jchar* name, int len) {
  unsigned int hash = hash_string(name, len);
  int index = the_table()->hash_to_index(hash);
  return the_table()->lookup(index, name, len, hash);
  oop string = the_table()->lookup(index, name, len, hash);

  ensure_string_alive(string);

  return string;
}

@@ -172,7 +188,10 @@ oop StringTable::intern(Handle string_or_null, jchar* name,
  oop found_string = the_table()->lookup(index, name, len, hashValue);

  // Found
  if (found_string != NULL) return found_string;
  if (found_string != NULL) {
    ensure_string_alive(found_string);
    return found_string;
  }

  debug_only(StableMemoryChecker smc(name, len * sizeof(name[0])));
  assert(!Universe::heap()->is_in_reserved(name),

@@ -197,11 +216,17 @@ oop StringTable::intern(Handle string_or_null, jchar* name,

  // Grab the StringTable_lock before getting the_table() because it could
  // change at safepoint.
  MutexLocker ml(StringTable_lock, THREAD);
  oop added_or_found;
  {
    MutexLocker ml(StringTable_lock, THREAD);
    // Otherwise, add to symbol to table
    added_or_found = the_table()->basic_add(index, string, name, len,
                                            hashValue, CHECK_NULL);
  }

  // Otherwise, add to symbol to table
  return the_table()->basic_add(index, string, name, len,
                                hashValue, CHECK_NULL);
  ensure_string_alive(added_or_found);

  return added_or_found;
}

oop StringTable::intern(Symbol* symbol, TRAPS) {
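Annotation (not part of the patch): with the StringTable now scanned as a weak root, every path that can publish a string to a caller funnels through ensure_string_alive(), lookup() and both branches of intern() alike. Note the restructuring of intern(): basic_add() still runs under StringTable_lock, but the SATB enqueue is placed after the lock scope ends. Caller-side view, with hypothetical arguments:

    // Either call may now hand back a string that marking had not yet seen;
    // the table itself notifies SATB marking before returning it.
    oop s1 = StringTable::lookup(name_chars, name_len);
    oop s2 = StringTable::intern(string_or_null, name_chars, name_len, THREAD);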
@@ -1612,13 +1612,7 @@ void SystemDictionary::add_to_hierarchy(instanceKlassHandle k, TRAPS) {
// system dictionary and follows the remaining classes' contents.

void SystemDictionary::always_strong_oops_do(OopClosure* blk) {
  blk->do_oop(&_java_system_loader);
  blk->do_oop(&_system_loader_lock_obj);

  dictionary()->always_strong_oops_do(blk);

  // Visit extra methods
  invoke_method_table()->oops_do(blk);
  roots_oops_do(blk, NULL);
}

void SystemDictionary::always_strong_classes_do(KlassClosure* closure) {

@@ -1685,6 +1679,17 @@ bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) {
  return unloading_occurred;
}

void SystemDictionary::roots_oops_do(OopClosure* strong, OopClosure* weak) {
  strong->do_oop(&_java_system_loader);
  strong->do_oop(&_system_loader_lock_obj);

  // Adjust dictionary
  dictionary()->roots_oops_do(strong, weak);

  // Visit extra methods
  invoke_method_table()->oops_do(strong);
}

void SystemDictionary::oops_do(OopClosure* f) {
  f->do_oop(&_java_system_loader);
  f->do_oop(&_system_loader_lock_obj);

@@ -330,6 +330,7 @@ public:

  // Applies "f->do_oop" to all root oops in the system dictionary.
  static void oops_do(OopClosure* f);
  static void roots_oops_do(OopClosure* strong, OopClosure* weak);

  // System loader lock
  static oop system_loader_lock() { return _system_loader_lock_obj; }

@@ -331,6 +331,11 @@ void CodeCache::blobs_do(CodeBlobClosure* f) {
// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  debug_only(mark_scavenge_root_nmethods());

  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {

@@ -356,6 +361,11 @@ void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {

void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  nm->set_on_scavenge_root_list();
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);

@@ -364,6 +374,11 @@ void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {

void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  print_trace("drop_scavenge_root", nm);
  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();

@@ -385,6 +400,11 @@ void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {

void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;

@@ -417,6 +437,10 @@ void CodeCache::prune_scavenge_root_nmethods() {

#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  if (UseG1GC) {
    return;
  }

  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {

@@ -457,9 +481,36 @@ void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
}
#endif //PRODUCT

void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      assert(!nm->is_unloaded(), "Tautology");
      nm->verify_clean_inline_caches();
      nm->verify();
    }
  }
#endif
}

void CodeCache::verify_icholder_relocations() {
#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      count += nm->verify_icholder_relocations();
    }
  }

  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}

void CodeCache::gc_prologue() {
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
}

void CodeCache::gc_epilogue() {

@@ -472,41 +523,15 @@ void CodeCache::gc_epilogue() {
        nm->cleanup_inline_caches();
      }
      DEBUG_ONLY(nm->verify());
      nm->fix_oop_relocations();
      DEBUG_ONLY(nm->verify_oop_relocations());
    }
  }
  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");

#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_BLOBS(cb) {
    if (cb->is_nmethod()) {
      RelocIterator iter((nmethod*)cb);
      while(iter.next()) {
        if (iter.type() == relocInfo::virtual_call_type) {
          if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
            CompiledIC *ic = CompiledIC_at(&iter);
            if (TraceCompiledIC) {
              tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
              ic->print();
            }
            assert(ic->cached_icholder() != NULL, "must be non-NULL");
            count++;
          }
        }
      }
    }
  }

  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
  verify_icholder_relocations();
}


void CodeCache::verify_oops() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;

@@ -134,10 +134,6 @@ class CodeCache : AllStatic {
  // to) any unmarked codeBlobs in the cache. Sets "marked_for_unloading"
  // to "true" iff some code got unloaded.
  static void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
  static void oops_do(OopClosure* f) {
    CodeBlobToOopClosure oopc(f, /*do_marking=*/ false);
    blobs_do(&oopc);
  }
  static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN;
  static void scavenge_root_nmethods_do(CodeBlobClosure* f);

@@ -173,6 +169,9 @@ class CodeCache : AllStatic {
  static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; }
  static void clear_inline_caches(); // clear all inline caches

  static void verify_clean_inline_caches();
  static void verify_icholder_relocations();

  // Deoptimization
  static int mark_for_deoptimization(DepChange& changes);
#ifdef HOTSWAP

@@ -99,13 +99,13 @@ void CompiledIC::internal_set_ic_destination(address entry_point, bool is_icstub
  }

  {
    MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
    MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
    CodeBlob* cb = CodeCache::find_blob_unsafe(_ic_call);
    assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
    CodeBlob* cb = CodeCache::find_blob_unsafe(_ic_call);
    assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
#endif
    _ic_call->set_destination_mt_safe(entry_point);
  }
  _ic_call->set_destination_mt_safe(entry_point);
}

  if (is_optimized() || is_icstub) {
    // Optimized call sites don't have a cache value and ICStub call

@@ -529,7 +529,7 @@ bool CompiledIC::is_icholder_entry(address entry) {
void CompiledStaticCall::set_to_clean() {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  // Reset call site
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
  MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
  CodeBlob* cb = CodeCache::find_blob_unsafe(this);
  assert(cb != NULL && cb->is_nmethod(), "must be nmethod");

@@ -51,6 +51,8 @@

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

unsigned char nmethod::_global_unloading_clock = 0;

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available

@@ -446,6 +448,7 @@ const char* nmethod::compile_kind() const {
// Fill in default values for various flag fields
void nmethod::init_defaults() {
  _state = in_use;
  _unloading_clock = 0;
  _marked_for_reclamation = 0;
  _has_flushed_dependencies = 0;
  _has_unsafe_access = 0;

@@ -464,7 +467,11 @@ void nmethod::init_defaults() {
  _oops_do_mark_link = NULL;
  _jmethod_id = NULL;
  _osr_link = NULL;
  _scavenge_root_link = NULL;
  if (UseG1GC) {
    _unloading_next = NULL;
  } else {
    _scavenge_root_link = NULL;
  }
  _scavenge_root_state = 0;
  _compiler = NULL;
#if INCLUDE_RTM_OPT

@@ -1170,6 +1177,77 @@ void nmethod::cleanup_inline_caches() {
  }
}

void nmethod::verify_clean_inline_caches() {
  assert_locked_or_safepoint(CompiledIC_lock);

  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes. If an oop in the old code was there, that oop
  // should not get GC'd. Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (!is_in_use()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }

  ResourceMark rm;
  RelocIterator iter(this, low_boundary);
  while(iter.next()) {
    switch(iter.type()) {
      case relocInfo::virtual_call_type:
      case relocInfo::opt_virtual_call_type: {
        CompiledIC *ic = CompiledIC_at(&iter);
        // Ok, to lookup references to zombies here
        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
        if( cb != NULL && cb->is_nmethod() ) {
          nmethod* nm = (nmethod*)cb;
          // Verify that inline caches pointing to both zombie and not_entrant methods are clean
          if (!nm->is_in_use() || (nm->method()->code() != nm)) {
            assert(ic->is_clean(), "IC should be clean");
          }
        }
        break;
      }
      case relocInfo::static_call_type: {
        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
        if( cb != NULL && cb->is_nmethod() ) {
          nmethod* nm = (nmethod*)cb;
          // Verify that inline caches pointing to both zombie and not_entrant methods are clean
          if (!nm->is_in_use() || (nm->method()->code() != nm)) {
            assert(csc->is_clean(), "IC should be clean");
          }
        }
        break;
      }
    }
  }
}

int nmethod::verify_icholder_relocations() {
  int count = 0;

  RelocIterator iter(this);
  while(iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
        CompiledIC *ic = CompiledIC_at(&iter);
        if (TraceCompiledIC) {
          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
          ic->print();
        }
        assert(ic->cached_icholder() != NULL, "must be non-NULL");
        count++;
      }
    }
  }

  return count;
}

// This is a private interface with the sweeper.
void nmethod::mark_as_seen_on_stack() {
  assert(is_alive(), "Must be an alive method");

@@ -1202,6 +1280,23 @@ void nmethod::inc_decompile_count() {
  mdo->inc_decompile_count();
}

void nmethod::increase_unloading_clock() {
  _global_unloading_clock++;
  if (_global_unloading_clock == 0) {
    // _nmethods are allocated with _unloading_clock == 0,
    // so 0 is never used as a clock value.
    _global_unloading_clock = 1;
  }
}

void nmethod::set_unloading_clock(unsigned char unloading_clock) {
  OrderAccess::release_store((volatile jubyte*)&_unloading_clock, unloading_clock);
}

unsigned char nmethod::unloading_clock() {
  return (unsigned char)OrderAccess::load_acquire((volatile jubyte*)&_unloading_clock);
}
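Annotation (not part of the patch): the unloading clock is what makes parallel nmethod cleaning idempotent. A worker that reaches an nmethod through an inline cache compares that nmethod's stamp against the global clock to decide whether it has already been processed in this GC; mismatches are postponed rather than cleaned twice. A sketch of the intended per-GC protocol (the driver loop is assumed, not shown in this file):

    nmethod::increase_unloading_clock();            // once, before parallel cleaning
    // per worker, per claimed nmethod:
    bool postponed = nm->do_unloading_parallel(is_alive, unloading_occurred);
    nm->set_unloading_clock(nmethod::global_unloading_clock());
    // after all workers have stamped their nmethods:
    nm->do_unloading_parallel_postponed(is_alive, unloading_occurred);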
void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {

  post_compiled_method_unload();

@@ -1247,6 +1342,10 @@ void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
    // for later on.
    CodeCache::set_needs_cache_clean(true);
  }

  // Unregister must be done before the state change
  Universe::heap()->unregister_nmethod(this);

  _state = unloaded;

  // Log the unloading.

@@ -1590,6 +1689,35 @@ void nmethod::post_compiled_method_unload() {
  set_unload_reported();
}

void static clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive) {
  if (ic->is_icholder_call()) {
    // The only exception is compiledICHolder oops which may
    // yet be marked below. (We check this further below).
    CompiledICHolder* cichk_oop = ic->cached_icholder();
    if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
        cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
      return;
    }
  } else {
    Metadata* ic_oop = ic->cached_metadata();
    if (ic_oop != NULL) {
      if (ic_oop->is_klass()) {
        if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
          return;
        }
      } else if (ic_oop->is_method()) {
        if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
          return;
        }
      } else {
        ShouldNotReachHere();
      }
    }
  }

  ic->set_to_clean();
}

// This is called at the end of the strong tracing/marking phase of a
// GC to unload an nmethod if it contains otherwise unreachable
// oops.

@@ -1633,31 +1761,7 @@ void nmethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred)
  while(iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      CompiledIC *ic = CompiledIC_at(&iter);
      if (ic->is_icholder_call()) {
        // The only exception is compiledICHolder oops which may
        // yet be marked below. (We check this further below).
        CompiledICHolder* cichk_oop = ic->cached_icholder();
        if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
            cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
          continue;
        }
      } else {
        Metadata* ic_oop = ic->cached_metadata();
        if (ic_oop != NULL) {
          if (ic_oop->is_klass()) {
            if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
              continue;
            }
          } else if (ic_oop->is_method()) {
            if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
              continue;
            }
          } else {
            ShouldNotReachHere();
          }
        }
      }
      ic->set_to_clean();
      clean_ic_if_metadata_is_dead(ic, is_alive);
    }
  }
}

@@ -1695,6 +1799,175 @@ void nmethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred)
  verify_metadata_loaders(low_boundary, is_alive);
}

template <class CompiledICorStaticCall>
static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, BoolObjectClosure *is_alive, nmethod* from) {
  // Ok, to lookup references to zombies here
  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
  if (cb != NULL && cb->is_nmethod()) {
    nmethod* nm = (nmethod*)cb;

    if (nm->unloading_clock() != nmethod::global_unloading_clock()) {
      // The nmethod has not been processed yet.
      return true;
    }

    // Clean inline caches pointing to both zombie and not_entrant methods
    if (!nm->is_in_use() || (nm->method()->code() != nm)) {
      ic->set_to_clean();
      assert(ic->is_clean(), err_msg("nmethod " PTR_FORMAT "not clean %s", from, from->method()->name_and_sig_as_C_string()));
    }
  }

  return false;
}

static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, BoolObjectClosure *is_alive, nmethod* from) {
  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), is_alive, from);
}

static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, BoolObjectClosure *is_alive, nmethod* from) {
  return clean_if_nmethod_is_unloaded(csc, csc->destination(), is_alive, from);
}

bool nmethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
  ResourceMark rm;

  // Make sure the oop's ready to receive visitors
  assert(!is_zombie() && !is_unloaded(),
         "should not call follow on zombie or unloaded nmethod");

  // If the method is not entrant then a JMP is plastered over the
  // first few bytes. If an oop in the old code was there, that oop
  // should not get GC'd. Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (is_not_entrant()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // (See comment above.)
  }

  // The RedefineClasses() API can cause the class unloading invariant
  // to no longer be true. See jvmtiExport.hpp for details.
  // Also, leave a debugging breadcrumb in local flag.
  bool a_class_was_redefined = JvmtiExport::has_redefined_a_class();
  if (a_class_was_redefined) {
    // This set of the unloading_occurred flag is done before the
    // call to post_compiled_method_unload() so that the unloading
    // of this nmethod is reported.
    unloading_occurred = true;
  }

  // Exception cache
  clean_exception_cache(is_alive);

  bool is_unloaded = false;
  bool postponed = false;

  RelocIterator iter(this, low_boundary);
  while(iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      if (unloading_occurred) {
        // If class unloading occurred we first iterate over all inline caches and
        // clear ICs where the cached oop is referring to an unloaded klass or method.
        clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive);
      }

      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::opt_virtual_call_type:
      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::static_call_type:
      postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
      break;

    case relocInfo::oop_type:
      if (!is_unloaded) {
        // Unload check
        oop_Relocation* r = iter.oop_reloc();
        // Traverse those oops directly embedded in the code.
        // Other oops (oop_index>0) are seen as part of scopes_oops.
        assert(1 == (r->oop_is_immediate()) +
               (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
               "oop must be found in exactly one place");
        if (r->oop_is_immediate() && r->oop_value() != NULL) {
          if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
            is_unloaded = true;
          }
        }
      }
      break;

    }
  }

  if (is_unloaded) {
    return postponed;
  }

  // Scopes
  for (oop* p = oops_begin(); p < oops_end(); p++) {
    if (*p == Universe::non_oop_word()) continue; // skip non-oops
    if (can_unload(is_alive, p, unloading_occurred)) {
      is_unloaded = true;
      break;
    }
  }

  if (is_unloaded) {
    return postponed;
  }

  // Ensure that all metadata is still alive
  verify_metadata_loaders(low_boundary, is_alive);

  return postponed;
}

void nmethod::do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred) {
  ResourceMark rm;

  // Make sure the oop's ready to receive visitors
  assert(!is_zombie(),
         "should not call follow on zombie nmethod");

  // If the method is not entrant then a JMP is plastered over the
  // first few bytes. If an oop in the old code was there, that oop
  // should not get GC'd. Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (is_not_entrant()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // (See comment above.)
  }

  RelocIterator iter(this, low_boundary);
  while(iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::opt_virtual_call_type:
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::static_call_type:
      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
      break;
    }
  }
}

#ifdef ASSERT

class CheckClass : AllStatic {

@@ -1911,7 +2184,7 @@ void nmethod::oops_do_marking_epilogue() {
    assert(cur != NULL, "not NULL-terminated");
    nmethod* next = cur->_oops_do_mark_link;
    cur->_oops_do_mark_link = NULL;
    cur->fix_oop_relocations();
    cur->verify_oop_relocations();
    NOT_PRODUCT(if (TraceScavenge) cur->print_on(tty, "oops_do, unmark"));
    cur = next;
  }

@@ -2479,6 +2752,10 @@ public:
};

void nmethod::verify_scavenge_root_oops() {
  if (UseG1GC) {
    return;
  }

  if (!on_scavenge_root_list()) {
    // Actually look inside, to verify the claim that it's clean.
    DebugScavengeRoot debug_scavenge_root(this);

@@ -111,6 +111,11 @@ class nmethod : public CodeBlob {
  friend class NMethodSweeper;
  friend class CodeCache; // scavengable oops
 private:

  // GC support to help figure out if an nmethod has been
  // cleaned/unloaded by the current GC.
  static unsigned char _global_unloading_clock;

  // Shared fields for all nmethod's
  Method* _method;
  int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method

@@ -118,7 +123,13 @@ class nmethod : public CodeBlob {

  // To support simple linked-list chaining of nmethods:
  nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head
  nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods

  union {
    // Used by G1 to chain nmethods.
    nmethod* _unloading_next;
    // Used by non-G1 GCs to chain nmethods.
    nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
  };

  static nmethod* volatile _oops_do_mark_nmethods;
  nmethod* volatile _oops_do_mark_link;
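Annotation (not part of the patch): the union is safe because the two links can never be live at the same time. Under UseG1GC the CodeCache scavenge-root operations (note the early returns added above) never touch _scavenge_root_link, freeing the slot for _unloading_next, G1's chain of nmethods selected for unloading; every other collector keeps using it as the scavenge-root list link.

    // Exactly one interpretation is active for the lifetime of the VM:
    //   UseG1GC  -> _unloading_next      (set_unloading_next()/unloading_next())
    //   !UseG1GC -> _scavenge_root_link  (CodeCache::add_scavenge_root_nmethod())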
@@ -180,6 +191,8 @@ class nmethod : public CodeBlob {
  // Protected by Patching_lock
  volatile unsigned char _state; // {alive, not_entrant, zombie, unloaded}

  volatile unsigned char _unloading_clock; // Incremented after GC unloaded/cleaned the nmethod

#ifdef ASSERT
  bool _oops_are_stale; // indicates that it's no longer safe to access oops section
#endif

@@ -437,6 +450,15 @@ class nmethod : public CodeBlob {
  bool unload_reported() { return _unload_reported; }
  void set_unload_reported() { _unload_reported = true; }

  void set_unloading_next(nmethod* next) { _unloading_next = next; }
  nmethod* unloading_next() { return _unloading_next; }

  static unsigned char global_unloading_clock() { return _global_unloading_clock; }
  static void increase_unloading_clock();

  void set_unloading_clock(unsigned char unloading_clock);
  unsigned char unloading_clock();

  bool is_marked_for_deoptimization() const { return _marked_for_deoptimization; }
  void mark_for_deoptimization() { _marked_for_deoptimization = true; }

@@ -552,6 +574,10 @@ public:
    return (addr >= code_begin() && addr < verified_entry_point());
  }

  // Verify calls to dead methods have been cleaned.
  void verify_clean_inline_caches();
  // Verify and count cached icholder relocations.
  int verify_icholder_relocations();
  // Check that all metadata is still alive
  void verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive);

@@ -577,6 +603,10 @@ public:

  // GC support
  void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
  // The parallel versions are used by G1.
  bool do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred);
  void do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred);
  // Unload a nmethod if the *root object is dead.
  bool can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred);

  void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,

@@ -1558,11 +1558,11 @@ bool CMSCollector::shouldConcurrentCollect() {
  }

  if (MetaspaceGC::should_concurrent_collect()) {
    if (Verbose && PrintGCDetails) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print("CMSCollector: collect for metadata allocation ");
    }
    return true;
  }
    return true;
  }

  // CMSTriggerInterval starts a CMS cycle if enough time has passed.
  if (CMSTriggerInterval >= 0) {

@@ -2997,20 +2997,21 @@ void CMSCollector::verify_after_remark_work_1() {
  HandleMark hm;
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  // Get a clear set of claim bits for the strong roots processing to work with.
  // Get a clear set of claim bits for the roots processing to work with.
  ClassLoaderDataGraph::clear_claimed_marks();

  // Mark from roots one level into CMS
  MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
  gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.

  gch->gen_process_strong_roots(_cmsGen->level(),
                                true,   // younger gens are roots
                                true,   // activate StrongRootsScope
                                SharedHeap::ScanningOption(roots_scanning_options()),
                                &notOlder,
                                NULL,
                                NULL); // SSS: Provide correct closure
  gch->gen_process_roots(_cmsGen->level(),
                         true,   // younger gens are roots
                         true,   // activate StrongRootsScope
                         SharedHeap::ScanningOption(roots_scanning_options()),
                         should_unload_classes(),
                         &notOlder,
                         NULL,
                         NULL); // SSS: Provide correct closure

  // Now mark from the roots
  MarkFromRootsClosure markFromRootsClosure(this, _span,

@@ -3061,22 +3062,24 @@ void CMSCollector::verify_after_remark_work_2() {
  HandleMark hm;
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  // Get a clear set of claim bits for the strong roots processing to work with.
  // Get a clear set of claim bits for the roots processing to work with.
  ClassLoaderDataGraph::clear_claimed_marks();

  // Mark from roots one level into CMS
  MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
                                     markBitMap());
  KlassToOopClosure klass_closure(&notOlder);
  CLDToOopClosure cld_closure(&notOlder, true);

  gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
  gch->gen_process_strong_roots(_cmsGen->level(),
                                true,   // younger gens are roots
                                true,   // activate StrongRootsScope
                                SharedHeap::ScanningOption(roots_scanning_options()),
                                &notOlder,
                                NULL,
                                &klass_closure);

  gch->gen_process_roots(_cmsGen->level(),
                         true,   // younger gens are roots
                         true,   // activate StrongRootsScope
                         SharedHeap::ScanningOption(roots_scanning_options()),
                         should_unload_classes(),
                         &notOlder,
                         NULL,
                         &cld_closure);

  // Now mark from the roots
  MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,

@@ -3263,12 +3266,10 @@ bool ConcurrentMarkSweepGeneration::is_too_full() const {
void CMSCollector::setup_cms_unloading_and_verification_state() {
  const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
                             || VerifyBeforeExit;
  const int rso = SharedHeap::SO_Strings | SharedHeap::SO_AllCodeCache;
  const int rso = SharedHeap::SO_AllCodeCache;

  // We set the proper root for this CMS cycle here.
  if (should_unload_classes()) { // Should unload classes this cycle
    remove_root_scanning_option(SharedHeap::SO_AllClasses);
    add_root_scanning_option(SharedHeap::SO_SystemClasses);
    remove_root_scanning_option(rso); // Shrink the root set appropriately
    set_verifying(should_verify); // Set verification state for this cycle
    return; // Nothing else needs to be done at this time

@@ -3276,8 +3277,6 @@ void CMSCollector::setup_cms_unloading_and_verification_state() {

  // Not unloading classes this cycle
  assert(!should_unload_classes(), "Inconsistency!");
  remove_root_scanning_option(SharedHeap::SO_SystemClasses);
  add_root_scanning_option(SharedHeap::SO_AllClasses);

  if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
    // Include symbols, strings and code cache elements to prevent their resurrection.

@@ -3685,15 +3684,16 @@ void CMSCollector::checkpointRootsInitialWork(bool asynch) {
      gch->set_par_threads(0);
    } else {
      // The serial version.
      KlassToOopClosure klass_closure(&notOlder);
      CLDToOopClosure cld_closure(&notOlder, true);
      gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
      gch->gen_process_strong_roots(_cmsGen->level(),
                                    true,   // younger gens are roots
                                    true,   // activate StrongRootsScope
                                    SharedHeap::ScanningOption(roots_scanning_options()),
                                    &notOlder,
                                    NULL,
                                    &klass_closure);
      gch->gen_process_roots(_cmsGen->level(),
                             true,   // younger gens are roots
                             true,   // activate StrongRootsScope
                             SharedHeap::ScanningOption(roots_scanning_options()),
                             should_unload_classes(),
                             &notOlder,
                             NULL,
                             &cld_closure);
    }
  }

@@ -5139,7 +5139,6 @@ void CMSParInitialMarkTask::work(uint worker_id) {
  _timer.start();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
  KlassToOopClosure klass_closure(&par_mri_cl);

  // ---------- young gen roots --------------
  {

@@ -5155,13 +5154,17 @@ void CMSParInitialMarkTask::work(uint worker_id) {
  // ---------- remaining roots --------------
  _timer.reset();
  _timer.start();
  gch->gen_process_strong_roots(_collector->_cmsGen->level(),
                                false,     // yg was scanned above
                                false,     // this is parallel code
                                SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
                                &par_mri_cl,
                                NULL,
                                &klass_closure);

  CLDToOopClosure cld_closure(&par_mri_cl, true);

  gch->gen_process_roots(_collector->_cmsGen->level(),
                         false,     // yg was scanned above
                         false,     // this is parallel code
                         SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
                         _collector->should_unload_classes(),
                         &par_mri_cl,
                         NULL,
                         &cld_closure);
  assert(_collector->should_unload_classes()
         || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
         "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");

@@ -5290,13 +5293,15 @@ void CMSParRemarkTask::work(uint worker_id) {
  // ---------- remaining roots --------------
  _timer.reset();
  _timer.start();
  gch->gen_process_strong_roots(_collector->_cmsGen->level(),
                                false,     // yg was scanned above
                                false,     // this is parallel code
                                SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
                                &par_mrias_cl,
                                NULL,
                                NULL);     // The dirty klasses will be handled below
  gch->gen_process_roots(_collector->_cmsGen->level(),
                         false,     // yg was scanned above
                         false,     // this is parallel code
                         SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
                         _collector->should_unload_classes(),
                         &par_mrias_cl,
                         NULL,
                         NULL);     // The dirty klasses will be handled below

  assert(_collector->should_unload_classes()
         || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
         "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");

@@ -5351,7 +5356,7 @@ void CMSParRemarkTask::work(uint worker_id) {
  // We might have added oops to ClassLoaderData::_handles during the
  // concurrent marking phase. These oops point to newly allocated objects
  // that are guaranteed to be kept alive. Either by the direct allocation
  // code, or when the young collector processes the strong roots. Hence,
  // code, or when the young collector processes the roots. Hence,
  // we don't have to revisit the _handles block during the remark phase.

  // ---------- rescan dirty cards ------------

@@ -5773,7 +5778,7 @@ void CMSCollector::do_remark_parallel() {
                                cms_space,
                                n_workers, workers, task_queues());

  // Set up for parallel process_strong_roots work.
  // Set up for parallel process_roots work.
  gch->set_par_threads(n_workers);
  // We won't be iterating over the cards in the card table updating
  // the younger_gen cards, so we shouldn't call the following else

@@ -5782,7 +5787,7 @@ void CMSCollector::do_remark_parallel() {
  // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel

  // The young gen rescan work will not be done as part of
  // process_strong_roots (which currently doesn't knw how to
  // process_roots (which currently doesn't know how to
  // parallelize such a scan), but rather will be broken up into
  // a set of parallel tasks (via the sampling that the [abortable]
  // preclean phase did of EdenSpace, plus the [two] tasks of

@@ -5879,13 +5884,15 @@ void CMSCollector::do_remark_non_parallel() {

    gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
    GenCollectedHeap::StrongRootsScope srs(gch);
    gch->gen_process_strong_roots(_cmsGen->level(),
                                  true,  // younger gens as roots
                                  false, // use the local StrongRootsScope
                                  SharedHeap::ScanningOption(roots_scanning_options()),
                                  &mrias_cl,
                                  NULL,
                                  NULL); // The dirty klasses will be handled below

    gch->gen_process_roots(_cmsGen->level(),
                           true,  // younger gens as roots
                           false, // use the local StrongRootsScope
                           SharedHeap::ScanningOption(roots_scanning_options()),
                           should_unload_classes(),
                           &mrias_cl,
                           NULL,
                           NULL); // The dirty klasses will be handled below

    assert(should_unload_classes()
           || (roots_scanning_options() & SharedHeap::SO_AllCodeCache),

@@ -5925,7 +5932,7 @@ void CMSCollector::do_remark_non_parallel() {
  // We might have added oops to ClassLoaderData::_handles during the
  // concurrent marking phase. These oops point to newly allocated objects
  // that are guaranteed to be kept alive. Either by the direct allocation
  // code, or when the young collector processes the strong roots. Hence,
  // code, or when the young collector processes the roots. Hence,
  // we don't have to revisit the _handles block during the remark phase.

  verify_work_stacks_empty();

@@ -6175,15 +6182,14 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
      // Clean up unreferenced symbols in symbol table.
      SymbolTable::unlink();
    }

    {
      GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
      // Delete entries for dead interned strings.
      StringTable::unlink(&_is_alive_closure);
    }
  }

  // CMS doesn't use the StringTable as hard roots when class unloading is turned off.
  // Need to check if we really scanned the StringTable.
  if ((roots_scanning_options() & SharedHeap::SO_Strings) == 0) {
    GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
    // Delete entries for dead interned strings.
    StringTable::unlink(&_is_alive_closure);
  }

  // Restore any preserved marks as a result of mark stack or
  // work queue overflow

@@ -32,6 +32,7 @@
#include "gc_implementation/shared/generationCounters.hpp"
#include "memory/freeBlockDictionary.hpp"
#include "memory/generation.hpp"
#include "memory/iterator.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memoryService.hpp"

@@ -24,6 +24,7 @@

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"

@@ -39,6 +40,7 @@
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "memory/allocation.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"

@@ -58,8 +60,8 @@ CMBitMapRO::CMBitMapRO(int shifter) :
  _bmWordSize = 0;
}

HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr,
                                               HeapWord* limit) const {
HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
                                               const HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);

@@ -76,8 +78,8 @@ HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr,
  return nextAddr;
}

HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(HeapWord* addr,
                                                 HeapWord* limit) const {
HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
                                                 const HeapWord* limit) const {
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;

@@ -1223,6 +1225,9 @@ public:
};

void ConcurrentMark::scanRootRegions() {
  // Start of concurrent marking.
  ClassLoaderDataGraph::clear_claimed_marks();

  // scan_in_progress() will have been set to true only if there was
|
||||
// at least one root region to scan. So, if it's false, we
|
||||
// should not attempt to do any further work.
|
||||
@ -1271,7 +1276,7 @@ void ConcurrentMark::markFromRoots() {
|
||||
CMConcurrentMarkingTask markingTask(this, cmThread());
|
||||
if (use_parallel_marking_threads()) {
|
||||
_parallel_workers->set_active_workers((int)active_workers);
|
||||
// Don't set _n_par_threads because it affects MT in process_strong_roots()
|
||||
// Don't set _n_par_threads because it affects MT in process_roots()
|
||||
// and the decisions on that MT processing is made elsewhere.
|
||||
assert(_parallel_workers->active_workers() > 0, "Should have been set");
|
||||
_parallel_workers->run_task(&markingTask);
|
||||
@ -2142,23 +2147,29 @@ void ConcurrentMark::cleanup() {
|
||||
// Update the soft reference policy with the new heap occupancy.
|
||||
Universe::update_heap_info_at_gc();
|
||||
|
||||
// We need to make this be a "collection" so any collection pause that
|
||||
// races with it goes around and waits for completeCleanup to finish.
|
||||
g1h->increment_total_collections();
|
||||
|
||||
// We reclaimed old regions so we should calculate the sizes to make
|
||||
// sure we update the old gen/space data.
|
||||
g1h->g1mm()->update_sizes();
|
||||
|
||||
if (VerifyDuringGC) {
|
||||
HandleMark hm; // handle scope
|
||||
Universe::heap()->prepare_for_verify();
|
||||
Universe::verify(VerifyOption_G1UsePrevMarking,
|
||||
" VerifyDuringGC:(after)");
|
||||
}
|
||||
|
||||
g1h->check_bitmaps("Cleanup End");
|
||||
|
||||
g1h->verify_region_sets_optional();
|
||||
|
||||
// We need to make this be a "collection" so any collection pause that
|
||||
// races with it goes around and waits for completeCleanup to finish.
|
||||
g1h->increment_total_collections();
|
||||
|
||||
// Clean out dead classes and update Metaspace sizes.
|
||||
ClassLoaderDataGraph::purge();
|
||||
MetaspaceGC::compute_new_size();
|
||||
|
||||
// We reclaimed old regions so we should calculate the sizes to make
|
||||
// sure we update the old gen/space data.
|
||||
g1h->g1mm()->update_sizes();
|
||||
|
||||
g1h->trace_heap_after_concurrent_cycle();
|
||||
}
|
||||
|
||||
@ -2445,6 +2456,26 @@ void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
|
||||
_g1h->set_par_threads(0);
|
||||
}
|
||||
|
||||
void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
|
||||
G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
|
||||
}
|
||||
|
||||
// Helper class to get rid of some boilerplate code.
|
||||
class G1RemarkGCTraceTime : public GCTraceTime {
|
||||
static bool doit_and_prepend(bool doit) {
|
||||
if (doit) {
|
||||
gclog_or_tty->put(' ');
|
||||
}
|
||||
return doit;
|
||||
}
|
||||
|
||||
public:
|
||||
G1RemarkGCTraceTime(const char* title, bool doit)
|
||||
: GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
|
||||
G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
|
||||
}
|
||||
};
|
||||
|
||||
void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
|
||||
if (has_overflown()) {
|
||||
// Skip processing the discovered references if we have
|
||||
@ -2557,9 +2588,28 @@ void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
|
||||
return;
|
||||
}
|
||||
|
||||
g1h->unlink_string_and_symbol_table(&g1_is_alive,
|
||||
/* process_strings */ false, // currently strings are always roots
|
||||
/* process_symbols */ true);
|
||||
assert(_markStack.isEmpty(), "Marking should have completed");
|
||||
|
||||
// Unload Klasses, String, Symbols, Code Cache, etc.
|
||||
|
||||
G1RemarkGCTraceTime trace("Unloading", G1Log::finer());
|
||||
|
||||
bool purged_classes;
|
||||
|
||||
{
|
||||
G1RemarkGCTraceTime trace("System Dictionary Unloading", G1Log::finest());
|
||||
purged_classes = SystemDictionary::do_unloading(&g1_is_alive);
|
||||
}
|
||||
|
||||
{
|
||||
G1RemarkGCTraceTime trace("Parallel Unloading", G1Log::finest());
|
||||
weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
|
||||
}
|
||||
|
||||
if (G1StringDedup::is_enabled()) {
|
||||
G1RemarkGCTraceTime trace("String Deduplication Unlink", G1Log::finest());
|
||||
G1StringDedup::unlink(&g1_is_alive);
|
||||
}
|
||||
}
|
||||
|
||||
void ConcurrentMark::swapMarkBitMaps() {
|
||||
@ -2568,6 +2618,57 @@ void ConcurrentMark::swapMarkBitMaps() {
|
||||
_nextMarkBitMap = (CMBitMap*) temp;
|
||||
}
|
||||
|
||||
class CMObjectClosure;
|
||||
|
||||
// Closure for iterating over objects, currently only used for
|
||||
// processing SATB buffers.
|
||||
class CMObjectClosure : public ObjectClosure {
|
||||
private:
|
||||
CMTask* _task;
|
||||
|
||||
public:
|
||||
void do_object(oop obj) {
|
||||
_task->deal_with_reference(obj);
|
||||
}
|
||||
|
||||
CMObjectClosure(CMTask* task) : _task(task) { }
|
||||
};
|
||||
|
||||
class G1RemarkThreadsClosure : public ThreadClosure {
|
||||
CMObjectClosure _cm_obj;
|
||||
G1CMOopClosure _cm_cl;
|
||||
MarkingCodeBlobClosure _code_cl;
|
||||
int _thread_parity;
|
||||
bool _is_par;
|
||||
|
||||
public:
|
||||
G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task, bool is_par) :
|
||||
_cm_obj(task), _cm_cl(g1h, g1h->concurrent_mark(), task), _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
|
||||
_thread_parity(SharedHeap::heap()->strong_roots_parity()), _is_par(is_par) {}
|
||||
|
||||
void do_thread(Thread* thread) {
|
||||
if (thread->is_Java_thread()) {
|
||||
if (thread->claim_oops_do(_is_par, _thread_parity)) {
|
||||
JavaThread* jt = (JavaThread*)thread;
|
||||

// In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking
// however oops reachable from nmethods have very complex lifecycles:
// * Alive if on the stack of an executing method
// * Weakly reachable otherwise
// Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver should be
// live by the SATB invariant but other oops recorded in nmethods may behave differently.
jt->nmethods_do(&_code_cl);

jt->satb_mark_queue().apply_closure_and_empty(&_cm_obj);
}
} else if (thread->is_VM_thread()) {
if (thread->claim_oops_do(_is_par, _thread_parity)) {
JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_obj);
}
}
}
};
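
The closure above leans on Thread::claim_oops_do() so that each thread's SATB buffer and nmethods are drained exactly once, no matter how many remark workers apply the closure. A minimal standalone sketch of that parity-claim idiom, using std::atomic rather than HotSpot internals (ThreadState and all names below are hypothetical, illustration only):

#include <atomic>

// Each processing round flips the global parity; a thread is handed out to
// whichever worker first installs the current parity in the thread's slot.
static std::atomic<int> g_global_parity(1);

struct ThreadState {
  std::atomic<int> claim_parity;

  ThreadState() : claim_parity(0) {}

  // Returns true for exactly one calling worker per round.
  bool claim(int global_parity) {
    int seen = claim_parity.load();
    return seen != global_parity &&
           claim_parity.compare_exchange_strong(seen, global_parity);
  }
};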

class CMRemarkTask: public AbstractGangTask {
private:
ConcurrentMark* _cm;
@ -2579,6 +2680,14 @@ public:
if (worker_id < _cm->active_tasks()) {
CMTask* task = _cm->task(worker_id);
task->record_start_time();
{
ResourceMark rm;
HandleMark hm;

G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task, !_is_serial);
Threads::threads_do(&threads_f);
}

do {
task->do_marking_step(1000000000.0 /* something very large */,
true /* do_termination */,
@ -2601,6 +2710,8 @@ void ConcurrentMark::checkpointRootsFinalWork() {
HandleMark hm;
G1CollectedHeap* g1h = G1CollectedHeap::heap();

G1RemarkGCTraceTime trace("Finalize Marking", G1Log::finer());

g1h->ensure_parsability(false);

if (G1CollectedHeap::use_parallel_gc_threads()) {
@ -3430,20 +3541,6 @@ public:
}
};

// Closure for iterating over objects, currently only used for
// processing SATB buffers.
class CMObjectClosure : public ObjectClosure {
private:
CMTask* _task;

public:
void do_object(oop obj) {
_task->deal_with_reference(obj);
}

CMObjectClosure(CMTask* task) : _task(task) { }
};

G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
ConcurrentMark* cm,
CMTask* task)
@ -3908,15 +4005,6 @@ void CMTask::drain_satb_buffers() {
}
}

if (!concurrent() && !has_aborted()) {
// We should only do this during remark.
if (G1CollectedHeap::use_parallel_gc_threads()) {
satb_mq_set.par_iterate_closure_all_threads(_worker_id);
} else {
satb_mq_set.iterate_closure_all_threads();
}
}

_draining_satb_buffers = false;

assert(has_aborted() ||

@ -25,6 +25,7 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP

#include "classfile/javaClasses.hpp"
#include "gc_implementation/g1/heapRegionSet.hpp"
#include "gc_implementation/shared/gcId.hpp"
#include "utilities/taskqueue.hpp"
@ -86,19 +87,19 @@ class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
// Return the address corresponding to the next marked bit at or after
// "addr", and before "limit", if "limit" is non-NULL. If there is no
// such bit, returns "limit" if that is non-NULL, or else "endWord()".
HeapWord* getNextMarkedWordAddress(HeapWord* addr,
HeapWord* limit = NULL) const;
HeapWord* getNextMarkedWordAddress(const HeapWord* addr,
const HeapWord* limit = NULL) const;
// Return the address corresponding to the next unmarked bit at or after
// "addr", and before "limit", if "limit" is non-NULL. If there is no
// such bit, returns "limit" if that is non-NULL, or else "endWord()".
HeapWord* getNextUnmarkedWordAddress(HeapWord* addr,
HeapWord* limit = NULL) const;
HeapWord* getNextUnmarkedWordAddress(const HeapWord* addr,
const HeapWord* limit = NULL) const;

// conversion utilities
HeapWord* offsetToHeapWord(size_t offset) const {
return _bmStartWord + (offset << _shifter);
}
size_t heapWordToOffset(HeapWord* addr) const {
size_t heapWordToOffset(const HeapWord* addr) const {
return pointer_delta(addr, _bmStartWord) >> _shifter;
}
int heapWordDiffToOffsetDiff(size_t diff) const;
@ -476,6 +477,7 @@ protected:
ForceOverflowSettings _force_overflow_conc;
ForceOverflowSettings _force_overflow_stw;

void weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes);
void weakRefsWork(bool clear_all_soft_refs);

void swapMarkBitMaps();

@ -426,7 +426,7 @@ G1BlockOffsetArray::forward_to_block_containing_addr_slow(HeapWord* q,
q = n;
oop obj = oop(q);
if (obj->klass_or_null() == NULL) return q;
n += obj->size();
n += block_size(q);
}
assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
// [q, n) is the block that crosses the boundary.

@ -113,7 +113,7 @@ forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
q = n;
oop obj = oop(q);
if (obj->klass_or_null() == NULL) return q;
n += obj->size();
n += block_size(q);
}
assert(q <= n, "wrong order for q and addr");
assert(addr < n, "wrong order for addr and n");

@ -30,23 +30,52 @@

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

G1CodeRootChunk::G1CodeRootChunk() : _top(NULL), _next(NULL), _prev(NULL) {
G1CodeRootChunk::G1CodeRootChunk() : _top(NULL), _next(NULL), _prev(NULL), _free(NULL) {
_top = bottom();
}

void G1CodeRootChunk::reset() {
_next = _prev = NULL;
_free = NULL;
_top = bottom();
}

void G1CodeRootChunk::nmethods_do(CodeBlobClosure* cl) {
nmethod** cur = bottom();
NmethodOrLink* cur = bottom();
while (cur != _top) {
cl->do_code_blob(*cur);
if (is_nmethod(cur)) {
cl->do_code_blob(cur->_nmethod);
}
cur++;
}
}

bool G1CodeRootChunk::remove_lock_free(nmethod* method) {
NmethodOrLink* cur = bottom();

for (NmethodOrLink* cur = bottom(); cur != _top; cur++) {
if (cur->_nmethod == method) {
bool result = Atomic::cmpxchg_ptr(NULL, &cur->_nmethod, method) == method;

if (!result) {
// Someone else cleared out this entry.
return false;
}

// The method was cleared. Time to link it into the free list.
NmethodOrLink* prev_free;
do {
prev_free = (NmethodOrLink*)_free;
cur->_link = prev_free;
} while (Atomic::cmpxchg_ptr(cur, &_free, prev_free) != prev_free);

return true;
}
}

return false;
}

G1CodeRootChunkManager::G1CodeRootChunkManager() : _free_list(), _num_chunks_handed_out(0) {
_free_list.initialize();
_free_list.set_size(G1CodeRootChunk::word_size());
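
The remove_lock_free() hunk above never takes a lock: a slot is emptied with one compare-and-swap, and the emptied slot is then pushed onto the chunk-local free list with a second CAS retry loop. A standalone sketch of the same two-step protocol, with std::atomic standing in for HotSpot's Atomic::cmpxchg_ptr (Slot and FreeList are hypothetical names, not HotSpot types):

#include <atomic>
#include <cstddef>

struct Slot {
  std::atomic<void*> value;  // payload while in use, link while free
};

struct FreeList {
  std::atomic<Slot*> head;

  FreeList() : head(NULL) {}

  // Step 1: clear the slot iff it still holds 'expected' (a single CAS).
  // Step 2: push the cleared slot with a CAS retry loop, exactly like the
  // do/while over _free in remove_lock_free() above.
  bool clear_and_recycle(Slot* s, void* expected) {
    if (!s->value.compare_exchange_strong(expected, NULL)) {
      return false;  // another thread cleared this entry first
    }
    Slot* old_head = head.load();
    do {
      s->value.store(old_head);  // reuse the slot's word as the link
    } while (!head.compare_exchange_weak(old_head, s));
    return true;
  }
};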
@ -140,34 +169,43 @@ G1CodeRootSet::~G1CodeRootSet() {

void G1CodeRootSet::add(nmethod* method) {
if (!contains(method)) {
// Try to add the nmethod. If there is not enough space, get a new chunk.
if (_list.head() == NULL || _list.head()->is_full()) {
G1CodeRootChunk* cur = new_chunk();
// Find the first chunk that isn't full.
G1CodeRootChunk* cur = _list.head();
while (cur != NULL) {
if (!cur->is_full()) {
break;
}
cur = cur->next();
}

// All chunks are full, get a new chunk.
if (cur == NULL) {
cur = new_chunk();
_list.return_chunk_at_head(cur);
}
bool result = _list.head()->add(method);

// Add the nmethod.
bool result = cur->add(method);

guarantee(result, err_msg("Not able to add nmethod "PTR_FORMAT" to newly allocated chunk.", method));

_length++;
}
}

void G1CodeRootSet::remove(nmethod* method) {
void G1CodeRootSet::remove_lock_free(nmethod* method) {
G1CodeRootChunk* found = find(method);
if (found != NULL) {
bool result = found->remove(method);
guarantee(result, err_msg("could not find nmethod "PTR_FORMAT" during removal although we previously found it", method));
// eventually free completely emptied chunk
if (found->is_empty()) {
_list.remove_chunk(found);
free(found);
bool result = found->remove_lock_free(method);
if (result) {
Atomic::dec_ptr((volatile intptr_t*)&_length);
}
_length--;
}
assert(!contains(method), err_msg(PTR_FORMAT" still contains nmethod "PTR_FORMAT, this, method));
}

nmethod* G1CodeRootSet::pop() {
do {
while (true) {
G1CodeRootChunk* cur = _list.head();
if (cur == NULL) {
assert(_length == 0, "when there are no chunks, there should be no elements");

@ -180,7 +218,7 @@ nmethod* G1CodeRootSet::pop() {
} else {
free(_list.get_chunk_at_head());
}
} while (true);
}
}

G1CodeRootChunk* G1CodeRootSet::find(nmethod* method) {

@ -31,6 +31,14 @@

class CodeBlobClosure;

// The elements of a G1CodeRootChunk are either:
// 1) nmethod pointers
// 2) nodes in an internally chained free list
typedef union {
nmethod* _nmethod;
void* _link;
} NmethodOrLink;

class G1CodeRootChunk : public CHeapObj<mtGC> {
private:
static const int NUM_ENTRIES = 32;
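
NmethodOrLink carries no explicit tag; the is_link() predicate introduced in the next hunk infers what an entry holds purely from its bit pattern, since a free-list link is either NULL or points back into the chunk's own _data array, while a live nmethod* always points outside it. A tiny self-contained illustration of that discrimination rule (PayloadOrLink and Chunk are hypothetical names; like the original, it deliberately puns the two union members):

#include <cassert>
#include <cstddef>

union PayloadOrLink {
  void* payload;         // stands in for nmethod*
  PayloadOrLink* link;   // stands in for the chained free-list node
};

struct Chunk {
  static const int kEntries = 4;
  PayloadOrLink data[kEntries];

  // Mirrors is_link(): NULL or a pointer into our own array cannot be a payload.
  bool is_link(const PayloadOrLink* e) const {
    return e->link == NULL ||
           (e->link >= &data[0] && e->link < &data[kEntries]);
  }
  bool is_payload(const PayloadOrLink* e) const { return !is_link(e); }
};

int main() {
  Chunk c = {};
  static int external;
  c.data[0].payload = &external;  // points outside the chunk: a payload
  c.data[1].link = &c.data[0];    // points into the chunk: a link
  assert(c.is_payload(&c.data[0]));
  assert(c.is_link(&c.data[1]));
  return 0;
}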
@ -38,16 +46,28 @@ class G1CodeRootChunk : public CHeapObj<mtGC> {
G1CodeRootChunk* _next;
G1CodeRootChunk* _prev;

nmethod** _top;
NmethodOrLink* _top;
// First free position within the chunk.
volatile NmethodOrLink* _free;

nmethod* _data[NUM_ENTRIES];
NmethodOrLink _data[NUM_ENTRIES];

nmethod** bottom() const {
return (nmethod**) &(_data[0]);
NmethodOrLink* bottom() const {
return (NmethodOrLink*) &(_data[0]);
}

nmethod** end() const {
return (nmethod**) &(_data[NUM_ENTRIES]);
NmethodOrLink* end() const {
return (NmethodOrLink*) &(_data[NUM_ENTRIES]);
}

bool is_link(NmethodOrLink* nmethod_or_link) {
return nmethod_or_link->_link == NULL ||
(bottom() <= nmethod_or_link->_link
&& nmethod_or_link->_link < end());
}

bool is_nmethod(NmethodOrLink* nmethod_or_link) {
return !is_link(nmethod_or_link);
}

public:
@ -85,46 +105,55 @@ class G1CodeRootChunk : public CHeapObj<mtGC> {
}

bool is_full() const {
return _top == (nmethod**)end();
return _top == end() && _free == NULL;
}

bool contains(nmethod* method) {
nmethod** cur = bottom();
NmethodOrLink* cur = bottom();
while (cur != _top) {
if (*cur == method) return true;
if (cur->_nmethod == method) return true;
cur++;
}
return false;
}

bool add(nmethod* method) {
if (is_full()) return false;
*_top = method;
_top++;
if (is_full()) {
return false;
}

if (_free != NULL) {
// Take from internally chained free list
NmethodOrLink* first_free = (NmethodOrLink*)_free;
_free = (NmethodOrLink*)_free->_link;
first_free->_nmethod = method;
} else {
// Take from top.
_top->_nmethod = method;
_top++;
}

return true;
}

bool remove(nmethod* method) {
nmethod** cur = bottom();
while (cur != _top) {
if (*cur == method) {
memmove(cur, cur + 1, (_top - (cur + 1)) * sizeof(nmethod**));
_top--;
return true;
}
cur++;
}
return false;
}
bool remove_lock_free(nmethod* method);

void nmethods_do(CodeBlobClosure* blk);

nmethod* pop() {
if (is_empty()) {
return NULL;
if (_free != NULL) {
// Kill the free list.
_free = NULL;
}
_top--;
return *_top;

while (!is_empty()) {
_top--;
if (is_nmethod(_top)) {
return _top->_nmethod;
}
}

return NULL;
}
};

@ -193,7 +222,7 @@ class G1CodeRootSet VALUE_OBJ_CLASS_SPEC {
// method is likely to be repeatedly called with the same nmethod.
void add(nmethod* method);

void remove(nmethod* method);
void remove_lock_free(nmethod* method);
nmethod* pop();

bool contains(nmethod* method);

@ -57,6 +57,7 @@
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "memory/allocation.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/generationSpec.hpp"
#include "memory/iterator.hpp"
@ -91,10 +92,10 @@ size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
// G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism.
// The number of GC workers is passed to heap_region_par_iterate_chunked().
// It does use run_task() which sets _n_workers in the task.
// G1ParTask executes g1_process_strong_roots() ->
// SharedHeap::process_strong_roots() which calls eventually to
// G1ParTask executes g1_process_roots() ->
// SharedHeap::process_roots() which calls eventually to
// CardTableModRefBS::par_non_clean_card_iterate_work() which uses
// SequentialSubTasksDone. SharedHeap::process_strong_roots() also
// SequentialSubTasksDone. SharedHeap::process_roots() also
// directly uses SubTasksDone (_process_strong_tasks field in SharedHeap).
//

@ -3379,25 +3380,19 @@ void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
if (!silent) { gclog_or_tty->print("Roots "); }
VerifyRootsClosure rootsCl(vo);
VerifyKlassClosure klassCl(this, &rootsCl);
CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);

// We apply the relevant closures to all the oops in the
// system dictionary, class loader data graph and the string table.
// Don't verify the code cache here, since it's verified below.
const int so = SO_AllClasses | SO_Strings;

// Need cleared claim bits for the strong roots processing
ClassLoaderDataGraph::clear_claimed_marks();

process_strong_roots(true, // activate StrongRootsScope
ScanningOption(so), // roots scanning options
&rootsCl,
&klassCl
);

// Verify the nmethods in the code cache.
// system dictionary, class loader data graph, the string table
// and the nmethods in the code cache.
G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
CodeCache::blobs_do(&blobsCl);

process_all_roots(true, // activate StrongRootsScope
SO_AllCodeCache, // roots scanning options
&rootsCl,
&cldCl,
&blobsCl);

bool failures = rootsCl.failures() || codeRootsCl.failures();

@ -3979,6 +3974,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
increment_gc_time_stamp();

verify_before_gc();

check_bitmaps("GC Start");

COMPILER2_PRESENT(DerivedPointerTable::clear());
@ -4329,11 +4325,7 @@ void G1CollectedHeap::release_mutator_alloc_region() {
assert(_mutator_alloc_region.get() == NULL, "post-condition");
}

void G1CollectedHeap::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
assert_at_safepoint(true /* should_be_vm_thread */);

_survivor_gc_alloc_region.init();
_old_gc_alloc_region.init();
void G1CollectedHeap::use_retained_old_gc_alloc_region(EvacuationInfo& evacuation_info) {
HeapRegion* retained_region = _retained_old_gc_alloc_region;
_retained_old_gc_alloc_region = NULL;

@ -4365,6 +4357,15 @@ void G1CollectedHeap::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
}
}

void G1CollectedHeap::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
assert_at_safepoint(true /* should_be_vm_thread */);

_survivor_gc_alloc_region.init();
_old_gc_alloc_region.init();

use_retained_old_gc_alloc_region(evacuation_info);
}

void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
evacuation_info.set_allocation_regions(_survivor_gc_alloc_region.count() +
_old_gc_alloc_region.count());
@ -4587,7 +4588,7 @@ void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
}
}

template <G1Barrier barrier, bool do_mark_object>
template <G1Barrier barrier, G1Mark do_mark_object>
template <class T>
void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
@ -4609,7 +4610,7 @@ void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
}
assert(forwardee != NULL, "forwardee should not be NULL");
oopDesc::encode_store_heap_oop(p, forwardee);
if (do_mark_object && forwardee != obj) {
if (do_mark_object != G1MarkNone && forwardee != obj) {
// If the object is self-forwarded we don't need to explicitly
// mark it, the evacuation failure protocol will do so.
mark_forwarded_object(obj, forwardee);
@ -4620,9 +4621,8 @@ void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
}
} else {
// The object is not in collection set. If we're a root scanning
// closure during an initial mark pause (i.e. do_mark_object will
// be true) then attempt to mark the object.
if (do_mark_object) {
// closure during an initial mark pause then attempt to mark the object.
if (do_mark_object == G1MarkFromRoot) {
mark_object(obj);
}
}
@ -4632,8 +4632,8 @@ void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
}
}

template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(oop* p);
template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(narrowOop* p);
template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(oop* p);
template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(narrowOop* p);

class G1ParEvacuateFollowersClosure : public VoidClosure {
protected:
@ -4746,6 +4746,51 @@ public:
_n_workers = active_workers;
}

// Helps out with CLD processing.
//
// During InitialMark we need to:
// 1) Scavenge all CLDs for the young GC.
// 2) Mark all objects directly reachable from strong CLDs.
template <G1Mark do_mark_object>
class G1CLDClosure : public CLDClosure {
G1ParCopyClosure<G1BarrierNone, do_mark_object>* _oop_closure;
G1ParCopyClosure<G1BarrierKlass, do_mark_object> _oop_in_klass_closure;
G1KlassScanClosure _klass_in_cld_closure;
bool _claim;

public:
G1CLDClosure(G1ParCopyClosure<G1BarrierNone, do_mark_object>* oop_closure,
bool only_young, bool claim)
: _oop_closure(oop_closure),
_oop_in_klass_closure(oop_closure->g1(),
oop_closure->pss(),
oop_closure->rp()),
_klass_in_cld_closure(&_oop_in_klass_closure, only_young),
_claim(claim) {

}

void do_cld(ClassLoaderData* cld) {
cld->oops_do(_oop_closure, &_klass_in_cld_closure, _claim);
}
};

class G1CodeBlobClosure: public CodeBlobClosure {
OopClosure* _f;

public:
G1CodeBlobClosure(OopClosure* f) : _f(f) {}
void do_code_blob(CodeBlob* blob) {
nmethod* that = blob->as_nmethod_or_null();
if (that != NULL) {
if (!that->test_set_oops_do_mark()) {
that->oops_do(_f);
that->fix_oop_relocations();
}
}
}
};

void work(uint worker_id) {
if (worker_id >= _n_workers) return; // no work needed this round

@ -4763,40 +4808,62 @@ public:

pss.set_evac_failure_closure(&evac_failure_cl);

G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss, rp);
G1ParScanMetadataClosure only_scan_metadata_cl(_g1h, &pss, rp);
bool only_young = _g1h->g1_policy()->gcs_are_young();

G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp);
G1ParScanAndMarkMetadataClosure scan_mark_metadata_cl(_g1h, &pss, rp);
// Non-IM young GC.
G1ParCopyClosure<G1BarrierNone, G1MarkNone> scan_only_root_cl(_g1h, &pss, rp);
G1CLDClosure<G1MarkNone> scan_only_cld_cl(&scan_only_root_cl,
only_young, // Only process dirty klasses.
false); // No need to claim CLDs.
// IM young GC.
// Strong roots closures.
G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot> scan_mark_root_cl(_g1h, &pss, rp);
G1CLDClosure<G1MarkFromRoot> scan_mark_cld_cl(&scan_mark_root_cl,
false, // Process all klasses.
true); // Need to claim CLDs.
// Weak roots closures.
G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, &pss, rp);
G1CLDClosure<G1MarkPromotedFromRoot> scan_mark_weak_cld_cl(&scan_mark_weak_root_cl,
false, // Process all klasses.
true); // Need to claim CLDs.

bool only_young = _g1h->g1_policy()->gcs_are_young();
G1KlassScanClosure scan_mark_klasses_cl_s(&scan_mark_metadata_cl, false);
G1KlassScanClosure only_scan_klasses_cl_s(&only_scan_metadata_cl, only_young);
G1CodeBlobClosure scan_only_code_cl(&scan_only_root_cl);
G1CodeBlobClosure scan_mark_code_cl(&scan_mark_root_cl);
// IM Weak code roots are handled later.

OopClosure* scan_root_cl = &only_scan_root_cl;
G1KlassScanClosure* scan_klasses_cl = &only_scan_klasses_cl_s;
OopClosure* strong_root_cl;
OopClosure* weak_root_cl;
CLDClosure* strong_cld_cl;
CLDClosure* weak_cld_cl;
CodeBlobClosure* strong_code_cl;

if (_g1h->g1_policy()->during_initial_mark_pause()) {
// We also need to mark copied objects.
scan_root_cl = &scan_mark_root_cl;
scan_klasses_cl = &scan_mark_klasses_cl_s;
strong_root_cl = &scan_mark_root_cl;
weak_root_cl = &scan_mark_weak_root_cl;
strong_cld_cl = &scan_mark_cld_cl;
weak_cld_cl = &scan_mark_weak_cld_cl;
strong_code_cl = &scan_mark_code_cl;
} else {
strong_root_cl = &scan_only_root_cl;
weak_root_cl = &scan_only_root_cl;
strong_cld_cl = &scan_only_cld_cl;
weak_cld_cl = &scan_only_cld_cl;
strong_code_cl = &scan_only_code_cl;
}

G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss);

// Don't scan the scavengable methods in the code cache as part
// of strong root scanning. The code roots that point into a
// region in the collection set are scanned when we scan the
// region's RSet.
int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings;
G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss);

pss.start_strong_roots();
_g1h->g1_process_strong_roots(/* is scavenging */ true,
SharedHeap::ScanningOption(so),
scan_root_cl,
&push_heap_rs_cl,
scan_klasses_cl,
worker_id);
_g1h->g1_process_roots(strong_root_cl,
weak_root_cl,
&push_heap_rs_cl,
strong_cld_cl,
weak_cld_cl,
strong_code_cl,
worker_id);

pss.end_strong_roots();

{
@ -4834,24 +4901,31 @@ public:

void
G1CollectedHeap::
g1_process_strong_roots(bool is_scavenging,
ScanningOption so,
OopClosure* scan_non_heap_roots,
OopsInHeapRegionClosure* scan_rs,
G1KlassScanClosure* scan_klasses,
uint worker_i) {
g1_process_roots(OopClosure* scan_non_heap_roots,
OopClosure* scan_non_heap_weak_roots,
OopsInHeapRegionClosure* scan_rs,
CLDClosure* scan_strong_clds,
CLDClosure* scan_weak_clds,
CodeBlobClosure* scan_strong_code,
uint worker_i) {

// First scan the strong roots
// First scan the shared roots.
double ext_roots_start = os::elapsedTime();
double closure_app_time_sec = 0.0;

BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
bool during_im = _g1h->g1_policy()->during_initial_mark_pause();

process_strong_roots(false, // no scoping; this is parallel code
so,
&buf_scan_non_heap_roots,
scan_klasses
);
BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
BufferingOopClosure buf_scan_non_heap_weak_roots(scan_non_heap_weak_roots);

process_roots(false, // no scoping; this is parallel code
SharedHeap::SO_None,
&buf_scan_non_heap_roots,
&buf_scan_non_heap_weak_roots,
scan_strong_clds,
// Initial Mark handles the weak CLDs separately.
(during_im ? NULL : scan_weak_clds),
scan_strong_code);

// Now the CM ref_processor roots.
if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
@ -4862,10 +4936,21 @@ g1_process_strong_roots(bool is_scavenging,
ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
}

if (during_im) {
// Barrier to make sure all workers passed
// the strong CLD and strong nmethods phases.
active_strong_roots_scope()->wait_until_all_workers_done_with_threads(n_par_threads());

// Now take the complement of the strong CLDs.
ClassLoaderDataGraph::roots_cld_do(NULL, scan_weak_clds);
}

// Finish up any enqueued closure apps (attributed as object copy time).
buf_scan_non_heap_roots.done();
buf_scan_non_heap_weak_roots.done();

double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds();
double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds()
+ buf_scan_non_heap_weak_roots.closure_app_seconds();

g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);

@ -4889,22 +4974,10 @@ g1_process_strong_roots(bool is_scavenging,
}
g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);

// If this is an initial mark pause, and we're not scanning
// the entire code cache, we need to mark the oops in the
// strong code root lists for the regions that are not in
// the collection set.
// Note all threads participate in this set of root tasks.
double mark_strong_code_roots_ms = 0.0;
if (g1_policy()->during_initial_mark_pause() && !(so & SO_AllCodeCache)) {
double mark_strong_roots_start = os::elapsedTime();
mark_strong_code_roots(worker_i);
mark_strong_code_roots_ms = (os::elapsedTime() - mark_strong_roots_start) * 1000.0;
}
g1_policy()->phase_times()->record_strong_code_root_mark_time(worker_i, mark_strong_code_roots_ms);

// Now scan the complement of the collection set.
CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, true /* do_marking */);
g1_rem_set()->oops_into_collection_set_do(scan_rs, &eager_scan_code_roots, worker_i);
MarkingCodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots, CodeBlobToOopClosure::FixRelocations);

g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);

_process_strong_tasks->all_tasks_completed();
}
@ -4926,7 +4999,8 @@ private:
bool _do_in_parallel;
public:
G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
AbstractGangTask("Par String/Symbol table unlink"), _is_alive(is_alive),
AbstractGangTask("String/Symbol Unlinking"),
_is_alive(is_alive),
_do_in_parallel(G1CollectedHeap::use_parallel_gc_threads()),
_process_strings(process_strings), _strings_processed(0), _strings_removed(0),
_process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
@ -4948,6 +5022,14 @@ public:
guarantee(!_process_symbols || !_do_in_parallel || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
err_msg("claim value %d after unlink less than initial symbol table size %d",
SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));

if (G1TraceStringSymbolTableScrubbing) {
gclog_or_tty->print_cr("Cleaned string and symbol table, "
"strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, "
"symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed",
strings_processed(), strings_removed(),
symbols_processed(), symbols_removed());
}
}

void work(uint worker_id) {
@ -4983,12 +5065,279 @@ public:
size_t symbols_removed() const { return (size_t)_symbols_removed; }
};

void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
bool process_strings, bool process_symbols) {
uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
_g1h->workers()->active_workers() : 1);
class G1CodeCacheUnloadingTask VALUE_OBJ_CLASS_SPEC {
private:
static Monitor* _lock;

G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
BoolObjectClosure* const _is_alive;
const bool _unloading_occurred;
const uint _num_workers;

// Variables used to claim nmethods.
nmethod* _first_nmethod;
volatile nmethod* _claimed_nmethod;

// The list of nmethods that need to be processed by the second pass.
volatile nmethod* _postponed_list;
volatile uint _num_entered_barrier;

public:
G1CodeCacheUnloadingTask(uint num_workers, BoolObjectClosure* is_alive, bool unloading_occurred) :
_is_alive(is_alive),
_unloading_occurred(unloading_occurred),
_num_workers(num_workers),
_first_nmethod(NULL),
_claimed_nmethod(NULL),
_postponed_list(NULL),
_num_entered_barrier(0)
{
nmethod::increase_unloading_clock();
_first_nmethod = CodeCache::alive_nmethod(CodeCache::first());
_claimed_nmethod = (volatile nmethod*)_first_nmethod;
}

~G1CodeCacheUnloadingTask() {
CodeCache::verify_clean_inline_caches();

CodeCache::set_needs_cache_clean(false);
guarantee(CodeCache::scavenge_root_nmethods() == NULL, "Must be");

CodeCache::verify_icholder_relocations();
}

private:
void add_to_postponed_list(nmethod* nm) {
nmethod* old;
do {
old = (nmethod*)_postponed_list;
nm->set_unloading_next(old);
} while ((nmethod*)Atomic::cmpxchg_ptr(nm, &_postponed_list, old) != old);
}

void clean_nmethod(nmethod* nm) {
bool postponed = nm->do_unloading_parallel(_is_alive, _unloading_occurred);

if (postponed) {
// This nmethod referred to an nmethod that has not been cleaned/unloaded yet.
add_to_postponed_list(nm);
}

// Mark that this nmethod has been cleaned/unloaded.
// After this call, it will be safe to ask if this nmethod was unloaded or not.
nm->set_unloading_clock(nmethod::global_unloading_clock());
}

void clean_nmethod_postponed(nmethod* nm) {
nm->do_unloading_parallel_postponed(_is_alive, _unloading_occurred);
}

static const int MaxClaimNmethods = 16;

void claim_nmethods(nmethod** claimed_nmethods, int *num_claimed_nmethods) {
nmethod* first;
nmethod* last;

do {
*num_claimed_nmethods = 0;

first = last = (nmethod*)_claimed_nmethod;

if (first != NULL) {
for (int i = 0; i < MaxClaimNmethods; i++) {
last = CodeCache::alive_nmethod(CodeCache::next(last));

if (last == NULL) {
break;
}

claimed_nmethods[i] = last;
(*num_claimed_nmethods)++;
}
}

} while ((nmethod*)Atomic::cmpxchg_ptr(last, &_claimed_nmethod, first) != first);
}

nmethod* claim_postponed_nmethod() {
nmethod* claim;
nmethod* next;

do {
claim = (nmethod*)_postponed_list;
if (claim == NULL) {
return NULL;
}

next = claim->unloading_next();

} while ((nmethod*)Atomic::cmpxchg_ptr(next, &_postponed_list, claim) != claim);

return claim;
}

public:
// Mark that we're done with the first pass of nmethod cleaning.
void barrier_mark(uint worker_id) {
MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
_num_entered_barrier++;
if (_num_entered_barrier == _num_workers) {
ml.notify_all();
}
}

// See if we have to wait for the other workers to
// finish their first-pass nmethod cleaning work.
void barrier_wait(uint worker_id) {
if (_num_entered_barrier < _num_workers) {
MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
while (_num_entered_barrier < _num_workers) {
ml.wait(Mutex::_no_safepoint_check_flag, 0, false);
}
}
}

// Cleaning and unloading of nmethods. Some work has to be postponed
// to the second pass, when we know which nmethods survive.
void work_first_pass(uint worker_id) {
// The first nmethod is claimed by the first worker.
if (worker_id == 0 && _first_nmethod != NULL) {
clean_nmethod(_first_nmethod);
_first_nmethod = NULL;
}

int num_claimed_nmethods;
nmethod* claimed_nmethods[MaxClaimNmethods];

while (true) {
claim_nmethods(claimed_nmethods, &num_claimed_nmethods);

if (num_claimed_nmethods == 0) {
break;
}

for (int i = 0; i < num_claimed_nmethods; i++) {
clean_nmethod(claimed_nmethods[i]);
}
}
}

void work_second_pass(uint worker_id) {
nmethod* nm;
// Take care of postponed nmethods.
while ((nm = claim_postponed_nmethod()) != NULL) {
clean_nmethod_postponed(nm);
}
}
};

Monitor* G1CodeCacheUnloadingTask::_lock = new Monitor(Mutex::leaf, "Code Cache Unload lock");
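
barrier_mark()/barrier_wait() above form a simple monitor-based rendezvous: each worker announces the end of its first cleaning pass, and no one starts the second pass until the count reaches the gang size. The same protocol in standalone form, with std::mutex/std::condition_variable in place of HotSpot's MonitorLockerEx (WorkerBarrier is a hypothetical name):

#include <condition_variable>
#include <mutex>

class WorkerBarrier {
  std::mutex _lock;
  std::condition_variable _cv;
  unsigned _num_entered;
  const unsigned _num_workers;

public:
  explicit WorkerBarrier(unsigned num_workers)
    : _num_entered(0), _num_workers(num_workers) {}

  // Called once per worker when its first-pass work is done.
  void mark() {
    std::lock_guard<std::mutex> g(_lock);
    if (++_num_entered == _num_workers) {
      _cv.notify_all();
    }
  }

  // Block until every worker has called mark(); as in the G1 code, a worker
  // can do other useful work (string/symbol unlinking) between the two calls.
  void wait() {
    std::unique_lock<std::mutex> g(_lock);
    _cv.wait(g, [this] { return _num_entered >= _num_workers; });
  }
};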

class G1KlassCleaningTask : public StackObj {
BoolObjectClosure* _is_alive;
volatile jint _clean_klass_tree_claimed;
ClassLoaderDataGraphKlassIteratorAtomic _klass_iterator;

public:
G1KlassCleaningTask(BoolObjectClosure* is_alive) :
_is_alive(is_alive),
_clean_klass_tree_claimed(0),
_klass_iterator() {
}

private:
bool claim_clean_klass_tree_task() {
if (_clean_klass_tree_claimed) {
return false;
}

return Atomic::cmpxchg(1, (jint*)&_clean_klass_tree_claimed, 0) == 0;
}

InstanceKlass* claim_next_klass() {
Klass* klass;
do {
klass =_klass_iterator.next_klass();
} while (klass != NULL && !klass->oop_is_instance());

return (InstanceKlass*)klass;
}

public:

void clean_klass(InstanceKlass* ik) {
ik->clean_implementors_list(_is_alive);
ik->clean_method_data(_is_alive);

// G1 specific cleanup work that has
// been moved here to be done in parallel.
ik->clean_dependent_nmethods();
}

void work() {
ResourceMark rm;

// One worker will clean the subklass/sibling klass tree.
if (claim_clean_klass_tree_task()) {
Klass::clean_subklass_tree(_is_alive);
}

// All workers will help clean the classes.
InstanceKlass* klass;
while ((klass = claim_next_klass()) != NULL) {
clean_klass(klass);
}
}
};
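
Both claiming helpers above reduce to small atomic idioms: a one-shot flag claimed with cmpxchg for the single subklass-tree task, and an atomic iterator that hands each klass to exactly one worker. A compact sketch of the two idioms using std::atomic (ClaimingWork and its members are hypothetical; the real code iterates the class loader data graph rather than an index range):

#include <atomic>
#include <cstddef>

struct ClaimingWork {
  std::atomic<int> tree_claimed;
  std::atomic<size_t> cursor;

  ClaimingWork() : tree_claimed(0), cursor(0) {}

  // One-shot claim: exactly one worker sees 'true', as in
  // claim_clean_klass_tree_task() above.
  bool claim_tree_task() {
    int expected = 0;
    return tree_claimed.load() == 0 &&
           tree_claimed.compare_exchange_strong(expected, 1);
  }

  // Hand out indices 0..count-1 exactly once each; returns 'count' once
  // the work is exhausted, mirroring claim_next_klass() returning NULL.
  size_t claim_next(size_t count) {
    size_t i = cursor.fetch_add(1);
    return (i < count) ? i : count;
  }
};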

// To minimize the remark pause times, the tasks below are done in parallel.
class G1ParallelCleaningTask : public AbstractGangTask {
private:
G1StringSymbolTableUnlinkTask _string_symbol_task;
G1CodeCacheUnloadingTask _code_cache_task;
G1KlassCleaningTask _klass_cleaning_task;

public:
// The constructor is run in the VMThread.
G1ParallelCleaningTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, uint num_workers, bool unloading_occurred) :
AbstractGangTask("Parallel Cleaning"),
_string_symbol_task(is_alive, process_strings, process_symbols),
_code_cache_task(num_workers, is_alive, unloading_occurred),
_klass_cleaning_task(is_alive) {
}

// The parallel work done by all worker threads.
void work(uint worker_id) {
// Do first pass of code cache cleaning.
_code_cache_task.work_first_pass(worker_id);

// Let the threads mark that the first pass is done.
_code_cache_task.barrier_mark(worker_id);

// Clean the Strings and Symbols.
_string_symbol_task.work(worker_id);

// Wait for all workers to finish the first code cache cleaning pass.
_code_cache_task.barrier_wait(worker_id);

// Do the second code cache cleaning work, which relies on
// the liveness information gathered during the first pass.
_code_cache_task.work_second_pass(worker_id);

// Clean all klasses that were not unloaded.
_klass_cleaning_task.work();
}
};


void G1CollectedHeap::parallel_cleaning(BoolObjectClosure* is_alive,
bool process_strings,
bool process_symbols,
bool class_unloading_occurred) {
uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
workers()->active_workers() : 1);

G1ParallelCleaningTask g1_unlink_task(is_alive, process_strings, process_symbols,
n_workers, class_unloading_occurred);
if (G1CollectedHeap::use_parallel_gc_threads()) {
set_par_threads(n_workers);
workers()->run_task(&g1_unlink_task);
@ -4996,12 +5345,21 @@ void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive
} else {
g1_unlink_task.work(0);
}
if (G1TraceStringSymbolTableScrubbing) {
gclog_or_tty->print_cr("Cleaned string and symbol table, "
"strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, "
"symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed",
g1_unlink_task.strings_processed(), g1_unlink_task.strings_removed(),
g1_unlink_task.symbols_processed(), g1_unlink_task.symbols_removed());
}

void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
bool process_strings, bool process_symbols) {
{
uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
_g1h->workers()->active_workers() : 1);
G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
if (G1CollectedHeap::use_parallel_gc_threads()) {
set_par_threads(n_workers);
workers()->run_task(&g1_unlink_task);
set_par_threads(0);
} else {
g1_unlink_task.work(0);
}
}

if (G1StringDedup::is_enabled()) {
@ -5594,6 +5952,10 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {

{
StrongRootsScope srs(this);
// InitialMark needs claim bits to keep track of the marked-through CLDs.
if (g1_policy()->during_initial_mark_pause()) {
ClassLoaderDataGraph::clear_claimed_marks();
}

if (G1CollectedHeap::use_parallel_gc_threads()) {
// The individual threads will set their evac-failure closures.
@ -6629,106 +6991,6 @@ void G1CollectedHeap::purge_code_root_memory() {
g1_policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms);
}

// Mark all the code roots that point into regions *not* in the
// collection set.
//
// Note we do not want to use a "marking" CodeBlobToOopClosure while
// walking the the code roots lists of regions not in the collection
// set. Suppose we have an nmethod (M) that points to objects in two
// separate regions - one in the collection set (R1) and one not (R2).
// Using a "marking" CodeBlobToOopClosure here would result in "marking"
// nmethod M when walking the code roots for R1. When we come to scan
// the code roots for R2, we would see that M is already marked and it
// would be skipped and the objects in R2 that are referenced from M
// would not be evacuated.

class MarkStrongCodeRootCodeBlobClosure: public CodeBlobClosure {

class MarkStrongCodeRootOopClosure: public OopClosure {
ConcurrentMark* _cm;
HeapRegion* _hr;
uint _worker_id;

template <class T> void do_oop_work(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
// Only mark objects in the region (which is assumed
// to be not in the collection set).
if (_hr->is_in(obj)) {
_cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
}
}
}

public:
MarkStrongCodeRootOopClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id) :
_cm(cm), _hr(hr), _worker_id(worker_id) {
assert(!_hr->in_collection_set(), "sanity");
}

void do_oop(narrowOop* p) { do_oop_work(p); }
void do_oop(oop* p) { do_oop_work(p); }
};

MarkStrongCodeRootOopClosure _oop_cl;

public:
MarkStrongCodeRootCodeBlobClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id):
_oop_cl(cm, hr, worker_id) {}

void do_code_blob(CodeBlob* cb) {
nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
if (nm != NULL) {
nm->oops_do(&_oop_cl);
}
}
};

class MarkStrongCodeRootsHRClosure: public HeapRegionClosure {
G1CollectedHeap* _g1h;
uint _worker_id;

public:
MarkStrongCodeRootsHRClosure(G1CollectedHeap* g1h, uint worker_id) :
_g1h(g1h), _worker_id(worker_id) {}

bool doHeapRegion(HeapRegion *hr) {
HeapRegionRemSet* hrrs = hr->rem_set();
if (hr->continuesHumongous()) {
// Code roots should never be attached to a continuation of a humongous region
assert(hrrs->strong_code_roots_list_length() == 0,
err_msg("code roots should never be attached to continuations of humongous region "HR_FORMAT
" starting at "HR_FORMAT", but has "SIZE_FORMAT,
HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()),
hrrs->strong_code_roots_list_length()));
return false;
}

if (hr->in_collection_set()) {
// Don't mark code roots into regions in the collection set here.
// They will be marked when we scan them.
return false;
}

MarkStrongCodeRootCodeBlobClosure cb_cl(_g1h->concurrent_mark(), hr, _worker_id);
hr->strong_code_roots_do(&cb_cl);
return false;
}
};

void G1CollectedHeap::mark_strong_code_roots(uint worker_id) {
MarkStrongCodeRootsHRClosure cl(this, worker_id);
if (G1CollectedHeap::use_parallel_gc_threads()) {
heap_region_par_iterate_chunked(&cl,
worker_id,
workers()->active_workers(),
HeapRegion::ParMarkRootClaimValue);
} else {
heap_region_iterate(&cl);
}
}

class RebuildStrongCodeRootClosure: public CodeBlobClosure {
G1CollectedHeap* _g1h;

@ -210,6 +210,7 @@ class G1FastCSetBiasedMappedArray : public G1BiasedMappedArray<bool> {
class RefineCardTableEntryClosure;

class G1CollectedHeap : public SharedHeap {
friend class VM_CollectForMetadataAllocation;
friend class VM_G1CollectForAllocation;
friend class VM_G1CollectFull;
friend class VM_G1IncCollectionPause;
@ -219,7 +220,7 @@ class G1CollectedHeap : public SharedHeap {
friend class OldGCAllocRegion;

// Closures used in implementation.
template <G1Barrier barrier, bool do_mark_object>
template <G1Barrier barrier, G1Mark do_mark_object>
friend class G1ParCopyClosure;
friend class G1IsAliveClosure;
friend class G1EvacuateFollowersClosure;
@ -346,6 +347,9 @@ private:
// It initializes the GC alloc regions at the start of a GC.
void init_gc_alloc_regions(EvacuationInfo& evacuation_info);

// Set up the retained old gc alloc region as the current old gc alloc region.
void use_retained_old_gc_alloc_region(EvacuationInfo& evacuation_info);
|
||||
|
||||
// It releases the GC alloc regions at the end of a GC.
|
||||
void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
|
||||
|
||||
@ -827,12 +831,13 @@ protected:
|
||||
// param is for use with parallel roots processing, and should be
|
||||
// the "i" of the calling parallel worker thread's work(i) function.
|
||||
// In the sequential case this param will be ignored.
|
||||
void g1_process_strong_roots(bool is_scavenging,
|
||||
ScanningOption so,
|
||||
OopClosure* scan_non_heap_roots,
|
||||
OopsInHeapRegionClosure* scan_rs,
|
||||
G1KlassScanClosure* scan_klasses,
|
||||
uint worker_i);
|
||||
void g1_process_roots(OopClosure* scan_non_heap_roots,
|
||||
OopClosure* scan_non_heap_weak_roots,
|
||||
OopsInHeapRegionClosure* scan_rs,
|
||||
CLDClosure* scan_strong_clds,
|
||||
CLDClosure* scan_weak_clds,
|
||||
CodeBlobClosure* scan_strong_code,
|
||||
uint worker_i);
|
||||
|
||||
// Notifies all the necessary spaces that the committed space has
|
||||
// been updated (either expanded or shrunk). It should be called
|
||||
@ -1025,7 +1030,7 @@ protected:
|
||||
// of G1CollectedHeap::_gc_time_stamp.
|
||||
unsigned int* _worker_cset_start_region_time_stamp;
|
||||
|
||||
enum G1H_process_strong_roots_tasks {
|
||||
enum G1H_process_roots_tasks {
|
||||
G1H_PS_filter_satb_buffers,
|
||||
G1H_PS_refProcessor_oops_do,
|
||||
// Leave this one last.
|
||||
@ -1607,10 +1612,6 @@ public:
|
||||
// Free up superfluous code root memory.
|
||||
void purge_code_root_memory();
|
||||
|
||||
// During an initial mark pause, mark all the code roots that
|
||||
// point into regions *not* in the collection set.
|
||||
void mark_strong_code_roots(uint worker_id);
|
||||
|
||||
// Rebuild the strong code root lists for each region
|
||||
// after a full GC.
|
||||
void rebuild_strong_code_roots();
|
||||
@ -1619,6 +1620,9 @@ public:
|
||||
// in symbol table, possibly in parallel.
|
||||
void unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool unlink_strings = true, bool unlink_symbols = true);
|
||||
|
||||
// Parallel phase of unloading/cleaning after G1 concurrent mark.
|
||||
void parallel_cleaning(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, bool class_unloading_occurred);
|
||||
|
||||
// Redirty logged cards in the refinement queue.
|
||||
void redirty_logged_cards();
|
||||
// Verification
|
||||
|
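The new parallel_cleaning entry point bundles string/symbol table unlinking with the class-unloading cleanup that runs after remark. A hedged sketch of the call shape (the actual call site and its is-alive closure live in the concurrent mark remark code; G1CMIsAliveClosure is assumed here only for illustration):

G1CollectedHeap* g1h = G1CollectedHeap::heap();
G1CMIsAliveClosure g1_is_alive(g1h);   // liveness predicate from the mark bitmaps
g1h->parallel_cleaning(&g1_is_alive,
                       true,   // process_strings
                       true,   // process_symbols
                       true);  // class_unloading_occurred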
@ -71,6 +71,9 @@ private:
bool _during_initial_mark;
bool _during_conc_mark;
uint _worker_id;
HeapWord* _end_of_last_gap;
HeapWord* _last_gap_threshold;
HeapWord* _last_obj_threshold;

public:
RemoveSelfForwardPtrObjClosure(G1CollectedHeap* g1, ConcurrentMark* cm,

@ -83,7 +86,10 @@ public:
_update_rset_cl(update_rset_cl),
_during_initial_mark(during_initial_mark),
_during_conc_mark(during_conc_mark),
_worker_id(worker_id) { }
_worker_id(worker_id),
_end_of_last_gap(hr->bottom()),
_last_gap_threshold(hr->bottom()),
_last_obj_threshold(hr->bottom()) { }

size_t marked_bytes() { return _marked_bytes; }

@ -107,7 +113,12 @@ public:
HeapWord* obj_addr = (HeapWord*) obj;
assert(_hr->is_in(obj_addr), "sanity");
size_t obj_size = obj->size();
_hr->update_bot_for_object(obj_addr, obj_size);
HeapWord* obj_end = obj_addr + obj_size;

if (_end_of_last_gap != obj_addr) {
// there was a gap before obj_addr
_last_gap_threshold = _hr->cross_threshold(_end_of_last_gap, obj_addr);
}

if (obj->is_forwarded() && obj->forwardee() == obj) {
// The object failed to move.

@ -115,7 +126,9 @@ public:
// We consider all objects that we find self-forwarded to be
// live. What we'll do is that we'll update the prev marking
// info so that they are all under PTAMS and explicitly marked.
_cm->markPrev(obj);
if (!_cm->isPrevMarked(obj)) {
_cm->markPrev(obj);
}
if (_during_initial_mark) {
// For the next marking info we'll only mark the
// self-forwarded objects explicitly if we are during

@ -145,13 +158,18 @@ public:
// remembered set entries missing given that we skipped cards on
// the collection set. So, we'll recreate such entries now.
obj->oop_iterate(_update_rset_cl);
assert(_cm->isPrevMarked(obj), "Should be marked!");
} else {

// The object has been either evacuated or is dead. Fill it with a
// dummy object.
MemRegion mr((HeapWord*) obj, obj_size);
MemRegion mr(obj_addr, obj_size);
CollectedHeap::fill_with_object(mr);

// must nuke all dead objects which we skipped when iterating over the region
_cm->clearRangePrevBitmap(MemRegion(_end_of_last_gap, obj_end));
}
_end_of_last_gap = obj_end;
_last_obj_threshold = _hr->cross_threshold(obj_addr, obj_end);
}
};

@ -182,13 +200,6 @@ public:
during_conc_mark,
_worker_id);

MemRegion mr(hr->bottom(), hr->end());
// We'll recreate the prev marking info so we'll first clear
// the prev bitmap range for this region. We never mark any
// CSet objects explicitly so the next bitmap range should be
// cleared anyway.
_cm->clearRangePrevBitmap(mr);

hr->note_self_forwarding_removal_start(during_initial_mark,
during_conc_mark);
_g1h->check_bitmaps("Self-Forwarding Ptr Removal", hr);

@ -167,7 +167,6 @@ G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
_last_update_rs_processed_buffers(_max_gc_threads, "%d"),
_last_scan_rs_times_ms(_max_gc_threads, "%.1lf"),
_last_strong_code_root_scan_times_ms(_max_gc_threads, "%.1lf"),
_last_strong_code_root_mark_times_ms(_max_gc_threads, "%.1lf"),
_last_obj_copy_times_ms(_max_gc_threads, "%.1lf"),
_last_termination_times_ms(_max_gc_threads, "%.1lf"),
_last_termination_attempts(_max_gc_threads, SIZE_FORMAT),

@ -194,7 +193,6 @@ void G1GCPhaseTimes::note_gc_start(uint active_gc_threads) {
_last_update_rs_processed_buffers.reset();
_last_scan_rs_times_ms.reset();
_last_strong_code_root_scan_times_ms.reset();
_last_strong_code_root_mark_times_ms.reset();
_last_obj_copy_times_ms.reset();
_last_termination_times_ms.reset();
_last_termination_attempts.reset();

@ -215,7 +213,6 @@ void G1GCPhaseTimes::note_gc_end() {
_last_update_rs_processed_buffers.verify();
_last_scan_rs_times_ms.verify();
_last_strong_code_root_scan_times_ms.verify();
_last_strong_code_root_mark_times_ms.verify();
_last_obj_copy_times_ms.verify();
_last_termination_times_ms.verify();
_last_termination_attempts.verify();

@ -230,7 +227,6 @@ void G1GCPhaseTimes::note_gc_end() {
_last_update_rs_times_ms.get(i) +
_last_scan_rs_times_ms.get(i) +
_last_strong_code_root_scan_times_ms.get(i) +
_last_strong_code_root_mark_times_ms.get(i) +
_last_obj_copy_times_ms.get(i) +
_last_termination_times_ms.get(i);

@ -302,9 +298,6 @@ void G1GCPhaseTimes::print(double pause_time_sec) {
if (_last_satb_filtering_times_ms.sum() > 0.0) {
_last_satb_filtering_times_ms.print(2, "SATB Filtering (ms)");
}
if (_last_strong_code_root_mark_times_ms.sum() > 0.0) {
_last_strong_code_root_mark_times_ms.print(2, "Code Root Marking (ms)");
}
_last_update_rs_times_ms.print(2, "Update RS (ms)");
_last_update_rs_processed_buffers.print(3, "Processed Buffers");
_last_scan_rs_times_ms.print(2, "Scan RS (ms)");

@ -322,9 +315,6 @@ void G1GCPhaseTimes::print(double pause_time_sec) {
if (_last_satb_filtering_times_ms.sum() > 0.0) {
_last_satb_filtering_times_ms.print(1, "SATB Filtering (ms)");
}
if (_last_strong_code_root_mark_times_ms.sum() > 0.0) {
_last_strong_code_root_mark_times_ms.print(1, "Code Root Marking (ms)");
}
_last_update_rs_times_ms.print(1, "Update RS (ms)");
_last_update_rs_processed_buffers.print(2, "Processed Buffers");
_last_scan_rs_times_ms.print(1, "Scan RS (ms)");

@ -120,7 +120,6 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
WorkerDataArray<int> _last_update_rs_processed_buffers;
WorkerDataArray<double> _last_scan_rs_times_ms;
WorkerDataArray<double> _last_strong_code_root_scan_times_ms;
WorkerDataArray<double> _last_strong_code_root_mark_times_ms;
WorkerDataArray<double> _last_obj_copy_times_ms;
WorkerDataArray<double> _last_termination_times_ms;
WorkerDataArray<size_t> _last_termination_attempts;

@ -199,10 +198,6 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
_last_strong_code_root_scan_times_ms.set(worker_i, ms);
}

void record_strong_code_root_mark_time(uint worker_i, double ms) {
_last_strong_code_root_mark_times_ms.set(worker_i, ms);
}

void record_obj_copy_time(uint worker_i, double ms) {
_last_obj_copy_times_ms.set(worker_i, ms);
}

@ -369,10 +364,6 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
return _last_strong_code_root_scan_times_ms.average();
}

double average_last_strong_code_root_mark_time(){
return _last_strong_code_root_mark_times_ms.average();
}

double average_last_obj_copy_time() {
return _last_obj_copy_times_ms.average();
}
@ -129,13 +129,15 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,

SharedHeap* sh = SharedHeap::heap();

// Need cleared claim bits for the strong roots processing
// Need cleared claim bits for the roots processing
ClassLoaderDataGraph::clear_claimed_marks();

sh->process_strong_roots(true, // activate StrongRootsScope
SharedHeap::SO_SystemClasses,
MarkingCodeBlobClosure follow_code_closure(&GenMarkSweep::follow_root_closure, !CodeBlobToOopClosure::FixRelocations);
sh->process_strong_roots(true, // activate StrongRootsScope
SharedHeap::SO_None,
&GenMarkSweep::follow_root_closure,
&GenMarkSweep::follow_klass_closure);
&GenMarkSweep::follow_cld_closure,
&follow_code_closure);

// Process reference objects found during marking
ReferenceProcessor* rp = GenMarkSweep::ref_processor();

@ -304,13 +306,15 @@ void G1MarkSweep::mark_sweep_phase3() {

SharedHeap* sh = SharedHeap::heap();

// Need cleared claim bits for the strong roots processing
// Need cleared claim bits for the roots processing
ClassLoaderDataGraph::clear_claimed_marks();

sh->process_strong_roots(true, // activate StrongRootsScope
SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_AllCodeCache,
&GenMarkSweep::adjust_pointer_closure,
&GenMarkSweep::adjust_klass_closure);
CodeBlobToOopClosure adjust_code_closure(&GenMarkSweep::adjust_pointer_closure, CodeBlobToOopClosure::FixRelocations);
sh->process_all_roots(true, // activate StrongRootsScope
SharedHeap::SO_AllCodeCache,
&GenMarkSweep::adjust_pointer_closure,
&GenMarkSweep::adjust_cld_closure,
&adjust_code_closure);

assert(GenMarkSweep::ref_processor() == g1h->ref_processor_stw(), "Sanity");
g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_pointer_closure);

@ -25,6 +25,8 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP

#include "memory/iterator.hpp"

class HeapRegion;
class G1CollectedHeap;
class G1RemSet;

@ -106,7 +108,7 @@ protected:
template <class T> void do_klass_barrier(T* p, oop new_obj);
};

template <G1Barrier barrier, bool do_mark_object>
template <G1Barrier barrier, G1Mark do_mark_object>
class G1ParCopyClosure : public G1ParCopyHelper {
private:
template <class T> void do_oop_work(T* p);

@ -121,19 +123,19 @@ public:
template <class T> void do_oop_nv(T* p) { do_oop_work(p); }
virtual void do_oop(oop* p) { do_oop_nv(p); }
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }

G1CollectedHeap* g1() { return _g1; };
G1ParScanThreadState* pss() { return _par_scan_state; }
ReferenceProcessor* rp() { return _ref_processor; };
};

typedef G1ParCopyClosure<G1BarrierNone, false> G1ParScanExtRootClosure;
typedef G1ParCopyClosure<G1BarrierKlass, false> G1ParScanMetadataClosure;

typedef G1ParCopyClosure<G1BarrierNone, true> G1ParScanAndMarkExtRootClosure;
typedef G1ParCopyClosure<G1BarrierKlass, true> G1ParScanAndMarkMetadataClosure;

typedef G1ParCopyClosure<G1BarrierNone, G1MarkNone> G1ParScanExtRootClosure;
typedef G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot> G1ParScanAndMarkExtRootClosure;
typedef G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> G1ParScanAndMarkWeakExtRootClosure;
// We use a separate closure to handle references during evacuation
// failure processing.

typedef G1ParCopyClosure<G1BarrierEvac, false> G1ParScanHeapEvacFailureClosure;
typedef G1ParCopyClosure<G1BarrierEvac, G1MarkNone> G1ParScanHeapEvacFailureClosure;

class FilterIntoCSClosure: public ExtendedOopClosure {
G1CollectedHeap* _g1;

@ -164,10 +166,11 @@ public:
};

// Closure for iterating over object fields during concurrent marking
class G1CMOopClosure : public ExtendedOopClosure {
class G1CMOopClosure : public MetadataAwareOopClosure {
protected:
ConcurrentMark* _cm;
private:
G1CollectedHeap* _g1h;
ConcurrentMark* _cm;
CMTask* _task;
public:
G1CMOopClosure(G1CollectedHeap* g1h, ConcurrentMark* cm, CMTask* task);

@ -177,7 +180,7 @@ public:
};

// Closure to scan the root regions during concurrent marking
class G1RootRegionScanClosure : public ExtendedOopClosure {
class G1RootRegionScanClosure : public MetadataAwareOopClosure {
private:
G1CollectedHeap* _g1h;
ConcurrentMark* _cm;
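Rebasing these closures onto MetadataAwareOopClosure is what lets concurrent marking participate in class unloading: the base class answers do_metadata() with true and proxies klass and CLD visits back through the oop closure. A rough sketch of that shape (illustrative only; the real base class is introduced elsewhere in this change):

class MetadataAwareOopClosureSketch : public ExtendedOopClosure {
 public:
  virtual bool do_metadata() { return true; }       // walk metadata too
  virtual void do_klass(Klass* k)           { /* visit the klass' oops */ }
  virtual void do_cld(ClassLoaderData* cld) { /* claim the CLD, then visit its oops */ }
};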
@ -32,6 +32,7 @@
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "memory/iterator.inline.hpp"
#include "runtime/prefetch.inline.hpp"

/*

@ -108,10 +109,6 @@ inline void G1ParPushHeapRSClosure::do_oop_nv(T* p) {

template <class T>
inline void G1CMOopClosure::do_oop_nv(T* p) {
assert(_g1h->is_in_g1_reserved((HeapWord*) p), "invariant");
assert(!_g1h->is_on_master_free_list(
_g1h->heap_region_containing((HeapWord*) p)), "invariant");

oop obj = oopDesc::load_decode_heap_oop(p);
if (_cm->verbose_high()) {
gclog_or_tty->print_cr("[%u] we're looking at location "
@ -30,14 +30,21 @@
// non-virtually, using a mechanism defined in this file. Extend these
// macros in the obvious way to add specializations for new closures.

// Forward declarations.
enum G1Barrier {
G1BarrierNone,
G1BarrierEvac,
G1BarrierKlass
};

template<G1Barrier barrier, bool do_mark_object>
enum G1Mark {
G1MarkNone,
G1MarkFromRoot,
G1MarkPromotedFromRoot
};

// Forward declarations.

template<G1Barrier barrier, G1Mark do_mark_object>
class G1ParCopyClosure;

class G1ParScanClosure;
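Replacing the boolean template parameter with the three-valued G1Mark enum keeps the marking decision a compile-time constant, so each G1ParCopyClosure instantiation stays branch-free while gaining a third mode. A sketch of the idea (illustrative; the real test sits in do_oop_work):

template <G1Mark do_mark_object>
inline void mark_if_needed(/* oop obj */) {
  if (do_mark_object != G1MarkNone) {
    // dead code eliminated for the G1MarkNone instantiation;
    // G1MarkPromotedFromRoot additionally distinguishes objects
    // promoted during an initial-mark pause
  }
}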
@ -400,7 +400,6 @@ void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
// We always recreate the prev marking info and we'll explicitly
// mark all objects we find to be self-forwarded on the prev
// bitmap. So all objects need to be below PTAMS.
_prev_top_at_mark_start = top();
_prev_marked_bytes = 0;

if (during_initial_mark) {

@ -424,6 +423,7 @@ void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark,
assert(0 <= marked_bytes && marked_bytes <= used(),
err_msg("marked: "SIZE_FORMAT" used: "SIZE_FORMAT,
marked_bytes, used()));
_prev_top_at_mark_start = top();
_prev_marked_bytes = marked_bytes;
}

@ -905,7 +905,8 @@ void HeapRegion::verify(VerifyOption vo,
size_t obj_size = block_size(p);
object_num += 1;

if (is_humongous != g1->isHumongous(obj_size)) {
if (is_humongous != g1->isHumongous(obj_size) &&
!g1->is_obj_dead(obj, this)) { // Dead objects may have bigger block_size since they span several objects.
gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
SIZE_FORMAT" words) in a %shumongous region",
p, g1->isHumongous(obj_size) ? "" : "non-",

@ -916,7 +917,9 @@ void HeapRegion::verify(VerifyOption vo,

// If it returns false, verify_for_object() will output the
// appropriate message.
if (do_bot_verify && !_offsets.verify_for_object(p, obj_size)) {
if (do_bot_verify &&
!g1->is_obj_dead(obj, this) &&
!_offsets.verify_for_object(p, obj_size)) {
*failures = true;
return;
}

@ -924,7 +927,10 @@ void HeapRegion::verify(VerifyOption vo,
if (!g1->is_obj_dead_cond(obj, this, vo)) {
if (obj->is_oop()) {
Klass* klass = obj->klass();
if (!klass->is_metaspace_object()) {
bool is_metaspace_object = Metaspace::contains(klass) ||
(vo == VerifyOption_G1UsePrevMarking &&
ClassLoaderDataGraph::unload_list_contains(klass));
if (!is_metaspace_object) {
gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
"not metadata", klass, (void *)obj);
*failures = true;
@ -247,11 +247,9 @@ class HeapRegion: public G1OffsetTableContigSpace {
bool _evacuation_failed;

// A heap region may be a member of one of a number of special subsets, each
// represented as linked lists through the field below. Currently, these
// sets include:
// represented as linked lists through the field below. Currently, there
// is only one set:
// The collection set.
// The set of allocation regions used in a collection pause.
// Spaces that may contain gray objects.
HeapRegion* _next_in_special_set;

// next region in the young "generation" region set
@ -93,18 +93,27 @@ G1OffsetTableContigSpace::block_start_const(const void* p) const {

inline bool
HeapRegion::block_is_obj(const HeapWord* p) const {
return p < top();
G1CollectedHeap* g1h = G1CollectedHeap::heap();
return !g1h->is_obj_dead(oop(p), this);
}

inline size_t
HeapRegion::block_size(const HeapWord *addr) const {
const HeapWord* current_top = top();
if (addr < current_top) {
return oop(addr)->size();
} else {
assert(addr == current_top, "just checking");
// Old regions' dead objects may have dead classes
// We need to find the next live object in some other
// manner than getting the oop size
G1CollectedHeap* g1h = G1CollectedHeap::heap();
if (g1h->is_obj_dead(oop(addr), this)) {
HeapWord* next = g1h->concurrent_mark()->prevMarkBitMap()->
getNextMarkedWordAddress(addr, prev_top_at_mark_start());

assert(next > addr, "must get the next live object");

return pointer_delta(next, addr);
} else if (addr == top()) {
return pointer_delta(end(), addr);
}
return oop(addr)->size();
}

inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t word_size) {
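With block_is_obj and block_size now dead-object aware, a region walk can step over a run of dead objects (whose classes may already be unloaded, making oop(addr)->size() unsafe) in a single stride via the prev mark bitmap. A minimal sketch of the iteration contract, assuming a HeapRegion* hr and an ExtendedOopClosure* cl:

HeapWord* cur = hr->bottom();
while (cur < hr->top()) {
  if (hr->block_is_obj(cur)) {
    oop(cur)->oop_iterate(cl);    // live: safe to touch the klass
  }
  cur += hr->block_size(cur);     // dead range: skipped in one step
}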
@ -931,7 +931,10 @@ void HeapRegionRemSet::add_strong_code_root(nmethod* nm) {

void HeapRegionRemSet::remove_strong_code_root(nmethod* nm) {
assert(nm != NULL, "sanity");
_code_roots.remove(nm);
assert_locked_or_safepoint(CodeCache_lock);

_code_roots.remove_lock_free(nm);

// Check that there were no duplicates
guarantee(!_code_roots.contains(nm), "duplicate entry found");
}

@ -285,37 +285,6 @@ void SATBMarkQueueSet::set_par_closure(int i, ObjectClosure* par_closure) {
_par_closures[i] = par_closure;
}

void SATBMarkQueueSet::iterate_closure_all_threads() {
for(JavaThread* t = Threads::first(); t; t = t->next()) {
t->satb_mark_queue().apply_closure_and_empty(_closure);
}
shared_satb_queue()->apply_closure_and_empty(_closure);
}

void SATBMarkQueueSet::par_iterate_closure_all_threads(uint worker) {
SharedHeap* sh = SharedHeap::heap();
int parity = sh->strong_roots_parity();

for(JavaThread* t = Threads::first(); t; t = t->next()) {
if (t->claim_oops_do(true, parity)) {
t->satb_mark_queue().apply_closure_and_empty(_par_closures[worker]);
}
}

// We also need to claim the VMThread so that its parity is updated
// otherwise the next call to Thread::possibly_parallel_oops_do inside
// a StrongRootsScope might skip the VMThread because it has a stale
// parity that matches the parity set by the StrongRootsScope
//
// Whichever worker succeeds in claiming the VMThread gets to do
// the shared queue.

VMThread* vmt = VMThread::vm_thread();
if (vmt->claim_oops_do(true, parity)) {
shared_satb_queue()->apply_closure_and_empty(_par_closures[worker]);
}
}

bool SATBMarkQueueSet::apply_closure_to_completed_buffer_work(bool par,
uint worker) {
BufferNode* nd = NULL;
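The removed helpers relied on the thread-claiming protocol that remark root processing now drives directly: claim_oops_do flips a thread's claim parity to the scope's parity, and only the first worker to flip it wins, so each SATB queue is processed exactly once per StrongRootsScope. In miniature (a sketch of the removed logic; cl is assumed to be an ObjectClosure*):

int parity = SharedHeap::heap()->strong_roots_parity();
for (JavaThread* t = Threads::first(); t != NULL; t = t->next()) {
  if (t->claim_oops_do(true /* is_par */, parity)) {
    t->satb_mark_queue().apply_closure_and_empty(cl);  // the winner processes t
  }
}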
@ -33,7 +33,9 @@ class SATBMarkQueueSet;

// A ptrQueue whose elements are "oops", pointers to object heads.
class ObjPtrQueue: public PtrQueue {
friend class Threads;
friend class SATBMarkQueueSet;
friend class G1RemarkThreadsClosure;

private:
// Filter out unwanted entries from the buffer.

@ -119,13 +121,6 @@ public:
// closures, one for each parallel GC thread.
void set_par_closure(int i, ObjectClosure* closure);

// Apply the registered closure to all entries on each
// currently-active buffer and then empty the buffer. It should only
// be called serially and at a safepoint.
void iterate_closure_all_threads();
// Parallel version of the above.
void par_iterate_closure_all_threads(uint worker);

// If there exists some completed buffer, pop it, then apply the
// registered closure to all its elements, and return true. If no
// completed buffers exist, return false.

@ -614,18 +614,21 @@ void ParNewGenTask::work(uint worker_id) {

KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(),
gch->rem_set()->klass_rem_set());

int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_ScavengeCodeCache;
CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
&par_scan_state.to_space_root_closure(),
false);

par_scan_state.start_strong_roots();
gch->gen_process_strong_roots(_gen->level(),
true, // Process younger gens, if any,
// as strong roots.
false, // no scope; this is parallel code
SharedHeap::ScanningOption(so),
&par_scan_state.to_space_root_closure(),
&par_scan_state.older_gen_closure(),
&klass_scan_closure);
gch->gen_process_roots(_gen->level(),
true, // Process younger gens, if any,
// as strong roots.
false, // no scope; this is parallel code
SharedHeap::SO_ScavengeCodeCache,
GenCollectedHeap::StrongAndWeakRoots,
&par_scan_state.to_space_root_closure(),
&par_scan_state.older_gen_closure(),
&cld_scan_closure);

par_scan_state.end_strong_roots();

// "evacuate followers".

@ -69,7 +69,7 @@ class ParScanThreadState {
ParScanWithoutBarrierClosure _to_space_closure; // scan_without_gc_barrier
ParScanWithBarrierClosure _old_gen_closure; // scan_with_gc_barrier
ParRootScanWithoutBarrierClosure _to_space_root_closure; // scan_root_without_gc_barrier
// One of these two will be passed to process_strong_roots, which will
// One of these two will be passed to process_roots, which will
// set its generation. The first is for two-gen configs where the
// old gen collects the perm gen; the second is for arbitrary configs.
// The second isn't used right now (it used to be used for the train, an
@ -59,7 +59,7 @@ void ThreadRootsMarkingTask::do_it(GCTaskManager* manager, uint which) {

PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
CLDToOopClosure mark_and_push_from_clds(&mark_and_push_closure, true);
CodeBlobToOopClosure mark_and_push_in_blobs(&mark_and_push_closure, /*do_marking=*/ true);
MarkingCodeBlobClosure mark_and_push_in_blobs(&mark_and_push_closure, !CodeBlobToOopClosure::FixRelocations);

if (_java_thread != NULL)
_java_thread->oops_do(

@ -100,7 +100,7 @@ void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
case threads:
{
ResourceMark rm;
CodeBlobToOopClosure each_active_code_blob(&mark_and_push_closure, /*do_marking=*/ true);
MarkingCodeBlobClosure each_active_code_blob(&mark_and_push_closure, !CodeBlobToOopClosure::FixRelocations);
CLDToOopClosure mark_and_push_from_cld(&mark_and_push_closure);
Threads::oops_do(&mark_and_push_closure, &mark_and_push_from_cld, &each_active_code_blob);
}

@ -536,14 +536,14 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
Universe::oops_do(mark_and_push_closure());
JNIHandles::oops_do(mark_and_push_closure()); // Global (strong) JNI handles
CLDToOopClosure mark_and_push_from_cld(mark_and_push_closure());
CodeBlobToOopClosure each_active_code_blob(mark_and_push_closure(), /*do_marking=*/ true);
MarkingCodeBlobClosure each_active_code_blob(mark_and_push_closure(), !CodeBlobToOopClosure::FixRelocations);
Threads::oops_do(mark_and_push_closure(), &mark_and_push_from_cld, &each_active_code_blob);
ObjectSynchronizer::oops_do(mark_and_push_closure());
FlatProfiler::oops_do(mark_and_push_closure());
Management::oops_do(mark_and_push_closure());
JvmtiExport::oops_do(mark_and_push_closure());
SystemDictionary::always_strong_oops_do(mark_and_push_closure());
ClassLoaderDataGraph::always_strong_oops_do(mark_and_push_closure(), follow_klass_closure(), true);
ClassLoaderDataGraph::always_strong_cld_do(follow_cld_closure());
// Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
//CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
}

@ -633,16 +633,16 @@ void PSMarkSweep::mark_sweep_phase3() {
FlatProfiler::oops_do(adjust_pointer_closure());
Management::oops_do(adjust_pointer_closure());
JvmtiExport::oops_do(adjust_pointer_closure());
// SO_AllClasses
SystemDictionary::oops_do(adjust_pointer_closure());
ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true);
ClassLoaderDataGraph::cld_do(adjust_cld_closure());

// Now adjust pointers in remaining weak roots. (All of which should
// have been cleared if they pointed to non-surviving objects.)
// Global (weak) JNI handles
JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());

CodeCache::oops_do(adjust_pointer_closure());
CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
CodeCache::blobs_do(&adjust_from_blobs);
StringTable::oops_do(adjust_pointer_closure());
ref_processor()->weak_oops_do(adjust_pointer_closure());
PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());

@ -40,11 +40,11 @@ class PSMarkSweep : public MarkSweep {
static CollectorCounters* _counters;

// Closure accessors
static OopClosure* mark_and_push_closure() { return &MarkSweep::mark_and_push_closure; }
static KlassClosure* follow_klass_closure() { return &MarkSweep::follow_klass_closure; }
static VoidClosure* follow_stack_closure() { return (VoidClosure*)&MarkSweep::follow_stack_closure; }
static OopClosure* adjust_pointer_closure() { return (OopClosure*)&MarkSweep::adjust_pointer_closure; }
static KlassClosure* adjust_klass_closure() { return &MarkSweep::adjust_klass_closure; }
static OopClosure* mark_and_push_closure() { return &MarkSweep::mark_and_push_closure; }
static VoidClosure* follow_stack_closure() { return (VoidClosure*)&MarkSweep::follow_stack_closure; }
static CLDClosure* follow_cld_closure() { return &MarkSweep::follow_cld_closure; }
static OopClosure* adjust_pointer_closure() { return (OopClosure*)&MarkSweep::adjust_pointer_closure; }
static CLDClosure* adjust_cld_closure() { return &MarkSweep::adjust_cld_closure; }
static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&MarkSweep::is_alive; }

debug_only(public:) // Used for PSParallelCompact debugging

@ -2474,7 +2474,6 @@ void PSParallelCompact::adjust_roots() {
FlatProfiler::oops_do(adjust_pointer_closure());
Management::oops_do(adjust_pointer_closure());
JvmtiExport::oops_do(adjust_pointer_closure());
// SO_AllClasses
SystemDictionary::oops_do(adjust_pointer_closure());
ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true);

@ -2483,7 +2482,8 @@ void PSParallelCompact::adjust_roots() {
// Global (weak) JNI handles
JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());

CodeCache::oops_do(adjust_pointer_closure());
CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
CodeCache::blobs_do(&adjust_from_blobs);
StringTable::oops_do(adjust_pointer_closure());
ref_processor()->weak_oops_do(adjust_pointer_closure());
// Roots were visited so references into the young gen in roots

@ -100,7 +100,7 @@ void ScavengeRootsTask::do_it(GCTaskManager* manager, uint which) {

case code_cache:
{
CodeBlobToOopClosure each_scavengable_code_blob(&roots_to_old_closure, /*do_marking=*/ true);
MarkingCodeBlobClosure each_scavengable_code_blob(&roots_to_old_closure, CodeBlobToOopClosure::FixRelocations);
CodeCache::scavenge_root_nmethods_do(&each_scavengable_code_blob);
}
break;

@ -123,7 +123,7 @@ void ThreadRootsTask::do_it(GCTaskManager* manager, uint which) {
PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
PSScavengeRootsClosure roots_closure(pm);
CLDClosure* roots_from_clds = NULL; // Not needed. All CLDs are already visited.
CodeBlobToOopClosure roots_in_blobs(&roots_closure, /*do_marking=*/ true);
MarkingCodeBlobClosure roots_in_blobs(&roots_closure, CodeBlobToOopClosure::FixRelocations);

if (_java_thread != NULL)
_java_thread->oops_do(&roots_closure, roots_from_clds, &roots_in_blobs);

@ -54,21 +54,14 @@ void MarkSweep::FollowRootClosure::do_oop(oop* p) { follow_root(p); }
void MarkSweep::FollowRootClosure::do_oop(narrowOop* p) { follow_root(p); }

MarkSweep::MarkAndPushClosure MarkSweep::mark_and_push_closure;
MarkSweep::FollowKlassClosure MarkSweep::follow_klass_closure;
MarkSweep::AdjustKlassClosure MarkSweep::adjust_klass_closure;
CLDToOopClosure MarkSweep::follow_cld_closure(&mark_and_push_closure);
CLDToOopClosure MarkSweep::adjust_cld_closure(&adjust_pointer_closure);

void MarkSweep::MarkAndPushClosure::do_oop(oop* p) { mark_and_push(p); }
void MarkSweep::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(p); }

void MarkSweep::FollowKlassClosure::do_klass(Klass* klass) {
klass->oops_do(&MarkSweep::mark_and_push_closure);
}
void MarkSweep::AdjustKlassClosure::do_klass(Klass* klass) {
klass->oops_do(&MarkSweep::adjust_pointer_closure);
}

void MarkSweep::follow_class_loader(ClassLoaderData* cld) {
cld->oops_do(&MarkSweep::mark_and_push_closure, &MarkSweep::follow_klass_closure, true);
MarkSweep::follow_cld_closure.do_cld(cld);
}

void MarkSweep::follow_stack() {

@ -65,17 +65,6 @@ class MarkSweep : AllStatic {
virtual void do_oop(narrowOop* p);
};

// The one and only place to start following the classes.
// Should only be applied to the ClassLoaderData klasses list.
class FollowKlassClosure : public KlassClosure {
public:
void do_klass(Klass* klass);
};
class AdjustKlassClosure : public KlassClosure {
public:
void do_klass(Klass* klass);
};

class FollowStackClosure: public VoidClosure {
public:
virtual void do_void();

@ -144,10 +133,10 @@ class MarkSweep : AllStatic {
static IsAliveClosure is_alive;
static FollowRootClosure follow_root_closure;
static MarkAndPushClosure mark_and_push_closure;
static FollowKlassClosure follow_klass_closure;
static FollowStackClosure follow_stack_closure;
static CLDToOopClosure follow_cld_closure;
static AdjustPointerClosure adjust_pointer_closure;
static AdjustKlassClosure adjust_klass_closure;
static CLDToOopClosure adjust_cld_closure;

// Accessors
static uint total_invocations() { return _total_invocations; }
@ -195,6 +195,43 @@ void VM_GenCollectFull::doit() {
gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
}

bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
#if INCLUDE_ALL_GCS
if (UseConcMarkSweepGC || UseG1GC) {
if (UseConcMarkSweepGC && CMSClassUnloadingEnabled) {
MetaspaceGC::set_should_concurrent_collect(true);
} else if (UseG1GC) {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
g1h->g1_policy()->set_initiate_conc_mark_if_possible();

GCCauseSetter x(g1h, _gc_cause);

// At this point we are supposed to start a concurrent cycle. We
// will do so if one is not already in progress.
bool should_start = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);

if (should_start) {
double pause_target = g1h->g1_policy()->max_pause_time_ms();
g1h->do_collection_pause_at_safepoint(pause_target);
}
}

return true;
}
#endif
return false;
}

static void log_metaspace_alloc_failure_for_concurrent_GC() {
if (Verbose && PrintGCDetails) {
if (UseConcMarkSweepGC) {
gclog_or_tty->print_cr("\nCMS full GC for Metaspace");
} else if (UseG1GC) {
gclog_or_tty->print_cr("\nG1 full GC for Metaspace");
}
}
}

void VM_CollectForMetadataAllocation::doit() {
SvcGCMarker sgcm(SvcGCMarker::FULL);

@ -206,54 +243,57 @@ void VM_CollectForMetadataAllocation::doit() {
// a GC that freed space for the allocation.
if (!MetadataAllocationFailALot) {
_result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
}

if (_result == NULL) {
if (UseConcMarkSweepGC) {
if (CMSClassUnloadingEnabled) {
MetaspaceGC::set_should_concurrent_collect(true);
}
// For CMS expand since the collection is going to be concurrent.
_result =
_loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
}
if (_result == NULL) {
// Don't clear the soft refs yet.
if (Verbose && PrintGCDetails && UseConcMarkSweepGC) {
gclog_or_tty->print_cr("\nCMS full GC for Metaspace");
}
heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
// After a GC try to allocate without expanding. Could fail
// and expansion will be tried below.
_result =
_loader_data->metaspace_non_null()->allocate(_size, _mdtype);
}
if (_result == NULL) {
// If still failing, allow the Metaspace to expand.
// See delta_capacity_until_GC() for explanation of the
// amount of the expansion.
// This should work unless there really is no more space
// or a MaxMetaspaceSize has been specified on the command line.
_result =
_loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
if (_result == NULL) {
// If expansion failed, do a last-ditch collection and try allocating
// again. A last-ditch collection will clear softrefs. This
// behavior is similar to the last-ditch collection done for perm
// gen when it was full and a collection for failed allocation
// did not free perm gen space.
heap->collect_as_vm_thread(GCCause::_last_ditch_collection);
_result =
_loader_data->metaspace_non_null()->allocate(_size, _mdtype);
}
}
if (Verbose && PrintGCDetails && _result == NULL) {
gclog_or_tty->print_cr("\nAfter Metaspace GC failed to allocate size "
SIZE_FORMAT, _size);
if (_result != NULL) {
return;
}
}

if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
if (initiate_concurrent_GC()) {
// For CMS and G1 expand since the collection is going to be concurrent.
_result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
if (_result != NULL) {
return;
}

log_metaspace_alloc_failure_for_concurrent_GC();
}

// Don't clear the soft refs yet.
heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
// After a GC try to allocate without expanding. Could fail
// and expansion will be tried below.
_result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
if (_result != NULL) {
return;
}

// If still failing, allow the Metaspace to expand.
// See delta_capacity_until_GC() for explanation of the
// amount of the expansion.
// This should work unless there really is no more space
// or a MaxMetaspaceSize has been specified on the command line.
_result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
if (_result != NULL) {
return;
}

// If expansion failed, do a last-ditch collection and try allocating
// again. A last-ditch collection will clear softrefs. This
// behavior is similar to the last-ditch collection done for perm
// gen when it was full and a collection for failed allocation
// did not free perm gen space.
heap->collect_as_vm_thread(GCCause::_last_ditch_collection);
_result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
if (_result != NULL) {
return;
}

if (Verbose && PrintGCDetails) {
gclog_or_tty->print_cr("\nAfter Metaspace GC failed to allocate size "
SIZE_FORMAT, _size);
}

if (GC_locker::is_active_and_needs_gc()) {
set_gc_locked();
}
}
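The rewrite flattens the nested retry logic into a ladder of attempt-then-recover steps with early returns. The generic shape, as a sketch (not VM code; the step names in the comment only echo the stages above):

typedef void* (*AllocFn)();
typedef void  (*StepFn)();

void* retry_ladder(AllocFn alloc, StepFn* steps, int n_steps) {
  void* result = alloc();
  for (int i = 0; result == NULL && i < n_steps; i++) {
    steps[i]();        // e.g. concurrent expand, threshold GC, last-ditch GC
    result = alloc();  // every recovery step is followed by a fresh attempt
  }
  return result;
}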
@ -217,6 +217,8 @@ class VM_CollectForMetadataAllocation: public VM_GC_Operation {
virtual VMOp_Type type() const { return VMOp_CollectForMetadataAllocation; }
virtual void doit();
MetaWord* result() const { return _result; }

bool initiate_concurrent_GC();
};

class SvcGCMarker : public StackObj {

@ -429,7 +429,7 @@ void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
OopsInGenClosure* cl,
CardTableRS* ct) {
if (!mr.is_empty()) {
// Caller (process_strong_roots()) claims that all GC threads
// Caller (process_roots()) claims that all GC threads
// execute this call. With UseDynamicNumberOfGCThreads now all
// active GC threads execute this call. The number of active GC
// threads needs to be passed to par_non_clean_card_iterate_work()

@ -438,7 +438,7 @@ void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
// This is an example of where n_par_threads() is used instead
// of workers()->active_workers(). n_par_threads can be set to 0 to
// turn off parallelism. For example when this code is called as
// part of verification and SharedHeap::process_strong_roots() is being
// part of verification and SharedHeap::process_roots() is being
// used, then n_par_threads() may have been set to 0. active_workers
// is not overloaded with the meaning that it is a switch to disable
// parallelism and so keeps the meaning of the number of

@ -614,6 +614,9 @@ void DefNewGeneration::collect(bool full,

KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
gch->rem_set()->klass_rem_set());
CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
&fsc_with_no_gc_barrier,
false);

set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,

@ -623,16 +626,15 @@ void DefNewGeneration::collect(bool full,
assert(gch->no_allocs_since_save_marks(0),
"save marks have not been newly set.");

int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_ScavengeCodeCache;

gch->gen_process_strong_roots(_level,
true, // Process younger gens, if any,
// as strong roots.
true, // activate StrongRootsScope
SharedHeap::ScanningOption(so),
&fsc_with_no_gc_barrier,
&fsc_with_gc_barrier,
&klass_scan_closure);
gch->gen_process_roots(_level,
true, // Process younger gens, if any,
// as strong roots.
true, // activate StrongRootsScope
SharedHeap::SO_ScavengeCodeCache,
GenCollectedHeap::StrongAndWeakRoots,
&fsc_with_no_gc_barrier,
&fsc_with_gc_barrier,
&cld_scan_closure);

// "evacuate followers".
evacuate_followers.do_void();
@ -61,8 +61,8 @@
GenCollectedHeap* GenCollectedHeap::_gch;
NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)

// The set of potentially parallel tasks in strong root scanning.
enum GCH_process_strong_roots_tasks {
// The set of potentially parallel tasks in root scanning.
enum GCH_strong_roots_tasks {
// We probably want to parallelize both of these internally, but for now...
GCH_PS_younger_gens,
// Leave this one last.

@ -72,11 +72,11 @@ enum GCH_process_strong_roots_tasks {
GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
SharedHeap(policy),
_gen_policy(policy),
_gen_process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
_gen_process_roots_tasks(new SubTasksDone(GCH_PS_NumElements)),
_full_collections_completed(0)
{
if (_gen_process_strong_tasks == NULL ||
!_gen_process_strong_tasks->valid()) {
if (_gen_process_roots_tasks == NULL ||
!_gen_process_roots_tasks->valid()) {
vm_exit_during_initialization("Failed necessary allocation.");
}
assert(policy != NULL, "Sanity check");

@ -584,24 +584,29 @@ HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab)

void GenCollectedHeap::set_par_threads(uint t) {
SharedHeap::set_par_threads(t);
_gen_process_strong_tasks->set_n_threads(t);
_gen_process_roots_tasks->set_n_threads(t);
}

void GenCollectedHeap::
gen_process_strong_roots(int level,
bool younger_gens_as_roots,
bool activate_scope,
SharedHeap::ScanningOption so,
OopsInGenClosure* not_older_gens,
OopsInGenClosure* older_gens,
KlassClosure* klass_closure) {
// General strong roots.
gen_process_roots(int level,
bool younger_gens_as_roots,
bool activate_scope,
SharedHeap::ScanningOption so,
OopsInGenClosure* not_older_gens,
OopsInGenClosure* weak_roots,
OopsInGenClosure* older_gens,
CLDClosure* cld_closure,
CLDClosure* weak_cld_closure,
CodeBlobClosure* code_closure) {

SharedHeap::process_strong_roots(activate_scope, so,
not_older_gens, klass_closure);
// General roots.
SharedHeap::process_roots(activate_scope, so,
not_older_gens, weak_roots,
cld_closure, weak_cld_closure,
code_closure);

if (younger_gens_as_roots) {
if (!_gen_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
if (!_gen_process_roots_tasks->is_task_claimed(GCH_PS_younger_gens)) {
for (int i = 0; i < level; i++) {
not_older_gens->set_generation(_gens[i]);
_gens[i]->oop_iterate(not_older_gens);

@ -617,7 +622,38 @@ gen_process_strong_roots(int level,
older_gens->reset_generation();
}

_gen_process_strong_tasks->all_tasks_completed();
_gen_process_roots_tasks->all_tasks_completed();
}

void GenCollectedHeap::
gen_process_roots(int level,
bool younger_gens_as_roots,
bool activate_scope,
SharedHeap::ScanningOption so,
bool only_strong_roots,
OopsInGenClosure* not_older_gens,
OopsInGenClosure* older_gens,
CLDClosure* cld_closure) {

const bool is_adjust_phase = !only_strong_roots && !younger_gens_as_roots;

bool is_moving_collection = false;
if (level == 0 || is_adjust_phase) {
// young collections are always moving
is_moving_collection = true;
}

MarkingCodeBlobClosure mark_code_closure(not_older_gens, is_moving_collection);
CodeBlobClosure* code_closure = &mark_code_closure;

gen_process_roots(level,
younger_gens_as_roots,
activate_scope, so,
not_older_gens, only_strong_roots ? NULL : not_older_gens,
older_gens,
cld_closure, only_strong_roots ? NULL : cld_closure,
code_closure);

}

void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {

@ -78,9 +78,9 @@ public:
unsigned int _full_collections_completed;

// Data structure for claiming the (potentially) parallel tasks in
// (gen-specific) strong roots processing.
SubTasksDone* _gen_process_strong_tasks;
SubTasksDone* gen_process_strong_tasks() { return _gen_process_strong_tasks; }
// (gen-specific) roots processing.
SubTasksDone* _gen_process_roots_tasks;
SubTasksDone* gen_process_roots_tasks() { return _gen_process_roots_tasks; }

// In block contents verification, the number of header words to skip
NOT_PRODUCT(static size_t _skip_header_HeapWords;)

@ -403,18 +403,30 @@ public:
// The "so" argument determines which of the roots
// the closure is applied to:
// "SO_None" does none;
// "SO_AllClasses" applies the closure to all entries in the SystemDictionary;
// "SO_SystemClasses" to all the "system" classes and loaders;
// "SO_Strings" applies the closure to all entries in the StringTable.
void gen_process_strong_roots(int level,
bool younger_gens_as_roots,
// The remaining arguments are in an order
// consistent with SharedHeap::process_strong_roots:
bool activate_scope,
SharedHeap::ScanningOption so,
OopsInGenClosure* not_older_gens,
OopsInGenClosure* older_gens,
KlassClosure* klass_closure);
private:
void gen_process_roots(int level,
bool younger_gens_as_roots,
bool activate_scope,
SharedHeap::ScanningOption so,
OopsInGenClosure* not_older_gens,
OopsInGenClosure* weak_roots,
OopsInGenClosure* older_gens,
CLDClosure* cld_closure,
CLDClosure* weak_cld_closure,
CodeBlobClosure* code_closure);

public:
static const bool StrongAndWeakRoots = false;
static const bool StrongRootsOnly = true;

void gen_process_roots(int level,
bool younger_gens_as_roots,
bool activate_scope,
SharedHeap::ScanningOption so,
bool only_strong_roots,
OopsInGenClosure* not_older_gens,
OopsInGenClosure* older_gens,
CLDClosure* cld_closure);

// Apply "root_closure" to all the weak roots of the system.
// These include JNI weak roots, string table,
@ -207,13 +207,14 @@ void GenMarkSweep::mark_sweep_phase1(int level,
// Need new claim bits before marking starts.
ClassLoaderDataGraph::clear_claimed_marks();

gch->gen_process_strong_roots(level,
false, // Younger gens are not roots.
true, // activate StrongRootsScope
SharedHeap::SO_SystemClasses,
&follow_root_closure,
&follow_root_closure,
&follow_klass_closure);
gch->gen_process_roots(level,
false, // Younger gens are not roots.
true, // activate StrongRootsScope
SharedHeap::SO_None,
GenCollectedHeap::StrongRootsOnly,
&follow_root_closure,
&follow_root_closure,
&follow_cld_closure);

// Process reference objects found during marking
{

@ -291,13 +292,14 @@ void GenMarkSweep::mark_sweep_phase3(int level) {
// are run.
adjust_pointer_closure.set_orig_generation(gch->get_gen(level));

gch->gen_process_strong_roots(level,
false, // Younger gens are not roots.
true, // activate StrongRootsScope
SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_AllCodeCache,
&adjust_pointer_closure,
&adjust_pointer_closure,
&adjust_klass_closure);
gch->gen_process_roots(level,
false, // Younger gens are not roots.
true, // activate StrongRootsScope
SharedHeap::SO_AllCodeCache,
GenCollectedHeap::StrongAndWeakRoots,
&adjust_pointer_closure,
&adjust_pointer_closure,
&adjust_cld_closure);

gch->gen_process_weak_roots(&adjust_pointer_closure);
@ -35,6 +35,10 @@ void CLDToOopClosure::do_cld(ClassLoaderData* cld) {
cld->oops_do(_oop_closure, &_klass_closure, _must_claim_cld);
}

void CLDToKlassAndOopClosure::do_cld(ClassLoaderData* cld) {
cld->oops_do(_oop_closure, _klass_closure, _must_claim_cld);
}

void ObjectToOopClosure::do_object(oop obj) {
obj->oop_iterate(_cl);
}

@ -43,6 +47,20 @@ void VoidClosure::do_void() {
ShouldNotCallThis();
}

void CodeBlobToOopClosure::do_nmethod(nmethod* nm) {
nm->oops_do(_cl);
if (_fix_relocations) {
nm->fix_oop_relocations();
}
}

void CodeBlobToOopClosure::do_code_blob(CodeBlob* cb) {
nmethod* nm = cb->as_nmethod_or_null();
if (nm != NULL) {
do_nmethod(nm);
}
}

MarkingCodeBlobClosure::MarkScope::MarkScope(bool activate)
: _active(activate)
{

@ -55,32 +73,7 @@ MarkingCodeBlobClosure::MarkScope::~MarkScope() {

void MarkingCodeBlobClosure::do_code_blob(CodeBlob* cb) {
nmethod* nm = cb->as_nmethod_or_null();
if (nm == NULL) return;
if (!nm->test_set_oops_do_mark()) {
NOT_PRODUCT(if (TraceScavenge) nm->print_on(tty, "oops_do, 1st visit\n"));
do_newly_marked_nmethod(nm);
} else {
NOT_PRODUCT(if (TraceScavenge) nm->print_on(tty, "oops_do, skipped on 2nd visit\n"));
if (nm != NULL && !nm->test_set_oops_do_mark()) {
do_nmethod(nm);
}
}

void CodeBlobToOopClosure::do_newly_marked_nmethod(nmethod* nm) {
nm->oops_do(_cl, /*allow_zombie=*/ false);
}

void CodeBlobToOopClosure::do_code_blob(CodeBlob* cb) {
if (!_do_marking) {
nmethod* nm = cb->as_nmethod_or_null();
NOT_PRODUCT(if (TraceScavenge && Verbose && nm != NULL) nm->print_on(tty, "oops_do, unmarked visit\n"));
// This assert won't work, since there are lots of mini-passes
// (mostly in debug mode) that co-exist with marking phases.
//assert(!(cb->is_nmethod() && ((nmethod*)cb)->test_oops_do_mark()), "found marked nmethod during mark-free phase");
if (nm != NULL) {
nm->oops_do(_cl);
}
} else {
MarkingCodeBlobClosure::do_code_blob(cb);
}
}
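After this refactoring, marking is expressed by the subclass rather than a boolean flag, and relocation fixing is selected with the FixRelocations constant. Construction now looks like this (illustrative; my_oop_cl stands in for whatever OopClosure the caller already has):

// visit a code blob's oops on every call, fixing relocations afterwards
CodeBlobToOopClosure blob_cl(&my_oop_cl, CodeBlobToOopClosure::FixRelocations);

// visit each nmethod at most once per marking cycle, without fixing
MarkingCodeBlobClosure marking_cl(&my_oop_cl, !CodeBlobToOopClosure::FixRelocations);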
@ -70,8 +70,8 @@ class ExtendedOopClosure : public OopClosure {
|
||||
//
|
||||
// Providing default implementations of the _nv functions unfortunately
// removes the compile-time safeness, but reduces the clutter for the
// ExtendedOopClosures that don't need to walk the metadata. Currently,
// only CMS needs these.
// ExtendedOopClosures that don't need to walk the metadata.
// Currently, only CMS and G1 need these.

  virtual bool do_metadata() { return do_metadata_nv(); }
  bool do_metadata_v()       { return do_metadata(); }

@@ -126,15 +126,16 @@ class KlassToOopClosure : public KlassClosure {
    _oop_closure = oop_closure;
  }

 public:
  KlassToOopClosure(OopClosure* oop_closure = NULL) : _oop_closure(oop_closure) {}

  virtual void do_klass(Klass* k);
};

class CLDToOopClosure : public CLDClosure {
  OopClosure*       _oop_closure;
  KlassToOopClosure _klass_closure;
  bool              _must_claim_cld;

 public:
  CLDToOopClosure(OopClosure* oop_closure, bool must_claim_cld = true) :

@@ -145,6 +146,23 @@ class CLDToOopClosure : public CLDClosure {
  void do_cld(ClassLoaderData* cld);
};

class CLDToKlassAndOopClosure : public CLDClosure {
  friend class SharedHeap;
  friend class G1CollectedHeap;
 protected:
  OopClosure*   _oop_closure;
  KlassClosure* _klass_closure;
  bool          _must_claim_cld;
 public:
  CLDToKlassAndOopClosure(KlassClosure* klass_closure,
                          OopClosure*   oop_closure,
                          bool          must_claim_cld) :
      _oop_closure(oop_closure),
      _klass_closure(klass_closure),
      _must_claim_cld(must_claim_cld) {}
  void do_cld(ClassLoaderData* cld);
};

// The base class for all concurrent marking closures
// that participate in class unloading.
// It's used to proxy through the metadata to the oops defined in them.

@@ -246,14 +264,26 @@ class CodeBlobClosure : public Closure {
  virtual void do_code_blob(CodeBlob* cb) = 0;
};

class MarkingCodeBlobClosure : public CodeBlobClosure {
// Applies an oop closure to all ref fields in code blobs
// iterated over in an object iteration.
class CodeBlobToOopClosure : public CodeBlobClosure {
  OopClosure* _cl;
  bool _fix_relocations;
 protected:
  void do_nmethod(nmethod* nm);
 public:
  CodeBlobToOopClosure(OopClosure* cl, bool fix_relocations) : _cl(cl), _fix_relocations(fix_relocations) {}
  virtual void do_code_blob(CodeBlob* cb);

  const static bool FixRelocations = true;
};

class MarkingCodeBlobClosure : public CodeBlobToOopClosure {
 public:
  MarkingCodeBlobClosure(OopClosure* cl, bool fix_relocations) : CodeBlobToOopClosure(cl, fix_relocations) {}
  // Called for each code blob, but at most once per unique blob.
  virtual void do_newly_marked_nmethod(nmethod* nm) = 0;

  virtual void do_code_blob(CodeBlob* cb);
    // = { if (!nmethod(cb)->test_set_oops_do_mark()) do_newly_marked_nmethod(cb); }

  class MarkScope : public StackObj {
  protected:

@@ -266,23 +296,6 @@ class MarkingCodeBlobClosure : public CodeBlobClosure {
  };
};

// Applies an oop closure to all ref fields in code blobs
// iterated over in an object iteration.
class CodeBlobToOopClosure: public MarkingCodeBlobClosure {
  OopClosure* _cl;
  bool _do_marking;
 public:
  virtual void do_newly_marked_nmethod(nmethod* cb);
    // = { cb->oops_do(_cl); }
  virtual void do_code_blob(CodeBlob* cb);
    // = { if (_do_marking) super::do_code_blob(cb); else cb->oops_do(_cl); }
  CodeBlobToOopClosure(OopClosure* cl, bool do_marking)
    : _cl(cl), _do_marking(do_marking) {}
};

// MonitorClosure is used for iterating over monitors in the monitors cache

class ObjectMonitor;
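The rework above replaces the old do_marking flag with a named FixRelocations constant, so call sites now state whether relocated oop sites in a code blob should be updated. A minimal sketch of how the two variants might be constructed (illustrative only; MyOopClosure is a hypothetical closure, not part of this change):

    // Hypothetical oop closure, for illustration only.
    class MyOopClosure : public OopClosure {
     public:
      virtual void do_oop(oop* p)       { /* visit *p */ }
      virtual void do_oop(narrowOop* p) { /* visit compressed *p */ }
    };

    MyOopClosure cl;
    // Visit oops in code blobs and also fix up relocated oop sites:
    CodeBlobToOopClosure fixing_blobs(&cl, CodeBlobToOopClosure::FixRelocations);
    // Visit oops without touching relocations (e.g. verification passes):
    CodeBlobToOopClosure reading_blobs(&cl, !CodeBlobToOopClosure::FixRelocations);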
@@ -25,6 +25,7 @@
#ifndef SHARE_VM_MEMORY_METADATAFACTORY_HPP
#define SHARE_VM_MEMORY_METADATAFACTORY_HPP

#include "classfile/classLoaderData.hpp"
#include "utilities/array.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/globalDefinitions.hpp"

@@ -29,6 +29,7 @@
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/sharedHeap.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/java.hpp"
#include "services/management.hpp"

@@ -39,8 +40,8 @@ PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

SharedHeap* SharedHeap::_sh;

// The set of potentially parallel tasks in strong root scanning.
enum SH_process_strong_roots_tasks {
// The set of potentially parallel tasks in root scanning.
enum SH_process_roots_tasks {
  SH_PS_Universe_oops_do,
  SH_PS_JNIHandles_oops_do,
  SH_PS_ObjectSynchronizer_oops_do,

@@ -58,6 +59,7 @@ SharedHeap::SharedHeap(CollectorPolicy* policy_) :
  CollectedHeap(),
  _collector_policy(policy_),
  _rem_set(NULL),
  _strong_roots_scope(NULL),
  _strong_roots_parity(0),
  _process_strong_tasks(new SubTasksDone(SH_PS_NumElements)),
  _workers(NULL)

@@ -114,6 +116,19 @@ public:
static AssertNonScavengableClosure assert_is_non_scavengable_closure;
#endif

SharedHeap::StrongRootsScope* SharedHeap::active_strong_roots_scope() const {
  return _strong_roots_scope;
}
void SharedHeap::register_strong_roots_scope(SharedHeap::StrongRootsScope* scope) {
  assert(_strong_roots_scope == NULL, "Should only have one StrongRootsScope active");
  assert(scope != NULL, "Illegal argument");
  _strong_roots_scope = scope;
}
void SharedHeap::unregister_strong_roots_scope(SharedHeap::StrongRootsScope* scope) {
  assert(_strong_roots_scope == scope, "Wrong scope unregistered");
  _strong_roots_scope = NULL;
}

void SharedHeap::change_strong_roots_parity() {
  // Also set the new collection parity.
  assert(_strong_roots_parity >= 0 && _strong_roots_parity <= 2,

@@ -124,112 +139,161 @@ void SharedHeap::change_strong_roots_parity() {
         "Not in range.");
}

SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* outer, bool activate)
  : MarkScope(activate)
SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* heap, bool activate)
  : MarkScope(activate), _sh(heap), _n_workers_done_with_threads(0)
{
  if (_active) {
    outer->change_strong_roots_parity();
    _sh->register_strong_roots_scope(this);
    _sh->change_strong_roots_parity();
    // Zero the claimed high water mark in the StringTable
    StringTable::clear_parallel_claimed_index();
  }
}

SharedHeap::StrongRootsScope::~StrongRootsScope() {
  // nothing particular
  if (_active) {
    _sh->unregister_strong_roots_scope(this);
  }
}

void SharedHeap::process_strong_roots(bool activate_scope,
                                      ScanningOption so,
                                      OopClosure* roots,
                                      KlassClosure* klass_closure) {
Monitor* SharedHeap::StrongRootsScope::_lock = new Monitor(Mutex::leaf, "StrongRootsScope lock", false);

void SharedHeap::StrongRootsScope::mark_worker_done_with_threads(uint n_workers) {
  // The Thread work barrier is only needed by G1.
  // No need to use the barrier if this is single-threaded code.
  if (UseG1GC && n_workers > 0) {
    uint new_value = (uint)Atomic::add(1, &_n_workers_done_with_threads);
    if (new_value == n_workers) {
      // This thread is last. Notify the others.
      MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
      _lock->notify_all();
    }
  }
}

void SharedHeap::StrongRootsScope::wait_until_all_workers_done_with_threads(uint n_workers) {
  // No need to use the barrier if this is single-threaded code.
  if (n_workers > 0 && (uint)_n_workers_done_with_threads != n_workers) {
    MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
    while ((uint)_n_workers_done_with_threads != n_workers) {
      _lock->wait(Mutex::_no_safepoint_check_flag, 0, false);
    }
  }
}
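mark_worker_done_with_threads and wait_until_all_workers_done_with_threads together form a simple counting barrier: each worker increments the counter once after finishing the Threads iteration, the last one notifies, and waiters block on the shared monitor until the count reaches n_workers. A hedged sketch of the intended call pattern inside a parallel root-scanning task (the function name here is hypothetical):

    // Sketch only: the shape of a G1-style worker body using the barrier.
    void hypothetical_worker_task(SharedHeap* sh, uint n_workers) {
      // ... scan thread stacks, claiming strong CLDs and nmethods ...
      SharedHeap::StrongRootsScope* scope = sh->active_strong_roots_scope();
      // This worker will publish no more strong CLDs/nmethods:
      scope->mark_worker_done_with_threads(n_workers);
      // ... process other strong roots in the meantime ...
      // Before handling weak CLDs/nmethods, wait for every worker:
      scope->wait_until_all_workers_done_with_threads(n_workers);
      // ... weak CLD/nmethod processing is now safe ...
    }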

void SharedHeap::process_roots(bool activate_scope,
                               ScanningOption so,
                               OopClosure* strong_roots,
                               OopClosure* weak_roots,
                               CLDClosure* strong_cld_closure,
                               CLDClosure* weak_cld_closure,
                               CodeBlobClosure* code_roots) {
  StrongRootsScope srs(this, activate_scope);

  // General strong roots.
  // General roots.
  assert(_strong_roots_parity != 0, "must have called prologue code");
  assert(code_roots != NULL, "code root closure should always be set");
  // _n_termination for _process_strong_tasks should be set upstream
  // in a method not running in a GC worker. Otherwise the GC worker
  // could be trying to change the termination condition while the task
  // is executing in another GC worker.

  // Iterating over the CLDG and the Threads is done early to allow G1 to
  // first process the strong CLDs and nmethods and then, after a barrier,
  // let the thread process the weak CLDs and nmethods.

  if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) {
    ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);
  }

  // Some CLDs contained in the thread frames should be considered strong.
  // Don't process them if they will be processed during the ClassLoaderDataGraph phase.
  CLDClosure* roots_from_clds_p = (strong_cld_closure != weak_cld_closure) ? strong_cld_closure : NULL;
  // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
  CodeBlobClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;

  Threads::possibly_parallel_oops_do(strong_roots, roots_from_clds_p, roots_from_code_p);

  // This is the point where this worker thread will not find more strong CLDs/nmethods.
  // Report this so G1 can synchronize the strong and weak CLDs/nmethods processing.
  active_strong_roots_scope()->mark_worker_done_with_threads(n_par_threads());

  if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
    Universe::oops_do(roots);
    Universe::oops_do(strong_roots);
  }
  // Global (strong) JNI handles
  if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
    JNIHandles::oops_do(roots);

  CodeBlobToOopClosure code_roots(roots, true);

  CLDToOopClosure roots_from_clds(roots);
  // If we limit class scanning to SO_SystemClasses we need to apply a CLD closure to
  // CLDs which are strongly reachable from the thread stacks.
  CLDToOopClosure* roots_from_clds_p = ((so & SO_SystemClasses) ? &roots_from_clds : NULL);
  // All threads execute this; the individual threads are task groups.
  if (CollectedHeap::use_parallel_gc_threads()) {
    Threads::possibly_parallel_oops_do(roots, roots_from_clds_p, &code_roots);
  } else {
    Threads::oops_do(roots, roots_from_clds_p, &code_roots);
  }
    JNIHandles::oops_do(strong_roots);

  if (!_process_strong_tasks->is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
    ObjectSynchronizer::oops_do(roots);
    ObjectSynchronizer::oops_do(strong_roots);
  if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do))
    FlatProfiler::oops_do(roots);
    FlatProfiler::oops_do(strong_roots);
  if (!_process_strong_tasks->is_task_claimed(SH_PS_Management_oops_do))
    Management::oops_do(roots);
    Management::oops_do(strong_roots);
  if (!_process_strong_tasks->is_task_claimed(SH_PS_jvmti_oops_do))
    JvmtiExport::oops_do(roots);
    JvmtiExport::oops_do(strong_roots);

  if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
    if (so & SO_AllClasses) {
      SystemDictionary::oops_do(roots);
    } else if (so & SO_SystemClasses) {
      SystemDictionary::always_strong_oops_do(roots);
    } else {
      fatal("We should always have selected either SO_AllClasses or SO_SystemClasses");
    }
  }

  if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) {
    if (so & SO_AllClasses) {
      ClassLoaderDataGraph::oops_do(roots, klass_closure, /* must_claim */ false);
    } else if (so & SO_SystemClasses) {
      ClassLoaderDataGraph::always_strong_oops_do(roots, klass_closure, /* must_claim */ true);
    }
    SystemDictionary::roots_oops_do(strong_roots, weak_roots);
  }

  // All threads execute the following. A specific chunk of buckets
  // from the StringTable are the individual tasks.
  if (so & SO_Strings) {
    if (weak_roots != NULL) {
    if (CollectedHeap::use_parallel_gc_threads()) {
      StringTable::possibly_parallel_oops_do(roots);
      StringTable::possibly_parallel_oops_do(weak_roots);
    } else {
      StringTable::oops_do(roots);
      StringTable::oops_do(weak_roots);
    }
  }

  if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
    if (so & SO_ScavengeCodeCache) {
      assert(&code_roots != NULL, "must supply closure for code cache");
      assert(code_roots != NULL, "must supply closure for code cache");

      // We only visit parts of the CodeCache when scavenging.
      CodeCache::scavenge_root_nmethods_do(&code_roots);
      CodeCache::scavenge_root_nmethods_do(code_roots);
    }
    if (so & SO_AllCodeCache) {
      assert(&code_roots != NULL, "must supply closure for code cache");
      assert(code_roots != NULL, "must supply closure for code cache");

      // CMSCollector uses this to do intermediate-strength collections.
      // We scan the entire code cache, since CodeCache::do_unloading is not called.
      CodeCache::blobs_do(&code_roots);
      CodeCache::blobs_do(code_roots);
    }
    // Verify that the code cache contents are not subject to
    // movement by a scavenging collection.
    DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, /*do_marking=*/ false));
    DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
    DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
  }

  _process_strong_tasks->all_tasks_completed();
}

void SharedHeap::process_all_roots(bool activate_scope,
                                   ScanningOption so,
                                   OopClosure* roots,
                                   CLDClosure* cld_closure,
                                   CodeBlobClosure* code_closure) {
  process_roots(activate_scope, so,
                roots, roots,
                cld_closure, cld_closure,
                code_closure);
}

void SharedHeap::process_strong_roots(bool activate_scope,
                                      ScanningOption so,
                                      OopClosure* roots,
                                      CLDClosure* cld_closure,
                                      CodeBlobClosure* code_closure) {
  process_roots(activate_scope, so,
                roots, NULL,
                cld_closure, NULL,
                code_closure);
}
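process_all_roots and process_strong_roots are now thin wrappers that encode the strong/weak split in their arguments: passing the same closures twice visits everything, while passing NULL in the weak positions restricts the pass to strong roots. A sketch of the resulting call shapes (the closure variables are hypothetical):

    // Sketch only; root_cl, cld_cl and blob_cl are hypothetical closures.
    // Strong-only pass: the weak oop and CLD closures become NULL internally.
    sh->process_strong_roots(true /* activate_scope */, SharedHeap::SO_None,
                             &root_cl, &cld_cl, &blob_cl);
    // Full pass: the same closures serve as both the strong and weak versions.
    sh->process_all_roots(true /* activate_scope */, SharedHeap::SO_AllCodeCache,
                          &root_cl, &cld_cl, &blob_cl);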


class AlwaysTrueClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) { return true; }

@@ -69,14 +69,10 @@ class KlassClosure;
// number of active GC workers. CompactibleFreeListSpace and Space
// have SequentialSubTasksDone's.
// Example of using SubTasksDone and SequentialSubTasksDone
// G1CollectedHeap::g1_process_strong_roots() calls
//  process_strong_roots(false, // no scoping; this is parallel code
//                       is_scavenging, so,
//                       &buf_scan_non_heap_roots,
//                       &eager_scan_code_roots);
// which delegates to SharedHeap::process_strong_roots() and uses
// G1CollectedHeap::g1_process_roots()
//  to SharedHeap::process_roots() and uses
// SubTasksDone* _process_strong_tasks to claim tasks.
// process_strong_roots() calls
// process_roots() calls
// rem_set()->younger_refs_iterate()
// to scan the card table and which eventually calls down into
// CardTableModRefBS::par_non_clean_card_iterate_work(). This method

@@ -182,12 +178,12 @@ public:
  // task. (This also means that a parallel thread may only call
  // process_strong_roots once.)
  //
  // For calls to process_strong_roots by sequential code, the parity is
  // For calls to process_roots by sequential code, the parity is
  // updated automatically.
  //
  // The idea is that objects representing fine-grained tasks, such as
  // threads, will contain a "parity" field. A task will be claimed in the
  // current "process_strong_roots" call only if its parity field is the
  // current "process_roots" call only if its parity field is the
  // same as the "strong_roots_parity"; task claiming is accomplished by
  // updating the parity field to the strong_roots_parity with a CAS.
  //

@@ -198,27 +194,44 @@ public:
  // c) to never return a distinguished value (zero) with which such
  //    task-claiming variables may be initialized, to indicate "never
  //    claimed".
 private:
  void change_strong_roots_parity();
 public:
  int strong_roots_parity() { return _strong_roots_parity; }

  // Call these in sequential code around process_strong_roots.
  // Call these in sequential code around process_roots.
  // strong_roots_prologue calls change_strong_roots_parity, if
  // parallel tasks are enabled.
  class StrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
  public:
    StrongRootsScope(SharedHeap* outer, bool activate = true);
    // Used to implement the Thread work barrier.
    static Monitor* _lock;

    SharedHeap* _sh;
    volatile jint _n_workers_done_with_threads;

  public:
    StrongRootsScope(SharedHeap* heap, bool activate = true);
    ~StrongRootsScope();

    // Mark that this thread is done with the Threads work.
    void mark_worker_done_with_threads(uint n_workers);
    // Wait until all n_workers are done with the Threads work.
    void wait_until_all_workers_done_with_threads(uint n_workers);
  };
  friend class StrongRootsScope;

  // The current active StrongRootsScope
  StrongRootsScope* _strong_roots_scope;

  StrongRootsScope* active_strong_roots_scope() const;

 private:
  void register_strong_roots_scope(StrongRootsScope* scope);
  void unregister_strong_roots_scope(StrongRootsScope* scope);
  void change_strong_roots_parity();

 public:
  enum ScanningOption {
    SO_None = 0x0,
    SO_AllClasses = 0x1,
    SO_SystemClasses = 0x2,
    SO_Strings = 0x4,
    SO_AllCodeCache = 0x8,
    SO_None = 0x0,
    SO_AllCodeCache = 0x8,
    SO_ScavengeCodeCache = 0x10
  };

@@ -227,15 +240,26 @@ public:
  // Invoke the "do_oop" method of the closure "roots" on all root locations.
  // The "so" argument determines which roots the closure is applied to:
  // "SO_None" does none;
  // "SO_AllClasses" applies the closure to all entries in the SystemDictionary;
  // "SO_SystemClasses" to all the "system" classes and loaders;
  // "SO_Strings" applies the closure to all entries in StringTable;
  // "SO_AllCodeCache" applies the closure to all elements of the CodeCache.
  // "SO_ScavengeCodeCache" applies the closure to elements on the scavenge root list in the CodeCache.
  void process_roots(bool activate_scope,
                     ScanningOption so,
                     OopClosure* strong_roots,
                     OopClosure* weak_roots,
                     CLDClosure* strong_cld_closure,
                     CLDClosure* weak_cld_closure,
                     CodeBlobClosure* code_roots);
  void process_all_roots(bool activate_scope,
                         ScanningOption so,
                         OopClosure* roots,
                         CLDClosure* cld_closure,
                         CodeBlobClosure* code_roots);
  void process_strong_roots(bool activate_scope,
                            ScanningOption so,
                            OopClosure* roots,
                            KlassClosure* klass_closure);
                            CLDClosure* cld_closure,
                            CodeBlobClosure* code_roots);


  // Apply "root_closure" to the JNI weak roots.
  void process_weak_roots(OopClosure* root_closure);

@@ -251,7 +275,7 @@ public:
  virtual void gc_epilogue(bool full) = 0;

  // Sets the number of parallel threads that will be doing tasks
  // (such as process strong roots) subsequently.
  // (such as process roots) subsequently.
  virtual void set_par_threads(uint t);

  int n_termination();

@@ -245,6 +245,7 @@ InstanceKlass::InstanceKlass(int vtable_len,
  set_static_oop_field_count(0);
  set_nonstatic_field_size(0);
  set_is_marked_dependent(false);
  set_has_unloaded_dependent(false);
  set_init_state(InstanceKlass::allocated);
  set_init_thread(NULL);
  set_reference_type(rt);

@@ -1801,6 +1802,9 @@ jmethodID InstanceKlass::jmethod_id_or_null(Method* method) {
  return id;
}

int nmethodBucket::decrement() {
  return Atomic::add(-1, (volatile int *)&_count);
}

//
// Walk the list of dependent nmethods searching for nmethods which

@@ -1815,7 +1819,7 @@ int InstanceKlass::mark_dependent_nmethods(DepChange& changes) {
    nmethod* nm = b->get_nmethod();
    // since dependencies aren't removed until an nmethod becomes a zombie,
    // the dependency list may contain nmethods which aren't alive.
    if (nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
    if (b->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
      if (TraceDependencies) {
        ResourceMark rm;
        tty->print_cr("Marked for deoptimization");

@@ -1832,6 +1836,43 @@ int InstanceKlass::mark_dependent_nmethods(DepChange& changes) {
  return found;
}

void InstanceKlass::clean_dependent_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);

  if (has_unloaded_dependent()) {
    nmethodBucket* b = _dependencies;
    nmethodBucket* last = NULL;
    while (b != NULL) {
      assert(b->count() >= 0, err_msg("bucket count: %d", b->count()));

      nmethodBucket* next = b->next();

      if (b->count() == 0) {
        if (last == NULL) {
          _dependencies = next;
        } else {
          last->set_next(next);
        }
        delete b;
        // last stays the same.
      } else {
        last = b;
      }

      b = next;
    }
    set_has_unloaded_dependent(false);
  }
#ifdef ASSERT
  else {
    // Verification
    for (nmethodBucket* b = _dependencies; b != NULL; b = b->next()) {
      assert(b->count() >= 0, err_msg("bucket count: %d", b->count()));
      assert(b->count() != 0, "empty buckets need to be cleaned");
    }
  }
#endif
}
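clean_dependent_nmethods is the sweep half of a two-phase removal scheme: unlinking no longer happens when a dependency goes away, only when this method walks the list under the CodeCache_lock and deletes buckets whose count dropped to zero (the decrement half appears in the remove_dependent_nmethod hunk below). A self-contained sketch of the same pattern, with hypothetical names:

    #include <cstddef>

    // Hypothetical miniature of the deferred-unlink scheme above.
    struct Bucket { int count; Bucket* next; };

    // Phase 1: releasing a reference only drops the count and raises a flag.
    void release(Bucket* b, bool* has_dead) {
      if (--b->count == 0) { *has_dead = true; }
    }

    // Phase 2: one sweep unlinks and frees every zero-count bucket.
    void sweep(Bucket** head) {
      Bucket** link = head;
      while (*link != NULL) {
        Bucket* b = *link;
        if (b->count == 0) { *link = b->next; delete b; }
        else               { link = &b->next;           }
      }
    }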

//
// Add an nmethodBucket to the list of dependencies for this nmethod.

@@ -1866,13 +1907,10 @@ void InstanceKlass::remove_dependent_nmethod(nmethod* nm) {
  nmethodBucket* last = NULL;
  while (b != NULL) {
    if (nm == b->get_nmethod()) {
      if (b->decrement() == 0) {
        if (last == NULL) {
          _dependencies = b->next();
        } else {
          last->set_next(b->next());
        }
        delete b;
      int val = b->decrement();
      guarantee(val >= 0, err_msg("Underflow: %d", val));
      if (val == 0) {
        set_has_unloaded_dependent(true);
      }
      return;
    }

@@ -1911,6 +1949,11 @@ bool InstanceKlass::is_dependent_nmethod(nmethod* nm) {
  nmethodBucket* b = _dependencies;
  while (b != NULL) {
    if (nm == b->get_nmethod()) {
#ifdef ASSERT
      int count = b->count();
      assert(count >= 0, "Just check if we ever get here 1");
      assert(count > 0, "Just check if we ever get here 2");
#endif
      return true;
    }
    b = b->next();

@@ -2209,7 +2252,7 @@ int InstanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
#endif // INCLUDE_ALL_GCS

void InstanceKlass::clean_implementors_list(BoolObjectClosure* is_alive) {
  assert(is_loader_alive(is_alive), "this klass should be live");
  assert(class_loader_data()->is_alive(is_alive), "this klass should be live");
  if (is_interface()) {
    if (ClassUnloading) {
      Klass* impl = implementor();

@@ -197,6 +197,7 @@ class InstanceKlass: public Klass {
  // _is_marked_dependent can be set concurrently, thus cannot be part of the
  // _misc_flags.
  bool _is_marked_dependent;  // used for marking during flushing and deoptimization
  bool _has_unloaded_dependent;

  enum {
    _misc_rewritten = 1 << 0, // methods rewritten.

@@ -444,6 +445,9 @@ class InstanceKlass: public Klass {
  bool is_marked_dependent() const { return _is_marked_dependent; }
  void set_is_marked_dependent(bool value) { _is_marked_dependent = value; }

  bool has_unloaded_dependent() const { return _has_unloaded_dependent; }
  void set_has_unloaded_dependent(bool value) { _has_unloaded_dependent = value; }

  // initialization (virtuals from Klass)
  bool should_be_initialized() const; // means that initialize should be called
  void initialize(TRAPS);

@@ -922,6 +926,7 @@ class InstanceKlass: public Klass {

  void clean_implementors_list(BoolObjectClosure* is_alive);
  void clean_method_data(BoolObjectClosure* is_alive);
  void clean_dependent_nmethods();

  // Explicit metaspace deallocation of fields
  // For RedefineClasses and class file parsing errors, we need to deallocate

@@ -1210,7 +1215,7 @@ class nmethodBucket: public CHeapObj<mtClass> {
  }
  int count()                             { return _count; }
  int increment()                         { _count += 1; return _count; }
  int decrement()                         { _count -= 1; assert(_count >= 0, "don't underflow"); return _count; }
  int decrement();
  nmethodBucket* next()                   { return _next; }
  void set_next(nmethodBucket* b)         { _next = b; }
  nmethod* get_nmethod()                  { return _nmethod; }

@@ -42,6 +42,7 @@
#include "utilities/stack.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"

@@ -159,7 +160,12 @@ Klass::Klass() {
  _primary_supers[0] = k;
  set_super_check_offset(in_bytes(primary_supers_offset()));

  set_java_mirror(NULL);
  // The constructor is used from init_self_patching_vtbl_list,
  // which doesn't zero out the memory before calling the constructor.
  // Need to set the field explicitly to not hit an assert that the field
  // should be NULL before setting it.
  _java_mirror = NULL;

  set_modifier_flags(0);
  set_layout_helper(Klass::_lh_neutral_value);
  set_name(NULL);

@@ -383,7 +389,7 @@ bool Klass::is_loader_alive(BoolObjectClosure* is_alive) {
  return mirror_alive;
}

void Klass::clean_weak_klass_links(BoolObjectClosure* is_alive) {
void Klass::clean_weak_klass_links(BoolObjectClosure* is_alive, bool clean_alive_klasses) {
  if (!ClassUnloading) {
    return;
  }

@@ -428,7 +434,7 @@ void Klass::clean_weak_klass_links(BoolObjectClosure* is_alive) {
    }

    // Clean the implementors list and method data.
    if (current->oop_is_instance()) {
    if (clean_alive_klasses && current->oop_is_instance()) {
      InstanceKlass* ik = InstanceKlass::cast(current);
      ik->clean_implementors_list(is_alive);
      ik->clean_method_data(is_alive);

@@ -440,12 +446,18 @@ void Klass::klass_update_barrier_set(oop v) {
  record_modified_oops();
}

void Klass::klass_update_barrier_set_pre(void* p, oop v) {
  // This barrier used by G1, where it's used remember the old oop values,
  // so that we don't forget any objects that were live at the snapshot at
  // the beginning. This function is only used when we write oops into
  // Klasses. Since the Klasses are used as roots in G1, we don't have to
  // do anything here.
// This barrier is used by G1 to remember the old oop values, so
// that we don't forget any objects that were live at the snapshot at
// the beginning. This function is only used when we write oops into Klasses.
void Klass::klass_update_barrier_set_pre(oop* p, oop v) {
#if INCLUDE_ALL_GCS
  if (UseG1GC) {
    oop obj = *p;
    if (obj != NULL) {
      G1SATBCardTableModRefBS::enqueue(obj);
    }
  }
#endif
}

void Klass::klass_oop_store(oop* p, oop v) {

@@ -456,7 +468,7 @@ void Klass::klass_oop_store(oop* p, oop v) {
  if (always_do_update_barrier) {
    klass_oop_store((volatile oop*)p, v);
  } else {
    klass_update_barrier_set_pre((void*)p, v);
    klass_update_barrier_set_pre(p, v);
    *p = v;
    klass_update_barrier_set(v);
  }

@@ -466,7 +478,7 @@ void Klass::klass_oop_store(volatile oop* p, oop v) {
  assert(!Universe::heap()->is_in_reserved((void*)p), "Should store pointer into metadata");
  assert(v == NULL || Universe::heap()->is_in_reserved((void*)v), "Should store pointer to an object");

  klass_update_barrier_set_pre((void*)p, v);
  klass_update_barrier_set_pre((oop*)p, v); // Cast away volatile.
  OrderAccess::release_store_ptr(p, v);
  klass_update_barrier_set(v);
}
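Both klass_oop_store overloads now follow the same write protocol: the SATB pre-barrier enqueues the value about to be overwritten, then the store happens, then the post-barrier records the modified Klass. A minimal sketch of that ordering (not HotSpot source; oop is modeled as an opaque pointer and the barrier functions stand in for klass_update_barrier_set_pre / klass_update_barrier_set):

    typedef class oopDesc* oop;  // opaque object pointer, as in HotSpot

    // Stand-ins for the real barriers, for illustration only.
    static void pre_barrier(oop* p, oop /*new_v*/) { /* SATB: enqueue old *p if non-NULL */ (void)p; }
    static void post_barrier(oop /*v*/)            { /* card-mark: remember the modified Klass */ }

    static void store_klass_oop(oop* p, oop v) {
      pre_barrier(p, v);   // capture the old value before it is overwritten
      *p = v;              // the actual store
      post_barrier(v);     // record the write afterwards
    }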

@@ -553,7 +553,10 @@ class Klass : public Metadata {
  // The is_alive closure passed in depends on the Garbage Collector used.
  bool is_loader_alive(BoolObjectClosure* is_alive);

  static void clean_weak_klass_links(BoolObjectClosure* is_alive);
  static void clean_weak_klass_links(BoolObjectClosure* is_alive, bool clean_alive_klasses = true);
  static void clean_subklass_tree(BoolObjectClosure* is_alive) {
    clean_weak_klass_links(is_alive, false /* clean_alive_klasses */);
  }
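clean_subklass_tree is just clean_weak_klass_links with clean_alive_klasses turned off: dead entries are still pruned from the subklass/sibling links, but the implementor lists and method data of live klasses are left alone. Sketched call shapes (the is_alive closure here is hypothetical, some collector-provided BoolObjectClosure):

    Klass::clean_weak_klass_links(&is_alive);  // prune links and clean live klasses
    Klass::clean_subklass_tree(&is_alive);     // prune links only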

  // iterators
  virtual int oop_oop_iterate(oop obj, ExtendedOopClosure* blk) = 0;

@@ -660,7 +663,7 @@ class Klass : public Metadata {
 private:
  // barriers used by klass_oop_store
  void klass_update_barrier_set(oop v);
  void klass_update_barrier_set_pre(void* p, oop v);
  void klass_update_barrier_set_pre(oop* p, oop v);
};

#endif // SHARE_VM_OOPS_KLASS_HPP

@@ -3019,7 +3019,7 @@ inline bool VM_HeapWalkOperation::collect_simple_roots() {

  // If there are any non-perm roots in the code cache, visit them.
  blk.set_kind(JVMTI_HEAP_REFERENCE_OTHER);
  CodeBlobToOopClosure look_in_blobs(&blk, false);
  CodeBlobToOopClosure look_in_blobs(&blk, !CodeBlobToOopClosure::FixRelocations);
  CodeCache::scavenge_root_nmethods_do(&look_in_blobs);

  return true;

@@ -24,6 +24,7 @@

#include "precompiled.hpp"

#include "memory/metadataFactory.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"

@@ -38,6 +39,7 @@
#include "runtime/interfaceSupport.hpp"
#include "runtime/os.hpp"

#include "utilities/array.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"
#include "utilities/exceptions.hpp"

@@ -726,7 +728,6 @@ WB_ENTRY(jobjectArray, WB_GetNMethod(JNIEnv* env, jobject o, jobject method, jbo
  return result;
WB_END


WB_ENTRY(jlong, WB_GetThreadStackSize(JNIEnv* env, jobject o))
  return (jlong) Thread::current()->stack_size();
WB_END

@@ -736,6 +737,35 @@ WB_ENTRY(jlong, WB_GetThreadRemainingStackSize(JNIEnv* env, jobject o))
  return (jlong) t->stack_available(os::current_stack_pointer()) - (jlong) StackShadowPages * os::vm_page_size();
WB_END

int WhiteBox::array_bytes_to_length(size_t bytes) {
  return Array<u1>::bytes_to_length(bytes);
}

WB_ENTRY(jlong, WB_AllocateMetaspace(JNIEnv* env, jobject wb, jobject class_loader, jlong size))
  if (size < 0) {
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(),
        err_msg("WB_AllocateMetaspace: size is negative: " JLONG_FORMAT, size));
  }

  oop class_loader_oop = JNIHandles::resolve(class_loader);
  ClassLoaderData* cld = class_loader_oop != NULL
      ? java_lang_ClassLoader::loader_data(class_loader_oop)
      : ClassLoaderData::the_null_class_loader_data();

  void* metadata = MetadataFactory::new_writeable_array<u1>(cld, WhiteBox::array_bytes_to_length((size_t)size), thread);

  return (jlong)(uintptr_t)metadata;
WB_END

WB_ENTRY(void, WB_FreeMetaspace(JNIEnv* env, jobject wb, jobject class_loader, jlong addr, jlong size))
  oop class_loader_oop = JNIHandles::resolve(class_loader);
  ClassLoaderData* cld = class_loader_oop != NULL
      ? java_lang_ClassLoader::loader_data(class_loader_oop)
      : ClassLoaderData::the_null_class_loader_data();

  MetadataFactory::free_array(cld, (Array<u1>*)(uintptr_t)addr);
WB_END

// Some convenience methods to deal with objects from java
int WhiteBox::offset_for_field(const char* field_name, oop object,
    Symbol* signature_symbol) {

@@ -866,6 +896,10 @@ static JNINativeMethod methods[] = {
  {CC"isInStringTable",    CC"(Ljava/lang/String;)Z", (void*)&WB_IsInStringTable },
  {CC"fullGC",             CC"()V",                   (void*)&WB_FullGC },
  {CC"readReservedMemory", CC"()V",                   (void*)&WB_ReadReservedMemory },
  {CC"allocateMetaspace",
     CC"(Ljava/lang/ClassLoader;J)J",                 (void*)&WB_AllocateMetaspace },
  {CC"freeMetaspace",
     CC"(Ljava/lang/ClassLoader;JJ)V",                (void*)&WB_FreeMetaspace },
  {CC"getCPUFeatures",     CC"()Ljava/lang/String;",  (void*)&WB_GetCPUFeatures },
  {CC"getNMethod",         CC"(Ljava/lang/reflect/Executable;Z)[Ljava/lang/Object;",
                                                      (void*)&WB_GetNMethod },

@@ -62,6 +62,8 @@ class WhiteBox : public AllStatic {
                           Symbol* signature_symbol);
  static const char* lookup_jstring(const char* field_name, oop object);
  static bool lookup_bool(const char* field_name, oop object);

  static int array_bytes_to_length(size_t bytes);
};

@@ -4101,8 +4101,8 @@ void Threads::possibly_parallel_oops_do(OopClosure* f, CLDClosure* cld_f, CodeBl
  SharedHeap* sh = SharedHeap::heap();
  // Cannot yet substitute active_workers for n_par_threads
  // because of G1CollectedHeap::verify() use of
  // SharedHeap::process_strong_roots(). n_par_threads == 0 will
  // turn off parallelism in process_strong_roots while active_workers
  // SharedHeap::process_roots(). n_par_threads == 0 will
  // turn off parallelism in process_roots while active_workers
  // is being used for parallelism elsewhere.
  bool is_par = sh->n_par_threads() > 0;
  assert(!is_par ||

@@ -452,7 +452,7 @@ class Thread: public ThreadShadow {
 private:
  bool claim_oops_do_par_case(int collection_parity);
 public:
  // Requires that "collection_parity" is that of the current strong roots
  // Requires that "collection_parity" is that of the current roots
  // iteration.  If "is_par" is false, sets the parity of "this" to
  // "collection_parity", and returns "true".  If "is_par" is true,
  // uses an atomic instruction to set the current thread's parity to

@@ -305,6 +305,7 @@ class Array: public MetaspaceObj {
  friend class MetadataFactory;
  friend class VMStructs;
  friend class MethodHandleCompiler;           // special case
  friend class WhiteBox;
 protected:
  int _length;                                 // the number of array elements
  T   _data[1];                                // the array memory

@@ -326,6 +327,29 @@

  static size_t byte_sizeof(int length) { return sizeof(Array<T>) + MAX2(length - 1, 0) * sizeof(T); }

  // WhiteBox API helper.
  static int bytes_to_length(size_t bytes) {
    assert(is_size_aligned(bytes, BytesPerWord), "Must be, for now");

    if (sizeof(Array<T>) >= bytes) {
      return 0;
    }

    size_t left = bytes - sizeof(Array<T>);
    assert(is_size_aligned(left, sizeof(T)), "Must be");

    size_t elements = left / sizeof(T);
    assert(elements <= (size_t)INT_MAX, err_msg("number of elements " SIZE_FORMAT " doesn't fit into an int.", elements));

    int length = (int)elements;

    assert((size_t)size(length) * BytesPerWord == bytes,
        err_msg("Expected: " SIZE_FORMAT " got: " SIZE_FORMAT,
            bytes, (size_t)size(length) * BytesPerWord));

    return length;
  }
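bytes_to_length is the inverse of byte_sizeof, mapping a word-aligned byte budget back to an element count. A worked example with illustrative numbers (the header size is an assumption, not a guaranteed value):

    // Assume sizeof(Array<u1>) == 8 on the target platform and bytes == 64:
    //   left     = 64 - 8 = 56        payload bytes after the header
    //   elements = 56 / 1 = 56        since sizeof(u1) == 1
    //   length   = 56
    // The final assert then checks the round trip: size(56) must be the
    // word-aligned size of byte_sizeof(56) = 8 + 55 = 63 bytes -> 8 words
    // -> 64 bytes, which matches the original request.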

  explicit Array(int length) : _length(length) {
    assert(length >= 0, "illegal length");
  }

@@ -142,6 +142,8 @@ public class WhiteBox {

  // Memory
  public native void readReservedMemory();
  public native long allocateMetaspace(ClassLoader classLoader, long size);
  public native void freeMetaspace(ClassLoader classLoader, long addr, long size);

  // force Full GC
  public native void fullGC();