7041789: 30% perf regression with c2/arm following 7017732
Implement a more accurate is_scavengable()
Reviewed-by: stefank, jcoomes, ysr

commit 6819e3739e (parent 8cdd97938c)
@@ -1810,7 +1810,7 @@ public:
   void maybe_print(oop* p) {
     if (_print_nm == NULL)  return;
     if (!_detected_scavenge_root)  _print_nm->print_on(tty, "new scavenge root");
-    tty->print_cr(""PTR_FORMAT"[offset=%d] detected non-perm oop "PTR_FORMAT" (found at "PTR_FORMAT")",
+    tty->print_cr(""PTR_FORMAT"[offset=%d] detected scavengable oop "PTR_FORMAT" (found at "PTR_FORMAT")",
                   _print_nm, (int)((intptr_t)p - (intptr_t)_print_nm),
                   (intptr_t)(*p), (intptr_t)p);
     (*p)->print();
@@ -2311,7 +2311,7 @@ public:
       _nm->print_nmethod(true);
       _ok = false;
     }
-    tty->print_cr("*** non-perm oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
+    tty->print_cr("*** scavengable oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
                   (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
     (*p)->print();
   }
@@ -2324,7 +2324,7 @@ void nmethod::verify_scavenge_root_oops() {
    DebugScavengeRoot debug_scavenge_root(this);
    oops_do(&debug_scavenge_root);
    if (!debug_scavenge_root.ok())
-     fatal("found an unadvertised bad non-perm oop in the code cache");
+     fatal("found an unadvertised bad scavengable oop in the code cache");
  }
  assert(scavenge_root_not_marked(), "");
}
@@ -109,7 +109,7 @@ class xmlStream;
 class nmethod : public CodeBlob {
   friend class VMStructs;
   friend class NMethodSweeper;
-  friend class CodeCache;  // non-perm oops
+  friend class CodeCache;  // scavengable oops
  private:
   // Shared fields for all nmethod's
   methodOop _method;
@@ -466,17 +466,17 @@ public:
   bool  is_at_poll_return(address pc);
   bool  is_at_poll_or_poll_return(address pc);

-  // Non-perm oop support
+  // Scavengable oop support
   bool  on_scavenge_root_list() const { return (_scavenge_root_state & 1) != 0; }
  protected:
-  enum { npl_on_list = 0x01, npl_marked = 0x10 };
-  void  set_on_scavenge_root_list() { _scavenge_root_state = npl_on_list; }
+  enum { sl_on_list = 0x01, sl_marked = 0x10 };
+  void  set_on_scavenge_root_list() { _scavenge_root_state = sl_on_list; }
   void  clear_on_scavenge_root_list() { _scavenge_root_state = 0; }
   // assertion-checking and pruning logic uses the bits of _scavenge_root_state
 #ifndef PRODUCT
-  void  set_scavenge_root_marked() { _scavenge_root_state |= npl_marked; }
-  void  clear_scavenge_root_marked() { _scavenge_root_state &= ~npl_marked; }
-  bool  scavenge_root_not_marked() { return (_scavenge_root_state &~ npl_on_list) == 0; }
+  void  set_scavenge_root_marked() { _scavenge_root_state |= sl_marked; }
+  void  clear_scavenge_root_marked() { _scavenge_root_state &= ~sl_marked; }
+  bool  scavenge_root_not_marked() { return (_scavenge_root_state &~ sl_on_list) == 0; }
   // N.B. there is no positive marked query, and we only use the not_marked query for asserts.
 #endif //PRODUCT
   nmethod* scavenge_root_link() const { return _scavenge_root_link; }
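The `_scavenge_root_state` encoding above packs two independent facts into one field: bit 0x01 records membership on the scavenge-root list and bit 0x10 records the debug-only mark. A minimal standalone sketch of the same encoding, with illustrative names rather than HotSpot's declarations:

#include <cassert>
#include <cstdint>

// Illustrative two-bit state, mirroring sl_on_list / sl_marked above.
enum : uint8_t { ON_LIST = 0x01, MARKED = 0x10 };

struct ScavengeRootState {
  uint8_t bits = 0;

  bool on_list() const    { return (bits & ON_LIST) != 0; }
  void set_on_list()      { bits = ON_LIST; }  // joining the list clears any stale mark
  void clear_on_list()    { bits = 0; }

  void set_marked()       { bits |= MARKED; }
  void clear_marked()     { bits &= (uint8_t)~MARKED; }
  // As in the patch: ignoring the list bit, no mark may remain.
  bool not_marked() const { return (bits & ~ON_LIST) == 0; }
};

int main() {
  ScavengeRootState s;
  s.set_on_list();
  s.set_marked();
  s.clear_marked();
  assert(s.on_list() && s.not_marked());  // list membership survives mark/unmark
}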
@@ -428,6 +428,37 @@ void G1CollectedHeap::stop_conc_gc_threads() {
   _cmThread->stop();
 }

+#ifdef ASSERT
+// A region is added to the collection set as it is retired, so an
+// address p can point to a region which will be in the collection set
+// but has not yet been retired. This method is therefore only accurate
+// during a GC pause, after all regions have been retired. It is used
+// for debugging, to check whether an nmethod has references to objects
+// that can be moved during a partial collection. Though it can be
+// inaccurate, it is sufficient for G1 because the conservative
+// implementation of is_scavengable() for G1 will indicate that
+// all nmethods must be scanned during a partial collection.
+bool G1CollectedHeap::is_in_partial_collection(const void* p) {
+  HeapRegion* hr = heap_region_containing(p);
+  return hr != NULL && hr->in_collection_set();
+}
+#endif
+
+// Returns true if the reference points to an object that
+// can move in an incremental collection.
+bool G1CollectedHeap::is_scavengable(const void* p) {
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  G1CollectorPolicy* g1p = g1h->g1_policy();
+  HeapRegion* hr = heap_region_containing(p);
+  if (hr == NULL) {
+    // perm gen (or null)
+    return false;
+  } else {
+    return !hr->isHumongous();
+  }
+}
+
 void G1CollectedHeap::check_ct_logs_at_safepoint() {
   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
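The G1 predicates above resolve an address to its covering region and then consult per-region state. A toy sketch of that address-to-region lookup over a fixed-size region table; the region size, count, and names here are invented for illustration:

#include <cassert>
#include <cstddef>
#include <cstdint>

// Hypothetical 1 MiB regions covering a contiguous reserved range.
const uintptr_t HEAP_BASE   = 0x100000;
const size_t    REGION_SIZE = 1 << 20;
const size_t    NUM_REGIONS = 8;

struct Region { bool in_collection_set; };
Region regions[NUM_REGIONS] = {};

// Map an address to its covering region, or nullptr if outside the heap.
Region* region_containing(const void* p) {
  uintptr_t a = (uintptr_t)p;
  if (a < HEAP_BASE || a >= HEAP_BASE + NUM_REGIONS * REGION_SIZE) return nullptr;
  return &regions[(a - HEAP_BASE) / REGION_SIZE];
}

// Mirrors the shape of the G1 check: null-safe lookup, then region state.
bool is_in_partial_collection(const void* p) {
  Region* r = region_containing(p);
  return r != nullptr && r->in_collection_set;
}

int main() {
  regions[2].in_collection_set = true;
  assert(is_in_partial_collection((void*)(HEAP_BASE + 2 * REGION_SIZE + 16)));
  assert(!is_in_partial_collection((void*)HEAP_BASE));
  assert(!is_in_partial_collection(nullptr));
}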
@@ -1254,6 +1254,12 @@ public:
     return hr != NULL && hr->is_young();
   }

+#ifdef ASSERT
+  virtual bool is_in_partial_collection(const void* p);
+#endif
+
+  virtual bool is_scavengable(const void* addr);
+
   // We don't need barriers for initializing stores to objects
   // in the young gen: for the SATB pre-barrier, there is no
   // pre-value that needs to be remembered; for the remembered-set
@@ -339,6 +339,21 @@ bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
   return false;
 }

+bool ParallelScavengeHeap::is_scavengable(const void* addr) {
+  return is_in_young((oop)addr);
+}
+
+#ifdef ASSERT
+// Don't implement this by using is_in_young(). This method is used
+// in some cases to check that is_in_young() is correct.
+bool ParallelScavengeHeap::is_in_partial_collection(const void *p) {
+  assert(is_in_reserved(p) || p == NULL,
+         "Does not work if address is non-null and outside of the heap");
+  // The order of the generations is perm (low addr), old, young (high addr)
+  return p >= old_gen()->reserved().end();
+}
+#endif
+
 // There are two levels of allocation policy here.
 //
 // When an allocation request fails, the requesting thread must invoke a VM
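The single comparison above works only because ParallelScavenge reserves its generations in a fixed address order: perm lowest, then old, then young. A toy sketch of the same idea, with made-up address ranges rather than a real heap reservation:

#include <cassert>
#include <cstdint>

// Hypothetical contiguous reservation: perm (low), old, young (high).
struct Range { uintptr_t start, end; };  // half-open [start, end)

const Range perm_gen  = {0x1000, 0x2000};
const Range old_gen   = {0x2000, 0x6000};
const Range young_gen = {0x6000, 0x8000};

// Everything above the old gen's end is in the young gen, so one
// comparison classifies a pointer without testing each generation.
bool is_in_young(uintptr_t p) { return p >= old_gen.end; }

int main() {
  assert(is_in_young(0x7000));   // inside young
  assert(!is_in_young(0x3000));  // inside old
  assert(!is_in_young(0x1800));  // inside perm
}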
@@ -127,6 +127,12 @@ CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector
   // collection.
   virtual bool is_maximal_no_gc() const;

+  // Return true if the reference points to an object that
+  // can be moved in a partial collection. For currently implemented
+  // generational collectors that means during a collection of
+  // the young gen.
+  virtual bool is_scavengable(const void* addr);
+
   // Does this heap support heap inspection? (+PrintClassHistogram)
   bool supports_heap_inspection() const { return true; }

@@ -143,6 +149,10 @@ CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector
     return perm_gen()->reserved().contains(p);
   }

+#ifdef ASSERT
+  virtual bool is_in_partial_collection(const void *p);
+#endif
+
   bool is_permanent(const void *p) const {    // committed part
     return perm_gen()->is_in(p);
   }
@@ -51,7 +51,12 @@ inline void ParallelScavengeHeap::invoke_full_gc(bool maximum_compaction)
 }

 inline bool ParallelScavengeHeap::is_in_young(oop p) {
-  return young_gen()->is_in_reserved(p);
+  // Assumes that the old gen address range is lower than that of the young gen.
+  const void* loc = (void*) p;
+  bool result = ((HeapWord*)p) >= young_gen()->reserved().start();
+  assert(result == young_gen()->is_in_reserved(p),
+         err_msg("incorrect test - result=%d, p=" PTR_FORMAT, result, (void*)p));
+  return result;
 }

 inline bool ParallelScavengeHeap::is_in_old_or_perm(oop p) {
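This hunk is the heart of the performance fix: the hot is_in_young() test collapses to one pointer comparison, while the old, authoritative range check survives only inside the assert. A minimal standalone sketch of that fast-path-verified-by-slow-path pattern, using a stand-in range rather than the real heap layout:

#include <cassert>
#include <cstdint>

struct Gen { uintptr_t start, end; };  // reserved [start, end)

const Gen young = {0x6000, 0x8000};

// Authoritative but slower check: full range test.
bool is_in_reserved_young(uintptr_t p) {
  return p >= young.start && p < young.end;
}

// Fast path: a single comparison, valid because nothing is mapped
// above the young gen in this layout. Debug builds cross-check it
// against the slow predicate; release builds pay only one compare.
bool is_in_young(uintptr_t p) {
  bool result = p >= young.start;
  assert(result == is_in_reserved_young(p) && "incorrect fast test");
  return result;
}

int main() {
  assert(is_in_young(0x7000));
  assert(!is_in_young(0x2000));
}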
@@ -269,6 +269,13 @@ class CollectedHeap : public CHeapObj {
   // space). If you need the more conservative answer use is_permanent().
   virtual bool is_in_permanent(const void *p) const = 0;

+#ifdef ASSERT
+  // Returns true if "p" is in the part of the
+  // heap being collected.
+  virtual bool is_in_partial_collection(const void *p) = 0;
+#endif
+
   bool is_in_permanent_or_null(const void *p) const {
     return p == NULL || is_in_permanent(p);
   }
@@ -284,11 +291,7 @@ class CollectedHeap : public CHeapObj {

   // An object is scavengable if its location may move during a scavenge.
   // (A scavenge is a GC which is not a full GC.)
-  // Currently, this just means it is not perm (and not null).
-  // This could change if we rethink what's in perm-gen.
-  bool is_scavengable(const void *p) const {
-    return !is_in_permanent_or_null(p);
-  }
+  virtual bool is_scavengable(const void *p) = 0;

   // Returns "TRUE" if "p" is a method oop in the
   // current heap, with high probability. This predicate
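This is the interface change the commit title refers to: the one-size-fits-all "not perm" answer becomes a pure virtual that each heap implements with collector-specific knowledge. A condensed sketch of the shape of that design, with illustrative class and method names, not the HotSpot declarations:

#include <cassert>
#include <cstdint>

// Base heap interface: every collector must answer precisely.
class Heap {
public:
  virtual ~Heap() {}
  virtual bool is_scavengable(const void* p) = 0;
};

// A generational heap: only objects above the young-gen base move
// in a scavenge, so one comparison answers the question.
class GenerationalHeap : public Heap {
  uintptr_t young_start_;
public:
  explicit GenerationalHeap(uintptr_t young_start) : young_start_(young_start) {}
  bool is_scavengable(const void* p) override {
    return (uintptr_t)p >= young_start_;
  }
};

// A region-based heap answering conservatively: treat every non-null
// referent as movable, keeping all oop-bearing nmethods on the
// scavenge-root list.
class RegionHeap : public Heap {
public:
  bool is_scavengable(const void* p) override { return p != nullptr; }
};

int main() {
  GenerationalHeap gen(0x6000);
  RegionHeap g1like;
  assert(!gen.is_scavengable((const void*)0x2000));
  assert(g1like.is_scavengable((const void*)0x2000));
}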
@@ -711,15 +711,6 @@ void GenCollectedHeap::set_par_threads(int t) {
   _gen_process_strong_tasks->set_n_threads(t);
 }

-class AssertIsPermClosure: public OopClosure {
-public:
-  void do_oop(oop* p) {
-    assert((*p) == NULL || (*p)->is_perm(), "Referent should be perm.");
-  }
-  void do_oop(narrowOop* p) { ShouldNotReachHere(); }
-};
-static AssertIsPermClosure assert_is_perm_closure;
-
 void GenCollectedHeap::
 gen_process_strong_roots(int level,
                          bool younger_gens_as_roots,
@@ -962,6 +953,13 @@ void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
   }
 }

+bool GenCollectedHeap::is_in_young(oop p) {
+  bool result = ((HeapWord*)p) < _gens[_n_gens - 1]->reserved().start();
+  assert(result == _gens[0]->is_in_reserved(p),
+         err_msg("incorrect test - result=%d, p=" PTR_FORMAT, result, (void*)p));
+  return result;
+}
+
 // Returns "TRUE" iff "p" points into the allocated area of the heap.
 bool GenCollectedHeap::is_in(const void* p) const {
 #ifndef ASSERT
@@ -984,10 +982,16 @@ bool GenCollectedHeap::is_in(const void* p) const {
   return false;
 }

-// Returns "TRUE" iff "p" points into the allocated area of the heap.
-bool GenCollectedHeap::is_in_youngest(void* p) {
-  return _gens[0]->is_in(p);
-}
+#ifdef ASSERT
+// Don't implement this by using is_in_young(). This method is used
+// in some cases to check that is_in_young() is correct.
+bool GenCollectedHeap::is_in_partial_collection(const void* p) {
+  assert(is_in_reserved(p) || p == NULL,
+         "Does not work if address is non-null and outside of the heap");
+  // The order of the generations is young (low addr), old, perm (high addr)
+  return p < _gens[_n_gens - 2]->reserved().end() && p != NULL;
+}
+#endif

 void GenCollectedHeap::oop_iterate(OopClosure* cl) {
   for (int i = 0; i < _n_gens; i++) {
@@ -216,8 +216,18 @@ public:
     }
   }

-  // Returns "TRUE" iff "p" points into the youngest generation.
-  bool is_in_youngest(void* p);
+  // Returns true if the reference is to an object in the reserved space
+  // for the young generation.
+  // Assumes that the young gen address range is less than that of the old gen.
+  bool is_in_young(oop p);
+
+#ifdef ASSERT
+  virtual bool is_in_partial_collection(const void* p);
+#endif
+
+  virtual bool is_scavengable(const void* addr) {
+    return is_in_young((oop)addr);
+  }

   // Iteration functions.
   void oop_iterate(OopClosure* cl);
@@ -283,7 +293,7 @@ public:
     //        "Check can_elide_initializing_store_barrier() for this collector");
     // but unfortunately the flag UseSerialGC need not necessarily always
     // be set when DefNew+Tenured are being used.
-    return is_in_youngest((void*)new_obj);
+    return is_in_young(new_obj);
   }

   // Can a compiler elide a store barrier when it writes
@@ -102,6 +102,17 @@ public:
 };
 static AssertIsPermClosure assert_is_perm_closure;

+#ifdef ASSERT
+class AssertNonScavengableClosure: public OopClosure {
+public:
+  virtual void do_oop(oop* p) {
+    assert(!Universe::heap()->is_in_partial_collection(*p),
+           "Referent should not be scavengable."); }
+  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
+};
+static AssertNonScavengableClosure assert_is_non_scavengable_closure;
+#endif
+
 void SharedHeap::change_strong_roots_parity() {
   // Also set the new collection parity.
   assert(_strong_roots_parity >= 0 && _strong_roots_parity <= 2,
@@ -196,9 +207,10 @@ void SharedHeap::process_strong_roots(bool activate_scope,
       CodeCache::scavenge_root_nmethods_do(code_roots);
     }
   }
-  // Verify if the code cache contents are in the perm gen
-  NOT_PRODUCT(CodeBlobToOopClosure assert_code_is_perm(&assert_is_perm_closure, /*do_marking=*/ false));
-  NOT_PRODUCT(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_perm));
+  // Verify that the code cache contents are not subject to
+  // movement by a scavenging collection.
+  DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, /*do_marking=*/ false));
+  DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
 }

 if (!collecting_perm_gen) {
@@ -397,7 +397,7 @@ void instanceRefKlass::oop_verify_on(oop obj, outputStream* st) {

   if (referent != NULL) {
     guarantee(referent->is_oop(), "referent field heap failed");
-    if (gch != NULL && !gch->is_in_youngest(obj)) {
+    if (gch != NULL && !gch->is_in_young(obj)) {
       // We do a specific remembered set check here since the referent
       // field is not part of the oop mask and therefore skipped by the
       // regular verify code.
@@ -415,7 +415,7 @@ void instanceRefKlass::oop_verify_on(oop obj, outputStream* st) {
   if (next != NULL) {
     guarantee(next->is_oop(), "next field verify failed");
     guarantee(next->is_instanceRef(), "next field verify failed");
-    if (gch != NULL && !gch->is_in_youngest(obj)) {
+    if (gch != NULL && !gch->is_in_young(obj)) {
       // We do a specific remembered set check here since the next field is
       // not part of the oop mask and therefore skipped by the regular
       // verify code.