Merge
commit 8dac1f1b8a
@@ -50,34 +50,6 @@ bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) {
  return is_icholder_entry(call->destination());
}

//-----------------------------------------------------------------------------
// High-level access to an inline cache. Guaranteed to be MT-safe.

CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
  : _ic_call(call)
{
  address ic_call = call->instruction_address();

  assert(ic_call != NULL, "ic_call address must be set");
  assert(nm != NULL, "must pass nmethod");
  assert(nm->contains(ic_call), "must be in nmethod");

  // Search for the ic_call at the given address.
  RelocIterator iter(nm, ic_call, ic_call+1);
  bool ret = iter.next();
  assert(ret == true, "relocInfo must exist at this address");
  assert(iter.addr() == ic_call, "must find ic_call");
  if (iter.type() == relocInfo::virtual_call_type) {
    virtual_call_Relocation* r = iter.virtual_call_reloc();
    _is_optimized = false;
    _value = nativeMovConstReg_at(r->cached_value());
  } else {
    assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
    _is_optimized = true;
    _value = NULL;
  }
}

// ----------------------------------------------------------------------------

// A PPC CompiledStaticCall looks like this:
@@ -50,34 +50,6 @@ bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) {
  return is_icholder_entry(call->destination());
}

//-----------------------------------------------------------------------------
// High-level access to an inline cache. Guaranteed to be MT-safe.

CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
  : _ic_call(call)
{
  address ic_call = call->instruction_address();

  assert(ic_call != NULL, "ic_call address must be set");
  assert(nm != NULL, "must pass nmethod");
  assert(nm->contains(ic_call), "must be in nmethod");

  // Search for the ic_call at the given address.
  RelocIterator iter(nm, ic_call, ic_call+1);
  bool ret = iter.next();
  assert(ret == true, "relocInfo must exist at this address");
  assert(iter.addr() == ic_call, "must find ic_call");
  if (iter.type() == relocInfo::virtual_call_type) {
    virtual_call_Relocation* r = iter.virtual_call_reloc();
    _is_optimized = false;
    _value = nativeMovConstReg_at(r->cached_value());
  } else {
    assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
    _is_optimized = true;
    _value = NULL;
  }
}

// ----------------------------------------------------------------------------

#define __ _masm.
@@ -47,34 +47,6 @@ bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) {
  return is_icholder_entry(call->destination());
}

//-----------------------------------------------------------------------------
// High-level access to an inline cache. Guaranteed to be MT-safe.

CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
  : _ic_call(call)
{
  address ic_call = call->instruction_address();

  assert(ic_call != NULL, "ic_call address must be set");
  assert(nm != NULL, "must pass nmethod");
  assert(nm->contains(ic_call), "must be in nmethod");

  // Search for the ic_call at the given address.
  RelocIterator iter(nm, ic_call, ic_call+1);
  bool ret = iter.next();
  assert(ret == true, "relocInfo must exist at this address");
  assert(iter.addr() == ic_call, "must find ic_call");
  if (iter.type() == relocInfo::virtual_call_type) {
    virtual_call_Relocation* r = iter.virtual_call_reloc();
    _is_optimized = false;
    _value = nativeMovConstReg_at(r->cached_value());
  } else {
    assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
    _is_optimized = true;
    _value = NULL;
  }
}

// ----------------------------------------------------------------------------

#define __ _masm.
@@ -58,34 +58,6 @@ bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) {
  return is_icholder_entry(call->destination());
}

//-----------------------------------------------------------------------------
// High-level access to an inline cache. Guaranteed to be MT-safe.

CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
  : _ic_call(call)
{
  address ic_call = call->instruction_address();

  assert(ic_call != NULL, "ic_call address must be set");
  assert(nm != NULL, "must pass nmethod");
  assert(nm->contains(ic_call), "must be in nmethod");

  // Search for the ic_call at the given address.
  RelocIterator iter(nm, ic_call, ic_call+1);
  bool ret = iter.next();
  assert(ret == true, "relocInfo must exist at this address");
  assert(iter.addr() == ic_call, "must find ic_call");
  if (iter.type() == relocInfo::virtual_call_type) {
    virtual_call_Relocation* r = iter.virtual_call_reloc();
    _is_optimized = false;
    _value = nativeMovConstReg_at(r->cached_value());
  } else {
    assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
    _is_optimized = true;
    _value = NULL;
  }
}

// ----------------------------------------------------------------------------

void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
@@ -159,6 +159,30 @@ address CompiledIC::stub_address() const {
//-----------------------------------------------------------------------------
// High-level access to an inline cache. Guaranteed to be MT-safe.

CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
  : _ic_call(call)
{
  address ic_call = call->instruction_address();

  assert(ic_call != NULL, "ic_call address must be set");
  assert(nm != NULL, "must pass nmethod");
  assert(nm->contains(ic_call), "must be in nmethod");

  // Search for the ic_call at the given address.
  RelocIterator iter(nm, ic_call, ic_call+1);
  bool ret = iter.next();
  assert(ret == true, "relocInfo must exist at this address");
  assert(iter.addr() == ic_call, "must find ic_call");
  if (iter.type() == relocInfo::virtual_call_type) {
    virtual_call_Relocation* r = iter.virtual_call_reloc();
    _is_optimized = false;
    _value = nativeMovConstReg_at(r->cached_value());
  } else {
    assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
    _is_optimized = true;
    _value = NULL;
  }
}

bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
@@ -364,26 +364,29 @@ void nmethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  set_exception_cache(new_entry);
}

void nmethod::remove_from_exception_cache(ExceptionCache* ec) {
void nmethod::clean_exception_cache(BoolObjectClosure* is_alive) {
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache();
  assert(curr != NULL, "nothing to remove");
  // find the previous and next entry of ec
  while (curr != ec) {
    prev = curr;
    curr = curr->next();
    assert(curr != NULL, "ExceptionCache not found");
  }
  // now: curr == ec
  ExceptionCache* next = curr->next();
  if (prev == NULL) {
    set_exception_cache(next);
  } else {
    prev->set_next(next);
  }
  delete curr;
}

  while (curr != NULL) {
    ExceptionCache* next = curr->next();

    Klass* ex_klass = curr->exception_type();
    if (ex_klass != NULL && !ex_klass->is_loader_alive(is_alive)) {
      if (prev == NULL) {
        set_exception_cache(next);
      } else {
        prev->set_next(next);
      }
      delete curr;
      // prev stays the same.
    } else {
      prev = curr;
    }

    curr = next;
  }
}

// public method for accessing the exception cache
// These are the public access methods.
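Aside (an illustrative sketch, not part of this changeset): the new clean_exception_cache() walks the whole singly-linked exception-cache list once and unlinks every entry whose klass is no longer alive, instead of removing one known entry as the old remove_from_exception_cache() did. A minimal, self-contained version of that unlink-while-iterating pattern, with a plain predicate standing in for is_loader_alive(), could look like this:

#include <cstddef>

struct Entry {
  int    value;   // stands in for the cached exception type
  Entry* next;
};

// Remove every entry for which dead(value) is true; returns the new head.
// Mirrors the prev/curr/next bookkeeping used by clean_exception_cache().
template <typename Pred>
Entry* clean_list(Entry* head, Pred dead) {
  Entry* prev = NULL;
  Entry* curr = head;
  while (curr != NULL) {
    Entry* next = curr->next;
    if (dead(curr->value)) {
      if (prev == NULL) {
        head = next;        // unlinking the first entry moves the head
      } else {
        prev->next = next;  // splice the dead entry out of the list
      }
      delete curr;
      // prev stays the same.
    } else {
      prev = curr;
    }
    curr = next;
  }
  return head;
}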
@@ -1619,15 +1622,7 @@ void nmethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred)
  }

  // Exception cache
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    Klass* ex_klass = ec->exception_type();
    ExceptionCache* next_ec = ec->next();
    if (ex_klass != NULL && !ex_klass->is_loader_alive(is_alive)) {
      remove_from_exception_cache(ec);
    }
    ec = next_ec;
  }
  clean_exception_cache(is_alive);

  // If class unloading occurred we first iterate over all inline caches and
  // clear ICs where the cached oop is referring to an unloaded klass or method.
@@ -529,7 +529,7 @@ public:
  void set_exception_cache(ExceptionCache *ec) { _exception_cache = ec; }
  address handler_for_exception_and_pc(Handle exception, address pc);
  void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
  void remove_from_exception_cache(ExceptionCache* ec);
  void clean_exception_cache(BoolObjectClosure* is_alive);

  // implicit exceptions support
  address continuation_for_implicit_exception(address pc);
@@ -26,6 +26,7 @@
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSOOPCLOSURES_HPP

#include "memory/genOopClosures.hpp"
#include "memory/iterator.hpp"

/////////////////////////////////////////////////////////////////
// Closures used by ConcurrentMarkSweepGeneration's collector
@@ -48,33 +49,13 @@ class Par_MarkFromRootsClosure;
  } \
}

// Applies the given oop closure to all oops in all klasses visited.
class CMKlassClosure : public KlassClosure {
  friend class CMSOopClosure;
  friend class CMSOopsInGenClosure;

  OopClosure* _oop_closure;

  // Used when _oop_closure couldn't be set in an initialization list.
  void initialize(OopClosure* oop_closure) {
    assert(_oop_closure == NULL, "Should only be called once");
    _oop_closure = oop_closure;
  }
// TODO: This duplication of the MetadataAwareOopClosure class is only needed
// because some CMS OopClosures derive from OopsInGenClosure. It would be
// good to get rid of them completely.
class MetadataAwareOopsInGenClosure: public OopsInGenClosure {
  KlassToOopClosure _klass_closure;
 public:
  CMKlassClosure(OopClosure* oop_closure = NULL) : _oop_closure(oop_closure) { }

  void do_klass(Klass* k);
};

// The base class for all CMS marking closures.
// It's used to proxy through the metadata to the oops defined in them.
class CMSOopClosure: public ExtendedOopClosure {
  CMKlassClosure _klass_closure;
 public:
  CMSOopClosure() : ExtendedOopClosure() {
    _klass_closure.initialize(this);
  }
  CMSOopClosure(ReferenceProcessor* rp) : ExtendedOopClosure(rp) {
  MetadataAwareOopsInGenClosure() {
    _klass_closure.initialize(this);
  }

@@ -87,26 +68,7 @@ class CMSOopClosure: public ExtendedOopClosure {
  virtual void do_class_loader_data(ClassLoaderData* cld);
};

// TODO: This duplication of the CMSOopClosure class is only needed because
// some CMS OopClosures derive from OopsInGenClosure. It would be good
// to get rid of them completely.
class CMSOopsInGenClosure: public OopsInGenClosure {
  CMKlassClosure _klass_closure;
 public:
  CMSOopsInGenClosure() {
    _klass_closure.initialize(this);
  }

  virtual bool do_metadata() { return do_metadata_nv(); }
  inline bool do_metadata_nv() { return true; }

  virtual void do_klass(Klass* k);
  void do_klass_nv(Klass* k);

  virtual void do_class_loader_data(ClassLoaderData* cld);
};

class MarkRefsIntoClosure: public CMSOopsInGenClosure {
class MarkRefsIntoClosure: public MetadataAwareOopsInGenClosure {
 private:
  const MemRegion _span;
  CMSBitMap* _bitMap;
@@ -118,7 +80,7 @@ class MarkRefsIntoClosure: public CMSOopsInGenClosure {
  virtual void do_oop(narrowOop* p);
};

class Par_MarkRefsIntoClosure: public CMSOopsInGenClosure {
class Par_MarkRefsIntoClosure: public MetadataAwareOopsInGenClosure {
 private:
  const MemRegion _span;
  CMSBitMap* _bitMap;
@@ -132,7 +94,7 @@ class Par_MarkRefsIntoClosure: public CMSOopsInGenClosure {

// A variant of the above used in certain kinds of CMS
// marking verification.
class MarkRefsIntoVerifyClosure: public CMSOopsInGenClosure {
class MarkRefsIntoVerifyClosure: public MetadataAwareOopsInGenClosure {
 private:
  const MemRegion _span;
  CMSBitMap* _verification_bm;
@@ -147,7 +109,7 @@ class MarkRefsIntoVerifyClosure: public CMSOopsInGenClosure {
};

// The non-parallel version (the parallel version appears further below).
class PushAndMarkClosure: public CMSOopClosure {
class PushAndMarkClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector* _collector;
  MemRegion _span;
@@ -177,7 +139,7 @@ class PushAndMarkClosure: public CMSOopClosure {
// synchronization (for instance, via CAS). The marking stack
// used in the non-parallel case above is here replaced with
// an OopTaskQueue structure to allow efficient work stealing.
class Par_PushAndMarkClosure: public CMSOopClosure {
class Par_PushAndMarkClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector* _collector;
  MemRegion _span;
@@ -198,7 +160,7 @@ class Par_PushAndMarkClosure: public CMSOopClosure {
};

// The non-parallel version (the parallel version appears further below).
class MarkRefsIntoAndScanClosure: public CMSOopsInGenClosure {
class MarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
 private:
  MemRegion _span;
  CMSBitMap* _bit_map;
@@ -239,7 +201,7 @@ class MarkRefsIntoAndScanClosure: public CMSOopsInGenClosure {
// stack and the bitMap are shared, so access needs to be suitably
// synchronized. An OopTaskQueue structure, supporting efficient
// work stealing, replaces a CMSMarkStack for storing grey objects.
class Par_MarkRefsIntoAndScanClosure: public CMSOopsInGenClosure {
class Par_MarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
 private:
  MemRegion _span;
  CMSBitMap* _bit_map;
@@ -265,7 +227,7 @@ class Par_MarkRefsIntoAndScanClosure: public CMSOopsInGenClosure {
// This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in
// the closure MarkFromRootsClosure.
class PushOrMarkClosure: public CMSOopClosure {
class PushOrMarkClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector* _collector;
  MemRegion _span;
@@ -298,7 +260,7 @@ class PushOrMarkClosure: public CMSOopClosure {
// This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in
// the closure Par_MarkFromRootsClosure.
class Par_PushOrMarkClosure: public CMSOopClosure {
class Par_PushOrMarkClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector* _collector;
  MemRegion _whole_span;
@@ -338,7 +300,7 @@ class Par_PushOrMarkClosure: public CMSOopClosure {
// processing phase of the CMS final checkpoint step, as
// well as during the concurrent precleaning of the discovered
// reference lists.
class CMSKeepAliveClosure: public CMSOopClosure {
class CMSKeepAliveClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector* _collector;
  const MemRegion _span;
@@ -358,7 +320,7 @@ class CMSKeepAliveClosure: public CMSOopClosure {
  inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
};

class CMSInnerParMarkAndPushClosure: public CMSOopClosure {
class CMSInnerParMarkAndPushClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector* _collector;
  MemRegion _span;
@@ -379,7 +341,7 @@ class CMSInnerParMarkAndPushClosure: public CMSOopClosure {
// A parallel (MT) version of the above, used when
// reference processing is parallel; the only difference
// is in the do_oop method.
class CMSParKeepAliveClosure: public CMSOopClosure {
class CMSParKeepAliveClosure: public MetadataAwareOopClosure {
 private:
  MemRegion _span;
  OopTaskQueue* _work_queue;
@@ -44,33 +44,20 @@ inline void Par_MarkRefsIntoAndScanClosure::trim_queue(uint max) {
  }
}

// CMSOopClosure and CMSoopsInGenClosure are duplicated,
// MetadataAwareOopClosure and MetadataAwareOopsInGenClosure are duplicated,
// until we get rid of OopsInGenClosure.

inline void CMSOopClosure::do_klass(Klass* k) { do_klass_nv(k); }
inline void CMSOopsInGenClosure::do_klass(Klass* k) { do_klass_nv(k); }

inline void CMSOopClosure::do_klass_nv(Klass* k) {
  ClassLoaderData* cld = k->class_loader_data();
  do_class_loader_data(cld);
}
inline void CMSOopsInGenClosure::do_klass_nv(Klass* k) {
inline void MetadataAwareOopsInGenClosure::do_klass_nv(Klass* k) {
  ClassLoaderData* cld = k->class_loader_data();
  do_class_loader_data(cld);
}
inline void MetadataAwareOopsInGenClosure::do_klass(Klass* k) { do_klass_nv(k); }

inline void CMSOopClosure::do_class_loader_data(ClassLoaderData* cld) {
inline void MetadataAwareOopsInGenClosure::do_class_loader_data(ClassLoaderData* cld) {
  assert(_klass_closure._oop_closure == this, "Must be");

  bool claim = true; // Must claim the class loader data before processing.
  cld->oops_do(_klass_closure._oop_closure, &_klass_closure, claim);
}
inline void CMSOopsInGenClosure::do_class_loader_data(ClassLoaderData* cld) {
  assert(_klass_closure._oop_closure == this, "Must be");

  bool claim = true; // Must claim the class loader data before processing.
  cld->oops_do(_klass_closure._oop_closure, &_klass_closure, claim);
}

#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSOOPCLOSURES_INLINE_HPP
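Aside (a toy sketch, not HotSpot code; all names below are invented): the do_klass()/do_klass_nv() pairing in this hunk follows the usual devirtualization idiom, where the virtual entry point just forwards to a non-virtual "_nv" method so that specialized, statically-typed callers can bypass virtual dispatch and let the compiler inline the body. The shape of the pattern in isolation:

#include <cstdio>

class BaseClosure {
 public:
  // Virtual entry point used by generic code.
  virtual void do_item(int v) { do_item_nv(v); }
  // Non-virtual version that specialized callers invoke directly,
  // letting the compiler bind and inline it statically.
  void do_item_nv(int v) { std::printf("item %d\n", v); }
  virtual ~BaseClosure() {}
};

template <typename ClosureType>
void apply_specialized(ClosureType* cl, int v) {
  cl->do_item_nv(v);   // statically bound, no virtual call
}

int main() {
  BaseClosure cl;
  cl.do_item(1);              // virtual path
  apply_specialized(&cl, 2);  // devirtualized path
  return 0;
}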
@@ -49,7 +49,7 @@
#include "memory/genCollectedHeap.hpp"
#include "memory/genMarkSweep.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/padded.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
@@ -3123,7 +3123,7 @@ void CMSCollector::verify_after_remark_work_2() {
  // Mark from roots one level into CMS
  MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
                                     markBitMap());
  CMKlassClosure klass_closure(&notOlder);
  KlassToOopClosure klass_closure(&notOlder);

  gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
  gch->gen_process_strong_roots(_cmsGen->level(),
@@ -3744,7 +3744,7 @@ void CMSCollector::checkpointRootsInitialWork(bool asynch) {
    gch->set_par_threads(0);
  } else {
    // The serial version.
    CMKlassClosure klass_closure(&notOlder);
    KlassToOopClosure klass_closure(&notOlder);
    gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
    gch->gen_process_strong_roots(_cmsGen->level(),
                                  true, // younger gens are roots
@@ -4206,7 +4206,7 @@ void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
  pst->all_tasks_completed();
}

class Par_ConcMarkingClosure: public CMSOopClosure {
class Par_ConcMarkingClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector* _collector;
  CMSConcMarkingTask* _task;
@@ -4219,7 +4219,7 @@ class Par_ConcMarkingClosure: public CMSOopClosure {
 public:
  Par_ConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
                         CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
    CMSOopClosure(collector->ref_processor()),
    MetadataAwareOopClosure(collector->ref_processor()),
    _collector(collector),
    _task(task),
    _span(collector->_span),
@@ -4990,7 +4990,7 @@ size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
}

class PrecleanKlassClosure : public KlassClosure {
  CMKlassClosure _cm_klass_closure;
  KlassToOopClosure _cm_klass_closure;
 public:
  PrecleanKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
  void do_klass(Klass* k) {
@@ -5228,7 +5228,7 @@ void CMSParInitialMarkTask::work(uint worker_id) {
  _timer.start();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
  CMKlassClosure klass_closure(&par_mri_cl);
  KlassToOopClosure klass_closure(&par_mri_cl);

  // ---------- young gen roots --------------
  {
@@ -5302,7 +5302,7 @@ class CMSParRemarkTask: public CMSParMarkTask {
};

class RemarkKlassClosure : public KlassClosure {
  CMKlassClosure _cm_klass_closure;
  KlassToOopClosure _cm_klass_closure;
 public:
  RemarkKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
  void do_klass(Klass* k) {
@@ -7741,7 +7741,7 @@ PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
    CMSCollector* collector, MemRegion span,
    CMSBitMap* verification_bm, CMSBitMap* cms_bm,
    CMSMarkStack* mark_stack):
  CMSOopClosure(collector->ref_processor()),
  MetadataAwareOopClosure(collector->ref_processor()),
  _collector(collector),
  _span(span),
  _verification_bm(verification_bm),
@@ -7794,7 +7794,7 @@ PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
                                     MemRegion span,
                                     CMSBitMap* bitMap, CMSMarkStack* markStack,
                                     HeapWord* finger, MarkFromRootsClosure* parent) :
  CMSOopClosure(collector->ref_processor()),
  MetadataAwareOopClosure(collector->ref_processor()),
  _collector(collector),
  _span(span),
  _bitMap(bitMap),
@@ -7811,7 +7811,7 @@ Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
                                             HeapWord* finger,
                                             HeapWord** global_finger_addr,
                                             Par_MarkFromRootsClosure* parent) :
  CMSOopClosure(collector->ref_processor()),
  MetadataAwareOopClosure(collector->ref_processor()),
  _collector(collector),
  _whole_span(collector->_span),
  _span(span),
@@ -7860,11 +7860,6 @@ void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
  _overflow_stack->expand(); // expand the stack if possible
}

void CMKlassClosure::do_klass(Klass* k) {
  assert(_oop_closure != NULL, "Not initialized?");
  k->oops_do(_oop_closure);
}

void PushOrMarkClosure::do_oop(oop obj) {
  // Ignore mark word because we are running concurrent with mutators.
  assert(obj->is_oop_or_null(true), "expected an oop or NULL");
@@ -7962,7 +7957,7 @@ PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
                                       CMSBitMap* mod_union_table,
                                       CMSMarkStack* mark_stack,
                                       bool concurrent_precleaning):
  CMSOopClosure(rp),
  MetadataAwareOopClosure(rp),
  _collector(collector),
  _span(span),
  _bit_map(bit_map),
@@ -8035,7 +8030,7 @@ Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
                                               ReferenceProcessor* rp,
                                               CMSBitMap* bit_map,
                                               OopTaskQueue* work_queue):
  CMSOopClosure(rp),
  MetadataAwareOopClosure(rp),
  _collector(collector),
  _span(span),
  _bit_map(bit_map),
@@ -1444,7 +1444,7 @@ class Par_MarkFromRootsClosure: public BitMapClosure {

// The following closures are used to do certain kinds of verification of
// CMS marking.
class PushAndMarkVerifyClosure: public CMSOopClosure {
class PushAndMarkVerifyClosure: public MetadataAwareOopClosure {
  CMSCollector* _collector;
  MemRegion _span;
  CMSBitMap* _verification_bm;
@@ -24,6 +24,7 @@

#include "precompiled.hpp"
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "memory/space.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
@@ -98,6 +99,20 @@ bool G1BlockOffsetSharedArray::is_card_boundary(HeapWord* p) const {
  return (delta & right_n_bits(LogN_words)) == (size_t)NoBits;
}

void G1BlockOffsetSharedArray::set_offset_array(HeapWord* left, HeapWord* right, u_char offset) {
  check_index(index_for(right - 1), "right address out of range");
  assert(left < right, "Heap addresses out of order");
  size_t num_cards = pointer_delta(right, left) >> LogN_words;
  if (UseMemSetInBOT) {
    memset(&_offset_array[index_for(left)], offset, num_cards);
  } else {
    size_t i = index_for(left);
    const size_t end = i + num_cards;
    for (; i < end; i++) {
      _offset_array[i] = offset;
    }
  }
}

//////////////////////////////////////////////////////////////////////
// G1BlockOffsetArray
@@ -107,7 +122,7 @@ G1BlockOffsetArray::G1BlockOffsetArray(G1BlockOffsetSharedArray* array,
                                       MemRegion mr, bool init_to_zero) :
  G1BlockOffsetTable(mr.start(), mr.end()),
  _unallocated_block(_bottom),
  _array(array), _csp(NULL),
  _array(array), _gsp(NULL),
  _init_to_zero(init_to_zero) {
  assert(_bottom <= _end, "arguments out of order");
  if (!_init_to_zero) {
@@ -117,9 +132,8 @@ G1BlockOffsetArray::G1BlockOffsetArray(G1BlockOffsetSharedArray* array,
  }
}

void G1BlockOffsetArray::set_space(Space* sp) {
  _sp = sp;
  _csp = sp->toContiguousSpace();
void G1BlockOffsetArray::set_space(G1OffsetTableContigSpace* sp) {
  _gsp = sp;
}

// The arguments follow the normal convention of denoting
@@ -378,7 +392,7 @@ G1BlockOffsetArray::block_start_unsafe_const(const void* addr) const {
  }
  // Otherwise, find the block start using the table.
  HeapWord* q = block_at_or_preceding(addr, false, 0);
  HeapWord* n = q + _sp->block_size(q);
  HeapWord* n = q + block_size(q);
  return forward_to_block_containing_addr_const(q, n, addr);
}

@@ -406,31 +420,17 @@ G1BlockOffsetArray::forward_to_block_containing_addr_slow(HeapWord* q,
         err_msg("next_boundary is beyond the end of the covered region "
                 " next_boundary " PTR_FORMAT " _array->_end " PTR_FORMAT,
                 next_boundary, _array->_end));
  if (csp() != NULL) {
    if (addr >= csp()->top()) return csp()->top();
    while (next_boundary < addr) {
      while (n <= next_boundary) {
        q = n;
        oop obj = oop(q);
        if (obj->klass_or_null() == NULL) return q;
        n += obj->size();
      }
      assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
      // [q, n) is the block that crosses the boundary.
      alloc_block_work2(&next_boundary, &next_index, q, n);
    }
  } else {
    while (next_boundary < addr) {
      while (n <= next_boundary) {
        q = n;
        oop obj = oop(q);
        if (obj->klass_or_null() == NULL) return q;
        n += _sp->block_size(q);
      }
      assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
      // [q, n) is the block that crosses the boundary.
      alloc_block_work2(&next_boundary, &next_index, q, n);
  if (addr >= gsp()->top()) return gsp()->top();
  while (next_boundary < addr) {
    while (n <= next_boundary) {
      q = n;
      oop obj = oop(q);
      if (obj->klass_or_null() == NULL) return q;
      n += obj->size();
    }
    assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
    // [q, n) is the block that crosses the boundary.
    alloc_block_work2(&next_boundary, &next_index, q, n);
  }
  return forward_to_block_containing_addr_const(q, n, addr);
}
@@ -637,7 +637,7 @@ block_start_unsafe_const(const void* addr) const {
  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  HeapWord* q = block_at_or_preceding(addr, true, _next_offset_index-1);
  HeapWord* n = q + _sp->block_size(q);
  HeapWord* n = q + block_size(q);
  return forward_to_block_containing_addr_const(q, n, addr);
}

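Aside (illustrative sketch only; a plain byte array stands in for _offset_array and a boolean flag for UseMemSetInBOT): the set_offset_array() overload moved into the .cpp file above fills a contiguous range of card entries either with memset or with an explicit loop. Stripped of the G1 bookkeeping, the core of that choice is roughly:

#include <cstring>
#include <cstddef>

// Fill entries [left_index, left_index + num_cards) with 'offset'.
// 'use_memset' plays the role of the UseMemSetInBOT flag.
static void fill_offset_entries(unsigned char* offset_array,
                                size_t left_index,
                                size_t num_cards,
                                unsigned char offset,
                                bool use_memset) {
  if (use_memset) {
    memset(&offset_array[left_index], offset, num_cards);
  } else {
    const size_t end = left_index + num_cards;
    for (size_t i = left_index; i < end; i++) {
      offset_array[i] = offset;
    }
  }
}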
@@ -52,8 +52,8 @@
// consolidation.

// Forward declarations
class ContiguousSpace;
class G1BlockOffsetSharedArray;
class G1OffsetTableContigSpace;

class G1BlockOffsetTable VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
@@ -157,6 +157,8 @@ private:
    return _offset_array[index];
  }

  void set_offset_array(HeapWord* left, HeapWord* right, u_char offset);

  void set_offset_array(size_t index, u_char offset) {
    check_index(index, "index out of range");
    check_offset(offset, "offset too large");
@@ -170,21 +172,6 @@ private:
    _offset_array[index] = (u_char) pointer_delta(high, low);
  }

  void set_offset_array(HeapWord* left, HeapWord* right, u_char offset) {
    check_index(index_for(right - 1), "right address out of range");
    assert(left < right, "Heap addresses out of order");
    size_t num_cards = pointer_delta(right, left) >> LogN_words;
    if (UseMemSetInBOT) {
      memset(&_offset_array[index_for(left)], offset, num_cards);
    } else {
      size_t i = index_for(left);
      const size_t end = i + num_cards;
      for (; i < end; i++) {
        _offset_array[i] = offset;
      }
    }
  }

  void set_offset_array(size_t left, size_t right, u_char offset) {
    check_index(right, "right index out of range");
    assert(left <= right, "indexes out of order");
@@ -281,11 +268,7 @@ private:
  G1BlockOffsetSharedArray* _array;

  // The space that owns this subregion.
  Space* _sp;

  // If "_sp" is a contiguous space, the field below is the view of "_sp"
  // as a contiguous space, else NULL.
  ContiguousSpace* _csp;
  G1OffsetTableContigSpace* _gsp;

  // If true, array entries are initialized to 0; otherwise, they are
  // initialized to point backwards to the beginning of the covered region.
@@ -310,7 +293,9 @@ private:

protected:

  ContiguousSpace* csp() const { return _csp; }
  G1OffsetTableContigSpace* gsp() const { return _gsp; }

  inline size_t block_size(const HeapWord* p) const;

  // Returns the address of a block whose start is at most "addr".
  // If "has_max_index" is true, "assumes "max_index" is the last valid one
@@ -363,7 +348,7 @@ public:
  // "this" to be passed as a parameter to a member constructor for
  // the containing concrete subtype of Space.
  // This would be legal C++, but MS VC++ doesn't allow it.
  void set_space(Space* sp);
  void set_space(G1OffsetTableContigSpace* sp);

  // Resets the covered region to the given "mr".
  void set_region(MemRegion mr);
@@ -26,6 +26,7 @@
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_INLINE_HPP

#include "gc_implementation/g1/g1BlockOffsetTable.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "memory/space.hpp"

inline HeapWord* G1BlockOffsetTable::block_start(const void* addr) {
@@ -69,6 +70,11 @@ G1BlockOffsetSharedArray::address_for_index(size_t index) const {
  return result;
}

inline size_t
G1BlockOffsetArray::block_size(const HeapWord* p) const {
  return gsp()->block_size(p);
}

inline HeapWord*
G1BlockOffsetArray::block_at_or_preceding(const void* addr,
                                          bool has_max_index,
@@ -88,7 +94,7 @@ G1BlockOffsetArray::block_at_or_preceding(const void* addr,
    // to go back by.
    size_t n_cards_back = BlockOffsetArray::entry_to_cards_back(offset);
    q -= (N_words * n_cards_back);
    assert(q >= _sp->bottom(), "Went below bottom!");
    assert(q >= gsp()->bottom(), "Went below bottom!");
    index -= n_cards_back;
    offset = _array->offset_array(index);
  }
@@ -101,21 +107,12 @@ inline HeapWord*
G1BlockOffsetArray::
forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
                                       const void* addr) const {
  if (csp() != NULL) {
    if (addr >= csp()->top()) return csp()->top();
    while (n <= addr) {
      q = n;
      oop obj = oop(q);
      if (obj->klass_or_null() == NULL) return q;
      n += obj->size();
    }
  } else {
    while (n <= addr) {
      q = n;
      oop obj = oop(q);
      if (obj->klass_or_null() == NULL) return q;
      n += _sp->block_size(q);
    }
  if (addr >= gsp()->top()) return gsp()->top();
  while (n <= addr) {
    q = n;
    oop obj = oop(q);
    if (obj->klass_or_null() == NULL) return q;
    n += obj->size();
  }
  assert(q <= n, "wrong order for q and addr");
  assert(addr < n, "wrong order for addr and n");
@@ -126,7 +123,7 @@ inline HeapWord*
G1BlockOffsetArray::forward_to_block_containing_addr(HeapWord* q,
                                                     const void* addr) {
  if (oop(q)->klass_or_null() == NULL) return q;
  HeapWord* n = q + _sp->block_size(q);
  HeapWord* n = q + block_size(q);
  // In the normal case, where the query "addr" is a card boundary, and the
  // offset table chunks are the same size as cards, the block starting at
  // "q" will contain addr, so the test below will fail, and we'll fall
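Aside (a simplified model, not HotSpot code; the word-addressed heap and the klass_or_null() race check are abstracted away): forward_to_block_containing_addr_const() starts from a known block start q with its end n and hops block by block until the block [q, n) covers addr. In isolation the walk is just:

#include <cstddef>

// block_size(q) returns the size, in bytes, of the block starting at q.
// Walks forward from the block [q, q + block_size(q)) until the block
// containing 'addr' is found, and returns that block's start.
template <typename BlockSizeFn>
const char* forward_to_block_containing(const char* q, const char* addr,
                                        BlockSizeFn block_size) {
  const char* n = q + block_size(q);
  while (n <= addr) {   // 'addr' lies beyond the current block: hop to the next one
    q = n;
    n += block_size(q);
  }
  // Now q <= addr < n, so [q, n) is the block containing addr.
  return q;
}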
@@ -5340,17 +5340,14 @@ public:
class G1CopyingKeepAliveClosure: public OopClosure {
  G1CollectedHeap*         _g1h;
  OopClosure*              _copy_non_heap_obj_cl;
  OopsInHeapRegionClosure* _copy_metadata_obj_cl;
  G1ParScanThreadState*    _par_scan_state;

 public:
  G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
                            OopClosure* non_heap_obj_cl,
                            OopsInHeapRegionClosure* metadata_obj_cl,
                            G1ParScanThreadState* pss):
    _g1h(g1h),
    _copy_non_heap_obj_cl(non_heap_obj_cl),
    _copy_metadata_obj_cl(metadata_obj_cl),
    _par_scan_state(pss)
  {}

@@ -5383,7 +5380,7 @@ public:
      _par_scan_state->push_on_queue(p);
    } else {
      assert(!Metaspace::contains((const void*)p),
             err_msg("Otherwise need to call _copy_metadata_obj_cl->do_oop(p) "
             err_msg("Unexpectedly found a pointer from metadata: "
                     PTR_FORMAT, p));
      _copy_non_heap_obj_cl->do_oop(p);
    }
@@ -5478,22 +5475,18 @@ public:
    pss.set_evac_failure_closure(&evac_failure_cl);

    G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
    G1ParScanMetadataClosure       only_copy_metadata_cl(_g1h, &pss, NULL);

    G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
    G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(_g1h, &pss, NULL);

    OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
    OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;

    if (_g1h->g1_policy()->during_initial_mark_pause()) {
      // We also need to mark copied objects.
      copy_non_heap_cl = &copy_mark_non_heap_cl;
      copy_metadata_cl = &copy_mark_metadata_cl;
    }

    // Keep alive closure.
    G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_metadata_cl, &pss);
    G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, &pss);

    // Complete GC closure
    G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator);
@@ -5588,18 +5581,14 @@ public:


    G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
    G1ParScanMetadataClosure       only_copy_metadata_cl(_g1h, &pss, NULL);

    G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
    G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(_g1h, &pss, NULL);

    OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
    OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;

    if (_g1h->g1_policy()->during_initial_mark_pause()) {
      // We also need to mark copied objects.
      copy_non_heap_cl = &copy_mark_non_heap_cl;
      copy_metadata_cl = &copy_mark_metadata_cl;
    }

    // Is alive closure
@@ -5607,7 +5596,7 @@ public:

    // Copying keep alive closure. Applied to referent objects that need
    // to be copied.
    G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_metadata_cl, &pss);
    G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, &pss);

    ReferenceProcessor* rp = _g1h->ref_processor_cm();

@@ -5713,22 +5702,18 @@ void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
  assert(pss.refs()->is_empty(), "pre-condition");

  G1ParScanExtRootClosure        only_copy_non_heap_cl(this, &pss, NULL);
  G1ParScanMetadataClosure       only_copy_metadata_cl(this, &pss, NULL);

  G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);
  G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(this, &pss, NULL);

  OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
  OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;

  if (_g1h->g1_policy()->during_initial_mark_pause()) {
    // We also need to mark copied objects.
    copy_non_heap_cl = &copy_mark_non_heap_cl;
    copy_metadata_cl = &copy_mark_metadata_cl;
  }

  // Keep alive closure.
  G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, copy_metadata_cl, &pss);
  G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, &pss);

  // Serial Complete GC closure
  G1STWDrainQueueClosure drain_queue(this, &pss);
@@ -49,7 +49,7 @@ HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                 HeapRegion* hr, ExtendedOopClosure* cl,
                                 CardTableModRefBS::PrecisionStyle precision,
                                 FilterKind fk) :
  ContiguousSpaceDCTOC(hr, cl, precision, NULL),
  DirtyCardToOopClosure(hr, cl, precision, NULL),
  _hr(hr), _fk(fk), _g1(g1) { }

FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
@@ -78,19 +78,18 @@ HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
  return cur;
}

void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
                                              HeapWord* bottom,
                                              HeapWord* top,
                                              ExtendedOopClosure* cl) {
void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
                                      HeapWord* bottom,
                                      HeapWord* top) {
  G1CollectedHeap* g1h = _g1;
  int oop_size;
  ExtendedOopClosure* cl2 = NULL;

  FilterIntoCSClosure intoCSFilt(this, g1h, cl);
  FilterOutOfRegionClosure outOfRegionFilt(_hr, cl);
  FilterIntoCSClosure intoCSFilt(this, g1h, _cl);
  FilterOutOfRegionClosure outOfRegionFilt(_hr, _cl);

  switch (_fk) {
  case NoFilterKind:          cl2 = cl; break;
  case NoFilterKind:          cl2 = _cl; break;
  case IntoCSFilterKind:      cl2 = &intoCSFilt; break;
  case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
  default:                    ShouldNotReachHere();
@@ -112,17 +111,17 @@ void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
  // We replicate the loop below for several kinds of possible filters.
  switch (_fk) {
  case NoFilterKind:
    bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top);
    bottom = walk_mem_region_loop(_cl, g1h, _hr, bottom, top);
    break;

  case IntoCSFilterKind: {
    FilterIntoCSClosure filt(this, g1h, cl);
    FilterIntoCSClosure filt(this, g1h, _cl);
    bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
    break;
  }

  case OutOfRegionFilterKind: {
    FilterOutOfRegionClosure filt(_hr, cl);
    FilterOutOfRegionClosure filt(_hr, _cl);
    bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
    break;
  }
@@ -25,7 +25,7 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP

#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1BlockOffsetTable.hpp"
#include "gc_implementation/g1/g1_specialized_oop_closures.hpp"
#include "gc_implementation/g1/survRateGroup.hpp"
#include "gc_implementation/shared/ageTable.hpp"
@@ -71,7 +71,7 @@ class nmethod;
// in the concurrent marker used by G1 to filter remembered
// sets.

class HeapRegionDCTOC : public ContiguousSpaceDCTOC {
class HeapRegionDCTOC : public DirtyCardToOopClosure {
public:
  // Specification of possible DirtyCardToOopClosure filtering.
  enum FilterKind {
@@ -85,39 +85,13 @@ protected:
  FilterKind _fk;
  G1CollectedHeap* _g1;

  void walk_mem_region_with_cl(MemRegion mr,
                               HeapWord* bottom, HeapWord* top,
                               ExtendedOopClosure* cl);

  // We don't specialize this for FilteringClosure; filtering is handled by
  // the "FilterKind" mechanism. But we provide this to avoid a compiler
  // warning.
  void walk_mem_region_with_cl(MemRegion mr,
                               HeapWord* bottom, HeapWord* top,
                               FilteringClosure* cl) {
    HeapRegionDCTOC::walk_mem_region_with_cl(mr, bottom, top,
                                             (ExtendedOopClosure*)cl);
  }

  // Get the actual top of the area on which the closure will
  // operate, given where the top is assumed to be (the end of the
  // memory region passed to do_MemRegion) and where the object
  // at the top is assumed to start. For example, an object may
  // start at the top but actually extend past the assumed top,
  // in which case the top becomes the end of the object.
  HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj) {
    return ContiguousSpaceDCTOC::get_actual_top(top, top_obj);
  }

  // Walk the given memory region from bottom to (actual) top
  // looking for objects and applying the oop closure (_cl) to
  // them. The base implementation of this treats the area as
  // blocks, where a block may or may not be an object. Sub-
  // classes should override this to provide more accurate
  // or possibly more efficient walking.
  void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top) {
    Filtering_DCTOC::walk_mem_region(mr, bottom, top);
  }
  void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top);

public:
  HeapRegionDCTOC(G1CollectedHeap* g1,
@@ -25,6 +25,8 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP

#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"

inline HeapWord* G1OffsetTableContigSpace::allocate(size_t size) {
  HeapWord* res = ContiguousSpace::allocate(size);
  if (res != NULL) {
@@ -66,9 +66,10 @@ void GenerationSizer::initialize_flags() {

void GenerationSizer::initialize_size_info() {
  trace_gen_sizes("ps heap raw");
  const size_t page_sz = os::page_size_for_region(_min_heap_byte_size,
                                                  _max_heap_byte_size,
                                                  8);
  const size_t max_page_sz = os::page_size_for_region(_max_heap_byte_size, 8);
  const size_t min_pages = 4; // 1 for eden + 1 for each survivor + 1 for old
  const size_t min_page_sz = os::page_size_for_region(_min_heap_byte_size, min_pages);
  const size_t page_sz = MIN2(max_page_sz, min_page_sz);

  // Can a page size be something else than a power of two?
  assert(is_power_of_2((intptr_t)page_sz), "must be a power of 2");
@@ -56,7 +56,7 @@ ParMarkBitMap::initialize(MemRegion covered_region)

  const size_t words = bits / BitsPerWord;
  const size_t raw_bytes = words * sizeof(idx_t);
  const size_t page_sz = os::page_size_for_region(raw_bytes, raw_bytes, 10);
  const size_t page_sz = os::page_size_for_region(raw_bytes, 10);
  const size_t granularity = os::vm_allocation_granularity();
  _reserved_byte_size = align_size_up(raw_bytes, MAX2(page_sz, granularity));

@@ -403,7 +403,7 @@ PSVirtualSpace*
ParallelCompactData::create_vspace(size_t count, size_t element_size)
{
  const size_t raw_bytes = count * element_size;
  const size_t page_sz = os::page_size_for_region(raw_bytes, raw_bytes, 10);
  const size_t page_sz = os::page_size_for_region(raw_bytes, 10);
  const size_t granularity = os::vm_allocation_granularity();
  _reserved_byte_size = align_size_up(raw_bytes, MAX2(page_sz, granularity));

@@ -98,9 +98,13 @@ bool CodeHeap::reserve(size_t reserved_size, size_t committed_size,
  _log2_segment_size = exact_log2(segment_size);

  // Reserve and initialize space for _memory.
  const size_t page_size = os::can_execute_large_page_memory() ?
                           os::page_size_for_region(committed_size, reserved_size, 8) :
                           os::vm_page_size();
  size_t page_size = os::vm_page_size();
  if (os::can_execute_large_page_memory()) {
    const size_t min_pages = 8;
    page_size = MIN2(os::page_size_for_region(committed_size, min_pages),
                     os::page_size_for_region(reserved_size, min_pages));
  }

  const size_t granularity = os::vm_allocation_granularity();
  const size_t r_align = MAX2(page_size, granularity);
  const size_t r_size = align_size_up(reserved_size, r_align);
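Aside (an illustrative sketch, not the HotSpot implementation; the helper names and the page-size table below are made up for the example): after this change the callers above use a two-argument os::page_size_for_region(region_size, min_pages), which picks the largest supported page size that both divides the region evenly and still leaves at least min_pages pages (see the os.cpp hunk near the end of this diff). The selection logic amounts to:

#include <cstddef>

// Supported page sizes, largest first, terminated by 0 -- a stand-in for os::_page_sizes.
static const size_t kPageSizes[] = { 1024 * 1024, 64 * 1024, 4 * 1024, 0 };

static bool is_size_aligned(size_t size, size_t alignment) {
  return (size % alignment) == 0;
}

// Largest page size <= region_size / min_pages that evenly divides region_size;
// falls back to an assumed base page size otherwise.
static size_t page_size_for_region(size_t region_size, size_t min_pages) {
  const size_t max_page_size = region_size / min_pages;
  for (size_t i = 0; kPageSizes[i] != 0; ++i) {
    const size_t page_size = kPageSizes[i];
    if (page_size <= max_page_size && is_size_aligned(region_size, page_size)) {
      return page_size;
    }
  }
  return 4 * 1024;  // assumed base page size for this sketch
}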
@@ -27,6 +27,7 @@
#include "oops/oop.inline.hpp"

void KlassToOopClosure::do_klass(Klass* k) {
  assert(_oop_closure != NULL, "Not initialized?");
  k->oops_do(_oop_closure);
}

@@ -115,9 +115,19 @@ class CLDClosure : public Closure {
};

class KlassToOopClosure : public KlassClosure {
  friend class MetadataAwareOopClosure;
  friend class MetadataAwareOopsInGenClosure;

  OopClosure* _oop_closure;
 public:
  KlassToOopClosure(OopClosure* oop_closure) : _oop_closure(oop_closure) {}

  // Used when _oop_closure couldn't be set in an initialization list.
  void initialize(OopClosure* oop_closure) {
    assert(_oop_closure == NULL, "Should only be called once");
    _oop_closure = oop_closure;
  }

 public:
  KlassToOopClosure(OopClosure* oop_closure = NULL) : _oop_closure(oop_closure) {}
  virtual void do_klass(Klass* k);
};

@@ -135,6 +145,29 @@ class CLDToOopClosure : public CLDClosure {
  void do_cld(ClassLoaderData* cld);
};

// The base class for all concurrent marking closures,
// that participates in class unloading.
// It's used to proxy through the metadata to the oops defined in them.
class MetadataAwareOopClosure: public ExtendedOopClosure {
  KlassToOopClosure _klass_closure;

 public:
  MetadataAwareOopClosure() : ExtendedOopClosure() {
    _klass_closure.initialize(this);
  }
  MetadataAwareOopClosure(ReferenceProcessor* rp) : ExtendedOopClosure(rp) {
    _klass_closure.initialize(this);
  }

  virtual bool do_metadata() { return do_metadata_nv(); }
  inline bool do_metadata_nv() { return true; }

  virtual void do_klass(Klass* k);
  void do_klass_nv(Klass* k);

  virtual void do_class_loader_data(ClassLoaderData* cld);
};

// ObjectClosure is used for iterating through an object space

class ObjectClosure : public Closure {
@@ -318,4 +351,16 @@ class SymbolClosure : public StackObj {
  }
};

// Helper defines for ExtendOopClosure

#define if_do_metadata_checked(closure, nv_suffix)                    \
  /* Make sure the non-virtual and the virtual versions match. */     \
  assert(closure->do_metadata##nv_suffix() == closure->do_metadata(), \
      "Inconsistency in do_metadata");                                \
  if (closure->do_metadata##nv_suffix())

#define assert_should_ignore_metadata(closure, nv_suffix)                                  \
  assert(!closure->do_metadata##nv_suffix(), "Code to handle metadata is not implemented")

#endif // SHARE_VM_MEMORY_ITERATOR_HPP
hotspot/src/share/vm/memory/iterator.inline.hpp (new file, 47 lines)
@@ -0,0 +1,47 @@
/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_ITERATOR_INLINE_HPP
#define SHARE_VM_MEMORY_ITERATOR_INLINE_HPP

#include "classfile/classLoaderData.hpp"
#include "memory/iterator.hpp"
#include "oops/klass.hpp"
#include "utilities/debug.hpp"

inline void MetadataAwareOopClosure::do_class_loader_data(ClassLoaderData* cld) {
  assert(_klass_closure._oop_closure == this, "Must be");

  bool claim = true;  // Must claim the class loader data before processing.
  cld->oops_do(_klass_closure._oop_closure, &_klass_closure, claim);
}

inline void MetadataAwareOopClosure::do_klass_nv(Klass* k) {
  ClassLoaderData* cld = k->class_loader_data();
  do_class_loader_data(cld);
}

inline void MetadataAwareOopClosure::do_klass(Klass* k) { do_klass_nv(k); }

#endif // SHARE_VM_MEMORY_ITERATOR_INLINE_HPP
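Aside (a generic sketch only; ClassLoaderData's real claiming protocol is more involved than a single flag): passing claim = true to cld->oops_do() acts as a visit-once guard, so concurrent marking closures do not process the same class-loader data twice. The basic claim-before-process idea can be modeled with an atomic flag:

#include <atomic>
#include <cstdio>

struct LoaderData {
  std::atomic<bool> claimed{false};
  int payload = 42;
};

// Returns true for exactly one caller; later callers see the data as already claimed.
static bool try_claim(LoaderData* cld) {
  bool expected = false;
  return cld->claimed.compare_exchange_strong(expected, true);
}

static void process_once(LoaderData* cld) {
  if (!try_claim(cld)) return;   // someone else already processed it
  std::printf("processing payload %d\n", cld->payload);
}

int main() {
  LoaderData cld;
  process_once(&cld);  // processes
  process_once(&cld);  // skipped
  return 0;
}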
@@ -28,6 +28,7 @@
#include "gc_implementation/shared/markSweep.inline.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/oopFactory.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/instanceClassLoaderKlass.hpp"
@@ -44,12 +45,6 @@
#include "oops/oop.pcgc.inline.hpp"
#endif // INCLUDE_ALL_GCS

#define if_do_metadata_checked(closure, nv_suffix)                    \
  /* Make sure the non-virtual and the virtual versions match. */     \
  assert(closure->do_metadata##nv_suffix() == closure->do_metadata(), \
      "Inconsistency in do_metadata");                                \
  if (closure->do_metadata##nv_suffix())

// Macro to define InstanceClassLoaderKlass::oop_oop_iterate for virtual/nonvirtual for
// all closures. Macros calling macros above for each oop size.
// Since ClassLoader objects have only a pointer to the loader_data, they are not

@@ -35,6 +35,7 @@
#include "jvmtifiles/jvmti.h"
#include "memory/genOopClosures.inline.hpp"
#include "memory/heapInspection.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/oopFactory.hpp"
#include "oops/fieldStreams.hpp"
@@ -2114,12 +2115,6 @@ void InstanceKlass::oop_follow_contents(ParCompactionManager* cm,
// closure's do_metadata() method dictates whether the given closure should be
// applied to the klass ptr in the object header.

#define if_do_metadata_checked(closure, nv_suffix)                    \
  /* Make sure the non-virtual and the virtual versions match. */     \
  assert(closure->do_metadata##nv_suffix() == closure->do_metadata(), \
      "Inconsistency in do_metadata");                                \
  if (closure->do_metadata##nv_suffix())

#define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)           \
                                                                                \
int InstanceKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
@@ -2143,10 +2138,9 @@ int InstanceKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure)
int InstanceKlass::oop_oop_iterate_backwards##nv_suffix(oop obj,                \
                                                        OopClosureType* closure) { \
  SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik); \
  /* header */                                                                  \
  if_do_metadata_checked(closure, nv_suffix) {                                  \
    closure->do_klass##nv_suffix(obj->klass());                                 \
  }                                                                             \
                                                                                \
  assert_should_ignore_metadata(closure, nv_suffix);                            \
                                                                                \
  /* instance variables */                                                      \
  InstanceKlass_OOP_MAP_REVERSE_ITERATE(                                        \
    obj,                                                                        \

@@ -28,6 +28,7 @@
#include "gc_implementation/shared/markSweep.inline.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/oopFactory.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/instanceMirrorKlass.hpp"
@@ -241,12 +242,6 @@ int InstanceMirrorKlass::oop_adjust_pointers(oop obj) {
  return oop_size(obj);                                                    \


#define if_do_metadata_checked(closure, nv_suffix)                    \
  /* Make sure the non-virtual and the virtual versions match. */     \
  assert(closure->do_metadata##nv_suffix() == closure->do_metadata(), \
      "Inconsistency in do_metadata");                                \
  if (closure->do_metadata##nv_suffix())

// Macro to define InstanceMirrorKlass::oop_oop_iterate for virtual/nonvirtual for
// all closures. Macros calling macros above for each oop size.


@@ -29,6 +29,7 @@
#include "gc_implementation/shared/markSweep.inline.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.inline.hpp"
@@ -476,12 +477,6 @@ void ObjArrayKlass::oop_follow_contents(ParCompactionManager* cm,
}
#endif // INCLUDE_ALL_GCS

#define if_do_metadata_checked(closure, nv_suffix)                    \
  /* Make sure the non-virtual and the virtual versions match. */     \
  assert(closure->do_metadata##nv_suffix() == closure->do_metadata(), \
      "Inconsistency in do_metadata");                                \
  if (closure->do_metadata##nv_suffix())

#define ObjArrayKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)          \
                                                                               \
int ObjArrayKlass::oop_oop_iterate##nv_suffix(oop obj,                         \
@ -3865,6 +3865,7 @@ _JNI_IMPORT_OR_EXPORT_ jint JNICALL JNI_GetDefaultJavaVMInitArgs(void *args_) {
  unit_test_function_call

// Forward declaration
void TestOS_test();
void TestReservedSpace_test();
void TestReserveMemorySpecial_test();
void TestVirtualSpace_test();
@ -3886,6 +3887,7 @@ void TestCodeCacheRemSet_test();
void execute_internal_vm_tests() {
  if (ExecuteInternalVMTests) {
    tty->print_cr("Running internal VM tests");
    run_unit_test(TestOS_test());
    run_unit_test(TestReservedSpace_test());
    run_unit_test(TestReserveMemorySpecial_test());
    run_unit_test(TestVirtualSpace_test());
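The new TestOS hook follows the existing internal-test wiring in jni.cpp: a forward declaration next to the other Test*_test() functions, plus a run_unit_test(...) entry inside execute_internal_vm_tests(), which only runs when the ExecuteInternalVMTests flag is set. A hedged sketch of adding one more test the same way (TestFoo_test is an invented name, not part of this changeset):

// Hypothetical addition following the pattern above.
void TestFoo_test();                 // forward declaration next to the others

// ... and inside execute_internal_vm_tests(), alongside the existing entries:
//   run_unit_test(TestFoo_test());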
@ -1303,24 +1303,15 @@ bool os::stack_shadow_pages_available(Thread *thread, methodHandle method) {
  return (sp > (stack_limit + reserved_area));
}

size_t os::page_size_for_region(size_t region_min_size, size_t region_max_size,
                                uint min_pages)
{
size_t os::page_size_for_region(size_t region_size, size_t min_pages) {
  assert(min_pages > 0, "sanity");
  if (UseLargePages) {
    const size_t max_page_size = region_max_size / min_pages;
    const size_t max_page_size = region_size / min_pages;

    for (unsigned int i = 0; _page_sizes[i] != 0; ++i) {
      const size_t sz = _page_sizes[i];
      const size_t mask = sz - 1;
      if ((region_min_size & mask) == 0 && (region_max_size & mask) == 0) {
        // The largest page size with no fragmentation.
        return sz;
      }

      if (sz <= max_page_size) {
        // The largest page size that satisfies the min_pages requirement.
        return sz;
    for (size_t i = 0; _page_sizes[i] != 0; ++i) {
      const size_t page_size = _page_sizes[i];
      if (page_size <= max_page_size && is_size_aligned(region_size, page_size)) {
        return page_size;
      }
    }
  }
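The rewritten loop keeps the "largest usable page" idea but collapses the two old checks into one: a candidate page size is taken only if it is no larger than region_size / min_pages and divides region_size exactly (is_size_aligned). A standalone restatement of the rule with a worked example, assuming a hypothetical platform whose page sizes are 4M and 4K (the helper below is an illustration, not the HotSpot code itself):

// Invented helper that restates the new selection rule: walk page sizes from
// largest to smallest and take the first one that both divides region_size
// and keeps at least min_pages pages.
#include <cassert>
#include <cstddef>

static size_t pick_page_size(const size_t* page_sizes,   // descending, 0-terminated
                             size_t region_size, size_t min_pages,
                             size_t small_page) {
  const size_t max_page_size = region_size / min_pages;
  for (size_t i = 0; page_sizes[i] != 0; ++i) {
    const size_t sz = page_sizes[i];
    if (sz <= max_page_size && region_size % sz == 0) {
      return sz;
    }
  }
  return small_page;  // fall back to the default (small) page size
}

int main() {
  const size_t K = 1024, M = K * K;
  const size_t sizes[] = { 4 * M, 4 * K, 0 };                     // hypothetical platform
  assert(pick_page_size(sizes, 8 * M,      1, 4 * K) == 4 * M);   // large page fits and aligns
  assert(pick_page_size(sizes, 8 * M,      4, 4 * K) == 4 * K);   // min_pages forces small pages
  assert(pick_page_size(sizes, 8 * M + 17, 1, 4 * K) == 4 * K);   // unaligned region -> small page
  return 0;
}

The second and third cases mirror the new TestOS::test_page_size_for_region and test_page_size_for_region_alignment checks added in the next hunk.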
@ -1548,3 +1539,63 @@ os::SuspendResume::State os::SuspendResume::switch_state(os::SuspendResume::Stat
  return result;
}
#endif

/////////////// Unit tests ///////////////

#ifndef PRODUCT

#define assert_eq(a,b) assert(a == b, err_msg(SIZE_FORMAT " != " SIZE_FORMAT, a, b))

class TestOS : AllStatic {
  static size_t small_page_size() {
    return os::vm_page_size();
  }

  static size_t large_page_size() {
    const size_t large_page_size_example = 4 * M;
    return os::page_size_for_region(large_page_size_example, 1);
  }

  static void test_page_size_for_region() {
    if (UseLargePages) {
      const size_t small_page = small_page_size();
      const size_t large_page = large_page_size();

      if (large_page > small_page) {
        size_t num_small_pages_in_large = large_page / small_page;
        size_t page = os::page_size_for_region(large_page, num_small_pages_in_large);

        assert_eq(page, small_page);
      }
    }
  }

  static void test_page_size_for_region_alignment() {
    if (UseLargePages) {
      const size_t small_page = small_page_size();
      const size_t large_page = large_page_size();
      if (large_page > small_page) {
        const size_t unaligned_region = large_page + 17;
        size_t page = os::page_size_for_region(unaligned_region, 1);
        assert_eq(page, small_page);

        const size_t num_pages = 5;
        const size_t aligned_region = large_page * num_pages;
        page = os::page_size_for_region(aligned_region, num_pages);
        assert_eq(page, large_page);
      }
    }
  }

 public:
  static void run_tests() {
    test_page_size_for_region();
    test_page_size_for_region_alignment();
  }
};

void TestOS_test() {
  TestOS::run_tests();
}

#endif // PRODUCT
@ -253,19 +253,11 @@ class os: AllStatic {
  // Return the default page size.
  static int vm_page_size();

  // Return the page size to use for a region of memory.  The min_pages argument
  // is a hint intended to limit fragmentation; it says the returned page size
  // should be <= region_max_size / min_pages.  Because min_pages is a hint,
  // this routine may return a size larger than region_max_size / min_pages.
  //
  // The current implementation ignores min_pages if a larger page size is an
  // exact multiple of both region_min_size and region_max_size.  This allows
  // larger pages to be used when doing so would not cause fragmentation; in
  // particular, a single page can be used when region_min_size ==
  // region_max_size == a supported page size.
  static size_t page_size_for_region(size_t region_min_size,
                                     size_t region_max_size,
                                     uint min_pages);
  // Returns the page size to use for a region of memory.
  // region_size / min_pages will always be greater than or equal to the
  // returned value.
  static size_t page_size_for_region(size_t region_size, size_t min_pages);

  // Return the largest page size that can be used
  static size_t max_page_size() {
    // The _page_sizes array is sorted in descending order.
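The new declaration also tightens the contract: unlike the old "hint" wording, the returned page size never exceeds region_size / min_pages, so a caller can use it directly as a commit granularity without re-checking that min_pages chunks fit. That is exactly how the virtualspace.cpp call sites in the next hunks use it; a minimal hedged usage sketch (the helper name is invented):

// Invented helper for illustration; it just restates the documented guarantee.
size_t choose_commit_granularity(size_t region_size) {
  const size_t granularity = os::page_size_for_region(region_size, 1);
  assert(granularity <= region_size, "guaranteed: region_size / 1 >= result");
  return granularity;
}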
@ -53,7 +53,7 @@ ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
}

ReservedSpace::ReservedSpace(size_t size) {
  size_t page_size = os::page_size_for_region(size, size, 1);
  size_t page_size = os::page_size_for_region(size, 1);
  bool large_pages = page_size != (size_t)os::vm_page_size();
  // Don't force the alignment to be large page aligned,
  // since that will waste memory.
@ -372,7 +372,7 @@ VirtualSpace::VirtualSpace() {


bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  const size_t max_commit_granularity = os::page_size_for_region(rs.size(), rs.size(), 1);
  const size_t max_commit_granularity = os::page_size_for_region(rs.size(), 1);
  return initialize_with_granularity(rs, committed_size, max_commit_granularity);
}

@ -1007,7 +1007,7 @@ class TestVirtualSpace : AllStatic {
    case Disable:
      return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
    case Commit:
      return vs.initialize_with_granularity(rs, 0, os::page_size_for_region(rs.size(), rs.size(), 1));
      return vs.initialize_with_granularity(rs, 0, os::page_size_for_region(rs.size(), 1));
    }
  }

@ -22,7 +22,6 @@
 */

/*
 * @ignore 8027915
 * @test TestParallelHeapSizeFlags
 * @key gc
 * @bug 8006088

@ -88,7 +88,6 @@ public class TestSummarizeRSetStatsTools {
        ArrayList<String> finalargs = new ArrayList<String>();
        String[] defaultArgs = new String[] {
            "-XX:+UseG1GC",
            "-XX:+UseCompressedOops",
            "-Xmn4m",
            "-Xmx20m",
            "-XX:InitiatingHeapOccupancyPercent=100", // we don't want the additional GCs due to initial marking