commit ab1485b385
Merge
@@ -766,16 +766,16 @@ enum FieldAllocationType {
 struct FieldAllocationCount {
-  int static_oop_count;
-  int static_byte_count;
-  int static_short_count;
-  int static_word_count;
-  int static_double_count;
-  int nonstatic_oop_count;
-  int nonstatic_byte_count;
-  int nonstatic_short_count;
-  int nonstatic_word_count;
-  int nonstatic_double_count;
+  unsigned int static_oop_count;
+  unsigned int static_byte_count;
+  unsigned int static_short_count;
+  unsigned int static_word_count;
+  unsigned int static_double_count;
+  unsigned int nonstatic_oop_count;
+  unsigned int nonstatic_byte_count;
+  unsigned int nonstatic_short_count;
+  unsigned int nonstatic_word_count;
+  unsigned int nonstatic_double_count;
 };

 typeArrayHandle ClassFileParser::parse_fields(constantPoolHandle cp, bool is_interface,
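The hunk above turns every field tally from int into unsigned int. The diff itself does not state the motivation, but a plausible one is keeping the later size arithmetic and comparisons in a single signedness. A self-contained sketch (not HotSpot code) of the mixed-signedness pitfall such a change sidesteps:

#include <cstdio>

int main() {
  int count = -1;                  // hypothetical tally that went negative
  unsigned int capacity = 8;
  if (count < (int)capacity) {
    std::printf("signed compare: -1 < 8 holds\n");
  }
  // Viewed as unsigned, the same -1 wraps around to UINT_MAX,
  // so a size-style comparison silently gives the opposite answer.
  if ((unsigned int)count > capacity) {
    std::printf("unsigned view: -1 wraps to a huge value\n");
  }
  return 0;
}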
@@ -2908,11 +2908,11 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
     }
     // end of "discovered" field compactibility fix

-    int nonstatic_double_count = fac.nonstatic_double_count;
-    int nonstatic_word_count = fac.nonstatic_word_count;
-    int nonstatic_short_count = fac.nonstatic_short_count;
-    int nonstatic_byte_count = fac.nonstatic_byte_count;
-    int nonstatic_oop_count = fac.nonstatic_oop_count;
+    unsigned int nonstatic_double_count = fac.nonstatic_double_count;
+    unsigned int nonstatic_word_count = fac.nonstatic_word_count;
+    unsigned int nonstatic_short_count = fac.nonstatic_short_count;
+    unsigned int nonstatic_byte_count = fac.nonstatic_byte_count;
+    unsigned int nonstatic_oop_count = fac.nonstatic_oop_count;

     bool super_has_nonstatic_fields =
       (super_klass() != NULL && super_klass->has_nonstatic_fields());
@@ -2922,24 +2922,24 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
                                   nonstatic_oop_count) != 0);

-    // Prepare list of oops for oop maps generation.
-    u2* nonstatic_oop_offsets;
-    u2* nonstatic_oop_length;
-    int nonstatic_oop_map_count = 0;
+    // Prepare list of oops for oop map generation.
+    int* nonstatic_oop_offsets;
+    unsigned int* nonstatic_oop_counts;
+    unsigned int nonstatic_oop_map_count = 0;

     nonstatic_oop_offsets = NEW_RESOURCE_ARRAY_IN_THREAD(
-              THREAD, u2, nonstatic_oop_count+1);
-    nonstatic_oop_length = NEW_RESOURCE_ARRAY_IN_THREAD(
-              THREAD, u2, nonstatic_oop_count+1);
+              THREAD, int, nonstatic_oop_count + 1);
+    nonstatic_oop_counts = NEW_RESOURCE_ARRAY_IN_THREAD(
+              THREAD, unsigned int, nonstatic_oop_count + 1);

     // Add fake fields for java.lang.Class instances (also see above).
     // FieldsAllocationStyle and CompactFields values will be reset to default.
     if(class_name() == vmSymbols::java_lang_Class() && class_loader.is_null()) {
       java_lang_Class_fix_post(&next_nonstatic_field_offset);
-      nonstatic_oop_offsets[0] = (u2)first_nonstatic_field_offset;
-      int fake_oop_count = (( next_nonstatic_field_offset -
-                              first_nonstatic_field_offset ) / heapOopSize);
-      nonstatic_oop_length [0] = (u2)fake_oop_count;
+      nonstatic_oop_offsets[0] = first_nonstatic_field_offset;
+      const uint fake_oop_count = (next_nonstatic_field_offset -
+                                   first_nonstatic_field_offset) / heapOopSize;
+      nonstatic_oop_counts[0] = fake_oop_count;
       nonstatic_oop_map_count = 1;
       nonstatic_oop_count -= fake_oop_count;
       first_nonstatic_oop_offset = first_nonstatic_field_offset;
@@ -3119,13 +3119,15 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
           // Update oop maps
           if( nonstatic_oop_map_count > 0 &&
               nonstatic_oop_offsets[nonstatic_oop_map_count - 1] ==
-              (u2)(real_offset - nonstatic_oop_length[nonstatic_oop_map_count - 1] * heapOopSize) ) {
+              real_offset -
+              int(nonstatic_oop_counts[nonstatic_oop_map_count - 1]) *
+              heapOopSize ) {
             // Extend current oop map
-            nonstatic_oop_length[nonstatic_oop_map_count - 1] += 1;
+            nonstatic_oop_counts[nonstatic_oop_map_count - 1] += 1;
           } else {
             // Create new oop map
-            nonstatic_oop_offsets[nonstatic_oop_map_count] = (u2)real_offset;
-            nonstatic_oop_length [nonstatic_oop_map_count] = 1;
+            nonstatic_oop_offsets[nonstatic_oop_map_count] = real_offset;
+            nonstatic_oop_counts [nonstatic_oop_map_count] = 1;
             nonstatic_oop_map_count += 1;
             if( first_nonstatic_oop_offset == 0 ) { // Undefined
               first_nonstatic_oop_offset = real_offset;
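The hunk above decides, for each oop field laid out at real_offset, whether it is contiguous with the last oop map entry (extend the run) or starts a fresh run (open a new entry). A self-contained sketch of that run-length grouping, using hypothetical names and an assumed 8-byte heapOopSize:

#include <cstdio>
#include <vector>

// Hypothetical stand-in for an oop map entry: a run of consecutive oops.
struct MapEntry { int offset; unsigned int count; };

const int kHeapOopSize = 8;  // assumed oop width for the sketch

// Append an oop at 'real_offset': extend the last run when contiguous,
// otherwise start a new run -- mirroring the extend-vs-create logic above.
void add_oop(std::vector<MapEntry>& maps, int real_offset) {
  if (!maps.empty() &&
      maps.back().offset ==
          real_offset - int(maps.back().count) * kHeapOopSize) {
    maps.back().count += 1;            // contiguous: grow the existing run
  } else {
    MapEntry e = { real_offset, 1 };   // gap: open a new run
    maps.push_back(e);
  }
}

int main() {
  std::vector<MapEntry> maps;
  const int offsets[] = {16, 24, 32, 56, 64};
  for (int i = 0; i < 5; ++i) add_oop(maps, offsets[i]);
  for (size_t i = 0; i < maps.size(); ++i)
    std::printf("offset %d, count %u\n", maps[i].offset, maps[i].count);
  // Prints two runs: {16, 3} and {56, 2}.
  return 0;
}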
@@ -3182,8 +3184,10 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,

    assert(instance_size == align_object_size(align_size_up((instanceOopDesc::base_offset_in_bytes() + nonstatic_field_size*heapOopSize), wordSize) / wordSize), "consistent layout helper value");

-    // Size of non-static oop map blocks (in words) allocated at end of klass
-    int nonstatic_oop_map_size = compute_oop_map_size(super_klass, nonstatic_oop_map_count, first_nonstatic_oop_offset);
+    // Number of non-static oop map blocks allocated at end of klass.
+    const unsigned int total_oop_map_count =
+      compute_oop_map_count(super_klass, nonstatic_oop_map_count,
+                            first_nonstatic_oop_offset);

     // Compute reference type
     ReferenceType rt;
@@ -3194,14 +3198,15 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
     }

     // We can now create the basic klassOop for this klass
-    klassOop ik = oopFactory::new_instanceKlass(
-                                    vtable_size, itable_size,
-                                    static_field_size, nonstatic_oop_map_size,
+    klassOop ik = oopFactory::new_instanceKlass(vtable_size, itable_size,
+                                                static_field_size,
+                                                total_oop_map_count,
                                                 rt, CHECK_(nullHandle));
     instanceKlassHandle this_klass (THREAD, ik);

-    assert(this_klass->static_field_size() == static_field_size &&
-           this_klass->nonstatic_oop_map_size() == nonstatic_oop_map_size, "sanity check");
+    assert(this_klass->static_field_size() == static_field_size, "sanity");
+    assert(this_klass->nonstatic_oop_map_count() == total_oop_map_count,
+           "sanity");

     // Fill in information already parsed
     this_klass->set_access_flags(access_flags);
@@ -3282,7 +3287,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
     klassItable::setup_itable_offset_table(this_klass);

     // Do final class setup
-    fill_oop_maps(this_klass, nonstatic_oop_map_count, nonstatic_oop_offsets, nonstatic_oop_length);
+    fill_oop_maps(this_klass, nonstatic_oop_map_count, nonstatic_oop_offsets, nonstatic_oop_counts);

     set_precomputed_flags(this_klass);

@@ -3375,18 +3380,23 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
 }


-int ClassFileParser::compute_oop_map_size(instanceKlassHandle super, int nonstatic_oop_map_count, int first_nonstatic_oop_offset) {
-  int map_size = super.is_null() ? 0 : super->nonstatic_oop_map_size();
+unsigned int
+ClassFileParser::compute_oop_map_count(instanceKlassHandle super,
+                                       unsigned int nonstatic_oop_map_count,
+                                       int first_nonstatic_oop_offset) {
+  unsigned int map_count =
+    super.is_null() ? 0 : super->nonstatic_oop_map_count();
   if (nonstatic_oop_map_count > 0) {
     // We have oops to add to map
-    if (map_size == 0) {
-      map_size = nonstatic_oop_map_count;
+    if (map_count == 0) {
+      map_count = nonstatic_oop_map_count;
     } else {
-      // Check whether we should add a new map block or whether the last one can be extended
-      OopMapBlock* first_map = super->start_of_nonstatic_oop_maps();
-      OopMapBlock* last_map = first_map + map_size - 1;
+      // Check whether we should add a new map block or whether the last one can
+      // be extended
+      OopMapBlock* const first_map = super->start_of_nonstatic_oop_maps();
+      OopMapBlock* const last_map = first_map + map_count - 1;

-      int next_offset = last_map->offset() + (last_map->length() * heapOopSize);
+      int next_offset = last_map->offset() + last_map->count() * heapOopSize;
       if (next_offset == first_nonstatic_oop_offset) {
         // There is no gap between superklass's last oop field and first
         // local oop field, merge maps.
@@ -3395,46 +3405,48 @@ int ClassFileParser::compute_oop_map_size(instanceKlassHandle super, int nonstat
         // Superklass didn't end with an oop field, add extra maps
         assert(next_offset < first_nonstatic_oop_offset, "just checking");
       }
-      map_size += nonstatic_oop_map_count;
+      map_count += nonstatic_oop_map_count;
     }
   }
-  return map_size;
+  return map_count;
 }


 void ClassFileParser::fill_oop_maps(instanceKlassHandle k,
-                                    int nonstatic_oop_map_count,
-                                    u2* nonstatic_oop_offsets, u2* nonstatic_oop_length) {
+                                    unsigned int nonstatic_oop_map_count,
+                                    int* nonstatic_oop_offsets,
+                                    unsigned int* nonstatic_oop_counts) {
   OopMapBlock* this_oop_map = k->start_of_nonstatic_oop_maps();
-  OopMapBlock* last_oop_map = this_oop_map + k->nonstatic_oop_map_size();
-  instanceKlass* super = k->superklass();
-  if (super != NULL) {
-    int super_oop_map_size = super->nonstatic_oop_map_size();
-    OopMapBlock* super_oop_map = super->start_of_nonstatic_oop_maps();
+  const instanceKlass* const super = k->superklass();
+  const unsigned int super_count = super ? super->nonstatic_oop_map_count() : 0;
+  if (super_count > 0) {
     // Copy maps from superklass
-    while (super_oop_map_size-- > 0) {
+    OopMapBlock* super_oop_map = super->start_of_nonstatic_oop_maps();
+    for (unsigned int i = 0; i < super_count; ++i) {
       *this_oop_map++ = *super_oop_map++;
     }
   }

   if (nonstatic_oop_map_count > 0) {
-    if (this_oop_map + nonstatic_oop_map_count > last_oop_map) {
-      // Calculated in compute_oop_map_size() number of oop maps is less then
-      // collected oop maps since there is no gap between superklass's last oop
-      // field and first local oop field. Extend the last oop map copied
+    if (super_count + nonstatic_oop_map_count > k->nonstatic_oop_map_count()) {
+      // The counts differ because there is no gap between superklass's last oop
+      // field and the first local oop field. Extend the last oop map copied
       // from the superklass instead of creating a new one.
       nonstatic_oop_map_count--;
       nonstatic_oop_offsets++;
       this_oop_map--;
-      this_oop_map->set_length(this_oop_map->length() + *nonstatic_oop_length++);
+      this_oop_map->set_count(this_oop_map->count() + *nonstatic_oop_counts++);
       this_oop_map++;
     }
-    assert((this_oop_map + nonstatic_oop_map_count) == last_oop_map, "just checking");

     // Add new map blocks, fill them
     while (nonstatic_oop_map_count-- > 0) {
       this_oop_map->set_offset(*nonstatic_oop_offsets++);
-      this_oop_map->set_length(*nonstatic_oop_length++);
+      this_oop_map->set_count(*nonstatic_oop_counts++);
       this_oop_map++;
     }
+    assert(k->start_of_nonstatic_oop_maps() + k->nonstatic_oop_map_count() ==
+           this_oop_map, "sanity");
   }
 }

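compute_oop_map_count() and fill_oop_maps() cooperate: the first sizes the block array, merging the first local run into the superclass's last run when they abut; the second copies the superclass blocks and then extends or appends. A compact model of that shared merge rule, with hypothetical types and an assumed 8-byte oop:

#include <cassert>
#include <vector>

struct OopRun { int offset; unsigned int count; };  // stand-in for OopMapBlock

const int kHeapOopSize = 8;  // assumed oop width

// Combine the superclass's runs with the subclass's local runs, merging
// across the boundary when the first local run starts exactly where the
// superclass's last run ends -- the rule both functions above agree on.
std::vector<OopRun> combine(const std::vector<OopRun>& super,
                            const std::vector<OopRun>& local) {
  std::vector<OopRun> out = super;            // "copy maps from superklass"
  size_t i = 0;
  if (!out.empty() && !local.empty()) {
    int next_offset = out.back().offset + int(out.back().count) * kHeapOopSize;
    if (next_offset == local[0].offset) {     // no gap: extend, don't append
      out.back().count += local[0].count;
      i = 1;
    }
  }
  for (; i < local.size(); ++i) out.push_back(local[i]);  // add new blocks
  return out;
}

int main() {
  std::vector<OopRun> super(1); super[0].offset = 16; super[0].count = 2;
  std::vector<OopRun> local(2);
  local[0].offset = 32; local[0].count = 1;   // 32 abuts 16 + 2*8: merges
  local[1].offset = 56; local[1].count = 2;   // gap: stays a separate block
  std::vector<OopRun> all = combine(super, local);
  assert(all.size() == 2 && all[0].count == 3 && all[1].offset == 56);
  return 0;
}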
@@ -125,10 +125,13 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
                                     int runtime_invisible_annotations_length, TRAPS);

   // Final setup
-  int compute_oop_map_size(instanceKlassHandle super, int nonstatic_oop_count,
+  unsigned int compute_oop_map_count(instanceKlassHandle super,
+                                     unsigned int nonstatic_oop_count,
                                      int first_nonstatic_oop_offset);
-  void fill_oop_maps(instanceKlassHandle k, int nonstatic_oop_map_count,
-                     u2* nonstatic_oop_offsets, u2* nonstatic_oop_length);
+  void fill_oop_maps(instanceKlassHandle k,
+                     unsigned int nonstatic_oop_map_count,
+                     int* nonstatic_oop_offsets,
+                     unsigned int* nonstatic_oop_counts);
   void set_precomputed_flags(instanceKlassHandle k);
   objArrayHandle compute_transitive_interfaces(instanceKlassHandle super,
                                                objArrayHandle local_ifs, TRAPS);
@@ -1079,6 +1079,10 @@ void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
                   this, (address)_method, (address)cause);
     cause->klass()->print();
   }
+  // Unlink the osr method, so we do not look this up again
+  if (is_osr_method()) {
+    invalidate_osr_method();
+  }
   // If _method is already NULL the methodOop is about to be unloaded,
   // so we don't have to break the cycle. Note that it is possible to
   // have the methodOop live here, in case we unload the nmethod because
@@ -1148,7 +1152,7 @@ void nmethod::make_not_entrant_or_zombie(int state) {
   // will never be used anymore. That the nmethods only get removed when class unloading
   // happens makes life much simpler, since the nmethods are not just going to disappear
   // out of the blue.
-  if (is_osr_only_method()) {
+  if (is_osr_method()) {
     if (osr_entry_bci() != InvalidOSREntryBci) {
       // only log this once
       log_state_change(state);
@@ -1520,6 +1524,17 @@ void nmethod::do_unloading(BoolObjectClosure* is_alive,
 #endif // !PRODUCT
 }

+// This method is called twice during GC -- once while
+// tracing the "active" nmethods on thread stacks during
+// the (strong) marking phase, and then again when walking
+// the code cache contents during the weak roots processing
+// phase. The two uses are distinguished by means of the
+// do_nmethods() method in the closure "f" below -- which
+// answers "yes" in the first case, and "no" in the second
+// case. We want to walk the weak roots in the nmethod
+// only in the second case. The weak roots in the nmethod
+// are the oops in the ExceptionCache and the InlineCache
+// oops.
 void nmethod::oops_do(OopClosure* f) {
   // make sure the oops are ready to receive visitors
   assert(!is_zombie() && !is_unloaded(),
@@ -1538,19 +1553,25 @@ void nmethod::oops_do(OopClosure* f) {

   // Compiled code
   f->do_oop((oop*) &_method);
+  if (!f->do_nmethods()) {
+    // weak roots processing phase -- update ExceptionCache oops
+    ExceptionCache* ec = exception_cache();
+    while(ec != NULL) {
+      f->do_oop((oop*)ec->exception_type_addr());
+      ec = ec->next();
+    }
+  } // Else strong roots phase -- skip oops in ExceptionCache

   RelocIterator iter(this, low_boundary);

   while (iter.next()) {
     if (iter.type() == relocInfo::oop_type ) {
       oop_Relocation* r = iter.oop_reloc();
       // In this loop, we must only follow those oops directly embedded in
       // the code. Other oops (oop_index>0) are seen as part of scopes_oops.
-      assert(1 == (r->oop_is_immediate()) + (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()), "oop must be found in exactly one place");
+      assert(1 == (r->oop_is_immediate()) +
+                  (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
+             "oop must be found in exactly one place");
       if (r->oop_is_immediate() && r->oop_value() != NULL) {
         f->do_oop(r->oop_addr());
       }
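The added comment block and guard formalize a two-phase visit: closures used during strong marking answer true from do_nmethods() and skip the weak roots, while weak-phase closures answer false and also scan the ExceptionCache. A minimal sketch of that dispatch pattern with hypothetical types:

#include <cstdio>
#include <vector>

// Hypothetical closure interface mirroring the do_nmethods() predicate.
struct Visitor {
  virtual bool strong_phase() const = 0;   // stands in for do_nmethods()
  virtual void visit(int root) = 0;
  virtual ~Visitor() {}
};

struct Holder {
  std::vector<int> strong_roots;
  std::vector<int> weak_roots;   // e.g. exception-cache style entries

  void roots_do(Visitor* v) {
    for (size_t i = 0; i < strong_roots.size(); ++i) v->visit(strong_roots[i]);
    if (!v->strong_phase()) {
      // Only the weak-roots pass touches these -- the same shape as the
      // "!f->do_nmethods()" guard in nmethod::oops_do above.
      for (size_t i = 0; i < weak_roots.size(); ++i) v->visit(weak_roots[i]);
    }
  }
};

struct Printer : Visitor {
  bool strong;
  explicit Printer(bool s) : strong(s) {}
  bool strong_phase() const { return strong; }
  void visit(int root) { std::printf("visit %d\n", root); }
};

int main() {
  Holder h;
  h.strong_roots.push_back(1); h.strong_roots.push_back(2);
  h.weak_roots.push_back(9);
  Printer strong_pass(true), weak_pass(false);
  h.roots_do(&strong_pass);  // visits 1, 2
  h.roots_do(&weak_pass);    // visits 1, 2, 9
  return 0;
}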
@@ -314,7 +314,6 @@ class nmethod : public CodeBlob {
   bool is_java_method() const { return !method()->is_native(); }
   bool is_native_method() const { return method()->is_native(); }
   bool is_osr_method() const { return _entry_bci != InvocationEntryBci; }
-  bool is_osr_only_method() const { return is_osr_method(); }

   bool is_compiled_by_c1() const;
   bool is_compiled_by_c2() const;
@@ -92,17 +92,50 @@ class MarkRefsIntoVerifyClosure: public OopsInGenClosure {
   }
 };

-// The non-parallel version (the parallel version appears further below).
-class PushAndMarkClosure: public OopClosure {
- private:
+// KlassRememberingOopClosure is used when marking of the permanent generation
+// is being done. It adds fields to support revisiting of klasses
+// for class unloading. _should_remember_klasses should be set to
+// indicate if klasses should be remembered. Currently that is whenever
+// CMS class unloading is turned on. The _revisit_stack is used
+// to save the klasses for later processing.
+class KlassRememberingOopClosure : public OopClosure {
+ protected:
   CMSCollector* _collector;
+  CMSMarkStack* _revisit_stack;
+  bool const    _should_remember_klasses;
+ public:
+  void check_remember_klasses() const PRODUCT_RETURN;
+  virtual const bool should_remember_klasses() const {
+    check_remember_klasses();
+    return _should_remember_klasses;
+  }
+  virtual void remember_klass(Klass* k);
+
+  KlassRememberingOopClosure(CMSCollector* collector,
+                             ReferenceProcessor* rp,
+                             CMSMarkStack* revisit_stack);
+};
+
+// Similar to KlassRememberingOopClosure for use when multiple
+// GC threads will execute the closure.
+
+class Par_KlassRememberingOopClosure : public KlassRememberingOopClosure {
+ public:
+  Par_KlassRememberingOopClosure(CMSCollector* collector,
+                                 ReferenceProcessor* rp,
+                                 CMSMarkStack* revisit_stack):
+    KlassRememberingOopClosure(collector, rp, revisit_stack) {}
+  virtual void remember_klass(Klass* k);
+};
+
+// The non-parallel version (the parallel version appears further below).
+class PushAndMarkClosure: public KlassRememberingOopClosure {
+ private:
   MemRegion     _span;
   CMSBitMap*    _bit_map;
   CMSBitMap*    _mod_union_table;
   CMSMarkStack* _mark_stack;
-  CMSMarkStack* _revisit_stack;
   bool          _concurrent_precleaning;
-  bool const    _should_remember_klasses;
  protected:
   DO_OOP_WORK_DEFN
  public:
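The new base class factors the revisit stack and the remember-klasses flag out of each concrete closure; the serial and parallel variants then differ only in which push primitive they use. A minimal sketch of that serial/parallel split, with hypothetical stand-in types (the real CMSMarkStack and its par_push() are not shown in this diff):

#include <cstdio>
#include <cstdlib>
#include <vector>

// Hypothetical revisit stack; push() is the serial primitive,
// par_push() the thread-safe one (imagine a CAS loop inside).
struct RevisitStack {
  std::vector<void*> items;
  bool push(void* p)     { items.push_back(p); return true; }
  bool par_push(void* p) { items.push_back(p); return true; }
};

// Analogue of KlassRememberingOopClosure: owns the shared state once,
// so concrete closures no longer each carry a _revisit_stack field.
class RememberingClosure {
 protected:
  RevisitStack* _revisit_stack;
  const bool    _should_remember;
 public:
  RememberingClosure(RevisitStack* s, bool remember)
      : _revisit_stack(s), _should_remember(remember) {}
  virtual ~RememberingClosure() {}
  virtual void remember(void* klass) {          // serial flavor: plain push
    if (!_revisit_stack->push(klass)) std::abort();
  }
};

// Analogue of Par_KlassRememberingOopClosure: only the push differs.
class Par_RememberingClosure : public RememberingClosure {
 public:
  Par_RememberingClosure(RevisitStack* s, bool remember)
      : RememberingClosure(s, remember) {}
  virtual void remember(void* klass) {          // parallel flavor: par_push
    if (!_revisit_stack->par_push(klass)) std::abort();
  }
};

int main() {
  RevisitStack stack;
  RememberingClosure serial(&stack, true);
  Par_RememberingClosure parallel(&stack, true);
  int k1 = 0, k2 = 0;
  serial.remember(&k1);
  parallel.remember(&k2);
  std::printf("%u klasses queued for revisit\n", (unsigned)stack.items.size());
  return 0;
}

Hoisting the fields this way removes a copy of the same three members from half a dozen closures below and guarantees they are initialized through a single constructor path.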
@@ -122,10 +155,12 @@ class PushAndMarkClosure: public OopClosure {
   Prefetch::style prefetch_style() {
     return Prefetch::do_read;
   }
-  virtual const bool should_remember_klasses() const {
-    return _should_remember_klasses;
+  // In support of class unloading
+  virtual const bool should_remember_mdo() const {
+    return false;
+    // return _should_remember_klasses;
   }
   virtual void remember_klass(Klass* k);
+  virtual void remember_mdo(DataLayout* v);
 };

 // In the parallel case, the revisit stack, the bit map and the
@@ -134,14 +169,11 @@ class PushAndMarkClosure: public OopClosure {
 // synchronization (for instance, via CAS). The marking stack
 // used in the non-parallel case above is here replaced with
 // an OopTaskQueue structure to allow efficient work stealing.
-class Par_PushAndMarkClosure: public OopClosure {
+class Par_PushAndMarkClosure: public Par_KlassRememberingOopClosure {
  private:
-  CMSCollector* _collector;
   MemRegion     _span;
   CMSBitMap*    _bit_map;
   OopTaskQueue* _work_queue;
-  CMSMarkStack* _revisit_stack;
-  bool const    _should_remember_klasses;
  protected:
   DO_OOP_WORK_DEFN
  public:
@@ -159,10 +191,12 @@ class Par_PushAndMarkClosure: public OopClosure {
   Prefetch::style prefetch_style() {
     return Prefetch::do_read;
   }
-  virtual const bool should_remember_klasses() const {
-    return _should_remember_klasses;
+  // In support of class unloading
+  virtual const bool should_remember_mdo() const {
+    return false;
+    // return _should_remember_klasses;
   }
   virtual void remember_klass(Klass* k);
+  virtual void remember_mdo(DataLayout* v);
 };

 // The non-parallel version (the parallel version appears further below).
@@ -201,6 +235,12 @@ class MarkRefsIntoAndScanClosure: public OopsInGenClosure {
   void set_freelistLock(Mutex* m) {
     _freelistLock = m;
   }
+  virtual const bool should_remember_klasses() const {
+    return _pushAndMarkClosure.should_remember_klasses();
+  }
+  virtual void remember_klass(Klass* k) {
+    _pushAndMarkClosure.remember_klass(k);
+  }

  private:
   inline void do_yield_check();
@@ -234,6 +274,16 @@ class Par_MarkRefsIntoAndScanClosure: public OopsInGenClosure {
   inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
   bool do_header() { return true; }
   virtual const bool do_nmethods() const { return true; }
+  // When ScanMarkedObjectsAgainClosure is used,
+  // it passes [Par_]MarkRefsIntoAndScanClosure to oop_oop_iterate(),
+  // and this delegation is used.
+  virtual const bool should_remember_klasses() const {
+    return _par_pushAndMarkClosure.should_remember_klasses();
+  }
+  // See comment on should_remember_klasses() above.
+  virtual void remember_klass(Klass* k) {
+    _par_pushAndMarkClosure.remember_klass(k);
+  }
   Prefetch::style prefetch_style() {
     return Prefetch::do_read;
   }
@@ -243,17 +293,14 @@ class Par_MarkRefsIntoAndScanClosure: public OopsInGenClosure {
 // This closure is used during the concurrent marking phase
 // following the first checkpoint. Its use is buried in
 // the closure MarkFromRootsClosure.
-class PushOrMarkClosure: public OopClosure {
+class PushOrMarkClosure: public KlassRememberingOopClosure {
  private:
-  CMSCollector* _collector;
   MemRegion     _span;
   CMSBitMap*    _bitMap;
   CMSMarkStack* _markStack;
-  CMSMarkStack* _revisitStack;
   HeapWord*     const _finger;
   MarkFromRootsClosure* const
                 _parent;
-  bool const _should_remember_klasses;
  protected:
   DO_OOP_WORK_DEFN
  public:
@@ -268,10 +315,13 @@ class PushOrMarkClosure: public OopClosure {
   virtual void do_oop(narrowOop* p);
   inline void do_oop_nv(oop* p) { PushOrMarkClosure::do_oop_work(p); }
   inline void do_oop_nv(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
-  virtual const bool should_remember_klasses() const {
-    return _should_remember_klasses;
+  // In support of class unloading
+  virtual const bool should_remember_mdo() const {
+    return false;
+    // return _should_remember_klasses;
   }
   virtual void remember_klass(Klass* k);
+  virtual void remember_mdo(DataLayout* v);

   // Deal with a stack overflow condition
   void handle_stack_overflow(HeapWord* lost);
  private:
@@ -282,20 +332,17 @@ class PushOrMarkClosure: public OopClosure {
 // This closure is used during the concurrent marking phase
 // following the first checkpoint. Its use is buried in
 // the closure Par_MarkFromRootsClosure.
-class Par_PushOrMarkClosure: public OopClosure {
+class Par_PushOrMarkClosure: public Par_KlassRememberingOopClosure {
  private:
-  CMSCollector* _collector;
   MemRegion     _whole_span;
   MemRegion     _span; // local chunk
   CMSBitMap*    _bit_map;
   OopTaskQueue* _work_queue;
   CMSMarkStack* _overflow_stack;
-  CMSMarkStack* _revisit_stack;
   HeapWord*     const _finger;
   HeapWord**    const _global_finger_addr;
   Par_MarkFromRootsClosure* const
                 _parent;
-  bool const _should_remember_klasses;
  protected:
   DO_OOP_WORK_DEFN
  public:
@@ -312,10 +359,13 @@ class Par_PushOrMarkClosure: public OopClosure {
   virtual void do_oop(narrowOop* p);
   inline void do_oop_nv(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
   inline void do_oop_nv(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
-  virtual const bool should_remember_klasses() const {
-    return _should_remember_klasses;
+  // In support of class unloading
+  virtual const bool should_remember_mdo() const {
+    return false;
+    // return _should_remember_klasses;
   }
   virtual void remember_klass(Klass* k);
+  virtual void remember_mdo(DataLayout* v);

   // Deal with a stack overflow condition
   void handle_stack_overflow(HeapWord* lost);
  private:
@@ -328,9 +378,8 @@ class Par_PushOrMarkClosure: public OopClosure {
 // processing phase of the CMS final checkpoint step, as
 // well as during the concurrent precleaning of the discovered
 // reference lists.
-class CMSKeepAliveClosure: public OopClosure {
+class CMSKeepAliveClosure: public KlassRememberingOopClosure {
  private:
-  CMSCollector* _collector;
   const MemRegion _span;
   CMSMarkStack* _mark_stack;
   CMSBitMap*    _bit_map;
@@ -340,14 +389,7 @@ class CMSKeepAliveClosure: public OopClosure {
  public:
   CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
                       CMSBitMap* bit_map, CMSMarkStack* mark_stack,
-                      bool cpc):
-    _collector(collector),
-    _span(span),
-    _bit_map(bit_map),
-    _mark_stack(mark_stack),
-    _concurrent_precleaning(cpc) {
-    assert(!_span.is_empty(), "Empty span could spell trouble");
-  }
+                      CMSMarkStack* revisit_stack, bool cpc);
   bool concurrent_precleaning() const { return _concurrent_precleaning; }
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
@@ -355,9 +397,8 @@ class CMSKeepAliveClosure: public OopClosure {
   inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
 };

-class CMSInnerParMarkAndPushClosure: public OopClosure {
+class CMSInnerParMarkAndPushClosure: public Par_KlassRememberingOopClosure {
  private:
-  CMSCollector* _collector;
   MemRegion     _span;
   OopTaskQueue* _work_queue;
   CMSBitMap*    _bit_map;
@@ -366,11 +407,8 @@ class CMSInnerParMarkAndPushClosure: public OopClosure {
  public:
   CMSInnerParMarkAndPushClosure(CMSCollector* collector,
                                 MemRegion span, CMSBitMap* bit_map,
-                                OopTaskQueue* work_queue):
-    _collector(collector),
-    _span(span),
-    _bit_map(bit_map),
-    _work_queue(work_queue) { }
+                                CMSMarkStack* revisit_stack,
+                                OopTaskQueue* work_queue);
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
   inline void do_oop_nv(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
@@ -380,9 +418,8 @@ class CMSInnerParMarkAndPushClosure: public OopClosure {
 // A parallel (MT) version of the above, used when
 // reference processing is parallel; the only difference
 // is in the do_oop method.
-class CMSParKeepAliveClosure: public OopClosure {
+class CMSParKeepAliveClosure: public Par_KlassRememberingOopClosure {
  private:
-  CMSCollector* _collector;
   MemRegion     _span;
   OopTaskQueue* _work_queue;
   CMSBitMap*    _bit_map;
@@ -394,7 +431,8 @@ class CMSParKeepAliveClosure: public OopClosure {
   DO_OOP_WORK_DEFN
  public:
   CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span,
-                         CMSBitMap* bit_map, OopTaskQueue* work_queue);
+                         CMSBitMap* bit_map, CMSMarkStack* revisit_stack,
+                         OopTaskQueue* work_queue);
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
   inline void do_oop_nv(oop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
@@ -37,16 +37,34 @@ inline void Par_MarkRefsIntoAndScanClosure::trim_queue(uint max) {
   }
 }

-inline void PushOrMarkClosure::remember_klass(Klass* k) {
-  if (!_revisitStack->push(oop(k))) {
+#ifndef PRODUCT
+void KlassRememberingOopClosure::check_remember_klasses() const {
+  assert(_should_remember_klasses == must_remember_klasses(),
+    "Should remember klasses in this context.");
+}
+#endif
+
+void KlassRememberingOopClosure::remember_klass(Klass* k) {
+  if (!_revisit_stack->push(oop(k))) {
     fatal("Revisit stack overflow in PushOrMarkClosure");
   }
+  check_remember_klasses();
 }

-inline void Par_PushOrMarkClosure::remember_klass(Klass* k) {
-  if (!_revisit_stack->par_push(oop(k))) {
-    fatal("Revisit stack overflow in PushOrMarkClosure");
+inline void PushOrMarkClosure::remember_mdo(DataLayout* v) {
+  // TBD
+}
+
+
+void Par_KlassRememberingOopClosure::remember_klass(Klass* k) {
+  if (!_revisit_stack->par_push(oop(k))) {
+    fatal("Revisit stack overflow in Par_KlassRememberingOopClosure");
   }
+  check_remember_klasses();
+}
+
+inline void Par_PushOrMarkClosure::remember_mdo(DataLayout* v) {
+  // TBD
 }

 inline void PushOrMarkClosure::do_yield_check() {
@@ -3499,6 +3499,7 @@ void CMSCollector::checkpointRootsInitialWork(bool asynch) {
   ref_processor()->set_enqueuing_is_done(false);

   {
+    // This is not needed. DEBUG_ONLY(RememberKlassesChecker imx(true);)
     COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
     gch->gen_process_strong_roots(_cmsGen->level(),
@@ -3623,6 +3624,8 @@ bool CMSCollector::markFromRootsWork(bool asynch) {
   verify_overflow_empty();
   assert(_revisitStack.isEmpty(), "tabula rasa");

+  DEBUG_ONLY(RememberKlassesChecker cmx(CMSClassUnloadingEnabled);)
+
   bool result = false;
   if (CMSConcurrentMTEnabled && ParallelCMSThreads > 0) {
     result = do_marking_mt(asynch);
@@ -3958,24 +3961,24 @@ void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
   pst->all_tasks_completed();
 }

-class Par_ConcMarkingClosure: public OopClosure {
+class Par_ConcMarkingClosure: public Par_KlassRememberingOopClosure {
  private:
-  CMSCollector* _collector;
   MemRegion     _span;
   CMSBitMap*    _bit_map;
   CMSMarkStack* _overflow_stack;
-  CMSMarkStack* _revisit_stack;     // XXXXXX Check proper use
   OopTaskQueue* _work_queue;
  protected:
   DO_OOP_WORK_DEFN
  public:
   Par_ConcMarkingClosure(CMSCollector* collector, OopTaskQueue* work_queue,
-                         CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
-    _collector(collector),
+                         CMSBitMap* bit_map, CMSMarkStack* overflow_stack,
+                         CMSMarkStack* revisit_stack):
+    Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
     _span(_collector->_span),
     _work_queue(work_queue),
     _bit_map(bit_map),
-    _overflow_stack(overflow_stack) { }  // need to initialize revisit stack etc.
+    _overflow_stack(overflow_stack)
+  { }
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
   void trim_queue(size_t max);
@@ -4063,8 +4066,9 @@ void CMSConcMarkingTask::do_work_steal(int i) {
   oop obj_to_scan;
   CMSBitMap* bm = &(_collector->_markBitMap);
   CMSMarkStack* ovflw = &(_collector->_markStack);
+  CMSMarkStack* revisit = &(_collector->_revisitStack);
   int* seed = _collector->hash_seed(i);
-  Par_ConcMarkingClosure cl(_collector, work_q, bm, ovflw);
+  Par_ConcMarkingClosure cl(_collector, work_q, bm, ovflw, revisit);
   while (true) {
     cl.trim_queue(0);
     assert(work_q->size() == 0, "Should have been emptied above");
@@ -4089,6 +4093,7 @@ void CMSConcMarkingTask::coordinator_yield() {
   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
          "CMS thread should hold CMS token");

+  DEBUG_ONLY(RememberKlassesChecker mux(false);)
   // First give up the locks, then yield, then re-lock
   // We should probably use a constructor/destructor idiom to
   // do this unlock/lock or modify the MutexUnlocker class to
@@ -4165,6 +4170,8 @@ bool CMSCollector::do_marking_mt(bool asynch) {
   // multi-threaded marking phase.
   ReferenceProcessorMTMutator mt(ref_processor(), num_workers > 1);

+  DEBUG_ONLY(RememberKlassesChecker cmx(CMSClassUnloadingEnabled);)
+
   conc_workers()->start_task(&tsk);
   while (tsk.yielded()) {
     tsk.coordinator_yield();
@@ -4404,7 +4411,8 @@ size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
     CMSPrecleanRefsYieldClosure yield_cl(this);
     assert(rp->span().equals(_span), "Spans should be equal");
     CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
-                                   &_markStack, true /* preclean */);
+                                   &_markStack, &_revisitStack,
+                                   true /* preclean */);
     CMSDrainMarkingStackClosure complete_trace(this,
                                    _span, &_markBitMap, &_markStack,
                                    &keep_alive, true /* preclean */);
@@ -4424,6 +4432,7 @@ size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
                              bitMapLock());
       startTimer();
+      sample_eden();

       // The following will yield to allow foreground
       // collection to proceed promptly. XXX YSR:
       // The code in this method may need further
@@ -4453,6 +4462,7 @@ size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
       SurvivorSpacePrecleanClosure
         sss_cl(this, _span, &_markBitMap, &_markStack,
                &pam_cl, before_count, CMSYield);
+      DEBUG_ONLY(RememberKlassesChecker mx(CMSClassUnloadingEnabled);)
       dng->from()->object_iterate_careful(&sss_cl);
       dng->to()->object_iterate_careful(&sss_cl);
     }
@@ -4554,6 +4564,13 @@ size_t CMSCollector::preclean_mod_union_table(
   verify_work_stacks_empty();
   verify_overflow_empty();

+  // Turn off checking for this method but turn it back on
+  // selectively. There are yield points in this method
+  // but it is difficult to turn the checking off just around
+  // the yield points. It is simpler to selectively turn
+  // it on.
+  DEBUG_ONLY(RememberKlassesChecker mux(false);)
+
   // strategy: starting with the first card, accumulate contiguous
   // ranges of dirty cards; clear these cards, then scan the region
   // covered by these cards.
@@ -4582,6 +4599,7 @@ size_t CMSCollector::preclean_mod_union_table(
     MemRegion dirtyRegion;
     {
       stopTimer();
+      // Potential yield point
       CMSTokenSync ts(true);
       startTimer();
       sample_eden();
@@ -4607,6 +4625,7 @@ size_t CMSCollector::preclean_mod_union_table(
       assert(numDirtyCards > 0, "consistency check");
       HeapWord* stop_point = NULL;
       stopTimer();
+      // Potential yield point
       CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
                                bitMapLock());
       startTimer();
@@ -4614,6 +4633,7 @@ size_t CMSCollector::preclean_mod_union_table(
         verify_work_stacks_empty();
         verify_overflow_empty();
         sample_eden();
+        DEBUG_ONLY(RememberKlassesChecker mx(CMSClassUnloadingEnabled);)
         stop_point =
           gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
       }
@@ -4701,6 +4721,7 @@ size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
     sample_eden();
     verify_work_stacks_empty();
     verify_overflow_empty();
+    DEBUG_ONLY(RememberKlassesChecker mx(CMSClassUnloadingEnabled);)
     HeapWord* stop_point =
       gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
     if (stop_point != NULL) {
@@ -4800,6 +4821,7 @@ void CMSCollector::checkpointRootsFinalWork(bool asynch,
   assert(haveFreelistLocks(), "must have free list locks");
   assert_lock_strong(bitMapLock());

+  DEBUG_ONLY(RememberKlassesChecker fmx(CMSClassUnloadingEnabled);)
   if (!init_mark_was_synchronous) {
     // We might assume that we need not fill TLAB's when
     // CMSScavengeBeforeRemark is set, because we may have just done
@@ -4903,6 +4925,9 @@ void CMSCollector::checkpointRootsFinalWork(bool asynch,
   _markStack._hit_limit = 0;
   _markStack._failed_double = 0;

+  // Check that all the klasses have been checked
+  assert(_revisitStack.isEmpty(), "Not all klasses revisited");
+
   if ((VerifyAfterGC || VerifyDuringGC) &&
       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
     verify_after_remark();
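The repeated DEBUG_ONLY(RememberKlassesChecker ...) lines install a scoped debug check: enabled with CMSClassUnloadingEnabled across code that must remember klasses, explicitly disabled around yield points. The checker's real interface is not shown in this diff, so the flag-save/restore shape below is an assumption, but the RAII idiom it illustrates is the one being used:

#include <cassert>

// Hypothetical process-wide flag the guard manipulates; the real
// RememberKlassesChecker presumably tracks a similar mode bit.
static bool g_must_remember_klasses = false;

class RememberKlassesChecker {       // RAII: set on entry, restore on exit
  bool _saved;
 public:
  explicit RememberKlassesChecker(bool mode) : _saved(g_must_remember_klasses) {
    g_must_remember_klasses = mode;
  }
  ~RememberKlassesChecker() { g_must_remember_klasses = _saved; }
};

void yield_point() {
  // Around a yield the checking is switched off, mirroring
  // DEBUG_ONLY(RememberKlassesChecker mux(false);) in do_yield_work().
  RememberKlassesChecker mux(false);
  assert(!g_must_remember_klasses && "disabled across the yield");
}

int main() {
  RememberKlassesChecker cmx(true);  // marking phase: checking on
  assert(g_must_remember_klasses && "enabled while marking");
  yield_point();
  assert(g_must_remember_klasses && "restored after the scope ends");
  return 0;
}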
@@ -5574,9 +5599,13 @@ public:
 void CMSRefProcTaskProxy::work(int i) {
   assert(_collector->_span.equals(_span), "Inconsistency in _span");
   CMSParKeepAliveClosure par_keep_alive(_collector, _span,
-                                        _mark_bit_map, work_queue(i));
+                                        _mark_bit_map,
+                                        &_collector->_revisitStack,
+                                        work_queue(i));
   CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
-                                        _mark_bit_map, work_queue(i));
+                                        _mark_bit_map,
+                                        &_collector->_revisitStack,
+                                        work_queue(i));
   CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
   _task.work(i, is_alive_closure, par_keep_alive, par_drain_stack);
   if (_task.marks_oops_alive()) {
@@ -5604,12 +5633,13 @@ public:
 };

 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
-  MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
-  _collector(collector),
+  MemRegion span, CMSBitMap* bit_map, CMSMarkStack* revisit_stack,
+  OopTaskQueue* work_queue):
+  Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
   _span(span),
   _bit_map(bit_map),
   _work_queue(work_queue),
-  _mark_and_push(collector, span, bit_map, work_queue),
+  _mark_and_push(collector, span, bit_map, revisit_stack, work_queue),
   _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
                        (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads)))
 { }
@@ -5696,7 +5726,8 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
   verify_work_stacks_empty();

   CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
-                                          &_markStack, false /* !preclean */);
+                                          &_markStack, &_revisitStack,
+                                          false /* !preclean */);
   CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
                                 _span, &_markBitMap, &_markStack,
                                 &cmsKeepAliveClosure, false /* !preclean */);
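Every construction site above now threads the collector's single revisit stack into its closure pair (with a NULL ReferenceProcessor where none applies). A tiny sketch of that wiring pattern, using hypothetical stand-in types, showing several per-worker closures aliasing one shared stack:

#include <vector>

struct Stack { std::vector<void*> items; };  // stand-in for CMSMarkStack

struct KeepAlive {             // stands in for CMSParKeepAliveClosure
  Stack* revisit;
  explicit KeepAlive(Stack* r) : revisit(r) {}
};
struct DrainStack {            // stands in for CMSParDrainMarkingStackClosure
  Stack* revisit;
  explicit DrainStack(Stack* r) : revisit(r) {}
};

int main() {
  Stack shared_revisit;        // one stack owned by the "collector"
  // Each worker's closures alias the same stack, as in
  // CMSRefProcTaskProxy::work() above; the parallel closures
  // are the ones that must use a thread-safe push.
  for (int worker = 0; worker < 4; ++worker) {
    KeepAlive keep_alive(&shared_revisit);
    DrainStack drain(&shared_revisit);
    (void)keep_alive; (void)drain;
  }
  return 0;
}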
@@ -6531,6 +6562,7 @@ void MarkRefsIntoAndScanClosure::do_yield_work() {
   assert_lock_strong(_freelistLock);
   assert_lock_strong(_bit_map->lock());
   // relinquish the free_list_lock and bitMaplock()
+  DEBUG_ONLY(RememberKlassesChecker mux(false);)
   _bit_map->lock()->unlock();
   _freelistLock->unlock();
   ConcurrentMarkSweepThread::desynchronize(true);
@@ -6703,6 +6735,7 @@ void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
          "CMS thread should hold CMS token");
   assert_lock_strong(_freelistLock);
   assert_lock_strong(_bitMap->lock());
+  DEBUG_ONLY(RememberKlassesChecker mux(false);)
   // relinquish the free_list_lock and bitMaplock()
   _bitMap->lock()->unlock();
   _freelistLock->unlock();
@@ -6779,6 +6812,7 @@ void SurvivorSpacePrecleanClosure::do_yield_work() {
   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
          "CMS thread should hold CMS token");
   assert_lock_strong(_bit_map->lock());
+  DEBUG_ONLY(RememberKlassesChecker smx(false);)
   // Relinquish the bit map lock
   _bit_map->lock()->unlock();
   ConcurrentMarkSweepThread::desynchronize(true);
@@ -6941,6 +6975,7 @@ void MarkFromRootsClosure::do_yield_work() {
   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
          "CMS thread should hold CMS token");
   assert_lock_strong(_bitMap->lock());
+  DEBUG_ONLY(RememberKlassesChecker mux(false);)
   _bitMap->lock()->unlock();
   ConcurrentMarkSweepThread::desynchronize(true);
   ConcurrentMarkSweepThread::acknowledge_yield_request();
@@ -7295,15 +7330,12 @@ PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
                      CMSBitMap* bitMap, CMSMarkStack* markStack,
                      CMSMarkStack* revisitStack,
                      HeapWord* finger, MarkFromRootsClosure* parent) :
-  OopClosure(collector->ref_processor()),
-  _collector(collector),
+  KlassRememberingOopClosure(collector, collector->ref_processor(), revisitStack),
   _span(span),
   _bitMap(bitMap),
   _markStack(markStack),
-  _revisitStack(revisitStack),
   _finger(finger),
-  _parent(parent),
-  _should_remember_klasses(collector->should_unload_classes())
+  _parent(parent)
 { }

 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
@@ -7315,18 +7347,17 @@ Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
                      HeapWord* finger,
                      HeapWord** global_finger_addr,
                      Par_MarkFromRootsClosure* parent) :
-  OopClosure(collector->ref_processor()),
-  _collector(collector),
+  Par_KlassRememberingOopClosure(collector,
+                                 collector->ref_processor(),
+                                 revisit_stack),
   _whole_span(collector->_span),
   _span(span),
   _bit_map(bit_map),
   _work_queue(work_queue),
   _overflow_stack(overflow_stack),
-  _revisit_stack(revisit_stack),
   _finger(finger),
   _global_finger_addr(global_finger_addr),
-  _parent(parent),
-  _should_remember_klasses(collector->should_unload_classes())
+  _parent(parent)
 { }

 // Assumes thread-safe access by callers, who are
@@ -7456,6 +7487,14 @@ void Par_PushOrMarkClosure::do_oop(oop obj) {
 void Par_PushOrMarkClosure::do_oop(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
 void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }

+KlassRememberingOopClosure::KlassRememberingOopClosure(CMSCollector* collector,
+                                             ReferenceProcessor* rp,
+                                             CMSMarkStack* revisit_stack) :
+  OopClosure(rp),
+  _collector(collector),
+  _revisit_stack(revisit_stack),
+  _should_remember_klasses(collector->should_unload_classes()) {}
+
 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
                                        MemRegion span,
                                        ReferenceProcessor* rp,
@@ -7464,15 +7503,12 @@ PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
                                        CMSMarkStack* mark_stack,
                                        CMSMarkStack* revisit_stack,
                                        bool concurrent_precleaning):
-  OopClosure(rp),
-  _collector(collector),
+  KlassRememberingOopClosure(collector, rp, revisit_stack),
   _span(span),
   _bit_map(bit_map),
   _mod_union_table(mod_union_table),
   _mark_stack(mark_stack),
-  _revisit_stack(revisit_stack),
-  _concurrent_precleaning(concurrent_precleaning),
-  _should_remember_klasses(collector->should_unload_classes())
+  _concurrent_precleaning(concurrent_precleaning)
 {
   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
 }
@@ -7540,13 +7576,10 @@ Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
                                                CMSBitMap* bit_map,
                                                OopTaskQueue* work_queue,
                                                CMSMarkStack* revisit_stack):
-  OopClosure(rp),
-  _collector(collector),
+  Par_KlassRememberingOopClosure(collector, rp, revisit_stack),
   _span(span),
   _bit_map(bit_map),
-  _work_queue(work_queue),
-  _revisit_stack(revisit_stack),
-  _should_remember_klasses(collector->should_unload_classes())
+  _work_queue(work_queue)
 {
   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
 }
@@ -7599,19 +7632,16 @@ void Par_PushAndMarkClosure::do_oop(oop obj) {
 void Par_PushAndMarkClosure::do_oop(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }

-void PushAndMarkClosure::remember_klass(Klass* k) {
-  if (!_revisit_stack->push(oop(k))) {
-    fatal("Revisit stack overflowed in PushAndMarkClosure");
-  }
+void PushAndMarkClosure::remember_mdo(DataLayout* v) {
+  // TBD
 }

-void Par_PushAndMarkClosure::remember_klass(Klass* k) {
-  if (!_revisit_stack->par_push(oop(k))) {
-    fatal("Revist stack overflowed in Par_PushAndMarkClosure");
-  }
+void Par_PushAndMarkClosure::remember_mdo(DataLayout* v) {
+  // TBD
 }

 void CMSPrecleanRefsYieldClosure::do_yield_work() {
+  DEBUG_ONLY(RememberKlassesChecker mux(false);)
   Mutex* bml = _collector->bitMapLock();
   assert_lock_strong(bml);
   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
@@ -8302,6 +8332,19 @@ bool CMSIsAliveClosure::do_object_b(oop obj) {
          (!_span.contains(addr) || _bit_map->isMarked(addr));
 }

+CMSKeepAliveClosure::CMSKeepAliveClosure( CMSCollector* collector,
+                      MemRegion span,
+                      CMSBitMap* bit_map, CMSMarkStack* mark_stack,
+                      CMSMarkStack* revisit_stack, bool cpc):
+  KlassRememberingOopClosure(collector, NULL, revisit_stack),
+  _span(span),
+  _bit_map(bit_map),
+  _mark_stack(mark_stack),
+  _concurrent_precleaning(cpc) {
+  assert(!_span.is_empty(), "Empty span could spell trouble");
+}
+
+
 // CMSKeepAliveClosure: the serial version
 void CMSKeepAliveClosure::do_oop(oop obj) {
   HeapWord* addr = (HeapWord*)obj;
@@ -8385,6 +8428,16 @@ void CMSParKeepAliveClosure::trim_queue(uint max) {
   }
 }

+CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
+                                  CMSCollector* collector,
+                                  MemRegion span, CMSBitMap* bit_map,
+                                  CMSMarkStack* revisit_stack,
+                                  OopTaskQueue* work_queue):
+  Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
+  _span(span),
+  _bit_map(bit_map),
+  _work_queue(work_queue) { }
+
 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
   HeapWord* addr = (HeapWord*)obj;
   if (_span.contains(addr) &&
@@ -1790,12 +1790,13 @@ class CMSParDrainMarkingStackClosure: public VoidClosure {
  public:
   CMSParDrainMarkingStackClosure(CMSCollector* collector,
                                  MemRegion span, CMSBitMap* bit_map,
+                                 CMSMarkStack* revisit_stack,
                                  OopTaskQueue* work_queue):
     _collector(collector),
     _span(span),
     _bit_map(bit_map),
     _work_queue(work_queue),
-    _mark_and_push(collector, span, bit_map, work_queue) { }
+    _mark_and_push(collector, span, bit_map, revisit_stack, work_queue) { }

  public:
   void trim_queue(uint max);
@@ -39,7 +39,6 @@ ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread *nex
   _next(next),
   _cg1r(cg1r),
   _vtime_accum(0.0),
-  _co_tracker(G1CRGroup),
   _interval_ms(5.0)
 {
   create_and_start();
@@ -76,9 +75,6 @@ void ConcurrentG1RefineThread::run() {
   _vtime_start = os::elapsedVTime();
   wait_for_universe_init();

-  _co_tracker.enable();
-  _co_tracker.start();
-
   while (!_should_terminate) {
     DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
     // Wait for completed log buffers to exist.
@@ -147,7 +143,6 @@ void ConcurrentG1RefineThread::run() {
         }
         break;
       }
-      _co_tracker.update(false);

       // Check if we need to activate the next thread.
       if (curr_buffer_num > next_threshold && _next != NULL && !_next->is_active()) {
@@ -168,7 +163,6 @@ void ConcurrentG1RefineThread::run() {
       }
       n_logs++;
     }
-    _co_tracker.update(false);
     _sts.leave();

     if (os::supports_vtime()) {
@@ -177,9 +171,6 @@ void ConcurrentG1RefineThread::run() {
       _vtime_accum = 0.0;
     }
   }
-  _sts.join();
-  _co_tracker.update(true);
-  _sts.leave();
   assert(_should_terminate, "just checking");

   terminate();
@@ -51,7 +51,6 @@ class ConcurrentG1RefineThread: public ConcurrentGCThread {
  private:
   ConcurrentG1Refine* _cg1r;

-  COTracker _co_tracker;
   double _interval_ms;

   void decreaseInterval(int processing_time_ms) {
@@ -433,8 +433,7 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
   _total_counting_time(0.0),
   _total_rs_scrub_time(0.0),

-  _parallel_workers(NULL),
-  _cleanup_co_tracker(G1CLGroup)
+  _parallel_workers(NULL)
 {
   CMVerboseLevel verbose_level =
     (CMVerboseLevel) G1MarkingVerboseLevel;
@@ -823,18 +822,6 @@ void ConcurrentMark::checkpointRootsInitialPost() {
   // when marking is on. So, it's also called at the end of the
   // initial-mark pause to update the heap end, if the heap expands
   // during it. No need to call it here.
-
-  guarantee( !_cleanup_co_tracker.enabled(), "invariant" );
-
-  size_t max_marking_threads =
-    MAX2((size_t) 1, parallel_marking_threads());
-  for (int i = 0; i < (int)_max_task_num; ++i) {
-    _tasks[i]->enable_co_tracker();
-    if (i < (int) max_marking_threads)
-      _tasks[i]->reset_co_tracker(marking_task_overhead());
-    else
-      _tasks[i]->reset_co_tracker(0.0);
-  }
 }

 // Checkpoint the roots into this generation from outside
@@ -845,7 +832,6 @@ void ConcurrentMark::checkpointRootsInitial() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();

   double start = os::elapsedTime();
-  GCOverheadReporter::recordSTWStart(start);

   G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
   g1p->record_concurrent_mark_init_start();
@@ -876,7 +862,6 @@ void ConcurrentMark::checkpointRootsInitial() {
   // Statistics.
   double end = os::elapsedTime();
   _init_times.add((end - start) * 1000.0);
-  GCOverheadReporter::recordSTWEnd(end);

   g1p->record_concurrent_mark_init_end();
 }
@@ -1035,7 +1020,6 @@ public:

     guarantee( (size_t)worker_i < _cm->active_tasks(), "invariant" );
     CMTask* the_task = _cm->task(worker_i);
-    the_task->start_co_tracker();
     the_task->record_start_time();
     if (!_cm->has_aborted()) {
       do {
@@ -1061,8 +1045,6 @@ public:
         double end_time2_sec = os::elapsedTime();
         double elapsed_time2_sec = end_time2_sec - start_time_sec;

-        the_task->update_co_tracker();
-
 #if 0
         gclog_or_tty->print_cr("CM: elapsed %1.4lf ms, sleep %1.4lf ms, "
                                "overhead %1.4lf",
@@ -1079,7 +1061,6 @@ public:
     ConcurrentGCThread::stsLeave();

     double end_vtime = os::elapsedVTime();
-    the_task->update_co_tracker(true);
     _cm->update_accum_task_vtime(worker_i, end_vtime - start_vtime);
   }

|
||||
g1p->record_concurrent_mark_remark_start();
|
||||
|
||||
double start = os::elapsedTime();
|
||||
GCOverheadReporter::recordSTWStart(start);
|
||||
|
||||
checkpointRootsFinalWork();
|
||||
|
||||
@ -1173,11 +1153,6 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
|
||||
_remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
|
||||
_remark_times.add((now - start) * 1000.0);
|
||||
|
||||
GCOverheadReporter::recordSTWEnd(now);
|
||||
for (int i = 0; i < (int)_max_task_num; ++i)
|
||||
_tasks[i]->disable_co_tracker();
|
||||
_cleanup_co_tracker.enable();
|
||||
_cleanup_co_tracker.reset(cleanup_task_overhead());
|
||||
g1p->record_concurrent_mark_remark_end();
|
||||
}
|
||||
|
||||
@ -1188,7 +1163,6 @@ class CalcLiveObjectsClosure: public HeapRegionClosure {
|
||||
|
||||
CMBitMapRO* _bm;
|
||||
ConcurrentMark* _cm;
|
||||
COTracker* _co_tracker;
|
||||
bool _changed;
|
||||
bool _yield;
|
||||
size_t _words_done;
|
||||
@ -1216,12 +1190,10 @@ class CalcLiveObjectsClosure: public HeapRegionClosure {
|
||||
public:
|
||||
CalcLiveObjectsClosure(bool final,
|
||||
CMBitMapRO *bm, ConcurrentMark *cm,
|
||||
BitMap* region_bm, BitMap* card_bm,
|
||||
COTracker* co_tracker) :
|
||||
BitMap* region_bm, BitMap* card_bm) :
|
||||
_bm(bm), _cm(cm), _changed(false), _yield(true),
|
||||
_words_done(0), _tot_live(0), _tot_used(0),
|
||||
_region_bm(region_bm), _card_bm(card_bm),
|
||||
_final(final), _co_tracker(co_tracker),
|
||||
_region_bm(region_bm), _card_bm(card_bm),_final(final),
|
||||
_regions_done(0), _start_vtime_sec(0.0)
|
||||
{
|
||||
_bottom_card_num =
|
||||
@ -1265,9 +1237,6 @@ public:
|
||||
}
|
||||
|
||||
bool doHeapRegion(HeapRegion* hr) {
|
||||
if (_co_tracker != NULL)
|
||||
_co_tracker->update();
|
||||
|
||||
if (!_final && _regions_done == 0)
|
||||
_start_vtime_sec = os::elapsedVTime();
|
||||
|
||||
@ -1396,12 +1365,6 @@ public:
|
||||
if (elapsed_vtime_sec > (10.0 / 1000.0)) {
|
||||
jlong sleep_time_ms =
|
||||
(jlong) (elapsed_vtime_sec * _cm->cleanup_sleep_factor() * 1000.0);
|
||||
#if 0
|
||||
gclog_or_tty->print_cr("CL: elapsed %1.4lf ms, sleep %1.4lf ms, "
|
||||
"overhead %1.4lf",
|
||||
elapsed_vtime_sec * 1000.0, (double) sleep_time_ms,
|
||||
_co_tracker->concOverhead(os::elapsedTime()));
|
||||
#endif
|
||||
os::sleep(Thread::current(), sleep_time_ms, false);
|
||||
_start_vtime_sec = end_vtime_sec;
|
||||
}
|
||||
@ -1421,15 +1384,11 @@ public:
|
||||
|
||||
|
||||
void ConcurrentMark::calcDesiredRegions() {
|
||||
guarantee( _cleanup_co_tracker.enabled(), "invariant" );
|
||||
_cleanup_co_tracker.start();
|
||||
|
||||
_region_bm.clear();
|
||||
_card_bm.clear();
|
||||
CalcLiveObjectsClosure calccl(false /*final*/,
|
||||
nextMarkBitMap(), this,
|
||||
&_region_bm, &_card_bm,
|
||||
&_cleanup_co_tracker);
|
||||
&_region_bm, &_card_bm);
|
||||
G1CollectedHeap *g1h = G1CollectedHeap::heap();
|
||||
g1h->heap_region_iterate(&calccl);
|
||||
|
||||
@ -1437,8 +1396,6 @@ void ConcurrentMark::calcDesiredRegions() {
|
||||
calccl.reset();
|
||||
g1h->heap_region_iterate(&calccl);
|
||||
} while (calccl.changed());
|
||||
|
||||
_cleanup_co_tracker.update(true);
|
||||
}
|
||||
|
||||
class G1ParFinalCountTask: public AbstractGangTask {
|
||||
@ -1472,8 +1429,7 @@ public:
|
||||
void work(int i) {
|
||||
CalcLiveObjectsClosure calccl(true /*final*/,
|
||||
_bm, _g1h->concurrent_mark(),
|
||||
_region_bm, _card_bm,
|
||||
NULL /* CO tracker */);
|
||||
_region_bm, _card_bm);
|
||||
calccl.no_yield();
|
||||
if (ParallelGCThreads > 0) {
|
||||
_g1h->heap_region_par_iterate_chunked(&calccl, i,
|
||||
@ -1663,13 +1619,10 @@ void ConcurrentMark::cleanup() {
|
||||
/* prev marking */ true);
|
||||
}
|
||||
|
||||
_cleanup_co_tracker.disable();
|
||||
|
||||
G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
|
||||
g1p->record_concurrent_mark_cleanup_start();
|
||||
|
||||
double start = os::elapsedTime();
|
||||
GCOverheadReporter::recordSTWStart(start);
|
||||
|
||||
// Do counting once more with the world stopped for good measure.
|
||||
G1ParFinalCountTask g1_par_count_task(g1h, nextMarkBitMap(),
|
||||
@ -1774,7 +1727,6 @@ void ConcurrentMark::cleanup() {
|
||||
// Statistics.
|
||||
double end = os::elapsedTime();
|
||||
_cleanup_times.add((end - start) * 1000.0);
|
||||
GCOverheadReporter::recordSTWEnd(end);
|
||||
|
||||
// G1CollectedHeap::heap()->print();
|
||||
// gclog_or_tty->print_cr("HEAP GC TIME STAMP : %d",
|
||||
@ -2625,24 +2577,6 @@ void ConcurrentMark::registerCSetRegion(HeapRegion* hr) {
|
||||
_should_gray_objects = true;
|
||||
}
|
||||
|
||||
void ConcurrentMark::disable_co_trackers() {
|
||||
if (has_aborted()) {
|
||||
if (_cleanup_co_tracker.enabled())
|
||||
_cleanup_co_tracker.disable();
|
||||
for (int i = 0; i < (int)_max_task_num; ++i) {
|
||||
CMTask* task = _tasks[i];
|
||||
if (task->co_tracker_enabled())
|
||||
task->disable_co_tracker();
|
||||
}
|
||||
} else {
|
||||
guarantee( !_cleanup_co_tracker.enabled(), "invariant" );
|
||||
for (int i = 0; i < (int)_max_task_num; ++i) {
|
||||
CMTask* task = _tasks[i];
|
||||
guarantee( !task->co_tracker_enabled(), "invariant" );
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// abandon current marking iteration due to a Full GC
|
||||
void ConcurrentMark::abort() {
|
||||
// Clear all marks to force marking thread to do nothing
|
||||
@ -4018,7 +3952,6 @@ CMTask::CMTask(int task_id,
|
||||
CMTaskQueue* task_queue,
|
||||
CMTaskQueueSet* task_queues)
|
||||
: _g1h(G1CollectedHeap::heap()),
|
||||
_co_tracker(G1CMGroup),
|
||||
_task_id(task_id), _cm(cm),
|
||||
_claimed(false),
|
||||
_nextMarkBitMap(NULL), _hash_seed(17),
|
||||
|
@ -407,8 +407,6 @@ protected:
|
||||
// verbose level
|
||||
CMVerboseLevel _verbose_level;
|
||||
|
||||
COTracker _cleanup_co_tracker;
|
||||
|
||||
// These two fields are used to implement the optimisation that
|
||||
// avoids pushing objects on the global/region stack if there are
|
||||
// no collection set regions above the lowest finger.
|
||||
@ -720,8 +718,6 @@ public:
|
||||
// Called to abort the marking cycle after a Full GC takes palce.
|
||||
void abort();
|
||||
|
||||
void disable_co_trackers();
|
||||
|
||||
// This prints the global/local fingers. It is used for debugging.
|
||||
NOT_PRODUCT(void print_finger();)
|
||||
|
||||
@ -773,9 +769,6 @@ private:
|
||||
// number of calls to this task
|
||||
int _calls;
|
||||
|
||||
// concurrent overhead over a single CPU for this task
|
||||
COTracker _co_tracker;
|
||||
|
||||
// when the virtual timer reaches this time, the marking step should
|
||||
// exit
|
||||
double _time_target_ms;
|
||||
@ -928,27 +921,6 @@ public:
|
||||
|
||||
void set_concurrent(bool concurrent) { _concurrent = concurrent; }
|
||||
|
||||
void enable_co_tracker() {
|
||||
guarantee( !_co_tracker.enabled(), "invariant" );
|
||||
_co_tracker.enable();
|
||||
}
|
||||
void disable_co_tracker() {
|
||||
guarantee( _co_tracker.enabled(), "invariant" );
|
||||
_co_tracker.disable();
|
||||
}
|
||||
bool co_tracker_enabled() {
|
||||
return _co_tracker.enabled();
|
||||
}
|
||||
void reset_co_tracker(double starting_conc_overhead = 0.0) {
|
||||
_co_tracker.reset(starting_conc_overhead);
|
||||
}
|
||||
void start_co_tracker() {
|
||||
_co_tracker.start();
|
||||
}
|
||||
void update_co_tracker(bool force_end = false) {
|
||||
_co_tracker.update(force_end);
|
||||
}
|
||||
|
||||
// The main method of this class which performs a marking step
|
||||
// trying not to exceed the given duration. However, it might exit
|
||||
// prematurely, according to some conditions (i.e. SATB buffers are
|
||||
|
@ -260,10 +260,6 @@ void ConcurrentMarkThread::run() {
|
||||
}
|
||||
}
|
||||
|
||||
_sts.join();
|
||||
_cm->disable_co_trackers();
|
||||
_sts.leave();
|
||||
|
||||
// we now want to allow clearing of the marking bitmap to be
|
||||
// suspended by a collection pause.
|
||||
_sts.join();
|
||||
|
@ -35,8 +35,7 @@ int ConcurrentZFThread::_zf_waits = 0;
|
||||
int ConcurrentZFThread::_regions_filled = 0;
|
||||
|
||||
ConcurrentZFThread::ConcurrentZFThread() :
|
||||
ConcurrentGCThread(),
|
||||
_co_tracker(G1ZFGroup)
|
||||
ConcurrentGCThread()
|
||||
{
|
||||
create_and_start();
|
||||
}
|
||||
@ -71,8 +70,6 @@ void ConcurrentZFThread::run() {
|
||||
Thread* thr_self = Thread::current();
|
||||
_vtime_start = os::elapsedVTime();
|
||||
wait_for_universe_init();
|
||||
_co_tracker.enable();
|
||||
_co_tracker.start();
|
||||
|
||||
G1CollectedHeap* g1 = G1CollectedHeap::heap();
|
||||
_sts.join();
|
||||
@ -135,10 +132,7 @@ void ConcurrentZFThread::run() {
|
||||
}
|
||||
_vtime_accum = (os::elapsedVTime() - _vtime_start);
|
||||
_sts.join();
|
||||
|
||||
_co_tracker.update();
|
||||
}
|
||||
_co_tracker.update(false);
|
||||
_sts.leave();
|
||||
|
||||
assert(_should_terminate, "just checking");
|
||||
|
@ -42,8 +42,6 @@ class ConcurrentZFThread: public ConcurrentGCThread {
|
||||
// Number of regions CFZ thread fills.
|
||||
static int _regions_filled;
|
||||
|
||||
COTracker _co_tracker;
|
||||
|
||||
double _vtime_start; // Initial virtual time.
|
||||
|
||||
// These are static because the "print_summary_info" method is, and
|
||||
|
@ -25,6 +25,8 @@
|
||||
#include "incls/_precompiled.incl"
|
||||
#include "incls/_g1CollectedHeap.cpp.incl"
|
||||
|
||||
size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
|
||||
|
||||
// turn it on so that the contents of the young list (scan-only /
|
||||
// to-be-collected) are printed at "strategic" points before / during
|
||||
// / after the collection --- this is useful for debugging
|
||||
@ -927,7 +929,6 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
|
||||
TraceTime t(full ? "Full GC (System.gc())" : "Full GC", PrintGC, true, gclog_or_tty);
|
||||
|
||||
double start = os::elapsedTime();
|
||||
GCOverheadReporter::recordSTWStart(start);
|
||||
g1_policy()->record_full_collection_start();
|
||||
|
||||
gc_prologue(true);
|
||||
@ -1049,7 +1050,6 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
|
||||
}
|
||||
|
||||
double end = os::elapsedTime();
|
||||
GCOverheadReporter::recordSTWEnd(end);
|
||||
g1_policy()->record_full_collection_end();
|
||||
|
||||
#ifdef TRACESPINNING
|
||||
@ -1396,6 +1396,9 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
|
||||
if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
|
||||
vm_exit_during_initialization("Failed necessary allocation.");
|
||||
}
|
||||
|
||||
_humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
|
||||
|
||||
int n_queues = MAX2((int)ParallelGCThreads, 1);
|
||||
_task_queues = new RefToScanQueueSet(n_queues);
|
||||
|
||||
@ -1548,9 +1551,10 @@ jint G1CollectedHeap::initialize() {
|
||||
const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
|
||||
guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
|
||||
|
||||
const size_t cards_per_region = HeapRegion::GrainBytes >> CardTableModRefBS::card_shift;
|
||||
size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
|
||||
guarantee(cards_per_region < max_cards_per_region, "too many cards per region");
|
||||
guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
|
||||
guarantee((size_t) HeapRegion::CardsPerRegion < max_cards_per_region,
|
||||
"too many cards per region");
|
||||
|
||||
_bot_shared = new G1BlockOffsetSharedArray(_reserved,
|
||||
heap_word_size(init_byte_size));
|
||||
@ -1610,9 +1614,6 @@ jint G1CollectedHeap::initialize() {
|
||||
// Do later initialization work for concurrent refinement.
|
||||
_cg1r->init();
|
||||
|
||||
const char* group_names[] = { "CR", "ZF", "CM", "CL" };
|
||||
GCOverheadReporter::initGCOverheadReporter(4, group_names);
|
||||
|
||||
return JNI_OK;
|
||||
}
|
||||
|
||||
@ -2431,8 +2432,6 @@ void G1CollectedHeap::print_tracing_info() const {
|
||||
}
|
||||
g1_policy()->print_yg_surv_rate_info();
|
||||
|
||||
GCOverheadReporter::printGCOverhead();
|
||||
|
||||
SpecializationStats::print();
|
||||
}
|
||||
|
||||
@ -2669,7 +2668,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
|
||||
// The elapsed time induced by the start time below deliberately elides
|
||||
// the possible verification above.
|
||||
double start_time_sec = os::elapsedTime();
|
||||
GCOverheadReporter::recordSTWStart(start_time_sec);
|
||||
size_t start_used_bytes = used();
|
||||
|
||||
g1_policy()->record_collection_pause_start(start_time_sec,
|
||||
@ -2747,8 +2745,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
|
||||
_in_cset_fast_test = NULL;
|
||||
_in_cset_fast_test_base = NULL;
|
||||
|
||||
release_gc_alloc_regions(false /* totally */);
|
||||
|
||||
cleanup_surviving_young_words();
|
||||
|
||||
if (g1_policy()->in_young_gc_mode()) {
|
||||
@ -2798,7 +2794,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
|
||||
double end_time_sec = os::elapsedTime();
|
||||
double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
|
||||
g1_policy()->record_pause_time_ms(pause_time_ms);
|
||||
GCOverheadReporter::recordSTWEnd(end_time_sec);
|
||||
g1_policy()->record_collection_pause_end(abandoned);
|
||||
|
||||
assert(regions_accounted_for(), "Region leakage.");
|
||||
@ -4141,6 +4136,7 @@ void G1CollectedHeap::evacuate_collection_set() {
|
||||
G1KeepAliveClosure keep_alive(this);
|
||||
JNIHandles::weak_oops_do(&is_alive, &keep_alive);
|
||||
}
|
||||
release_gc_alloc_regions(false /* totally */);
|
||||
g1_rem_set()->cleanup_after_oops_into_collection_set_do();
|
||||
|
||||
concurrent_g1_refine()->clear_hot_cache();
|
||||
@ -4274,12 +4270,18 @@ void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRe
|
||||
class G1ParCleanupCTTask : public AbstractGangTask {
|
||||
CardTableModRefBS* _ct_bs;
|
||||
G1CollectedHeap* _g1h;
|
||||
HeapRegion* volatile _so_head;
|
||||
HeapRegion* volatile _su_head;
|
||||
public:
|
||||
G1ParCleanupCTTask(CardTableModRefBS* ct_bs,
|
||||
G1CollectedHeap* g1h) :
|
||||
G1CollectedHeap* g1h,
|
||||
HeapRegion* scan_only_list,
|
||||
HeapRegion* survivor_list) :
|
||||
AbstractGangTask("G1 Par Cleanup CT Task"),
|
||||
_ct_bs(ct_bs),
|
||||
_g1h(g1h)
|
||||
_g1h(g1h),
|
||||
_so_head(scan_only_list),
|
||||
_su_head(survivor_list)
|
||||
{ }
|
||||
|
||||
void work(int i) {
|
||||
@ -4287,22 +4289,64 @@ public:
|
||||
while (r = _g1h->pop_dirty_cards_region()) {
|
||||
clear_cards(r);
|
||||
}
|
||||
// Redirty the cards of the scan-only and survivor regions.
|
||||
dirty_list(&this->_so_head);
|
||||
dirty_list(&this->_su_head);
|
||||
}
|
||||
|
||||
void clear_cards(HeapRegion* r) {
|
||||
// Cards for Survivor and Scan-Only regions will be dirtied later.
|
||||
if (!r->is_scan_only() && !r->is_survivor()) {
|
||||
_ct_bs->clear(MemRegion(r->bottom(), r->end()));
|
||||
}
|
||||
}
|
||||
|
||||
void dirty_list(HeapRegion* volatile * head_ptr) {
|
||||
HeapRegion* head;
|
||||
do {
|
||||
// Pop region off the list.
|
||||
head = *head_ptr;
|
||||
if (head != NULL) {
|
||||
HeapRegion* r = (HeapRegion*)
|
||||
Atomic::cmpxchg_ptr(head->get_next_young_region(), head_ptr, head);
|
||||
if (r == head) {
|
||||
assert(!r->isHumongous(), "Humongous regions shouldn't be on survivor list");
|
||||
_ct_bs->dirty(MemRegion(r->bottom(), r->end()));
|
||||
}
|
||||
}
|
||||
} while (*head_ptr != NULL);
|
||||
}
|
||||
};
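dirty_list() above is a lock-free multi-consumer pop: each worker claims the head of a shared singly-linked list with a compare-and-swap retry loop. A standalone sketch of the same pattern (illustrative only; std::atomic plays the role of HotSpot's Atomic::cmpxchg_ptr, Node that of HeapRegion, and ABA concerns are ignored as each region is popped at most once per cleanup):

#include <atomic>

struct Node { Node* next; };

// Pop one node off a shared list, or return nullptr if it is empty.
// On a lost race, compare_exchange_weak reloads 'h' with the freshly
// observed head and the loop retries, like the cmpxchg_ptr loop above.
static Node* pop(std::atomic<Node*>& head) {
  Node* h = head.load();
  while (h != nullptr && !head.compare_exchange_weak(h, h->next)) {
    // 'h' now holds the current head; retry.
  }
  return h;
}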

#ifndef PRODUCT
class G1VerifyCardTableCleanup: public HeapRegionClosure {
CardTableModRefBS* _ct_bs;
public:
G1VerifyCardTableCleanup(CardTableModRefBS* ct_bs)
: _ct_bs(ct_bs)
{ }
virtual bool doHeapRegion(HeapRegion* r)
{
MemRegion mr(r->bottom(), r->end());
if (r->is_scan_only() || r->is_survivor()) {
_ct_bs->verify_dirty_region(mr);
} else {
_ct_bs->verify_clean_region(mr);
}
return false;
}
};
#endif

void G1CollectedHeap::cleanUpCardTable() {
CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
double start = os::elapsedTime();

// Iterate over the dirty cards region list.
G1ParCleanupCTTask cleanup_task(ct_bs, this);
G1ParCleanupCTTask cleanup_task(ct_bs, this,
_young_list->first_scan_only_region(),
_young_list->first_survivor_region());
if (ParallelGCThreads > 0) {
set_par_threads(workers()->total_workers());
workers()->run_task(&cleanup_task);
@ -4318,17 +4362,21 @@ void G1CollectedHeap::cleanUpCardTable() {
}
r->set_next_dirty_cards_region(NULL);
}
}
// now, redirty the cards of the scan-only and survivor regions
// (it seemed faster to do it this way, instead of iterating over
// all regions and then clearing / dirtying as appropriate)
dirtyCardsForYoungRegions(ct_bs, _young_list->first_scan_only_region());
dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region());

}
double elapsed = os::elapsedTime() - start;
g1_policy()->record_clear_ct_time( elapsed * 1000.0);
#ifndef PRODUCT
if (G1VerifyCTCleanup || VerifyAfterGC) {
G1VerifyCardTableCleanup cleanup_verifier(ct_bs);
heap_region_iterate(&cleanup_verifier);
}
#endif
}


void G1CollectedHeap::do_collection_pause_if_appropriate(size_t word_size) {
if (g1_policy()->should_do_collection_pause(word_size)) {
@ -5022,7 +5070,7 @@ bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
return hr->is_in(p);
}
}
#endif // PRODUCT
#endif // !PRODUCT

void G1CollectedHeap::g1_unimplemented() {
// Unimplemented();

@ -167,16 +167,11 @@ class G1CollectedHeap : public SharedHeap {
friend class G1MarkSweep;

private:
enum SomePrivateConstants {
VeryLargeInBytes = HeapRegion::GrainBytes/2,
VeryLargeInWords = VeryLargeInBytes/HeapWordSize,
MinHeapDeltaBytes = 10 * HeapRegion::GrainBytes, // FIXME
NumAPIs = HeapRegion::MaxAge
};

// The one and only G1CollectedHeap, so static functions can find it.
static G1CollectedHeap* _g1h;

static size_t _humongous_object_threshold_in_words;

// Storage for the G1 heap (excludes the permanent generation).
VirtualSpace _g1_storage;
MemRegion _g1_reserved;
@ -1021,7 +1016,7 @@ public:

// Returns "true" iff the given word_size is "very large".
static bool isHumongous(size_t word_size) {
return word_size >= VeryLargeInWords;
return word_size >= _humongous_object_threshold_in_words;
}
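A worked example of the new threshold, under assumed values (a 1 MB region on a 64-bit VM, i.e. 8-byte heap words):

// Illustrative arithmetic only; region size and word size are assumptions.
size_t grain_words = (1024 * 1024) / 8;     // 131072 words in a 1 MB region
size_t threshold   = grain_words / 2;       // 65536 words == 512 KB
bool   humongous   = (70000 >= threshold);  // a ~547 KB request: true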

// Update mod union table with the set of dirty cards.

@ -201,6 +201,11 @@ G1CollectorPolicy::G1CollectorPolicy() :
_survivors_age_table(true)

{
// Set up the region size and associated fields. Given that the
// policy is created before the heap, we have to set this up here,
// so it's done as soon as possible.
HeapRegion::setup_heap_region_size(Arguments::min_heap_size());

_recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
_prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;

@ -993,8 +998,6 @@ void G1CollectorPolicy::record_full_collection_end() {
double full_gc_time_sec = end_sec - _cur_collection_start_sec;
double full_gc_time_ms = full_gc_time_sec * 1000.0;

checkpoint_conc_overhead();

_all_full_gc_times_ms->add(full_gc_time_ms);

update_recent_gc_times(end_sec, full_gc_time_ms);
@ -1164,7 +1167,6 @@ void G1CollectorPolicy::record_concurrent_mark_init_end() {
double end_time_sec = os::elapsedTime();
double elapsed_time_ms = (end_time_sec - _mark_init_start_sec) * 1000.0;
_concurrent_mark_init_times_ms->add(elapsed_time_ms);
checkpoint_conc_overhead();
record_concurrent_mark_init_end_pre(elapsed_time_ms);

_mmu_tracker->add_pause(_mark_init_start_sec, end_time_sec, true);
@ -1178,7 +1180,6 @@ void G1CollectorPolicy::record_concurrent_mark_remark_start() {
void G1CollectorPolicy::record_concurrent_mark_remark_end() {
double end_time_sec = os::elapsedTime();
double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
checkpoint_conc_overhead();
_concurrent_mark_remark_times_ms->add(elapsed_time_ms);
_cur_mark_stop_world_time_ms += elapsed_time_ms;
_prev_collection_pause_end_ms += elapsed_time_ms;
@ -1210,7 +1211,6 @@ record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,

// The important thing about this is that it includes "os::elapsedTime".
void G1CollectorPolicy::record_concurrent_mark_cleanup_end_work2() {
checkpoint_conc_overhead();
double end_time_sec = os::elapsedTime();
double elapsed_time_ms = (end_time_sec - _mark_cleanup_start_sec)*1000.0;
_concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
@ -1425,8 +1425,6 @@ void G1CollectorPolicy::record_collection_pause_end(bool abandoned) {
}
#endif // PRODUCT

checkpoint_conc_overhead();

if (in_young_gc_mode()) {
last_pause_included_initial_mark = _should_initiate_conc_mark;
if (last_pause_included_initial_mark)
@ -2525,19 +2523,6 @@ region_num_to_mbs(int length) {
}
#endif // PRODUCT

void
G1CollectorPolicy::checkpoint_conc_overhead() {
double conc_overhead = 0.0;
if (G1AccountConcurrentOverhead)
conc_overhead = COTracker::totalPredConcOverhead();
_mmu_tracker->update_conc_overhead(conc_overhead);
#if 0
gclog_or_tty->print(" CO %1.4lf TARGET %1.4lf",
conc_overhead, _mmu_tracker->max_gc_time());
#endif
}


size_t G1CollectorPolicy::max_regions(int purpose) {
switch (purpose) {
case GCAllocForSurvived:

@ -92,9 +92,7 @@ protected:
int _parallel_gc_threads;

enum SomePrivateConstants {
NumPrevPausesForHeuristics = 10,
NumPrevGCsForHeuristics = 10,
NumAPIs = HeapRegion::MaxAge
NumPrevPausesForHeuristics = 10
};

G1MMUTracker* _mmu_tracker;
@ -981,8 +979,6 @@ public:
void set_should_initiate_conc_mark() { _should_initiate_conc_mark = true; }
void unset_should_initiate_conc_mark(){ _should_initiate_conc_mark = false; }

void checkpoint_conc_overhead();

// If an expansion would be appropriate, because recent GC overhead had
// exceeded the desired limit, return an amount to expand by.
virtual size_t expansion_amount();

@ -37,21 +37,7 @@

G1MMUTracker::G1MMUTracker(double time_slice, double max_gc_time) :
_time_slice(time_slice),
_max_gc_time(max_gc_time),
_conc_overhead_time_sec(0.0) { }

void
G1MMUTracker::update_conc_overhead(double conc_overhead) {
double conc_overhead_time_sec = _time_slice * conc_overhead;
if (conc_overhead_time_sec > 0.9 * _max_gc_time) {
// We are screwed, as we only seem to have <10% of the soft
// real-time goal available for pauses. Let's admit defeat and
// allow something more generous as a pause target.
conc_overhead_time_sec = 0.75 * _max_gc_time;
}

_conc_overhead_time_sec = conc_overhead_time_sec;
}
_max_gc_time(max_gc_time) { }

G1MMUTrackerQueue::G1MMUTrackerQueue(double time_slice, double max_gc_time) :
G1MMUTracker(time_slice, max_gc_time),
@ -128,7 +114,7 @@ double G1MMUTrackerQueue::longest_pause_internal(double current_time) {

while( 1 ) {
double gc_time =
calculate_gc_time(current_time + target_time) + _conc_overhead_time_sec;
calculate_gc_time(current_time + target_time);
double diff = target_time + gc_time - _max_gc_time;
if (!is_double_leq_0(diff)) {
target_time -= diff;
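A minimal sketch of the accounting this commit removes: the MMU tracker used to subtract a (capped) concurrent-overhead allowance from the per-time-slice pause budget. Plain doubles, no HotSpot types:

// Illustrative only: what max_gc_time() effectively returned before
// this change, given a conc_overhead fraction of one CPU.
double effective_pause_budget(double time_slice,    // e.g. 1.0 s
                              double max_gc_time,   // e.g. 0.05 s per slice
                              double conc_overhead) {
  double conc_overhead_time = time_slice * conc_overhead;
  if (conc_overhead_time > 0.9 * max_gc_time) {
    // Less than 10% of the goal would remain for pauses; fall back
    // to a more generous allowance, as the removed code did.
    conc_overhead_time = 0.75 * max_gc_time;
  }
  return max_gc_time - conc_overhead_time;
}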

@ -33,19 +33,15 @@ protected:
double _time_slice;
double _max_gc_time; // this is per time slice

double _conc_overhead_time_sec;

public:
G1MMUTracker(double time_slice, double max_gc_time);

void update_conc_overhead(double conc_overhead);

virtual void add_pause(double start, double end, bool gc_thread) = 0;
virtual double longest_pause(double current_time) = 0;
virtual double when_sec(double current_time, double pause_time) = 0;

double max_gc_time() {
return _max_gc_time - _conc_overhead_time_sec;
return _max_gc_time;
}

inline bool now_max_gc(double current_time) {

@ -102,9 +102,14 @@ void G1MarkSweep::allocate_stacks() {
GenMarkSweep::_marking_stack =
new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);

size_t size = SystemDictionary::number_of_classes() * 2;
int size = SystemDictionary::number_of_classes() * 2;
GenMarkSweep::_revisit_klass_stack =
new (ResourceObj::C_HEAP) GrowableArray<Klass*>((int)size, true);
new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
// (#klass/k)^2 for k ~ 10 appears a better fit, but this will have to do
// for now until we have a chance to work out a more optimal setting.
GenMarkSweep::_revisit_mdo_stack =
new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(size*2, true);

}

void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
@ -146,6 +151,11 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
assert(GenMarkSweep::_marking_stack->is_empty(),
"stack should be empty by now");

// Visit memoized MDO's and clear any unmarked weak refs
GenMarkSweep::follow_mdo_weak_refs();
assert(GenMarkSweep::_marking_stack->is_empty(), "just drained");


// Visit symbol and interned string tables and delete unmarked oops
SymbolTable::unlink(&GenMarkSweep::is_alive);
StringTable::unlink(&GenMarkSweep::is_alive);

@ -37,11 +37,7 @@
develop(intx, G1MarkingOverheadPercent, 0, \
"Overhead of concurrent marking") \
\
develop(bool, G1AccountConcurrentOverhead, false, \
"Whether soft real-time compliance in G1 will take into account" \
"concurrent overhead") \
\
product(intx, G1YoungGenSize, 0, \
product(uintx, G1YoungGenSize, 0, \
"Size of the G1 young generation, 0 is the adaptive policy") \
\
develop(bool, G1Gen, true, \
@ -250,6 +246,9 @@
"If non-0 is the size of the G1 survivor space, " \
"otherwise SurvivorRatio is used to determine the size") \
\
product(uintx, G1HeapRegionSize, 0, \
"Size of the G1 regions.") \
\
experimental(bool, G1ParallelRSetUpdatingEnabled, false, \
"Enables the parallelization of remembered set updating " \
"during evacuation pauses") \
@ -264,6 +263,9 @@
\
develop(intx, G1CardCountCacheExpandThreshold, 16, \
"Expand the card count cache if the number of collisions for " \
"a particular entry exceeds this value.")
"a particular entry exceeds this value.") \
\
develop(bool, G1VerifyCTCleanup, false, \
"Verify card table cleanup.")

G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)

@ -25,6 +25,12 @@
#include "incls/_precompiled.incl"
#include "incls/_heapRegion.cpp.incl"

int HeapRegion::LogOfHRGrainBytes = 0;
int HeapRegion::LogOfHRGrainWords = 0;
int HeapRegion::GrainBytes = 0;
int HeapRegion::GrainWords = 0;
int HeapRegion::CardsPerRegion = 0;

HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
HeapRegion* hr, OopClosure* cl,
CardTableModRefBS::PrecisionStyle precision,
@ -231,6 +237,73 @@ void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
}
}

// Minimum region size; we won't go lower than that.
// We might want to decrease this in the future, to deal with small
// heaps a bit more efficiently.
#define MIN_REGION_SIZE ( 1024 * 1024 )

// Maximum region size; we don't go higher than that. There's a good
// reason for having an upper bound. We don't want regions to get too
// large, otherwise cleanup's effectiveness would decrease as there
// will be fewer opportunities to find totally empty regions after
// marking.
#define MAX_REGION_SIZE ( 32 * 1024 * 1024 )

// The automatic region size calculation will try to have around this
// many regions in the heap (based on the min heap size).
#define TARGET_REGION_NUMBER 2048

void HeapRegion::setup_heap_region_size(uintx min_heap_size) {
// region_size in bytes
uintx region_size = G1HeapRegionSize;
if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
// We base the automatic calculation on the min heap size. This
// can be problematic if the spread between min and max is quite
// wide, imagine -Xms128m -Xmx32g. But, if we decided it based on
// the max size, the region size might be way too large for the
// min size. Either way, some users might have to set the region
// size manually for some -Xms / -Xmx combos.

region_size = MAX2(min_heap_size / TARGET_REGION_NUMBER,
(uintx) MIN_REGION_SIZE);
}

int region_size_log = log2_long((jlong) region_size);
// Recalculate the region size to make sure it's a power of
// 2. This means that region_size is the largest power of 2 that's
// <= what we've calculated so far.
region_size = 1 << region_size_log;

// Now make sure that we don't go over or under our limits.
if (region_size < MIN_REGION_SIZE) {
region_size = MIN_REGION_SIZE;
} else if (region_size > MAX_REGION_SIZE) {
region_size = MAX_REGION_SIZE;
}

// And recalculate the log.
region_size_log = log2_long((jlong) region_size);

// Now, set up the globals.
guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
LogOfHRGrainBytes = region_size_log;

guarantee(LogOfHRGrainWords == 0, "we should only set it once");
LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;

guarantee(GrainBytes == 0, "we should only set it once");
// The cast to int is safe, given that we've bounded region_size by
// MIN_REGION_SIZE and MAX_REGION_SIZE.
GrainBytes = (int) region_size;

guarantee(GrainWords == 0, "we should only set it once");
GrainWords = GrainBytes >> LogHeapWordSize;
guarantee(1 << LogOfHRGrainWords == GrainWords, "sanity");

guarantee(CardsPerRegion == 0, "we should only set it once");
CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
}
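A standalone sketch of the sizing rule above, assuming the same constants (1 MB minimum, 32 MB maximum, ~2048 target regions): it returns the largest power of two that is at most min_heap / 2048, clamped to the [1 MB, 32 MB] range.

#include <stdint.h>

// Illustrative only; mirrors setup_heap_region_size() without the
// HotSpot flag handling and global state.
static uint64_t region_size_for(uint64_t min_heap_bytes) {
  const uint64_t kMin = 1024 * 1024;
  const uint64_t kMax = 32 * 1024 * 1024;
  uint64_t size = min_heap_bytes / 2048;
  if (size < kMin) size = kMin;
  if (size > kMax) size = kMax;
  // Round down to a power of two.
  uint64_t pow2 = kMin;
  while (pow2 * 2 <= size) pow2 *= 2;
  return pow2;
}
// e.g. -Xms512m gives 512 MB / 2048 = 256 KB, clamped up to 1 MB;
// -Xms8g gives 4 MB regions.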

void HeapRegion::reset_after_compaction() {
G1OffsetTableContigSpace::reset_after_compaction();
// After a compaction the mark bitmap is invalid, so we must

@ -297,15 +297,24 @@ class HeapRegion: public G1OffsetTableContigSpace {
HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
MemRegion mr, bool is_zeroed);

enum SomePublicConstants {
// HeapRegions are GrainBytes-aligned
// and have sizes that are multiples of GrainBytes.
LogOfHRGrainBytes = 20,
LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize,
GrainBytes = 1 << LogOfHRGrainBytes,
GrainWords = 1 << LogOfHRGrainWords,
MaxAge = 2, NoOfAges = MaxAge+1
};
static int LogOfHRGrainBytes;
static int LogOfHRGrainWords;
// The normal type of these should be size_t. However, they used to
// be members of an enum before and they are assumed by the
// compilers to be ints. To avoid going and fixing all their uses,
// I'm declaring them as ints. I'm not anticipating heap region
// sizes to reach anywhere near 2g, so using an int here is safe.
static int GrainBytes;
static int GrainWords;
static int CardsPerRegion;

// It sets up the heap region size (GrainBytes / GrainWords), as
// well as other related fields that are based on the heap region
// size (LogOfHRGrainBytes / LogOfHRGrainWords /
// CardsPerRegion). All those fields are considered constant
// throughout the JVM's execution, therefore they should only be set
// up once during initialization time.
static void setup_heap_region_size(uintx min_heap_size);

enum ClaimValues {
InitialClaimValue = 0,

@ -57,10 +57,6 @@ class PerRegionTable: public CHeapObj {

#endif // _MSC_VER

enum SomePrivateConstants {
CardsPerRegion = HeapRegion::GrainBytes >> CardTableModRefBS::card_shift
};

protected:
// We need access in order to union things into the base table.
BitMap* bm() { return &_bm; }
@ -76,7 +72,7 @@ protected:
#if PRT_COUNT_OCCUPIED
_occupied(0),
#endif
_bm(CardsPerRegion, false /* in-resource-area */)
_bm(HeapRegion::CardsPerRegion, false /* in-resource-area */)
{}

static void free(PerRegionTable* prt) {
@ -144,7 +140,8 @@ protected:
CardIdx_t from_card = (CardIdx_t)
hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);

assert(0 <= from_card && from_card < CardsPerRegion, "Must be in range.");
assert(0 <= from_card && from_card < HeapRegion::CardsPerRegion,
"Must be in range.");
add_card_work(from_card, par);
}
}
@ -631,7 +628,7 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
uintptr_t(from_hr->bottom())
>> CardTableModRefBS::card_shift;
CardIdx_t card_index = from_card - from_hr_bot_card_index;
assert(0 <= card_index && card_index < PosParPRT::CardsPerRegion,
assert(0 <= card_index && card_index < HeapRegion::CardsPerRegion,
"Must be in range.");
if (G1HRRSUseSparseTable &&
_sparse_table.add_card(from_hrs_ind, card_index)) {
@ -922,7 +919,7 @@ size_t OtherRegionsTable::occ_fine() const {
}

size_t OtherRegionsTable::occ_coarse() const {
return (_n_coarse_entries * PosParPRT::CardsPerRegion);
return (_n_coarse_entries * HeapRegion::CardsPerRegion);
}

size_t OtherRegionsTable::occ_sparse() const {
@ -1049,7 +1046,8 @@ bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const
uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift;
assert(from_card >= hr_bot_card_index, "Inv");
CardIdx_t card_index = from_card - hr_bot_card_index;
assert(0 <= card_index && card_index < PosParPRT::CardsPerRegion, "Must be in range.");
assert(0 <= card_index && card_index < HeapRegion::CardsPerRegion,
"Must be in range.");
return _sparse_table.contains_card(hr_ind, card_index);
}

@ -1176,7 +1174,7 @@ void HeapRegionRemSetIterator::initialize(const HeapRegionRemSet* hrrs) {
_is = Sparse;
// Set these values so that we increment to the first region.
_coarse_cur_region_index = -1;
_coarse_cur_region_cur_card = (PosParPRT::CardsPerRegion-1);
_coarse_cur_region_cur_card = (HeapRegion::CardsPerRegion-1);

_cur_region_cur_card = 0;

@ -1195,7 +1193,7 @@ bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) {
// Go to the next card.
_coarse_cur_region_cur_card++;
// Was that the last card in the current region?
if (_coarse_cur_region_cur_card == PosParPRT::CardsPerRegion) {
if (_coarse_cur_region_cur_card == HeapRegion::CardsPerRegion) {
// Yes: find the next region. This may leave _coarse_cur_region_index
// Set to the last index, in which case there are no more coarse
// regions.
@ -1232,7 +1230,7 @@ bool HeapRegionRemSetIterator::fine_has_next(size_t& card_index) {
_fine_cur_prt->_bm.get_next_one_offset(_cur_region_cur_card + 1);
}
while (!fine_has_next()) {
if (_cur_region_cur_card == PosParPRT::CardsPerRegion) {
if (_cur_region_cur_card == (size_t) HeapRegion::CardsPerRegion) {
_cur_region_cur_card = 0;
_fine_cur_prt = _fine_cur_prt->next();
}
@ -1255,7 +1253,7 @@ bool HeapRegionRemSetIterator::fine_has_next(size_t& card_index) {
bool HeapRegionRemSetIterator::fine_has_next() {
return
_fine_cur_prt != NULL &&
_cur_region_cur_card < PosParPRT::CardsPerRegion;
_cur_region_cur_card < (size_t) HeapRegion::CardsPerRegion;
}

bool HeapRegionRemSetIterator::has_next(size_t& card_index) {

@ -347,7 +347,7 @@ CardIdx_t /* RSHashTable:: */ RSHashTableIter::find_first_card_in_list() {
size_t /* RSHashTable:: */ RSHashTableIter::compute_card_ind(CardIdx_t ci) {
return
_heap_bot_card_ind
+ (_rsht->entry(_bl_ind)->r_ind() * CardsPerRegion)
+ (_rsht->entry(_bl_ind)->r_ind() * HeapRegion::CardsPerRegion)
+ ci;
}

@ -172,10 +172,6 @@ class RSHashTableIter VALUE_OBJ_CLASS_SPEC {
RSHashTable* _rsht;
size_t _heap_bot_card_ind;

enum SomePrivateConstants {
CardsPerRegion = HeapRegion::GrainBytes >> CardTableModRefBS::card_shift
};

// If the bucket list pointed to by _bl_ind contains a card, sets
// _bl_ind to the index of that entry, and returns the card.
// Otherwise, returns SparseEntry::NullEntry.

@ -145,6 +145,7 @@ concurrentMarkSweepGeneration.cpp genOopClosures.inline.hpp
concurrentMarkSweepGeneration.cpp globals_extension.hpp
concurrentMarkSweepGeneration.cpp handles.inline.hpp
concurrentMarkSweepGeneration.cpp isGCActiveMark.hpp
concurrentMarkSweepGeneration.cpp iterator.hpp
concurrentMarkSweepGeneration.cpp java.hpp
concurrentMarkSweepGeneration.cpp jvmtiExport.hpp
concurrentMarkSweepGeneration.cpp oop.inline.hpp

@ -64,14 +64,12 @@ concurrentG1RefineThread.cpp mutexLocker.hpp
concurrentG1RefineThread.cpp resourceArea.hpp

concurrentG1RefineThread.hpp concurrentGCThread.hpp
concurrentG1RefineThread.hpp coTracker.hpp

concurrentMark.cpp concurrentMark.hpp
concurrentMark.cpp concurrentMarkThread.inline.hpp
concurrentMark.cpp g1CollectedHeap.inline.hpp
concurrentMark.cpp g1CollectorPolicy.hpp
concurrentMark.cpp g1RemSet.hpp
concurrentMark.cpp gcOverheadReporter.hpp
concurrentMark.cpp genOopClosures.inline.hpp
concurrentMark.cpp heapRegionRemSet.hpp
concurrentMark.cpp heapRegionSeq.inline.hpp
@ -82,7 +80,6 @@ concurrentMark.cpp referencePolicy.hpp
concurrentMark.cpp resourceArea.hpp
concurrentMark.cpp symbolTable.hpp

concurrentMark.hpp coTracker.hpp
concurrentMark.hpp heapRegion.hpp
concurrentMark.hpp taskqueue.hpp

@ -107,7 +104,6 @@ concurrentZFThread.cpp mutexLocker.hpp
concurrentZFThread.cpp space.inline.hpp

concurrentZFThread.hpp concurrentGCThread.hpp
concurrentZFThread.hpp coTracker.hpp

dirtyCardQueue.cpp atomic.hpp
dirtyCardQueue.cpp dirtyCardQueue.hpp
@ -147,7 +143,6 @@ g1CollectedHeap.cpp g1RemSet.inline.hpp
g1CollectedHeap.cpp g1OopClosures.inline.hpp
g1CollectedHeap.cpp genOopClosures.inline.hpp
g1CollectedHeap.cpp gcLocker.inline.hpp
g1CollectedHeap.cpp gcOverheadReporter.hpp
g1CollectedHeap.cpp generationSpec.hpp
g1CollectedHeap.cpp heapRegionRemSet.hpp
g1CollectedHeap.cpp heapRegionSeq.inline.hpp
@ -170,6 +165,7 @@ g1CollectedHeap.inline.hpp g1CollectedHeap.hpp
g1CollectedHeap.inline.hpp heapRegionSeq.hpp
g1CollectedHeap.inline.hpp taskqueue.hpp

g1CollectorPolicy.cpp arguments.hpp
g1CollectorPolicy.cpp concurrentG1Refine.hpp
g1CollectorPolicy.cpp concurrentMark.hpp
g1CollectorPolicy.cpp concurrentMarkThread.inline.hpp

@ -253,10 +253,11 @@ psParallelCompact.cpp gcCause.hpp
psParallelCompact.cpp gcLocker.inline.hpp
psParallelCompact.cpp gcTaskManager.hpp
psParallelCompact.cpp isGCActiveMark.hpp
psParallelCompact.cpp management.hpp
psParallelCompact.cpp memoryService.hpp
psParallelCompact.cpp methodDataOop.hpp
psParallelCompact.cpp oop.inline.hpp
psParallelCompact.cpp oop.pcgc.inline.hpp
psParallelCompact.cpp memoryService.hpp
psParallelCompact.cpp management.hpp
psParallelCompact.cpp parallelScavengeHeap.inline.hpp
psParallelCompact.cpp pcTasks.hpp
psParallelCompact.cpp psMarkSweep.hpp

@ -35,12 +35,6 @@ concurrentGCThread.cpp systemDictionary.hpp

concurrentGCThread.hpp thread.hpp

coTracker.hpp globalDefinitions.hpp
coTracker.hpp numberSeq.hpp

coTracker.cpp coTracker.hpp
coTracker.cpp os.hpp

allocationStats.cpp allocationStats.hpp
allocationStats.cpp ostream.hpp

@ -54,13 +48,6 @@ gcAdaptivePolicyCounters.hpp gcPolicyCounters.hpp
gcAdaptivePolicyCounters.cpp resourceArea.hpp
gcAdaptivePolicyCounters.cpp gcAdaptivePolicyCounters.hpp

gcOverheadReporter.cpp allocation.inline.hpp
gcOverheadReporter.cpp concurrentGCThread.hpp
gcOverheadReporter.cpp coTracker.hpp
gcOverheadReporter.cpp gcOverheadReporter.hpp
gcOverheadReporter.cpp ostream.hpp
gcOverheadReporter.cpp thread_<os_family>.inline.hpp

gSpaceCounters.cpp generation.hpp
gSpaceCounters.cpp resourceArea.hpp
gSpaceCounters.cpp gSpaceCounters.hpp

@ -58,7 +58,6 @@ void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
ParCompactionManager* cm =
ParCompactionManager::gc_thread_compaction_manager(which);
// cm->allocate_stacks();
assert(cm->stacks_have_been_allocated(),
"Stack space has not been allocated");
PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
@ -129,7 +128,6 @@ void RefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
ParCompactionManager* cm =
ParCompactionManager::gc_thread_compaction_manager(which);
// cm->allocate_stacks();
assert(cm->stacks_have_been_allocated(),
"Stack space has not been allocated");
PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);

@ -61,12 +61,16 @@ ParCompactionManager::ParCompactionManager() :
int size =
(SystemDictionary::number_of_classes() * 2) * 2 / ParallelGCThreads;
_revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
// From some experiments (#klass/k)^2 for k = 10 seems a better fit, but this will
// have to do for now until we are able to investigate a more optimal setting.
_revisit_mdo_stack = new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(size*2, true);

}
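A worked example of the sizing rule above, with hypothetical numbers: 10,000 loaded classes and ParallelGCThreads = 4 give each manager a 10,000-entry klass stack and an MDO stack twice that size.

// Illustrative arithmetic only; number_of_classes stands in for the
// SystemDictionary query.
int  number_of_classes   = 10000;
uint parallel_gc_threads = 4;
int  size     = (number_of_classes * 2) * 2 / (int) parallel_gc_threads; // 10000
int  mdo_size = size * 2;                                                // 20000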

ParCompactionManager::~ParCompactionManager() {
delete _overflow_stack;
delete _revisit_klass_stack;
delete _revisit_mdo_stack;
// _manager_array and _stack_array are statics
// shared with all instances of ParCompactionManager
// should not be deallocated.
@ -195,6 +199,7 @@ ParCompactionManager::gc_thread_compaction_manager(int index) {
void ParCompactionManager::reset() {
for(uint i=0; i<ParallelGCThreads+1; i++) {
manager_array(i)->revisit_klass_stack()->clear();
manager_array(i)->revisit_mdo_stack()->clear();
}
}

@ -296,6 +301,7 @@ void ParCompactionManager::drain_region_stacks() {

#ifdef ASSERT
bool ParCompactionManager::stacks_have_been_allocated() {
return (revisit_klass_stack()->data_addr() != NULL);
return (revisit_klass_stack()->data_addr() != NULL &&
revisit_mdo_stack()->data_addr() != NULL);
}
#endif

@ -93,6 +93,7 @@ class ParCompactionManager : public CHeapObj {

#if 1 // does this happen enough to need a per thread stack?
GrowableArray<Klass*>* _revisit_klass_stack;
GrowableArray<DataLayout*>* _revisit_mdo_stack;
#endif
static ParMarkBitMap* _mark_bitmap;

@ -154,6 +155,7 @@ class ParCompactionManager : public CHeapObj {
#if 1
// Probably stays as a growable array
GrowableArray<Klass*>* revisit_klass_stack() { return _revisit_klass_stack; }
GrowableArray<DataLayout*>* revisit_mdo_stack() { return _revisit_mdo_stack; }
#endif

// Save oop for later processing. Must not fail.

@ -482,6 +482,9 @@ void PSMarkSweep::allocate_stacks() {

int size = SystemDictionary::number_of_classes() * 2;
_revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
// (#klass/k)^2, for k ~ 10 appears a better setting, but this will have to do for
// now until we investigate a more optimal setting.
_revisit_mdo_stack = new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(size*2, true);
}


@ -495,6 +498,7 @@ void PSMarkSweep::deallocate_stacks() {

delete _marking_stack;
delete _revisit_klass_stack;
delete _revisit_mdo_stack;
}

void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
@ -540,6 +544,10 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
follow_weak_klass_links();
assert(_marking_stack->is_empty(), "just drained");

// Visit memoized mdo's and clear unmarked weak refs
follow_mdo_weak_refs();
assert(_marking_stack->is_empty(), "just drained");

// Visit symbol and interned string tables and delete unmarked oops
SymbolTable::unlink(is_alive_closure());
StringTable::unlink(is_alive_closure());

@ -2378,7 +2378,10 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,

// Update subklass/sibling/implementor links of live klasses
// revisit_klass_stack is used in follow_weak_klass_links().
follow_weak_klass_links(cm);
follow_weak_klass_links();

// Revisit memoized MDO's and clear any unmarked weak refs
follow_mdo_weak_refs();

// Visit symbol and interned string tables and delete unmarked oops
SymbolTable::unlink(is_alive_closure());
@ -2721,17 +2724,25 @@ void PSParallelCompact::follow_stack(ParCompactionManager* cm) {
}

void
PSParallelCompact::follow_weak_klass_links(ParCompactionManager* serial_cm) {
PSParallelCompact::follow_weak_klass_links() {
// All klasses on the revisit stack are marked at this point.
// Update and follow all subklass, sibling and implementor links.
if (PrintRevisitStats) {
gclog_or_tty->print_cr("#classes in system dictionary = %d", SystemDictionary::number_of_classes());
}
for (uint i = 0; i < ParallelGCThreads + 1; i++) {
ParCompactionManager* cm = ParCompactionManager::manager_array(i);
KeepAliveClosure keep_alive_closure(cm);
for (int i = 0; i < cm->revisit_klass_stack()->length(); i++) {
cm->revisit_klass_stack()->at(i)->follow_weak_klass_links(
int length = cm->revisit_klass_stack()->length();
if (PrintRevisitStats) {
gclog_or_tty->print_cr("Revisit klass stack[%d] length = %d", i, length);
}
for (int j = 0; j < length; j++) {
cm->revisit_klass_stack()->at(j)->follow_weak_klass_links(
is_alive_closure(),
&keep_alive_closure);
}
// revisit_klass_stack is cleared in reset()
follow_stack(cm);
}
}
@ -2741,6 +2752,35 @@ PSParallelCompact::revisit_weak_klass_link(ParCompactionManager* cm, Klass* k) {
cm->revisit_klass_stack()->push(k);
}

#if ( defined(COMPILER1) || defined(COMPILER2) )
void PSParallelCompact::revisit_mdo(ParCompactionManager* cm, DataLayout* p) {
cm->revisit_mdo_stack()->push(p);
}

void PSParallelCompact::follow_mdo_weak_refs() {
// All strongly reachable oops have been marked at this point;
// we can visit and clear any weak references from MDO's which
// we memoized during the strong marking phase.
if (PrintRevisitStats) {
gclog_or_tty->print_cr("#classes in system dictionary = %d", SystemDictionary::number_of_classes());
}
for (uint i = 0; i < ParallelGCThreads + 1; i++) {
ParCompactionManager* cm = ParCompactionManager::manager_array(i);
GrowableArray<DataLayout*>* rms = cm->revisit_mdo_stack();
int length = rms->length();
if (PrintRevisitStats) {
gclog_or_tty->print_cr("Revisit MDO stack[%d] length = %d", i, length);
}
for (int j = 0; j < length; j++) {
rms->at(j)->follow_weak_refs(is_alive_closure());
}
// revisit_mdo_stack is cleared in reset()
follow_stack(cm);
}
}
#endif // ( COMPILER1 || COMPILER2 )


#ifdef VALIDATE_MARK_SWEEP

void PSParallelCompact::track_adjusted_pointer(void* p, bool isroot) {

@ -901,7 +901,8 @@ class PSParallelCompact : AllStatic {
static void marking_phase(ParCompactionManager* cm,
bool maximum_heap_compaction);
static void follow_stack(ParCompactionManager* cm);
static void follow_weak_klass_links(ParCompactionManager* cm);
static void follow_weak_klass_links();
static void follow_mdo_weak_refs();

template <class T> static inline void adjust_pointer(T* p, bool is_root);
static void adjust_root_pointer(oop* p) { adjust_pointer(p, true); }
@ -1221,6 +1222,9 @@ class PSParallelCompact : AllStatic {
// Update subklass/sibling/implementor links at end of marking.
static void revisit_weak_klass_link(ParCompactionManager* cm, Klass* k);

// Clear unmarked oops in MDOs at the end of marking.
static void revisit_mdo(ParCompactionManager* cm, DataLayout* p);

#ifndef PRODUCT
// Debugging support.
static const char* space_names[last_space_id];

@ -1,189 +0,0 @@
/*
* Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/

# include "incls/_precompiled.incl"
# include "incls/_coTracker.cpp.incl"

COTracker* COTracker::_head = NULL;
double COTracker::_cpu_number = -1.0;

void
COTracker::resetPeriod(double now_sec, double vnow_sec) {
guarantee( _enabled, "invariant" );
_period_start_time_sec = now_sec;
_period_start_vtime_sec = vnow_sec;
}

void
COTracker::setConcOverhead(double time_stamp_sec,
double conc_overhead) {
guarantee( _enabled, "invariant" );
_conc_overhead = conc_overhead;
_time_stamp_sec = time_stamp_sec;
if (conc_overhead > 0.001)
_conc_overhead_seq.add(conc_overhead);
}

void
COTracker::reset(double starting_conc_overhead) {
guarantee( _enabled, "invariant" );
double now_sec = os::elapsedTime();
setConcOverhead(now_sec, starting_conc_overhead);
}

void
COTracker::start() {
guarantee( _enabled, "invariant" );
resetPeriod(os::elapsedTime(), os::elapsedVTime());
}

void
COTracker::update(bool force_end) {
assert( _enabled, "invariant" );
double end_time_sec = os::elapsedTime();
double elapsed_time_sec = end_time_sec - _period_start_time_sec;
if (force_end || elapsed_time_sec > _update_period_sec) {
// reached the end of the period
double end_vtime_sec = os::elapsedVTime();
double elapsed_vtime_sec = end_vtime_sec - _period_start_vtime_sec;

double conc_overhead = elapsed_vtime_sec / elapsed_time_sec;

setConcOverhead(end_time_sec, conc_overhead);
resetPeriod(end_time_sec, end_vtime_sec);
}
}

void
COTracker::updateForSTW(double start_sec, double end_sec) {
if (!_enabled)
return;

// During a STW pause, no concurrent GC thread has done any
// work. So, we can safely adjust the start of the current period by
// adding the duration of the STW pause to it, so that the STW pause
// doesn't affect the reading of the concurrent overhead (it's
// basically like excluding the time of the STW pause from the
// concurrent overhead calculation).

double stw_duration_sec = end_sec - start_sec;
guarantee( stw_duration_sec > 0.0, "invariant" );

if (outOfDate(start_sec))
_conc_overhead = 0.0;
else
_time_stamp_sec = end_sec;
_period_start_time_sec += stw_duration_sec;
_conc_overhead_seq = NumberSeq();

guarantee( os::elapsedTime() > _period_start_time_sec, "invariant" );
}

double
COTracker::predConcOverhead() {
if (_enabled) {
// tty->print(" %1.2lf", _conc_overhead_seq.maximum());
return _conc_overhead_seq.maximum();
} else {
// tty->print(" DD");
return 0.0;
}
}

void
COTracker::resetPred() {
_conc_overhead_seq = NumberSeq();
}

COTracker::COTracker(int group)
: _enabled(false),
_group(group),
_period_start_time_sec(-1.0),
_period_start_vtime_sec(-1.0),
_conc_overhead(-1.0),
_time_stamp_sec(-1.0),
_next(NULL) {
// GCOverheadReportingPeriodMS indicates how frequently the
// concurrent overhead will be recorded by the GC Overhead
// Reporter. We want to take readings less often than that. If we
// took readings more often than that, some of them might be lost.
_update_period_sec = ((double) GCOverheadReportingPeriodMS) / 1000.0 * 1.25;
_next = _head;
_head = this;

if (_cpu_number < 0.0)
_cpu_number = (double) os::processor_count();
}

// statics

void
COTracker::updateAllForSTW(double start_sec, double end_sec) {
for (COTracker* curr = _head; curr != NULL; curr = curr->_next) {
curr->updateForSTW(start_sec, end_sec);
}
}

double
COTracker::totalConcOverhead(double now_sec) {
double total_conc_overhead = 0.0;

for (COTracker* curr = _head; curr != NULL; curr = curr->_next) {
double conc_overhead = curr->concOverhead(now_sec);
total_conc_overhead += conc_overhead;
}

return total_conc_overhead;
}

double
COTracker::totalConcOverhead(double now_sec,
size_t group_num,
double* co_per_group) {
double total_conc_overhead = 0.0;

for (size_t i = 0; i < group_num; ++i)
co_per_group[i] = 0.0;

for (COTracker* curr = _head; curr != NULL; curr = curr->_next) {
size_t group = curr->_group;
assert( 0 <= group && group < group_num, "invariant" );
double conc_overhead = curr->concOverhead(now_sec);

co_per_group[group] += conc_overhead;
total_conc_overhead += conc_overhead;
}

return total_conc_overhead;
}

double
COTracker::totalPredConcOverhead() {
double total_pred_conc_overhead = 0.0;
for (COTracker* curr = _head; curr != NULL; curr = curr->_next) {
total_pred_conc_overhead += curr->predConcOverhead();
curr->resetPred();
}
return total_pred_conc_overhead / _cpu_number;
}
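The core measurement the deleted update() performed is the ratio of CPU time consumed by the thread (vtime) to wall-clock time elapsed over the same period. A minimal sketch in plain C++, not the deleted HotSpot code:

// Illustrative only; the two readings correspond to os::elapsedTime()
// and os::elapsedVTime() at the start of a period.
struct OverheadPeriod {
  double start_wall_sec;
  double start_vtime_sec;

  // Call at the end of a period with current wall/vtime readings.
  double conc_overhead(double end_wall_sec, double end_vtime_sec) const {
    double wall  = end_wall_sec  - start_wall_sec;
    double vtime = end_vtime_sec - start_vtime_sec;
    return vtime / wall;  // e.g. 0.25 == a quarter of one CPU
  }
};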
|
@ -1,181 +0,0 @@
|
||||
/*
|
||||
* Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
||||
* CA 95054 USA or visit www.sun.com if you need additional information or
|
||||
* have any questions.
|
||||
*
|
||||
*/
|
||||
|
||||
// COTracker keeps track of the concurrent overhead of a GC thread.
|
||||
|
||||
// A thread that needs to be tracked must, itself, start up its
|
||||
// tracker with the start() method and then call the update() method
|
||||
// at regular intervals. What the tracker does is to calculate the
|
||||
// concurrent overhead of a process at a given update period. The
|
||||
// tracker starts and when is detects that it has exceeded the given
|
||||
// period, it calculates the duration of the period in wall-clock time
|
||||
// and the duration of the period in vtime (i.e. how much time the
|
||||
// concurrent processes really took up during this period). The ratio
|
||||
// of the latter over the former is the concurrent overhead of that
|
||||
// process for that period over a single CPU. This overhead is stored
|
||||
// on the tracker, "timestamped" with the wall-clock time of the end
|
||||
// of the period. When the concurrent overhead of this process needs
|
||||
// to be queried, this last "reading" provides a good approximation
|
||||
// (we assume that the concurrent overhead of a particular thread
|
||||
// stays largely constant over time). The timestamp is necessary to
|
||||
// detect when the process has stopped working and the recorded
|
||||
// reading hasn't been updated for some time.
|
||||
|
||||
// Each concurrent GC thread is considered to be part of a "group"
|
||||
// (i.e. any available concurrent marking threads are part of the
|
||||
// "concurrent marking thread group"). A COTracker is associated with
|
||||
// a single group at construction-time. It's up to each collector to
|
||||
// decide how groups will be mapped to such an id (ids should start
|
||||
// from 0 and be consecutive; there's a hardcoded max group num
|
||||
// defined on the GCOverheadTracker class). The notion of a group has
|
||||
// been introduced to be able to identify how much overhead was
|
||||
// imposed by each group, instead of getting a single value that
|
||||
// covers all concurrent overhead.
|
||||
|
||||
class COTracker {
private:
  // It indicates whether this tracker is enabled or not. When the
  // tracker is disabled, then it returns 0.0 as the latest concurrent
  // overhead and several methods (reset, start, and update) are not
  // supposed to be called on it. This enabling / disabling facility
  // is really provided to make a bit more explicit in the code when a
  // particular tracker of a process that doesn't run all the time
  // (e.g. concurrent marking) is supposed to be used and when it's not.
  bool _enabled;

  // The ID of the group associated with this tracker.
  int _group;

  // The update period of the tracker. A new value for the concurrent
  // overhead of the associated process will be made at intervals no
  // smaller than this.
  double _update_period_sec;

  // The start times (both wall-clock time and vtime) of the current
  // interval.
  double _period_start_time_sec;
  double _period_start_vtime_sec;

  // Number sequence of the concurrent overhead readings within a period.
  NumberSeq _conc_overhead_seq;

  // The latest reading of the concurrent overhead (over a single CPU)
  // imposed by the associated concurrent thread, made available at
  // the indicated wall-clock time.
  double _conc_overhead;
  double _time_stamp_sec;

  // The number of CPUs that the host machine has (for convenience
  // really, as we'd have to keep translating it into a double).
  static double _cpu_number;

  // Fields that keep a list of all trackers created. This is useful,
  // since it allows us to sum up the concurrent overhead without
  // having to write code for a specific collector to broadcast a
  // request to all its concurrent processes.
  COTracker* _next;
  static COTracker* _head;

  // It indicates that a new period is starting by updating the
  // _period_start_time_sec and _period_start_vtime_sec fields.
  void resetPeriod(double now_sec, double vnow_sec);
  // It updates the latest concurrent overhead reading, taken at a
  // given wall-clock time.
  void setConcOverhead(double time_stamp_sec, double conc_overhead);

  // It determines whether the time stamp of the latest concurrent
  // overhead reading is out of date or not.
  bool outOfDate(double now_sec) {
    // The latest reading is considered out of date if it was taken
    // more than 1.2x the update period ago.
    return (now_sec - _time_stamp_sec) > 1.2 * _update_period_sec;
  }

public:
  // The constructor which associates the tracker with a group ID.
  COTracker(int group);

  // Methods to enable / disable the tracker and query whether it is enabled.
  void enable()  { _enabled = true;  }
  void disable() { _enabled = false; }
  bool enabled() { return _enabled;  }

  // It resets the tracker and sets the concurrent overhead reading to be
  // the given parameter and the associated time stamp to be now.
  void reset(double starting_conc_overhead = 0.0);
  // The tracker starts tracking. It should only be called from the
  // concurrent thread that is tracked by this tracker.
  void start();
  // It updates the tracker and, if the current period is longer than
  // the update period, the concurrent overhead reading will be
  // updated. force_end being true indicates that it's the last call
  // to update() by this process before the tracker is disabled (the
  // tracker can be re-enabled later if necessary). It should only be
  // called from the concurrent thread that is tracked by this tracker
  // and while the thread has joined the STS.
  void update(bool force_end = false);
  // It adjusts the contents of the tracker to take into account a STW
  // pause.
  void updateForSTW(double start_sec, double end_sec);

  // It returns the last concurrent overhead reading over a single
  // CPU. If the reading is out of date, or the tracker is disabled,
  // it returns 0.0.
  double concCPUOverhead(double now_sec) {
    if (!_enabled || outOfDate(now_sec))
      return 0.0;
    else
      return _conc_overhead;
  }

  // It returns the last concurrent overhead reading over all CPUs
  // that the host machine has. If the reading is out of date, or the
  // tracker is disabled, it returns 0.0.
  double concOverhead(double now_sec) {
    return concCPUOverhead(now_sec) / _cpu_number;
  }

  double predConcOverhead();

  void resetPred();

  // statics

  // It notifies all trackers about a STW pause.
  static void updateAllForSTW(double start_sec, double end_sec);

  // It returns the sum of the concurrent overhead readings of all
  // available (and enabled) trackers for the given time stamp. The
  // overhead is over all the CPUs of the host machine.
  static double totalConcOverhead(double now_sec);
  // Like the previous method, but it also sums up the overheads per
  // group number. The length of the co_per_group array must be at
  // least as large as group_num.
  static double totalConcOverhead(double now_sec,
                                  size_t group_num,
                                  double* co_per_group);

  static double totalPredConcOverhead();
};
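
// Illustrative lifecycle for a tracked concurrent thread (the marking
// helpers are hypothetical; only the COTracker calls come from the
// class above):
//
//   COTracker tracker(0 /* group id */);
//   tracker.enable();
//   tracker.reset();
//   tracker.start();
//   while (more_marking_work()) {  // hypothetical work loop
//     do_marking_step();           // hypothetical
//     tracker.update();            // recomputes at most once per period
//   }
//   tracker.update(true /* force_end */);
//   tracker.disable();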

@ -1,179 +0,0 @@

/*
 * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 */

# include "incls/_precompiled.incl"
# include "incls/_gcOverheadReporter.cpp.incl"

class COReportingThread : public ConcurrentGCThread {
private:
  GCOverheadReporter* _reporter;

public:
  COReportingThread(GCOverheadReporter* reporter) : _reporter(reporter) {
    guarantee( _reporter != NULL, "precondition" );
    create_and_start();
  }

  virtual void run() {
    initialize_in_thread();
    wait_for_universe_init();

    int period_ms = GCOverheadReportingPeriodMS;

    while ( true ) {
      os::sleep(Thread::current(), period_ms, false);

      _sts.join();
      double now_sec = os::elapsedTime();
      _reporter->collect_and_record_conc_overhead(now_sec);
      _sts.leave();
    }

    terminate();
  }
};

GCOverheadReporter* GCOverheadReporter::_reporter = NULL;

GCOverheadReporter::GCOverheadReporter(size_t group_num,
                                       const char* group_names[],
                                       size_t length)
    : _group_num(group_num), _prev_end_sec(0.0) {
  guarantee( 0 <= group_num && group_num <= MaxGCOverheadGroupNum,
             "precondition" );

  _base = NEW_C_HEAP_ARRAY(GCOverheadReporterEntry, length);
  _top = _base + length;
  _curr = _base;

  for (size_t i = 0; i < group_num; ++i) {
    guarantee( group_names[i] != NULL, "precondition" );
    _group_names[i] = group_names[i];
  }
}

void
GCOverheadReporter::add(double start_sec, double end_sec,
                        double* conc_overhead,
                        double stw_overhead) {
  assert( _curr <= _top, "invariant" );

  if (_curr == _top) {
    guarantee( false, "trace full" );
    return;
  }

  _curr->_start_sec = start_sec;
  _curr->_end_sec = end_sec;
  for (size_t i = 0; i < _group_num; ++i) {
    _curr->_conc_overhead[i] =
      (conc_overhead != NULL) ? conc_overhead[i] : 0.0;
  }
  _curr->_stw_overhead = stw_overhead;

  ++_curr;
}

void
GCOverheadReporter::collect_and_record_conc_overhead(double end_sec) {
  double start_sec = _prev_end_sec;
  guarantee( end_sec > start_sec, "invariant" );

  double conc_overhead[MaxGCOverheadGroupNum];
  COTracker::totalConcOverhead(end_sec, _group_num, conc_overhead);
  add_conc_overhead(start_sec, end_sec, conc_overhead);
  _prev_end_sec = end_sec;
}

void
GCOverheadReporter::record_stw_start(double start_sec) {
  guarantee( start_sec > _prev_end_sec, "invariant" );
  collect_and_record_conc_overhead(start_sec);
}

void
GCOverheadReporter::record_stw_end(double end_sec) {
  double start_sec = _prev_end_sec;
  COTracker::updateAllForSTW(start_sec, end_sec);
  add_stw_overhead(start_sec, end_sec, 1.0);

  _prev_end_sec = end_sec;
}

void
GCOverheadReporter::print() const {
  tty->print_cr("");
  tty->print_cr("GC Overhead (%d entries)", _curr - _base);
  tty->print_cr("");
  GCOverheadReporterEntry* curr = _base;
  while (curr < _curr) {
    double total = curr->_stw_overhead;
    for (size_t i = 0; i < _group_num; ++i)
      total += curr->_conc_overhead[i];

    tty->print("OVERHEAD %12.8lf %12.8lf ",
               curr->_start_sec, curr->_end_sec);

    for (size_t i = 0; i < _group_num; ++i)
      tty->print("%s %12.8lf ", _group_names[i], curr->_conc_overhead[i]);

    tty->print_cr("STW %12.8lf TOT %12.8lf", curr->_stw_overhead, total);
    ++curr;
  }
  tty->print_cr("");
}
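
// Given the format strings above, a trace with two groups named "CM" and
// "CL" prints one line per entry of the following shape (group names and
// all values illustrative):
//
//   OVERHEAD  10.00000000  10.50000000 CM   0.12345678 CL   0.00000000 STW   0.00000000 TOT   0.12345678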

// statics

void
GCOverheadReporter::initGCOverheadReporter(size_t group_num,
                                           const char* group_names[]) {
  guarantee( _reporter == NULL, "should only be called once" );
  guarantee( 0 <= group_num && group_num <= MaxGCOverheadGroupNum,
             "precondition" );
  guarantee( group_names != NULL, "pre-condition" );

  if (GCOverheadReporting) {
    _reporter = new GCOverheadReporter(group_num, group_names);
    new COReportingThread(_reporter);
  }
}

void
GCOverheadReporter::recordSTWStart(double start_sec) {
  if (_reporter != NULL)
    _reporter->record_stw_start(start_sec);
}

void
GCOverheadReporter::recordSTWEnd(double end_sec) {
  if (_reporter != NULL)
    _reporter->record_stw_end(end_sec);
}

void
GCOverheadReporter::printGCOverhead() {
  if (_reporter != NULL)
    _reporter->print();
}

@ -1,141 +0,0 @@

/*
 * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 */

// Keeps track of the GC overhead (both concurrent and STW). It stores
// it in a large array and then prints it to tty at the end of the
// execution.

// See coTracker.hpp for the explanation on what groups are.

// Let's set a maximum number of concurrent overhead groups, to
// statically allocate any arrays we need and not to have to
// malloc/free them. This is just a bit more convenient.
enum {
  MaxGCOverheadGroupNum = 4
};

typedef struct {
  double _start_sec;
  double _end_sec;

  double _conc_overhead[MaxGCOverheadGroupNum];
  double _stw_overhead;
} GCOverheadReporterEntry;

class GCOverheadReporter {
  friend class COReportingThread;

private:
  enum PrivateConstants {
    DefaultReporterLength = 128 * 1024
  };

  // Reference to the single instance of this class.
  static GCOverheadReporter* _reporter;

  // These three references point to the array that contains the GC
  // overhead entries (_base is the base of the array, _top is the
  // address past the last entry of the array, _curr is the next
  // entry to be used).
  GCOverheadReporterEntry* _base;
  GCOverheadReporterEntry* _top;
  GCOverheadReporterEntry* _curr;

  // The number of concurrent overhead groups.
  size_t _group_num;

  // The wall-clock time of the end of the last recorded period of GC
  // overhead.
  double _prev_end_sec;

  // Names for the concurrent overhead groups.
  const char* _group_names[MaxGCOverheadGroupNum];

  // Add a new entry to the large array. conc_overhead being NULL is
  // equivalent to an array full of 0.0s. conc_overhead should have a
  // length of at least _group_num.
  void add(double start_sec, double end_sec,
           double* conc_overhead,
           double stw_overhead);

  // Add an entry that represents concurrent GC overhead.
  // conc_overhead must be at least of length _group_num.
  // conc_overhead being NULL is equivalent to an array full of 0.0s.
  void add_conc_overhead(double start_sec, double end_sec,
                         double* conc_overhead) {
    add(start_sec, end_sec, conc_overhead, 0.0);
  }

  // Add an entry that represents STW GC overhead.
  void add_stw_overhead(double start_sec, double end_sec,
                        double stw_overhead) {
    add(start_sec, end_sec, NULL, stw_overhead);
  }

  // It records the start of a STW pause (i.e. it records the
  // concurrent overhead up to that point).
  void record_stw_start(double start_sec);

  // It records the end of a STW pause (i.e. it records the overhead
  // associated with the pause and adjusts all the trackers to reflect
  // the pause).
  void record_stw_end(double end_sec);

  // It queries all the trackers for their concurrent overhead and
  // records it.
  void collect_and_record_conc_overhead(double end_sec);

  // It prints the contents of the GC overhead array.
  void print() const;

  // Constructor. The same preconditions for group_num and group_names
  // from initGCOverheadReporter apply here too.
  GCOverheadReporter(size_t group_num,
                     const char* group_names[],
                     size_t length = DefaultReporterLength);

public:

  // statics

  // It initialises the GCOverheadReporter and launches the concurrent
  // overhead reporting thread. Both actions happen only if the
  // GCOverheadReporting parameter is set. The length of the
  // group_names array should be >= group_num and group_num should be
  // <= MaxGCOverheadGroupNum. Entries group_names[0..group_num-1]
  // should not be NULL.
  static void initGCOverheadReporter(size_t group_num,
                                     const char* group_names[]);

  // The following three are provided for convenience and they are
  // wrappers around record_stw_start(start_sec), record_stw_end(end_sec),
  // and print(). Each of these checks whether GC overhead reporting
  // is on (i.e. _reporter != NULL) and, if it is, calls the
  // corresponding method. This saves repeating that pattern at the
  // places where they need to be called.
  static void recordSTWStart(double start_sec);
  static void recordSTWEnd(double end_sec);
  static void printGCOverhead();
};
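
// Illustrative wiring from a collector (the group names and call sites
// are hypothetical; the static entry points are the ones declared above):
//
//   const char* names[] = { "CM", "CL" };
//   GCOverheadReporter::initGCOverheadReporter(2, names);
//   ...
//   GCOverheadReporter::recordSTWStart(os::elapsedTime());
//   // ... stop-the-world pause ...
//   GCOverheadReporter::recordSTWEnd(os::elapsedTime());
//   ...
//   GCOverheadReporter::printGCOverhead();  // e.g. at VM exit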

@ -27,6 +27,7 @@

GrowableArray<oop>* MarkSweep::_marking_stack = NULL;
GrowableArray<Klass*>* MarkSweep::_revisit_klass_stack = NULL;
GrowableArray<DataLayout*>* MarkSweep::_revisit_mdo_stack = NULL;

GrowableArray<oop>* MarkSweep::_preserved_oop_stack = NULL;
GrowableArray<markOop>* MarkSweep::_preserved_mark_stack = NULL;
@ -62,12 +63,37 @@ void MarkSweep::revisit_weak_klass_link(Klass* k) {

void MarkSweep::follow_weak_klass_links() {
  // All klasses on the revisit stack are marked at this point.
  // Update and follow all subklass, sibling and implementor links.
  if (PrintRevisitStats) {
    gclog_or_tty->print_cr("#classes in system dictionary = %d", SystemDictionary::number_of_classes());
    gclog_or_tty->print_cr("Revisit klass stack length = %d", _revisit_klass_stack->length());
  }
  for (int i = 0; i < _revisit_klass_stack->length(); i++) {
    _revisit_klass_stack->at(i)->follow_weak_klass_links(&is_alive, &keep_alive);
  }
  follow_stack();
}

#if ( defined(COMPILER1) || defined(COMPILER2) )
void MarkSweep::revisit_mdo(DataLayout* p) {
  _revisit_mdo_stack->push(p);
}

void MarkSweep::follow_mdo_weak_refs() {
  // All strongly reachable oops have been marked at this point;
  // we can visit and clear any weak references from MDO's which
  // we memoized during the strong marking phase.
  assert(_marking_stack->is_empty(), "Marking stack should be empty");
  if (PrintRevisitStats) {
    gclog_or_tty->print_cr("#classes in system dictionary = %d", SystemDictionary::number_of_classes());
    gclog_or_tty->print_cr("Revisit MDO stack length = %d", _revisit_mdo_stack->length());
  }
  for (int i = 0; i < _revisit_mdo_stack->length(); i++) {
    _revisit_mdo_stack->at(i)->follow_weak_refs(&is_alive);
  }
  follow_stack();
}
#endif // ( COMPILER1 || COMPILER2 )
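
// The MDO revisit protocol in miniature, paraphrasing the code above
// (both calls appear verbatim in this change; only the glue is elided):
//
//   // during strong marking, an MDO's receiver rows are weak, so the
//   // MDO is memoized instead of being scanned eagerly:
//   MarkSweep::revisit_mdo(data());  // pushes onto _revisit_mdo_stack
//
//   // after strong marking, the stack is drained and dead rows cleared:
//   _revisit_mdo_stack->at(i)->follow_weak_refs(&is_alive);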

MarkSweep::FollowRootClosure MarkSweep::follow_root_closure;

void MarkSweep::FollowRootClosure::do_oop(oop* p) { follow_root(p); }

@ -23,6 +23,7 @@
 */

class ReferenceProcessor;
class DataLayout;

// MarkSweep takes care of global mark-compact garbage collection for a
// GenCollectedHeap using a four-phase pointer forwarding algorithm. All
@ -65,6 +66,8 @@ class MarkSweep : AllStatic {
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
    virtual const bool do_nmethods() const { return true; }
    virtual const bool should_remember_mdo() const { return true; }
    virtual void remember_mdo(DataLayout* p) { MarkSweep::revisit_mdo(p); }
  };

  class FollowStackClosure: public VoidClosure {
@ -103,6 +106,7 @@ class MarkSweep : AllStatic {
  friend class KeepAliveClosure;
  friend class VM_MarkSweep;
  friend void marksweep_init();
  friend class DataLayout;

  //
  // Vars
@ -112,6 +116,8 @@ class MarkSweep : AllStatic {
  static GrowableArray<oop>* _marking_stack;
  // Stack for live klasses to revisit at end of marking phase
  static GrowableArray<Klass*>* _revisit_klass_stack;
  // Set (stack) of MDO's to revisit at end of marking phase
  static GrowableArray<DataLayout*>* _revisit_mdo_stack;

  // Space for storing/restoring mark word
  static GrowableArray<markOop>* _preserved_mark_stack;
@ -157,6 +163,10 @@ class MarkSweep : AllStatic {
  // Class unloading. Update subklass/sibling/implementor links at end of marking phase.
  static void follow_weak_klass_links();

  // Class unloading. Clear weak refs in MDO's (ProfileData)
  // at the end of the marking phase.
  static void follow_mdo_weak_refs();

  // Debugging
  static void trace(const char* msg) PRODUCT_RETURN;

@ -213,7 +223,10 @@ class MarkSweep : AllStatic {
#endif

  // Call backs for class unloading
  static void revisit_weak_klass_link(Klass* k); // Update subklass/sibling/implementor links at end of marking.
  // Update subklass/sibling/implementor links at end of marking.
  static void revisit_weak_klass_link(Klass* k);
  // For weak refs clearing in MDO's
  static void revisit_mdo(DataLayout* p);
};

class PreservedMark VALUE_OBJ_CLASS_SPEC {
@ -239,6 +239,9 @@ class CollectedHeap : public CHeapObj {
    return p == NULL || is_in_closed_subset(p);
  }

  // XXX is_permanent() and is_in_permanent() should be better named
  // to distinguish one from the other.

  // Returns "TRUE" if "p" is allocated as "permanent" data.
  // If the heap does not use "permanent" data, returns the same
  // value is_in_reserved() would return.
@ -247,13 +250,17 @@ class CollectedHeap : public CHeapObj {
  // space). If you need the more conservative answer use is_permanent().
  virtual bool is_in_permanent(const void *p) const = 0;

  bool is_in_permanent_or_null(const void *p) const {
    return p == NULL || is_in_permanent(p);
  }

  // Returns "TRUE" if "p" is in the committed area of "permanent" data.
  // If the heap does not use "permanent" data, returns the same
  // value is_in() would return.
  virtual bool is_permanent(const void *p) const = 0;

  bool is_in_permanent_or_null(const void *p) const {
    return p == NULL || is_in_permanent(p);
  bool is_permanent_or_null(const void *p) const {
    return p == NULL || is_permanent(p);
  }

  // Returns "TRUE" if "p" is a method oop in the
@ -2684,6 +2684,7 @@ markOop.inline.hpp klassOop.hpp
markOop.inline.hpp markOop.hpp

markSweep.cpp compileBroker.hpp
markSweep.cpp methodDataOop.hpp

markSweep.hpp collectedHeap.hpp

@ -849,8 +849,25 @@ static void trace_osr_request(methodHandle method, nmethod* osr, int bci) {
}
#endif // !PRODUCT

nmethod* InterpreterRuntime::frequency_counter_overflow(JavaThread* thread, address branch_bcp) {
  nmethod* nm = frequency_counter_overflow_inner(thread, branch_bcp);
  assert(branch_bcp != NULL || nm == NULL, "always returns null for non OSR requests");
  if (branch_bcp != NULL && nm != NULL) {
    // This was a successful request for an OSR nmethod. Because
    // frequency_counter_overflow_inner ends with a safepoint check,
    // nm could have been unloaded so look it up again. It's unsafe
    // to examine nm directly since it might have been freed and used
    // for something else.
    frame fr = thread->last_frame();
    methodOop method = fr.interpreter_frame_method();
    int bci = method->bci_from(fr.interpreter_frame_bcp());
    nm = method->lookup_osr_nmethod_for(bci);
  }
  return nm;
}

IRT_ENTRY(nmethod*,
          InterpreterRuntime::frequency_counter_overflow(JavaThread* thread, address branch_bcp))
          InterpreterRuntime::frequency_counter_overflow_inner(JavaThread* thread, address branch_bcp))
  // use UnlockFlagSaver to clear and restore the _do_not_unlock_if_synchronized
  // flag, in case this method triggers classloading which will call into Java.
  UnlockFlagSaver fs(thread);
@ -923,7 +940,6 @@ IRT_ENTRY(nmethod*,
    }
    BiasedLocking::revoke(objects_to_revoke);
  }

  return osr_nm;
}
}
@ -49,6 +49,9 @@ class InterpreterRuntime: AllStatic {
  static ConstantPoolCacheEntry* cache_entry(JavaThread *thread) { return cache_entry_at(thread, Bytes::get_native_u2(bcp(thread) + 1)); }
  static void note_trap(JavaThread *thread, int reason, TRAPS);

  // Inner work method for Interpreter's frequency counter overflow
  static nmethod* frequency_counter_overflow_inner(JavaThread* thread, address branch_bcp);

public:
  // Constants
  static void ldc (JavaThread* thread, bool wide);
@ -660,6 +660,29 @@ void CardTableModRefBS::verify_clean_region(MemRegion mr) {
  GuaranteeNotModClosure blk(this);
  non_clean_card_iterate_work(mr, &blk, false);
}

// To verify that a MemRegion is entirely dirty, this closure is passed to
// dirty_card_iterate. If the region is dirty, do_MemRegion will be
// invoked only once, with a MemRegion equal to the one being
// verified.
class GuaranteeDirtyClosure: public MemRegionClosure {
  CardTableModRefBS* _ct;
  MemRegion _mr;
  bool _result;
public:
  GuaranteeDirtyClosure(CardTableModRefBS* ct, MemRegion mr)
    : _ct(ct), _mr(mr), _result(false) {}
  void do_MemRegion(MemRegion mr) {
    _result = _mr.equals(mr);
  }
  bool result() const { return _result; }
};

void CardTableModRefBS::verify_dirty_region(MemRegion mr) {
  GuaranteeDirtyClosure blk(this, mr);
  dirty_card_iterate(mr, &blk);
  guarantee(blk.result(), "Non-dirty cards in region that should be dirty");
}
#endif

bool CardTableModRefBSForCTRS::card_will_be_scanned(jbyte cv) {
@ -456,6 +456,7 @@ public:
  void verify_guard();

  void verify_clean_region(MemRegion mr) PRODUCT_RETURN;
  void verify_dirty_region(MemRegion mr) PRODUCT_RETURN;

  static size_t par_chunk_heapword_alignment() {
    return CardsPerStrideChunk * card_size_in_words;
@ -162,6 +162,9 @@ void GenMarkSweep::allocate_stacks() {

  int size = SystemDictionary::number_of_classes() * 2;
  _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
  // (#klass/k)^2 for k ~ 10 appears to be a better fit, but this will have to do for
  // now until we have had a chance to investigate a more optimal setting.
  _revisit_mdo_stack = new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(2*size, true);

#ifdef VALIDATE_MARK_SWEEP
  if (ValidateMarkSweep) {
@ -206,6 +209,7 @@ void GenMarkSweep::deallocate_stacks() {

  delete _marking_stack;
  delete _revisit_klass_stack;
  delete _revisit_mdo_stack;

#ifdef VALIDATE_MARK_SWEEP
  if (ValidateMarkSweep) {
@ -262,6 +266,10 @@ void GenMarkSweep::mark_sweep_phase1(int level,
  follow_weak_klass_links();
  assert(_marking_stack->is_empty(), "just drained");

  // Visit memoized MDO's and clear any unmarked weak refs
  follow_mdo_weak_refs();
  assert(_marking_stack->is_empty(), "just drained");

  // Visit symbol and interned string tables and delete unmarked oops
  SymbolTable::unlink(&is_alive);
  StringTable::unlink(&is_alive);
@ -25,6 +25,10 @@
# include "incls/_precompiled.incl"
# include "incls/_iterator.cpp.incl"

#ifdef ASSERT
bool OopClosure::_must_remember_klasses = false;
#endif

void ObjectToOopClosure::do_object(oop obj) {
  obj->oop_iterate(_cl);
}
@ -32,3 +36,13 @@ void ObjectToOopClosure::do_object(oop obj) {
void VoidClosure::do_void() {
  ShouldNotCallThis();
}

#ifdef ASSERT
bool OopClosure::must_remember_klasses() {
  return _must_remember_klasses;
}
void OopClosure::set_must_remember_klasses(bool v) {
  _must_remember_klasses = v;
}
#endif
@ -25,6 +25,7 @@
// The following classes are C++ `closures` for iterating over objects, roots and spaces

class ReferenceProcessor;
class DataLayout;

// Closure provides abortability.

@ -54,9 +55,20 @@ class OopClosure : public Closure {

  // In support of post-processing of weak links of KlassKlass objects;
  // see KlassKlass::oop_oop_iterate().
  virtual const bool should_remember_klasses() const { return false; }

  virtual const bool should_remember_klasses() const {
    assert(!must_remember_klasses(), "Should have overridden this method.");
    return false;
  }

  virtual void remember_klass(Klass* k) { /* do nothing */ }

  // In support of post-processing of weak references in
  // ProfileData (MethodDataOop) objects; see, for example,
  // VirtualCallData::oop_iterate().
  virtual const bool should_remember_mdo() const { return false; }
  virtual void remember_mdo(DataLayout* v) { /* do nothing */ }

  // If "true", invoke on nmethods (when scanning compiled frames).
  virtual const bool do_nmethods() const { return false; }

@ -74,6 +86,12 @@ class OopClosure : public Closure {
  // location without an intervening "major reset" (like the end of a GC).
  virtual bool idempotent() { return false; }
  virtual bool apply_to_weak_ref_discovered_field() { return false; }

#ifdef ASSERT
  static bool _must_remember_klasses;
  static bool must_remember_klasses();
  static void set_must_remember_klasses(bool v);
#endif
};

// ObjectClosure is used for iterating through an object space

|
||||
// correct length.
|
||||
virtual void do_tag(int tag) = 0;
|
||||
};
|
||||
|
||||
#ifdef ASSERT
|
||||
// This class is used to flag phases of a collection that
|
||||
// can unload classes and which should override the
|
||||
// should_remember_klasses() and remember_klass() of OopClosure.
|
||||
// The _must_remember_klasses is set in the contructor and restored
|
||||
// in the destructor. _must_remember_klasses is checked in assertions
|
||||
// in the OopClosure implementations of should_remember_klasses() and
|
||||
// remember_klass() and the expectation is that the OopClosure
|
||||
// implementation should not be in use if _must_remember_klasses is set.
|
||||
// Instances of RememberKlassesChecker can be place in
|
||||
// marking phases of collections which can do class unloading.
|
||||
// RememberKlassesChecker can be passed "false" to turn off checking.
|
||||
// It is used by CMS when CMS yields to a different collector.
|
||||
class RememberKlassesChecker: StackObj {
|
||||
bool _state;
|
||||
bool _skip;
|
||||
public:
|
||||
RememberKlassesChecker(bool checking_on) : _state(false), _skip(false) {
|
||||
_skip = !(ClassUnloading && !UseConcMarkSweepGC ||
|
||||
CMSClassUnloadingEnabled && UseConcMarkSweepGC);
|
||||
if (_skip) {
|
||||
return;
|
||||
}
|
||||
_state = OopClosure::must_remember_klasses();
|
||||
OopClosure::set_must_remember_klasses(checking_on);
|
||||
}
|
||||
~RememberKlassesChecker() {
|
||||
if (_skip) {
|
||||
return;
|
||||
}
|
||||
OopClosure::set_must_remember_klasses(_state);
|
||||
}
|
||||
};
|
||||
#endif // ASSERT
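
// Illustrative placement around a marking phase that can unload classes
// (the scope itself is hypothetical; the semantics are those of the
// class above):
//
//   {
//     RememberKlassesChecker mx(true /* checking_on */);
//     // ... strong marking: any OopClosure used here that can see
//     // Klass* links must override should_remember_klasses() and
//     // remember_klass(), or the assert above fires ...
//   } // destructor restores the previous checking state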

@ -98,10 +98,12 @@ constantPoolCacheOop oopFactory::new_constantPoolCache(int length,
}

klassOop oopFactory::new_instanceKlass(int vtable_len, int itable_len, int static_field_size,
                                       int nonstatic_oop_map_size, ReferenceType rt, TRAPS) {
klassOop oopFactory::new_instanceKlass(int vtable_len, int itable_len,
                                       int static_field_size,
                                       unsigned int nonstatic_oop_map_count,
                                       ReferenceType rt, TRAPS) {
  instanceKlassKlass* ikk = instanceKlassKlass::cast(Universe::instanceKlassKlassObj());
  return ikk->allocate_instance_klass(vtable_len, itable_len, static_field_size, nonstatic_oop_map_size, rt, CHECK_NULL);
  return ikk->allocate_instance_klass(vtable_len, itable_len, static_field_size, nonstatic_oop_map_count, rt, CHECK_NULL);
}

@ -89,8 +89,10 @@ class oopFactory: AllStatic {
                                    TRAPS);

  // Instance classes
  static klassOop new_instanceKlass(int vtable_len, int itable_len, int static_field_size,
                                    int nonstatic_oop_map_size, ReferenceType rt, TRAPS);
  static klassOop new_instanceKlass(int vtable_len, int itable_len,
                                    int static_field_size,
                                    unsigned int nonstatic_oop_map_count,
                                    ReferenceType rt, TRAPS);

  // Methods
private:
@ -1231,6 +1231,11 @@ void ReferenceProcessor::preclean_discovered_references(

  NOT_PRODUCT(verify_ok_to_handle_reflists());

#ifdef ASSERT
  bool must_remember_klasses = ClassUnloading && !UseConcMarkSweepGC ||
                               CMSClassUnloadingEnabled && UseConcMarkSweepGC;
  RememberKlassesChecker mx(must_remember_klasses);
#endif
  // Soft references
  {
    TraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
@ -1397,18 +1397,18 @@ template <class T> void assert_nothing(T *p) {}
  /* Compute oopmap block range. The common case \
     is nonstatic_oop_map_size == 1. */ \
  OopMapBlock* map = start_of_nonstatic_oop_maps(); \
  OopMapBlock* const end_map = map + nonstatic_oop_map_size(); \
  OopMapBlock* const end_map = map + nonstatic_oop_map_count(); \
  if (UseCompressedOops) { \
    while (map < end_map) { \
      InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \
        obj->obj_field_addr<narrowOop>(map->offset()), map->length(), \
        obj->obj_field_addr<narrowOop>(map->offset()), map->count(), \
        do_oop, assert_fn) \
      ++map; \
    } \
  } else { \
    while (map < end_map) { \
      InstanceKlass_SPECIALIZED_OOP_ITERATE(oop, \
        obj->obj_field_addr<oop>(map->offset()), map->length(), \
        obj->obj_field_addr<oop>(map->offset()), map->count(), \
        do_oop, assert_fn) \
      ++map; \
    } \
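
// De-sugared shape of the narrowOop arm above, assuming
// InstanceKlass_SPECIALIZED_OOP_ITERATE applies do_oop to every oop slot
// of a block (an illustrative expansion, not the literal macro text):
//
//   OopMapBlock* map = start_of_nonstatic_oop_maps();
//   OopMapBlock* const end_map = map + nonstatic_oop_map_count();
//   while (map < end_map) {
//     narrowOop* p = obj->obj_field_addr<narrowOop>(map->offset());
//     narrowOop* end = p + map->count();
//     for (; p < end; ++p) {
//       do_oop(p);  // closure body; assert_fn checks elided
//     }
//     ++map;
//   }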
@ -1418,19 +1418,19 @@ template <class T> void assert_nothing(T *p) {}
#define InstanceKlass_OOP_MAP_REVERSE_ITERATE(obj, do_oop, assert_fn) \
{ \
  OopMapBlock* const start_map = start_of_nonstatic_oop_maps(); \
  OopMapBlock* map = start_map + nonstatic_oop_map_size(); \
  OopMapBlock* map = start_map + nonstatic_oop_map_count(); \
  if (UseCompressedOops) { \
    while (start_map < map) { \
      --map; \
      InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(narrowOop, \
        obj->obj_field_addr<narrowOop>(map->offset()), map->length(), \
        obj->obj_field_addr<narrowOop>(map->offset()), map->count(), \
        do_oop, assert_fn) \
    } \
  } else { \
    while (start_map < map) { \
      --map; \
      InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(oop, \
        obj->obj_field_addr<oop>(map->offset()), map->length(), \
        obj->obj_field_addr<oop>(map->offset()), map->count(), \
        do_oop, assert_fn) \
    } \
  } \
@ -1444,11 +1444,11 @@ template <class T> void assert_nothing(T *p) {}
     usually non-existent extra overhead of examining \
     all the maps. */ \
  OopMapBlock* map = start_of_nonstatic_oop_maps(); \
  OopMapBlock* const end_map = map + nonstatic_oop_map_size(); \
  OopMapBlock* const end_map = map + nonstatic_oop_map_count(); \
  if (UseCompressedOops) { \
    while (map < end_map) { \
      InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \
        obj->obj_field_addr<narrowOop>(map->offset()), map->length(), \
        obj->obj_field_addr<narrowOop>(map->offset()), map->count(), \
        low, high, \
        do_oop, assert_fn) \
      ++map; \
@ -1456,7 +1456,7 @@ template <class T> void assert_nothing(T *p) {}
  } else { \
    while (map < end_map) { \
      InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \
        obj->obj_field_addr<oop>(map->offset()), map->length(), \
        obj->obj_field_addr<oop>(map->offset()), map->count(), \
        low, high, \
        do_oop, assert_fn) \
      ++map; \
@ -2217,14 +2217,15 @@ void instanceKlass::verify_class_klass_nonstatic_oop_maps(klassOop k) {
    first_time = false;
    const int extra = java_lang_Class::number_of_fake_oop_fields;
    guarantee(ik->nonstatic_field_size() == extra, "just checking");
    guarantee(ik->nonstatic_oop_map_size() == 1, "just checking");
    guarantee(ik->nonstatic_oop_map_count() == 1, "just checking");
    guarantee(ik->size_helper() == align_object_size(instanceOopDesc::header_size() + extra), "just checking");

    // Check that the map is (2,extra)
    int offset = java_lang_Class::klass_offset;

    OopMapBlock* map = ik->start_of_nonstatic_oop_maps();
    guarantee(map->offset() == offset && map->length() == extra, "just checking");
    guarantee(map->offset() == offset && map->count() == (unsigned int) extra,
              "sanity");
  }
}
@ -71,7 +71,6 @@

// forward declaration for class -- see below for definition
class SuperTypeClosure;
class OopMapBlock;
class JNIid;
class jniIdMapBase;
class BreakpointInfo;
@ -99,6 +98,29 @@ class FieldPrinter: public FieldClosure {
};
#endif // !PRODUCT

// ValueObjs embedded in klass. Describes where oops are located in instances of
// this klass.
class OopMapBlock VALUE_OBJ_CLASS_SPEC {
public:
  // Byte offset of the first oop mapped by this block.
  int offset() const { return _offset; }
  void set_offset(int offset) { _offset = offset; }

  // Number of oops in this block.
  uint count() const { return _count; }
  void set_count(uint count) { _count = count; }

  // sizeof(OopMapBlock) in HeapWords.
  static const int size_in_words() {
    return align_size_up(int(sizeof(OopMapBlock)), HeapWordSize) >>
           LogHeapWordSize;
  }

private:
  int _offset;
  uint _count;
};
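
// A worked example (offsets illustrative): with heapOopSize == 8, three
// consecutive oop fields at byte offsets 16, 24 and 32 are described by
// a single block:
//
//   map->set_offset(16);
//   map->set_count(3);
//   // iteration then visits obj+16, obj+24 and obj+32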

class instanceKlass: public Klass {
  friend class VMStructs;
public:
@ -191,7 +213,7 @@ class instanceKlass: public Klass {
  int _nonstatic_field_size;
  int _static_field_size;     // number of words used by static fields (oop and non-oop) in this klass
  int _static_oop_field_size; // number of static oop fields in this klass
  int _nonstatic_oop_map_size;// number of nonstatic oop-map blocks allocated at end of this klass
  int _nonstatic_oop_map_size;// size in words of nonstatic oop map blocks
  bool _is_marked_dependent;  // used for marking during flushing and deoptimization
  bool _rewritten;            // methods rewritten.
  bool _has_nonstatic_fields; // for sizing with UseCompressedOops
@ -424,8 +446,16 @@ class instanceKlass: public Klass {
  void set_source_debug_extension(symbolOop n){ oop_store_without_check((oop*) &_source_debug_extension, (oop) n); }

  // nonstatic oop-map blocks
  static int nonstatic_oop_map_size(unsigned int oop_map_count) {
    return oop_map_count * OopMapBlock::size_in_words();
  }
  unsigned int nonstatic_oop_map_count() const {
    return _nonstatic_oop_map_size / OopMapBlock::size_in_words();
  }
  int nonstatic_oop_map_size() const { return _nonstatic_oop_map_size; }
  void set_nonstatic_oop_map_size(int size) { _nonstatic_oop_map_size = size; }
  void set_nonstatic_oop_map_size(int words) {
    _nonstatic_oop_map_size = words;
  }
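
// The two views stay consistent because the stored quantity is words.
// On an LP64 VM sizeof(OopMapBlock) is 8 bytes, i.e. one HeapWord, so
// size_in_words() == 1 and, for example (block count illustrative):
//
//   ik->set_nonstatic_oop_map_size(3 * OopMapBlock::size_in_words());
//   ik->nonstatic_oop_map_count();  // == 3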

  // RedefineClasses() support for previous versions:
  void add_previous_version(instanceKlassHandle ikh, BitMap *emcp_methods,
@ -839,21 +869,6 @@ inline u2 instanceKlass::next_method_idnum() {
}

// ValueObjs embedded in klass. Describes where oops are located in instances of this klass.

class OopMapBlock VALUE_OBJ_CLASS_SPEC {
private:
  jushort _offset; // Offset of first oop in oop-map block
  jushort _length; // Length of oop-map block
public:
  // Accessors
  jushort offset() const { return _offset; }
  void set_offset(jushort offset) { _offset = offset; }

  jushort length() const { return _length; }
  void set_length(jushort length) { _length = length; }
};

/* JNIid class for jfieldIDs only */
class JNIid: public CHeapObj {
  friend class VMStructs;
@ -402,9 +402,14 @@ int instanceKlassKlass::oop_update_pointers(ParCompactionManager* cm, oop obj,
}
#endif // SERIALGC

klassOop instanceKlassKlass::allocate_instance_klass(int vtable_len, int itable_len, int static_field_size,
                                                     int nonstatic_oop_map_size, ReferenceType rt, TRAPS) {
klassOop
instanceKlassKlass::allocate_instance_klass(int vtable_len, int itable_len,
                                            int static_field_size,
                                            unsigned nonstatic_oop_map_count,
                                            ReferenceType rt, TRAPS) {

  const int nonstatic_oop_map_size =
    instanceKlass::nonstatic_oop_map_size(nonstatic_oop_map_count);
  int size = instanceKlass::object_size(align_object_offset(vtable_len) + align_object_offset(itable_len) + static_field_size + nonstatic_oop_map_size);

  // Allocation
@ -615,9 +620,9 @@ void instanceKlassKlass::oop_print_on(oop obj, outputStream* st) {

  st->print(BULLET"non-static oop maps: ");
  OopMapBlock* map = ik->start_of_nonstatic_oop_maps();
  OopMapBlock* end_map = map + ik->nonstatic_oop_map_size();
  OopMapBlock* end_map = map + ik->nonstatic_oop_map_count();
  while (map < end_map) {
    st->print("%d-%d ", map->offset(), map->offset() + heapOopSize*(map->length() - 1));
    st->print("%d-%d ", map->offset(), map->offset() + heapOopSize*(map->count() - 1));
    map++;
  }
  st->cr();

@ -39,7 +39,7 @@ class instanceKlassKlass : public klassKlass {
  klassOop allocate_instance_klass(int vtable_len,
                                   int itable_len,
                                   int static_field_size,
                                   int nonstatic_oop_map_size,
                                   unsigned int nonstatic_oop_map_count,
                                   ReferenceType rt,
                                   TRAPS);
@ -400,26 +400,26 @@ void instanceRefKlass::update_nonstatic_oop_maps(klassOop k) {
  assert(k == SystemDictionary::reference_klass() && first_time,
         "Invalid update of maps");
  debug_only(first_time = false);
  assert(ik->nonstatic_oop_map_size() == 1, "just checking");
  assert(ik->nonstatic_oop_map_count() == 1, "just checking");

  OopMapBlock* map = ik->start_of_nonstatic_oop_maps();

  // Check that the current map is (2,4) - currently points at field with
  // offset 2 (words) and has 4 map entries.
  debug_only(int offset = java_lang_ref_Reference::referent_offset);
  debug_only(int length = ((java_lang_ref_Reference::discovered_offset -
  debug_only(unsigned int count = ((java_lang_ref_Reference::discovered_offset -
    java_lang_ref_Reference::referent_offset)/heapOopSize) + 1);

  if (UseSharedSpaces) {
    assert(map->offset() == java_lang_ref_Reference::queue_offset &&
           map->length() == 1, "just checking");
           map->count() == 1, "just checking");
  } else {
    assert(map->offset() == offset && map->length() == length,
    assert(map->offset() == offset && map->count() == count,
           "just checking");

    // Update map to (3,1) - point to offset of 3 (words) with 1 map entry.
    map->set_offset(java_lang_ref_Reference::queue_offset);
    map->set_length(1);
    map->set_count(1);
  }
}
@ -49,6 +49,12 @@ void DataLayout::initialize(u1 tag, u2 bci, int cell_count) {
  }
}

void DataLayout::follow_weak_refs(BoolObjectClosure* cl) {
  ResourceMark m;
  data_in()->follow_weak_refs(cl);
}

// ==================================================================
// ProfileData
//
@ -145,42 +151,92 @@ void JumpData::print_data_on(outputStream* st) {
// which are used to store a type profile for the receiver of the check.

void ReceiverTypeData::follow_contents() {
  for (uint row = 0; row < row_limit(); row++) {
    if (receiver(row) != NULL) {
      MarkSweep::mark_and_push(adr_receiver(row));
    }
  }
  // This is a set of weak references that need
  // to be followed at the end of the strong marking
  // phase. Memoize this object so it can be visited
  // in the weak roots processing phase.
  MarkSweep::revisit_mdo(data());
}

#ifndef SERIALGC
void ReceiverTypeData::follow_contents(ParCompactionManager* cm) {
  for (uint row = 0; row < row_limit(); row++) {
    if (receiver(row) != NULL) {
      PSParallelCompact::mark_and_push(cm, adr_receiver(row));
    }
  }
  // This is a set of weak references that need
  // to be followed at the end of the strong marking
  // phase. Memoize this object so it can be visited
  // in the weak roots processing phase.
  PSParallelCompact::revisit_mdo(cm, data());
}
#endif // SERIALGC

void ReceiverTypeData::oop_iterate(OopClosure* blk) {
  for (uint row = 0; row < row_limit(); row++) {
    if (receiver(row) != NULL) {
      blk->do_oop(adr_receiver(row));
    }
  }
}

void ReceiverTypeData::oop_iterate_m(OopClosure* blk, MemRegion mr) {
  if (blk->should_remember_mdo()) {
    // This is a set of weak references that need
    // to be followed at the end of the strong marking
    // phase. Memoize this object so it can be visited
    // in the weak roots processing phase.
    blk->remember_mdo(data());
  } else { // normal scan
    for (uint row = 0; row < row_limit(); row++) {
      if (receiver(row) != NULL) {
        oop* adr = adr_receiver(row);
        if (mr.contains(adr)) {
          blk->do_oop(adr);
        }
      }
    }
  }

void ReceiverTypeData::oop_iterate_m(OopClosure* blk, MemRegion mr) {
  // Currently, this interface is called only during card-scanning for
  // a young gen gc, in which case this object cannot contribute anything,
  // since it does not contain any references that cross out of
  // the perm gen. However, for future more general use we allow
  // the possibility of calling for instance from more general
  // iterators (for example, a future regionalized perm gen for G1,
  // or the possibility of moving some references out of perm in
  // the case of other collectors). In that case, you will need
  // to relax or remove some of the assertions below.
#ifdef ASSERT
  // Verify that none of the embedded oop references cross out of
  // this generation.
  for (uint row = 0; row < row_limit(); row++) {
    if (receiver(row) != NULL) {
      oop* adr = adr_receiver(row);
      CollectedHeap* h = Universe::heap();
      assert(h->is_permanent(adr) && h->is_permanent_or_null(*adr), "Not intra-perm");
    }
  }
#endif // ASSERT
  assert(!blk->should_remember_mdo(), "Not expected to remember MDO");
  return; // Nothing to do, see comment above
#if 0
  if (blk->should_remember_mdo()) {
    // This is a set of weak references that need
    // to be followed at the end of the strong marking
    // phase. Memoize this object so it can be visited
    // in the weak roots processing phase.
    blk->remember_mdo(data());
  } else { // normal scan
    for (uint row = 0; row < row_limit(); row++) {
      if (receiver(row) != NULL) {
        oop* adr = adr_receiver(row);
        if (mr.contains(adr)) {
          blk->do_oop(adr);
        } else if ((HeapWord*)adr >= mr.end()) {
          // Test that the current cursor and the two ends of the range
          // that we may have skipped iterating over are monotonically ordered;
          // this is just a paranoid assertion, just in case representations
          // should change in the future rendering the short-circuit return
          // here invalid.
          assert((row+1 >= row_limit() || adr_receiver(row+1) > adr) &&
                 (row+2 >= row_limit() || adr_receiver(row_limit()-1) > adr_receiver(row+1)), "Reducing?");
          break; // remaining should be outside this mr too
        }
      }
    }
  }
#endif
}

void ReceiverTypeData::adjust_pointers() {
  for (uint row = 0; row < row_limit(); row++) {
    if (receiver(row) != NULL) {
@ -189,6 +245,15 @@ void ReceiverTypeData::adjust_pointers() {
    }
  }
}

void ReceiverTypeData::follow_weak_refs(BoolObjectClosure* is_alive_cl) {
  for (uint row = 0; row < row_limit(); row++) {
    klassOop p = receiver(row);
    if (p != NULL && !is_alive_cl->do_object_b(p)) {
      clear_row(row);
    }
  }
}

#ifndef SERIALGC
void ReceiverTypeData::update_pointers() {
  for (uint row = 0; row < row_limit(); row++) {
@ -625,30 +690,33 @@ ProfileData* methodDataOopDesc::data_at(int data_index) {
    return NULL;
  }
  DataLayout* data_layout = data_layout_at(data_index);
  return data_layout->data_in();
}

  switch (data_layout->tag()) {
ProfileData* DataLayout::data_in() {
  switch (tag()) {
  case DataLayout::no_tag:
  default:
    ShouldNotReachHere();
    return NULL;
  case DataLayout::bit_data_tag:
    return new BitData(data_layout);
    return new BitData(this);
  case DataLayout::counter_data_tag:
    return new CounterData(data_layout);
    return new CounterData(this);
  case DataLayout::jump_data_tag:
    return new JumpData(data_layout);
    return new JumpData(this);
  case DataLayout::receiver_type_data_tag:
    return new ReceiverTypeData(data_layout);
    return new ReceiverTypeData(this);
  case DataLayout::virtual_call_data_tag:
    return new VirtualCallData(data_layout);
    return new VirtualCallData(this);
  case DataLayout::ret_data_tag:
    return new RetData(data_layout);
    return new RetData(this);
  case DataLayout::branch_data_tag:
    return new BranchData(data_layout);
    return new BranchData(this);
  case DataLayout::multi_branch_data_tag:
    return new MultiBranchData(data_layout);
    return new MultiBranchData(this);
  case DataLayout::arg_info_data_tag:
    return new ArgInfoData(data_layout);
    return new ArgInfoData(this);
  };
}
@ -55,6 +55,9 @@ class BytecodeStream;
// with invocation counter incrementation. None of these races harm correct
// execution of the compiled code.

// forward decl
class ProfileData;

// DataLayout
//
// Overlay for generic profiling data.
@ -231,6 +234,10 @@ public:
    temp._header._struct._flags = byte_constant;
    return temp._header._bits;
  }

  // GC support
  ProfileData* data_in();
  void follow_weak_refs(BoolObjectClosure* cl);
};

@ -430,6 +437,7 @@ public:
  virtual void oop_iterate(OopClosure* blk) {}
  virtual void oop_iterate_m(OopClosure* blk, MemRegion mr) {}
  virtual void adjust_pointers() {}
  virtual void follow_weak_refs(BoolObjectClosure* is_alive_closure) {}

#ifndef SERIALGC
  // Parallel old support
@ -667,11 +675,27 @@ public:
    return recv;
  }

  void set_receiver(uint row, oop p) {
    assert((uint)row < row_limit(), "oob");
    set_oop_at(receiver_cell_index(row), p);
  }

  uint receiver_count(uint row) {
    assert(row < row_limit(), "oob");
    return uint_at(receiver_count_cell_index(row));
  }

  void set_receiver_count(uint row, uint count) {
    assert(row < row_limit(), "oob");
    set_uint_at(receiver_count_cell_index(row), count);
  }

  void clear_row(uint row) {
    assert(row < row_limit(), "oob");
    set_receiver(row, NULL);
    set_receiver_count(row, 0);
  }

  // Code generation support
  static ByteSize receiver_offset(uint row) {
    return cell_offset(receiver_cell_index(row));
@ -688,6 +712,7 @@ public:
  virtual void oop_iterate(OopClosure* blk);
  virtual void oop_iterate_m(OopClosure* blk, MemRegion mr);
  virtual void adjust_pointers();
  virtual void follow_weak_refs(BoolObjectClosure* is_alive_closure);

#ifndef SERIALGC
  // Parallel old support
@ -1707,6 +1707,9 @@ class CommandLineFlags {
  product(bool, TLABStats, true, \
          "Print various TLAB related information") \
  \
  product(bool, PrintRevisitStats, false, \
          "Print revisit (klass and MDO) stack related information") \
  \
  product_pd(bool, NeverActAsServerClassMachine, \
             "Never act like a server-class machine") \
  \
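
// A quick way to exercise the new flag (the application name and the
// companion flag are illustrative; only PrintRevisitStats comes from
// this change):
//
//   java -XX:+PrintRevisitStats -XX:+PrintGCDetails MyApp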
@ -125,8 +125,14 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
  // there are no inline caches that refer to it.
  if (nm->is_marked_for_reclamation()) {
    assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
    if (PrintMethodFlushing && Verbose) {
      tty->print_cr("### Nmethod 0x%x (marked for reclamation) being flushed", nm);
    }
    nm->flush();
  } else {
    if (PrintMethodFlushing && Verbose) {
      tty->print_cr("### Nmethod 0x%x (zombie) being marked for reclamation", nm);
    }
    nm->mark_for_reclamation();
    _rescan = true;
  }
@ -134,6 +140,9 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
  // If there are no current activations of this method on the
  // stack we can safely convert it to a zombie method
  if (nm->can_not_entrant_be_converted()) {
    if (PrintMethodFlushing && Verbose) {
      tty->print_cr("### Nmethod 0x%x (not entrant) being made zombie", nm);
    }
    nm->make_zombie();
    _rescan = true;
  } else {
@ -146,7 +155,9 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
  }
} else if (nm->is_unloaded()) {
  // Unloaded code, just make it a zombie
  if (nm->is_osr_only_method()) {
  if (PrintMethodFlushing && Verbose)
    tty->print_cr("### Nmethod 0x%x (unloaded) being made zombie", nm);
  if (nm->is_osr_method()) {
    // No inline caches will ever point to osr methods, so we can just remove it
    nm->flush();
  } else {
hotspot/test/gc/6845368/bigobj.java (new file, 65563 lines; diff suppressed because it is too large)