Dmitry Samersoff 2015-07-23 15:25:28 +00:00
commit db6b2f6b85
11 changed files with 123 additions and 277 deletions

@@ -1,40 +0,0 @@
package sun.jvm.hotspot.gc.g1;
import java.util.Observable;
import java.util.Observer;
import sun.jvm.hotspot.debugger.Address;
import sun.jvm.hotspot.runtime.VM;
import sun.jvm.hotspot.runtime.VMObject;
import sun.jvm.hotspot.types.CIntegerField;
import sun.jvm.hotspot.types.Type;
import sun.jvm.hotspot.types.TypeDataBase;
public class G1Allocator extends VMObject {
//size_t _summary_bytes_used;
static private CIntegerField summaryBytesUsedField;
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
initialize(VM.getVM().getTypeDataBase());
}
});
}
static private synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("G1Allocator");
summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used");
}
public long getSummaryBytes() {
return summaryBytesUsedField.getValue(addr);
}
public G1Allocator(Address addr) {
super(addr);
}
}

@@ -36,6 +36,7 @@ import sun.jvm.hotspot.memory.MemRegion;
import sun.jvm.hotspot.runtime.VM;
import sun.jvm.hotspot.runtime.VMObjectFactory;
import sun.jvm.hotspot.types.AddressField;
import sun.jvm.hotspot.types.CIntegerField;
import sun.jvm.hotspot.types.Type;
import sun.jvm.hotspot.types.TypeDataBase;
@@ -46,8 +47,8 @@ public class G1CollectedHeap extends CollectedHeap {
static private long hrmFieldOffset;
// MemRegion _g1_reserved;
static private long g1ReservedFieldOffset;
// G1Allocator* _allocator
static private AddressField g1Allocator;
// size_t _summary_bytes_used;
static private CIntegerField summaryBytesUsedField;
// G1MonitoringSupport* _g1mm;
static private AddressField g1mmField;
// HeapRegionSet _old_set;
@@ -67,7 +68,7 @@ public class G1CollectedHeap extends CollectedHeap {
Type type = db.lookupType("G1CollectedHeap");
hrmFieldOffset = type.getField("_hrm").getOffset();
g1Allocator = type.getAddressField("_allocator");
summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used");
g1mmField = type.getAddressField("_g1mm");
oldSetFieldOffset = type.getField("_old_set").getOffset();
humongousSetFieldOffset = type.getField("_humongous_set").getOffset();
@@ -78,7 +79,7 @@ public class G1CollectedHeap extends CollectedHeap {
}
public long used() {
return allocator().getSummaryBytes();
return summaryBytesUsedField.getValue(addr);
}
public long n_regions() {
@@ -96,11 +97,6 @@ public class G1CollectedHeap extends CollectedHeap {
return (G1MonitoringSupport) VMObjectFactory.newObject(G1MonitoringSupport.class, g1mmAddr);
}
public G1Allocator allocator() {
Address g1AllocatorAddr = g1Allocator.getValue(addr);
return (G1Allocator) VMObjectFactory.newObject(G1Allocator.class, g1AllocatorAddr);
}
public HeapRegionSetBase oldSet() {
Address oldSetAddr = addr.addOffsetTo(oldSetFieldOffset);
return (HeapRegionSetBase) VMObjectFactory.newObject(HeapRegionSetBase.class,

@@ -39,13 +39,8 @@ class G1Allocator : public CHeapObj<mtGC> {
protected:
G1CollectedHeap* _g1h;
// Outside of GC pauses, the number of bytes used in all regions other
// than the current allocation region.
size_t _summary_bytes_used;
public:
G1Allocator(G1CollectedHeap* heap) :
_g1h(heap), _summary_bytes_used(0) { }
G1Allocator(G1CollectedHeap* heap) : _g1h(heap) { }
static G1Allocator* create_allocator(G1CollectedHeap* g1h);
@@ -59,32 +54,13 @@ public:
virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) = 0;
virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) = 0;
virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) = 0;
virtual size_t used() = 0;
virtual size_t used_in_alloc_regions() = 0;
virtual bool is_retained_old_region(HeapRegion* hr) = 0;
void reuse_retained_old_region(EvacuationInfo& evacuation_info,
OldGCAllocRegion* old,
HeapRegion** retained);
size_t used_unlocked() const {
return _summary_bytes_used;
}
void increase_used(size_t bytes) {
_summary_bytes_used += bytes;
}
void decrease_used(size_t bytes) {
assert(_summary_bytes_used >= bytes,
err_msg("invariant: _summary_bytes_used: " SIZE_FORMAT " should be >= bytes: " SIZE_FORMAT,
_summary_bytes_used, bytes));
_summary_bytes_used -= bytes;
}
void set_used(size_t bytes) {
_summary_bytes_used = bytes;
}
virtual HeapRegion* new_heap_region(uint hrs_index,
G1BlockOffsetSharedArray* sharedOffsetArray,
MemRegion mr) {
@@ -133,10 +109,10 @@ public:
return &_old_gc_alloc_region;
}
virtual size_t used() {
virtual size_t used_in_alloc_regions() {
assert(Heap_lock->owner() != NULL,
"Should be owned on this thread's behalf.");
size_t result = _summary_bytes_used;
size_t result = 0;
// Read only once in case it is set to NULL concurrently
HeapRegion* hr = mutator_alloc_region(AllocationContext::current())->get();

@@ -632,7 +632,7 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
check_bitmaps("Humongous Region Allocation", first_hr);
assert(first_hr->used() == word_size * HeapWordSize, "invariant");
_allocator->increase_used(first_hr->used());
increase_used(first_hr->used());
_humongous_set.add(first_hr);
return new_obj;
@@ -998,7 +998,7 @@ bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges, size_t count) {
if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
start_address = start_region->end();
if (start_address > last_address) {
_allocator->increase_used(word_size * HeapWordSize);
increase_used(word_size * HeapWordSize);
start_region->set_top(last_address + 1);
continue;
}
@@ -1012,7 +1012,7 @@ bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges, size_t count) {
if (!_hrm.allocate_containing_regions(curr_range, &commits)) {
return false;
}
_allocator->increase_used(word_size * HeapWordSize);
increase_used(word_size * HeapWordSize);
if (commits != 0) {
ergo_verbose1(ErgoHeapSizing,
"attempt heap expansion",
@@ -1104,7 +1104,7 @@ void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
if (start_address != bottom_address) {
size_t fill_size = pointer_delta(start_address, bottom_address);
G1CollectedHeap::fill_with_objects(bottom_address, fill_size);
_allocator->increase_used(fill_size * HeapWordSize);
increase_used(fill_size * HeapWordSize);
}
}
}
@@ -1917,7 +1917,6 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
_ref_processor_cm(NULL),
_ref_processor_stw(NULL),
_bot_shared(NULL),
_evac_failure_scan_stack(NULL),
_cg1r(NULL),
_g1mm(NULL),
_refine_cte_cl(NULL),
@@ -1930,6 +1929,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
_free_regions_coming(false),
_young_list(new YoungList(this)),
_gc_time_stamp(0),
_summary_bytes_used(0),
_survivor_plab_stats(YoungPLABSize, PLABWeight),
_old_plab_stats(OldPLABSize, PLABWeight),
_expand_heap_after_alloc_failure(true),
@@ -2204,6 +2204,11 @@ jint G1CollectedHeap::initialize() {
G1StringDedup::initialize();
_preserved_objs = NEW_C_HEAP_ARRAY(OopAndMarkOopStack, ParallelGCThreads, mtGC);
for (uint i = 0; i < ParallelGCThreads; i++) {
new (&_preserved_objs[i]) OopAndMarkOopStack();
}
return JNI_OK;
}
@@ -2371,7 +2376,7 @@ void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
// Computes the sum of the storage used by the various regions.
size_t G1CollectedHeap::used() const {
size_t result = _allocator->used();
size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions();
if (_archive_allocator != NULL) {
result += _archive_allocator->used();
}
@@ -2379,7 +2384,7 @@ size_t G1CollectedHeap::used() const {
}
size_t G1CollectedHeap::used_unlocked() const {
return _allocator->used_unlocked();
return _summary_bytes_used;
}
class SumUsedClosure: public HeapRegionClosure {
@@ -4102,7 +4107,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
_young_list->reset_auxilary_lists();
if (evacuation_failed()) {
_allocator->set_used(recalculate_used());
set_used(recalculate_used());
if (_archive_allocator != NULL) {
_archive_allocator->clear_used();
}
@@ -4114,7 +4119,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
} else {
// The "used" of the the collection set have already been subtracted
// when they were freed. Add in the bytes evacuated.
_allocator->increase_used(g1_policy()->bytes_copied_during_gc());
increase_used(g1_policy()->bytes_copied_during_gc());
}
if (collector_state()->during_initial_mark_pause()) {
@@ -4255,21 +4260,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
return true;
}
void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
_drain_in_progress = false;
set_evac_failure_closure(cl);
_evac_failure_scan_stack = new (ResourceObj::C_HEAP, mtGC) GrowableArray<oop>(40, true);
}
void G1CollectedHeap::finalize_for_evac_failure() {
assert(_evac_failure_scan_stack != NULL &&
_evac_failure_scan_stack->length() == 0,
"Postcondition");
assert(!_drain_in_progress, "Postcondition");
delete _evac_failure_scan_stack;
_evac_failure_scan_stack = NULL;
}
void G1CollectedHeap::remove_self_forwarding_pointers() {
double remove_self_forwards_start = os::elapsedTime();
@@ -4277,104 +4267,30 @@ void G1CollectedHeap::remove_self_forwarding_pointers() {
workers()->run_task(&rsfp_task);
// Now restore saved marks, if any.
assert(_objs_with_preserved_marks.size() ==
_preserved_marks_of_objs.size(), "Both or none.");
while (!_objs_with_preserved_marks.is_empty()) {
oop obj = _objs_with_preserved_marks.pop();
markOop m = _preserved_marks_of_objs.pop();
obj->set_mark(m);
for (uint i = 0; i < ParallelGCThreads; i++) {
OopAndMarkOopStack& cur = _preserved_objs[i];
while (!cur.is_empty()) {
OopAndMarkOop elem = cur.pop();
elem.set_mark();
}
cur.clear(true);
}
_objs_with_preserved_marks.clear(true);
_preserved_marks_of_objs.clear(true);
g1_policy()->phase_times()->record_evac_fail_remove_self_forwards((os::elapsedTime() - remove_self_forwards_start) * 1000.0);
}
void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) {
_evac_failure_scan_stack->push(obj);
}
void G1CollectedHeap::drain_evac_failure_scan_stack() {
assert(_evac_failure_scan_stack != NULL, "precondition");
while (_evac_failure_scan_stack->length() > 0) {
oop obj = _evac_failure_scan_stack->pop();
_evac_failure_closure->set_region(heap_region_containing(obj));
obj->oop_iterate_backwards(_evac_failure_closure);
}
}
oop
G1CollectedHeap::handle_evacuation_failure_par(G1ParScanThreadState* _par_scan_state,
oop old) {
assert(obj_in_cs(old),
err_msg("obj: " PTR_FORMAT " should still be in the CSet",
p2i(old)));
markOop m = old->mark();
oop forward_ptr = old->forward_to_atomic(old);
if (forward_ptr == NULL) {
// Forward-to-self succeeded.
assert(_par_scan_state != NULL, "par scan state");
OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
uint queue_num = _par_scan_state->queue_num();
void G1CollectedHeap::preserve_mark_during_evac_failure(uint queue_num, oop obj, markOop m) {
if (!_evacuation_failed) {
_evacuation_failed = true;
_evacuation_failed_info_array[queue_num].register_copy_failure(old->size());
if (_evac_failure_closure != cl) {
MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);
assert(!_drain_in_progress,
"Should only be true while someone holds the lock.");
// Set the global evac-failure closure to the current thread's.
assert(_evac_failure_closure == NULL, "Or locking has failed.");
set_evac_failure_closure(cl);
// Now do the common part.
handle_evacuation_failure_common(old, m);
// Reset to NULL.
set_evac_failure_closure(NULL);
} else {
// The lock is already held, and this is recursive.
assert(_drain_in_progress, "This should only be the recursive case.");
handle_evacuation_failure_common(old, m);
}
return old;
} else {
// Forward-to-self failed. Either someone else managed to allocate
// space for this object (old != forward_ptr) or they beat us in
// self-forwarding it (old == forward_ptr).
assert(old == forward_ptr || !obj_in_cs(forward_ptr),
err_msg("obj: " PTR_FORMAT " forwarded to: " PTR_FORMAT " "
"should not be in the CSet",
p2i(old), p2i(forward_ptr)));
return forward_ptr;
}
}
void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {
preserve_mark_if_necessary(old, m);
HeapRegion* r = heap_region_containing(old);
if (!r->evacuation_failed()) {
r->set_evacuation_failed(true);
_hr_printer.evac_failure(r);
}
push_on_evac_failure_scan_stack(old);
_evacuation_failed_info_array[queue_num].register_copy_failure(obj->size());
if (!_drain_in_progress) {
// prevent recursion in copy_to_survivor_space()
_drain_in_progress = true;
drain_evac_failure_scan_stack();
_drain_in_progress = false;
}
}
void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
assert(evacuation_failed(), "Oversaving!");
// We want to call the "for_promotion_failure" version only in the
// case of a promotion failure.
if (m->must_be_preserved_for_promotion_failure(obj)) {
_objs_with_preserved_marks.push(obj);
_preserved_marks_of_objs.push(m);
OopAndMarkOop elem(obj, m);
_preserved_objs[queue_num].push(elem);
}
}
@@ -4450,15 +4366,8 @@ void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
mark_object(obj);
}
}
if (barrier == G1BarrierEvac) {
_par_scan_state->update_rs(_from, p, _worker_id);
}
}
template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(oop* p);
template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(narrowOop* p);
class G1ParEvacuateFollowersClosure : public VoidClosure {
protected:
G1CollectedHeap* _g1h;
@@ -4597,9 +4506,6 @@ public:
ReferenceProcessor* rp = _g1h->ref_processor_stw();
G1ParScanThreadState pss(_g1h, worker_id, rp);
G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
pss.set_evac_failure_closure(&evac_failure_cl);
bool only_young = _g1h->collector_state()->gcs_are_young();
@@ -5269,9 +5175,6 @@ public:
G1STWIsAliveClosure is_alive(_g1h);
G1ParScanThreadState pss(_g1h, worker_id, NULL);
G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
pss.set_evac_failure_closure(&evac_failure_cl);
G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL);
@@ -5368,10 +5271,6 @@ public:
HandleMark hm;
G1ParScanThreadState pss(_g1h, worker_id, NULL);
G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
pss.set_evac_failure_closure(&evac_failure_cl);
assert(pss.queue_is_empty(), "both queue and overflow should be empty");
G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL);
@@ -5476,15 +5375,11 @@ void G1CollectedHeap::process_discovered_references() {
// Use only a single queue for this PSS.
G1ParScanThreadState pss(this, 0, NULL);
assert(pss.queue_is_empty(), "pre-condition");
// We do not embed a reference processor in the copying/scanning
// closures while we're actually processing the discovered
// reference objects.
G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
pss.set_evac_failure_closure(&evac_failure_cl);
assert(pss.queue_is_empty(), "pre-condition");
G1ParScanExtRootClosure only_copy_non_heap_cl(this, &pss, NULL);
@@ -5590,8 +5485,6 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
const uint n_workers = workers()->active_workers();
init_for_evac_failure(NULL);
assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
double start_par_time_sec = os::elapsedTime();
double end_par_time_sec;
@@ -5655,8 +5548,6 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
purge_code_root_memory();
finalize_for_evac_failure();
if (evacuation_failed()) {
remove_self_forwarding_pointers();
@@ -5745,7 +5636,7 @@ void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
}
void G1CollectedHeap::decrement_summary_bytes(size_t bytes) {
_allocator->decrease_used(bytes);
decrease_used(bytes);
}
class G1ParCleanupCTTask : public AbstractGangTask {
@@ -6395,6 +6286,21 @@ void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
_hrm.remove_all_free_regions();
}
void G1CollectedHeap::increase_used(size_t bytes) {
_summary_bytes_used += bytes;
}
void G1CollectedHeap::decrease_used(size_t bytes) {
assert(_summary_bytes_used >= bytes,
err_msg("invariant: _summary_bytes_used: " SIZE_FORMAT " should be >= bytes: " SIZE_FORMAT,
_summary_bytes_used, bytes));
_summary_bytes_used -= bytes;
}
void G1CollectedHeap::set_used(size_t bytes) {
_summary_bytes_used = bytes;
}
class RebuildRegionSetsClosure : public HeapRegionClosure {
private:
bool _free_list_only;
@@ -6463,15 +6369,15 @@ void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
heap_region_iterate(&cl);
if (!free_list_only) {
_allocator->set_used(cl.total_used());
set_used(cl.total_used());
if (_archive_allocator != NULL) {
_archive_allocator->clear_used();
}
}
assert(_allocator->used_unlocked() == recalculate_used(),
err_msg("inconsistent _allocator->used_unlocked(), "
assert(used_unlocked() == recalculate_used(),
err_msg("inconsistent used_unlocked(), "
"value: " SIZE_FORMAT " recalculated: " SIZE_FORMAT,
_allocator->used_unlocked(), recalculate_used()));
used_unlocked(), recalculate_used()));
}
void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
@@ -6511,7 +6417,7 @@ void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");
g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
_allocator->increase_used(allocated_bytes);
increase_used(allocated_bytes);
_hr_printer.retire(alloc_region);
// We update the eden sizes here, when the region is retired,
// instead of when it's allocated, since this is the point that its

@@ -251,6 +251,15 @@ private:
// Class that handles the different kinds of allocations.
G1Allocator* _allocator;
// Outside of GC pauses, the number of bytes used in all regions other
// than the current allocation region(s).
size_t _summary_bytes_used;
void increase_used(size_t bytes);
void decrease_used(size_t bytes);
void set_used(size_t bytes);
// Class that handles archive allocation ranges.
G1ArchiveAllocator* _archive_allocator;
@@ -858,44 +867,27 @@ protected:
// forwarding pointers to themselves. Reset them.
void remove_self_forwarding_pointers();
// Together, these store an object with a preserved mark, and its mark value.
Stack<oop, mtGC> _objs_with_preserved_marks;
Stack<markOop, mtGC> _preserved_marks_of_objs;
struct OopAndMarkOop {
private:
oop _o;
markOop _m;
public:
OopAndMarkOop(oop obj, markOop m) : _o(obj), _m(m) {
}
void set_mark() {
_o->set_mark(_m);
}
};
typedef Stack<OopAndMarkOop,mtGC> OopAndMarkOopStack;
// Stores marks with the corresponding oop that we need to preserve during evacuation
// failure.
OopAndMarkOopStack* _preserved_objs;
// Preserve the mark of "obj", if necessary, in preparation for its mark
// word being overwritten with a self-forwarding-pointer.
void preserve_mark_if_necessary(oop obj, markOop m);
// The stack of evac-failure objects left to be scanned.
GrowableArray<oop>* _evac_failure_scan_stack;
// The closure to apply to evac-failure objects.
OopsInHeapRegionClosure* _evac_failure_closure;
// Set the field above.
void
set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_closure) {
_evac_failure_closure = evac_failure_closure;
}
// Push "obj" on the scan stack.
void push_on_evac_failure_scan_stack(oop obj);
// Process scan stack entries until the stack is empty.
void drain_evac_failure_scan_stack();
// True iff an invocation of "drain_scan_stack" is in progress; to
// prevent unnecessary recursion.
bool _drain_in_progress;
// Do any necessary initialization for evacuation-failure handling.
// "cl" is the closure that will be used to process evac-failure
// objects.
void init_for_evac_failure(OopsInHeapRegionClosure* cl);
// Do any necessary cleanup for evacuation-failure handling data
// structures.
void finalize_for_evac_failure();
// An attempt to evacuate "obj" has failed; take necessary steps.
oop handle_evacuation_failure_par(G1ParScanThreadState* _par_scan_state, oop obj);
void handle_evacuation_failure_common(oop obj, markOop m);
void preserve_mark_during_evac_failure(uint queue, oop obj, markOop m);
#ifndef PRODUCT
// Support for forcing evacuation failures. Analogous to

@@ -111,7 +111,6 @@ protected:
enum G1Barrier {
G1BarrierNone,
G1BarrierEvac,
G1BarrierKlass
};
@@ -148,8 +147,6 @@ typedef G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> G1ParScanAndMar
// We use a separate closure to handle references during evacuation
// failure processing.
typedef G1ParCopyClosure<G1BarrierEvac, G1MarkNone> G1ParScanHeapEvacFailureClosure;
class FilterIntoCSClosure: public ExtendedOopClosure {
G1CollectedHeap* _g1;
OopClosure* _oc;

@@ -144,8 +144,6 @@ bool G1ParScanThreadState::verify_task(StarTask ref) const {
#endif // ASSERT
void G1ParScanThreadState::trim_queue() {
assert(_evac_failure_cl != NULL, "not set");
StarTask ref;
do {
// Drain the overflow stack first, so other threads can steal.
@@ -222,7 +220,7 @@ oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
if (obj_ptr == NULL) {
// This will either forward-to-self, or detect that someone else has
// installed a forwarding pointer.
return _g1h->handle_evacuation_failure_par(this, old);
return handle_evacuation_failure_par(old, old_mark);
}
}
}
@@ -236,7 +234,7 @@ oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
// Doing this after all the allocation attempts also tests the
// undo_allocation() method too.
_g1_par_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
return _g1h->handle_evacuation_failure_par(this, old);
return handle_evacuation_failure_par(old, old_mark);
}
#endif // !PRODUCT
@@ -301,3 +299,36 @@ oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
return forward_ptr;
}
}
oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markOop m) {
assert(_g1h->obj_in_cs(old),
err_msg("Object " PTR_FORMAT " should be in the CSet", p2i(old)));
oop forward_ptr = old->forward_to_atomic(old);
if (forward_ptr == NULL) {
// Forward-to-self succeeded. We are the "owner" of the object.
HeapRegion* r = _g1h->heap_region_containing(old);
if (!r->evacuation_failed()) {
r->set_evacuation_failed(true);
_g1h->hr_printer()->evac_failure(r);
}
_g1h->preserve_mark_during_evac_failure(_queue_num, old, m);
_scanner.set_region(r);
old->oop_iterate_backwards(&_scanner);
return old;
} else {
// Forward-to-self failed. Either someone else managed to allocate
// space for this object (old != forward_ptr) or they beat us in
// self-forwarding it (old == forward_ptr).
assert(old == forward_ptr || !_g1h->obj_in_cs(forward_ptr),
err_msg("Object " PTR_FORMAT " forwarded to: " PTR_FORMAT " "
"should not be in the CSet",
p2i(old), p2i(forward_ptr)));
return forward_ptr;
}
}

@@ -54,8 +54,6 @@ class G1ParScanThreadState : public StackObj {
uint _tenuring_threshold;
G1ParScanClosure _scanner;
OopsInHeapRegionClosure* _evac_failure_cl;
int _hash_seed;
uint _queue_num;
@@ -114,12 +112,6 @@ class G1ParScanThreadState : public StackObj {
}
}
void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
_evac_failure_cl = evac_failure_cl;
}
OopsInHeapRegionClosure* evac_failure_closure() { return _evac_failure_cl; }
int* hash_seed() { return &_hash_seed; }
uint queue_num() { return _queue_num; }
@@ -211,6 +203,9 @@ class G1ParScanThreadState : public StackObj {
void trim_queue();
inline void steal_and_trim_queue(RefToScanQueueSet *task_queues);
// An attempt to evacuate "obj" has failed; take necessary steps.
oop handle_evacuation_failure_par(oop obj, markOop m);
};
#endif // SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_HPP

@@ -45,13 +45,11 @@
nonstatic_field(HeapRegionManager, _regions, G1HeapRegionTable) \
nonstatic_field(HeapRegionManager, _num_committed, uint) \
\
nonstatic_field(G1Allocator, _summary_bytes_used, size_t) \
\
nonstatic_field(G1CollectedHeap, _summary_bytes_used, size_t) \
nonstatic_field(G1CollectedHeap, _hrm, HeapRegionManager) \
nonstatic_field(G1CollectedHeap, _g1mm, G1MonitoringSupport*) \
nonstatic_field(G1CollectedHeap, _old_set, HeapRegionSetBase) \
nonstatic_field(G1CollectedHeap, _humongous_set, HeapRegionSetBase) \
nonstatic_field(G1CollectedHeap, _allocator, G1Allocator*) \
\
nonstatic_field(G1MonitoringSupport, _eden_committed, size_t) \
nonstatic_field(G1MonitoringSupport, _eden_used, size_t) \
@@ -78,12 +76,10 @@
declare_toplevel_type(HeapRegionSetBase) \
declare_toplevel_type(HeapRegionSetCount) \
declare_toplevel_type(G1MonitoringSupport) \
declare_toplevel_type(G1Allocator) \
\
declare_toplevel_type(G1CollectedHeap*) \
declare_toplevel_type(HeapRegion*) \
declare_toplevel_type(G1MonitoringSupport*) \
declare_toplevel_type(G1Allocator*) \
#endif // SHARE_VM_GC_G1_VMSTRUCTS_G1_HPP

@@ -83,7 +83,6 @@ Mutex* DirtyCardQ_FL_lock = NULL;
Monitor* DirtyCardQ_CBL_mon = NULL;
Mutex* Shared_DirtyCardQ_lock = NULL;
Mutex* ParGCRareEvent_lock = NULL;
Mutex* EvacFailureStack_lock = NULL;
Mutex* DerivedPointerTableGC_lock = NULL;
Mutex* Compile_lock = NULL;
Monitor* MethodCompileQueue_lock = NULL;
@@ -201,7 +200,6 @@ void mutex_init() {
def(OldSets_lock , Mutex , leaf , true, Monitor::_safepoint_check_never);
def(RootRegionScan_lock , Monitor, leaf , true, Monitor::_safepoint_check_never);
def(MMUTracker_lock , Mutex , leaf , true, Monitor::_safepoint_check_never);
def(EvacFailureStack_lock , Mutex , nonleaf , true, Monitor::_safepoint_check_never);
def(StringDedupQueue_lock , Monitor, leaf, true, Monitor::_safepoint_check_never);
def(StringDedupTable_lock , Mutex , leaf, true, Monitor::_safepoint_check_never);

@@ -87,7 +87,6 @@ extern Mutex* Shared_DirtyCardQ_lock; // Lock protecting dirty card
// non-Java threads.
// (see option ExplicitGCInvokesConcurrent)
extern Mutex* ParGCRareEvent_lock; // Synchronizes various (rare) parallel GC ops.
extern Mutex* EvacFailureStack_lock; // guards the evac failure scan stack
extern Mutex* Compile_lock; // a lock held when Compilation is updating code (used to block CodeCache traversal, CHA updates, etc)
extern Monitor* MethodCompileQueue_lock; // a lock held when method compilations are enqueued, dequeued
extern Monitor* CompileThread_lock; // a lock held by compile threads during compilation system initialization