From e79a62412fb64ca5783fb536aa2640bf814304d9 Mon Sep 17 00:00:00 2001 From: Antonios Printezis Date: Thu, 14 Oct 2010 10:38:14 -0400 Subject: [PATCH 1/7] 6990359: G1: don't push a stolen entry on the taskqueue, deal with it directly When an entry is stolen, don't push it on the task queue but process it directly. Reviewed-by: iveresov, ysr, jcoomes --- .../src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp | 8 ++++++-- .../src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp | 1 - 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp index d12e59fd316..c6cd6607eb4 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @@ -4118,10 +4118,14 @@ void G1ParEvacuateFollowersClosure::do_void() { while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) { assert(pss->verify_task(stolen_task), "sanity"); if (stolen_task.is_narrow()) { - pss->push_on_queue((narrowOop*) stolen_task); + pss->deal_with_reference((narrowOop*) stolen_task); } else { - pss->push_on_queue((oop*) stolen_task); + pss->deal_with_reference((oop*) stolen_task); } + + // We've just processed a reference and we might have made + // available new entries on the queues. So we have to make sure + // we drain the queues as necessary. pss->trim_queue(); } } while (!offer_termination()); diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp index c474ce2ea5c..b97d8710a64 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp @@ -1772,7 +1772,6 @@ public: } } -private: template void deal_with_reference(T* ref_to_scan) { if (has_partial_array_mask(ref_to_scan)) { _partial_scan_cl->do_oop_nv(ref_to_scan); From 0c9bfb6003cbd798f8e9a71be2334d415144f921 Mon Sep 17 00:00:00 2001 From: Antonios Printezis Date: Fri, 15 Oct 2010 17:26:56 -0400 Subject: [PATCH 2/7] 6992189: G1: inconsistent base used in sparse rem set iterator The remembered set iterator for sparse tables incorrectly assumes that index 0 corresponds to the bottom of the heap, not address 0 as it is the case. 
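In rough outline, the corrected conversion from a sparse-table entry to a global card index amounts to the sketch below. This is not HotSpot code: the typedefs and the CardsPerRegion value are illustrative stand-ins, and the only point is that the region index already carries the right base, so no separate heap-bottom bias is added any more.

    #include <cstddef>
    #include <cstdint>

    typedef int32_t RegionIdx_t;   // stand-in for the HotSpot typedef
    typedef int32_t CardIdx_t;     // stand-in for the HotSpot typedef

    static const size_t CardsPerRegion = 2048;  // illustrative value only

    // The region index recorded in the sparse table uses the same base as
    // the card indices handed back to the remembered set scanning code, so
    // the global card index is just region * CardsPerRegion plus the card's
    // offset within that region.
    size_t compute_card_index(RegionIdx_t region, CardIdx_t card_in_region) {
      return (size_t)region * CardsPerRegion + (size_t)card_in_region;
    }

    int main() {
      return compute_card_index(3, 5) == 3 * CardsPerRegion + 5 ? 0 : 1;
    }
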
Reviewed-by: ysr, jmasa --- .../gc_implementation/g1/heapRegionRemSet.cpp | 4 +--- .../vm/gc_implementation/g1/sparsePRT.cpp | 5 +---- .../vm/gc_implementation/g1/sparsePRT.hpp | 18 ++++-------------- 3 files changed, 6 insertions(+), 21 deletions(-) diff --git a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp index c4c87327484..5d055f65597 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp @@ -1159,9 +1159,7 @@ HeapRegionRemSetIterator() : _hrrs(NULL), _g1h(G1CollectedHeap::heap()), _bosa(NULL), - _sparse_iter(size_t(G1CollectedHeap::heap()->reserved_region().start()) - >> CardTableModRefBS::card_shift) -{} + _sparse_iter() { } void HeapRegionRemSetIterator::initialize(const HeapRegionRemSet* hrrs) { _hrrs = hrrs; diff --git a/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp b/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp index 5bc3ab29c70..86eaa729d3f 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp @@ -323,10 +323,7 @@ CardIdx_t /* RSHashTable:: */ RSHashTableIter::find_first_card_in_list() { } size_t /* RSHashTable:: */ RSHashTableIter::compute_card_ind(CardIdx_t ci) { - return - _heap_bot_card_ind - + (_rsht->entry(_bl_ind)->r_ind() * HeapRegion::CardsPerRegion) - + ci; + return (_rsht->entry(_bl_ind)->r_ind() * HeapRegion::CardsPerRegion) + ci; } bool /* RSHashTable:: */ RSHashTableIter::has_next(size_t& card_index) { diff --git a/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.hpp b/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.hpp index 7812ac4fb53..92a69b4d5cc 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.hpp @@ -169,7 +169,6 @@ class RSHashTableIter VALUE_OBJ_CLASS_SPEC { int _bl_ind; // [-1, 0.._rsht->_capacity) short _card_ind; // [0..SparsePRTEntry::cards_num()) RSHashTable* _rsht; - size_t _heap_bot_card_ind; // If the bucket list pointed to by _bl_ind contains a card, sets // _bl_ind to the index of that entry, and returns the card. 
@@ -183,13 +182,11 @@ class RSHashTableIter VALUE_OBJ_CLASS_SPEC { size_t compute_card_ind(CardIdx_t ci); public: - RSHashTableIter(size_t heap_bot_card_ind) : + RSHashTableIter() : _tbl_ind(RSHashTable::NullEntry), _bl_ind(RSHashTable::NullEntry), _card_ind((SparsePRTEntry::cards_num() - 1)), - _rsht(NULL), - _heap_bot_card_ind(heap_bot_card_ind) - {} + _rsht(NULL) {} void init(RSHashTable* rsht) { _rsht = rsht; @@ -280,19 +277,12 @@ public: bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const { return _next->contains_card(region_id, card_index); } - -#if 0 - void verify_is_cleared(); - void print(); -#endif }; -class SparsePRTIter: public /* RSHashTable:: */RSHashTableIter { +class SparsePRTIter: public RSHashTableIter { public: - SparsePRTIter(size_t heap_bot_card_ind) : - /* RSHashTable:: */RSHashTableIter(heap_bot_card_ind) - {} + SparsePRTIter() : RSHashTableIter() { } void init(const SparsePRT* sprt) { RSHashTableIter::init(sprt->cur()); From f6acb9efb026a05ee7f436fbf8f276857562ece4 Mon Sep 17 00:00:00 2001 From: John Cuthbertson Date: Mon, 18 Oct 2010 15:01:41 -0700 Subject: [PATCH 3/7] 6988458: G1: assert(mr.end() <= _cm->finger()) failed: otherwise the region shouldn't be on the stack The changes from 6941395 did not clear the CMTask::_aborted_region fields when concurrent marking aborted because of overflow. As a result, the next time around we could see a memory region whose start address was above the global finger and the assertion tripped. Moved the clearing of the aborted regions to ConcurrentMark::clear_marking_state, which is executed on all of the exit paths. Reviewed-by: tonyp, ysr, jmasa --- hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp index 5bf7faec667..ec9c9da4f81 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp @@ -2418,6 +2418,8 @@ void ConcurrentMark::clear_marking_state() { for (int i = 0; i < (int)_max_task_num; ++i) { OopTaskQueue* queue = _task_queues->queue(i); queue->set_empty(); + // Clear any partial regions from the CMTasks + _tasks[i]->clear_aborted_region(); } } @@ -2706,7 +2708,6 @@ void ConcurrentMark::abort() { clear_marking_state(); for (int i = 0; i < (int)_max_task_num; ++i) { _tasks[i]->clear_region_fields(); - _tasks[i]->clear_aborted_region(); } _has_aborted = true; @@ -2985,7 +2986,7 @@ void CMTask::reset(CMBitMap* nextMarkBitMap) { _nextMarkBitMap = nextMarkBitMap; clear_region_fields(); - clear_aborted_region(); + assert(_aborted_region.is_empty(), "should have been cleared"); _calls = 0; _elapsed_time_ms = 0.0; From 0c660e1f60c4bd37086a92e807ce071a1d948efa Mon Sep 17 00:00:00 2001 From: Antonios Printezis Date: Sat, 16 Oct 2010 17:12:19 -0400 Subject: [PATCH 4/7] 6991377: G1: race between concurrent refinement and humongous object allocation There is a race between the concurrent refinement threads and the humongous object allocation that can cause the concurrent refinement threads to corrupt the part of the BOT that it is being initialized by the humongous object allocation operation. The solution is to do the humongous object allocation in careful steps to ensure that the concurrent refinement threads always have a consistent view over the BOT, region contents, and top. 
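The ordering that makes this safe can be illustrated with a small, self-contained sketch. It is not HotSpot code: the types, field names, and the use of std::atomic are inventions for the example. The idea it shows is the same, though: initialize everything a concurrent reader might look at, then publish the value that makes the region worth scanning, with store ordering in between.

    #include <atomic>
    #include <cstddef>

    struct Region {
      size_t bot_entry = 0;          // stands in for the BOT and object-header setup
      std::atomic<size_t> top{0};    // readers treat top == 0 as "nothing to scan"
    };

    void publish_humongous(Region& r, size_t new_top) {
      r.bot_entry = 42;              // set up the metadata first
      // The release store plays the role of the storestore barrier in the fix:
      // the metadata store above becomes visible no later than the store to top.
      r.top.store(new_top, std::memory_order_release);   // publish top last
    }

    size_t refine(const Region& r) {
      // A refinement thread only scans up to top; the acquire load pairs with
      // the release store, so a non-zero top implies the metadata is valid.
      size_t t = r.top.load(std::memory_order_acquire);
      return t == 0 ? 0 : r.bot_entry;
    }

    int main() {
      Region r;
      publish_humongous(r, 64);
      return refine(r) == 42 ? 0 : 1;
    }
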
The fix includes some very minor tidying up in sparsePRT. Reviewed-by: jcoomes, johnc, ysr --- .../g1/g1BlockOffsetTable.cpp | 21 ++- .../g1/g1BlockOffsetTable.hpp | 4 + .../vm/gc_implementation/g1/heapRegion.cpp | 37 +++--- .../vm/gc_implementation/g1/heapRegion.hpp | 11 +- .../vm/gc_implementation/g1/heapRegionSeq.cpp | 122 +++++++++++++++--- .../vm/gc_implementation/g1/sparsePRT.cpp | 6 +- .../vm/gc_implementation/g1/sparsePRT.hpp | 2 - 7 files changed, 149 insertions(+), 54 deletions(-) diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp index 106c521698a..8eb6607ae14 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp @@ -175,7 +175,7 @@ G1BlockOffsetArray::set_remainder_to_point_to_start_incl(size_t start_card, size } assert(start_card > _array->index_for(_bottom), "Cannot be first card"); assert(_array->offset_array(start_card-1) <= N_words, - "Offset card has an unexpected value"); + "Offset card has an unexpected value"); size_t start_card_for_region = start_card; u_char offset = max_jubyte; for (int i = 0; i < BlockOffsetArray::N_powers; i++) { @@ -577,6 +577,16 @@ void G1BlockOffsetArray::alloc_block_work2(HeapWord** threshold_, size_t* index_ #endif } +void +G1BlockOffsetArray::set_for_starts_humongous(HeapWord* new_end) { + assert(_end == new_end, "_end should have already been updated"); + + // The first BOT entry should have offset 0. + _array->set_offset_array(_array->index_for(_bottom), 0); + // The rest should point to the first one. + set_remainder_to_point_to_start(_bottom + N_words, new_end); +} + ////////////////////////////////////////////////////////////////////// // G1BlockOffsetArrayContigSpace ////////////////////////////////////////////////////////////////////// @@ -626,3 +636,12 @@ void G1BlockOffsetArrayContigSpace::zero_bottom_entry() { "Precondition of call"); _array->set_offset_array(bottom_index, 0); } + +void +G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* new_end) { + G1BlockOffsetArray::set_for_starts_humongous(new_end); + + // Make sure _next_offset_threshold and _next_offset_index point to new_end. 
+ _next_offset_threshold = new_end; + _next_offset_index = _array->index_for(new_end); +} diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp index 7ce92971839..28c29502c7c 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp @@ -436,6 +436,8 @@ public: } void check_all_cards(size_t left_card, size_t right_card) const; + + virtual void set_for_starts_humongous(HeapWord* new_end); }; // A subtype of BlockOffsetArray that takes advantage of the fact @@ -484,4 +486,6 @@ class G1BlockOffsetArrayContigSpace: public G1BlockOffsetArray { HeapWord* block_start_unsafe(const void* addr); HeapWord* block_start_unsafe_const(const void* addr) const; + + virtual void set_for_starts_humongous(HeapWord* new_end); }; diff --git a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp index 58b55123aae..dbe2d24a6ce 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp @@ -377,10 +377,26 @@ void HeapRegion::calc_gc_efficiency() { } // -void HeapRegion::set_startsHumongous() { +void HeapRegion::set_startsHumongous(HeapWord* new_end) { + assert(end() == _orig_end, + "Should be normal before the humongous object allocation"); + assert(top() == bottom(), "should be empty"); + _humongous_type = StartsHumongous; _humongous_start_region = this; - assert(end() == _orig_end, "Should be normal before alloc."); + + set_end(new_end); + _offsets.set_for_starts_humongous(new_end); +} + +void HeapRegion::set_continuesHumongous(HeapRegion* start) { + assert(end() == _orig_end, + "Should be normal before the humongous object allocation"); + assert(top() == bottom(), "should be empty"); + assert(start->startsHumongous(), "pre-condition"); + + _humongous_type = ContinuesHumongous; + _humongous_start_region = start; } bool HeapRegion::claimHeapRegion(jint claimValue) { @@ -500,23 +516,6 @@ CompactibleSpace* HeapRegion::next_compaction_space() const { return blk.result(); } -void HeapRegion::set_continuesHumongous(HeapRegion* start) { - // The order is important here. - start->add_continuingHumongousRegion(this); - _humongous_type = ContinuesHumongous; - _humongous_start_region = start; -} - -void HeapRegion::add_continuingHumongousRegion(HeapRegion* cont) { - // Must join the blocks of the current H region seq with the block of the - // added region. - offsets()->join_blocks(bottom(), cont->bottom()); - arrayOop obj = (arrayOop)(bottom()); - obj->set_length((int) (obj->length() + cont->capacity()/jintSize)); - set_end(cont->end()); - set_top(cont->end()); -} - void HeapRegion::save_marks() { set_saved_mark(); } diff --git a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp index 42e96bbfb50..48118ceb7e8 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp @@ -395,14 +395,12 @@ class HeapRegion: public G1OffsetTableContigSpace { // Causes the current region to represent a humongous object spanning "n" // regions. - virtual void set_startsHumongous(); + void set_startsHumongous(HeapWord* new_end); // The regions that continue a humongous sequence should be added using // this method, in increasing address order. 
void set_continuesHumongous(HeapRegion* start); - void add_continuingHumongousRegion(HeapRegion* cont); - // If the region has a remembered set, return a pointer to it. HeapRegionRemSet* rem_set() const { return _rem_set; @@ -733,13 +731,6 @@ class HeapRegion: public G1OffsetTableContigSpace { FilterOutOfRegionClosure* cl, bool filter_young); - // The region "mr" is entirely in "this", and starts and ends at block - // boundaries. The caller declares that all the contained blocks are - // coalesced into one. - void declare_filled_region_to_BOT(MemRegion mr) { - _offsets.single_block(mr.start(), mr.end()); - } - // A version of block start that is guaranteed to find *some* block // boundary at or before "p", but does not object iteration, and may // therefore be used safely when the heap is unparseable. diff --git a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp index b71cf1918c9..0640cbe0d39 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp @@ -91,34 +91,118 @@ HeapRegionSeq::alloc_obj_from_region_index(int ind, size_t word_size) { } if (sumSizes >= word_size) { _alloc_search_start = cur; - // Mark the allocated regions as allocated. + + // We need to initialize the region(s) we just discovered. This is + // a bit tricky given that it can happen concurrently with + // refinement threads refining cards on these regions and + // potentially wanting to refine the BOT as they are scanning + // those cards (this can happen shortly after a cleanup; see CR + // 6991377). So we have to set up the region(s) carefully and in + // a specific order. + + // Currently, allocs_are_zero_filled() returns false. The zero + // filling infrastructure will be going away soon (see CR 6977804). + // So no need to do anything else here. bool zf = G1CollectedHeap::heap()->allocs_are_zero_filled(); + assert(!zf, "not supported"); + + // This will be the "starts humongous" region. HeapRegion* first_hr = _regions.at(first); - for (int i = first; i < cur; i++) { - HeapRegion* hr = _regions.at(i); - if (zf) - hr->ensure_zero_filled(); + { + MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); + first_hr->set_zero_fill_allocated(); + } + // The header of the new object will be placed at the bottom of + // the first region. + HeapWord* new_obj = first_hr->bottom(); + // This will be the new end of the first region in the series that + // should also match the end of the last region in the seriers. + // (Note: sumSizes = "region size" x "number of regions we found"). + HeapWord* new_end = new_obj + sumSizes; + // This will be the new top of the first region that will reflect + // this allocation. + HeapWord* new_top = new_obj + word_size; + + // First, we need to zero the header of the space that we will be + // allocating. When we update top further down, some refinement + // threads might try to scan the region. By zeroing the header we + // ensure that any thread that will try to scan the region will + // come across the zero klass word and bail out. + // + // NOTE: It would not have been correct to have used + // CollectedHeap::fill_with_object() and make the space look like + // an int array. The thread that is doing the allocation will + // later update the object header to a potentially different array + // type and, for a very short period of time, the klass and length + // fields will be inconsistent. 
This could cause a refinement + // thread to calculate the object size incorrectly. + Copy::fill_to_words(new_obj, oopDesc::header_size(), 0); + + // We will set up the first region as "starts humongous". This + // will also update the BOT covering all the regions to reflect + // that there is a single object that starts at the bottom of the + // first region. + first_hr->set_startsHumongous(new_end); + + // Then, if there are any, we will set up the "continues + // humongous" regions. + HeapRegion* hr = NULL; + for (int i = first + 1; i < cur; ++i) { + hr = _regions.at(i); { MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); hr->set_zero_fill_allocated(); } - size_t sz = hr->capacity() / HeapWordSize; - HeapWord* tmp = hr->allocate(sz); - assert(tmp != NULL, "Humongous allocation failure"); - MemRegion mr = MemRegion(tmp, sz); - CollectedHeap::fill_with_object(mr); - hr->declare_filled_region_to_BOT(mr); - if (i == first) { - first_hr->set_startsHumongous(); + hr->set_continuesHumongous(first_hr); + } + // If we have "continues humongous" regions (hr != NULL), then the + // end of the last one should match new_end. + assert(hr == NULL || hr->end() == new_end, "sanity"); + + // Up to this point no concurrent thread would have been able to + // do any scanning on any region in this series. All the top + // fields still point to bottom, so the intersection between + // [bottom,top] and [card_start,card_end] will be empty. Before we + // update the top fields, we'll do a storestore to make sure that + // no thread sees the update to top before the zeroing of the + // object header and the BOT initialization. + OrderAccess::storestore(); + + // Now that the BOT and the object header have been initialized, + // we can update top of the "starts humongous" region. + assert(first_hr->bottom() < new_top && new_top <= first_hr->end(), + "new_top should be in this region"); + first_hr->set_top(new_top); + + // Now, we will update the top fields of the "continues humongous" + // regions. The reason we need to do this is that, otherwise, + // these regions would look empty and this will confuse parts of + // G1. For example, the code that looks for a consecutive number + // of empty regions will consider them empty and try to + // re-allocate them. We can extend is_empty() to also include + // !continuesHumongous(), but it is easier to just update the top + // fields here. + hr = NULL; + for (int i = first + 1; i < cur; ++i) { + hr = _regions.at(i); + if ((i + 1) == cur) { + // last continues humongous region + assert(hr->bottom() < new_top && new_top <= hr->end(), + "new_top should fall on this region"); + hr->set_top(new_top); } else { - assert(i > first, "sanity"); - hr->set_continuesHumongous(first_hr); + // not last one + assert(new_top > hr->end(), "new_top should be above this region"); + hr->set_top(hr->end()); } } - HeapWord* first_hr_bot = first_hr->bottom(); - HeapWord* obj_end = first_hr_bot + word_size; - first_hr->set_top(obj_end); - return first_hr_bot; + // If we have continues humongous regions (hr != NULL), then the + // end of the last one should match new_end and its top should + // match new_top. + assert(hr == NULL || + (hr->end() == new_end && hr->top() == new_top), "sanity"); + + return new_obj; } else { // If we started from the beginning, we want to know why we can't alloc. 
return NULL; diff --git a/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp b/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp index 86eaa729d3f..896b5ee70b3 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp @@ -308,7 +308,7 @@ void RSHashTable::add_entry(SparsePRTEntry* e) { assert(e2->num_valid_cards() > 0, "Postcondition."); } -CardIdx_t /* RSHashTable:: */ RSHashTableIter::find_first_card_in_list() { +CardIdx_t RSHashTableIter::find_first_card_in_list() { CardIdx_t res; while (_bl_ind != RSHashTable::NullEntry) { res = _rsht->entry(_bl_ind)->card(0); @@ -322,11 +322,11 @@ CardIdx_t /* RSHashTable:: */ RSHashTableIter::find_first_card_in_list() { return SparsePRTEntry::NullEntry; } -size_t /* RSHashTable:: */ RSHashTableIter::compute_card_ind(CardIdx_t ci) { +size_t RSHashTableIter::compute_card_ind(CardIdx_t ci) { return (_rsht->entry(_bl_ind)->r_ind() * HeapRegion::CardsPerRegion) + ci; } -bool /* RSHashTable:: */ RSHashTableIter::has_next(size_t& card_index) { +bool RSHashTableIter::has_next(size_t& card_index) { _card_ind++; CardIdx_t ci; if (_card_ind < SparsePRTEntry::cards_num() && diff --git a/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.hpp b/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.hpp index 92a69b4d5cc..715a5515eba 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.hpp @@ -282,8 +282,6 @@ public: class SparsePRTIter: public RSHashTableIter { public: - SparsePRTIter() : RSHashTableIter() { } - void init(const SparsePRT* sprt) { RSHashTableIter::init(sprt->cur()); } From 554e77efb458c07c20446cc944cc61c34ad390b9 Mon Sep 17 00:00:00 2001 From: "Y. Srinivas Ramakrishna" Date: Thu, 21 Oct 2010 17:29:24 -0700 Subject: [PATCH 5/7] 6992998: CMSWaitDuration=0 causes hangs with +ExplicitGCInvokesConcurrent Closed a timing hole during which concurrent full gc requests can be missed. The hole can increase the latency of the response to a full gc request by up to the value of CMSWaitDuration. If CMSWaitDuration=0 is, as currently, interpreted as an unbounded wait, suitable in certain tuning scenarios, the application can potentially hang. Made two obscure tunables, including CMSWaitDuration, manageable. Reviewed-by: jcoomes, tonyp --- .../concurrentMarkSweepThread.cpp | 15 ++++++++++----- .../concurrentMarkSweepThread.hpp | 6 ++++-- hotspot/src/share/vm/runtime/globals.hpp | 4 ++-- 3 files changed, 16 insertions(+), 9 deletions(-) diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp index f0033815bcd..f9823af67d9 100644 --- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp +++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2006, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -272,12 +272,16 @@ void ConcurrentMarkSweepThread::desynchronize(bool is_cms_thread) { } } -// Wait until the next synchronous GC or a timeout, whichever is earlier. 
-void ConcurrentMarkSweepThread::wait_on_cms_lock(long t) { +// Wait until the next synchronous GC, a concurrent full gc request, +// or a timeout, whichever is earlier. +void ConcurrentMarkSweepThread::wait_on_cms_lock(long t_millis) { MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); + if (_should_terminate || _collector->_full_gc_requested) { + return; + } set_CMS_flag(CMS_cms_wants_token); // to provoke notifies - CGC_lock->wait(Mutex::_no_safepoint_check_flag, t); + CGC_lock->wait(Mutex::_no_safepoint_check_flag, t_millis); clear_CMS_flag(CMS_cms_wants_token); assert(!CMS_flag_is_set(CMS_cms_has_token | CMS_cms_wants_token), "Should not be set"); @@ -289,7 +293,8 @@ void ConcurrentMarkSweepThread::sleepBeforeNextCycle() { icms_wait(); return; } else { - // Wait until the next synchronous GC or a timeout, whichever is earlier + // Wait until the next synchronous GC, a concurrent full gc + // request or a timeout, whichever is earlier. wait_on_cms_lock(CMSWaitDuration); } // Check if we should start a CMS collection cycle diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp index 26eac7c37c0..1390b20a1da 100644 --- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp +++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp @@ -120,8 +120,10 @@ class ConcurrentMarkSweepThread: public ConcurrentGCThread { } // Wait on CMS lock until the next synchronous GC - // or given timeout, whichever is earlier. - void wait_on_cms_lock(long t); // milliseconds + // or given timeout, whichever is earlier. A timeout value + // of 0 indicates that there is no upper bound on the wait time. + // A concurrent full gc request terminates the wait. + void wait_on_cms_lock(long t_millis); // The CMS thread will yield during the work portion of its cycle // only when requested to. Both synchronous and asychronous requests diff --git a/hotspot/src/share/vm/runtime/globals.hpp b/hotspot/src/share/vm/runtime/globals.hpp index 6005865544b..1fa26ce7fd6 100644 --- a/hotspot/src/share/vm/runtime/globals.hpp +++ b/hotspot/src/share/vm/runtime/globals.hpp @@ -1585,7 +1585,7 @@ class CommandLineFlags { "(Temporary, subject to experimentation)" \ "Nominal minimum work per abortable preclean iteration") \ \ - product(intx, CMSAbortablePrecleanWaitMillis, 100, \ + manageable(intx, CMSAbortablePrecleanWaitMillis, 100, \ "(Temporary, subject to experimentation)" \ " Time that we sleep between iterations when not given" \ " enough work per iteration") \ @@ -1677,7 +1677,7 @@ class CommandLineFlags { product(uintx, CMSWorkQueueDrainThreshold, 10, \ "Don't drain below this size per parallel worker/thief") \ \ - product(intx, CMSWaitDuration, 2000, \ + manageable(intx, CMSWaitDuration, 2000, \ "Time in milliseconds that CMS thread waits for young GC") \ \ product(bool, CMSYield, true, \ From cbc7f8756a7e9569bbe1a38ce7cab0c0c6002bf7 Mon Sep 17 00:00:00 2001 From: "Y. Srinivas Ramakrishna" Date: Sat, 23 Oct 2010 23:03:49 -0700 Subject: [PATCH 6/7] 6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data Deprecated HandlePromotionFailure, removing the ability to turn off that feature, did away with one epoch look-ahead when deciding if a scavenge is likely to fail, relying on current data. 
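A compact way to see the new heuristic is the simplified model below. It is not the HotSpot code; the function name, parameters, and the numbers in main are made up for the example. Instead of demanding room for a worst-case promotion up front, the next generation is asked whether its available space covers the recent (padded) average promotion or the theoretical maximum, and the scavenge is attempted if either holds.

    #include <cstddef>
    #include <cstdio>

    // Attempt a young collection if the space available in the next
    // generation covers either the padded average of recent promotions or
    // the largest amount this scavenge could possibly promote.
    bool promotion_attempt_is_safe(size_t available,
                                   size_t avg_promoted,
                                   size_t max_promotion) {
      return available >= avg_promoted || available >= max_promotion;
    }

    int main() {
      size_t available     = 40u * 1024 * 1024;  // free space in the next generation
      size_t avg_promoted  =  8u * 1024 * 1024;  // recent promotion average
      size_t max_promotion = 64u * 1024 * 1024;  // everything live in the young gen
      std::printf("scavenge is %s\n",
                  promotion_attempt_is_safe(available, avg_promoted, max_promotion)
                      ? "attempted" : "skipped");
      return 0;
    }
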
Reviewed-by: jmasa, johnc, poonam --- .../concurrentMarkSweepGeneration.cpp | 84 ++++--------------- .../concurrentMarkSweepGeneration.hpp | 3 +- .../parNew/parNewGeneration.cpp | 17 +--- .../src/share/vm/memory/collectorPolicy.cpp | 8 +- .../src/share/vm/memory/defNewGeneration.cpp | 56 ++++--------- .../src/share/vm/memory/defNewGeneration.hpp | 14 ++-- .../src/share/vm/memory/genCollectedHeap.cpp | 14 +--- .../src/share/vm/memory/genCollectedHeap.hpp | 39 +++++---- hotspot/src/share/vm/memory/generation.cpp | 15 ++-- hotspot/src/share/vm/memory/generation.hpp | 22 +++-- .../src/share/vm/memory/tenuredGeneration.cpp | 35 +++----- .../src/share/vm/memory/tenuredGeneration.hpp | 3 +- hotspot/src/share/vm/runtime/arguments.cpp | 6 +- hotspot/src/share/vm/runtime/globals.hpp | 7 -- 14 files changed, 101 insertions(+), 222 deletions(-) diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp index 2573d31a808..ceb7ad5dfd4 100644 --- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp +++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp @@ -354,12 +354,8 @@ void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) { double CMSStats::time_until_cms_gen_full() const { size_t cms_free = _cms_gen->cmsSpace()->free(); GenCollectedHeap* gch = GenCollectedHeap::heap(); - size_t expected_promotion = gch->get_gen(0)->capacity(); - if (HandlePromotionFailure) { - expected_promotion = MIN2( - (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average(), - expected_promotion); - } + size_t expected_promotion = MIN2(gch->get_gen(0)->capacity(), + (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average()); if (cms_free > expected_promotion) { // Start a cms collection if there isn't enough space to promote // for the next minor collection. Use the padded average as @@ -865,57 +861,18 @@ size_t ConcurrentMarkSweepGeneration::max_available() const { return free() + _virtual_space.uncommitted_size(); } -bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe( - size_t max_promotion_in_bytes, - bool younger_handles_promotion_failure) const { - - // This is the most conservative test. Full promotion is - // guaranteed if this is used. The multiplicative factor is to - // account for the worst case "dilatation". - double adjusted_max_promo_bytes = _dilatation_factor * max_promotion_in_bytes; - if (adjusted_max_promo_bytes > (double)max_uintx) { // larger than size_t - adjusted_max_promo_bytes = (double)max_uintx; +bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const { + size_t available = max_available(); + size_t av_promo = (size_t)gc_stats()->avg_promoted()->padded_average(); + bool res = (available >= av_promo) || (available >= max_promotion_in_bytes); + if (PrintGC && Verbose) { + gclog_or_tty->print_cr( + "CMS: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT")," + "max_promo("SIZE_FORMAT")", + res? "":" not", available, res? ">=":"<", + av_promo, max_promotion_in_bytes); } - bool result = (max_contiguous_available() >= (size_t)adjusted_max_promo_bytes); - - if (younger_handles_promotion_failure && !result) { - // Full promotion is not guaranteed because fragmentation - // of the cms generation can prevent the full promotion. 
- result = (max_available() >= (size_t)adjusted_max_promo_bytes); - - if (!result) { - // With promotion failure handling the test for the ability - // to support the promotion does not have to be guaranteed. - // Use an average of the amount promoted. - result = max_available() >= (size_t) - gc_stats()->avg_promoted()->padded_average(); - if (PrintGC && Verbose && result) { - gclog_or_tty->print_cr( - "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe" - " max_available: " SIZE_FORMAT - " avg_promoted: " SIZE_FORMAT, - max_available(), (size_t) - gc_stats()->avg_promoted()->padded_average()); - } - } else { - if (PrintGC && Verbose) { - gclog_or_tty->print_cr( - "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe" - " max_available: " SIZE_FORMAT - " adj_max_promo_bytes: " SIZE_FORMAT, - max_available(), (size_t)adjusted_max_promo_bytes); - } - } - } else { - if (PrintGC && Verbose) { - gclog_or_tty->print_cr( - "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe" - " contiguous_available: " SIZE_FORMAT - " adj_max_promo_bytes: " SIZE_FORMAT, - max_contiguous_available(), (size_t)adjusted_max_promo_bytes); - } - } - return result; + return res; } // At a promotion failure dump information on block layout in heap @@ -6091,23 +6048,14 @@ void CMSCollector::sweep(bool asynch) { assert(_collectorState == Resizing, "Change of collector state to" " Resizing must be done under the freelistLocks (plural)"); - // Now that sweeping has been completed, if the GCH's - // incremental_collection_will_fail flag is set, clear it, + // Now that sweeping has been completed, we clear + // the incremental_collection_failed flag, // thus inviting a younger gen collection to promote into // this generation. If such a promotion may still fail, // the flag will be set again when a young collection is // attempted. - // I think the incremental_collection_will_fail flag's use - // is specific to a 2 generation collection policy, so i'll - // assert that that's the configuration we are operating within. - // The use of the flag can and should be generalized appropriately - // in the future to deal with a general n-generation system. 
- GenCollectedHeap* gch = GenCollectedHeap::heap(); - assert(gch->collector_policy()->is_two_generation_policy(), - "Resetting of incremental_collection_will_fail flag" - " may be incorrect otherwise"); - gch->clear_incremental_collection_will_fail(); + gch->clear_incremental_collection_failed(); // Worth retrying as fresh space may have been freed up gch->update_full_collections_completed(_collection_count_start); } diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp index 7a0670734e4..dce44e39729 100644 --- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp +++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp @@ -1185,8 +1185,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration { virtual void par_promote_alloc_done(int thread_num); virtual void par_oop_since_save_marks_iterate_done(int thread_num); - virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes, - bool younger_handles_promotion_failure) const; + virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const; // Inform this (non-young) generation that a promotion failure was // encountered during a collection of a younger generation that diff --git a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp index 2aa4e7f4447..12064fa7aa0 100644 --- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp +++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp @@ -846,7 +846,7 @@ void ParNewGeneration::collect(bool full, // from this generation, pass on collection; let the next generation // do it. if (!collection_attempt_is_safe()) { - gch->set_incremental_collection_will_fail(); + gch->set_incremental_collection_failed(); // slight lie, in that we did not even attempt one return; } assert(to()->is_empty(), "Else not collection_attempt_is_safe"); @@ -935,8 +935,6 @@ void ParNewGeneration::collect(bool full, assert(to()->is_empty(), "to space should be empty now"); } else { - assert(HandlePromotionFailure, - "Should only be here if promotion failure handling is on"); assert(_promo_failure_scan_stack.is_empty(), "post condition"); _promo_failure_scan_stack.clear(true); // Clear cached segments. @@ -947,7 +945,7 @@ void ParNewGeneration::collect(bool full, // All the spaces are in play for mark-sweep. swap_spaces(); // Make life simpler for CMS || rescan; see 6483690. from()->set_next_compaction_space(to()); - gch->set_incremental_collection_will_fail(); + gch->set_incremental_collection_failed(); // Inform the next generation that a promotion failure occurred. _next_gen->promotion_failure_occurred(); @@ -1092,11 +1090,6 @@ oop ParNewGeneration::copy_to_survivor_space_avoiding_promotion_undo( old, m, sz); if (new_obj == NULL) { - if (!HandlePromotionFailure) { - // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag - // is incorrectly set. In any case, its seriously wrong to be here! 
- vm_exit_out_of_memory(sz*wordSize, "promotion"); - } // promotion failed, forward to self _promotion_failed = true; new_obj = old; @@ -1206,12 +1199,6 @@ oop ParNewGeneration::copy_to_survivor_space_with_undo( old, m, sz); if (new_obj == NULL) { - if (!HandlePromotionFailure) { - // A failed promotion likely means the MaxLiveObjectEvacuationRatio - // flag is incorrectly set. In any case, its seriously wrong to be - // here! - vm_exit_out_of_memory(sz*wordSize, "promotion"); - } // promotion failed, forward to self forward_ptr = old->forward_to_atomic(old); new_obj = old; diff --git a/hotspot/src/share/vm/memory/collectorPolicy.cpp b/hotspot/src/share/vm/memory/collectorPolicy.cpp index c6b8e3875e4..3ddb0f65bc5 100644 --- a/hotspot/src/share/vm/memory/collectorPolicy.cpp +++ b/hotspot/src/share/vm/memory/collectorPolicy.cpp @@ -659,9 +659,6 @@ HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size, } return result; // could be null if we are out of space } else if (!gch->incremental_collection_will_fail()) { - // The gc_prologues have not executed yet. The value - // for incremental_collection_will_fail() is the remanent - // of the last collection. // Do an incremental collection. gch->do_collection(false /* full */, false /* clear_all_soft_refs */, @@ -739,9 +736,8 @@ bool GenCollectorPolicy::should_try_older_generation_allocation( GenCollectedHeap* gch = GenCollectedHeap::heap(); size_t gen0_capacity = gch->get_gen(0)->capacity_before_gc(); return (word_size > heap_word_size(gen0_capacity)) - || (GC_locker::is_active_and_needs_gc()) - || ( gch->last_incremental_collection_failed() - && gch->incremental_collection_will_fail()); + || GC_locker::is_active_and_needs_gc() + || gch->incremental_collection_failed(); } diff --git a/hotspot/src/share/vm/memory/defNewGeneration.cpp b/hotspot/src/share/vm/memory/defNewGeneration.cpp index ac7cc267f23..6976e1660f2 100644 --- a/hotspot/src/share/vm/memory/defNewGeneration.cpp +++ b/hotspot/src/share/vm/memory/defNewGeneration.cpp @@ -510,7 +510,7 @@ void DefNewGeneration::collect(bool full, // from this generation, pass on collection; let the next generation // do it. if (!collection_attempt_is_safe()) { - gch->set_incremental_collection_will_fail(); + gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one return; } assert(to()->is_empty(), "Else not collection_attempt_is_safe"); @@ -596,9 +596,8 @@ void DefNewGeneration::collect(bool full, if (PrintGC && !PrintGCDetails) { gch->print_heap_change(gch_prev_used); } + assert(!gch->incremental_collection_failed(), "Should be clear"); } else { - assert(HandlePromotionFailure, - "Should not be here unless promotion failure handling is on"); assert(_promo_failure_scan_stack.is_empty(), "post condition"); _promo_failure_scan_stack.clear(true); // Clear cached segments. @@ -613,7 +612,7 @@ void DefNewGeneration::collect(bool full, // and from-space. swap_spaces(); // For uniformity wrt ParNewGeneration. from()->set_next_compaction_space(to()); - gch->set_incremental_collection_will_fail(); + gch->set_incremental_collection_failed(); // Inform the next generation that a promotion failure occurred. _next_gen->promotion_failure_occurred(); @@ -700,12 +699,6 @@ oop DefNewGeneration::copy_to_survivor_space(oop old) { if (obj == NULL) { obj = _next_gen->promote(old, s); if (obj == NULL) { - if (!HandlePromotionFailure) { - // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag - // is incorrectly set. In any case, its seriously wrong to be here! 
- vm_exit_out_of_memory(s*wordSize, "promotion"); - } - handle_promotion_failure(old); return old; } @@ -812,31 +805,7 @@ bool DefNewGeneration::collection_attempt_is_safe() { assert(_next_gen != NULL, "This must be the youngest gen, and not the only gen"); } - - // Decide if there's enough room for a full promotion - // When using extremely large edens, we effectively lose a - // large amount of old space. Use the "MaxLiveObjectEvacuationRatio" - // flag to reduce the minimum evacuation space requirements. If - // there is not enough space to evacuate eden during a scavenge, - // the VM will immediately exit with an out of memory error. - // This flag has not been tested - // with collectors other than simple mark & sweep. - // - // Note that with the addition of promotion failure handling, the - // VM will not immediately exit but will undo the young generation - // collection. The parameter is left here for compatibility. - const double evacuation_ratio = MaxLiveObjectEvacuationRatio / 100.0; - - // worst_case_evacuation is based on "used()". For the case where this - // method is called after a collection, this is still appropriate because - // the case that needs to be detected is one in which a full collection - // has been done and has overflowed into the young generation. In that - // case a minor collection will fail (the overflow of the full collection - // means there is no space in the old generation for any promotion). - size_t worst_case_evacuation = (size_t)(used() * evacuation_ratio); - - return _next_gen->promotion_attempt_is_safe(worst_case_evacuation, - HandlePromotionFailure); + return _next_gen->promotion_attempt_is_safe(used()); } void DefNewGeneration::gc_epilogue(bool full) { @@ -845,14 +814,17 @@ void DefNewGeneration::gc_epilogue(bool full) { // a minimum at the end of a collection. If it is not, then // the heap is approaching full. GenCollectedHeap* gch = GenCollectedHeap::heap(); - clear_should_allocate_from_space(); - if (collection_attempt_is_safe()) { - gch->clear_incremental_collection_will_fail(); - } else { - gch->set_incremental_collection_will_fail(); - if (full) { // we seem to be running out of space - set_should_allocate_from_space(); + if (full) { + assert(!GC_locker::is_active(), "We should not be executing here"); + if (!collection_attempt_is_safe()) { + gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state + set_should_allocate_from_space(); // we seem to be running out of space + } else { + gch->clear_incremental_collection_failed(); // We just did a full collection + clear_should_allocate_from_space(); // if set } + } else { + assert(!gch->incremental_collection_failed(), "Error"); } if (ZapUnusedHeapArea) { diff --git a/hotspot/src/share/vm/memory/defNewGeneration.hpp b/hotspot/src/share/vm/memory/defNewGeneration.hpp index 166510b0a38..6f9d9a7e68e 100644 --- a/hotspot/src/share/vm/memory/defNewGeneration.hpp +++ b/hotspot/src/share/vm/memory/defNewGeneration.hpp @@ -82,12 +82,6 @@ protected: Stack _objs_with_preserved_marks; Stack _preserved_marks_of_objs; - // Returns true if the collection can be safely attempted. - // If this method returns false, a collection is not - // guaranteed to fail but the system may not be able - // to recover from the failure. 
- bool collection_attempt_is_safe(); - // Promotion failure handling OopClosure *_promo_failure_scan_stack_closure; void set_promo_failure_scan_stack_closure(OopClosure *scan_stack_closure) { @@ -304,6 +298,14 @@ protected: // GC support virtual void compute_new_size(); + + // Returns true if the collection is likely to be safely + // completed. Even if this method returns true, a collection + // may not be guaranteed to succeed, and the system should be + // able to safely unwind and recover from that failure, albeit + // at some additional cost. Override superclass's implementation. + virtual bool collection_attempt_is_safe(); + virtual void collect(bool full, bool clear_all_soft_refs, size_t size, diff --git a/hotspot/src/share/vm/memory/genCollectedHeap.cpp b/hotspot/src/share/vm/memory/genCollectedHeap.cpp index c2b8dcb3fab..432f64737fe 100644 --- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp +++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp @@ -142,8 +142,7 @@ jint GenCollectedHeap::initialize() { } _perm_gen = perm_gen_spec->init(heap_rs, PermSize, rem_set()); - clear_incremental_collection_will_fail(); - clear_last_incremental_collection_failed(); + clear_incremental_collection_failed(); #ifndef SERIALGC // If we are running CMS, create the collector responsible @@ -1347,17 +1346,6 @@ class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure { }; void GenCollectedHeap::gc_epilogue(bool full) { - // Remember if a partial collection of the heap failed, and - // we did a complete collection. - if (full && incremental_collection_will_fail()) { - set_last_incremental_collection_failed(); - } else { - clear_last_incremental_collection_failed(); - } - // Clear the flag, if set; the generation gc_epilogues will set the - // flag again if the condition persists despite the collection. - clear_incremental_collection_will_fail(); - #ifdef COMPILER2 assert(DerivedPointerTable::is_empty(), "derived pointer present"); size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr())); diff --git a/hotspot/src/share/vm/memory/genCollectedHeap.hpp b/hotspot/src/share/vm/memory/genCollectedHeap.hpp index 75266a2d9a9..859f47936a7 100644 --- a/hotspot/src/share/vm/memory/genCollectedHeap.hpp +++ b/hotspot/src/share/vm/memory/genCollectedHeap.hpp @@ -62,11 +62,10 @@ public: // The generational collector policy. GenCollectorPolicy* _gen_policy; - // If a generation would bail out of an incremental collection, - // it sets this flag. If the flag is set, satisfy_failed_allocation - // will attempt allocating in all generations before doing a full GC. - bool _incremental_collection_will_fail; - bool _last_incremental_collection_failed; + // Indicates that the most recent previous incremental collection failed. + // The flag is cleared when an action is taken that might clear the + // condition that caused that incremental collection to fail. + bool _incremental_collection_failed; // In support of ExplicitGCInvokesConcurrent functionality unsigned int _full_collections_completed; @@ -469,26 +468,26 @@ public: // call to "save_marks". bool no_allocs_since_save_marks(int level); - // If a generation bails out of an incremental collection, - // it sets this flag. + // Returns true if an incremental collection is likely to fail. 
bool incremental_collection_will_fail() { - return _incremental_collection_will_fail; - } - void set_incremental_collection_will_fail() { - _incremental_collection_will_fail = true; - } - void clear_incremental_collection_will_fail() { - _incremental_collection_will_fail = false; + // Assumes a 2-generation system; the first disjunct remembers if an + // incremental collection failed, even when we thought (second disjunct) + // that it would not. + assert(heap()->collector_policy()->is_two_generation_policy(), + "the following definition may not be suitable for an n(>2)-generation system"); + return incremental_collection_failed() || !get_gen(0)->collection_attempt_is_safe(); } - bool last_incremental_collection_failed() const { - return _last_incremental_collection_failed; + // If a generation bails out of an incremental collection, + // it sets this flag. + bool incremental_collection_failed() const { + return _incremental_collection_failed; } - void set_last_incremental_collection_failed() { - _last_incremental_collection_failed = true; + void set_incremental_collection_failed() { + _incremental_collection_failed = true; } - void clear_last_incremental_collection_failed() { - _last_incremental_collection_failed = false; + void clear_incremental_collection_failed() { + _incremental_collection_failed = false; } // Promotion of obj into gen failed. Try to promote obj to higher non-perm diff --git a/hotspot/src/share/vm/memory/generation.cpp b/hotspot/src/share/vm/memory/generation.cpp index 6d0d48596b3..c2f51593778 100644 --- a/hotspot/src/share/vm/memory/generation.cpp +++ b/hotspot/src/share/vm/memory/generation.cpp @@ -165,15 +165,16 @@ size_t Generation::max_contiguous_available() const { return max; } -bool Generation::promotion_attempt_is_safe(size_t promotion_in_bytes, - bool not_used) const { +bool Generation::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const { + size_t available = max_contiguous_available(); + bool res = (available >= max_promotion_in_bytes); if (PrintGC && Verbose) { - gclog_or_tty->print_cr("Generation::promotion_attempt_is_safe" - " contiguous_available: " SIZE_FORMAT - " promotion_in_bytes: " SIZE_FORMAT, - max_contiguous_available(), promotion_in_bytes); + gclog_or_tty->print_cr( + "Generation: promo attempt is%s safe: available("SIZE_FORMAT") %s max_promo("SIZE_FORMAT")", + res? "":" not", available, res? ">=":"<", + max_promotion_in_bytes); } - return max_contiguous_available() >= promotion_in_bytes; + return res; } // Ignores "ref" and calls allocate(). diff --git a/hotspot/src/share/vm/memory/generation.hpp b/hotspot/src/share/vm/memory/generation.hpp index af0bdf2b3f5..4398730a350 100644 --- a/hotspot/src/share/vm/memory/generation.hpp +++ b/hotspot/src/share/vm/memory/generation.hpp @@ -173,15 +173,11 @@ class Generation: public CHeapObj { // The largest number of contiguous free bytes in this or any higher generation. virtual size_t max_contiguous_available() const; - // Returns true if promotions of the specified amount can - // be attempted safely (without a vm failure). + // Returns true if promotions of the specified amount are + // likely to succeed without a promotion failure. // Promotion of the full amount is not guaranteed but - // can be attempted. - // younger_handles_promotion_failure - // is true if the younger generation handles a promotion - // failure. - virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes, - bool younger_handles_promotion_failure) const; + // might be attempted in the worst case. 
+ virtual bool promotion_attempt_is_safe(size_t max_promotion_in_bytes) const; // For a non-young generation, this interface can be used to inform a // generation that a promotion attempt into that generation failed. @@ -358,6 +354,16 @@ class Generation: public CHeapObj { return (full || should_allocate(word_size, is_tlab)); } + // Returns true if the collection is likely to be safely + // completed. Even if this method returns true, a collection + // may not be guaranteed to succeed, and the system should be + // able to safely unwind and recover from that failure, albeit + // at some additional cost. + virtual bool collection_attempt_is_safe() { + guarantee(false, "Are you sure you want to call this method?"); + return true; + } + // Perform a garbage collection. // If full is true attempt a full garbage collection of this generation. // Otherwise, attempting to (at least) free enough space to support an diff --git a/hotspot/src/share/vm/memory/tenuredGeneration.cpp b/hotspot/src/share/vm/memory/tenuredGeneration.cpp index 593cd6c025b..ea474825db0 100644 --- a/hotspot/src/share/vm/memory/tenuredGeneration.cpp +++ b/hotspot/src/share/vm/memory/tenuredGeneration.cpp @@ -419,29 +419,16 @@ void TenuredGeneration::retire_alloc_buffers_before_full_gc() {} void TenuredGeneration::verify_alloc_buffers_clean() {} #endif // SERIALGC -bool TenuredGeneration::promotion_attempt_is_safe( - size_t max_promotion_in_bytes, - bool younger_handles_promotion_failure) const { - - bool result = max_contiguous_available() >= max_promotion_in_bytes; - - if (younger_handles_promotion_failure && !result) { - result = max_contiguous_available() >= - (size_t) gc_stats()->avg_promoted()->padded_average(); - if (PrintGC && Verbose && result) { - gclog_or_tty->print_cr("TenuredGeneration::promotion_attempt_is_safe" - " contiguous_available: " SIZE_FORMAT - " avg_promoted: " SIZE_FORMAT, - max_contiguous_available(), - gc_stats()->avg_promoted()->padded_average()); - } - } else { - if (PrintGC && Verbose) { - gclog_or_tty->print_cr("TenuredGeneration::promotion_attempt_is_safe" - " contiguous_available: " SIZE_FORMAT - " promotion_in_bytes: " SIZE_FORMAT, - max_contiguous_available(), max_promotion_in_bytes); - } +bool TenuredGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const { + size_t available = max_contiguous_available(); + size_t av_promo = (size_t)gc_stats()->avg_promoted()->padded_average(); + bool res = (available >= av_promo) || (available >= max_promotion_in_bytes); + if (PrintGC && Verbose) { + gclog_or_tty->print_cr( + "Tenured: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT")," + "max_promo("SIZE_FORMAT")", + res? "":" not", available, res? 
">=":"<", + av_promo, max_promotion_in_bytes); } - return result; + return res; } diff --git a/hotspot/src/share/vm/memory/tenuredGeneration.hpp b/hotspot/src/share/vm/memory/tenuredGeneration.hpp index 82cc06fae44..3677867532f 100644 --- a/hotspot/src/share/vm/memory/tenuredGeneration.hpp +++ b/hotspot/src/share/vm/memory/tenuredGeneration.hpp @@ -101,8 +101,7 @@ class TenuredGeneration: public OneContigSpaceCardGeneration { virtual void update_gc_stats(int level, bool full); - virtual bool promotion_attempt_is_safe(size_t max_promoted_in_bytes, - bool younger_handles_promotion_failure) const; + virtual bool promotion_attempt_is_safe(size_t max_promoted_in_bytes) const; void verify_alloc_buffers_clean(); }; diff --git a/hotspot/src/share/vm/runtime/arguments.cpp b/hotspot/src/share/vm/runtime/arguments.cpp index 4b6d69f0566..4ef33935e32 100644 --- a/hotspot/src/share/vm/runtime/arguments.cpp +++ b/hotspot/src/share/vm/runtime/arguments.cpp @@ -185,6 +185,10 @@ static ObsoleteFlag obsolete_jvm_flags[] = { JDK_Version::jdk_update(6,18), JDK_Version::jdk(7) }, { "UseDepthFirstScavengeOrder", JDK_Version::jdk_update(6,22), JDK_Version::jdk(7) }, + { "HandlePromotionFailure", + JDK_Version::jdk_update(6,24), JDK_Version::jdk(8) }, + { "MaxLiveObjectEvacuationRatio", + JDK_Version::jdk_update(6,24), JDK_Version::jdk(8) }, { NULL, JDK_Version(0), JDK_Version(0) } }; @@ -1722,8 +1726,6 @@ bool Arguments::check_vm_args_consistency() { status = false; } - status = status && verify_percentage(MaxLiveObjectEvacuationRatio, - "MaxLiveObjectEvacuationRatio"); status = status && verify_percentage(AdaptiveSizePolicyWeight, "AdaptiveSizePolicyWeight"); status = status && verify_percentage(AdaptivePermSizeWeight, "AdaptivePermSizeWeight"); diff --git a/hotspot/src/share/vm/runtime/globals.hpp b/hotspot/src/share/vm/runtime/globals.hpp index 1fa26ce7fd6..00f16350436 100644 --- a/hotspot/src/share/vm/runtime/globals.hpp +++ b/hotspot/src/share/vm/runtime/globals.hpp @@ -1786,10 +1786,6 @@ class CommandLineFlags { notproduct(bool, GCALotAtAllSafepoints, false, \ "Enforce ScavengeALot/GCALot at all potential safepoints") \ \ - product(bool, HandlePromotionFailure, true, \ - "The youngest generation collection does not require " \ - "a guarantee of full promotion of all live objects.") \ - \ product(bool, PrintPromotionFailure, false, \ "Print additional diagnostic information following " \ " promotion failure") \ @@ -3003,9 +2999,6 @@ class CommandLineFlags { product(intx, NewRatio, 2, \ "Ratio of new/old generation sizes") \ \ - product(uintx, MaxLiveObjectEvacuationRatio, 100, \ - "Max percent of eden objects that will be live at scavenge") \ - \ product_pd(uintx, NewSizeThreadIncrease, \ "Additional size added to desired new generation size per " \ "non-daemon thread (in bytes)") \ From 9333ab239954e8e1a9a5b39ed0e2655abe1e0ca6 Mon Sep 17 00:00:00 2001 From: "Y. Srinivas Ramakrishna" Date: Thu, 28 Oct 2010 14:46:29 -0700 Subject: [PATCH 7/7] 6995045: assert(!gch->incremental_collection_failed()) failed: Error, defNewGeneration.cpp:827 Sharpened an assert, introduced in 6896603, that intended to check that the incremental_collection_failed() predicate on the heap was being reset "soon enough". 
Reviewed-by: jmasa --- .../src/share/vm/memory/defNewGeneration.cpp | 21 +++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/hotspot/src/share/vm/memory/defNewGeneration.cpp b/hotspot/src/share/vm/memory/defNewGeneration.cpp index 6976e1660f2..19f265a6729 100644 --- a/hotspot/src/share/vm/memory/defNewGeneration.cpp +++ b/hotspot/src/share/vm/memory/defNewGeneration.cpp @@ -809,13 +809,16 @@ bool DefNewGeneration::collection_attempt_is_safe() { } void DefNewGeneration::gc_epilogue(bool full) { + DEBUG_ONLY(static bool seen_incremental_collection_failed = false;) + + assert(!GC_locker::is_active(), "We should not be executing here"); // Check if the heap is approaching full after a collection has // been done. Generally the young generation is empty at // a minimum at the end of a collection. If it is not, then // the heap is approaching full. GenCollectedHeap* gch = GenCollectedHeap::heap(); if (full) { - assert(!GC_locker::is_active(), "We should not be executing here"); + DEBUG_ONLY(seen_incremental_collection_failed = false;) if (!collection_attempt_is_safe()) { gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state set_should_allocate_from_space(); // we seem to be running out of space @@ -824,7 +827,21 @@ void DefNewGeneration::gc_epilogue(bool full) { clear_should_allocate_from_space(); // if set } } else { - assert(!gch->incremental_collection_failed(), "Error"); +#ifdef ASSERT + // It is possible that incremental_collection_failed() == true + // here, because an attempted scavenge did not succeed. The policy + // is normally expected to cause a full collection which should + // clear that condition, so we should not be here twice in a row + // with incremental_collection_failed() == true without having done + // a full collection in between. + if (!seen_incremental_collection_failed && + gch->incremental_collection_failed()) { + seen_incremental_collection_failed = true; + } else if (seen_incremental_collection_failed) { + assert(!gch->incremental_collection_failed(), "Twice in a row"); + seen_incremental_collection_failed = false; + } +#endif // ASSERT } if (ZapUnusedHeapArea) {
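
The debug-only bookkeeping introduced in this last change can be pictured with the following sketch. It is not HotSpot code; the class and identifiers are invented for the illustration. The epilogue tolerates seeing the failure flag once, because the full collection that clears it may not have happened yet, but asserts if the flag is still set on the next non-full epilogue as well.

    #include <cassert>

    class EpilogueChecker {
      bool _seen_failed = false;                      // mirrors the debug-only static
    public:
      void non_full_epilogue(bool incremental_collection_failed) {
        if (!_seen_failed && incremental_collection_failed) {
          _seen_failed = true;                        // first sighting is tolerated
        } else if (_seen_failed) {
          // By now a full collection should have cleared the condition.
          assert(!incremental_collection_failed && "set twice in a row");
          _seen_failed = false;
        }
      }
      void full_epilogue() { _seen_failed = false; }  // a full gc resets the tracking
    };

    int main() {
      EpilogueChecker c;
      c.non_full_epilogue(true);    // scavenge failed, flag still set: allowed once
      c.non_full_epilogue(false);   // condition cleared in the meantime: fine
      return 0;
    }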