2008-06-05 15:57:56 -07:00
/*
2014-01-31 09:58:06 +01:00
* Copyright ( c ) 2001 , 2014 , Oracle and / or its affiliates . All rights reserved .
2008-06-05 15:57:56 -07:00
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER .
*
* This code is free software ; you can redistribute it and / or modify it
* under the terms of the GNU General Public License version 2 only , as
* published by the Free Software Foundation .
*
* This code is distributed in the hope that it will be useful , but WITHOUT
* ANY WARRANTY ; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE . See the GNU General Public License
* version 2 for more details ( a copy is included in the LICENSE file that
* accompanied this code ) .
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work ; if not , write to the Free Software Foundation ,
* Inc . , 51 Franklin St , Fifth Floor , Boston , MA 02110 - 1301 USA .
*
2010-05-27 19:08:38 -07:00
* Please contact Oracle , 500 Oracle Parkway , Redwood Shores , CA 94065 USA
* or visit www . oracle . com if you need additional information or have any
* questions .
2008-06-05 15:57:56 -07:00
*
*/
2014-05-09 16:50:54 -04:00
# if !defined(__clang_major__) && defined(__GNUC__)
// FIXME, formats have issues. Disable this macro definition, compile, and study warnings for more information.
# define ATTRIBUTE_PRINTF(x,y)
# endif
2010-11-23 13:22:55 -08:00
# include "precompiled.hpp"
2014-05-07 14:16:45 -05:00
# include "classfile/stringTable.hpp"
2013-08-15 10:52:18 +02:00
# include "code/codeCache.hpp"
2010-11-23 13:22:55 -08:00
# include "code/icBuffer.hpp"
# include "gc_implementation/g1/bufferingOopClosure.hpp"
# include "gc_implementation/g1/concurrentG1Refine.hpp"
# include "gc_implementation/g1/concurrentG1RefineThread.hpp"
# include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
2011-03-30 10:26:59 -04:00
# include "gc_implementation/g1/g1AllocRegion.inline.hpp"
2010-11-23 13:22:55 -08:00
# include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
# include "gc_implementation/g1/g1CollectorPolicy.hpp"
2011-09-07 12:21:23 -04:00
# include "gc_implementation/g1/g1ErgoVerbose.hpp"
2011-12-23 11:14:18 -08:00
# include "gc_implementation/g1/g1EvacFailure.hpp"
2012-07-11 22:47:38 +02:00
# include "gc_implementation/g1/g1GCPhaseTimes.hpp"
2012-04-13 01:59:38 +02:00
# include "gc_implementation/g1/g1Log.hpp"
2010-11-23 13:22:55 -08:00
# include "gc_implementation/g1/g1MarkSweep.hpp"
# include "gc_implementation/g1/g1OopClosures.inline.hpp"
2014-06-26 15:45:07 +02:00
# include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
2014-08-19 14:09:10 +02:00
# include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
2010-11-23 13:22:55 -08:00
# include "gc_implementation/g1/g1RemSet.inline.hpp"
2014-03-18 19:07:22 +01:00
# include "gc_implementation/g1/g1StringDedup.hpp"
2013-06-10 11:30:51 +02:00
# include "gc_implementation/g1/g1YCTypes.hpp"
2012-01-10 18:58:13 -05:00
# include "gc_implementation/g1/heapRegion.inline.hpp"
2010-11-23 13:22:55 -08:00
# include "gc_implementation/g1/heapRegionRemSet.hpp"
2014-08-18 16:10:44 +02:00
# include "gc_implementation/g1/heapRegionSet.inline.hpp"
2010-11-23 13:22:55 -08:00
# include "gc_implementation/g1/vm_operations_g1.hpp"
2013-06-10 11:30:51 +02:00
# include "gc_implementation/shared/gcHeapSummary.hpp"
# include "gc_implementation/shared/gcTimer.hpp"
# include "gc_implementation/shared/gcTrace.hpp"
# include "gc_implementation/shared/gcTraceTime.hpp"
2010-11-23 13:22:55 -08:00
# include "gc_implementation/shared/isGCActiveMark.hpp"
2014-07-07 10:12:40 +02:00
# include "memory/allocation.hpp"
2010-11-23 13:22:55 -08:00
# include "memory/gcLocker.inline.hpp"
# include "memory/generationSpec.hpp"
2014-01-20 11:47:53 +01:00
# include "memory/iterator.hpp"
2011-09-22 10:57:37 -07:00
# include "memory/referenceProcessor.hpp"
2010-11-23 13:22:55 -08:00
# include "oops/oop.inline.hpp"
# include "oops/oop.pcgc.inline.hpp"
2014-06-04 11:56:44 +02:00
# include "runtime/atomic.inline.hpp"
2014-04-29 15:17:27 +02:00
# include "runtime/orderAccess.inline.hpp"
2010-11-23 13:22:55 -08:00
# include "runtime/vmThread.hpp"
2014-04-15 20:46:23 +02:00
# include "utilities/globalDefinitions.hpp"
2008-06-05 15:57:56 -07:00
2009-07-30 16:22:58 -04:00
size_t G1CollectedHeap : : _humongous_object_threshold_in_words = 0 ;
2008-06-05 15:57:56 -07:00
// turn it on so that the contents of the young list (scan-only /
// to-be-collected) are printed at "strategic" points before / during
// / after the collection --- this is useful for debugging
2010-04-22 10:02:38 -07:00
# define YOUNG_LIST_VERBOSE 0
2008-06-05 15:57:56 -07:00
// CURRENT STATUS
// This file is under construction. Search for "FIXME".
// INVARIANTS/NOTES
//
// All allocation activity covered by the G1CollectedHeap interface is
2010-08-24 17:24:33 -04:00
// serialized by acquiring the HeapLock. This happens in mem_allocate
// and allocate_new_tlab, which are the "entry" points to the
// allocation code from the rest of the JVM. (Note that this does not
// apply to TLAB allocation, which is not part of this interface: it
// is done by clients of this interface.)
2008-06-05 15:57:56 -07:00
2011-08-09 10:16:01 -07:00
// Notes on implementation of parallelism in different tasks.
//
// G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism.
// The number of GC workers is passed to heap_region_par_iterate_chunked().
// It does use run_task() which sets _n_workers in the task.
2014-07-07 10:12:40 +02:00
// G1ParTask executes g1_process_roots() ->
// SharedHeap::process_roots() which calls eventually to
2011-08-09 10:16:01 -07:00
// CardTableModRefBS::par_non_clean_card_iterate_work() which uses
2014-07-07 10:12:40 +02:00
// SequentialSubTasksDone. SharedHeap::process_roots() also
2011-08-09 10:16:01 -07:00
// directly uses SubTasksDone (_process_strong_tasks field in SharedHeap).
//
2008-06-05 15:57:56 -07:00
// Local to this file.
class RefineCardTableEntryClosure : public CardTableEntryClosure {
bool _concurrent ;
public :
2014-04-16 16:46:58 +02:00
RefineCardTableEntryClosure ( ) : _concurrent ( true ) { }
2014-04-03 17:49:31 +04:00
bool do_card_ptr ( jbyte * card_ptr , uint worker_i ) {
2014-04-16 16:46:58 +02:00
bool oops_into_cset = G1CollectedHeap : : heap ( ) - > g1_rem_set ( ) - > refine_card ( card_ptr , worker_i , false ) ;
2010-08-02 12:51:43 -07:00
// This path is executed by the concurrent refine or mutator threads,
// concurrently, and so we do not care if card_ptr contains references
// that point into the collection set.
assert ( ! oops_into_cset , " should be " ) ;
2014-04-11 12:29:24 +02:00
if ( _concurrent & & SuspendibleThreadSet : : should_yield ( ) ) {
2008-06-05 15:57:56 -07:00
// Caller will actually yield.
return false ;
}
// Otherwise, we finished successfully; return true.
return true ;
}
2014-04-16 16:46:58 +02:00
2008-06-05 15:57:56 -07:00
void set_concurrent ( bool b ) { _concurrent = b ; }
} ;
class ClearLoggedCardTableEntryClosure : public CardTableEntryClosure {
2014-04-16 16:47:02 +02:00
size_t _num_processed ;
2008-06-05 15:57:56 -07:00
CardTableModRefBS * _ctbs ;
int _histo [ 256 ] ;
2014-04-16 16:47:02 +02:00
public :
2008-06-05 15:57:56 -07:00
ClearLoggedCardTableEntryClosure ( ) :
2014-04-16 16:47:02 +02:00
_num_processed ( 0 ) , _ctbs ( G1CollectedHeap : : heap ( ) - > g1_barrier_set ( ) )
2008-06-05 15:57:56 -07:00
{
for ( int i = 0 ; i < 256 ; i + + ) _histo [ i ] = 0 ;
}
2014-04-16 16:47:02 +02:00
2014-04-03 17:49:31 +04:00
bool do_card_ptr ( jbyte * card_ptr , uint worker_i ) {
2014-04-16 16:47:02 +02:00
unsigned char * ujb = ( unsigned char * ) card_ptr ;
int ind = ( int ) ( * ujb ) ;
_histo [ ind ] + + ;
* card_ptr = ( jbyte ) CardTableModRefBS : : clean_card_val ( ) ;
_num_processed + + ;
2008-06-05 15:57:56 -07:00
return true ;
}
2014-04-16 16:47:02 +02:00
size_t num_processed ( ) { return _num_processed ; }
2008-06-05 15:57:56 -07:00
void print_histo ( ) {
gclog_or_tty - > print_cr ( " Card table value histogram: " ) ;
for ( int i = 0 ; i < 256 ; i + + ) {
if ( _histo [ i ] ! = 0 ) {
gclog_or_tty - > print_cr ( " %d: %d " , i , _histo [ i ] ) ;
}
}
}
} ;
2014-04-16 16:47:02 +02:00
class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
private :
size_t _num_processed ;
public :
RedirtyLoggedCardTableEntryClosure ( ) : CardTableEntryClosure ( ) , _num_processed ( 0 ) { }
2013-09-24 14:46:29 +02:00
2014-04-03 17:49:31 +04:00
bool do_card_ptr ( jbyte * card_ptr , uint worker_i ) {
2014-04-16 16:47:02 +02:00
* card_ptr = CardTableModRefBS : : dirty_card_val ( ) ;
_num_processed + + ;
2008-06-05 15:57:56 -07:00
return true ;
}
2014-04-16 16:47:02 +02:00
size_t num_processed ( ) const { return _num_processed ; }
2008-06-05 15:57:56 -07:00
} ;
2012-01-25 12:58:23 -05:00
YoungList : : YoungList ( G1CollectedHeap * g1h ) :
_g1h ( g1h ) , _head ( NULL ) , _length ( 0 ) , _last_sampled_rs_lengths ( 0 ) ,
_survivor_head ( NULL ) , _survivor_tail ( NULL ) , _survivor_length ( 0 ) {
guarantee ( check_list_empty ( false ) , " just making sure... " ) ;
2008-06-05 15:57:56 -07:00
}
void YoungList : : push_region ( HeapRegion * hr ) {
assert ( ! hr - > is_young ( ) , " should not already be young " ) ;
assert ( hr - > get_next_young_region ( ) = = NULL , " cause it should! " ) ;
hr - > set_next_young_region ( _head ) ;
_head = hr ;
2011-11-18 12:52:27 -05:00
_g1h - > g1_policy ( ) - > set_region_eden ( hr , ( int ) _length ) ;
2008-06-05 15:57:56 -07:00
+ + _length ;
}
void YoungList : : add_survivor_region ( HeapRegion * hr ) {
2009-02-06 01:38:50 +03:00
assert ( hr - > is_survivor ( ) , " should be flagged as survivor region " ) ;
2008-06-05 15:57:56 -07:00
assert ( hr - > get_next_young_region ( ) = = NULL , " cause it should! " ) ;
hr - > set_next_young_region ( _survivor_head ) ;
if ( _survivor_head = = NULL ) {
2009-02-06 01:38:50 +03:00
_survivor_tail = hr ;
2008-06-05 15:57:56 -07:00
}
_survivor_head = hr ;
+ + _survivor_length ;
}
void YoungList : : empty_list ( HeapRegion * list ) {
while ( list ! = NULL ) {
HeapRegion * next = list - > get_next_young_region ( ) ;
list - > set_next_young_region ( NULL ) ;
list - > uninstall_surv_rate_group ( ) ;
list - > set_not_young ( ) ;
list = next ;
}
}
void YoungList : : empty_list ( ) {
assert ( check_list_well_formed ( ) , " young list should be well formed " ) ;
empty_list ( _head ) ;
_head = NULL ;
_length = 0 ;
empty_list ( _survivor_head ) ;
_survivor_head = NULL ;
2009-02-06 01:38:50 +03:00
_survivor_tail = NULL ;
2008-06-05 15:57:56 -07:00
_survivor_length = 0 ;
_last_sampled_rs_lengths = 0 ;
assert ( check_list_empty ( false ) , " just making sure... " ) ;
}
bool YoungList : : check_list_well_formed ( ) {
bool ret = true ;
2012-04-18 07:21:15 -04:00
uint length = 0 ;
2008-06-05 15:57:56 -07:00
HeapRegion * curr = _head ;
HeapRegion * last = NULL ;
while ( curr ! = NULL ) {
2010-04-22 10:02:38 -07:00
if ( ! curr - > is_young ( ) ) {
2008-06-05 15:57:56 -07:00
gclog_or_tty - > print_cr ( " ### YOUNG REGION " PTR_FORMAT " - " PTR_FORMAT " "
2010-04-22 10:02:38 -07:00
" incorrectly tagged (y: %d, surv: %d) " ,
2008-06-05 15:57:56 -07:00
curr - > bottom ( ) , curr - > end ( ) ,
2010-04-22 10:02:38 -07:00
curr - > is_young ( ) , curr - > is_survivor ( ) ) ;
2008-06-05 15:57:56 -07:00
ret = false ;
}
+ + length ;
last = curr ;
curr = curr - > get_next_young_region ( ) ;
}
ret = ret & & ( length = = _length ) ;
if ( ! ret ) {
gclog_or_tty - > print_cr ( " ### YOUNG LIST seems not well formed! " ) ;
2012-04-18 07:21:15 -04:00
gclog_or_tty - > print_cr ( " ### list has %u entries, _length is %u " ,
2008-06-05 15:57:56 -07:00
length , _length ) ;
}
2010-04-22 10:02:38 -07:00
return ret ;
2008-06-05 15:57:56 -07:00
}
2010-04-22 10:02:38 -07:00
bool YoungList : : check_list_empty ( bool check_sample ) {
2008-06-05 15:57:56 -07:00
bool ret = true ;
if ( _length ! = 0 ) {
2012-04-18 07:21:15 -04:00
gclog_or_tty - > print_cr ( " ### YOUNG LIST should have 0 length, not %u " ,
2008-06-05 15:57:56 -07:00
_length ) ;
ret = false ;
}
if ( check_sample & & _last_sampled_rs_lengths ! = 0 ) {
gclog_or_tty - > print_cr ( " ### YOUNG LIST has non-zero last sampled RS lengths " ) ;
ret = false ;
}
if ( _head ! = NULL ) {
gclog_or_tty - > print_cr ( " ### YOUNG LIST does not have a NULL head " ) ;
ret = false ;
}
if ( ! ret ) {
gclog_or_tty - > print_cr ( " ### YOUNG LIST does not seem empty " ) ;
}
2010-04-22 10:02:38 -07:00
return ret ;
2008-06-05 15:57:56 -07:00
}
void
YoungList : : rs_length_sampling_init ( ) {
_sampled_rs_lengths = 0 ;
_curr = _head ;
}
bool
YoungList : : rs_length_sampling_more ( ) {
return _curr ! = NULL ;
}
void
YoungList : : rs_length_sampling_next ( ) {
assert ( _curr ! = NULL , " invariant " ) ;
2010-04-22 10:02:38 -07:00
size_t rs_length = _curr - > rem_set ( ) - > occupied ( ) ;
_sampled_rs_lengths + = rs_length ;
// The current region may not yet have been added to the
// incremental collection set (it gets added when it is
// retired as the current allocation region).
if ( _curr - > in_collection_set ( ) ) {
// Update the collection set policy information for this region
_g1h - > g1_policy ( ) - > update_incremental_cset_info ( _curr , rs_length ) ;
}
2008-06-05 15:57:56 -07:00
_curr = _curr - > get_next_young_region ( ) ;
if ( _curr = = NULL ) {
_last_sampled_rs_lengths = _sampled_rs_lengths ;
// gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths);
}
}
void
YoungList : : reset_auxilary_lists ( ) {
guarantee ( is_empty ( ) , " young list should be empty " ) ;
assert ( check_list_well_formed ( ) , " young list should be well formed " ) ;
// Add survivor regions to SurvRateGroup.
_g1h - > g1_policy ( ) - > note_start_adding_survivor_regions ( ) ;
2009-02-06 01:38:50 +03:00
_g1h - > g1_policy ( ) - > finished_recalculating_age_indexes ( true /* is_survivors */ ) ;
2010-04-22 10:02:38 -07:00
2011-11-18 12:52:27 -05:00
int young_index_in_cset = 0 ;
2008-06-05 15:57:56 -07:00
for ( HeapRegion * curr = _survivor_head ;
curr ! = NULL ;
curr = curr - > get_next_young_region ( ) ) {
2011-11-18 12:52:27 -05:00
_g1h - > g1_policy ( ) - > set_region_survivor ( curr , young_index_in_cset ) ;
2010-04-22 10:02:38 -07:00
// The region is a non-empty survivor so let's add it to
// the incremental collection set for the next evacuation
// pause.
_g1h - > g1_policy ( ) - > add_region_to_incremental_cset_rhs ( curr ) ;
2011-11-18 12:52:27 -05:00
young_index_in_cset + = 1 ;
2008-06-05 15:57:56 -07:00
}
2012-04-18 07:21:15 -04:00
assert ( ( uint ) young_index_in_cset = = _survivor_length , " post-condition " ) ;
2008-06-05 15:57:56 -07:00
_g1h - > g1_policy ( ) - > note_stop_adding_survivor_regions ( ) ;
2010-04-22 10:02:38 -07:00
_head = _survivor_head ;
_length = _survivor_length ;
2008-06-05 15:57:56 -07:00
if ( _survivor_head ! = NULL ) {
2010-04-22 10:02:38 -07:00
assert ( _survivor_tail ! = NULL , " cause it shouldn't be " ) ;
assert ( _survivor_length > 0 , " invariant " ) ;
_survivor_tail - > set_next_young_region ( NULL ) ;
2008-06-05 15:57:56 -07:00
}
2010-04-22 10:02:38 -07:00
// Don't clear the survivor list handles until the start of
// the next evacuation pause - we need it in order to re-tag
// the survivor regions from this evacuation pause as 'young'
// at the start of the next.
2008-06-05 15:57:56 -07:00
2009-02-06 01:38:50 +03:00
_g1h - > g1_policy ( ) - > finished_recalculating_age_indexes ( false /* is_survivors */ ) ;
2008-06-05 15:57:56 -07:00
assert ( check_list_well_formed ( ) , " young list should be well formed " ) ;
}
void YoungList : : print ( ) {
2010-04-22 10:02:38 -07:00
HeapRegion * lists [ ] = { _head , _survivor_head } ;
const char * names [ ] = { " YOUNG " , " SURVIVOR " } ;
2008-06-05 15:57:56 -07:00
for ( unsigned int list = 0 ; list < ARRAY_SIZE ( lists ) ; + + list ) {
gclog_or_tty - > print_cr ( " %s LIST CONTENTS " , names [ list ] ) ;
HeapRegion * curr = lists [ list ] ;
if ( curr = = NULL )
gclog_or_tty - > print_cr ( " empty " ) ;
while ( curr ! = NULL ) {
2012-04-25 10:23:12 -07:00
gclog_or_tty - > print_cr ( " " HR_FORMAT " , P: " PTR_FORMAT " N: " PTR_FORMAT " , age: %4d " ,
HR_FORMAT_PARAMS ( curr ) ,
2008-06-05 15:57:56 -07:00
curr - > prev_top_at_mark_start ( ) ,
curr - > next_top_at_mark_start ( ) ,
2012-04-25 10:23:12 -07:00
curr - > age_in_surv_rate_group_cond ( ) ) ;
2008-06-05 15:57:56 -07:00
curr = curr - > get_next_young_region ( ) ;
}
}
2014-05-09 16:50:54 -04:00
gclog_or_tty - > cr ( ) ;
2008-06-05 15:57:56 -07:00
}
2014-08-19 14:09:10 +02:00
void G1RegionMappingChangedListener : : reset_from_card_cache ( uint start_idx , size_t num_regions ) {
OtherRegionsTable : : invalidate ( start_idx , num_regions ) ;
}
void G1RegionMappingChangedListener : : on_commit ( uint start_idx , size_t num_regions ) {
reset_from_card_cache ( start_idx , num_regions ) ;
}
2009-05-19 04:05:31 -07:00
void G1CollectedHeap : : push_dirty_cards_region ( HeapRegion * hr )
{
// Claim the right to put the region on the dirty cards region list
// by installing a self pointer.
HeapRegion * next = hr - > get_next_dirty_cards_region ( ) ;
if ( next = = NULL ) {
HeapRegion * res = ( HeapRegion * )
Atomic : : cmpxchg_ptr ( hr , hr - > next_dirty_cards_region_addr ( ) ,
NULL ) ;
if ( res = = NULL ) {
HeapRegion * head ;
do {
// Put the region to the dirty cards region list.
head = _dirty_cards_region_list ;
next = ( HeapRegion * )
Atomic : : cmpxchg_ptr ( hr , & _dirty_cards_region_list , head ) ;
if ( next = = head ) {
assert ( hr - > get_next_dirty_cards_region ( ) = = hr ,
" hr->get_next_dirty_cards_region() != hr " ) ;
if ( next = = NULL ) {
// The last region in the list points to itself.
hr - > set_next_dirty_cards_region ( hr ) ;
} else {
hr - > set_next_dirty_cards_region ( next ) ;
}
}
} while ( next ! = head ) ;
}
}
}
HeapRegion * G1CollectedHeap : : pop_dirty_cards_region ( )
{
HeapRegion * head ;
HeapRegion * hr ;
do {
head = _dirty_cards_region_list ;
if ( head = = NULL ) {
return NULL ;
}
HeapRegion * new_head = head - > get_next_dirty_cards_region ( ) ;
if ( head = = new_head ) {
// The last region.
new_head = NULL ;
}
hr = ( HeapRegion * ) Atomic : : cmpxchg_ptr ( new_head , & _dirty_cards_region_list ,
head ) ;
} while ( hr ! = head ) ;
assert ( hr ! = NULL , " invariant " ) ;
hr - > set_next_dirty_cards_region ( NULL ) ;
return hr ;
}
2011-05-03 10:30:34 -07:00
# ifdef ASSERT
// A region is added to the collection set as it is retired
// so an address p can point to a region which will be in the
// collection set but has not yet been retired. This method
// therefore is only accurate during a GC pause after all
// regions have been retired. It is used for debugging
// to check if an nmethod has references to objects that can
// be move during a partial collection. Though it can be
// inaccurate, it is sufficient for G1 because the conservative
// implementation of is_scavengable() for G1 will indicate that
// all nmethods must be scanned during a partial collection.
bool G1CollectedHeap : : is_in_partial_collection ( const void * p ) {
2014-04-17 15:57:02 +02:00
if ( p = = NULL ) {
return false ;
}
return heap_region_containing ( p ) - > in_collection_set ( ) ;
2011-05-03 10:30:34 -07:00
}
# endif
// Returns true if the reference points to an object that
2013-06-10 11:30:51 +02:00
// can move in an incremental collection.
2011-05-03 10:30:34 -07:00
bool G1CollectedHeap : : is_scavengable ( const void * p ) {
HeapRegion * hr = heap_region_containing ( p ) ;
2014-04-17 15:57:02 +02:00
return ! hr - > isHumongous ( ) ;
2011-05-03 10:30:34 -07:00
}
2008-06-05 15:57:56 -07:00
void G1CollectedHeap : : check_ct_logs_at_safepoint ( ) {
DirtyCardQueueSet & dcqs = JavaThread : : dirty_card_queue_set ( ) ;
2013-09-24 14:46:29 +02:00
CardTableModRefBS * ct_bs = g1_barrier_set ( ) ;
2008-06-05 15:57:56 -07:00
// Count the dirty cards at the start.
CountNonCleanMemRegionClosure count1 ( this ) ;
ct_bs - > mod_card_iterate ( & count1 ) ;
int orig_count = count1 . n ( ) ;
// First clear the logged cards.
ClearLoggedCardTableEntryClosure clear ;
2014-04-16 16:46:58 +02:00
dcqs . apply_closure_to_all_completed_buffers ( & clear ) ;
dcqs . iterate_closure_all_threads ( & clear , false ) ;
2008-06-05 15:57:56 -07:00
clear . print_histo ( ) ;
// Now ensure that there's no dirty cards.
CountNonCleanMemRegionClosure count2 ( this ) ;
ct_bs - > mod_card_iterate ( & count2 ) ;
if ( count2 . n ( ) ! = 0 ) {
gclog_or_tty - > print_cr ( " Card table has %d entries; %d originally " ,
count2 . n ( ) , orig_count ) ;
}
guarantee ( count2 . n ( ) = = 0 , " Card table should be clean. " ) ;
RedirtyLoggedCardTableEntryClosure redirty ;
2014-04-16 16:46:58 +02:00
dcqs . apply_closure_to_all_completed_buffers ( & redirty ) ;
dcqs . iterate_closure_all_threads ( & redirty , false ) ;
2008-06-05 15:57:56 -07:00
gclog_or_tty - > print_cr ( " Log entries = %d, dirty cards = %d. " ,
2014-04-16 16:47:02 +02:00
clear . num_processed ( ) , orig_count ) ;
guarantee ( redirty . num_processed ( ) = = clear . num_processed ( ) ,
err_msg ( " Redirtied " SIZE_FORMAT " cards, bug cleared " SIZE_FORMAT ,
redirty . num_processed ( ) , clear . num_processed ( ) ) ) ;
2008-06-05 15:57:56 -07:00
CountNonCleanMemRegionClosure count3 ( this ) ;
ct_bs - > mod_card_iterate ( & count3 ) ;
if ( count3 . n ( ) ! = orig_count ) {
gclog_or_tty - > print_cr ( " Should have restored them all: orig = %d, final = %d. " ,
orig_count , count3 . n ( ) ) ;
guarantee ( count3 . n ( ) > = orig_count , " Should have restored them all. " ) ;
}
}
// Private class members.
G1CollectedHeap * G1CollectedHeap : : _g1h ;
// Private methods.
2011-01-19 19:30:42 -05:00
HeapRegion *
2014-02-28 15:27:09 +01:00
G1CollectedHeap : : new_region_try_secondary_free_list ( bool is_old ) {
2011-01-19 19:30:42 -05:00
MutexLockerEx x ( SecondaryFreeList_lock , Mutex : : _no_safepoint_check_flag ) ;
while ( ! _secondary_free_list . is_empty ( ) | | free_regions_coming ( ) ) {
if ( ! _secondary_free_list . is_empty ( ) ) {
if ( G1ConcRegionFreeingVerbose ) {
gclog_or_tty - > print_cr ( " G1ConcRegionFreeing [region alloc] : "
2012-04-18 07:21:15 -04:00
" secondary_free_list has %u entries " ,
2011-01-19 19:30:42 -05:00
_secondary_free_list . length ( ) ) ;
}
// It looks as if there are free regions available on the
// secondary_free_list. Let's move them to the free_list and try
// again to allocate from it.
append_secondary_free_list ( ) ;
2014-08-26 09:36:53 +02:00
assert ( _hrm . num_free_regions ( ) > 0 , " if the secondary_free_list was not "
2011-01-19 19:30:42 -05:00
" empty we should have moved at least one entry to the free_list " ) ;
2014-08-26 09:36:53 +02:00
HeapRegion * res = _hrm . allocate_free_region ( is_old ) ;
2011-01-19 19:30:42 -05:00
if ( G1ConcRegionFreeingVerbose ) {
gclog_or_tty - > print_cr ( " G1ConcRegionFreeing [region alloc] : "
" allocated " HR_FORMAT " from secondary_free_list " ,
HR_FORMAT_PARAMS ( res ) ) ;
}
return res ;
}
2013-06-10 11:30:51 +02:00
// Wait here until we get notified either when (a) there are no
2011-01-19 19:30:42 -05:00
// more free regions coming or (b) some regions have been moved on
// the secondary_free_list.
SecondaryFreeList_lock - > wait ( Mutex : : _no_safepoint_check_flag ) ;
}
if ( G1ConcRegionFreeingVerbose ) {
gclog_or_tty - > print_cr ( " G1ConcRegionFreeing [region alloc] : "
" could not allocate from secondary_free_list " ) ;
}
return NULL ;
}
2008-06-05 15:57:56 -07:00
2014-02-28 15:27:09 +01:00
HeapRegion * G1CollectedHeap : : new_region ( size_t word_size , bool is_old , bool do_expand ) {
2011-10-05 08:44:10 -07:00
assert ( ! isHumongous ( word_size ) | | word_size < = HeapRegion : : GrainWords ,
2011-01-19 19:30:42 -05:00
" the only time we use this to allocate a humongous region is "
" when we are allocating a single humongous region " ) ;
2008-06-05 15:57:56 -07:00
2011-01-19 19:30:42 -05:00
HeapRegion * res ;
if ( G1StressConcRegionFreeing ) {
if ( ! _secondary_free_list . is_empty ( ) ) {
if ( G1ConcRegionFreeingVerbose ) {
gclog_or_tty - > print_cr ( " G1ConcRegionFreeing [region alloc] : "
" forced to look at the secondary_free_list " ) ;
}
2014-02-28 15:27:09 +01:00
res = new_region_try_secondary_free_list ( is_old ) ;
2011-01-19 19:30:42 -05:00
if ( res ! = NULL ) {
return res ;
}
}
}
2014-02-28 15:27:09 +01:00
2014-08-26 09:36:53 +02:00
res = _hrm . allocate_free_region ( is_old ) ;
2014-02-28 15:27:09 +01:00
2011-01-19 19:30:42 -05:00
if ( res = = NULL ) {
if ( G1ConcRegionFreeingVerbose ) {
gclog_or_tty - > print_cr ( " G1ConcRegionFreeing [region alloc] : "
" res == NULL, trying the secondary_free_list " ) ;
}
2014-02-28 15:27:09 +01:00
res = new_region_try_secondary_free_list ( is_old ) ;
2011-01-19 19:30:42 -05:00
}
2012-01-05 05:54:01 -05:00
if ( res = = NULL & & do_expand & & _expand_heap_after_alloc_failure ) {
// Currently, only attempts to allocate GC alloc regions set
// do_expand to true. So, we should only reach here during a
// safepoint. If this assumption changes we might have to
// reconsider the use of _expand_heap_after_alloc_failure.
assert ( SafepointSynchronize : : is_at_safepoint ( ) , " invariant " ) ;
2011-09-07 12:21:23 -04:00
ergo_verbose1 ( ErgoHeapSizing ,
" attempt heap expansion " ,
ergo_format_reason ( " region allocation request failed " )
ergo_format_byte ( " allocation request " ) ,
word_size * HeapWordSize ) ;
2011-02-02 10:41:20 -08:00
if ( expand ( word_size * HeapWordSize ) ) {
2012-01-05 05:54:01 -05:00
// Given that expand() succeeded in expanding the heap, and we
// always expand the heap by an amount aligned to the heap
2014-02-28 15:27:09 +01:00
// region size, the free list should in theory not be empty.
2014-08-18 16:10:44 +02:00
// In either case allocate_free_region() will check for NULL.
2014-08-26 09:36:53 +02:00
res = _hrm . allocate_free_region ( is_old ) ;
2012-01-05 05:54:01 -05:00
} else {
_expand_heap_after_alloc_failure = false ;
2011-02-02 10:41:20 -08:00
}
2008-06-05 15:57:56 -07:00
}
return res ;
}
2011-03-04 17:13:19 -05:00
HeapWord *
2012-04-18 07:21:15 -04:00
G1CollectedHeap : : humongous_obj_allocate_initialize_regions ( uint first ,
uint num_regions ,
2011-03-04 17:13:19 -05:00
size_t word_size ) {
2014-08-26 09:36:53 +02:00
assert ( first ! = G1_NO_HRM_INDEX , " pre-condition " ) ;
2011-03-04 17:13:19 -05:00
assert ( isHumongous ( word_size ) , " word_size should be humongous " ) ;
assert ( num_regions * HeapRegion : : GrainWords > = word_size , " pre-condition " ) ;
// Index of last region in the series + 1.
2012-04-18 07:21:15 -04:00
uint last = first + num_regions ;
2011-03-04 17:13:19 -05:00
// We need to initialize the region(s) we just discovered. This is
// a bit tricky given that it can happen concurrently with
// refinement threads refining cards on these regions and
// potentially wanting to refine the BOT as they are scanning
// those cards (this can happen shortly after a cleanup; see CR
// 6991377). So we have to set up the region(s) carefully and in
// a specific order.
// The word size sum of all the regions we will allocate.
2012-04-18 07:21:15 -04:00
size_t word_size_sum = ( size_t ) num_regions * HeapRegion : : GrainWords ;
2011-03-04 17:13:19 -05:00
assert ( word_size < = word_size_sum , " sanity " ) ;
// This will be the "starts humongous" region.
7045330: G1: Simplify/fix the HeapRegionSeq class
7042285: G1: native memory leak during humongous object allocation
6804436: G1: heap region indices should be size_t
A series of fixes and improvements to the HeapRegionSeq class: a) replace the _regions growable array with a standard C array, b) avoid de-allocating / re-allocating HeapRegion instances when the heap shrinks / grows (fix for 7042285), c) introduce fast method to map address to HeapRegion via a "biased" array pointer, d) embed the _hrs object in G1CollectedHeap, instead of pointing to it via an indirection, e) assume that all the regions added to the HeapRegionSeq instance are contiguous, f) replace int's with size_t's for indexes (and expand that to HeapRegion as part of 6804436), g) remove unnecessary / unused methods, h) rename a couple of fields (_alloc_search_start and _seq_bottom), i) fix iterate_from() not to always start from index 0 irrespective of the region passed to it, j) add a verification method to check the HeapRegionSeq assumptions, k) always call the wrappers for _hrs.iterate(), _hrs_length(), and _hrs.at() from G1CollectedHeap, not those methods directly, and l) unify the code that expands the sequence (by either re-using or creating a new HeapRegion) and make it robust wrt to a HeapRegion allocation failing.
Reviewed-by: stefank, johnc, brutisso
2011-06-10 13:16:40 -04:00
HeapRegion * first_hr = region_at ( first ) ;
2011-03-04 17:13:19 -05:00
// The header of the new object will be placed at the bottom of
// the first region.
HeapWord * new_obj = first_hr - > bottom ( ) ;
// This will be the new end of the first region in the series that
2013-06-10 11:30:51 +02:00
// should also match the end of the last region in the series.
2011-03-04 17:13:19 -05:00
HeapWord * new_end = new_obj + word_size_sum ;
// This will be the new top of the first region that will reflect
// this allocation.
HeapWord * new_top = new_obj + word_size ;
// First, we need to zero the header of the space that we will be
// allocating. When we update top further down, some refinement
// threads might try to scan the region. By zeroing the header we
// ensure that any thread that will try to scan the region will
// come across the zero klass word and bail out.
//
// NOTE: It would not have been correct to have used
// CollectedHeap::fill_with_object() and make the space look like
// an int array. The thread that is doing the allocation will
// later update the object header to a potentially different array
// type and, for a very short period of time, the klass and length
// fields will be inconsistent. This could cause a refinement
// thread to calculate the object size incorrectly.
Copy : : fill_to_words ( new_obj , oopDesc : : header_size ( ) , 0 ) ;
// We will set up the first region as "starts humongous". This
// will also update the BOT covering all the regions to reflect
// that there is a single object that starts at the bottom of the
// first region.
first_hr - > set_startsHumongous ( new_top , new_end ) ;
// Then, if there are any, we will set up the "continues
// humongous" regions.
HeapRegion * hr = NULL ;
2012-04-18 07:21:15 -04:00
for ( uint i = first + 1 ; i < last ; + + i ) {
7045330: G1: Simplify/fix the HeapRegionSeq class
7042285: G1: native memory leak during humongous object allocation
6804436: G1: heap region indices should be size_t
A series of fixes and improvements to the HeapRegionSeq class: a) replace the _regions growable array with a standard C array, b) avoid de-allocating / re-allocating HeapRegion instances when the heap shrinks / grows (fix for 7042285), c) introduce fast method to map address to HeapRegion via a "biased" array pointer, d) embed the _hrs object in G1CollectedHeap, instead of pointing to it via an indirection, e) assume that all the regions added to the HeapRegionSeq instance are contiguous, f) replace int's with size_t's for indexes (and expand that to HeapRegion as part of 6804436), g) remove unnecessary / unused methods, h) rename a couple of fields (_alloc_search_start and _seq_bottom), i) fix iterate_from() not to always start from index 0 irrespective of the region passed to it, j) add a verification method to check the HeapRegionSeq assumptions, k) always call the wrappers for _hrs.iterate(), _hrs_length(), and _hrs.at() from G1CollectedHeap, not those methods directly, and l) unify the code that expands the sequence (by either re-using or creating a new HeapRegion) and make it robust wrt to a HeapRegion allocation failing.
Reviewed-by: stefank, johnc, brutisso
2011-06-10 13:16:40 -04:00
hr = region_at ( i ) ;
2011-03-04 17:13:19 -05:00
hr - > set_continuesHumongous ( first_hr ) ;
}
// If we have "continues humongous" regions (hr != NULL), then the
// end of the last one should match new_end.
assert ( hr = = NULL | | hr - > end ( ) = = new_end , " sanity " ) ;
// Up to this point no concurrent thread would have been able to
// do any scanning on any region in this series. All the top
// fields still point to bottom, so the intersection between
// [bottom,top] and [card_start,card_end] will be empty. Before we
// update the top fields, we'll do a storestore to make sure that
// no thread sees the update to top before the zeroing of the
// object header and the BOT initialization.
OrderAccess : : storestore ( ) ;
// Now that the BOT and the object header have been initialized,
// we can update top of the "starts humongous" region.
assert ( first_hr - > bottom ( ) < new_top & & new_top < = first_hr - > end ( ) ,
" new_top should be in this region " ) ;
first_hr - > set_top ( new_top ) ;
2011-06-24 12:38:49 -04:00
if ( _hr_printer . is_active ( ) ) {
HeapWord * bottom = first_hr - > bottom ( ) ;
HeapWord * end = first_hr - > orig_end ( ) ;
if ( ( first + 1 ) = = last ) {
// the series has a single humongous region
_hr_printer . alloc ( G1HRPrinter : : SingleHumongous , first_hr , new_top ) ;
} else {
// the series has more than one humongous regions
_hr_printer . alloc ( G1HRPrinter : : StartsHumongous , first_hr , end ) ;
}
}
2011-03-04 17:13:19 -05:00
// Now, we will update the top fields of the "continues humongous"
// regions. The reason we need to do this is that, otherwise,
// these regions would look empty and this will confuse parts of
// G1. For example, the code that looks for a consecutive number
// of empty regions will consider them empty and try to
// re-allocate them. We can extend is_empty() to also include
// !continuesHumongous(), but it is easier to just update the top
// fields here. The way we set top for all regions (i.e., top ==
// end for all regions but the last one, top == new_top for the
// last one) is actually used when we will free up the humongous
// region in free_humongous_region().
hr = NULL ;
2012-04-18 07:21:15 -04:00
for ( uint i = first + 1 ; i < last ; + + i ) {
7045330: G1: Simplify/fix the HeapRegionSeq class
7042285: G1: native memory leak during humongous object allocation
6804436: G1: heap region indices should be size_t
A series of fixes and improvements to the HeapRegionSeq class: a) replace the _regions growable array with a standard C array, b) avoid de-allocating / re-allocating HeapRegion instances when the heap shrinks / grows (fix for 7042285), c) introduce fast method to map address to HeapRegion via a "biased" array pointer, d) embed the _hrs object in G1CollectedHeap, instead of pointing to it via an indirection, e) assume that all the regions added to the HeapRegionSeq instance are contiguous, f) replace int's with size_t's for indexes (and expand that to HeapRegion as part of 6804436), g) remove unnecessary / unused methods, h) rename a couple of fields (_alloc_search_start and _seq_bottom), i) fix iterate_from() not to always start from index 0 irrespective of the region passed to it, j) add a verification method to check the HeapRegionSeq assumptions, k) always call the wrappers for _hrs.iterate(), _hrs_length(), and _hrs.at() from G1CollectedHeap, not those methods directly, and l) unify the code that expands the sequence (by either re-using or creating a new HeapRegion) and make it robust wrt to a HeapRegion allocation failing.
Reviewed-by: stefank, johnc, brutisso
2011-06-10 13:16:40 -04:00
hr = region_at ( i ) ;
2011-03-04 17:13:19 -05:00
if ( ( i + 1 ) = = last ) {
// last continues humongous region
assert ( hr - > bottom ( ) < new_top & & new_top < = hr - > end ( ) ,
" new_top should fall on this region " ) ;
hr - > set_top ( new_top ) ;
2011-06-24 12:38:49 -04:00
_hr_printer . alloc ( G1HRPrinter : : ContinuesHumongous , hr , new_top ) ;
2011-03-04 17:13:19 -05:00
} else {
// not last one
assert ( new_top > hr - > end ( ) , " new_top should be above this region " ) ;
hr - > set_top ( hr - > end ( ) ) ;
2011-06-24 12:38:49 -04:00
_hr_printer . alloc ( G1HRPrinter : : ContinuesHumongous , hr , hr - > end ( ) ) ;
2011-03-04 17:13:19 -05:00
}
}
// If we have continues humongous regions (hr != NULL), then the
// end of the last one should match new_end and its top should
// match new_top.
assert ( hr = = NULL | |
( hr - > end ( ) = = new_end & & hr - > top ( ) = = new_top ) , " sanity " ) ;
2014-04-29 09:33:20 +02:00
check_bitmaps ( " Humongous Region Allocation " , first_hr ) ;
2011-03-04 17:13:19 -05:00
assert ( first_hr - > used ( ) = = word_size * HeapWordSize , " invariant " ) ;
_summary_bytes_used + = first_hr - > used ( ) ;
_humongous_set . add ( first_hr ) ;
return new_obj ;
}
2008-06-05 15:57:56 -07:00
// If could fit into free regions w/o expansion, try.
// Otherwise, if can expand, do so.
// Otherwise, if using ex regions might help, try with ex given back.
2010-08-24 17:24:33 -04:00
HeapWord * G1CollectedHeap : : humongous_obj_allocate ( size_t word_size ) {
2011-01-19 19:30:42 -05:00
assert_heap_locked_or_at_safepoint ( true /* should_be_vm_thread */ ) ;
2008-06-05 15:57:56 -07:00
2011-01-19 19:30:42 -05:00
verify_region_sets_optional ( ) ;
2008-06-05 15:57:56 -07:00
2014-08-26 09:36:53 +02:00
uint first = G1_NO_HRM_INDEX ;
2014-08-18 16:10:44 +02:00
uint obj_regions = ( uint ) ( align_size_up_ ( word_size , HeapRegion : : GrainWords ) / HeapRegion : : GrainWords ) ;
if ( obj_regions = = 1 ) {
// Only one region to allocate, try to use a fast path by directly allocating
// from the free lists. Do not try to expand here, we will potentially do that
// later.
HeapRegion * hr = new_region ( word_size , true /* is_old */ , false /* do_expand */ ) ;
if ( hr ! = NULL ) {
2014-08-26 09:36:53 +02:00
first = hr - > hrm_index ( ) ;
2014-08-18 16:10:44 +02:00
}
} else {
// We can't allocate humongous regions spanning more than one region while
// cleanupComplete() is running, since some of the regions we find to be
// empty might not yet be added to the free list. It is not straightforward
// to know in which list they are on so that we can remove them. We only
// need to do this if we need to allocate more than one region to satisfy the
// current humongous allocation request. If we are only allocating one region
2014-08-19 14:09:10 +02:00
// we use the one-region region allocation code (see above), that already
// potentially waits for regions from the secondary free list.
2014-08-18 16:10:44 +02:00
wait_while_free_regions_coming ( ) ;
append_secondary_free_list_if_not_empty_with_lock ( ) ;
// Policy: Try only empty regions (i.e. already committed first). Maybe we
// are lucky enough to find some.
2014-08-26 09:36:53 +02:00
first = _hrm . find_contiguous_only_empty ( obj_regions ) ;
if ( first ! = G1_NO_HRM_INDEX ) {
_hrm . allocate_free_regions_starting_at ( first , obj_regions ) ;
2014-08-18 16:10:44 +02:00
}
}
2011-02-02 10:41:20 -08:00
2014-08-26 09:36:53 +02:00
if ( first = = G1_NO_HRM_INDEX ) {
2014-08-18 16:10:44 +02:00
// Policy: We could not find enough regions for the humongous object in the
// free list. Look through the heap to find a mix of free and uncommitted regions.
// If so, try expansion.
2014-08-26 09:36:53 +02:00
first = _hrm . find_contiguous_empty_or_unavailable ( obj_regions ) ;
if ( first ! = G1_NO_HRM_INDEX ) {
2014-08-18 16:10:44 +02:00
// We found something. Make sure these regions are committed, i.e. expand
// the heap. Alternatively we could do a defragmentation GC.
2011-09-07 12:21:23 -04:00
ergo_verbose1 ( ErgoHeapSizing ,
" attempt heap expansion " ,
ergo_format_reason ( " humongous allocation request failed " )
ergo_format_byte ( " allocation request " ) ,
word_size * HeapWordSize ) ;
2014-08-18 16:10:44 +02:00
2014-08-26 09:36:53 +02:00
_hrm . expand_at ( first , obj_regions ) ;
2014-08-18 16:10:44 +02:00
g1_policy ( ) - > record_new_heap_size ( num_regions ( ) ) ;
# ifdef ASSERT
for ( uint i = first ; i < first + obj_regions ; + + i ) {
HeapRegion * hr = region_at ( i ) ;
assert ( hr - > is_empty ( ) , " sanity " ) ;
assert ( is_on_master_free_list ( hr ) , " sanity " ) ;
2011-02-02 10:41:20 -08:00
}
2014-08-18 16:10:44 +02:00
# endif
2014-08-26 09:36:53 +02:00
_hrm . allocate_free_regions_starting_at ( first , obj_regions ) ;
2014-08-18 16:10:44 +02:00
} else {
// Policy: Potentially trigger a defragmentation GC.
2008-06-05 15:57:56 -07:00
}
}
2011-01-19 19:30:42 -05:00
2011-03-04 17:13:19 -05:00
HeapWord * result = NULL ;
2014-08-26 09:36:53 +02:00
if ( first ! = G1_NO_HRM_INDEX ) {
2014-08-18 16:10:44 +02:00
result = humongous_obj_allocate_initialize_regions ( first , obj_regions , word_size ) ;
2011-03-04 17:13:19 -05:00
assert ( result ! = NULL , " it should always return a valid result " ) ;
2011-09-23 16:07:49 -04:00
// A successful humongous object allocation changes the used space
// information of the old generation so we need to recalculate the
// sizes and update the jstat counters here.
g1mm ( ) - > update_sizes ( ) ;
2008-06-05 15:57:56 -07:00
}
2011-01-19 19:30:42 -05:00
verify_region_sets_optional ( ) ;
2011-03-04 17:13:19 -05:00
return result ;
2008-06-05 15:57:56 -07:00
}
2011-03-30 10:26:59 -04:00
HeapWord * G1CollectedHeap : : allocate_new_tlab ( size_t word_size ) {
assert_heap_not_locked_and_not_at_safepoint ( ) ;
assert ( ! isHumongous ( word_size ) , " we do not allow humongous TLABs " ) ;
2010-08-24 17:24:33 -04:00
2011-03-30 10:26:59 -04:00
unsigned int dummy_gc_count_before ;
2013-03-28 10:27:28 +01:00
int dummy_gclocker_retry_count = 0 ;
return attempt_allocation ( word_size , & dummy_gc_count_before , & dummy_gclocker_retry_count ) ;
2010-08-24 17:24:33 -04:00
}
HeapWord *
2011-03-30 10:26:59 -04:00
G1CollectedHeap : : mem_allocate ( size_t word_size ,
bool * gc_overhead_limit_was_exceeded ) {
assert_heap_not_locked_and_not_at_safepoint ( ) ;
2011-01-12 16:34:25 -05:00
2013-06-10 11:30:51 +02:00
// Loop until the allocation is satisfied, or unsatisfied after GC.
2013-03-28 10:27:28 +01:00
for ( int try_count = 1 , gclocker_retry_count = 0 ; /* we'll return */ ; try_count + = 1 ) {
2011-03-30 10:26:59 -04:00
unsigned int gc_count_before ;
2011-01-12 16:34:25 -05:00
2011-03-30 10:26:59 -04:00
HeapWord * result = NULL ;
if ( ! isHumongous ( word_size ) ) {
2013-03-28 10:27:28 +01:00
result = attempt_allocation ( word_size , & gc_count_before , & gclocker_retry_count ) ;
2010-08-24 17:24:33 -04:00
} else {
2013-03-28 10:27:28 +01:00
result = attempt_allocation_humongous ( word_size , & gc_count_before , & gclocker_retry_count ) ;
2011-03-30 10:26:59 -04:00
}
if ( result ! = NULL ) {
return result ;
}
2010-08-24 17:24:33 -04:00
2011-03-30 10:26:59 -04:00
// Create the garbage collection operation...
VM_G1CollectForAllocation op ( gc_count_before , word_size ) ;
// ...and get the VM thread to execute it.
VMThread : : execute ( & op ) ;
2010-08-24 17:24:33 -04:00
2011-03-30 10:26:59 -04:00
if ( op . prologue_succeeded ( ) & & op . pause_succeeded ( ) ) {
// If the operation was successful we'll return the result even
// if it is NULL. If the allocation attempt failed immediately
// after a Full GC, it's unlikely we'll be able to allocate now.
HeapWord * result = op . result ( ) ;
if ( result ! = NULL & & ! isHumongous ( word_size ) ) {
2010-08-24 17:24:33 -04:00
// Allocations that take place on VM operations do not do any
2011-03-30 10:26:59 -04:00
// card dirtying and we have to do it here. We only have to do
// this for non-humongous allocations, though.
2010-08-24 17:24:33 -04:00
dirty_young_block ( result , word_size ) ;
}
return result ;
2011-03-30 10:26:59 -04:00
} else {
2013-03-28 10:27:28 +01:00
if ( gclocker_retry_count > GCLockerRetryAllocationCount ) {
return NULL ;
}
2011-03-30 10:26:59 -04:00
assert ( op . result ( ) = = NULL ,
" the result should be NULL if the VM op did not succeed " ) ;
2010-08-24 17:24:33 -04:00
}
// Give a warning if we seem to be looping forever.
if ( ( QueuedAllocationWarningCount > 0 ) & &
( try_count % QueuedAllocationWarningCount = = 0 ) ) {
2011-03-30 10:26:59 -04:00
warning ( " G1CollectedHeap::mem_allocate retries %d times " , try_count ) ;
2010-08-24 17:24:33 -04:00
}
}
2011-03-30 10:26:59 -04:00
ShouldNotReachHere ( ) ;
2010-08-24 17:24:33 -04:00
return NULL ;
}
2011-03-30 10:26:59 -04:00
HeapWord * G1CollectedHeap : : attempt_allocation_slow ( size_t word_size ,
2013-03-28 10:27:28 +01:00
unsigned int * gc_count_before_ret ,
int * gclocker_retry_count_ret ) {
2011-03-30 10:26:59 -04:00
// Make sure you read the note in attempt_allocation_humongous().
assert_heap_not_locked_and_not_at_safepoint ( ) ;
assert ( ! isHumongous ( word_size ) , " attempt_allocation_slow() should not "
" be called for humongous allocation requests " ) ;
2010-08-24 17:24:33 -04:00
2011-03-30 10:26:59 -04:00
// We should only get here after the first-level allocation attempt
// (attempt_allocation()) failed to allocate.
// We will loop until a) we manage to successfully perform the
// allocation or b) we successfully schedule a collection which
// fails to perform the allocation. b) is the only case when we'll
// return NULL.
2010-08-24 17:24:33 -04:00
HeapWord * result = NULL ;
2011-03-30 10:26:59 -04:00
for ( int try_count = 1 ; /* we'll return */ ; try_count + = 1 ) {
bool should_try_gc ;
unsigned int gc_count_before ;
2010-08-24 17:24:33 -04:00
2011-03-30 10:26:59 -04:00
{
MutexLockerEx x ( Heap_lock ) ;
result = _mutator_alloc_region . attempt_allocation_locked ( word_size ,
false /* bot_updates */ ) ;
if ( result ! = NULL ) {
return result ;
2010-07-19 11:06:34 -07:00
}
2008-06-05 15:57:56 -07:00
2011-03-30 10:26:59 -04:00
// If we reach here, attempt_allocation_locked() above failed to
// allocate a new region. So the mutator alloc region should be NULL.
assert ( _mutator_alloc_region . get ( ) = = NULL , " only way to get here " ) ;
2008-06-05 15:57:56 -07:00
2011-03-30 10:26:59 -04:00
if ( GC_locker : : is_active_and_needs_gc ( ) ) {
if ( g1_policy ( ) - > can_expand_young_list ( ) ) {
2011-09-07 12:21:23 -04:00
// No need for an ergo verbose message here,
// can_expand_young_list() does this when it returns true.
2011-03-30 10:26:59 -04:00
result = _mutator_alloc_region . attempt_allocation_force ( word_size ,
false /* bot_updates */ ) ;
if ( result ! = NULL ) {
return result ;
}
}
should_try_gc = false ;
} else {
2012-05-29 10:18:02 -07:00
// The GCLocker may not be active but the GCLocker initiated
// GC may not yet have been performed (GCLocker::needs_gc()
// returns true). In this case we do not try this GC and
// wait until the GCLocker initiated GC is performed, and
// then retry the allocation.
if ( GC_locker : : needs_gc ( ) ) {
should_try_gc = false ;
} else {
// Read the GC count while still holding the Heap_lock.
gc_count_before = total_collections ( ) ;
should_try_gc = true ;
}
2011-03-30 10:26:59 -04:00
}
}
2010-08-24 17:24:33 -04:00
2011-03-30 10:26:59 -04:00
if ( should_try_gc ) {
bool succeeded ;
2013-08-21 22:35:56 +02:00
result = do_collection_pause ( word_size , gc_count_before , & succeeded ,
GCCause : : _g1_inc_collection_pause ) ;
2010-08-24 17:24:33 -04:00
if ( result ! = NULL ) {
2011-03-30 10:26:59 -04:00
assert ( succeeded , " only way to get back a non-NULL result " ) ;
2010-08-24 17:24:33 -04:00
return result ;
}
2011-03-30 10:26:59 -04:00
if ( succeeded ) {
// If we get here we successfully scheduled a collection which
// failed to allocate. No point in trying to allocate
// further. We'll just return NULL.
MutexLockerEx x ( Heap_lock ) ;
2012-02-14 08:21:08 -05:00
* gc_count_before_ret = total_collections ( ) ;
2011-03-30 10:26:59 -04:00
return NULL ;
}
} else {
2013-03-28 10:27:28 +01:00
if ( * gclocker_retry_count_ret > GCLockerRetryAllocationCount ) {
MutexLockerEx x ( Heap_lock ) ;
* gc_count_before_ret = total_collections ( ) ;
return NULL ;
}
2012-05-29 10:18:02 -07:00
// The GCLocker is either active or the GCLocker initiated
// GC has not yet been performed. Stall until it is and
// then retry the allocation.
2011-03-30 10:26:59 -04:00
GC_locker : : stall_until_clear ( ) ;
2013-03-28 10:27:28 +01:00
( * gclocker_retry_count_ret ) + = 1 ;
2010-08-24 17:24:33 -04:00
}
2013-06-10 11:30:51 +02:00
// We can reach here if we were unsuccessful in scheduling a
2011-03-30 10:26:59 -04:00
// collection (because another thread beat us to it) or if we were
// stalled due to the GC locker. In either can we should retry the
// allocation attempt in case another thread successfully
// performed a collection and reclaimed enough space. We do the
// first attempt (without holding the Heap_lock) here and the
// follow-on attempt will be at the start of the next loop
// iteration (after taking the Heap_lock).
result = _mutator_alloc_region . attempt_allocation ( word_size ,
false /* bot_updates */ ) ;
2012-03-12 14:59:00 -07:00
if ( result ! = NULL ) {
2011-03-30 10:26:59 -04:00
return result ;
2008-06-05 15:57:56 -07:00
}
2010-08-24 17:24:33 -04:00
// Give a warning if we seem to be looping forever.
if ( ( QueuedAllocationWarningCount > 0 ) & &
( try_count % QueuedAllocationWarningCount = = 0 ) ) {
2011-03-30 10:26:59 -04:00
warning ( " G1CollectedHeap::attempt_allocation_slow() "
2010-08-24 17:24:33 -04:00
" retries %d times " , try_count ) ;
2008-06-05 15:57:56 -07:00
}
}
2010-08-24 17:24:33 -04:00
ShouldNotReachHere ( ) ;
return NULL ;
2008-06-05 15:57:56 -07:00
}
2011-03-30 10:26:59 -04:00
HeapWord * G1CollectedHeap : : attempt_allocation_humongous ( size_t word_size ,
2013-03-28 10:27:28 +01:00
unsigned int * gc_count_before_ret ,
int * gclocker_retry_count_ret ) {
2011-03-30 10:26:59 -04:00
// The structure of this method has a lot of similarities to
// attempt_allocation_slow(). The reason these two were not merged
// into a single one is that such a method would require several "if
// allocation is not humongous do this, otherwise do that"
// conditional paths which would obscure its flow. In fact, an early
// version of this code did use a unified method which was harder to
// follow and, as a result, it had subtle bugs that were hard to
// track down. So keeping these two methods separate allows each to
// be more readable. It will be good to keep these two in sync as
// much as possible.
2010-08-24 17:24:33 -04:00
assert_heap_not_locked_and_not_at_safepoint ( ) ;
2011-03-30 10:26:59 -04:00
assert ( isHumongous ( word_size ) , " attempt_allocation_humongous() "
" should only be called for humongous allocations " ) ;
2008-06-05 15:57:56 -07:00
2012-01-23 20:36:16 +01:00
// Humongous objects can exhaust the heap quickly, so we should check if we
// need to start a marking cycle at each humongous object allocation. We do
// the check before we do the actual allocation. The reason for doing it
// before the allocation is that we avoid having to keep track of the newly
// allocated memory while we do a GC.
2012-02-14 08:21:08 -05:00
if ( g1_policy ( ) - > need_to_start_conc_mark ( " concurrent humongous allocation " ,
word_size ) ) {
2012-01-23 20:36:16 +01:00
collect ( GCCause : : _g1_humongous_allocation ) ;
}
2011-03-30 10:26:59 -04:00
// We will loop until a) we manage to successfully perform the
// allocation or b) we successfully schedule a collection which
// fails to perform the allocation. b) is the only case when we'll
// return NULL.
HeapWord * result = NULL ;
2010-08-24 17:24:33 -04:00
for ( int try_count = 1 ; /* we'll return */ ; try_count + = 1 ) {
2011-03-30 10:26:59 -04:00
bool should_try_gc ;
2010-08-24 17:24:33 -04:00
unsigned int gc_count_before ;
2011-03-30 10:26:59 -04:00
2008-06-05 15:57:56 -07:00
{
2011-03-30 10:26:59 -04:00
MutexLockerEx x ( Heap_lock ) ;
2010-08-24 17:24:33 -04:00
2011-03-30 10:26:59 -04:00
// Given that humongous objects are not allocated in young
// regions, we'll first try to do the allocation without doing a
// collection hoping that there's enough space in the heap.
result = humongous_obj_allocate ( word_size ) ;
2012-01-23 20:36:16 +01:00
if ( result ! = NULL ) {
return result ;
2011-03-30 10:26:59 -04:00
}
2010-08-24 17:24:33 -04:00
2012-01-23 20:36:16 +01:00
if ( GC_locker : : is_active_and_needs_gc ( ) ) {
should_try_gc = false ;
} else {
2012-05-29 10:18:02 -07:00
// The GCLocker may not be active but the GCLocker initiated
// GC may not yet have been performed (GCLocker::needs_gc()
// returns true). In this case we do not try this GC and
// wait until the GCLocker initiated GC is performed, and
// then retry the allocation.
if ( GC_locker : : needs_gc ( ) ) {
should_try_gc = false ;
} else {
// Read the GC count while still holding the Heap_lock.
gc_count_before = total_collections ( ) ;
should_try_gc = true ;
}
2008-06-05 15:57:56 -07:00
}
}
2011-03-30 10:26:59 -04:00
if ( should_try_gc ) {
// If we failed to allocate the humongous object, we should try to
// do a collection pause (if we're allowed) in case it reclaims
// enough space for the allocation to succeed after the pause.
2010-08-24 17:24:33 -04:00
2011-03-30 10:26:59 -04:00
bool succeeded ;
2013-08-21 22:35:56 +02:00
result = do_collection_pause ( word_size , gc_count_before , & succeeded ,
GCCause : : _g1_humongous_allocation ) ;
2011-03-30 10:26:59 -04:00
if ( result ! = NULL ) {
assert ( succeeded , " only way to get back a non-NULL result " ) ;
return result ;
}
if ( succeeded ) {
// If we get here we successfully scheduled a collection which
// failed to allocate. No point in trying to allocate
// further. We'll just return NULL.
MutexLockerEx x ( Heap_lock ) ;
2012-02-14 08:21:08 -05:00
* gc_count_before_ret = total_collections ( ) ;
2011-03-30 10:26:59 -04:00
return NULL ;
2010-08-24 17:24:33 -04:00
}
} else {
2013-03-28 10:27:28 +01:00
if ( * gclocker_retry_count_ret > GCLockerRetryAllocationCount ) {
MutexLockerEx x ( Heap_lock ) ;
* gc_count_before_ret = total_collections ( ) ;
return NULL ;
}
2012-05-29 10:18:02 -07:00
// The GCLocker is either active or the GCLocker initiated
// GC has not yet been performed. Stall until it is and
// then retry the allocation.
2011-03-30 10:26:59 -04:00
GC_locker : : stall_until_clear ( ) ;
2013-03-28 10:27:28 +01:00
( * gclocker_retry_count_ret ) + = 1 ;
2008-06-05 15:57:56 -07:00
}
2013-06-10 11:30:51 +02:00
// We can reach here if we were unsuccessful in scheduling a
2011-03-30 10:26:59 -04:00
// collection (because another thread beat us to it) or if we were
// stalled due to the GC locker. In either can we should retry the
// allocation attempt in case another thread successfully
// performed a collection and reclaimed enough space. Give a
// warning if we seem to be looping forever.
2008-06-05 15:57:56 -07:00
if ( ( QueuedAllocationWarningCount > 0 ) & &
( try_count % QueuedAllocationWarningCount = = 0 ) ) {
2011-03-30 10:26:59 -04:00
warning ( " G1CollectedHeap::attempt_allocation_humongous() "
" retries %d times " , try_count ) ;
2008-06-05 15:57:56 -07:00
}
}
2010-08-24 17:24:33 -04:00
ShouldNotReachHere ( ) ;
2011-03-30 10:26:59 -04:00
return NULL ;
2008-06-05 15:57:56 -07:00
}
2011-03-30 10:26:59 -04:00
HeapWord * G1CollectedHeap : : attempt_allocation_at_safepoint ( size_t word_size ,
bool expect_null_mutator_alloc_region ) {
2011-01-19 19:30:42 -05:00
assert_at_safepoint ( true /* should_be_vm_thread */ ) ;
2011-03-30 10:26:59 -04:00
assert ( _mutator_alloc_region . get ( ) = = NULL | |
! expect_null_mutator_alloc_region ,
" the current alloc region was unexpectedly found to be non-NULL " ) ;
2011-01-19 19:30:42 -05:00
2011-03-30 10:26:59 -04:00
if ( ! isHumongous ( word_size ) ) {
return _mutator_alloc_region . attempt_allocation_locked ( word_size ,
false /* bot_updates */ ) ;
} else {
2012-01-16 22:10:05 +01:00
HeapWord * result = humongous_obj_allocate ( word_size ) ;
if ( result ! = NULL & & g1_policy ( ) - > need_to_start_conc_mark ( " STW humongous allocation " ) ) {
g1_policy ( ) - > set_initiate_conc_mark_if_possible ( ) ;
}
return result ;
2008-06-05 15:57:56 -07:00
}
2011-03-30 10:26:59 -04:00
ShouldNotReachHere ( ) ;
2008-06-05 15:57:56 -07:00
}
class PostMCRemSetClearClosure : public HeapRegionClosure {
2012-07-19 15:15:54 -07:00
G1CollectedHeap * _g1h ;
2008-06-05 15:57:56 -07:00
ModRefBarrierSet * _mr_bs ;
public :
2012-07-19 15:15:54 -07:00
PostMCRemSetClearClosure ( G1CollectedHeap * g1h , ModRefBarrierSet * mr_bs ) :
2013-08-15 10:52:18 +02:00
_g1h ( g1h ) , _mr_bs ( mr_bs ) { }
2008-06-05 15:57:56 -07:00
bool doHeapRegion ( HeapRegion * r ) {
2013-08-15 10:52:18 +02:00
HeapRegionRemSet * hrrs = r - > rem_set ( ) ;
2012-07-19 15:15:54 -07:00
if ( r - > continuesHumongous ( ) ) {
2013-08-15 10:52:18 +02:00
// We'll assert that the strong code root list and RSet is empty
assert ( hrrs - > strong_code_roots_list_length ( ) = = 0 , " sanity " ) ;
assert ( hrrs - > occupied ( ) = = 0 , " RSet should be empty " ) ;
2008-06-05 15:57:56 -07:00
return false ;
2012-07-19 15:15:54 -07:00
}
2013-08-15 10:52:18 +02:00
2012-07-19 15:15:54 -07:00
_g1h - > reset_gc_time_stamps ( r ) ;
2013-08-15 10:52:18 +02:00
hrrs - > clear ( ) ;
2008-06-05 15:57:56 -07:00
// You might think here that we could clear just the cards
// corresponding to the used region. But no: if we leave a dirty card
// in a region we might allocate into, then it would prevent that card
// from being enqueued, and cause it to be missed.
// Re: the performance cost: we shouldn't be doing full GC anyway!
_mr_bs - > clear ( MemRegion ( r - > bottom ( ) , r - > end ( ) ) ) ;
2013-08-15 10:52:18 +02:00
2008-06-05 15:57:56 -07:00
return false ;
}
} ;
2012-07-19 15:15:54 -07:00
void G1CollectedHeap : : clear_rsets_post_compaction ( ) {
2013-09-24 14:46:29 +02:00
PostMCRemSetClearClosure rs_clear ( this , g1_barrier_set ( ) ) ;
2012-07-19 15:15:54 -07:00
heap_region_iterate ( & rs_clear ) ;
}
2008-06-05 15:57:56 -07:00
2009-03-10 00:47:05 -07:00
class RebuildRSOutOfRegionClosure : public HeapRegionClosure {
G1CollectedHeap * _g1h ;
UpdateRSOopClosure _cl ;
int _worker_i ;
public :
RebuildRSOutOfRegionClosure ( G1CollectedHeap * g1 , int worker_i = 0 ) :
2010-10-12 09:36:48 -07:00
_cl ( g1 - > g1_rem_set ( ) , worker_i ) ,
2009-03-10 00:47:05 -07:00
_worker_i ( worker_i ) ,
_g1h ( g1 )
{ }
2010-11-16 14:07:33 -08:00
2009-03-10 00:47:05 -07:00
bool doHeapRegion ( HeapRegion * r ) {
if ( ! r - > continuesHumongous ( ) ) {
_cl . set_from ( r ) ;
r - > oop_iterate ( & _cl ) ;
}
return false ;
}
} ;
class ParRebuildRSTask : public AbstractGangTask {
G1CollectedHeap * _g1 ;
public :
ParRebuildRSTask ( G1CollectedHeap * g1 )
: AbstractGangTask ( " ParRebuildRSTask " ) ,
_g1 ( g1 )
{ }
2011-12-14 13:34:57 -08:00
void work ( uint worker_id ) {
RebuildRSOutOfRegionClosure rebuild_rs ( _g1 , worker_id ) ;
_g1 - > heap_region_par_iterate_chunked ( & rebuild_rs , worker_id ,
2011-08-09 10:16:01 -07:00
_g1 - > workers ( ) - > active_workers ( ) ,
2009-03-10 00:47:05 -07:00
HeapRegion : : RebuildRSClaimValue ) ;
}
} ;
2011-06-24 12:38:49 -04:00
class PostCompactionPrinterClosure : public HeapRegionClosure {
private :
G1HRPrinter * _hr_printer ;
public :
bool doHeapRegion ( HeapRegion * hr ) {
assert ( ! hr - > is_young ( ) , " not expecting to find young regions " ) ;
// We only generate output for non-empty regions.
if ( ! hr - > is_empty ( ) ) {
if ( ! hr - > isHumongous ( ) ) {
_hr_printer - > post_compaction ( hr , G1HRPrinter : : Old ) ;
} else if ( hr - > startsHumongous ( ) ) {
2012-07-19 15:15:54 -07:00
if ( hr - > region_num ( ) = = 1 ) {
2011-06-24 12:38:49 -04:00
// single humongous region
_hr_printer - > post_compaction ( hr , G1HRPrinter : : SingleHumongous ) ;
} else {
_hr_printer - > post_compaction ( hr , G1HRPrinter : : StartsHumongous ) ;
}
} else {
assert ( hr - > continuesHumongous ( ) , " only way to get here " ) ;
_hr_printer - > post_compaction ( hr , G1HRPrinter : : ContinuesHumongous ) ;
}
}
return false ;
}
PostCompactionPrinterClosure ( G1HRPrinter * hr_printer )
: _hr_printer ( hr_printer ) { }
} ;
2014-08-26 09:36:53 +02:00
void G1CollectedHeap : : print_hrm_post_compaction ( ) {
2012-07-19 15:15:54 -07:00
PostCompactionPrinterClosure cl ( hr_printer ( ) ) ;
heap_region_iterate ( & cl ) ;
}
2010-08-24 17:24:33 -04:00
bool G1CollectedHeap : : do_collection ( bool explicit_gc ,
2010-06-28 14:13:17 -04:00
bool clear_all_soft_refs ,
2008-06-05 15:57:56 -07:00
size_t word_size ) {
2011-01-19 19:30:42 -05:00
assert_at_safepoint ( true /* should_be_vm_thread */ ) ;
2010-04-06 10:59:45 -04:00
if ( GC_locker : : check_active_before_gc ( ) ) {
2010-08-24 17:24:33 -04:00
return false ;
2010-04-06 10:59:45 -04:00
}
2013-06-10 11:30:51 +02:00
STWGCTimer * gc_timer = G1MarkSweep : : gc_timer ( ) ;
2013-11-23 12:25:13 +01:00
gc_timer - > register_gc_start ( ) ;
2013-06-10 11:30:51 +02:00
SerialOldTracer * gc_tracer = G1MarkSweep : : gc_tracer ( ) ;
gc_tracer - > report_gc_start ( gc_cause ( ) , gc_timer - > gc_start ( ) ) ;
2011-01-10 17:14:53 -05:00
SvcGCMarker sgcm ( SvcGCMarker : : FULL ) ;
2008-06-05 15:57:56 -07:00
ResourceMark rm ;
2012-02-01 07:59:01 -08:00
print_heap_before_gc ( ) ;
2013-06-10 11:30:51 +02:00
trace_heap_before_gc ( gc_tracer ) ;
2009-07-07 14:23:00 -04:00
2014-03-31 17:09:38 +02:00
size_t metadata_prev_used = MetaspaceAux : : used_bytes ( ) ;
6964458: Reimplement class meta-data storage to use native memory
Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Co-authored-by: Stefan Karlsson <stefan.karlsson@oracle.com>
Co-authored-by: Mikael Gerdin <mikael.gerdin@oracle.com>
Co-authored-by: Tom Rodriguez <tom.rodriguez@oracle.com>
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
2012-09-01 13:25:18 -04:00
2011-01-19 19:30:42 -05:00
verify_region_sets_optional ( ) ;
2008-06-05 15:57:56 -07:00
2010-04-13 13:52:10 -07:00
const bool do_clear_all_soft_refs = clear_all_soft_refs | |
collector_policy ( ) - > should_clear_all_soft_refs ( ) ;
ClearedAllSoftRefs casr ( do_clear_all_soft_refs , collector_policy ( ) ) ;
2008-06-05 15:57:56 -07:00
{
IsGCActiveMark x ;
// Timing
2012-04-25 12:36:37 +02:00
assert ( gc_cause ( ) ! = GCCause : : _java_lang_system_gc | | explicit_gc , " invariant " ) ;
2012-04-13 01:59:38 +02:00
gclog_or_tty - > date_stamp ( G1Log : : fine ( ) & & PrintGCDateStamps ) ;
TraceCPUTime tcpu ( G1Log : : finer ( ) , true , gclog_or_tty ) ;
2012-04-25 12:36:37 +02:00
2013-04-10 10:57:34 -07:00
{
2014-06-19 13:31:14 +02:00
GCTraceTime t ( GCCauseString ( " Full GC " , gc_cause ( ) ) , G1Log : : fine ( ) , true , NULL , gc_tracer - > gc_id ( ) ) ;
2013-04-10 10:57:34 -07:00
TraceCollectorStats tcs ( g1mm ( ) - > full_collection_counters ( ) ) ;
TraceMemoryManagerStats tms ( true /* fullGC */ , gc_cause ( ) ) ;
double start = os : : elapsedTime ( ) ;
g1_policy ( ) - > record_full_collection_start ( ) ;
// Note: When we have a more flexible GC logging framework that
// allows us to add optional attributes to a GC log record we
// could consider timing and reporting how long we wait in the
// following two methods.
wait_while_free_regions_coming ( ) ;
// If we start the compaction before the CM threads finish
// scanning the root regions we might trip them over as we'll
// be moving objects / updating references. So let's wait until
// they are done. By telling them to abort, they should complete
// early.
_cm - > root_regions ( ) - > abort ( ) ;
_cm - > root_regions ( ) - > wait_until_scan_finished ( ) ;
append_secondary_free_list_if_not_empty_with_lock ( ) ;
2008-06-05 15:57:56 -07:00
2013-04-10 10:57:34 -07:00
gc_prologue ( true ) ;
increment_total_collections ( true /* full gc */ ) ;
increment_old_marking_cycles_started ( ) ;
2011-01-19 19:30:42 -05:00
2013-04-10 10:57:34 -07:00
assert ( used ( ) = = recalculate_used ( ) , " Should be equal " ) ;
2008-06-05 15:57:56 -07:00
2013-04-10 10:57:34 -07:00
verify_before_gc ( ) ;
2008-06-05 15:57:56 -07:00
2014-04-29 09:33:20 +02:00
check_bitmaps ( " Full GC Start " ) ;
2013-06-10 11:30:51 +02:00
pre_full_gc_dump ( gc_timer ) ;
2011-06-14 11:01:10 -07:00
2013-04-10 10:57:34 -07:00
COMPILER2_PRESENT ( DerivedPointerTable : : clear ( ) ) ;
2008-06-05 15:57:56 -07:00
2013-04-10 10:57:34 -07:00
// Disable discovery and empty the discovered lists
// for the CM ref processor.
ref_processor_cm ( ) - > disable_discovery ( ) ;
ref_processor_cm ( ) - > abandon_partial_discovery ( ) ;
ref_processor_cm ( ) - > verify_no_references_recorded ( ) ;
2008-06-05 15:57:56 -07:00
2013-04-10 10:57:34 -07:00
// Abandon current iterations of concurrent marking and concurrent
// refinement, if any are in progress. We have to do this before
// wait_until_scan_finished() below.
concurrent_mark ( ) - > abort ( ) ;
2008-06-05 15:57:56 -07:00
2013-04-10 10:57:34 -07:00
// Make sure we'll choose a new allocation region afterwards.
release_mutator_alloc_region ( ) ;
abandon_gc_alloc_regions ( ) ;
g1_rem_set ( ) - > cleanupHRRS ( ) ;
2008-06-05 15:57:56 -07:00
2013-04-10 10:57:34 -07:00
// We should call this after we retire any currently active alloc
// regions so that all the ALLOC / RETIRE events are generated
// before the start GC event.
_hr_printer . start_gc ( true /* full */ , ( size_t ) total_collections ( ) ) ;
2010-04-22 10:02:38 -07:00
2013-04-10 10:57:34 -07:00
// We may have added regions to the current incremental collection
// set between the last GC or pause and now. We need to clear the
// incremental collection set and then start rebuilding it afresh
// after this full GC.
abandon_collection_set ( g1_policy ( ) - > inc_cset_head ( ) ) ;
g1_policy ( ) - > clear_incremental_cset ( ) ;
g1_policy ( ) - > stop_incremental_cset_building ( ) ;
2011-06-24 12:38:49 -04:00
2013-04-10 10:57:34 -07:00
tear_down_region_sets ( false /* free_list_only */ ) ;
g1_policy ( ) - > set_gcs_are_young ( true ) ;
2010-04-22 10:02:38 -07:00
2013-04-10 10:57:34 -07:00
// See the comments in g1CollectedHeap.hpp and
// G1CollectedHeap::ref_processing_init() about
// how reference processing currently works in G1.
2008-06-05 15:57:56 -07:00
2013-04-10 10:57:34 -07:00
// Temporarily make discovery by the STW ref processor single threaded (non-MT).
ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser ( ref_processor_stw ( ) , false ) ;
2010-12-01 17:34:02 -08:00
2013-04-10 10:57:34 -07:00
// Temporarily clear the STW ref processor's _is_alive_non_header field.
ReferenceProcessorIsAliveMutator stw_rp_is_alive_null ( ref_processor_stw ( ) , NULL ) ;
2008-06-05 15:57:56 -07:00
2013-04-10 10:57:34 -07:00
ref_processor_stw ( ) - > enable_discovery ( true /*verify_disabled*/ , true /*verify_no_refs*/ ) ;
ref_processor_stw ( ) - > setup_policy ( do_clear_all_soft_refs ) ;
2008-06-05 15:57:56 -07:00
2013-04-10 10:57:34 -07:00
// Do collection work
{
HandleMark hm ; // Discard invalid handles created during gc
G1MarkSweep : : invoke_at_safepoint ( ref_processor_stw ( ) , do_clear_all_soft_refs ) ;
}
2008-06-05 15:57:56 -07:00
2014-08-18 16:10:44 +02:00
assert ( num_free_regions ( ) = = 0 , " we should not have added any free regions " ) ;
2013-04-10 10:57:34 -07:00
rebuild_region_sets ( false /* free_list_only */ ) ;
2011-09-22 10:57:37 -07:00
2013-04-10 10:57:34 -07:00
// Enqueue any discovered reference objects that have
// not been removed from the discovered lists.
ref_processor_stw ( ) - > enqueue_discovered_references ( ) ;
2008-06-05 15:57:56 -07:00
2013-04-10 10:57:34 -07:00
COMPILER2_PRESENT ( DerivedPointerTable : : update_pointers ( ) ) ;
2008-06-05 15:57:56 -07:00
2013-04-10 10:57:34 -07:00
MemoryService : : track_memory_usage ( ) ;
2008-06-05 15:57:56 -07:00
2013-04-10 10:57:34 -07:00
assert ( ! ref_processor_stw ( ) - > discovery_enabled ( ) , " Postcondition " ) ;
ref_processor_stw ( ) - > verify_no_references_recorded ( ) ;
2011-09-22 10:57:37 -07:00
2013-04-10 10:57:34 -07:00
// Delete metaspaces for unloaded class loaders and clean up loader_data graph
ClassLoaderDataGraph : : purge ( ) ;
2013-08-15 10:52:18 +02:00
MetaspaceAux : : verify_metrics ( ) ;
2011-09-22 10:57:37 -07:00
2013-04-10 10:57:34 -07:00
// Note: since we've just done a full GC, concurrent
// marking is no longer active. Therefore we need not
// re-enable reference discovery for the CM ref processor.
// That will be done at the start of the next marking cycle.
assert ( ! ref_processor_cm ( ) - > discovery_enabled ( ) , " Postcondition " ) ;
ref_processor_cm ( ) - > verify_no_references_recorded ( ) ;
6964458: Reimplement class meta-data storage to use native memory
Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Co-authored-by: Stefan Karlsson <stefan.karlsson@oracle.com>
Co-authored-by: Mikael Gerdin <mikael.gerdin@oracle.com>
Co-authored-by: Tom Rodriguez <tom.rodriguez@oracle.com>
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
2012-09-01 13:25:18 -04:00
2013-04-10 10:57:34 -07:00
reset_gc_time_stamp ( ) ;
// Since everything potentially moved, we will clear all remembered
2013-06-10 11:30:51 +02:00
// sets, and clear all cards. Later we will rebuild remembered
2013-04-10 10:57:34 -07:00
// sets. We will also reset the GC time stamps of the regions.
clear_rsets_post_compaction ( ) ;
check_gc_time_stamps ( ) ;
2008-06-05 15:57:56 -07:00
2013-04-10 10:57:34 -07:00
// Resize the heap if necessary.
resize_if_necessary_after_full_collection ( explicit_gc ? 0 : word_size ) ;
2008-06-05 15:57:56 -07:00
2013-04-10 10:57:34 -07:00
if ( _hr_printer . is_active ( ) ) {
// We should do this after we potentially resize the heap so
// that all the COMMIT / UNCOMMIT events are generated before
// the end GC event.
2008-06-05 15:57:56 -07:00
2014-08-26 09:36:53 +02:00
print_hrm_post_compaction ( ) ;
2013-04-10 10:57:34 -07:00
_hr_printer . end_gc ( true /* full */ , ( size_t ) total_collections ( ) ) ;
}
2011-06-24 12:38:49 -04:00
2013-05-09 11:16:39 -07:00
G1HotCardCache * hot_card_cache = _cg1r - > hot_card_cache ( ) ;
if ( hot_card_cache - > use_cache ( ) ) {
hot_card_cache - > reset_card_counts ( ) ;
hot_card_cache - > reset_hot_cache ( ) ;
2013-04-10 10:57:34 -07:00
}
2011-06-24 12:38:49 -04:00
2013-04-10 10:57:34 -07:00
// Rebuild remembered sets of all regions.
if ( G1CollectedHeap : : use_parallel_gc_threads ( ) ) {
uint n_workers =
AdaptiveSizePolicy : : calc_active_workers ( workers ( ) - > total_workers ( ) ,
workers ( ) - > active_workers ( ) ,
Threads : : number_of_non_daemon_threads ( ) ) ;
assert ( UseDynamicNumberOfGCThreads | |
n_workers = = workers ( ) - > total_workers ( ) ,
" If not dynamic should be using all the workers " ) ;
workers ( ) - > set_active_workers ( n_workers ) ;
// Set parallel threads in the heap (_n_par_threads) only
// before a parallel phase and always reset it to 0 after
// the phase so that the number of parallel threads does
// no get carried forward to a serial phase where there
// may be code that is "possibly_parallel".
set_par_threads ( n_workers ) ;
ParRebuildRSTask rebuild_rs_task ( this ) ;
assert ( check_heap_region_claim_values (
HeapRegion : : InitialClaimValue ) , " sanity check " ) ;
assert ( UseDynamicNumberOfGCThreads | |
workers ( ) - > active_workers ( ) = = workers ( ) - > total_workers ( ) ,
" Unless dynamic should use total workers " ) ;
// Use the most recent number of active workers
assert ( workers ( ) - > active_workers ( ) > 0 ,
" Active workers not properly set " ) ;
set_par_threads ( workers ( ) - > active_workers ( ) ) ;
workers ( ) - > run_task ( & rebuild_rs_task ) ;
set_par_threads ( 0 ) ;
assert ( check_heap_region_claim_values (
HeapRegion : : RebuildRSClaimValue ) , " sanity check " ) ;
reset_heap_region_claim_values ( ) ;
} else {
RebuildRSOutOfRegionClosure rebuild_rs ( this ) ;
heap_region_iterate ( & rebuild_rs ) ;
}
2008-06-05 15:57:56 -07:00
2013-08-15 10:52:18 +02:00
// Rebuild the strong code root lists for each region
rebuild_strong_code_roots ( ) ;
2013-04-10 10:57:34 -07:00
if ( true ) { // FIXME
MetaspaceGC : : compute_new_size ( ) ;
}
2011-08-09 10:16:01 -07:00
2013-04-10 10:57:34 -07:00
# ifdef TRACESPINNING
ParallelTaskTerminator : : print_termination_counts ( ) ;
# endif
2009-03-10 00:47:05 -07:00
2013-04-10 10:57:34 -07:00
// Discard all rset updates
JavaThread : : dirty_card_queue_set ( ) . abandon_logs ( ) ;
assert ( ! G1DeferredRSUpdate
| | ( G1DeferredRSUpdate & &
( dirty_card_queue_set ( ) . completed_buffers_num ( ) = = 0 ) ) , " Should not be any " ) ;
2008-06-05 15:57:56 -07:00
2013-04-10 10:57:34 -07:00
_young_list - > reset_sampled_info ( ) ;
// At this point there should be no regions in the
// entire heap tagged as young.
assert ( check_young_list_empty ( true /* check_heap */ ) ,
" young list should be empty at this point " ) ;
2008-06-05 15:57:56 -07:00
2013-04-10 10:57:34 -07:00
// Update the number of full collections that have been completed.
increment_old_marking_cycles_completed ( false /* concurrent */ ) ;
2010-04-22 10:02:38 -07:00
2014-08-26 09:36:53 +02:00
_hrm . verify_optional ( ) ;
2013-04-10 10:57:34 -07:00
verify_region_sets_optional ( ) ;
2010-04-22 10:02:38 -07:00
2013-06-04 10:04:06 -07:00
verify_after_gc ( ) ;
2014-04-29 09:33:20 +02:00
// Clear the previous marking bitmap, if needed for bitmap verification.
// Note we cannot do this when we clear the next marking bitmap in
// ConcurrentMark::abort() above since VerifyDuringGC verifies the
// objects marked during a full GC against the previous bitmap.
// But we need to clear it before calling check_bitmaps below since
// the full GC has compacted objects and updated TAMS but not updated
// the prev bitmap.
if ( G1VerifyBitmaps ) {
( ( CMBitMap * ) concurrent_mark ( ) - > prevMarkBitMap ( ) ) - > clearAll ( ) ;
}
check_bitmaps ( " Full GC End " ) ;
2013-04-10 10:57:34 -07:00
// Start a new incremental collection set for the next pause
assert ( g1_policy ( ) - > collection_set ( ) = = NULL , " must be " ) ;
g1_policy ( ) - > start_incremental_cset_building ( ) ;
2011-03-30 10:26:59 -04:00
2013-04-10 10:57:34 -07:00
clear_cset_fast_test ( ) ;
2008-06-05 15:57:56 -07:00
2013-04-10 10:57:34 -07:00
init_mutator_alloc_region ( ) ;
2009-02-08 13:18:01 -08:00
2013-04-10 10:57:34 -07:00
double end = os : : elapsedTime ( ) ;
g1_policy ( ) - > record_full_collection_end ( ) ;
2008-06-05 15:57:56 -07:00
2013-04-10 10:57:34 -07:00
if ( G1Log : : fine ( ) ) {
g1_policy ( ) - > print_heap_transition ( ) ;
}
2008-06-05 15:57:56 -07:00
2013-04-10 10:57:34 -07:00
// We must call G1MonitoringSupport::update_sizes() in the same scoping level
// as an active TraceMemoryManagerStats object (i.e. before the destructor for the
// TraceMemoryManagerStats is called) so that the G1 memory pools are updated
// before any GC notifications are raised.
g1mm ( ) - > update_sizes ( ) ;
2009-07-07 14:23:00 -04:00
2013-04-10 10:57:34 -07:00
gc_epilogue ( true ) ;
}
2010-06-28 14:13:17 -04:00
2013-04-10 10:57:34 -07:00
if ( G1Log : : finer ( ) ) {
2013-05-16 09:24:26 -07:00
g1_policy ( ) - > print_detailed_heap_transition ( true /* full */ ) ;
2013-04-10 10:57:34 -07:00
}
2012-06-04 13:29:34 +02:00
print_heap_after_gc ( ) ;
2013-06-10 11:30:51 +02:00
trace_heap_after_gc ( gc_tracer ) ;
post_full_gc_dump ( gc_timer ) ;
2012-06-04 13:29:34 +02:00
2013-11-23 12:25:13 +01:00
gc_timer - > register_gc_end ( ) ;
2013-06-10 11:30:51 +02:00
gc_tracer - > report_gc_end ( gc_timer - > gc_end ( ) , gc_timer - > time_partitions ( ) ) ;
2012-06-04 13:29:34 +02:00
}
2011-01-19 19:30:42 -05:00
2010-08-24 17:24:33 -04:00
return true ;
2008-06-05 15:57:56 -07:00
}
void G1CollectedHeap : : do_full_collection ( bool clear_all_soft_refs ) {
2010-08-24 17:24:33 -04:00
// do_collection() will return whether it succeeded in performing
// the GC. Currently, there is no facility on the
// do_full_collection() API to notify the caller than the collection
// did not succeed (e.g., because it was locked out by the GC
// locker). So, right now, we'll ignore the return value.
bool dummy = do_collection ( true , /* explicit_gc */
clear_all_soft_refs ,
0 /* word_size */ ) ;
2008-06-05 15:57:56 -07:00
}
// This code is mostly copied from TenuredGeneration.
void
G1CollectedHeap : :
resize_if_necessary_after_full_collection ( size_t word_size ) {
// Include the current allocation, if any, and bytes that will be
// pre-allocated to support collections, as "used".
const size_t used_after_gc = used ( ) ;
const size_t capacity_after_gc = capacity ( ) ;
const size_t free_after_gc = capacity_after_gc - used_after_gc ;
2010-08-17 14:40:00 -04:00
// This is enforced in arguments.cpp.
assert ( MinHeapFreeRatio < = MaxHeapFreeRatio ,
" otherwise the code below doesn't make sense " ) ;
2008-06-05 15:57:56 -07:00
// We don't have floating point command-line arguments
2010-08-17 14:40:00 -04:00
const double minimum_free_percentage = ( double ) MinHeapFreeRatio / 100.0 ;
2008-06-05 15:57:56 -07:00
const double maximum_used_percentage = 1.0 - minimum_free_percentage ;
2010-08-17 14:40:00 -04:00
const double maximum_free_percentage = ( double ) MaxHeapFreeRatio / 100.0 ;
2008-06-05 15:57:56 -07:00
const double minimum_used_percentage = 1.0 - maximum_free_percentage ;
2010-08-17 14:40:00 -04:00
const size_t min_heap_size = collector_policy ( ) - > min_heap_byte_size ( ) ;
const size_t max_heap_size = collector_policy ( ) - > max_heap_byte_size ( ) ;
// We have to be careful here as these two calculations can overflow
// 32-bit size_t's.
double used_after_gc_d = ( double ) used_after_gc ;
double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage ;
double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage ;
// Let's make sure that they are both under the max heap size, which
// by default will make them fit into a size_t.
double desired_capacity_upper_bound = ( double ) max_heap_size ;
minimum_desired_capacity_d = MIN2 ( minimum_desired_capacity_d ,
desired_capacity_upper_bound ) ;
maximum_desired_capacity_d = MIN2 ( maximum_desired_capacity_d ,
desired_capacity_upper_bound ) ;
// We can now safely turn them into size_t's.
size_t minimum_desired_capacity = ( size_t ) minimum_desired_capacity_d ;
size_t maximum_desired_capacity = ( size_t ) maximum_desired_capacity_d ;
// This assert only makes sense here, before we adjust them
// with respect to the min and max heap size.
assert ( minimum_desired_capacity < = maximum_desired_capacity ,
err_msg ( " minimum_desired_capacity = " SIZE_FORMAT " , "
" maximum_desired_capacity = " SIZE_FORMAT ,
minimum_desired_capacity , maximum_desired_capacity ) ) ;
// Should not be greater than the heap max size. No need to adjust
// it with respect to the heap min size as it's a lower bound (i.e.,
// we'll try to make the capacity larger than it, not smaller).
minimum_desired_capacity = MIN2 ( minimum_desired_capacity , max_heap_size ) ;
// Should not be less than the heap min size. No need to adjust it
// with respect to the heap max size as it's an upper bound (i.e.,
// we'll try to make the capacity smaller than it, not greater).
maximum_desired_capacity = MAX2 ( maximum_desired_capacity , min_heap_size ) ;
2008-06-05 15:57:56 -07:00
2010-08-17 14:40:00 -04:00
if ( capacity_after_gc < minimum_desired_capacity ) {
2008-06-05 15:57:56 -07:00
// Don't expand unless it's significant
size_t expand_bytes = minimum_desired_capacity - capacity_after_gc ;
2011-09-07 12:21:23 -04:00
ergo_verbose4 ( ErgoHeapSizing ,
" attempt heap expansion " ,
ergo_format_reason ( " capacity lower than "
" min desired capacity after Full GC " )
ergo_format_byte ( " capacity " )
ergo_format_byte ( " occupancy " )
ergo_format_byte_perc ( " min desired capacity " ) ,
capacity_after_gc , used_after_gc ,
minimum_desired_capacity , ( double ) MinHeapFreeRatio ) ;
expand ( expand_bytes ) ;
2008-06-05 15:57:56 -07:00
// No expansion, now see if we want to shrink
2010-08-17 14:40:00 -04:00
} else if ( capacity_after_gc > maximum_desired_capacity ) {
2008-06-05 15:57:56 -07:00
// Capacity too large, compute shrinking size
size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity ;
2011-09-07 12:21:23 -04:00
ergo_verbose4 ( ErgoHeapSizing ,
" attempt heap shrinking " ,
ergo_format_reason ( " capacity higher than "
" max desired capacity after Full GC " )
ergo_format_byte ( " capacity " )
ergo_format_byte ( " occupancy " )
ergo_format_byte_perc ( " max desired capacity " ) ,
capacity_after_gc , used_after_gc ,
maximum_desired_capacity , ( double ) MaxHeapFreeRatio ) ;
2008-06-05 15:57:56 -07:00
shrink ( shrink_bytes ) ;
}
}
HeapWord *
2010-08-24 17:24:33 -04:00
G1CollectedHeap : : satisfy_failed_allocation ( size_t word_size ,
bool * succeeded ) {
2011-01-19 19:30:42 -05:00
assert_at_safepoint ( true /* should_be_vm_thread */ ) ;
2010-08-24 17:24:33 -04:00
* succeeded = true ;
// Let's attempt the allocation first.
2011-03-30 10:26:59 -04:00
HeapWord * result =
attempt_allocation_at_safepoint ( word_size ,
false /* expect_null_mutator_alloc_region */ ) ;
2010-08-24 17:24:33 -04:00
if ( result ! = NULL ) {
assert ( * succeeded , " sanity " ) ;
return result ;
}
2008-06-05 15:57:56 -07:00
// In a G1 heap, we're supposed to keep allocation from failing by
// incremental pauses. Therefore, at least for now, we'll favor
// expansion over collection. (This might change in the future if we can
// do something smarter than full collection to satisfy a failed alloc.)
result = expand_and_allocate ( word_size ) ;
if ( result ! = NULL ) {
2010-08-24 17:24:33 -04:00
assert ( * succeeded , " sanity " ) ;
2008-06-05 15:57:56 -07:00
return result ;
}
2010-08-24 17:24:33 -04:00
// Expansion didn't work, we'll try to do a Full GC.
bool gc_succeeded = do_collection ( false , /* explicit_gc */
false , /* clear_all_soft_refs */
word_size ) ;
if ( ! gc_succeeded ) {
* succeeded = false ;
return NULL ;
}
2008-06-05 15:57:56 -07:00
2010-08-24 17:24:33 -04:00
// Retry the allocation
result = attempt_allocation_at_safepoint ( word_size ,
2011-03-30 10:26:59 -04:00
true /* expect_null_mutator_alloc_region */ ) ;
2008-06-05 15:57:56 -07:00
if ( result ! = NULL ) {
2010-08-24 17:24:33 -04:00
assert ( * succeeded , " sanity " ) ;
2008-06-05 15:57:56 -07:00
return result ;
}
2010-08-24 17:24:33 -04:00
// Then, try a Full GC that will collect all soft references.
gc_succeeded = do_collection ( false , /* explicit_gc */
true , /* clear_all_soft_refs */
word_size ) ;
if ( ! gc_succeeded ) {
* succeeded = false ;
return NULL ;
}
// Retry the allocation once more
result = attempt_allocation_at_safepoint ( word_size ,
2011-03-30 10:26:59 -04:00
true /* expect_null_mutator_alloc_region */ ) ;
2008-06-05 15:57:56 -07:00
if ( result ! = NULL ) {
2010-08-24 17:24:33 -04:00
assert ( * succeeded , " sanity " ) ;
2008-06-05 15:57:56 -07:00
return result ;
}
2010-04-13 13:52:10 -07:00
assert ( ! collector_policy ( ) - > should_clear_all_soft_refs ( ) ,
2010-08-24 17:24:33 -04:00
" Flag should have been handled and cleared prior to this point " ) ;
2010-04-13 13:52:10 -07:00
2008-06-05 15:57:56 -07:00
// What else? We might try synchronous finalization later. If the total
// space available is large enough for the allocation, then a more
// complete compaction phase than we've tried so far might be
// appropriate.
2010-08-24 17:24:33 -04:00
assert ( * succeeded , " sanity " ) ;
2008-06-05 15:57:56 -07:00
return NULL ;
}
// Attempting to expand the heap sufficiently
// to support an allocation of the given "word_size". If
// successful, perform the allocation and return the address of the
// allocated block, or else "NULL".
HeapWord * G1CollectedHeap : : expand_and_allocate ( size_t word_size ) {
2011-01-19 19:30:42 -05:00
assert_at_safepoint ( true /* should_be_vm_thread */ ) ;
verify_region_sets_optional ( ) ;
2010-08-24 17:24:33 -04:00
2011-02-02 10:41:20 -08:00
size_t expand_bytes = MAX2 ( word_size * HeapWordSize , MinHeapDeltaBytes ) ;
2011-09-07 12:21:23 -04:00
ergo_verbose1 ( ErgoHeapSizing ,
" attempt heap expansion " ,
ergo_format_reason ( " allocation request failed " )
ergo_format_byte ( " allocation request " ) ,
word_size * HeapWordSize ) ;
2011-02-02 10:41:20 -08:00
if ( expand ( expand_bytes ) ) {
2014-08-26 09:36:53 +02:00
_hrm . verify_optional ( ) ;
2011-02-02 10:41:20 -08:00
verify_region_sets_optional ( ) ;
return attempt_allocation_at_safepoint ( word_size ,
2011-03-30 10:26:59 -04:00
false /* expect_null_mutator_alloc_region */ ) ;
2008-06-05 15:57:56 -07:00
}
2011-02-02 10:41:20 -08:00
return NULL ;
2008-06-05 15:57:56 -07:00
}
2011-02-02 10:41:20 -08:00
bool G1CollectedHeap : : expand ( size_t expand_bytes ) {
size_t aligned_expand_bytes = ReservedSpace : : page_align_size_up ( expand_bytes ) ;
2008-06-05 15:57:56 -07:00
aligned_expand_bytes = align_size_up ( aligned_expand_bytes ,
HeapRegion : : GrainBytes ) ;
2011-09-07 12:21:23 -04:00
ergo_verbose2 ( ErgoHeapSizing ,
" expand the heap " ,
ergo_format_byte ( " requested expansion amount " )
ergo_format_byte ( " attempted expansion amount " ) ,
expand_bytes , aligned_expand_bytes ) ;
2011-02-02 10:41:20 -08:00
2014-08-18 16:10:44 +02:00
if ( is_maximal_no_gc ( ) ) {
2013-10-01 07:52:52 +02:00
ergo_verbose0 ( ErgoHeapSizing ,
" did not expand the heap " ,
ergo_format_reason ( " heap already fully expanded " ) ) ;
return false ;
}
2014-08-18 16:10:44 +02:00
uint regions_to_expand = ( uint ) ( aligned_expand_bytes / HeapRegion : : GrainBytes ) ;
assert ( regions_to_expand > 0 , " Must expand by at least one region " ) ;
7045330: G1: Simplify/fix the HeapRegionSeq class
7042285: G1: native memory leak during humongous object allocation
6804436: G1: heap region indices should be size_t
A series of fixes and improvements to the HeapRegionSeq class: a) replace the _regions growable array with a standard C array, b) avoid de-allocating / re-allocating HeapRegion instances when the heap shrinks / grows (fix for 7042285), c) introduce fast method to map address to HeapRegion via a "biased" array pointer, d) embed the _hrs object in G1CollectedHeap, instead of pointing to it via an indirection, e) assume that all the regions added to the HeapRegionSeq instance are contiguous, f) replace int's with size_t's for indexes (and expand that to HeapRegion as part of 6804436), g) remove unnecessary / unused methods, h) rename a couple of fields (_alloc_search_start and _seq_bottom), i) fix iterate_from() not to always start from index 0 irrespective of the region passed to it, j) add a verification method to check the HeapRegionSeq assumptions, k) always call the wrappers for _hrs.iterate(), _hrs_length(), and _hrs.at() from G1CollectedHeap, not those methods directly, and l) unify the code that expands the sequence (by either re-using or creating a new HeapRegion) and make it robust wrt to a HeapRegion allocation failing.
Reviewed-by: stefank, johnc, brutisso
2011-06-10 13:16:40 -04:00
2014-08-26 09:36:53 +02:00
uint expanded_by = _hrm . expand_by ( regions_to_expand ) ;
7045330: G1: Simplify/fix the HeapRegionSeq class
7042285: G1: native memory leak during humongous object allocation
6804436: G1: heap region indices should be size_t
A series of fixes and improvements to the HeapRegionSeq class: a) replace the _regions growable array with a standard C array, b) avoid de-allocating / re-allocating HeapRegion instances when the heap shrinks / grows (fix for 7042285), c) introduce fast method to map address to HeapRegion via a "biased" array pointer, d) embed the _hrs object in G1CollectedHeap, instead of pointing to it via an indirection, e) assume that all the regions added to the HeapRegionSeq instance are contiguous, f) replace int's with size_t's for indexes (and expand that to HeapRegion as part of 6804436), g) remove unnecessary / unused methods, h) rename a couple of fields (_alloc_search_start and _seq_bottom), i) fix iterate_from() not to always start from index 0 irrespective of the region passed to it, j) add a verification method to check the HeapRegionSeq assumptions, k) always call the wrappers for _hrs.iterate(), _hrs_length(), and _hrs.at() from G1CollectedHeap, not those methods directly, and l) unify the code that expands the sequence (by either re-using or creating a new HeapRegion) and make it robust wrt to a HeapRegion allocation failing.
Reviewed-by: stefank, johnc, brutisso
2011-06-10 13:16:40 -04:00
2014-08-18 16:10:44 +02:00
if ( expanded_by > 0 ) {
size_t actual_expand_bytes = expanded_by * HeapRegion : : GrainBytes ;
7045330: G1: Simplify/fix the HeapRegionSeq class
7042285: G1: native memory leak during humongous object allocation
6804436: G1: heap region indices should be size_t
A series of fixes and improvements to the HeapRegionSeq class: a) replace the _regions growable array with a standard C array, b) avoid de-allocating / re-allocating HeapRegion instances when the heap shrinks / grows (fix for 7042285), c) introduce fast method to map address to HeapRegion via a "biased" array pointer, d) embed the _hrs object in G1CollectedHeap, instead of pointing to it via an indirection, e) assume that all the regions added to the HeapRegionSeq instance are contiguous, f) replace int's with size_t's for indexes (and expand that to HeapRegion as part of 6804436), g) remove unnecessary / unused methods, h) rename a couple of fields (_alloc_search_start and _seq_bottom), i) fix iterate_from() not to always start from index 0 irrespective of the region passed to it, j) add a verification method to check the HeapRegionSeq assumptions, k) always call the wrappers for _hrs.iterate(), _hrs_length(), and _hrs.at() from G1CollectedHeap, not those methods directly, and l) unify the code that expands the sequence (by either re-using or creating a new HeapRegion) and make it robust wrt to a HeapRegion allocation failing.
Reviewed-by: stefank, johnc, brutisso
2011-06-10 13:16:40 -04:00
assert ( actual_expand_bytes < = aligned_expand_bytes , " post-condition " ) ;
2014-08-18 16:10:44 +02:00
g1_policy ( ) - > record_new_heap_size ( num_regions ( ) ) ;
2011-02-02 10:41:20 -08:00
} else {
2011-09-07 12:21:23 -04:00
ergo_verbose0 ( ErgoHeapSizing ,
" did not expand the heap " ,
ergo_format_reason ( " heap expansion operation failed " ) ) ;
2011-02-02 10:41:20 -08:00
// The expansion of the virtual storage space was unsuccessful.
// Let's see if it was because we ran out of swap.
if ( G1ExitOnExpansionFailure & &
2014-08-26 09:36:53 +02:00
_hrm . available ( ) > = regions_to_expand ) {
2011-02-02 10:41:20 -08:00
// We had head room...
2013-04-30 11:56:52 -07:00
vm_exit_out_of_memory ( aligned_expand_bytes , OOM_MMAP_ERROR , " G1 heap expansion " ) ;
2008-06-05 15:57:56 -07:00
}
}
2014-08-18 16:10:44 +02:00
return regions_to_expand > 0 ;
2008-06-05 15:57:56 -07:00
}
7045330: G1: Simplify/fix the HeapRegionSeq class
7042285: G1: native memory leak during humongous object allocation
6804436: G1: heap region indices should be size_t
A series of fixes and improvements to the HeapRegionSeq class: a) replace the _regions growable array with a standard C array, b) avoid de-allocating / re-allocating HeapRegion instances when the heap shrinks / grows (fix for 7042285), c) introduce fast method to map address to HeapRegion via a "biased" array pointer, d) embed the _hrs object in G1CollectedHeap, instead of pointing to it via an indirection, e) assume that all the regions added to the HeapRegionSeq instance are contiguous, f) replace int's with size_t's for indexes (and expand that to HeapRegion as part of 6804436), g) remove unnecessary / unused methods, h) rename a couple of fields (_alloc_search_start and _seq_bottom), i) fix iterate_from() not to always start from index 0 irrespective of the region passed to it, j) add a verification method to check the HeapRegionSeq assumptions, k) always call the wrappers for _hrs.iterate(), _hrs_length(), and _hrs.at() from G1CollectedHeap, not those methods directly, and l) unify the code that expands the sequence (by either re-using or creating a new HeapRegion) and make it robust wrt to a HeapRegion allocation failing.
Reviewed-by: stefank, johnc, brutisso
2011-06-10 13:16:40 -04:00
void G1CollectedHeap : : shrink_helper ( size_t shrink_bytes ) {
2008-06-05 15:57:56 -07:00
size_t aligned_shrink_bytes =
ReservedSpace : : page_align_size_down ( shrink_bytes ) ;
aligned_shrink_bytes = align_size_down ( aligned_shrink_bytes ,
HeapRegion : : GrainBytes ) ;
2013-05-06 21:30:34 +02:00
uint num_regions_to_remove = ( uint ) ( shrink_bytes / HeapRegion : : GrainBytes ) ;
2014-08-26 09:36:53 +02:00
uint num_regions_removed = _hrm . shrink_by ( num_regions_to_remove ) ;
2013-05-06 21:30:34 +02:00
size_t shrunk_bytes = num_regions_removed * HeapRegion : : GrainBytes ;
2011-09-07 12:21:23 -04:00
ergo_verbose3 ( ErgoHeapSizing ,
" shrink the heap " ,
ergo_format_byte ( " requested shrinking amount " )
ergo_format_byte ( " aligned shrinking amount " )
ergo_format_byte ( " attempted shrinking amount " ) ,
2013-05-06 21:30:34 +02:00
shrink_bytes , aligned_shrink_bytes , shrunk_bytes ) ;
if ( num_regions_removed > 0 ) {
2014-08-18 16:10:44 +02:00
g1_policy ( ) - > record_new_heap_size ( num_regions ( ) ) ;
2011-09-07 12:21:23 -04:00
} else {
ergo_verbose0 ( ErgoHeapSizing ,
" did not shrink the heap " ,
ergo_format_reason ( " heap shrinking operation failed " ) ) ;
2008-06-05 15:57:56 -07:00
}
}
void G1CollectedHeap : : shrink ( size_t shrink_bytes ) {
2011-01-19 19:30:42 -05:00
verify_region_sets_optional ( ) ;
2011-08-12 11:31:06 -04:00
// We should only reach here at the end of a Full GC which means we
// should not not be holding to any GC alloc regions. The method
// below will make sure of that and do any remaining clean up.
abandon_gc_alloc_regions ( ) ;
2011-01-19 19:30:42 -05:00
// Instead of tearing down / rebuilding the free lists here, we
// could instead use the remove_all_pending() method on free_list to
// remove only the ones that we need to remove.
2011-11-07 22:11:12 -05:00
tear_down_region_sets ( true /* free_list_only */ ) ;
2008-06-05 15:57:56 -07:00
shrink_helper ( shrink_bytes ) ;
2011-11-07 22:11:12 -05:00
rebuild_region_sets ( true /* free_list_only */ ) ;
2011-01-19 19:30:42 -05:00
2014-08-26 09:36:53 +02:00
_hrm . verify_optional ( ) ;
2011-01-19 19:30:42 -05:00
verify_region_sets_optional ( ) ;
2008-06-05 15:57:56 -07:00
}
// Public methods.
# ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
# pragma warning( disable:4355 ) // 'this' : used in base member initializer list
# endif // _MSC_VER
G1CollectedHeap : : G1CollectedHeap ( G1CollectorPolicy * policy_ ) :
SharedHeap ( policy_ ) ,
_g1_policy ( policy_ ) ,
2009-12-16 15:12:51 -08:00
_dirty_card_queue_set ( false ) ,
2010-08-02 12:51:43 -07:00
_into_cset_dirty_card_queue_set ( false ) ,
2011-09-22 10:57:37 -07:00
_is_alive_closure_cm ( this ) ,
_is_alive_closure_stw ( this ) ,
_ref_processor_cm ( NULL ) ,
_ref_processor_stw ( NULL ) ,
2008-06-05 15:57:56 -07:00
_process_strong_tasks ( new SubTasksDone ( G1H_PS_NumElements ) ) ,
_bot_shared ( NULL ) ,
2013-06-10 11:30:51 +02:00
_evac_failure_scan_stack ( NULL ) ,
2008-06-05 15:57:56 -07:00
_mark_in_progress ( false ) ,
2011-01-19 19:30:42 -05:00
_cg1r ( NULL ) , _summary_bytes_used ( 0 ) ,
2011-09-23 16:07:49 -04:00
_g1mm ( NULL ) ,
2008-06-05 15:57:56 -07:00
_refine_cte_cl ( NULL ) ,
_full_collection ( false ) ,
2014-03-14 10:15:46 +01:00
_secondary_free_list ( " Secondary Free List " , new SecondaryFreeRegionListMtSafeChecker ( ) ) ,
_old_set ( " Old Set " , false /* humongous */ , new OldRegionSetMtSafeChecker ( ) ) ,
_humongous_set ( " Master Humongous Set " , true /* humongous */ , new HumongousRegionSetMtSafeChecker ( ) ) ,
2014-07-23 09:03:32 +02:00
_humongous_is_live ( ) ,
_has_humongous_reclaim_candidates ( false ) ,
2011-01-19 19:30:42 -05:00
_free_regions_coming ( false ) ,
2008-06-05 15:57:56 -07:00
_young_list ( new YoungList ( this ) ) ,
_gc_time_stamp ( 0 ) ,
2011-08-12 11:31:06 -04:00
_retained_old_gc_alloc_region ( NULL ) ,
2012-08-06 12:20:14 -07:00
_survivor_plab_stats ( YoungPLABSize , PLABWeight ) ,
_old_plab_stats ( OldPLABSize , PLABWeight ) ,
2012-01-05 05:54:01 -05:00
_expand_heap_after_alloc_failure ( true ) ,
2009-01-16 13:02:20 -05:00
_surviving_young_words ( NULL ) ,
2012-06-05 22:30:24 +02:00
_old_marking_cycles_started ( 0 ) ,
_old_marking_cycles_completed ( 0 ) ,
2013-06-10 11:30:51 +02:00
_concurrent_cycle_started ( false ) ,
2014-08-25 09:10:13 +02:00
_heap_summary_sent ( false ) ,
2014-04-16 10:55:26 +02:00
_in_cset_fast_test ( ) ,
2011-12-14 17:43:55 -08:00
_dirty_cards_region_list ( NULL ) ,
_worker_cset_start_region ( NULL ) ,
2013-06-10 11:30:51 +02:00
_worker_cset_start_region_time_stamp ( NULL ) ,
_gc_timer_stw ( new ( ResourceObj : : C_HEAP , mtGC ) STWGCTimer ( ) ) ,
_gc_timer_cm ( new ( ResourceObj : : C_HEAP , mtGC ) ConcurrentGCTimer ( ) ) ,
_gc_tracer_stw ( new ( ResourceObj : : C_HEAP , mtGC ) G1NewTracer ( ) ) ,
_gc_tracer_cm ( new ( ResourceObj : : C_HEAP , mtGC ) G1OldTracer ( ) ) {
_g1h = this ;
2008-06-05 15:57:56 -07:00
if ( _process_strong_tasks = = NULL | | ! _process_strong_tasks - > valid ( ) ) {
vm_exit_during_initialization ( " Failed necessary allocation. " ) ;
}
2009-07-30 16:22:58 -04:00
_humongous_object_threshold_in_words = HeapRegion : : GrainWords / 2 ;
2008-06-05 15:57:56 -07:00
int n_queues = MAX2 ( ( int ) ParallelGCThreads , 1 ) ;
_task_queues = new RefToScanQueueSet ( n_queues ) ;
2014-03-17 10:13:55 +01:00
uint n_rem_sets = HeapRegionRemSet : : num_par_rem_sets ( ) ;
2008-06-05 15:57:56 -07:00
assert ( n_rem_sets > 0 , " Invariant. " ) ;
2012-06-28 17:03:16 -04:00
_worker_cset_start_region = NEW_C_HEAP_ARRAY ( HeapRegion * , n_queues , mtGC ) ;
_worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY ( unsigned int , n_queues , mtGC ) ;
2013-06-10 11:30:51 +02:00
_evacuation_failed_info_array = NEW_C_HEAP_ARRAY ( EvacuationFailedInfo , n_queues , mtGC ) ;
2011-12-14 17:43:55 -08:00
2008-06-05 15:57:56 -07:00
for ( int i = 0 ; i < n_queues ; i + + ) {
RefToScanQueue * q = new RefToScanQueue ( ) ;
q - > initialize ( ) ;
_task_queues - > register_queue ( i , q ) ;
2013-06-10 11:30:51 +02:00
: : new ( & _evacuation_failed_info_array [ i ] ) EvacuationFailedInfo ( ) ;
2008-06-05 15:57:56 -07:00
}
2011-12-14 17:43:55 -08:00
clear_cset_start_regions ( ) ;
2012-08-28 15:20:08 -07:00
// Initialize the G1EvacuationFailureALot counters and flags.
NOT_PRODUCT ( reset_evacuation_should_fail ( ) ; )
2008-06-05 15:57:56 -07:00
guarantee ( _task_queues ! = NULL , " task_queues allocation failure. " ) ;
}
jint G1CollectedHeap : : initialize ( ) {
2010-01-13 15:26:39 -08:00
CollectedHeap : : pre_initialize ( ) ;
2008-06-05 15:57:56 -07:00
os : : enable_vtime ( ) ;
2012-04-13 01:59:38 +02:00
G1Log : : init ( ) ;
2008-06-05 15:57:56 -07:00
// Necessary to satisfy locking discipline assertions.
MutexLocker x ( Heap_lock ) ;
2011-06-24 12:38:49 -04:00
// We have to initialize the printer before committing the heap, as
// it will be used then.
_hr_printer . set_active ( G1PrintHeapRegions ) ;
2008-06-05 15:57:56 -07:00
// While there are no constraints in the GC code that HeapWordSize
// be any particular value, there are multiple other areas in the
// system which believe this to be true (e.g. oop->object_size in some
// cases incorrectly returns the size in wordSize units rather than
// HeapWordSize).
guarantee ( HeapWordSize = = wordSize , " HeapWordSize must equal wordSize " ) ;
size_t init_byte_size = collector_policy ( ) - > initial_heap_byte_size ( ) ;
size_t max_byte_size = collector_policy ( ) - > max_heap_byte_size ( ) ;
2013-11-01 17:09:38 +01:00
size_t heap_alignment = collector_policy ( ) - > heap_alignment ( ) ;
2008-06-05 15:57:56 -07:00
// Ensure that the sizes are properly aligned.
Universe : : check_alignment ( init_byte_size , HeapRegion : : GrainBytes , " g1 heap " ) ;
Universe : : check_alignment ( max_byte_size , HeapRegion : : GrainBytes , " g1 heap " ) ;
2013-08-16 13:22:32 +02:00
Universe : : check_alignment ( max_byte_size , heap_alignment , " g1 heap " ) ;
2008-06-05 15:57:56 -07:00
2014-04-16 16:46:58 +02:00
_refine_cte_cl = new RefineCardTableEntryClosure ( ) ;
_cg1r = new ConcurrentG1Refine ( this , _refine_cte_cl ) ;
2008-06-05 15:57:56 -07:00
// Reserve the maximum.
2009-03-12 10:37:46 -07:00
2011-08-02 12:13:13 -07:00
// When compressed oops are enabled, the preferred heap base
// is calculated by subtracting the requested size from the
// 32Gb boundary and using the result as the base address for
// heap reservation. If the requested size is not aligned to
// HeapRegion::GrainBytes (i.e. the alignment that is passed
// into the ReservedHeapSpace constructor) then the actual
// base of the reserved heap may end up differing from the
// address that was requested (i.e. the preferred heap base).
// If this happens then we could end up using a non-optimal
// compressed oops mode.
6964458: Reimplement class meta-data storage to use native memory
Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Co-authored-by: Stefan Karlsson <stefan.karlsson@oracle.com>
Co-authored-by: Mikael Gerdin <mikael.gerdin@oracle.com>
Co-authored-by: Tom Rodriguez <tom.rodriguez@oracle.com>
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
2012-09-01 13:25:18 -04:00
ReservedSpace heap_rs = Universe : : reserve_heap ( max_byte_size ,
2013-08-16 13:22:32 +02:00
heap_alignment ) ;
2008-06-05 15:57:56 -07:00
// It is important to do this in a way such that concurrent readers can't
2013-06-10 11:30:51 +02:00
// temporarily think something is in the heap. (I've actually seen this
2008-06-05 15:57:56 -07:00
// happen in asserts: DLD.)
_reserved . set_word_size ( 0 ) ;
_reserved . set_start ( ( HeapWord * ) heap_rs . base ( ) ) ;
_reserved . set_end ( ( HeapWord * ) ( heap_rs . base ( ) + heap_rs . size ( ) ) ) ;
// Create the gen rem set (and barrier set) for the entire reserved region.
_rem_set = collector_policy ( ) - > create_rem_set ( _reserved , 2 ) ;
set_barrier_set ( rem_set ( ) - > bs ( ) ) ;
2013-09-24 14:46:29 +02:00
if ( ! barrier_set ( ) - > is_a ( BarrierSet : : G1SATBCTLogging ) ) {
vm_exit_during_initialization ( " G1 requires a G1SATBLoggingCardTableModRefBS " ) ;
2008-06-05 15:57:56 -07:00
return JNI_ENOMEM ;
}
// Also create a G1 rem set.
2013-09-24 14:46:29 +02:00
_g1_rem_set = new G1RemSet ( this , g1_barrier_set ( ) ) ;
2008-06-05 15:57:56 -07:00
// Carve out the G1 part of the heap.
2014-08-18 16:10:44 +02:00
ReservedSpace g1_rs = heap_rs . first_part ( max_byte_size ) ;
2014-08-19 14:09:10 +02:00
G1RegionToSpaceMapper * heap_storage =
G1RegionToSpaceMapper : : create_mapper ( g1_rs ,
UseLargePages ? os : : large_page_size ( ) : os : : vm_page_size ( ) ,
HeapRegion : : GrainBytes ,
1 ,
mtJavaHeap ) ;
heap_storage - > set_mapping_changed_listener ( & _listener ) ;
// Reserve space for the block offset table. We do not support automatic uncommit
// for the card table at this time. BOT only.
ReservedSpace bot_rs ( G1BlockOffsetSharedArray : : compute_size ( g1_rs . size ( ) / HeapWordSize ) ) ;
G1RegionToSpaceMapper * bot_storage =
G1RegionToSpaceMapper : : create_mapper ( bot_rs ,
os : : vm_page_size ( ) ,
HeapRegion : : GrainBytes ,
G1BlockOffsetSharedArray : : N_bytes ,
mtGC ) ;
ReservedSpace cardtable_rs ( G1SATBCardTableLoggingModRefBS : : compute_size ( g1_rs . size ( ) / HeapWordSize ) ) ;
G1RegionToSpaceMapper * cardtable_storage =
G1RegionToSpaceMapper : : create_mapper ( cardtable_rs ,
os : : vm_page_size ( ) ,
HeapRegion : : GrainBytes ,
G1BlockOffsetSharedArray : : N_bytes ,
mtGC ) ;
// Reserve space for the card counts table.
ReservedSpace card_counts_rs ( G1BlockOffsetSharedArray : : compute_size ( g1_rs . size ( ) / HeapWordSize ) ) ;
G1RegionToSpaceMapper * card_counts_storage =
G1RegionToSpaceMapper : : create_mapper ( card_counts_rs ,
os : : vm_page_size ( ) ,
HeapRegion : : GrainBytes ,
G1BlockOffsetSharedArray : : N_bytes ,
mtGC ) ;
// Reserve space for prev and next bitmap.
size_t bitmap_size = CMBitMap : : compute_size ( g1_rs . size ( ) ) ;
ReservedSpace prev_bitmap_rs ( ReservedSpace : : allocation_align_size_up ( bitmap_size ) ) ;
G1RegionToSpaceMapper * prev_bitmap_storage =
G1RegionToSpaceMapper : : create_mapper ( prev_bitmap_rs ,
os : : vm_page_size ( ) ,
HeapRegion : : GrainBytes ,
CMBitMap : : mark_distance ( ) ,
mtGC ) ;
ReservedSpace next_bitmap_rs ( ReservedSpace : : allocation_align_size_up ( bitmap_size ) ) ;
G1RegionToSpaceMapper * next_bitmap_storage =
G1RegionToSpaceMapper : : create_mapper ( next_bitmap_rs ,
os : : vm_page_size ( ) ,
HeapRegion : : GrainBytes ,
CMBitMap : : mark_distance ( ) ,
mtGC ) ;
2014-08-26 09:36:53 +02:00
_hrm . initialize ( heap_storage , prev_bitmap_storage , next_bitmap_storage , bot_storage , cardtable_storage , card_counts_storage ) ;
2014-08-19 14:09:10 +02:00
g1_barrier_set ( ) - > initialize ( cardtable_storage ) ;
// Do later initialization work for concurrent refinement.
_cg1r - > init ( card_counts_storage ) ;
2013-05-09 11:16:39 -07:00
2009-06-11 17:19:33 -07:00
// 6843694 - ensure that the maximum region index can fit
// in the remembered set structures.
2012-04-18 07:21:15 -04:00
const uint max_region_idx = ( 1U < < ( sizeof ( RegionIdx_t ) * BitsPerByte - 1 ) ) - 1 ;
2009-06-11 17:19:33 -07:00
guarantee ( ( max_regions ( ) - 1 ) < = max_region_idx , " too many regions " ) ;
size_t max_cards_per_region = ( ( size_t ) 1 < < ( sizeof ( CardIdx_t ) * BitsPerByte - 1 ) ) - 1 ;
2009-07-30 16:22:58 -04:00
guarantee ( HeapRegion : : CardsPerRegion > 0 , " make sure it's initialized " ) ;
2011-10-05 08:44:10 -07:00
guarantee ( HeapRegion : : CardsPerRegion < max_cards_per_region ,
2009-07-30 16:22:58 -04:00
" too many cards per region " ) ;
2009-06-11 17:19:33 -07:00
2014-03-14 10:15:46 +01:00
FreeRegionList : : set_unrealistically_long_length ( max_regions ( ) + 1 ) ;
2011-01-19 19:30:42 -05:00
2014-08-19 14:09:10 +02:00
_bot_shared = new G1BlockOffsetSharedArray ( _reserved , bot_storage ) ;
2008-06-05 15:57:56 -07:00
_g1h = this ;
2014-08-26 09:36:53 +02:00
_in_cset_fast_test . initialize ( _hrm . reserved ( ) . start ( ) , _hrm . reserved ( ) . end ( ) , HeapRegion : : GrainBytes ) ;
_humongous_is_live . initialize ( _hrm . reserved ( ) . start ( ) , _hrm . reserved ( ) . end ( ) , HeapRegion : : GrainBytes ) ;
2010-04-22 10:02:38 -07:00
2008-06-05 15:57:56 -07:00
// Create the ConcurrentMark data structure and thread.
// (Must do this late, so that "max_regions" is defined.)
2014-08-19 14:09:10 +02:00
_cm = new ConcurrentMark ( this , prev_bitmap_storage , next_bitmap_storage ) ;
2012-10-01 09:28:13 -07:00
if ( _cm = = NULL | | ! _cm - > completed_initialization ( ) ) {
vm_shutdown_during_initialization ( " Could not create/initialize ConcurrentMark " ) ;
return JNI_ENOMEM ;
}
2008-06-05 15:57:56 -07:00
_cmThread = _cm - > cmThread ( ) ;
// Initialize the from_card cache structure of HeapRegionRemSet.
HeapRegionRemSet : : init_heap ( max_regions ( ) ) ;
2009-03-25 13:10:54 -07:00
// Now expand into the initial heap size.
2011-02-02 10:41:20 -08:00
if ( ! expand ( init_byte_size ) ) {
2012-10-01 09:28:13 -07:00
vm_shutdown_during_initialization ( " Failed to allocate initial heap. " ) ;
2011-02-02 10:41:20 -08:00
return JNI_ENOMEM ;
}
2008-06-05 15:57:56 -07:00
// Perform any initialization actions delegated to the policy.
g1_policy ( ) - > init ( ) ;
JavaThread : : satb_mark_queue_set ( ) . initialize ( SATB_Q_CBL_mon ,
SATB_Q_FL_lock ,
2009-12-16 15:12:51 -08:00
G1SATBProcessCompletedThreshold ,
2008-06-05 15:57:56 -07:00
Shared_SATB_Q_lock ) ;
2009-05-11 16:30:56 -07:00
2014-04-16 16:46:58 +02:00
JavaThread : : dirty_card_queue_set ( ) . initialize ( _refine_cte_cl ,
DirtyCardQ_CBL_mon ,
2009-05-11 16:30:56 -07:00
DirtyCardQ_FL_lock ,
2009-12-16 15:12:51 -08:00
concurrent_g1_refine ( ) - > yellow_zone ( ) ,
concurrent_g1_refine ( ) - > red_zone ( ) ,
2009-05-11 16:30:56 -07:00
Shared_DirtyCardQ_lock ) ;
2009-03-06 13:50:14 -08:00
if ( G1DeferredRSUpdate ) {
2014-04-16 16:46:58 +02:00
dirty_card_queue_set ( ) . initialize ( NULL , // Should never be called by the Java code
DirtyCardQ_CBL_mon ,
2009-03-06 13:50:14 -08:00
DirtyCardQ_FL_lock ,
2009-12-16 15:12:51 -08:00
- 1 , // never trigger processing
- 1 , // no limit on length
2009-03-06 13:50:14 -08:00
Shared_DirtyCardQ_lock ,
& JavaThread : : dirty_card_queue_set ( ) ) ;
}
2010-08-02 12:51:43 -07:00
// Initialize the card queue set used to hold cards containing
// references into the collection set.
2014-04-16 16:46:58 +02:00
_into_cset_dirty_card_queue_set . initialize ( NULL , // Should never be called by the Java code
DirtyCardQ_CBL_mon ,
2010-08-02 12:51:43 -07:00
DirtyCardQ_FL_lock ,
- 1 , // never trigger processing
- 1 , // no limit on length
Shared_DirtyCardQ_lock ,
& JavaThread : : dirty_card_queue_set ( ) ) ;
2008-06-05 15:57:56 -07:00
// In case we're keeping closure specialization stats, initialize those
// counts and that mechanism.
SpecializationStats : : clear ( ) ;
2014-08-18 16:10:44 +02:00
// Here we allocate the dummy HeapRegion that is required by the
// G1AllocRegion class.
2014-08-26 09:36:53 +02:00
HeapRegion * dummy_region = _hrm . get_dummy_region ( ) ;
2014-08-19 14:09:10 +02:00
2011-03-30 10:26:59 -04:00
// We'll re-use the same region whether the alloc region will
// require BOT updates or not and, if it doesn't, then a non-young
// region will complain that it cannot support allocations without
// BOT updates. So we'll tag the dummy region as young to avoid that.
dummy_region - > set_young ( ) ;
// Make sure it's full.
dummy_region - > set_top ( dummy_region - > end ( ) ) ;
G1AllocRegion : : setup ( this , dummy_region ) ;
init_mutator_alloc_region ( ) ;
2011-04-21 10:23:44 -07:00
// Do create of the monitoring and management support so that
// values in the heap have been properly initialized.
2011-09-23 16:07:49 -04:00
_g1mm = new G1MonitoringSupport ( this ) ;
2011-04-21 10:23:44 -07:00
2014-03-18 19:07:22 +01:00
G1StringDedup : : initialize ( ) ;
2008-06-05 15:57:56 -07:00
return JNI_OK ;
}
2014-04-11 11:00:12 +02:00
void G1CollectedHeap : : stop ( ) {
2014-06-13 13:46:06 +02:00
// Stop all concurrent threads. We do this to make sure these threads
// do not continue to execute and access resources (e.g. gclog_or_tty)
2014-06-04 14:16:20 +02:00
// that are destroyed during shutdown.
2014-06-13 13:46:06 +02:00
_cg1r - > stop ( ) ;
_cmThread - > stop ( ) ;
if ( G1StringDedup : : is_enabled ( ) ) {
G1StringDedup : : stop ( ) ;
}
2014-04-11 11:00:12 +02:00
}
2014-07-23 09:03:32 +02:00
void G1CollectedHeap : : clear_humongous_is_live_table ( ) {
guarantee ( G1ReclaimDeadHumongousObjectsAtYoungGC , " Should only be called if true " ) ;
_humongous_is_live . clear ( ) ;
}
2013-09-11 16:25:02 +02:00
size_t G1CollectedHeap : : conservative_max_heap_alignment ( ) {
return HeapRegion : : max_region_size ( ) ;
}
2008-06-05 15:57:56 -07:00
void G1CollectedHeap : : ref_processing_init ( ) {
2010-12-01 17:34:02 -08:00
// Reference processing in G1 currently works as follows:
//
2011-09-22 10:57:37 -07:00
// * There are two reference processor instances. One is
// used to record and process discovered references
// during concurrent marking; the other is used to
// record and process references during STW pauses
// (both full and incremental).
// * Both ref processors need to 'span' the entire heap as
// the regions in the collection set may be dotted around.
//
// * For the concurrent marking ref processor:
// * Reference discovery is enabled at initial marking.
// * Reference discovery is disabled and the discovered
// references processed etc during remarking.
// * Reference discovery is MT (see below).
// * Reference discovery requires a barrier (see below).
// * Reference processing may or may not be MT
// (depending on the value of ParallelRefProcEnabled
// and ParallelGCThreads).
// * A full GC disables reference discovery by the CM
// ref processor and abandons any entries on it's
// discovered lists.
//
// * For the STW processor:
// * Non MT discovery is enabled at the start of a full GC.
// * Processing and enqueueing during a full GC is non-MT.
// * During a full GC, references are processed after marking.
//
// * Discovery (may or may not be MT) is enabled at the start
// of an incremental evacuation pause.
// * References are processed near the end of a STW evacuation pause.
// * For both types of GC:
// * Discovery is atomic - i.e. not concurrent.
// * Reference discovery will not need a barrier.
2010-12-01 17:34:02 -08:00
2008-06-05 15:57:56 -07:00
SharedHeap : : ref_processing_init ( ) ;
MemRegion mr = reserved_region ( ) ;
2011-09-22 10:57:37 -07:00
// Concurrent Mark ref processor
_ref_processor_cm =
new ReferenceProcessor ( mr , // span
ParallelRefProcEnabled & & ( ParallelGCThreads > 1 ) ,
// mt processing
( int ) ParallelGCThreads ,
// degree of mt processing
( ParallelGCThreads > 1 ) | | ( ConcGCThreads > 1 ) ,
// mt discovery
( int ) MAX2 ( ParallelGCThreads , ConcGCThreads ) ,
// degree of mt discovery
false ,
// Reference discovery is not atomic
2014-06-03 10:44:36 +02:00
& _is_alive_closure_cm ) ;
2011-09-22 10:57:37 -07:00
// is alive closure
// (for efficiency/performance)
// STW ref processor
_ref_processor_stw =
2011-03-17 10:32:46 -07:00
new ReferenceProcessor ( mr , // span
2011-09-22 10:57:37 -07:00
ParallelRefProcEnabled & & ( ParallelGCThreads > 1 ) ,
// mt processing
MAX2 ( ( int ) ParallelGCThreads , 1 ) ,
// degree of mt processing
( ParallelGCThreads > 1 ) ,
// mt discovery
MAX2 ( ( int ) ParallelGCThreads , 1 ) ,
// degree of mt discovery
true ,
// Reference discovery is atomic
2014-06-03 10:44:36 +02:00
& _is_alive_closure_stw ) ;
2011-09-22 10:57:37 -07:00
// is alive closure
// (for efficiency/performance)
2008-06-05 15:57:56 -07:00
}
size_t G1CollectedHeap : : capacity ( ) const {
2014-08-26 09:36:53 +02:00
return _hrm . length ( ) * HeapRegion : : GrainBytes ;
2008-06-05 15:57:56 -07:00
}
2012-07-19 15:15:54 -07:00
void G1CollectedHeap : : reset_gc_time_stamps ( HeapRegion * hr ) {
assert ( ! hr - > continuesHumongous ( ) , " pre-condition " ) ;
hr - > reset_gc_time_stamp ( ) ;
if ( hr - > startsHumongous ( ) ) {
2014-08-26 09:36:53 +02:00
uint first_index = hr - > hrm_index ( ) + 1 ;
2012-07-19 15:15:54 -07:00
uint last_index = hr - > last_hc_index ( ) ;
for ( uint i = first_index ; i < last_index ; i + = 1 ) {
HeapRegion * chr = region_at ( i ) ;
assert ( chr - > continuesHumongous ( ) , " sanity " ) ;
chr - > reset_gc_time_stamp ( ) ;
}
}
}
# ifndef PRODUCT
class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
private :
unsigned _gc_time_stamp ;
bool _failures ;
public :
CheckGCTimeStampsHRClosure ( unsigned gc_time_stamp ) :
_gc_time_stamp ( gc_time_stamp ) , _failures ( false ) { }
virtual bool doHeapRegion ( HeapRegion * hr ) {
unsigned region_gc_time_stamp = hr - > get_gc_time_stamp ( ) ;
if ( _gc_time_stamp ! = region_gc_time_stamp ) {
gclog_or_tty - > print_cr ( " Region " HR_FORMAT " has GC time stamp = %d, "
" expected %d " , HR_FORMAT_PARAMS ( hr ) ,
region_gc_time_stamp , _gc_time_stamp ) ;
_failures = true ;
}
return false ;
}
bool failures ( ) { return _failures ; }
} ;
void G1CollectedHeap : : check_gc_time_stamps ( ) {
CheckGCTimeStampsHRClosure cl ( _gc_time_stamp ) ;
heap_region_iterate ( & cl ) ;
guarantee ( ! cl . failures ( ) , " all GC time stamps should have been reset " ) ;
}
# endif // PRODUCT
2010-08-02 12:51:43 -07:00
void G1CollectedHeap : : iterate_dirty_card_closure ( CardTableEntryClosure * cl ,
DirtyCardQueue * into_cset_dcq ,
bool concurrent ,
2014-04-03 17:49:31 +04:00
uint worker_i ) {
2009-08-03 12:59:30 -07:00
// Clean cards in the hot card cache
2013-05-09 11:16:39 -07:00
G1HotCardCache * hot_card_cache = _cg1r - > hot_card_cache ( ) ;
hot_card_cache - > drain ( worker_i , g1_rem_set ( ) , into_cset_dcq ) ;
2009-08-03 12:59:30 -07:00
2008-06-05 15:57:56 -07:00
DirtyCardQueueSet & dcqs = JavaThread : : dirty_card_queue_set ( ) ;
int n_completed_buffers = 0 ;
2010-08-02 12:51:43 -07:00
while ( dcqs . apply_closure_to_completed_buffer ( cl , worker_i , 0 , true ) ) {
2008-06-05 15:57:56 -07:00
n_completed_buffers + + ;
}
2012-08-23 10:21:12 +02:00
g1_policy ( ) - > phase_times ( ) - > record_update_rs_processed_buffers ( worker_i , n_completed_buffers ) ;
2008-06-05 15:57:56 -07:00
dcqs . clear_n_completed_buffers ( ) ;
assert ( ! dcqs . completed_buffers_exist_dirty ( ) , " Completed buffers exist! " ) ;
}
// Computes the sum of the storage used by the various regions.
size_t G1CollectedHeap : : used ( ) const {
2009-07-22 18:25:00 -07:00
assert ( Heap_lock - > owner ( ) ! = NULL ,
" Should be owned on this thread's behalf. " ) ;
2008-06-05 15:57:56 -07:00
size_t result = _summary_bytes_used ;
2009-07-14 15:40:39 -07:00
// Read only once in case it is set to NULL concurrently
2011-03-30 10:26:59 -04:00
HeapRegion * hr = _mutator_alloc_region . get ( ) ;
2009-07-14 15:40:39 -07:00
if ( hr ! = NULL )
result + = hr - > used ( ) ;
2008-06-05 15:57:56 -07:00
return result ;
}
2009-07-15 12:22:59 -04:00
size_t G1CollectedHeap : : used_unlocked ( ) const {
size_t result = _summary_bytes_used ;
return result ;
}
2008-06-05 15:57:56 -07:00
class SumUsedClosure : public HeapRegionClosure {
size_t _used ;
public :
SumUsedClosure ( ) : _used ( 0 ) { }
bool doHeapRegion ( HeapRegion * r ) {
if ( ! r - > continuesHumongous ( ) ) {
_used + = r - > used ( ) ;
}
return false ;
}
size_t result ( ) { return _used ; }
} ;
size_t G1CollectedHeap : : recalculate_used ( ) const {
2014-03-17 10:13:42 +01:00
double recalculate_used_start = os : : elapsedTime ( ) ;
2008-06-05 15:57:56 -07:00
SumUsedClosure blk ;
7045330: G1: Simplify/fix the HeapRegionSeq class
7042285: G1: native memory leak during humongous object allocation
6804436: G1: heap region indices should be size_t
A series of fixes and improvements to the HeapRegionSeq class: a) replace the _regions growable array with a standard C array, b) avoid de-allocating / re-allocating HeapRegion instances when the heap shrinks / grows (fix for 7042285), c) introduce fast method to map address to HeapRegion via a "biased" array pointer, d) embed the _hrs object in G1CollectedHeap, instead of pointing to it via an indirection, e) assume that all the regions added to the HeapRegionSeq instance are contiguous, f) replace int's with size_t's for indexes (and expand that to HeapRegion as part of 6804436), g) remove unnecessary / unused methods, h) rename a couple of fields (_alloc_search_start and _seq_bottom), i) fix iterate_from() not to always start from index 0 irrespective of the region passed to it, j) add a verification method to check the HeapRegionSeq assumptions, k) always call the wrappers for _hrs.iterate(), _hrs_length(), and _hrs.at() from G1CollectedHeap, not those methods directly, and l) unify the code that expands the sequence (by either re-using or creating a new HeapRegion) and make it robust wrt to a HeapRegion allocation failing.
Reviewed-by: stefank, johnc, brutisso
2011-06-10 13:16:40 -04:00
heap_region_iterate ( & blk ) ;
2014-03-17 10:13:42 +01:00
g1_policy ( ) - > phase_times ( ) - > record_evac_fail_recalc_used_time ( ( os : : elapsedTime ( ) - recalculate_used_start ) * 1000.0 ) ;
2008-06-05 15:57:56 -07:00
return blk . result ( ) ;
}
2010-06-28 14:13:17 -04:00
bool G1CollectedHeap : : should_do_concurrent_full_gc ( GCCause : : Cause cause ) {
2012-02-14 08:21:08 -05:00
switch ( cause ) {
case GCCause : : _gc_locker : return GCLockerInvokesConcurrent ;
case GCCause : : _java_lang_system_gc : return ExplicitGCInvokesConcurrent ;
case GCCause : : _g1_humongous_allocation : return true ;
default : return false ;
}
2010-06-28 14:13:17 -04:00
}
2011-04-19 15:46:59 -04:00
# ifndef PRODUCT
void G1CollectedHeap : : allocate_dummy_regions ( ) {
// Let's fill up most of the region
size_t word_size = HeapRegion : : GrainWords - 1024 ;
// And as a result the region we'll allocate will be humongous.
guarantee ( isHumongous ( word_size ) , " sanity " ) ;
for ( uintx i = 0 ; i < G1DummyRegionsPerGC ; + + i ) {
// Let's use the existing mechanism for the allocation
HeapWord * dummy_obj = humongous_obj_allocate ( word_size ) ;
if ( dummy_obj ! = NULL ) {
MemRegion mr ( dummy_obj , word_size ) ;
CollectedHeap : : fill_with_object ( mr ) ;
} else {
// If we can't allocate once, we probably cannot allocate
// again. Let's get out of the loop.
break ;
}
}
}
# endif // !PRODUCT
2012-06-05 22:30:24 +02:00
void G1CollectedHeap : : increment_old_marking_cycles_started ( ) {
assert ( _old_marking_cycles_started = = _old_marking_cycles_completed | |
_old_marking_cycles_started = = _old_marking_cycles_completed + 1 ,
err_msg ( " Wrong marking cycle count (started: %d, completed: %d) " ,
_old_marking_cycles_started , _old_marking_cycles_completed ) ) ;
_old_marking_cycles_started + + ;
}
void G1CollectedHeap : : increment_old_marking_cycles_completed ( bool concurrent ) {
2010-06-28 14:13:17 -04:00
MonitorLockerEx x ( FullGCCount_lock , Mutex : : _no_safepoint_check_flag ) ;
2010-12-14 16:19:44 -05:00
// We assume that if concurrent == true, then the caller is a
// concurrent thread that was joined the Suspendible Thread
// Set. If there's ever a cheap way to check this, we should add an
// assert here.
2010-06-28 14:13:17 -04:00
// Given that this method is called at the end of a Full GC or of a
// concurrent cycle, and those can be nested (i.e., a Full GC can
// interrupt a concurrent cycle), the number of full collections
// completed should be either one (in the case where there was no
// nesting) or two (when a Full GC interrupted a concurrent cycle)
// behind the number of full collections started.
// This is the case for the inner caller, i.e. a Full GC.
2010-12-14 16:19:44 -05:00
assert ( concurrent | |
2012-06-05 22:30:24 +02:00
( _old_marking_cycles_started = = _old_marking_cycles_completed + 1 ) | |
( _old_marking_cycles_started = = _old_marking_cycles_completed + 2 ) ,
err_msg ( " for inner caller (Full GC): _old_marking_cycles_started = %u "
" is inconsistent with _old_marking_cycles_completed = %u " ,
_old_marking_cycles_started , _old_marking_cycles_completed ) ) ;
2010-06-28 14:13:17 -04:00
// This is the case for the outer caller, i.e. the concurrent cycle.
2010-12-14 16:19:44 -05:00
assert ( ! concurrent | |
2012-06-05 22:30:24 +02:00
( _old_marking_cycles_started = = _old_marking_cycles_completed + 1 ) ,
2010-12-14 16:19:44 -05:00
err_msg ( " for outer caller (concurrent cycle): "
2012-06-05 22:30:24 +02:00
" _old_marking_cycles_started = %u "
" is inconsistent with _old_marking_cycles_completed = %u " ,
_old_marking_cycles_started , _old_marking_cycles_completed ) ) ;
2010-06-28 14:13:17 -04:00
2012-06-05 22:30:24 +02:00
_old_marking_cycles_completed + = 1 ;
2010-06-28 14:13:17 -04:00
2010-10-01 18:23:16 -07:00
// We need to clear the "in_progress" flag in the CM thread before
// we wake up any waiters (especially when ExplicitInvokesConcurrent
// is set) so that if a waiter requests another System.gc() it doesn't
2013-06-10 11:30:51 +02:00
// incorrectly see that a marking cycle is still in progress.
2010-12-14 16:19:44 -05:00
if ( concurrent ) {
2010-10-01 18:23:16 -07:00
_cmThread - > clear_in_progress ( ) ;
}
2010-06-28 14:13:17 -04:00
// This notify_all() will ensure that a thread that called
// System.gc() with (with ExplicitGCInvokesConcurrent set or not)
// and it's waiting for a full GC to finish will be woken up. It is
// waiting in VM_G1IncCollectionPause::doit_epilogue().
FullGCCount_lock - > notify_all ( ) ;
}
2013-11-23 12:25:13 +01:00
void G1CollectedHeap : : register_concurrent_cycle_start ( const Ticks & start_time ) {
2013-06-10 11:30:51 +02:00
_concurrent_cycle_started = true ;
_gc_timer_cm - > register_gc_start ( start_time ) ;
_gc_tracer_cm - > report_gc_start ( gc_cause ( ) , _gc_timer_cm - > gc_start ( ) ) ;
trace_heap_before_gc ( _gc_tracer_cm ) ;
}
void G1CollectedHeap : : register_concurrent_cycle_end ( ) {
if ( _concurrent_cycle_started ) {
if ( _cm - > has_aborted ( ) ) {
_gc_tracer_cm - > report_concurrent_mode_failure ( ) ;
}
2013-08-22 11:23:15 +02:00
2013-11-23 12:25:13 +01:00
_gc_timer_cm - > register_gc_end ( ) ;
2013-06-10 11:30:51 +02:00
_gc_tracer_cm - > report_gc_end ( _gc_timer_cm - > gc_end ( ) , _gc_timer_cm - > time_partitions ( ) ) ;
2014-08-25 09:10:13 +02:00
// Clear state variables to prepare for the next concurrent cycle.
2013-06-10 11:30:51 +02:00
_concurrent_cycle_started = false ;
2014-08-25 09:10:13 +02:00
_heap_summary_sent = false ;
2013-06-10 11:30:51 +02:00
}
}
void G1CollectedHeap : : trace_heap_after_concurrent_cycle ( ) {
if ( _concurrent_cycle_started ) {
2014-08-25 09:10:13 +02:00
// This function can be called when:
// the cleanup pause is run
// the concurrent cycle is aborted before the cleanup pause.
// the concurrent cycle is aborted after the cleanup pause,
// but before the concurrent cycle end has been registered.
// Make sure that we only send the heap information once.
if ( ! _heap_summary_sent ) {
trace_heap_after_gc ( _gc_tracer_cm ) ;
_heap_summary_sent = true ;
}
2013-06-10 11:30:51 +02:00
}
}
G1YCType G1CollectedHeap : : yc_type ( ) {
bool is_young = g1_policy ( ) - > gcs_are_young ( ) ;
bool is_initial_mark = g1_policy ( ) - > during_initial_mark_pause ( ) ;
bool is_during_mark = mark_in_progress ( ) ;
if ( is_initial_mark ) {
return InitialMark ;
} else if ( is_during_mark ) {
return DuringMark ;
} else if ( is_young ) {
return Normal ;
} else {
return Mixed ;
}
}
2009-11-19 13:43:25 -08:00
void G1CollectedHeap : : collect ( GCCause : : Cause cause ) {
2012-02-14 08:21:08 -05:00
assert_heap_not_locked ( ) ;
2008-06-05 15:57:56 -07:00
2010-06-28 14:13:17 -04:00
unsigned int gc_count_before ;
2012-06-05 22:30:24 +02:00
unsigned int old_marking_count_before ;
2012-02-14 08:21:08 -05:00
bool retry_gc ;
do {
retry_gc = false ;
{
MutexLocker ml ( Heap_lock ) ;
2010-06-28 14:13:17 -04:00
2012-02-14 08:21:08 -05:00
// Read the GC count while holding the Heap_lock
gc_count_before = total_collections ( ) ;
2012-06-05 22:30:24 +02:00
old_marking_count_before = _old_marking_cycles_started ;
2012-02-14 08:21:08 -05:00
}
if ( should_do_concurrent_full_gc ( cause ) ) {
// Schedule an initial-mark evacuation pause that will start a
// concurrent cycle. We're setting word_size to 0 which means that
// we are not requesting a post-GC allocation.
2010-06-28 14:13:17 -04:00
VM_G1IncCollectionPause op ( gc_count_before ,
2010-08-24 17:24:33 -04:00
0 , /* word_size */
2012-02-14 08:21:08 -05:00
true , /* should_initiate_conc_mark */
2010-06-28 14:13:17 -04:00
g1_policy ( ) - > max_pause_time_ms ( ) ,
cause ) ;
2012-03-12 14:59:00 -07:00
2009-11-19 13:43:25 -08:00
VMThread : : execute ( & op ) ;
2012-02-14 08:21:08 -05:00
if ( ! op . pause_succeeded ( ) ) {
2012-06-05 22:30:24 +02:00
if ( old_marking_count_before = = _old_marking_cycles_started ) {
2012-03-12 14:59:00 -07:00
retry_gc = op . should_retry_gc ( ) ;
2012-02-14 08:21:08 -05:00
} else {
// A Full GC happened while we were trying to schedule the
// initial-mark GC. No point in starting a new cycle given
// that the whole heap was collected anyway.
}
2012-03-12 14:59:00 -07:00
if ( retry_gc ) {
if ( GC_locker : : is_active_and_needs_gc ( ) ) {
GC_locker : : stall_until_clear ( ) ;
}
}
2012-02-14 08:21:08 -05:00
}
2010-06-28 14:13:17 -04:00
} else {
2014-08-21 16:44:41 +02:00
if ( cause = = GCCause : : _gc_locker | | cause = = GCCause : : _wb_young_gc
2012-02-14 08:21:08 -05:00
DEBUG_ONLY ( | | cause = = GCCause : : _scavenge_alot ) ) {
// Schedule a standard evacuation pause. We're setting word_size
// to 0 which means that we are not requesting a post-GC allocation.
VM_G1IncCollectionPause op ( gc_count_before ,
0 , /* word_size */
false , /* should_initiate_conc_mark */
g1_policy ( ) - > max_pause_time_ms ( ) ,
cause ) ;
VMThread : : execute ( & op ) ;
} else {
// Schedule a Full GC.
2012-06-05 22:30:24 +02:00
VM_G1CollectFull op ( gc_count_before , old_marking_count_before , cause ) ;
2012-02-14 08:21:08 -05:00
VMThread : : execute ( & op ) ;
}
2009-11-19 13:43:25 -08:00
}
2012-02-14 08:21:08 -05:00
} while ( retry_gc ) ;
2008-06-05 15:57:56 -07:00
}
bool G1CollectedHeap : : is_in ( const void * p ) const {
2014-08-26 09:36:53 +02:00
if ( _hrm . reserved ( ) . contains ( p ) ) {
2014-08-19 14:09:10 +02:00
// Given that we know that p is in the reserved space,
2011-12-14 12:15:26 +01:00
// heap_region_containing_raw() should successfully
// return the containing region.
HeapRegion * hr = heap_region_containing_raw ( p ) ;
2008-06-05 15:57:56 -07:00
return hr - > is_in ( p ) ;
} else {
6964458: Reimplement class meta-data storage to use native memory
Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Co-authored-by: Stefan Karlsson <stefan.karlsson@oracle.com>
Co-authored-by: Mikael Gerdin <mikael.gerdin@oracle.com>
Co-authored-by: Tom Rodriguez <tom.rodriguez@oracle.com>
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
2012-09-01 13:25:18 -04:00
return false ;
2008-06-05 15:57:56 -07:00
}
}
2014-08-19 14:09:10 +02:00
# ifdef ASSERT
bool G1CollectedHeap : : is_in_exact ( const void * p ) const {
bool contains = reserved_region ( ) . contains ( p ) ;
2014-08-26 09:36:53 +02:00
bool available = _hrm . is_available ( addr_to_region ( ( HeapWord * ) p ) ) ;
2014-08-19 14:09:10 +02:00
if ( contains & & available ) {
return true ;
} else {
return false ;
}
}
# endif
2008-06-05 15:57:56 -07:00
// Iteration functions.
2014-08-07 22:28:53 +02:00
// Applies an ExtendedOopClosure onto all references of objects within a HeapRegion.
2008-06-05 15:57:56 -07:00
class IterateOopClosureRegionClosure : public HeapRegionClosure {
6964458: Reimplement class meta-data storage to use native memory
Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Co-authored-by: Stefan Karlsson <stefan.karlsson@oracle.com>
Co-authored-by: Mikael Gerdin <mikael.gerdin@oracle.com>
Co-authored-by: Tom Rodriguez <tom.rodriguez@oracle.com>
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
2012-09-01 13:25:18 -04:00
ExtendedOopClosure * _cl ;
2008-06-05 15:57:56 -07:00
public :
2014-08-07 22:28:53 +02:00
IterateOopClosureRegionClosure ( ExtendedOopClosure * cl ) : _cl ( cl ) { }
2008-06-05 15:57:56 -07:00
bool doHeapRegion ( HeapRegion * r ) {
2012-07-19 15:15:54 -07:00
if ( ! r - > continuesHumongous ( ) ) {
2008-06-05 15:57:56 -07:00
r - > oop_iterate ( _cl ) ;
}
return false ;
}
} ;
6964458: Reimplement class meta-data storage to use native memory
Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Co-authored-by: Stefan Karlsson <stefan.karlsson@oracle.com>
Co-authored-by: Mikael Gerdin <mikael.gerdin@oracle.com>
Co-authored-by: Tom Rodriguez <tom.rodriguez@oracle.com>
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
2012-09-01 13:25:18 -04:00
void G1CollectedHeap : : oop_iterate ( ExtendedOopClosure * cl ) {
2014-08-07 22:28:53 +02:00
IterateOopClosureRegionClosure blk ( cl ) ;
7045330: G1: Simplify/fix the HeapRegionSeq class
7042285: G1: native memory leak during humongous object allocation
6804436: G1: heap region indices should be size_t
A series of fixes and improvements to the HeapRegionSeq class: a) replace the _regions growable array with a standard C array, b) avoid de-allocating / re-allocating HeapRegion instances when the heap shrinks / grows (fix for 7042285), c) introduce fast method to map address to HeapRegion via a "biased" array pointer, d) embed the _hrs object in G1CollectedHeap, instead of pointing to it via an indirection, e) assume that all the regions added to the HeapRegionSeq instance are contiguous, f) replace int's with size_t's for indexes (and expand that to HeapRegion as part of 6804436), g) remove unnecessary / unused methods, h) rename a couple of fields (_alloc_search_start and _seq_bottom), i) fix iterate_from() not to always start from index 0 irrespective of the region passed to it, j) add a verification method to check the HeapRegionSeq assumptions, k) always call the wrappers for _hrs.iterate(), _hrs_length(), and _hrs.at() from G1CollectedHeap, not those methods directly, and l) unify the code that expands the sequence (by either re-using or creating a new HeapRegion) and make it robust wrt to a HeapRegion allocation failing.
Reviewed-by: stefank, johnc, brutisso
2011-06-10 13:16:40 -04:00
heap_region_iterate ( & blk ) ;
2008-06-05 15:57:56 -07:00
}
// Iterates an ObjectClosure over all objects within a HeapRegion.
class IterateObjectClosureRegionClosure : public HeapRegionClosure {
ObjectClosure * _cl ;
public :
IterateObjectClosureRegionClosure ( ObjectClosure * cl ) : _cl ( cl ) { }
bool doHeapRegion ( HeapRegion * r ) {
if ( ! r - > continuesHumongous ( ) ) {
r - > object_iterate ( _cl ) ;
}
return false ;
}
} ;
6964458: Reimplement class meta-data storage to use native memory
Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Co-authored-by: Stefan Karlsson <stefan.karlsson@oracle.com>
Co-authored-by: Mikael Gerdin <mikael.gerdin@oracle.com>
Co-authored-by: Tom Rodriguez <tom.rodriguez@oracle.com>
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
2012-09-01 13:25:18 -04:00
void G1CollectedHeap : : object_iterate ( ObjectClosure * cl ) {
2008-06-05 15:57:56 -07:00
IterateObjectClosureRegionClosure blk ( cl ) ;
7045330: G1: Simplify/fix the HeapRegionSeq class
7042285: G1: native memory leak during humongous object allocation
6804436: G1: heap region indices should be size_t
A series of fixes and improvements to the HeapRegionSeq class: a) replace the _regions growable array with a standard C array, b) avoid de-allocating / re-allocating HeapRegion instances when the heap shrinks / grows (fix for 7042285), c) introduce fast method to map address to HeapRegion via a "biased" array pointer, d) embed the _hrs object in G1CollectedHeap, instead of pointing to it via an indirection, e) assume that all the regions added to the HeapRegionSeq instance are contiguous, f) replace int's with size_t's for indexes (and expand that to HeapRegion as part of 6804436), g) remove unnecessary / unused methods, h) rename a couple of fields (_alloc_search_start and _seq_bottom), i) fix iterate_from() not to always start from index 0 irrespective of the region passed to it, j) add a verification method to check the HeapRegionSeq assumptions, k) always call the wrappers for _hrs.iterate(), _hrs_length(), and _hrs.at() from G1CollectedHeap, not those methods directly, and l) unify the code that expands the sequence (by either re-using or creating a new HeapRegion) and make it robust wrt to a HeapRegion allocation failing.
Reviewed-by: stefank, johnc, brutisso
2011-06-10 13:16:40 -04:00
heap_region_iterate ( & blk ) ;
2008-06-05 15:57:56 -07:00
}
// Calls a SpaceClosure on a HeapRegion.
class SpaceClosureRegionClosure : public HeapRegionClosure {
SpaceClosure * _cl ;
public :
SpaceClosureRegionClosure ( SpaceClosure * cl ) : _cl ( cl ) { }
bool doHeapRegion ( HeapRegion * r ) {
_cl - > do_space ( r ) ;
return false ;
}
} ;
void G1CollectedHeap : : space_iterate ( SpaceClosure * cl ) {
SpaceClosureRegionClosure blk ( cl ) ;
7045330: G1: Simplify/fix the HeapRegionSeq class
7042285: G1: native memory leak during humongous object allocation
6804436: G1: heap region indices should be size_t
A series of fixes and improvements to the HeapRegionSeq class: a) replace the _regions growable array with a standard C array, b) avoid de-allocating / re-allocating HeapRegion instances when the heap shrinks / grows (fix for 7042285), c) introduce fast method to map address to HeapRegion via a "biased" array pointer, d) embed the _hrs object in G1CollectedHeap, instead of pointing to it via an indirection, e) assume that all the regions added to the HeapRegionSeq instance are contiguous, f) replace int's with size_t's for indexes (and expand that to HeapRegion as part of 6804436), g) remove unnecessary / unused methods, h) rename a couple of fields (_alloc_search_start and _seq_bottom), i) fix iterate_from() not to always start from index 0 irrespective of the region passed to it, j) add a verification method to check the HeapRegionSeq assumptions, k) always call the wrappers for _hrs.iterate(), _hrs_length(), and _hrs.at() from G1CollectedHeap, not those methods directly, and l) unify the code that expands the sequence (by either re-using or creating a new HeapRegion) and make it robust wrt to a HeapRegion allocation failing.
Reviewed-by: stefank, johnc, brutisso
2011-06-10 13:16:40 -04:00
heap_region_iterate ( & blk ) ;
2008-06-05 15:57:56 -07:00
}
7045330: G1: Simplify/fix the HeapRegionSeq class
7042285: G1: native memory leak during humongous object allocation
6804436: G1: heap region indices should be size_t
A series of fixes and improvements to the HeapRegionSeq class: a) replace the _regions growable array with a standard C array, b) avoid de-allocating / re-allocating HeapRegion instances when the heap shrinks / grows (fix for 7042285), c) introduce fast method to map address to HeapRegion via a "biased" array pointer, d) embed the _hrs object in G1CollectedHeap, instead of pointing to it via an indirection, e) assume that all the regions added to the HeapRegionSeq instance are contiguous, f) replace int's with size_t's for indexes (and expand that to HeapRegion as part of 6804436), g) remove unnecessary / unused methods, h) rename a couple of fields (_alloc_search_start and _seq_bottom), i) fix iterate_from() not to always start from index 0 irrespective of the region passed to it, j) add a verification method to check the HeapRegionSeq assumptions, k) always call the wrappers for _hrs.iterate(), _hrs_length(), and _hrs.at() from G1CollectedHeap, not those methods directly, and l) unify the code that expands the sequence (by either re-using or creating a new HeapRegion) and make it robust wrt to a HeapRegion allocation failing.
Reviewed-by: stefank, johnc, brutisso
2011-06-10 13:16:40 -04:00
void G1CollectedHeap : : heap_region_iterate ( HeapRegionClosure * cl ) const {
2014-08-26 09:36:53 +02:00
_hrm . iterate ( cl ) ;
2008-06-05 15:57:56 -07:00
}
void
G1CollectedHeap : : heap_region_par_iterate_chunked ( HeapRegionClosure * cl ,
2012-07-19 15:15:54 -07:00
uint worker_id ,
2014-08-18 16:10:44 +02:00
uint num_workers ,
jint claim_value ) const {
2014-08-26 09:36:53 +02:00
_hrm . par_iterate ( cl , worker_id , num_workers , claim_value ) ;
2008-06-05 15:57:56 -07:00
}
2008-10-06 13:16:35 -04:00
class ResetClaimValuesClosure : public HeapRegionClosure {
public :
bool doHeapRegion ( HeapRegion * r ) {
r - > set_claim_value ( HeapRegion : : InitialClaimValue ) ;
return false ;
}
} ;
2011-12-23 11:14:18 -08:00
void G1CollectedHeap : : reset_heap_region_claim_values ( ) {
2008-10-06 13:16:35 -04:00
ResetClaimValuesClosure blk ;
heap_region_iterate ( & blk ) ;
}
2011-12-23 11:14:18 -08:00
void G1CollectedHeap : : reset_cset_heap_region_claim_values ( ) {
ResetClaimValuesClosure blk ;
collection_set_iterate ( & blk ) ;
}
2008-08-06 11:57:31 -04:00
# ifdef ASSERT
// This checks whether all regions in the heap have the correct claim
// value. I also piggy-backed on this a check to ensure that the
// humongous_start_region() information on "continues humongous"
// regions is correct.
class CheckClaimValuesClosure : public HeapRegionClosure {
private :
jint _claim_value ;
2012-04-18 07:21:15 -04:00
uint _failures ;
2008-08-06 11:57:31 -04:00
HeapRegion * _sh_region ;
2012-04-18 07:21:15 -04:00
2008-08-06 11:57:31 -04:00
public :
CheckClaimValuesClosure ( jint claim_value ) :
_claim_value ( claim_value ) , _failures ( 0 ) , _sh_region ( NULL ) { }
bool doHeapRegion ( HeapRegion * r ) {
if ( r - > claim_value ( ) ! = _claim_value ) {
2011-11-17 12:40:15 -08:00
gclog_or_tty - > print_cr ( " Region " HR_FORMAT " , "
2008-08-06 11:57:31 -04:00
" claim value = %d, should be %d " ,
2011-11-17 12:40:15 -08:00
HR_FORMAT_PARAMS ( r ) ,
r - > claim_value ( ) , _claim_value ) ;
2008-08-06 11:57:31 -04:00
+ + _failures ;
}
if ( ! r - > isHumongous ( ) ) {
_sh_region = NULL ;
} else if ( r - > startsHumongous ( ) ) {
_sh_region = r ;
} else if ( r - > continuesHumongous ( ) ) {
if ( r - > humongous_start_region ( ) ! = _sh_region ) {
2011-11-17 12:40:15 -08:00
gclog_or_tty - > print_cr ( " Region " HR_FORMAT " , "
2008-08-06 11:57:31 -04:00
" HS = " PTR_FORMAT " , should be " PTR_FORMAT ,
2011-11-17 12:40:15 -08:00
HR_FORMAT_PARAMS ( r ) ,
2008-08-06 11:57:31 -04:00
r - > humongous_start_region ( ) ,
_sh_region ) ;
+ + _failures ;
}
}
return false ;
}
2012-04-18 07:21:15 -04:00
uint failures ( ) { return _failures ; }
2008-08-06 11:57:31 -04:00
} ;
bool G1CollectedHeap : : check_heap_region_claim_values ( jint claim_value ) {
CheckClaimValuesClosure cl ( claim_value ) ;
heap_region_iterate ( & cl ) ;
return cl . failures ( ) = = 0 ;
}
2011-11-17 12:40:15 -08:00
class CheckClaimValuesInCSetHRClosure : public HeapRegionClosure {
2012-04-18 07:21:15 -04:00
private :
jint _claim_value ;
uint _failures ;
2011-11-17 12:40:15 -08:00
public :
CheckClaimValuesInCSetHRClosure ( jint claim_value ) :
2012-04-18 07:21:15 -04:00
_claim_value ( claim_value ) , _failures ( 0 ) { }
2011-11-17 12:40:15 -08:00
2012-04-18 07:21:15 -04:00
uint failures ( ) { return _failures ; }
2011-11-17 12:40:15 -08:00
bool doHeapRegion ( HeapRegion * hr ) {
assert ( hr - > in_collection_set ( ) , " how? " ) ;
assert ( ! hr - > isHumongous ( ) , " H-region in CSet " ) ;
if ( hr - > claim_value ( ) ! = _claim_value ) {
gclog_or_tty - > print_cr ( " CSet Region " HR_FORMAT " , "
" claim value = %d, should be %d " ,
HR_FORMAT_PARAMS ( hr ) ,
hr - > claim_value ( ) , _claim_value ) ;
_failures + = 1 ;
}
return false ;
}
} ;
bool G1CollectedHeap : : check_cset_heap_region_claim_values ( jint claim_value ) {
CheckClaimValuesInCSetHRClosure cl ( claim_value ) ;
collection_set_iterate ( & cl ) ;
return cl . failures ( ) = = 0 ;
}
2008-08-06 11:57:31 -04:00
# endif // ASSERT
2011-12-14 17:43:55 -08:00
// Clear the cached CSet starting regions and (more importantly)
// the time stamps. Called when we reset the GC time stamp.
void G1CollectedHeap : : clear_cset_start_regions ( ) {
assert ( _worker_cset_start_region ! = NULL , " sanity " ) ;
assert ( _worker_cset_start_region_time_stamp ! = NULL , " sanity " ) ;
int n_queues = MAX2 ( ( int ) ParallelGCThreads , 1 ) ;
for ( int i = 0 ; i < n_queues ; i + + ) {
_worker_cset_start_region [ i ] = NULL ;
_worker_cset_start_region_time_stamp [ i ] = 0 ;
}
}
2011-11-17 12:40:15 -08:00
2011-12-14 17:43:55 -08:00
// Given the id of a worker, obtain or calculate a suitable
// starting region for iterating over the current collection set.
2014-04-03 17:49:31 +04:00
HeapRegion * G1CollectedHeap : : start_cset_region_for_worker ( uint worker_i ) {
2011-12-14 17:43:55 -08:00
assert ( get_gc_time_stamp ( ) > 0 , " should have been updated by now " ) ;
HeapRegion * result = NULL ;
unsigned gc_time_stamp = get_gc_time_stamp ( ) ;
if ( _worker_cset_start_region_time_stamp [ worker_i ] = = gc_time_stamp ) {
// Cached starting region for current worker was set
// during the current pause - so it's valid.
// Note: the cached starting heap region may be NULL
// (when the collection set is empty).
result = _worker_cset_start_region [ worker_i ] ;
assert ( result = = NULL | | result - > in_collection_set ( ) , " sanity " ) ;
return result ;
}
// The cached entry was not valid so let's calculate
// a suitable starting heap region for this worker.
// We want the parallel threads to start their collection
// set iteration at different collection set regions to
// avoid contention.
// If we have:
// n collection set regions
// p threads
// Then thread t will start at region floor ((t * n) / p)
result = g1_policy ( ) - > collection_set ( ) ;
2011-11-17 12:40:15 -08:00
if ( G1CollectedHeap : : use_parallel_gc_threads ( ) ) {
2012-04-18 07:21:15 -04:00
uint cs_size = g1_policy ( ) - > cset_region_length ( ) ;
2011-12-14 13:34:57 -08:00
uint active_workers = workers ( ) - > active_workers ( ) ;
2011-12-14 17:43:55 -08:00
assert ( UseDynamicNumberOfGCThreads | |
active_workers = = workers ( ) - > total_workers ( ) ,
" Unless dynamic should use total workers " ) ;
2012-04-18 07:21:15 -04:00
uint end_ind = ( cs_size * worker_i ) / active_workers ;
uint start_ind = 0 ;
2011-12-14 17:43:55 -08:00
if ( worker_i > 0 & &
_worker_cset_start_region_time_stamp [ worker_i - 1 ] = = gc_time_stamp ) {
// Previous workers starting region is valid
// so let's iterate from there
start_ind = ( cs_size * ( worker_i - 1 ) ) / active_workers ;
result = _worker_cset_start_region [ worker_i - 1 ] ;
}
2012-04-18 07:21:15 -04:00
for ( uint i = start_ind ; i < end_ind ; i + + ) {
2011-11-17 12:40:15 -08:00
result = result - > next_in_collection_set ( ) ;
}
}
2011-12-14 17:43:55 -08:00
// Note: the calculated starting heap region may be NULL
// (when the collection set is empty).
assert ( result = = NULL | | result - > in_collection_set ( ) , " sanity " ) ;
assert ( _worker_cset_start_region_time_stamp [ worker_i ] ! = gc_time_stamp ,
" should be updated only once per pause " ) ;
_worker_cset_start_region [ worker_i ] = result ;
OrderAccess : : storestore ( ) ;
_worker_cset_start_region_time_stamp [ worker_i ] = gc_time_stamp ;
2011-11-17 12:40:15 -08:00
return result ;
}
2008-06-05 15:57:56 -07:00
void G1CollectedHeap : : collection_set_iterate ( HeapRegionClosure * cl ) {
HeapRegion * r = g1_policy ( ) - > collection_set ( ) ;
while ( r ! = NULL ) {
HeapRegion * next = r - > next_in_collection_set ( ) ;
if ( cl - > doHeapRegion ( r ) ) {
cl - > incomplete ( ) ;
return ;
}
r = next ;
}
}
void G1CollectedHeap : : collection_set_iterate_from ( HeapRegion * r ,
HeapRegionClosure * cl ) {
2010-06-28 14:13:17 -04:00
if ( r = = NULL ) {
// The CSet is empty so there's nothing to do.
return ;
}
2008-06-05 15:57:56 -07:00
assert ( r - > in_collection_set ( ) ,
" Start region must be a member of the collection set. " ) ;
HeapRegion * cur = r ;
while ( cur ! = NULL ) {
HeapRegion * next = cur - > next_in_collection_set ( ) ;
if ( cl - > doHeapRegion ( cur ) & & false ) {
cl - > incomplete ( ) ;
return ;
}
cur = next ;
}
cur = g1_policy ( ) - > collection_set ( ) ;
while ( cur ! = r ) {
HeapRegion * next = cur - > next_in_collection_set ( ) ;
if ( cl - > doHeapRegion ( cur ) & & false ) {
cl - > incomplete ( ) ;
return ;
}
cur = next ;
}
}
2014-07-21 10:00:31 +02:00
HeapRegion * G1CollectedHeap : : next_compaction_region ( const HeapRegion * from ) const {
2014-08-26 09:36:53 +02:00
HeapRegion * result = _hrm . next_region_in_heap ( from ) ;
2014-08-18 16:10:44 +02:00
while ( result ! = NULL & & result - > isHumongous ( ) ) {
2014-08-26 09:36:53 +02:00
result = _hrm . next_region_in_heap ( result ) ;
2014-07-21 10:00:31 +02:00
}
2014-08-18 16:10:44 +02:00
return result ;
2008-06-05 15:57:56 -07:00
}
Space * G1CollectedHeap : : space_containing ( const void * addr ) const {
2014-04-17 15:57:02 +02:00
return heap_region_containing ( addr ) ;
2008-06-05 15:57:56 -07:00
}
HeapWord * G1CollectedHeap : : block_start ( const void * addr ) const {
Space * sp = space_containing ( addr ) ;
2014-04-17 15:57:02 +02:00
return sp - > block_start ( addr ) ;
2008-06-05 15:57:56 -07:00
}
size_t G1CollectedHeap : : block_size ( const HeapWord * addr ) const {
Space * sp = space_containing ( addr ) ;
return sp - > block_size ( addr ) ;
}
bool G1CollectedHeap : : block_is_obj ( const HeapWord * addr ) const {
Space * sp = space_containing ( addr ) ;
return sp - > block_is_obj ( addr ) ;
}
bool G1CollectedHeap : : supports_tlab_allocation ( ) const {
return true ;
}
size_t G1CollectedHeap : : tlab_capacity ( Thread * ignored ) const {
2014-01-27 13:14:53 +01:00
return ( _g1_policy - > young_list_target_length ( ) - young_list ( ) - > survivor_length ( ) ) * HeapRegion : : GrainBytes ;
}
size_t G1CollectedHeap : : tlab_used ( Thread * ignored ) const {
return young_list ( ) - > eden_used_bytes ( ) ;
}
// For G1 TLABs should not contain humongous objects, so the maximum TLAB size
// must be smaller than the humongous object limit.
size_t G1CollectedHeap : : max_tlab_size ( ) const {
return align_size_down ( _humongous_object_threshold_in_words - 1 , MinObjAlignment ) ;
2008-06-05 15:57:56 -07:00
}
size_t G1CollectedHeap : : unsafe_max_tlab_alloc ( Thread * ignored ) const {
// Return the remaining space in the cur alloc region, but not less than
// the min TLAB size.
2010-03-11 11:44:43 -08:00
// Also, this value can be at most the humongous object threshold,
2013-06-10 11:30:51 +02:00
// since we can't allow tlabs to grow big enough to accommodate
2010-03-11 11:44:43 -08:00
// humongous objects.
2011-03-30 10:26:59 -04:00
HeapRegion * hr = _mutator_alloc_region . get ( ) ;
2014-01-27 13:14:53 +01:00
size_t max_tlab = max_tlab_size ( ) * wordSize ;
2011-03-30 10:26:59 -04:00
if ( hr = = NULL ) {
2014-01-27 13:14:53 +01:00
return max_tlab ;
2008-06-05 15:57:56 -07:00
} else {
2014-01-27 13:14:53 +01:00
return MIN2 ( MAX2 ( hr - > free ( ) , ( size_t ) MinTLABSize ) , max_tlab ) ;
2008-06-05 15:57:56 -07:00
}
}
size_t G1CollectedHeap : : max_capacity ( ) const {
2014-08-26 09:36:53 +02:00
return _hrm . reserved ( ) . byte_size ( ) ;
2008-06-05 15:57:56 -07:00
}
jlong G1CollectedHeap : : millis_since_last_gc ( ) {
// assert(false, "NYI");
return 0 ;
}
void G1CollectedHeap : : prepare_for_verify ( ) {
if ( SafepointSynchronize : : is_at_safepoint ( ) | | ! UseTLAB ) {
ensure_parsability ( false ) ;
}
g1_rem_set ( ) - > prepare_for_verify ( ) ;
}
2012-07-19 15:15:54 -07:00
bool G1CollectedHeap : : allocated_since_marking ( oop obj , HeapRegion * hr ,
VerifyOption vo ) {
switch ( vo ) {
case VerifyOption_G1UsePrevMarking :
return hr - > obj_allocated_since_prev_marking ( obj ) ;
case VerifyOption_G1UseNextMarking :
return hr - > obj_allocated_since_next_marking ( obj ) ;
case VerifyOption_G1UseMarkWord :
return false ;
default :
ShouldNotReachHere ( ) ;
}
return false ; // keep some compilers happy
}
HeapWord * G1CollectedHeap : : top_at_mark_start ( HeapRegion * hr , VerifyOption vo ) {
switch ( vo ) {
case VerifyOption_G1UsePrevMarking : return hr - > prev_top_at_mark_start ( ) ;
case VerifyOption_G1UseNextMarking : return hr - > next_top_at_mark_start ( ) ;
case VerifyOption_G1UseMarkWord : return NULL ;
default : ShouldNotReachHere ( ) ;
}
return NULL ; // keep some compilers happy
}
bool G1CollectedHeap : : is_marked ( oop obj , VerifyOption vo ) {
switch ( vo ) {
case VerifyOption_G1UsePrevMarking : return isMarkedPrev ( obj ) ;
case VerifyOption_G1UseNextMarking : return isMarkedNext ( obj ) ;
case VerifyOption_G1UseMarkWord : return obj - > is_gc_marked ( ) ;
default : ShouldNotReachHere ( ) ;
}
return false ; // keep some compilers happy
}
const char * G1CollectedHeap : : top_at_mark_start_str ( VerifyOption vo ) {
switch ( vo ) {
case VerifyOption_G1UsePrevMarking : return " PTAMS " ;
case VerifyOption_G1UseNextMarking : return " NTAMS " ;
case VerifyOption_G1UseMarkWord : return " NONE " ;
default : ShouldNotReachHere ( ) ;
}
return NULL ; // keep some compilers happy
}
2014-01-20 11:47:53 +01:00
class VerifyRootsClosure : public OopClosure {
2013-08-15 10:52:18 +02:00
private :
G1CollectedHeap * _g1h ;
VerifyOption _vo ;
bool _failures ;
public :
// _vo == UsePrevMarking -> use "prev" marking information,
// _vo == UseNextMarking -> use "next" marking information,
// _vo == UseMarkWord -> use mark word from object header.
VerifyRootsClosure ( VerifyOption vo ) :
_g1h ( G1CollectedHeap : : heap ( ) ) ,
_vo ( vo ) ,
_failures ( false ) { }
bool failures ( ) { return _failures ; }
template < class T > void do_oop_nv ( T * p ) {
T heap_oop = oopDesc : : load_heap_oop ( p ) ;
if ( ! oopDesc : : is_null ( heap_oop ) ) {
oop obj = oopDesc : : decode_heap_oop_not_null ( heap_oop ) ;
if ( _g1h - > is_obj_dead_cond ( obj , _vo ) ) {
gclog_or_tty - > print_cr ( " Root location " PTR_FORMAT " "
" points to dead obj " PTR_FORMAT , p , ( void * ) obj ) ;
if ( _vo = = VerifyOption_G1UseMarkWord ) {
gclog_or_tty - > print_cr ( " Mark word: " PTR_FORMAT , ( void * ) ( obj - > mark ( ) ) ) ;
}
obj - > print_on ( gclog_or_tty ) ;
_failures = true ;
}
}
}
void do_oop ( oop * p ) { do_oop_nv ( p ) ; }
void do_oop ( narrowOop * p ) { do_oop_nv ( p ) ; }
} ;
2014-01-20 11:47:53 +01:00
class G1VerifyCodeRootOopClosure : public OopClosure {
2013-08-15 10:52:18 +02:00
G1CollectedHeap * _g1h ;
OopClosure * _root_cl ;
nmethod * _nm ;
VerifyOption _vo ;
bool _failures ;
template < class T > void do_oop_work ( T * p ) {
// First verify that this root is live
_root_cl - > do_oop ( p ) ;
if ( ! G1VerifyHeapRegionCodeRoots ) {
// We're not verifying the code roots attached to heap region.
return ;
}
// Don't check the code roots during marking verification in a full GC
if ( _vo = = VerifyOption_G1UseMarkWord ) {
return ;
}
// Now verify that the current nmethod (which contains p) is
// in the code root list of the heap region containing the
// object referenced by p.
T heap_oop = oopDesc : : load_heap_oop ( p ) ;
if ( ! oopDesc : : is_null ( heap_oop ) ) {
oop obj = oopDesc : : decode_heap_oop_not_null ( heap_oop ) ;
// Now fetch the region containing the object
HeapRegion * hr = _g1h - > heap_region_containing ( obj ) ;
HeapRegionRemSet * hrrs = hr - > rem_set ( ) ;
// Verify that the strong code root list for this region
// contains the nmethod
if ( ! hrrs - > strong_code_roots_list_contains ( _nm ) ) {
gclog_or_tty - > print_cr ( " Code root location " PTR_FORMAT " "
" from nmethod " PTR_FORMAT " not in strong "
" code roots for region [ " PTR_FORMAT " , " PTR_FORMAT " ) " ,
p , _nm , hr - > bottom ( ) , hr - > end ( ) ) ;
_failures = true ;
}
}
}
public :
G1VerifyCodeRootOopClosure ( G1CollectedHeap * g1h , OopClosure * root_cl , VerifyOption vo ) :
_g1h ( g1h ) , _root_cl ( root_cl ) , _vo ( vo ) , _nm ( NULL ) , _failures ( false ) { }
void do_oop ( oop * p ) { do_oop_work ( p ) ; }
void do_oop ( narrowOop * p ) { do_oop_work ( p ) ; }
void set_nmethod ( nmethod * nm ) { _nm = nm ; }
bool failures ( ) { return _failures ; }
} ;
class G1VerifyCodeRootBlobClosure : public CodeBlobClosure {
G1VerifyCodeRootOopClosure * _oop_cl ;
public :
G1VerifyCodeRootBlobClosure ( G1VerifyCodeRootOopClosure * oop_cl ) :
_oop_cl ( oop_cl ) { }
void do_code_blob ( CodeBlob * cb ) {
nmethod * nm = cb - > as_nmethod_or_null ( ) ;
if ( nm ! = NULL ) {
_oop_cl - > set_nmethod ( nm ) ;
nm - > oops_do ( _oop_cl ) ;
}
}
} ;
class YoungRefCounterClosure : public OopClosure {
G1CollectedHeap * _g1h ;
int _count ;
public :
YoungRefCounterClosure ( G1CollectedHeap * g1h ) : _g1h ( g1h ) , _count ( 0 ) { }
void do_oop ( oop * p ) { if ( _g1h - > is_in_young ( * p ) ) { _count + + ; } }
void do_oop ( narrowOop * p ) { ShouldNotReachHere ( ) ; }
int count ( ) { return _count ; }
void reset_count ( ) { _count = 0 ; } ;
} ;
class VerifyKlassClosure : public KlassClosure {
YoungRefCounterClosure _young_ref_counter_closure ;
OopClosure * _oop_closure ;
public :
VerifyKlassClosure ( G1CollectedHeap * g1h , OopClosure * cl ) : _young_ref_counter_closure ( g1h ) , _oop_closure ( cl ) { }
void do_klass ( Klass * k ) {
k - > oops_do ( _oop_closure ) ;
_young_ref_counter_closure . reset_count ( ) ;
k - > oops_do ( & _young_ref_counter_closure ) ;
if ( _young_ref_counter_closure . count ( ) > 0 ) {
2014-04-15 20:46:23 +02:00
guarantee ( k - > has_modified_oops ( ) , err_msg ( " Klass " PTR_FORMAT " , has young refs but is not dirty. " , k ) ) ;
2013-08-15 10:52:18 +02:00
}
}
} ;
2008-06-05 15:57:56 -07:00
class VerifyLivenessOopClosure : public OopClosure {
2011-06-14 11:01:10 -07:00
G1CollectedHeap * _g1h ;
VerifyOption _vo ;
2008-06-05 15:57:56 -07:00
public :
2011-06-14 11:01:10 -07:00
VerifyLivenessOopClosure ( G1CollectedHeap * g1h , VerifyOption vo ) :
_g1h ( g1h ) , _vo ( vo )
{ }
2009-07-14 15:40:39 -07:00
void do_oop ( narrowOop * p ) { do_oop_work ( p ) ; }
void do_oop ( oop * p ) { do_oop_work ( p ) ; }
template < class T > void do_oop_work ( T * p ) {
oop obj = oopDesc : : load_decode_heap_oop ( p ) ;
2011-06-14 11:01:10 -07:00
guarantee ( obj = = NULL | | ! _g1h - > is_obj_dead_cond ( obj , _vo ) ,
2009-07-14 15:40:39 -07:00
" Dead object referenced by a not dead object " ) ;
2008-06-05 15:57:56 -07:00
}
} ;
class VerifyObjsInRegionClosure : public ObjectClosure {
2009-06-12 16:20:16 -04:00
private :
2008-06-05 15:57:56 -07:00
G1CollectedHeap * _g1h ;
size_t _live_bytes ;
HeapRegion * _hr ;
2011-06-14 11:01:10 -07:00
VerifyOption _vo ;
2008-06-05 15:57:56 -07:00
public :
2011-06-14 11:01:10 -07:00
// _vo == UsePrevMarking -> use "prev" marking information,
// _vo == UseNextMarking -> use "next" marking information,
// _vo == UseMarkWord -> use mark word from object header.
VerifyObjsInRegionClosure ( HeapRegion * hr , VerifyOption vo )
: _live_bytes ( 0 ) , _hr ( hr ) , _vo ( vo ) {
2008-06-05 15:57:56 -07:00
_g1h = G1CollectedHeap : : heap ( ) ;
}
void do_object ( oop o ) {
2011-06-14 11:01:10 -07:00
VerifyLivenessOopClosure isLive ( _g1h , _vo ) ;
2008-06-05 15:57:56 -07:00
assert ( o ! = NULL , " Huh? " ) ;
2011-06-14 11:01:10 -07:00
if ( ! _g1h - > is_obj_dead_cond ( o , _vo ) ) {
// If the object is alive according to the mark word,
// then verify that the marking information agrees.
// Note we can't verify the contra-positive of the
// above: if the object is dead (according to the mark
// word), it may not be marked, or may have been marked
// but has since became dead, or may have been allocated
// since the last marking.
if ( _vo = = VerifyOption_G1UseMarkWord ) {
guarantee ( ! _g1h - > is_obj_dead ( o ) , " mark word and concurrent mark mismatch " ) ;
}
6964458: Reimplement class meta-data storage to use native memory
Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Co-authored-by: Stefan Karlsson <stefan.karlsson@oracle.com>
Co-authored-by: Mikael Gerdin <mikael.gerdin@oracle.com>
Co-authored-by: Tom Rodriguez <tom.rodriguez@oracle.com>
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
2012-09-01 13:25:18 -04:00
o - > oop_iterate_no_header ( & isLive ) ;
2010-04-15 15:52:55 -07:00
if ( ! _hr - > obj_allocated_since_prev_marking ( o ) ) {
size_t obj_size = o - > size ( ) ; // Make sure we don't overflow
_live_bytes + = ( obj_size * HeapWordSize ) ;
}
2008-06-05 15:57:56 -07:00
}
}
size_t live_bytes ( ) { return _live_bytes ; }
} ;
class PrintObjsInRegionClosure : public ObjectClosure {
HeapRegion * _hr ;
G1CollectedHeap * _g1 ;
public :
PrintObjsInRegionClosure ( HeapRegion * hr ) : _hr ( hr ) {
_g1 = G1CollectedHeap : : heap ( ) ;
} ;
void do_object ( oop o ) {
if ( o ! = NULL ) {
HeapWord * start = ( HeapWord * ) o ;
size_t word_sz = o - > size ( ) ;
gclog_or_tty - > print ( " \n Printing obj " PTR_FORMAT " of size " SIZE_FORMAT
" isMarkedPrev %d isMarkedNext %d isAllocSince %d \n " ,
( void * ) o , word_sz ,
_g1 - > isMarkedPrev ( o ) ,
_g1 - > isMarkedNext ( o ) ,
_hr - > obj_allocated_since_prev_marking ( o ) ) ;
HeapWord * end = start + word_sz ;
HeapWord * cur ;
int * val ;
for ( cur = start ; cur < end ; cur + + ) {
val = ( int * ) cur ;
2014-04-09 13:54:32 +02:00
gclog_or_tty - > print ( " \t " PTR_FORMAT " :%d \n " , val , * val ) ;
2008-06-05 15:57:56 -07:00
}
}
}
} ;
class VerifyRegionClosure : public HeapRegionClosure {
2009-06-12 16:20:16 -04:00
private :
2012-07-19 15:15:54 -07:00
bool _par ;
VerifyOption _vo ;
bool _failures ;
2009-06-12 16:20:16 -04:00
public :
2011-06-14 11:01:10 -07:00
// _vo == UsePrevMarking -> use "prev" marking information,
// _vo == UseNextMarking -> use "next" marking information,
// _vo == UseMarkWord -> use mark word from object header.
2012-04-16 08:57:18 +02:00
VerifyRegionClosure ( bool par , VerifyOption vo )
: _par ( par ) ,
2011-06-14 11:01:10 -07:00
_vo ( vo ) ,
2009-10-02 16:20:42 -04:00
_failures ( false ) { }
bool failures ( ) {
return _failures ;
}
2009-07-14 15:40:39 -07:00
2008-06-05 15:57:56 -07:00
bool doHeapRegion ( HeapRegion * r ) {
2009-03-16 08:01:32 -07:00
if ( ! r - > continuesHumongous ( ) ) {
2009-10-02 16:20:42 -04:00
bool failures = false ;
2012-04-16 08:57:18 +02:00
r - > verify ( _vo , & failures ) ;
2009-10-02 16:20:42 -04:00
if ( failures ) {
_failures = true ;
} else {
2011-06-14 11:01:10 -07:00
VerifyObjsInRegionClosure not_dead_yet_cl ( r , _vo ) ;
2009-10-02 16:20:42 -04:00
r - > object_iterate ( & not_dead_yet_cl ) ;
2012-01-10 18:58:13 -05:00
if ( _vo ! = VerifyOption_G1UseNextMarking ) {
if ( r - > max_live_bytes ( ) < not_dead_yet_cl . live_bytes ( ) ) {
gclog_or_tty - > print_cr ( " [ " PTR_FORMAT " , " PTR_FORMAT " ] "
" max_live_bytes " SIZE_FORMAT " "
" < calculated " SIZE_FORMAT ,
r - > bottom ( ) , r - > end ( ) ,
r - > max_live_bytes ( ) ,
2009-10-02 16:20:42 -04:00
not_dead_yet_cl . live_bytes ( ) ) ;
2012-01-10 18:58:13 -05:00
_failures = true ;
}
} else {
// When vo == UseNextMarking we cannot currently do a sanity
// check on the live bytes as the calculation has not been
// finalized yet.
2009-10-02 16:20:42 -04:00
}
}
2008-06-05 15:57:56 -07:00
}
2009-10-02 16:20:42 -04:00
return false ; // stop the region iteration if we hit a failure
2008-06-05 15:57:56 -07:00
}
} ;
2013-08-15 10:52:18 +02:00
// This is the task used for parallel verification of the heap regions
2008-10-06 13:16:35 -04:00
class G1ParVerifyTask : public AbstractGangTask {
private :
G1CollectedHeap * _g1h ;
2011-06-14 11:01:10 -07:00
VerifyOption _vo ;
bool _failures ;
2008-10-06 13:16:35 -04:00
public :
2011-06-14 11:01:10 -07:00
// _vo == UsePrevMarking -> use "prev" marking information,
// _vo == UseNextMarking -> use "next" marking information,
// _vo == UseMarkWord -> use mark word from object header.
2012-04-16 08:57:18 +02:00
G1ParVerifyTask ( G1CollectedHeap * g1h , VerifyOption vo ) :
2008-10-06 13:16:35 -04:00
AbstractGangTask ( " Parallel verify task " ) ,
2009-07-14 15:40:39 -07:00
_g1h ( g1h ) ,
2011-06-14 11:01:10 -07:00
_vo ( vo ) ,
2009-10-02 16:20:42 -04:00
_failures ( false ) { }
bool failures ( ) {
return _failures ;
}
2008-10-06 13:16:35 -04:00
2011-12-14 13:34:57 -08:00
void work ( uint worker_id ) {
2009-03-16 08:01:32 -07:00
HandleMark hm ;
2012-04-16 08:57:18 +02:00
VerifyRegionClosure blk ( true , _vo ) ;
2011-12-14 13:34:57 -08:00
_g1h - > heap_region_par_iterate_chunked ( & blk , worker_id ,
2011-08-09 10:16:01 -07:00
_g1h - > workers ( ) - > active_workers ( ) ,
2008-10-06 13:16:35 -04:00
HeapRegion : : ParVerifyClaimValue ) ;
2009-10-02 16:20:42 -04:00
if ( blk . failures ( ) ) {
_failures = true ;
}
2008-10-06 13:16:35 -04:00
}
} ;
2013-08-15 10:52:18 +02:00
void G1CollectedHeap : : verify ( bool silent , VerifyOption vo ) {
2013-03-29 13:49:37 -07:00
if ( SafepointSynchronize : : is_at_safepoint ( ) ) {
2011-08-09 10:16:01 -07:00
assert ( Thread : : current ( ) - > is_VM_thread ( ) ,
2013-03-29 13:49:37 -07:00
" Expected to be executed serially by the VM thread at this point " ) ;
2011-08-09 10:16:01 -07:00
2013-08-15 10:52:18 +02:00
if ( ! silent ) { gclog_or_tty - > print ( " Roots " ) ; }
VerifyRootsClosure rootsCl ( vo ) ;
6964458: Reimplement class meta-data storage to use native memory
Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Co-authored-by: Stefan Karlsson <stefan.karlsson@oracle.com>
Co-authored-by: Mikael Gerdin <mikael.gerdin@oracle.com>
Co-authored-by: Tom Rodriguez <tom.rodriguez@oracle.com>
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
2012-09-01 13:25:18 -04:00
VerifyKlassClosure klassCl ( this , & rootsCl ) ;
2014-07-07 10:12:40 +02:00
CLDToKlassAndOopClosure cldCl ( & klassCl , & rootsCl , false ) ;
2011-06-14 11:01:10 -07:00
2011-04-26 21:17:24 -07:00
// We apply the relevant closures to all the oops in the
2014-07-07 10:12:40 +02:00
// system dictionary, class loader data graph, the string table
// and the nmethods in the code cache.
2014-02-13 17:44:39 +01:00
G1VerifyCodeRootOopClosure codeRootsCl ( this , & rootsCl , vo ) ;
G1VerifyCodeRootBlobClosure blobsCl ( & codeRootsCl ) ;
2014-07-07 10:12:40 +02:00
process_all_roots ( true , // activate StrongRootsScope
SO_AllCodeCache , // roots scanning options
& rootsCl ,
& cldCl ,
& blobsCl ) ;
2014-02-13 17:44:39 +01:00
2013-08-15 10:52:18 +02:00
bool failures = rootsCl . failures ( ) | | codeRootsCl . failures ( ) ;
2011-06-14 11:01:10 -07:00
if ( vo ! = VerifyOption_G1UseMarkWord ) {
// If we're verifying during a full GC then the region sets
// will have been torn down at the start of the GC. Therefore
// verifying the region sets will fail. So we only verify
// the region sets when not in a full GC.
if ( ! silent ) { gclog_or_tty - > print ( " HeapRegionSets " ) ; }
verify_region_sets ( ) ;
}
2011-01-19 19:30:42 -05:00
if ( ! silent ) { gclog_or_tty - > print ( " HeapRegions " ) ; }
2008-10-06 13:16:35 -04:00
if ( GCParallelVerificationEnabled & & ParallelGCThreads > 1 ) {
assert ( check_heap_region_claim_values ( HeapRegion : : InitialClaimValue ) ,
" sanity check " ) ;
2012-04-16 08:57:18 +02:00
G1ParVerifyTask task ( this , vo ) ;
2011-08-09 10:16:01 -07:00
assert ( UseDynamicNumberOfGCThreads | |
workers ( ) - > active_workers ( ) = = workers ( ) - > total_workers ( ) ,
" If not dynamic should be using all the workers " ) ;
int n_workers = workers ( ) - > active_workers ( ) ;
2008-10-06 13:16:35 -04:00
set_par_threads ( n_workers ) ;
workers ( ) - > run_task ( & task ) ;
set_par_threads ( 0 ) ;
2009-10-02 16:20:42 -04:00
if ( task . failures ( ) ) {
failures = true ;
}
2008-10-06 13:16:35 -04:00
2011-08-09 10:16:01 -07:00
// Checks that the expected amount of parallel work was done.
// The implication is that n_workers is > 0.
2008-10-06 13:16:35 -04:00
assert ( check_heap_region_claim_values ( HeapRegion : : ParVerifyClaimValue ) ,
" sanity check " ) ;
reset_heap_region_claim_values ( ) ;
assert ( check_heap_region_claim_values ( HeapRegion : : InitialClaimValue ) ,
" sanity check " ) ;
} else {
2012-04-16 08:57:18 +02:00
VerifyRegionClosure blk ( false , vo ) ;
7045330: G1: Simplify/fix the HeapRegionSeq class
7042285: G1: native memory leak during humongous object allocation
6804436: G1: heap region indices should be size_t
A series of fixes and improvements to the HeapRegionSeq class: a) replace the _regions growable array with a standard C array, b) avoid de-allocating / re-allocating HeapRegion instances when the heap shrinks / grows (fix for 7042285), c) introduce fast method to map address to HeapRegion via a "biased" array pointer, d) embed the _hrs object in G1CollectedHeap, instead of pointing to it via an indirection, e) assume that all the regions added to the HeapRegionSeq instance are contiguous, f) replace int's with size_t's for indexes (and expand that to HeapRegion as part of 6804436), g) remove unnecessary / unused methods, h) rename a couple of fields (_alloc_search_start and _seq_bottom), i) fix iterate_from() not to always start from index 0 irrespective of the region passed to it, j) add a verification method to check the HeapRegionSeq assumptions, k) always call the wrappers for _hrs.iterate(), _hrs_length(), and _hrs.at() from G1CollectedHeap, not those methods directly, and l) unify the code that expands the sequence (by either re-using or creating a new HeapRegion) and make it robust wrt to a HeapRegion allocation failing.
Reviewed-by: stefank, johnc, brutisso
2011-06-10 13:16:40 -04:00
heap_region_iterate ( & blk ) ;
2009-10-02 16:20:42 -04:00
if ( blk . failures ( ) ) {
failures = true ;
}
2008-10-06 13:16:35 -04:00
}
2011-01-19 19:30:42 -05:00
if ( ! silent ) gclog_or_tty - > print ( " RemSet " ) ;
2008-06-05 15:57:56 -07:00
rem_set ( ) - > verify ( ) ;
2009-10-02 16:20:42 -04:00
2014-03-18 19:07:22 +01:00
if ( G1StringDedup : : is_enabled ( ) ) {
if ( ! silent ) gclog_or_tty - > print ( " StrDedup " ) ;
G1StringDedup : : verify ( ) ;
}
2009-10-02 16:20:42 -04:00
if ( failures ) {
gclog_or_tty - > print_cr ( " Heap: " ) ;
2011-11-08 00:41:28 -05:00
// It helps to have the per-region information in the output to
// help us track down what went wrong. This is why we call
// print_extended_on() instead of print_on().
print_extended_on ( gclog_or_tty ) ;
2014-05-09 16:50:54 -04:00
gclog_or_tty - > cr ( ) ;
2010-05-18 11:02:18 -07:00
# ifndef PRODUCT
2009-09-30 14:50:51 -04:00
if ( VerifyDuringGC & & G1VerifyDuringGCPrintReachable ) {
2010-03-30 15:43:03 -04:00
concurrent_mark ( ) - > print_reachable ( " at-verification-failure " ,
2011-06-14 11:01:10 -07:00
vo , false /* all */ ) ;
2009-10-02 16:20:42 -04:00
}
2010-05-18 11:02:18 -07:00
# endif
2009-10-02 16:20:42 -04:00
gclog_or_tty - > flush ( ) ;
}
guarantee ( ! failures , " there should not have been any failures " ) ;
2008-06-05 15:57:56 -07:00
} else {
2014-03-18 19:07:22 +01:00
if ( ! silent ) {
gclog_or_tty - > print ( " (SKIPPING Roots, HeapRegionSets, HeapRegions, RemSet " ) ;
if ( G1StringDedup : : is_enabled ( ) ) {
gclog_or_tty - > print ( " , StrDedup " ) ;
}
gclog_or_tty - > print ( " ) " ) ;
}
2008-06-05 15:57:56 -07:00
}
}
2013-08-15 10:52:18 +02:00
void G1CollectedHeap : : verify ( bool silent ) {
verify ( silent , VerifyOption_G1UsePrevMarking ) ;
}
double G1CollectedHeap : : verify ( bool guard , const char * msg ) {
double verify_time_ms = 0.0 ;
if ( guard & & total_collections ( ) > = VerifyGCStartAt ) {
double verify_start = os : : elapsedTime ( ) ;
HandleMark hm ; // Discard invalid handles created during verification
prepare_for_verify ( ) ;
Universe : : verify ( VerifyOption_G1UsePrevMarking , msg ) ;
verify_time_ms = ( os : : elapsedTime ( ) - verify_start ) * 1000 ;
}
return verify_time_ms ;
}
void G1CollectedHeap : : verify_before_gc ( ) {
double verify_time_ms = verify ( VerifyBeforeGC , " VerifyBeforeGC: " ) ;
g1_policy ( ) - > phase_times ( ) - > record_verify_before_time_ms ( verify_time_ms ) ;
}
void G1CollectedHeap : : verify_after_gc ( ) {
double verify_time_ms = verify ( VerifyAfterGC , " VerifyAfterGC: " ) ;
g1_policy ( ) - > phase_times ( ) - > record_verify_after_time_ms ( verify_time_ms ) ;
}
2008-06-05 15:57:56 -07:00
class PrintRegionClosure : public HeapRegionClosure {
outputStream * _st ;
public :
PrintRegionClosure ( outputStream * st ) : _st ( st ) { }
bool doHeapRegion ( HeapRegion * r ) {
r - > print_on ( _st ) ;
return false ;
}
} ;
2014-04-02 09:17:38 +02:00
bool G1CollectedHeap : : is_obj_dead_cond ( const oop obj ,
const HeapRegion * hr ,
const VerifyOption vo ) const {
switch ( vo ) {
case VerifyOption_G1UsePrevMarking : return is_obj_dead ( obj , hr ) ;
case VerifyOption_G1UseNextMarking : return is_obj_ill ( obj , hr ) ;
case VerifyOption_G1UseMarkWord : return ! obj - > is_gc_marked ( ) ;
default : ShouldNotReachHere ( ) ;
}
return false ; // keep some compilers happy
}
bool G1CollectedHeap : : is_obj_dead_cond ( const oop obj ,
const VerifyOption vo ) const {
switch ( vo ) {
case VerifyOption_G1UsePrevMarking : return is_obj_dead ( obj ) ;
case VerifyOption_G1UseNextMarking : return is_obj_ill ( obj ) ;
case VerifyOption_G1UseMarkWord : return ! obj - > is_gc_marked ( ) ;
default : ShouldNotReachHere ( ) ;
}
return false ; // keep some compilers happy
}
2008-06-05 15:57:56 -07:00
void G1CollectedHeap : : print_on ( outputStream * st ) const {
2009-07-07 14:23:00 -04:00
st - > print ( " %-20s " , " garbage-first heap " ) ;
st - > print ( " total " SIZE_FORMAT " K, used " SIZE_FORMAT " K " ,
2009-07-15 12:22:59 -04:00
capacity ( ) / K , used_unlocked ( ) / K ) ;
2009-07-07 14:23:00 -04:00
st - > print ( " [ " INTPTR_FORMAT " , " INTPTR_FORMAT " , " INTPTR_FORMAT " ) " ,
2014-08-26 09:36:53 +02:00
_hrm . reserved ( ) . start ( ) ,
_hrm . reserved ( ) . start ( ) + _hrm . length ( ) + HeapRegion : : GrainWords ,
_hrm . reserved ( ) . end ( ) ) ;
2009-07-07 14:23:00 -04:00
st - > cr ( ) ;
2011-10-05 08:44:10 -07:00
st - > print ( " region size " SIZE_FORMAT " K, " , HeapRegion : : GrainBytes / K ) ;
2012-04-18 07:21:15 -04:00
uint young_regions = _young_list - > length ( ) ;
st - > print ( " %u young ( " SIZE_FORMAT " K), " , young_regions ,
( size_t ) young_regions * HeapRegion : : GrainBytes / K ) ;
uint survivor_regions = g1_policy ( ) - > recorded_survivor_regions ( ) ;
st - > print ( " %u survivors ( " SIZE_FORMAT " K) " , survivor_regions ,
( size_t ) survivor_regions * HeapRegion : : GrainBytes / K ) ;
2009-07-07 14:23:00 -04:00
st - > cr ( ) ;
2012-10-08 09:12:31 -07:00
MetaspaceAux : : print_on ( st ) ;
2009-07-07 14:23:00 -04:00
}
2011-11-08 00:41:28 -05:00
void G1CollectedHeap : : print_extended_on ( outputStream * st ) const {
print_on ( st ) ;
// Print the per-region information.
st - > cr ( ) ;
2012-04-18 07:21:15 -04:00
st - > print_cr ( " Heap Regions: (Y=young(eden), SU=young(survivor), "
" HS=humongous(starts), HC=humongous(continues), "
" CS=collection set, F=free, TS=gc time stamp, "
" PTAMS=previous top-at-mark-start, "
" NTAMS=next top-at-mark-start) " ) ;
2008-06-05 15:57:56 -07:00
PrintRegionClosure blk ( st ) ;
7045330: G1: Simplify/fix the HeapRegionSeq class
7042285: G1: native memory leak during humongous object allocation
6804436: G1: heap region indices should be size_t
A series of fixes and improvements to the HeapRegionSeq class: a) replace the _regions growable array with a standard C array, b) avoid de-allocating / re-allocating HeapRegion instances when the heap shrinks / grows (fix for 7042285), c) introduce fast method to map address to HeapRegion via a "biased" array pointer, d) embed the _hrs object in G1CollectedHeap, instead of pointing to it via an indirection, e) assume that all the regions added to the HeapRegionSeq instance are contiguous, f) replace int's with size_t's for indexes (and expand that to HeapRegion as part of 6804436), g) remove unnecessary / unused methods, h) rename a couple of fields (_alloc_search_start and _seq_bottom), i) fix iterate_from() not to always start from index 0 irrespective of the region passed to it, j) add a verification method to check the HeapRegionSeq assumptions, k) always call the wrappers for _hrs.iterate(), _hrs_length(), and _hrs.at() from G1CollectedHeap, not those methods directly, and l) unify the code that expands the sequence (by either re-using or creating a new HeapRegion) and make it robust wrt to a HeapRegion allocation failing.
Reviewed-by: stefank, johnc, brutisso
2011-06-10 13:16:40 -04:00
heap_region_iterate ( & blk ) ;
2008-06-05 15:57:56 -07:00
}
2013-04-10 14:26:49 +02:00
void G1CollectedHeap : : print_on_error ( outputStream * st ) const {
this - > CollectedHeap : : print_on_error ( st ) ;
if ( _cm ! = NULL ) {
st - > cr ( ) ;
_cm - > print_on_error ( st ) ;
}
}
2008-06-05 15:57:56 -07:00
void G1CollectedHeap : : print_gc_threads_on ( outputStream * st ) const {
2010-09-20 14:38:38 -07:00
if ( G1CollectedHeap : : use_parallel_gc_threads ( ) ) {
2009-10-02 16:12:07 -04:00
workers ( ) - > print_worker_threads_on ( st ) ;
2008-06-05 15:57:56 -07:00
}
2009-10-02 16:12:07 -04:00
_cmThread - > print_on ( st ) ;
2008-06-05 15:57:56 -07:00
st - > cr ( ) ;
2009-10-02 16:12:07 -04:00
_cm - > print_worker_threads_on ( st ) ;
_cg1r - > print_worker_threads_on ( st ) ;
2014-03-18 19:07:22 +01:00
if ( G1StringDedup : : is_enabled ( ) ) {
G1StringDedup : : print_worker_threads_on ( st ) ;
}
2008-06-05 15:57:56 -07:00
}
void G1CollectedHeap : : gc_threads_do ( ThreadClosure * tc ) const {
2010-09-20 14:38:38 -07:00
if ( G1CollectedHeap : : use_parallel_gc_threads ( ) ) {
2008-06-05 15:57:56 -07:00
workers ( ) - > threads_do ( tc ) ;
}
tc - > do_thread ( _cmThread ) ;
2009-05-11 16:30:56 -07:00
_cg1r - > threads_do ( tc ) ;
2014-03-18 19:07:22 +01:00
if ( G1StringDedup : : is_enabled ( ) ) {
G1StringDedup : : threads_do ( tc ) ;
}
2008-06-05 15:57:56 -07:00
}
void G1CollectedHeap : : print_tracing_info ( ) const {
// We'll overload this to mean "trace GC pause statistics."
2014-05-02 02:11:34 +02:00
if ( TraceYoungGenTime | | TraceOldGenTime ) {
2008-06-05 15:57:56 -07:00
// The "G1CollectorPolicy" is keeping track of these stats, so delegate
// to that.
g1_policy ( ) - > print_tracing_info ( ) ;
}
2009-04-30 15:07:53 -07:00
if ( G1SummarizeRSetStats ) {
2008-06-05 15:57:56 -07:00
g1_rem_set ( ) - > print_summary_info ( ) ;
}
2010-02-23 23:13:23 -05:00
if ( G1SummarizeConcMark ) {
2008-06-05 15:57:56 -07:00
concurrent_mark ( ) - > print_summary_info ( ) ;
}
g1_policy ( ) - > print_yg_surv_rate_info ( ) ;
SpecializationStats : : print ( ) ;
}
2011-06-21 15:23:07 -04:00
# ifndef PRODUCT
// Helpful for debugging RSet issues.
class PrintRSetsClosure : public HeapRegionClosure {
private :
const char * _msg ;
size_t _occupied_sum ;
public :
bool doHeapRegion ( HeapRegion * r ) {
HeapRegionRemSet * hrrs = r - > rem_set ( ) ;
size_t occupied = hrrs - > occupied ( ) ;
_occupied_sum + = occupied ;
gclog_or_tty - > print_cr ( " Printing RSet for region " HR_FORMAT ,
HR_FORMAT_PARAMS ( r ) ) ;
if ( occupied = = 0 ) {
gclog_or_tty - > print_cr ( " RSet is empty " ) ;
} else {
hrrs - > print ( ) ;
}
gclog_or_tty - > print_cr ( " ---------- " ) ;
return false ;
}
PrintRSetsClosure ( const char * msg ) : _msg ( msg ) , _occupied_sum ( 0 ) {
gclog_or_tty - > cr ( ) ;
gclog_or_tty - > print_cr ( " ======================================== " ) ;
2014-05-09 16:50:54 -04:00
gclog_or_tty - > print_cr ( " %s " , msg ) ;
2011-06-21 15:23:07 -04:00
gclog_or_tty - > cr ( ) ;
}
~ PrintRSetsClosure ( ) {
gclog_or_tty - > print_cr ( " Occupied Sum: " SIZE_FORMAT , _occupied_sum ) ;
gclog_or_tty - > print_cr ( " ======================================== " ) ;
gclog_or_tty - > cr ( ) ;
}
} ;
void G1CollectedHeap : : print_cset_rsets ( ) {
PrintRSetsClosure cl ( " Printing CSet RSets " ) ;
collection_set_iterate ( & cl ) ;
}
void G1CollectedHeap : : print_all_rsets ( ) {
PrintRSetsClosure cl ( " Printing All RSets " ) ; ;
heap_region_iterate ( & cl ) ;
}
# endif // PRODUCT
2008-06-05 15:57:56 -07:00
G1CollectedHeap * G1CollectedHeap : : heap ( ) {
assert ( _sh - > kind ( ) = = CollectedHeap : : G1CollectedHeap ,
" not a garbage-first heap " ) ;
return _g1h ;
}
void G1CollectedHeap : : gc_prologue ( bool full /* Ignored */ ) {
2010-02-01 17:29:01 -08:00
// always_do_update_barrier = false;
2008-06-05 15:57:56 -07:00
assert ( InlineCacheBuffer : : is_empty ( ) , " should have cleaned up ICBuffer " ) ;
// Fill TLAB's and such
2014-01-27 13:14:53 +01:00
accumulate_statistics_all_tlabs ( ) ;
2008-06-05 15:57:56 -07:00
ensure_parsability ( true ) ;
2013-09-26 12:49:45 +02:00
if ( G1SummarizeRSetStats & & ( G1SummarizeRSetStatsPeriod > 0 ) & &
( total_collections ( ) % G1SummarizeRSetStatsPeriod = = 0 ) ) {
g1_rem_set ( ) - > print_periodic_summary_info ( " Before GC RS summary " ) ;
}
2008-06-05 15:57:56 -07:00
}
void G1CollectedHeap : : gc_epilogue ( bool full /* Ignored */ ) {
2013-05-28 09:32:06 +02:00
if ( G1SummarizeRSetStats & &
( G1SummarizeRSetStatsPeriod > 0 ) & &
// we are at the end of the GC. Total collections has already been increased.
( ( total_collections ( ) - 1 ) % G1SummarizeRSetStatsPeriod = = 0 ) ) {
2013-09-26 12:49:45 +02:00
g1_rem_set ( ) - > print_periodic_summary_info ( " After GC RS summary " ) ;
2013-05-28 09:32:06 +02:00
}
2008-06-05 15:57:56 -07:00
// FIXME: what is this about?
// I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
// is set.
COMPILER2_PRESENT ( assert ( DerivedPointerTable : : is_empty ( ) ,
" derived pointer present " ) ) ;
2010-02-01 17:29:01 -08:00
// always_do_update_barrier = true;
2011-09-22 10:57:37 -07:00
2014-01-27 13:14:53 +01:00
resize_all_tlabs ( ) ;
2011-09-22 10:57:37 -07:00
// We have just completed a GC. Update the soft reference
// policy with the new heap occupancy
Universe : : update_heap_info_at_gc ( ) ;
2008-06-05 15:57:56 -07:00
}
2010-08-24 17:24:33 -04:00
HeapWord * G1CollectedHeap : : do_collection_pause ( size_t word_size ,
unsigned int gc_count_before ,
2013-08-21 22:35:56 +02:00
bool * succeeded ,
GCCause : : Cause gc_cause ) {
2010-08-24 17:24:33 -04:00
assert_heap_not_locked_and_not_at_safepoint ( ) ;
2008-06-05 15:57:56 -07:00
g1_policy ( ) - > record_stop_world_start ( ) ;
2010-08-24 17:24:33 -04:00
VM_G1IncCollectionPause op ( gc_count_before ,
word_size ,
false , /* should_initiate_conc_mark */
g1_policy ( ) - > max_pause_time_ms ( ) ,
2013-08-21 22:35:56 +02:00
gc_cause ) ;
2010-08-24 17:24:33 -04:00
VMThread : : execute ( & op ) ;
HeapWord * result = op . result ( ) ;
bool ret_succeeded = op . prologue_succeeded ( ) & & op . pause_succeeded ( ) ;
assert ( result = = NULL | | ret_succeeded ,
" the result should be NULL if the VM did not succeed " ) ;
* succeeded = ret_succeeded ;
assert_heap_not_locked ( ) ;
return result ;
2008-06-05 15:57:56 -07:00
}
void
G1CollectedHeap : : doConcurrentMark ( ) {
2009-07-14 15:40:39 -07:00
MutexLockerEx x ( CGC_lock , Mutex : : _no_safepoint_check_flag ) ;
if ( ! _cmThread - > in_progress ( ) ) {
_cmThread - > set_started ( ) ;
CGC_lock - > notify ( ) ;
2008-06-05 15:57:56 -07:00
}
}
size_t G1CollectedHeap : : pending_card_num ( ) {
size_t extra_cards = 0 ;
JavaThread * curr = Threads : : first ( ) ;
while ( curr ! = NULL ) {
DirtyCardQueue & dcq = curr - > dirty_card_queue ( ) ;
extra_cards + = dcq . size ( ) ;
curr = curr - > next ( ) ;
}
DirtyCardQueueSet & dcqs = JavaThread : : dirty_card_queue_set ( ) ;
size_t buffer_size = dcqs . buffer_size ( ) ;
size_t buffer_num = dcqs . completed_buffers_num ( ) ;
2012-08-21 14:10:39 -07:00
// PtrQueueSet::buffer_size() and PtrQueue:size() return sizes
// in bytes - not the number of 'entries'. We need to convert
// into a number of cards.
return ( buffer_size * buffer_num + extra_cards ) / oopSize ;
2008-06-05 15:57:56 -07:00
}
size_t G1CollectedHeap : : cards_scanned ( ) {
2010-10-12 09:36:48 -07:00
return g1_rem_set ( ) - > cardsScanned ( ) ;
2008-06-05 15:57:56 -07:00
}
2014-07-23 09:03:32 +02:00
bool G1CollectedHeap : : humongous_region_is_always_live ( uint index ) {
HeapRegion * region = region_at ( index ) ;
assert ( region - > startsHumongous ( ) , " Must start a humongous object " ) ;
return oop ( region - > bottom ( ) ) - > is_objArray ( ) | | ! region - > rem_set ( ) - > is_empty ( ) ;
}
class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
private :
size_t _total_humongous ;
size_t _candidate_humongous ;
public :
RegisterHumongousWithInCSetFastTestClosure ( ) : _total_humongous ( 0 ) , _candidate_humongous ( 0 ) {
}
virtual bool doHeapRegion ( HeapRegion * r ) {
if ( ! r - > startsHumongous ( ) ) {
return false ;
}
G1CollectedHeap * g1h = G1CollectedHeap : : heap ( ) ;
2014-08-26 09:36:53 +02:00
uint region_idx = r - > hrm_index ( ) ;
2014-07-23 09:03:32 +02:00
bool is_candidate = ! g1h - > humongous_region_is_always_live ( region_idx ) ;
// Is_candidate already filters out humongous regions with some remembered set.
// This will not lead to humongous object that we mistakenly keep alive because
// during young collection the remembered sets will only be added to.
if ( is_candidate ) {
g1h - > register_humongous_region_with_in_cset_fast_test ( region_idx ) ;
_candidate_humongous + + ;
}
_total_humongous + + ;
return false ;
}
size_t total_humongous ( ) const { return _total_humongous ; }
size_t candidate_humongous ( ) const { return _candidate_humongous ; }
} ;
void G1CollectedHeap : : register_humongous_regions_with_in_cset_fast_test ( ) {
if ( ! G1ReclaimDeadHumongousObjectsAtYoungGC ) {
g1_policy ( ) - > phase_times ( ) - > record_fast_reclaim_humongous_stats ( 0 , 0 ) ;
return ;
}
RegisterHumongousWithInCSetFastTestClosure cl ;
heap_region_iterate ( & cl ) ;
g1_policy ( ) - > phase_times ( ) - > record_fast_reclaim_humongous_stats ( cl . total_humongous ( ) ,
cl . candidate_humongous ( ) ) ;
_has_humongous_reclaim_candidates = cl . candidate_humongous ( ) > 0 ;
if ( _has_humongous_reclaim_candidates ) {
clear_humongous_is_live_table ( ) ;
}
}
2008-06-05 15:57:56 -07:00
void
G1CollectedHeap : : setup_surviving_young_words ( ) {
2012-04-18 07:21:15 -04:00
assert ( _surviving_young_words = = NULL , " pre-condition " ) ;
uint array_length = g1_policy ( ) - > young_cset_region_length ( ) ;
2012-06-28 17:03:16 -04:00
_surviving_young_words = NEW_C_HEAP_ARRAY ( size_t , ( size_t ) array_length , mtGC ) ;
2008-06-05 15:57:56 -07:00
if ( _surviving_young_words = = NULL ) {
2013-04-30 11:56:52 -07:00
vm_exit_out_of_memory ( sizeof ( size_t ) * array_length , OOM_MALLOC_ERROR ,
2008-06-05 15:57:56 -07:00
" Not enough space for young surv words summary. " ) ;
}
2012-04-18 07:21:15 -04:00
memset ( _surviving_young_words , 0 , ( size_t ) array_length * sizeof ( size_t ) ) ;
2009-07-14 15:40:39 -07:00
# ifdef ASSERT
2012-04-18 07:21:15 -04:00
for ( uint i = 0 ; i < array_length ; + + i ) {
2009-07-14 15:40:39 -07:00
assert ( _surviving_young_words [ i ] = = 0 , " memset above " ) ;
2008-06-05 15:57:56 -07:00
}
2009-07-14 15:40:39 -07:00
# endif // !ASSERT
2008-06-05 15:57:56 -07:00
}
void
G1CollectedHeap : : update_surviving_young_words ( size_t * surv_young_words ) {
MutexLockerEx x ( ParGCRareEvent_lock , Mutex : : _no_safepoint_check_flag ) ;
2012-04-18 07:21:15 -04:00
uint array_length = g1_policy ( ) - > young_cset_region_length ( ) ;
for ( uint i = 0 ; i < array_length ; + + i ) {
2008-06-05 15:57:56 -07:00
_surviving_young_words [ i ] + = surv_young_words [ i ] ;
2012-04-18 07:21:15 -04:00
}
2008-06-05 15:57:56 -07:00
}
void
G1CollectedHeap : : cleanup_surviving_young_words ( ) {
guarantee ( _surviving_young_words ! = NULL , " pre-condition " ) ;
2012-06-28 17:03:16 -04:00
FREE_C_HEAP_ARRAY ( size_t , _surviving_young_words , mtGC ) ;
2008-06-05 15:57:56 -07:00
_surviving_young_words = NULL ;
}
2011-06-21 15:23:07 -04:00
# ifdef ASSERT
class VerifyCSetClosure : public HeapRegionClosure {
public :
bool doHeapRegion ( HeapRegion * hr ) {
// Here we check that the CSet region's RSet is ready for parallel
// iteration. The fields that we'll verify are only manipulated
// when the region is part of a CSet and is collected. Afterwards,
// we reset these fields when we clear the region's RSet (when the
// region is freed) so they are ready when the region is
// re-allocated. The only exception to this is if there's an
// evacuation failure and instead of freeing the region we leave
// it in the heap. In that case, we reset these fields during
// evacuation failure handling.
guarantee ( hr - > rem_set ( ) - > verify_ready_for_par_iteration ( ) , " verification " ) ;
// Here's a good place to add any other checks we'd like to
// perform on CSet regions.
2010-02-11 15:52:19 -08:00
return false ;
}
} ;
2011-06-21 15:23:07 -04:00
# endif // ASSERT
2010-02-11 15:52:19 -08:00
2010-08-09 05:41:05 -07:00
# if TASKQUEUE_STATS
void G1CollectedHeap : : print_taskqueue_stats_hdr ( outputStream * const st ) {
st - > print_raw_cr ( " GC Task Stats " ) ;
st - > print_raw ( " thr " ) ; TaskQueueStats : : print_header ( 1 , st ) ; st - > cr ( ) ;
st - > print_raw ( " --- " ) ; TaskQueueStats : : print_header ( 2 , st ) ; st - > cr ( ) ;
}
void G1CollectedHeap : : print_taskqueue_stats ( outputStream * const st ) const {
print_taskqueue_stats_hdr ( st ) ;
TaskQueueStats totals ;
2010-08-25 14:39:55 -07:00
const int n = workers ( ) ! = NULL ? workers ( ) - > total_workers ( ) : 1 ;
2010-08-09 05:41:05 -07:00
for ( int i = 0 ; i < n ; + + i ) {
st - > print ( " %3d " , i ) ; task_queue ( i ) - > stats . print ( st ) ; st - > cr ( ) ;
totals + = task_queue ( i ) - > stats ;
}
st - > print_raw ( " tot " ) ; totals . print ( st ) ; st - > cr ( ) ;
DEBUG_ONLY ( totals . verify ( ) ) ;
}
void G1CollectedHeap : : reset_taskqueue_stats ( ) {
2010-08-25 14:39:55 -07:00
const int n = workers ( ) ! = NULL ? workers ( ) - > total_workers ( ) : 1 ;
2010-08-09 05:41:05 -07:00
for ( int i = 0 ; i < n ; + + i ) {
task_queue ( i ) - > stats . reset ( ) ;
}
}
# endif // TASKQUEUE_STATS
2012-09-17 10:33:13 +02:00
void G1CollectedHeap : : log_gc_header ( ) {
if ( ! G1Log : : fine ( ) ) {
return ;
}
2014-06-19 13:31:14 +02:00
gclog_or_tty - > gclog_stamp ( _gc_tracer_stw - > gc_id ( ) ) ;
2012-09-17 10:33:13 +02:00
GCCauseString gc_cause_str = GCCauseString ( " GC pause " , gc_cause ( ) )
2013-01-04 17:04:46 -08:00
. append ( g1_policy ( ) - > gcs_are_young ( ) ? " (young) " : " (mixed) " )
2012-09-17 10:33:13 +02:00
. append ( g1_policy ( ) - > during_initial_mark_pause ( ) ? " (initial-mark) " : " " ) ;
gclog_or_tty - > print ( " [%s " , ( const char * ) gc_cause_str ) ;
}
void G1CollectedHeap : : log_gc_footer ( double pause_time_sec ) {
if ( ! G1Log : : fine ( ) ) {
return ;
}
if ( G1Log : : finer ( ) ) {
if ( evacuation_failed ( ) ) {
gclog_or_tty - > print ( " (to-space exhausted) " ) ;
}
gclog_or_tty - > print_cr ( " , %3.7f secs] " , pause_time_sec ) ;
g1_policy ( ) - > phase_times ( ) - > note_gc_end ( ) ;
g1_policy ( ) - > phase_times ( ) - > print ( pause_time_sec ) ;
g1_policy ( ) - > print_detailed_heap_transition ( ) ;
} else {
if ( evacuation_failed ( ) ) {
gclog_or_tty - > print ( " -- " ) ;
}
g1_policy ( ) - > print_heap_transition ( ) ;
gclog_or_tty - > print_cr ( " , %3.7f secs] " , pause_time_sec ) ;
}
2012-11-30 11:46:17 -08:00
gclog_or_tty - > flush ( ) ;
2012-09-17 10:33:13 +02:00
}
2010-08-24 17:24:33 -04:00
bool
2010-06-28 14:13:17 -04:00
G1CollectedHeap : : do_collection_pause_at_safepoint ( double target_pause_time_ms ) {
2011-01-19 19:30:42 -05:00
assert_at_safepoint ( true /* should_be_vm_thread */ ) ;
guarantee ( ! is_gc_active ( ) , " collection is not reentrant " ) ;
2010-04-06 10:59:45 -04:00
if ( GC_locker : : check_active_before_gc ( ) ) {
2010-08-24 17:24:33 -04:00
return false ;
2010-04-06 10:59:45 -04:00
}
2013-11-23 12:25:13 +01:00
_gc_timer_stw - > register_gc_start ( ) ;
2013-06-10 11:30:51 +02:00
_gc_tracer_stw - > report_gc_start ( gc_cause ( ) , _gc_timer_stw - > gc_start ( ) ) ;
2011-01-10 17:14:53 -05:00
SvcGCMarker sgcm ( SvcGCMarker : : MINOR ) ;
2010-12-19 20:57:16 -05:00
ResourceMark rm ;
2012-02-01 07:59:01 -08:00
print_heap_before_gc ( ) ;
2013-06-10 11:30:51 +02:00
trace_heap_before_gc ( _gc_tracer_stw ) ;
2008-06-05 15:57:56 -07:00
2011-01-19 19:30:42 -05:00
verify_region_sets_optional ( ) ;
2011-03-30 10:26:59 -04:00
verify_dirty_young_regions ( ) ;
2011-01-19 19:30:42 -05:00
2012-01-17 10:21:43 -08:00
// This call will decide whether this pause is an initial-mark
// pause. If it is, during_initial_mark_pause() will return true
// for the duration of this pause.
g1_policy ( ) - > decide_on_conc_mark_initiation ( ) ;
// We do not allow initial-mark to be piggy-backed on a mixed GC.
assert ( ! g1_policy ( ) - > during_initial_mark_pause ( ) | |
g1_policy ( ) - > gcs_are_young ( ) , " sanity " ) ;
2010-04-06 10:59:45 -04:00
2012-01-17 10:21:43 -08:00
// We also do not allow mixed GCs during marking.
assert ( ! mark_in_progress ( ) | | g1_policy ( ) - > gcs_are_young ( ) , " sanity " ) ;
2011-09-28 10:36:31 -07:00
2012-01-17 10:21:43 -08:00
// Record whether this pause is an initial mark. When the current
// thread has completed its logging output and it's safe to signal
// the CM thread, the flag's value in the policy has been reset.
bool should_start_conc_mark = g1_policy ( ) - > during_initial_mark_pause ( ) ;
2011-09-28 10:36:31 -07:00
2012-01-17 10:21:43 -08:00
// Inner scope for scope based logging, timers, and stats collection
{
2013-06-10 11:30:51 +02:00
EvacuationInfo evacuation_info ;
2010-06-28 14:13:17 -04:00
if ( g1_policy ( ) - > during_initial_mark_pause ( ) ) {
// We are about to start a marking cycle, so we increment the
// full collection counter.
2012-06-05 22:30:24 +02:00
increment_old_marking_cycles_started ( ) ;
2013-06-10 11:30:51 +02:00
register_concurrent_cycle_start ( _gc_timer_stw - > gc_start ( ) ) ;
2010-06-28 14:13:17 -04:00
}
2013-06-10 11:30:51 +02:00
_gc_tracer_stw - > report_yc_type ( yc_type ( ) ) ;
2012-04-13 01:59:38 +02:00
TraceCPUTime tcpu ( G1Log : : finer ( ) , true , gclog_or_tty ) ;
2012-04-25 12:36:37 +02:00
2012-07-11 22:47:38 +02:00
int active_workers = ( G1CollectedHeap : : use_parallel_gc_threads ( ) ?
workers ( ) - > active_workers ( ) : 1 ) ;
2012-08-23 10:21:12 +02:00
double pause_start_sec = os : : elapsedTime ( ) ;
g1_policy ( ) - > phase_times ( ) - > note_gc_start ( active_workers ) ;
2012-09-17 10:33:13 +02:00
log_gc_header ( ) ;
2008-07-10 09:29:54 -07:00
2011-04-21 10:23:44 -07:00
TraceCollectorStats tcs ( g1mm ( ) - > incremental_collection_counters ( ) ) ;
2011-05-12 10:30:11 -07:00
TraceMemoryManagerStats tms ( false /* fullGC */ , gc_cause ( ) ) ;
2009-11-20 14:47:01 -05:00
2011-03-04 17:13:19 -05:00
// If the secondary_free_list is not empty, append it to the
// free_list. No need to wait for the cleanup operation to finish;
// the region allocation code will check the secondary_free_list
// and wait if necessary. If the G1StressConcRegionFreeing flag is
// set, skip this step so that the region allocation code has to
// get entries from the secondary_free_list.
2011-01-19 19:30:42 -05:00
if ( ! G1StressConcRegionFreeing ) {
2011-03-04 17:13:19 -05:00
append_secondary_free_list_if_not_empty_with_lock ( ) ;
2011-01-19 19:30:42 -05:00
}
2008-06-05 15:57:56 -07:00
2013-08-15 10:52:18 +02:00
assert ( check_young_list_well_formed ( ) , " young list should be well formed " ) ;
assert ( check_heap_region_claim_values ( HeapRegion : : InitialClaimValue ) ,
" sanity check " ) ;
2008-06-05 15:57:56 -07:00
2011-08-09 10:16:01 -07:00
// Don't dynamically change the number of GC threads this early. A value of
// 0 is used to indicate serial work. When parallel work is done,
// it will be set.
2009-07-07 14:23:00 -04:00
{ // Call to jvmpi::post_class_unload_events must occur outside of active GC
IsGCActiveMark x ;
gc_prologue ( false ) ;
increment_total_collections ( false /* full gc */ ) ;
2011-08-12 11:31:06 -04:00
increment_gc_time_stamp ( ) ;
2008-06-05 15:57:56 -07:00
2012-08-23 10:21:12 +02:00
verify_before_gc ( ) ;
2014-07-07 10:12:40 +02:00
2014-04-29 09:33:20 +02:00
check_bitmaps ( " GC Start " ) ;
2008-06-05 15:57:56 -07:00
2009-07-07 14:23:00 -04:00
COMPILER2_PRESENT ( DerivedPointerTable : : clear ( ) ) ;
2008-06-05 15:57:56 -07:00
2011-09-22 10:57:37 -07:00
// Please see comment in g1CollectedHeap.hpp and
// G1CollectedHeap::ref_processing_init() to see how
// reference processing currently works in G1.
2008-06-05 15:57:56 -07:00
2011-09-22 10:57:37 -07:00
// Enable discovery in the STW reference processor
ref_processor_stw ( ) - > enable_discovery ( true /*verify_disabled*/ ,
true /*verify_no_refs*/ ) ;
2008-06-05 15:57:56 -07:00
2011-09-22 10:57:37 -07:00
{
// We want to temporarily turn off discovery by the
// CM ref processor, if necessary, and turn it back on
// on again later if we do. Using a scoped
// NoRefDiscovery object will do this.
NoRefDiscovery no_cm_discovery ( ref_processor_cm ( ) ) ;
// Forget the current alloc region (we might even choose it to be part
// of the collection set!).
release_mutator_alloc_region ( ) ;
// We should call this after we retire the mutator alloc
// region(s) so that all the ALLOC / RETIRE events are generated
// before the start GC event.
_hr_printer . start_gc ( false /* full */ , ( size_t ) total_collections ( ) ) ;
2012-07-11 22:47:38 +02:00
// This timing is only used by the ergonomics to handle our pause target.
// It is unclear why this should not include the full pause. We will
// investigate this in CR 7178365.
//
// Preserving the old comment here if that helps the investigation:
//
2011-09-22 10:57:37 -07:00
// The elapsed time induced by the start time below deliberately elides
// the possible verification above.
2012-07-11 22:47:38 +02:00
double sample_start_time_sec = os : : elapsedTime ( ) ;
2008-06-05 15:57:56 -07:00
2010-04-22 10:02:38 -07:00
# if YOUNG_LIST_VERBOSE
2011-09-22 10:57:37 -07:00
gclog_or_tty - > print_cr ( " \n Before recording pause start. \n Young_list: " ) ;
_young_list - > print ( ) ;
g1_policy ( ) - > print_collection_set ( g1_policy ( ) - > inc_cset_head ( ) , gclog_or_tty ) ;
2010-04-22 10:02:38 -07:00
# endif // YOUNG_LIST_VERBOSE
2013-04-10 10:57:34 -07:00
g1_policy ( ) - > record_collection_pause_start ( sample_start_time_sec ) ;
2008-06-05 15:57:56 -07:00
2012-01-25 12:58:23 -05:00
double scan_wait_start = os : : elapsedTime ( ) ;
// We have to wait until the CM threads finish scanning the
// root regions as it's the only way to ensure that all the
// objects on them have been correctly scanned before we start
// moving them during the GC.
bool waited = _cm - > root_regions ( ) - > wait_until_scan_finished ( ) ;
2012-07-11 22:47:38 +02:00
double wait_time_ms = 0.0 ;
2012-01-25 12:58:23 -05:00
if ( waited ) {
double scan_wait_end = os : : elapsedTime ( ) ;
2012-07-11 22:47:38 +02:00
wait_time_ms = ( scan_wait_end - scan_wait_start ) * 1000.0 ;
2012-01-25 12:58:23 -05:00
}
2012-07-11 22:47:38 +02:00
g1_policy ( ) - > phase_times ( ) - > record_root_region_scan_wait_time ( wait_time_ms ) ;
2012-01-25 12:58:23 -05:00
2010-04-22 10:02:38 -07:00
# if YOUNG_LIST_VERBOSE
2011-09-22 10:57:37 -07:00
gclog_or_tty - > print_cr ( " \n After recording pause start. \n Young_list: " ) ;
_young_list - > print ( ) ;
2010-04-22 10:02:38 -07:00
# endif // YOUNG_LIST_VERBOSE
2008-06-05 15:57:56 -07:00
2011-09-22 10:57:37 -07:00
if ( g1_policy ( ) - > during_initial_mark_pause ( ) ) {
concurrent_mark ( ) - > checkpointRootsInitialPre ( ) ;
}
2008-06-05 15:57:56 -07:00
2010-04-22 10:02:38 -07:00
# if YOUNG_LIST_VERBOSE
2011-09-22 10:57:37 -07:00
gclog_or_tty - > print_cr ( " \n Before choosing collection set. \n Young_list: " ) ;
_young_list - > print ( ) ;
g1_policy ( ) - > print_collection_set ( g1_policy ( ) - > inc_cset_head ( ) , gclog_or_tty ) ;
2010-04-22 10:02:38 -07:00
# endif // YOUNG_LIST_VERBOSE
2009-03-25 13:10:54 -07:00
2013-06-10 11:30:51 +02:00
g1_policy ( ) - > finalize_cset ( target_pause_time_ms , evacuation_info ) ;
2011-09-22 10:57:37 -07:00
2014-07-23 09:03:32 +02:00
register_humongous_regions_with_in_cset_fast_test ( ) ;
2012-01-10 18:58:13 -05:00
_cm - > note_start_of_gc ( ) ;
// We should not verify the per-thread SATB buffers given that
// we have not filtered them yet (we'll do so during the
2012-02-15 13:06:53 -05:00
// GC). We also call this after finalize_cset() to
2012-01-10 18:58:13 -05:00
// ensure that the CSet has been finalized.
_cm - > verify_no_cset_oops ( true /* verify_stacks */ ,
true /* verify_enqueued_buffers */ ,
false /* verify_thread_buffers */ ,
true /* verify_fingers */ ) ;
2011-09-22 10:57:37 -07:00
if ( _hr_printer . is_active ( ) ) {
HeapRegion * hr = g1_policy ( ) - > collection_set ( ) ;
while ( hr ! = NULL ) {
G1HRPrinter : : RegionType type ;
if ( ! hr - > is_young ( ) ) {
type = G1HRPrinter : : Old ;
} else if ( hr - > is_survivor ( ) ) {
type = G1HRPrinter : : Survivor ;
} else {
type = G1HRPrinter : : Eden ;
}
_hr_printer . cset ( hr ) ;
hr = hr - > next_in_collection_set ( ) ;
2011-06-24 12:38:49 -04:00
}
}
2011-06-21 15:23:07 -04:00
# ifdef ASSERT
2011-09-22 10:57:37 -07:00
VerifyCSetClosure cl ;
collection_set_iterate ( & cl ) ;
2011-06-21 15:23:07 -04:00
# endif // ASSERT
2008-06-05 15:57:56 -07:00
2011-09-22 10:57:37 -07:00
setup_surviving_young_words ( ) ;
2008-06-05 15:57:56 -07:00
2011-09-22 10:57:37 -07:00
// Initialize the GC alloc regions.
2013-06-10 11:30:51 +02:00
init_gc_alloc_regions ( evacuation_info ) ;
2008-06-05 15:57:56 -07:00
2011-09-22 10:57:37 -07:00
// Actually do the work...
2013-06-10 11:30:51 +02:00
evacuate_collection_set ( evacuation_info ) ;
2010-04-22 10:02:38 -07:00
2012-01-10 18:58:13 -05:00
// We do this to mainly verify the per-thread SATB buffers
// (which have been filtered by now) since we didn't verify
// them earlier. No point in re-checking the stacks / enqueued
// buffers given that the CSet has not changed since last time
// we checked.
_cm - > verify_no_cset_oops ( false /* verify_stacks */ ,
false /* verify_enqueued_buffers */ ,
true /* verify_thread_buffers */ ,
true /* verify_fingers */ ) ;
2013-06-10 11:30:51 +02:00
free_collection_set ( g1_policy ( ) - > collection_set ( ) , evacuation_info ) ;
2014-07-23 09:03:32 +02:00
eagerly_reclaim_humongous_regions ( ) ;
2011-09-22 10:57:37 -07:00
g1_policy ( ) - > clear_collection_set ( ) ;
2008-06-05 15:57:56 -07:00
2011-09-22 10:57:37 -07:00
cleanup_surviving_young_words ( ) ;
2008-06-05 15:57:56 -07:00
2011-09-22 10:57:37 -07:00
// Start a new incremental collection set for the next pause.
g1_policy ( ) - > start_incremental_cset_building ( ) ;
2010-04-22 10:02:38 -07:00
2011-09-22 10:57:37 -07:00
clear_cset_fast_test ( ) ;
2010-04-22 10:02:38 -07:00
2011-09-22 10:57:37 -07:00
_young_list - > reset_sampled_info ( ) ;
2008-06-05 15:57:56 -07:00
2011-09-22 10:57:37 -07:00
// Don't check the whole heap at this point as the
// GC alloc regions from this pause have been tagged
// as survivors and moved on to the survivor list.
// Survivor regions will fail the !is_young() check.
assert ( check_young_list_empty ( false /* check_heap */ ) ,
" young list should be empty " ) ;
2010-04-22 10:02:38 -07:00
# if YOUNG_LIST_VERBOSE
2011-09-22 10:57:37 -07:00
gclog_or_tty - > print_cr ( " Before recording survivors. \n Young List: " ) ;
_young_list - > print ( ) ;
2010-04-22 10:02:38 -07:00
# endif // YOUNG_LIST_VERBOSE
2008-06-05 15:57:56 -07:00
2011-09-22 10:57:37 -07:00
g1_policy ( ) - > record_survivor_regions ( _young_list - > survivor_length ( ) ,
2013-06-10 11:30:51 +02:00
_young_list - > first_survivor_region ( ) ,
_young_list - > last_survivor_region ( ) ) ;
2010-04-22 10:02:38 -07:00
2011-09-22 10:57:37 -07:00
_young_list - > reset_auxilary_lists ( ) ;
2008-06-05 15:57:56 -07:00
2011-09-22 10:57:37 -07:00
if ( evacuation_failed ( ) ) {
_summary_bytes_used = recalculate_used ( ) ;
2013-06-10 11:30:51 +02:00
uint n_queues = MAX2 ( ( int ) ParallelGCThreads , 1 ) ;
for ( uint i = 0 ; i < n_queues ; i + + ) {
if ( _evacuation_failed_info_array [ i ] . has_failed ( ) ) {
_gc_tracer_stw - > report_evacuation_failed ( _evacuation_failed_info_array [ i ] ) ;
}
}
2011-09-22 10:57:37 -07:00
} else {
// The "used" of the the collection set have already been subtracted
// when they were freed. Add in the bytes evacuated.
_summary_bytes_used + = g1_policy ( ) - > bytes_copied_during_gc ( ) ;
}
2008-06-05 15:57:56 -07:00
2011-09-22 10:57:37 -07:00
if ( g1_policy ( ) - > during_initial_mark_pause ( ) ) {
2012-01-25 12:58:23 -05:00
// We have to do this before we notify the CM threads that
// they can start working to make sure that all the
// appropriate initialization is done on the CM object.
2011-09-22 10:57:37 -07:00
concurrent_mark ( ) - > checkpointRootsInitialPost ( ) ;
set_marking_started ( ) ;
2012-01-17 10:21:43 -08:00
// Note that we don't actually trigger the CM thread at
// this point. We do that later when we're sure that
// the current thread has completed its logging output.
2011-09-22 10:57:37 -07:00
}
2008-06-05 15:57:56 -07:00
2011-09-22 10:57:37 -07:00
allocate_dummy_regions ( ) ;
2011-04-19 15:46:59 -04:00
2010-04-22 10:02:38 -07:00
# if YOUNG_LIST_VERBOSE
2011-09-22 10:57:37 -07:00
gclog_or_tty - > print_cr ( " \n End of the pause. \n Young_list: " ) ;
_young_list - > print ( ) ;
g1_policy ( ) - > print_collection_set ( g1_policy ( ) - > inc_cset_head ( ) , gclog_or_tty ) ;
2010-04-22 10:02:38 -07:00
# endif // YOUNG_LIST_VERBOSE
2008-06-05 15:57:56 -07:00
2011-09-22 10:57:37 -07:00
init_mutator_alloc_region ( ) ;
{
size_t expand_bytes = g1_policy ( ) - > expansion_amount ( ) ;
if ( expand_bytes > 0 ) {
size_t bytes_before = capacity ( ) ;
2012-01-09 23:50:41 -05:00
// No need for an ergo verbose message here,
// expansion_amount() does this when it returns a value > 0.
2011-09-22 10:57:37 -07:00
if ( ! expand ( expand_bytes ) ) {
2014-08-18 16:10:44 +02:00
// We failed to expand the heap. Cannot do anything about it.
2011-09-22 10:57:37 -07:00
}
2011-09-08 16:29:41 +02:00
}
}
2013-06-10 11:30:51 +02:00
// We redo the verification but now wrt to the new CSet which
2012-01-10 18:58:13 -05:00
// has just got initialized after the previous CSet was freed.
_cm - > verify_no_cset_oops ( true /* verify_stacks */ ,
true /* verify_enqueued_buffers */ ,
true /* verify_thread_buffers */ ,
true /* verify_fingers */ ) ;
_cm - > note_end_of_gc ( ) ;
2012-07-11 22:47:38 +02:00
// This timing is only used by the ergonomics to handle our pause target.
// It is unclear why this should not include the full pause. We will
// investigate this in CR 7178365.
double sample_end_time_sec = os : : elapsedTime ( ) ;
double pause_time_ms = ( sample_end_time_sec - sample_start_time_sec ) * MILLIUNITS ;
2013-06-10 11:30:51 +02:00
g1_policy ( ) - > record_collection_pause_end ( pause_time_ms , evacuation_info ) ;
2011-09-22 10:57:37 -07:00
MemoryService : : track_memory_usage ( ) ;
// In prepare_for_verify() below we'll need to scan the deferred
// update buffers to bring the RSets up-to-date if
// G1HRRSFlushLogBuffersOnVerify has been set. While scanning
// the update buffers we'll probably need to scan cards on the
// regions we just allocated to (i.e., the GC alloc
// regions). However, during the last GC we called
// set_saved_mark() on all the GC alloc regions, so card
// scanning might skip the [saved_mark_word()...top()] area of
// those regions (i.e., the area we allocated objects into
// during the last GC). But it shouldn't. Given that
// saved_mark_word() is conditional on whether the GC time stamp
// on the region is current or not, by incrementing the GC time
// stamp here we invalidate all the GC time stamps on all the
// regions and saved_mark_word() will simply return top() for
// all the regions. This is a nicer way of ensuring this rather
// than iterating over the regions and fixing them. In fact, the
// GC time stamp increment here also ensures that
// saved_mark_word() will return top() between pauses, i.e.,
// during concurrent refinement. So we don't need the
// is_gc_active() check to decided which top to use when
// scanning cards (see CR 7039627).
increment_gc_time_stamp ( ) ;
2012-08-23 10:21:12 +02:00
verify_after_gc ( ) ;
2014-04-29 09:33:20 +02:00
check_bitmaps ( " GC End " ) ;
2011-08-12 11:31:06 -04:00
2011-09-22 10:57:37 -07:00
assert ( ! ref_processor_stw ( ) - > discovery_enabled ( ) , " Postcondition " ) ;
ref_processor_stw ( ) - > verify_no_references_recorded ( ) ;
2008-06-05 15:57:56 -07:00
2011-09-22 10:57:37 -07:00
// CM reference discovery will be re-enabled if necessary.
}
2008-06-05 15:57:56 -07:00
2011-06-24 12:38:49 -04:00
// We should do this after we potentially expand the heap so
// that all the COMMIT events are generated before the end GC
// event, and after we retire the GC alloc regions so that all
// RETIRE events are generated before the end GC event.
_hr_printer . end_gc ( false /* full */ , ( size_t ) total_collections ( ) ) ;
2009-02-08 13:18:01 -08:00
# ifdef TRACESPINNING
2009-07-07 14:23:00 -04:00
ParallelTaskTerminator : : print_termination_counts ( ) ;
2009-02-08 13:18:01 -08:00
# endif
2008-06-05 15:57:56 -07:00
2009-07-07 14:23:00 -04:00
gc_epilogue ( false ) ;
}
2008-06-05 15:57:56 -07:00
2012-11-30 11:46:17 -08:00
// Print the remainder of the GC log output.
log_gc_footer ( os : : elapsedTime ( ) - pause_start_sec ) ;
2012-07-11 22:47:38 +02:00
// It is not yet to safe to tell the concurrent mark to
2012-06-04 13:29:34 +02:00
// start as we have some optional output below. We don't want the
// output from the concurrent mark thread interfering with this
// logging output either.
2012-01-17 10:21:43 -08:00
2014-08-26 09:36:53 +02:00
_hrm . verify_optional ( ) ;
2012-06-04 13:29:34 +02:00
verify_region_sets_optional ( ) ;
TASKQUEUE_STATS_ONLY ( if ( ParallelGCVerbose ) print_taskqueue_stats ( ) ) ;
TASKQUEUE_STATS_ONLY ( reset_taskqueue_stats ( ) ) ;
2011-01-19 19:30:42 -05:00
2012-06-04 13:29:34 +02:00
print_heap_after_gc ( ) ;
2013-06-10 11:30:51 +02:00
trace_heap_after_gc ( _gc_tracer_stw ) ;
2010-08-09 05:41:05 -07:00
2012-06-04 13:29:34 +02:00
// We must call G1MonitoringSupport::update_sizes() in the same scoping level
// as an active TraceMemoryManagerStats object (i.e. before the destructor for the
// TraceMemoryManagerStats is called) so that the G1 memory pools are updated
// before any GC notifications are raised.
g1mm ( ) - > update_sizes ( ) ;
2011-04-21 10:23:44 -07:00
2013-06-10 11:30:51 +02:00
_gc_tracer_stw - > report_evacuation_info ( & evacuation_info ) ;
_gc_tracer_stw - > report_tenuring_threshold ( _g1_policy - > tenuring_threshold ( ) ) ;
2013-11-23 12:25:13 +01:00
_gc_timer_stw - > register_gc_end ( ) ;
2013-06-10 11:30:51 +02:00
_gc_tracer_stw - > report_gc_end ( _gc_timer_stw - > gc_end ( ) , _gc_timer_stw - > time_partitions ( ) ) ;
}
2012-01-17 10:21:43 -08:00
// It should now be safe to tell the concurrent mark thread to start
// without its logging output interfering with the logging output
// that came from the pause.
if ( should_start_conc_mark ) {
// CAUTION: after the doConcurrentMark() call below,
// the concurrent marking thread(s) could be running
// concurrently with us. Make sure that anything after
// this point does not assume that we are the only GC thread
// running. Note: of course, the actual marking work will
// not start until the safepoint itself is released in
2014-04-11 12:29:24 +02:00
// SuspendibleThreadSet::desynchronize().
2012-01-17 10:21:43 -08:00
doConcurrentMark ( ) ;
}
2010-08-24 17:24:33 -04:00
return true ;
2008-06-05 15:57:56 -07:00
}
2010-04-16 08:48:16 -07:00
size_t G1CollectedHeap : : desired_plab_sz ( GCAllocPurpose purpose )
{
size_t gclab_word_size ;
switch ( purpose ) {
case GCAllocForSurvived :
2012-08-06 12:20:14 -07:00
gclab_word_size = _survivor_plab_stats . desired_plab_sz ( ) ;
2010-04-16 08:48:16 -07:00
break ;
case GCAllocForTenured :
2012-08-06 12:20:14 -07:00
gclab_word_size = _old_plab_stats . desired_plab_sz ( ) ;
2010-04-16 08:48:16 -07:00
break ;
default :
assert ( false , " unknown GCAllocPurpose " ) ;
2012-08-06 12:20:14 -07:00
gclab_word_size = _old_plab_stats . desired_plab_sz ( ) ;
2010-04-16 08:48:16 -07:00
break ;
}
2012-08-06 12:20:14 -07:00
// Prevent humongous PLAB sizes for two reasons:
// * PLABs are allocated using a similar paths as oops, but should
// never be in a humongous region
// * Allowing humongous PLABs needlessly churns the region free lists
return MIN2 ( _humongous_object_threshold_in_words , gclab_word_size ) ;
2010-04-16 08:48:16 -07:00
}
2011-03-30 10:26:59 -04:00
void G1CollectedHeap : : init_mutator_alloc_region ( ) {
assert ( _mutator_alloc_region . get ( ) = = NULL , " pre-condition " ) ;
_mutator_alloc_region . init ( ) ;
}
void G1CollectedHeap : : release_mutator_alloc_region ( ) {
_mutator_alloc_region . release ( ) ;
assert ( _mutator_alloc_region . get ( ) = = NULL , " post-condition " ) ;
}
2010-04-16 08:48:16 -07:00
2014-07-07 10:12:40 +02:00
void G1CollectedHeap : : use_retained_old_gc_alloc_region ( EvacuationInfo & evacuation_info ) {
2011-08-12 11:31:06 -04:00
HeapRegion * retained_region = _retained_old_gc_alloc_region ;
_retained_old_gc_alloc_region = NULL ;
// We will discard the current GC alloc region if:
// a) it's in the collection set (it can happen!),
// b) it's already full (no point in using it),
// c) it's empty (this means that it was emptied during
// a cleanup and it should be on the free list now), or
// d) it's humongous (this means that it was emptied
// during a cleanup and was added to the free list, but
2013-06-10 11:30:51 +02:00
// has been subsequently used to allocate a humongous
2011-08-12 11:31:06 -04:00
// object that may be less than the region size).
if ( retained_region ! = NULL & &
! retained_region - > in_collection_set ( ) & &
! ( retained_region - > top ( ) = = retained_region - > end ( ) ) & &
! retained_region - > is_empty ( ) & &
! retained_region - > isHumongous ( ) ) {
2014-06-26 10:00:00 +02:00
retained_region - > record_top_and_timestamp ( ) ;
2011-11-07 22:11:12 -05:00
// The retained region was added to the old region set when it was
// retired. We have to remove it now, since we don't allow regions
// we allocate to in the region sets. We'll re-add it later, when
// it's retired again.
_old_set . remove ( retained_region ) ;
2012-01-10 18:58:13 -05:00
bool during_im = g1_policy ( ) - > during_initial_mark_pause ( ) ;
retained_region - > note_start_of_copying ( during_im ) ;
2011-08-12 11:31:06 -04:00
_old_gc_alloc_region . set ( retained_region ) ;
_hr_printer . reuse ( retained_region ) ;
2013-06-10 11:30:51 +02:00
evacuation_info . set_alloc_regions_used_before ( retained_region - > used ( ) ) ;
2011-08-12 11:31:06 -04:00
}
}
2014-07-07 10:12:40 +02:00
void G1CollectedHeap : : init_gc_alloc_regions ( EvacuationInfo & evacuation_info ) {
assert_at_safepoint ( true /* should_be_vm_thread */ ) ;
_survivor_gc_alloc_region . init ( ) ;
_old_gc_alloc_region . init ( ) ;
use_retained_old_gc_alloc_region ( evacuation_info ) ;
}
2013-06-10 11:30:51 +02:00
void G1CollectedHeap : : release_gc_alloc_regions ( uint no_of_gc_workers , EvacuationInfo & evacuation_info ) {
evacuation_info . set_allocation_regions ( _survivor_gc_alloc_region . count ( ) +
_old_gc_alloc_region . count ( ) ) ;
2011-08-12 11:31:06 -04:00
_survivor_gc_alloc_region . release ( ) ;
// If we have an old GC alloc region to release, we'll save it in
// _retained_old_gc_alloc_region. If we don't
// _retained_old_gc_alloc_region will become NULL. This is what we
// want either way so no reason to check explicitly for either
// condition.
_retained_old_gc_alloc_region = _old_gc_alloc_region . release ( ) ;
2012-08-06 12:20:14 -07:00
if ( ResizePLAB ) {
2012-10-04 10:04:13 -07:00
_survivor_plab_stats . adjust_desired_plab_sz ( no_of_gc_workers ) ;
_old_plab_stats . adjust_desired_plab_sz ( no_of_gc_workers ) ;
2012-08-06 12:20:14 -07:00
}
2008-06-05 15:57:56 -07:00
}
2011-08-12 11:31:06 -04:00
void G1CollectedHeap : : abandon_gc_alloc_regions ( ) {
assert ( _survivor_gc_alloc_region . get ( ) = = NULL , " pre-condition " ) ;
assert ( _old_gc_alloc_region . get ( ) = = NULL , " pre-condition " ) ;
_retained_old_gc_alloc_region = NULL ;
2009-03-15 22:03:38 -04:00
}
2008-06-05 15:57:56 -07:00
void G1CollectedHeap : : init_for_evac_failure ( OopsInHeapRegionClosure * cl ) {
_drain_in_progress = false ;
set_evac_failure_closure ( cl ) ;
2012-06-28 17:03:16 -04:00
_evac_failure_scan_stack = new ( ResourceObj : : C_HEAP , mtGC ) GrowableArray < oop > ( 40 , true ) ;
2008-06-05 15:57:56 -07:00
}
void G1CollectedHeap : : finalize_for_evac_failure ( ) {
assert ( _evac_failure_scan_stack ! = NULL & &
_evac_failure_scan_stack - > length ( ) = = 0 ,
" Postcondition " ) ;
assert ( ! _drain_in_progress , " Postcondition " ) ;
2009-10-27 02:42:24 -07:00
delete _evac_failure_scan_stack ;
2008-06-05 15:57:56 -07:00
_evac_failure_scan_stack = NULL ;
}
2011-12-23 11:14:18 -08:00
void G1CollectedHeap : : remove_self_forwarding_pointers ( ) {
assert ( check_cset_heap_region_claim_values ( HeapRegion : : InitialClaimValue ) , " sanity " ) ;
2008-06-05 15:57:56 -07:00
2014-03-17 10:13:42 +01:00
double remove_self_forwards_start = os : : elapsedTime ( ) ;
2011-12-23 11:14:18 -08:00
G1ParRemoveSelfForwardPtrsTask rsfp_task ( this ) ;
2008-06-05 15:57:56 -07:00
2011-12-23 11:14:18 -08:00
if ( G1CollectedHeap : : use_parallel_gc_threads ( ) ) {
set_par_threads ( ) ;
workers ( ) - > run_task ( & rsfp_task ) ;
set_par_threads ( 0 ) ;
2009-03-06 13:50:14 -08:00
} else {
2011-12-23 11:14:18 -08:00
rsfp_task . work ( 0 ) ;
2008-06-05 15:57:56 -07:00
}
2011-12-23 11:14:18 -08:00
assert ( check_cset_heap_region_claim_values ( HeapRegion : : ParEvacFailureClaimValue ) , " sanity " ) ;
// Reset the claim values in the regions in the collection set.
reset_cset_heap_region_claim_values ( ) ;
assert ( check_cset_heap_region_claim_values ( HeapRegion : : InitialClaimValue ) , " sanity " ) ;
2008-06-05 15:57:56 -07:00
// Now restore saved marks, if any.
2013-02-10 21:15:16 +01:00
assert ( _objs_with_preserved_marks . size ( ) = =
_preserved_marks_of_objs . size ( ) , " Both or none. " ) ;
while ( ! _objs_with_preserved_marks . is_empty ( ) ) {
oop obj = _objs_with_preserved_marks . pop ( ) ;
markOop m = _preserved_marks_of_objs . pop ( ) ;
obj - > set_mark ( m ) ;
2008-06-05 15:57:56 -07:00
}
2013-02-10 21:15:16 +01:00
_objs_with_preserved_marks . clear ( true ) ;
_preserved_marks_of_objs . clear ( true ) ;
2014-03-17 10:13:42 +01:00
g1_policy ( ) - > phase_times ( ) - > record_evac_fail_remove_self_forwards ( ( os : : elapsedTime ( ) - remove_self_forwards_start ) * 1000.0 ) ;
2008-06-05 15:57:56 -07:00
}
void G1CollectedHeap : : push_on_evac_failure_scan_stack ( oop obj ) {
_evac_failure_scan_stack - > push ( obj ) ;
}
void G1CollectedHeap : : drain_evac_failure_scan_stack ( ) {
assert ( _evac_failure_scan_stack ! = NULL , " precondition " ) ;
while ( _evac_failure_scan_stack - > length ( ) > 0 ) {
oop obj = _evac_failure_scan_stack - > pop ( ) ;
_evac_failure_closure - > set_region ( heap_region_containing ( obj ) ) ;
obj - > oop_iterate_backwards ( _evac_failure_closure ) ;
}
}
oop
2013-06-10 11:30:51 +02:00
G1CollectedHeap : : handle_evacuation_failure_par ( G1ParScanThreadState * _par_scan_state ,
2012-01-10 18:58:13 -05:00
oop old ) {
2011-05-05 09:15:52 -04:00
assert ( obj_in_cs ( old ) ,
err_msg ( " obj: " PTR_FORMAT " should still be in the CSet " ,
( HeapWord * ) old ) ) ;
2008-06-05 15:57:56 -07:00
markOop m = old - > mark ( ) ;
oop forward_ptr = old - > forward_to_atomic ( old ) ;
if ( forward_ptr = = NULL ) {
// Forward-to-self succeeded.
2013-06-10 11:30:51 +02:00
assert ( _par_scan_state ! = NULL , " par scan state " ) ;
OopsInHeapRegionClosure * cl = _par_scan_state - > evac_failure_closure ( ) ;
uint queue_num = _par_scan_state - > queue_num ( ) ;
2011-09-20 15:39:17 -07:00
2013-06-10 11:30:51 +02:00
_evacuation_failed = true ;
_evacuation_failed_info_array [ queue_num ] . register_copy_failure ( old - > size ( ) ) ;
2008-06-05 15:57:56 -07:00
if ( _evac_failure_closure ! = cl ) {
MutexLockerEx x ( EvacFailureStack_lock , Mutex : : _no_safepoint_check_flag ) ;
assert ( ! _drain_in_progress ,
" Should only be true while someone holds the lock. " ) ;
// Set the global evac-failure closure to the current thread's.
assert ( _evac_failure_closure = = NULL , " Or locking has failed. " ) ;
set_evac_failure_closure ( cl ) ;
// Now do the common part.
handle_evacuation_failure_common ( old , m ) ;
// Reset to NULL.
set_evac_failure_closure ( NULL ) ;
} else {
// The lock is already held, and this is recursive.
assert ( _drain_in_progress , " This should only be the recursive case. " ) ;
handle_evacuation_failure_common ( old , m ) ;
}
return old ;
} else {
2011-05-05 09:15:52 -04:00
// Forward-to-self failed. Either someone else managed to allocate
// space for this object (old != forward_ptr) or they beat us in
// self-forwarding it (old == forward_ptr).
assert ( old = = forward_ptr | | ! obj_in_cs ( forward_ptr ) ,
err_msg ( " obj: " PTR_FORMAT " forwarded to: " PTR_FORMAT " "
" should not be in the CSet " ,
( HeapWord * ) old , ( HeapWord * ) forward_ptr ) ) ;
2008-06-05 15:57:56 -07:00
return forward_ptr ;
}
}
void G1CollectedHeap : : handle_evacuation_failure_common ( oop old , markOop m ) {
preserve_mark_if_necessary ( old , m ) ;
HeapRegion * r = heap_region_containing ( old ) ;
if ( ! r - > evacuation_failed ( ) ) {
r - > set_evacuation_failed ( true ) ;
2011-06-24 12:38:49 -04:00
_hr_printer . evac_failure ( r ) ;
2008-06-05 15:57:56 -07:00
}
push_on_evac_failure_scan_stack ( old ) ;
if ( ! _drain_in_progress ) {
// prevent recursion in copy_to_survivor_space()
_drain_in_progress = true ;
drain_evac_failure_scan_stack ( ) ;
_drain_in_progress = false ;
}
}
void G1CollectedHeap : : preserve_mark_if_necessary ( oop obj , markOop m ) {
2010-12-17 23:41:31 -08:00
assert ( evacuation_failed ( ) , " Oversaving! " ) ;
// We want to call the "for_promotion_failure" version only in the
// case of a promotion failure.
if ( m - > must_be_preserved_for_promotion_failure ( obj ) ) {
2013-02-10 21:15:16 +01:00
_objs_with_preserved_marks . push ( obj ) ;
_preserved_marks_of_objs . push ( m ) ;
2008-06-05 15:57:56 -07:00
}
}
HeapWord * G1CollectedHeap : : par_allocate_during_gc ( GCAllocPurpose purpose ,
size_t word_size ) {
2011-08-12 11:31:06 -04:00
if ( purpose = = GCAllocForSurvived ) {
HeapWord * result = survivor_attempt_allocation ( word_size ) ;
if ( result ! = NULL ) {
return result ;
2008-06-05 15:57:56 -07:00
} else {
2011-08-12 11:31:06 -04:00
// Let's try to allocate in the old gen in case we can fit the
// object there.
return old_attempt_allocation ( word_size ) ;
2008-06-05 15:57:56 -07:00
}
2011-08-12 11:31:06 -04:00
} else {
assert ( purpose = = GCAllocForTenured , " sanity " ) ;
HeapWord * result = old_attempt_allocation ( word_size ) ;
if ( result ! = NULL ) {
return result ;
2008-06-05 15:57:56 -07:00
} else {
2011-08-12 11:31:06 -04:00
// Let's try to allocate in the survivors in case we can fit the
// object there.
return survivor_attempt_allocation ( word_size ) ;
2008-06-05 15:57:56 -07:00
}
}
2011-08-12 11:31:06 -04:00
ShouldNotReachHere ( ) ;
// Trying to keep some compilers happy.
return NULL ;
2008-06-05 15:57:56 -07:00
}
2011-08-29 10:13:06 -07:00
G1ParGCAllocBuffer : : G1ParGCAllocBuffer ( size_t gclab_word_size ) :
2014-04-16 11:05:37 +02:00
ParGCAllocBuffer ( gclab_word_size ) , _retired ( true ) { }
2011-08-29 10:13:06 -07:00
2014-02-24 09:40:21 +01:00
void G1ParCopyHelper : : mark_object ( oop obj ) {
2014-04-17 15:57:02 +02:00
assert ( ! _g1 - > heap_region_containing ( obj ) - > in_collection_set ( ) , " should not mark objects in the CSet " ) ;
2012-01-10 18:58:13 -05:00
// We know that the object is not moving so it's safe to read its size.
2012-01-12 00:06:47 -08:00
_cm - > grayRoot ( obj , ( size_t ) obj - > size ( ) , _worker_id ) ;
2012-01-10 18:58:13 -05:00
}
2014-02-24 09:40:21 +01:00
void G1ParCopyHelper : : mark_forwarded_object ( oop from_obj , oop to_obj ) {
2012-01-10 18:58:13 -05:00
assert ( from_obj - > is_forwarded ( ) , " from obj should be forwarded " ) ;
assert ( from_obj - > forwardee ( ) = = to_obj , " to obj should be the forwardee " ) ;
assert ( from_obj ! = to_obj , " should not be self-forwarded " ) ;
2014-04-17 15:57:02 +02:00
assert ( _g1 - > heap_region_containing ( from_obj ) - > in_collection_set ( ) , " from obj should be in the CSet " ) ;
assert ( ! _g1 - > heap_region_containing ( to_obj ) - > in_collection_set ( ) , " should not mark objects in the CSet " ) ;
2012-01-10 18:58:13 -05:00
// The object might be in the process of being copied by another
// worker so we cannot trust that its to-space image is
// well-formed. So we have to read its size from its from-space
// image which we know should not be changing.
2012-01-12 00:06:47 -08:00
_cm - > grayRoot ( to_obj , ( size_t ) from_obj - > size ( ) , _worker_id ) ;
2008-06-05 15:57:56 -07:00
}
6964458: Reimplement class meta-data storage to use native memory
Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Co-authored-by: Stefan Karlsson <stefan.karlsson@oracle.com>
Co-authored-by: Mikael Gerdin <mikael.gerdin@oracle.com>
Co-authored-by: Tom Rodriguez <tom.rodriguez@oracle.com>
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
2012-09-01 13:25:18 -04:00
template < class T >
void G1ParCopyHelper : : do_klass_barrier ( T * p , oop new_obj ) {
if ( _g1 - > heap_region_containing_raw ( new_obj ) - > is_young ( ) ) {
_scanned_klass - > record_modified_oops ( ) ;
}
}
2014-07-07 10:12:40 +02:00
template < G1Barrier barrier , G1Mark do_mark_object >
2009-07-14 15:40:39 -07:00
template < class T >
2014-02-24 09:40:21 +01:00
void G1ParCopyClosure < barrier , do_mark_object > : : do_oop_work ( T * p ) {
2014-02-24 09:40:49 +01:00
T heap_oop = oopDesc : : load_heap_oop ( p ) ;
if ( oopDesc : : is_null ( heap_oop ) ) {
return ;
}
oop obj = oopDesc : : decode_heap_oop_not_null ( heap_oop ) ;
2011-08-29 10:13:06 -07:00
2012-01-12 00:06:47 -08:00
assert ( _worker_id = = _par_scan_state - > queue_num ( ) , " sanity " ) ;
2014-07-23 09:03:32 +02:00
G1CollectedHeap : : in_cset_state_t state = _g1 - > in_cset_state ( obj ) ;
if ( state = = G1CollectedHeap : : InCSet ) {
2012-01-10 18:58:13 -05:00
oop forwardee ;
2009-01-16 13:02:20 -05:00
if ( obj - > is_forwarded ( ) ) {
2012-01-10 18:58:13 -05:00
forwardee = obj - > forwardee ( ) ;
2009-01-16 13:02:20 -05:00
} else {
2014-02-24 09:41:04 +01:00
forwardee = _par_scan_state - > copy_to_survivor_space ( obj ) ;
2012-01-10 18:58:13 -05:00
}
assert ( forwardee ! = NULL , " forwardee should not be NULL " ) ;
oopDesc : : encode_store_heap_oop ( p , forwardee ) ;
2014-07-07 10:12:40 +02:00
if ( do_mark_object ! = G1MarkNone & & forwardee ! = obj ) {
2012-01-10 18:58:13 -05:00
// If the object is self-forwarded we don't need to explicitly
// mark it, the evacuation failure protocol will do so.
mark_forwarded_object ( obj , forwardee ) ;
2008-06-05 15:57:56 -07:00
}
2012-01-10 18:58:13 -05:00
2014-01-20 11:47:53 +01:00
if ( barrier = = G1BarrierKlass ) {
6964458: Reimplement class meta-data storage to use native memory
Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Co-authored-by: Stefan Karlsson <stefan.karlsson@oracle.com>
Co-authored-by: Mikael Gerdin <mikael.gerdin@oracle.com>
Co-authored-by: Tom Rodriguez <tom.rodriguez@oracle.com>
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
2012-09-01 13:25:18 -04:00
do_klass_barrier ( p , forwardee ) ;
2008-06-05 15:57:56 -07:00
}
2011-08-29 10:13:06 -07:00
} else {
2014-07-23 09:03:32 +02:00
if ( state = = G1CollectedHeap : : IsHumongous ) {
_g1 - > set_humongous_is_live ( obj ) ;
}
2011-08-29 10:13:06 -07:00
// The object is not in collection set. If we're a root scanning
2014-07-07 10:12:40 +02:00
// closure during an initial mark pause then attempt to mark the object.
if ( do_mark_object = = G1MarkFromRoot ) {
2012-01-10 18:58:13 -05:00
mark_object ( obj ) ;
2011-08-29 10:13:06 -07:00
}
2009-01-16 13:02:20 -05:00
}
2008-06-05 15:57:56 -07:00
2014-02-24 09:40:49 +01:00
if ( barrier = = G1BarrierEvac ) {
2012-01-12 00:06:47 -08:00
_par_scan_state - > update_rs ( _from , p , _worker_id ) ;
2009-01-16 13:02:20 -05:00
}
2008-06-05 15:57:56 -07:00
}
2014-07-07 10:12:40 +02:00
template void G1ParCopyClosure < G1BarrierEvac , G1MarkNone > : : do_oop_work ( oop * p ) ;
template void G1ParCopyClosure < G1BarrierEvac , G1MarkNone > : : do_oop_work ( narrowOop * p ) ;
2008-06-05 15:57:56 -07:00
class G1ParEvacuateFollowersClosure : public VoidClosure {
protected :
G1CollectedHeap * _g1h ;
G1ParScanThreadState * _par_scan_state ;
RefToScanQueueSet * _queues ;
ParallelTaskTerminator * _terminator ;
G1ParScanThreadState * par_scan_state ( ) { return _par_scan_state ; }
RefToScanQueueSet * queues ( ) { return _queues ; }
ParallelTaskTerminator * terminator ( ) { return _terminator ; }
public :
G1ParEvacuateFollowersClosure ( G1CollectedHeap * g1h ,
G1ParScanThreadState * par_scan_state ,
RefToScanQueueSet * queues ,
ParallelTaskTerminator * terminator )
: _g1h ( g1h ) , _par_scan_state ( par_scan_state ) ,
_queues ( queues ) , _terminator ( terminator ) { }
2010-10-12 11:29:45 -07:00
void do_void ( ) ;
2009-07-14 15:40:39 -07:00
2010-10-12 11:29:45 -07:00
private :
inline bool offer_termination ( ) ;
} ;
bool G1ParEvacuateFollowersClosure : : offer_termination ( ) {
G1ParScanThreadState * const pss = par_scan_state ( ) ;
pss - > start_term_time ( ) ;
const bool res = terminator ( ) - > offer_termination ( ) ;
pss - > end_term_time ( ) ;
return res ;
}
void G1ParEvacuateFollowersClosure : : do_void ( ) {
G1ParScanThreadState * const pss = par_scan_state ( ) ;
pss - > trim_queue ( ) ;
do {
2014-06-26 15:48:05 +02:00
pss - > steal_and_trim_queue ( queues ( ) ) ;
2010-10-12 11:29:45 -07:00
} while ( ! offer_termination ( ) ) ;
}
2008-06-05 15:57:56 -07:00
6964458: Reimplement class meta-data storage to use native memory
Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Co-authored-by: Stefan Karlsson <stefan.karlsson@oracle.com>
Co-authored-by: Mikael Gerdin <mikael.gerdin@oracle.com>
Co-authored-by: Tom Rodriguez <tom.rodriguez@oracle.com>
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
2012-09-01 13:25:18 -04:00
class G1KlassScanClosure : public KlassClosure {
G1ParCopyHelper * _closure ;
bool _process_only_dirty ;
int _count ;
public :
G1KlassScanClosure ( G1ParCopyHelper * closure , bool process_only_dirty )
: _process_only_dirty ( process_only_dirty ) , _closure ( closure ) , _count ( 0 ) { }
void do_klass ( Klass * klass ) {
// If the klass has not been dirtied we know that there's
// no references into the young gen and we can skip it.
if ( ! _process_only_dirty | | klass - > has_modified_oops ( ) ) {
// Clean the klass since we're going to scavenge all the metadata.
klass - > clear_modified_oops ( ) ;
// Tell the closure that this klass is the Klass to scavenge
// and is the one to dirty if oops are left pointing into the young gen.
_closure - > set_scanned_klass ( klass ) ;
klass - > oops_do ( _closure ) ;
_closure - > set_scanned_klass ( NULL ) ;
}
_count + + ;
}
} ;
2008-06-05 15:57:56 -07:00
class G1ParTask : public AbstractGangTask {
protected :
G1CollectedHeap * _g1h ;
RefToScanQueueSet * _queues ;
ParallelTaskTerminator _terminator ;
2011-12-14 13:34:57 -08:00
uint _n_workers ;
2008-06-05 15:57:56 -07:00
Mutex _stats_lock ;
Mutex * stats_lock ( ) { return & _stats_lock ; }
public :
2014-06-26 15:48:05 +02:00
G1ParTask ( G1CollectedHeap * g1h , RefToScanQueueSet * task_queues )
2008-06-05 15:57:56 -07:00
: AbstractGangTask ( " G1 collection " ) ,
_g1h ( g1h ) ,
_queues ( task_queues ) ,
2011-08-09 10:16:01 -07:00
_terminator ( 0 , _queues ) ,
_stats_lock ( Mutex : : leaf , " parallel G1 stats lock " , true )
2008-06-05 15:57:56 -07:00
{ }
RefToScanQueueSet * queues ( ) { return _queues ; }
RefToScanQueue * work_queue ( int i ) {
return queues ( ) - > queue ( i ) ;
}
2011-08-09 10:16:01 -07:00
ParallelTaskTerminator * terminator ( ) { return & _terminator ; }
virtual void set_for_termination ( int active_workers ) {
// This task calls set_n_termination() in par_non_clean_card_iterate_work()
// in the young space (_par_seq_tasks) in the G1 heap
// for SequentialSubTasksDone.
// This task also uses SubTasksDone in SharedHeap and G1CollectedHeap
// both of which need setting by set_n_termination().
_g1h - > SharedHeap : : set_n_termination ( active_workers ) ;
_g1h - > set_n_termination ( active_workers ) ;
terminator ( ) - > reset_for_reuse ( active_workers ) ;
_n_workers = active_workers ;
}
2014-07-07 10:12:40 +02:00
// Helps out with CLD processing.
//
// During InitialMark we need to:
// 1) Scavenge all CLDs for the young GC.
// 2) Mark all objects directly reachable from strong CLDs.
template < G1Mark do_mark_object >
class G1CLDClosure : public CLDClosure {
G1ParCopyClosure < G1BarrierNone , do_mark_object > * _oop_closure ;
G1ParCopyClosure < G1BarrierKlass , do_mark_object > _oop_in_klass_closure ;
G1KlassScanClosure _klass_in_cld_closure ;
bool _claim ;
public :
G1CLDClosure ( G1ParCopyClosure < G1BarrierNone , do_mark_object > * oop_closure ,
bool only_young , bool claim )
: _oop_closure ( oop_closure ) ,
_oop_in_klass_closure ( oop_closure - > g1 ( ) ,
oop_closure - > pss ( ) ,
oop_closure - > rp ( ) ) ,
_klass_in_cld_closure ( & _oop_in_klass_closure , only_young ) ,
_claim ( claim ) {
}
void do_cld ( ClassLoaderData * cld ) {
cld - > oops_do ( _oop_closure , & _klass_in_cld_closure , _claim ) ;
}
} ;
class G1CodeBlobClosure : public CodeBlobClosure {
OopClosure * _f ;
public :
G1CodeBlobClosure ( OopClosure * f ) : _f ( f ) { }
void do_code_blob ( CodeBlob * blob ) {
nmethod * that = blob - > as_nmethod_or_null ( ) ;
if ( that ! = NULL ) {
if ( ! that - > test_set_oops_do_mark ( ) ) {
that - > oops_do ( _f ) ;
that - > fix_oop_relocations ( ) ;
}
}
}
} ;
2011-12-14 13:34:57 -08:00
void work ( uint worker_id ) {
if ( worker_id > = _n_workers ) return ; // no work needed this round
2010-04-22 15:20:16 -04:00
double start_time_ms = os : : elapsedTime ( ) * 1000.0 ;
2012-07-11 22:47:38 +02:00
_g1h - > g1_policy ( ) - > phase_times ( ) - > record_gc_worker_start_time ( worker_id , start_time_ms ) ;
2010-04-22 15:20:16 -04:00
2012-03-13 11:05:32 -07:00
{
ResourceMark rm ;
HandleMark hm ;
2008-06-05 15:57:56 -07:00
2012-03-13 11:05:32 -07:00
ReferenceProcessor * rp = _g1h - > ref_processor_stw ( ) ;
2011-09-22 10:57:37 -07:00
2014-02-24 09:41:04 +01:00
G1ParScanThreadState pss ( _g1h , worker_id , rp ) ;
2012-03-13 11:05:32 -07:00
G1ParScanHeapEvacFailureClosure evac_failure_cl ( _g1h , & pss , rp ) ;
2008-06-05 15:57:56 -07:00
2012-03-13 11:05:32 -07:00
pss . set_evac_failure_closure ( & evac_failure_cl ) ;
2008-06-05 15:57:56 -07:00
2014-07-07 10:12:40 +02:00
bool only_young = _g1h - > g1_policy ( ) - > gcs_are_young ( ) ;
// Non-IM young GC.
G1ParCopyClosure < G1BarrierNone , G1MarkNone > scan_only_root_cl ( _g1h , & pss , rp ) ;
G1CLDClosure < G1MarkNone > scan_only_cld_cl ( & scan_only_root_cl ,
only_young , // Only process dirty klasses.
false ) ; // No need to claim CLDs.
// IM young GC.
// Strong roots closures.
G1ParCopyClosure < G1BarrierNone , G1MarkFromRoot > scan_mark_root_cl ( _g1h , & pss , rp ) ;
G1CLDClosure < G1MarkFromRoot > scan_mark_cld_cl ( & scan_mark_root_cl ,
false , // Process all klasses.
true ) ; // Need to claim CLDs.
// Weak roots closures.
G1ParCopyClosure < G1BarrierNone , G1MarkPromotedFromRoot > scan_mark_weak_root_cl ( _g1h , & pss , rp ) ;
G1CLDClosure < G1MarkPromotedFromRoot > scan_mark_weak_cld_cl ( & scan_mark_weak_root_cl ,
false , // Process all klasses.
true ) ; // Need to claim CLDs.
G1CodeBlobClosure scan_only_code_cl ( & scan_only_root_cl ) ;
G1CodeBlobClosure scan_mark_code_cl ( & scan_mark_root_cl ) ;
// IM Weak code roots are handled later.
OopClosure * strong_root_cl ;
OopClosure * weak_root_cl ;
CLDClosure * strong_cld_cl ;
CLDClosure * weak_cld_cl ;
CodeBlobClosure * strong_code_cl ;
2008-06-05 15:57:56 -07:00
2012-03-13 11:05:32 -07:00
if ( _g1h - > g1_policy ( ) - > during_initial_mark_pause ( ) ) {
// We also need to mark copied objects.
2014-07-07 10:12:40 +02:00
strong_root_cl = & scan_mark_root_cl ;
strong_cld_cl = & scan_mark_cld_cl ;
strong_code_cl = & scan_mark_code_cl ;
2014-08-06 09:55:16 +02:00
if ( ClassUnloadingWithConcurrentMark ) {
weak_root_cl = & scan_mark_weak_root_cl ;
weak_cld_cl = & scan_mark_weak_cld_cl ;
} else {
weak_root_cl = & scan_mark_root_cl ;
weak_cld_cl = & scan_mark_cld_cl ;
}
2014-07-07 10:12:40 +02:00
} else {
strong_root_cl = & scan_only_root_cl ;
weak_root_cl = & scan_only_root_cl ;
strong_cld_cl = & scan_only_cld_cl ;
weak_cld_cl = & scan_only_cld_cl ;
strong_code_cl = & scan_only_code_cl ;
2012-03-13 11:05:32 -07:00
}
2008-06-05 15:57:56 -07:00
2011-09-22 10:57:37 -07:00
2014-07-07 10:12:40 +02:00
G1ParPushHeapRSClosure push_heap_rs_cl ( _g1h , & pss ) ;
6964458: Reimplement class meta-data storage to use native memory
Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Co-authored-by: Stefan Karlsson <stefan.karlsson@oracle.com>
Co-authored-by: Mikael Gerdin <mikael.gerdin@oracle.com>
Co-authored-by: Tom Rodriguez <tom.rodriguez@oracle.com>
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
2012-09-01 13:25:18 -04:00
2012-03-13 11:05:32 -07:00
pss . start_strong_roots ( ) ;
2014-07-07 10:12:40 +02:00
_g1h - > g1_process_roots ( strong_root_cl ,
weak_root_cl ,
& push_heap_rs_cl ,
strong_cld_cl ,
weak_cld_cl ,
strong_code_cl ,
worker_id ) ;
2012-03-13 11:05:32 -07:00
pss . end_strong_roots ( ) ;
2011-08-01 10:04:28 -07:00
2012-03-13 11:05:32 -07:00
{
double start = os : : elapsedTime ( ) ;
G1ParEvacuateFollowersClosure evac ( _g1h , & pss , _queues , & _terminator ) ;
evac . do_void ( ) ;
double elapsed_ms = ( os : : elapsedTime ( ) - start ) * 1000.0 ;
double term_ms = pss . term_time ( ) * 1000.0 ;
2012-08-23 10:21:12 +02:00
_g1h - > g1_policy ( ) - > phase_times ( ) - > add_obj_copy_time ( worker_id , elapsed_ms - term_ms ) ;
2012-07-11 22:47:38 +02:00
_g1h - > g1_policy ( ) - > phase_times ( ) - > record_termination ( worker_id , term_ms , pss . term_attempts ( ) ) ;
2012-03-13 11:05:32 -07:00
}
_g1h - > g1_policy ( ) - > record_thread_age_table ( pss . age_table ( ) ) ;
_g1h - > update_surviving_young_words ( pss . surviving_young_words ( ) + 1 ) ;
if ( ParallelGCVerbose ) {
MutexLocker x ( stats_lock ( ) ) ;
pss . print_termination_stats ( worker_id ) ;
}
2008-06-05 15:57:56 -07:00
2014-06-26 15:48:05 +02:00
assert ( pss . queue_is_empty ( ) , " should be empty " ) ;
2008-06-05 15:57:56 -07:00
2012-03-13 11:05:32 -07:00
// Close the inner scope so that the ResourceMark and HandleMark
// destructors are executed here and are included as part of the
// "GC Worker Time".
2008-06-05 15:57:56 -07:00
}
2010-04-22 15:20:16 -04:00
double end_time_ms = os : : elapsedTime ( ) * 1000.0 ;
2012-07-11 22:47:38 +02:00
_g1h - > g1_policy ( ) - > phase_times ( ) - > record_gc_worker_end_time ( worker_id , end_time_ms ) ;
2008-06-05 15:57:56 -07:00
}
} ;
// *** Common G1 Evacuation Stuff
2010-09-20 14:38:38 -07:00
// This method is run in a GC worker.
2008-06-05 15:57:56 -07:00
void
G1CollectedHeap : :
2014-07-07 10:12:40 +02:00
g1_process_roots ( OopClosure * scan_non_heap_roots ,
OopClosure * scan_non_heap_weak_roots ,
OopsInHeapRegionClosure * scan_rs ,
CLDClosure * scan_strong_clds ,
CLDClosure * scan_weak_clds ,
CodeBlobClosure * scan_strong_code ,
uint worker_i ) {
// First scan the shared roots.
2008-06-05 15:57:56 -07:00
double ext_roots_start = os : : elapsedTime ( ) ;
double closure_app_time_sec = 0.0 ;
2014-07-07 10:12:40 +02:00
bool during_im = _g1h - > g1_policy ( ) - > during_initial_mark_pause ( ) ;
2014-08-06 09:55:16 +02:00
bool trace_metadata = during_im & & ClassUnloadingWithConcurrentMark ;
2014-07-07 10:12:40 +02:00
2008-06-05 15:57:56 -07:00
BufferingOopClosure buf_scan_non_heap_roots ( scan_non_heap_roots ) ;
2014-07-07 10:12:40 +02:00
BufferingOopClosure buf_scan_non_heap_weak_roots ( scan_non_heap_weak_roots ) ;
2008-06-05 15:57:56 -07:00
2014-07-07 10:12:40 +02:00
process_roots ( false , // no scoping; this is parallel code
SharedHeap : : SO_None ,
& buf_scan_non_heap_roots ,
& buf_scan_non_heap_weak_roots ,
scan_strong_clds ,
2014-08-06 09:55:16 +02:00
// Unloading Initial Marks handle the weak CLDs separately.
( trace_metadata ? NULL : scan_weak_clds ) ,
2014-07-07 10:12:40 +02:00
scan_strong_code ) ;
2010-04-22 10:02:38 -07:00
2011-09-22 10:57:37 -07:00
// Now the CM ref_processor roots.
2011-08-01 10:04:28 -07:00
if ( ! _process_strong_tasks - > is_task_claimed ( G1H_PS_refProcessor_oops_do ) ) {
2011-09-22 10:57:37 -07:00
// We need to treat the discovered reference lists of the
// concurrent mark ref processor as roots and keep entries
// (which are added by the marking threads) on them live
// until they can be processed at the end of marking.
ref_processor_cm ( ) - > weak_oops_do ( & buf_scan_non_heap_roots ) ;
2011-08-01 10:04:28 -07:00
}
2014-08-06 09:55:16 +02:00
if ( trace_metadata ) {
2014-07-07 10:12:40 +02:00
// Barrier to make sure all workers passed
// the strong CLD and strong nmethods phases.
active_strong_roots_scope ( ) - > wait_until_all_workers_done_with_threads ( n_par_threads ( ) ) ;
// Now take the complement of the strong CLDs.
ClassLoaderDataGraph : : roots_cld_do ( NULL , scan_weak_clds ) ;
}
2011-08-01 10:04:28 -07:00
// Finish up any enqueued closure apps (attributed as object copy time).
2008-06-05 15:57:56 -07:00
buf_scan_non_heap_roots . done ( ) ;
2014-07-07 10:12:40 +02:00
buf_scan_non_heap_weak_roots . done ( ) ;
2011-08-01 10:04:28 -07:00
2014-07-07 10:12:40 +02:00
double obj_copy_time_sec = buf_scan_non_heap_roots . closure_app_seconds ( )
+ buf_scan_non_heap_weak_roots . closure_app_seconds ( ) ;
6964458: Reimplement class meta-data storage to use native memory
Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Co-authored-by: Stefan Karlsson <stefan.karlsson@oracle.com>
Co-authored-by: Mikael Gerdin <mikael.gerdin@oracle.com>
Co-authored-by: Tom Rodriguez <tom.rodriguez@oracle.com>
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
2012-09-01 13:25:18 -04:00
2012-07-11 22:47:38 +02:00
g1_policy ( ) - > phase_times ( ) - > record_obj_copy_time ( worker_i , obj_copy_time_sec * 1000.0 ) ;
2011-08-01 10:04:28 -07:00
2008-06-05 15:57:56 -07:00
double ext_root_time_ms =
2012-08-23 10:21:12 +02:00
( ( os : : elapsedTime ( ) - ext_roots_start ) - obj_copy_time_sec ) * 1000.0 ;
2011-08-01 10:04:28 -07:00
2012-07-11 22:47:38 +02:00
g1_policy ( ) - > phase_times ( ) - > record_ext_root_scan_time ( worker_i , ext_root_time_ms ) ;
2008-06-05 15:57:56 -07:00
2012-01-10 18:58:13 -05:00
// During conc marking we have to filter the per-thread SATB buffers
// to make sure we remove any oops into the CSet (which will show up
// as implicitly live).
2012-08-23 10:21:12 +02:00
double satb_filtering_ms = 0.0 ;
2012-01-10 18:58:13 -05:00
if ( ! _process_strong_tasks - > is_task_claimed ( G1H_PS_filter_satb_buffers ) ) {
if ( mark_in_progress ( ) ) {
2012-08-23 10:21:12 +02:00
double satb_filter_start = os : : elapsedTime ( ) ;
2012-01-10 18:58:13 -05:00
JavaThread : : satb_mark_queue_set ( ) . filter_thread_buffers ( ) ;
2012-08-23 10:21:12 +02:00
satb_filtering_ms = ( os : : elapsedTime ( ) - satb_filter_start ) * 1000.0 ;
2012-01-10 18:58:13 -05:00
}
2008-06-05 15:57:56 -07:00
}
2012-07-11 22:47:38 +02:00
g1_policy ( ) - > phase_times ( ) - > record_satb_filtering_time ( worker_i , satb_filtering_ms ) ;
2008-06-05 15:57:56 -07:00
2014-07-07 12:37:11 +02:00
// Now scan the complement of the collection set.
2014-07-07 10:12:40 +02:00
MarkingCodeBlobClosure scavenge_cs_nmethods ( scan_non_heap_weak_roots , CodeBlobToOopClosure : : FixRelocations ) ;
g1_rem_set ( ) - > oops_into_collection_set_do ( scan_rs , & scavenge_cs_nmethods , worker_i ) ;
2014-02-10 12:58:09 +01:00
2008-06-05 15:57:56 -07:00
_process_strong_tasks - > all_tasks_completed ( ) ;
}
2014-01-20 11:47:07 +01:00
class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
private :
BoolObjectClosure * _is_alive ;
int _initial_string_table_size ;
int _initial_symbol_table_size ;
bool _process_strings ;
int _strings_processed ;
int _strings_removed ;
bool _process_symbols ;
int _symbols_processed ;
int _symbols_removed ;
2014-02-05 14:29:34 +01:00
bool _do_in_parallel ;
2014-01-20 11:47:07 +01:00
public :
G1StringSymbolTableUnlinkTask ( BoolObjectClosure * is_alive , bool process_strings , bool process_symbols ) :
2014-07-07 10:12:40 +02:00
AbstractGangTask ( " String/Symbol Unlinking " ) ,
_is_alive ( is_alive ) ,
2014-02-05 14:29:34 +01:00
_do_in_parallel ( G1CollectedHeap : : use_parallel_gc_threads ( ) ) ,
2014-01-20 11:47:07 +01:00
_process_strings ( process_strings ) , _strings_processed ( 0 ) , _strings_removed ( 0 ) ,
_process_symbols ( process_symbols ) , _symbols_processed ( 0 ) , _symbols_removed ( 0 ) {
_initial_string_table_size = StringTable : : the_table ( ) - > table_size ( ) ;
_initial_symbol_table_size = SymbolTable : : the_table ( ) - > table_size ( ) ;
if ( process_strings ) {
StringTable : : clear_parallel_claimed_index ( ) ;
}
if ( process_symbols ) {
SymbolTable : : clear_parallel_claimed_index ( ) ;
}
}
~ G1StringSymbolTableUnlinkTask ( ) {
2014-02-05 14:29:34 +01:00
guarantee ( ! _process_strings | | ! _do_in_parallel | | StringTable : : parallel_claimed_index ( ) > = _initial_string_table_size ,
2014-04-04 09:46:10 +02:00
err_msg ( " claim value %d after unlink less than initial string table size %d " ,
2014-01-20 11:47:07 +01:00
StringTable : : parallel_claimed_index ( ) , _initial_string_table_size ) ) ;
2014-02-05 14:29:34 +01:00
guarantee ( ! _process_symbols | | ! _do_in_parallel | | SymbolTable : : parallel_claimed_index ( ) > = _initial_symbol_table_size ,
2014-04-04 09:46:10 +02:00
err_msg ( " claim value %d after unlink less than initial symbol table size %d " ,
2014-01-20 11:47:07 +01:00
SymbolTable : : parallel_claimed_index ( ) , _initial_symbol_table_size ) ) ;
2014-07-07 10:12:40 +02:00
if ( G1TraceStringSymbolTableScrubbing ) {
gclog_or_tty - > print_cr ( " Cleaned string and symbol table, "
" strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, "
" symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed " ,
strings_processed ( ) , strings_removed ( ) ,
symbols_processed ( ) , symbols_removed ( ) ) ;
}
2014-01-20 11:47:07 +01:00
}
void work ( uint worker_id ) {
2014-02-05 14:29:34 +01:00
if ( _do_in_parallel ) {
2014-01-20 11:47:07 +01:00
int strings_processed = 0 ;
int strings_removed = 0 ;
int symbols_processed = 0 ;
int symbols_removed = 0 ;
if ( _process_strings ) {
StringTable : : possibly_parallel_unlink ( _is_alive , & strings_processed , & strings_removed ) ;
Atomic : : add ( strings_processed , & _strings_processed ) ;
Atomic : : add ( strings_removed , & _strings_removed ) ;
}
if ( _process_symbols ) {
SymbolTable : : possibly_parallel_unlink ( & symbols_processed , & symbols_removed ) ;
Atomic : : add ( symbols_processed , & _symbols_processed ) ;
Atomic : : add ( symbols_removed , & _symbols_removed ) ;
}
} else {
if ( _process_strings ) {
StringTable : : unlink ( _is_alive , & _strings_processed , & _strings_removed ) ;
}
if ( _process_symbols ) {
SymbolTable : : unlink ( & _symbols_processed , & _symbols_removed ) ;
}
}
}
size_t strings_processed ( ) const { return ( size_t ) _strings_processed ; }
size_t strings_removed ( ) const { return ( size_t ) _strings_removed ; }
size_t symbols_processed ( ) const { return ( size_t ) _symbols_processed ; }
size_t symbols_removed ( ) const { return ( size_t ) _symbols_removed ; }
} ;
2014-07-07 10:12:40 +02:00
class G1CodeCacheUnloadingTask VALUE_OBJ_CLASS_SPEC {
private :
static Monitor * _lock ;
BoolObjectClosure * const _is_alive ;
const bool _unloading_occurred ;
const uint _num_workers ;
// Variables used to claim nmethods.
nmethod * _first_nmethod ;
volatile nmethod * _claimed_nmethod ;
// The list of nmethods that need to be processed by the second pass.
volatile nmethod * _postponed_list ;
volatile uint _num_entered_barrier ;
public :
G1CodeCacheUnloadingTask ( uint num_workers , BoolObjectClosure * is_alive , bool unloading_occurred ) :
_is_alive ( is_alive ) ,
_unloading_occurred ( unloading_occurred ) ,
_num_workers ( num_workers ) ,
_first_nmethod ( NULL ) ,
_claimed_nmethod ( NULL ) ,
_postponed_list ( NULL ) ,
_num_entered_barrier ( 0 )
{
nmethod : : increase_unloading_clock ( ) ;
_first_nmethod = CodeCache : : alive_nmethod ( CodeCache : : first ( ) ) ;
_claimed_nmethod = ( volatile nmethod * ) _first_nmethod ;
}
~ G1CodeCacheUnloadingTask ( ) {
CodeCache : : verify_clean_inline_caches ( ) ;
CodeCache : : set_needs_cache_clean ( false ) ;
guarantee ( CodeCache : : scavenge_root_nmethods ( ) = = NULL , " Must be " ) ;
CodeCache : : verify_icholder_relocations ( ) ;
}
private :
void add_to_postponed_list ( nmethod * nm ) {
nmethod * old ;
do {
old = ( nmethod * ) _postponed_list ;
nm - > set_unloading_next ( old ) ;
} while ( ( nmethod * ) Atomic : : cmpxchg_ptr ( nm , & _postponed_list , old ) ! = old ) ;
}
void clean_nmethod ( nmethod * nm ) {
bool postponed = nm - > do_unloading_parallel ( _is_alive , _unloading_occurred ) ;
if ( postponed ) {
// This nmethod referred to an nmethod that has not been cleaned/unloaded yet.
add_to_postponed_list ( nm ) ;
}
// Mark that this thread has been cleaned/unloaded.
// After this call, it will be safe to ask if this nmethod was unloaded or not.
nm - > set_unloading_clock ( nmethod : : global_unloading_clock ( ) ) ;
}
void clean_nmethod_postponed ( nmethod * nm ) {
nm - > do_unloading_parallel_postponed ( _is_alive , _unloading_occurred ) ;
}
static const int MaxClaimNmethods = 16 ;
void claim_nmethods ( nmethod * * claimed_nmethods , int * num_claimed_nmethods ) {
nmethod * first ;
nmethod * last ;
do {
* num_claimed_nmethods = 0 ;
first = last = ( nmethod * ) _claimed_nmethod ;
if ( first ! = NULL ) {
for ( int i = 0 ; i < MaxClaimNmethods ; i + + ) {
last = CodeCache : : alive_nmethod ( CodeCache : : next ( last ) ) ;
if ( last = = NULL ) {
break ;
}
claimed_nmethods [ i ] = last ;
( * num_claimed_nmethods ) + + ;
}
}
} while ( ( nmethod * ) Atomic : : cmpxchg_ptr ( last , & _claimed_nmethod , first ) ! = first ) ;
}
nmethod * claim_postponed_nmethod ( ) {
nmethod * claim ;
nmethod * next ;
do {
claim = ( nmethod * ) _postponed_list ;
if ( claim = = NULL ) {
return NULL ;
}
next = claim - > unloading_next ( ) ;
} while ( ( nmethod * ) Atomic : : cmpxchg_ptr ( next , & _postponed_list , claim ) ! = claim ) ;
return claim ;
}
public :
// Mark that we're done with the first pass of nmethod cleaning.
void barrier_mark ( uint worker_id ) {
MonitorLockerEx ml ( _lock , Mutex : : _no_safepoint_check_flag ) ;
_num_entered_barrier + + ;
if ( _num_entered_barrier = = _num_workers ) {
ml . notify_all ( ) ;
}
}
// See if we have to wait for the other workers to
// finish their first-pass nmethod cleaning work.
void barrier_wait ( uint worker_id ) {
if ( _num_entered_barrier < _num_workers ) {
MonitorLockerEx ml ( _lock , Mutex : : _no_safepoint_check_flag ) ;
while ( _num_entered_barrier < _num_workers ) {
ml . wait ( Mutex : : _no_safepoint_check_flag , 0 , false ) ;
}
}
}
// Cleaning and unloading of nmethods. Some work has to be postponed
// to the second pass, when we know which nmethods survive.
void work_first_pass ( uint worker_id ) {
// The first nmethods is claimed by the first worker.
if ( worker_id = = 0 & & _first_nmethod ! = NULL ) {
clean_nmethod ( _first_nmethod ) ;
_first_nmethod = NULL ;
}
int num_claimed_nmethods ;
nmethod * claimed_nmethods [ MaxClaimNmethods ] ;
while ( true ) {
claim_nmethods ( claimed_nmethods , & num_claimed_nmethods ) ;
if ( num_claimed_nmethods = = 0 ) {
break ;
}
for ( int i = 0 ; i < num_claimed_nmethods ; i + + ) {
clean_nmethod ( claimed_nmethods [ i ] ) ;
}
}
}
void work_second_pass ( uint worker_id ) {
nmethod * nm ;
// Take care of postponed nmethods.
while ( ( nm = claim_postponed_nmethod ( ) ) ! = NULL ) {
clean_nmethod_postponed ( nm ) ;
}
}
} ;
Monitor * G1CodeCacheUnloadingTask : : _lock = new Monitor ( Mutex : : leaf , " Code Cache Unload lock " ) ;
class G1KlassCleaningTask : public StackObj {
BoolObjectClosure * _is_alive ;
volatile jint _clean_klass_tree_claimed ;
ClassLoaderDataGraphKlassIteratorAtomic _klass_iterator ;
public :
G1KlassCleaningTask ( BoolObjectClosure * is_alive ) :
_is_alive ( is_alive ) ,
_clean_klass_tree_claimed ( 0 ) ,
_klass_iterator ( ) {
}
private :
bool claim_clean_klass_tree_task ( ) {
if ( _clean_klass_tree_claimed ) {
return false ;
}
return Atomic : : cmpxchg ( 1 , ( jint * ) & _clean_klass_tree_claimed , 0 ) = = 0 ;
}
InstanceKlass * claim_next_klass ( ) {
Klass * klass ;
do {
klass = _klass_iterator . next_klass ( ) ;
} while ( klass ! = NULL & & ! klass - > oop_is_instance ( ) ) ;
return ( InstanceKlass * ) klass ;
}
public :
void clean_klass ( InstanceKlass * ik ) {
ik - > clean_implementors_list ( _is_alive ) ;
ik - > clean_method_data ( _is_alive ) ;
// G1 specific cleanup work that has
// been moved here to be done in parallel.
ik - > clean_dependent_nmethods ( ) ;
}
void work ( ) {
ResourceMark rm ;
// One worker will clean the subklass/sibling klass tree.
if ( claim_clean_klass_tree_task ( ) ) {
Klass : : clean_subklass_tree ( _is_alive ) ;
}
// All workers will help cleaning the classes,
InstanceKlass * klass ;
while ( ( klass = claim_next_klass ( ) ) ! = NULL ) {
clean_klass ( klass ) ;
}
}
} ;
// To minimize the remark pause times, the tasks below are done in parallel.
class G1ParallelCleaningTask : public AbstractGangTask {
private :
G1StringSymbolTableUnlinkTask _string_symbol_task ;
G1CodeCacheUnloadingTask _code_cache_task ;
G1KlassCleaningTask _klass_cleaning_task ;
public :
// The constructor is run in the VMThread.
G1ParallelCleaningTask ( BoolObjectClosure * is_alive , bool process_strings , bool process_symbols , uint num_workers , bool unloading_occurred ) :
AbstractGangTask ( " Parallel Cleaning " ) ,
_string_symbol_task ( is_alive , process_strings , process_symbols ) ,
_code_cache_task ( num_workers , is_alive , unloading_occurred ) ,
_klass_cleaning_task ( is_alive ) {
}
// The parallel work done by all worker threads.
void work ( uint worker_id ) {
// Do first pass of code cache cleaning.
_code_cache_task . work_first_pass ( worker_id ) ;
2014-07-09 16:44:30 +02:00
// Let the threads mark that the first pass is done.
2014-07-07 10:12:40 +02:00
_code_cache_task . barrier_mark ( worker_id ) ;
// Clean the Strings and Symbols.
_string_symbol_task . work ( worker_id ) ;
// Wait for all workers to finish the first code cache cleaning pass.
_code_cache_task . barrier_wait ( worker_id ) ;
// Do the second code cache cleaning work, which realize on
// the liveness information gathered during the first pass.
_code_cache_task . work_second_pass ( worker_id ) ;
// Clean all klasses that were not unloaded.
_klass_cleaning_task . work ( ) ;
}
} ;
void G1CollectedHeap : : parallel_cleaning ( BoolObjectClosure * is_alive ,
bool process_strings ,
bool process_symbols ,
bool class_unloading_occurred ) {
2014-01-20 11:47:07 +01:00
uint n_workers = ( G1CollectedHeap : : use_parallel_gc_threads ( ) ?
2014-07-07 10:12:40 +02:00
workers ( ) - > active_workers ( ) : 1 ) ;
2014-01-20 11:47:07 +01:00
2014-07-07 10:12:40 +02:00
G1ParallelCleaningTask g1_unlink_task ( is_alive , process_strings , process_symbols ,
n_workers , class_unloading_occurred ) ;
2014-01-20 11:47:07 +01:00
if ( G1CollectedHeap : : use_parallel_gc_threads ( ) ) {
set_par_threads ( n_workers ) ;
workers ( ) - > run_task ( & g1_unlink_task ) ;
set_par_threads ( 0 ) ;
} else {
g1_unlink_task . work ( 0 ) ;
}
2014-07-07 10:12:40 +02:00
}
void G1CollectedHeap : : unlink_string_and_symbol_table ( BoolObjectClosure * is_alive ,
bool process_strings , bool process_symbols ) {
{
uint n_workers = ( G1CollectedHeap : : use_parallel_gc_threads ( ) ?
_g1h - > workers ( ) - > active_workers ( ) : 1 ) ;
G1StringSymbolTableUnlinkTask g1_unlink_task ( is_alive , process_strings , process_symbols ) ;
if ( G1CollectedHeap : : use_parallel_gc_threads ( ) ) {
set_par_threads ( n_workers ) ;
workers ( ) - > run_task ( & g1_unlink_task ) ;
set_par_threads ( 0 ) ;
} else {
g1_unlink_task . work ( 0 ) ;
}
2014-01-20 11:47:07 +01:00
}
2014-03-18 19:07:22 +01:00
if ( G1StringDedup : : is_enabled ( ) ) {
G1StringDedup : : unlink ( is_alive ) ;
}
2014-01-20 11:47:07 +01:00
}
2014-04-16 16:46:58 +02:00
class G1RedirtyLoggedCardsTask : public AbstractGangTask {
private :
DirtyCardQueueSet * _queue ;
public :
G1RedirtyLoggedCardsTask ( DirtyCardQueueSet * queue ) : AbstractGangTask ( " Redirty Cards " ) , _queue ( queue ) { }
virtual void work ( uint worker_id ) {
double start_time = os : : elapsedTime ( ) ;
2014-04-16 16:47:02 +02:00
RedirtyLoggedCardTableEntryClosure cl ;
2014-04-16 16:46:58 +02:00
if ( G1CollectedHeap : : heap ( ) - > use_parallel_gc_threads ( ) ) {
_queue - > par_apply_closure_to_all_completed_buffers ( & cl ) ;
} else {
_queue - > apply_closure_to_all_completed_buffers ( & cl ) ;
}
G1GCPhaseTimes * timer = G1CollectedHeap : : heap ( ) - > g1_policy ( ) - > phase_times ( ) ;
timer - > record_redirty_logged_cards_time_ms ( worker_id , ( os : : elapsedTime ( ) - start_time ) * 1000.0 ) ;
timer - > record_redirty_logged_cards_processed_cards ( worker_id , cl . num_processed ( ) ) ;
}
2014-03-17 10:13:27 +01:00
} ;
void G1CollectedHeap : : redirty_logged_cards ( ) {
guarantee ( G1DeferredRSUpdate , " Must only be called when using deferred RS updates. " ) ;
double redirty_logged_cards_start = os : : elapsedTime ( ) ;
2014-04-16 16:46:58 +02:00
uint n_workers = ( G1CollectedHeap : : use_parallel_gc_threads ( ) ?
_g1h - > workers ( ) - > active_workers ( ) : 1 ) ;
G1RedirtyLoggedCardsTask redirty_task ( & dirty_card_queue_set ( ) ) ;
dirty_card_queue_set ( ) . reset_for_par_iteration ( ) ;
if ( use_parallel_gc_threads ( ) ) {
set_par_threads ( n_workers ) ;
workers ( ) - > run_task ( & redirty_task ) ;
set_par_threads ( 0 ) ;
} else {
redirty_task . work ( 0 ) ;
}
2014-03-17 10:13:27 +01:00
DirtyCardQueueSet & dcq = JavaThread : : dirty_card_queue_set ( ) ;
dcq . merge_bufferlists ( & dirty_card_queue_set ( ) ) ;
assert ( dirty_card_queue_set ( ) . completed_buffers_num ( ) = = 0 , " All should be consumed " ) ;
g1_policy ( ) - > phase_times ( ) - > record_redirty_logged_cards_time_ms ( ( os : : elapsedTime ( ) - redirty_logged_cards_start ) * 1000.0 ) ;
}
2011-09-22 10:57:37 -07:00
// Weak Reference Processing support
// An always "is_alive" closure that is used to preserve referents.
// If the object is non-null then it's alive. Used in the preservation
// of referent objects that are pointed to by reference objects
// discovered by the CM ref processor.
class G1AlwaysAliveClosure : public BoolObjectClosure {
G1CollectedHeap * _g1 ;
public :
G1AlwaysAliveClosure ( G1CollectedHeap * g1 ) : _g1 ( g1 ) { }
bool do_object_b ( oop p ) {
if ( p ! = NULL ) {
return true ;
}
return false ;
}
} ;
bool G1STWIsAliveClosure : : do_object_b ( oop p ) {
// An object is reachable if it is outside the collection set,
// or is inside and copied.
return ! _g1 - > obj_in_cs ( p ) | | p - > is_forwarded ( ) ;
}
// Non Copying Keep Alive closure
class G1KeepAliveClosure : public OopClosure {
G1CollectedHeap * _g1 ;
public :
G1KeepAliveClosure ( G1CollectedHeap * g1 ) : _g1 ( g1 ) { }
void do_oop ( narrowOop * p ) { guarantee ( false , " Not needed " ) ; }
2014-07-23 09:03:32 +02:00
void do_oop ( oop * p ) {
2011-09-22 10:57:37 -07:00
oop obj = * p ;
2014-07-23 09:03:32 +02:00
G1CollectedHeap : : in_cset_state_t cset_state = _g1 - > in_cset_state ( obj ) ;
if ( obj = = NULL | | cset_state = = G1CollectedHeap : : InNeither ) {
return ;
}
if ( cset_state = = G1CollectedHeap : : InCSet ) {
2011-09-22 10:57:37 -07:00
assert ( obj - > is_forwarded ( ) , " invariant " ) ;
* p = obj - > forwardee ( ) ;
2014-07-23 09:03:32 +02:00
} else {
assert ( ! obj - > is_forwarded ( ) , " invariant " ) ;
assert ( cset_state = = G1CollectedHeap : : IsHumongous ,
err_msg ( " Only allowed InCSet state is IsHumongous, but is %d " , cset_state ) ) ;
_g1 - > set_humongous_is_live ( obj ) ;
2011-09-22 10:57:37 -07:00
}
}
} ;
// Copying Keep Alive closure - can be called from both
// serial and parallel code as long as different worker
// threads utilize different G1ParScanThreadState instances
// and different queues.
class G1CopyingKeepAliveClosure : public OopClosure {
G1CollectedHeap * _g1h ;
OopClosure * _copy_non_heap_obj_cl ;
G1ParScanThreadState * _par_scan_state ;
public :
G1CopyingKeepAliveClosure ( G1CollectedHeap * g1h ,
OopClosure * non_heap_obj_cl ,
G1ParScanThreadState * pss ) :
_g1h ( g1h ) ,
_copy_non_heap_obj_cl ( non_heap_obj_cl ) ,
_par_scan_state ( pss )
{ }
virtual void do_oop ( narrowOop * p ) { do_oop_work ( p ) ; }
virtual void do_oop ( oop * p ) { do_oop_work ( p ) ; }
template < class T > void do_oop_work ( T * p ) {
oop obj = oopDesc : : load_decode_heap_oop ( p ) ;
2014-07-23 09:03:32 +02:00
if ( _g1h - > is_in_cset_or_humongous ( obj ) ) {
2011-09-22 10:57:37 -07:00
// If the referent object has been forwarded (either copied
// to a new location or to itself in the event of an
// evacuation failure) then we need to update the reference
// field and, if both reference and referent are in the G1
// heap, update the RSet for the referent.
//
// If the referent has not been forwarded then we have to keep
// it alive by policy. Therefore we have copy the referent.
//
// If the reference field is in the G1 heap then we can push
// on the PSS queue. When the queue is drained (after each
// phase of reference processing) the object and it's followers
// will be copied, the reference field set to point to the
// new location, and the RSet updated. Otherwise we need to
6964458: Reimplement class meta-data storage to use native memory
Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Co-authored-by: Stefan Karlsson <stefan.karlsson@oracle.com>
Co-authored-by: Mikael Gerdin <mikael.gerdin@oracle.com>
Co-authored-by: Tom Rodriguez <tom.rodriguez@oracle.com>
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
2012-09-01 13:25:18 -04:00
// use the the non-heap or metadata closures directly to copy
2013-06-10 11:30:51 +02:00
// the referent object and update the pointer, while avoiding
2011-09-22 10:57:37 -07:00
// updating the RSet.
if ( _g1h - > is_in_g1_reserved ( p ) ) {
_par_scan_state - > push_on_queue ( p ) ;
} else {
2014-05-15 18:23:26 -04:00
assert ( ! Metaspace : : contains ( ( const void * ) p ) ,
2014-06-25 08:28:01 +02:00
err_msg ( " Unexpectedly found a pointer from metadata: "
6964458: Reimplement class meta-data storage to use native memory
Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Co-authored-by: Stefan Karlsson <stefan.karlsson@oracle.com>
Co-authored-by: Mikael Gerdin <mikael.gerdin@oracle.com>
Co-authored-by: Tom Rodriguez <tom.rodriguez@oracle.com>
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
2012-09-01 13:25:18 -04:00
PTR_FORMAT , p ) ) ;
2014-07-23 09:03:32 +02:00
_copy_non_heap_obj_cl - > do_oop ( p ) ;
2011-09-22 10:57:37 -07:00
}
}
2014-07-23 09:03:32 +02:00
}
2011-09-22 10:57:37 -07:00
} ;
// Serial drain queue closure. Called as the 'complete_gc'
// closure for each discovered list in some of the
// reference processing phases.
class G1STWDrainQueueClosure : public VoidClosure {
protected :
G1CollectedHeap * _g1h ;
G1ParScanThreadState * _par_scan_state ;
G1ParScanThreadState * par_scan_state ( ) { return _par_scan_state ; }
public :
G1STWDrainQueueClosure ( G1CollectedHeap * g1h , G1ParScanThreadState * pss ) :
_g1h ( g1h ) ,
_par_scan_state ( pss )
{ }
void do_void ( ) {
G1ParScanThreadState * const pss = par_scan_state ( ) ;
pss - > trim_queue ( ) ;
}
} ;
// Parallel Reference Processing closures
// Implementation of AbstractRefProcTaskExecutor for parallel reference
// processing during G1 evacuation pauses.
class G1STWRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
private :
G1CollectedHeap * _g1h ;
RefToScanQueueSet * _queues ;
2011-08-09 10:16:01 -07:00
FlexibleWorkGang * _workers ;
2011-09-22 10:57:37 -07:00
int _active_workers ;
public :
G1STWRefProcTaskExecutor ( G1CollectedHeap * g1h ,
2011-08-09 10:16:01 -07:00
FlexibleWorkGang * workers ,
2011-09-22 10:57:37 -07:00
RefToScanQueueSet * task_queues ,
int n_workers ) :
_g1h ( g1h ) ,
_queues ( task_queues ) ,
_workers ( workers ) ,
_active_workers ( n_workers )
{
assert ( n_workers > 0 , " shouldn't call this otherwise " ) ;
}
// Executes the given task using concurrent marking worker threads.
virtual void execute ( ProcessTask & task ) ;
virtual void execute ( EnqueueTask & task ) ;
} ;
// Gang task for possibly parallel reference processing
class G1STWRefProcTaskProxy : public AbstractGangTask {
typedef AbstractRefProcTaskExecutor : : ProcessTask ProcessTask ;
ProcessTask & _proc_task ;
G1CollectedHeap * _g1h ;
RefToScanQueueSet * _task_queues ;
ParallelTaskTerminator * _terminator ;
public :
G1STWRefProcTaskProxy ( ProcessTask & proc_task ,
G1CollectedHeap * g1h ,
RefToScanQueueSet * task_queues ,
ParallelTaskTerminator * terminator ) :
AbstractGangTask ( " Process reference objects in parallel " ) ,
_proc_task ( proc_task ) ,
_g1h ( g1h ) ,
_task_queues ( task_queues ) ,
_terminator ( terminator )
{ }
2011-12-14 13:34:57 -08:00
virtual void work ( uint worker_id ) {
2011-09-22 10:57:37 -07:00
// The reference processing task executed by a single worker.
ResourceMark rm ;
HandleMark hm ;
G1STWIsAliveClosure is_alive ( _g1h ) ;
2014-02-24 09:41:04 +01:00
G1ParScanThreadState pss ( _g1h , worker_id , NULL ) ;
2011-09-22 10:57:37 -07:00
G1ParScanHeapEvacFailureClosure evac_failure_cl ( _g1h , & pss , NULL ) ;
pss . set_evac_failure_closure ( & evac_failure_cl ) ;
G1ParScanExtRootClosure only_copy_non_heap_cl ( _g1h , & pss , NULL ) ;
G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl ( _g1h , & pss , NULL ) ;
OopClosure * copy_non_heap_cl = & only_copy_non_heap_cl ;
if ( _g1h - > g1_policy ( ) - > during_initial_mark_pause ( ) ) {
// We also need to mark copied objects.
copy_non_heap_cl = & copy_mark_non_heap_cl ;
}
// Keep alive closure.
2014-06-25 08:28:01 +02:00
G1CopyingKeepAliveClosure keep_alive ( _g1h , copy_non_heap_cl , & pss ) ;
2011-09-22 10:57:37 -07:00
// Complete GC closure
G1ParEvacuateFollowersClosure drain_queue ( _g1h , & pss , _task_queues , _terminator ) ;
// Call the reference processing task's work routine.
2011-12-14 13:34:57 -08:00
_proc_task . work ( worker_id , is_alive , keep_alive , drain_queue ) ;
2011-09-22 10:57:37 -07:00
// Note we cannot assert that the refs array is empty here as not all
// of the processing tasks (specifically phase2 - pp2_work) execute
// the complete_gc closure (which ordinarily would drain the queue) so
// the queue may not be empty.
}
} ;
// Driver routine for parallel reference processing.
// Creates an instance of the ref processing gang
// task and has the worker threads execute it.
void G1STWRefProcTaskExecutor : : execute ( ProcessTask & proc_task ) {
assert ( _workers ! = NULL , " Need parallel worker threads. " ) ;
ParallelTaskTerminator terminator ( _active_workers , _queues ) ;
G1STWRefProcTaskProxy proc_task_proxy ( proc_task , _g1h , _queues , & terminator ) ;
_g1h - > set_par_threads ( _active_workers ) ;
_workers - > run_task ( & proc_task_proxy ) ;
_g1h - > set_par_threads ( 0 ) ;
}
// Gang task for parallel reference enqueueing.
class G1STWRefEnqueueTaskProxy : public AbstractGangTask {
typedef AbstractRefProcTaskExecutor : : EnqueueTask EnqueueTask ;
EnqueueTask & _enq_task ;
public :
G1STWRefEnqueueTaskProxy ( EnqueueTask & enq_task ) :
AbstractGangTask ( " Enqueue reference objects in parallel " ) ,
_enq_task ( enq_task )
{ }
2011-12-14 13:34:57 -08:00
virtual void work ( uint worker_id ) {
_enq_task . work ( worker_id ) ;
2011-09-22 10:57:37 -07:00
}
} ;
2013-06-10 11:30:51 +02:00
// Driver routine for parallel reference enqueueing.
2011-09-22 10:57:37 -07:00
// Creates an instance of the ref enqueueing gang
// task and has the worker threads execute it.
void G1STWRefProcTaskExecutor : : execute ( EnqueueTask & enq_task ) {
assert ( _workers ! = NULL , " Need parallel worker threads. " ) ;
G1STWRefEnqueueTaskProxy enq_task_proxy ( enq_task ) ;
_g1h - > set_par_threads ( _active_workers ) ;
_workers - > run_task ( & enq_task_proxy ) ;
_g1h - > set_par_threads ( 0 ) ;
}
// End of weak reference support closures
// Abstract task used to preserve (i.e. copy) any referent objects
// that are in the collection set and are pointed to by reference
// objects discovered by the CM ref processor.
class G1ParPreserveCMReferentsTask : public AbstractGangTask {
protected :
G1CollectedHeap * _g1h ;
RefToScanQueueSet * _queues ;
ParallelTaskTerminator _terminator ;
2011-12-14 13:34:57 -08:00
uint _n_workers ;
2011-09-22 10:57:37 -07:00
public :
G1ParPreserveCMReferentsTask ( G1CollectedHeap * g1h , int workers , RefToScanQueueSet * task_queues ) :
AbstractGangTask ( " ParPreserveCMReferents " ) ,
_g1h ( g1h ) ,
_queues ( task_queues ) ,
_terminator ( workers , _queues ) ,
_n_workers ( workers )
{ }
2011-12-14 13:34:57 -08:00
void work ( uint worker_id ) {
2011-09-22 10:57:37 -07:00
ResourceMark rm ;
HandleMark hm ;
2014-02-24 09:41:04 +01:00
G1ParScanThreadState pss ( _g1h , worker_id , NULL ) ;
2011-09-22 10:57:37 -07:00
G1ParScanHeapEvacFailureClosure evac_failure_cl ( _g1h , & pss , NULL ) ;
pss . set_evac_failure_closure ( & evac_failure_cl ) ;
2014-06-26 15:48:05 +02:00
assert ( pss . queue_is_empty ( ) , " both queue and overflow should be empty " ) ;
2011-09-22 10:57:37 -07:00
G1ParScanExtRootClosure only_copy_non_heap_cl ( _g1h , & pss , NULL ) ;
G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl ( _g1h , & pss , NULL ) ;
OopClosure * copy_non_heap_cl = & only_copy_non_heap_cl ;
if ( _g1h - > g1_policy ( ) - > during_initial_mark_pause ( ) ) {
// We also need to mark copied objects.
copy_non_heap_cl = & copy_mark_non_heap_cl ;
}
// Is alive closure
G1AlwaysAliveClosure always_alive ( _g1h ) ;
// Copying keep alive closure. Applied to referent objects that need
// to be copied.
2014-06-25 08:28:01 +02:00
G1CopyingKeepAliveClosure keep_alive ( _g1h , copy_non_heap_cl , & pss ) ;
2011-09-22 10:57:37 -07:00
ReferenceProcessor * rp = _g1h - > ref_processor_cm ( ) ;
2011-12-14 13:34:57 -08:00
uint limit = ReferenceProcessor : : number_of_subclasses_of_ref ( ) * rp - > max_num_q ( ) ;
uint stride = MIN2 ( MAX2 ( _n_workers , 1U ) , limit ) ;
2011-09-22 10:57:37 -07:00
// limit is set using max_num_q() - which was set using ParallelGCThreads.
// So this must be true - but assert just in case someone decides to
// change the worker ids.
2011-12-14 13:34:57 -08:00
assert ( 0 < = worker_id & & worker_id < limit , " sanity " ) ;
2011-09-22 10:57:37 -07:00
assert ( ! rp - > discovery_is_atomic ( ) , " check this code " ) ;
// Select discovered lists [i, i+stride, i+2*stride,...,limit)
2011-12-14 13:34:57 -08:00
for ( uint idx = worker_id ; idx < limit ; idx + = stride ) {
2011-10-17 09:57:41 -07:00
DiscoveredList & ref_list = rp - > discovered_refs ( ) [ idx ] ;
2011-09-22 10:57:37 -07:00
DiscoveredListIterator iter ( ref_list , & keep_alive , & always_alive ) ;
while ( iter . has_next ( ) ) {
// Since discovery is not atomic for the CM ref processor, we
// can see some null referent objects.
iter . load_ptrs ( DEBUG_ONLY ( true ) ) ;
oop ref = iter . obj ( ) ;
// This will filter nulls.
if ( iter . is_referent_alive ( ) ) {
iter . make_referent_alive ( ) ;
}
iter . move_to_next ( ) ;
}
}
// Drain the queue - which may cause stealing
G1ParEvacuateFollowersClosure drain_queue ( _g1h , & pss , _queues , & _terminator ) ;
drain_queue . do_void ( ) ;
// Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
2014-06-26 15:48:05 +02:00
assert ( pss . queue_is_empty ( ) , " should be " ) ;
2011-09-22 10:57:37 -07:00
}
} ;
// Weak Reference processing during an evacuation pause (part 1).
2012-10-04 10:04:13 -07:00
void G1CollectedHeap : : process_discovered_references ( uint no_of_gc_workers ) {
2011-09-22 10:57:37 -07:00
double ref_proc_start = os : : elapsedTime ( ) ;
ReferenceProcessor * rp = _ref_processor_stw ;
assert ( rp - > discovery_enabled ( ) , " should have been enabled " ) ;
// Any reference objects, in the collection set, that were 'discovered'
// by the CM ref processor should have already been copied (either by
// applying the external root copy closure to the discovered lists, or
// by following an RSet entry).
//
// But some of the referents, that are in the collection set, that these
// reference objects point to may not have been copied: the STW ref
// processor would have seen that the reference object had already
// been 'discovered' and would have skipped discovering the reference,
// but would not have treated the reference object as a regular oop.
2013-06-10 11:30:51 +02:00
// As a result the copy closure would not have been applied to the
2011-09-22 10:57:37 -07:00
// referent object.
//
// We need to explicitly copy these referent objects - the references
// will be processed at the end of remarking.
//
// We also need to do this copying before we process the reference
// objects discovered by the STW ref processor in case one of these
// referents points to another object which is also referenced by an
// object discovered by the STW ref processor.
2011-12-16 11:40:00 -08:00
assert ( ! G1CollectedHeap : : use_parallel_gc_threads ( ) | |
2012-10-04 10:04:13 -07:00
no_of_gc_workers = = workers ( ) - > active_workers ( ) ,
" Need to reset active GC workers " ) ;
2011-12-16 11:40:00 -08:00
2012-10-04 10:04:13 -07:00
set_par_threads ( no_of_gc_workers ) ;
G1ParPreserveCMReferentsTask keep_cm_referents ( this ,
no_of_gc_workers ,
_task_queues ) ;
2011-09-22 10:57:37 -07:00
if ( G1CollectedHeap : : use_parallel_gc_threads ( ) ) {
workers ( ) - > run_task ( & keep_cm_referents ) ;
} else {
keep_cm_referents . work ( 0 ) ;
}
set_par_threads ( 0 ) ;
// Closure to test whether a referent is alive.
G1STWIsAliveClosure is_alive ( this ) ;
// Even when parallel reference processing is enabled, the processing
// of JNI refs is serial and performed serially by the current thread
// rather than by a worker. The following PSS will be used for processing
// JNI refs.
// Use only a single queue for this PSS.
2014-02-24 09:41:04 +01:00
G1ParScanThreadState pss ( this , 0 , NULL ) ;
2011-09-22 10:57:37 -07:00
// We do not embed a reference processor in the copying/scanning
// closures while we're actually processing the discovered
// reference objects.
G1ParScanHeapEvacFailureClosure evac_failure_cl ( this , & pss , NULL ) ;
pss . set_evac_failure_closure ( & evac_failure_cl ) ;
2014-06-26 15:48:05 +02:00
assert ( pss . queue_is_empty ( ) , " pre-condition " ) ;
2011-09-22 10:57:37 -07:00
G1ParScanExtRootClosure only_copy_non_heap_cl ( this , & pss , NULL ) ;
G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl ( this , & pss , NULL ) ;
OopClosure * copy_non_heap_cl = & only_copy_non_heap_cl ;
if ( _g1h - > g1_policy ( ) - > during_initial_mark_pause ( ) ) {
// We also need to mark copied objects.
copy_non_heap_cl = & copy_mark_non_heap_cl ;
}
// Keep alive closure.
2014-06-25 08:28:01 +02:00
G1CopyingKeepAliveClosure keep_alive ( this , copy_non_heap_cl , & pss ) ;
2011-09-22 10:57:37 -07:00
// Serial Complete GC closure
G1STWDrainQueueClosure drain_queue ( this , & pss ) ;
// Setup the soft refs policy...
rp - > setup_policy ( false ) ;
2013-06-10 11:30:51 +02:00
ReferenceProcessorStats stats ;
2011-09-22 10:57:37 -07:00
if ( ! rp - > processing_is_mt ( ) ) {
// Serial reference processing...
2013-06-10 11:30:51 +02:00
stats = rp - > process_discovered_references ( & is_alive ,
& keep_alive ,
& drain_queue ,
NULL ,
2014-06-19 13:31:14 +02:00
_gc_timer_stw ,
_gc_tracer_stw - > gc_id ( ) ) ;
2011-09-22 10:57:37 -07:00
} else {
// Parallel reference processing
2012-10-04 10:04:13 -07:00
assert ( rp - > num_q ( ) = = no_of_gc_workers , " sanity " ) ;
assert ( no_of_gc_workers < = rp - > max_num_q ( ) , " sanity " ) ;
2011-09-22 10:57:37 -07:00
2012-10-04 10:04:13 -07:00
G1STWRefProcTaskExecutor par_task_executor ( this , workers ( ) , _task_queues , no_of_gc_workers ) ;
2013-06-10 11:30:51 +02:00
stats = rp - > process_discovered_references ( & is_alive ,
& keep_alive ,
& drain_queue ,
& par_task_executor ,
2014-06-19 13:31:14 +02:00
_gc_timer_stw ,
_gc_tracer_stw - > gc_id ( ) ) ;
2011-09-22 10:57:37 -07:00
}
2013-06-10 11:30:51 +02:00
_gc_tracer_stw - > report_gc_reference_stats ( stats ) ;
2014-04-16 11:05:37 +02:00
// We have completed copying any necessary live referent objects.
2014-06-26 15:48:05 +02:00
assert ( pss . queue_is_empty ( ) , " both queue and overflow should be empty " ) ;
2011-09-22 10:57:37 -07:00
double ref_proc_time = os : : elapsedTime ( ) - ref_proc_start ;
2012-07-11 22:47:38 +02:00
g1_policy ( ) - > phase_times ( ) - > record_ref_proc_time ( ref_proc_time * 1000.0 ) ;
2011-09-22 10:57:37 -07:00
}
// Weak Reference processing during an evacuation pause (part 2).
2012-10-04 10:04:13 -07:00
void G1CollectedHeap : : enqueue_discovered_references ( uint no_of_gc_workers ) {
2011-09-22 10:57:37 -07:00
double ref_enq_start = os : : elapsedTime ( ) ;
ReferenceProcessor * rp = _ref_processor_stw ;
assert ( ! rp - > discovery_enabled ( ) , " should have been disabled as part of processing " ) ;
// Now enqueue any remaining on the discovered lists on to
// the pending list.
if ( ! rp - > processing_is_mt ( ) ) {
// Serial reference processing...
rp - > enqueue_discovered_references ( ) ;
} else {
2013-06-10 11:30:51 +02:00
// Parallel reference enqueueing
2011-09-22 10:57:37 -07:00
2012-10-04 10:04:13 -07:00
assert ( no_of_gc_workers = = workers ( ) - > active_workers ( ) ,
" Need to reset active workers " ) ;
assert ( rp - > num_q ( ) = = no_of_gc_workers , " sanity " ) ;
assert ( no_of_gc_workers < = rp - > max_num_q ( ) , " sanity " ) ;
2011-09-22 10:57:37 -07:00
2012-10-04 10:04:13 -07:00
G1STWRefProcTaskExecutor par_task_executor ( this , workers ( ) , _task_queues , no_of_gc_workers ) ;
2011-09-22 10:57:37 -07:00
rp - > enqueue_discovered_references ( & par_task_executor ) ;
}
rp - > verify_no_references_recorded ( ) ;
assert ( ! rp - > discovery_enabled ( ) , " should have been disabled " ) ;
// FIXME
// CM's reference processing also cleans up the string and symbol tables.
// Should we do that here also? We could, but it is a serial operation
2013-06-10 11:30:51 +02:00
// and could significantly increase the pause time.
2011-09-22 10:57:37 -07:00
double ref_enq_time = os : : elapsedTime ( ) - ref_enq_start ;
2012-07-11 22:47:38 +02:00
g1_policy ( ) - > phase_times ( ) - > record_ref_enq_time ( ref_enq_time * 1000.0 ) ;
2011-09-22 10:57:37 -07:00
}
2013-06-10 11:30:51 +02:00
void G1CollectedHeap : : evacuate_collection_set ( EvacuationInfo & evacuation_info ) {
2012-01-05 05:54:01 -05:00
_expand_heap_after_alloc_failure = true ;
2013-06-10 11:30:51 +02:00
_evacuation_failed = false ;
2008-06-05 15:57:56 -07:00
2012-08-28 15:20:08 -07:00
// Should G1EvacuationFailureALot be in effect for this GC?
NOT_PRODUCT ( set_evacuation_failure_alot_for_current_gc ( ) ; )
2008-06-05 15:57:56 -07:00
g1_rem_set ( ) - > prepare_for_oops_into_collection_set_do ( ) ;
2013-05-09 11:16:39 -07:00
// Disable the hot card cache.
G1HotCardCache * hot_card_cache = _cg1r - > hot_card_cache ( ) ;
hot_card_cache - > reset_hot_cache_claimed_index ( ) ;
hot_card_cache - > set_use_cache ( false ) ;
2009-08-03 12:59:30 -07:00
2011-12-14 13:34:57 -08:00
uint n_workers ;
2011-08-09 10:16:01 -07:00
if ( G1CollectedHeap : : use_parallel_gc_threads ( ) ) {
n_workers =
AdaptiveSizePolicy : : calc_active_workers ( workers ( ) - > total_workers ( ) ,
workers ( ) - > active_workers ( ) ,
Threads : : number_of_non_daemon_threads ( ) ) ;
assert ( UseDynamicNumberOfGCThreads | |
n_workers = = workers ( ) - > total_workers ( ) ,
" If not dynamic should be using all the workers " ) ;
2011-12-16 11:40:00 -08:00
workers ( ) - > set_active_workers ( n_workers ) ;
2011-08-09 10:16:01 -07:00
set_par_threads ( n_workers ) ;
} else {
assert ( n_par_threads ( ) = = 0 ,
" Should be the original non-parallel value " ) ;
n_workers = 1 ;
}
G1ParTask g1_par_task ( this , _task_queues ) ;
2008-06-05 15:57:56 -07:00
init_for_evac_failure ( NULL ) ;
rem_set ( ) - > prepare_for_younger_refs_iterate ( true ) ;
2009-03-06 13:50:14 -08:00
assert ( dirty_card_queue_set ( ) . completed_buffers_num ( ) = = 0 , " Should be empty " ) ;
2012-03-13 11:05:32 -07:00
double start_par_time_sec = os : : elapsedTime ( ) ;
double end_par_time_sec ;
2011-09-22 10:57:37 -07:00
2012-03-13 11:05:32 -07:00
{
2009-09-15 21:53:47 -07:00
StrongRootsScope srs ( this ) ;
2014-07-07 10:12:40 +02:00
// InitialMark needs claim bits to keep track of the marked-through CLDs.
if ( g1_policy ( ) - > during_initial_mark_pause ( ) ) {
ClassLoaderDataGraph : : clear_claimed_marks ( ) ;
}
2012-03-13 11:05:32 -07:00
if ( G1CollectedHeap : : use_parallel_gc_threads ( ) ) {
// The individual threads will set their evac-failure closures.
if ( ParallelGCVerbose ) G1ParScanThreadState : : print_termination_stats_hdr ( ) ;
// These tasks use ShareHeap::_process_strong_tasks
assert ( UseDynamicNumberOfGCThreads | |
workers ( ) - > active_workers ( ) = = workers ( ) - > total_workers ( ) ,
" If not dynamic should be using all the workers " ) ;
workers ( ) - > run_task ( & g1_par_task ) ;
} else {
g1_par_task . set_for_termination ( n_workers ) ;
g1_par_task . work ( 0 ) ;
}
end_par_time_sec = os : : elapsedTime ( ) ;
// Closing the inner scope will execute the destructor
// for the StrongRootsScope object. We record the current
// elapsed time before closing the scope so that time
// taken for the SRS destructor is NOT included in the
// reported parallel time.
2008-06-05 15:57:56 -07:00
}
2012-03-13 11:05:32 -07:00
double par_time_ms = ( end_par_time_sec - start_par_time_sec ) * 1000.0 ;
2012-07-11 22:47:38 +02:00
g1_policy ( ) - > phase_times ( ) - > record_par_time ( par_time_ms ) ;
2012-03-13 11:05:32 -07:00
double code_root_fixup_time_ms =
( os : : elapsedTime ( ) - end_par_time_sec ) * 1000.0 ;
2012-07-11 22:47:38 +02:00
g1_policy ( ) - > phase_times ( ) - > record_code_root_fixup_time ( code_root_fixup_time_ms ) ;
2011-08-09 10:16:01 -07:00
2008-06-05 15:57:56 -07:00
set_par_threads ( 0 ) ;
2010-12-01 17:34:02 -08:00
2011-09-22 10:57:37 -07:00
// Process any discovered reference objects - we have
// to do this _before_ we retire the GC alloc regions
// as we may have to copy some 'reachable' referent
// objects (and their reachable sub-graphs) that were
// not copied during the pause.
2012-10-04 10:04:13 -07:00
process_discovered_references ( n_workers ) ;
2011-09-22 10:57:37 -07:00
2010-12-01 17:34:02 -08:00
// Weak root processing.
2008-06-05 15:57:56 -07:00
{
2011-09-22 10:57:37 -07:00
G1STWIsAliveClosure is_alive ( this ) ;
2008-06-05 15:57:56 -07:00
G1KeepAliveClosure keep_alive ( this ) ;
JNIHandles : : weak_oops_do ( & is_alive , & keep_alive ) ;
2014-03-18 19:07:22 +01:00
if ( G1StringDedup : : is_enabled ( ) ) {
G1StringDedup : : unlink_or_oops_do ( & is_alive , & keep_alive ) ;
}
2008-06-05 15:57:56 -07:00
}
2011-09-22 10:57:37 -07:00
2013-06-10 11:30:51 +02:00
release_gc_alloc_regions ( n_workers , evacuation_info ) ;
2008-06-05 15:57:56 -07:00
g1_rem_set ( ) - > cleanup_after_oops_into_collection_set_do ( ) ;
2009-03-06 13:50:14 -08:00
2013-05-09 11:16:39 -07:00
// Reset and re-enable the hot card cache.
// Note the counts for the cards in the regions in the
// collection set are reset when the collection set is freed.
hot_card_cache - > reset_hot_cache ( ) ;
hot_card_cache - > set_use_cache ( true ) ;
2008-06-05 15:57:56 -07:00
2013-08-15 10:52:18 +02:00
// Migrate the strong code roots attached to each region in
// the collection set. Ideally we would like to do this
// after we have finished the scanning/evacuation of the
// strong code roots for a particular heap region.
migrate_strong_code_roots ( ) ;
2014-03-17 10:12:21 +01:00
purge_code_root_memory ( ) ;
2013-08-15 10:52:18 +02:00
if ( g1_policy ( ) - > during_initial_mark_pause ( ) ) {
// Reset the claim values set during marking the strong code roots
reset_heap_region_claim_values ( ) ;
}
2008-06-05 15:57:56 -07:00
finalize_for_evac_failure ( ) ;
if ( evacuation_failed ( ) ) {
remove_self_forwarding_pointers ( ) ;
2012-08-28 15:20:08 -07:00
// Reset the G1EvacuationFailureALot counters and flags
// Note: the values are reset only when an actual
// evacuation failure occurs.
NOT_PRODUCT ( reset_evacuation_should_fail ( ) ; )
2008-06-05 15:57:56 -07:00
}
2011-09-22 10:57:37 -07:00
// Enqueue any remaining references remaining on the STW
// reference processor's discovered lists. We need to do
// this after the card table is cleaned (and verified) as
2013-06-10 11:30:51 +02:00
// the act of enqueueing entries on to the pending list
2011-09-22 10:57:37 -07:00
// will log these updates (and dirty their associated
// cards). We need these updates logged to update any
// RSets.
2012-10-04 10:04:13 -07:00
enqueue_discovered_references ( n_workers ) ;
2011-09-22 10:57:37 -07:00
2009-03-06 13:50:14 -08:00
if ( G1DeferredRSUpdate ) {
2014-03-17 10:13:27 +01:00
redirty_logged_cards ( ) ;
2009-03-06 13:50:14 -08:00
}
2008-06-05 15:57:56 -07:00
COMPILER2_PRESENT ( DerivedPointerTable : : update_pointers ( ) ) ;
}
2011-01-19 19:30:42 -05:00
void G1CollectedHeap : : free_region ( HeapRegion * hr ,
FreeRegionList * free_list ,
2014-03-17 10:13:18 +01:00
bool par ,
bool locked ) {
2011-01-19 19:30:42 -05:00
assert ( ! hr - > isHumongous ( ) , " this is only for non-humongous regions " ) ;
assert ( ! hr - > is_empty ( ) , " the region should not be empty " ) ;
2014-08-26 09:36:53 +02:00
assert ( _hrm . is_available ( hr - > hrm_index ( ) ) , " region should be committed " ) ;
2011-01-19 19:30:42 -05:00
assert ( free_list ! = NULL , " pre-condition " ) ;
2014-04-29 09:33:20 +02:00
if ( G1VerifyBitmaps ) {
MemRegion mr ( hr - > bottom ( ) , hr - > end ( ) ) ;
concurrent_mark ( ) - > clearRangePrevBitmap ( mr ) ;
}
2013-05-09 11:16:39 -07:00
// Clear the card counts for this region.
// Note: we only need to do this if the region is not young
// (since we don't refine cards in young regions).
if ( ! hr - > is_young ( ) ) {
_cg1r - > hot_card_cache ( ) - > reset_card_counts ( hr ) ;
}
2014-03-17 10:13:18 +01:00
hr - > hr_clear ( par , true /* clear_space */ , locked /* locked */ ) ;
2014-02-28 15:27:09 +01:00
free_list - > add_ordered ( hr ) ;
2011-01-19 19:30:42 -05:00
}
void G1CollectedHeap : : free_humongous_region ( HeapRegion * hr ,
FreeRegionList * free_list ,
bool par ) {
assert ( hr - > startsHumongous ( ) , " this is only for starts humongous regions " ) ;
assert ( free_list ! = NULL , " pre-condition " ) ;
size_t hr_capacity = hr - > capacity ( ) ;
2012-07-19 15:15:54 -07:00
// We need to read this before we make the region non-humongous,
// otherwise the information will be gone.
uint last_index = hr - > last_hc_index ( ) ;
2011-01-19 19:30:42 -05:00
hr - > set_notHumongous ( ) ;
2014-03-14 10:15:46 +01:00
free_region ( hr , free_list , par ) ;
2011-01-19 19:30:42 -05:00
2014-08-26 09:36:53 +02:00
uint i = hr - > hrm_index ( ) + 1 ;
2012-07-19 15:15:54 -07:00
while ( i < last_index ) {
7045330: G1: Simplify/fix the HeapRegionSeq class
7042285: G1: native memory leak during humongous object allocation
6804436: G1: heap region indices should be size_t
A series of fixes and improvements to the HeapRegionSeq class: a) replace the _regions growable array with a standard C array, b) avoid de-allocating / re-allocating HeapRegion instances when the heap shrinks / grows (fix for 7042285), c) introduce fast method to map address to HeapRegion via a "biased" array pointer, d) embed the _hrs object in G1CollectedHeap, instead of pointing to it via an indirection, e) assume that all the regions added to the HeapRegionSeq instance are contiguous, f) replace int's with size_t's for indexes (and expand that to HeapRegion as part of 6804436), g) remove unnecessary / unused methods, h) rename a couple of fields (_alloc_search_start and _seq_bottom), i) fix iterate_from() not to always start from index 0 irrespective of the region passed to it, j) add a verification method to check the HeapRegionSeq assumptions, k) always call the wrappers for _hrs.iterate(), _hrs_length(), and _hrs.at() from G1CollectedHeap, not those methods directly, and l) unify the code that expands the sequence (by either re-using or creating a new HeapRegion) and make it robust wrt to a HeapRegion allocation failing.
Reviewed-by: stefank, johnc, brutisso
2011-06-10 13:16:40 -04:00
HeapRegion * curr_hr = region_at ( i ) ;
2012-07-19 15:15:54 -07:00
assert ( curr_hr - > continuesHumongous ( ) , " invariant " ) ;
2011-01-19 19:30:42 -05:00
curr_hr - > set_notHumongous ( ) ;
2014-03-14 10:15:46 +01:00
free_region ( curr_hr , free_list , par ) ;
2011-01-19 19:30:42 -05:00
i + = 1 ;
}
2014-03-14 10:15:46 +01:00
}
void G1CollectedHeap : : remove_from_old_sets ( const HeapRegionSetCount & old_regions_removed ,
const HeapRegionSetCount & humongous_regions_removed ) {
if ( old_regions_removed . length ( ) > 0 | | humongous_regions_removed . length ( ) > 0 ) {
2011-11-07 22:11:12 -05:00
MutexLockerEx x ( OldSets_lock , Mutex : : _no_safepoint_check_flag ) ;
2014-03-14 10:15:46 +01:00
_old_set . bulk_remove ( old_regions_removed ) ;
_humongous_set . bulk_remove ( humongous_regions_removed ) ;
2011-11-07 22:11:12 -05:00
}
2014-03-14 10:15:46 +01:00
}
void G1CollectedHeap : : prepend_to_freelist ( FreeRegionList * list ) {
assert ( list ! = NULL , " list can't be null " ) ;
if ( ! list - > is_empty ( ) ) {
MutexLockerEx x ( FreeList_lock , Mutex : : _no_safepoint_check_flag ) ;
2014-08-26 09:36:53 +02:00
_hrm . insert_list_into_free_list ( list ) ;
2008-06-05 15:57:56 -07:00
}
}
2014-03-14 10:15:46 +01:00
void G1CollectedHeap : : decrement_summary_bytes ( size_t bytes ) {
assert ( _summary_bytes_used > = bytes ,
err_msg ( " invariant: _summary_bytes_used: " SIZE_FORMAT " should be >= bytes: " SIZE_FORMAT ,
_summary_bytes_used , bytes ) ) ;
_summary_bytes_used - = bytes ;
}
2009-05-19 04:05:31 -07:00
class G1ParCleanupCTTask : public AbstractGangTask {
2013-09-24 14:46:29 +02:00
G1SATBCardTableModRefBS * _ct_bs ;
2009-05-19 04:05:31 -07:00
G1CollectedHeap * _g1h ;
2009-08-31 05:27:29 -07:00
HeapRegion * volatile _su_head ;
2009-05-19 04:05:31 -07:00
public :
2013-09-24 14:46:29 +02:00
G1ParCleanupCTTask ( G1SATBCardTableModRefBS * ct_bs ,
2011-08-12 11:31:06 -04:00
G1CollectedHeap * g1h ) :
2009-05-19 04:05:31 -07:00
AbstractGangTask ( " G1 Par Cleanup CT Task " ) ,
2011-08-12 11:31:06 -04:00
_ct_bs ( ct_bs ) , _g1h ( g1h ) { }
2009-05-19 04:05:31 -07:00
2011-12-14 13:34:57 -08:00
void work ( uint worker_id ) {
2009-05-19 04:05:31 -07:00
HeapRegion * r ;
while ( r = _g1h - > pop_dirty_cards_region ( ) ) {
clear_cards ( r ) ;
}
}
2009-08-31 05:27:29 -07:00
2009-05-19 04:05:31 -07:00
void clear_cards ( HeapRegion * r ) {
2011-08-12 11:31:06 -04:00
// Cards of the survivors should have already been dirtied.
2010-04-22 10:02:38 -07:00
if ( ! r - > is_survivor ( ) ) {
2009-05-19 04:05:31 -07:00
_ct_bs - > clear ( MemRegion ( r - > bottom ( ) , r - > end ( ) ) ) ;
}
}
} ;
2009-08-31 05:27:29 -07:00
# ifndef PRODUCT
class G1VerifyCardTableCleanup : public HeapRegionClosure {
2011-04-29 14:59:04 -04:00
G1CollectedHeap * _g1h ;
2013-09-24 14:46:29 +02:00
G1SATBCardTableModRefBS * _ct_bs ;
2009-08-31 05:27:29 -07:00
public :
2013-09-24 14:46:29 +02:00
G1VerifyCardTableCleanup ( G1CollectedHeap * g1h , G1SATBCardTableModRefBS * ct_bs )
2011-04-29 14:59:04 -04:00
: _g1h ( g1h ) , _ct_bs ( ct_bs ) { }
2011-03-30 10:26:59 -04:00
virtual bool doHeapRegion ( HeapRegion * r ) {
2010-04-22 10:02:38 -07:00
if ( r - > is_survivor ( ) ) {
2011-04-29 14:59:04 -04:00
_g1h - > verify_dirty_region ( r ) ;
2009-08-31 05:27:29 -07:00
} else {
2011-04-29 14:59:04 -04:00
_g1h - > verify_not_dirty_region ( r ) ;
2009-08-31 05:27:29 -07:00
}
return false ;
}
} ;
2011-03-30 10:26:59 -04:00
2011-04-29 14:59:04 -04:00
void G1CollectedHeap : : verify_not_dirty_region ( HeapRegion * hr ) {
// All of the region should be clean.
2013-09-24 14:46:29 +02:00
G1SATBCardTableModRefBS * ct_bs = g1_barrier_set ( ) ;
2011-04-29 14:59:04 -04:00
MemRegion mr ( hr - > bottom ( ) , hr - > end ( ) ) ;
ct_bs - > verify_not_dirty_region ( mr ) ;
}
void G1CollectedHeap : : verify_dirty_region ( HeapRegion * hr ) {
// We cannot guarantee that [bottom(),end()] is dirty. Threads
// dirty allocated blocks as they allocate them. The thread that
// retires each region and replaces it with a new one will do a
// maximal allocation to fill in [pre_dummy_top(),end()] but will
// not dirty that area (one less thing to have to do while holding
// a lock). So we can only verify that [bottom(),pre_dummy_top()]
// is dirty.
2013-09-24 14:46:29 +02:00
G1SATBCardTableModRefBS * ct_bs = g1_barrier_set ( ) ;
2011-04-29 14:59:04 -04:00
MemRegion mr ( hr - > bottom ( ) , hr - > pre_dummy_top ( ) ) ;
2013-10-08 17:35:51 +02:00
if ( hr - > is_young ( ) ) {
ct_bs - > verify_g1_young_region ( mr ) ;
} else {
ct_bs - > verify_dirty_region ( mr ) ;
}
2011-04-29 14:59:04 -04:00
}
2011-03-30 10:26:59 -04:00
void G1CollectedHeap : : verify_dirty_young_list ( HeapRegion * head ) {
2013-09-24 14:46:29 +02:00
G1SATBCardTableModRefBS * ct_bs = g1_barrier_set ( ) ;
2011-03-30 10:26:59 -04:00
for ( HeapRegion * hr = head ; hr ! = NULL ; hr = hr - > get_next_young_region ( ) ) {
2011-04-29 14:59:04 -04:00
verify_dirty_region ( hr ) ;
2011-03-30 10:26:59 -04:00
}
}
void G1CollectedHeap : : verify_dirty_young_regions ( ) {
verify_dirty_young_list ( _young_list - > first_region ( ) ) ;
}
2014-04-29 09:33:20 +02:00
bool G1CollectedHeap : : verify_no_bits_over_tams ( const char * bitmap_name , CMBitMapRO * bitmap ,
HeapWord * tams , HeapWord * end ) {
guarantee ( tams < = end ,
err_msg ( " tams: " PTR_FORMAT " end: " PTR_FORMAT , tams , end ) ) ;
HeapWord * result = bitmap - > getNextMarkedWordAddress ( tams , end ) ;
if ( result < end ) {
gclog_or_tty - > cr ( ) ;
gclog_or_tty - > print_cr ( " ## wrong marked address on %s bitmap: " PTR_FORMAT ,
bitmap_name , result ) ;
gclog_or_tty - > print_cr ( " ## %s tams: " PTR_FORMAT " end: " PTR_FORMAT ,
bitmap_name , tams , end ) ;
return false ;
}
return true ;
}
bool G1CollectedHeap : : verify_bitmaps ( const char * caller , HeapRegion * hr ) {
CMBitMapRO * prev_bitmap = concurrent_mark ( ) - > prevMarkBitMap ( ) ;
CMBitMapRO * next_bitmap = ( CMBitMapRO * ) concurrent_mark ( ) - > nextMarkBitMap ( ) ;
HeapWord * bottom = hr - > bottom ( ) ;
HeapWord * ptams = hr - > prev_top_at_mark_start ( ) ;
HeapWord * ntams = hr - > next_top_at_mark_start ( ) ;
HeapWord * end = hr - > end ( ) ;
bool res_p = verify_no_bits_over_tams ( " prev " , prev_bitmap , ptams , end ) ;
bool res_n = true ;
// We reset mark_in_progress() before we reset _cmThread->in_progress() and in this window
// we do the clearing of the next bitmap concurrently. Thus, we can not verify the bitmap
// if we happen to be in that state.
if ( mark_in_progress ( ) | | ! _cmThread - > in_progress ( ) ) {
res_n = verify_no_bits_over_tams ( " next " , next_bitmap , ntams , end ) ;
}
if ( ! res_p | | ! res_n ) {
gclog_or_tty - > print_cr ( " #### Bitmap verification failed for " HR_FORMAT ,
HR_FORMAT_PARAMS ( hr ) ) ;
gclog_or_tty - > print_cr ( " #### Caller: %s " , caller ) ;
return false ;
}
return true ;
}
void G1CollectedHeap : : check_bitmaps ( const char * caller , HeapRegion * hr ) {
if ( ! G1VerifyBitmaps ) return ;
guarantee ( verify_bitmaps ( caller , hr ) , " bitmap verification " ) ;
}
class G1VerifyBitmapClosure : public HeapRegionClosure {
private :
const char * _caller ;
G1CollectedHeap * _g1h ;
bool _failures ;
public :
G1VerifyBitmapClosure ( const char * caller , G1CollectedHeap * g1h ) :
_caller ( caller ) , _g1h ( g1h ) , _failures ( false ) { }
bool failures ( ) { return _failures ; }
virtual bool doHeapRegion ( HeapRegion * hr ) {
if ( hr - > continuesHumongous ( ) ) return false ;
bool result = _g1h - > verify_bitmaps ( _caller , hr ) ;
if ( ! result ) {
_failures = true ;
}
return false ;
}
} ;
void G1CollectedHeap : : check_bitmaps ( const char * caller ) {
if ( ! G1VerifyBitmaps ) return ;
G1VerifyBitmapClosure cl ( caller , this ) ;
heap_region_iterate ( & cl ) ;
guarantee ( ! cl . failures ( ) , " bitmap verification " ) ;
}
# endif // PRODUCT
2009-08-31 05:27:29 -07:00
2008-06-05 15:57:56 -07:00
void G1CollectedHeap : : cleanUpCardTable ( ) {
2013-09-24 14:46:29 +02:00
G1SATBCardTableModRefBS * ct_bs = g1_barrier_set ( ) ;
2008-06-05 15:57:56 -07:00
double start = os : : elapsedTime ( ) ;
2011-10-23 23:06:06 -07:00
{
// Iterate over the dirty cards region list.
G1ParCleanupCTTask cleanup_task ( ct_bs , this ) ;
2010-04-22 10:02:38 -07:00
2011-12-16 11:40:00 -08:00
if ( G1CollectedHeap : : use_parallel_gc_threads ( ) ) {
set_par_threads ( ) ;
2011-10-23 23:06:06 -07:00
workers ( ) - > run_task ( & cleanup_task ) ;
set_par_threads ( 0 ) ;
} else {
while ( _dirty_cards_region_list ) {
HeapRegion * r = _dirty_cards_region_list ;
cleanup_task . clear_cards ( r ) ;
_dirty_cards_region_list = r - > get_next_dirty_cards_region ( ) ;
if ( _dirty_cards_region_list = = r ) {
// The last region.
_dirty_cards_region_list = NULL ;
}
r - > set_next_dirty_cards_region ( NULL ) ;
2009-05-19 04:05:31 -07:00
}
}
2011-10-23 23:06:06 -07:00
# ifndef PRODUCT
if ( G1VerifyCTCleanup | | VerifyAfterGC ) {
G1VerifyCardTableCleanup cleanup_verifier ( this , ct_bs ) ;
heap_region_iterate ( & cleanup_verifier ) ;
}
# endif
2009-05-19 04:05:31 -07:00
}
2010-04-22 10:02:38 -07:00
2008-06-05 15:57:56 -07:00
double elapsed = os : : elapsedTime ( ) - start ;
2012-07-11 22:47:38 +02:00
g1_policy ( ) - > phase_times ( ) - > record_clear_ct_time ( elapsed * 1000.0 ) ;
2008-06-05 15:57:56 -07:00
}
2013-06-10 11:30:51 +02:00
void G1CollectedHeap : : free_collection_set ( HeapRegion * cs_head , EvacuationInfo & evacuation_info ) {
2011-01-19 19:30:42 -05:00
size_t pre_used = 0 ;
FreeRegionList local_free_list ( " Local List for CSet Freeing " ) ;
2008-06-05 15:57:56 -07:00
double young_time_ms = 0.0 ;
double non_young_time_ms = 0.0 ;
2010-04-22 10:02:38 -07:00
// Since the collection set is a superset of the the young list,
// all we need to do to clear the young list is clear its
// head and length, and unlink any young regions in the code below
_young_list - > clear ( ) ;
2008-06-05 15:57:56 -07:00
G1CollectorPolicy * policy = g1_policy ( ) ;
double start_sec = os : : elapsedTime ( ) ;
bool non_young = true ;
HeapRegion * cur = cs_head ;
int age_bound = - 1 ;
size_t rs_lengths = 0 ;
while ( cur ! = NULL ) {
2011-03-04 17:13:19 -05:00
assert ( ! is_on_master_free_list ( cur ) , " sanity " ) ;
2008-06-05 15:57:56 -07:00
if ( non_young ) {
if ( cur - > is_young ( ) ) {
double end_sec = os : : elapsedTime ( ) ;
double elapsed_ms = ( end_sec - start_sec ) * 1000.0 ;
non_young_time_ms + = elapsed_ms ;
start_sec = os : : elapsedTime ( ) ;
non_young = false ;
}
} else {
2011-11-17 12:40:15 -08:00
if ( ! cur - > is_young ( ) ) {
double end_sec = os : : elapsedTime ( ) ;
double elapsed_ms = ( end_sec - start_sec ) * 1000.0 ;
young_time_ms + = elapsed_ms ;
2008-06-05 15:57:56 -07:00
2011-11-17 12:40:15 -08:00
start_sec = os : : elapsedTime ( ) ;
non_young = true ;
}
2008-06-05 15:57:56 -07:00
}
2014-03-17 10:13:18 +01:00
rs_lengths + = cur - > rem_set ( ) - > occupied_locked ( ) ;
2008-06-05 15:57:56 -07:00
HeapRegion * next = cur - > next_in_collection_set ( ) ;
assert ( cur - > in_collection_set ( ) , " bad CS " ) ;
cur - > set_next_in_collection_set ( NULL ) ;
cur - > set_in_collection_set ( false ) ;
if ( cur - > is_young ( ) ) {
int index = cur - > young_index_in_cset ( ) ;
2011-11-18 12:52:27 -05:00
assert ( index ! = - 1 , " invariant " ) ;
2012-04-18 07:21:15 -04:00
assert ( ( uint ) index < policy - > young_cset_region_length ( ) , " invariant " ) ;
2008-06-05 15:57:56 -07:00
size_t words_survived = _surviving_young_words [ index ] ;
cur - > record_surv_words_in_group ( words_survived ) ;
2010-04-22 10:02:38 -07:00
// At this point the we have 'popped' cur from the collection set
// (linked via next_in_collection_set()) but it is still in the
// young list (linked via next_young_region()). Clear the
// _next_young_region field.
cur - > set_next_young_region ( NULL ) ;
2008-06-05 15:57:56 -07:00
} else {
int index = cur - > young_index_in_cset ( ) ;
2011-11-18 12:52:27 -05:00
assert ( index = = - 1 , " invariant " ) ;
2008-06-05 15:57:56 -07:00
}
assert ( ( cur - > is_young ( ) & & cur - > young_index_in_cset ( ) > - 1 ) | |
( ! cur - > is_young ( ) & & cur - > young_index_in_cset ( ) = = - 1 ) ,
" invariant " ) ;
if ( ! cur - > evacuation_failed ( ) ) {
2011-11-17 12:40:15 -08:00
MemRegion used_mr = cur - > used_region ( ) ;
2008-06-05 15:57:56 -07:00
// And the region is empty.
2011-11-17 12:40:15 -08:00
assert ( ! used_mr . is_empty ( ) , " Should not have empty regions in a CS. " ) ;
2014-03-14 10:15:46 +01:00
pre_used + = cur - > used ( ) ;
2014-03-17 10:13:18 +01:00
free_region ( cur , & local_free_list , false /* par */ , true /* locked */ ) ;
2008-06-05 15:57:56 -07:00
} else {
cur - > uninstall_surv_rate_group ( ) ;
2011-11-18 12:52:27 -05:00
if ( cur - > is_young ( ) ) {
2008-06-05 15:57:56 -07:00
cur - > set_young_index_in_cset ( - 1 ) ;
2011-11-18 12:52:27 -05:00
}
2008-06-05 15:57:56 -07:00
cur - > set_not_young ( ) ;
cur - > set_evacuation_failed ( false ) ;
2011-11-07 22:11:12 -05:00
// The region is now considered to be old.
_old_set . add ( cur ) ;
2013-06-10 11:30:51 +02:00
evacuation_info . increment_collectionset_used_after ( cur - > used ( ) ) ;
2008-06-05 15:57:56 -07:00
}
cur = next ;
}
2013-06-10 11:30:51 +02:00
evacuation_info . set_regions_freed ( local_free_list . length ( ) ) ;
2008-06-05 15:57:56 -07:00
policy - > record_max_rs_lengths ( rs_lengths ) ;
policy - > cset_regions_freed ( ) ;
double end_sec = os : : elapsedTime ( ) ;
double elapsed_ms = ( end_sec - start_sec ) * 1000.0 ;
2011-11-17 12:40:15 -08:00
if ( non_young ) {
2008-06-05 15:57:56 -07:00
non_young_time_ms + = elapsed_ms ;
2011-11-17 12:40:15 -08:00
} else {
2008-06-05 15:57:56 -07:00
young_time_ms + = elapsed_ms ;
2011-11-17 12:40:15 -08:00
}
2008-06-05 15:57:56 -07:00
2014-03-14 10:15:46 +01:00
prepend_to_freelist ( & local_free_list ) ;
decrement_summary_bytes ( pre_used ) ;
2012-07-11 22:47:38 +02:00
policy - > phase_times ( ) - > record_young_free_cset_time_ms ( young_time_ms ) ;
policy - > phase_times ( ) - > record_non_young_free_cset_time_ms ( non_young_time_ms ) ;
2008-06-05 15:57:56 -07:00
}
2014-07-23 09:03:32 +02:00
class G1FreeHumongousRegionClosure : public HeapRegionClosure {
private :
FreeRegionList * _free_region_list ;
HeapRegionSet * _proxy_set ;
HeapRegionSetCount _humongous_regions_removed ;
size_t _freed_bytes ;
public :
G1FreeHumongousRegionClosure ( FreeRegionList * free_region_list ) :
_free_region_list ( free_region_list ) , _humongous_regions_removed ( ) , _freed_bytes ( 0 ) {
}
virtual bool doHeapRegion ( HeapRegion * r ) {
if ( ! r - > startsHumongous ( ) ) {
return false ;
}
G1CollectedHeap * g1h = G1CollectedHeap : : heap ( ) ;
2014-07-31 09:23:24 +02:00
oop obj = ( oop ) r - > bottom ( ) ;
CMBitMap * next_bitmap = g1h - > concurrent_mark ( ) - > nextMarkBitMap ( ) ;
2014-07-23 09:03:32 +02:00
// The following checks whether the humongous object is live are sufficient.
// The main additional check (in addition to having a reference from the roots
// or the young gen) is whether the humongous object has a remembered set entry.
//
// A humongous object cannot be live if there is no remembered set for it
// because:
// - there can be no references from within humongous starts regions referencing
// the object because we never allocate other objects into them.
// (I.e. there are no intra-region references that may be missed by the
// remembered set)
// - as soon there is a remembered set entry to the humongous starts region
// (i.e. it has "escaped" to an old object) this remembered set entry will stay
// until the end of a concurrent mark.
//
// It is not required to check whether the object has been found dead by marking
// or not, in fact it would prevent reclamation within a concurrent cycle, as
// all objects allocated during that time are considered live.
// SATB marking is even more conservative than the remembered set.
// So if at this point in the collection there is no remembered set entry,
// nobody has a reference to it.
// At the start of collection we flush all refinement logs, and remembered sets
// are completely up-to-date wrt to references to the humongous object.
//
// Other implementation considerations:
// - never consider object arrays: while they are a valid target, they have not
// been observed to be used as temporary objects.
// - they would also pose considerable effort for cleaning up the the remembered
// sets.
// While this cleanup is not strictly necessary to be done (or done instantly),
// given that their occurrence is very low, this saves us this additional
// complexity.
2014-08-26 09:36:53 +02:00
uint region_idx = r - > hrm_index ( ) ;
2014-07-23 09:03:32 +02:00
if ( g1h - > humongous_is_live ( region_idx ) | |
g1h - > humongous_region_is_always_live ( region_idx ) ) {
if ( G1TraceReclaimDeadHumongousObjectsAtYoungGC ) {
2014-07-31 09:23:24 +02:00
gclog_or_tty - > print_cr ( " Live humongous %d region %d with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d live-other %d obj array %d " ,
2014-07-23 09:03:32 +02:00
r - > isHumongous ( ) ,
region_idx ,
r - > rem_set ( ) - > occupied ( ) ,
r - > rem_set ( ) - > strong_code_roots_list_length ( ) ,
2014-07-31 09:23:24 +02:00
next_bitmap - > isMarked ( r - > bottom ( ) ) ,
2014-07-23 09:03:32 +02:00
g1h - > humongous_is_live ( region_idx ) ,
2014-07-31 09:23:24 +02:00
obj - > is_objArray ( )
2014-07-23 09:03:32 +02:00
) ;
}
return false ;
}
2014-07-31 09:23:24 +02:00
guarantee ( ! obj - > is_objArray ( ) ,
2014-07-23 09:03:32 +02:00
err_msg ( " Eagerly reclaiming object arrays is not supported, but the object " PTR_FORMAT " is. " ,
r - > bottom ( ) ) ) ;
if ( G1TraceReclaimDeadHumongousObjectsAtYoungGC ) {
2014-07-31 09:23:24 +02:00
gclog_or_tty - > print_cr ( " Reclaim humongous region %d start " PTR_FORMAT " region %d length " UINT32_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d live-other %d obj array %d " ,
2014-07-23 09:03:32 +02:00
r - > isHumongous ( ) ,
r - > bottom ( ) ,
region_idx ,
r - > region_num ( ) ,
r - > rem_set ( ) - > occupied ( ) ,
r - > rem_set ( ) - > strong_code_roots_list_length ( ) ,
2014-07-31 09:23:24 +02:00
next_bitmap - > isMarked ( r - > bottom ( ) ) ,
2014-07-23 09:03:32 +02:00
g1h - > humongous_is_live ( region_idx ) ,
2014-07-31 09:23:24 +02:00
obj - > is_objArray ( )
2014-07-23 09:03:32 +02:00
) ;
}
2014-07-31 09:23:24 +02:00
// Need to clear mark bit of the humongous object if already set.
if ( next_bitmap - > isMarked ( r - > bottom ( ) ) ) {
next_bitmap - > clear ( r - > bottom ( ) ) ;
}
2014-07-23 09:03:32 +02:00
_freed_bytes + = r - > used ( ) ;
r - > set_containing_set ( NULL ) ;
_humongous_regions_removed . increment ( 1u , r - > capacity ( ) ) ;
g1h - > free_humongous_region ( r , _free_region_list , false ) ;
return false ;
}
HeapRegionSetCount & humongous_free_count ( ) {
return _humongous_regions_removed ;
}
size_t bytes_freed ( ) const {
return _freed_bytes ;
}
size_t humongous_reclaimed ( ) const {
return _humongous_regions_removed . length ( ) ;
}
} ;
void G1CollectedHeap : : eagerly_reclaim_humongous_regions ( ) {
assert_at_safepoint ( true ) ;
if ( ! G1ReclaimDeadHumongousObjectsAtYoungGC | | ! _has_humongous_reclaim_candidates ) {
g1_policy ( ) - > phase_times ( ) - > record_fast_reclaim_humongous_time_ms ( 0.0 , 0 ) ;
return ;
}
double start_time = os : : elapsedTime ( ) ;
FreeRegionList local_cleanup_list ( " Local Humongous Cleanup List " ) ;
G1FreeHumongousRegionClosure cl ( & local_cleanup_list ) ;
heap_region_iterate ( & cl ) ;
HeapRegionSetCount empty_set ;
remove_from_old_sets ( empty_set , cl . humongous_free_count ( ) ) ;
G1HRPrinter * hr_printer = _g1h - > hr_printer ( ) ;
if ( hr_printer - > is_active ( ) ) {
FreeRegionListIterator iter ( & local_cleanup_list ) ;
while ( iter . more_available ( ) ) {
HeapRegion * hr = iter . get_next ( ) ;
hr_printer - > cleanup ( hr ) ;
}
}
prepend_to_freelist ( & local_cleanup_list ) ;
decrement_summary_bytes ( cl . bytes_freed ( ) ) ;
g1_policy ( ) - > phase_times ( ) - > record_fast_reclaim_humongous_time_ms ( ( os : : elapsedTime ( ) - start_time ) * 1000.0 ,
cl . humongous_reclaimed ( ) ) ;
}
2010-04-22 10:02:38 -07:00
// This routine is similar to the above but does not record
// any policy statistics or update free lists; we are abandoning
// the current incremental collection set in preparation of a
// full collection. After the full GC we will start to build up
// the incremental collection set again.
// This is only called when we're doing a full collection
// and is immediately followed by the tearing down of the young list.
void G1CollectedHeap : : abandon_collection_set ( HeapRegion * cs_head ) {
HeapRegion * cur = cs_head ;
while ( cur ! = NULL ) {
HeapRegion * next = cur - > next_in_collection_set ( ) ;
assert ( cur - > in_collection_set ( ) , " bad CS " ) ;
cur - > set_next_in_collection_set ( NULL ) ;
cur - > set_in_collection_set ( false ) ;
cur - > set_young_index_in_cset ( - 1 ) ;
cur = next ;
}
}
2011-01-19 19:30:42 -05:00
void G1CollectedHeap : : set_free_regions_coming ( ) {
if ( G1ConcRegionFreeingVerbose ) {
gclog_or_tty - > print_cr ( " G1ConcRegionFreeing [cm thread] : "
" setting free regions coming " ) ;
2008-06-05 15:57:56 -07:00
}
2011-01-19 19:30:42 -05:00
assert ( ! free_regions_coming ( ) , " pre-condition " ) ;
_free_regions_coming = true ;
2008-06-05 15:57:56 -07:00
}
2011-01-19 19:30:42 -05:00
void G1CollectedHeap : : reset_free_regions_coming ( ) {
2012-01-25 12:58:23 -05:00
assert ( free_regions_coming ( ) , " pre-condition " ) ;
2011-01-19 19:30:42 -05:00
{
MutexLockerEx x ( SecondaryFreeList_lock , Mutex : : _no_safepoint_check_flag ) ;
_free_regions_coming = false ;
SecondaryFreeList_lock - > notify_all ( ) ;
2008-06-05 15:57:56 -07:00
}
2011-01-19 19:30:42 -05:00
if ( G1ConcRegionFreeingVerbose ) {
gclog_or_tty - > print_cr ( " G1ConcRegionFreeing [cm thread] : "
" reset free regions coming " ) ;
2008-06-05 15:57:56 -07:00
}
}
2011-01-19 19:30:42 -05:00
void G1CollectedHeap : : wait_while_free_regions_coming ( ) {
// Most of the time we won't have to wait, so let's do a quick test
// first before we take the lock.
if ( ! free_regions_coming ( ) ) {
return ;
2008-06-05 15:57:56 -07:00
}
2011-01-19 19:30:42 -05:00
if ( G1ConcRegionFreeingVerbose ) {
gclog_or_tty - > print_cr ( " G1ConcRegionFreeing [other] : "
" waiting for free regions " ) ;
2008-06-05 15:57:56 -07:00
}
{
2011-01-19 19:30:42 -05:00
MutexLockerEx x ( SecondaryFreeList_lock , Mutex : : _no_safepoint_check_flag ) ;
while ( free_regions_coming ( ) ) {
SecondaryFreeList_lock - > wait ( Mutex : : _no_safepoint_check_flag ) ;
2008-06-05 15:57:56 -07:00
}
}
2011-01-19 19:30:42 -05:00
if ( G1ConcRegionFreeingVerbose ) {
gclog_or_tty - > print_cr ( " G1ConcRegionFreeing [other] : "
" done waiting for free regions " ) ;
2008-06-05 15:57:56 -07:00
}
}
void G1CollectedHeap : : set_region_short_lived_locked ( HeapRegion * hr ) {
assert ( heap_lock_held_for_gc ( ) ,
" the heap lock should already be held by or for this thread " ) ;
_young_list - > push_region ( hr ) ;
}
class NoYoungRegionsClosure : public HeapRegionClosure {
private :
bool _success ;
public :
NoYoungRegionsClosure ( ) : _success ( true ) { }
bool doHeapRegion ( HeapRegion * r ) {
if ( r - > is_young ( ) ) {
gclog_or_tty - > print_cr ( " Region [ " PTR_FORMAT " , " PTR_FORMAT " ) tagged as young " ,
r - > bottom ( ) , r - > end ( ) ) ;
_success = false ;
}
return false ;
}
bool success ( ) { return _success ; }
} ;
2010-04-22 10:02:38 -07:00
bool G1CollectedHeap : : check_young_list_empty ( bool check_heap , bool check_sample ) {
bool ret = _young_list - > check_list_empty ( check_sample ) ;
2008-06-05 15:57:56 -07:00
2010-04-22 10:02:38 -07:00
if ( check_heap ) {
2008-06-05 15:57:56 -07:00
NoYoungRegionsClosure closure ;
heap_region_iterate ( & closure ) ;
ret = ret & & closure . success ( ) ;
}
return ret ;
}
2011-11-07 22:11:12 -05:00
class TearDownRegionSetsClosure : public HeapRegionClosure {
private :
2014-03-14 10:15:46 +01:00
HeapRegionSet * _old_set ;
2008-06-05 15:57:56 -07:00
2011-11-07 22:11:12 -05:00
public :
2014-03-14 10:15:46 +01:00
TearDownRegionSetsClosure ( HeapRegionSet * old_set ) : _old_set ( old_set ) { }
2008-06-05 15:57:56 -07:00
2011-11-07 22:11:12 -05:00
bool doHeapRegion ( HeapRegion * r ) {
if ( r - > is_empty ( ) ) {
// We ignore empty regions, we'll empty the free list afterwards
} else if ( r - > is_young ( ) ) {
// We ignore young regions, we'll empty the young list afterwards
} else if ( r - > isHumongous ( ) ) {
// We ignore humongous regions, we're not tearing down the
// humongous region set
} else {
// The rest should be old
_old_set - > remove ( r ) ;
}
return false ;
}
~ TearDownRegionSetsClosure ( ) {
assert ( _old_set - > is_empty ( ) , " post-condition " ) ;
}
} ;
void G1CollectedHeap : : tear_down_region_sets ( bool free_list_only ) {
assert_at_safepoint ( true /* should_be_vm_thread */ ) ;
if ( ! free_list_only ) {
TearDownRegionSetsClosure cl ( & _old_set ) ;
heap_region_iterate ( & cl ) ;
2014-03-18 19:07:22 +01:00
// Note that emptying the _young_list is postponed and instead done as
// the first step when rebuilding the regions sets again. The reason for
// this is that during a full GC string deduplication needs to know if
// a collected region was young or old when the full GC was initiated.
2011-11-07 22:11:12 -05:00
}
2014-08-26 09:36:53 +02:00
_hrm . remove_all_free_regions ( ) ;
2008-06-05 15:57:56 -07:00
}
2011-11-07 22:11:12 -05:00
class RebuildRegionSetsClosure : public HeapRegionClosure {
private :
bool _free_list_only ;
2014-03-14 10:15:46 +01:00
HeapRegionSet * _old_set ;
2014-08-26 09:36:53 +02:00
HeapRegionManager * _hrm ;
2011-11-07 22:11:12 -05:00
size_t _total_used ;
2011-01-19 19:30:42 -05:00
2008-06-05 15:57:56 -07:00
public :
2011-11-07 22:11:12 -05:00
RebuildRegionSetsClosure ( bool free_list_only ,
2014-08-26 09:36:53 +02:00
HeapRegionSet * old_set , HeapRegionManager * hrm ) :
2011-11-07 22:11:12 -05:00
_free_list_only ( free_list_only ) ,
2014-08-26 09:36:53 +02:00
_old_set ( old_set ) , _hrm ( hrm ) , _total_used ( 0 ) {
assert ( _hrm - > num_free_regions ( ) = = 0 , " pre-condition " ) ;
2011-11-07 22:11:12 -05:00
if ( ! free_list_only ) {
assert ( _old_set - > is_empty ( ) , " pre-condition " ) ;
}
}
2011-01-19 19:30:42 -05:00
2008-06-05 15:57:56 -07:00
bool doHeapRegion ( HeapRegion * r ) {
2011-11-07 22:11:12 -05:00
if ( r - > continuesHumongous ( ) ) {
return false ;
}
if ( r - > is_empty ( ) ) {
// Add free regions to the free list
2014-08-26 09:36:53 +02:00
_hrm - > insert_into_free_list ( r ) ;
2011-11-07 22:11:12 -05:00
} else if ( ! _free_list_only ) {
assert ( ! r - > is_young ( ) , " we should not come across young regions " ) ;
if ( r - > isHumongous ( ) ) {
// We ignore humongous regions, we left the humongous set unchanged
} else {
// The rest should be old, add them to the old set
_old_set - > add ( r ) ;
2008-06-05 15:57:56 -07:00
}
2011-11-07 22:11:12 -05:00
_total_used + = r - > used ( ) ;
2008-06-05 15:57:56 -07:00
}
2011-11-07 22:11:12 -05:00
2008-06-05 15:57:56 -07:00
return false ;
}
2011-11-07 22:11:12 -05:00
size_t total_used ( ) {
return _total_used ;
2011-01-19 19:30:42 -05:00
}
2008-06-05 15:57:56 -07:00
} ;
2011-11-07 22:11:12 -05:00
void G1CollectedHeap : : rebuild_region_sets ( bool free_list_only ) {
assert_at_safepoint ( true /* should_be_vm_thread */ ) ;
2014-03-18 19:07:22 +01:00
if ( ! free_list_only ) {
_young_list - > empty_list ( ) ;
}
2014-08-26 09:36:53 +02:00
RebuildRegionSetsClosure cl ( free_list_only , & _old_set , & _hrm ) ;
2011-11-07 22:11:12 -05:00
heap_region_iterate ( & cl ) ;
if ( ! free_list_only ) {
_summary_bytes_used = cl . total_used ( ) ;
}
assert ( _summary_bytes_used = = recalculate_used ( ) ,
err_msg ( " inconsistent _summary_bytes_used, "
" value: " SIZE_FORMAT " recalculated: " SIZE_FORMAT ,
_summary_bytes_used , recalculate_used ( ) ) ) ;
2008-06-05 15:57:56 -07:00
}
void G1CollectedHeap : : set_refine_cte_cl_concurrency ( bool concurrent ) {
_refine_cte_cl - > set_concurrent ( concurrent ) ;
}
2011-01-19 19:30:42 -05:00
bool G1CollectedHeap : : is_in_closed_subset ( const void * p ) const {
HeapRegion * hr = heap_region_containing ( p ) ;
2014-04-17 15:57:02 +02:00
return hr - > is_in ( p ) ;
2011-01-19 19:30:42 -05:00
}
2011-08-12 11:31:06 -04:00
// Methods for the mutator alloc region
2011-03-30 10:26:59 -04:00
HeapRegion * G1CollectedHeap : : new_mutator_alloc_region ( size_t word_size ,
bool force ) {
assert_heap_locked_or_at_safepoint ( true /* should_be_vm_thread */ ) ;
assert ( ! force | | g1_policy ( ) - > can_expand_young_list ( ) ,
" if force is true we should be able to expand the young list " ) ;
2011-06-24 12:38:49 -04:00
bool young_list_full = g1_policy ( ) - > is_young_list_full ( ) ;
if ( force | | ! young_list_full ) {
2011-03-30 10:26:59 -04:00
HeapRegion * new_alloc_region = new_region ( word_size ,
2014-02-28 15:27:09 +01:00
false /* is_old */ ,
2011-03-30 10:26:59 -04:00
false /* do_expand */ ) ;
if ( new_alloc_region ! = NULL ) {
set_region_short_lived_locked ( new_alloc_region ) ;
2011-06-24 12:38:49 -04:00
_hr_printer . alloc ( new_alloc_region , G1HRPrinter : : Eden , young_list_full ) ;
2014-04-29 09:33:20 +02:00
check_bitmaps ( " Mutator Region Allocation " , new_alloc_region ) ;
2011-03-30 10:26:59 -04:00
return new_alloc_region ;
}
}
return NULL ;
}
void G1CollectedHeap : : retire_mutator_alloc_region ( HeapRegion * alloc_region ,
size_t allocated_bytes ) {
assert_heap_locked_or_at_safepoint ( true /* should_be_vm_thread */ ) ;
assert ( alloc_region - > is_young ( ) , " all mutator alloc regions should be young " ) ;
g1_policy ( ) - > add_region_to_incremental_cset_lhs ( alloc_region ) ;
_summary_bytes_used + = allocated_bytes ;
2011-06-24 12:38:49 -04:00
_hr_printer . retire ( alloc_region ) ;
2011-09-23 16:07:49 -04:00
// We update the eden sizes here, when the region is retired,
// instead of when it's allocated, since this is the point that its
// used space has been recored in _summary_bytes_used.
g1mm ( ) - > update_eden_size ( ) ;
2011-03-30 10:26:59 -04:00
}
HeapRegion * MutatorAllocRegion : : allocate_new_region ( size_t word_size ,
bool force ) {
return _g1h - > new_mutator_alloc_region ( word_size , force ) ;
}
2011-08-09 10:16:01 -07:00
void G1CollectedHeap : : set_par_threads ( ) {
// Don't change the number of workers. Use the value previously set
// in the workgroup.
2011-12-16 11:40:00 -08:00
assert ( G1CollectedHeap : : use_parallel_gc_threads ( ) , " shouldn't be here otherwise " ) ;
2011-12-14 13:34:57 -08:00
uint n_workers = workers ( ) - > active_workers ( ) ;
2011-12-16 11:40:00 -08:00
assert ( UseDynamicNumberOfGCThreads | |
2011-08-09 10:16:01 -07:00
n_workers = = workers ( ) - > total_workers ( ) ,
" Otherwise should be using the total number of workers " ) ;
if ( n_workers = = 0 ) {
assert ( false , " Should have been set in prior evacuation pause. " ) ;
n_workers = ParallelGCThreads ;
workers ( ) - > set_active_workers ( n_workers ) ;
}
set_par_threads ( n_workers ) ;
}
2011-03-30 10:26:59 -04:00
void MutatorAllocRegion : : retire_region ( HeapRegion * alloc_region ,
size_t allocated_bytes ) {
_g1h - > retire_mutator_alloc_region ( alloc_region , allocated_bytes ) ;
}
2011-08-12 11:31:06 -04:00
// Methods for the GC alloc regions
HeapRegion * G1CollectedHeap : : new_gc_alloc_region ( size_t word_size ,
2012-04-18 07:21:15 -04:00
uint count ,
2011-08-12 11:31:06 -04:00
GCAllocPurpose ap ) {
assert ( FreeList_lock - > owned_by_self ( ) , " pre-condition " ) ;
if ( count < g1_policy ( ) - > max_regions ( ap ) ) {
2014-02-28 15:27:09 +01:00
bool survivor = ( ap = = GCAllocForSurvived ) ;
2011-08-12 11:31:06 -04:00
HeapRegion * new_alloc_region = new_region ( word_size ,
2014-02-28 15:27:09 +01:00
! survivor ,
2011-08-12 11:31:06 -04:00
true /* do_expand */ ) ;
if ( new_alloc_region ! = NULL ) {
// We really only need to do this for old regions given that we
// should never scan survivors. But it doesn't hurt to do it
// for survivors too.
2014-06-26 10:00:00 +02:00
new_alloc_region - > record_top_and_timestamp ( ) ;
2014-02-28 15:27:09 +01:00
if ( survivor ) {
2011-08-12 11:31:06 -04:00
new_alloc_region - > set_survivor ( ) ;
_hr_printer . alloc ( new_alloc_region , G1HRPrinter : : Survivor ) ;
2014-04-29 09:33:20 +02:00
check_bitmaps ( " Survivor Region Allocation " , new_alloc_region ) ;
2011-08-12 11:31:06 -04:00
} else {
_hr_printer . alloc ( new_alloc_region , G1HRPrinter : : Old ) ;
2014-04-29 09:33:20 +02:00
check_bitmaps ( " Old Region Allocation " , new_alloc_region ) ;
2011-08-12 11:31:06 -04:00
}
2012-01-10 18:58:13 -05:00
bool during_im = g1_policy ( ) - > during_initial_mark_pause ( ) ;
new_alloc_region - > note_start_of_copying ( during_im ) ;
2011-08-12 11:31:06 -04:00
return new_alloc_region ;
} else {
g1_policy ( ) - > note_alloc_region_limit_reached ( ap ) ;
}
}
return NULL ;
}
void G1CollectedHeap : : retire_gc_alloc_region ( HeapRegion * alloc_region ,
size_t allocated_bytes ,
GCAllocPurpose ap ) {
2012-01-10 18:58:13 -05:00
bool during_im = g1_policy ( ) - > during_initial_mark_pause ( ) ;
alloc_region - > note_end_of_copying ( during_im ) ;
2011-08-12 11:31:06 -04:00
g1_policy ( ) - > record_bytes_copied_during_gc ( allocated_bytes ) ;
if ( ap = = GCAllocForSurvived ) {
young_list ( ) - > add_survivor_region ( alloc_region ) ;
2011-11-07 22:11:12 -05:00
} else {
_old_set . add ( alloc_region ) ;
2011-08-12 11:31:06 -04:00
}
_hr_printer . retire ( alloc_region ) ;
}
HeapRegion * SurvivorGCAllocRegion : : allocate_new_region ( size_t word_size ,
bool force ) {
assert ( ! force , " not supported for GC alloc regions " ) ;
return _g1h - > new_gc_alloc_region ( word_size , count ( ) , GCAllocForSurvived ) ;
}
void SurvivorGCAllocRegion : : retire_region ( HeapRegion * alloc_region ,
size_t allocated_bytes ) {
_g1h - > retire_gc_alloc_region ( alloc_region , allocated_bytes ,
GCAllocForSurvived ) ;
}
HeapRegion * OldGCAllocRegion : : allocate_new_region ( size_t word_size ,
bool force ) {
assert ( ! force , " not supported for GC alloc regions " ) ;
return _g1h - > new_gc_alloc_region ( word_size , count ( ) , GCAllocForTenured ) ;
}
void OldGCAllocRegion : : retire_region ( HeapRegion * alloc_region ,
size_t allocated_bytes ) {
_g1h - > retire_gc_alloc_region ( alloc_region , allocated_bytes ,
GCAllocForTenured ) ;
}
2014-08-18 19:30:24 -07:00
HeapRegion * OldGCAllocRegion : : release ( ) {
HeapRegion * cur = get ( ) ;
if ( cur ! = NULL ) {
// Determine how far we are from the next card boundary. If it is smaller than
// the minimum object size we can allocate into, expand into the next card.
HeapWord * top = cur - > top ( ) ;
HeapWord * aligned_top = ( HeapWord * ) align_ptr_up ( top , G1BlockOffsetSharedArray : : N_bytes ) ;
size_t to_allocate_words = pointer_delta ( aligned_top , top , HeapWordSize ) ;
if ( to_allocate_words ! = 0 ) {
// We are not at a card boundary. Fill up, possibly into the next, taking the
// end of the region and the minimum object size into account.
to_allocate_words = MIN2 ( pointer_delta ( cur - > end ( ) , cur - > top ( ) , HeapWordSize ) ,
MAX2 ( to_allocate_words , G1CollectedHeap : : min_fill_size ( ) ) ) ;
// Skip allocation if there is not enough space to allocate even the smallest
// possible object. In this case this region will not be retained, so the
// original problem cannot occur.
if ( to_allocate_words > = G1CollectedHeap : : min_fill_size ( ) ) {
HeapWord * dummy = attempt_allocation ( to_allocate_words , true /* bot_updates */ ) ;
CollectedHeap : : fill_with_object ( dummy , to_allocate_words ) ;
}
}
}
return G1AllocRegion : : release ( ) ;
}
2011-03-30 10:26:59 -04:00
// Heap region set verification
2011-01-19 19:30:42 -05:00
class VerifyRegionListsClosure : public HeapRegionClosure {
private :
2014-03-14 10:15:46 +01:00
HeapRegionSet * _old_set ;
HeapRegionSet * _humongous_set ;
2014-08-26 09:36:53 +02:00
HeapRegionManager * _hrm ;
2008-06-05 15:57:56 -07:00
public :
2014-03-14 10:15:46 +01:00
HeapRegionSetCount _old_count ;
HeapRegionSetCount _humongous_count ;
HeapRegionSetCount _free_count ;
2011-01-19 19:30:42 -05:00
2014-03-14 10:15:46 +01:00
VerifyRegionListsClosure ( HeapRegionSet * old_set ,
HeapRegionSet * humongous_set ,
2014-08-26 09:36:53 +02:00
HeapRegionManager * hrm ) :
_old_set ( old_set ) , _humongous_set ( humongous_set ) , _hrm ( hrm ) ,
2014-03-14 10:15:46 +01:00
_old_count ( ) , _humongous_count ( ) , _free_count ( ) { }
2011-01-19 19:30:42 -05:00
bool doHeapRegion ( HeapRegion * hr ) {
if ( hr - > continuesHumongous ( ) ) {
return false ;
}
if ( hr - > is_young ( ) ) {
// TODO
} else if ( hr - > startsHumongous ( ) ) {
2014-08-26 09:36:53 +02:00
assert ( hr - > containing_set ( ) = = _humongous_set , err_msg ( " Heap region %u is starts humongous but not in humongous set. " , hr - > hrm_index ( ) ) ) ;
2014-03-14 10:15:46 +01:00
_humongous_count . increment ( 1u , hr - > capacity ( ) ) ;
2011-01-19 19:30:42 -05:00
} else if ( hr - > is_empty ( ) ) {
2014-08-26 09:36:53 +02:00
assert ( _hrm - > is_free ( hr ) , err_msg ( " Heap region %u is empty but not on the free list. " , hr - > hrm_index ( ) ) ) ;
2014-03-14 10:15:46 +01:00
_free_count . increment ( 1u , hr - > capacity ( ) ) ;
2011-11-07 22:11:12 -05:00
} else {
2014-08-26 09:36:53 +02:00
assert ( hr - > containing_set ( ) = = _old_set , err_msg ( " Heap region %u is old but not in the old set. " , hr - > hrm_index ( ) ) ) ;
2014-03-14 10:15:46 +01:00
_old_count . increment ( 1u , hr - > capacity ( ) ) ;
2011-01-19 19:30:42 -05:00
}
2008-06-05 15:57:56 -07:00
return false ;
}
2014-03-14 10:15:46 +01:00
2014-08-26 09:36:53 +02:00
void verify_counts ( HeapRegionSet * old_set , HeapRegionSet * humongous_set , HeapRegionManager * free_list ) {
2014-03-14 10:15:46 +01:00
guarantee ( old_set - > length ( ) = = _old_count . length ( ) , err_msg ( " Old set count mismatch. Expected %u, actual %u. " , old_set - > length ( ) , _old_count . length ( ) ) ) ;
guarantee ( old_set - > total_capacity_bytes ( ) = = _old_count . capacity ( ) , err_msg ( " Old set capacity mismatch. Expected " SIZE_FORMAT " , actual " SIZE_FORMAT ,
old_set - > total_capacity_bytes ( ) , _old_count . capacity ( ) ) ) ;
guarantee ( humongous_set - > length ( ) = = _humongous_count . length ( ) , err_msg ( " Hum set count mismatch. Expected %u, actual %u. " , humongous_set - > length ( ) , _humongous_count . length ( ) ) ) ;
guarantee ( humongous_set - > total_capacity_bytes ( ) = = _humongous_count . capacity ( ) , err_msg ( " Hum set capacity mismatch. Expected " SIZE_FORMAT " , actual " SIZE_FORMAT ,
humongous_set - > total_capacity_bytes ( ) , _humongous_count . capacity ( ) ) ) ;
2014-08-18 16:10:44 +02:00
guarantee ( free_list - > num_free_regions ( ) = = _free_count . length ( ) , err_msg ( " Free list count mismatch. Expected %u, actual %u. " , free_list - > num_free_regions ( ) , _free_count . length ( ) ) ) ;
2014-03-14 10:15:46 +01:00
guarantee ( free_list - > total_capacity_bytes ( ) = = _free_count . capacity ( ) , err_msg ( " Free list capacity mismatch. Expected " SIZE_FORMAT " , actual " SIZE_FORMAT ,
free_list - > total_capacity_bytes ( ) , _free_count . capacity ( ) ) ) ;
}
2008-06-05 15:57:56 -07:00
} ;
2011-01-19 19:30:42 -05:00
void G1CollectedHeap : : verify_region_sets ( ) {
assert_heap_locked_or_at_safepoint ( true /* should_be_vm_thread */ ) ;
2008-06-05 15:57:56 -07:00
2011-01-19 19:30:42 -05:00
// First, check the explicit lists.
2014-08-26 09:36:53 +02:00
_hrm . verify ( ) ;
2011-01-19 19:30:42 -05:00
{
// Given that a concurrent operation might be adding regions to
// the secondary free list we have to take the lock before
// verifying it.
MutexLockerEx x ( SecondaryFreeList_lock , Mutex : : _no_safepoint_check_flag ) ;
2014-03-14 10:15:46 +01:00
_secondary_free_list . verify_list ( ) ;
2011-01-19 19:30:42 -05:00
}
// If a concurrent region freeing operation is in progress it will
// be difficult to correctly attributed any free regions we come
// across to the correct free list given that they might belong to
// one of several (free_list, secondary_free_list, any local lists,
// etc.). So, if that's the case we will skip the rest of the
// verification operation. Alternatively, waiting for the concurrent
// operation to complete will have a non-trivial effect on the GC's
// operation (no concurrent operation will last longer than the
// interval between two calls to verification) and it might hide
// any issues that we would like to catch during testing.
if ( free_regions_coming ( ) ) {
return ;
}
2008-06-05 15:57:56 -07:00
2011-03-04 17:13:19 -05:00
// Make sure we append the secondary_free_list on the free_list so
// that all free regions we will come across can be safely
// attributed to the free_list.
append_secondary_free_list_if_not_empty_with_lock ( ) ;
2008-06-05 15:57:56 -07:00
2011-01-19 19:30:42 -05:00
// Finally, make sure that the region accounting in the lists is
// consistent with what we see in the heap.
2008-06-05 15:57:56 -07:00
2014-08-26 09:36:53 +02:00
VerifyRegionListsClosure cl ( & _old_set , & _humongous_set , & _hrm ) ;
2011-01-19 19:30:42 -05:00
heap_region_iterate ( & cl ) ;
2014-08-26 09:36:53 +02:00
cl . verify_counts ( & _old_set , & _humongous_set , & _hrm ) ;
2008-06-05 15:57:56 -07:00
}
2013-08-15 10:52:18 +02:00
// Optimized nmethod scanning
class RegisterNMethodOopClosure : public OopClosure {
G1CollectedHeap * _g1h ;
nmethod * _nm ;
template < class T > void do_oop_work ( T * p ) {
T heap_oop = oopDesc : : load_heap_oop ( p ) ;
if ( ! oopDesc : : is_null ( heap_oop ) ) {
oop obj = oopDesc : : decode_heap_oop_not_null ( heap_oop ) ;
HeapRegion * hr = _g1h - > heap_region_containing ( obj ) ;
2013-11-07 15:17:10 +01:00
assert ( ! hr - > continuesHumongous ( ) ,
err_msg ( " trying to add code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
" starting at " HR_FORMAT ,
_nm , HR_FORMAT_PARAMS ( hr ) , HR_FORMAT_PARAMS ( hr - > humongous_start_region ( ) ) ) ) ;
2013-08-15 10:52:18 +02:00
// HeapRegion::add_strong_code_root() avoids adding duplicate
// entries but having duplicates is OK since we "mark" nmethods
// as visited when we scan the strong code root lists during the GC.
hr - > add_strong_code_root ( _nm ) ;
2013-11-07 15:17:10 +01:00
assert ( hr - > rem_set ( ) - > strong_code_roots_list_contains ( _nm ) ,
err_msg ( " failed to add code root " PTR_FORMAT " to remembered set of region " HR_FORMAT ,
_nm , HR_FORMAT_PARAMS ( hr ) ) ) ;
2013-08-15 10:52:18 +02:00
}
}
public :
RegisterNMethodOopClosure ( G1CollectedHeap * g1h , nmethod * nm ) :
_g1h ( g1h ) , _nm ( nm ) { }
void do_oop ( oop * p ) { do_oop_work ( p ) ; }
void do_oop ( narrowOop * p ) { do_oop_work ( p ) ; }
} ;
class UnregisterNMethodOopClosure : public OopClosure {
G1CollectedHeap * _g1h ;
nmethod * _nm ;
template < class T > void do_oop_work ( T * p ) {
T heap_oop = oopDesc : : load_heap_oop ( p ) ;
if ( ! oopDesc : : is_null ( heap_oop ) ) {
oop obj = oopDesc : : decode_heap_oop_not_null ( heap_oop ) ;
HeapRegion * hr = _g1h - > heap_region_containing ( obj ) ;
2013-11-07 15:17:10 +01:00
assert ( ! hr - > continuesHumongous ( ) ,
err_msg ( " trying to remove code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
" starting at " HR_FORMAT ,
_nm , HR_FORMAT_PARAMS ( hr ) , HR_FORMAT_PARAMS ( hr - > humongous_start_region ( ) ) ) ) ;
2013-08-15 10:52:18 +02:00
hr - > remove_strong_code_root ( _nm ) ;
2013-11-07 15:17:10 +01:00
assert ( ! hr - > rem_set ( ) - > strong_code_roots_list_contains ( _nm ) ,
err_msg ( " failed to remove code root " PTR_FORMAT " of region " HR_FORMAT ,
_nm , HR_FORMAT_PARAMS ( hr ) ) ) ;
2013-08-15 10:52:18 +02:00
}
}
public :
UnregisterNMethodOopClosure ( G1CollectedHeap * g1h , nmethod * nm ) :
_g1h ( g1h ) , _nm ( nm ) { }
void do_oop ( oop * p ) { do_oop_work ( p ) ; }
void do_oop ( narrowOop * p ) { do_oop_work ( p ) ; }
} ;
void G1CollectedHeap : : register_nmethod ( nmethod * nm ) {
CollectedHeap : : register_nmethod ( nm ) ;
guarantee ( nm ! = NULL , " sanity " ) ;
RegisterNMethodOopClosure reg_cl ( this , nm ) ;
nm - > oops_do ( & reg_cl ) ;
}
void G1CollectedHeap : : unregister_nmethod ( nmethod * nm ) {
CollectedHeap : : unregister_nmethod ( nm ) ;
guarantee ( nm ! = NULL , " sanity " ) ;
UnregisterNMethodOopClosure reg_cl ( this , nm ) ;
nm - > oops_do ( & reg_cl , true ) ;
}
class MigrateCodeRootsHeapRegionClosure : public HeapRegionClosure {
public :
bool doHeapRegion ( HeapRegion * hr ) {
2013-11-07 15:17:10 +01:00
assert ( ! hr - > isHumongous ( ) ,
err_msg ( " humongous region " HR_FORMAT " should not have been added to collection set " ,
HR_FORMAT_PARAMS ( hr ) ) ) ;
2013-08-15 10:52:18 +02:00
hr - > migrate_strong_code_roots ( ) ;
return false ;
}
} ;
void G1CollectedHeap : : migrate_strong_code_roots ( ) {
MigrateCodeRootsHeapRegionClosure cl ;
double migrate_start = os : : elapsedTime ( ) ;
collection_set_iterate ( & cl ) ;
double migration_time_ms = ( os : : elapsedTime ( ) - migrate_start ) * 1000.0 ;
g1_policy ( ) - > phase_times ( ) - > record_strong_code_root_migration_time ( migration_time_ms ) ;
}
2014-03-17 10:12:21 +01:00
void G1CollectedHeap : : purge_code_root_memory ( ) {
double purge_start = os : : elapsedTime ( ) ;
G1CodeRootSet : : purge_chunks ( G1CodeRootsChunkCacheKeepPercent ) ;
double purge_time_ms = ( os : : elapsedTime ( ) - purge_start ) * 1000.0 ;
g1_policy ( ) - > phase_times ( ) - > record_strong_code_root_purge_time ( purge_time_ms ) ;
}
2013-08-15 10:52:18 +02:00
class RebuildStrongCodeRootClosure : public CodeBlobClosure {
G1CollectedHeap * _g1h ;
public :
RebuildStrongCodeRootClosure ( G1CollectedHeap * g1h ) :
_g1h ( g1h ) { }
void do_code_blob ( CodeBlob * cb ) {
nmethod * nm = ( cb ! = NULL ) ? cb - > as_nmethod_or_null ( ) : NULL ;
if ( nm = = NULL ) {
return ;
}
2014-06-23 16:43:41 +02:00
if ( ScavengeRootsInCode ) {
2013-08-15 10:52:18 +02:00
_g1h - > register_nmethod ( nm ) ;
}
}
} ;
void G1CollectedHeap : : rebuild_strong_code_roots ( ) {
RebuildStrongCodeRootClosure blob_cl ( this ) ;
CodeCache : : blobs_do ( & blob_cl ) ;
}