/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_G1CONCURRENTMARK_INLINE_HPP
#define SHARE_VM_GC_G1_G1CONCURRENTMARK_INLINE_HPP

#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentMark.hpp"
#include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
#include "gc/g1/g1ConcurrentMarkObjArrayProcessor.inline.hpp"
#include "gc/g1/suspendibleThreadSet.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "utilities/bitMap.inline.hpp"
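
// Mark the given object in the next bitmap if it has not already been marked
// and was not allocated since the start of the current marking cycle.
// Returns true if this thread did the marking.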
inline bool G1ConcurrentMark::mark_in_next_bitmap(oop const obj) {
  HeapRegion* const hr = _g1h->heap_region_containing(obj);
  return mark_in_next_bitmap(hr, obj);
}

inline bool G1ConcurrentMark::mark_in_next_bitmap(HeapRegion* const hr, oop const obj) {
  assert(hr != NULL, "just checking");
  assert(hr->is_in_reserved(obj), "Attempting to mark object at " PTR_FORMAT " that is not contained in the given region %u", p2i(obj), hr->hrm_index());

  if (hr->obj_allocated_since_next_marking(obj)) {
    return false;
  }

  // Some callers may have stale objects to mark above nTAMS after humongous reclaim.
  // Can't assert that this is a valid object at this point, since it might be in the
  // process of being copied by another thread.
  assert(!hr->is_continues_humongous(), "Should not try to mark object " PTR_FORMAT " in Humongous continues region %u above nTAMS " PTR_FORMAT, p2i(obj), hr->hrm_index(), p2i(hr->next_top_at_mark_start()));

  HeapWord* const obj_addr = (HeapWord*)obj;
  // Dirty read to avoid CAS.
  if (_nextMarkBitMap->is_marked(obj_addr)) {
    return false;
  }

  return _nextMarkBitMap->par_mark(obj_addr);
}
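
// Debug-only (non-PRODUCT) helper: apply fn to every entry in the global
// mark stack's chunk list. Must be called at a safepoint, as asserted below.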
#ifndef PRODUCT
template<typename Fn>
inline void G1CMMarkStack::iterate(Fn fn) const {
  assert_at_safepoint(true);

  size_t num_chunks = 0;

  TaskQueueEntryChunk* cur = _chunk_list;
  while (cur != NULL) {
    guarantee(num_chunks <= _chunks_in_chunk_list, "Found " SIZE_FORMAT " oop chunks which is more than there should be", num_chunks);

    for (size_t i = 0; i < EntriesPerChunk; ++i) {
      if (cur->data[i].is_null()) {
        break;
      }
      fn(cur->data[i]);
    }
    cur = cur->next;
    num_chunks++;
  }
}
#endif

// Scans the given object and visits its children.
inline void G1CMTask::scan_task_entry(G1TaskQueueEntry task_entry) { process_grey_task_entry<true>(task_entry); }
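
// Push the given entry onto the local task queue; if the local queue is full,
// make room by moving some of its entries to the global mark stack first.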
inline void G1CMTask::push(G1TaskQueueEntry task_entry) {
  assert(task_entry.is_array_slice() || _g1h->is_in_g1_reserved(task_entry.obj()), "invariant");
  assert(task_entry.is_array_slice() || !_g1h->is_on_master_free_list(
              _g1h->heap_region_containing(task_entry.obj())), "invariant");
  assert(task_entry.is_array_slice() || !_g1h->is_obj_ill(task_entry.obj()), "invariant"); // FIXME!!!
  assert(task_entry.is_array_slice() || _nextMarkBitMap->is_marked((HeapWord*)task_entry.obj()), "invariant");

  if (!_task_queue->push(task_entry)) {
    // The local task queue looks full. We need to push some entries
    // to the global stack.
    move_entries_to_global_stack();

    // This should succeed since, even if we overflow the global
    // stack, we should have definitely removed some entries from the
    // local queue. So, there must be space on it.
    bool success = _task_queue->push(task_entry);
    assert(success, "invariant");
  }
}

inline bool G1CMTask::is_below_finger(oop obj, HeapWord* global_finger) const {
  // If obj is above the global finger, then the mark bitmap scan
  // will find it later, and no push is needed. Similarly, if we have
  // a current region and obj is between the local finger and the
  // end of the current region, then no push is needed. The tradeoff
  // of checking both vs only checking the global finger is that the
  // local check will be more accurate and so result in fewer pushes,
  // but may also be a little slower.
  HeapWord* objAddr = (HeapWord*)obj;
  if (_finger != NULL) {
    // We have a current region.

    // Finger and region values are all NULL or all non-NULL. We
    // use _finger to check since we immediately use its value.
    assert(_curr_region != NULL, "invariant");
    assert(_region_limit != NULL, "invariant");
    assert(_region_limit <= global_finger, "invariant");

    // True if obj is less than the local finger, or is between
    // the region limit and the global finger.
    if (objAddr < _finger) {
      return true;
    } else if (objAddr < _region_limit) {
      return false;
    } // Else check global finger.
  }
  // Check global finger.
  return objAddr < global_finger;
}
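
// Process one grey task entry. When 'scan' is false (allowed only for
// typeArrays, which hold no references) the scan is skipped and only the
// task's limits are checked; otherwise the object or array slice is scanned
// and the scanned words are accounted for.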
template<bool scan>
inline void G1CMTask::process_grey_task_entry(G1TaskQueueEntry task_entry) {
  assert(scan || (task_entry.is_oop() && task_entry.obj()->is_typeArray()), "Skipping scan of grey non-typeArray");
  assert(task_entry.is_array_slice() || _nextMarkBitMap->is_marked((HeapWord*)task_entry.obj()),
         "Any stolen object should be a slice or marked");

  if (scan) {
    if (task_entry.is_array_slice()) {
      _words_scanned += _objArray_processor.process_slice(task_entry.slice());
    } else {
      oop obj = task_entry.obj();
      if (G1CMObjArrayProcessor::should_be_sliced(obj)) {
        _words_scanned += _objArray_processor.process_obj(obj);
      } else {
        _words_scanned += obj->oop_iterate_size(_cm_oop_closure);
      }
    }
  }
  check_limits();
}
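
// Scan the references of the given objArray that lie within the memory
// region mr; returns the size of mr in words.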
inline size_t G1CMTask::scan_objArray(objArrayOop obj, MemRegion mr) {
  obj->oop_iterate(_cm_oop_closure, mr);
  return mr.word_size();
}
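
// Try to mark the given object; if this thread succeeded in marking it,
// decide whether the object must also be pushed or processed for scanning.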
inline void G1CMTask::make_reference_grey(oop obj) {
  if (!_cm->mark_in_next_bitmap(obj)) {
    return;
  }

  // No OrderAccess::store_load() is needed. It is implicit in the
  // CAS done in the G1CMBitMap::par_mark() call in the routine above.
  HeapWord* global_finger = _cm->finger();

  // We only need to push a newly grey object on the mark
  // stack if it is in a section of memory the mark bitmap
  // scan has already examined. Mark bitmap scanning
  // maintains progress "fingers" for determining that.
  //
  // Notice that the global finger might be moving forward
  // concurrently. This is not a problem. In the worst case, we
  // mark the object while it is above the global finger and, by
  // the time we read the global finger, it has moved forward
  // past this object. In this case, the object will probably
  // be visited when a task is scanning the region and will also
  // be pushed on the stack. So, some duplicate work, but no
  // correctness problems.
  if (is_below_finger(obj, global_finger)) {
    G1TaskQueueEntry entry = G1TaskQueueEntry::from_oop(obj);
    if (obj->is_typeArray()) {
      // Immediately process arrays of primitive types, rather
      // than pushing on the mark stack. This keeps us from
      // adding humongous objects to the mark stack that might
      // be reclaimed before the entry is processed - see
      // selection of candidates for eager reclaim of humongous
      // objects. The cost of the additional type test is
      // mitigated by avoiding a trip through the mark stack,
      // by only doing a bookkeeping update and avoiding the
      // actual scan of the object - a typeArray contains no
      // references, and the metadata is built-in.
      process_grey_task_entry<false>(entry);
    } else {
      push(entry);
    }
  }
}
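
// Deal with a reference found during marking: account for it and grey the
// referenced object if it is non-NULL.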
inline void G1CMTask::deal_with_reference(oop obj) {
  increment_refs_reached();
  if (obj == NULL) {
    return;
  }
  make_reference_grey(obj);
}
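
// Mark the given object in the previous bitmap; it must not be marked yet.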
inline void G1ConcurrentMark::markPrev(oop p) {
  assert(!_prevMarkBitMap->is_marked((HeapWord*)p), "sanity");
  _prevMarkBitMap->mark((HeapWord*)p);
}
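
// Returns whether the given object is marked in the previous bitmap.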
bool G1ConcurrentMark::isPrevMarked(oop p) const {
  assert(p != NULL && oopDesc::is_oop(p), "expected an oop");
  return _prevMarkBitMap->is_marked((HeapWord*)p);
}
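
// Yield if the suspendible thread set requests it; returns true if we yielded.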
inline bool G1ConcurrentMark::do_yield_check() {
  if (SuspendibleThreadSet::should_yield()) {
    SuspendibleThreadSet::yield();
    return true;
  } else {
    return false;
  }
}

#endif // SHARE_VM_GC_G1_G1CONCURRENTMARK_INLINE_HPP