/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "include/jvm.h"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/growableArray.hpp"

bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
  assert(addr < _cm->finger(), "invariant");
  assert(addr >= _task->finger(), "invariant");

  // We move that task's local finger along.
  _task->move_finger_to(addr);

  _task->scan_task_entry(G1TaskQueueEntry::from_oop(oop(addr)));
  // we only partially drain the local queue and global stack
  _task->drain_local_queue(true);
  _task->drain_global_stack(true);

  // if the has_aborted flag has been raised, we need to bail out of
  // the iteration
  return !_task->has_aborted();
}
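
// G1CMMarkStack is the global overflow mark stack. It is a list of fixed-size chunks
// carved out of a single mmap'ed backing array: new chunks are bump-allocated from the
// array via an atomic high-water mark, and whole chunks are moved between a free list
// and the list of filled chunks (par_push_chunk()/par_pop_chunk()) under dedicated locks.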
G1CMMarkStack::G1CMMarkStack() :
  _max_chunk_capacity(0),
  _base(NULL),
  _chunk_capacity(0) {
  set_empty();
}
bool G1CMMarkStack::resize(size_t new_capacity) {
  assert(is_empty(), "Only resize when stack is empty.");
  assert(new_capacity <= _max_chunk_capacity,
         "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);

  TaskQueueEntryChunk* new_base = MmapArrayAllocator<TaskQueueEntryChunk>::allocate_or_null(new_capacity, mtGC);

  if (new_base == NULL) {
    log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(TaskQueueEntryChunk));
    return false;
  }
  // Release old mapping.
  if (_base != NULL) {
    MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
  }

  _base = new_base;
  _chunk_capacity = new_capacity;
  set_empty();

  return true;
}

size_t G1CMMarkStack::capacity_alignment() {
  return (size_t)lcm(os::vm_allocation_granularity(), sizeof(TaskQueueEntryChunk)) / sizeof(G1TaskQueueEntry);
}

bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
  guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");

  size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);

  _max_chunk_capacity = align_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
  size_t initial_chunk_capacity = align_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;

  guarantee(initial_chunk_capacity <= _max_chunk_capacity,
            "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
            _max_chunk_capacity,
            initial_chunk_capacity);

  log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT,
                initial_chunk_capacity, _max_chunk_capacity);

  return resize(initial_chunk_capacity);
}

void G1CMMarkStack::expand() {
  if (_chunk_capacity == _max_chunk_capacity) {
    log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity);
    return;
  }
  size_t old_capacity = _chunk_capacity;
  // Double capacity if possible
  size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);

  if (resize(new_capacity)) {
    log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                  old_capacity, new_capacity);
  } else {
    log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                    old_capacity, new_capacity);
  }
}

G1CMMarkStack::~G1CMMarkStack() {
  if (_base != NULL) {
    MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
  }
}
void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
  elem->next = *list;
  *list = elem;
}

void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
  MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_chunk_list, elem);
  _chunks_in_chunk_list++;
}

void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
  MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_free_list, elem);
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
  TaskQueueEntryChunk* result = *list;
  if (result != NULL) {
    *list = (*list)->next;
  }
  return result;
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
  MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
  TaskQueueEntryChunk* result = remove_chunk_from_list(&_chunk_list);
  if (result != NULL) {
    _chunks_in_chunk_list--;
  }
  return result;
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_free_list() {
  MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  return remove_chunk_from_list(&_free_list);
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
  // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
  // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding
  // wraparound of _hwm.
  if (_hwm >= _chunk_capacity) {
    return NULL;
  }

  size_t cur_idx = Atomic::add(1u, &_hwm) - 1;
  if (cur_idx >= _chunk_capacity) {
    return NULL;
  }

  TaskQueueEntryChunk* result = ::new (&_base[cur_idx]) TaskQueueEntryChunk;
  result->next = NULL;
  return result;
}

bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) {
  // Get a new chunk.
  TaskQueueEntryChunk* new_chunk = remove_chunk_from_free_list();

  if (new_chunk == NULL) {
    // Did not get a chunk from the free list. Allocate from backing memory.
    new_chunk = allocate_new_chunk();

    if (new_chunk == NULL) {
      return false;
    }
  }

  Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, EntriesPerChunk * sizeof(G1TaskQueueEntry));

  add_chunk_to_chunk_list(new_chunk);

  return true;
}

bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
  TaskQueueEntryChunk* cur = remove_chunk_from_chunk_list();

  if (cur == NULL) {
    return false;
  }

  Copy::conjoint_memory_atomic(cur->data, ptr_arr, EntriesPerChunk * sizeof(G1TaskQueueEntry));

  add_chunk_to_free_list(cur);
  return true;
}

void G1CMMarkStack::set_empty() {
  _chunks_in_chunk_list = 0;
  _hwm = 0;
  _chunk_list = NULL;
  _free_list = NULL;
}
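
// Root regions are handed out to scanning threads one at a time via an atomic
// increment of _claimed_root_regions. scan_finished() and wait_until_scan_finished()
// use RootRegionScan_lock so that other threads can block until the whole root region
// scan has completed (or has been aborted via _should_abort).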
G1CMRootRegions::G1CMRootRegions(uint const max_regions) :
  _root_regions(NEW_C_HEAP_ARRAY(HeapRegion*, max_regions, mtGC)),
  _max_regions(max_regions),
  _num_root_regions(0),
  _claimed_root_regions(0),
  _scan_in_progress(false),
  _should_abort(false) { }

G1CMRootRegions::~G1CMRootRegions() {
  FREE_C_HEAP_ARRAY(HeapRegion*, _max_regions);
}

void G1CMRootRegions::reset() {
  _num_root_regions = 0;
}

void G1CMRootRegions::add(HeapRegion* hr) {
  assert_at_safepoint();
  size_t idx = Atomic::add((size_t)1, &_num_root_regions) - 1;
  assert(idx < _max_regions, "Trying to add more root regions than there is space " SIZE_FORMAT, _max_regions);
  _root_regions[idx] = hr;
}

void G1CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  _scan_in_progress = _num_root_regions > 0;

  _claimed_root_regions = 0;
  _should_abort = false;
}

HeapRegion* G1CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  if (_claimed_root_regions >= _num_root_regions) {
    return NULL;
  }

  size_t claimed_index = Atomic::add((size_t)1, &_claimed_root_regions) - 1;
  if (claimed_index < _num_root_regions) {
    return _root_regions[claimed_index];
  }
  return NULL;
}

uint G1CMRootRegions::num_root_regions() const {
  return (uint)_num_root_regions;
}

void G1CMRootRegions::notify_scan_done() {
  MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
  _scan_in_progress = false;
  RootRegionScan_lock->notify_all();
}

void G1CMRootRegions::cancel_scan() {
  notify_scan_done();
}

void G1CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  if (!_should_abort) {
    assert(_claimed_root_regions >= num_root_regions(),
           "we should have claimed all root regions, claimed " SIZE_FORMAT ", length = %u",
           _claimed_root_regions, num_root_regions());
  }

  notify_scan_done();
}

bool G1CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) {
    return false;
  }

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}
// Returns the maximum number of workers to be used in a concurrent
// phase based on the number of GC workers being used in a STW
// phase.
static uint scale_concurrent_worker_threads(uint num_gc_workers) {
  return MAX2((num_gc_workers + 2) / 4, 1U);
}
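// For example, ParallelGCThreads = 8 yields (8 + 2) / 4 = 2 concurrent workers, while
// any value of ParallelGCThreads up to 2 still yields the minimum of one worker.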

G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
                                   G1RegionToSpaceMapper* prev_bitmap_storage,
                                   G1RegionToSpaceMapper* next_bitmap_storage) :
  // _cm_thread set inside the constructor
  _g1h(g1h),
  _completed_initialization(false),

  _mark_bitmap_1(),
  _mark_bitmap_2(),
  _prev_mark_bitmap(&_mark_bitmap_1),
  _next_mark_bitmap(&_mark_bitmap_2),

  _heap(_g1h->reserved_region()),

  _root_regions(_g1h->max_regions()),

  _global_mark_stack(),

  // _finger set in set_non_marking_state

  _worker_id_offset(G1DirtyCardQueueSet::num_par_ids() + G1ConcRefinementThreads),
  _max_num_tasks(ParallelGCThreads),
  // _num_active_tasks set in set_non_marking_state()
  // _tasks set inside the constructor

  _task_queues(new G1CMTaskQueueSet((int) _max_num_tasks)),
  _terminator((int) _max_num_tasks, _task_queues),

  _first_overflow_barrier_sync(),
  _second_overflow_barrier_sync(),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _restart_for_overflow(false),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),

  // _verbose_level set below

  _init_times(),
  _remark_times(),
  _remark_mark_times(),
  _remark_weak_ref_times(),
  _cleanup_times(),
  _total_cleanup_time(0.0),

  _accum_task_vtime(NULL),

  _concurrent_workers(NULL),
  _num_concurrent_workers(0),
  _max_concurrent_workers(0),

  _region_mark_stats(NEW_C_HEAP_ARRAY(G1RegionMarkStats, _g1h->max_regions(), mtGC)),
  _top_at_rebuild_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_regions(), mtGC))
{
  _mark_bitmap_1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _mark_bitmap_2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start ConcurrentMark thread.
  _cm_thread = new G1ConcurrentMarkThread(this);
  if (_cm_thread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "CGC_lock must be initialized");

  if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
    // Calculate the number of concurrent worker threads by scaling
    // the number of parallel GC threads.
    uint marking_thread_num = scale_concurrent_worker_threads(ParallelGCThreads);
    FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
  }

  assert(ConcGCThreads > 0, "ConcGCThreads have been set.");
  if (ConcGCThreads > ParallelGCThreads) {
    log_warning(gc)("More ConcGCThreads (%u) than ParallelGCThreads (%u).",
                    ConcGCThreads, ParallelGCThreads);
    return;
  }

  log_debug(gc)("ConcGCThreads: %u offset %u", ConcGCThreads, _worker_id_offset);
  log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);

  _num_concurrent_workers = ConcGCThreads;
  _max_concurrent_workers = _num_concurrent_workers;

  _concurrent_workers = new WorkGang("G1 Conc", _max_concurrent_workers, false, true);
  _concurrent_workers->initialize_workers();

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    size_t mark_stack_size =
      MIN2(MarkStackSizeMax,
          MAX2(MarkStackSize, (size_t) (_max_concurrent_workers * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
                      "must be between 1 and " SIZE_FORMAT,
                      mark_stack_size, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
                          "must be between 1 and " SIZE_FORMAT,
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
                          " or for MarkStackSizeMax (" SIZE_FORMAT ")",
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_global_mark_stack.initialize(MarkStackSize, MarkStackSizeMax)) {
    vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack.");
  }

  _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_num_tasks, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_num_tasks, mtGC);

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _num_active_tasks = _max_num_tasks;

  for (uint i = 0; i < _max_num_tasks; ++i) {
    G1CMTaskQueue* task_queue = new G1CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _tasks[i] = new G1CMTask(i, this, task_queue, _region_mark_stats, _g1h->max_regions());

    _accum_task_vtime[i] = 0.0;
  }

  reset_at_marking_complete();
  _completed_initialization = true;
}

void G1ConcurrentMark::reset() {
  _has_aborted = false;

  reset_marking_for_restart();

  // Reset all tasks, since different phases will use different number of active
  // threads. So, it's easiest to have all of them ready.
  for (uint i = 0; i < _max_num_tasks; ++i) {
    _tasks[i]->reset(_next_mark_bitmap);
  }

  uint max_regions = _g1h->max_regions();
  for (uint i = 0; i < max_regions; i++) {
    _top_at_rebuild_starts[i] = NULL;
    _region_mark_stats[i].clear();
  }
}

void G1ConcurrentMark::clear_statistics_in_region(uint region_idx) {
  for (uint j = 0; j < _max_num_tasks; ++j) {
    _tasks[j]->clear_mark_stats_cache(region_idx);
  }
  _top_at_rebuild_starts[region_idx] = NULL;
  _region_mark_stats[region_idx].clear();
}

void G1ConcurrentMark::clear_statistics(HeapRegion* r) {
  uint const region_idx = r->hrm_index();
  if (r->is_humongous()) {
    assert(r->is_starts_humongous(), "Got humongous continues region here");
    uint const size_in_regions = (uint)_g1h->humongous_obj_size_in_regions(oop(r->humongous_start_region()->bottom())->size());
    for (uint j = region_idx; j < (region_idx + size_in_regions); j++) {
      clear_statistics_in_region(j);
    }
  } else {
    clear_statistics_in_region(region_idx);
  }
}

static void clear_mark_if_set(G1CMBitMap* bitmap, HeapWord* addr) {
  if (bitmap->is_marked(addr)) {
    bitmap->clear(addr);
  }
}

void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
  assert_at_safepoint_on_vm_thread();

  // Need to clear all mark bits of the humongous object.
  clear_mark_if_set(_prev_mark_bitmap, r->bottom());
  clear_mark_if_set(_next_mark_bitmap, r->bottom());

  if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
    return;
  }

  // Clear any statistics about the region gathered so far.
  clear_statistics(r);
}

void G1ConcurrentMark::reset_marking_for_restart() {
  _global_mark_stack.set_empty();

  // Expand the marking stack, if we have to and if we can.
  if (has_overflown()) {
    _global_mark_stack.expand();

    uint max_regions = _g1h->max_regions();
    for (uint i = 0; i < max_regions; i++) {
      _region_mark_stats[i].clear_during_overflow();
    }
  }

  clear_has_overflown();
  _finger = _heap.start();

  for (uint i = 0; i < _max_num_tasks; ++i) {
    G1CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void G1ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_num_tasks, "we should not have more");

  _num_active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator.terminator()->reset_for_reuse((int) active_tasks);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;

  if (!concurrent) {
    // At this point we should be in a STW phase, and completed marking.
    assert_at_safepoint_on_vm_thread();
    assert(out_of_regions(),
           "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
           p2i(_finger), p2i(_heap.end()));
  }
}

void G1ConcurrentMark::reset_at_marking_complete() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_for_restart();
  _num_active_tasks = 0;
}

G1ConcurrentMark::~G1ConcurrentMark() {
  FREE_C_HEAP_ARRAY(HeapWord*, _top_at_rebuild_starts);
  FREE_C_HEAP_ARRAY(G1RegionMarkStats, _region_mark_stats);
  // The G1ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}
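
// Gang task that clears a mark bitmap in fixed-size chunks. When used concurrently the
// workers join the suspendible thread set and yield between chunks so that safepoints
// can proceed; marking abort is also checked after every yield.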
class G1ClearBitMapTask : public AbstractGangTask {
public:
  static size_t chunk_size() { return M; }

private:
  // Heap region closure used for clearing the given mark bitmap.
  class G1ClearBitmapHRClosure : public HeapRegionClosure {
  private:
    G1CMBitMap* _bitmap;
    G1ConcurrentMark* _cm;
  public:
    G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _bitmap(bitmap), _cm(cm) {
    }

    virtual bool do_heap_region(HeapRegion* r) {
      size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;

      HeapWord* cur = r->bottom();
      HeapWord* const end = r->end();

      while (cur < end) {
        MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
        _bitmap->clear_range(mr);

        cur += chunk_size_in_words;

        // Abort iteration if after yielding the marking has been aborted.
        if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
          return true;
        }
        // Repeat the asserts from before the start of the closure. We will do them
        // as asserts here to minimize their overhead on the product. However, we
        // will have them as guarantees at the beginning / end of the bitmap
        // clearing to get some checking in the product.
        assert(_cm == NULL || _cm->cm_thread()->during_cycle(), "invariant");
        assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_or_rebuild_in_progress(), "invariant");
      }
      assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());

      return false;
    }
  };

  G1ClearBitmapHRClosure _cl;
  HeapRegionClaimer _hr_claimer;
  bool _suspendible; // If the task is suspendible, workers must join the STS.

public:
  G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
    AbstractGangTask("G1 Clear Bitmap"),
    _cl(bitmap, suspendible ? cm : NULL),
    _hr_claimer(n_workers),
    _suspendible(suspendible)
  { }

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join(_suspendible);
    G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&_cl, &_hr_claimer, worker_id);
  }

  bool is_complete() {
    return _cl.is_complete();
  }
};
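
// Clearing work is sized in G1ClearBitMapTask::chunk_size() units: the number of work
// units is the number of bitmap bytes to clear rounded up to whole chunks, and the
// number of workers is capped by both that chunk count and the work gang's currently
// active workers.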
void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
  assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");

  size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
  size_t const num_chunks = align_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();

  uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());

  G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);

  log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
  workers->run_task(&cl, num_workers);
  guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
}

void G1ConcurrentMark::cleanup_for_next_mark() {
  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cm_thread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");

  clear_bitmap(_next_mark_bitmap, _concurrent_workers, true);

  // Repeat the asserts from above.
  guarantee(cm_thread()->during_cycle(), "invariant");
  guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
}

void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
  assert_at_safepoint_on_vm_thread();
  clear_bitmap(_prev_mark_bitmap, workers, false);
}

class NoteStartOfMarkHRClosure : public HeapRegionClosure {
public:
  bool do_heap_region(HeapRegion* r) {
    r->note_start_of_marking();
    return false;
  }
};

void G1ConcurrentMark::pre_initial_mark() {
  assert_at_safepoint_on_vm_thread();

  // Reset marking state.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  _g1h->heap_region_iterate(&startcl);

  _root_regions.reset();
}

void G1ConcurrentMark::post_initial_mark() {
  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = _g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
  // This is the start of the marking cycle; we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for it to be suspended for a Full GC or for an evacuation
 * pause to occur. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  bool barrier_aborted;
  {
    SuspendibleThreadSetLeaver sts_leave(concurrent());
    barrier_aborted = !_first_overflow_barrier_sync.enter();
  }

  // at this point everyone should have synced up and not be doing any
  // more work

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }
}

void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  SuspendibleThreadSetLeaver sts_leave(concurrent());
  _second_overflow_barrier_sync.enter();

  // at this point everything should be re-initialized and ready to go
}
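
// Gang task driving concurrent marking: each worker runs its G1CMTask inside the
// suspendible thread set, repeatedly calling do_marking_step() until either the task
// finishes or the whole marking phase has been aborted.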
class G1CMConcurrentMarkingTask : public AbstractGangTask {
  G1ConcurrentMark* _cm;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(), "Not a concurrent GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    {
      SuspendibleThreadSetJoiner sts_join;

      assert(worker_id < _cm->active_tasks(), "invariant");

      G1CMTask* task = _cm->task(worker_id);
      task->record_start_time();
      if (!_cm->has_aborted()) {
        do {
          task->do_marking_step(G1ConcMarkStepDurationMillis,
                                true  /* do_termination */,
                                false /* is_serial*/);

          _cm->do_yield_check();
        } while (!_cm->has_aborted() && task->has_aborted());
      }
      task->record_end_time();
      guarantee(!task->has_aborted() || _cm->has_aborted(), "invariant");
    }

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  G1CMConcurrentMarkingTask(G1ConcurrentMark* cm) :
      AbstractGangTask("Concurrent Mark"), _cm(cm) { }

  ~G1CMConcurrentMarkingTask() { }
};
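
// Decides how many concurrent marking workers to activate: either the configured
// maximum, or, when dynamic GC thread counts are enabled, a value computed by
// WorkerPolicy from the current number of non-daemon Java threads.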
uint G1ConcurrentMark::calc_active_marking_workers() {
  uint result = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    result = _max_concurrent_workers;
  } else {
    result =
      WorkerPolicy::calc_default_active_workers(_max_concurrent_workers,
                                                1, /* Minimum workers */
                                                _num_concurrent_workers,
                                                Threads::number_of_non_daemon_threads());
    // Don't scale the result down by scale_concurrent_workers() because
    // that scaling has already gone into "_max_concurrent_workers".
  }
  assert(result > 0 && result <= _max_concurrent_workers,
         "Calculated number of marking workers must be larger than zero and at most the maximum %u, but is %u",
         _max_concurrent_workers, result);
  return result;
}

void G1ConcurrentMark::scan_root_region(HeapRegion* hr, uint worker_id) {
  assert(hr->is_old() || (hr->is_survivor() && hr->next_top_at_mark_start() == hr->bottom()),
         "Root regions must be old or survivor but region %u is %s", hr->hrm_index(), hr->get_type_str());
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->next_top_at_mark_start();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate_size(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class G1CMRootRegionScanTask : public AbstractGangTask {
  G1ConcurrentMark* _cm;
public:
  G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
    AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    G1CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scan_root_region(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

void G1ConcurrentMark::scan_root_regions() {
  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");

    _num_concurrent_workers = MIN2(calc_active_marking_workers(),
                                   // We distribute work on a per-region basis, so starting
                                   // more threads than that is useless.
                                   root_regions()->num_root_regions());
    assert(_num_concurrent_workers <= _max_concurrent_workers,
           "Maximum number of marking threads exceeded");

    G1CMRootRegionScanTask task(this);
    log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
                        task.name(), _num_concurrent_workers, root_regions()->num_root_regions());
    _concurrent_workers->run_task(&task, _num_concurrent_workers);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void G1ConcurrentMark::concurrent_cycle_start() {
  _gc_timer_cm->register_gc_start();

  _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());

  _g1h->trace_heap_before_gc(_gc_tracer_cm);
}

void G1ConcurrentMark::concurrent_cycle_end() {
  _g1h->collector_state()->set_clearing_next_bitmap(false);

  _g1h->trace_heap_after_gc(_gc_tracer_cm);

  if (has_aborted()) {
    log_info(gc, marking)("Concurrent Mark Abort");
    _gc_tracer_cm->report_concurrent_mode_failure();
  }

  _gc_timer_cm->register_gc_end();

  _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
}

void G1ConcurrentMark::mark_from_roots() {
  _restart_for_overflow = false;

  _num_concurrent_workers = calc_active_marking_workers();

  uint active_workers = MAX2(1U, _num_concurrent_workers);

  // Setting active workers is not guaranteed since fewer
  // worker threads may currently exist and more may not be
  // available.
  active_workers = _concurrent_workers->update_active_workers(active_workers);
  log_info(gc, task)("Using %u workers of %u for marking", active_workers, _concurrent_workers->total_workers());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  G1CMConcurrentMarkingTask marking_task(this);
  _concurrent_workers->run_task(&marking_task);
  print_stats();
}

void G1ConcurrentMark::verify_during_pause(G1HeapVerifier::G1VerifyType type, VerifyOption vo, const char* caller) {
  G1HeapVerifier* verifier = _g1h->verifier();

  verifier->verify_region_sets_optional();

  if (VerifyDuringGC) {
    GCTraceTime(Debug, gc, phases) debug(caller, _gc_timer_cm);

    size_t const BufLen = 512;
    char buffer[BufLen];

    jio_snprintf(buffer, BufLen, "During GC (%s)", caller);
    verifier->verify(type, vo, buffer);
  }

  verifier->check_bitmaps(caller);
}
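
// Gang task run during the Remark pause. For every region it decides whether the
// remembered set must be rebuilt, records the region's top-at-rebuild-start, and
// attributes the marked (live) words found during marking to the region, distributing
// them across all regions of a humongous object.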
class G1UpdateRemSetTrackingBeforeRebuildTask : public AbstractGangTask {
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;
  HeapRegionClaimer _hrclaimer;
  uint volatile _total_selected_for_rebuild;

  G1PrintRegionLivenessInfoClosure _cl;

  class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure {
    G1CollectedHeap* _g1h;
    G1ConcurrentMark* _cm;

    G1PrintRegionLivenessInfoClosure* _cl;

    uint _num_regions_selected_for_rebuild;  // The number of regions actually selected for rebuild.

    void update_remset_before_rebuild(HeapRegion* hr) {
      G1RemSetTrackingPolicy* tracking_policy = _g1h->policy()->remset_tracker();

      bool selected_for_rebuild;
      if (hr->is_humongous()) {
        bool const is_live = _cm->liveness(hr->humongous_start_region()->hrm_index()) > 0;
        selected_for_rebuild = tracking_policy->update_humongous_before_rebuild(hr, is_live);
      } else {
        size_t const live_bytes = _cm->liveness(hr->hrm_index());
        selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
      }
      if (selected_for_rebuild) {
        _num_regions_selected_for_rebuild++;
      }
      _cm->update_top_at_rebuild_start(hr);
    }

    // Distribute the given words across the humongous object starting with hr and
    // note end of marking.
    void distribute_marked_bytes(HeapRegion* hr, size_t marked_words) {
      uint const region_idx = hr->hrm_index();
      size_t const obj_size_in_words = (size_t)oop(hr->bottom())->size();
      uint const num_regions_in_humongous = (uint)G1CollectedHeap::humongous_obj_size_in_regions(obj_size_in_words);

      // "Distributing" zero words means that we only note end of marking for these
      // regions.
      assert(marked_words == 0 || obj_size_in_words == marked_words,
             "Marked words should either be 0 or the same as humongous object (" SIZE_FORMAT ") but is " SIZE_FORMAT,
             obj_size_in_words, marked_words);

      for (uint i = region_idx; i < (region_idx + num_regions_in_humongous); i++) {
        HeapRegion* const r = _g1h->region_at(i);
        size_t const words_to_add = MIN2(HeapRegion::GrainWords, marked_words);

        log_trace(gc, marking)("Adding " SIZE_FORMAT " words to humongous region %u (%s)",
                               words_to_add, i, r->get_type_str());
        add_marked_bytes_and_note_end(r, words_to_add * HeapWordSize);
        marked_words -= words_to_add;
      }
      assert(marked_words == 0,
             SIZE_FORMAT " words left after distributing space across %u regions",
             marked_words, num_regions_in_humongous);
    }

    void update_marked_bytes(HeapRegion* hr) {
      uint const region_idx = hr->hrm_index();
      size_t const marked_words = _cm->liveness(region_idx);
      // The marking attributes the object's size completely to the humongous starts
      // region. We need to distribute this value across the entire set of regions a
      // humongous object spans.
      if (hr->is_humongous()) {
        assert(hr->is_starts_humongous() || marked_words == 0,
               "Should not have marked words " SIZE_FORMAT " in non-starts humongous region %u (%s)",
               marked_words, region_idx, hr->get_type_str());
        if (hr->is_starts_humongous()) {
          distribute_marked_bytes(hr, marked_words);
        }
      } else {
        log_trace(gc, marking)("Adding " SIZE_FORMAT " words to region %u (%s)", marked_words, region_idx, hr->get_type_str());
        add_marked_bytes_and_note_end(hr, marked_words * HeapWordSize);
      }
    }

    void add_marked_bytes_and_note_end(HeapRegion* hr, size_t marked_bytes) {
      hr->add_to_marked_bytes(marked_bytes);
      _cl->do_heap_region(hr);
      hr->note_end_of_marking();
    }

  public:
    G1UpdateRemSetTrackingBeforeRebuild(G1CollectedHeap* g1h, G1ConcurrentMark* cm, G1PrintRegionLivenessInfoClosure* cl) :
      _g1h(g1h), _cm(cm), _cl(cl), _num_regions_selected_for_rebuild(0) { }

    virtual bool do_heap_region(HeapRegion* r) {
      update_remset_before_rebuild(r);
      update_marked_bytes(r);
      return false;
    }

    uint num_selected_for_rebuild() const { return _num_regions_selected_for_rebuild; }
  };

public:
  G1UpdateRemSetTrackingBeforeRebuildTask(G1CollectedHeap* g1h, G1ConcurrentMark* cm, uint num_workers) :
    AbstractGangTask("G1 Update RemSet Tracking Before Rebuild"),
    _g1h(g1h), _cm(cm), _hrclaimer(num_workers), _total_selected_for_rebuild(0), _cl("Post-Marking") { }

  virtual void work(uint worker_id) {
    G1UpdateRemSetTrackingBeforeRebuild update_cl(_g1h, _cm, &_cl);
    _g1h->heap_region_par_iterate_from_worker_offset(&update_cl, &_hrclaimer, worker_id);
    Atomic::add(update_cl.num_selected_for_rebuild(), &_total_selected_for_rebuild);
  }

  uint total_selected_for_rebuild() const { return _total_selected_for_rebuild; }

  // Number of regions for which roughly one thread should be spawned for this work.
  static const uint RegionsPerThread = 384;
};
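
// Closure that finalizes the remembered set tracking state of each region once the
// remembered set rebuild has completed.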
class G1UpdateRemSetTrackingAfterRebuild : public HeapRegionClosure {
  G1CollectedHeap* _g1h;
public:
  G1UpdateRemSetTrackingAfterRebuild(G1CollectedHeap* g1h) : _g1h(g1h) { }

  virtual bool do_heap_region(HeapRegion* r) {
    _g1h->policy()->remset_tracker()->update_after_rebuild(r);
    return false;
  }
};
2018-03-29 14:08:10 +02:00
void G1ConcurrentMark : : remark ( ) {
assert_at_safepoint_on_vm_thread ( ) ;
2008-06-05 15:57:56 -07:00
2018-03-29 14:08:10 +02:00
// If a full collection has happened, we should not continue. However we might
// have ended up here as the Remark VM operation has been scheduled already.
2008-06-05 15:57:56 -07:00
if ( has_aborted ( ) ) {
return ;
}
2019-03-04 11:49:16 +01:00
G1Policy * policy = _g1h - > policy ( ) ;
policy - > record_concurrent_mark_remark_start ( ) ;
2008-06-05 15:57:56 -07:00
double start = os : : elapsedTime ( ) ;
2018-04-04 11:21:14 +02:00
verify_during_pause ( G1HeapVerifier : : G1VerifyRemark , VerifyOption_G1UsePrevMarking , " Remark before " ) ;
2008-06-05 15:57:56 -07:00
2018-04-04 11:21:14 +02:00
{
2018-04-18 11:36:48 +02:00
GCTraceTime ( Debug , gc , phases ) debug ( " Finalize Marking " , _gc_timer_cm ) ;
2018-04-04 11:21:14 +02:00
finalize_marking ( ) ;
}
2008-06-05 15:57:56 -07:00
2018-04-04 11:21:14 +02:00
double mark_work_end = os : : elapsedTime ( ) ;
2013-03-19 09:38:37 -07:00
2018-04-04 11:21:14 +02:00
bool const mark_finished = ! has_overflown ( ) ;
if ( mark_finished ) {
weak_refs_work ( false /* clear_all_soft_refs */ ) ;
2013-03-19 09:38:37 -07:00
2018-04-12 08:25:30 +02:00
SATBMarkQueueSet & satb_mq_set = G1BarrierSet : : satb_mark_queue_set ( ) ;
2008-06-05 15:57:56 -07:00
// We're done with marking.
2018-03-29 14:08:10 +02:00
// This is the end of the marking cycle, we're expected all
2010-03-18 12:14:59 -04:00
// threads to have SATB queues with active set to true.
2011-01-19 09:35:17 -05:00
satb_mq_set . set_active_all_threads ( false , /* new active value */
true /* expected_active */ ) ;
2009-06-12 16:20:16 -04:00
2018-03-26 16:51:43 +02:00
{
2018-04-18 11:36:48 +02:00
GCTraceTime ( Debug , gc , phases ) debug ( " Flush Task Caches " , _gc_timer_cm ) ;
2018-03-26 16:51:43 +02:00
flush_all_task_caches ( ) ;
}
2018-04-18 11:36:48 +02:00
    // Install newly created mark bitmap as "prev".
    swap_mark_bitmaps();

    {
      GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking Before Rebuild", _gc_timer_cm);

      uint const workers_by_capacity = (_g1h->num_regions() + G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread - 1) /
                                       G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread;
      uint const num_workers = MIN2(_g1h->workers()->active_workers(), workers_by_capacity);

      G1UpdateRemSetTrackingBeforeRebuildTask cl(_g1h, this, num_workers);
      log_debug(gc, ergo)("Running %s using %u workers for %u regions in heap", cl.name(), num_workers, _g1h->num_regions());
      _g1h->workers()->run_task(&cl, num_workers);

      log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
                                      _g1h->num_regions(), cl.total_selected_for_rebuild());
    }
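    // Worked example (illustrative only, not part of the implementation): with a
    // hypothetical 2048-region heap and RegionsPerThread == 384, the sizing above
    // gives workers_by_capacity = (2048 + 384 - 1) / 384 = 6, so even a work gang
    // with, say, 13 active workers runs this task with only 6 of them.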
    {
      GCTraceTime(Debug, gc, phases) debug("Reclaim Empty Regions", _gc_timer_cm);
      reclaim_empty_regions();
    }

    // Clean out dead classes.
    if (ClassUnloadingWithConcurrentMark) {
      GCTraceTime(Debug, gc, phases) debug("Purge Metaspace", _gc_timer_cm);
      ClassLoaderDataGraph::purge();
    }

    _g1h->resize_heap_if_necessary();
    compute_new_sizes();

    verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark after");
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed.
    reset_at_marking_complete();
  } else {
    // We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;

    verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark overflow");

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_for_restart();
  }

  {
    GCTraceTime(Debug, gc, phases) debug("Report Object Count", _gc_timer_cm);
    report_object_count(mark_finished);
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  policy->record_concurrent_mark_remark_end();
}
class G1ReclaimEmptyRegionsTask : public AbstractGangTask {
2018-03-28 16:39:32 +02:00
// Per-region work during the Cleanup pause.
2018-04-18 11:36:48 +02:00
class G1ReclaimEmptyRegionsClosure : public HeapRegionClosure {
2018-03-29 14:08:10 +02:00
G1CollectedHeap * _g1h ;
2018-03-28 16:39:32 +02:00
size_t _freed_bytes ;
FreeRegionList * _local_cleanup_list ;
uint _old_regions_removed ;
uint _humongous_regions_removed ;
2008-06-05 15:57:56 -07:00
2018-03-28 16:39:32 +02:00
public :
2018-04-18 11:36:48 +02:00
G1ReclaimEmptyRegionsClosure ( G1CollectedHeap * g1h ,
2018-11-28 11:06:58 +01:00
FreeRegionList * local_cleanup_list ) :
2018-04-18 11:36:48 +02:00
_g1h ( g1h ) ,
2018-03-28 16:39:32 +02:00
_freed_bytes ( 0 ) ,
_local_cleanup_list ( local_cleanup_list ) ,
_old_regions_removed ( 0 ) ,
2018-11-28 11:06:58 +01:00
_humongous_regions_removed ( 0 ) { }
    size_t freed_bytes() { return _freed_bytes; }
    const uint old_regions_removed() { return _old_regions_removed; }
    const uint humongous_regions_removed() { return _humongous_regions_removed; }

    bool do_heap_region(HeapRegion* hr) {
      if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
        _freed_bytes += hr->used();
        hr->set_containing_set(NULL);
        if (hr->is_humongous()) {
          _humongous_regions_removed++;
          _g1h->free_humongous_region(hr, _local_cleanup_list);
        } else {
          _old_regions_removed++;
          _g1h->free_region(hr, _local_cleanup_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
        }
        hr->clear_cardtable();
        _g1h->concurrent_mark()->clear_statistics_in_region(hr->hrm_index());
        log_trace(gc)("Reclaimed empty region %u (%s) bot " PTR_FORMAT, hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
      }

      return false;
    }
  };
2011-01-19 19:30:42 -05:00
2008-06-05 15:57:56 -07:00
G1CollectedHeap * _g1h ;
2011-01-19 19:30:42 -05:00
FreeRegionList * _cleanup_list ;
2014-10-07 14:54:53 +02:00
HeapRegionClaimer _hrclaimer ;
2011-01-19 19:30:42 -05:00
2008-06-05 15:57:56 -07:00
public :
2018-04-18 11:36:48 +02:00
G1ReclaimEmptyRegionsTask ( G1CollectedHeap * g1h , FreeRegionList * cleanup_list , uint n_workers ) :
2018-03-28 16:39:32 +02:00
AbstractGangTask ( " G1 Cleanup " ) ,
_g1h ( g1h ) ,
_cleanup_list ( cleanup_list ) ,
_hrclaimer ( n_workers ) {
2014-10-07 14:54:53 +02:00
}
  void work(uint worker_id) {
    FreeRegionList local_cleanup_list("Local Cleanup List");
    G1ReclaimEmptyRegionsClosure cl(_g1h, &local_cleanup_list);
    _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
    assert(cl.is_complete(), "Shouldn't have aborted!");

    // Now update the old/humongous region sets.
    _g1h->remove_from_old_sets(cl.old_regions_removed(), cl.humongous_regions_removed());
    {
      MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
      _g1h->decrement_summary_bytes(cl.freed_bytes());

      _cleanup_list->add_ordered(&local_cleanup_list);
      assert(local_cleanup_list.is_empty(), "post-condition");
    }
  }
};
void G1ConcurrentMark::reclaim_empty_regions() {
  WorkGang* workers = _g1h->workers();
  FreeRegionList empty_regions_list("Empty Regions After Mark List");

  G1ReclaimEmptyRegionsTask cl(_g1h, &empty_regions_list, workers->active_workers());
  workers->run_task(&cl);

  if (!empty_regions_list.is_empty()) {
    log_debug(gc)("Reclaimed %u empty regions", empty_regions_list.length());
    // Now print the empty regions list.
    G1HRPrinter* hrp = _g1h->hr_printer();
    if (hrp->is_active()) {
      FreeRegionListIterator iter(&empty_regions_list);
      while (iter.more_available()) {
        HeapRegion* hr = iter.get_next();
        hrp->cleanup(hr);
      }
    }
    // And actually make them available.
    _g1h->prepend_to_freelist(&empty_regions_list);
  }
}
void G1ConcurrentMark::compute_new_sizes() {
  MetaspaceGC::compute_new_size();

  // Cleanup will have freed any regions completely full of garbage.
  // Update the soft reference policy with the new heap occupancy.
  Universe::update_heap_info_at_gc();

  // We reclaimed old regions so we should calculate the sizes to make
  // sure we update the old gen/space data.
  _g1h->g1mm()->update_sizes();
}
2016-02-05 16:03:56 +01:00
void G1ConcurrentMark : : cleanup ( ) {
2018-03-29 14:08:10 +02:00
assert_at_safepoint_on_vm_thread ( ) ;
2008-06-05 15:57:56 -07:00
// If a full collection has happened, we shouldn't do this.
if ( has_aborted ( ) ) {
return ;
}
2019-03-04 11:49:16 +01:00
G1Policy * policy = _g1h - > policy ( ) ;
policy - > record_concurrent_mark_cleanup_start ( ) ;
2008-06-05 15:57:56 -07:00
double start = os : : elapsedTime ( ) ;
2018-04-18 11:36:48 +02:00
verify_during_pause ( G1HeapVerifier : : G1VerifyCleanup , VerifyOption_G1UsePrevMarking , " Cleanup before " ) ;
2018-04-04 11:21:14 +02:00
2016-04-06 13:32:48 +02:00
{
2018-04-18 11:36:48 +02:00
GCTraceTime ( Debug , gc , phases ) debug ( " Update Remembered Set Tracking After Rebuild " , _gc_timer_cm ) ;
2018-03-26 16:51:43 +02:00
G1UpdateRemSetTrackingAfterRebuild cl ( _g1h ) ;
2018-03-29 14:08:10 +02:00
_g1h - > heap_region_iterate ( & cl ) ;
2012-01-12 00:06:47 -08:00
}
2015-12-10 14:57:55 +01:00
if ( log_is_enabled ( Trace , gc , liveness ) ) {
2018-03-26 16:51:43 +02:00
G1PrintRegionLivenessInfoClosure cl ( " Post-Cleanup " ) ;
2011-04-04 14:23:17 -04:00
_g1h - > heap_region_iterate ( & cl ) ;
}
2018-04-04 11:21:14 +02:00
verify_during_pause ( G1HeapVerifier : : G1VerifyCleanup , VerifyOption_G1UsePrevMarking , " Cleanup after " ) ;
  // We need to make this a "collection" so any collection pause that
  // races with it goes around and waits for Cleanup to finish.
  _g1h->increment_total_collections();
// Local statistics
double recent_cleanup_time = ( os : : elapsedTime ( ) - start ) ;
_total_cleanup_time + = recent_cleanup_time ;
_cleanup_times . add ( recent_cleanup_time ) ;
{
2018-04-18 11:36:48 +02:00
GCTraceTime ( Debug , gc , phases ) debug ( " Finalize Concurrent Mark Cleanup " , _gc_timer_cm ) ;
2019-03-04 11:49:16 +01:00
policy - > record_concurrent_mark_cleanup_end ( ) ;
2018-04-04 11:21:14 +02:00
}
2008-06-05 15:57:56 -07:00
}
2013-02-05 09:13:05 -08:00
// 'Keep Alive' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a worker thread (for serial reference
// processing the G1CMTask for worker 0 is used) to preserve (mark) and
// trace referent objects.
//
// Using the G1CMTask and embedded local queues avoids having the worker
// threads operating on the global mark stack. This reduces the risk
// of overflowing the stack - which we would rather avoid at this late
// state. Also using the tasks' local queues removes the potential
// of the workers interfering with each other that could occur if
// operating on the global stack.
2018-03-29 14:08:10 +02:00
class G1CMKeepAliveAndDrainClosure : public OopClosure {
  G1ConcurrentMark* _cm;
  G1CMTask*         _task;
  uint              _ref_counter_limit;
  uint              _ref_counter;
  bool              _is_serial;
public:
  G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _ref_counter_limit(G1RefProcDrainInterval),
    _ref_counter(_ref_counter_limit), _is_serial(is_serial) {
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  }
virtual void do_oop ( narrowOop * p ) { do_oop_work ( p ) ; }
virtual void do_oop ( oop * p ) { do_oop_work ( p ) ; }
template < class T > void do_oop_work ( T * p ) {
2018-04-18 11:36:48 +02:00
if ( _cm - > has_overflown ( ) ) {
return ;
}
if ( ! _task - > deal_with_reference ( p ) ) {
// We did not add anything to the mark bitmap (or mark stack), so there is
// no point trying to drain it.
return ;
}
_ref_counter - - ;
if ( _ref_counter = = 0 ) {
// We have dealt with _ref_counter_limit references, pushing them
// and objects reachable from them on to the local stack (and
// possibly the global stack). Call G1CMTask::do_marking_step() to
// process these entries.
//
// We call G1CMTask::do_marking_step() in a loop, which we'll exit if
// there's nothing more to do (i.e. we're done with the entries that
// were pushed as a result of the G1CMTask::deal_with_reference() calls
// above) or we overflow.
//
// Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
// flag while there may still be some work to do. (See the comment at
// the beginning of G1CMTask::do_marking_step() for those conditions -
// one of which is reaching the specified time target.) It is only
// when G1CMTask::do_marking_step() returns without setting the
// has_aborted() flag that the marking step has completed.
do {
double mark_step_duration_ms = G1ConcMarkStepDurationMillis ;
_task - > do_marking_step ( mark_step_duration_ms ,
false /* do_termination */ ,
_is_serial ) ;
} while ( _task - > has_aborted ( ) & & ! _cm - > has_overflown ( ) ) ;
_ref_counter = _ref_counter_limit ;
2011-01-25 10:56:22 -08:00
}
}
} ;
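// Worked example of the batching above (illustrative only, values hypothetical):
// with G1RefProcDrainInterval == 1000, a run of 2500 successful
// deal_with_reference() calls triggers the do_marking_step() drain loop after
// references #1000 and #2000; the remaining 500 entries stay on the task's
// local queue until the 'Drain' closure below empties it completely.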
2013-02-05 09:13:05 -08:00
// 'Drain' oop closure used by both serial and parallel reference processing.
2016-02-05 16:03:56 +01:00
// Uses the G1CMTask associated with a given worker thread (for serial
// reference processing the G1CMtask for worker 0 is used). Calls the
2013-02-05 09:13:05 -08:00
// do_marking_step routine, with an unbelievably large timeout value,
// to drain the marking data structures of the remaining entries
// added by the 'keep alive' oop closure above.
2018-03-29 14:08:10 +02:00
class G1CMDrainMarkingStackClosure : public VoidClosure {
2016-02-05 16:03:56 +01:00
G1ConcurrentMark * _cm ;
G1CMTask * _task ;
bool _is_serial ;
2011-01-25 10:56:22 -08:00
public :
2016-02-05 16:03:56 +01:00
G1CMDrainMarkingStackClosure ( G1ConcurrentMark * cm , G1CMTask * task , bool is_serial ) :
2013-03-18 11:05:27 -07:00
_cm ( cm ) , _task ( task ) , _is_serial ( is_serial ) {
assert ( ! _is_serial | | _task - > worker_id ( ) = = 0 , " only task 0 for serial code " ) ;
2013-02-05 09:13:05 -08:00
}
2011-01-25 10:56:22 -08:00
void do_void ( ) {
do {
2016-02-05 16:03:56 +01:00
// We call G1CMTask::do_marking_step() to completely drain the local
2013-02-05 09:13:05 -08:00
// and global marking stacks of entries pushed by the 'keep alive'
// oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
//
2016-02-05 16:03:56 +01:00
      // G1CMTask::do_marking_step() is called in a loop, which we'll exit
      // if there's nothing more to do (i.e. we've completely drained the
      // entries that were pushed as a result of applying the 'keep alive'
      // closure to the entries on the discovered ref lists) or we overflow
      // the global marking stack.
//
2016-02-05 16:03:56 +01:00
// Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
2013-02-05 09:13:05 -08:00
// flag while there may still be some work to do. (See the comment at
2016-02-05 16:03:56 +01:00
// the beginning of G1CMTask::do_marking_step() for those conditions -
2013-02-05 09:13:05 -08:00
// one of which is reaching the specified time target.) It is only
2016-02-05 16:03:56 +01:00
// when G1CMTask::do_marking_step() returns without setting the
2013-02-05 09:13:05 -08:00
// has_aborted() flag that the marking step has completed.
2011-01-25 10:56:22 -08:00
_task - > do_marking_step ( 1000000000.0 /* something very large */ ,
2013-03-18 11:05:27 -07:00
true /* do_termination */ ,
_is_serial ) ;
2011-01-25 10:56:22 -08:00
} while ( _task - > has_aborted ( ) & & ! _cm - > has_overflown ( ) ) ;
}
} ;
2011-09-22 10:57:37 -07:00
// Implementation of AbstractRefProcTaskExecutor for parallel
// reference processing at the end of G1 concurrent marking
2018-03-29 14:08:10 +02:00
class G1CMRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
2011-01-25 10:56:22 -08:00
private :
2016-02-05 16:03:56 +01:00
G1CollectedHeap * _g1h ;
G1ConcurrentMark * _cm ;
WorkGang * _workers ;
uint _active_workers ;
2011-01-25 10:56:22 -08:00
public :
2011-09-22 10:57:37 -07:00
G1CMRefProcTaskExecutor ( G1CollectedHeap * g1h ,
2016-02-05 16:03:56 +01:00
G1ConcurrentMark * cm ,
2015-04-29 15:12:33 +03:00
WorkGang * workers ,
uint n_workers ) :
2011-11-18 12:27:10 -08:00
_g1h ( g1h ) , _cm ( cm ) ,
_workers ( workers ) , _active_workers ( n_workers ) { }
2011-01-25 10:56:22 -08:00
2018-06-18 12:11:59 +02:00
virtual void execute ( ProcessTask & task , uint ergo_workers ) ;
2011-01-25 10:56:22 -08:00
} ;
2018-03-29 14:08:10 +02:00
class G1CMRefProcTaskProxy : public AbstractGangTask {
2011-01-25 10:56:22 -08:00
typedef AbstractRefProcTaskExecutor : : ProcessTask ProcessTask ;
2016-02-05 16:03:56 +01:00
ProcessTask & _proc_task ;
G1CollectedHeap * _g1h ;
G1ConcurrentMark * _cm ;
2011-01-25 10:56:22 -08:00
public :
2011-09-22 10:57:37 -07:00
G1CMRefProcTaskProxy ( ProcessTask & proc_task ,
2016-02-05 16:03:56 +01:00
G1CollectedHeap * g1h ,
G1ConcurrentMark * cm ) :
2011-01-25 10:56:22 -08:00
AbstractGangTask ( " Process reference objects in parallel " ) ,
2013-02-05 09:13:05 -08:00
_proc_task ( proc_task ) , _g1h ( g1h ) , _cm ( cm ) {
2013-03-18 11:05:27 -07:00
ReferenceProcessor * rp = _g1h - > ref_processor_cm ( ) ;
assert ( rp - > processing_is_mt ( ) , " shouldn't be here otherwise " ) ;
}
2011-01-25 10:56:22 -08:00
2011-12-14 13:34:57 -08:00
virtual void work ( uint worker_id ) {
2014-07-29 10:26:09 +02:00
ResourceMark rm ;
HandleMark hm ;
2016-02-05 16:03:56 +01:00
G1CMTask * task = _cm - > task ( worker_id ) ;
2011-01-25 10:56:22 -08:00
G1CMIsAliveClosure g1_is_alive ( _g1h ) ;
2013-03-18 11:05:27 -07:00
G1CMKeepAliveAndDrainClosure g1_par_keep_alive ( _cm , task , false /* is_serial */ ) ;
G1CMDrainMarkingStackClosure g1_par_drain ( _cm , task , false /* is_serial */ ) ;
2011-01-25 10:56:22 -08:00
2011-12-14 13:34:57 -08:00
_proc_task . work ( worker_id , g1_is_alive , g1_par_keep_alive , g1_par_drain ) ;
2011-01-25 10:56:22 -08:00
}
} ;
2018-06-18 12:11:59 +02:00
void G1CMRefProcTaskExecutor : : execute ( ProcessTask & proc_task , uint ergo_workers ) {
2011-01-25 10:56:22 -08:00
assert ( _workers ! = NULL , " Need parallel worker threads. " ) ;
2013-02-05 09:13:05 -08:00
assert ( _g1h - > ref_processor_cm ( ) - > processing_is_mt ( ) , " processing is not MT " ) ;
2018-06-18 12:11:59 +02:00
assert ( _workers - > active_workers ( ) > = ergo_workers ,
" Ergonomically chosen workers(%u) should be less than or equal to active workers(%u) " ,
ergo_workers , _workers - > active_workers ( ) ) ;
2011-01-25 10:56:22 -08:00
2011-11-18 12:27:10 -08:00
G1CMRefProcTaskProxy proc_task_proxy ( proc_task , _g1h , _cm ) ;
2011-01-25 10:56:22 -08:00
2013-03-19 00:57:39 -07:00
  // We need to reset the concurrency level before each
  // proxy task execution, so that the termination protocol
  // and overflow handling in G1CMTask::do_marking_step() know
  // how many workers to wait for.
  _cm->set_concurrency(ergo_workers);
  _workers->run_task(&proc_task_proxy, ergo_workers);
2011-01-25 10:56:22 -08:00
}
2017-10-23 11:46:12 +02:00
void G1ConcurrentMark : : weak_refs_work ( bool clear_all_soft_refs ) {
2008-06-05 15:57:56 -07:00
ResourceMark rm ;
HandleMark hm ;
2011-09-21 10:04:45 -07:00
// Is alive closure.
2018-03-29 14:08:10 +02:00
G1CMIsAliveClosure g1_is_alive ( _g1h ) ;
2011-09-21 10:04:45 -07:00
2018-08-14 18:42:14 -05:00
// Inner scope to exclude the cleaning of the string table
// from the displayed time.
2011-09-21 10:04:45 -07:00
{
2018-04-18 11:36:48 +02:00
GCTraceTime ( Debug , gc , phases ) debug ( " Reference Processing " , _gc_timer_cm ) ;
2010-12-01 17:34:02 -08:00
2018-03-29 14:08:10 +02:00
ReferenceProcessor * rp = _g1h - > ref_processor_cm ( ) ;
2008-06-05 15:57:56 -07:00
2011-09-21 10:04:45 -07:00
// See the comment in G1CollectedHeap::ref_processing_init()
// about how reference processing currently works in G1.
2011-01-25 10:56:22 -08:00
2013-02-05 09:13:05 -08:00
// Set the soft reference policy
2011-09-21 10:04:45 -07:00
rp - > setup_policy ( clear_all_soft_refs ) ;
2016-07-15 13:33:44 +02:00
assert ( _global_mark_stack . is_empty ( ) , " mark stack should be empty " ) ;
2011-01-25 10:56:22 -08:00
2017-10-23 11:20:53 +02:00
    // Instances of the 'Keep Alive' and 'Complete GC' closures used
    // in serial reference processing. Note these closures are also
    // used for serially processing (by the current thread) the
    // JNI references during parallel reference processing.
    //
    // These closures do not need to synchronize with the worker
    // threads involved in parallel reference processing as these
    // instances are executed serially by the current thread (e.g.
    // reference processing is not multi-threaded and is thus
    // performed by the current thread instead of a gang worker).
    //
    // The gang tasks involved in parallel reference processing create
    // their own instances of these closures, which do their own
    // synchronization among themselves.
    G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
    G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
2013-03-18 11:05:27 -07:00
// We need at least one active thread. If reference processing
// is not multi-threaded we use the current (VMThread) thread,
// otherwise we use the work gang from the G1CollectedHeap and
// we utilize all the worker threads we can.
2014-10-21 11:57:22 +02:00
bool processing_is_mt = rp - > processing_is_mt ( ) ;
2018-03-29 14:08:10 +02:00
uint active_workers = ( processing_is_mt ? _g1h - > workers ( ) - > active_workers ( ) : 1U ) ;
2017-10-25 16:13:09 +02:00
active_workers = MAX2 ( MIN2 ( active_workers , _max_num_tasks ) , 1U ) ;
2011-01-25 10:56:22 -08:00
2013-03-18 11:05:27 -07:00
// Parallel processing task executor.
2018-03-29 14:08:10 +02:00
G1CMRefProcTaskExecutor par_task_executor ( _g1h , this ,
_g1h - > workers ( ) , active_workers ) ;
2013-03-18 11:05:27 -07:00
AbstractRefProcTaskExecutor * executor = ( processing_is_mt ? & par_task_executor : NULL ) ;
2011-09-21 10:04:45 -07:00
2013-03-19 00:57:39 -07:00
// Set the concurrency level. The phase was already set prior to
// executing the remark task.
set_concurrency ( active_workers ) ;
2013-02-05 09:13:05 -08:00
// Set the degree of MT processing here. If the discovery was done MT,
// the number of threads involved during discovery could differ from
// the number of active workers. This is OK as long as the discovered
// Reference lists are balanced (see balance_all_queues() and balance_queues()).
rp - > set_active_mt_degree ( active_workers ) ;
2018-06-04 21:20:16 -07:00
ReferenceProcessorPhaseTimes pt ( _gc_timer_cm , rp - > max_num_queues ( ) ) ;
2017-08-10 18:09:19 -07:00
2013-02-05 09:13:05 -08:00
// Process the weak references.
2013-06-10 11:30:51 +02:00
const ReferenceProcessorStats & stats =
rp - > process_discovered_references ( & g1_is_alive ,
& g1_keep_alive ,
& g1_drain_mark_stack ,
executor ,
2017-08-10 18:09:19 -07:00
& pt ) ;
2016-03-17 11:18:52 -07:00
_gc_tracer_cm - > report_gc_reference_stats ( stats ) ;
2017-08-10 18:09:19 -07:00
pt . print_all_references ( ) ;
2011-01-25 10:56:22 -08:00
2013-02-05 09:13:05 -08:00
// The do_oop work routines of the keep_alive and drain_marking_stack
// oop closures will set the has_overflown flag if we overflow the
// global marking stack.
2011-01-25 10:56:22 -08:00
2017-03-09 10:48:44 +01:00
assert ( has_overflown ( ) | | _global_mark_stack . is_empty ( ) ,
2018-03-29 14:08:10 +02:00
" Mark stack should be empty (unless it has overflown) " ) ;
2011-01-25 10:56:22 -08:00
2018-05-03 14:10:08 +02:00
assert ( rp - > num_queues ( ) = = active_workers , " why not " ) ;
2013-02-05 09:13:05 -08:00
2011-09-21 10:04:45 -07:00
rp - > verify_no_references_recorded ( ) ;
2011-09-22 10:57:37 -07:00
assert ( ! rp - > discovery_enabled ( ) , " Post condition " ) ;
2011-01-25 10:56:22 -08:00
}
2014-03-17 15:18:45 +01:00
  if (has_overflown()) {
    // We cannot trust g1_is_alive and the contents of the heap if the marking stack
    // overflowed while processing references. Exit the VM.
    fatal("Overflow during reference processing, can not continue. Please "
          "increase MarkStackSizeMax (current value: " SIZE_FORMAT ") and "
          "restart.", MarkStackSizeMax);
    return;
  }
2016-07-15 13:33:44 +02:00
assert ( _global_mark_stack . is_empty ( ) , " Marking should have completed " ) ;
2014-07-07 10:12:40 +02:00
2018-04-23 16:00:56 -04:00
{
GCTraceTime ( Debug , gc , phases ) debug ( " Weak Processing " , _gc_timer_cm ) ;
2018-08-28 12:57:40 -04:00
WeakProcessor : : weak_oops_do ( _g1h - > workers ( ) , & g1_is_alive , & do_nothing_cl , 1 ) ;
2018-04-23 16:00:56 -04:00
}
2018-08-14 18:42:14 -05:00
// Unload Klasses, String, Code Cache, etc.
2016-03-18 08:59:07 +01:00
if ( ClassUnloadingWithConcurrentMark ) {
2017-03-24 10:27:04 +01:00
GCTraceTime ( Debug , gc , phases ) debug ( " Class Unloading " , _gc_timer_cm ) ;
2018-11-13 11:45:16 +01:00
bool purged_classes = SystemDictionary : : do_unloading ( _gc_timer_cm ) ;
2018-03-29 14:08:10 +02:00
_g1h - > complete_cleaning ( & g1_is_alive , purged_classes ) ;
2019-01-29 11:30:17 +01:00
} else if ( StringDedup : : is_enabled ( ) ) {
GCTraceTime ( Debug , gc , phases ) debug ( " String Deduplication " , _gc_timer_cm ) ;
_g1h - > string_dedup_cleaning ( & g1_is_alive , NULL ) ;
2016-03-18 08:59:07 +01:00
}
2008-06-05 15:57:56 -07:00
}
2018-05-14 11:47:03 +02:00
class G1PrecleanYieldClosure : public YieldClosure {
G1ConcurrentMark * _cm ;
public :
G1PrecleanYieldClosure ( G1ConcurrentMark * cm ) : _cm ( cm ) { }
virtual bool should_return ( ) {
return _cm - > has_aborted ( ) ;
}
virtual bool should_return_fine_grain ( ) {
_cm - > do_yield_check ( ) ;
return _cm - > has_aborted ( ) ;
}
} ;
void G1ConcurrentMark : : preclean ( ) {
assert ( G1UseReferencePrecleaning , " Precleaning must be enabled. " ) ;
SuspendibleThreadSetJoiner joiner ;
G1CMKeepAliveAndDrainClosure keep_alive ( this , task ( 0 ) , true /* is_serial */ ) ;
G1CMDrainMarkingStackClosure drain_mark_stack ( this , task ( 0 ) , true /* is_serial */ ) ;
set_concurrency_and_phase ( 1 , true ) ;
G1PrecleanYieldClosure yield_cl ( this ) ;
ReferenceProcessor * rp = _g1h - > ref_processor_cm ( ) ;
// Precleaning is single threaded. Temporarily disable MT discovery.
ReferenceProcessorMTDiscoveryMutator rp_mut_discovery ( rp , false ) ;
rp - > preclean_discovered_references ( rp - > is_alive_non_header ( ) ,
& keep_alive ,
& drain_mark_stack ,
& yield_cl ,
_gc_timer_cm ) ;
}
2018-04-18 11:36:48 +02:00
// When sampling object counts, we already swapped the mark bitmaps, so we need to use
// the prev bitmap for determining liveness.
class G1ObjectCountIsAliveClosure : public BoolObjectClosure {
  G1CollectedHeap* _g1h;
public:
  G1ObjectCountIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) { }

  bool do_object_b(oop obj) {
    HeapWord* addr = (HeapWord*)obj;
    return addr != NULL &&
           (!_g1h->is_in_g1_reserved(addr) || !_g1h->is_obj_dead(obj));
  }
};

void G1ConcurrentMark::report_object_count(bool mark_completed) {
  // Depending on the completion of the marking, liveness needs to be
  // determined using either the next or the prev bitmap.
  if (mark_completed) {
    G1ObjectCountIsAliveClosure is_alive(_g1h);
    _gc_tracer_cm->report_object_count_after_gc(&is_alive);
  } else {
    G1CMIsAliveClosure is_alive(_g1h);
    _gc_tracer_cm->report_object_count_after_gc(&is_alive);
  }
}
2018-04-18 11:36:48 +02:00
2017-10-23 11:46:12 +02:00
void G1ConcurrentMark::swap_mark_bitmaps() {
  G1CMBitMap* temp = _prev_mark_bitmap;
  _prev_mark_bitmap = _next_mark_bitmap;
  _next_mark_bitmap = temp;
  _g1h->collector_state()->set_clearing_next_bitmap(true);
}
2015-05-01 17:38:12 -04:00
// Closure for marking entries in SATB buffers.
2016-02-05 16:03:56 +01:00
class G1CMSATBBufferClosure : public SATBBufferClosure {
2014-07-07 10:12:40 +02:00
private :
2016-02-05 16:03:56 +01:00
G1CMTask * _task ;
2015-05-01 17:38:12 -04:00
G1CollectedHeap * _g1h ;
2014-07-07 10:12:40 +02:00
2016-02-05 16:03:56 +01:00
// This is very similar to G1CMTask::deal_with_reference, but with
2015-05-01 17:38:12 -04:00
// more relaxed requirements for the argument, so this must be more
// circumspect about treating the argument as an object.
void do_entry ( void * entry ) const {
_task - > increment_refs_reached ( ) ;
2017-08-04 14:28:57 +02:00
oop const obj = static_cast < oop > ( entry ) ;
_task - > make_reference_grey ( obj ) ;
2014-07-07 10:12:40 +02:00
}
2015-05-01 17:38:12 -04:00
public :
2016-02-05 16:03:56 +01:00
G1CMSATBBufferClosure ( G1CMTask * task , G1CollectedHeap * g1h )
2015-05-01 17:38:12 -04:00
: _task ( task ) , _g1h ( g1h ) { }
virtual void do_buffer ( void * * buffer , size_t size ) {
for ( size_t i = 0 ; i < size ; + + i ) {
do_entry ( buffer [ i ] ) ;
}
}
2014-07-07 10:12:40 +02:00
} ;
class G1RemarkThreadsClosure : public ThreadClosure {
2016-02-05 16:03:56 +01:00
G1CMSATBBufferClosure _cm_satb_cl ;
2014-07-07 10:12:40 +02:00
G1CMOopClosure _cm_cl ;
MarkingCodeBlobClosure _code_cl ;
int _thread_parity ;
public :
2016-02-05 16:03:56 +01:00
G1RemarkThreadsClosure ( G1CollectedHeap * g1h , G1CMTask * task ) :
2015-05-01 17:38:12 -04:00
_cm_satb_cl ( task , g1h ) ,
2018-03-29 14:08:10 +02:00
_cm_cl ( g1h , task ) ,
2015-05-01 17:38:12 -04:00
_code_cl ( & _cm_cl , ! CodeBlobToOopClosure : : FixRelocations ) ,
2015-03-31 07:54:56 +02:00
_thread_parity ( Threads : : thread_claim_parity ( ) ) { }
2014-07-07 10:12:40 +02:00
void do_thread ( Thread * thread ) {
2019-03-05 19:54:33 -05:00
if ( thread - > claim_oops_do ( true , _thread_parity ) ) {
SATBMarkQueue & queue = G1ThreadLocalData : : satb_mark_queue ( thread ) ;
queue . apply_closure_and_empty ( & _cm_satb_cl ) ;
      if (thread->is_Java_thread()) {
        // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
        // however, oops reachable from nmethods have very complex lifecycles:
        // * Alive if on the stack of an executing method
        // * Weakly reachable otherwise
        // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
        // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
        JavaThread* jt = (JavaThread*)thread;
        jt->nmethods_do(&_code_cl);
      }
}
}
} ;
2018-03-29 14:08:10 +02:00
class G1CMRemarkTask : public AbstractGangTask {
2016-02-05 16:03:56 +01:00
G1ConcurrentMark * _cm ;
2008-06-05 15:57:56 -07:00
public :
2011-12-14 13:34:57 -08:00
void work ( uint worker_id ) {
2017-11-13 15:28:17 +01:00
G1CMTask * task = _cm - > task ( worker_id ) ;
task - > record_start_time ( ) ;
{
ResourceMark rm ;
HandleMark hm ;
2014-07-07 10:12:40 +02:00
2017-11-13 15:28:17 +01:00
G1RemarkThreadsClosure threads_f ( G1CollectedHeap : : heap ( ) , task ) ;
Threads : : threads_do ( & threads_f ) ;
2008-06-05 15:57:56 -07:00
}
2017-11-13 15:28:17 +01:00
do {
task - > do_marking_step ( 1000000000.0 /* something very large */ ,
true /* do_termination */ ,
false /* is_serial */ ) ;
} while ( task - > has_aborted ( ) & & ! _cm - > has_overflown ( ) ) ;
// If we overflow, then we do not want to restart. We instead
// want to abort remark and do concurrent marking again.
task - > record_end_time ( ) ;
2008-06-05 15:57:56 -07:00
}
2016-02-05 16:03:56 +01:00
G1CMRemarkTask ( G1ConcurrentMark * cm , uint active_workers ) :
2014-10-21 11:57:22 +02:00
AbstractGangTask ( " Par Remark " ) , _cm ( cm ) {
2011-12-16 11:40:00 -08:00
_cm - > terminator ( ) - > reset_for_reuse ( active_workers ) ;
2011-08-09 10:16:01 -07:00
}
2008-06-05 15:57:56 -07:00
} ;
2018-03-29 14:08:10 +02:00
void G1ConcurrentMark : : finalize_marking ( ) {
2008-06-05 15:57:56 -07:00
ResourceMark rm ;
HandleMark hm ;
2018-03-29 14:08:10 +02:00
_g1h - > ensure_parsability ( false ) ;
2008-06-05 15:57:56 -07:00
2014-10-21 11:57:22 +02:00
  // This is remark, so we'll use up all active threads.
  uint active_workers = _g1h->workers()->active_workers();
  set_concurrency_and_phase(active_workers, false /* concurrent */);
  // Leave _parallel_marking_threads at its
  // value originally calculated in the G1ConcurrentMark
  // constructor and pass values of the active workers
  // through the gang in the task.
{
StrongRootsScope srs ( active_workers ) ;
2016-02-05 16:03:56 +01:00
G1CMRemarkTask remarkTask ( this , active_workers ) ;
2015-05-21 09:23:00 +02:00
// We will start all available threads, even if we decide that the
// active_workers will be fewer. The extra ones will just bail out
// immediately.
2018-03-29 14:08:10 +02:00
_g1h - > workers ( ) - > run_task ( & remarkTask ) ;
2015-05-21 09:23:00 +02:00
}
2014-10-21 11:57:22 +02:00
2018-04-12 08:25:30 +02:00
SATBMarkQueueSet & satb_mq_set = G1BarrierSet : : satb_mark_queue_set ( ) ;
2013-03-19 09:38:37 -07:00
guarantee ( has_overflown ( ) | |
satb_mq_set . completed_buffers_num ( ) = = 0 ,
2016-02-26 14:02:39 -05:00
" Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT ,
2015-09-29 11:02:08 +02:00
BOOL_TO_STR ( has_overflown ( ) ) ,
satb_mq_set . completed_buffers_num ( ) ) ;
2008-06-05 15:57:56 -07:00
print_stats ( ) ;
}
2018-03-26 16:51:43 +02:00
void G1ConcurrentMark : : flush_all_task_caches ( ) {
size_t hits = 0 ;
size_t misses = 0 ;
for ( uint i = 0 ; i < _max_num_tasks ; i + + ) {
Pair < size_t , size_t > stats = _tasks [ i ] - > flush_mark_stats_cache ( ) ;
hits + = stats . first ;
misses + = stats . second ;
}
size_t sum = hits + misses ;
log_debug ( gc , stats ) ( " Mark stats cache hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %1.3lf " ,
hits , misses , percent_of ( hits , sum ) ) ;
}
2017-10-23 11:46:12 +02:00
void G1ConcurrentMark : : clear_range_in_prev_bitmap ( MemRegion mr ) {
_prev_mark_bitmap - > clear_range ( mr ) ;
2012-01-10 18:58:13 -05:00
}
2008-06-05 15:57:56 -07:00
HeapRegion*
G1ConcurrentMark::claim_region(uint worker_id) {
  // "checkpoint" the finger
  HeapWord* finger = _finger;

  while (finger < _heap.end()) {
assert ( _g1h - > is_in_g1_reserved ( finger ) , " invariant " ) ;
2008-06-05 15:57:56 -07:00
2015-11-09 09:19:39 +01:00
HeapRegion * curr_region = _g1h - > heap_region_containing ( finger ) ;
2016-09-13 11:32:45 +02:00
// Make sure that the reads below do not float before loading curr_region.
OrderAccess : : loadload ( ) ;
2015-11-09 09:19:39 +01:00
// Above heap_region_containing may return NULL as we always scan claim
2014-08-19 14:09:10 +02:00
// until the end of the heap. In this case, just jump to the next region.
HeapWord * end = curr_region ! = NULL ? curr_region - > end ( ) : finger + HeapRegion : : GrainWords ;
2008-06-05 15:57:56 -07:00
2011-06-14 10:33:43 -04:00
// Is the gap between reading the finger and doing the CAS too long?
2017-10-16 22:36:06 -04:00
HeapWord * res = Atomic : : cmpxchg ( end , & _finger , finger ) ;
2014-08-19 14:09:10 +02:00
if ( res = = finger & & curr_region ! = NULL ) {
2008-06-05 15:57:56 -07:00
// we succeeded
2014-08-19 14:09:10 +02:00
HeapWord * bottom = curr_region - > bottom ( ) ;
HeapWord * limit = curr_region - > next_top_at_mark_start ( ) ;
2008-06-05 15:57:56 -07:00
      // Notice that _finger == end cannot be guaranteed here since
      // someone else might have moved the finger even further.
      assert(_finger >= end, "the finger should have moved forward");
2008-06-05 15:57:56 -07:00
if ( limit > bottom ) {
return curr_region ;
} else {
2009-10-07 10:09:57 -04:00
assert ( limit = = bottom ,
" the region limit should be at bottom " ) ;
2008-06-05 15:57:56 -07:00
// we return NULL and the caller should try calling
// claim_region() again.
return NULL ;
}
} else {
2009-10-07 10:09:57 -04:00
assert ( _finger > finger , " the finger should have moved forward " ) ;
2008-06-05 15:57:56 -07:00
// read it again
finger = _finger ;
}
}
return NULL ;
}
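// Illustrative walk-through of the claiming protocol above (not part of the
// implementation): two workers may read the same finger value F. Both compute
// the claimed region's end E and attempt Atomic::cmpxchg(E, &_finger, F).
// Exactly one CAS succeeds; that worker scans [bottom, NTAMS) of the region at
// F, while the loser sees res != finger, re-reads _finger and retries on the
// next region.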
2012-01-10 18:58:13 -05:00
# ifndef PRODUCT
2018-03-14 07:27:19 -04:00
class VerifyNoCSetOops {
2012-01-10 18:58:13 -05:00
G1CollectedHeap * _g1h ;
2015-07-27 14:05:55 -04:00
const char * _phase ;
2012-01-10 18:58:13 -05:00
int _info ;
2015-07-27 14:05:55 -04:00
public :
VerifyNoCSetOops ( const char * phase , int info = - 1 ) :
_g1h ( G1CollectedHeap : : heap ( ) ) ,
_phase ( phase ) ,
_info ( info )
{ }
2017-03-15 11:44:46 +01:00
void operator ( ) ( G1TaskQueueEntry task_entry ) const {
if ( task_entry . is_array_slice ( ) ) {
guarantee ( _g1h - > is_in_reserved ( task_entry . slice ( ) ) , " Slice " PTR_FORMAT " must be in heap. " , p2i ( task_entry . slice ( ) ) ) ;
return ;
}
2017-08-23 14:52:55 -04:00
guarantee ( oopDesc : : is_oop ( task_entry . obj ( ) ) ,
2015-09-29 11:02:08 +02:00
" Non-oop " PTR_FORMAT " , phase: %s, info: %d " ,
2017-03-15 11:44:46 +01:00
p2i ( task_entry . obj ( ) ) , _phase , _info ) ;
guarantee ( ! _g1h - > is_in_cset ( task_entry . obj ( ) ) ,
2015-09-29 11:02:08 +02:00
" obj: " PTR_FORMAT " in CSet, phase: %s, info: %d " ,
2017-03-15 11:44:46 +01:00
p2i ( task_entry . obj ( ) ) , _phase , _info ) ;
2008-06-05 15:57:56 -07:00
}
2012-01-10 18:58:13 -05:00
} ;
2019-03-25 14:11:09 +01:00
void G1ConcurrentMark : : verify_no_collection_set_oops ( ) {
2012-01-10 18:58:13 -05:00
assert ( SafepointSynchronize : : is_at_safepoint ( ) , " should be at a safepoint " ) ;
2018-03-29 14:08:10 +02:00
if ( ! _g1h - > collector_state ( ) - > mark_or_rebuild_in_progress ( ) ) {
2012-01-10 18:58:13 -05:00
return ;
}
2015-04-22 14:06:49 -04:00
// Verify entries on the global mark stack
2016-07-15 13:33:44 +02:00
_global_mark_stack . iterate ( VerifyNoCSetOops ( " Stack " ) ) ;
2012-01-10 18:58:13 -05:00
2015-04-22 14:06:49 -04:00
// Verify entries on the task queues
2017-10-25 16:13:09 +02:00
for ( uint i = 0 ; i < _max_num_tasks ; + + i ) {
2016-02-05 16:03:56 +01:00
G1CMTaskQueue * queue = _task_queues - > queue ( i ) ;
2015-07-27 14:05:55 -04:00
queue - > iterate ( VerifyNoCSetOops ( " Queue " , i ) ) ;
2015-04-22 14:06:49 -04:00
}
// Verify the global finger
HeapWord * global_finger = finger ( ) ;
2018-03-29 14:08:10 +02:00
if ( global_finger ! = NULL & & global_finger < _heap . end ( ) ) {
2015-04-22 14:06:49 -04:00
// Since we always iterate over all regions, we might get a NULL HeapRegion
// here.
2015-11-09 09:19:39 +01:00
HeapRegion * global_hr = _g1h - > heap_region_containing ( global_finger ) ;
2015-04-22 14:06:49 -04:00
guarantee ( global_hr = = NULL | | global_finger = = global_hr - > bottom ( ) ,
2015-09-29 11:02:08 +02:00
" global finger: " PTR_FORMAT " region: " HR_FORMAT ,
p2i ( global_finger ) , HR_FORMAT_PARAMS ( global_hr ) ) ;
2015-04-22 14:06:49 -04:00
}
// Verify the task fingers
2017-10-25 16:13:09 +02:00
assert ( _num_concurrent_workers < = _max_num_tasks , " sanity " ) ;
for ( uint i = 0 ; i < _num_concurrent_workers ; + + i ) {
2016-02-05 16:03:56 +01:00
G1CMTask * task = _tasks [ i ] ;
2015-04-22 14:06:49 -04:00
HeapWord * task_finger = task - > finger ( ) ;
2018-03-29 14:08:10 +02:00
if ( task_finger ! = NULL & & task_finger < _heap . end ( ) ) {
2015-04-22 14:06:49 -04:00
// See above note on the global finger verification.
2015-11-09 09:19:39 +01:00
HeapRegion * task_hr = _g1h - > heap_region_containing ( task_finger ) ;
2015-04-22 14:06:49 -04:00
guarantee ( task_hr = = NULL | | task_finger = = task_hr - > bottom ( ) | |
! task_hr - > in_collection_set ( ) ,
2015-09-29 11:02:08 +02:00
" task finger: " PTR_FORMAT " region: " HR_FORMAT ,
p2i ( task_finger ) , HR_FORMAT_PARAMS ( task_hr ) ) ;
2012-01-10 18:58:13 -05:00
}
}
2008-06-05 15:57:56 -07:00
}
2012-01-10 18:58:13 -05:00
# endif // PRODUCT
2012-01-12 00:06:47 -08:00
2018-03-26 16:51:43 +02:00
void G1ConcurrentMark : : rebuild_rem_set_concurrently ( ) {
2019-03-04 11:49:16 +01:00
_g1h - > rem_set ( ) - > rebuild_rem_set ( this , _concurrent_workers , _worker_id_offset ) ;
2016-04-06 13:41:59 +02:00
}
2016-04-06 13:32:48 +02:00
2016-02-05 16:03:56 +01:00
void G1ConcurrentMark : : print_stats ( ) {
2015-12-10 14:57:55 +01:00
if ( ! log_is_enabled ( Debug , gc , stats ) ) {
return ;
}
log_debug ( gc , stats ) ( " --------------------------------------------------------------------- " ) ;
2017-10-25 16:13:09 +02:00
for ( size_t i = 0 ; i < _num_active_tasks ; + + i ) {
2015-12-10 14:57:55 +01:00
_tasks [ i ] - > print_stats ( ) ;
log_debug ( gc , stats ) ( " --------------------------------------------------------------------- " ) ;
2008-06-05 15:57:56 -07:00
}
}
2018-03-29 14:08:10 +02:00
void G1ConcurrentMark : : concurrent_cycle_abort ( ) {
2017-10-23 11:46:12 +02:00
if ( ! cm_thread ( ) - > during_cycle ( ) | | _has_aborted ) {
2015-06-23 13:26:05 +02:00
// We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
return ;
}
2014-07-21 09:59:46 +02:00
// Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
// concurrent bitmap clearing.
2016-04-06 13:41:59 +02:00
{
2018-04-18 11:36:48 +02:00
GCTraceTime ( Debug , gc ) debug ( " Clear Next Bitmap " ) ;
2017-10-23 11:46:12 +02:00
clear_bitmap ( _next_mark_bitmap , _g1h - > workers ( ) , false ) ;
2016-04-06 13:41:59 +02:00
}
2014-04-29 09:33:20 +02:00
// Note we cannot clear the previous marking bitmap here
// since VerifyDuringGC verifies the objects marked during
// a full GC against the previous bitmap.
2008-06-05 15:57:56 -07:00
// Empty mark stack
2018-03-29 14:08:10 +02:00
reset_marking_for_restart ( ) ;
2017-10-25 16:13:09 +02:00
for ( uint i = 0 ; i < _max_num_tasks ; + + i ) {
2008-06-05 15:57:56 -07:00
_tasks [ i ] - > clear_region_fields ( ) ;
2010-09-28 09:51:37 -07:00
}
2014-05-14 13:32:44 +02:00
_first_overflow_barrier_sync . abort ( ) ;
_second_overflow_barrier_sync . abort ( ) ;
2008-06-05 15:57:56 -07:00
_has_aborted = true ;
2018-04-12 08:25:30 +02:00
SATBMarkQueueSet & satb_mq_set = G1BarrierSet : : satb_mark_queue_set ( ) ;
2008-06-05 15:57:56 -07:00
satb_mq_set . abandon_partial_marking ( ) ;
2010-03-18 12:14:59 -04:00
// This can be called either during or outside marking, we'll read
// the expected_active value from the SATB queue set.
satb_mq_set . set_active_all_threads (
false , /* new active value */
satb_mq_set . is_active ( ) /* expected_active */ ) ;
2008-06-05 15:57:56 -07:00
}
static void print_ms_time_info ( const char * prefix , const char * name ,
NumberSeq & ns ) {
2015-12-10 14:57:55 +01:00
log_trace ( gc , marking ) ( " %s%5d %12s: total time = %8.2f s (avg = %8.2f ms). " ,
2008-06-05 15:57:56 -07:00
prefix , ns . num ( ) , name , ns . sum ( ) / 1000.0 , ns . avg ( ) ) ;
if ( ns . num ( ) > 0 ) {
2015-12-10 14:57:55 +01:00
log_trace ( gc , marking ) ( " %s [std. dev = %8.2f ms, max = %8.2f ms] " ,
2008-06-05 15:57:56 -07:00
prefix , ns . sd ( ) , ns . maximum ( ) ) ;
}
}
2016-02-05 16:03:56 +01:00
void G1ConcurrentMark : : print_summary_info ( ) {
2016-04-04 09:15:15 +02:00
Log ( gc , marking ) log ;
2015-12-10 14:57:55 +01:00
if ( ! log . is_trace ( ) ) {
return ;
}
log . trace ( " Concurrent marking: " ) ;
2008-06-05 15:57:56 -07:00
print_ms_time_info ( " " , " init marks " , _init_times ) ;
print_ms_time_info ( " " , " remarks " , _remark_times ) ;
{
print_ms_time_info ( " " , " final marks " , _remark_mark_times ) ;
print_ms_time_info ( " " , " weak refs " , _remark_weak_ref_times ) ;
}
print_ms_time_info ( " " , " cleanups " , _cleanup_times ) ;
2016-04-06 13:32:48 +02:00
log . trace ( " Finalize live data total time = %8.2f s (avg = %8.2f ms). " ,
2018-03-29 14:08:10 +02:00
_total_cleanup_time , ( _cleanup_times . num ( ) > 0 ? _total_cleanup_time * 1000.0 / ( double ) _cleanup_times . num ( ) : 0.0 ) ) ;
2015-12-10 14:57:55 +01:00
log . trace ( " Total stop_world time = %8.2f s. " ,
( _init_times . sum ( ) + _remark_times . sum ( ) + _cleanup_times . sum ( ) ) / 1000.0 ) ;
log . trace ( " Total concurrent time = %8.2f s (%8.2f s marking). " ,
2017-10-23 11:46:12 +02:00
cm_thread ( ) - > vtime_accum ( ) , cm_thread ( ) - > vtime_mark_accum ( ) ) ;
2008-06-05 15:57:56 -07:00
}
2016-02-05 16:03:56 +01:00
void G1ConcurrentMark : : print_worker_threads_on ( outputStream * st ) const {
2017-10-25 16:13:09 +02:00
_concurrent_workers - > print_worker_threads_on ( st ) ;
2009-10-02 16:12:07 -04:00
}
2016-04-13 17:00:54 -04:00
void G1ConcurrentMark : : threads_do ( ThreadClosure * tc ) const {
2017-10-25 16:13:09 +02:00
_concurrent_workers - > threads_do ( tc ) ;
2016-04-13 17:00:54 -04:00
}
2016-02-05 16:03:56 +01:00
void G1ConcurrentMark : : print_on_error ( outputStream * st ) const {
2013-04-10 14:26:49 +02:00
st - > print_cr ( " Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT " , (CMBitMap*) " PTR_FORMAT ,
2017-10-23 11:46:12 +02:00
p2i ( _prev_mark_bitmap ) , p2i ( _next_mark_bitmap ) ) ;
_prev_mark_bitmap - > print_on_error ( st , " Prev Bits: " ) ;
_next_mark_bitmap - > print_on_error ( st , " Next Bits: " ) ;
2013-04-10 14:26:49 +02:00
}
2015-10-16 14:55:09 -04:00
static ReferenceProcessor * get_cm_oop_closure_ref_processor ( G1CollectedHeap * g1h ) {
2016-02-28 12:22:05 -05:00
ReferenceProcessor * result = g1h - > ref_processor_cm ( ) ;
assert ( result ! = NULL , " CM reference processor should not be NULL " ) ;
2015-10-16 14:55:09 -04:00
return result ;
2011-06-14 10:33:43 -04:00
}
2008-06-05 15:57:56 -07:00
2015-10-16 14:55:09 -04:00
G1CMOopClosure : : G1CMOopClosure ( G1CollectedHeap * g1h ,
2016-02-05 16:03:56 +01:00
G1CMTask * task )
2018-05-26 06:59:49 +02:00
: MetadataVisitingOopIterateClosure ( get_cm_oop_closure_ref_processor ( g1h ) ) ,
2018-03-29 14:08:10 +02:00
_g1h ( g1h ) , _task ( task )
2015-10-16 14:55:09 -04:00
{ }
2016-02-05 16:03:56 +01:00
void G1CMTask : : setup_for_region ( HeapRegion * hr ) {
2009-10-07 10:09:57 -04:00
assert ( hr ! = NULL ,
2014-04-17 15:57:02 +02:00
" claim_region() should have filtered out NULL regions " ) ;
2008-06-05 15:57:56 -07:00
_curr_region = hr ;
_finger = hr - > bottom ( ) ;
update_region_limit ( ) ;
}
2016-02-05 16:03:56 +01:00
void G1CMTask : : update_region_limit ( ) {
2008-06-05 15:57:56 -07:00
HeapRegion * hr = _curr_region ;
HeapWord * bottom = hr - > bottom ( ) ;
HeapWord * limit = hr - > next_top_at_mark_start ( ) ;
if ( limit = = bottom ) {
// The region was collected underneath our feet.
// We set the finger to bottom to ensure that the bitmap
// iteration that will follow this will not do anything.
// (this is not a condition that holds when we set the region up,
// as the region is not supposed to be empty in the first place)
_finger = bottom ;
} else if ( limit > = _region_limit ) {
2009-10-07 10:09:57 -04:00
assert ( limit > = _finger , " peace of mind " ) ;
2008-06-05 15:57:56 -07:00
} else {
2009-10-07 10:09:57 -04:00
assert ( limit < _region_limit , " only way to get here " ) ;
2008-06-05 15:57:56 -07:00
// This can happen under some pretty unusual circumstances. An
// evacuation pause empties the region underneath our feet (NTAMS
// at bottom). We then do some allocation in the region (NTAMS
// stays at bottom), followed by the region being used as a GC
// alloc region (NTAMS will move to top() and the objects
// originally below it will be grayed). All objects now marked in
// the region are explicitly grayed, if below the global finger,
// and we do not need in fact to scan anything else. So, we simply
// set _finger to be limit to ensure that the bitmap iteration
// doesn't do anything.
_finger = limit ;
}
_region_limit = limit ;
}
2016-02-05 16:03:56 +01:00
void G1CMTask : : giveup_current_region ( ) {
2009-10-07 10:09:57 -04:00
assert ( _curr_region ! = NULL , " invariant " ) ;
2008-06-05 15:57:56 -07:00
clear_region_fields ( ) ;
}
2016-02-05 16:03:56 +01:00
void G1CMTask : : clear_region_fields ( ) {
2008-06-05 15:57:56 -07:00
// Values for these three fields that indicate that we're not
// holding on to a region.
_curr_region = NULL ;
_finger = NULL ;
_region_limit = NULL ;
}
2016-02-05 16:03:56 +01:00
void G1CMTask : : set_cm_oop_closure ( G1CMOopClosure * cm_oop_closure ) {
2011-06-14 10:33:43 -04:00
if ( cm_oop_closure = = NULL ) {
assert ( _cm_oop_closure ! = NULL , " invariant " ) ;
} else {
assert ( _cm_oop_closure = = NULL , " invariant " ) ;
}
_cm_oop_closure = cm_oop_closure ;
}
2017-10-23 11:46:12 +02:00
void G1CMTask : : reset ( G1CMBitMap * next_mark_bitmap ) {
guarantee ( next_mark_bitmap ! = NULL , " invariant " ) ;
_next_mark_bitmap = next_mark_bitmap ;
2008-06-05 15:57:56 -07:00
clear_region_fields ( ) ;
_calls = 0 ;
_elapsed_time_ms = 0.0 ;
_termination_time_ms = 0.0 ;
_termination_start_time_ms = 0.0 ;
2018-03-26 16:51:43 +02:00
_mark_stats_cache . reset ( ) ;
2008-06-05 15:57:56 -07:00
}
2016-02-05 16:03:56 +01:00
bool G1CMTask : : should_exit_termination ( ) {
2019-01-09 19:05:05 -05:00
if ( ! regular_clock_call ( ) ) {
return true ;
}
2008-06-05 15:57:56 -07:00
// This is called when we are in the termination protocol. We should
// quit if, for some reason, this task wants to abort or the global
// stack is not empty (this means that we can get work from it).
return ! _cm - > mark_stack_empty ( ) | | has_aborted ( ) ;
}
2016-02-05 16:03:56 +01:00
void G1CMTask : : reached_limit ( ) {
2009-10-07 10:09:57 -04:00
assert ( _words_scanned > = _words_scanned_limit | |
_refs_reached > = _refs_reached_limit ,
" shouldn't have been called otherwise " ) ;
2019-01-09 19:05:05 -05:00
abort_marking_if_regular_check_fail ( ) ;
2008-06-05 15:57:56 -07:00
}
2019-01-09 19:05:05 -05:00
bool G1CMTask : : regular_clock_call ( ) {
2018-03-29 14:08:10 +02:00
if ( has_aborted ( ) ) {
2019-01-09 19:05:05 -05:00
return false ;
2018-03-29 14:08:10 +02:00
}
2008-06-05 15:57:56 -07:00
// First, we need to recalculate the words scanned and refs reached
// limits for the next clock call.
recalculate_limits ( ) ;
// During the regular clock call we do the following
// (1) If an overflow has been flagged, then we abort.
if ( _cm - > has_overflown ( ) ) {
2019-01-09 19:05:05 -05:00
return false ;
2008-06-05 15:57:56 -07:00
}
// If we are not concurrent (i.e. we're doing remark) we don't need
// to check anything else. The other steps are only needed during
// the concurrent marking phase.
2018-04-04 11:21:14 +02:00
if ( ! _cm - > concurrent ( ) ) {
2019-01-09 19:05:05 -05:00
return true ;
2017-10-23 11:46:12 +02:00
}
2008-06-05 15:57:56 -07:00
// (2) If marking has been aborted for Full GC, then we also abort.
if ( _cm - > has_aborted ( ) ) {
2019-01-09 19:05:05 -05:00
return false ;
2008-06-05 15:57:56 -07:00
}
double curr_time_ms = os : : elapsedVTime ( ) * 1000.0 ;
// (4) We check whether we should yield. If we have to, then we abort.
2014-04-11 12:29:24 +02:00
if ( SuspendibleThreadSet : : should_yield ( ) ) {
2008-06-05 15:57:56 -07:00
// We should yield. To do this we abort the task. The caller is
// responsible for yielding.
2019-01-09 19:05:05 -05:00
return false ;
2008-06-05 15:57:56 -07:00
}
// (5) We check whether we've reached our time quota. If we have,
// then we abort.
double elapsed_time_ms = curr_time_ms - _start_time_ms ;
if ( elapsed_time_ms > _time_target_ms ) {
2011-01-25 10:56:22 -08:00
_has_timed_out = true ;
2019-01-09 19:05:05 -05:00
return false ;
2008-06-05 15:57:56 -07:00
}
  // (6) Finally, we check whether there are enough completed SATB
  // buffers available for processing. If there are, we abort.
  SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
  if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
    // We do need to process SATB buffers, so we'll abort and restart
    // the marking task to do so.
    return false;
  }
2019-01-09 19:05:05 -05:00
return true ;
2008-06-05 15:57:56 -07:00
}
2016-02-05 16:03:56 +01:00
void G1CMTask : : recalculate_limits ( ) {
2008-06-05 15:57:56 -07:00
_real_words_scanned_limit = _words_scanned + words_scanned_period ;
_words_scanned_limit = _real_words_scanned_limit ;
_real_refs_reached_limit = _refs_reached + refs_reached_period ;
_refs_reached_limit = _real_refs_reached_limit ;
}
2016-02-05 16:03:56 +01:00
void G1CMTask::decrease_limits() {
  // This is called when we believe that we're going to do an infrequent
  // operation which will increase the per byte scanned cost (i.e. move
  // entries to/from the global stack). It basically tries to decrease the
  // scanning limit so that the clock is called earlier.
  _words_scanned_limit = _real_words_scanned_limit - 3 * words_scanned_period / 4;
  _refs_reached_limit = _real_refs_reached_limit - 3 * refs_reached_period / 4;
}
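// Worked example (illustrative only): if words_scanned_period were 12*1024
// words, the statement above pulls _words_scanned_limit back by
// 3 * 12288 / 4 = 9216 words, so the next regular_clock_call() fires after
// roughly a quarter of the usual scanning period.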
2016-02-05 16:03:56 +01:00
void G1CMTask : : move_entries_to_global_stack ( ) {
2016-09-15 16:44:19 +02:00
// Local array where we'll store the entries that will be popped
// from the local queue.
2017-03-15 11:44:46 +01:00
G1TaskQueueEntry buffer [ G1CMMarkStack : : EntriesPerChunk ] ;
2008-06-05 15:57:56 -07:00
2016-09-15 16:44:19 +02:00
size_t n = 0 ;
2017-03-15 11:44:46 +01:00
G1TaskQueueEntry task_entry ;
while ( n < G1CMMarkStack : : EntriesPerChunk & & _task_queue - > pop_local ( task_entry ) ) {
buffer [ n ] = task_entry ;
2008-06-05 15:57:56 -07:00
+ + n ;
}
2017-03-15 11:44:46 +01:00
if ( n < G1CMMarkStack : : EntriesPerChunk ) {
buffer [ n ] = G1TaskQueueEntry ( ) ;
2016-09-15 16:44:19 +02:00
}
2008-06-05 15:57:56 -07:00
if ( n > 0 ) {
2016-09-15 16:44:19 +02:00
if ( ! _cm - > mark_stack_push ( buffer ) ) {
2008-06-05 15:57:56 -07:00
set_has_aborted ( ) ;
}
}
2016-09-15 16:44:19 +02:00
// This operation was quite expensive, so decrease the limits.
2008-06-05 15:57:56 -07:00
decrease_limits ( ) ;
}
2016-09-15 16:44:19 +02:00
bool G1CMTask : : get_entries_from_global_stack ( ) {
// Local array where we'll store the entries that will be popped
2008-06-05 15:57:56 -07:00
// from the global stack.
2017-03-15 11:44:46 +01:00
G1TaskQueueEntry buffer [ G1CMMarkStack : : EntriesPerChunk ] ;
2016-09-15 16:44:19 +02:00
if ( ! _cm - > mark_stack_pop ( buffer ) ) {
return false ;
}
// We did actually pop at least one entry.
2017-03-15 11:44:46 +01:00
for ( size_t i = 0 ; i < G1CMMarkStack : : EntriesPerChunk ; + + i ) {
G1TaskQueueEntry task_entry = buffer [ i ] ;
if ( task_entry . is_null ( ) ) {
2016-09-15 16:44:19 +02:00
break ;
2008-06-05 15:57:56 -07:00
}
2017-08-23 14:52:55 -04:00
assert ( task_entry . is_array_slice ( ) | | oopDesc : : is_oop ( task_entry . obj ( ) ) , " Element " PTR_FORMAT " must be an array slice or oop " , p2i ( task_entry . obj ( ) ) ) ;
2017-03-15 11:44:46 +01:00
bool success = _task_queue - > push ( task_entry ) ;
2016-09-15 16:44:19 +02:00
// We only call this when the local queue is empty or under a
// given target limit. So, we do not expect this push to fail.
assert ( success , " invariant " ) ;
2008-06-05 15:57:56 -07:00
}
2016-09-15 16:44:19 +02:00
// This operation was quite expensive, so decrease the limits
2008-06-05 15:57:56 -07:00
decrease_limits ( ) ;
2016-09-15 16:44:19 +02:00
return true ;
2008-06-05 15:57:56 -07:00
}
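// Illustrative example of the chunked transfer above (not part of the
// implementation): with a hypothetical EntriesPerChunk of 64, a task holding 70
// local entries pushes one full 64-entry chunk from move_entries_to_global_stack();
// a task holding only 10 pushes a chunk whose 11th slot is a null
// G1TaskQueueEntry, which is exactly what the is_null() break above relies on to
// find the end of a short chunk.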
2016-02-05 16:03:56 +01:00
void G1CMTask : : drain_local_queue ( bool partially ) {
2016-11-24 11:27:57 +01:00
if ( has_aborted ( ) ) {
return ;
}
2008-06-05 15:57:56 -07:00
  // Decide what the target size is, depending on whether we're going to
  // drain it partially (so that other tasks can steal if they run out
  // of things to do) or totally (at the very end).
  size_t target_size;
  if (partially) {
    target_size = MIN2((size_t)_task_queue->max_elems() / 3, (size_t)GCDrainStackTargetSize);
  } else {
    target_size = 0;
  }
if ( _task_queue - > size ( ) > target_size ) {
2017-03-15 11:44:46 +01:00
G1TaskQueueEntry entry ;
bool ret = _task_queue - > pop_local ( entry ) ;
2008-06-05 15:57:56 -07:00
while ( ret ) {
2017-03-15 11:44:46 +01:00
scan_task_entry ( entry ) ;
2011-06-20 22:03:13 -04:00
if ( _task_queue - > size ( ) < = target_size | | has_aborted ( ) ) {
2008-06-05 15:57:56 -07:00
ret = false ;
2011-06-20 22:03:13 -04:00
} else {
2017-03-15 11:44:46 +01:00
ret = _task_queue - > pop_local ( entry ) ;
2011-06-20 22:03:13 -04:00
}
2008-06-05 15:57:56 -07:00
}
}
}
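// Illustrative example of the target sizing above (not part of the
// implementation): with a hypothetical task queue capacity of 16k entries and
// GCDrainStackTargetSize == 64, a partial drain stops once the queue size drops
// to MIN2(16384 / 3, 64) = 64 entries, leaving work available for stealing; a
// total drain (partially == false) only stops at zero.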
2016-02-05 16:03:56 +01:00
void G1CMTask : : drain_global_stack ( bool partially ) {
2018-03-29 14:08:10 +02:00
if ( has_aborted ( ) ) {
return ;
}
2008-06-05 15:57:56 -07:00
// We have a policy to drain the local queue before we attempt to
// drain the global stack.
2009-10-07 10:09:57 -04:00
assert ( partially | | _task_queue - > size ( ) = = 0 , " invariant " ) ;
2008-06-05 15:57:56 -07:00
  // Decide what the target size is, depending on whether we're going to
  // drain it partially (so that other tasks can steal if they run out
  // of things to do) or totally (at the very end).
  // Notice that when draining the global mark stack partially, due to the raciness
  // of the mark stack size update, we might in fact drop below the target. But
  // this is not a problem.
  // In case of total draining, we simply process until the global mark stack is
  // totally empty, disregarding the size counter.
if ( partially ) {
2016-09-15 16:44:19 +02:00
size_t const target_size = _cm - > partial_mark_stack_size_target ( ) ;
2008-06-05 15:57:56 -07:00
while ( ! has_aborted ( ) & & _cm - > mark_stack_size ( ) > target_size ) {
2016-09-15 16:44:19 +02:00
if ( get_entries_from_global_stack ( ) ) {
drain_local_queue ( partially ) ;
}
}
} else {
while ( ! has_aborted ( ) & & get_entries_from_global_stack ( ) ) {
2008-06-05 15:57:56 -07:00
drain_local_queue ( partially ) ;
}
}
}
// The SATB queue code makes several assumptions about whether to call the
// par or non-par versions of the methods; this is why some of the code is
// replicated. We should really get rid of the single-threaded version of
// the code to simplify things.
void G1CMTask::drain_satb_buffers() {
  if (has_aborted()) {
    return;
  }

  // We set this so that the regular clock knows that we're in the
  // middle of draining buffers and doesn't set the abort flag when it
  // notices that SATB buffers are available for draining. It'd be
  // very counterproductive if it did that. :-)
  _draining_satb_buffers = true;

  G1CMSATBBufferClosure satb_cl(this, _g1h);
  SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();

  // This keeps claiming and applying the closure to completed buffers
  // until we run out of buffers or we need to abort.
  while (!has_aborted() &&
         satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
    abort_marking_if_regular_check_fail();
  }

  _draining_satb_buffers = false;

  assert(has_aborted() ||
         _cm->concurrent() ||
         satb_mq_set.completed_buffers_num() == 0, "invariant");

  // Again, this was a potentially expensive operation; decrease the
  // limits to get the regular clock call early.
  decrease_limits();
}
void G1CMTask::clear_mark_stats_cache(uint region_idx) {
  _mark_stats_cache.reset(region_idx);
}

Pair<size_t, size_t> G1CMTask::flush_mark_stats_cache() {
  return _mark_stats_cache.evict_all();
}

void G1CMTask::print_stats() {
  log_debug(gc, stats)("Marking Stats, task = %u, calls = %u", _worker_id, _calls);
  log_debug(gc, stats)("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
                       _elapsed_time_ms, _termination_time_ms);
  log_debug(gc, stats)("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms max = %1.2lfms, total = %1.2lfms",
                       _step_times_ms.num(),
                       _step_times_ms.avg(),
                       _step_times_ms.sd(),
                       _step_times_ms.maximum(),
                       _step_times_ms.sum());
  size_t const hits = _mark_stats_cache.hits();
  size_t const misses = _mark_stats_cache.misses();
  log_debug(gc, stats)("  Mark Stats Cache: hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %.3f",
                       hits, misses, percent_of(hits, hits + misses));
}

bool G1ConcurrentMark::try_stealing(uint worker_id, G1TaskQueueEntry& task_entry) {
  return _task_queues->steal(worker_id, task_entry);
}
/*****************************************************************************

    The do_marking_step(time_target_ms, ...) method is the building
    block of the parallel marking framework. It can be called in parallel
    with other invocations of do_marking_step() on different tasks
    (but only one per task, obviously) and concurrently with the
    mutator threads, or during remark, hence it eliminates the need
    for two versions of the code. When called during remark, it will
    pick up from where the task left off during the concurrent marking
    phase. Interestingly, tasks are also claimable during evacuation
    pauses, since do_marking_step() ensures that it aborts before it
    needs to yield.

    The data structures that it uses to do marking work are the
    following:

      (1) Marking Bitmap. If there are gray objects that appear only
      on the bitmap (this happens either when dealing with an overflow
      or when the initial marking phase has simply marked the roots
      and didn't push them on the stack), then tasks claim heap
      regions whose bitmap they then scan to find gray objects. A
      global finger indicates where the end of the last claimed region
      is. A local finger indicates how far into the region a task has
      scanned. The two fingers are used to determine how to gray an
      object (i.e. whether simply marking it is OK, as it will be
      visited by a task in the future, or whether it also needs to be
      pushed on a stack).

      (2) Local Queue. The local queue of the task which is accessed
      reasonably efficiently by the task. Other tasks can steal from
      it when they run out of work. Throughout the marking phase, a
      task attempts to keep its local queue short but not totally
      empty, so that entries are available for stealing by other
      tasks. Only when there is no more work does a task totally
      drain its local queue.

      (3) Global Mark Stack. This handles local queue overflow. During
      marking only sets of entries are moved between it and the local
      queues, as access to it requires a mutex and more fine-grained
      interaction with it which might cause contention. If it
      overflows, then the marking phase should restart and iterate
      over the bitmap to identify gray objects. Throughout the marking
      phase, tasks attempt to keep the global mark stack at a small
      length but not totally empty, so that entries are available for
      popping by other tasks. Only when there is no more work do tasks
      totally drain the global mark stack.

      (4) SATB Buffer Queue. This is where completed SATB buffers are
      made available. Buffers are regularly removed from this queue
      and scanned for roots, so that the queue doesn't get too
      long. During remark, all completed buffers are processed, as
      well as the filled-in parts of any uncompleted buffers.

    The do_marking_step() method tries to abort when the time target
    has been reached. There are a few other cases when the
    do_marking_step() method also aborts:

      (1) When the marking phase has been aborted (after a Full GC).

      (2) When a global overflow (on the global stack) has been
      triggered. Before the task aborts, it will actually sync up with
      the other tasks to ensure that all the marking data structures
      (local queues, stacks, fingers etc.) are re-initialized so that
      when do_marking_step() completes, the marking phase can
      immediately restart.

      (3) When enough completed SATB buffers are available. The
      do_marking_step() method only tries to drain SATB buffers right
      at the beginning. So, if enough buffers are available, the
      marking step aborts and the SATB buffers are processed at
      the beginning of the next invocation.

      (4) To yield. When we have to yield, we abort and yield right
      at the end of do_marking_step(). This saves us from a lot of
      hassle as, by yielding, we might allow a Full GC. If this
      happens then objects will be compacted underneath our feet, the
      heap might shrink, etc. We save checking for this by just
      aborting and doing the yield right at the end.

    From the above it follows that the do_marking_step() method should
    be called in a loop (or, otherwise, regularly) until it completes.

    If a marking step completes without its has_aborted() flag being
    true, it means it has completed the current marking phase (and
    also all other marking tasks have done so and have all synced up).

    A method called regular_clock_call() is invoked "regularly" (in
    sub-ms intervals) throughout marking. It is this clock method that
    checks all the abort conditions mentioned above and decides when
    the task should abort. A work-based scheme is used to trigger this
    clock method: when the number of object words the marking phase
    has scanned or the number of references the marking phase has
    visited reaches a given limit. Additional invocations of the clock
    method have been planted in a few other strategic places too. The
    initial reason for the clock method was to avoid calling vtime too
    regularly, as it is quite expensive. So, once it was in place, it
    was natural to piggy-back all the other conditions on it too and
    not constantly check them throughout the code.

    If do_termination is true then do_marking_step will enter its
    termination protocol.

    The value of is_serial must be true when do_marking_step is being
    called serially (i.e. by the VMThread) and do_marking_step should
    skip any synchronization in the termination and overflow code.
    Examples include the serial remark code and the serial reference
    processing closures.

    The value of is_serial must be false when do_marking_step is
    being called by any of the worker threads in a work gang.
    Examples include the concurrent marking code (CMMarkingTask),
    the MT remark code, and the MT reference processing closures.

 *****************************************************************************/
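
// A minimal sketch of the calling pattern described above, for illustration
// only. The identifiers `task` and `mark_step_duration_ms` are placeholders,
// not the names used by the actual callers:
//
//   do {
//     task->do_marking_step(mark_step_duration_ms,
//                           true  /* do_termination */,
//                           false /* is_serial */);
//   } while (task->has_aborted());
//
// A step that returns with has_aborted() set simply means "not finished yet"
// (time target hit, overflow, yield request, ...), so the caller retries; a
// step that returns without it means the marking phase is complete. The real
// callers additionally handle yielding and overall marking abort around such
// a loop.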
void G1CMTask::do_marking_step(double time_target_ms,
                               bool do_termination,
                               bool is_serial) {
  assert(time_target_ms >= 1.0, "minimum granularity is 1ms");

  _start_time_ms = os::elapsedVTime() * 1000.0;

  // If do_stealing is true then do_marking_step will attempt to
  // steal work from the other G1CMTasks. It only makes sense to
  // enable stealing when the termination protocol is enabled
  // and do_marking_step() is not being called serially.
  bool do_stealing = do_termination && !is_serial;

  double diff_prediction_ms = _g1h->policy()->predictor().get_new_prediction(&_marking_step_diffs_ms);
  _time_target_ms = time_target_ms - diff_prediction_ms;

  // set up the variables that are used in the work-based scheme to
  // call the regular clock method
  _words_scanned = 0;
  _refs_reached  = 0;
  recalculate_limits();

  // clear all flags
  clear_has_aborted();
  _has_timed_out = false;
  _draining_satb_buffers = false;

  ++_calls;

  // Set up the bitmap and oop closures. Anything that uses them is
  // eventually called from this method, so it is OK to allocate these
  // statically.
  G1CMBitMapClosure bitmap_closure(this, _cm);
  G1CMOopClosure cm_oop_closure(_g1h, this);
  set_cm_oop_closure(&cm_oop_closure);

  if (_cm->has_overflown()) {
    // This can happen if the mark stack overflows during a GC pause
    // and this task, after a yield point, restarts. We have to abort
    // as we need to get into the overflow protocol which happens
    // right at the end of this task.
    set_has_aborted();
  }

  // First drain any available SATB buffers. After this, we will not
  // look at SATB buffers before the next invocation of this method.
  // If enough completed SATB buffers are queued up, the regular clock
  // will abort this task so that it restarts.
  drain_satb_buffers();

  // ...then partially drain the local queue and the global stack.
  drain_local_queue(true);
  drain_global_stack(true);
  do {
    if (!has_aborted() && _curr_region != NULL) {
      // This means that we're already holding on to a region.
      assert(_finger != NULL, "if region is not NULL, then the finger "
             "should not be NULL either");

      // We might have restarted this task after an evacuation pause
      // which might have evacuated the region we're holding on to
      // underneath our feet. Let's read its limit again to make sure
      // that we do not iterate over a region of the heap that
      // contains garbage (update_region_limit() will also move
      // _finger to the start of the region if it is found empty).
      update_region_limit();
      // We will start from _finger not from the start of the region,
      // as we might be restarting this task after aborting half-way
      // through scanning this region. In this case, _finger points to
      // the address where we last found a marked object. If this is a
      // fresh region, _finger points to start().
      MemRegion mr = MemRegion(_finger, _region_limit);

      assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
             "humongous regions should go around loop once only");

      // Some special cases:
      // If the memory region is empty, we can just give up the region.
      // If the current region is humongous then we only need to check
      // the bitmap for the bit associated with the start of the object,
      // scan the object if it's live, and give up the region.
      // Otherwise, let's iterate over the bitmap of the part of the region
      // that is left.
      // If the iteration is successful, give up the region.
      if (mr.is_empty()) {
        giveup_current_region();
        abort_marking_if_regular_check_fail();
      } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
        if (_next_mark_bitmap->is_marked(mr.start())) {
          // The object is marked - apply the closure
          bitmap_closure.do_addr(mr.start());
        }
        // Even if this task aborted while scanning the humongous object
        // we can (and should) give up the current region.
        giveup_current_region();
        abort_marking_if_regular_check_fail();
      } else if (_next_mark_bitmap->iterate(&bitmap_closure, mr)) {
        giveup_current_region();
        abort_marking_if_regular_check_fail();
      } else {
        assert(has_aborted(), "currently the only way to do so");
        // The only way to abort the bitmap iteration is to return
        // false from the do_bit() method. However, inside the
        // do_bit() method we move the _finger to point to the
        // object currently being looked at. So, if we bail out, we
        // have definitely set _finger to something non-null.
        assert(_finger != NULL, "invariant");

        // Region iteration was actually aborted. So now _finger
        // points to the address of the object we last scanned. If we
        // leave it there, when we restart this task, we will rescan
        // the object. It is easy to avoid this. We move the finger by
        // enough to point to the next possible object header.
        assert(_finger < _region_limit, "invariant");
        HeapWord* const new_finger = _finger + ((oop)_finger)->size();
        // Check if bitmap iteration was aborted while scanning the last object
        if (new_finger >= _region_limit) {
          giveup_current_region();
        } else {
          move_finger_to(new_finger);
        }
      }
    }
    // At this point we have either completed iterating over the
    // region we were holding on to, or we have aborted.

    // We then partially drain the local queue and the global stack.
    // (Do we really need this?)
    drain_local_queue(true);
    drain_global_stack(true);

    // Read the note on the claim_region() method on why it might
    // return NULL with potentially more regions available for
    // claiming and why we have to check out_of_regions() to determine
    // whether we're done or not.
    while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
      // We are going to try to claim a new region. We should have
      // given up on the previous one.
      // Separated the asserts so that we know which one fires.
      assert(_curr_region  == NULL, "invariant");
      assert(_finger       == NULL, "invariant");
      assert(_region_limit == NULL, "invariant");
      HeapRegion* claimed_region = _cm->claim_region(_worker_id);
      if (claimed_region != NULL) {
        // Yes, we managed to claim one
        setup_for_region(claimed_region);
        assert(_curr_region == claimed_region, "invariant");
      }
      // It is important to call the regular clock here. It might take
      // a while to claim a region if, for example, we hit a large
      // block of empty regions. So we need to call the regular clock
      // method once round the loop to make sure it's called
      // frequently enough.
      abort_marking_if_regular_check_fail();
    }

    if (!has_aborted() && _curr_region == NULL) {
      assert(_cm->out_of_regions(),
             "at this point we should be out of regions");
    }
  } while (_curr_region != NULL && !has_aborted());
  if (!has_aborted()) {
    // We cannot check whether the global stack is empty, since other
    // tasks might be pushing objects to it concurrently.
    assert(_cm->out_of_regions(),
           "at this point we should be out of regions");
    // Try to reduce the number of available SATB buffers so that
    // remark has less work to do.
    drain_satb_buffers();
  }

  // Since we've done everything else, we can now totally drain the
  // local queue and global stack.
  drain_local_queue(false);
  drain_global_stack(false);

  // Attempt at work stealing from other tasks' queues.
  if (do_stealing && !has_aborted()) {
    // We have not aborted. This means that we have finished all that
    // we could. Let's try to do some stealing...

    // We cannot check whether the global stack is empty, since other
    // tasks might be pushing objects to it concurrently.
    assert(_cm->out_of_regions() && _task_queue->size() == 0,
           "only way to reach here");
    while (!has_aborted()) {
      G1TaskQueueEntry entry;
      if (_cm->try_stealing(_worker_id, entry)) {
        scan_task_entry(entry);

        // And since we're towards the end, let's totally drain the
        // local queue and global stack.
        drain_local_queue(false);
        drain_global_stack(false);
      } else {
        break;
      }
    }
  }

  // We still haven't aborted. Now, let's try to get into the
  // termination protocol.
  if (do_termination && !has_aborted()) {
    // We cannot check whether the global stack is empty, since other
    // tasks might be concurrently pushing objects on it.
    // Separated the asserts so that we know which one fires.
    assert(_cm->out_of_regions(), "only way to reach here");
    assert(_task_queue->size() == 0, "only way to reach here");
    _termination_start_time_ms = os::elapsedVTime() * 1000.0;

    // The G1CMTask class also extends the TerminatorTerminator class,
    // hence its should_exit_termination() method will also decide
    // whether to exit the termination protocol or not.
    bool finished = (is_serial ||
                     _cm->terminator()->offer_termination(this));
    double termination_end_time_ms = os::elapsedVTime() * 1000.0;
    _termination_time_ms +=
      termination_end_time_ms - _termination_start_time_ms;

    if (finished) {
      // We're all done.

      // We can now guarantee that the global stack is empty, since
      // all other tasks have finished. We separated the guarantees so
      // that, if a condition is false, we can immediately find out
      // which one.
      guarantee(_cm->out_of_regions(), "only way to reach here");
      guarantee(_cm->mark_stack_empty(), "only way to reach here");
      guarantee(_task_queue->size() == 0, "only way to reach here");
      guarantee(!_cm->has_overflown(), "only way to reach here");
      guarantee(!has_aborted(), "should never happen if termination has completed");
    } else {
      // Apparently there's more work to do. Let's abort this task. It
      // will restart it and we can hopefully find more things to do.
      set_has_aborted();
    }
  }

  // Mainly for debugging purposes to make sure that a pointer to the
  // closure which was statically allocated in this frame doesn't
  // escape it by accident.
  set_cm_oop_closure(NULL);
  double end_time_ms = os::elapsedVTime() * 1000.0;
  double elapsed_time_ms = end_time_ms - _start_time_ms;
  // Update the step history.
  _step_times_ms.add(elapsed_time_ms);

  if (has_aborted()) {
    // The task was aborted for some reason.
    if (_has_timed_out) {
      double diff_ms = elapsed_time_ms - _time_target_ms;
      // Keep statistics of how well we did with respect to hitting
      // our target only if we actually timed out (if we aborted for
      // other reasons, then the results might get skewed).
      _marking_step_diffs_ms.add(diff_ms);
    }

    if (_cm->has_overflown()) {
      // This is the interesting one. We aborted because a global
      // overflow was raised. This means we have to restart the
      // marking phase and start iterating over regions. However, in
      // order to do this we have to make sure that all tasks stop
      // what they are doing and re-initialize in a safe manner. We
      // will achieve this with the use of two barrier sync points.
      if (!is_serial) {
        // We only need to enter the sync barrier if being called
        // from a parallel context.
        _cm->enter_first_sync_barrier(_worker_id);

        // When we exit this sync barrier we know that all tasks have
        // stopped doing marking work. So, it's now safe to
        // re-initialize our data structures.
      }

      clear_region_fields();
      flush_mark_stats_cache();

      if (!is_serial) {
        // If we're executing the concurrent phase of marking, reset the marking
        // state; otherwise the marking state is reset after reference processing,
        // during the remark pause.
        // If we reset here as a result of an overflow during the remark we will
        // see assertion failures from any subsequent set_concurrency_and_phase()
        // calls.
        if (_cm->concurrent() && _worker_id == 0) {
          // Worker 0 is responsible for clearing the global data structures because
          // of an overflow. During STW we should not clear the overflow flag (in
          // G1ConcurrentMark::reset_marking_state()) since we rely on it being true
          // when we exit this method to abort the pause and restart concurrent marking.
          _cm->reset_marking_for_restart();

          log_info(gc, marking)("Concurrent Mark reset for overflow");
        }

        // ...and enter the second barrier.
        _cm->enter_second_sync_barrier(_worker_id);
      }
      // At this point, if we're in the concurrent phase of marking,
      // everything has been re-initialized and we're ready to restart.
    }
  }
}
G1CMTask::G1CMTask(uint worker_id,
                   G1ConcurrentMark* cm,
                   G1CMTaskQueue* task_queue,
                   G1RegionMarkStats* mark_stats,
                   uint max_regions) :
  _objArray_processor(this),
  _worker_id(worker_id),
  _g1h(G1CollectedHeap::heap()),
  _cm(cm),
  _next_mark_bitmap(NULL),
  _task_queue(task_queue),
  _mark_stats_cache(mark_stats, max_regions, RegionMarkStatsCacheSize),
  _calls(0),
  _time_target_ms(0.0),
  _start_time_ms(0.0),
  _cm_oop_closure(NULL),
  _curr_region(NULL),
  _finger(NULL),
  _region_limit(NULL),
  _words_scanned(0),
  _words_scanned_limit(0),
  _real_words_scanned_limit(0),
  _refs_reached(0),
  _refs_reached_limit(0),
  _real_refs_reached_limit(0),
  _has_aborted(false),
  _has_timed_out(false),
  _draining_satb_buffers(false),
  _step_times_ms(),
  _elapsed_time_ms(0.0),
  _termination_time_ms(0.0),
  _termination_start_time_ms(0.0),
  _marking_step_diffs_ms()
{
  guarantee(task_queue != NULL, "invariant");

  _marking_step_diffs_ms.add(0.5);
}
// These are formatting macros that are used below to ensure
// consistent formatting. The *_H_* versions are used to format the
// header for a particular value and they should be kept consistent
// with the corresponding macro. Also note that most of the macros add
// the necessary white space (as a prefix) which makes them a bit
// easier to compose.

// All the output lines are prefixed with this string to be able to
// identify them easily in a large log file.
#define G1PPRL_LINE_PREFIX              "###"

#define G1PPRL_ADDR_BASE_FORMAT         " " PTR_FORMAT "-" PTR_FORMAT
#ifdef _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT       " %37s"
#else // _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT       " %21s"
#endif // _LP64

// For per-region info
#define G1PPRL_TYPE_FORMAT              " %-4s"
#define G1PPRL_TYPE_H_FORMAT            " %4s"
#define G1PPRL_STATE_FORMAT             " %-5s"
#define G1PPRL_STATE_H_FORMAT           " %5s"
#define G1PPRL_BYTE_FORMAT              " " SIZE_FORMAT_W(9)
#define G1PPRL_BYTE_H_FORMAT            " %9s"
#define G1PPRL_DOUBLE_FORMAT            " %14.1f"
#define G1PPRL_DOUBLE_H_FORMAT          " %14s"

// For summary info
#define G1PPRL_SUM_ADDR_FORMAT(tag)     " " tag ":" G1PPRL_ADDR_BASE_FORMAT
#define G1PPRL_SUM_BYTE_FORMAT(tag)     " " tag ": " SIZE_FORMAT
#define G1PPRL_SUM_MB_FORMAT(tag)       " " tag ": %1.2f MB"
#define G1PPRL_SUM_MB_PERC_FORMAT(tag)  G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"
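
// Illustration only (the exact column widths depend on the macro definitions
// above): adjacent string literals are concatenated by the compiler, so a
// call such as
//
//   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
//                           G1PPRL_TYPE_H_FORMAT
//                           G1PPRL_BYTE_H_FORMAT,
//                           "type", "used");
//
// passes a single format string of the form "### %4s %9s" to the logger,
// with each macro contributing one aligned column to the output line.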
2011-04-04 14:23:17 -04:00
2017-10-25 16:15:10 +02:00
G1PrintRegionLivenessInfoClosure : : G1PrintRegionLivenessInfoClosure ( const char * phase_name ) :
_total_used_bytes ( 0 ) , _total_capacity_bytes ( 0 ) ,
_total_prev_live_bytes ( 0 ) , _total_next_live_bytes ( 0 ) ,
_total_remset_bytes ( 0 ) , _total_strong_code_roots_bytes ( 0 )
{
2018-04-18 11:36:48 +02:00
if ( ! log_is_enabled ( Trace , gc , liveness ) ) {
return ;
}
2011-04-04 14:23:17 -04:00
G1CollectedHeap * g1h = G1CollectedHeap : : heap ( ) ;
MemRegion g1_reserved = g1h - > g1_reserved ( ) ;
double now = os : : elapsedTime ( ) ;
// Print the header of the output.
2015-12-10 14:57:55 +01:00
log_trace ( gc , liveness ) ( G1PPRL_LINE_PREFIX " PHASE %s @ %1.3f " , phase_name , now ) ;
log_trace ( gc , liveness ) ( G1PPRL_LINE_PREFIX " HEAP "
G1PPRL_SUM_ADDR_FORMAT ( " reserved " )
G1PPRL_SUM_BYTE_FORMAT ( " region-size " ) ,
p2i ( g1_reserved . start ( ) ) , p2i ( g1_reserved . end ( ) ) ,
HeapRegion : : GrainBytes ) ;
log_trace ( gc , liveness ) ( G1PPRL_LINE_PREFIX ) ;
log_trace ( gc , liveness ) ( G1PPRL_LINE_PREFIX
G1PPRL_TYPE_H_FORMAT
G1PPRL_ADDR_BASE_H_FORMAT
G1PPRL_BYTE_H_FORMAT
G1PPRL_BYTE_H_FORMAT
G1PPRL_BYTE_H_FORMAT
G1PPRL_DOUBLE_H_FORMAT
G1PPRL_BYTE_H_FORMAT
2018-04-04 14:51:26 +02:00
G1PPRL_STATE_H_FORMAT
2015-12-10 14:57:55 +01:00
G1PPRL_BYTE_H_FORMAT ,
" type " , " address-range " ,
" used " , " prev-live " , " next-live " , " gc-eff " ,
2018-04-04 14:51:26 +02:00
" remset " , " state " , " code-roots " ) ;
2015-12-10 14:57:55 +01:00
log_trace ( gc , liveness ) ( G1PPRL_LINE_PREFIX
G1PPRL_TYPE_H_FORMAT
G1PPRL_ADDR_BASE_H_FORMAT
G1PPRL_BYTE_H_FORMAT
G1PPRL_BYTE_H_FORMAT
G1PPRL_BYTE_H_FORMAT
G1PPRL_DOUBLE_H_FORMAT
G1PPRL_BYTE_H_FORMAT
2018-04-04 14:51:26 +02:00
G1PPRL_STATE_H_FORMAT
2015-12-10 14:57:55 +01:00
G1PPRL_BYTE_H_FORMAT ,
" " , " " ,
" (bytes) " , " (bytes) " , " (bytes) " , " (bytes/ms) " ,
2018-04-04 14:51:26 +02:00
" (bytes) " , " " , " (bytes) " ) ;
2011-04-04 14:23:17 -04:00
}
bool G1PrintRegionLivenessInfoClosure::do_heap_region(HeapRegion* r) {
  if (!log_is_enabled(Trace, gc, liveness)) {
    return false;
  }

  const char* type       = r->get_type_str();
  HeapWord* bottom       = r->bottom();
  HeapWord* end          = r->end();
  size_t capacity_bytes  = r->capacity();
  size_t used_bytes      = r->used();
  size_t prev_live_bytes = r->live_bytes();
  size_t next_live_bytes = r->next_live_bytes();
  double gc_eff          = r->gc_efficiency();
  size_t remset_bytes    = r->rem_set()->mem_size();
  size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
  const char* remset_type = r->rem_set()->get_short_state_str();

  _total_used_bytes      += used_bytes;
  _total_capacity_bytes  += capacity_bytes;
  _total_prev_live_bytes += prev_live_bytes;
  _total_next_live_bytes += next_live_bytes;
  _total_remset_bytes    += remset_bytes;
  _total_strong_code_roots_bytes += strong_code_roots_bytes;

  // Print a line for this particular region.
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          G1PPRL_TYPE_FORMAT
                          G1PPRL_ADDR_BASE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_DOUBLE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_STATE_FORMAT
                          G1PPRL_BYTE_FORMAT,
                          type, p2i(bottom), p2i(end),
                          used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
                          remset_bytes, remset_type, strong_code_roots_bytes);

  return false;
}

G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
  if (!log_is_enabled(Trace, gc, liveness)) {
    return;
  }

  // Add static memory usages to remembered set sizes.
  _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
  // Print the footer of the output.
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          " SUMMARY"
                          G1PPRL_SUM_MB_FORMAT("capacity")
                          G1PPRL_SUM_MB_PERC_FORMAT("used")
                          G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
                          G1PPRL_SUM_MB_PERC_FORMAT("next-live")
                          G1PPRL_SUM_MB_FORMAT("remset")
                          G1PPRL_SUM_MB_FORMAT("code-roots"),
                          bytes_to_mb(_total_capacity_bytes),
                          bytes_to_mb(_total_used_bytes),
                          percent_of(_total_used_bytes, _total_capacity_bytes),
                          bytes_to_mb(_total_prev_live_bytes),
                          percent_of(_total_prev_live_bytes, _total_capacity_bytes),
                          bytes_to_mb(_total_next_live_bytes),
                          percent_of(_total_next_live_bytes, _total_capacity_bytes),
                          bytes_to_mb(_total_remset_bytes),
                          bytes_to_mb(_total_strong_code_roots_bytes));
}