/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_G1OOPCLOSURES_INLINE_HPP
#define SHARE_VM_GC_G1_G1OOPCLOSURES_INLINE_HPP

#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1OopClosures.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oopsHierarchy.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"
template <class T>
inline void G1ScanClosureBase::prefetch_and_push(T* p, const oop obj) {
  // We're not going to even bother checking whether the object is
  // already forwarded or not, as this usually causes an immediate
  // stall. We'll try to prefetch the object (for write, given that
  // we might need to install the forwarding reference) and we'll
  // get back to it when we pop it from the queue.
  Prefetch::write(obj->mark_addr_raw(), 0);
  Prefetch::read(obj->mark_addr_raw(), (HeapWordSize*2));

  // Slightly paranoid test; I'm trying to catch potential
  // problems before we go into push_on_queue to know where the
  // problem is coming from.
  assert((obj == RawAccess<>::oop_load(p)) ||
         (obj->is_forwarded() &&
          obj->forwardee() == RawAccess<>::oop_load(p)),
         "p should still be pointing to obj or to its forwardee");

  _par_scan_state->push_on_queue(p);
}
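
// Common handling for references to objects outside the collection set:
// humongous objects are recorded as live so that eager reclaim does not
// free them while they are still referenced.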
template <class T>
inline void G1ScanClosureBase::handle_non_cset_obj_common(InCSetState const state, T* p, oop const obj) {
  if (state.is_humongous()) {
    _g1h->set_humongous_is_live(obj);
  }
}
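
// Drains part of this thread's work queue, keeping its memory footprint
// bounded while the closure is being applied.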
inline void G1ScanClosureBase::trim_queue_partially() {
  _par_scan_state->trim_queue_partially();
}
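
// Applied to the fields of objects that have just been copied during
// evacuation. References into the collection set are queued for later
// processing; other cross-region references update the remembered sets.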
template <class T>
inline void G1ScanEvacuatedObjClosure::do_oop_nv(T* p) {
  T heap_oop = RawAccess<>::oop_load(p);

  if (CompressedOops::is_null(heap_oop)) {
    return;
  }
  oop obj = CompressedOops::decode_not_null(heap_oop);
  const InCSetState state = _g1h->in_cset_state(obj);
  if (state.is_in_cset()) {
    prefetch_and_push(p, obj);
  } else {
    if (HeapRegion::is_in_same_region(p, obj)) {
      return;
    }
    handle_non_cset_obj_common(state, p, obj);
    _par_scan_state->update_rs(_from, p, obj);
  }
}
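
// Closure used during concurrent marking: hands each reference over to the
// marking task, which marks the object and pushes it for further scanning as
// needed.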
template <class T>
inline void G1CMOopClosure::do_oop_nv(T* p) {
  _task->deal_with_reference(p);
}
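
// Scans objects in root regions (e.g. the survivor regions of the preceding
// initial-mark pause) before concurrent marking starts. The MO_VOLATILE load
// is needed because mutator threads may be updating these fields concurrently.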
template <class T>
inline void G1RootRegionScanClosure::do_oop_nv(T* p) {
  T heap_oop = RawAccess<MO_VOLATILE>::oop_load(p);
  if (CompressedOops::is_null(heap_oop)) {
    return;
  }
  oop obj = CompressedOops::decode_not_null(heap_oop);
  _cm->mark_in_next_bitmap(_worker_id, obj);
}
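
// Verification-only sanity checks for a reference processed during remembered
// set refinement; compiles to nothing in product builds.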
template <class T>
inline static void check_obj_during_refinement(T* p, oop const obj) {
#ifdef ASSERT
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // Can't do this check because of races with concurrently running mutators:
  // assert(oopDesc::is_oop_or_null(obj), "expected an oop");
  assert(check_obj_alignment(obj), "not oop aligned");
  assert(g1h->is_in_reserved(obj), "must be in heap");

  HeapRegion* from = g1h->heap_region_containing(p);

  assert(from != NULL, "from region must be non-NULL");
  assert(from->is_in_reserved(p) ||
         (from->is_humongous() &&
          g1h->heap_region_containing(p)->is_humongous() &&
          from->humongous_start_region() == g1h->heap_region_containing(p)->humongous_start_region()),
         "p " PTR_FORMAT " is not in the same region %u or part of the correct humongous object starting at region %u.",
         p2i(p), from->hrm_index(), from->humongous_start_region()->hrm_index());
#endif // ASSERT
}
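
// Applied during concurrent refinement of dirty cards: records interesting
// cross-region references in the remembered set of the target region.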
template <class T>
inline void G1ConcurrentRefineOopClosure::do_oop_nv(T* p) {
  T o = RawAccess<MO_VOLATILE>::oop_load(p);
  if (CompressedOops::is_null(o)) {
    return;
  }
  oop obj = CompressedOops::decode_not_null(o);

  check_obj_during_refinement(p, obj);

  if (HeapRegion::is_in_same_region(p, obj)) {
    // Normally this closure should only be called with cross-region references.
    // But since Java threads are manipulating the references concurrently and we
    // reload the values, things may have changed.
    // Also this check lets slip through references from a humongous continuation
    // region to its humongous start region, as they are in different regions, and
    // adds a remembered set entry. This is benign (apart from memory usage), as we
    // never try to either evacuate or eagerly reclaim humongous arrays of
    // java.lang.Object.
    return;
  }

  HeapRegionRemSet* to_rem_set = _g1h->heap_region_containing(obj)->rem_set();

  assert(to_rem_set != NULL, "Need per-region 'into' remsets.");
  if (to_rem_set->is_tracked()) {
    to_rem_set->add_reference(p, _worker_i);
  }
}
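
// Applied when processing the remaining buffered dirty cards at the start of
// a GC pause (the "update RS" phase). References into the collection set are
// queued for evacuation; other cross-region references update the remembered
// sets.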
template <class T>
inline void G1ScanObjsDuringUpdateRSClosure::do_oop_nv(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (CompressedOops::is_null(o)) {
    return;
  }
  oop obj = CompressedOops::decode_not_null(o);

  check_obj_during_refinement(p, obj);

  assert(!_g1h->is_in_cset((HeapWord*)p),
         "Oop originates from " PTR_FORMAT " (region: %u) which is in the collection set.",
         p2i(p), _g1h->addr_to_region((HeapWord*)p));
  const InCSetState state = _g1h->in_cset_state(obj);
  if (state.is_in_cset()) {
    // Since the source is always from outside the collection set, here we implicitly know
    // that this is a cross-region reference too.
    prefetch_and_push(p, obj);
  } else {
    HeapRegion* to = _g1h->heap_region_containing(obj);
    if (_from == to) {
      return;
    }
    handle_non_cset_obj_common(state, p, obj);
    to->rem_set()->add_reference(p, _worker_i);
  }
}
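
// Applied to objects found while scanning remembered set cards (the "scan RS"
// phase) of a GC pause. Unlike the update-RS variant above, it never adds
// remembered set entries; non-collection-set references only get the common
// humongous handling.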
template <class T>
inline void G1ScanObjsDuringScanRSClosure::do_oop_nv(T* p) {
  T heap_oop = RawAccess<>::oop_load(p);
  if (CompressedOops::is_null(heap_oop)) {
    return;
  }
  oop obj = CompressedOops::decode_not_null(heap_oop);

  const InCSetState state = _g1h->in_cset_state(obj);
  if (state.is_in_cset()) {
    prefetch_and_push(p, obj);
  } else {
    if (HeapRegion::is_in_same_region(p, obj)) {
      return;
    }
    handle_non_cset_obj_common(state, p, obj);
  }
}
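
// CLD (class loader data) barrier: if the copied object ended up in a young
// region, record the scanned CLD as having modified oops so that subsequent
// collections revisit its oops.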
void G1ParCopyHelper::do_cld_barrier(oop new_obj) {
  if (_g1h->heap_region_containing(new_obj)->is_young()) {
    _scanned_cld->record_modified_oops();
  }
}

void G1ParCopyHelper::mark_object(oop obj) {
  assert(!_g1h->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");

  // We know that the object is not moving so it's safe to read its size.
  _cm->mark_in_next_bitmap(_worker_id, obj);
}

void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
  assert(from_obj->is_forwarded(), "from obj should be forwarded");
  assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
  assert(from_obj != to_obj, "should not be self-forwarded");

  assert(_g1h->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet");
  assert(!_g1h->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet");

  // The object might be in the process of being copied by another
  // worker so we cannot trust that its to-space image is
  // well-formed. So we have to read its size from its from-space
  // image, which we know should not be changing.
  _cm->mark_in_next_bitmap(_worker_id, to_obj, from_obj->size());
}

void G1ParCopyHelper::trim_queue_partially() {
  _par_scan_state->trim_queue_partially();
}
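
// Root-scanning copy closure: evacuates collection-set objects referenced
// from roots (installing or reusing the forwarding pointer), applies the CLD
// barrier when requested via the template parameters, and optionally marks
// objects during an initial-mark pause.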
template <G1Barrier barrier, G1Mark do_mark_object>
template <class T>
void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
  T heap_oop = RawAccess<>::oop_load(p);

  if (CompressedOops::is_null(heap_oop)) {
    return;
  }

  oop obj = CompressedOops::decode_not_null(heap_oop);

  assert(_worker_id == _par_scan_state->worker_id(), "sanity");

  const InCSetState state = _g1h->in_cset_state(obj);
  if (state.is_in_cset()) {
    oop forwardee;
    markOop m = obj->mark_raw();
    if (m->is_marked()) {
      forwardee = (oop) m->decode_pointer();
    } else {
      forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);
    }
    assert(forwardee != NULL, "forwardee should not be NULL");
    RawAccess<IS_NOT_NULL>::oop_store(p, forwardee);

    if (do_mark_object != G1MarkNone && forwardee != obj) {
      // If the object is self-forwarded we don't need to explicitly
      // mark it, the evacuation failure protocol will do so.
      mark_forwarded_object(obj, forwardee);
    }

    if (barrier == G1BarrierCLD) {
      do_cld_barrier(forwardee);
    }
  } else {
    if (state.is_humongous()) {
      _g1h->set_humongous_is_live(obj);
    }

    // The object is not in the collection set. If we're a root scanning
    // closure during an initial mark pause then attempt to mark the object.
    if (do_mark_object == G1MarkFromRoot) {
      mark_object(obj);
    }
  }
  trim_queue_partially();
}
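
// Used during the concurrent remembered set rebuild phase: adds a remembered
// set entry for every cross-region reference found.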
template <class T> void G1RebuildRemSetClosure::do_oop_nv(T* p) {
  oop const obj = RawAccess<MO_VOLATILE>::oop_load(p);
  if (obj == NULL) {
    return;
  }

  if (HeapRegion::is_in_same_region(p, obj)) {
    return;
  }

  HeapRegion* to = _g1h->heap_region_containing(obj);
  HeapRegionRemSet* rem_set = to->rem_set();
  rem_set->add_reference(p, _worker_id);
}

#endif // SHARE_VM_GC_G1_G1OOPCLOSURES_INLINE_HPP