/*
 * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "libadt/vectset.hpp"
#include "memory/universe.hpp"
#include "opto/addnode.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/convertnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/locknode.hpp"
#include "opto/loopnode.hpp"
#include "opto/macro.hpp"
#include "opto/memnode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/node.hpp"
#include "opto/opaquenode.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "opto/subtypenode.hpp"
#include "opto/type.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1ThreadLocalData.hpp"
#endif // INCLUDE_G1GC
#if INCLUDE_SHENANDOAHGC
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#endif

//
// Replace any references to "oldref" in inputs to "use" with "newref".
// Returns the number of replacements made.
//
int PhaseMacroExpand::replace_input(Node* use, Node* oldref, Node* newref) {
  int nreplacements = 0;
  uint req = use->req();
  for (uint j = 0; j < use->len(); j++) {
    Node* uin = use->in(j);
    if (uin == oldref) {
      if (j < req)
        use->set_req(j, newref);
      else
        use->set_prec(j, newref);
      nreplacements++;
    } else if (j >= req && uin == NULL) {
      break;
    }
  }
  return nreplacements;
}

void PhaseMacroExpand::migrate_outs(Node* old, Node* target) {
  assert(old != NULL, "sanity");
  for (DUIterator_Fast imax, i = old->fast_outs(imax); i < imax; i++) {
    Node* use = old->fast_out(i);
    _igvn.rehash_node_delayed(use);
    imax -= replace_input(use, old, target);
    // back up iterator
    --i;
  }
  assert(old->outcnt() == 0, "all uses must be deleted");
}

Node* PhaseMacroExpand::opt_bits_test(Node* ctrl, Node* region, int edge, Node* word, int mask, int bits, bool return_fast_path) {
  Node* cmp;
  if (mask != 0) {
    Node* and_node = transform_later(new AndXNode(word, MakeConX(mask)));
    cmp = transform_later(new CmpXNode(and_node, MakeConX(bits)));
  } else {
    cmp = word;
  }
  Node* bol = transform_later(new BoolNode(cmp, BoolTest::ne));
  IfNode* iff = new IfNode(ctrl, bol, PROB_MIN, COUNT_UNKNOWN);
  transform_later(iff);

  // Fast path taken.
  Node* fast_taken = transform_later(new IfFalseNode(iff));

  // Fast path not-taken, i.e. slow path
  Node* slow_taken = transform_later(new IfTrueNode(iff));

  if (return_fast_path) {
    region->init_req(edge, slow_taken); // Capture slow-control
    return fast_taken;
  } else {
    region->init_req(edge, fast_taken); // Capture fast-control
    return slow_taken;
  }
}
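
// A minimal usage sketch for opt_bits_test() (illustrative only; 'mark_word',
// 'lock_mask' and 'unlocked_pattern' are placeholder names, not taken from
// this file):
//
//   RegionNode* region = new RegionNode(3);
//   // Branch on (mark_word & lock_mask) != unlocked_pattern.  With
//   // return_fast_path == true the returned control is the matching (fast)
//   // path, while region->in(2) captures the non-matching (slow) path.
//   Node* fast_ctrl = opt_bits_test(ctrl, region, 2, mark_word,
//                                   lock_mask, unlocked_pattern, true);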

//--------------------copy_predefined_input_for_runtime_call--------------------
void PhaseMacroExpand::copy_predefined_input_for_runtime_call(Node* ctrl, CallNode* oldcall, CallNode* call) {
  // Set fixed predefined input arguments
  call->init_req(TypeFunc::Control, ctrl);
  call->init_req(TypeFunc::I_O, oldcall->in(TypeFunc::I_O));
  call->init_req(TypeFunc::Memory, oldcall->in(TypeFunc::Memory)); // ?????
  call->init_req(TypeFunc::ReturnAdr, oldcall->in(TypeFunc::ReturnAdr));
  call->init_req(TypeFunc::FramePtr, oldcall->in(TypeFunc::FramePtr));
}

//------------------------------make_slow_call---------------------------------
CallNode* PhaseMacroExpand::make_slow_call(CallNode* oldcall, const TypeFunc* slow_call_type,
                                           address slow_call, const char* leaf_name, Node* slow_path,
                                           Node* parm0, Node* parm1, Node* parm2) {

  // Slow-path call
  CallNode* call = leaf_name
    ? (CallNode*)new CallLeafNode(slow_call_type, slow_call, leaf_name, TypeRawPtr::BOTTOM)
    : (CallNode*)new CallStaticJavaNode(slow_call_type, slow_call, OptoRuntime::stub_name(slow_call), oldcall->jvms()->bci(), TypeRawPtr::BOTTOM);

  // Slow path call has no side-effects, uses few values
  copy_predefined_input_for_runtime_call(slow_path, oldcall, call);
  if (parm0 != NULL) call->init_req(TypeFunc::Parms+0, parm0);
  if (parm1 != NULL) call->init_req(TypeFunc::Parms+1, parm1);
  if (parm2 != NULL) call->init_req(TypeFunc::Parms+2, parm2);
  call->copy_call_debug_info(&_igvn, oldcall);
  call->set_cnt(PROB_UNLIKELY_MAG(4)); // Same effect as RC_UNCOMMON.
  _igvn.replace_node(oldcall, call);
  transform_later(call);

  return call;
}

void PhaseMacroExpand::extract_call_projections(CallNode* call) {
  _fallthroughproj = NULL;
  _fallthroughcatchproj = NULL;
  _ioproj_fallthrough = NULL;
  _ioproj_catchall = NULL;
  _catchallcatchproj = NULL;
  _memproj_fallthrough = NULL;
  _memproj_catchall = NULL;
  _resproj = NULL;
  for (DUIterator_Fast imax, i = call->fast_outs(imax); i < imax; i++) {
    ProjNode* pn = call->fast_out(i)->as_Proj();
    switch (pn->_con) {
      case TypeFunc::Control:
      {
        // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
        _fallthroughproj = pn;
        DUIterator_Fast jmax, j = pn->fast_outs(jmax);
        const Node* cn = pn->fast_out(j);
        if (cn->is_Catch()) {
          ProjNode* cpn = NULL;
          for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
            cpn = cn->fast_out(k)->as_Proj();
            assert(cpn->is_CatchProj(), "must be a CatchProjNode");
            if (cpn->_con == CatchProjNode::fall_through_index)
              _fallthroughcatchproj = cpn;
            else {
              assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index.");
              _catchallcatchproj = cpn;
            }
          }
        }
        break;
      }
      case TypeFunc::I_O:
        if (pn->_is_io_use)
          _ioproj_catchall = pn;
        else
          _ioproj_fallthrough = pn;
        break;
      case TypeFunc::Memory:
        if (pn->_is_io_use)
          _memproj_catchall = pn;
        else
          _memproj_fallthrough = pn;
        break;
      case TypeFunc::Parms:
        _resproj = pn;
        break;
      default:
        assert(false, "unexpected projection from allocation node.");
    }
  }
}

void PhaseMacroExpand::eliminate_gc_barrier(Node* p2x) {
  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  bs->eliminate_gc_barrier(this, p2x);
}

// Search for a memory operation for the specified memory slice.
static Node* scan_mem_chain(Node* mem, int alias_idx, int offset, Node* start_mem, Node* alloc, PhaseGVN* phase) {
  Node* orig_mem = mem;
  Node* alloc_mem = alloc->in(TypeFunc::Memory);
  const TypeOopPtr* tinst = phase->C->get_adr_type(alias_idx)->isa_oopptr();
  while (true) {
    if (mem == alloc_mem || mem == start_mem) {
      return mem;  // hit one of our sentinels
    } else if (mem->is_MergeMem()) {
      mem = mem->as_MergeMem()->memory_at(alias_idx);
    } else if (mem->is_Proj() && mem->as_Proj()->_con == TypeFunc::Memory) {
      Node* in = mem->in(0);
      // we can safely skip over safepoints, calls, locks and membars because we
      // already know that the object is safe to eliminate.
      if (in->is_Initialize() && in->as_Initialize()->allocation() == alloc) {
        return in;
      } else if (in->is_Call()) {
        CallNode* call = in->as_Call();
        if (call->may_modify(tinst, phase)) {
          assert(call->is_ArrayCopy(), "ArrayCopy is the only call node that doesn't make allocation escape");
          if (call->as_ArrayCopy()->modifies(offset, offset, phase, false)) {
            return in;
          }
        }
        mem = in->in(TypeFunc::Memory);
      } else if (in->is_MemBar()) {
        ArrayCopyNode* ac = NULL;
        if (ArrayCopyNode::may_modify(tinst, in->as_MemBar(), phase, ac)) {
          assert(ac != NULL && ac->is_clonebasic(), "Only basic clone is a non escaping clone");
          return ac;
        }
        mem = in->in(TypeFunc::Memory);
      } else {
        assert(false, "unexpected projection");
      }
    } else if (mem->is_Store()) {
      const TypePtr* atype = mem->as_Store()->adr_type();
      int adr_idx = phase->C->get_alias_index(atype);
      if (adr_idx == alias_idx) {
        assert(atype->isa_oopptr(), "address type must be oopptr");
        int adr_offset = atype->offset();
        uint adr_iid = atype->is_oopptr()->instance_id();
        // Array element references have the same alias_idx
        // but different offset and different instance_id.
        if (adr_offset == offset && adr_iid == alloc->_idx)
          return mem;
      } else {
        assert(adr_idx == Compile::AliasIdxRaw, "address must match or be raw");
      }
      mem = mem->in(MemNode::Memory);
    } else if (mem->is_ClearArray()) {
      if (!ClearArrayNode::step_through(&mem, alloc->_idx, phase)) {
        // Cannot bypass initialization of the instance
        // we are looking for.
        debug_only(intptr_t offset;)
        assert(alloc == AllocateNode::Ideal_allocation(mem->in(3), phase, offset), "sanity");
        InitializeNode* init = alloc->as_Allocate()->initialization();
        // We are looking for the stored value, return the Initialize node
        // or the memory edge from the Allocate node.
        if (init != NULL)
          return init;
        else
          return alloc->in(TypeFunc::Memory); // It will produce zero value (see callers).
      }
      // Otherwise skip it (the call updated 'mem' value).
    } else if (mem->Opcode() == Op_SCMemProj) {
      mem = mem->in(0);
      Node* adr = NULL;
      if (mem->is_LoadStore()) {
        adr = mem->in(MemNode::Address);
      } else {
        assert(mem->Opcode() == Op_EncodeISOArray ||
               mem->Opcode() == Op_StrCompressedCopy, "sanity");
        adr = mem->in(3); // Destination array
      }
      const TypePtr* atype = adr->bottom_type()->is_ptr();
      int adr_idx = phase->C->get_alias_index(atype);
      if (adr_idx == alias_idx) {
        DEBUG_ONLY(mem->dump();)
        assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
        return NULL;
      }
      mem = mem->in(MemNode::Memory);
    } else if (mem->Opcode() == Op_StrInflatedCopy) {
      Node* adr = mem->in(3); // Destination array
      const TypePtr* atype = adr->bottom_type()->is_ptr();
      int adr_idx = phase->C->get_alias_index(atype);
      if (adr_idx == alias_idx) {
        DEBUG_ONLY(mem->dump();)
        assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field");
        return NULL;
      }
      mem = mem->in(MemNode::Memory);
    } else {
      return mem;
    }
    assert(mem != orig_mem, "dead memory loop");
  }
}
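
// Illustrative memory chain walked by scan_mem_chain() (a sketch, not an
// actual graph from this file):
//
//   sfpt_mem -> MergeMem -> StoreI(off, #instance_id) -> Proj -> Initialize -> ... -> alloc_mem
//
// The walk returns the first Store whose (offset, instance_id) match the field
// being reconstructed, the Initialize/ArrayCopy that captured the value, or one
// of the sentinels (start_mem / alloc_mem), which the callers interpret as
// "the field still holds its zero default".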

// Generate loads from source of the arraycopy for fields of
// destination needed at a deoptimization point
Node* PhaseMacroExpand::make_arraycopy_load(ArrayCopyNode* ac, intptr_t offset, Node* ctl, Node* mem, BasicType ft, const Type* ftype, AllocateNode* alloc) {
  BasicType bt = ft;
  const Type* type = ftype;
  if (ft == T_NARROWOOP) {
    bt = T_OBJECT;
    type = ftype->make_oopptr();
  }
  Node* res = NULL;
  if (ac->is_clonebasic()) {
    assert(ac->in(ArrayCopyNode::Src) != ac->in(ArrayCopyNode::Dest), "clone source equals destination");
    Node* base = ac->in(ArrayCopyNode::Src);
    Node* adr = _igvn.transform(new AddPNode(base, base, MakeConX(offset)));
    const TypePtr* adr_type = _igvn.type(base)->is_ptr()->add_offset(offset);
    MergeMemNode* mergemen = _igvn.transform(MergeMemNode::make(mem))->as_MergeMem();
    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    res = ArrayCopyNode::load(bs, &_igvn, ctl, mergemen, adr, adr_type, type, bt);
  } else {
    if (ac->modifies(offset, offset, &_igvn, true)) {
      assert(ac->in(ArrayCopyNode::Dest) == alloc->result_cast(), "arraycopy destination should be allocation's result");
      uint shift = exact_log2(type2aelembytes(bt));
      Node* src_pos = ac->in(ArrayCopyNode::SrcPos);
      Node* dest_pos = ac->in(ArrayCopyNode::DestPos);
      const TypeInt* src_pos_t = _igvn.type(src_pos)->is_int();
      const TypeInt* dest_pos_t = _igvn.type(dest_pos)->is_int();

      Node* adr = NULL;
      const TypePtr* adr_type = NULL;
      if (src_pos_t->is_con() && dest_pos_t->is_con()) {
        intptr_t off = ((src_pos_t->get_con() - dest_pos_t->get_con()) << shift) + offset;
        Node* base = ac->in(ArrayCopyNode::Src);
        adr = _igvn.transform(new AddPNode(base, base, MakeConX(off)));
        adr_type = _igvn.type(base)->is_ptr()->add_offset(off);
        if (ac->in(ArrayCopyNode::Src) == ac->in(ArrayCopyNode::Dest)) {
          // Don't emit a new load from src if src == dst but try to get the value from memory instead
          return value_from_mem(ac->in(TypeFunc::Memory), ctl, ft, ftype, adr_type->isa_oopptr(), alloc);
        }
      } else {
        Node* diff = _igvn.transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
#ifdef _LP64
        diff = _igvn.transform(new ConvI2LNode(diff));
#endif
        diff = _igvn.transform(new LShiftXNode(diff, intcon(shift)));

        Node* off = _igvn.transform(new AddXNode(MakeConX(offset), diff));
        Node* base = ac->in(ArrayCopyNode::Src);
        adr = _igvn.transform(new AddPNode(base, base, off));
        adr_type = _igvn.type(base)->is_ptr()->add_offset(Type::OffsetBot);
        if (ac->in(ArrayCopyNode::Src) == ac->in(ArrayCopyNode::Dest)) {
          // Non constant offset in the array: we can't statically
          // determine the value
          return NULL;
        }
      }
      MergeMemNode* mergemen = _igvn.transform(MergeMemNode::make(mem))->as_MergeMem();
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      res = ArrayCopyNode::load(bs, &_igvn, ctl, mergemen, adr, adr_type, type, bt);
    }
  }
  if (res != NULL) {
    if (ftype->isa_narrowoop()) {
      // PhaseMacroExpand::scalar_replacement adds DecodeN nodes
      res = _igvn.transform(new EncodePNode(res, ftype));
    }
    return res;
  }
  return NULL;
}

//
// Given a Memory Phi, compute a value Phi containing the values from stores
// on the input paths.
// Note: this function is recursive, its depth is limited by the "level" argument
// Returns the computed Phi, or NULL if it cannot compute it.
Node* PhaseMacroExpand::value_from_mem_phi(Node* mem, BasicType ft, const Type* phi_type, const TypeOopPtr* adr_t, AllocateNode* alloc, Node_Stack* value_phis, int level) {
  assert(mem->is_Phi(), "sanity");
  int alias_idx = C->get_alias_index(adr_t);
  int offset = adr_t->offset();
  int instance_id = adr_t->instance_id();

  // Check if an appropriate value phi already exists.
  Node* region = mem->in(0);
  for (DUIterator_Fast kmax, k = region->fast_outs(kmax); k < kmax; k++) {
    Node* phi = region->fast_out(k);
    if (phi->is_Phi() && phi != mem &&
        phi->as_Phi()->is_same_inst_field(phi_type, (int)mem->_idx, instance_id, alias_idx, offset)) {
      return phi;
    }
  }
  // Check if an appropriate new value phi already exists.
  Node* new_phi = value_phis->find(mem->_idx);
  if (new_phi != NULL)
    return new_phi;

  if (level <= 0) {
    return NULL; // Give up: phi tree too deep
  }
  Node* start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
  Node* alloc_mem = alloc->in(TypeFunc::Memory);

  uint length = mem->req();
  GrowableArray<Node*> values(length, length, NULL);

  // create a new Phi for the value
  PhiNode* phi = new PhiNode(mem->in(0), phi_type, NULL, mem->_idx, instance_id, alias_idx, offset);
  transform_later(phi);
  value_phis->push(phi, mem->_idx);

  for (uint j = 1; j < length; j++) {
    Node* in = mem->in(j);
    if (in == NULL || in->is_top()) {
      values.at_put(j, in);
    } else {
      Node* val = scan_mem_chain(in, alias_idx, offset, start_mem, alloc, &_igvn);
      if (val == start_mem || val == alloc_mem) {
        // hit a sentinel, return appropriate 0 value
        values.at_put(j, _igvn.zerocon(ft));
        continue;
      }
      if (val->is_Initialize()) {
        val = val->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn);
      }
      if (val == NULL) {
        return NULL;  // can't find a value on this path
      }
      if (val == mem) {
        values.at_put(j, mem);
      } else if (val->is_Store()) {
        Node* n = val->in(MemNode::ValueIn);
        BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
        n = bs->step_over_gc_barrier(n);
        values.at_put(j, n);
      } else if (val->is_Proj() && val->in(0) == alloc) {
        values.at_put(j, _igvn.zerocon(ft));
      } else if (val->is_Phi()) {
        val = value_from_mem_phi(val, ft, phi_type, adr_t, alloc, value_phis, level-1);
        if (val == NULL) {
          return NULL;
        }
        values.at_put(j, val);
      } else if (val->Opcode() == Op_SCMemProj) {
        assert(val->in(0)->is_LoadStore() ||
               val->in(0)->Opcode() == Op_EncodeISOArray ||
               val->in(0)->Opcode() == Op_StrCompressedCopy, "sanity");
        assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
        return NULL;
      } else if (val->is_ArrayCopy()) {
        Node* res = make_arraycopy_load(val->as_ArrayCopy(), offset, val->in(0), val->in(TypeFunc::Memory), ft, phi_type, alloc);
        if (res == NULL) {
          return NULL;
        }
        values.at_put(j, res);
      } else {
#ifdef ASSERT
        val->dump();
        assert(false, "unknown node on this path");
#endif
        return NULL;  // unknown node on this path
      }
    }
  }
  // Set Phi's inputs
  for (uint j = 1; j < length; j++) {
    if (values.at(j) == mem) {
      phi->init_req(j, phi);
    } else {
      phi->init_req(j, values.at(j));
    }
  }
  return phi;
}

// Search for the last value stored into the object's field.
Node* PhaseMacroExpand::value_from_mem(Node* sfpt_mem, Node* sfpt_ctl, BasicType ft, const Type* ftype, const TypeOopPtr* adr_t, AllocateNode* alloc) {
  assert(adr_t->is_known_instance_field(), "instance required");
  int instance_id = adr_t->instance_id();
  assert((uint)instance_id == alloc->_idx, "wrong allocation");

  int alias_idx = C->get_alias_index(adr_t);
  int offset = adr_t->offset();
  Node* start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
  Node* alloc_ctrl = alloc->in(TypeFunc::Control);
  Node* alloc_mem = alloc->in(TypeFunc::Memory);
  VectorSet visited;

  bool done = sfpt_mem == alloc_mem;
  Node* mem = sfpt_mem;
  while (!done) {
    if (visited.test_set(mem->_idx)) {
      return NULL;  // found a loop, give up
    }
    mem = scan_mem_chain(mem, alias_idx, offset, start_mem, alloc, &_igvn);
    if (mem == start_mem || mem == alloc_mem) {
      done = true;  // hit a sentinel, return appropriate 0 value
    } else if (mem->is_Initialize()) {
      mem = mem->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn);
      if (mem == NULL) {
        done = true; // Something went wrong.
      } else if (mem->is_Store()) {
        const TypePtr* atype = mem->as_Store()->adr_type();
        assert(C->get_alias_index(atype) == Compile::AliasIdxRaw, "store is correct memory slice");
        done = true;
      }
    } else if (mem->is_Store()) {
      const TypeOopPtr* atype = mem->as_Store()->adr_type()->isa_oopptr();
      assert(atype != NULL, "address type must be oopptr");
      assert(C->get_alias_index(atype) == alias_idx &&
             atype->is_known_instance_field() && atype->offset() == offset &&
             atype->instance_id() == instance_id, "store is correct memory slice");
      done = true;
    } else if (mem->is_Phi()) {
      // try to find a phi's unique input
      Node* unique_input = NULL;
      Node* top = C->top();
      for (uint i = 1; i < mem->req(); i++) {
        Node* n = scan_mem_chain(mem->in(i), alias_idx, offset, start_mem, alloc, &_igvn);
        if (n == NULL || n == top || n == mem) {
          continue;
        } else if (unique_input == NULL) {
          unique_input = n;
        } else if (unique_input != n) {
          unique_input = top;
          break;
        }
      }
      if (unique_input != NULL && unique_input != top) {
        mem = unique_input;
      } else {
        done = true;
      }
    } else if (mem->is_ArrayCopy()) {
      done = true;
    } else {
      assert(false, "unexpected node");
    }
  }
  if (mem != NULL) {
    if (mem == start_mem || mem == alloc_mem) {
      // hit a sentinel, return appropriate 0 value
      return _igvn.zerocon(ft);
    } else if (mem->is_Store()) {
      Node* n = mem->in(MemNode::ValueIn);
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      n = bs->step_over_gc_barrier(n);
      return n;
    } else if (mem->is_Phi()) {
      // attempt to produce a Phi reflecting the values on the input paths of the Phi
      Node_Stack value_phis(8);
      Node* phi = value_from_mem_phi(mem, ft, ftype, adr_t, alloc, &value_phis, ValueSearchLimit);
      if (phi != NULL) {
        return phi;
      } else {
        // Kill all new Phis
        while (value_phis.is_nonempty()) {
          Node* n = value_phis.node();
          _igvn.replace_node(n, C->top());
          value_phis.pop();
        }
      }
    } else if (mem->is_ArrayCopy()) {
      Node* ctl = mem->in(0);
      Node* m = mem->in(TypeFunc::Memory);
      if (sfpt_ctl->is_Proj() && sfpt_ctl->as_Proj()->is_uncommon_trap_proj(Deoptimization::Reason_none)) {
        // pin the loads in the uncommon trap path
        ctl = sfpt_ctl;
        m = sfpt_mem;
      }
      return make_arraycopy_load(mem->as_ArrayCopy(), offset, ctl, m, ft, ftype, alloc);
    }
  }
  // Something went wrong.
  return NULL;
}

// Check the possibility of scalar replacement.
bool PhaseMacroExpand::can_eliminate_allocation(AllocateNode* alloc, GrowableArray<SafePointNode*>& safepoints) {
  // Scan the uses of the allocation to check for anything that would
  // prevent us from eliminating it.
  NOT_PRODUCT(const char* fail_eliminate = NULL;)
  DEBUG_ONLY(Node* disq_node = NULL;)
  bool can_eliminate = true;

  Node* res = alloc->result_cast();
  const TypeOopPtr* res_type = NULL;
  if (res == NULL) {
    // All users were eliminated.
  } else if (!res->is_CheckCastPP()) {
    NOT_PRODUCT(fail_eliminate = "Allocation does not have unique CheckCastPP";)
    can_eliminate = false;
  } else {
    res_type = _igvn.type(res)->isa_oopptr();
    if (res_type == NULL) {
      NOT_PRODUCT(fail_eliminate = "Neither instance nor array allocation";)
      can_eliminate = false;
    } else if (res_type->isa_aryptr()) {
      int length = alloc->in(AllocateNode::ALength)->find_int_con(-1);
      if (length < 0) {
        NOT_PRODUCT(fail_eliminate = "Array's size is not constant";)
        can_eliminate = false;
      }
    }
  }

  if (can_eliminate && res != NULL) {
    for (DUIterator_Fast jmax, j = res->fast_outs(jmax);
                               j < jmax && can_eliminate; j++) {
      Node* use = res->fast_out(j);

      if (use->is_AddP()) {
        const TypePtr* addp_type = _igvn.type(use)->is_ptr();
        int offset = addp_type->offset();

        if (offset == Type::OffsetTop || offset == Type::OffsetBot) {
          NOT_PRODUCT(fail_eliminate = "Undefined field reference";)
          can_eliminate = false;
          break;
        }
        for (DUIterator_Fast kmax, k = use->fast_outs(kmax);
                                   k < kmax && can_eliminate; k++) {
          Node* n = use->fast_out(k);
          if (!n->is_Store() && n->Opcode() != Op_CastP2X
              SHENANDOAHGC_ONLY(&& (!UseShenandoahGC || !ShenandoahBarrierSetC2::is_shenandoah_wb_pre_call(n)))) {
            DEBUG_ONLY(disq_node = n;)
            if (n->is_Load() || n->is_LoadStore()) {
              NOT_PRODUCT(fail_eliminate = "Field load";)
            } else {
              NOT_PRODUCT(fail_eliminate = "Not store field reference";)
            }
            can_eliminate = false;
          }
        }
      } else if (use->is_ArrayCopy() &&
                 (use->as_ArrayCopy()->is_clonebasic() ||
                  use->as_ArrayCopy()->is_arraycopy_validated() ||
                  use->as_ArrayCopy()->is_copyof_validated() ||
                  use->as_ArrayCopy()->is_copyofrange_validated()) &&
                 use->in(ArrayCopyNode::Dest) == res) {
        // ok to eliminate
      } else if (use->is_SafePoint()) {
        SafePointNode* sfpt = use->as_SafePoint();
        if (sfpt->is_Call() && sfpt->as_Call()->has_non_debug_use(res)) {
          // Object is passed as argument.
          DEBUG_ONLY(disq_node = use;)
          NOT_PRODUCT(fail_eliminate = "Object is passed as argument";)
          can_eliminate = false;
        }
        Node* sfptMem = sfpt->memory();
        if (sfptMem == NULL || sfptMem->is_top()) {
          DEBUG_ONLY(disq_node = use;)
          NOT_PRODUCT(fail_eliminate = "NULL or TOP memory";)
          can_eliminate = false;
        } else {
          safepoints.append_if_missing(sfpt);
        }
      } else if (use->Opcode() != Op_CastP2X) { // CastP2X is used by card mark
        if (use->is_Phi()) {
          if (use->outcnt() == 1 && use->unique_out()->Opcode() == Op_Return) {
            NOT_PRODUCT(fail_eliminate = "Object is return value";)
          } else {
            NOT_PRODUCT(fail_eliminate = "Object is referenced by Phi";)
          }
          DEBUG_ONLY(disq_node = use;)
        } else {
          if (use->Opcode() == Op_Return) {
            NOT_PRODUCT(fail_eliminate = "Object is return value";)
          } else {
            NOT_PRODUCT(fail_eliminate = "Object is referenced by node";)
          }
          DEBUG_ONLY(disq_node = use;)
        }
        can_eliminate = false;
      }
    }
  }

#ifndef PRODUCT
  if (PrintEliminateAllocations) {
    if (can_eliminate) {
      tty->print("Scalar ");
      if (res == NULL)
        alloc->dump();
      else
        res->dump();
    } else if (alloc->_is_scalar_replaceable) {
      tty->print("NotScalar (%s)", fail_eliminate);
      if (res == NULL)
        alloc->dump();
      else
        res->dump();
#ifdef ASSERT
      if (disq_node != NULL) {
        tty->print("  >>>> ");
        disq_node->dump();
      }
#endif /*ASSERT*/
    }
  }
#endif
  return can_eliminate;
}

// Do scalar replacement.
bool PhaseMacroExpand::scalar_replacement(AllocateNode* alloc, GrowableArray<SafePointNode*>& safepoints) {
  GrowableArray<SafePointNode*> safepoints_done;

  ciKlass* klass = NULL;
  ciInstanceKlass* iklass = NULL;
  int nfields = 0;
  int array_base = 0;
  int element_size = 0;
  BasicType basic_elem_type = T_ILLEGAL;
  ciType* elem_type = NULL;

  Node* res = alloc->result_cast();
  assert(res == NULL || res->is_CheckCastPP(), "unexpected AllocateNode result");
  const TypeOopPtr* res_type = NULL;
  if (res != NULL) { // Could be NULL when there are no users
    res_type = _igvn.type(res)->isa_oopptr();
  }

  if (res != NULL) {
    klass = res_type->klass();
    if (res_type->isa_instptr()) {
      // find the fields of the class which will be needed for safepoint debug information
      assert(klass->is_instance_klass(), "must be an instance klass.");
      iklass = klass->as_instance_klass();
      nfields = iklass->nof_nonstatic_fields();
    } else {
      // find the array's elements which will be needed for safepoint debug information
      nfields = alloc->in(AllocateNode::ALength)->find_int_con(-1);
      assert(klass->is_array_klass() && nfields >= 0, "must be an array klass.");
      elem_type = klass->as_array_klass()->element_type();
      basic_elem_type = elem_type->basic_type();
      array_base = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
      element_size = type2aelembytes(basic_elem_type);
    }
  }
  //
  // Process the safepoint uses
  //
  while (safepoints.length() > 0) {
    SafePointNode* sfpt = safepoints.pop();
    Node* mem = sfpt->memory();
    Node* ctl = sfpt->control();
    assert(sfpt->jvms() != NULL, "missed JVMS");
    // Fields of scalar objs are referenced only at the end
    // of regular debuginfo at the last (youngest) JVMS.
    // Record relative start index.
    uint first_ind = (sfpt->req() - sfpt->jvms()->scloff());
    SafePointScalarObjectNode* sobj = new SafePointScalarObjectNode(res_type,
#ifdef ASSERT
                                                                    alloc,
#endif
                                                                    first_ind, nfields);
    sobj->init_req(0, C->root());
    transform_later(sobj);

    // Scan object's fields adding an input to the safepoint for each field.
    for (int j = 0; j < nfields; j++) {
      intptr_t offset;
      ciField* field = NULL;
      if (iklass != NULL) {
        field = iklass->nonstatic_field_at(j);
        offset = field->offset();
        elem_type = field->type();
        basic_elem_type = field->layout_type();
      } else {
        offset = array_base + j * (intptr_t)element_size;
      }

      const Type* field_type;
      // The next code is taken from Parse::do_get_xxx().
      if (is_reference_type(basic_elem_type)) {
        if (!elem_type->is_loaded()) {
          field_type = TypeInstPtr::BOTTOM;
        } else if (field != NULL && field->is_static_constant()) {
          // This can happen if the constant oop is non-perm.
          ciObject* con = field->constant_value().as_object();
          // Do not "join" in the previous type; it doesn't add value,
          // and may yield a vacuous result if the field is of interface type.
          field_type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
          assert(field_type != NULL, "field singleton type must be consistent");
        } else {
          field_type = TypeOopPtr::make_from_klass(elem_type->as_klass());
        }
        if (UseCompressedOops) {
          field_type = field_type->make_narrowoop();
          basic_elem_type = T_NARROWOOP;
        }
      } else {
        field_type = Type::get_const_basic_type(basic_elem_type);
      }

      const TypeOopPtr* field_addr_type = res_type->add_offset(offset)->isa_oopptr();

      Node* field_val = value_from_mem(mem, ctl, basic_elem_type, field_type, field_addr_type, alloc);
      if (field_val == NULL) {
        // We weren't able to find a value for this field,
        // give up on eliminating this allocation.

        // Remove any extra entries we added to the safepoint.
        uint last = sfpt->req() - 1;
        for (int k = 0; k < j; k++) {
          sfpt->del_req(last--);
        }
        _igvn._worklist.push(sfpt);
        // rollback processed safepoints
        while (safepoints_done.length() > 0) {
          SafePointNode* sfpt_done = safepoints_done.pop();
          // remove any extra entries we added to the safepoint
          last = sfpt_done->req() - 1;
          for (int k = 0; k < nfields; k++) {
            sfpt_done->del_req(last--);
          }
          JVMState* jvms = sfpt_done->jvms();
          jvms->set_endoff(sfpt_done->req());
          // Now make a pass over the debug information replacing any references
          // to SafePointScalarObjectNode with the allocated object.
          int start = jvms->debug_start();
          int end   = jvms->debug_end();
          for (int i = start; i < end; i++) {
            if (sfpt_done->in(i)->is_SafePointScalarObject()) {
              SafePointScalarObjectNode* scobj = sfpt_done->in(i)->as_SafePointScalarObject();
              if (scobj->first_index(jvms) == sfpt_done->req() &&
                  scobj->n_fields() == (uint)nfields) {
                assert(scobj->alloc() == alloc, "sanity");
                sfpt_done->set_req(i, res);
              }
            }
          }
          _igvn._worklist.push(sfpt_done);
        }
#ifndef PRODUCT
        if (PrintEliminateAllocations) {
          if (field != NULL) {
            tty->print("=== At SafePoint node %d can't find value of Field: ",
                       sfpt->_idx);
            field->print();
            int field_idx = C->get_alias_index(field_addr_type);
            tty->print(" (alias_idx=%d)", field_idx);
          } else { // Array's element
            tty->print("=== At SafePoint node %d can't find value of array element [%d]",
                       sfpt->_idx, j);
          }
          tty->print(", which prevents elimination of: ");
          if (res == NULL)
            alloc->dump();
          else
            res->dump();
        }
#endif
        return false;
      }
      if (UseCompressedOops && field_type->isa_narrowoop()) {
        // Enable "DecodeN(EncodeP(Allocate)) --> Allocate" transformation
        // to be able to scalar replace the allocation.
        if (field_val->is_EncodeP()) {
          field_val = field_val->in(1);
        } else {
          field_val = transform_later(new DecodeNNode(field_val, field_val->get_ptr_type()));
        }
      }
      sfpt->add_req(field_val);
    }
    JVMState* jvms = sfpt->jvms();
    jvms->set_endoff(sfpt->req());
    // Now make a pass over the debug information replacing any references
    // to the allocated object with "sobj"
    int start = jvms->debug_start();
    int end   = jvms->debug_end();
    sfpt->replace_edges_in_range(res, sobj, start, end);
    _igvn._worklist.push(sfpt);
    safepoints_done.append_if_missing(sfpt); // keep it for rollback
  }
  return true;
}
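
// Resulting debug-info shape (a sketch, not literal output): a SafePoint that
// used to reference the allocation's CheckCastPP now references a
// SafePointScalarObjectNode, with the field values appended at the end of the
// youngest JVMS, e.g. for a two-field object:
//
//   SafePoint(..., SafePointScalarObject(first_ind, nfields=2), ..., val0, val1)
//
// which lets deoptimization rematerialize the eliminated object from val0/val1.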

static void disconnect_projections(MultiNode* n, PhaseIterGVN& igvn) {
  Node* ctl_proj = n->proj_out_or_null(TypeFunc::Control);
  Node* mem_proj = n->proj_out_or_null(TypeFunc::Memory);

  if (ctl_proj != NULL) {
    igvn.replace_node(ctl_proj, n->in(0));
  }
  if (mem_proj != NULL) {
    igvn.replace_node(mem_proj, n->in(TypeFunc::Memory));
  }
}

// Process users of eliminated allocation.
void PhaseMacroExpand::process_users_of_allocation(CallNode* alloc) {
  Node* res = alloc->result_cast();
  if (res != NULL) {
    for (DUIterator_Last jmin, j = res->last_outs(jmin); j >= jmin; ) {
      Node* use = res->last_out(j);
      uint oc1 = res->outcnt();

      if (use->is_AddP()) {
        for (DUIterator_Last kmin, k = use->last_outs(kmin); k >= kmin; ) {
          Node* n = use->last_out(k);
          uint oc2 = use->outcnt();
          if (n->is_Store()) {
#ifdef ASSERT
            // Verify that there are no dependent MemBarVolatile nodes,
            // they should be removed during IGVN, see MemBarNode::Ideal().
            for (DUIterator_Fast pmax, p = n->fast_outs(pmax);
                                       p < pmax; p++) {
              Node* mb = n->fast_out(p);
              assert(mb->is_Initialize() || !mb->is_MemBar() ||
                     mb->req() <= MemBarNode::Precedent ||
                     mb->in(MemBarNode::Precedent) != n,
                     "MemBarVolatile should be eliminated for non-escaping object");
            }
#endif
            _igvn.replace_node(n, n->in(MemNode::Memory));
          } else {
            eliminate_gc_barrier(n);
          }
          k -= (oc2 - use->outcnt());
        }
        _igvn.remove_dead_node(use);
      } else if (use->is_ArrayCopy()) {
        // Disconnect ArrayCopy node
        ArrayCopyNode* ac = use->as_ArrayCopy();
        if (ac->is_clonebasic()) {
          Node* membar_after = ac->proj_out(TypeFunc::Control)->unique_ctrl_out();
          disconnect_projections(ac, _igvn);
          assert(alloc->in(TypeFunc::Memory)->is_Proj() && alloc->in(TypeFunc::Memory)->in(0)->Opcode() == Op_MemBarCPUOrder, "mem barrier expected before allocation");
          Node* membar_before = alloc->in(TypeFunc::Memory)->in(0);
          disconnect_projections(membar_before->as_MemBar(), _igvn);
          if (membar_after->is_MemBar()) {
            disconnect_projections(membar_after->as_MemBar(), _igvn);
          }
        } else {
          assert(ac->is_arraycopy_validated() ||
                 ac->is_copyof_validated() ||
                 ac->is_copyofrange_validated(), "unsupported");
          CallProjections callprojs;
          ac->extract_projections(&callprojs, true);

          _igvn.replace_node(callprojs.fallthrough_ioproj, ac->in(TypeFunc::I_O));
          _igvn.replace_node(callprojs.fallthrough_memproj, ac->in(TypeFunc::Memory));
          _igvn.replace_node(callprojs.fallthrough_catchproj, ac->in(TypeFunc::Control));

          // Set control to top. IGVN will remove the remaining projections
          ac->set_req(0, top());
          ac->replace_edge(res, top());

          // Disconnect src right away: it can help find new
          // opportunities for allocation elimination
          Node* src = ac->in(ArrayCopyNode::Src);
          ac->replace_edge(src, top());
          // src can be top at this point if src and dest of the
          // arraycopy were the same
          if (src->outcnt() == 0 && !src->is_top()) {
            _igvn.remove_dead_node(src);
          }
        }
        _igvn._worklist.push(ac);
      } else {
        eliminate_gc_barrier(use);
      }
      j -= (oc1 - res->outcnt());
    }
    assert(res->outcnt() == 0, "all uses of allocated objects must be deleted");
    _igvn.remove_dead_node(res);
  }

  //
  // Process other users of allocation's projections
  //
  if (_resproj != NULL && _resproj->outcnt() != 0) {
    // First disconnect stores captured by Initialize node.
    // If Initialize node is eliminated first in the following code,
    // it will kill such stores and DUIterator_Last will assert.
    for (DUIterator_Fast jmax, j = _resproj->fast_outs(jmax); j < jmax; j++) {
      Node* use = _resproj->fast_out(j);
      if (use->is_AddP()) {
        // raw memory addresses used only by the initialization
        _igvn.replace_node(use, C->top());
        --j; --jmax;
      }
    }
    for (DUIterator_Last jmin, j = _resproj->last_outs(jmin); j >= jmin; ) {
      Node* use = _resproj->last_out(j);
      uint oc1 = _resproj->outcnt();
      if (use->is_Initialize()) {
        // Eliminate Initialize node.
        InitializeNode* init = use->as_Initialize();
        assert(init->outcnt() <= 2, "only a control and memory projection expected");
        Node* ctrl_proj = init->proj_out_or_null(TypeFunc::Control);
        if (ctrl_proj != NULL) {
          _igvn.replace_node(ctrl_proj, init->in(TypeFunc::Control));
#ifdef ASSERT
          Node* tmp = init->in(TypeFunc::Control);
          assert(tmp == _fallthroughcatchproj, "allocation control projection");
#endif
        }
        Node* mem_proj = init->proj_out_or_null(TypeFunc::Memory);
        if (mem_proj != NULL) {
          Node* mem = init->in(TypeFunc::Memory);
#ifdef ASSERT
          if (mem->is_MergeMem()) {
            assert(mem->in(TypeFunc::Memory) == _memproj_fallthrough, "allocation memory projection");
          } else {
            assert(mem == _memproj_fallthrough, "allocation memory projection");
          }
#endif
          _igvn.replace_node(mem_proj, mem);
        }
      } else {
        assert(false, "only Initialize or AddP expected");
      }
      j -= (oc1 - _resproj->outcnt());
    }
  }
  if (_fallthroughcatchproj != NULL) {
    _igvn.replace_node(_fallthroughcatchproj, alloc->in(TypeFunc::Control));
  }
  if (_memproj_fallthrough != NULL) {
    _igvn.replace_node(_memproj_fallthrough, alloc->in(TypeFunc::Memory));
  }
  if (_memproj_catchall != NULL) {
    _igvn.replace_node(_memproj_catchall, C->top());
  }
  if (_ioproj_fallthrough != NULL) {
    _igvn.replace_node(_ioproj_fallthrough, alloc->in(TypeFunc::I_O));
  }
  if (_ioproj_catchall != NULL) {
    _igvn.replace_node(_ioproj_catchall, C->top());
  }
  if (_catchallcatchproj != NULL) {
    _igvn.replace_node(_catchallcatchproj, C->top());
  }
}

bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode* alloc) {
  // If reallocation fails during deoptimization we'll pop all
  // interpreter frames for this compiled frame and that won't play
  // nice with JVMTI popframe.
  // We avoid this issue by eager reallocation when the popframe request
  // is received.
  if (!EliminateAllocations || !alloc->_is_non_escaping) {
    return false;
  }
  Node* klass = alloc->in(AllocateNode::KlassNode);
  const TypeKlassPtr* tklass = _igvn.type(klass)->is_klassptr();
  Node* res = alloc->result_cast();
  // Eliminate boxing allocations which are not used
  // regardless of their scalar replaceable status.
  bool boxing_alloc = C->eliminate_boxing() &&
                      tklass->klass()->is_instance_klass() &&
                      tklass->klass()->as_instance_klass()->is_box_klass();
  if (!alloc->_is_scalar_replaceable && (!boxing_alloc || (res != NULL))) {
    return false;
  }

  extract_call_projections(alloc);

  GrowableArray<SafePointNode*> safepoints;
  if (!can_eliminate_allocation(alloc, safepoints)) {
    return false;
  }

  if (!alloc->_is_scalar_replaceable) {
    assert(res == NULL, "sanity");
    // We can only eliminate allocation if all debug info references
    // are already replaced with SafePointScalarObject because
    // we can't search for a field's value without instance_id.
    if (safepoints.length() > 0) {
      return false;
    }
  }

  if (!scalar_replacement(alloc, safepoints)) {
    return false;
  }

  CompileLog* log = C->log();
  if (log != NULL) {
    log->head("eliminate_allocation type='%d'",
              log->identify(tklass->klass()));
    JVMState* p = alloc->jvms();
    while (p != NULL) {
      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
      p = p->caller();
    }
    log->tail("eliminate_allocation");
  }

  process_users_of_allocation(alloc);

#ifndef PRODUCT
  if (PrintEliminateAllocations) {
    if (alloc->is_AllocateArray())
      tty->print_cr("++++ Eliminated: %d AllocateArray", alloc->_idx);
    else
      tty->print_cr("++++ Eliminated: %d Allocate", alloc->_idx);
  }
#endif

  return true;
}

bool PhaseMacroExpand::eliminate_boxing_node(CallStaticJavaNode* boxing) {
  // EA should remove all uses of non-escaping boxing node.
  if (!C->eliminate_boxing() || boxing->proj_out_or_null(TypeFunc::Parms) != NULL) {
    return false;
  }

  assert(boxing->result_cast() == NULL, "unexpected boxing node result");

  extract_call_projections(boxing);

  const TypeTuple* r = boxing->tf()->range();
  assert(r->cnt() > TypeFunc::Parms, "sanity");
  const TypeInstPtr* t = r->field_at(TypeFunc::Parms)->isa_instptr();
  assert(t != NULL, "sanity");

  CompileLog* log = C->log();
  if (log != NULL) {
    log->head("eliminate_boxing type='%d'",
              log->identify(t->klass()));
    JVMState* p = boxing->jvms();
    while (p != NULL) {
      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
      p = p->caller();
    }
    log->tail("eliminate_boxing");
  }

  process_users_of_allocation(boxing);

#ifndef PRODUCT
  if (PrintEliminateAllocations) {
    tty->print("++++ Eliminated: %d ", boxing->_idx);
    boxing->method()->print_short_name(tty);
    tty->cr();
  }
#endif

  return true;
}

//---------------------------set_eden_pointers-------------------------
void PhaseMacroExpand::set_eden_pointers(Node*& eden_top_adr, Node*& eden_end_adr) {
  if (UseTLAB) {                 // Private allocation: load from TLS
    Node* thread = transform_later(new ThreadLocalNode());
    int tlab_top_offset = in_bytes(JavaThread::tlab_top_offset());
    int tlab_end_offset = in_bytes(JavaThread::tlab_end_offset());
    eden_top_adr = basic_plus_adr(top()/*not oop*/, thread, tlab_top_offset);
    eden_end_adr = basic_plus_adr(top()/*not oop*/, thread, tlab_end_offset);
  } else {                       // Shared allocation: load from globals
    CollectedHeap* ch = Universe::heap();
    address top_adr = (address)ch->top_addr();
    address end_adr = (address)ch->end_addr();
    eden_top_adr = makecon(TypeRawPtr::make(top_adr));
    eden_end_adr = basic_plus_adr(eden_top_adr, end_adr - top_adr);
  }
}

Node* PhaseMacroExpand::make_load(Node* ctl, Node* mem, Node* base, int offset, const Type* value_type, BasicType bt) {
  Node* adr = basic_plus_adr(base, offset);
  const TypePtr* adr_type = adr->bottom_type()->is_ptr();
  Node* value = LoadNode::make(_igvn, ctl, mem, adr, adr_type, value_type, bt, MemNode::unordered);
  transform_later(value);
  return value;
}

Node* PhaseMacroExpand::make_store(Node* ctl, Node* mem, Node* base, int offset, Node* value, BasicType bt) {
  Node* adr = basic_plus_adr(base, offset);
  mem = StoreNode::make(_igvn, ctl, mem, adr, NULL, value, bt, MemNode::unordered);
  transform_later(mem);
  return mem;
}
//=============================================================================
//
// A L L O C A T I O N
//
// Allocation attempts to be fast in the case of frequent small objects.
// It breaks down like this:
//
// 1) Size in doublewords is computed. This is a constant for objects and
// variable for most arrays. Doubleword units are used to avoid size
// overflow of huge doubleword arrays. We need doublewords in the end for
// rounding.
//
// 2) Size is checked for being 'too large'. Too-large allocations will go
// the slow path into the VM. The slow path can throw any required
// exceptions, and does all the special checks for very large arrays. The
// size test can constant-fold away for objects. For objects with
// finalizers it constant-folds the otherway: you always go slow with
// finalizers.
//
// 3) If NOT using TLABs, this is the contended loop-back point.
// Load-Locked the heap top. If using TLABs normal-load the heap top.
//
// 4) Check that heap top + size*8 < max. If we fail go the slow ` route.
// NOTE: "top+size*8" cannot wrap the 4Gig line! Here's why: for largish
// "size*8" we always enter the VM, where "largish" is a constant picked small
// enough that there's always space between the eden max and 4Gig (old space is
// there so it's quite large) and large enough that the cost of entering the VM
// is dwarfed by the cost to initialize the space.
//
// 5) If NOT using TLABs, Store-Conditional the adjusted heap top back
// down. If contended, repeat at step 3. If using TLABs normal-store
// adjusted heap top back down; there is no contention.
//
// 6) If !ZeroTLAB then Bulk-clear the object/array. Fill in klass & mark
// fields.
//
// 7) Merge with the slow-path; cast the raw memory pointer to the correct
// oop flavor.
//
//=============================================================================
// FastAllocateSizeLimit value is in DOUBLEWORDS.
// Allocations bigger than this always go the slow route.
// This value must be small enough that allocation attempts that need to
// trigger exceptions go the slow route. Also, it must be small enough so
// that heap_top + size_in_bytes does not wrap around the 4Gig limit.
//=============================================================================
// %%% Here is an old comment from parseHelper.cpp; is it outdated?
// The allocator will coalesce int->oop copies away. See comment in
// coalesce.cpp about how this works. It depends critically on the exact
// code shape produced here, so if you are changing this code shape
// make sure the GC info for the heap-top is correct in and around the
// slow-path call.
//
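// A minimal standalone sketch of the seven-step fast path described above
// (plain C++, not C2 IR; 'fast_limit_bytes' and 'slow_allocate' are
// hypothetical stand-ins for FastAllocateSizeLimit and the VM runtime entry,
// and contention handling for the non-TLAB case is omitted):
static void* sketch_tlab_allocate(size_t size_in_bytes, size_t fast_limit_bytes,
                                  char** tlab_top, char* tlab_end,
                                  void* (*slow_allocate)(size_t)) {
  if (size_in_bytes > fast_limit_bytes) {
    return slow_allocate(size_in_bytes);   // step 2: too large -> VM does the checks and exceptions
  }
  char* old_top = *tlab_top;               // step 3: plain load, a TLAB is thread-private
  char* new_top = old_top + size_in_bytes;
  if (new_top > tlab_end) {
    return slow_allocate(size_in_bytes);   // step 4: does not fit -> VM path may GC or throw
  }
  *tlab_top = new_top;                     // step 5: plain store, no contention within a TLAB
  for (size_t i = 0; i < size_in_bytes; i++) {
    old_top[i] = 0;                        // step 6: clear the body unless pre-zeroed (ZeroTLAB)
  }
  return old_top;                          // step 7: caller installs mark/klass and casts to an oop
}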
void PhaseMacroExpand : : expand_allocate_common (
AllocateNode * alloc , // allocation node to be expanded
Node * length , // array length for an array allocation
const TypeFunc * slow_call_type , // Type of slow call
address slow_call_address // Address of slow call
)
{
Node * ctrl = alloc - > in ( TypeFunc : : Control ) ;
Node * mem = alloc - > in ( TypeFunc : : Memory ) ;
Node * i_o = alloc - > in ( TypeFunc : : I_O ) ;
Node * size_in_bytes = alloc - > in ( AllocateNode : : AllocSize ) ;
Node * klass_node = alloc - > in ( AllocateNode : : KlassNode ) ;
Node * initial_slow_test = alloc - > in ( AllocateNode : : InitialTest ) ;
assert ( ctrl ! = NULL , " must have control " ) ;
2020-02-06 11:21:39 +01:00
2007-12-01 00:00:00 +00:00
// We need a Region and corresponding Phi's to merge the slow-path and fast-path results.
// They will not be used if the allocation always goes to the slow path.
enum { slow_result_path = 1 , fast_result_path = 2 } ;
2015-10-22 13:07:10 -04:00
Node * result_region = NULL ;
Node * result_phi_rawmem = NULL ;
Node * result_phi_rawoop = NULL ;
Node * result_phi_i_o = NULL ;
2007-12-01 00:00:00 +00:00
// The initial slow comparison is a size check, the comparison
// we want to do is a BoolTest::gt
2020-02-06 11:21:39 +01:00
bool expand_fast_path = true ;
2007-12-01 00:00:00 +00:00
int tv = _igvn . find_int_con ( initial_slow_test , - 1 ) ;
if ( tv > = 0 ) {
2020-02-06 11:21:39 +01:00
// InitialTest has constant result
// 0 - can fit in TLAB
// 1 - always too big or negative
assert ( tv < = 1 , " 0 or 1 if a constant " ) ;
expand_fast_path = ( tv = = 0 ) ;
2007-12-01 00:00:00 +00:00
initial_slow_test = NULL ;
} else {
initial_slow_test = BoolNode : : make_predicate ( initial_slow_test , & _igvn ) ;
}
2009-05-08 10:44:20 -07:00
if ( C - > env ( ) - > dtrace_alloc_probes ( ) | |
2017-07-06 01:50:26 +02:00
( ! UseTLAB & & ! Universe : : heap ( ) - > supports_inline_contig_alloc ( ) ) ) {
2007-12-01 00:00:00 +00:00
// Force slow-path allocation
2020-02-06 11:21:39 +01:00
expand_fast_path = false ;
2007-12-01 00:00:00 +00:00
initial_slow_test = NULL ;
}
2020-02-06 11:21:39 +01:00
bool allocation_has_use = ( alloc - > result_cast ( ) ! = NULL ) ;
if ( ! allocation_has_use ) {
InitializeNode * init = alloc - > initialization ( ) ;
if ( init ! = NULL ) {
2020-02-24 11:31:07 +01:00
init - > remove ( & _igvn ) ;
2020-02-06 11:21:39 +01:00
}
if ( expand_fast_path & & ( initial_slow_test = = NULL ) ) {
// Remove allocation node and return.
// Size is a non-negative constant -> no initial check needed -> directly to fast path.
// Also, no usages -> empty fast path -> no fall out to slow path -> nothing left.
2020-02-24 11:31:07 +01:00
# ifndef PRODUCT
if ( PrintEliminateAllocations ) {
tty - > print ( " NotUsed " ) ;
Node * res = alloc - > proj_out_or_null ( TypeFunc : : Parms ) ;
if ( res ! = NULL ) {
res - > dump ( ) ;
} else {
alloc - > dump ( ) ;
}
}
# endif
2020-02-06 11:21:39 +01:00
yank_alloc_node ( alloc ) ;
return ;
}
}
2008-06-05 15:57:56 -07:00
2007-12-01 00:00:00 +00:00
enum { too_big_or_final_path = 1 , need_gc_path = 2 } ;
Node * slow_region = NULL ;
Node * toobig_false = ctrl ;
// generate the initial test if necessary
if ( initial_slow_test ! = NULL ) {
2020-02-06 11:21:39 +01:00
assert ( expand_fast_path , " Only need test if there is a fast path " ) ;
2014-06-02 08:07:29 +02:00
slow_region = new RegionNode ( 3 ) ;
2007-12-01 00:00:00 +00:00
// Now make the initial failure test. Usually a too-big test but
// might be a TRUE for finalizers or a fancy class check for
// newInstance0.
2014-06-02 08:07:29 +02:00
IfNode * toobig_iff = new IfNode ( ctrl , initial_slow_test , PROB_MIN , COUNT_UNKNOWN ) ;
2007-12-01 00:00:00 +00:00
transform_later ( toobig_iff ) ;
// Plug the failing-too-big test into the slow-path region
2014-06-02 08:07:29 +02:00
Node * toobig_true = new IfTrueNode ( toobig_iff ) ;
2007-12-01 00:00:00 +00:00
transform_later ( toobig_true ) ;
slow_region - > init_req ( too_big_or_final_path , toobig_true ) ;
2014-06-02 08:07:29 +02:00
toobig_false = new IfFalseNode ( toobig_iff ) ;
2007-12-01 00:00:00 +00:00
transform_later ( toobig_false ) ;
2020-02-06 11:21:39 +01:00
} else {
// No initial test, just fall into next case
assert ( allocation_has_use | | ! expand_fast_path , " Should already have been handled " ) ;
2007-12-01 00:00:00 +00:00
toobig_false = ctrl ;
debug_only ( slow_region = NodeSentinel ) ;
}
2020-02-06 11:21:39 +01:00
// If we are here there are several possibilities:
// - expand_fast_path is false - then only a slow path is expanded. That's it.
// no_initial_check means a constant allocation.
// - If the check always evaluates to false -> expand_fast_path is false (see above)
// - If the check always evaluates to true -> directly into the fast path (but may bail out to the slow path)
// if !allocation_has_use the fast path is empty
// if !allocation_has_use && no_initial_check
// - Then there is no fast path that can fall out to the slow path -> no allocation code at all.
// It was already removed by yank_alloc_node above.
2007-12-01 00:00:00 +00:00
Node * slow_mem = mem ; // save the current memory state for slow path
// generate the fast allocation code unless we know that the initial test will always go slow
2020-02-06 11:21:39 +01:00
if ( expand_fast_path ) {
2009-02-05 14:43:58 -08:00
// Fast path modifies only raw memory.
if ( mem - > is_MergeMem ( ) ) {
mem = mem - > as_MergeMem ( ) - > memory_at ( Compile : : AliasIdxRaw ) ;
}
2007-12-01 00:00:00 +00:00
// allocate the Region and Phi nodes for the result
2014-06-02 08:07:29 +02:00
result_region = new RegionNode ( 3 ) ;
result_phi_rawmem = new PhiNode ( result_region , Type : : MEMORY , TypeRawPtr : : BOTTOM ) ;
result_phi_i_o = new PhiNode ( result_region , Type : : ABIO ) ; // I/O is used for Prefetch
2007-12-01 00:00:00 +00:00
// Grab regular I/O before optional prefetch may change it.
// Slow-path does no I/O so just set it to the original I/O.
2011-01-07 10:42:32 -05:00
result_phi_i_o - > init_req ( slow_result_path , i_o ) ;
2007-12-01 00:00:00 +00:00
2011-01-07 10:42:32 -05:00
// Name successful fast-path variables
Node * fast_oop_ctrl ;
Node * fast_oop_rawmem ;
2020-02-06 11:21:39 +01:00
if ( allocation_has_use ) {
Node * needgc_ctrl = NULL ;
result_phi_rawoop = new PhiNode ( result_region , TypeRawPtr : : BOTTOM ) ;
2011-01-07 10:42:32 -05:00
2020-02-06 11:21:39 +01:00
intx prefetch_lines = length ! = NULL ? AllocatePrefetchLines : AllocateInstancePrefetchLines ;
BarrierSetC2 * bs = BarrierSet : : barrier_set ( ) - > barrier_set_c2 ( ) ;
Node * fast_oop = bs - > obj_allocate ( this , ctrl , mem , toobig_false , size_in_bytes , i_o , needgc_ctrl ,
fast_oop_ctrl , fast_oop_rawmem ,
prefetch_lines ) ;
if ( initial_slow_test ! = NULL ) {
// This completes all paths into the slow merge point
slow_region - > init_req ( need_gc_path , needgc_ctrl ) ;
transform_later ( slow_region ) ;
2011-12-20 16:56:50 +01:00
} else {
2020-02-06 11:21:39 +01:00
// No initial slow path needed!
// Just fall from the need-GC path straight into the VM call.
slow_region = needgc_ctrl ;
2011-12-20 16:56:50 +01:00
}
2020-02-06 11:21:39 +01:00
InitializeNode * init = alloc - > initialization ( ) ;
fast_oop_rawmem = initialize_object ( alloc ,
fast_oop_ctrl , fast_oop_rawmem , fast_oop ,
klass_node , length , size_in_bytes ) ;
expand_initialize_membar ( alloc , init , fast_oop_ctrl , fast_oop_rawmem ) ;
expand_dtrace_alloc_probe ( alloc , fast_oop , fast_oop_ctrl , fast_oop_rawmem ) ;
result_phi_rawoop - > init_req ( fast_result_path , fast_oop ) ;
} else {
assert ( initial_slow_test ! = NULL , " sanity " ) ;
fast_oop_ctrl = toobig_false ;
fast_oop_rawmem = mem ;
transform_later ( slow_region ) ;
2007-12-01 00:00:00 +00:00
}
// Plug in the successful fast-path into the result merge point
2011-01-07 10:42:32 -05:00
result_region - > init_req ( fast_result_path , fast_oop_ctrl ) ;
result_phi_i_o - > init_req ( fast_result_path , i_o ) ;
result_phi_rawmem - > init_req ( fast_result_path , fast_oop_rawmem ) ;
2007-12-01 00:00:00 +00:00
} else {
slow_region = ctrl ;
2011-12-27 15:08:43 -08:00
result_phi_i_o = i_o ; // Rename it to use in the following code.
2007-12-01 00:00:00 +00:00
}
// Generate slow-path call
2014-06-02 08:07:29 +02:00
CallNode * call = new CallStaticJavaNode ( slow_call_type , slow_call_address ,
2012-09-27 09:38:42 -07:00
OptoRuntime : : stub_name ( slow_call_address ) ,
alloc - > jvms ( ) - > bci ( ) ,
TypePtr : : BOTTOM ) ;
2020-02-06 11:21:39 +01:00
call - > init_req ( TypeFunc : : Control , slow_region ) ;
call - > init_req ( TypeFunc : : I_O , top ( ) ) ; // does no i/o
call - > init_req ( TypeFunc : : Memory , slow_mem ) ; // may gc ptrs
call - > init_req ( TypeFunc : : ReturnAdr , alloc - > in ( TypeFunc : : ReturnAdr ) ) ;
call - > init_req ( TypeFunc : : FramePtr , alloc - > in ( TypeFunc : : FramePtr ) ) ;
2007-12-01 00:00:00 +00:00
call - > init_req ( TypeFunc : : Parms + 0 , klass_node ) ;
if ( length ! = NULL ) {
call - > init_req ( TypeFunc : : Parms + 1 , length ) ;
}
// Copy debug information and adjust JVMState information, then replace
// allocate node with the call
2020-10-19 11:30:13 +00:00
call - > copy_call_debug_info ( & _igvn , alloc ) ;
2020-02-06 11:21:39 +01:00
if ( expand_fast_path ) {
2007-12-01 00:00:00 +00:00
call - > set_cnt ( PROB_UNLIKELY_MAG ( 4 ) ) ; // Same effect as RC_UNCOMMON.
2011-12-27 15:08:43 -08:00
} else {
// Hook i_o projection to avoid its elimination during allocation
// replacement (when only a slow call is generated).
call - > set_req ( TypeFunc : : I_O , result_phi_i_o ) ;
2007-12-01 00:00:00 +00:00
}
2010-06-28 14:54:39 -07:00
_igvn . replace_node ( alloc , call ) ;
2007-12-01 00:00:00 +00:00
transform_later ( call ) ;
// Identify the output projections from the allocate node and
// adjust any references to them.
// The control and io projections look like:
//
// v---Proj(ctrl) <-----+ v---CatchProj(ctrl)
// Allocate Catch
// ^---Proj(io) <-------+ ^---CatchProj(io)
//
// We are interested in the CatchProj nodes.
//
extract_call_projections ( call ) ;
2011-12-27 15:08:43 -08:00
// An allocate node has separate memory projections for the uses on
// the control and i_o paths. Replace the control memory projection with
// result_phi_rawmem (unless we are only generating a slow call when
// both memory projections are combined)
2020-02-06 11:21:39 +01:00
if ( expand_fast_path & & _memproj_fallthrough ! = NULL ) {
migrate_outs ( _memproj_fallthrough , result_phi_rawmem ) ;
2007-12-01 00:00:00 +00:00
}
2011-12-27 15:08:43 -08:00
// Now change uses of _memproj_catchall to use _memproj_fallthrough and delete
// _memproj_catchall so we end up with a call that has only 1 memory projection.
2007-12-01 00:00:00 +00:00
if ( _memproj_catchall ! = NULL ) {
if ( _memproj_fallthrough = = NULL ) {
2014-06-02 08:07:29 +02:00
_memproj_fallthrough = new ProjNode ( call , TypeFunc : : Memory ) ;
2007-12-01 00:00:00 +00:00
transform_later ( _memproj_fallthrough ) ;
}
2020-02-06 11:21:39 +01:00
migrate_outs ( _memproj_catchall , _memproj_fallthrough ) ;
2011-12-27 15:08:43 -08:00
_igvn . remove_dead_node ( _memproj_catchall ) ;
2007-12-01 00:00:00 +00:00
}
2011-12-27 15:08:43 -08:00
// An allocate node has separate i_o projections for the uses on the control
// and i_o paths. Always replace the control i_o projection with the result i_o,
// otherwise the incoming i_o becomes dead when only a slow call is generated
// (this differs from memory projections, where both projections are
// combined in that case).
if ( _ioproj_fallthrough ! = NULL ) {
2020-02-06 11:21:39 +01:00
migrate_outs ( _ioproj_fallthrough , result_phi_i_o ) ;
2007-12-01 00:00:00 +00:00
}
2011-12-27 15:08:43 -08:00
// Now change uses of _ioproj_catchall to use _ioproj_fallthrough and delete
// _ioproj_catchall so we end up with a call that has only 1 i_o projection.
2007-12-01 00:00:00 +00:00
if ( _ioproj_catchall ! = NULL ) {
2011-12-27 15:08:43 -08:00
if ( _ioproj_fallthrough = = NULL ) {
2014-06-02 08:07:29 +02:00
_ioproj_fallthrough = new ProjNode ( call , TypeFunc : : I_O ) ;
2011-12-27 15:08:43 -08:00
transform_later ( _ioproj_fallthrough ) ;
}
2020-02-06 11:21:39 +01:00
migrate_outs ( _ioproj_catchall , _ioproj_fallthrough ) ;
2011-12-27 15:08:43 -08:00
_igvn . remove_dead_node ( _ioproj_catchall ) ;
2007-12-01 00:00:00 +00:00
}
// if we generated only a slow call, we are done
2020-02-06 11:21:39 +01:00
if ( ! expand_fast_path ) {
2011-12-27 15:08:43 -08:00
// Now we can unhook i_o.
2011-12-30 11:43:06 -08:00
if ( result_phi_i_o - > outcnt ( ) > 1 ) {
call - > set_req ( TypeFunc : : I_O , top ( ) ) ;
} else {
2020-02-06 11:21:39 +01:00
assert ( result_phi_i_o - > unique_ctrl_out ( ) = = call , " sanity " ) ;
2011-12-30 11:43:06 -08:00
// Case of a new array with a negative size known during compilation.
// The AllocateArrayNode::Ideal() optimization disconnects the unreachable
// following code, since the call to the runtime will throw an exception.
// As a result there will be no users of i_o after the call.
// Leave i_o attached to this call to avoid problems in the preceding graph.
}
2007-12-01 00:00:00 +00:00
return ;
2011-12-27 15:08:43 -08:00
}
2007-12-01 00:00:00 +00:00
if ( _fallthroughcatchproj ! = NULL ) {
ctrl = _fallthroughcatchproj - > clone ( ) ;
transform_later ( ctrl ) ;
2009-04-07 19:04:24 -07:00
_igvn . replace_node ( _fallthroughcatchproj , result_region ) ;
2007-12-01 00:00:00 +00:00
} else {
ctrl = top ( ) ;
}
Node * slow_result ;
if ( _resproj = = NULL ) {
// no uses of the allocation result
slow_result = top ( ) ;
} else {
slow_result = _resproj - > clone ( ) ;
transform_later ( slow_result ) ;
2009-04-07 19:04:24 -07:00
_igvn . replace_node ( _resproj , result_phi_rawoop ) ;
2007-12-01 00:00:00 +00:00
}
// Plug slow-path into result merge point
2020-02-06 11:21:39 +01:00
result_region - > init_req ( slow_result_path , ctrl ) ;
2007-12-01 00:00:00 +00:00
transform_later ( result_region ) ;
2020-02-06 11:21:39 +01:00
if ( allocation_has_use ) {
result_phi_rawoop - > init_req ( slow_result_path , slow_result ) ;
transform_later ( result_phi_rawoop ) ;
}
result_phi_rawmem - > init_req ( slow_result_path , _memproj_fallthrough ) ;
2007-12-01 00:00:00 +00:00
transform_later ( result_phi_rawmem ) ;
transform_later ( result_phi_i_o ) ;
// This completes all paths into the result merge point
}
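// Shape of the code produced above, sketched as straight-line pseudo-C++
// (illustrative only; 'try_fast_allocate' and 'slow_call' are hypothetical
// stand-ins for the expanded fast path and the CallStaticJava runtime call):
static void* sketch_expand_allocate(size_t size_in_bytes, bool expand_fast_path,
                                    void* (*try_fast_allocate)(size_t),
                                    void* (*slow_call)(size_t)) {
  if (expand_fast_path) {
    void* obj = try_fast_allocate(size_in_bytes); // bump pointer + initialization
    if (obj != NULL) {
      return obj;                                 // fast_result_path into the merge point
    }
  }
  return slow_call(size_in_bytes);                // slow_result_path: may GC or throw
}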
2020-02-06 11:21:39 +01:00
// Remove alloc node that has no uses.
void PhaseMacroExpand : : yank_alloc_node ( AllocateNode * alloc ) {
Node * ctrl = alloc - > in ( TypeFunc : : Control ) ;
Node * mem = alloc - > in ( TypeFunc : : Memory ) ;
Node * i_o = alloc - > in ( TypeFunc : : I_O ) ;
extract_call_projections ( alloc ) ;
2020-02-24 11:31:07 +01:00
if ( _resproj ! = NULL ) {
for ( DUIterator_Fast imax , i = _resproj - > fast_outs ( imax ) ; i < imax ; i + + ) {
Node * use = _resproj - > fast_out ( i ) ;
use - > isa_MemBar ( ) - > remove ( & _igvn ) ;
- - imax ;
- - i ; // back up iterator
}
assert ( _resproj - > outcnt ( ) = = 0 , " all uses must be deleted " ) ;
_igvn . remove_dead_node ( _resproj ) ;
}
2020-02-06 11:21:39 +01:00
if ( _fallthroughcatchproj ! = NULL ) {
migrate_outs ( _fallthroughcatchproj , ctrl ) ;
_igvn . remove_dead_node ( _fallthroughcatchproj ) ;
}
if ( _catchallcatchproj ! = NULL ) {
_igvn . rehash_node_delayed ( _catchallcatchproj ) ;
_catchallcatchproj - > set_req ( 0 , top ( ) ) ;
}
if ( _fallthroughproj ! = NULL ) {
Node * catchnode = _fallthroughproj - > unique_ctrl_out ( ) ;
_igvn . remove_dead_node ( catchnode ) ;
_igvn . remove_dead_node ( _fallthroughproj ) ;
}
if ( _memproj_fallthrough ! = NULL ) {
migrate_outs ( _memproj_fallthrough , mem ) ;
_igvn . remove_dead_node ( _memproj_fallthrough ) ;
}
if ( _ioproj_fallthrough ! = NULL ) {
migrate_outs ( _ioproj_fallthrough , i_o ) ;
_igvn . remove_dead_node ( _ioproj_fallthrough ) ;
}
if ( _memproj_catchall ! = NULL ) {
_igvn . rehash_node_delayed ( _memproj_catchall ) ;
_memproj_catchall - > set_req ( 0 , top ( ) ) ;
}
if ( _ioproj_catchall ! = NULL ) {
_igvn . rehash_node_delayed ( _ioproj_catchall ) ;
_ioproj_catchall - > set_req ( 0 , top ( ) ) ;
}
2020-02-24 11:31:07 +01:00
# ifndef PRODUCT
if ( PrintEliminateAllocations ) {
2020-02-27 13:11:06 +01:00
if ( alloc - > is_AllocateArray ( ) ) {
2020-02-24 11:31:07 +01:00
tty - > print_cr ( " ++++ Eliminated: %d AllocateArray " , alloc - > _idx ) ;
} else {
tty - > print_cr ( " ++++ Eliminated: %d Allocate " , alloc - > _idx ) ;
}
2020-02-27 13:11:06 +01:00
}
2020-02-24 11:31:07 +01:00
# endif
2020-02-06 11:21:39 +01:00
_igvn . remove_dead_node ( alloc ) ;
}
void PhaseMacroExpand : : expand_initialize_membar ( AllocateNode * alloc , InitializeNode * init ,
Node * & fast_oop_ctrl , Node * & fast_oop_rawmem ) {
// If initialization is performed by an array copy, any required
// MemBarStoreStore was already added. If the object does not
// escape no need for a MemBarStoreStore. If the object does not
// escape in its initializer and memory barrier (MemBarStoreStore or
// stronger) is already added at exit of initializer, also no need
// for a MemBarStoreStore. Otherwise we need a MemBarStoreStore
// so that stores that initialize this object can't be reordered
// with a subsequent store that makes this object accessible by
// other threads.
// Other threads include java threads and JVM internal threads
// (for example concurrent GC threads). Current concurrent GC
// implementation: G1 will not scan newly created object,
// so it's safe to skip storestore barrier when allocation does
// not escape.
if ( ! alloc - > does_not_escape_thread ( ) & &
! alloc - > is_allocation_MemBar_redundant ( ) & &
( init = = NULL | | ! init - > is_complete_with_arraycopy ( ) ) ) {
if ( init = = NULL | | init - > req ( ) < InitializeNode : : RawStores ) {
// No InitializeNode or no stores captured by zeroing
// elimination. Simply add the MemBarStoreStore after object
// initialization.
MemBarNode * mb = MemBarNode : : make ( C , Op_MemBarStoreStore , Compile : : AliasIdxBot ) ;
transform_later ( mb ) ;
mb - > init_req ( TypeFunc : : Memory , fast_oop_rawmem ) ;
mb - > init_req ( TypeFunc : : Control , fast_oop_ctrl ) ;
fast_oop_ctrl = new ProjNode ( mb , TypeFunc : : Control ) ;
transform_later ( fast_oop_ctrl ) ;
fast_oop_rawmem = new ProjNode ( mb , TypeFunc : : Memory ) ;
transform_later ( fast_oop_rawmem ) ;
} else {
// Add the MemBarStoreStore after the InitializeNode so that
// all stores performing the initialization that were moved
// before the InitializeNode happen before the storestore
// barrier.
Node * init_ctrl = init - > proj_out_or_null ( TypeFunc : : Control ) ;
Node * init_mem = init - > proj_out_or_null ( TypeFunc : : Memory ) ;
MemBarNode * mb = MemBarNode : : make ( C , Op_MemBarStoreStore , Compile : : AliasIdxBot ) ;
transform_later ( mb ) ;
Node * ctrl = new ProjNode ( init , TypeFunc : : Control ) ;
transform_later ( ctrl ) ;
Node * mem = new ProjNode ( init , TypeFunc : : Memory ) ;
transform_later ( mem ) ;
// The MemBarStoreStore depends on control and memory coming
// from the InitializeNode
mb - > init_req ( TypeFunc : : Memory , mem ) ;
mb - > init_req ( TypeFunc : : Control , ctrl ) ;
ctrl = new ProjNode ( mb , TypeFunc : : Control ) ;
transform_later ( ctrl ) ;
mem = new ProjNode ( mb , TypeFunc : : Memory ) ;
transform_later ( mem ) ;
// All nodes that depended on the InitializeNode for control
// and memory must now depend on the MemBarNode that itself
// depends on the InitializeNode
if ( init_ctrl ! = NULL ) {
_igvn . replace_node ( init_ctrl , ctrl ) ;
}
if ( init_mem ! = NULL ) {
_igvn . replace_node ( init_mem , mem ) ;
}
}
}
}
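// Why the StoreStore barrier above is needed, as a standalone C++ sketch
// (illustrative only; SketchBox/g_sketch_published are hypothetical, and
// std::atomic release ordering stands in for the MemBarStoreStore): without
// the ordering, the publishing store could become visible before the
// initializing stores, letting another thread observe a half-built object.
#include <atomic>

struct SketchBox { int payload; };
static std::atomic<SketchBox*> g_sketch_published(NULL);

static void sketch_publish(SketchBox* box) {
  box->payload = 42;                                          // initializing store
  // StoreStore/release: initialization must not be reordered past publication.
  g_sketch_published.store(box, std::memory_order_release);   // publishing store
}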
void PhaseMacroExpand : : expand_dtrace_alloc_probe ( AllocateNode * alloc , Node * oop ,
Node * & ctrl , Node * & rawmem ) {
if ( C - > env ( ) - > dtrace_extended_probes ( ) ) {
// Slow-path call
int size = TypeFunc : : Parms + 2 ;
CallLeafNode * call = new CallLeafNode ( OptoRuntime : : dtrace_object_alloc_Type ( ) ,
CAST_FROM_FN_PTR ( address , SharedRuntime : : dtrace_object_alloc_base ) ,
" dtrace_object_alloc " ,
TypeRawPtr : : BOTTOM ) ;
// Get base of thread-local storage area
Node * thread = new ThreadLocalNode ( ) ;
transform_later ( thread ) ;
call - > init_req ( TypeFunc : : Parms + 0 , thread ) ;
call - > init_req ( TypeFunc : : Parms + 1 , oop ) ;
call - > init_req ( TypeFunc : : Control , ctrl ) ;
call - > init_req ( TypeFunc : : I_O , top ( ) ) ; // does no i/o
call - > init_req ( TypeFunc : : Memory , ctrl ) ;
call - > init_req ( TypeFunc : : ReturnAdr , alloc - > in ( TypeFunc : : ReturnAdr ) ) ;
call - > init_req ( TypeFunc : : FramePtr , alloc - > in ( TypeFunc : : FramePtr ) ) ;
transform_later ( call ) ;
ctrl = new ProjNode ( call , TypeFunc : : Control ) ;
transform_later ( ctrl ) ;
rawmem = new ProjNode ( call , TypeFunc : : Memory ) ;
transform_later ( rawmem ) ;
}
}
2007-12-01 00:00:00 +00:00
// Helper for PhaseMacroExpand::expand_allocate_common.
// Initializes the newly-allocated storage.
Node *
PhaseMacroExpand : : initialize_object ( AllocateNode * alloc ,
Node * control , Node * rawmem , Node * object ,
Node * klass_node , Node * length ,
Node * size_in_bytes ) {
InitializeNode * init = alloc - > initialization ( ) ;
// Store the klass & mark bits
2019-07-16 08:56:08 +02:00
Node * mark_node = alloc - > make_ideal_mark ( & _igvn , object , control , rawmem ) ;
if ( ! mark_node - > is_Con ( ) ) {
transform_later ( mark_node ) ;
2007-12-01 00:00:00 +00:00
}
2019-07-16 08:56:08 +02:00
rawmem = make_store ( control , rawmem , object , oopDesc : : mark_offset_in_bytes ( ) , mark_node , TypeX_X - > basic_type ( ) ) ;
rawmem = make_store ( control , rawmem , object , oopDesc : : klass_offset_in_bytes ( ) , klass_node , T_METADATA ) ;
2007-12-01 00:00:00 +00:00
int header_size = alloc - > minimum_header_size ( ) ; // conservatively small
// Array length
if ( length ! = NULL ) { // Arrays need length field
rawmem = make_store ( control , rawmem , object , arrayOopDesc : : length_offset_in_bytes ( ) , length , T_INT ) ;
// conservatively small header size:
header_size = arrayOopDesc : : base_offset_in_bytes ( T_BYTE ) ;
2007-12-01 00:00:00 +00:00
ciKlass * k = _igvn . type ( klass_node ) - > is_klassptr ( ) - > klass ( ) ;
if ( k - > is_array_klass ( ) ) // we know the exact header size in most cases:
header_size = Klass : : layout_helper_header_size ( k - > layout_helper ( ) ) ;
}
// Clear the object body, if necessary.
if ( init = = NULL ) {
// The init has somehow disappeared; be cautious and clear everything.
//
// This can happen if a node is allocated but an uncommon trap occurs
// immediately. In this case, the Initialize gets associated with the
// trap, and may be placed in a different (outer) loop, if the Allocate
// is in a loop. If (this is rare) the inner loop gets unrolled, then
// there can be two Allocates to one Initialize. The answer in all these
// edge cases is safety first. It is always safe to clear immediately
// within an Allocate, and then (maybe or maybe not) clear some more later.
2016-01-12 09:19:09 +01:00
if ( ! ( UseTLAB & & ZeroTLAB ) ) {
2007-12-01 00:00:00 +00:00
rawmem = ClearArrayNode : : clear_memory ( control , rawmem , object ,
header_size , size_in_bytes ,
& _igvn ) ;
2016-01-12 09:19:09 +01:00
}
2007-12-01 00:00:00 +00:00
} else {
if ( ! init - > is_complete ( ) ) {
// Try to win by zeroing only what the init does not store.
// We can also try to do some peephole optimizations,
// such as combining some adjacent subword stores.
rawmem = init - > complete_stores ( control , rawmem , object ,
header_size , size_in_bytes , & _igvn ) ;
}
// We have no more use for this link, since the AllocateNode goes away:
init - > set_req ( InitializeNode : : RawAddress , top ( ) ) ;
// (If we keep the link, it just confuses the register allocator,
// who thinks he sees a real use of the address by the membar.)
}
return rawmem ;
}
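// Order of the work done by initialize_object() above, as a standalone sketch
// (illustrative constants only - the real offsets come from oopDesc and
// arrayOopDesc, and the real body clearing is done in the IR by
// ClearArrayNode / InitializeNode::complete_stores):
static void sketch_initialize_object(char* obj, size_t size_in_bytes,
                                     intptr_t mark_word, intptr_t klass_word,
                                     int length, bool is_array) {
  size_t offset = 0;
  *(intptr_t*)(obj + offset) = mark_word;    // mark word first
  offset += sizeof(intptr_t);
  *(intptr_t*)(obj + offset) = klass_word;   // then the klass
  offset += sizeof(intptr_t);
  if (is_array) {
    *(int*)(obj + offset) = length;          // arrays also get a length field
    offset += sizeof(int);
  }
  for (size_t i = offset; i < size_in_bytes; i++) {
    obj[i] = 0;                              // clear only the body past the header
  }
}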
// Generate prefetch instructions for next allocations.
Node * PhaseMacroExpand : : prefetch_allocation ( Node * i_o , Node * & needgc_false ,
Node * & contended_phi_rawmem ,
Node * old_eden_top , Node * new_eden_top ,
2018-09-19 21:31:33 +02:00
intx lines ) {
2010-04-07 12:39:27 -07:00
enum { fall_in_path = 1 , pf_path = 2 } ;
2007-12-01 00:00:00 +00:00
if ( UseTLAB & & AllocatePrefetchStyle = = 2 ) {
// Generate prefetch allocation with watermark check.
// As an allocation hits the watermark, we will prefetch starting
// at a "distance" away from watermark.
2014-06-02 08:07:29 +02:00
Node * pf_region = new RegionNode ( 3 ) ;
Node * pf_phi_rawmem = new PhiNode ( pf_region , Type : : MEMORY ,
2007-12-01 00:00:00 +00:00
TypeRawPtr : : BOTTOM ) ;
// I/O is used for Prefetch
2014-06-02 08:07:29 +02:00
Node * pf_phi_abio = new PhiNode ( pf_region , Type : : ABIO ) ;
2007-12-01 00:00:00 +00:00
2014-06-02 08:07:29 +02:00
Node * thread = new ThreadLocalNode ( ) ;
2007-12-01 00:00:00 +00:00
transform_later ( thread ) ;
2014-06-02 08:07:29 +02:00
Node * eden_pf_adr = new AddPNode ( top ( ) /*not oop*/ , thread ,
2007-12-01 00:00:00 +00:00
_igvn . MakeConX ( in_bytes ( JavaThread : : tlab_pf_top_offset ( ) ) ) ) ;
transform_later ( eden_pf_adr ) ;
2014-06-02 08:07:29 +02:00
Node * old_pf_wm = new LoadPNode ( needgc_false ,
2007-12-01 00:00:00 +00:00
contended_phi_rawmem , eden_pf_adr ,
2013-11-15 11:05:32 -08:00
TypeRawPtr : : BOTTOM , TypeRawPtr : : BOTTOM ,
MemNode : : unordered ) ;
2007-12-01 00:00:00 +00:00
transform_later ( old_pf_wm ) ;
// check against new_eden_top
2014-06-02 08:07:29 +02:00
Node * need_pf_cmp = new CmpPNode ( new_eden_top , old_pf_wm ) ;
2007-12-01 00:00:00 +00:00
transform_later ( need_pf_cmp ) ;
2014-06-02 08:07:29 +02:00
Node * need_pf_bol = new BoolNode ( need_pf_cmp , BoolTest : : ge ) ;
2007-12-01 00:00:00 +00:00
transform_later ( need_pf_bol ) ;
2014-06-02 08:07:29 +02:00
IfNode * need_pf_iff = new IfNode ( needgc_false , need_pf_bol ,
2007-12-01 00:00:00 +00:00
PROB_UNLIKELY_MAG ( 4 ) , COUNT_UNKNOWN ) ;
transform_later ( need_pf_iff ) ;
// true node, add prefetch distance
2014-06-02 08:07:29 +02:00
Node * need_pf_true = new IfTrueNode ( need_pf_iff ) ;
2007-12-01 00:00:00 +00:00
transform_later ( need_pf_true ) ;
2014-06-02 08:07:29 +02:00
Node * need_pf_false = new IfFalseNode ( need_pf_iff ) ;
2007-12-01 00:00:00 +00:00
transform_later ( need_pf_false ) ;
2014-06-02 08:07:29 +02:00
Node * new_pf_wmt = new AddPNode ( top ( ) , old_pf_wm ,
2007-12-01 00:00:00 +00:00
_igvn . MakeConX ( AllocatePrefetchDistance ) ) ;
transform_later ( new_pf_wmt ) ;
new_pf_wmt - > set_req ( 0 , need_pf_true ) ;
2014-06-02 08:07:29 +02:00
Node * store_new_wmt = new StorePNode ( need_pf_true ,
2007-12-01 00:00:00 +00:00
contended_phi_rawmem , eden_pf_adr ,
2013-11-15 11:05:32 -08:00
TypeRawPtr : : BOTTOM , new_pf_wmt ,
MemNode : : unordered ) ;
2007-12-01 00:00:00 +00:00
transform_later ( store_new_wmt ) ;
// adding prefetches
pf_phi_abio - > init_req ( fall_in_path , i_o ) ;
Node * prefetch_adr ;
Node * prefetch ;
uint step_size = AllocatePrefetchStepSize ;
uint distance = 0 ;
2018-09-20 21:14:38 +02:00
for ( intx i = 0 ; i < lines ; i + + ) {
2014-06-02 08:07:29 +02:00
prefetch_adr = new AddPNode ( old_pf_wm , new_pf_wmt ,
2007-12-01 00:00:00 +00:00
_igvn . MakeConX ( distance ) ) ;
transform_later ( prefetch_adr ) ;
2014-06-02 08:07:29 +02:00
prefetch = new PrefetchAllocationNode ( i_o , prefetch_adr ) ;
2007-12-01 00:00:00 +00:00
transform_later ( prefetch ) ;
distance + = step_size ;
i_o = prefetch ;
}
pf_phi_abio - > set_req ( pf_path , i_o ) ;
pf_region - > init_req ( fall_in_path , need_pf_false ) ;
pf_region - > init_req ( pf_path , need_pf_true ) ;
pf_phi_rawmem - > init_req ( fall_in_path , contended_phi_rawmem ) ;
pf_phi_rawmem - > init_req ( pf_path , store_new_wmt ) ;
transform_later ( pf_region ) ;
transform_later ( pf_phi_rawmem ) ;
transform_later ( pf_phi_abio ) ;
needgc_false = pf_region ;
contended_phi_rawmem = pf_phi_rawmem ;
i_o = pf_phi_abio ;
2010-04-07 12:39:27 -07:00
} else if ( UseTLAB & & AllocatePrefetchStyle = = 3 ) {
2016-04-29 08:32:42 +02:00
// Insert a prefetch instruction for each allocation.
2016-12-07 09:29:28 -08:00
// This code is used to generate 1 prefetch instruction per cache line.
2010-04-07 12:39:27 -07:00
2011-08-16 16:59:46 -07:00
// Generate several prefetch instructions.
2010-04-07 12:39:27 -07:00
uint step_size = AllocatePrefetchStepSize ;
uint distance = AllocatePrefetchDistance ;
// Next cache address.
2014-06-02 08:07:29 +02:00
Node * cache_adr = new AddPNode ( old_eden_top , old_eden_top ,
2016-04-29 08:32:42 +02:00
_igvn . MakeConX ( step_size + distance ) ) ;
2010-04-07 12:39:27 -07:00
transform_later ( cache_adr ) ;
2014-06-02 08:07:29 +02:00
cache_adr = new CastP2XNode ( needgc_false , cache_adr ) ;
2010-04-07 12:39:27 -07:00
transform_later ( cache_adr ) ;
2016-12-07 09:29:28 -08:00
// The address is aligned so that the prefetch targets the beginning of a cache line
// (this is important when the BIS instruction is used as a prefetch on SPARC).
2010-04-07 12:39:27 -07:00
Node * mask = _igvn . MakeConX ( ~ ( intptr_t ) ( step_size - 1 ) ) ;
2014-06-02 08:07:29 +02:00
cache_adr = new AndXNode ( cache_adr , mask ) ;
2010-04-07 12:39:27 -07:00
transform_later ( cache_adr ) ;
2014-06-02 08:07:29 +02:00
cache_adr = new CastX2PNode ( cache_adr ) ;
2010-04-07 12:39:27 -07:00
transform_later ( cache_adr ) ;
// Prefetch
2014-06-02 08:07:29 +02:00
Node * prefetch = new PrefetchAllocationNode ( contended_phi_rawmem , cache_adr ) ;
2010-04-07 12:39:27 -07:00
prefetch - > set_req ( 0 , needgc_false ) ;
transform_later ( prefetch ) ;
contended_phi_rawmem = prefetch ;
Node * prefetch_adr ;
distance = step_size ;
2018-09-20 21:14:38 +02:00
for ( intx i = 1 ; i < lines ; i + + ) {
2014-06-02 08:07:29 +02:00
prefetch_adr = new AddPNode ( cache_adr , cache_adr ,
2010-04-07 12:39:27 -07:00
_igvn . MakeConX ( distance ) ) ;
transform_later ( prefetch_adr ) ;
2014-06-02 08:07:29 +02:00
prefetch = new PrefetchAllocationNode ( contended_phi_rawmem , prefetch_adr ) ;
2010-04-07 12:39:27 -07:00
transform_later ( prefetch ) ;
distance + = step_size ;
contended_phi_rawmem = prefetch ;
}
2007-12-01 00:00:00 +00:00
} else if ( AllocatePrefetchStyle > 0 ) {
// Insert a prefetch for each allocation only on the fast-path
Node * prefetch_adr ;
Node * prefetch ;
2011-08-16 16:59:46 -07:00
// Generate several prefetch instructions.
2007-12-01 00:00:00 +00:00
uint step_size = AllocatePrefetchStepSize ;
uint distance = AllocatePrefetchDistance ;
2018-09-20 21:14:38 +02:00
for ( intx i = 0 ; i < lines ; i + + ) {
2014-06-02 08:07:29 +02:00
prefetch_adr = new AddPNode ( old_eden_top , new_eden_top ,
2007-12-01 00:00:00 +00:00
_igvn . MakeConX ( distance ) ) ;
transform_later ( prefetch_adr ) ;
2014-06-02 08:07:29 +02:00
prefetch = new PrefetchAllocationNode ( i_o , prefetch_adr ) ;
2007-12-01 00:00:00 +00:00
// Do not let it float too high, since if eden_top == eden_end,
// both might be null.
if ( i = = 0 ) { // Set control for first prefetch, next follows it
prefetch - > init_req ( 0 , needgc_false ) ;
}
transform_later ( prefetch ) ;
distance + = step_size ;
i_o = prefetch ;
}
}
return i_o ;
}
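// All three prefetch styles above boil down to issuing 'lines' prefetches at
// 'step_size' strides beyond the allocation. A standalone sketch (illustrative
// only; __builtin_prefetch is a GCC/Clang builtin used here as a stand-in for
// the PrefetchAllocation node):
static void sketch_prefetch_next_allocations(char* new_eden_top, intx lines,
                                             uintx step_size, uintx distance) {
  for (intx i = 0; i < lines; i++) {
    __builtin_prefetch(new_eden_top + distance, 1 /*for write*/, 1 /*low temporal locality*/);
    distance += step_size;                   // next cache line
  }
}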
void PhaseMacroExpand : : expand_allocate ( AllocateNode * alloc ) {
expand_allocate_common ( alloc , NULL ,
OptoRuntime : : new_instance_Type ( ) ,
OptoRuntime : : new_instance_Java ( ) ) ;
}
void PhaseMacroExpand : : expand_allocate_array ( AllocateArrayNode * alloc ) {
Node * length = alloc - > in ( AllocateNode : : ALength ) ;
2011-09-26 10:24:05 -07:00
InitializeNode * init = alloc - > initialization ( ) ;
Node * klass_node = alloc - > in ( AllocateNode : : KlassNode ) ;
ciKlass * k = _igvn . type ( klass_node ) - > is_klassptr ( ) - > klass ( ) ;
address slow_call_address ; // Address of slow call
if ( init ! = NULL & & init - > is_complete_with_arraycopy ( ) & &
k - > is_type_array_klass ( ) ) {
// Don't zero type array during slow allocation in VM since
// it will be initialized later by arraycopy in compiled code.
slow_call_address = OptoRuntime : : new_array_nozero_Java ( ) ;
} else {
slow_call_address = OptoRuntime : : new_array_Java ( ) ;
}
2007-12-01 00:00:00 +00:00
expand_allocate_common ( alloc , length ,
OptoRuntime : : new_array_Type ( ) ,
2011-09-26 10:24:05 -07:00
slow_call_address ) ;
2007-12-01 00:00:00 +00:00
}
2012-01-07 13:26:43 -08:00
//-------------------mark_eliminated_box----------------------------------
//
2011-06-04 10:36:22 -07:00
// During EA, obj may point to several objects, but after a few ideal graph
// transformations (CCP) it may point to only one non-escaping object
// (though still through a phi), and the corresponding locks and unlocks will
// be marked for elimination. Later, obj could be replaced with a new node
// (a new phi) which does not have escape information. And later, after some
// graph reshaping, other locks and unlocks (which were not marked for
// elimination before) are connected to this new obj (phi), but they still
// will not be marked for elimination since the new obj has no escape
// information.
// Mark all associated (same box and obj) lock and unlock nodes for
// elimination if some of them are marked already.
2012-01-07 13:26:43 -08:00
void PhaseMacroExpand : : mark_eliminated_box ( Node * oldbox , Node * obj ) {
2012-01-13 12:58:26 -08:00
if ( oldbox - > as_BoxLock ( ) - > is_eliminated ( ) )
return ; // This BoxLock node was processed already.
// New implementation (EliminateNestedLocks) has separate BoxLock
// node for each locked region so mark all associated locks/unlocks as
// eliminated even if different objects are referenced in one locked region
// (for example, OSR compilation of nested loop inside locked scope).
if ( EliminateNestedLocks | |
2012-01-07 13:26:43 -08:00
oldbox - > as_BoxLock ( ) - > is_simple_lock_region ( NULL , obj ) ) {
// Box is used only in one lock region. Mark this box as eliminated.
_igvn . hash_delete ( oldbox ) ;
oldbox - > as_BoxLock ( ) - > set_eliminated ( ) ; // This changes box's hash value
2015-02-17 13:54:53 -05:00
_igvn . hash_insert ( oldbox ) ;
2012-01-07 13:26:43 -08:00
for ( uint i = 0 ; i < oldbox - > outcnt ( ) ; i + + ) {
Node * u = oldbox - > raw_out ( i ) ;
if ( u - > is_AbstractLock ( ) & & ! u - > as_AbstractLock ( ) - > is_non_esc_obj ( ) ) {
AbstractLockNode * alock = u - > as_AbstractLock ( ) ;
// Check lock's box since box could be referenced by Lock's debug info.
if ( alock - > box_node ( ) = = oldbox ) {
// Mark eliminated all related locks and unlocks.
2015-02-17 13:54:53 -05:00
# ifdef ASSERT
alock - > log_lock_optimization ( C , " eliminate_lock_set_non_esc4 " ) ;
# endif
2012-01-07 13:26:43 -08:00
alock - > set_non_esc_obj ( ) ;
}
}
}
2011-06-04 10:36:22 -07:00
return ;
2008-03-14 16:40:42 -07:00
}
2012-01-07 13:26:43 -08:00
// Create new "eliminated" BoxLock node and use it in monitor debug info
// instead of oldbox for the same object.
2012-01-12 14:45:04 -08:00
BoxLockNode * newbox = oldbox - > clone ( ) - > as_BoxLock ( ) ;
2012-01-07 13:26:43 -08:00
// Note: BoxLock node is marked eliminated only here and it is used
// to indicate that all associated lock and unlock nodes are marked
// for elimination.
newbox - > set_eliminated ( ) ;
transform_later ( newbox ) ;
// Replace old box node with new box for all users of the same object.
for ( uint i = 0 ; i < oldbox - > outcnt ( ) ; ) {
bool next_edge = true ;
Node * u = oldbox - > raw_out ( i ) ;
if ( u - > is_AbstractLock ( ) ) {
AbstractLockNode * alock = u - > as_AbstractLock ( ) ;
2012-01-10 18:05:38 -08:00
if ( alock - > box_node ( ) = = oldbox & & alock - > obj_node ( ) - > eqv_uncast ( obj ) ) {
2012-01-07 13:26:43 -08:00
// Replace Box and mark eliminated all related locks and unlocks.
2015-02-17 13:54:53 -05:00
# ifdef ASSERT
alock - > log_lock_optimization ( C , " eliminate_lock_set_non_esc5 " ) ;
# endif
2012-01-07 13:26:43 -08:00
alock - > set_non_esc_obj ( ) ;
2012-06-12 16:23:31 -07:00
_igvn . rehash_node_delayed ( alock ) ;
2012-01-07 13:26:43 -08:00
alock - > set_box_node ( newbox ) ;
next_edge = false ;
}
}
2012-01-10 18:05:38 -08:00
if ( u - > is_FastLock ( ) & & u - > as_FastLock ( ) - > obj_node ( ) - > eqv_uncast ( obj ) ) {
2012-01-07 13:26:43 -08:00
FastLockNode * flock = u - > as_FastLock ( ) ;
assert ( flock - > box_node ( ) = = oldbox , " sanity " ) ;
2012-06-12 16:23:31 -07:00
_igvn . rehash_node_delayed ( flock ) ;
2012-01-07 13:26:43 -08:00
flock - > set_box_node ( newbox ) ;
next_edge = false ;
}
// Replace old box in monitor debug info.
if ( u - > is_SafePoint ( ) & & u - > as_SafePoint ( ) - > jvms ( ) ) {
SafePointNode * sfn = u - > as_SafePoint ( ) ;
JVMState * youngest_jvms = sfn - > jvms ( ) ;
int max_depth = youngest_jvms - > depth ( ) ;
for ( int depth = 1 ; depth < = max_depth ; depth + + ) {
JVMState * jvms = youngest_jvms - > of_depth ( depth ) ;
int num_mon = jvms - > nof_monitors ( ) ;
// Loop over monitors
for ( int idx = 0 ; idx < num_mon ; idx + + ) {
Node * obj_node = sfn - > monitor_obj ( jvms , idx ) ;
Node * box_node = sfn - > monitor_box ( jvms , idx ) ;
2012-01-10 18:05:38 -08:00
if ( box_node = = oldbox & & obj_node - > eqv_uncast ( obj ) ) {
2012-01-07 13:26:43 -08:00
int j = jvms - > monitor_box_offset ( idx ) ;
2012-06-12 16:23:31 -07:00
_igvn . replace_input_of ( u , j , newbox ) ;
2012-01-07 13:26:43 -08:00
next_edge = false ;
}
}
}
}
if ( next_edge ) i + + ;
}
}
//-----------------------mark_eliminated_locking_nodes-----------------------
void PhaseMacroExpand : : mark_eliminated_locking_nodes ( AbstractLockNode * alock ) {
if ( EliminateNestedLocks ) {
if ( alock - > is_nested ( ) ) {
assert ( alock - > box_node ( ) - > as_BoxLock ( ) - > is_eliminated ( ) , " sanity " ) ;
return ;
} else if ( ! alock - > is_non_esc_obj ( ) ) { // Not eliminated or coarsened
// Only Lock node has JVMState needed here.
2015-02-17 13:54:53 -05:00
// Not that the preceding claim is documented anywhere else.
if ( alock - > jvms ( ) ! = NULL ) {
if ( alock - > as_Lock ( ) - > is_nested_lock_region ( ) ) {
// Mark eliminated related nested locks and unlocks.
Node * obj = alock - > obj_node ( ) ;
BoxLockNode * box_node = alock - > box_node ( ) - > as_BoxLock ( ) ;
assert ( ! box_node - > is_eliminated ( ) , " should not be marked yet " ) ;
// Note: BoxLock node is marked eliminated only here
// and it is used to indicate that all associated lock
// and unlock nodes are marked for elimination.
box_node - > set_eliminated ( ) ; // Box's hash is always NO_HASH here
for ( uint i = 0 ; i < box_node - > outcnt ( ) ; i + + ) {
Node * u = box_node - > raw_out ( i ) ;
if ( u - > is_AbstractLock ( ) ) {
alock = u - > as_AbstractLock ( ) ;
if ( alock - > box_node ( ) = = box_node ) {
// Verify that this Box is referenced only by related locks.
assert ( alock - > obj_node ( ) - > eqv_uncast ( obj ) , " " ) ;
// Mark all related locks and unlocks.
# ifdef ASSERT
alock - > log_lock_optimization ( C , " eliminate_lock_set_nested " ) ;
# endif
alock - > set_nested ( ) ;
}
2012-01-07 13:26:43 -08:00
}
2008-12-03 13:41:37 -08:00
}
2015-02-17 13:54:53 -05:00
} else {
# ifdef ASSERT
alock - > log_lock_optimization ( C , " eliminate_lock_NOT_nested_lock_region " ) ;
if ( C - > log ( ) ! = NULL )
alock - > as_Lock ( ) - > is_nested_lock_region ( C ) ; // rerun for debugging output
# endif
2012-01-07 13:26:43 -08:00
}
}
return ;
}
// Process locks for non escaping object
assert ( alock - > is_non_esc_obj ( ) , " " ) ;
} // EliminateNestedLocks
if ( alock - > is_non_esc_obj ( ) ) { // Lock is used for non escaping object
// Look for all locks of this object and mark them and
// corresponding BoxLock nodes as eliminated.
Node * obj = alock - > obj_node ( ) ;
for ( uint j = 0 ; j < obj - > outcnt ( ) ; j + + ) {
Node * o = obj - > raw_out ( j ) ;
2012-01-10 18:05:38 -08:00
if ( o - > is_AbstractLock ( ) & &
o - > as_AbstractLock ( ) - > obj_node ( ) - > eqv_uncast ( obj ) ) {
2012-01-07 13:26:43 -08:00
alock = o - > as_AbstractLock ( ) ;
Node * box = alock - > box_node ( ) ;
// Replace old box node with new eliminated box for all users
// of the same object and mark related locks as eliminated.
mark_eliminated_box ( box , obj ) ;
}
}
}
2011-06-04 10:36:22 -07:00
}
2008-03-14 16:40:42 -07:00
2011-06-04 10:36:22 -07:00
// We have determined that this lock/unlock can be eliminated; we simply
// eliminate the node without expanding it.
//
// Note: The membars associated with the lock/unlock are currently not
// eliminated. This should be investigated as a future enhancement.
//
bool PhaseMacroExpand : : eliminate_locking_node ( AbstractLockNode * alock ) {
if ( ! alock - > is_eliminated ( ) ) {
return false ;
}
# ifdef ASSERT
2012-01-07 13:26:43 -08:00
if ( ! alock - > is_coarsened ( ) ) {
2011-06-04 10:36:22 -07:00
// Check that new "eliminated" BoxLock node is created.
BoxLockNode * oldbox = alock - > box_node ( ) - > as_BoxLock ( ) ;
assert ( oldbox - > is_eliminated ( ) , " should be done already " ) ;
}
# endif
2009-11-12 09:24:21 -08:00
2015-02-17 13:54:53 -05:00
alock - > log_lock_optimization ( C , " eliminate_lock " ) ;
# ifndef PRODUCT
2008-03-14 16:40:42 -07:00
if ( PrintEliminateLocks ) {
if ( alock - > is_Lock ( ) ) {
2011-11-16 09:13:57 -08:00
tty - > print_cr ( " ++++ Eliminated: %d Lock " , alock - > _idx ) ;
2008-03-14 16:40:42 -07:00
} else {
2011-11-16 09:13:57 -08:00
tty - > print_cr ( " ++++ Eliminated: %d Unlock " , alock - > _idx ) ;
2008-03-14 16:40:42 -07:00
}
}
2015-02-17 13:54:53 -05:00
# endif
2008-03-14 16:40:42 -07:00
Node * mem = alock - > in ( TypeFunc : : Memory ) ;
Node * ctrl = alock - > in ( TypeFunc : : Control ) ;
2018-07-12 16:31:28 +02:00
guarantee ( ctrl ! = NULL , " missing control projection, cannot replace_node() with NULL " ) ;
2008-03-14 16:40:42 -07:00
extract_call_projections ( alock ) ;
// There are 2 projections from the lock. The lock node will
// be deleted when its last use is subsumed below.
assert ( alock - > outcnt ( ) = = 2 & &
_fallthroughproj ! = NULL & &
_memproj_fallthrough ! = NULL ,
" Unexpected projections from Lock/Unlock " ) ;
Node * fallthroughproj = _fallthroughproj ;
Node * memproj_fallthrough = _memproj_fallthrough ;
2007-12-01 00:00:00 +00:00
// The memory projection from a lock/unlock is RawMem
// The input to a Lock is merged memory, so extract its RawMem input
// (unless the MergeMem has been optimized away.)
if ( alock - > is_Lock ( ) ) {
2011-08-02 18:36:40 +02:00
// Search for the MemBarAcquireLock node and delete it also.
2008-03-14 16:40:42 -07:00
MemBarNode * membar = fallthroughproj - > unique_ctrl_out ( ) - > as_MemBar ( ) ;
2011-08-02 18:36:40 +02:00
assert ( membar ! = NULL & & membar - > Opcode ( ) = = Op_MemBarAcquireLock , " " ) ;
2008-03-14 16:40:42 -07:00
Node * ctrlproj = membar - > proj_out ( TypeFunc : : Control ) ;
Node * memproj = membar - > proj_out ( TypeFunc : : Memory ) ;
2009-04-07 19:04:24 -07:00
_igvn . replace_node ( ctrlproj , fallthroughproj ) ;
_igvn . replace_node ( memproj , memproj_fallthrough ) ;
2008-12-03 13:41:37 -08:00
// Delete the FastLock node also if this Lock node is its unique user
// (loop peeling may clone a Lock node).
Node * flock = alock - > as_Lock ( ) - > fastlock_node ( ) ;
if ( flock - > outcnt ( ) = = 1 ) {
assert ( flock - > unique_out ( ) = = alock , " sanity " ) ;
2009-04-07 19:04:24 -07:00
_igvn . replace_node ( flock , top ( ) ) ;
2008-12-03 13:41:37 -08:00
}
2007-12-01 00:00:00 +00:00
}
2011-08-02 18:36:40 +02:00
// Search for the MemBarReleaseLock node and delete it also.
2018-07-12 16:31:28 +02:00
if ( alock - > is_Unlock ( ) & & ctrl - > is_Proj ( ) & & ctrl - > in ( 0 ) - > is_MemBar ( ) ) {
2008-03-14 16:40:42 -07:00
MemBarNode * membar = ctrl - > in ( 0 ) - > as_MemBar ( ) ;
2011-08-02 18:36:40 +02:00
assert ( membar - > Opcode ( ) = = Op_MemBarReleaseLock & &
2008-03-14 16:40:42 -07:00
mem - > is_Proj ( ) & & membar = = mem - > in ( 0 ) , " " ) ;
2009-04-07 19:04:24 -07:00
_igvn . replace_node ( fallthroughproj , ctrl ) ;
_igvn . replace_node ( memproj_fallthrough , mem ) ;
2008-03-14 16:40:42 -07:00
fallthroughproj = ctrl ;
memproj_fallthrough = mem ;
ctrl = membar - > in ( TypeFunc : : Control ) ;
mem = membar - > in ( TypeFunc : : Memory ) ;
}
2009-04-07 19:04:24 -07:00
_igvn . replace_node ( fallthroughproj , ctrl ) ;
_igvn . replace_node ( memproj_fallthrough , mem ) ;
2008-03-14 16:40:42 -07:00
return true ;
2007-12-01 00:00:00 +00:00
}
//------------------------------expand_lock_node----------------------
void PhaseMacroExpand : : expand_lock_node ( LockNode * lock ) {
Node * ctrl = lock - > in ( TypeFunc : : Control ) ;
Node * mem = lock - > in ( TypeFunc : : Memory ) ;
Node * obj = lock - > obj_node ( ) ;
Node * box = lock - > box_node ( ) ;
2008-03-14 16:40:42 -07:00
Node * flock = lock - > fastlock_node ( ) ;
2007-12-01 00:00:00 +00:00
2012-01-12 14:45:04 -08:00
assert ( ! box - > as_BoxLock ( ) - > is_eliminated ( ) , " sanity " ) ;
2012-01-07 13:26:43 -08:00
2007-12-01 00:00:00 +00:00
// Make the merge point
2008-11-07 09:29:38 -08:00
Node * region ;
Node * mem_phi ;
Node * slow_path ;
if ( UseOptoBiasInlining ) {
/*
2009-02-27 13:27:09 -08:00
* See the full description in MacroAssembler : : biased_locking_enter ( ) .
2008-11-07 09:29:38 -08:00
*
* if ( ( mark_word & biased_lock_mask ) = = biased_lock_pattern ) {
* // The object is biased.
* proto_node = klass - > prototype_header ;
* o_node = thread | proto_node ;
* x_node = o_node ^ mark_word ;
* if ( ( x_node & ~ age_mask ) = = 0 ) { // Biased to the current thread ?
* // Done.
* } else {
* if ( ( x_node & biased_lock_mask ) ! = 0 ) {
* // The klass's prototype header is no longer biased.
* cas ( & mark_word , mark_word , proto_node )
* goto cas_lock ;
* } else {
* // The klass's prototype header is still biased.
* if ( ( x_node & epoch_mask ) ! = 0 ) { // Expired epoch?
* old = mark_word ;
* new = o_node ;
* } else {
* // Different thread or anonymous biased.
* old = mark_word & ( epoch_mask | age_mask | biased_lock_mask ) ;
* new = thread | old ;
* }
* // Try to rebias.
* if ( cas ( & mark_word , old , new ) = = 0 ) {
* // Done.
* } else {
* goto slow_path ; // Failed.
* }
* }
* }
* } else {
* // The object is not biased.
* cas_lock :
* if ( FastLock ( obj ) = = 0 ) {
* // Done.
* } else {
* slow_path :
* OptoRuntime : : complete_monitor_locking_Java ( obj ) ;
* }
* }
*/
2014-06-02 08:07:29 +02:00
region = new RegionNode ( 5 ) ;
2008-11-07 09:29:38 -08:00
// create a Phi for the memory state
2014-06-02 08:07:29 +02:00
mem_phi = new PhiNode ( region , Type : : MEMORY , TypeRawPtr : : BOTTOM ) ;
2008-11-07 09:29:38 -08:00
2014-06-02 08:07:29 +02:00
Node * fast_lock_region = new RegionNode ( 3 ) ;
Node * fast_lock_mem_phi = new PhiNode ( fast_lock_region , Type : : MEMORY , TypeRawPtr : : BOTTOM ) ;
2008-11-07 09:29:38 -08:00
// First, check mark word for the biased lock pattern.
Node * mark_node = make_load ( ctrl , mem , obj , oopDesc : : mark_offset_in_bytes ( ) , TypeX_X , TypeX_X - > basic_type ( ) ) ;
// Get fast path - mark word has the biased lock pattern.
ctrl = opt_bits_test ( ctrl , fast_lock_region , 1 , mark_node ,
2019-08-06 10:48:21 +02:00
markWord : : biased_lock_mask_in_place ,
markWord : : biased_lock_pattern , true ) ;
2008-11-07 09:29:38 -08:00
// fast_lock_region->in(1) is set to slow path.
fast_lock_mem_phi - > init_req ( 1 , mem ) ;
// Now check that the lock is biased to the current thread and has
// the same epoch and bias as Klass::_prototype_header.
// Special-case a fresh allocation to avoid building nodes:
Node * klass_node = AllocateNode : : Ideal_klass ( obj , & _igvn ) ;
if ( klass_node = = NULL ) {
Node * k_adr = basic_plus_adr ( obj , oopDesc : : klass_offset_in_bytes ( ) ) ;
2014-11-06 09:40:58 +01:00
klass_node = transform_later ( LoadKlassNode : : make ( _igvn , NULL , mem , k_adr , _igvn . type ( k_adr ) - > is_ptr ( ) ) ) ;
2008-12-16 12:23:39 -08:00
# ifdef _LP64
2013-08-12 17:37:02 +02:00
if ( UseCompressedClassPointers & & klass_node - > is_DecodeNKlass ( ) ) {
2008-12-16 12:23:39 -08:00
assert ( klass_node - > in ( 1 ) - > Opcode ( ) = = Op_LoadNKlass , " sanity " ) ;
klass_node - > in ( 1 ) - > init_req ( 0 , ctrl ) ;
} else
# endif
klass_node - > init_req ( 0 , ctrl ) ;
2008-11-07 09:29:38 -08:00
}
2011-12-07 11:35:03 +01:00
Node * proto_node = make_load ( ctrl , mem , klass_node , in_bytes ( Klass : : prototype_header_offset ( ) ) , TypeX_X , TypeX_X - > basic_type ( ) ) ;
2007-12-01 00:00:00 +00:00
2014-06-02 08:07:29 +02:00
Node * thread = transform_later ( new ThreadLocalNode ( ) ) ;
Node * cast_thread = transform_later ( new CastP2XNode ( ctrl , thread ) ) ;
Node * o_node = transform_later ( new OrXNode ( cast_thread , proto_node ) ) ;
Node * x_node = transform_later ( new XorXNode ( o_node , mark_node ) ) ;
2008-11-07 09:29:38 -08:00
// Get slow path - mark word does NOT match the value.
2019-08-30 09:06:46 +02:00
STATIC_ASSERT ( markWord : : age_mask_in_place < = INT_MAX ) ;
2008-11-07 09:29:38 -08:00
Node * not_biased_ctrl = opt_bits_test ( ctrl , region , 3 , x_node ,
2019-08-30 09:06:46 +02:00
( ~ ( int ) markWord : : age_mask_in_place ) , 0 ) ;
2008-11-07 09:29:38 -08:00
// region->in(3) is set to fast path - the object is biased to the current thread.
mem_phi - > init_req ( 3 , mem ) ;
// Mark word does NOT match the value (thread | Klass::_prototype_header).
// First, check biased pattern.
// Get fast path - _prototype_header has the same biased lock pattern.
ctrl = opt_bits_test ( not_biased_ctrl , fast_lock_region , 2 , x_node ,
2019-08-06 10:48:21 +02:00
markWord : : biased_lock_mask_in_place , 0 , true ) ;
2008-11-07 09:29:38 -08:00
not_biased_ctrl = fast_lock_region - > in ( 2 ) ; // Slow path
// fast_lock_region->in(2) - the prototype header is no longer biased
// and we have to revoke the bias on this object.
// We are going to try to reset the mark of this object to the prototype
// value and fall through to the CAS-based locking scheme.
Node * adr = basic_plus_adr ( obj , oopDesc : : mark_offset_in_bytes ( ) ) ;
2014-06-02 08:07:29 +02:00
Node * cas = new StoreXConditionalNode ( not_biased_ctrl , mem , adr ,
proto_node , mark_node ) ;
2008-11-07 09:29:38 -08:00
transform_later ( cas ) ;
2014-06-02 08:07:29 +02:00
Node * proj = transform_later ( new SCMemProjNode ( cas ) ) ;
2008-11-07 09:29:38 -08:00
fast_lock_mem_phi - > init_req ( 2 , proj ) ;
// Second, check epoch bits.
2014-06-02 08:07:29 +02:00
Node * rebiased_region = new RegionNode ( 3 ) ;
Node * old_phi = new PhiNode ( rebiased_region , TypeX_X ) ;
Node * new_phi = new PhiNode ( rebiased_region , TypeX_X ) ;
2008-11-07 09:29:38 -08:00
// Get slow path - mark word does NOT match epoch bits.
Node * epoch_ctrl = opt_bits_test ( ctrl , rebiased_region , 1 , x_node ,
2019-08-06 10:48:21 +02:00
markWord : : epoch_mask_in_place , 0 ) ;
2008-11-07 09:29:38 -08:00
// The epoch of the current bias is not valid, attempt to rebias the object
// toward the current thread.
rebiased_region - > init_req ( 2 , epoch_ctrl ) ;
old_phi - > init_req ( 2 , mark_node ) ;
new_phi - > init_req ( 2 , o_node ) ;
// rebiased_region->in(1) is set to fast path.
// The epoch of the current bias is still valid but we know
// nothing about the owner; it might be set or it might be clear.
2019-08-06 10:48:21 +02:00
Node * cmask = MakeConX ( markWord : : biased_lock_mask_in_place |
markWord : : age_mask_in_place |
markWord : : epoch_mask_in_place ) ;
2014-06-02 08:07:29 +02:00
Node * old = transform_later ( new AndXNode ( mark_node , cmask ) ) ;
cast_thread = transform_later ( new CastP2XNode ( ctrl , thread ) ) ;
Node * new_mark = transform_later ( new OrXNode ( cast_thread , old ) ) ;
2008-11-07 09:29:38 -08:00
old_phi - > init_req ( 1 , old ) ;
new_phi - > init_req ( 1 , new_mark ) ;
transform_later ( rebiased_region ) ;
transform_later ( old_phi ) ;
transform_later ( new_phi ) ;
// Try to acquire the bias of the object using an atomic operation.
// If this fails we will go in to the runtime to revoke the object's bias.
2014-06-02 08:07:29 +02:00
cas = new StoreXConditionalNode ( rebiased_region , mem , adr , new_phi , old_phi ) ;
2008-11-07 09:29:38 -08:00
transform_later ( cas ) ;
2014-06-02 08:07:29 +02:00
proj = transform_later ( new SCMemProjNode ( cas ) ) ;
2008-11-07 09:29:38 -08:00
// Get slow path - Failed to CAS.
not_biased_ctrl = opt_bits_test ( rebiased_region , region , 4 , cas , 0 , 0 ) ;
mem_phi - > init_req ( 4 , proj ) ;
// region->in(4) is set to fast path - the object is rebiased to the current thread.
// Failed to CAS.
2014-06-02 08:07:29 +02:00
slow_path = new RegionNode ( 3 ) ;
Node * slow_mem = new PhiNode ( slow_path , Type : : MEMORY , TypeRawPtr : : BOTTOM ) ;
2008-11-07 09:29:38 -08:00
slow_path - > init_req ( 1 , not_biased_ctrl ) ; // Capture slow-control
slow_mem - > init_req ( 1 , proj ) ;
// Call CAS-based locking scheme (FastLock node).
transform_later ( fast_lock_region ) ;
transform_later ( fast_lock_mem_phi ) ;
// Get slow path - FastLock failed to lock the object.
ctrl = opt_bits_test ( fast_lock_region , region , 2 , flock , 0 , 0 ) ;
mem_phi - > init_req ( 2 , fast_lock_mem_phi ) ;
// region->in(2) is set to fast path - the object is locked to the current thread.
slow_path - > init_req ( 2 , ctrl ) ; // Capture slow-control
slow_mem - > init_req ( 2 , fast_lock_mem_phi ) ;
transform_later ( slow_path ) ;
transform_later ( slow_mem ) ;
// Reset lock's memory edge.
lock - > set_req ( TypeFunc : : Memory , slow_mem ) ;
} else {
2014-06-02 08:07:29 +02:00
region = new RegionNode ( 3 ) ;
2008-11-07 09:29:38 -08:00
// create a Phi for the memory state
2014-06-02 08:07:29 +02:00
mem_phi = new PhiNode ( region , Type : : MEMORY , TypeRawPtr : : BOTTOM ) ;
2008-11-07 09:29:38 -08:00
// Optimize test; set region slot 2
slow_path = opt_bits_test ( ctrl , region , 2 , flock , 0 , 0 ) ;
mem_phi - > init_req ( 2 , mem ) ;
}
2007-12-01 00:00:00 +00:00
// Make slow path call
2015-04-16 08:23:26 -07:00
CallNode * call = make_slow_call ( ( CallNode * ) lock , OptoRuntime : : complete_monitor_enter_Type ( ) ,
OptoRuntime : : complete_monitor_locking_Java ( ) , NULL , slow_path ,
obj , box , NULL ) ;

  extract_call_projections(call);

  // Slow path can only throw asynchronous exceptions, which are always
  // de-opted.  So the compiler thinks the slow-call can never throw an
  // exception.  If it DOES throw an exception we would need the debug
  // info removed first (since if it throws there is no monitor).
  assert(_ioproj_fallthrough == NULL && _ioproj_catchall == NULL &&
         _memproj_catchall == NULL && _catchallcatchproj == NULL, "Unexpected projection from Lock");

  // Capture slow path
  // disconnect fall-through projection from call and create a new one
  // hook up users of fall-through projection to region
  Node *slow_ctrl = _fallthroughproj->clone();
  transform_later(slow_ctrl);
  _igvn.hash_delete(_fallthroughproj);
  _fallthroughproj->disconnect_inputs(C);
  region->init_req(1, slow_ctrl);
  // region inputs are now complete
  transform_later(region);
  _igvn.replace_node(_fallthroughproj, region);

  Node *memproj = transform_later(new ProjNode(call, TypeFunc::Memory));

  mem_phi->init_req(1, memproj);
  transform_later(mem_phi);
  _igvn.replace_node(_memproj_fallthrough, mem_phi);
}
//------------------------------expand_unlock_node----------------------
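// Expand an UnlockNode into a FastUnlock fast path plus a runtime-call slow
// path.  Control from both paths is merged in 'region' and memory in
// 'mem_phi'; these replace the call's fall-through projections.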
void PhaseMacroExpand::expand_unlock_node(UnlockNode *unlock) {

  Node* ctrl = unlock->in(TypeFunc::Control);
  Node* mem = unlock->in(TypeFunc::Memory);
  Node* obj = unlock->obj_node();
  Node* box = unlock->box_node();

  assert(!box->as_BoxLock()->is_eliminated(), "sanity");

  // No need for a null check on unlock

  // Make the merge point
  Node *region;
  Node *mem_phi;

  if (UseOptoBiasInlining) {
    // Check for biased locking unlock case, which is a no-op.
    // See the full description in MacroAssembler::biased_locking_exit().
    region  = new RegionNode(4);
    // create a Phi for the memory state
    mem_phi = new PhiNode(region, Type::MEMORY, TypeRawPtr::BOTTOM);
    mem_phi->init_req(3, mem);

    Node* mark_node = make_load(ctrl, mem, obj, oopDesc::mark_offset_in_bytes(), TypeX_X, TypeX_X->basic_type());
    ctrl = opt_bits_test(ctrl, region, 3, mark_node,
                         markWord::biased_lock_mask_in_place,
                         markWord::biased_lock_pattern);
  } else {
    region  = new RegionNode(3);
    // create a Phi for the memory state
    mem_phi = new PhiNode(region, Type::MEMORY, TypeRawPtr::BOTTOM);
  }
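
  // Region/Phi slot assignment: slot 1 is the runtime-call slow path, slot 2
  // is the FastUnlock fast path, and (with UseOptoBiasInlining) slot 3 is the
  // biased-locking unlock, which requires no memory update.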

  FastUnlockNode *funlock = new FastUnlockNode(ctrl, obj, box);
  funlock = transform_later(funlock)->as_FastUnlock();
  // Optimize test; set region slot 2
  Node *slow_path = opt_bits_test(ctrl, region, 2, funlock, 0, 0);

  Node *thread = transform_later(new ThreadLocalNode());

  CallNode *call = make_slow_call((CallNode*) unlock, OptoRuntime::complete_monitor_exit_Type(),
                                  CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C),
                                  "complete_monitor_unlocking_C", slow_path, obj, box, thread);

  extract_call_projections(call);
  assert(_ioproj_fallthrough == NULL && _ioproj_catchall == NULL &&
         _memproj_catchall == NULL && _catchallcatchproj == NULL, "Unexpected projection from Lock");

  // No exceptions for unlocking
  // Capture slow path
  // disconnect fall-through projection from call and create a new one
  // hook up users of fall-through projection to region
  Node *slow_ctrl = _fallthroughproj->clone();
  transform_later(slow_ctrl);
  _igvn.hash_delete(_fallthroughproj);
  _fallthroughproj->disconnect_inputs(C);
  region->init_req(1, slow_ctrl);
  // region inputs are now complete
  transform_later(region);
  _igvn.replace_node(_fallthroughproj, region);

  Node *memproj = transform_later(new ProjNode(call, TypeFunc::Memory));
  mem_phi->init_req(1, memproj);
  mem_phi->init_req(2, mem);
  transform_later(mem_phi);
  _igvn.replace_node(_memproj_fallthrough, mem_phi);
}
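
//------------------------------expand_subtypecheck_node--------------------
// Expand a SubTypeCheck macro node: each If that tests the check's Bool
// output is rewritten into an explicit subtype check built by
// Phase::gen_subtype_check(), and the macro node itself is removed.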
void PhaseMacroExpand::expand_subtypecheck_node(SubTypeCheckNode *check) {
  assert(check->in(SubTypeCheckNode::Control) == NULL, "should be pinned");
  Node* bol = check->unique_out();
  Node* obj_or_subklass = check->in(SubTypeCheckNode::ObjOrSubKlass);
  Node* superklass = check->in(SubTypeCheckNode::SuperKlass);
  assert(bol->is_Bool() && bol->as_Bool()->_test._test == BoolTest::ne, "unexpected bool node");
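
  // Rewrite every If hanging off this Bool: its true projection becomes the
  // not-a-subtype control returned by gen_subtype_check(), and its false
  // projection becomes the control on which the subtype check passed.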
  for (DUIterator_Last imin, i = bol->last_outs(imin); i >= imin; --i) {
    Node* iff = bol->last_out(i);
    assert(iff->is_If(), "where's the if?");

    if (iff->in(0)->is_top()) {
      _igvn.replace_input_of(iff, 1, C->top());
      continue;
    }

    Node* iftrue = iff->as_If()->proj_out(1);
    Node* iffalse = iff->as_If()->proj_out(0);
    Node* ctrl = iff->in(0);

    Node* subklass = NULL;
    if (_igvn.type(obj_or_subklass)->isa_klassptr()) {
      subklass = obj_or_subklass;
    } else {
      Node* k_adr = basic_plus_adr(obj_or_subklass, oopDesc::klass_offset_in_bytes());
      subklass = _igvn.transform(LoadKlassNode::make(_igvn, NULL, C->immutable_memory(), k_adr, TypeInstPtr::KLASS));
    }

    Node* not_subtype_ctrl = Phase::gen_subtype_check(subklass, superklass, &ctrl, NULL, _igvn);

    _igvn.replace_input_of(iff, 0, C->top());
    _igvn.replace_node(iftrue, not_subtype_ctrl);
    _igvn.replace_node(iffalse, ctrl);
  }
  _igvn.replace_node(check, C->top());
}
//---------------------------eliminate_macro_nodes----------------------
// Eliminate scalar replaced allocations and associated locks.
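// Runs in two passes: Lock/Unlock nodes are marked and eliminated first,
// then allocations and autobox CallStaticJava nodes, repeating each pass
// until no further progress is made.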
void PhaseMacroExpand::eliminate_macro_nodes() {
  if (C->macro_count() == 0)
    return;

  // First, attempt to eliminate locks
  int cnt = C->macro_count();
  for (int i = 0; i < cnt; i++) {
    Node *n = C->macro_node(i);
    if (n->is_AbstractLock()) { // Lock and Unlock nodes
      // Before elimination mark all associated (same box and obj)
      // lock and unlock nodes.
      mark_eliminated_locking_nodes(n->as_AbstractLock());
    }
  }
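  // The marking above groups Lock/Unlock nodes that share a box and object,
  // so the elimination pass below treats each such group consistently.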
  bool progress = true;
  while (progress) {
    progress = false;
    for (int i = C->macro_count(); i > 0; i--) {
      Node* n = C->macro_node(i - 1);
      bool success = false;
      debug_only(int old_macro_count = C->macro_count(););
      if (n->is_AbstractLock()) {
        success = eliminate_locking_node(n->as_AbstractLock());
      }
      assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
      progress = progress || success;
    }
  }
  // Next, attempt to eliminate allocations
  _has_locks = false;
  progress = true;
  while (progress) {
    progress = false;
    for (int i = C->macro_count(); i > 0; i--) {
      Node* n = C->macro_node(i - 1);
      bool success = false;
      debug_only(int old_macro_count = C->macro_count(););
      switch (n->class_id()) {
      case Node::Class_Allocate:
      case Node::Class_AllocateArray:
        success = eliminate_allocate_node(n->as_Allocate());
        break;
      case Node::Class_CallStaticJava:
        success = eliminate_boxing_node(n->as_CallStaticJava());
        break;
      case Node::Class_Lock:
      case Node::Class_Unlock:
        assert(!n->as_AbstractLock()->is_eliminated(), "sanity");
        _has_locks = true;
        break;
      case Node::Class_ArrayCopy:
        break;
      case Node::Class_OuterStripMinedLoop:
        break;
      case Node::Class_SubTypeCheck:
        break;
      case Node::Class_Opaque1:
        break;
      default:
        assert(n->Opcode() == Op_LoopLimit ||
               n->Opcode() == Op_Opaque2   ||
               n->Opcode() == Op_Opaque3   ||
               BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(n),
               "unknown node type in macro list");
      }
      assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
      progress = progress || success;
    }
  }
}
//------------------------------expand_macro_nodes----------------------
// Returns true if a failure occurred.
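// Expansion proceeds in stages: leftover Opaque, LoopLimit and
// OuterStripMinedLoop nodes are resolved first, then all non-allocation
// macro nodes (locks, arraycopies, subtype checks), and finally
// Allocate/AllocateArray nodes, with IGVN run after each expansion to keep
// the node count down.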
bool PhaseMacroExpand::expand_macro_nodes() {
  // Last attempt to eliminate macro nodes.
  eliminate_macro_nodes();

  // Eliminate Opaque and LoopLimit nodes. Do it after all loop optimizations.
  bool progress = true;
  while (progress) {
    progress = false;
    for (int i = C->macro_count(); i > 0; i--) {
      Node* n = C->macro_node(i - 1);
      bool success = false;
      debug_only(int old_macro_count = C->macro_count(););
      if (n->Opcode() == Op_LoopLimit) {
        // Remove it from macro list and put on IGVN worklist to optimize.
        C->remove_macro_node(n);
        _igvn._worklist.push(n);
        success = true;
      } else if (n->Opcode() == Op_CallStaticJava) {
        // Remove it from macro list and put on IGVN worklist to optimize.
        C->remove_macro_node(n);
        _igvn._worklist.push(n);
        success = true;
      } else if (n->is_Opaque1() || n->Opcode() == Op_Opaque2) {
        _igvn.replace_node(n, n->in(1));
        success = true;
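      // With RTM, Opaque3 nodes shield the RTM state check from earlier
      // optimizations.  Resolve them here: keep the check if locks remain in
      // the code, otherwise rewrite the compare so the check folds away.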
#if INCLUDE_RTM_OPT
      } else if ((n->Opcode() == Op_Opaque3) && ((Opaque3Node*)n)->rtm_opt()) {
        assert(C->profile_rtm(), "should be used only in rtm deoptimization code");
        assert((n->outcnt() == 1) && n->unique_out()->is_Cmp(), "");
        Node* cmp = n->unique_out();
#ifdef ASSERT
        // Validate graph.
        assert((cmp->outcnt() == 1) && cmp->unique_out()->is_Bool(), "");
        BoolNode* bol = cmp->unique_out()->as_Bool();
        assert((bol->outcnt() == 1) && bol->unique_out()->is_If() &&
               (bol->_test._test == BoolTest::ne), "");
        IfNode* ifn = bol->unique_out()->as_If();
        assert((ifn->outcnt() == 2) &&
               ifn->proj_out(1)->is_uncommon_trap_proj(Deoptimization::Reason_rtm_state_change) != NULL, "");
#endif
        Node* repl = n->in(1);
        if (!_has_locks) {
          // Remove RTM state check if there are no locks in the code.
          // Replace input to compare the same value.
          repl = (cmp->in(1) == n) ? cmp->in(2) : cmp->in(1);
        }
        _igvn.replace_node(n, repl);
        success = true;
#endif
      } else if (n->Opcode() == Op_OuterStripMinedLoop) {
        n->as_OuterStripMinedLoop()->adjust_strip_mined_loop(&_igvn);
        C->remove_macro_node(n);
        success = true;
      }
      assert(!success || (C->macro_count() == (old_macro_count - 1)), "elimination must have deleted one node from macro list");
      progress = progress || success;
    }
  }

  // Clean up the graph so we're less likely to hit the maximum node
  // limit
  _igvn.set_delay_transform(false);
  _igvn.optimize();
  if (C->failing())  return true;
  _igvn.set_delay_transform(true);

  // Because we run IGVN after each expansion, some macro nodes may go
  // dead and be removed from the list as we iterate over it. Move
  // Allocate nodes (processed in a second pass) at the beginning of
  // the list and then iterate from the last element of the list until
  // an Allocate node is seen. This is robust to random deletion in
  // the list due to nodes going dead.
  C->sort_macro_nodes();
// expand arraycopy "macro" nodes first
// For ReduceBulkZeroing, we must first process all arraycopy nodes
// before the allocate nodes are expanded.
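  // This pass stops at the first Allocate it encounters: sort_macro_nodes()
  // has moved all Allocate nodes to the front of the list, so everything
  // behind them in the list is expanded here.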
  while (C->macro_count() > 0) {
    int macro_count = C->macro_count();
    Node* n = C->macro_node(macro_count - 1);
    assert(n->is_macro(), "only macro nodes expected here");
    if (_igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top())) {
      // node is unreachable, so don't try to expand it
      C->remove_macro_node(n);
      continue;
    }
    if (n->is_Allocate()) {
      break;
    }
    // Make sure expansion will not cause node limit to be exceeded.
    // Worst case is a macro node gets expanded into about 200 nodes.
    // Allow 50% more for optimization.
    if (C->check_node_count(300, "out of nodes before macro expansion")) {
      return true;
    }

    debug_only(int old_macro_count = C->macro_count(););

    switch (n->class_id()) {
    case Node::Class_Lock:
      expand_lock_node(n->as_Lock());
      assert(C->macro_count() == (old_macro_count - 1), "expansion must have deleted one node from macro list");
      break;
    case Node::Class_Unlock:
      expand_unlock_node(n->as_Unlock());
      assert(C->macro_count() == (old_macro_count - 1), "expansion must have deleted one node from macro list");
      break;
    case Node::Class_ArrayCopy:
      expand_arraycopy_node(n->as_ArrayCopy());
      assert(C->macro_count() == (old_macro_count - 1), "expansion must have deleted one node from macro list");
      break;
    case Node::Class_SubTypeCheck:
      expand_subtypecheck_node(n->as_SubTypeCheck());
      assert(C->macro_count() == (old_macro_count - 1), "expansion must have deleted one node from macro list");
      break;
    default:
      assert(false, "unknown node type in macro list");
    }
    assert(C->macro_count() < macro_count, "must have deleted a node from macro list");
    if (C->failing())  return true;

    // Clean up the graph so we're less likely to hit the maximum node
    // limit
    _igvn.set_delay_transform(false);
    _igvn.optimize();
    if (C->failing())  return true;
    _igvn.set_delay_transform(true);
  }

  // All nodes except Allocate nodes are expanded now. There could be
  // new optimization opportunities (such as folding newly created
  // load from a just allocated object). Run IGVN.

  // expand "macro" nodes
  // nodes are removed from the macro list as they are processed
  while (C->macro_count() > 0) {
    int macro_count = C->macro_count();
    Node* n = C->macro_node(macro_count - 1);
    assert(n->is_macro(), "only macro nodes expected here");
    if (_igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top())) {
      // node is unreachable, so don't try to expand it
      C->remove_macro_node(n);
      continue;
    }
    // Make sure expansion will not cause node limit to be exceeded.
    // Worst case is a macro node gets expanded into about 200 nodes.
    // Allow 50% more for optimization.
    if (C->check_node_count(300, "out of nodes before macro expansion")) {
      return true;
    }
    switch (n->class_id()) {
    case Node::Class_Allocate:
      expand_allocate(n->as_Allocate());
      break;
    case Node::Class_AllocateArray:
      expand_allocate_array(n->as_AllocateArray());
      break;
    default:
      assert(false, "unknown node type in macro list");
    }
    assert(C->macro_count() < macro_count, "must have deleted a node from macro list");
    if (C->failing())  return true;

    // Clean up the graph so we're less likely to hit the maximum node
    // limit
    _igvn.set_delay_transform(false);
    _igvn.optimize();
    if (C->failing())  return true;
    _igvn.set_delay_transform(true);
  }

  _igvn.set_delay_transform(false);
  return false;
}