/*
 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "jvm.h"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "code/debugInfoRec.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "code/scopeDesc.hpp"
#include "compiler/compilationPolicy.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/oopMapCache.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/constantPool.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/fieldStreams.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/fieldDescriptor.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stackWatermarkSet.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vframe_hp.hpp"
#include "utilities/events.hpp"
#include "utilities/macros.hpp"
#include "utilities/preserveException.hpp"
#include "utilities/xmlstream.hpp"
#if INCLUDE_JFR
#include "jfr/jfrEvents.hpp"
#include "jfr/metadata/jfrSerializer.hpp"
#endif

bool DeoptimizationMarker::_is_active = false;

Deoptimization::UnrollBlock::UnrollBlock(int  size_of_deoptimized_frame,
                                         int  caller_adjustment,
                                         int  caller_actual_parameters,
                                         int  number_of_frames,
                                         intptr_t* frame_sizes,
                                         address* frame_pcs,
                                         BasicType return_type,
                                         int exec_mode) {
  _size_of_deoptimized_frame = size_of_deoptimized_frame;
  _caller_adjustment         = caller_adjustment;
  _caller_actual_parameters  = caller_actual_parameters;
  _number_of_frames          = number_of_frames;
  _frame_sizes               = frame_sizes;
  _frame_pcs                 = frame_pcs;
  _register_block            = NEW_C_HEAP_ARRAY(intptr_t, RegisterMap::reg_count * 2, mtCompiler);
  _return_type               = return_type;
  _initial_info              = 0;
  // PD (x86 only)
  _counter_temp              = 0;
  _unpack_kind               = exec_mode;
  _sender_sp_temp            = 0;

  _total_frame_sizes         = size_of_frames();
  assert(exec_mode >= 0 && exec_mode < Unpack_LIMIT, "Unexpected exec_mode");
}
Deoptimization::UnrollBlock::~UnrollBlock() {
  FREE_C_HEAP_ARRAY(intptr_t, _frame_sizes);
  FREE_C_HEAP_ARRAY(intptr_t, _frame_pcs);
  FREE_C_HEAP_ARRAY(intptr_t, _register_block);
}

intptr_t* Deoptimization::UnrollBlock::value_addr_at(int register_number) const {
  assert(register_number < RegisterMap::reg_count, "checking register number");
  return &_register_block[register_number * 2];
}

int Deoptimization::UnrollBlock::size_of_frames() const {
  // Account first for the adjustment of the initial frame
  int result = _caller_adjustment;
  for (int index = 0; index < number_of_frames(); index++) {
    result += frame_sizes()[index];
  }
  return result;
}

void Deoptimization::UnrollBlock::print() {
  ttyLocker ttyl;
  tty->print_cr("UnrollBlock");
  tty->print_cr("  size_of_deoptimized_frame = %d", _size_of_deoptimized_frame);
  tty->print(   "  frame_sizes: ");
  for (int index = 0; index < number_of_frames(); index++) {
    tty->print(INTX_FORMAT " ", frame_sizes()[index]);
  }
  tty->cr();
}
// In order to make fetch_unroll_info work properly with escape
// analysis, the method was changed from JRT_LEAF to JRT_BLOCK_ENTRY and
// ResetNoHandleMark and HandleMark were removed from it. The actual reallocation
// of previously eliminated objects occurs in realloc_objects, which is
// called from the method fetch_unroll_info_helper below.
JRT_BLOCK_ENTRY(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(JavaThread* thread, int exec_mode))
  // It is actually ok to allocate handles in a leaf method. It causes no safepoints,
  // but makes the entry a little slower. There is however a little dance we have to
  // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro.

  // fetch_unroll_info() is called at the beginning of the deoptimization
  // handler. Note this fact before we start generating temporary frames
  // that can confuse an asynchronous stack walker. This counter is
  // decremented at the end of unpack_frames().
  if (TraceDeoptimization) {
    tty->print_cr("Deoptimizing thread " INTPTR_FORMAT, p2i(thread));
  }
  thread->inc_in_deopt_handler();

  if (exec_mode == Unpack_exception) {
    // When we get here, a callee has thrown an exception into a deoptimized
    // frame. That throw might have deferred stack watermark checking until
    // after unwinding. So we deal with such deferred requests here.
    StackWatermarkSet::after_unwind(thread);
  }

  return fetch_unroll_info_helper(thread, exec_mode);
JRT_END
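
// Rematerialization support (C2 escape analysis / JVMCI): the two helpers
// below put eliminated state back into a form the interpreter can use.
// eliminate_allocations() reallocates scalar-replaced objects on the heap and
// reassigns their fields; eliminate_locks() re-locks monitors whose
// synchronization was elided by the compiler.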
#if COMPILER2_OR_JVMCI
static bool eliminate_allocations(JavaThread* thread, int exec_mode, CompiledMethod* compiled_method,
                                  frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk) {
  bool realloc_failures = false;
  assert(chunk->at(0)->scope() != NULL, "expect only compiled java frames");

  GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects();

  // The flag return_oop() indicates call sites which return oop
  // in compiled code. Such sites include java method calls,
  // runtime calls (for example, used to allocate new objects/arrays
  // on slow code path) and any other calls generated in compiled code.
  // It is not guaranteed that we can get such information here only
  // by analyzing bytecode in deoptimized frames. This is why this flag
  // is set during method compilation (see Compile::Process_OopMap_Node()).
  // If the previous frame was popped or if we are dispatching an exception,
  // we don't have an oop result.
  bool save_oop_result = chunk->at(0)->scope()->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
  Handle return_value;
  if (save_oop_result) {
    // Reallocation may trigger GC. If deoptimization happened on return from
    // call which returns oop we need to save it since it is not in oopmap.
    oop result = deoptee.saved_oop_result(&map);
    assert(oopDesc::is_oop_or_null(result), "must be oop");
    return_value = Handle(thread, result);
    assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
    if (TraceDeoptimization) {
      ttyLocker ttyl;
      tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
    }
  }
  if (objects != NULL) {
    JRT_BLOCK
      realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);
    JRT_END
    bool skip_internal = (compiled_method != NULL) && !compiled_method->is_compiled_by_jvmci();
    Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal);
#ifndef PRODUCT
    if (TraceDeoptimization) {
      ttyLocker ttyl;
      tty->print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, p2i(thread));
      Deoptimization::print_objects(objects, realloc_failures);
    }
#endif
  }
  if (save_oop_result) {
    // Restore result.
    deoptee.set_saved_oop_result(&map, return_value());
  }
  return realloc_failures;
}

static void eliminate_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures) {
  assert(thread == Thread::current(), "should be");
  HandleMark hm(thread);
#ifndef PRODUCT
  bool first = true;
#endif
  for (int i = 0; i < chunk->length(); i++) {
    compiledVFrame* cvf = chunk->at(i);
    assert(cvf->scope() != NULL, "expect only compiled java frames");
    GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
    if (monitors->is_nonempty()) {
      Deoptimization::relock_objects(monitors, thread, realloc_failures);
#ifndef PRODUCT
      if (PrintDeoptimizationDetails) {
        ttyLocker ttyl;
        for (int j = 0; j < monitors->length(); j++) {
          MonitorInfo* mi = monitors->at(j);
          if (mi->eliminated()) {
            if (first) {
              first = false;
              tty->print_cr("RELOCK OBJECTS in thread " INTPTR_FORMAT, p2i(thread));
            }
            if (mi->owner_is_scalar_replaced()) {
              Klass* k = java_lang_Class::as_Klass(mi->owner_klass());
              tty->print_cr("     failed reallocation for klass %s", k->external_name());
            } else {
              tty->print_cr("     object <" INTPTR_FORMAT "> locked", p2i(mi->owner()));
            }
          }
        }
      }
#endif // !PRODUCT
    }
  }
}
#endif // COMPILER2_OR_JVMCI

// This is factored, since it is both called from a JRT_LEAF (deoptimization) and a JRT_ENTRY (uncommon_trap)
Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread* thread, int exec_mode) {
  // When we get here we are about to unwind the deoptee frame. In order to
  // catch not yet safe to use frames, the following stack watermark barrier
  // poll will make such frames safe to use.
  StackWatermarkSet::before_unwind(thread);

  // Note: there is a safepoint safety issue here. No matter whether we enter
  // via vanilla deopt or uncommon trap we MUST NOT stop at a safepoint once
  // the vframeArray is created.
  //

  // Allocate our special deoptimization ResourceMark
  DeoptResourceMark* dmark = new DeoptResourceMark(thread);
  assert(thread->deopt_mark() == NULL, "Pending deopt!");
  thread->set_deopt_mark(dmark);

  frame stub_frame = thread->last_frame(); // Makes stack walkable as side effect
  RegisterMap map(thread, true);
  RegisterMap dummy_map(thread, false);
  // Now get the deoptee with a valid map
  frame deoptee = stub_frame.sender(&map);
  // Set the deoptee nmethod
  assert(thread->deopt_compiled_method() == NULL, "Pending deopt!");
  CompiledMethod* cm = deoptee.cb()->as_compiled_method_or_null();
  thread->set_deopt_compiled_method(cm);

  if (VerifyStack) {
    thread->validate_frame_layout();
  }

  // Create a growable array of VFrames where each VFrame represents an inlined
  // Java frame.  This storage is allocated with the usual system arena.
  assert(deoptee.is_compiled_frame(), "Wrong frame type");
  GrowableArray<compiledVFrame*>* chunk = new GrowableArray<compiledVFrame*>(10);
  vframe* vf = vframe::new_vframe(&deoptee, &map, thread);
  while (!vf->is_top()) {
    assert(vf->is_compiled_frame(), "Wrong frame type");
    chunk->push(compiledVFrame::cast(vf));
    vf = vf->sender();
  }
  assert(vf->is_compiled_frame(), "Wrong frame type");
  chunk->push(compiledVFrame::cast(vf));

  bool realloc_failures = false;

#if COMPILER2_OR_JVMCI
#if INCLUDE_JVMCI
  bool jvmci_enabled = true;
#else
  bool jvmci_enabled = false;
#endif

  // Reallocate the non-escaping objects and restore their fields. Then
  // relock objects if synchronization on them was eliminated.
  if (jvmci_enabled COMPILER2_PRESENT( || (DoEscapeAnalysis && EliminateAllocations) )) {
    realloc_failures = eliminate_allocations(thread, exec_mode, cm, deoptee, map, chunk);
  }
#endif // COMPILER2_OR_JVMCI

  // Revoke biases, done with in java state.
  // No safepoints allowed after this
  revoke_from_deopt_handler(thread, deoptee, &map);

  // Ensure that no safepoint is taken after pointers have been stored
  // in fields of rematerialized objects. If a safepoint occurs from here on
  // out the java state residing in the vframeArray will be missed.
  // Locks may be rebiased in a safepoint.
  NoSafepointVerifier no_safepoint;

#if COMPILER2_OR_JVMCI
  if (jvmci_enabled COMPILER2_PRESENT( || ((DoEscapeAnalysis || EliminateNestedLocks) && EliminateLocks) )) {
    eliminate_locks(thread, chunk, realloc_failures);
  }
#endif // COMPILER2_OR_JVMCI

  ScopeDesc* trap_scope = chunk->at(0)->scope();
  Handle exceptionObject;
  if (trap_scope->rethrow_exception()) {
    if (PrintDeoptimizationDetails) {
      tty->print_cr("Exception to be rethrown in the interpreter for method %s::%s at bci %d", trap_scope->method()->method_holder()->name()->as_C_string(), trap_scope->method()->name()->as_C_string(), trap_scope->bci());
    }
    GrowableArray<ScopeValue*>* expressions = trap_scope->expressions();
    guarantee(expressions != NULL && expressions->length() > 0, "must have exception to throw");
    ScopeValue* topOfStack = expressions->top();
    exceptionObject = StackValue::create_stack_value(&deoptee, &map, topOfStack)->get_obj();
    guarantee(exceptionObject() != NULL, "exception oop can not be null");
  }

  vframeArray* array = create_vframeArray(thread, deoptee, &map, chunk, realloc_failures);
#if COMPILER2_OR_JVMCI
  if (realloc_failures) {
    pop_frames_failed_reallocs(thread, array);
  }
#endif

  assert(thread->vframe_array_head() == NULL, "Pending deopt!");
  thread->set_vframe_array_head(array);

  // Now that the vframeArray has been created if we have any deferred local writes
  // added by jvmti then we can free up that structure as the data is now in the
  // vframeArray

  if (thread->deferred_locals() != NULL) {
    GrowableArray<jvmtiDeferredLocalVariableSet*>* list = thread->deferred_locals();
    int i = 0;
    do {
      // Because of inlining we could have multiple vframes for a single frame
      // and several of the vframes could have deferred writes. Find them all.
      if (list->at(i)->id() == array->original().id()) {
        jvmtiDeferredLocalVariableSet* dlv = list->at(i);
        list->remove_at(i);
        // individual jvmtiDeferredLocalVariableSet are CHeapObj's
        delete dlv;
      } else {
        i++;
      }
    } while ( i < list->length() );
    if (list->length() == 0) {
      thread->set_deferred_locals(NULL);
      // free the list and elements back to C heap.
      delete list;
    }
  }

  // Compute the caller frame based on the sender sp of stub_frame and stored frame sizes info.
  CodeBlob* cb = stub_frame.cb();
  // Verify we have the right vframeArray
  assert(cb->frame_size() >= 0, "Unexpected frame size");
  intptr_t* unpack_sp = stub_frame.sp() + cb->frame_size();

  // If the deopt call site is a MethodHandle invoke call site we have
  // to adjust the unpack_sp.
  nmethod* deoptee_nm = deoptee.cb()->as_nmethod_or_null();
  if (deoptee_nm != NULL && deoptee_nm->is_method_handle_return(deoptee.pc()))
    unpack_sp = deoptee.unextended_sp();

#ifdef ASSERT
  assert(cb->is_deoptimization_stub() ||
         cb->is_uncommon_trap_stub() ||
         strcmp("Stub<DeoptimizationStub.deoptimizationHandler>", cb->name()) == 0 ||
         strcmp("Stub<UncommonTrapStub.uncommonTrapHandler>", cb->name()) == 0,
         "unexpected code blob: %s", cb->name());
#endif

  // This is a guarantee instead of an assert because if vframe doesn't match
  // we will unpack the wrong deoptimized frame and wind up in strange places
  // where it will be very difficult to figure out what went wrong. Better
  // to die an early death here than some very obscure death later when the
  // trail is cold.
  // Note: on ia64 this guarantee can be fooled by frames with no memory stack
  // in that it will fail to detect a problem when there is one. This needs
  // more work in tiger timeframe.
  guarantee(array->unextended_sp() == unpack_sp, "vframe_array_head must contain the vframeArray to unpack");

  int number_of_frames = array->frames();

  // Compute the vframes' sizes.  Note that frame_sizes[] entries are ordered from outermost to innermost
  // virtual activation, which is the reverse of the elements in the vframes array.
  intptr_t* frame_sizes = NEW_C_HEAP_ARRAY(intptr_t, number_of_frames, mtCompiler);
  // +1 because we always have an interpreter return address for the final slot.
  address* frame_pcs = NEW_C_HEAP_ARRAY(address, number_of_frames + 1, mtCompiler);
  int popframe_extra_args = 0;
  // Create an interpreter return address for the stub to use as its return
  // address so the skeletal frames are perfectly walkable
  frame_pcs[number_of_frames] = Interpreter::deopt_entry(vtos, 0);

  // PopFrame requires that the preserved incoming arguments from the recently-popped topmost
  // activation be put back on the expression stack of the caller for reexecution
  if (JvmtiExport::can_pop_frame() && thread->popframe_forcing_deopt_reexecution()) {
    popframe_extra_args = in_words(thread->popframe_preserved_args_size_in_words());
  }

  // Find the current pc for sender of the deoptee. Since the sender may have been deoptimized
  // itself since the deoptee vframeArray was created we must get a fresh value of the pc rather
  // than simply use array->sender.pc(). This requires us to walk the current set of frames
  //
  frame deopt_sender = stub_frame.sender(&dummy_map); // First is the deoptee frame
  deopt_sender = deopt_sender.sender(&dummy_map);     // Now deoptee caller

  // It's possible that the number of parameters at the call site is
  // different than number of arguments in the callee when method
  // handles are used.  If the caller is interpreted get the real
  // value so that the proper amount of space can be added to its
  // frame.
  bool caller_was_method_handle = false;
  if (deopt_sender.is_interpreted_frame()) {
    methodHandle method(thread, deopt_sender.interpreter_frame_method());
    Bytecode_invoke cur = Bytecode_invoke_check(method, deopt_sender.interpreter_frame_bci());
    if (cur.is_invokedynamic() || cur.is_invokehandle()) {
      // Method handle invokes may involve fairly arbitrary chains of
      // calls so it's impossible to know how much actual space the
      // caller has for locals.
      caller_was_method_handle = true;
    }
  }

  //
  // frame_sizes/frame_pcs[0] oldest frame (int or c2i)
  // frame_sizes/frame_pcs[1] next oldest frame (int)
  // frame_sizes/frame_pcs[n] youngest frame (int)
  //
  // Now a pc in frame_pcs is actually the return address to the frame's caller (a frame
  // owns the space for the return address to its caller).  Confusing ain't it.
  //
  // The vframe array can address vframes with indices running from
  // 0.._frames-1. Index 0 is the youngest frame and _frame - 1 is the oldest (root) frame.
  // When we create the skeletal frames we need the oldest frame to be in the zero slot
  // in the frame_sizes/frame_pcs so the assembly code can do a trivial walk.
  // so things look a little strange in this loop.
  //
  int callee_parameters = 0;
  int callee_locals = 0;
  for (int index = 0; index < array->frames(); index++ ) {
    // frame[number_of_frames - 1 ] = on_stack_size(youngest)
    // frame[number_of_frames - 2 ] = on_stack_size(sender(youngest))
    // frame[number_of_frames - 3 ] = on_stack_size(sender(sender(youngest)))
    frame_sizes[number_of_frames - 1 - index] = BytesPerWord * array->element(index)->on_stack_size(callee_parameters,
                                                                                                    callee_locals,
                                                                                                    index == 0,
                                                                                                    popframe_extra_args);
    // This pc doesn't have to be perfect just good enough to identify the frame
    // as interpreted so the skeleton frame will be walkable
    // The correct pc will be set when the skeleton frame is completely filled out
    // The final pc we store in the loop is wrong and will be overwritten below
    frame_pcs[number_of_frames - 1 - index ] = Interpreter::deopt_entry(vtos, 0) - frame::pc_return_offset;
    callee_parameters = array->element(index)->method()->size_of_parameters();
    callee_locals = array->element(index)->method()->max_locals();
    popframe_extra_args = 0;
  }

  // Compute whether the root vframe returns a float or double value.
  BasicType return_type;
  {
    methodHandle method(thread, array->element(0)->method());
    Bytecode_invoke invoke = Bytecode_invoke_check(method, array->element(0)->bci());
    return_type = invoke.is_valid() ? invoke.result_type() : T_ILLEGAL;
  }

  // Compute information for handling adapters and adjusting the frame size of the caller.
  int caller_adjustment = 0;

  // Compute the amount the oldest interpreter frame will have to adjust
  // its caller's stack by. If the caller is a compiled frame then
  // we pretend that the callee has no parameters so that the
  // extension counts for the full amount of locals and not just
  // locals-parms. This is because without a c2i adapter the parm
  // area as created by the compiled frame will not be usable by
  // the interpreter. (Depending on the calling convention there
  // may not even be enough space).
  // QQQ I'd rather see this pushed down into last_frame_adjust
  // and have it take the sender (aka caller).

  if (deopt_sender.is_compiled_frame() || caller_was_method_handle) {
    caller_adjustment = last_frame_adjust(0, callee_locals);
  } else if (callee_locals > callee_parameters) {
    // The caller frame may need extending to accommodate
    // non-parameter locals of the first unpacked interpreted frame.
    // Compute that adjustment.
    caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
  }

  // If the sender is deoptimized we must retrieve the address of the handler
  // since the frame will "magically" show the original pc before the deopt
  // and we'd undo the deopt.

  frame_pcs[0] = deopt_sender.raw_pc();

  assert(CodeCache::find_blob_unsafe(frame_pcs[0]) != NULL, "bad pc");

#if INCLUDE_JVMCI
  if (exceptionObject() != NULL) {
    thread->set_exception_oop(exceptionObject());
    exec_mode = Unpack_exception;
  }
#endif

  if (thread->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
    assert(thread->has_pending_exception(), "should have thrown OOME");
    thread->set_exception_oop(thread->pending_exception());
    thread->clear_pending_exception();
    exec_mode = Unpack_exception;
  }

#if INCLUDE_JVMCI
  if (thread->frames_to_pop_failed_realloc() > 0) {
    thread->set_pending_monitorenter(false);
  }
#endif

  UnrollBlock* info = new UnrollBlock(array->frame_size() * BytesPerWord,
                                      caller_adjustment * BytesPerWord,
                                      caller_was_method_handle ? 0 : callee_parameters,
                                      number_of_frames,
                                      frame_sizes,
                                      frame_pcs,
                                      return_type,
                                      exec_mode);
  // On some platforms, we need a way to pass some platform dependent
  // information to the unpacking code so the skeletal frames come out
  // correct (initial fp value, unextended sp, ...)
  info->set_initial_info((intptr_t) array->sender().initial_deoptimization_info());

  if (array->frames() > 1) {
    if (VerifyStack && TraceDeoptimization) {
      ttyLocker ttyl;
      tty->print_cr("Deoptimizing method containing inlining");
    }
  }

  array->set_unroll_block(info);
  return info;
}

// Called to cleanup deoptimization data structures in normal case
// after unpacking to stack and when stack overflow error occurs
void Deoptimization::cleanup_deopt_info(JavaThread *thread,
                                        vframeArray *array) {

  // Get array if coming from exception
  if (array == NULL) {
    array = thread->vframe_array_head();
  }
  thread->set_vframe_array_head(NULL);

  // Free the previous UnrollBlock
  vframeArray* old_array = thread->vframe_array_last();
  thread->set_vframe_array_last(array);

  if (old_array != NULL) {
    UnrollBlock* old_info = old_array->unroll_block();
    old_array->set_unroll_block(NULL);
    delete old_info;
    delete old_array;
  }

  // Deallocate any resource created in this routine and any ResourceObjs allocated
  // inside the vframeArray (StackValueCollections)

  delete thread->deopt_mark();
  thread->set_deopt_mark(NULL);
  thread->set_deopt_compiled_method(NULL);

  if (JvmtiExport::can_pop_frame()) {
    // Regardless of whether we entered this routine with the pending
    // popframe condition bit set, we should always clear it now
    thread->clear_popframe_condition();
  }

  // unpack_frames() is called at the end of the deoptimization handler
  // and (in C2) at the end of the uncommon trap handler. Note this fact
  // so that an asynchronous stack walker can work again. This counter is
  // incremented at the beginning of fetch_unroll_info() and (in C2) at
  // the beginning of uncommon_trap().
  thread->dec_in_deopt_handler();
}

// Moved from cpu directories because none of the cpus has callee save values.
// If a cpu implements callee save values, move this to deoptimization_<cpu>.cpp.
void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
  // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
  // the days we had adapter frames. When we deoptimize a situation where a
  // compiled caller calls a compiled callee, the caller will have registers it
  // expects to survive the call to the callee. If we deoptimize the callee the
  // only way we can restore these registers is to have the oldest interpreter
  // frame that we create restore these values. That is what this routine
  // will accomplish.

  // At the moment we have modified c2 to not have any callee save registers
  // so this problem does not exist and this routine is just a place holder.

  assert(f->is_interpreted_frame(), "must be interpreted");
}

// Return BasicType of value being returned
JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_mode))

  // We are already active in the special DeoptResourceMark; any ResourceObj's we
  // allocate will be freed at the end of the routine.

  // It is actually ok to allocate handles in a leaf method. It causes no safepoints,
  // but makes the entry a little slower. There is however a little dance we have to
  // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro
  ResetNoHandleMark rnhm; // No-op in release/product versions
  HandleMark hm(thread);

  frame stub_frame = thread->last_frame();

  // Since the frame to unpack is the top frame of this thread, the vframe_array_head
  // must point to the vframeArray for the unpack frame.
  vframeArray* array = thread->vframe_array_head();

#ifndef PRODUCT
  if (TraceDeoptimization) {
    ttyLocker ttyl;
    tty->print_cr("DEOPT UNPACKING thread " INTPTR_FORMAT " vframeArray " INTPTR_FORMAT " mode %d",
                  p2i(thread), p2i(array), exec_mode);
  }
#endif
  Events::log_deopt_message(thread, "DEOPT UNPACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT " mode %d",
                            p2i(stub_frame.pc()), p2i(stub_frame.sp()), exec_mode);

  UnrollBlock* info = array->unroll_block();

  // We set the last_Java frame. But the stack isn't really parsable here. So we
  // clear it to make sure JFR understands not to try and walk stacks from events
  // in here.
  intptr_t* sp = thread->frame_anchor()->last_Java_sp();
  thread->frame_anchor()->set_last_Java_sp(NULL);

  // Unpack the interpreter frames and any adapter frame (c2 only) we might create.
  array->unpack_to_stack(stub_frame, exec_mode, info->caller_actual_parameters());

  thread->frame_anchor()->set_last_Java_sp(sp);

  BasicType bt = info->return_type();

  // If we have an exception pending, claim that the return type is an oop
  // so the deopt_blob does not overwrite the exception_oop.

  if (exec_mode == Unpack_exception)
    bt = T_OBJECT;

  // Cleanup thread deopt data
  cleanup_deopt_info(thread, array);

#ifndef PRODUCT
  if (VerifyStack) {
    ResourceMark res_mark;
    // Clear pending exception to not break verification code (restored afterwards)
    PRESERVE_EXCEPTION_MARK;

    thread->validate_frame_layout();

    // Verify that the just-unpacked frames match the interpreter's
    // notions of expression stack and locals
    vframeArray* cur_array = thread->vframe_array_last();
    RegisterMap rm(thread, false);
    rm.set_include_argument_oops(false);
    bool is_top_frame = true;
    int callee_size_of_parameters = 0;
    int callee_max_locals = 0;
    for (int i = 0; i < cur_array->frames(); i++) {
      vframeArrayElement* el = cur_array->element(i);
      frame* iframe = el->iframe();
      guarantee(iframe->is_interpreted_frame(), "Wrong frame type");

      // Get the oop map for this bci
      InterpreterOopMap mask;
      int cur_invoke_parameter_size = 0;
      bool try_next_mask = false;
      int next_mask_expression_stack_size = -1;
      int top_frame_expression_stack_adjustment = 0;
      methodHandle mh(thread, iframe->interpreter_frame_method());
      OopMapCache::compute_one_oop_map(mh, iframe->interpreter_frame_bci(), &mask);
      BytecodeStream str(mh, iframe->interpreter_frame_bci());
      int max_bci = mh->code_size();
      // Get to the next bytecode if possible
      assert(str.bci() < max_bci, "bci in interpreter frame out of bounds");
      // Check to see if we can grab the number of outgoing arguments
      // at an uncommon trap for an invoke (where the compiler
      // generates debug info before the invoke has executed)
      Bytecodes::Code cur_code = str.next();
      if (Bytecodes::is_invoke(cur_code)) {
        Bytecode_invoke invoke(mh, iframe->interpreter_frame_bci());
        cur_invoke_parameter_size = invoke.size_of_parameters();
        if (i != 0 && !invoke.is_invokedynamic() && MethodHandles::has_member_arg(invoke.klass(), invoke.name())) {
          callee_size_of_parameters++;
        }
      }
      if (str.bci() < max_bci) {
        Bytecodes::Code next_code = str.next();
        if (next_code >= 0) {
          // The interpreter oop map generator reports results before
          // the current bytecode has executed except in the case of
          // calls. It seems to be hard to tell whether the compiler
          // has emitted debug information matching the "state before"
          // a given bytecode or the state after, so we try both
          if (!Bytecodes::is_invoke(cur_code) && cur_code != Bytecodes::_athrow) {
            // Get expression stack size for the next bytecode
            InterpreterOopMap next_mask;
            OopMapCache::compute_one_oop_map(mh, str.bci(), &next_mask);
            next_mask_expression_stack_size = next_mask.expression_stack_size();
            if (Bytecodes::is_invoke(next_code)) {
              Bytecode_invoke invoke(mh, str.bci());
              next_mask_expression_stack_size += invoke.size_of_parameters();
            }
            // Need to subtract off the size of the result type of
            // the bytecode because this is not described in the
            // debug info but returned to the interpreter in the TOS
            // caching register
            BasicType bytecode_result_type = Bytecodes::result_type(cur_code);
            if (bytecode_result_type != T_ILLEGAL) {
              top_frame_expression_stack_adjustment = type2size[bytecode_result_type];
            }
            assert(top_frame_expression_stack_adjustment >= 0, "stack adjustment must be positive");
            try_next_mask = true;
          }
        }
      }

      // Verify stack depth and oops in frame
      // This assertion may be dependent on the platform we're running on and may need modification (tested on x86 and sparc)
      if (!(
            /* SPARC */
            (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_size_of_parameters) ||
            /* x86 */
            (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_max_locals) ||
            (try_next_mask &&
             (iframe->interpreter_frame_expression_stack_size() == (next_mask_expression_stack_size -
                                                                    top_frame_expression_stack_adjustment))) ||
            (is_top_frame && (exec_mode == Unpack_exception) && iframe->interpreter_frame_expression_stack_size() == 0) ||
            (is_top_frame && (exec_mode == Unpack_uncommon_trap || exec_mode == Unpack_reexecute || el->should_reexecute()) &&
             (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + cur_invoke_parameter_size))
            )) {
        {
          ttyLocker ttyl;

          // Print out some information that will help us debug the problem
          tty->print_cr("Wrong number of expression stack elements during deoptimization");
          tty->print_cr("  Error occurred while verifying frame %d (0..%d, 0 is topmost)", i, cur_array->frames() - 1);
          tty->print_cr("  Fabricated interpreter frame had %d expression stack elements",
                        iframe->interpreter_frame_expression_stack_size());
          tty->print_cr("  Interpreter oop map had %d expression stack elements", mask.expression_stack_size());
          tty->print_cr("  try_next_mask = %d", try_next_mask);
          tty->print_cr("  next_mask_expression_stack_size = %d", next_mask_expression_stack_size);
          tty->print_cr("  callee_size_of_parameters = %d", callee_size_of_parameters);
          tty->print_cr("  callee_max_locals = %d", callee_max_locals);
          tty->print_cr("  top_frame_expression_stack_adjustment = %d", top_frame_expression_stack_adjustment);
          tty->print_cr("  exec_mode = %d", exec_mode);
          tty->print_cr("  cur_invoke_parameter_size = %d", cur_invoke_parameter_size);
          tty->print_cr("  Thread = " INTPTR_FORMAT ", thread ID = %d", p2i(thread), thread->osthread()->thread_id());
          tty->print_cr("  Interpreted frames:");
          for (int k = 0; k < cur_array->frames(); k++) {
            vframeArrayElement* el = cur_array->element(k);
            tty->print_cr("    %s (bci %d)", el->method()->name_and_sig_as_C_string(), el->bci());
          }
          cur_array->print_on_2(tty);
        } // release tty lock before calling guarantee
        guarantee(false, "wrong number of expression stack elements during deopt");
      }
      VerifyOopClosure verify;
      iframe->oops_interpreted_do(&verify, &rm, false);
      callee_size_of_parameters = mh->size_of_parameters();
      callee_max_locals = mh->max_locals();
      is_top_frame = false;
    }
  }
#endif /* !PRODUCT */

  return bt;
JRT_END
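
// Handshake closure that, for one JavaThread, deoptimizes the frames of all
// methods previously marked for deoptimization (see deoptimize_all_marked()
// below).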
class DeoptimizeMarkedClosure : public HandshakeClosure {
 public:
  DeoptimizeMarkedClosure() : HandshakeClosure("Deoptimize") {}
  void do_thread(Thread* thread) {
    JavaThread* jt = thread->as_Java_thread();
    jt->deoptimize_marked_methods();
  }
};

void Deoptimization::deoptimize_all_marked(nmethod* nmethod_only) {
  ResourceMark rm;
  DeoptimizationMarker dm;

  // Make the dependent methods not entrant
  if (nmethod_only != NULL) {
    nmethod_only->mark_for_deoptimization();
    nmethod_only->make_not_entrant();
  } else {
    MutexLocker mu(SafepointSynchronize::is_at_safepoint() ? NULL : CodeCache_lock, Mutex::_no_safepoint_check_flag);
    CodeCache::make_marked_nmethods_not_entrant();
  }

  DeoptimizeMarkedClosure deopt;
  if (SafepointSynchronize::is_at_safepoint()) {
    Threads::java_threads_do(&deopt);
  } else {
    Handshake::execute(&deopt);
  }
}
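
// Default deoptimization action used when compiled code traps on a class
// that has not been loaded (Action_reinterpret).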
Deoptimization::DeoptAction Deoptimization::_unloaded_action
  = Deoptimization::Action_reinterpret;
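
// Support for returning the canonical java.lang box instances (e.g. the
// Integer.valueOf cache) when rematerializing scalar-replaced autoboxes.
// The BoxCache/BooleanBoxCache helpers below hold JNI global handles to the
// Java-side cache arrays and look boxes up by primitive value.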
#if INCLUDE_JVMCI || INCLUDE_AOT
template<typename CacheType>
class BoxCacheBase : public CHeapObj<mtCompiler> {
protected:
  static InstanceKlass* find_cache_klass(Symbol* klass_name, TRAPS) {
    ResourceMark rm;
    char* klass_name_str = klass_name->as_C_string();
    Klass* k = SystemDictionary::find(klass_name, Handle(), Handle(), THREAD);
    guarantee(k != NULL, "%s must be loaded", klass_name_str);
    InstanceKlass* ik = InstanceKlass::cast(k);
    guarantee(ik->is_initialized(), "%s must be initialized", klass_name_str);
    CacheType::compute_offsets(ik);
    return ik;
  }
};

template<typename PrimitiveType, typename CacheType, typename BoxType> class BoxCache : public BoxCacheBase<CacheType> {
  PrimitiveType _low;
  PrimitiveType _high;
  jobject _cache;
protected:
  static BoxCache<PrimitiveType, CacheType, BoxType>* _singleton;
  BoxCache(Thread* thread) {
    InstanceKlass* ik = BoxCacheBase<CacheType>::find_cache_klass(CacheType::symbol(), thread);
    objArrayOop cache = CacheType::cache(ik);
    assert(cache->length() > 0, "Empty cache");
    _low = BoxType::value(cache->obj_at(0));
    _high = _low + cache->length() - 1;
    _cache = JNIHandles::make_global(Handle(thread, cache));
  }
  ~BoxCache() {
    JNIHandles::destroy_global(_cache);
  }
public:
  static BoxCache<PrimitiveType, CacheType, BoxType>* singleton(Thread* thread) {
    if (_singleton == NULL) {
      BoxCache<PrimitiveType, CacheType, BoxType>* s = new BoxCache<PrimitiveType, CacheType, BoxType>(thread);
      if (!Atomic::replace_if_null(&_singleton, s)) {
        delete s;
      }
    }
    return _singleton;
  }
  oop lookup(PrimitiveType value) {
    if (_low <= value && value <= _high) {
      int offset = value - _low;
      return objArrayOop(JNIHandles::resolve_non_null(_cache))->obj_at(offset);
    }
    return NULL;
  }
  oop lookup_raw(intptr_t raw_value) {
    // Have to cast to avoid little/big-endian problems.
    if (sizeof(PrimitiveType) > sizeof(jint)) {
      jlong value = (jlong)raw_value;
      return lookup(value);
    }
    PrimitiveType value = (PrimitiveType)*((jint*)&raw_value);
    return lookup(value);
  }
};

typedef BoxCache<jint, java_lang_Integer_IntegerCache, java_lang_Integer> IntegerBoxCache;
typedef BoxCache<jlong, java_lang_Long_LongCache, java_lang_Long> LongBoxCache;
typedef BoxCache<jchar, java_lang_Character_CharacterCache, java_lang_Character> CharacterBoxCache;
typedef BoxCache<jshort, java_lang_Short_ShortCache, java_lang_Short> ShortBoxCache;
typedef BoxCache<jbyte, java_lang_Byte_ByteCache, java_lang_Byte> ByteBoxCache;

template<> BoxCache<jint, java_lang_Integer_IntegerCache, java_lang_Integer>* BoxCache<jint, java_lang_Integer_IntegerCache, java_lang_Integer>::_singleton = NULL;
template<> BoxCache<jlong, java_lang_Long_LongCache, java_lang_Long>* BoxCache<jlong, java_lang_Long_LongCache, java_lang_Long>::_singleton = NULL;
template<> BoxCache<jchar, java_lang_Character_CharacterCache, java_lang_Character>* BoxCache<jchar, java_lang_Character_CharacterCache, java_lang_Character>::_singleton = NULL;
template<> BoxCache<jshort, java_lang_Short_ShortCache, java_lang_Short>* BoxCache<jshort, java_lang_Short_ShortCache, java_lang_Short>::_singleton = NULL;
template<> BoxCache<jbyte, java_lang_Byte_ByteCache, java_lang_Byte>* BoxCache<jbyte, java_lang_Byte_ByteCache, java_lang_Byte>::_singleton = NULL;

class BooleanBoxCache : public BoxCacheBase<java_lang_Boolean> {
  jobject _true_cache;
  jobject _false_cache;
protected:
  static BooleanBoxCache* _singleton;
  BooleanBoxCache(Thread* thread) {
    InstanceKlass* ik = find_cache_klass(java_lang_Boolean::symbol(), thread);
    _true_cache = JNIHandles::make_global(Handle(thread, java_lang_Boolean::get_TRUE(ik)));
    _false_cache = JNIHandles::make_global(Handle(thread, java_lang_Boolean::get_FALSE(ik)));
  }
  ~BooleanBoxCache() {
    JNIHandles::destroy_global(_true_cache);
    JNIHandles::destroy_global(_false_cache);
  }
public:
  static BooleanBoxCache* singleton(Thread* thread) {
    if (_singleton == NULL) {
      BooleanBoxCache* s = new BooleanBoxCache(thread);
      if (!Atomic::replace_if_null(&_singleton, s)) {
        delete s;
      }
    }
    return _singleton;
  }
  oop lookup_raw(intptr_t raw_value) {
    // Have to cast to avoid little/big-endian problems.
    jboolean value = (jboolean)*((jint*)&raw_value);
    return lookup(value);
  }
  oop lookup(jboolean value) {
    if (value != 0) {
      return JNIHandles::resolve_non_null(_true_cache);
    }
    return JNIHandles::resolve_non_null(_false_cache);
  }
};

BooleanBoxCache* BooleanBoxCache::_singleton = NULL;
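
// Returns the cached box for a scalar-replaced autobox value if the primitive
// value falls inside the corresponding java.lang cache range, otherwise NULL
// (in which case the caller allocates a fresh instance).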
oop Deoptimization::get_cached_box(AutoBoxObjectValue* bv, frame* fr, RegisterMap* reg_map, TRAPS) {
   Klass* k = java_lang_Class::as_Klass(bv->klass()->as_ConstantOopReadValue()->value()());
   BasicType box_type = SystemDictionary::box_klass_type(k);
   if (box_type != T_OBJECT) {
     StackValue* value = StackValue::create_stack_value(fr, reg_map, bv->field_at(box_type == T_LONG ? 1 : 0));
     switch(box_type) {
       case T_INT:     return IntegerBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
       case T_CHAR:    return CharacterBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
       case T_SHORT:   return ShortBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
       case T_BYTE:    return ByteBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
       case T_BOOLEAN: return BooleanBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
       case T_LONG:    return LongBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
       default:;
     }
   }
   return NULL;
}
#endif // INCLUDE_JVMCI || INCLUDE_AOT
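
// realloc_objects() walks the scalar-replaced objects described in the debug
// info and allocates real heap instances for them (instances, type arrays,
// object arrays). Allocation failures are only recorded; a pending
// OutOfMemoryError is raised at the end so callers can pop the affected
// frames.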
#if COMPILER2_OR_JVMCI
bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS) {
  Handle pending_exception(THREAD, thread->pending_exception());
  const char* exception_file = thread->exception_file();
  int exception_line = thread->exception_line();
  thread->clear_pending_exception();

  bool failures = false;

  for (int i = 0; i < objects->length(); i++) {
    assert(objects->at(i)->is_object(), "invalid debug information");
    ObjectValue* sv = (ObjectValue*) objects->at(i);

    Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
    oop obj = NULL;

    if (k->is_instance_klass()) {
#if INCLUDE_JVMCI || INCLUDE_AOT
      CompiledMethod* cm = fr->cb()->as_compiled_method_or_null();
      if (cm->is_compiled_by_jvmci() && sv->is_auto_box()) {
        AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
        obj = get_cached_box(abv, fr, reg_map, THREAD);
        if (obj != NULL) {
          // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
          abv->set_cached(true);
        }
      }
#endif // INCLUDE_JVMCI || INCLUDE_AOT
      InstanceKlass* ik = InstanceKlass::cast(k);
      if (obj == NULL) {
        obj = ik->allocate_instance(THREAD);
      }
    } else if (k->is_typeArray_klass()) {
      TypeArrayKlass* ak = TypeArrayKlass::cast(k);
      assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
      int len = sv->field_size() / type2size[ak->element_type()];
      obj = ak->allocate(len, THREAD);
    } else if (k->is_objArray_klass()) {
      ObjArrayKlass* ak = ObjArrayKlass::cast(k);
      obj = ak->allocate(sv->field_size(), THREAD);
    }

    if (obj == NULL) {
      failures = true;
    }

    assert(sv->value().is_null(), "redundant reallocation");
    assert(obj != NULL || HAS_PENDING_EXCEPTION, "allocation should succeed or we should get an exception");
    CLEAR_PENDING_EXCEPTION;
    sv->set_value(obj);
  }

  if (failures) {
    THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
  } else if (pending_exception.not_null()) {
    thread->set_pending_exception(pending_exception(), exception_file, exception_line);
  }

  return failures;
}

#if INCLUDE_JVMCI
/**
 * For primitive types whose kind gets "erased" at runtime (shorts become stack ints),
 * we need to somehow be able to recover the actual kind to be able to write the correct
 * amount of bytes.
 * For that purpose, this method assumes that, for an entry spanning n bytes at index i,
 * the entries at index i + 1 to i + n - 1 are 'markers'.
 * For example, if we were writing a short at index 4 of a byte array of size 8, the
 * expected form of the array would be:
 *
 * {b0, b1, b2, b3, INT, marker, b6, b7}
 *
 * Thus, in order to get back the size of the entry, we simply need to count the number
 * of marked entries
 *
 * @param virtualArray the virtualized byte array
 * @param i index of the virtual entry we are recovering
 * @return The number of bytes the entry spans
 */
static int count_number_of_bytes_for_entry(ObjectValue* virtualArray, int i) {
  int index = i;
  while (++index < virtualArray->field_size() &&
           virtualArray->field_at(index)->is_marker()) {}
  return index - i;
}

/**
 * If there was a guarantee for byte array to always start aligned to a long, we could
 * do a simple check on the parity of the index. Unfortunately, that is not always the
 * case. Thus, we check alignment of the actual address we are writing to.
 * In the unlikely case index 0 is 5-aligned for example, it would then be possible to
 * write a long to index 3.
 */
static jbyte* check_alignment_get_addr(typeArrayOop obj, int index, int expected_alignment) {
    jbyte* res = obj->byte_at_addr(index);
    assert((((intptr_t) res) % expected_alignment) == 0, "Non-aligned write");
    return res;
}

static void byte_array_put(typeArrayOop obj, intptr_t val, int index, int byte_count) {
  switch (byte_count) {
    case 1:
      obj->byte_at_put(index, (jbyte) *((jint *) &val));
      break;
    case 2:
      *((jshort *) check_alignment_get_addr(obj, index, 2)) = (jshort) *((jint *) &val);
      break;
    case 4:
      *((jint *) check_alignment_get_addr(obj, index, 4)) = (jint) *((jint *) &val);
      break;
    case 8:
      *((jlong *) check_alignment_get_addr(obj, index, 8)) = (jlong) *((jlong *) &val);
      break;
    default:
      ShouldNotReachHere();
  }
}
#endif // INCLUDE_JVMCI
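
// Note: 64-bit elements (long/double) are described by two 32-bit stack
// values in the debug info, so the loops below consume an extra field slot
// (++i) when reassembling them.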
2007-12-01 00:00:00 +00:00
// restore elements of an eliminated type array
void Deoptimization : : reassign_type_array_elements ( frame * fr , RegisterMap * reg_map , ObjectValue * sv , typeArrayOop obj , BasicType type ) {
int index = 0 ;
2008-03-11 11:25:13 -07:00
intptr_t val ;
2007-12-01 00:00:00 +00:00
for ( int i = 0 ; i < sv - > field_size ( ) ; i + + ) {
StackValue * value = StackValue : : create_stack_value ( fr , reg_map , sv - > field_at ( i ) ) ;
switch ( type ) {
2008-03-11 11:25:13 -07:00
case T_LONG : case T_DOUBLE : {
assert ( value - > type ( ) = = T_INT , " Agreement. " ) ;
StackValue * low =
StackValue : : create_stack_value ( fr , reg_map , sv - > field_at ( + + i ) ) ;
# ifdef _LP64
jlong res = ( jlong ) low - > get_int ( ) ;
# else
jlong res = jlong_from ( ( jint ) value - > get_int ( ) , ( jint ) low - > get_int ( ) ) ;
# endif
obj - > long_at_put ( index , res ) ;
break ;
}
// Have to cast to INT (32 bits) pointer to avoid little/big-endian problem.
2015-10-08 12:49:30 -10:00
case T_INT : case T_FLOAT : { // 4 bytes.
2008-03-11 11:25:13 -07:00
assert ( value - > type ( ) = = T_INT , " Agreement. " ) ;
2015-10-08 12:49:30 -10:00
bool big_value = false ;
if ( i + 1 < sv - > field_size ( ) & & type = = T_INT ) {
if ( sv - > field_at ( i ) - > is_location ( ) ) {
Location : : Type type = ( ( LocationValue * ) sv - > field_at ( i ) ) - > location ( ) . type ( ) ;
if ( type = = Location : : dbl | | type = = Location : : lng ) {
big_value = true ;
}
} else if ( sv - > field_at ( i ) - > is_constant_int ( ) ) {
ScopeValue * next_scope_field = sv - > field_at ( i + 1 ) ;
if ( next_scope_field - > is_constant_long ( ) | | next_scope_field - > is_constant_double ( ) ) {
big_value = true ;
}
}
}
if ( big_value ) {
StackValue * low = StackValue : : create_stack_value ( fr , reg_map , sv - > field_at ( + + i ) ) ;
# ifdef _LP64
jlong res = ( jlong ) low - > get_int ( ) ;
# else
jlong res = jlong_from ( ( jint ) value - > get_int ( ) , ( jint ) low - > get_int ( ) ) ;
# endif
obj - > int_at_put ( index , ( jint ) * ( ( jint * ) & res ) ) ;
obj - > int_at_put ( + + index , ( jint ) * ( ( ( jint * ) & res ) + 1 ) ) ;
} else {
val = value - > get_int ( ) ;
obj - > int_at_put ( index , ( jint ) * ( ( jint * ) & val ) ) ;
}
break ;
}
case T_SHORT :
assert ( value - > type ( ) = = T_INT , " Agreement. " ) ;
val = value - > get_int ( ) ;
obj - > short_at_put ( index , ( jshort ) * ( ( jint * ) & val ) ) ;
break ;
case T_CHAR :
assert ( value - > type ( ) = = T_INT , " Agreement. " ) ;
val = value - > get_int ( ) ;
obj - > char_at_put ( index , ( jchar ) * ( ( jint * ) & val ) ) ;
break ;
case T_BYTE : {
assert ( value - > type ( ) = = T_INT , " Agreement. " ) ;
// The value we get is erased as a regular int. We will need to find its actual byte count 'by hand'.
val = value - > get_int ( ) ;
# if INCLUDE_JVMCI
int byte_count = count_number_of_bytes_for_entry ( sv , i ) ;
byte_array_put ( obj , val , index , byte_count ) ;
// According to byte_count contract, the values from i + 1 to i + byte_count are illegal values. Skip.
i + = byte_count - 1 ; // Balance the loop counter.
index + = byte_count ;
// index has been updated so continue at top of loop
continue ;
# else
obj - > byte_at_put ( index , ( jbyte ) * ( ( jint * ) & val ) ) ;
break ;
# endif // INCLUDE_JVMCI
}
case T_BOOLEAN : {
assert ( value - > type ( ) = = T_INT , " Agreement. " ) ;
val = value - > get_int ( ) ;
obj - > bool_at_put ( index , ( jboolean ) * ( ( jint * ) & val ) ) ;
break ;
}
default :
ShouldNotReachHere ( ) ;
}
index + + ;
}
}
// restore fields of an eliminated object array
void Deoptimization : : reassign_object_array_elements ( frame * fr , RegisterMap * reg_map , ObjectValue * sv , objArrayOop obj ) {
for ( int i = 0 ; i < sv - > field_size ( ) ; i + + ) {
StackValue * value = StackValue : : create_stack_value ( fr , reg_map , sv - > field_at ( i ) ) ;
assert ( value - > type ( ) = = T_OBJECT , " object element expected " ) ;
obj - > obj_at_put ( i , value - > get_obj ( ) ( ) ) ;
}
}
class ReassignedField {
public :
int _offset ;
BasicType _type ;
public :
ReassignedField ( ) {
_offset = 0 ;
_type = T_ILLEGAL ;
}
} ;
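// Comparator used to sort the collected fields by offset so that values are
// reassigned in the same order as the object layout.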
int compare ( ReassignedField * left , ReassignedField * right ) {
return left - > _offset - right - > _offset ;
}
// Restore fields of an eliminated instance object using the same field order
// returned by HotSpotResolvedObjectTypeImpl.getInstanceFields(true)
static int reassign_fields_by_klass ( InstanceKlass * klass , frame * fr , RegisterMap * reg_map , ObjectValue * sv , int svIndex , oop obj , bool skip_internal ) {
GrowableArray < ReassignedField > * fields = new GrowableArray < ReassignedField > ( ) ;
InstanceKlass * ik = klass ;
while ( ik ! = NULL ) {
for ( AllFieldStream fs ( ik ) ; ! fs . done ( ) ; fs . next ( ) ) {
if ( ! fs . access_flags ( ) . is_static ( ) & & ( ! skip_internal | | ! fs . access_flags ( ) . is_internal ( ) ) ) {
ReassignedField field ;
field . _offset = fs . offset ( ) ;
field . _type = Signature : : basic_type ( fs . signature ( ) ) ;
fields - > append ( field ) ;
}
}
ik = ik - > superklass ( ) ;
}
fields - > sort ( compare ) ;
for ( int i = 0 ; i < fields - > length ( ) ; i + + ) {
intptr_t val ;
ScopeValue * scope_field = sv - > field_at ( svIndex ) ;
StackValue * value = StackValue : : create_stack_value ( fr , reg_map , scope_field ) ;
int offset = fields - > at ( i ) . _offset ;
BasicType type = fields - > at ( i ) . _type ;
switch ( type ) {
case T_OBJECT : case T_ARRAY :
assert ( value - > type ( ) = = T_OBJECT , " Agreement. " ) ;
obj - > obj_field_put ( offset , value - > get_obj ( ) ( ) ) ;
break ;
// Have to cast to INT (32 bits) pointer to avoid little/big-endian problem.
case T_INT : case T_FLOAT : { // 4 bytes.
assert ( value - > type ( ) = = T_INT , " Agreement. " ) ;
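// As for arrays above: a long/double may be split into two T_INT fields, in which
// case the 64-bit value is written by falling through to the T_LONG/T_DOUBLE case.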
bool big_value = false ;
if ( i + 1 < fields - > length ( ) & & fields - > at ( i + 1 ) . _type = = T_INT ) {
if ( scope_field - > is_location ( ) ) {
Location : : Type type = ( ( LocationValue * ) scope_field ) - > location ( ) . type ( ) ;
if ( type = = Location : : dbl | | type = = Location : : lng ) {
big_value = true ;
}
}
if ( scope_field - > is_constant_int ( ) ) {
ScopeValue * next_scope_field = sv - > field_at ( svIndex + 1 ) ;
if ( next_scope_field - > is_constant_long ( ) | | next_scope_field - > is_constant_double ( ) ) {
big_value = true ;
}
}
}
if ( big_value ) {
i + + ;
assert ( i < fields - > length ( ) , " second T_INT field needed " ) ;
assert ( fields - > at ( i ) . _type = = T_INT , " T_INT field needed " ) ;
} else {
val = value - > get_int ( ) ;
obj - > int_field_put ( offset , ( jint ) * ( ( jint * ) & val ) ) ;
break ;
}
}
/* no break */
case T_LONG : case T_DOUBLE : {
assert ( value - > type ( ) = = T_INT , " Agreement. " ) ;
StackValue * low = StackValue : : create_stack_value ( fr , reg_map , sv - > field_at ( + + svIndex ) ) ;
# ifdef _LP64
jlong res = ( jlong ) low - > get_int ( ) ;
# else
jlong res = jlong_from ( ( jint ) value - > get_int ( ) , ( jint ) low - > get_int ( ) ) ;
# endif
obj - > long_field_put ( offset , res ) ;
break ;
}
case T_SHORT :
assert ( value - > type ( ) = = T_INT , " Agreement. " ) ;
val = value - > get_int ( ) ;
obj - > short_field_put ( offset , ( jshort ) * ( ( jint * ) & val ) ) ;
break ;
case T_CHAR :
assert ( value - > type ( ) = = T_INT , " Agreement. " ) ;
val = value - > get_int ( ) ;
obj - > char_field_put ( offset , ( jchar ) * ( ( jint * ) & val ) ) ;
break ;
case T_BYTE :
assert ( value - > type ( ) = = T_INT , " Agreement. " ) ;
val = value - > get_int ( ) ;
obj - > byte_field_put ( offset , ( jbyte ) * ( ( jint * ) & val ) ) ;
break ;
case T_BOOLEAN :
assert ( value - > type ( ) = = T_INT , " Agreement. " ) ;
val = value - > get_int ( ) ;
obj - > bool_field_put ( offset , ( jboolean ) * ( ( jint * ) & val ) ) ;
break ;
default :
ShouldNotReachHere ( ) ;
}
svIndex + + ;
}
return svIndex ;
}
// restore fields of all eliminated objects and arrays
void Deoptimization : : reassign_fields ( frame * fr , RegisterMap * reg_map , GrowableArray < ScopeValue * > * objects , bool realloc_failures , bool skip_internal ) {
for ( int i = 0 ; i < objects - > length ( ) ; i + + ) {
ObjectValue * sv = ( ObjectValue * ) objects - > at ( i ) ;
Klass * k = java_lang_Class : : as_Klass ( sv - > klass ( ) - > as_ConstantOopReadValue ( ) - > value ( ) ( ) ) ;
Handle obj = sv - > value ( ) ;
assert ( obj . not_null ( ) | | realloc_failures , " reallocation was missed " ) ;
if ( PrintDeoptimizationDetails ) {
tty - > print_cr ( " reassign fields for object of type %s! " , k - > name ( ) - > as_C_string ( ) ) ;
}
if ( obj . is_null ( ) ) {
continue ;
}
# if INCLUDE_JVMCI || INCLUDE_AOT
// Don't reassign fields of boxes that came from a cache. Caches may be in CDS.
if ( sv - > is_auto_box ( ) & & ( ( AutoBoxObjectValue * ) sv ) - > is_cached ( ) ) {
continue ;
}
# endif // INCLUDE_JVMCI || INCLUDE_AOT
if ( k - > is_instance_klass ( ) ) {
InstanceKlass * ik = InstanceKlass : : cast ( k ) ;
reassign_fields_by_klass ( ik , fr , reg_map , sv , 0 , obj ( ) , skip_internal ) ;
} else if ( k - > is_typeArray_klass ( ) ) {
TypeArrayKlass * ak = TypeArrayKlass : : cast ( k ) ;
reassign_type_array_elements ( fr , reg_map , sv , ( typeArrayOop ) obj ( ) , ak - > element_type ( ) ) ;
} else if ( k - > is_objArray_klass ( ) ) {
reassign_object_array_elements ( fr , reg_map , sv , ( objArrayOop ) obj ( ) ) ;
}
}
}
// relock objects for which synchronization was eliminated
void Deoptimization : : relock_objects ( GrowableArray < MonitorInfo * > * monitors , JavaThread * thread , bool realloc_failures ) {
for ( int i = 0 ; i < monitors - > length ( ) ; i + + ) {
MonitorInfo * mon_info = monitors - > at ( i ) ;
if ( mon_info - > eliminated ( ) ) {
assert ( ! mon_info - > owner_is_scalar_replaced ( ) | | realloc_failures , " reallocation was missed " ) ;
if ( ! mon_info - > owner_is_scalar_replaced ( ) ) {
Handle obj ( thread , mon_info - > owner ( ) ) ;
markWord mark = obj - > mark ( ) ;
if ( UseBiasedLocking & & mark . has_bias_pattern ( ) ) {
// Newly allocated objects may have the mark set to anonymously biased.
// Also, the deoptimized method may have called methods with synchronization
// where the thread-local object is biased to the current thread.
assert ( mark . is_biased_anonymously ( ) | |
mark . biased_locker ( ) = = thread , " should be locked to current thread " ) ;
// Reset mark word to unbiased prototype.
markWord unbiased_prototype = markWord : : prototype ( ) . set_age ( mark . age ( ) ) ;
obj - > set_mark ( unbiased_prototype ) ;
}
BasicLock * lock = mon_info - > lock ( ) ;
ObjectSynchronizer : : enter ( obj , lock , thread ) ;
assert ( mon_info - > owner ( ) - > is_locked ( ) , " object must be locked now " ) ;
}
}
}
}
# ifndef PRODUCT
// print information about reallocated objects
void Deoptimization : : print_objects ( GrowableArray < ScopeValue * > * objects , bool realloc_failures ) {
fieldDescriptor fd ;
for ( int i = 0 ; i < objects - > length ( ) ; i + + ) {
ObjectValue * sv = ( ObjectValue * ) objects - > at ( i ) ;
Klass * k = java_lang_Class : : as_Klass ( sv - > klass ( ) - > as_ConstantOopReadValue ( ) - > value ( ) ( ) ) ;
Handle obj = sv - > value ( ) ;
tty - > print ( " object < " INTPTR_FORMAT " > of type " , p2i ( sv - > value ( ) ( ) ) ) ;
k - > print_value ( ) ;
assert ( obj . not_null ( ) | | realloc_failures , " reallocation was missed " ) ;
if ( obj . is_null ( ) ) {
tty - > print ( " allocation failed " ) ;
} else {
tty - > print ( " allocated (%d bytes) " , obj - > size ( ) * HeapWordSize ) ;
}
tty - > cr ( ) ;
if ( Verbose & & ! obj . is_null ( ) ) {
k - > oop_print_on ( obj ( ) , tty ) ;
}
}
}
# endif
# endif // COMPILER2_OR_JVMCI
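// Build the vframeArray describing the interpreter frames that will replace the
// compiled frame being deoptimized; it is consumed later when the frames are unpacked.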
vframeArray * Deoptimization : : create_vframeArray ( JavaThread * thread , frame fr , RegisterMap * reg_map , GrowableArray < compiledVFrame * > * chunk , bool realloc_failures ) {
Events : : log_deopt_message ( thread , " DEOPT PACKING pc= " INTPTR_FORMAT " sp= " INTPTR_FORMAT , p2i ( fr . pc ( ) ) , p2i ( fr . sp ( ) ) ) ;
# ifndef PRODUCT
if ( PrintDeoptimizationDetails ) {
ttyLocker ttyl ;
tty - > print ( " DEOPT PACKING thread " INTPTR_FORMAT " " , p2i ( thread ) ) ;
fr . print_on ( tty ) ;
tty - > print_cr ( " Virtual frames (innermost first): " ) ;
for ( int index = 0 ; index < chunk - > length ( ) ; index + + ) {
compiledVFrame * vf = chunk - > at ( index ) ;
tty - > print ( " %2d - " , index ) ;
vf - > print_value ( ) ;
int bci = chunk - > at ( index ) - > raw_bci ( ) ;
const char * code_name ;
if ( bci = = SynchronizationEntryBCI ) {
code_name = " sync entry " ;
} else {
Bytecodes : : Code code = vf - > method ( ) - > code_at ( bci ) ;
code_name = Bytecodes : : name ( code ) ;
}
tty - > print ( " - %s " , code_name ) ;
tty - > print_cr ( " @ bci %d " , bci ) ;
if ( Verbose ) {
vf - > print ( ) ;
tty - > cr ( ) ;
}
}
}
# endif
// Register map for next frame (used for stack crawl). We capture
// the state of the deopt'ing frame's caller. Thus if we need to
// stuff a C2I adapter we can properly fill in the callee-save
// register locations.
frame caller = fr . sender ( reg_map ) ;
int frame_size = caller . sp ( ) - fr . sp ( ) ;
frame sender = caller ;
// Since the Java thread being deoptimized will eventually adjust its own stack,
// the vframeArray containing the unpacking information is allocated in the C heap.
// For Compiler1, the caller of the deoptimized frame is saved for use by unpack_frames().
vframeArray * array = vframeArray : : allocate ( thread , frame_size , chunk , reg_map , sender , caller , fr , realloc_failures ) ;
// Compare the vframeArray to the collected vframes
assert ( array - > structural_compare ( thread , chunk ) , " just checking " ) ;
# ifndef PRODUCT
if ( PrintDeoptimizationDetails ) {
ttyLocker ttyl ;
tty - > print_cr ( " Created vframeArray " INTPTR_FORMAT , p2i ( array ) ) ;
}
# endif // PRODUCT
return array ;
}
# if COMPILER2_OR_JVMCI
void Deoptimization : : pop_frames_failed_reallocs ( JavaThread * thread , vframeArray * array ) {
// Reallocation of some scalar replaced objects failed. Record
// that we need to pop all the interpreter frames for the
// deoptimized compiled frame.
assert ( thread - > frames_to_pop_failed_realloc ( ) = = 0 , " missed frames to pop? " ) ;
thread - > set_frames_to_pop_failed_realloc ( array - > frames ( ) ) ;
// Unlock all monitors here otherwise the interpreter will see a
// mix of locked and unlocked monitors (because of failed
// reallocations of synchronized objects) and be confused.
for ( int i = 0 ; i < array - > frames ( ) ; i + + ) {
MonitorChunk * monitors = array - > element ( i ) - > monitors ( ) ;
if ( monitors ! = NULL ) {
for ( int j = 0 ; j < monitors - > number_of_monitors ( ) ; j + + ) {
BasicObjectLock * src = monitors - > at ( j ) ;
if ( src - > obj ( ) ! = NULL ) {
ObjectSynchronizer : : exit ( src - > obj ( ) , src - > lock ( ) , thread ) ;
}
}
array - > element ( i ) - > free_monitors ( thread ) ;
# ifdef ASSERT
array - > element ( i ) - > set_removed_monitors ( ) ;
# endif
}
}
}
# endif
static void collect_monitors ( compiledVFrame * cvf , GrowableArray < Handle > * objects_to_revoke ) {
GrowableArray < MonitorInfo * > * monitors = cvf - > monitors ( ) ;
Thread * thread = Thread : : current ( ) ;
for ( int i = 0 ; i < monitors - > length ( ) ; i + + ) {
MonitorInfo * mon_info = monitors - > at ( i ) ;
if ( ! mon_info - > eliminated ( ) & & mon_info - > owner ( ) ! = NULL ) {
objects_to_revoke - > append ( Handle ( thread , mon_info - > owner ( ) ) ) ;
}
}
}
static void get_monitors_from_stack ( GrowableArray < Handle > * objects_to_revoke , JavaThread * thread , frame fr , RegisterMap * map ) {
// Unfortunately we don't have a RegisterMap available in most of
// the places we want to call this routine so we need to walk the
// stack again to update the register map.
if ( map = = NULL | | ! map - > update_map ( ) ) {
StackFrameStream sfs ( thread , true /* update */ , true /* process_frames */ ) ;
bool found = false ;
while ( ! found & & ! sfs . is_done ( ) ) {
frame * cur = sfs . current ( ) ;
sfs . next ( ) ;
found = cur - > id ( ) = = fr . id ( ) ;
}
assert ( found , " frame to be deoptimized not found on target thread's stack " ) ;
map = sfs . register_map ( ) ;
}
vframe * vf = vframe : : new_vframe ( & fr , map , thread ) ;
compiledVFrame * cvf = compiledVFrame : : cast ( vf ) ;
// Revoke monitors' biases in all scopes
while ( ! cvf - > is_top ( ) ) {
collect_monitors ( cvf , objects_to_revoke ) ;
cvf = compiledVFrame : : cast ( cvf - > sender ( ) ) ;
}
collect_monitors ( cvf , objects_to_revoke ) ;
}
void Deoptimization : : revoke_from_deopt_handler ( JavaThread * thread , frame fr , RegisterMap * map ) {
if ( ! UseBiasedLocking ) {
return ;
}
assert ( thread = = Thread : : current ( ) , " should be " ) ;
ResourceMark rm ( thread ) ;
HandleMark hm ( thread ) ;
GrowableArray < Handle > * objects_to_revoke = new GrowableArray < Handle > ( ) ;
get_monitors_from_stack ( objects_to_revoke , thread , fr , map ) ;
int len = objects_to_revoke - > length ( ) ;
for ( int i = 0 ; i < len ; i + + ) {
oop obj = ( objects_to_revoke - > at ( i ) ) ( ) ;
BiasedLocking : : revoke_own_lock ( objects_to_revoke - > at ( i ) , thread ) ;
assert ( ! obj - > mark ( ) . has_bias_pattern ( ) , " biases should be revoked by now " ) ;
}
}
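// Deoptimize a single frame: record statistics, log the event, and patch the frame
// so that execution returning to it deopts into the interpreter.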
void Deoptimization : : deoptimize_single_frame ( JavaThread * thread , frame fr , Deoptimization : : DeoptReason reason ) {
assert ( fr . can_be_deoptimized ( ) , " checking frame type " ) ;
gather_statistics ( reason , Action_none , Bytecodes : : _illegal ) ;
if ( LogCompilation & & xtty ! = NULL ) {
CompiledMethod * cm = fr . cb ( ) - > as_compiled_method_or_null ( ) ;
assert ( cm ! = NULL , " only compiled methods can deopt " ) ;
ttyLocker ttyl ;
xtty - > begin_head ( " deoptimized thread=' " UINTX_FORMAT " ' reason='%s' pc=' " INTPTR_FORMAT " ' " , ( uintx ) thread - > osthread ( ) - > thread_id ( ) , trap_reason_name ( reason ) , p2i ( fr . pc ( ) ) ) ;
cm - > log_identity ( xtty ) ;
xtty - > end_head ( ) ;
for ( ScopeDesc * sd = cm - > scope_desc_at ( fr . pc ( ) ) ; ; sd = sd - > sender ( ) ) {
xtty - > begin_elem ( " jvms bci='%d' " , sd - > bci ( ) ) ;
xtty - > method ( sd - > method ( ) ) ;
xtty - > end_elem ( ) ;
if ( sd - > is_top ( ) ) break ;
}
xtty - > tail ( " deoptimized " ) ;
}
// Patch the compiled method so that when execution returns to it we will
// deopt the execution state and return to the interpreter.
fr . deoptimize ( thread ) ;
}
void Deoptimization : : deoptimize ( JavaThread * thread , frame fr , DeoptReason reason ) {
// Deoptimize only if the frame comes from compiled code.
// Do not deoptimize a frame that has already been patched
// during the execution of the loops below.
if ( ! fr . is_compiled_frame ( ) | | fr . is_deoptimized_frame ( ) ) {
return ;
}
ResourceMark rm ;
DeoptimizationMarker dm ;
deoptimize_single_frame ( thread , fr , reason ) ;
}
# if INCLUDE_JVMCI
address Deoptimization : : deoptimize_for_missing_exception_handler ( CompiledMethod * cm ) {
// there is no exception handler for this pc => deoptimize
cm - > make_not_entrant ( ) ;
// Use Deoptimization::deoptimize for all of its side-effects:
// gathering traps statistics, logging...
// it also patches the return pc but we do not care about that
// since we return a continuation to the deopt_blob below.
JavaThread * thread = JavaThread : : current ( ) ;
RegisterMap reg_map ( thread , false ) ;
frame runtime_frame = thread - > last_frame ( ) ;
frame caller_frame = runtime_frame . sender ( & reg_map ) ;
assert ( caller_frame . cb ( ) - > as_compiled_method_or_null ( ) = = cm , " expect top frame compiled method " ) ;
Deoptimization : : deoptimize ( thread , caller_frame , Deoptimization : : Reason_not_compiled_exception_handler ) ;
MethodData * trap_mdo = get_method_data ( thread , methodHandle ( thread , cm - > method ( ) ) , true ) ;
if ( trap_mdo ! = NULL ) {
trap_mdo - > inc_trap_count ( Deoptimization : : Reason_not_compiled_exception_handler ) ;
}
return SharedRuntime : : deopt_blob ( ) - > unpack_with_exception_in_tls ( ) ;
}
# endif
void Deoptimization : : deoptimize_frame_internal ( JavaThread * thread , intptr_t * id , DeoptReason reason ) {
assert ( thread = = Thread : : current ( ) | | SafepointSynchronize : : is_at_safepoint ( ) ,
" can only deoptimize other thread at a safepoint " ) ;
// Compute frame and register map based on thread and sp.
RegisterMap reg_map ( thread , false ) ;
frame fr = thread - > last_frame ( ) ;
while ( fr . id ( ) ! = id ) {
fr = fr . sender ( & reg_map ) ;
}
deoptimize ( thread , fr , reason ) ;
}
void Deoptimization : : deoptimize_frame ( JavaThread * thread , intptr_t * id , DeoptReason reason ) {
if ( thread = = Thread : : current ( ) ) {
Deoptimization : : deoptimize_frame_internal ( thread , id , reason ) ;
} else {
VM_DeoptimizeFrame deopt ( thread , id , reason ) ;
VMThread : : execute ( & deopt ) ;
}
}
void Deoptimization : : deoptimize_frame ( JavaThread * thread , intptr_t * id ) {
deoptimize_frame ( thread , id , Reason_constraint ) ;
}
// JVMTI PopFrame support
JRT_LEAF ( void , Deoptimization : : popframe_preserve_args ( JavaThread * thread , int bytes_to_save , void * start_address ) )
{
thread - > popframe_preserve_args ( in_ByteSize ( bytes_to_save ) , start_address ) ;
}
JRT_END
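// Fetch the MethodData for 'm', optionally building one in the interpreter if it
// does not exist yet; allocation failures are tolerated and simply leave it NULL.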
MethodData *
Deoptimization : : get_method_data ( JavaThread * thread , const methodHandle & m ,
bool create_if_missing ) {
Thread * THREAD = thread ;
MethodData * mdo = m ( ) - > method_data ( ) ;
if ( mdo = = NULL & & create_if_missing & & ! HAS_PENDING_EXCEPTION ) {
// Build an MDO. Ignore errors like OutOfMemory;
// that simply means we won't have an MDO to update.
Method : : build_interpreter_method_data ( m , THREAD ) ;
if ( HAS_PENDING_EXCEPTION ) {
// Only metaspace OOM is expected. No Java code executed.
assert ( ( PENDING_EXCEPTION - > is_a ( SystemDictionary : : OutOfMemoryError_klass ( ) ) ) , " we expect only an OOM error here " ) ;
CLEAR_PENDING_EXCEPTION ;
}
mdo = m ( ) - > method_data ( ) ;
}
return mdo ;
}
# if COMPILER2_OR_JVMCI
void Deoptimization : : load_class_by_index ( const constantPoolHandle & constant_pool , int index , TRAPS ) {
// In case of an unresolved klass entry, load the class.
// This path is exercised from case _ldc in Parse::do_one_bytecode,
// and probably nowhere else.
// Even that case would benefit from simply re-interpreting the
// bytecode, without paying special attention to the class index.
// So this whole "class index" feature should probably be removed.
if ( constant_pool - > tag_at ( index ) . is_unresolved_klass ( ) ) {
Klass * tk = constant_pool - > klass_at_ignore_error ( index , THREAD ) ;
if ( HAS_PENDING_EXCEPTION ) {
// Exception happened during classloading. We ignore the exception here, since it
// is going to be rethrown since the current activation is going to be deoptimized and
// the interpreter will re-execute the bytecode.
// Do not clear probable Async Exceptions.
CLEAR_PENDING_NONASYNC_EXCEPTION ;
// Class loading called java code which may have caused a stack
// overflow. If the exception was thrown right before the return
// to the runtime the stack is no longer guarded. Reguard the
// stack otherwise if we return to the uncommon trap blob and the
// stack bang causes a stack overflow we crash.
JavaThread * jt = THREAD - > as_Java_thread ( ) ;
bool guard_pages_enabled = jt - > stack_overflow_state ( ) - > reguard_stack_if_needed ( ) ;
assert ( guard_pages_enabled , " stack banging in uncommon trap blob may cause crash " ) ;
}
return ;
}
assert ( ! constant_pool - > tag_at ( index ) . is_symbol ( ) ,
" no symbolic names here, please " ) ;
}
# if INCLUDE_JFR
class DeoptReasonSerializer : public JfrSerializer {
public :
void serialize ( JfrCheckpointWriter & writer ) {
writer . write_count ( ( u4 ) ( Deoptimization : : Reason_LIMIT + 1 ) ) ; // + Reason::many (-1)
for ( int i = - 1 ; i < Deoptimization : : Reason_LIMIT ; + + i ) {
writer . write_key ( ( u8 ) i ) ;
writer . write ( Deoptimization : : trap_reason_name ( i ) ) ;
}
}
} ;
class DeoptActionSerializer : public JfrSerializer {
public :
void serialize ( JfrCheckpointWriter & writer ) {
static const u4 nof_actions = Deoptimization : : Action_LIMIT ;
writer . write_count ( nof_actions ) ;
for ( u4 i = 0 ; i < Deoptimization : : Action_LIMIT ; + + i ) {
writer . write_key ( i ) ;
writer . write ( Deoptimization : : trap_action_name ( ( int ) i ) ) ;
}
}
} ;
static void register_serializers ( ) {
static int critical_section = 0 ;
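// Only the first caller to win the cmpxchg registers the serializers; later
// callers (and concurrent losers) return without registering them again.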
if ( 1 = = critical_section | | Atomic : : cmpxchg ( & critical_section , 0 , 1 ) = = 1 ) {
return ;
}
JfrSerializer : : register_serializer ( TYPE_DEOPTIMIZATIONREASON , true , new DeoptReasonSerializer ( ) ) ;
JfrSerializer : : register_serializer ( TYPE_DEOPTIMIZATIONACTION , true , new DeoptActionSerializer ( ) ) ;
}
static void post_deoptimization_event ( CompiledMethod * nm ,
const Method * method ,
int trap_bci ,
int instruction ,
Deoptimization : : DeoptReason reason ,
Deoptimization : : DeoptAction action ) {
assert ( nm ! = NULL , " invariant " ) ;
assert ( method ! = NULL , " invariant " ) ;
if ( EventDeoptimization : : is_enabled ( ) ) {
static bool serializers_registered = false ;
if ( ! serializers_registered ) {
register_serializers ( ) ;
serializers_registered = true ;
}
EventDeoptimization event ;
event . set_compileId ( nm - > compile_id ( ) ) ;
event . set_compiler ( nm - > compiler_type ( ) ) ;
event . set_method ( method ) ;
event . set_lineNumber ( method - > line_number_from_bci ( trap_bci ) ) ;
event . set_bci ( trap_bci ) ;
event . set_instruction ( instruction ) ;
event . set_reason ( reason ) ;
event . set_action ( action ) ;
event . commit ( ) ;
}
}
# endif // INCLUDE_JFR
JRT_ENTRY ( void , Deoptimization : : uncommon_trap_inner ( JavaThread * thread , jint trap_request ) ) {
HandleMark hm ( thread ) ;
// uncommon_trap() is called at the beginning of the uncommon trap
// handler. Note this fact before we start generating temporary frames
// that can confuse an asynchronous stack walker. This counter is
// decremented at the end of unpack_frames().
thread - > inc_in_deopt_handler ( ) ;
// We need to update the map if we have biased locking.
# if INCLUDE_JVMCI
// JVMCI might need to get an exception from the stack, which in turn requires the register map to be valid
RegisterMap reg_map ( thread , true ) ;
# else
RegisterMap reg_map ( thread , UseBiasedLocking ) ;
# endif
frame stub_frame = thread - > last_frame ( ) ;
frame fr = stub_frame . sender ( & reg_map ) ;
// Make sure the calling nmethod is not getting deoptimized and removed
// before we are done with it.
nmethodLocker nl ( fr . pc ( ) ) ;
// Log a message
Events : : log_deopt_message ( thread , " Uncommon trap: trap_request= " PTR32_FORMAT " fr.pc= " INTPTR_FORMAT " relative= " INTPTR_FORMAT ,
trap_request , p2i ( fr . pc ( ) ) , fr . pc ( ) - fr . cb ( ) - > code_begin ( ) ) ;
{
ResourceMark rm ;
DeoptReason reason = trap_request_reason ( trap_request ) ;
DeoptAction action = trap_request_action ( trap_request ) ;
# if INCLUDE_JVMCI
int debug_id = trap_request_debug_id ( trap_request ) ;
# endif
jint unloaded_class_index = trap_request_index ( trap_request ) ; // CP idx or -1
vframe * vf = vframe : : new_vframe ( & fr , & reg_map , thread ) ;
compiledVFrame * cvf = compiledVFrame : : cast ( vf ) ;
CompiledMethod * nm = cvf - > code ( ) ;
ScopeDesc * trap_scope = cvf - > scope ( ) ;
if ( TraceDeoptimization ) {
ttyLocker ttyl ;
tty - > print_cr ( " bci=%d pc= " INTPTR_FORMAT " , relative_pc= " INTPTR_FORMAT " , method=%s " JVMCI_ONLY ( " , debug_id=%d " ) , trap_scope - > bci ( ) , p2i ( fr . pc ( ) ) , fr . pc ( ) - nm - > code_begin ( ) , trap_scope - > method ( ) - > name_and_sig_as_C_string ( )
# if INCLUDE_JVMCI
, debug_id
# endif
) ;
}
methodHandle trap_method ( THREAD , trap_scope - > method ( ) ) ;
int trap_bci = trap_scope - > bci ( ) ;
# if INCLUDE_JVMCI
jlong speculation = thread - > pending_failed_speculation ( ) ;
if ( nm - > is_compiled_by_jvmci ( ) & & nm - > is_nmethod ( ) ) { // Exclude AOTed methods
nm - > as_nmethod ( ) - > update_speculation ( thread ) ;
} else {
assert ( speculation = = 0 , " There should not be a speculation for methods compiled by non-JVMCI compilers " ) ;
}
if ( trap_bci = = SynchronizationEntryBCI ) {
trap_bci = 0 ;
thread - > set_pending_monitorenter ( true ) ;
}
if ( reason = = Deoptimization : : Reason_transfer_to_interpreter ) {
thread - > set_pending_transfer_to_interpreter ( true ) ;
}
# endif
Bytecodes : : Code trap_bc = trap_method - > java_code_at ( trap_bci ) ;
// Record this event in the histogram.
gather_statistics ( reason , action , trap_bc ) ;
// Ensure that we can record deopt. history:
// Need MDO to record RTM code generation state.
bool create_if_missing = ProfileTraps | | UseCodeAging RTM_OPT_ONLY ( | | UseRTMLocking ) ;
methodHandle profiled_method ;
# if INCLUDE_JVMCI
if ( nm - > is_compiled_by_jvmci ( ) ) {
profiled_method = methodHandle ( THREAD , nm - > method ( ) ) ;
} else {
profiled_method = trap_method ;
}
# else
profiled_method = trap_method ;
# endif
MethodData * trap_mdo =
get_method_data ( thread , profiled_method , create_if_missing ) ;
JFR_ONLY ( post_deoptimization_event ( nm , trap_method ( ) , trap_bci , trap_bc , reason , action ) ; )
// Log a message
Events : : log_deopt_message ( thread , " Uncommon trap: reason=%s action=%s pc= " INTPTR_FORMAT " method=%s @ %d %s " ,
trap_reason_name ( reason ) , trap_action_name ( action ) , p2i ( fr . pc ( ) ) ,
trap_method - > name_and_sig_as_C_string ( ) , trap_bci , nm - > compiler_name ( ) ) ;
// Print a bunch of diagnostics, if requested.
if ( TraceDeoptimization | | LogCompilation ) {
ResourceMark rm ;
ttyLocker ttyl ;
char buf [ 100 ] ;
if ( xtty ! = NULL ) {
xtty - > begin_head ( " uncommon_trap thread=' " UINTX_FORMAT " ' %s " ,
os : : current_thread_id ( ) ,
format_trap_request ( buf , sizeof ( buf ) , trap_request ) ) ;
# if INCLUDE_JVMCI
if ( speculation ! = 0 ) {
xtty - > print ( " speculation=' " JLONG_FORMAT " ' " , speculation ) ;
}
# endif
nm - > log_identity ( xtty ) ;
}
Symbol * class_name = NULL ;
bool unresolved = false ;
if ( unloaded_class_index > = 0 ) {
constantPoolHandle constants ( THREAD , trap_method - > constants ( ) ) ;
if ( constants - > tag_at ( unloaded_class_index ) . is_unresolved_klass ( ) ) {
class_name = constants - > klass_name_at ( unloaded_class_index ) ;
unresolved = true ;
if ( xtty ! = NULL )
xtty - > print ( " unresolved='1' " ) ;
} else if ( constants - > tag_at ( unloaded_class_index ) . is_symbol ( ) ) {
class_name = constants - > symbol_at ( unloaded_class_index ) ;
}
if ( xtty ! = NULL )
xtty - > name ( class_name ) ;
}
if ( xtty ! = NULL & & trap_mdo ! = NULL & & ( int ) reason < ( int ) MethodData : : _trap_hist_limit ) {
// Dump the relevant MDO state.
// This is the deopt count for the current reason, any previous
// reasons or recompiles seen at this point.
int dcnt = trap_mdo - > trap_count ( reason ) ;
if ( dcnt ! = 0 )
xtty - > print ( " count='%d' " , dcnt ) ;
ProfileData * pdata = trap_mdo - > bci_to_data ( trap_bci ) ;
int dos = ( pdata = = NULL ) ? 0 : pdata - > trap_state ( ) ;
if ( dos ! = 0 ) {
xtty - > print ( " state='%s' " , format_trap_state ( buf , sizeof ( buf ) , dos ) ) ;
if ( trap_state_is_recompiled ( dos ) ) {
int recnt2 = trap_mdo - > overflow_recompile_count ( ) ;
if ( recnt2 ! = 0 )
xtty - > print ( " recompiles2='%d' " , recnt2 ) ;
}
}
}
if ( xtty ! = NULL ) {
xtty - > stamp ( ) ;
xtty - > end_head ( ) ;
}
if ( TraceDeoptimization ) { // make noise on the tty
tty - > print ( " Uncommon trap occurred in " ) ;
nm - > method ( ) - > print_short_name ( tty ) ;
tty - > print ( " compiler=%s compile_id=%d " , nm - > compiler_name ( ) , nm - > compile_id ( ) ) ;
# if INCLUDE_JVMCI
if ( nm - > is_nmethod ( ) ) {
const char * installed_code_name = nm - > as_nmethod ( ) - > jvmci_name ( ) ;
if ( installed_code_name ! = NULL ) {
tty - > print ( " (JVMCI: installed code name=%s) " , installed_code_name ) ;
}
}
# endif
tty - > print ( " (@ " INTPTR_FORMAT " ) thread= " UINTX_FORMAT " reason=%s action=%s unloaded_class_index=%d " JVMCI_ONLY ( " debug_id=%d " ) ,
p2i ( fr . pc ( ) ) ,
os : : current_thread_id ( ) ,
trap_reason_name ( reason ) ,
trap_action_name ( action ) ,
unloaded_class_index
# if INCLUDE_JVMCI
, debug_id
# endif
) ;
if ( class_name ! = NULL ) {
tty - > print ( unresolved ? " unresolved class: " : " symbol: " ) ;
class_name - > print_symbol_on ( tty ) ;
}
tty - > cr ( ) ;
}
if ( xtty ! = NULL ) {
// Log the precise location of the trap.
for ( ScopeDesc * sd = trap_scope ; ; sd = sd - > sender ( ) ) {
xtty - > begin_elem ( " jvms bci='%d' " , sd - > bci ( ) ) ;
xtty - > method ( sd - > method ( ) ) ;
xtty - > end_elem ( ) ;
if ( sd - > is_top ( ) ) break ;
}
xtty - > tail ( " uncommon_trap " ) ;
}
}
// (End diagnostic printout.)
// Load class if necessary
if ( unloaded_class_index > = 0 ) {
constantPoolHandle constants ( THREAD , trap_method - > constants ( ) ) ;
load_class_by_index ( constants , unloaded_class_index , THREAD ) ;
}
// Flush the nmethod if necessary and desirable.
//
// We need to avoid situations where we are re-flushing the nmethod
// because of a hot deoptimization site. Repeated flushes at the same
// point need to be detected by the compiler and avoided. If the compiler
// cannot avoid them (or has a bug and "refuses" to avoid them), this
// module must take measures to avoid an infinite cycle of recompilation
// and deoptimization. There are several such measures:
//
// 1. If a recompilation is ordered a second time at some site X
// and for the same reason R, the action is adjusted to 'reinterpret',
// to give the interpreter time to exercise the method more thoroughly.
// If this happens, the method's overflow_recompile_count is incremented.
//
// 2. If the compiler fails to reduce the deoptimization rate, then
// the method's overflow_recompile_count will begin to exceed the set
// limit PerBytecodeRecompilationCutoff. If this happens, the action
// is adjusted to 'make_not_compilable', and the method is abandoned
// to the interpreter. This is a performance hit for hot methods,
// but is better than a disastrous infinite cycle of recompilations.
// (Actually, only the method containing the site X is abandoned.)
//
// 3. In parallel with the previous measures, if the total number of
// recompilations of a method exceeds the much larger set limit
// PerMethodRecompilationCutoff, the method is abandoned.
// This should only happen if the method is very large and has
// many "lukewarm" deoptimizations. The code which enforces this
// limit is elsewhere (class nmethod, class Method).
//
// Note that the per-BCI 'is_recompiled' bit gives the compiler one chance
// to recompile at each bytecode independently of the per-BCI cutoff.
//
// The decision to update code is up to the compiler, and is encoded
// in the Action_xxx code. If the compiler requests Action_none
// no trap state is changed, no compiled code is changed, and the
// computation suffers along in the interpreter.
//
// The other action codes specify various tactics for decompilation
// and recompilation. Action_maybe_recompile is the loosest, and
// allows the compiled code to stay around until enough traps are seen,
// and until the compiler gets around to recompiling the trapping method.
//
// The other actions cause immediate removal of the present code.
// Traps caused by injected profile shouldn't pollute trap counts.
bool injected_profile_trap = trap_method - > has_injected_profile ( ) & &
( reason = = Reason_intrinsic | | reason = = Reason_unreached ) ;
bool update_trap_state = ( reason ! = Reason_tenured ) & & ! injected_profile_trap ;
bool make_not_entrant = false ;
bool make_not_compilable = false ;
bool reprofile = false ;
switch ( action ) {
case Action_none :
// Keep the old code.
update_trap_state = false ;
break ;
case Action_maybe_recompile :
// No need to invalidate the present code, but we can
// initiate another compilation.
// Start compiler without (necessarily) invalidating the nmethod.
// The system will tolerate the old code, but new code should be
// generated when possible.
break ;
case Action_reinterpret :
// Go back into the interpreter for a while, and then consider
// recompiling from scratch.
make_not_entrant = true ;
// Reset invocation counter for outer most method.
// This will allow the interpreter to exercise the bytecodes
// for a while before recompiling.
// By contrast, Action_make_not_entrant is immediate.
//
// Note that the compiler will track null_check, null_assert,
// range_check, and class_check events and log them as if they
// had been traps taken from compiled code. This will update
// the MDO trap history so that the next compilation will
// properly detect hot trap sites.
reprofile = true ;
break ;
case Action_make_not_entrant :
// Request immediate recompilation, and get rid of the old code.
// Make them not entrant, so next time they are called they get
// recompiled. Unloaded classes are loaded now so recompile before next
// time they are called. Same for uninitialized. The interpreter will
// link the missing class, if any.
make_not_entrant = true ;
break ;
case Action_make_not_compilable :
// Give up on compiling this method at all.
make_not_entrant = true ;
make_not_compilable = true ;
break ;
default :
ShouldNotReachHere ( ) ;
}
// Setting +ProfileTraps fixes the following, on all platforms:
// 4852688: ProfileInterpreter is off by default for ia64. The result is
// infinite heroic-opt-uncommon-trap/deopt/recompile cycles, since the
// recompile relies on a MethodData* to record heroic opt failures.
// Whether the interpreter is producing MDO data or not, we also need
// to use the MDO to detect hot deoptimization points and control
// aggressive optimization.
bool inc_recompile_count = false ;
ProfileData * pdata = NULL ;
if ( ProfileTraps & & ! is_client_compilation_mode_vm ( ) & & update_trap_state & & trap_mdo ! = NULL ) {
assert ( trap_mdo = = get_method_data ( thread , profiled_method , false ) , " sanity " ) ;
uint this_trap_count = 0 ;
bool maybe_prior_trap = false ;
bool maybe_prior_recompile = false ;
pdata = query_update_method_data ( trap_mdo , trap_bci , reason , true ,
# if INCLUDE_JVMCI
nm - > is_compiled_by_jvmci ( ) & & nm - > is_osr_method ( ) ,
# endif
nm - > method ( ) ,
//outputs:
this_trap_count ,
maybe_prior_trap ,
maybe_prior_recompile ) ;
// Because the interpreter also counts null, div0, range, and class
// checks, these traps from compiled code are double-counted.
// This is harmless; it just means that the PerXTrapLimit values
// are in effect a little smaller than they look.
DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any ( reason ) ;
if ( per_bc_reason ! = Reason_none ) {
// Now take action based on the partially known per-BCI history.
if ( maybe_prior_trap
& & this_trap_count > = ( uint ) PerBytecodeTrapLimit ) {
// If there are too many traps at this BCI, force a recompile.
// This will allow the compiler to see the limit overflow, and
// take corrective action, if possible. The compiler generally
// does not use the exact PerBytecodeTrapLimit value, but instead
// changes its tactics if it sees any traps at all. This provides
// a little hysteresis, delaying a recompile until a trap happens
// several times.
//
// Actually, since there is only one bit of counter per BCI,
// the possible per-BCI counts are {0,1,(per-method count)}.
// This produces accurate results if in fact there is only
// one hot trap site, but begins to get fuzzy if there are
// many sites. For example, if there are ten sites each
// trapping two or more times, they each get the blame for
// all of their traps.
make_not_entrant = true ;
}
// Detect repeated recompilation at the same BCI, and enforce a limit.
if ( make_not_entrant & & maybe_prior_recompile ) {
// More than one recompile at this point.
inc_recompile_count = maybe_prior_trap ;
}
} else {
// For reasons which are not recorded per-bytecode, we simply
// force recompiles unconditionally.
// (Note that PerMethodRecompilationCutoff is enforced elsewhere.)
make_not_entrant = true ;
}
// Go back to the compiler if there are too many traps in this method.
if ( this_trap_count > = per_method_trap_limit ( reason ) ) {
// If there are too many traps in this method, force a recompile.
// This will allow the compiler to see the limit overflow, and
// take corrective action, if possible.
// (This condition is an unlikely backstop only, because the
// PerBytecodeTrapLimit is more likely to take effect first,
// if it is applicable.)
make_not_entrant = true ;
}
// Here's more hysteresis: If there has been a recompile at
// this trap point already, run the method in the interpreter
// for a while to exercise it more thoroughly.
if ( make_not_entrant & & maybe_prior_recompile & & maybe_prior_trap ) {
reprofile = true ;
}
}
// Take requested actions on the method:
// Recompile
if ( make_not_entrant ) {
if ( ! nm - > make_not_entrant ( ) ) {
return ; // the call did not change nmethod's state
}
if ( pdata ! = NULL ) {
// Record the recompilation event, if any.
int tstate0 = pdata - > trap_state ( ) ;
int tstate1 = trap_state_set_recompiled ( tstate0 , true ) ;
if ( tstate1 ! = tstate0 )
pdata - > set_trap_state ( tstate1 ) ;
}
# if INCLUDE_RTM_OPT
// Restart collecting RTM locking abort statistic if the method
// is recompiled for a reason other than RTM state change.
// Assume that in new recompiled code the statistic could be different,
// for example, due to different inlining.
if ( ( reason ! = Reason_rtm_state_change ) & & ( trap_mdo ! = NULL ) & &
UseRTMDeopt & & ( nm - > as_nmethod ( ) - > rtm_state ( ) ! = ProfileRTM ) ) {
trap_mdo - > atomic_set_rtm_state ( ProfileRTM ) ;
}
# endif
// For code aging we count traps separately here, using make_not_entrant()
// as a guard against simultaneous deopts in multiple threads.
if ( reason = = Reason_tenured & & trap_mdo ! = NULL ) {
trap_mdo - > inc_tenure_traps ( ) ;
}
}
if ( inc_recompile_count ) {
trap_mdo - > inc_overflow_recompile_count ( ) ;
if ( ( uint ) trap_mdo - > overflow_recompile_count ( ) >
( uint ) PerBytecodeRecompilationCutoff ) {
// Give up on the method containing the bad BCI.
if ( trap_method ( ) = = nm - > method ( ) ) {
make_not_compilable = true ;
} else {
trap_method - > set_not_compilable ( " overflow_recompile_count > PerBytecodeRecompilationCutoff " , CompLevel_full_optimization ) ;
// But give grace to the enclosing nm->method().
}
}
}
// Reprofile
if ( reprofile ) {
CompilationPolicy : : policy ( ) - > reprofile ( trap_scope , nm - > is_osr_method ( ) ) ;
}
// Give up compiling
if ( make_not_compilable & & ! nm - > method ( ) - > is_not_compilable ( CompLevel_full_optimization ) ) {
assert ( make_not_entrant , " consistent " ) ;
nm - > method ( ) - > set_not_compilable ( " give up compiling " , CompLevel_full_optimization ) ;
}
} // Free marked resources
}
JRT_END
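// Update the MDO's trap history for this bci/reason and report, via the output
// parameters, whether a prior trap or recompilation at this point is likely.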
ProfileData *
Deoptimization : : query_update_method_data ( MethodData * trap_mdo ,
int trap_bci ,
Deoptimization : : DeoptReason reason ,
bool update_total_trap_count ,
# if INCLUDE_JVMCI
bool is_osr ,
# endif
Method * compiled_method ,
//outputs:
uint & ret_this_trap_count ,
bool & ret_maybe_prior_trap ,
bool & ret_maybe_prior_recompile ) {
bool maybe_prior_trap = false ;
bool maybe_prior_recompile = false ;
uint this_trap_count = 0 ;
if ( update_total_trap_count ) {
uint idx = reason ;
# if INCLUDE_JVMCI
if ( is_osr ) {
idx + = Reason_LIMIT ;
}
# endif
uint prior_trap_count = trap_mdo - > trap_count ( idx ) ;
this_trap_count = trap_mdo - > inc_trap_count ( idx ) ;
// If the runtime cannot find a place to store trap history,
// it is estimated based on the general condition of the method.
// If the method has ever been recompiled, or has ever incurred
// a trap with the present reason , then this BCI is assumed
// (pessimistically) to be the culprit.
maybe_prior_trap = ( prior_trap_count ! = 0 ) ;
maybe_prior_recompile = ( trap_mdo - > decompile_count ( ) ! = 0 ) ;
}
ProfileData * pdata = NULL ;
// For reasons which are recorded per bytecode, we check per-BCI data.
DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any ( reason ) ;
assert ( per_bc_reason ! = Reason_none | | update_total_trap_count , " must be " ) ;
if ( per_bc_reason ! = Reason_none ) {
// Find the profile data for this BCI. If there isn't one,
// try to allocate one from the MDO's set of spares.
// This will let us detect a repeated trap at this point.
pdata = trap_mdo - > allocate_bci_to_data ( trap_bci , reason_is_speculate ( reason ) ? compiled_method : NULL ) ;
if ( pdata ! = NULL ) {
if ( reason_is_speculate ( reason ) & & ! pdata - > is_SpeculativeTrapData ( ) ) {
if ( LogCompilation & & xtty ! = NULL ) {
ttyLocker ttyl ;
// no more room for speculative traps in this MDO
xtty - > elem ( " speculative_traps_oom " ) ;
}
}
// Query the trap state of this profile datum.
int tstate0 = pdata - > trap_state ( ) ;
if ( ! trap_state_has_reason ( tstate0 , per_bc_reason ) )
maybe_prior_trap = false ;
if ( ! trap_state_is_recompiled ( tstate0 ) )
maybe_prior_recompile = false ;
// Update the trap state of this profile datum.
int tstate1 = tstate0 ;
// Record the reason.
tstate1 = trap_state_add_reason ( tstate1 , per_bc_reason ) ;
// Store the updated state on the MDO, for next time.
if ( tstate1 ! = tstate0 )
pdata - > set_trap_state ( tstate1 ) ;
} else {
if ( LogCompilation & & xtty ! = NULL ) {
ttyLocker ttyl ;
// Missing MDP? Leave a small complaint in the log.
xtty - > elem ( " missing_mdp bci='%d' " , trap_bci ) ;
}
}
}
// Return results:
ret_this_trap_count = this_trap_count ;
ret_maybe_prior_trap = maybe_prior_trap ;
ret_maybe_prior_recompile = maybe_prior_recompile ;
return pdata ;
}
void
Deoptimization : : update_method_data_from_interpreter ( MethodData * trap_mdo , int trap_bci , int reason ) {
ResourceMark rm ;
// Ignored outputs:
uint ignore_this_trap_count ;
bool ignore_maybe_prior_trap ;
bool ignore_maybe_prior_recompile ;
assert ( ! reason_is_speculate ( reason ) , " reason speculate only used by compiler " ) ;
// JVMCI uses the total counts to determine if deoptimizations are happening too frequently -> do not adjust total counts
bool update_total_counts = true JVMCI_ONLY ( & & ! UseJVMCICompiler ) ;
query_update_method_data ( trap_mdo , trap_bci ,
( DeoptReason ) reason ,
update_total_counts ,
# if INCLUDE_JVMCI
false ,
# endif
NULL ,
ignore_this_trap_count ,
ignore_maybe_prior_trap ,
ignore_maybe_prior_recompile ) ;
}
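// Entry point from the uncommon trap blob: handle the trap while still in a
// Java-visible state, then compute the unroll information used to unpack frames.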
Deoptimization : : UnrollBlock * Deoptimization : : uncommon_trap ( JavaThread * thread , jint trap_request , jint exec_mode ) {
if ( TraceDeoptimization ) {
tty - > print ( " Uncommon trap " ) ;
}
// Still in Java no safepoints
{
// This enters VM and may safepoint
uncommon_trap_inner ( thread , trap_request ) ;
}
return fetch_unroll_info_helper ( thread , exec_mode ) ;
}
// Local derived constants.
// Further breakdown of DataLayout::trap_state, as promised by DataLayout.
const int DS_REASON_MASK = ( ( uint ) DataLayout : : trap_mask ) > > 1 ;
const int DS_RECOMPILE_BIT = DataLayout : : trap_mask - DS_REASON_MASK ;
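// A trap_state value packs a recorded DeoptReason into the low bits and keeps a
// separate 'recompiled' flag in the remaining high bit of DataLayout::trap_mask.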
//---------------------------trap_state_reason---------------------------------
Deoptimization::DeoptReason
Deoptimization::trap_state_reason(int trap_state) {
  // This assert provides the link between the width of DataLayout::trap_bits
  // and the encoding of "recorded" reasons.  It ensures there are enough
  // bits to store all needed reasons in the per-BCI MDO profile.
  assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits");
  int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
  trap_state -= recompile_bit;
  if (trap_state == DS_REASON_MASK) {
    return Reason_many;
  } else {
    assert((int)Reason_none == 0, "state=0 => Reason_none");
    return (DeoptReason)trap_state;
  }
}
//-------------------------trap_state_has_reason-------------------------------
int Deoptimization::trap_state_has_reason(int trap_state, int reason) {
  assert(reason_is_recorded_per_bytecode((DeoptReason)reason), "valid reason");
  assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits");
  int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
  trap_state -= recompile_bit;
  if (trap_state == DS_REASON_MASK) {
    return -1;  // true, unspecifically (bottom of state lattice)
  } else if (trap_state == reason) {
    return 1;   // true, definitely
  } else if (trap_state == 0) {
    return 0;   // false, definitely (top of state lattice)
  } else {
    return 0;   // false, definitely
  }
}
//-------------------------trap_state_add_reason-------------------------------
int Deoptimization::trap_state_add_reason(int trap_state, int reason) {
  assert(reason_is_recorded_per_bytecode((DeoptReason)reason) || reason == Reason_many, "valid reason");
  int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
  trap_state -= recompile_bit;
  if (trap_state == DS_REASON_MASK) {
    return trap_state + recompile_bit;     // already at state lattice bottom
  } else if (trap_state == reason) {
    return trap_state + recompile_bit;     // the condition is already true
  } else if (trap_state == 0) {
    return reason + recompile_bit;         // no condition has yet been true
  } else {
    return DS_REASON_MASK + recompile_bit; // fall to state lattice bottom
  }
}
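// Usage sketch (added for illustration; the enum values shown exist in
// DeoptReason, but the sequence itself is hypothetical):
//
//   int s = 0;                                          // top: no reason yet
//   s = trap_state_add_reason(s, Reason_null_check);    // records null_check
//   s = trap_state_add_reason(s, Reason_range_check);   // different reason =>
//                                                       // collapses to Reason_many
//
// The reason bits thus form a small lattice: 0 at the top, each single
// reason in the middle, and DS_REASON_MASK ("many") at the bottom; the
// recompile bit is carried along unchanged by every transition.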
//-----------------------trap_state_is_recompiled------------------------------
bool Deoptimization::trap_state_is_recompiled(int trap_state) {
  return (trap_state & DS_RECOMPILE_BIT) != 0;
}
//-----------------------trap_state_set_recompiled-----------------------------
int Deoptimization::trap_state_set_recompiled(int trap_state, bool z) {
  if (z)  return trap_state |  DS_RECOMPILE_BIT;
  else    return trap_state & ~DS_RECOMPILE_BIT;
}
//---------------------------format_trap_state---------------------------------
// This is used for debugging and diagnostics, including LogFile output.
const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
                                              int trap_state) {
  assert(buflen > 0, "sanity");
  DeoptReason reason      = trap_state_reason(trap_state);
  bool        recomp_flag = trap_state_is_recompiled(trap_state);
  // Re-encode the state from its decoded components.
  int decoded_state = 0;
  if (reason_is_recorded_per_bytecode(reason) || reason == Reason_many)
    decoded_state = trap_state_add_reason(decoded_state, reason);
  if (recomp_flag)
    decoded_state = trap_state_set_recompiled(decoded_state, recomp_flag);
  // If the state re-encodes properly, format it symbolically.
  // Because this routine is used for debugging and diagnostics,
  // be robust even if the state is a strange value.
  size_t len;
  if (decoded_state != trap_state) {
    // Random buggy state that doesn't decode??
    len = jio_snprintf(buf, buflen, "#%d", trap_state);
  } else {
    len = jio_snprintf(buf, buflen, "%s%s",
                       trap_reason_name(reason),
                       recomp_flag ? " recompiled" : "");
  }
  return buf;
}
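// Example output (derived from the format strings above, values illustrative):
// a state that re-encodes cleanly prints as e.g. "class_check" or
// "class_check recompiled"; a state that does not decode falls back to the
// raw numeric form, e.g. "#23".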
//--------------------------------statics--------------------------------------
const char* Deoptimization::_trap_reason_name[] = {
  // Note:  Keep this in sync. with enum DeoptReason.
  "none",
  "null_check",
  "null_assert" JVMCI_ONLY("_or_unreached0"),
  "range_check",
  "class_check",
  "array_check",
  "intrinsic" JVMCI_ONLY("_or_type_checked_inlining"),
  "bimorphic" JVMCI_ONLY("_or_optimized_type_check"),
  "profile_predicate",
  "unloaded",
  "uninitialized",
  "initialized",
  "unreached",
  "unhandled",
  "constraint",
  "div0_check",
  "age",
  "predicate",
  "loop_limit_check",
  "speculate_class_check",
  "speculate_null_check",
  "speculate_null_assert",
  "rtm_state_change",
  "unstable_if",
  "unstable_fused_if",
#if INCLUDE_JVMCI
  "aliasing",
  "transfer_to_interpreter",
  "not_compiled_exception_handler",
  "unresolved",
  "jsr_mismatch",
#endif
  "tenured"
};
const char* Deoptimization::_trap_action_name[] = {
  // Note:  Keep this in sync. with enum DeoptAction.
  "none",
  "maybe_recompile",
  "reinterpret",
  "make_not_entrant",
  "make_not_compilable"
};
const char* Deoptimization::trap_reason_name(int reason) {
  // Check that every reason has a name
  STATIC_ASSERT(sizeof(_trap_reason_name)/sizeof(const char*) == Reason_LIMIT);

  if (reason == Reason_many)  return "many";
  if ((uint)reason < Reason_LIMIT)
    return _trap_reason_name[reason];
  static char buf[20];
  sprintf(buf, "reason%d", reason);
  return buf;
}
const char* Deoptimization::trap_action_name(int action) {
  // Check that every action has a name
  STATIC_ASSERT(sizeof(_trap_action_name)/sizeof(const char*) == Action_LIMIT);

  if ((uint)action < Action_LIMIT)
    return _trap_action_name[action];
  static char buf[20];
  sprintf(buf, "action%d", action);
  return buf;
}
// This is used for debugging and diagnostics, including LogFile output.
const char* Deoptimization::format_trap_request(char* buf, size_t buflen,
                                                int trap_request) {
  jint unloaded_class_index = trap_request_index(trap_request);
  const char* reason = trap_reason_name(trap_request_reason(trap_request));
  const char* action = trap_action_name(trap_request_action(trap_request));
#if INCLUDE_JVMCI
  int debug_id = trap_request_debug_id(trap_request);
#endif
  size_t len;
  if (unloaded_class_index < 0) {
    len = jio_snprintf(buf, buflen, "reason='%s' action='%s'" JVMCI_ONLY(" debug_id='%d'"),
                       reason, action
#if INCLUDE_JVMCI
                       ,debug_id
#endif
                       );
  } else {
    len = jio_snprintf(buf, buflen, "reason='%s' action='%s' index='%d'" JVMCI_ONLY(" debug_id='%d'"),
                       reason, action, unloaded_class_index
#if INCLUDE_JVMCI
                       ,debug_id
#endif
                       );
  }
  return buf;
}
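// Example output (derived from the format strings above, values illustrative):
//   reason='unloaded' action='reinterpret' index='34'
// The index part appears only when the request carries an unloaded-class
// constant-pool index; JVMCI builds additionally append debug_id='...'.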
juint Deoptimization::_deoptimization_hist
        [Deoptimization::Reason_LIMIT]
    [1 + Deoptimization::Action_LIMIT]
        [Deoptimization::BC_CASE_LIMIT]
  = {0};

enum {
  LSB_BITS = 8,
  LSB_MASK = right_n_bits(LSB_BITS)
};
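// Note on the encoding used below (added explanation): each histogram cell
// packs a bytecode number into its low LSB_BITS bits and keeps the event
// count in the remaining high bits.  That is why gather_statistics() bumps a
// cell by (1 << LSB_BITS) and print_statistics() recovers the count with
// (counter >> LSB_BITS).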
void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
                                       Bytecodes::Code bc) {
  assert(reason >= 0 && reason < Reason_LIMIT, "oob");
  assert(action >= 0 && action < Action_LIMIT, "oob");
  _deoptimization_hist[Reason_none][0][0] += 1;  // total
  _deoptimization_hist[reason][0][0]      += 1;  // per-reason total
  juint* cases = _deoptimization_hist[reason][1+action];
  juint* bc_counter_addr = NULL;
  juint  bc_counter      = 0;
  // Look for an unused counter, or an exact match to this BC.
  if (bc != Bytecodes::_illegal) {
    for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
      juint* counter_addr = &cases[bc_case];
      juint  counter = *counter_addr;
      if ((counter == 0 && bc_counter_addr == NULL)
          || (Bytecodes::Code)(counter & LSB_MASK) == bc) {
        // this counter is either free or is already devoted to this BC
        bc_counter_addr = counter_addr;
        bc_counter = counter | bc;
      }
    }
  }
  if (bc_counter_addr == NULL) {
    // Overflow, or no given bytecode.
    bc_counter_addr = &cases[BC_CASE_LIMIT-1];
    bc_counter = (*bc_counter_addr & ~LSB_MASK);  // clear LSB
  }
  *bc_counter_addr = bc_counter + (1 << LSB_BITS);
}
jint Deoptimization::total_deoptimization_count() {
  return _deoptimization_hist[Reason_none][0][0];
}
void Deoptimization::print_statistics() {
  juint total = total_deoptimization_count();
  juint account = total;
  if (total != 0) {
    ttyLocker ttyl;
    if (xtty != NULL)  xtty->head("statistics type='deoptimization'");
    tty->print_cr("Deoptimization traps recorded:");
    #define PRINT_STAT_LINE(name, r) \
      tty->print_cr("  %4d (%4.1f%%) %s", (int)(r), ((r) * 100.0) / total, name);
    PRINT_STAT_LINE("total", total);
    // For each non-zero entry in the histogram, print the reason,
    // the action, and (if specifically known) the type of bytecode.
    for (int reason = 0; reason < Reason_LIMIT; reason++) {
      for (int action = 0; action < Action_LIMIT; action++) {
        juint* cases = _deoptimization_hist[reason][1+action];
        for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
          juint counter = cases[bc_case];
          if (counter != 0) {
            char name[1*K];
            Bytecodes::Code bc = (Bytecodes::Code)(counter & LSB_MASK);
            if (bc_case == BC_CASE_LIMIT && (int)bc == 0)
              bc = Bytecodes::_illegal;
            sprintf(name, "%s/%s/%s",
                    trap_reason_name(reason),
                    trap_action_name(action),
                    Bytecodes::is_defined(bc)? Bytecodes::name(bc): "other");
            juint r = counter >> LSB_BITS;
            tty->print_cr("  %40s: " UINT32_FORMAT " (%.1f%%)", name, r, (r * 100.0) / total);
            account -= r;
          }
        }
      }
    }
    if (account != 0) {
      PRINT_STAT_LINE("unaccounted", account);
    }
    #undef PRINT_STAT_LINE
    if (xtty != NULL)  xtty->tail("statistics");
  }
}
#else // COMPILER2_OR_JVMCI


// Stubs for C1 only system.
bool Deoptimization::trap_state_is_recompiled(int trap_state) {
  return false;
}

const char* Deoptimization::trap_reason_name(int reason) {
  return "unknown";
}

void Deoptimization::print_statistics() {
  // no output
}
void
Deoptimization::update_method_data_from_interpreter(MethodData* trap_mdo, int trap_bci, int reason) {
  // no update
}
int Deoptimization::trap_state_has_reason(int trap_state, int reason) {
  return 0;
}

void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
                                       Bytecodes::Code bc) {
  // no update
}

const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
                                              int trap_state) {
  jio_snprintf(buf, buflen, "#%d", trap_state);
  return buf;
}

#endif // COMPILER2_OR_JVMCI