/*
 * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "asm/codeBuffer.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeBlob.hpp"
#include "code/compiledIC.hpp"
#include "code/pcDesc.hpp"
#include "code/scopeDesc.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"

// Implementation of StubAssembler

StubAssembler::StubAssembler(CodeBuffer* code, const char * name, int stub_id) : C1_MacroAssembler(code) {
  _name = name;
  _must_gc_arguments = false;
  _frame_size = no_frame_size;
  _num_rt_args = 0;
  _stub_id = stub_id;
}


void StubAssembler::set_info(const char* name, bool must_gc_arguments) {
  _name = name;
  _must_gc_arguments = must_gc_arguments;
}


void StubAssembler::set_frame_size(int size) {
  if (_frame_size == no_frame_size) {
    _frame_size = size;
  }
  assert(_frame_size == size, "can't change the frame size");
}


void StubAssembler::set_num_rt_args(int args) {
  if (_num_rt_args == 0) {
    _num_rt_args = args;
  }
  assert(_num_rt_args == args, "can't change the number of args");
}
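
// Note: set_frame_size() and set_num_rt_args() above may be called more than
// once while a stub is being generated; the first call records the value and
// every later call merely asserts that it has not changed.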

// Implementation of Runtime1

CodeBlob* Runtime1::_blobs[Runtime1::number_of_ids];
const char *Runtime1::_blob_names[] = {
  RUNTIME1_STUBS(STUB_NAME, LAST_STUB_NAME)
};

#ifndef PRODUCT
// statistics
int Runtime1::_generic_arraycopy_cnt = 0;
int Runtime1::_primitive_arraycopy_cnt = 0;
int Runtime1::_oop_arraycopy_cnt = 0;
int Runtime1::_generic_arraycopystub_cnt = 0;
int Runtime1::_arraycopy_slowcase_cnt = 0;
int Runtime1::_arraycopy_checkcast_cnt = 0;
int Runtime1::_arraycopy_checkcast_attempt_cnt = 0;
int Runtime1::_new_type_array_slowcase_cnt = 0;
int Runtime1::_new_object_array_slowcase_cnt = 0;
int Runtime1::_new_instance_slowcase_cnt = 0;
int Runtime1::_new_multi_array_slowcase_cnt = 0;
int Runtime1::_monitorenter_slowcase_cnt = 0;
int Runtime1::_monitorexit_slowcase_cnt = 0;
int Runtime1::_patch_code_slowcase_cnt = 0;
int Runtime1::_throw_range_check_exception_count = 0;
int Runtime1::_throw_index_exception_count = 0;
int Runtime1::_throw_div0_exception_count = 0;
int Runtime1::_throw_null_pointer_exception_count = 0;
int Runtime1::_throw_class_cast_exception_count = 0;
int Runtime1::_throw_incompatible_class_change_error_count = 0;
int Runtime1::_throw_array_store_exception_count = 0;
int Runtime1::_throw_count = 0;

static int _byte_arraycopy_stub_cnt = 0;
static int _short_arraycopy_stub_cnt = 0;
static int _int_arraycopy_stub_cnt = 0;
static int _long_arraycopy_stub_cnt = 0;
static int _oop_arraycopy_stub_cnt = 0;

address Runtime1::arraycopy_count_address(BasicType type) {
  switch (type) {
  case T_BOOLEAN:
  case T_BYTE:   return (address)&_byte_arraycopy_stub_cnt;
  case T_CHAR:
  case T_SHORT:  return (address)&_short_arraycopy_stub_cnt;
  case T_FLOAT:
  case T_INT:    return (address)&_int_arraycopy_stub_cnt;
  case T_DOUBLE:
  case T_LONG:   return (address)&_long_arraycopy_stub_cnt;
  case T_ARRAY:
  case T_OBJECT: return (address)&_oop_arraycopy_stub_cnt;
  default:
    ShouldNotReachHere();
    return NULL;
  }
}
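
// Note: the address returned above is presumably baked into the generated
// arraycopy stubs so they can bump the matching counter directly; like the
// other statistics here, these counters exist only in non-product builds.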

#endif

// Simple helper to see if the caller of a runtime stub which
// entered the VM has been deoptimized
static bool caller_is_deopted() {
  JavaThread* thread = JavaThread::current();
  RegisterMap reg_map(thread, false);
  frame runtime_frame = thread->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);
  assert(caller_frame.is_compiled_frame(), "must be compiled");
  return caller_frame.is_deoptimized_frame();
}

// Stress deoptimization
static void deopt_caller() {
  if (!caller_is_deopted()) {
    JavaThread* thread = JavaThread::current();
    RegisterMap reg_map(thread, false);
    frame runtime_frame = thread->last_frame();
    frame caller_frame = runtime_frame.sender(&reg_map);
    Deoptimization::deoptimize_frame(thread, caller_frame.id());
    assert(caller_is_deopted(), "Must be deoptimized");
  }
}

void Runtime1::generate_blob_for(BufferBlob* buffer_blob, StubID id) {
  assert(0 <= id && id < number_of_ids, "illegal stub id");
  ResourceMark rm;
  // create code buffer for code storage
  CodeBuffer code(buffer_blob);

  Compilation::setup_code_buffer(&code, 0);

  // create assembler for code generation
  StubAssembler* sasm = new StubAssembler(&code, name_for(id), id);
  // generate code for runtime stub
  OopMapSet* oop_maps;
  oop_maps = generate_code_for(id, sasm);
  assert(oop_maps == NULL || sasm->frame_size() != no_frame_size,
         "if stub has an oop map it must have a valid frame size");

#ifdef ASSERT
  // Make sure that stubs that need oopmaps have them
  switch (id) {
    // These stubs don't need to have an oopmap
    case dtrace_object_alloc_id:
    case g1_pre_barrier_slow_id:
    case g1_post_barrier_slow_id:
    case slow_subtype_check_id:
    case fpu2long_stub_id:
    case unwind_exception_id:
    case counter_overflow_id:
#if defined(SPARC) || defined(PPC)
    case handle_exception_nofpu_id:  // Unused on sparc
#endif
      break;

    // All other stubs should have oopmaps
    default:
      assert(oop_maps != NULL, "must have an oopmap");
  }
#endif

  // align so printing shows nop's instead of random code at the end (SimpleStubs are aligned)
  sasm->align(BytesPerWord);
  // make sure all code is in code buffer
  sasm->flush();
  // create blob - distinguish a few special cases
  CodeBlob* blob = RuntimeStub::new_runtime_stub(name_for(id),
                                                 &code,
                                                 CodeOffsets::frame_never_safe,
                                                 sasm->frame_size(),
                                                 oop_maps,
                                                 sasm->must_gc_arguments());
  // install blob
  assert(blob != NULL, "blob must exist");
  _blobs[id] = blob;
}

void Runtime1::initialize(BufferBlob* blob) {
  // platform-dependent initialization
  initialize_pd();
  // generate stubs
  for (int id = 0; id < number_of_ids; id++) generate_blob_for(blob, (StubID)id);
  // printing
#ifndef PRODUCT
  if (PrintSimpleStubs) {
    ResourceMark rm;
    for (int id = 0; id < number_of_ids; id++) {
      _blobs[id]->print();
      if (_blobs[id]->oop_maps() != NULL) {
        _blobs[id]->oop_maps()->print();
      }
    }
  }
#endif
}

CodeBlob* Runtime1::blob_for(StubID id) {
  assert(0 <= id && id < number_of_ids, "illegal stub id");
  return _blobs[id];
}


const char* Runtime1::name_for(StubID id) {
  assert(0 <= id && id < number_of_ids, "illegal stub id");
  return _blob_names[id];
}

const char* Runtime1::name_for_address(address entry) {
  for (int id = 0; id < number_of_ids; id++) {
    if (entry == entry_for((StubID)id)) return name_for((StubID)id);
  }

#define FUNCTION_CASE(a, f) \
  if ((intptr_t)a == CAST_FROM_FN_PTR(intptr_t, f))  return #f
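
  // For illustration: a use such as FUNCTION_CASE(entry, SharedRuntime::d2i)
  // expands to
  //   if ((intptr_t)entry == CAST_FROM_FN_PTR(intptr_t, SharedRuntime::d2i))
  //     return "SharedRuntime::d2i";
  // i.e. the stringized callee name is returned when the entry address matches.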

  FUNCTION_CASE(entry, os::javaTimeMillis);
  FUNCTION_CASE(entry, os::javaTimeNanos);
  FUNCTION_CASE(entry, SharedRuntime::OSR_migration_end);
  FUNCTION_CASE(entry, SharedRuntime::d2f);
  FUNCTION_CASE(entry, SharedRuntime::d2i);
  FUNCTION_CASE(entry, SharedRuntime::d2l);
  FUNCTION_CASE(entry, SharedRuntime::dcos);
  FUNCTION_CASE(entry, SharedRuntime::dexp);
  FUNCTION_CASE(entry, SharedRuntime::dlog);
  FUNCTION_CASE(entry, SharedRuntime::dlog10);
  FUNCTION_CASE(entry, SharedRuntime::dpow);
  FUNCTION_CASE(entry, SharedRuntime::drem);
  FUNCTION_CASE(entry, SharedRuntime::dsin);
  FUNCTION_CASE(entry, SharedRuntime::dtan);
  FUNCTION_CASE(entry, SharedRuntime::f2i);
  FUNCTION_CASE(entry, SharedRuntime::f2l);
  FUNCTION_CASE(entry, SharedRuntime::frem);
  FUNCTION_CASE(entry, SharedRuntime::l2d);
  FUNCTION_CASE(entry, SharedRuntime::l2f);
  FUNCTION_CASE(entry, SharedRuntime::ldiv);
  FUNCTION_CASE(entry, SharedRuntime::lmul);
  FUNCTION_CASE(entry, SharedRuntime::lrem);
  FUNCTION_CASE(entry, SharedRuntime::dtrace_method_entry);
  FUNCTION_CASE(entry, SharedRuntime::dtrace_method_exit);
  FUNCTION_CASE(entry, is_instance_of);
  FUNCTION_CASE(entry, trace_block_entry);
#ifdef TRACE_HAVE_INTRINSICS
  FUNCTION_CASE(entry, TRACE_TIME_METHOD);
#endif
  FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32());

#undef FUNCTION_CASE

  // Soft float adds more runtime names.
  return pd_name_for_address(entry);
}

JRT_ENTRY(void, Runtime1::new_instance(JavaThread* thread, Klass* klass))
  NOT_PRODUCT(_new_instance_slowcase_cnt++;)

  assert(klass->is_klass(), "not a class");
  instanceKlassHandle h(thread, klass);
  h->check_valid_for_instantiation(true, CHECK);
  // make sure klass is initialized
  h->initialize(CHECK);
  // allocate instance and return via TLS
  oop obj = h->allocate_instance(CHECK);
  thread->set_vm_result(obj);
JRT_END
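
// Note on the pattern above: the JRT_ENTRY wrapper performs the transition
// from compiled code into the VM, and the freshly allocated oop travels back
// to the calling compiled frame via thread->set_vm_result() (thread-local
// storage) rather than as a C++ return value.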

JRT_ENTRY(void, Runtime1::new_type_array(JavaThread* thread, Klass* klass, jint length))
  NOT_PRODUCT(_new_type_array_slowcase_cnt++;)
  // Note: no handle for klass needed since it is not used
  //       anymore after new_typeArray() and no GC can happen before.
  //       (This may have to change if this code changes!)
  assert(klass->is_klass(), "not a class");
  BasicType elt_type = TypeArrayKlass::cast(klass)->element_type();
  oop obj = oopFactory::new_typeArray(elt_type, length, CHECK);
  thread->set_vm_result(obj);
  // This is pretty rare but is a stressful spot for deoptimization, so if
  // DeoptimizeALot is set, force a deopt here to exercise that path.
  if (DeoptimizeALot) {
    deopt_caller();
  }
JRT_END

JRT_ENTRY(void, Runtime1::new_object_array(JavaThread* thread, Klass* array_klass, jint length))
  NOT_PRODUCT(_new_object_array_slowcase_cnt++;)
  // Note: no handle for klass needed since it is not used
  //       anymore after new_objArray() and no GC can happen before.
  //       (This may have to change if this code changes!)
  assert(array_klass->is_klass(), "not a class");
  Klass* elem_klass = ObjArrayKlass::cast(array_klass)->element_klass();
  objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK);
  thread->set_vm_result(obj);
  // This is pretty rare but is a stressful spot for deoptimization, so if
  // DeoptimizeALot is set, force a deopt here to exercise that path.
  if (DeoptimizeALot) {
    deopt_caller();
  }
JRT_END

JRT_ENTRY(void, Runtime1::new_multi_array(JavaThread* thread, Klass* klass, int rank, jint* dims))
  NOT_PRODUCT(_new_multi_array_slowcase_cnt++;)

  assert(klass->is_klass(), "not a class");
  assert(rank >= 1, "rank must be nonzero");
  oop obj = ArrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);
  thread->set_vm_result(obj);
JRT_END


JRT_ENTRY(void, Runtime1::unimplemented_entry(JavaThread* thread, StubID id))
  tty->print_cr("Runtime1::entry_for(%d) returned unimplemented entry point", id);
JRT_END


JRT_ENTRY(void, Runtime1::throw_array_store_exception(JavaThread* thread, oopDesc* obj))
  ResourceMark rm(thread);
  const char* klass_name = obj->klass()->external_name();
  SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArrayStoreException(), klass_name);
JRT_END

// counter_overflow() is called from within C1-compiled methods. The enclosing
// method is the method associated with the top activation record. The inlinee
// method (one possibly inlined into the enclosing method) is passed as an
// argument; to make that possible, its method oop is embedded in the code as
// a constant.
static nmethod* counter_overflow_helper(JavaThread* THREAD, int branch_bci, Method* m) {
  nmethod* osr_nm = NULL;
  methodHandle method(THREAD, m);

  RegisterMap map(THREAD, false);
  frame fr = THREAD->last_frame().sender(&map);
  nmethod* nm = (nmethod*) fr.cb();
  assert(nm != NULL && nm->is_nmethod(), "Sanity check");
  methodHandle enclosing_method(THREAD, nm->method());

  CompLevel level = (CompLevel)nm->comp_level();
  int bci = InvocationEntryBci;
  if (branch_bci != InvocationEntryBci) {
    // Compute destination bci
    address pc = method()->code_base() + branch_bci;
    Bytecodes::Code branch = Bytecodes::code_at(method(), pc);
    int offset = 0;
    switch (branch) {
      case Bytecodes::_if_icmplt: case Bytecodes::_iflt:
      case Bytecodes::_if_icmpgt: case Bytecodes::_ifgt:
      case Bytecodes::_if_icmple: case Bytecodes::_ifle:
      case Bytecodes::_if_icmpge: case Bytecodes::_ifge:
      case Bytecodes::_if_icmpeq: case Bytecodes::_if_acmpeq: case Bytecodes::_ifeq:
      case Bytecodes::_if_icmpne: case Bytecodes::_if_acmpne: case Bytecodes::_ifne:
      case Bytecodes::_ifnull: case Bytecodes::_ifnonnull: case Bytecodes::_goto:
        offset = (int16_t)Bytes::get_Java_u2(pc + 1);
        break;
      case Bytecodes::_goto_w:
        offset = Bytes::get_Java_u4(pc + 1);
        break;
      default: ;
    }
    bci = branch_bci + offset;
  }
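  // For example (hypothetical values): a backward goto at bci 10 whose 16-bit
  // operand decodes to -8 yields a destination bci of 10 + (-8) = 2.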
  assert(!HAS_PENDING_EXCEPTION, "Should not have any exceptions pending");
  osr_nm = CompilationPolicy::policy()->event(enclosing_method, method, branch_bci, bci, level, nm, THREAD);
  assert(!HAS_PENDING_EXCEPTION, "Event handler should not throw any exceptions");
  return osr_nm;
}

JRT_BLOCK_ENTRY(address, Runtime1::counter_overflow(JavaThread* thread, int bci, Method* method))
  nmethod* osr_nm;
  JRT_BLOCK
    osr_nm = counter_overflow_helper(thread, bci, method);
    if (osr_nm != NULL) {
      RegisterMap map(thread, false);
      frame fr = thread->last_frame().sender(&map);
      Deoptimization::deoptimize_frame(thread, fr.id());
    }
  JRT_BLOCK_END
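  // Note: when an OSR nmethod is available, the caller frame has just been
  // deoptimized above and will be re-entered through the interpreter; the
  // stub itself therefore always hands back NULL.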
  return NULL;
JRT_END

extern void vm_exit(int code);

// Enter this method from compiled code handler below. This is where we transition
// to VM mode. This is done as a helper routine so that the method called directly
// from compiled code does not have to transition to VM. This allows the entry
// method to see if the nmethod that we have just looked up a handler for has
// been deoptimized while we were in the vm. This simplifies the assembly code
// in the cpu directories.
//
// We are entering here from exception stub (via the entry method below)
// If there is a compiled exception handler in this method, we will continue there;
// otherwise we will unwind the stack and continue at the caller of top frame method
// Note: we enter in Java using a special JRT wrapper. This wrapper allows us to
// control the area where we can allow a safepoint. After we exit the safepoint area we can
// check to see if the handler we are going to return is now in an nmethod that has
// been deoptimized. If that is the case we return the deopt blob
// unpack_with_exception entry instead. This makes life for the exception blob easier
// because making that same check and diverting is painful from assembly language.
JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* thread, oopDesc* ex, address pc, nmethod*& nm))
  // Reset method handle flag.
  thread->set_is_method_handle_return(false);

  Handle exception(thread, ex);
  nm = CodeCache::find_nmethod(pc);
  assert(nm != NULL, "this is not an nmethod");
  // Adjust the pc as needed.
  if (nm->is_deopt_pc(pc)) {
    RegisterMap map(thread, false);
    frame exception_frame = thread->last_frame().sender(&map);
    // if the frame isn't deopted then pc must not correspond to the caller of last_frame
    assert(exception_frame.is_deoptimized_frame(), "must be deopted");
    pc = exception_frame.pc();
  }
#ifdef ASSERT
  assert(exception.not_null(), "NULL exceptions should be handled by throw_exception");
  assert(exception->is_oop(), "just checking");
  // Check that exception is a subclass of Throwable, otherwise we have a VerifyError
  if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
    if (ExitVMOnVerifyError) vm_exit(-1);
    ShouldNotReachHere();
  }
#endif

  // Check the stack guard pages and reenable them if necessary and there is
  // enough space on the stack to do so.  Use fast exceptions only if the guard
  // pages are enabled.
  bool guard_pages_enabled = thread->stack_yellow_zone_enabled();
  if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();

  if (JvmtiExport::can_post_on_exceptions()) {
    // To ensure correct notification of exception catches and throws
    // we have to deoptimize here.  If we attempted to notify the
    // catches and throws during this exception lookup it's possible
    // we could deoptimize on the way out of the VM and end back in
    // the interpreter at the throw site.  This would result in double
    // notifications since the interpreter would also notify about
    // these same catches and throws as it unwound the frame.

    RegisterMap reg_map(thread);
    frame stub_frame = thread->last_frame();
    frame caller_frame = stub_frame.sender(&reg_map);

    // We don't really want to deoptimize the nmethod itself since we
    // can actually continue in the exception handler ourselves but I
    // don't see an easy way to have the desired effect.
    Deoptimization::deoptimize_frame(thread, caller_frame.id());
    assert(caller_is_deopted(), "Must be deoptimized");

    return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
  }

  // ExceptionCache is used only for exceptions at call sites and not for implicit exceptions
  if (guard_pages_enabled) {
    address fast_continuation = nm->handler_for_exception_and_pc(exception, pc);
    if (fast_continuation != NULL) {
      // Set flag if return address is a method handle call site.
      thread->set_is_method_handle_return(nm->is_method_handle_return(pc));
      return fast_continuation;
    }
  }

  // If the stack guard pages are enabled, check whether there is a handler in
  // the current method.  Otherwise (guard pages disabled), force an unwind and
  // skip the exception cache update (i.e., just leave continuation==NULL).
  address continuation = NULL;
  if (guard_pages_enabled) {

    // New exception handling mechanism can support inlined methods
    // with exception handlers since the mappings are from PC to PC

    // debugging support
    // tracing
    if (TraceExceptions) {
      ttyLocker ttyl;
      ResourceMark rm;
      tty->print_cr("Exception <%s> (" INTPTR_FORMAT ") thrown in compiled method <%s> at PC " INTPTR_FORMAT " for thread " INTPTR_FORMAT,
                    exception->print_value_string(), p2i((address)exception()), nm->method()->print_value_string(), p2i(pc), p2i(thread));
    }
    // for AbortVMOnException flag
    NOT_PRODUCT(Exceptions::debug_check_abort(exception));

    // Clear out the exception oop and pc since looking up an
    // exception handler can cause class loading, which might throw an
    // exception and those fields are expected to be clear during
    // normal bytecode execution.
    thread->clear_exception_oop_and_pc();

    Handle original_exception(thread, exception());

    continuation = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, false, false);
    // If an exception was thrown during exception dispatch, the exception oop may have changed
    thread->set_exception_oop(exception());
    thread->set_exception_pc(pc);

    // the exception cache is used only by non-implicit exceptions
    // Update the exception cache only when there didn't happen
    // another exception during the computation of the compiled
    // exception handler.
    if (continuation != NULL && original_exception() == exception()) {
      nm->add_handler_for_exception_and_pc(exception, pc, continuation);
    }
  }

  thread->set_vm_result(exception());
  // Set flag if return address is a method handle call site.
  thread->set_is_method_handle_return(nm->is_method_handle_return(pc));

  if (TraceExceptions) {
    ttyLocker ttyl;
    ResourceMark rm;
    tty->print_cr("Thread " PTR_FORMAT " continuing at PC " PTR_FORMAT " for exception thrown at PC " PTR_FORMAT,
                  p2i(thread), p2i(continuation), p2i(pc));
  }

  return continuation;
JRT_END

// Enter this method from compiled code only if there is a Java exception handler
// in the method handling the exception.
// We are entering here from exception stub. We don't do a normal VM transition here.
// We do it in a helper. This is so we can check to see if the nmethod we have just
// searched for an exception handler has been deoptimized in the meantime.
address Runtime1::exception_handler_for_pc(JavaThread* thread) {
  oop exception = thread->exception_oop();
  address pc = thread->exception_pc();
  // Still in Java mode
  DEBUG_ONLY(ResetNoHandleMark rnhm);
  nmethod* nm = NULL;
  address continuation = NULL;
  {
    // Enter VM mode by calling the helper
    ResetNoHandleMark rnhm;
    continuation = exception_handler_for_pc_helper(thread, exception, pc, nm);
  }
  // Back in JAVA, use no oops DON'T safepoint

  // Now check to see if the nmethod we were called from is now deoptimized.
  // If so we must return to the deopt blob and deoptimize the nmethod
  if (nm != NULL && caller_is_deopted()) {
    continuation = SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
  }

  assert(continuation != NULL, "no handler found");
  return continuation;
}

JRT_ENTRY(void, Runtime1::throw_range_check_exception(JavaThread* thread, int index))
  NOT_PRODUCT(_throw_range_check_exception_count++;)
  char message[jintAsStringSize];
  sprintf(message, "%d", index);
  SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), message);
JRT_END


JRT_ENTRY(void, Runtime1::throw_index_exception(JavaThread* thread, int index))
  NOT_PRODUCT(_throw_index_exception_count++;)
  char message[16];
  sprintf(message, "%d", index);
  SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IndexOutOfBoundsException(), message);
JRT_END


JRT_ENTRY(void, Runtime1::throw_div0_exception(JavaThread* thread))
  NOT_PRODUCT(_throw_div0_exception_count++;)
  SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
JRT_END


JRT_ENTRY(void, Runtime1::throw_null_pointer_exception(JavaThread* thread))
  NOT_PRODUCT(_throw_null_pointer_exception_count++;)
  SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_NullPointerException());
JRT_END


JRT_ENTRY(void, Runtime1::throw_class_cast_exception(JavaThread* thread, oopDesc* object))
  NOT_PRODUCT(_throw_class_cast_exception_count++;)
  ResourceMark rm(thread);
  char* message = SharedRuntime::generate_class_cast_message(
    thread, object->klass()->external_name());
  SharedRuntime::throw_and_post_jvmti_exception(
    thread, vmSymbols::java_lang_ClassCastException(), message);
JRT_END


JRT_ENTRY(void, Runtime1::throw_incompatible_class_change_error(JavaThread* thread))
  NOT_PRODUCT(_throw_incompatible_class_change_error_count++;)
  ResourceMark rm(thread);
  SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IncompatibleClassChangeError());
JRT_END

JRT_ENTRY_NO_ASYNC(void, Runtime1::monitorenter(JavaThread* thread, oopDesc* obj, BasicObjectLock* lock))
  NOT_PRODUCT(_monitorenter_slowcase_cnt++;)
  if (PrintBiasedLockingStatistics) {
    Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
  }
  Handle h_obj(thread, obj);
  assert(h_obj()->is_oop(), "must be NULL or an object");
  if (UseBiasedLocking) {
    // Retry fast entry if bias is revoked to avoid unnecessary inflation
    ObjectSynchronizer::fast_enter(h_obj, lock->lock(), true, CHECK);
  } else {
    if (UseFastLocking) {
      // When using fast locking, the compiled code has already tried the fast case
      assert(obj == lock->obj(), "must match");
      ObjectSynchronizer::slow_enter(h_obj, lock->lock(), THREAD);
    } else {
      lock->set_obj(obj);
      ObjectSynchronizer::fast_enter(h_obj, lock->lock(), false, THREAD);
    }
  }
JRT_END


JRT_LEAF(void, Runtime1::monitorexit(JavaThread* thread, BasicObjectLock* lock))
  NOT_PRODUCT(_monitorexit_slowcase_cnt++;)
  assert(thread == JavaThread::current(), "threads must correspond");
  assert(thread->last_Java_sp(), "last_Java_sp must be set");
  // monitorexit is non-blocking (leaf routine) => no exceptions can be thrown
  EXCEPTION_MARK;

  oop obj = lock->obj();
  assert(obj->is_oop(), "must be NULL or an object");
  if (UseFastLocking) {
    // When using fast locking, the compiled code has already tried the fast case
    ObjectSynchronizer::slow_exit(obj, lock->lock(), THREAD);
  } else {
    ObjectSynchronizer::fast_exit(obj, lock->lock(), THREAD);
  }
JRT_END

// Cf. OptoRuntime::deoptimize_caller_frame
JRT_ENTRY(void, Runtime1::deoptimize(JavaThread* thread, jint trap_request))
  // Called from within the owner thread, so no need for safepoint
  RegisterMap reg_map(thread, false);
  frame stub_frame = thread->last_frame();
  assert(stub_frame.is_runtime_frame(), "Sanity check");
  frame caller_frame = stub_frame.sender(&reg_map);
  nmethod* nm = caller_frame.cb()->as_nmethod_or_null();
  assert(nm != NULL, "Sanity check");
  methodHandle method(thread, nm->method());
  assert(nm == CodeCache::find_nmethod(caller_frame.pc()), "Should be the same");
  Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request);
  Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);

  if (action == Deoptimization::Action_make_not_entrant) {
    if (nm->make_not_entrant()) {
      if (reason == Deoptimization::Reason_tenured) {
        MethodData* trap_mdo = Deoptimization::get_method_data(thread, method, true /*create_if_missing*/);
        if (trap_mdo != NULL) {
          trap_mdo->inc_tenure_traps();
        }
      }
    }
  }

  // Deoptimize the caller frame.
  Deoptimization::deoptimize_frame(thread, caller_frame.id());
  // Return to the now deoptimized frame.
JRT_END

#ifndef DEOPTIMIZE_WHEN_PATCHING

static Klass* resolve_field_return_klass(methodHandle caller, int bci, TRAPS) {
  Bytecode_field field_access(caller, bci);
  // This can be static or non-static field access
  Bytecodes::Code code = field_access.code();

  // We must load class, initialize class and resolve the field
  fieldDescriptor result; // initialize class if needed
  constantPoolHandle constants(THREAD, caller->constants());
  LinkResolver::resolve_field_access(result, constants, field_access.index(), Bytecodes::java_code(code), CHECK_NULL);
  return result.field_holder();
}

//
// This routine patches sites where a class wasn't loaded or
// initialized at the time the code was generated.  It handles
// references to classes, fields and forcing of initialization.  Most
// of the cases are straightforward and involve simply forcing
// resolution of a class, rewriting the instruction stream with the
// needed constant and replacing the call in this function with the
// patched code.  The case for static field is more complicated since
// the thread which is in the process of initializing a class can
// access its static fields but other threads can't, so the code
// either has to deoptimize when this case is detected or execute a
// check that the current thread is the initializing thread.  The
// current implementation does the latter for getstatic/putstatic:
// the patched code tests the executing thread against the class's
// initializing thread, as described below.
//
// Patches basically look like this:
//
//
// patch_site: jmp patch stub     ;; will be patched
// continue:   ...
//             ...
//             ...
//             ...
//
// They have a stub which looks like this:
//
//             ;; patch body
//             movl <const>, reg           (for class constants)
//        <or> movl [reg1 + <const>], reg  (for field offsets)
//        <or> movl reg, [reg1 + <const>]  (for field offsets)
//             <being_init offset> <bytes to copy> <bytes to skip>
// patch_stub: call Runtime1::patch_code (through a runtime stub)
//             jmp patch_site
//
//
// A normal patch is done by rewriting the patch body, usually a move,
// and then copying it into place over top of the jmp instruction
// being careful to flush caches and doing it in an MP-safe way.  The
// constants following the patch body are used to find various pieces
// of the patch relative to the call site for Runtime1::patch_code.
// The case for getstatic and putstatic is more complicated because
// getstatic and putstatic have special semantics when executing while
// the class is being initialized.  getstatic/putstatic on a class
// which is being_initialized may be executed by the initializing
// thread but other threads have to block when they execute it.  This
// is accomplished in compiled code by executing a test of the current
// thread against the initializing thread of the class.  It's emitted
// as boilerplate in their stub which allows the patched code to be
// executed before it's copied back into the main body of the nmethod.
//
// being_init: get_thread(<tmp reg>)
//             cmpl [reg1 + <init_thread_offset>], <tmp reg>
//             jne patch_stub
//             movl [reg1 + <const>], reg  (for field offsets)  <or>
//             movl reg, [reg1 + <const>]  (for field offsets)
//             jmp continue
//             <being_init offset> <bytes to copy> <bytes to skip>
// patch_stub: jmp Runtime1::patch_code (through a runtime stub)
//             jmp patch_site
//
// If the class is being initialized the patch body is rewritten and
// the patch site is rewritten to jump to being_init, instead of
// patch_stub.  Whenever this code is executed it checks the current
// thread against the initializing thread so other threads will enter
// the runtime and end up blocked waiting for the class to finish
// initializing inside the calls to resolve_field below.  The
// initializing class will continue on its way.  Once the class is
// fully_initialized, the initializing_thread of the class becomes
// NULL, so the next thread to execute this code will fail the test,
// call into patch_code and complete the patching process by copying
// the patch body back into the main part of the nmethod and resume
// executing.
//
//
JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_id))
  NOT_PRODUCT(_patch_code_slowcase_cnt++;)

  ResourceMark rm(thread);
  RegisterMap reg_map(thread, false);
  frame runtime_frame = thread->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);

  // last java frame on stack
  vframeStream vfst(thread, true);
  assert(!vfst.at_end(), "Java frame must exist");

  methodHandle caller_method(THREAD, vfst.method());
  // Note that caller_method->code() may not be same as caller_code because of OSR's
  // Note also that in the presence of inlining it is not guaranteed
  // that caller_method() == caller_code->method()

  int bci = vfst.bci();
  Bytecodes::Code code = caller_method()->java_code_at(bci);

  // this is used by assertions in the access_field_patching_id
  BasicType patch_field_type = T_ILLEGAL;
  bool deoptimize_for_volatile = false;
  bool deoptimize_for_atomic = false;
  int patch_field_offset = -1;
  KlassHandle init_klass(THREAD, NULL); // klass needed by load_klass_patching code
  KlassHandle load_klass(THREAD, NULL); // klass needed by load_klass_patching code
  Handle mirror(THREAD, NULL);          // oop needed by load_mirror_patching code
  Handle appendix(THREAD, NULL);        // oop needed by appendix_patching code
  bool load_klass_or_mirror_patch_id =
    (stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id);

  if (stub_id == Runtime1::access_field_patching_id) {

    Bytecode_field field_access(caller_method, bci);
    fieldDescriptor result; // initialize class if needed
    Bytecodes::Code code = field_access.code();
    constantPoolHandle constants(THREAD, caller_method->constants());
    LinkResolver::resolve_field_access(result, constants, field_access.index(), Bytecodes::java_code(code), CHECK);
    patch_field_offset = result.offset();

    // If we're patching a field which is volatile then at compile time it
    // must not have been known to be volatile, so the generated code isn't
    // correct for a volatile reference.  The nmethod has to be deoptimized
    // so that the code can be regenerated correctly.  This check is only
    // needed for access_field_patching since this is the path for patching
    // field offsets.  load_klass is only used for patching references to
    // oops which don't need special handling in the volatile case.

    deoptimize_for_volatile = result.access_flags().is_volatile();

    // If we are patching a field which should be atomic, then
    // the generated code is not correct either, force deoptimizing.
    // We need to only cover T_LONG and T_DOUBLE fields, as we can
    // break access atomicity only for them.

    // Strictly speaking, the deoptimization on 64-bit platforms
    // is unnecessary, and T_LONG stores on 32-bit platforms need
    // to be handled by special patching code when AlwaysAtomicAccesses
    // becomes a product feature.  At this point, we are still going
    // for the deoptimization for consistency against volatile
    // accesses.

    patch_field_type = result.field_type();
    deoptimize_for_atomic = (AlwaysAtomicAccesses && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG));
  } else if (load_klass_or_mirror_patch_id) {
    Klass* k = NULL;
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
        { Klass* klass = resolve_field_return_klass(caller_method, bci, CHECK);
          init_klass = KlassHandle(THREAD, klass);
          mirror = Handle(THREAD, klass->java_mirror());
        }
        break;
      case Bytecodes::_new:
        { Bytecode_new bnew(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(bnew.index(), CHECK);
        }
        break;
      case Bytecodes::_multianewarray:
        { Bytecode_multianewarray mna(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(mna.index(), CHECK);
        }
        break;
      case Bytecodes::_instanceof:
        { Bytecode_instanceof io(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(io.index(), CHECK);
        }
        break;
      case Bytecodes::_checkcast:
        { Bytecode_checkcast cc(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(cc.index(), CHECK);
        }
        break;
      case Bytecodes::_anewarray:
        { Bytecode_anewarray anew(caller_method(), caller_method->bcp_from(bci));
          Klass* ek = caller_method->constants()->klass_at(anew.index(), CHECK);
          k = ek->array_klass(CHECK);
        }
        break;
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
        {
          Bytecode_loadconstant cc(caller_method, bci);
          oop m = cc.resolve_constant(CHECK);
          mirror = Handle(THREAD, m);
        }
        break;
      default: fatal("unexpected bytecode for load_klass_or_mirror_patch_id");
    }
    // convert to handle
    load_klass = KlassHandle(THREAD, k);
  } else if (stub_id == load_appendix_patching_id) {
    Bytecode_invoke bytecode(caller_method, bci);
    Bytecodes::Code bc = bytecode.invoke_code();

    CallInfo info;
    constantPoolHandle pool(thread, caller_method->constants());
    int index = bytecode.index();
    LinkResolver::resolve_invoke(info, Handle(), pool, index, bc, CHECK);
    appendix = info.resolved_appendix();
    switch (bc) {
      case Bytecodes::_invokehandle: {
        int cache_index = ConstantPool::decode_cpcache_index(index, true);
        assert(cache_index >= 0 && cache_index < pool->cache()->length(), "unexpected cache index");
        pool->cache()->entry_at(cache_index)->set_method_handle(pool, info);
        break;
      }
      case Bytecodes::_invokedynamic: {
        pool->invokedynamic_cp_cache_entry_at(index)->set_dynamic_call(pool, info);
        break;
      }
      default: fatal("unexpected bytecode for load_appendix_patching_id");
    }
  } else {
    ShouldNotReachHere();
  }

  if (deoptimize_for_volatile || deoptimize_for_atomic) {
    // At compile time we assumed the field wasn't volatile/atomic but after
    // loading it turns out it was volatile/atomic so we have to throw the
    // compiled code out and let it be regenerated.
    if (TracePatching) {
      if (deoptimize_for_volatile) {
        tty->print_cr("Deoptimizing for patching volatile field reference");
      }
      if (deoptimize_for_atomic) {
        tty->print_cr("Deoptimizing for patching atomic field reference");
      }
    }

    // It's possible the nmethod was invalidated in the last
    // safepoint, but if it's still alive then make it not_entrant.
    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
    if (nm != NULL) {
      nm->make_not_entrant();
    }

    Deoptimization::deoptimize_frame(thread, caller_frame.id());

    // Return to the now deoptimized frame.
  }

  // Now copy code back
  {
    MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
    //
    // Deoptimization may have happened while we waited for the lock.
    // In that case we don't bother to do any patching we just return
    // and let the deopt happen
    if (!caller_is_deopted()) {
      NativeGeneralJump* jump = nativeGeneralJump_at(caller_frame.pc());
      address instr_pc = jump->jump_destination();
      NativeInstruction* ni = nativeInstruction_at(instr_pc);
      if (ni->is_jump()) {
        // the jump has not been patched yet
        // The jump destination is slow case and therefore not part of the stubs
        // (stubs are only for StaticCalls)

        // format of buffer
        //    ....
        //    instr byte 0     <-- copy_buff
        //    instr byte 1
        //    ..
        //    instr byte n-1
        //      n
        //    ....             <-- call destination

        address stub_location = caller_frame.pc() + PatchingStub::patch_info_offset();
        unsigned char* byte_count = (unsigned char*) (stub_location - 1);
        unsigned char* byte_skip = (unsigned char*) (stub_location - 2);
        unsigned char* being_initialized_entry_offset = (unsigned char*) (stub_location - 3);
        address copy_buff = stub_location - *byte_skip - *byte_count;
        address being_initialized_entry = stub_location - *being_initialized_entry_offset;
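        // Layout recap, matching the reads above: the byte at stub_location-1
        // holds the number of patch-body bytes to copy, stub_location-2 the
        // bytes to skip, and stub_location-3 the offset of the being_initialized
        // entry; copy_buff therefore points at the start of the patch body
        // saved in the stub.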
if ( TracePatching ) {
2014-05-09 16:50:54 -04:00
tty - > print_cr ( " Patching %s at bci %d at address " INTPTR_FORMAT " (%s) " , Bytecodes : : name ( code ) , bci ,
p2i ( instr_pc ) , ( stub_id = = Runtime1 : : access_field_patching_id ) ? " field " : " klass " ) ;
2007-12-01 00:00:00 +00:00
nmethod * caller_code = CodeCache : : find_nmethod ( caller_frame . pc ( ) ) ;
assert ( caller_code ! = NULL , " nmethod not found " ) ;
// NOTE we use pc() not original_pc() because we already know they are
// identical otherwise we'd have never entered this block of code
2015-05-05 16:50:25 +02:00
const ImmutableOopMap * map = caller_code - > oop_map_for_return_address ( caller_frame . pc ( ) ) ;
2007-12-01 00:00:00 +00:00
assert ( map ! = NULL , " null check " ) ;
map - > print ( ) ;
tty - > cr ( ) ;
Disassembler : : decode ( copy_buff , copy_buff + * byte_count , tty ) ;
}
// depending on the code below, do_patch says whether to copy the patch body back into the nmethod
bool do_patch = true ;
if ( stub_id = = Runtime1 : : access_field_patching_id ) {
// The offset may not be correct if the class was not loaded at code generation time.
// Set it now.
NativeMovRegMem * n_move = nativeMovRegMem_at ( copy_buff ) ;
assert ( n_move - > offset ( ) = = 0 | | ( n_move - > offset ( ) = = 4 & & ( patch_field_type = = T_DOUBLE | | patch_field_type = = T_LONG ) ) , " illegal offset for type " ) ;
assert ( patch_field_offset > = 0 , " illegal offset " ) ;
n_move - > add_offset_in_bytes ( patch_field_offset ) ;
        } else if (load_klass_or_mirror_patch_id) {
          // If a getstatic or putstatic is referencing a klass which
          // isn't fully initialized, the patch body isn't copied into
          // place until initialization is complete. In this case the
          // patch site is set up so that any threads besides the
          // initializing thread are forced to come into the VM and
          // block.
          do_patch = (code != Bytecodes::_getstatic && code != Bytecodes::_putstatic) ||
                     InstanceKlass::cast(init_klass())->is_initialized();
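
          // In other words: for getstatic/putstatic the patch body is only
          // copied back once InstanceKlass::is_initialized() holds; until then
          // the site keeps jumping to being_initialized_entry (checked just
          // below), so threads other than the initializing one re-enter the
          // VM and block there.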
          NativeGeneralJump* jump = nativeGeneralJump_at(instr_pc);
          if (jump->jump_destination() == being_initialized_entry) {
            assert(do_patch == true, "initialization must be complete at this point");
          } else {
            // patch the instruction <move reg, klass>
            NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
            assert(n_copy->data() == 0 ||
                   n_copy->data() == (intptr_t)Universe::non_oop_word(),
                   "illegal init value");
            if (stub_id == Runtime1::load_klass_patching_id) {
              assert(load_klass() != NULL, "klass not set");
              n_copy->set_data((intx) (load_klass()));
            } else {
              assert(mirror() != NULL, "klass not set");
              // Don't need a G1 pre-barrier here since we assert above that data isn't an oop.
              n_copy->set_data(cast_from_oop<intx>(mirror()));
            }

            if (TracePatching) {
              Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
            }
          }
        } else if (stub_id == Runtime1::load_appendix_patching_id) {
          NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
          assert(n_copy->data() == 0 ||
                 n_copy->data() == (intptr_t)Universe::non_oop_word(),
                 "illegal init value");
          n_copy->set_data(cast_from_oop<intx>(appendix()));

          if (TracePatching) {
            Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
          }
        } else {
          ShouldNotReachHere();
        }
#if defined(SPARC) || defined(PPC)
        if (load_klass_or_mirror_patch_id ||
            stub_id == Runtime1::load_appendix_patching_id) {
          // Update the location in the nmethod with the proper
          // metadata. When the code was generated, a NULL was stuffed
          // in the metadata table and that table needs to be updated to
          // have the right value. On Intel the value is kept
          // directly in the instruction instead of in the metadata
          // table, so set_data above effectively updated the value.
          nmethod* nm = CodeCache::find_nmethod(instr_pc);
          assert(nm != NULL, "invalid nmethod_pc");
          RelocIterator mds(nm, copy_buff, copy_buff + 1);
          bool found = false;
          while (mds.next() && !found) {
            if (mds.type() == relocInfo::oop_type) {
              assert(stub_id == Runtime1::load_mirror_patching_id ||
                     stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");
              oop_Relocation* r = mds.oop_reloc();
              oop* oop_adr = r->oop_addr();
              *oop_adr = stub_id == Runtime1::load_mirror_patching_id ? mirror() : appendix();
              r->fix_oop_relocation();
              found = true;
            } else if (mds.type() == relocInfo::metadata_type) {
              assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
              metadata_Relocation* r = mds.metadata_reloc();
              Metadata** metadata_adr = r->metadata_addr();
              *metadata_adr = load_klass();
              r->fix_metadata_relocation();
              found = true;
            }
          }
          assert(found, "the metadata must exist!");
        }
#endif
        if (do_patch) {
          // replace instructions
          // first replace the tail, then the call
#ifdef ARM
          if ((load_klass_or_mirror_patch_id ||
               stub_id == Runtime1::load_appendix_patching_id) &&
              nativeMovConstReg_at(copy_buff)->is_pc_relative()) {
            nmethod* nm = CodeCache::find_nmethod(instr_pc);
            address addr = NULL;
            assert(nm != NULL, "invalid nmethod_pc");
            RelocIterator mds(nm, copy_buff, copy_buff + 1);
            while (mds.next()) {
              if (mds.type() == relocInfo::oop_type) {
                assert(stub_id == Runtime1::load_mirror_patching_id ||
                       stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");
                oop_Relocation* r = mds.oop_reloc();
                addr = (address)r->oop_addr();
                break;
              } else if (mds.type() == relocInfo::metadata_type) {
                assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
                metadata_Relocation* r = mds.metadata_reloc();
                addr = (address)r->metadata_addr();
                break;
              }
            }
            assert(addr != NULL, "metadata relocation must exist");
            copy_buff -= *byte_count;
            NativeMovConstReg* n_copy2 = nativeMovConstReg_at(copy_buff);
            n_copy2->set_pc_relative_offset(addr, instr_pc);
          }
#endif

          for (int i = NativeCall::instruction_size; i < *byte_count; i++) {
            address ptr = copy_buff + i;
            int a_byte = (*ptr) & 0xFF;
            address dst = instr_pc + i;
            *(unsigned char*)dst = (unsigned char) a_byte;
          }
          ICache::invalidate_range(instr_pc, *byte_count);
          NativeGeneralJump::replace_mt_safe(instr_pc, copy_buff);
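          // Note the ordering above: the tail of the patch body (everything
          // past the first jump-sized chunk) is copied while other threads can
          // still only reach the site through the original jump; only then is
          // the head rewritten MT-safely, so the complete new instruction
          // sequence becomes visible atomically.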

          if (load_klass_or_mirror_patch_id ||
              stub_id == Runtime1::load_appendix_patching_id) {
            relocInfo::relocType rtype =
              (stub_id == Runtime1::load_klass_patching_id) ?
                                   relocInfo::metadata_type :
                                   relocInfo::oop_type;
            // update relocInfo to metadata
            nmethod* nm = CodeCache::find_nmethod(instr_pc);
            assert(nm != NULL, "invalid nmethod_pc");

            // The old patch site is now a move instruction so update
            // the reloc info so that it will get updated during
            // future GCs.
            RelocIterator iter(nm, (address)instr_pc, (address)(instr_pc + 1));
            relocInfo::change_reloc_info_for_address(&iter, (address) instr_pc,
                                                     relocInfo::none, rtype);
#ifdef SPARC
            // Sparc takes two relocations for a metadata so update the second one.
            address instr_pc2 = instr_pc + NativeMovConstReg::add_offset;
            RelocIterator iter2(nm, instr_pc2, instr_pc2 + 1);
            relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2,
                                                     relocInfo::none, rtype);
#endif
#ifdef PPC
          { address instr_pc2 = instr_pc + NativeMovConstReg::lo_offset;
            RelocIterator iter2(nm, instr_pc2, instr_pc2 + 1);
            relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2,
                                                     relocInfo::none, rtype);
          }
#endif
          }
        } else {
          ICache::invalidate_range(copy_buff, *byte_count);
          NativeGeneralJump::insert_unconditional(instr_pc, being_initialized_entry);
        }
      }
    }
  }

  // If we are patching in a non-perm oop, make sure the nmethod
  // is on the right list.
  if (ScavengeRootsInCode && ((mirror.not_null() && mirror()->is_scavengable()) ||
                              (appendix.not_null() && appendix->is_scavengable()))) {
    MutexLockerEx ml_code(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
    guarantee(nm != NULL, "only nmethods can contain non-perm oops");
    if (!nm->on_scavenge_root_list()) {
      CodeCache::add_scavenge_root_nmethod(nm);
    }

    // Since we've patched some oops in the nmethod,
    // (re)register it with the heap.
    Universe::heap()->register_nmethod(nm);
  }
JRT_END

#else // DEOPTIMIZE_WHEN_PATCHING

JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_id))
  RegisterMap reg_map(thread, false);

  NOT_PRODUCT(_patch_code_slowcase_cnt++;)
  if (TracePatching) {
    tty->print_cr("Deoptimizing because patch is needed");
  }

  frame runtime_frame = thread->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);

  // It's possible the nmethod was invalidated in the last
  // safepoint, but if it's still alive then make it not_entrant.
  nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
  if (nm != NULL) {
    nm->make_not_entrant();
  }

  Deoptimization::deoptimize_frame(thread, caller_frame.id());

  // Return to the now deoptimized frame.
JRT_END

#endif // DEOPTIMIZE_WHEN_PATCHING
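
// Under DEOPTIMIZE_WHEN_PATCHING the slow case never patches compiled code in
// place: it just makes the nmethod not_entrant and deoptimizes the caller, so
// the method is regenerated with the now-resolved constant. This trades
// recompilation cost for not having to reason about concurrent instruction
// patching.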

//
// Entry point for compiled code. We want to patch an nmethod.
// We don't do a normal VM transition here because we want to
// know after the patching is complete and any safepoint(s) are taken
// if the calling nmethod was deoptimized. We do this by calling a
// helper method which does the normal VM transition; when it
// completes we can check for deoptimization. This simplifies the
// assembly code in the cpu directories.
//
int Runtime1::move_klass_patching(JavaThread* thread) {
//
// NOTE: we are still in Java
//
  Thread* THREAD = thread;
  debug_only(NoHandleMark nhm;)
  {
    // Enter VM mode
    ResetNoHandleMark rnhm;
    patch_code(thread, load_klass_patching_id);
  }

  // Back in JAVA, use no oops, DON'T safepoint
  // Return true if calling code is deoptimized
  return caller_is_deopted();
}
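
// A sketch of the stub-side protocol the comment above describes (illustrative
// pseudocode for the platform-specific patching stubs; register and label
// names are assumptions, not any particular port's code):
//
//   call  move_klass_patching(thread)   // int result, e.g. in rax
//   test  result
//   jnz   deopt_blob_entry              // caller was deoptimized; unwind
//   // otherwise fall through and re-execute the now-patched instruction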

int Runtime1::move_mirror_patching(JavaThread* thread) {
//
// NOTE: we are still in Java
//
  Thread* THREAD = thread;
  debug_only(NoHandleMark nhm;)
  {
    // Enter VM mode
    ResetNoHandleMark rnhm;
    patch_code(thread, load_mirror_patching_id);
  }

  // Back in JAVA, use no oops, DON'T safepoint
  // Return true if calling code is deoptimized
  return caller_is_deopted();
}

int Runtime1::move_appendix_patching(JavaThread* thread) {
//
// NOTE: we are still in Java
//
  Thread* THREAD = thread;
  debug_only(NoHandleMark nhm;)
  {
    // Enter VM mode
    ResetNoHandleMark rnhm;
    patch_code(thread, load_appendix_patching_id);
  }

  // Back in JAVA, use no oops, DON'T safepoint
  // Return true if calling code is deoptimized
  return caller_is_deopted();
}

//
// Entry point for compiled code. We want to patch an nmethod.
// We don't do a normal VM transition here because we want to
// know after the patching is complete and any safepoint(s) are taken
// if the calling nmethod was deoptimized. We do this by calling a
// helper method which does the normal VM transition; when it
// completes we can check for deoptimization. This simplifies the
// assembly code in the cpu directories.
//
int Runtime1::access_field_patching(JavaThread* thread) {
//
// NOTE: we are still in Java
//
  Thread* THREAD = thread;
  debug_only(NoHandleMark nhm;)
  {
    // Enter VM mode
    ResetNoHandleMark rnhm;
    patch_code(thread, access_field_patching_id);
  }

  // Back in JAVA, use no oops, DON'T safepoint
  // Return true if calling code is deoptimized
  return caller_is_deopted();
}

JRT_LEAF(void, Runtime1::trace_block_entry(jint block_id))
  // for now we just print out the block id
  tty->print("%d ", block_id);
JRT_END

// Array copy return codes.
enum {
  ac_failed = -1, // arraycopy failed
  ac_ok = 0       // arraycopy succeeded
};

// Below length is the # elements copied.
template <class T> int obj_arraycopy_work(oopDesc* src, T* src_addr,
                                          oopDesc* dst, T* dst_addr,
                                          int length) {
  // For performance reasons, we assume we are using a card marking write
  // barrier. The assert will fail if this is not the case.
  // Note that we use the non-virtual inlineable variant of write_ref_array.
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
  assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
  if (src == dst) {
    // same object, no check
    bs->write_ref_array_pre(dst_addr, length);
    Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
    bs->write_ref_array((HeapWord*)dst_addr, length);
    return ac_ok;
  } else {
    Klass* bound = ObjArrayKlass::cast(dst->klass())->element_klass();
    Klass* stype = ObjArrayKlass::cast(src->klass())->element_klass();
    if (stype == bound || stype->is_subtype_of(bound)) {
      // Elements are guaranteed to be subtypes, so no check necessary
      bs->write_ref_array_pre(dst_addr, length);
      Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
      bs->write_ref_array((HeapWord*)dst_addr, length);
      return ac_ok;
    }
  }
  return ac_failed;
}
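
// Note on the subtype test above: when the source element type is a subtype
// of the destination's element type, every copied element is assignable, so
// no per-element store check is needed; otherwise ac_failed sends the caller
// back to the full checked arraycopy path.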

// fast and direct copy of arrays; returning -1 means that an exception may be thrown
// and we did not copy anything
JRT_LEAF(int, Runtime1::arraycopy(oopDesc* src, int src_pos, oopDesc* dst, int dst_pos, int length))
#ifndef PRODUCT
  _generic_arraycopy_cnt++;        // Slow-path oop array copy
#endif

  if (src == NULL || dst == NULL || src_pos < 0 || dst_pos < 0 || length < 0) return ac_failed;
  if (!dst->is_array() || !src->is_array()) return ac_failed;
  if ((unsigned int) arrayOop(src)->length() < (unsigned int)src_pos + (unsigned int)length) return ac_failed;
  if ((unsigned int) arrayOop(dst)->length() < (unsigned int)dst_pos + (unsigned int)length) return ac_failed;
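  // Note: the unsigned comparisons above also catch int overflow of
  // pos + length: a sum that wraps negative becomes a huge unsigned value
  // and fails the length check, so no separate overflow test is needed.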

  if (length == 0) return ac_ok;
  if (src->is_typeArray()) {
    Klass* klass_oop = src->klass();
    if (klass_oop != dst->klass()) return ac_failed;
    TypeArrayKlass* klass = TypeArrayKlass::cast(klass_oop);
    const int l2es = klass->log2_element_size();
    const int ihs = klass->array_header_in_bytes() / wordSize;
    char* src_addr = (char*) ((oopDesc**)src + ihs) + (src_pos << l2es);
    char* dst_addr = (char*) ((oopDesc**)dst + ihs) + (dst_pos << l2es);
    // Potential problem: memmove is not guaranteed to be word atomic
    // Revisit in Merlin
    memmove(dst_addr, src_addr, length << l2es);
    return ac_ok;
  } else if (src->is_objArray() && dst->is_objArray()) {
    if (UseCompressedOops) {
      narrowOop *src_addr = objArrayOop(src)->obj_at_addr<narrowOop>(src_pos);
      narrowOop *dst_addr = objArrayOop(dst)->obj_at_addr<narrowOop>(dst_pos);
      return obj_arraycopy_work(src, src_addr, dst, dst_addr, length);
    } else {
      oop *src_addr = objArrayOop(src)->obj_at_addr<oop>(src_pos);
      oop *dst_addr = objArrayOop(dst)->obj_at_addr<oop>(dst_pos);
      return obj_arraycopy_work(src, src_addr, dst, dst_addr, length);
    }
  }
  return ac_failed;
JRT_END

JRT_LEAF(void, Runtime1::primitive_arraycopy(HeapWord* src, HeapWord* dst, int length))
#ifndef PRODUCT
  _primitive_arraycopy_cnt++;
#endif

  if (length == 0) return;
  // Not guaranteed to be word atomic, but that doesn't matter
  // for anything but an oop array, which is covered by oop_arraycopy.
  Copy::conjoint_jbytes(src, dst, length);
JRT_END

JRT_LEAF(void, Runtime1::oop_arraycopy(HeapWord* src, HeapWord* dst, int num))
#ifndef PRODUCT
  _oop_arraycopy_cnt++;
#endif

  if (num == 0) return;
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
  assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
  if (UseCompressedOops) {
    bs->write_ref_array_pre((narrowOop*)dst, num);
    Copy::conjoint_oops_atomic((narrowOop*) src, (narrowOop*) dst, num);
  } else {
    bs->write_ref_array_pre((oop*)dst, num);
    Copy::conjoint_oops_atomic((oop*) src, (oop*) dst, num);
  }
  bs->write_ref_array(dst, num);
JRT_END
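
// The pre-barrier/copy/post-barrier sequence above is the usual card-marking
// pattern: write_ref_array_pre lets a collector such as G1 record the values
// about to be overwritten (SATB), and write_ref_array afterwards dirties the
// covered region so a later remembered-set scan revisits it.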

JRT_LEAF(int, Runtime1::is_instance_of(oopDesc* mirror, oopDesc* obj))
  // had to return int instead of bool, otherwise there may be a mismatch
  // between the C calling convention and the Java one.
  // e.g., on x86, GCC may clear only %al when returning a bool false, but
  // JVM takes the whole %eax as the return value, which may misinterpret
  // the return value as a boolean true.

  assert(mirror != NULL, "should null-check on mirror before calling");
  Klass* k = java_lang_Class::as_Klass(mirror);
  return (k != NULL && obj != NULL && obj->is_a(k)) ? 1 : 0;
JRT_END
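
// Illustration of the calling-convention mismatch described above (values are
// hypothetical): a callee returning bool false may write only %al = 0 while
// the rest of %eax still holds stale bits, e.g. 0x12345600; a caller that
// reads all of %eax then sees a non-zero "true". Returning a full int 0/1
// avoids depending on the narrow-register convention.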

JRT_ENTRY(void, Runtime1::predicate_failed_trap(JavaThread* thread))
  ResourceMark rm;

  assert(!TieredCompilation, "incompatible with tiered compilation");

  RegisterMap reg_map(thread, false);
  frame runtime_frame = thread->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);

  nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
  assert(nm != NULL, "no more nmethod?");
  nm->make_not_entrant();

  methodHandle m(nm->method());
  MethodData* mdo = m->method_data();

  if (mdo == NULL && !HAS_PENDING_EXCEPTION) {
    // Build an MDO. Ignore errors like OutOfMemory;
    // that simply means we won't have an MDO to update.
    Method::build_interpreter_method_data(m, THREAD);
    if (HAS_PENDING_EXCEPTION) {
      assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
      CLEAR_PENDING_EXCEPTION;
    }
    mdo = m->method_data();
  }

  if (mdo != NULL) {
    mdo->inc_trap_count(Deoptimization::Reason_none);
  }

  if (TracePredicateFailedTraps) {
    stringStream ss1, ss2;
    vframeStream vfst(thread);
    methodHandle inlinee = methodHandle(vfst.method());
    inlinee->print_short_name(&ss1);
    m->print_short_name(&ss2);
    tty->print_cr("Predicate failed trap in method %s at bci %d inlined in %s at pc " INTPTR_FORMAT, ss1.as_string(), vfst.bci(), ss2.as_string(), p2i(caller_frame.pc()));
  }

  Deoptimization::deoptimize_frame(thread, caller_frame.id());
JRT_END

#ifndef PRODUCT
void Runtime1::print_statistics() {
  tty->print_cr("C1 Runtime statistics:");
  tty->print_cr(" _resolve_invoke_virtual_cnt:     %d", SharedRuntime::_resolve_virtual_ctr);
  tty->print_cr(" _resolve_invoke_opt_virtual_cnt: %d", SharedRuntime::_resolve_opt_virtual_ctr);
  tty->print_cr(" _resolve_invoke_static_cnt:      %d", SharedRuntime::_resolve_static_ctr);
  tty->print_cr(" _handle_wrong_method_cnt:        %d", SharedRuntime::_wrong_method_ctr);
  tty->print_cr(" _ic_miss_cnt:                    %d", SharedRuntime::_ic_miss_ctr);
  tty->print_cr(" _generic_arraycopy_cnt:          %d", _generic_arraycopy_cnt);
  tty->print_cr(" _generic_arraycopystub_cnt:      %d", _generic_arraycopystub_cnt);
  tty->print_cr(" _byte_arraycopy_cnt:             %d", _byte_arraycopy_stub_cnt);
  tty->print_cr(" _short_arraycopy_cnt:            %d", _short_arraycopy_stub_cnt);
  tty->print_cr(" _int_arraycopy_cnt:              %d", _int_arraycopy_stub_cnt);
  tty->print_cr(" _long_arraycopy_cnt:             %d", _long_arraycopy_stub_cnt);
  tty->print_cr(" _primitive_arraycopy_cnt:        %d", _primitive_arraycopy_cnt);
  tty->print_cr(" _oop_arraycopy_cnt (C):          %d", Runtime1::_oop_arraycopy_cnt);
  tty->print_cr(" _oop_arraycopy_cnt (stub):       %d", _oop_arraycopy_stub_cnt);
  tty->print_cr(" _arraycopy_slowcase_cnt:         %d", _arraycopy_slowcase_cnt);
  tty->print_cr(" _arraycopy_checkcast_cnt:        %d", _arraycopy_checkcast_cnt);
  tty->print_cr(" _arraycopy_checkcast_attempt_cnt:%d", _arraycopy_checkcast_attempt_cnt);

  tty->print_cr(" _new_type_array_slowcase_cnt:    %d", _new_type_array_slowcase_cnt);
  tty->print_cr(" _new_object_array_slowcase_cnt:  %d", _new_object_array_slowcase_cnt);
  tty->print_cr(" _new_instance_slowcase_cnt:      %d", _new_instance_slowcase_cnt);
  tty->print_cr(" _new_multi_array_slowcase_cnt:   %d", _new_multi_array_slowcase_cnt);
  tty->print_cr(" _monitorenter_slowcase_cnt:      %d", _monitorenter_slowcase_cnt);
  tty->print_cr(" _monitorexit_slowcase_cnt:       %d", _monitorexit_slowcase_cnt);
  tty->print_cr(" _patch_code_slowcase_cnt:        %d", _patch_code_slowcase_cnt);

  tty->print_cr(" _throw_range_check_exception_count:            %d:", _throw_range_check_exception_count);
  tty->print_cr(" _throw_index_exception_count:                  %d:", _throw_index_exception_count);
  tty->print_cr(" _throw_div0_exception_count:                   %d:", _throw_div0_exception_count);
  tty->print_cr(" _throw_null_pointer_exception_count:           %d:", _throw_null_pointer_exception_count);
  tty->print_cr(" _throw_class_cast_exception_count:             %d:", _throw_class_cast_exception_count);
  tty->print_cr(" _throw_incompatible_class_change_error_count:  %d:", _throw_incompatible_class_change_error_count);
  tty->print_cr(" _throw_array_store_exception_count:            %d:", _throw_array_store_exception_count);
  tty->print_cr(" _throw_count:                                  %d:", _throw_count);

  SharedRuntime::print_ic_miss_histogram();
  tty->cr();
}
#endif // PRODUCT