/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
2010-11-23 13:22:55 -08:00
# include "precompiled.hpp"
2012-11-30 15:23:16 -08:00
# include "asm/assembler.hpp"
# include "asm/assembler.inline.hpp"
2015-05-13 15:16:06 +02:00
# include "gc/shared/cardTableModRefBS.hpp"
# include "gc/shared/collectedHeap.inline.hpp"
2010-11-23 13:22:55 -08:00
# include "interpreter/interpreter.hpp"
# include "memory/resourceArea.hpp"
# include "prims/methodHandles.hpp"
# include "runtime/biasedLocking.hpp"
# include "runtime/interfaceSupport.hpp"
# include "runtime/objectMonitor.hpp"
# include "runtime/os.hpp"
# include "runtime/sharedRuntime.hpp"
# include "runtime/stubRoutines.hpp"
2013-01-23 13:02:39 -05:00
# include "utilities/macros.hpp"
# if INCLUDE_ALL_GCS
2015-05-13 15:16:06 +02:00
# include "gc/g1/g1CollectedHeap.inline.hpp"
# include "gc/g1/g1SATBCardTableModRefBS.hpp"
# include "gc/g1/heapRegion.hpp"
2013-01-23 13:02:39 -05:00
# endif // INCLUDE_ALL_GCS
2007-12-01 00:00:00 +00:00
2012-07-24 10:51:00 -07:00
# ifdef PRODUCT
# define BLOCK_COMMENT(str) /* nothing */
# define STOP(error) stop(error)
# else
# define BLOCK_COMMENT(str) block_comment(str)
# define STOP(error) block_comment(error); stop(error)
# endif
# define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
2007-12-01 00:00:00 +00:00
// Implementation of AddressLiteral
2015-05-08 11:49:20 -07:00
// A 2-D table for managing compressed displacement(disp8) on EVEX enabled platforms.
unsigned char tuple_table [ Assembler : : EVEX_ETUP + 1 ] [ Assembler : : AVX_512bit + 1 ] = {
// -----------------Table 4.5 -------------------- //
16 , 32 , 64 , // EVEX_FV(0)
4 , 4 , 4 , // EVEX_FV(1) - with Evex.b
16 , 32 , 64 , // EVEX_FV(2) - with Evex.w
8 , 8 , 8 , // EVEX_FV(3) - with Evex.w and Evex.b
8 , 16 , 32 , // EVEX_HV(0)
4 , 4 , 4 , // EVEX_HV(1) - with Evex.b
// -----------------Table 4.6 -------------------- //
16 , 32 , 64 , // EVEX_FVM(0)
1 , 1 , 1 , // EVEX_T1S(0)
2 , 2 , 2 , // EVEX_T1S(1)
4 , 4 , 4 , // EVEX_T1S(2)
8 , 8 , 8 , // EVEX_T1S(3)
4 , 4 , 4 , // EVEX_T1F(0)
8 , 8 , 8 , // EVEX_T1F(1)
8 , 8 , 8 , // EVEX_T2(0)
0 , 16 , 16 , // EVEX_T2(1)
0 , 16 , 16 , // EVEX_T4(0)
0 , 0 , 32 , // EVEX_T4(1)
0 , 0 , 32 , // EVEX_T8(0)
8 , 16 , 32 , // EVEX_HVM(0)
4 , 8 , 16 , // EVEX_QVM(0)
2 , 4 , 8 , // EVEX_OVM(0)
16 , 16 , 16 , // EVEX_M128(0)
8 , 32 , 64 , // EVEX_DUP(0)
0 , 0 , 0 // EVEX_NTUP
} ;
2007-12-01 00:00:00 +00:00
// Records the literal target address and derives the matching relocation
// specification from the requested relocation type.
AddressLiteral::AddressLiteral(address target, relocInfo::relocType rtype) {
  _is_lval = false;
  _target = target;
  switch (rtype) {
  case relocInfo::oop_type:
  case relocInfo::metadata_type:
    // Oops are a special case. Normally they would be their own section
    // but in cases like icBuffer they are literals in the code stream that
    // we don't have a section for. We use none so that we get a literal address
    // which is always patchable.
    break;
  case relocInfo::external_word_type:
    _rspec = external_word_Relocation::spec(target);
    break;
  case relocInfo::internal_word_type:
    _rspec = internal_word_Relocation::spec(target);
    break;
  case relocInfo::opt_virtual_call_type:
    _rspec = opt_virtual_call_Relocation::spec();
    break;
  case relocInfo::static_call_type:
    _rspec = static_call_Relocation::spec();
    break;
  case relocInfo::runtime_call_type:
    _rspec = runtime_call_Relocation::spec();
    break;
  case relocInfo::poll_type:
  case relocInfo::poll_return_type:
    _rspec = Relocation::spec_simple(rtype);
    break;
  case relocInfo::none:
    // No relocation record needed; the address stands alone.
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}
// Implementation of Address
# ifdef _LP64
2008-08-27 00:21:55 -07:00
// 64-bit: array addresses are materialized differently (via a scratch
// register), so this form must never be reached here.
Address Address::make_array(ArrayAddress adr) {
  // Not implementable on 64bit machines
  // Should have been handled higher up the call chain.
  ShouldNotReachHere();
  return Address();
}
// exceedingly dangerous constructor: builds an absolute-displacement Address
// with no base/index register, attaching a relocation derived from rtype.
Address::Address(int disp, address loc, relocInfo::relocType rtype) {
  _base  = noreg;
  _index = noreg;
  _scale = no_scale;
  _disp  = disp;
  switch (rtype) {
    case relocInfo::external_word_type:
      _rspec = external_word_Relocation::spec(loc);
      break;
    case relocInfo::internal_word_type:
      _rspec = internal_word_Relocation::spec(loc);
      break;
    case relocInfo::runtime_call_type:
      // HMM
      _rspec = runtime_call_Relocation::spec();
      break;
    case relocInfo::poll_type:
    case relocInfo::poll_return_type:
      _rspec = Relocation::spec_simple(rtype);
      break;
    case relocInfo::none:
      break;
    default:
      ShouldNotReachHere();
  }
}
2008-08-27 00:21:55 -07:00
# else // LP64
// 32-bit: combine the literal base address and the scaled index of an
// ArrayAddress into a single absolute Address carrying the base's relocation.
Address Address::make_array(ArrayAddress adr) {
  AddressLiteral base = adr.base();
  Address index = adr.index();
  assert(index._disp == 0, "must not have disp"); // maybe it can?
  Address array(index._base, index._index, index._scale, (intptr_t) base.target());
  array._rspec = base._rspec;
  return array;
}
// exceedingly dangerous constructor: treats the raw code address itself as the
// absolute displacement, with the caller-supplied relocation attached.
Address::Address(address loc, RelocationHolder spec) {
  _base  = noreg;
  _index = noreg;
  _scale = no_scale;
  _disp  = (intptr_t) loc;
  _rspec = spec;
}
# endif // _LP64
2007-12-01 00:00:00 +00:00
// Convert the raw encoding form into the form expected by the constructor for
// Address. An index of 4 (rsp) corresponds to having no index, so convert
// that to noreg for the Address constructor.
6964458: Reimplement class meta-data storage to use native memory
Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Co-authored-by: Stefan Karlsson <stefan.karlsson@oracle.com>
Co-authored-by: Mikael Gerdin <mikael.gerdin@oracle.com>
Co-authored-by: Tom Rodriguez <tom.rodriguez@oracle.com>
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
2012-09-01 13:25:18 -04:00
Address Address : : make_raw ( int base , int index , int scale , int disp , relocInfo : : relocType disp_reloc ) {
2009-03-09 03:17:11 -07:00
RelocationHolder rspec ;
6964458: Reimplement class meta-data storage to use native memory
Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Co-authored-by: Stefan Karlsson <stefan.karlsson@oracle.com>
Co-authored-by: Mikael Gerdin <mikael.gerdin@oracle.com>
Co-authored-by: Tom Rodriguez <tom.rodriguez@oracle.com>
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
2012-09-01 13:25:18 -04:00
if ( disp_reloc ! = relocInfo : : none ) {
rspec = Relocation : : spec_simple ( disp_reloc ) ;
2009-03-09 03:17:11 -07:00
}
2007-12-01 00:00:00 +00:00
bool valid_index = index ! = rsp - > encoding ( ) ;
if ( valid_index ) {
Address madr ( as_Register ( base ) , as_Register ( index ) , ( Address : : ScaleFactor ) scale , in_ByteSize ( disp ) ) ;
2009-03-09 03:17:11 -07:00
madr . _rspec = rspec ;
2007-12-01 00:00:00 +00:00
return madr ;
} else {
Address madr ( as_Register ( base ) , noreg , Address : : no_scale , in_ByteSize ( disp ) ) ;
2009-03-09 03:17:11 -07:00
madr . _rspec = rspec ;
2007-12-01 00:00:00 +00:00
return madr ;
}
}
// Implementation of Assembler
2008-08-27 00:21:55 -07:00
2007-12-01 00:00:00 +00:00
int AbstractAssembler : : code_fill_byte ( ) {
return ( u_char ) ' \xF4 ' ; // hlt
}
2008-08-27 00:21:55 -07:00
// make this go away someday
void Assembler : : emit_data ( jint data , relocInfo : : relocType rtype , int format ) {
if ( rtype = = relocInfo : : none )
2015-05-08 11:49:20 -07:00
emit_int32 ( data ) ;
else
emit_data ( data , Relocation : : spec_simple ( rtype ) , format ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : emit_data ( jint data , RelocationHolder const & rspec , int format ) {
assert ( imm_operand = = 0 , " default format must be immediate in this file " ) ;
2007-12-01 00:00:00 +00:00
assert ( inst_mark ( ) ! = NULL , " must be inside InstructionMark " ) ;
if ( rspec . type ( ) ! = relocInfo : : none ) {
# ifdef ASSERT
check_relocation ( rspec , format ) ;
# endif
// Do not use AbstractAssembler::relocate, which is not intended for
// embedded words. Instead, relocate to the enclosing instruction.
// hack. call32 is too wide for mask so use disp32
if ( format = = call32_operand )
code_section ( ) - > relocate ( inst_mark ( ) , rspec , disp32_operand ) ;
else
code_section ( ) - > relocate ( inst_mark ( ) , rspec , format ) ;
}
2013-01-07 14:08:28 -08:00
emit_int32 ( data ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
// Reduces a register encoding to its low 3 bits for the ModRM/SIB fields;
// the high bit (registers 8-15) is carried by the REX prefix instead.
static int encode(Register r) {
  int enc = r->encoding();
  if (enc >= 8) {
    enc -= 8;
  }
  return enc;
}
void Assembler : : emit_arith_b ( int op1 , int op2 , Register dst , int imm8 ) {
2008-08-27 00:21:55 -07:00
assert ( dst - > has_byte_register ( ) , " must have byte register " ) ;
2007-12-01 00:00:00 +00:00
assert ( isByte ( op1 ) & & isByte ( op2 ) , " wrong opcode " ) ;
assert ( isByte ( imm8 ) , " not a byte " ) ;
assert ( ( op1 & 0x01 ) = = 0 , " should be 8bit operation " ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( op1 ) ;
emit_int8 ( op2 | encode ( dst ) ) ;
emit_int8 ( imm8 ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : emit_arith ( int op1 , int op2 , Register dst , int32_t imm32 ) {
2007-12-01 00:00:00 +00:00
assert ( isByte ( op1 ) & & isByte ( op2 ) , " wrong opcode " ) ;
assert ( ( op1 & 0x01 ) = = 1 , " should be 32bit operation " ) ;
assert ( ( op1 & 0x02 ) = = 0 , " sign-extension bit should not be set " ) ;
if ( is8bit ( imm32 ) ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( op1 | 0x02 ) ; // set sign bit
emit_int8 ( op2 | encode ( dst ) ) ;
emit_int8 ( imm32 & 0xFF ) ;
2007-12-01 00:00:00 +00:00
} else {
2012-12-20 18:53:44 -08:00
emit_int8 ( op1 ) ;
emit_int8 ( op2 | encode ( dst ) ) ;
2013-01-07 14:08:28 -08:00
emit_int32 ( imm32 ) ;
2007-12-01 00:00:00 +00:00
}
}
2012-02-15 21:37:49 -08:00
// Force generation of a 4 byte immediate value even if it fits into 8bit
void Assembler : : emit_arith_imm32 ( int op1 , int op2 , Register dst , int32_t imm32 ) {
assert ( isByte ( op1 ) & & isByte ( op2 ) , " wrong opcode " ) ;
assert ( ( op1 & 0x01 ) = = 1 , " should be 32bit operation " ) ;
assert ( ( op1 & 0x02 ) = = 0 , " sign-extension bit should not be set " ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( op1 ) ;
emit_int8 ( op2 | encode ( dst ) ) ;
2013-01-07 14:08:28 -08:00
emit_int32 ( imm32 ) ;
2012-02-15 21:37:49 -08:00
}
2007-12-01 00:00:00 +00:00
// immediate-to-memory forms
2008-08-27 00:21:55 -07:00
void Assembler : : emit_arith_operand ( int op1 , Register rm , Address adr , int32_t imm32 ) {
2007-12-01 00:00:00 +00:00
assert ( ( op1 & 0x01 ) = = 1 , " should be 32bit operation " ) ;
assert ( ( op1 & 0x02 ) = = 0 , " sign-extension bit should not be set " ) ;
if ( is8bit ( imm32 ) ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( op1 | 0x02 ) ; // set sign bit
2007-12-01 00:00:00 +00:00
emit_operand ( rm , adr , 1 ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( imm32 & 0xFF ) ;
2007-12-01 00:00:00 +00:00
} else {
2012-12-20 18:53:44 -08:00
emit_int8 ( op1 ) ;
2007-12-01 00:00:00 +00:00
emit_operand ( rm , adr , 4 ) ;
2013-01-07 14:08:28 -08:00
emit_int32 ( imm32 ) ;
2007-12-01 00:00:00 +00:00
}
}
void Assembler : : emit_arith ( int op1 , int op2 , Register dst , Register src ) {
assert ( isByte ( op1 ) & & isByte ( op2 ) , " wrong opcode " ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( op1 ) ;
emit_int8 ( op2 | encode ( dst ) < < 3 | encode ( src ) ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
2015-05-08 11:49:20 -07:00
bool Assembler : : query_compressed_disp_byte ( int disp , bool is_evex_inst , int vector_len ,
int cur_tuple_type , int in_size_in_bits , int cur_encoding ) {
int mod_idx = 0 ;
// We will test if the displacement fits the compressed format and if so
// apply the compression to the displacment iff the result is8bit.
if ( VM_Version : : supports_evex ( ) & & is_evex_inst ) {
switch ( cur_tuple_type ) {
case EVEX_FV :
if ( ( cur_encoding & VEX_W ) = = VEX_W ) {
2015-11-09 11:26:41 -08:00
mod_idx = ( ( cur_encoding & EVEX_Rb ) = = EVEX_Rb ) ? 3 : 2 ;
2015-05-08 11:49:20 -07:00
} else {
mod_idx = ( ( cur_encoding & EVEX_Rb ) = = EVEX_Rb ) ? 1 : 0 ;
}
break ;
case EVEX_HV :
mod_idx = ( ( cur_encoding & EVEX_Rb ) = = EVEX_Rb ) ? 1 : 0 ;
break ;
case EVEX_FVM :
break ;
case EVEX_T1S :
switch ( in_size_in_bits ) {
case EVEX_8bit :
break ;
case EVEX_16bit :
mod_idx = 1 ;
break ;
case EVEX_32bit :
mod_idx = 2 ;
break ;
case EVEX_64bit :
mod_idx = 3 ;
break ;
}
break ;
case EVEX_T1F :
case EVEX_T2 :
case EVEX_T4 :
mod_idx = ( in_size_in_bits = = EVEX_64bit ) ? 1 : 0 ;
break ;
case EVEX_T8 :
break ;
case EVEX_HVM :
break ;
case EVEX_QVM :
break ;
case EVEX_OVM :
break ;
case EVEX_M128 :
break ;
case EVEX_DUP :
break ;
default :
assert ( 0 , " no valid evex tuple_table entry " ) ;
break ;
}
if ( vector_len > = AVX_128bit & & vector_len < = AVX_512bit ) {
int disp_factor = tuple_table [ cur_tuple_type + mod_idx ] [ vector_len ] ;
if ( ( disp % disp_factor ) = = 0 ) {
int new_disp = disp / disp_factor ;
if ( ( - 0x80 < = new_disp & & new_disp < 0x80 ) ) {
disp = new_disp ;
}
} else {
return false ;
}
}
}
return ( - 0x80 < = disp & & disp < 0x80 ) ;
}
bool Assembler : : emit_compressed_disp_byte ( int & disp ) {
int mod_idx = 0 ;
// We will test if the displacement fits the compressed format and if so
// apply the compression to the displacment iff the result is8bit.
2016-03-29 09:53:50 -07:00
if ( VM_Version : : supports_evex ( ) & & _attributes & & _attributes - > is_evex_instruction ( ) ) {
2015-11-09 11:26:41 -08:00
int evex_encoding = _attributes - > get_evex_encoding ( ) ;
int tuple_type = _attributes - > get_tuple_type ( ) ;
switch ( tuple_type ) {
2015-05-08 11:49:20 -07:00
case EVEX_FV :
2015-11-09 11:26:41 -08:00
if ( ( evex_encoding & VEX_W ) = = VEX_W ) {
mod_idx = ( ( evex_encoding & EVEX_Rb ) = = EVEX_Rb ) ? 3 : 2 ;
2015-05-08 11:49:20 -07:00
} else {
2015-11-09 11:26:41 -08:00
mod_idx = ( ( evex_encoding & EVEX_Rb ) = = EVEX_Rb ) ? 1 : 0 ;
2015-05-08 11:49:20 -07:00
}
break ;
case EVEX_HV :
2015-11-09 11:26:41 -08:00
mod_idx = ( ( evex_encoding & EVEX_Rb ) = = EVEX_Rb ) ? 1 : 0 ;
2015-05-08 11:49:20 -07:00
break ;
case EVEX_FVM :
break ;
case EVEX_T1S :
2015-11-09 11:26:41 -08:00
switch ( _attributes - > get_input_size ( ) ) {
2015-05-08 11:49:20 -07:00
case EVEX_8bit :
break ;
case EVEX_16bit :
mod_idx = 1 ;
break ;
case EVEX_32bit :
mod_idx = 2 ;
break ;
case EVEX_64bit :
mod_idx = 3 ;
break ;
}
break ;
case EVEX_T1F :
case EVEX_T2 :
case EVEX_T4 :
2015-11-09 11:26:41 -08:00
mod_idx = ( _attributes - > get_input_size ( ) = = EVEX_64bit ) ? 1 : 0 ;
2015-05-08 11:49:20 -07:00
break ;
case EVEX_T8 :
break ;
case EVEX_HVM :
break ;
case EVEX_QVM :
break ;
case EVEX_OVM :
break ;
case EVEX_M128 :
break ;
case EVEX_DUP :
break ;
default :
assert ( 0 , " no valid evex tuple_table entry " ) ;
break ;
}
2015-11-09 11:26:41 -08:00
int vector_len = _attributes - > get_vector_len ( ) ;
if ( vector_len > = AVX_128bit & & vector_len < = AVX_512bit ) {
int disp_factor = tuple_table [ tuple_type + mod_idx ] [ vector_len ] ;
2015-05-08 11:49:20 -07:00
if ( ( disp % disp_factor ) = = 0 ) {
int new_disp = disp / disp_factor ;
if ( is8bit ( new_disp ) ) {
disp = new_disp ;
}
} else {
return false ;
}
}
}
return is8bit ( disp ) ;
}
2007-12-01 00:00:00 +00:00
void Assembler : : emit_operand ( Register reg , Register base , Register index ,
Address : : ScaleFactor scale , int disp ,
RelocationHolder const & rspec ,
int rip_relative_correction ) {
relocInfo : : relocType rtype = ( relocInfo : : relocType ) rspec . type ( ) ;
2008-08-27 00:21:55 -07:00
// Encode the registers as needed in the fields they are used in
int regenc = encode ( reg ) < < 3 ;
int indexenc = index - > is_valid ( ) ? encode ( index ) < < 3 : 0 ;
int baseenc = base - > is_valid ( ) ? encode ( base ) : 0 ;
2007-12-01 00:00:00 +00:00
if ( base - > is_valid ( ) ) {
if ( index - > is_valid ( ) ) {
assert ( scale ! = Address : : no_scale , " inconsistent address " ) ;
// [base + index*scale + disp]
if ( disp = = 0 & & rtype = = relocInfo : : none & &
2008-08-27 00:21:55 -07:00
base ! = rbp LP64_ONLY ( & & base ! = r13 ) ) {
2007-12-01 00:00:00 +00:00
// [base + index*scale]
// [00 reg 100][ss index base]
assert ( index ! = rsp , " illegal addressing mode " ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x04 | regenc ) ;
emit_int8 ( scale < < 6 | indexenc | baseenc ) ;
2015-05-08 11:49:20 -07:00
} else if ( emit_compressed_disp_byte ( disp ) & & rtype = = relocInfo : : none ) {
2007-12-01 00:00:00 +00:00
// [base + index*scale + imm8]
// [01 reg 100][ss index base] imm8
assert ( index ! = rsp , " illegal addressing mode " ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x44 | regenc ) ;
emit_int8 ( scale < < 6 | indexenc | baseenc ) ;
emit_int8 ( disp & 0xFF ) ;
2007-12-01 00:00:00 +00:00
} else {
// [base + index*scale + disp32]
// [10 reg 100][ss index base] disp32
assert ( index ! = rsp , " illegal addressing mode " ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x84 | regenc ) ;
emit_int8 ( scale < < 6 | indexenc | baseenc ) ;
2007-12-01 00:00:00 +00:00
emit_data ( disp , rspec , disp32_operand ) ;
}
2008-08-27 00:21:55 -07:00
} else if ( base = = rsp LP64_ONLY ( | | base = = r12 ) ) {
2007-12-01 00:00:00 +00:00
// [rsp + disp]
if ( disp = = 0 & & rtype = = relocInfo : : none ) {
// [rsp]
// [00 reg 100][00 100 100]
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x04 | regenc ) ;
emit_int8 ( 0x24 ) ;
2015-05-08 11:49:20 -07:00
} else if ( emit_compressed_disp_byte ( disp ) & & rtype = = relocInfo : : none ) {
2007-12-01 00:00:00 +00:00
// [rsp + imm8]
// [01 reg 100][00 100 100] disp8
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x44 | regenc ) ;
emit_int8 ( 0x24 ) ;
emit_int8 ( disp & 0xFF ) ;
2007-12-01 00:00:00 +00:00
} else {
// [rsp + imm32]
// [10 reg 100][00 100 100] disp32
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x84 | regenc ) ;
emit_int8 ( 0x24 ) ;
2007-12-01 00:00:00 +00:00
emit_data ( disp , rspec , disp32_operand ) ;
}
} else {
// [base + disp]
2008-08-27 00:21:55 -07:00
assert ( base ! = rsp LP64_ONLY ( & & base ! = r12 ) , " illegal addressing mode " ) ;
2007-12-01 00:00:00 +00:00
if ( disp = = 0 & & rtype = = relocInfo : : none & &
2008-08-27 00:21:55 -07:00
base ! = rbp LP64_ONLY ( & & base ! = r13 ) ) {
2007-12-01 00:00:00 +00:00
// [base]
// [00 reg base]
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x00 | regenc | baseenc ) ;
2015-05-08 11:49:20 -07:00
} else if ( emit_compressed_disp_byte ( disp ) & & rtype = = relocInfo : : none ) {
2007-12-01 00:00:00 +00:00
// [base + disp8]
// [01 reg base] disp8
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x40 | regenc | baseenc ) ;
emit_int8 ( disp & 0xFF ) ;
2007-12-01 00:00:00 +00:00
} else {
// [base + disp32]
// [10 reg base] disp32
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x80 | regenc | baseenc ) ;
2007-12-01 00:00:00 +00:00
emit_data ( disp , rspec , disp32_operand ) ;
}
}
} else {
if ( index - > is_valid ( ) ) {
assert ( scale ! = Address : : no_scale , " inconsistent address " ) ;
// [index*scale + disp]
// [00 reg 100][ss index 101] disp32
assert ( index ! = rsp , " illegal addressing mode " ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x04 | regenc ) ;
emit_int8 ( scale < < 6 | indexenc | 0x05 ) ;
2007-12-01 00:00:00 +00:00
emit_data ( disp , rspec , disp32_operand ) ;
} else if ( rtype ! = relocInfo : : none ) {
2008-08-27 00:21:55 -07:00
// [disp] (64bit) RIP-RELATIVE (32bit) abs
2007-12-01 00:00:00 +00:00
// [00 000 101] disp32
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x05 | regenc ) ;
2007-12-01 00:00:00 +00:00
// Note that the RIP-rel. correction applies to the generated
// disp field, but _not_ to the target address in the rspec.
// disp was created by converting the target address minus the pc
// at the start of the instruction. That needs more correction here.
// intptr_t disp = target - next_ip;
assert ( inst_mark ( ) ! = NULL , " must be inside InstructionMark " ) ;
address next_ip = pc ( ) + sizeof ( int32_t ) + rip_relative_correction ;
2008-08-27 00:21:55 -07:00
int64_t adjusted = disp ;
// Do rip-rel adjustment for 64bit
LP64_ONLY ( adjusted - = ( next_ip - inst_mark ( ) ) ) ;
2007-12-01 00:00:00 +00:00
assert ( is_simm32 ( adjusted ) ,
" must be 32bit offset (RIP relative address) " ) ;
2008-08-27 00:21:55 -07:00
emit_data ( ( int32_t ) adjusted , rspec , disp32_operand ) ;
2007-12-01 00:00:00 +00:00
} else {
2008-08-27 00:21:55 -07:00
// 32bit never did this, did everything as the rip-rel/disp code above
2007-12-01 00:00:00 +00:00
// [disp] ABSOLUTE
// [00 reg 100][00 100 101] disp32
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x04 | regenc ) ;
emit_int8 ( 0x25 ) ;
2007-12-01 00:00:00 +00:00
emit_data ( disp , rspec , disp32_operand ) ;
}
}
}
void Assembler : : emit_operand ( XMMRegister reg , Register base , Register index ,
Address : : ScaleFactor scale , int disp ,
2008-08-27 00:21:55 -07:00
RelocationHolder const & rspec ) {
2015-05-08 11:49:20 -07:00
if ( UseAVX > 2 ) {
int xreg_enc = reg - > encoding ( ) ;
if ( xreg_enc > 15 ) {
XMMRegister new_reg = as_XMMRegister ( xreg_enc & 0xf ) ;
emit_operand ( ( Register ) new_reg , base , index , scale , disp , rspec ) ;
return ;
}
}
2008-08-27 00:21:55 -07:00
emit_operand ( ( Register ) reg , base , index , scale , disp , rspec ) ;
2007-12-01 00:00:00 +00:00
}
// Secret local extension to Assembler::WhichOperand:
# define end_pc_operand (_WhichOperand_limit)
address Assembler : : locate_operand ( address inst , WhichOperand which ) {
// Decode the given instruction, and return the address of
// an embedded 32-bit operand word.
// If "which" is disp32_operand, selects the displacement portion
// of an effective address specifier.
// If "which" is imm64_operand, selects the trailing immediate constant.
// If "which" is call32_operand, selects the displacement of a call or jump.
// Caller is responsible for ensuring that there is such an operand,
// and that it is 32/64 bits wide.
// If "which" is end_pc_operand, find the end of the instruction.
address ip = inst ;
bool is_64bit = false ;
debug_only ( bool has_disp32 = false ) ;
int tail_size = 0 ; // other random bytes (#32, #16, etc.) at end of insn
again_after_prefix :
switch ( 0xFF & * ip + + ) {
// These convenience macros generate groups of "case" labels for the switch.
# define REP4(x) (x)+0: case (x)+1: case (x)+2: case (x)+3
# define REP8(x) (x)+0: case (x)+1: case (x)+2: case (x)+3: \
case ( x ) + 4 : case ( x ) + 5 : case ( x ) + 6 : case ( x ) + 7
# define REP16(x) REP8((x)+0): \
case REP8 ( ( x ) + 8 )
case CS_segment :
case SS_segment :
case DS_segment :
case ES_segment :
case FS_segment :
case GS_segment :
2008-08-27 00:21:55 -07:00
// Seems dubious
LP64_ONLY ( assert ( false , " shouldn't have that prefix " ) ) ;
assert ( ip = = inst + 1 , " only one prefix allowed " ) ;
2007-12-01 00:00:00 +00:00
goto again_after_prefix ;
case 0x67 :
case REX :
case REX_B :
case REX_X :
case REX_XB :
case REX_R :
case REX_RB :
case REX_RX :
case REX_RXB :
2008-08-27 00:21:55 -07:00
NOT_LP64 ( assert ( false , " 64bit prefixes " ) ) ;
2007-12-01 00:00:00 +00:00
goto again_after_prefix ;
case REX_W :
case REX_WB :
case REX_WX :
case REX_WXB :
case REX_WR :
case REX_WRB :
case REX_WRX :
case REX_WRXB :
2008-08-27 00:21:55 -07:00
NOT_LP64 ( assert ( false , " 64bit prefixes " ) ) ;
2007-12-01 00:00:00 +00:00
is_64bit = true ;
goto again_after_prefix ;
case 0xFF : // pushq a; decl a; incl a; call a; jmp a
case 0x88 : // movb a, r
case 0x89 : // movl a, r
case 0x8A : // movb r, a
case 0x8B : // movl r, a
case 0x8F : // popl a
2008-08-27 00:21:55 -07:00
debug_only ( has_disp32 = true ) ;
2007-12-01 00:00:00 +00:00
break ;
case 0x68 : // pushq #32
if ( which = = end_pc_operand ) {
return ip + 4 ;
}
2008-08-27 00:21:55 -07:00
assert ( which = = imm_operand & & ! is_64bit , " pushl has no disp32 or 64bit immediate " ) ;
return ip ; // not produced by emit_operand
2007-12-01 00:00:00 +00:00
case 0x66 : // movw ... (size prefix)
again_after_size_prefix2 :
switch ( 0xFF & * ip + + ) {
case REX :
case REX_B :
case REX_X :
case REX_XB :
case REX_R :
case REX_RB :
case REX_RX :
case REX_RXB :
case REX_W :
case REX_WB :
case REX_WX :
case REX_WXB :
case REX_WR :
case REX_WRB :
case REX_WRX :
case REX_WRXB :
2008-08-27 00:21:55 -07:00
NOT_LP64 ( assert ( false , " 64bit prefix found " ) ) ;
2007-12-01 00:00:00 +00:00
goto again_after_size_prefix2 ;
case 0x8B : // movw r, a
case 0x89 : // movw a, r
2008-08-27 00:21:55 -07:00
debug_only ( has_disp32 = true ) ;
2007-12-01 00:00:00 +00:00
break ;
case 0xC7 : // movw a, #16
2008-08-27 00:21:55 -07:00
debug_only ( has_disp32 = true ) ;
2007-12-01 00:00:00 +00:00
tail_size = 2 ; // the imm16
break ;
case 0x0F : // several SSE/SSE2 variants
ip - - ; // reparse the 0x0F
goto again_after_prefix ;
default :
ShouldNotReachHere ( ) ;
}
break ;
case REP8 ( 0xB8 ) : // movl/q r, #32/#64(oop?)
if ( which = = end_pc_operand ) return ip + ( is_64bit ? 8 : 4 ) ;
2008-08-27 00:21:55 -07:00
// these asserts are somewhat nonsensical
# ifndef _LP64
2012-04-02 16:05:56 -07:00
assert ( which = = imm_operand | | which = = disp32_operand ,
2015-09-29 11:02:08 +02:00
" which %d is_64_bit %d ip " INTPTR_FORMAT , which , is_64bit , p2i ( ip ) ) ;
2008-08-27 00:21:55 -07:00
# else
assert ( ( which = = call32_operand | | which = = imm_operand ) & & is_64bit | |
2012-04-02 16:05:56 -07:00
which = = narrow_oop_operand & & ! is_64bit ,
2015-09-29 11:02:08 +02:00
" which %d is_64_bit %d ip " INTPTR_FORMAT , which , is_64bit , p2i ( ip ) ) ;
2008-08-27 00:21:55 -07:00
# endif // _LP64
2007-12-01 00:00:00 +00:00
return ip ;
case 0x69 : // imul r, a, #32
case 0xC7 : // movl a, #32(oop?)
tail_size = 4 ;
debug_only ( has_disp32 = true ) ; // has both kinds of operands!
break ;
case 0x0F : // movx..., etc.
switch ( 0xFF & * ip + + ) {
2011-12-14 14:54:38 -08:00
case 0x3A : // pcmpestri
tail_size = 1 ;
case 0x38 : // ptest, pmovzxbw
ip + + ; // skip opcode
debug_only ( has_disp32 = true ) ; // has both kinds of operands!
break ;
case 0x70 : // pshufd r, r/a, #8
debug_only ( has_disp32 = true ) ; // has both kinds of operands!
case 0x73 : // psrldq r, #8
tail_size = 1 ;
break ;
2007-12-01 00:00:00 +00:00
case 0x12 : // movlps
case 0x28 : // movaps
case 0x2E : // ucomiss
case 0x2F : // comiss
case 0x54 : // andps
2008-08-27 00:21:55 -07:00
case 0x55 : // andnps
case 0x56 : // orps
2007-12-01 00:00:00 +00:00
case 0x57 : // xorps
2015-12-23 21:09:50 -08:00
case 0x58 : // addpd
2015-11-09 11:26:41 -08:00
case 0x59 : // mulpd
2007-12-01 00:00:00 +00:00
case 0x6E : // movd
case 0x7E : // movd
2011-12-14 14:54:38 -08:00
case 0xAE : // ldmxcsr, stmxcsr, fxrstor, fxsave, clflush
2016-03-03 22:02:13 -08:00
case 0xFE : // paddd
2008-08-27 00:21:55 -07:00
debug_only ( has_disp32 = true ) ;
2007-12-01 00:00:00 +00:00
break ;
2008-08-27 00:21:55 -07:00
2007-12-01 00:00:00 +00:00
case 0xAD : // shrd r, a, %cl
case 0xAF : // imul r, a
2008-08-27 00:21:55 -07:00
case 0xBE : // movsbl r, a (movsxb)
case 0xBF : // movswl r, a (movsxw)
case 0xB6 : // movzbl r, a (movzxb)
case 0xB7 : // movzwl r, a (movzxw)
2007-12-01 00:00:00 +00:00
case REP16 ( 0x40 ) : // cmovl cc, r, a
case 0xB0 : // cmpxchgb
case 0xB1 : // cmpxchg
case 0xC1 : // xaddl
case 0xC7 : // cmpxchg8
case REP16 ( 0x90 ) : // setcc a
debug_only ( has_disp32 = true ) ;
// fall out of the switch to decode the address
break ;
2008-08-27 00:21:55 -07:00
2011-12-14 14:54:38 -08:00
case 0xC4 : // pinsrw r, a, #8
debug_only ( has_disp32 = true ) ;
case 0xC5 : // pextrw r, r, #8
tail_size = 1 ; // the imm8
break ;
2007-12-01 00:00:00 +00:00
case 0xAC : // shrd r, a, #8
debug_only ( has_disp32 = true ) ;
tail_size = 1 ; // the imm8
break ;
2008-08-27 00:21:55 -07:00
2007-12-01 00:00:00 +00:00
case REP16 ( 0x80 ) : // jcc rdisp32
if ( which = = end_pc_operand ) return ip + 4 ;
2008-08-27 00:21:55 -07:00
assert ( which = = call32_operand , " jcc has no disp32 or imm " ) ;
2007-12-01 00:00:00 +00:00
return ip ;
default :
ShouldNotReachHere ( ) ;
}
break ;
case 0x81 : // addl a, #32; addl r, #32
// also: orl, adcl, sbbl, andl, subl, xorl, cmpl
2008-08-27 00:21:55 -07:00
// on 32bit in the case of cmpl, the imm might be an oop
2007-12-01 00:00:00 +00:00
tail_size = 4 ;
debug_only ( has_disp32 = true ) ; // has both kinds of operands!
break ;
case 0x83 : // addl a, #8; addl r, #8
// also: orl, adcl, sbbl, andl, subl, xorl, cmpl
debug_only ( has_disp32 = true ) ; // has both kinds of operands!
tail_size = 1 ;
break ;
case 0x9B :
switch ( 0xFF & * ip + + ) {
case 0xD9 : // fnstcw a
debug_only ( has_disp32 = true ) ;
break ;
default :
ShouldNotReachHere ( ) ;
}
break ;
case REP4 ( 0x00 ) : / / addb a , r ; addl a , r ; addb r , a ; addl r , a
case REP4 ( 0x10 ) : // adc...
case REP4 ( 0x20 ) : // and...
case REP4 ( 0x30 ) : // xor...
case REP4 ( 0x08 ) : // or...
case REP4 ( 0x18 ) : // sbb...
case REP4 ( 0x28 ) : // sub...
case 0xF7 : // mull a
2008-08-27 00:21:55 -07:00
case 0x8D : // lea r, a
2007-12-01 00:00:00 +00:00
case 0x87 : // xchg r, a
case REP4 ( 0x38 ) : // cmp...
case 0x85 : // test r, a
debug_only ( has_disp32 = true ) ; // has both kinds of operands!
break ;
case 0xC1 : // sal a, #8; sar a, #8; shl a, #8; shr a, #8
case 0xC6 : // movb a, #8
case 0x80 : // cmpb a, #8
case 0x6B : // imul r, a, #8
debug_only ( has_disp32 = true ) ; // has both kinds of operands!
tail_size = 1 ; // the imm8
break ;
2011-12-14 14:54:38 -08:00
case 0xC4 : // VEX_3bytes
case 0xC5 : // VEX_2bytes
assert ( ( UseAVX > 0 ) , " shouldn't have VEX prefix " ) ;
assert ( ip = = inst + 1 , " no prefixes allowed " ) ;
// C4 and C5 are also used as opcodes for PINSRW and PEXTRW instructions
// but they have prefix 0x0F and processed when 0x0F processed above.
//
// In 32-bit mode the VEX first byte C4 and C5 alias onto LDS and LES
// instructions (these instructions are not supported in 64-bit mode).
// To distinguish them bits [7:6] are set in the VEX second byte since
// ModRM byte can not be of the form 11xxxxxx in 32-bit mode. To set
// those VEX bits REX and vvvv bits are inverted.
//
// Fortunately C2 doesn't generate these instructions so we don't need
// to check for them in product version.
// Check second byte
NOT_LP64 ( assert ( ( 0xC0 & * ip ) = = 0xC0 , " shouldn't have LDS and LES instructions " ) ) ;
2015-10-08 12:49:30 -10:00
int vex_opcode ;
2011-12-14 14:54:38 -08:00
// First byte
if ( ( 0xFF & * inst ) = = VEX_3bytes ) {
2015-10-08 12:49:30 -10:00
vex_opcode = VEX_OPCODE_MASK & * ip ;
2011-12-14 14:54:38 -08:00
ip + + ; // third byte
is_64bit = ( ( VEX_W & * ip ) = = VEX_W ) ;
2015-10-08 12:49:30 -10:00
} else {
vex_opcode = VEX_OPCODE_0F ;
2011-12-14 14:54:38 -08:00
}
ip + + ; // opcode
// To find the end of instruction (which == end_pc_operand).
2015-10-08 12:49:30 -10:00
switch ( vex_opcode ) {
case VEX_OPCODE_0F :
switch ( 0xFF & * ip ) {
case 0x70 : // pshufd r, r/a, #8
case 0x71 : // ps[rl|ra|ll]w r, #8
case 0x72 : // ps[rl|ra|ll]d r, #8
case 0x73 : // ps[rl|ra|ll]q r, #8
case 0xC2 : // cmp[ps|pd|ss|sd] r, r, r/a, #8
case 0xC4 : // pinsrw r, r, r/a, #8
case 0xC5 : // pextrw r/a, r, #8
case 0xC6 : // shufp[s|d] r, r, r/a, #8
tail_size = 1 ; // the imm8
break ;
}
break ;
case VEX_OPCODE_0F_3A :
tail_size = 1 ;
break ;
2011-12-14 14:54:38 -08:00
}
ip + + ; // skip opcode
debug_only ( has_disp32 = true ) ; // has both kinds of operands!
break ;
2007-12-01 00:00:00 +00:00
2015-05-08 11:49:20 -07:00
case 0x62 : // EVEX_4bytes
assert ( ( UseAVX > 0 ) , " shouldn't have EVEX prefix " ) ;
assert ( ip = = inst + 1 , " no prefixes allowed " ) ;
// no EVEX collisions, all instructions that have 0x62 opcodes
// have EVEX versions and are subopcodes of 0x66
ip + + ; // skip P0 and exmaine W in P1
is_64bit = ( ( VEX_W & * ip ) = = VEX_W ) ;
ip + + ; // move to P2
ip + + ; // skip P2, move to opcode
// To find the end of instruction (which == end_pc_operand).
switch ( 0xFF & * ip ) {
2016-03-03 22:02:13 -08:00
case 0x22 : // pinsrd r, r/a, #8
2015-05-08 11:49:20 -07:00
case 0x61 : // pcmpestri r, r/a, #8
case 0x70 : // pshufd r, r/a, #8
case 0x73 : // psrldq r, #8
tail_size = 1 ; // the imm8
break ;
default :
break ;
}
ip + + ; // skip opcode
debug_only ( has_disp32 = true ) ; // has both kinds of operands!
break ;
2007-12-01 00:00:00 +00:00
case 0xD1 : // sal a, 1; sar a, 1; shl a, 1; shr a, 1
case 0xD3 : // sal a, %cl; sar a, %cl; shl a, %cl; shr a, %cl
case 0xD9 : // fld_s a; fst_s a; fstp_s a; fldcw a
case 0xDD : // fld_d a; fst_d a; fstp_d a
case 0xDB : // fild_s a; fistp_s a; fld_x a; fstp_x a
case 0xDF : // fild_d a; fistp_d a
case 0xD8 : // fadd_s a; fsubr_s a; fmul_s a; fdivr_s a; fcomp_s a
case 0xDC : // fadd_d a; fsubr_d a; fmul_d a; fdivr_d a; fcomp_d a
case 0xDE : // faddp_d a; fsubrp_d a; fmulp_d a; fdivrp_d a; fcompp_d a
debug_only ( has_disp32 = true ) ;
break ;
2011-12-14 14:54:38 -08:00
case 0xE8 : // call rdisp32
case 0xE9 : // jmp rdisp32
if ( which = = end_pc_operand ) return ip + 4 ;
assert ( which = = call32_operand , " call has no disp32 or imm " ) ;
return ip ;
2008-11-07 09:29:38 -08:00
case 0xF0 : // Lock
assert ( os : : is_MP ( ) , " only on MP " ) ;
goto again_after_prefix ;
2007-12-01 00:00:00 +00:00
case 0xF3 : // For SSE
case 0xF2 : // For SSE2
switch ( 0xFF & * ip + + ) {
case REX :
case REX_B :
case REX_X :
case REX_XB :
case REX_R :
case REX_RB :
case REX_RX :
case REX_RXB :
case REX_W :
case REX_WB :
case REX_WX :
case REX_WXB :
case REX_WR :
case REX_WRB :
case REX_WRX :
case REX_WRXB :
2008-08-27 00:21:55 -07:00
NOT_LP64 ( assert ( false , " found 64bit prefix " ) ) ;
2007-12-01 00:00:00 +00:00
ip + + ;
default :
ip + + ;
}
debug_only ( has_disp32 = true ) ; // has both kinds of operands!
break ;
default :
ShouldNotReachHere ( ) ;
# undef REP8
# undef REP16
}
assert ( which ! = call32_operand , " instruction is not a call, jmp, or jcc " ) ;
2008-08-27 00:21:55 -07:00
# ifdef _LP64
assert ( which ! = imm_operand , " instruction is not a movq reg, imm64 " ) ;
# else
// assert(which != imm_operand || has_imm32, "instruction has no imm32 field");
assert ( which ! = imm_operand | | has_disp32 , " instruction has no imm32 field " ) ;
# endif // LP64
2007-12-01 00:00:00 +00:00
assert ( which ! = disp32_operand | | has_disp32 , " instruction has no disp32 field " ) ;
// parse the output of emit_operand
int op2 = 0xFF & * ip + + ;
int base = op2 & 0x07 ;
int op3 = - 1 ;
const int b100 = 4 ;
const int b101 = 5 ;
if ( base = = b100 & & ( op2 > > 6 ) ! = 3 ) {
op3 = 0xFF & * ip + + ;
base = op3 & 0x07 ; // refetch the base
}
// now ip points at the disp (if any)
switch ( op2 > > 6 ) {
case 0 :
// [00 reg 100][ss index base]
// [00 reg 100][00 100 esp]
// [00 reg base]
// [00 reg 100][ss index 101][disp32]
// [00 reg 101] [disp32]
if ( base = = b101 ) {
if ( which = = disp32_operand )
return ip ; // caller wants the disp32
ip + = 4 ; // skip the disp32
}
break ;
case 1 :
// [01 reg 100][ss index base][disp8]
// [01 reg 100][00 100 esp][disp8]
// [01 reg base] [disp8]
ip + = 1 ; // skip the disp8
break ;
case 2 :
// [10 reg 100][ss index base][disp32]
// [10 reg 100][00 100 esp][disp32]
// [10 reg base] [disp32]
if ( which = = disp32_operand )
return ip ; // caller wants the disp32
ip + = 4 ; // skip the disp32
break ;
case 3 :
// [11 reg base] (not a memory addressing mode)
break ;
}
if ( which = = end_pc_operand ) {
return ip + tail_size ;
}
2008-08-27 00:21:55 -07:00
# ifdef _LP64
2009-03-12 10:37:46 -07:00
assert ( which = = narrow_oop_operand & & ! is_64bit , " instruction is not a movl adr, imm32 " ) ;
2008-08-27 00:21:55 -07:00
# else
assert ( which = = imm_operand , " instruction has only an imm field " ) ;
# endif // LP64
2007-12-01 00:00:00 +00:00
return ip ;
}
// Returns the address of the instruction that follows 'inst'
// (i.e. the length-decoded end of the instruction starting at 'inst').
address Assembler::locate_next_instruction(address inst) {
  // Secretly share code with locate_operand:
  return locate_operand(inst, end_pc_operand);
}
2008-08-27 00:21:55 -07:00
2007-12-01 00:00:00 +00:00
# ifdef ASSERT
void Assembler : : check_relocation ( RelocationHolder const & rspec , int format ) {
address inst = inst_mark ( ) ;
2008-08-27 00:21:55 -07:00
assert ( inst ! = NULL & & inst < pc ( ) , " must point to beginning of instruction " ) ;
2007-12-01 00:00:00 +00:00
address opnd ;
Relocation * r = rspec . reloc ( ) ;
if ( r - > type ( ) = = relocInfo : : none ) {
return ;
} else if ( r - > is_call ( ) | | format = = call32_operand ) {
2008-08-27 00:21:55 -07:00
// assert(format == imm32_operand, "cannot specify a nonzero format");
2007-12-01 00:00:00 +00:00
opnd = locate_operand ( inst , call32_operand ) ;
} else if ( r - > is_data ( ) ) {
2008-08-27 00:21:55 -07:00
assert ( format = = imm_operand | | format = = disp32_operand
LP64_ONLY ( | | format = = narrow_oop_operand ) , " format ok " ) ;
opnd = locate_operand ( inst , ( WhichOperand ) format ) ;
2007-12-01 00:00:00 +00:00
} else {
2008-08-27 00:21:55 -07:00
assert ( format = = imm_operand , " cannot specify a format " ) ;
2007-12-01 00:00:00 +00:00
return ;
}
assert ( opnd = = pc ( ) , " must put operand where relocs can find it " ) ;
}
2008-08-27 00:21:55 -07:00
# endif // ASSERT
2007-12-01 00:00:00 +00:00
2008-08-27 00:21:55 -07:00
// Emit ModRM/SIB/displacement bytes for a memory operand, 32-bit-only form:
// no REX-requiring (extended) registers allowed anywhere in the address.
void Assembler::emit_operand32(Register reg, Address adr) {
  assert(reg->encoding() < 8, "no extended registers");
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec);
}

// General-purpose-register form. rip_relative_correction accounts for bytes
// (e.g. a trailing immediate) emitted after the operand when computing a
// RIP-relative displacement.
void Assembler::emit_operand(Register reg, Address adr,
                             int rip_relative_correction) {
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec,
               rip_relative_correction);
}

// XMM-register form of the operand emitter.
void Assembler::emit_operand(XMMRegister reg, Address adr) {
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec);
}

// MMX operations
void Assembler::emit_operand(MMXRegister reg, Address adr) {
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
}

// work around gcc (3.2.1-7a) bug
void Assembler::emit_operand(Address adr, MMXRegister reg) {
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
}

// Emit a two-byte x87 arithmetic opcode; 'i' selects the FPU stack slot
// ST(i) by being folded into the second opcode byte.
void Assembler::emit_farith(int b1, int b2, int i) {
  assert(isByte(b1) && isByte(b2), "wrong opcode");
  assert(0 <= i && i < 8, "illegal stack offset");
  emit_int8(b1);
  emit_int8(b2 + i);
}
2011-01-07 10:42:32 -05:00
// Now the Assembler instructions (identical for 32/64 bits)

// adc dword ptr [dst], imm32 (add with carry)
void Assembler::adcl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  // rdx carries the /2 opcode extension for the ADC group-1 immediate form
  emit_arith_operand(0x81, rdx, dst, imm32);
}

// adc dword ptr [dst], src
void Assembler::adcl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x11);
  emit_operand(src, dst);
}

// adc dst, imm32
void Assembler::adcl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xD0, dst, imm32);
}

// adc dst, dword ptr [src]
void Assembler::adcl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x13);
  emit_operand(dst, src);
}

// adc dst, src
void Assembler::adcl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x13, 0xC0, dst, src);
}
2008-08-27 00:21:55 -07:00
void Assembler : : addl ( Address dst , int32_t imm32 ) {
InstructionMark im ( this ) ;
prefix ( dst ) ;
emit_arith_operand ( 0x81 , rax , dst , imm32 ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : addl ( Address dst , Register src ) {
InstructionMark im ( this ) ;
prefix ( dst , src ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x01 ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( src , dst ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : addl ( Register dst , int32_t imm32 ) {
prefix ( dst ) ;
emit_arith ( 0x81 , 0xC0 , dst , imm32 ) ;
}
2007-12-01 00:00:00 +00:00
2008-08-27 00:21:55 -07:00
void Assembler : : addl ( Register dst , Address src ) {
InstructionMark im ( this ) ;
prefix ( src , dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x03 ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( dst , src ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : addl ( Register dst , Register src ) {
( void ) prefix_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
emit_arith ( 0x03 , 0xC0 , dst , src ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
// Multi-byte NOPs built from the canonical 0F 1F /0 "NOP r/m32" encoding
// with dummy addressing modes of increasing length.

void Assembler::addr_nop_4() {
  assert(UseAddressNop, "no CPU support");
  // 4 bytes: NOP DWORD PTR [EAX+0]
  emit_int8(0x0F);
  emit_int8(0x1F);
  emit_int8(0x40); // emit_rm(cbuf, 0x1, EAX_enc, EAX_enc);
  emit_int8(0);    // 8-bits offset (1 byte)
}

void Assembler::addr_nop_5() {
  assert(UseAddressNop, "no CPU support");
  // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bits offset
  emit_int8(0x0F);
  emit_int8(0x1F);
  emit_int8(0x44); // emit_rm(cbuf, 0x1, EAX_enc, 0x4);
  emit_int8(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_int8(0);    // 8-bits offset (1 byte)
}

void Assembler::addr_nop_7() {
  assert(UseAddressNop, "no CPU support");
  // 7 bytes: NOP DWORD PTR [EAX+0] 32-bits offset
  emit_int8(0x0F);
  emit_int8(0x1F);
  emit_int8((unsigned char)0x80);
                   // emit_rm(cbuf, 0x2, EAX_enc, EAX_enc);
  emit_int32(0);   // 32-bits offset (4 bytes)
}

void Assembler::addr_nop_8() {
  assert(UseAddressNop, "no CPU support");
  // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset
  emit_int8(0x0F);
  emit_int8(0x1F);
  emit_int8((unsigned char)0x84);
                   // emit_rm(cbuf, 0x2, EAX_enc, 0x4);
  emit_int8(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_int32(0);   // 32-bits offset (4 bytes)
}
// addsd dst, src -- scalar double-precision FP add (SSE2 / AVX-128 / EVEX).
void Assembler::addsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_int8((unsigned char)(0xC0 | encode));
}

// addsd dst, qword ptr [src]
void Assembler::addsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  // scalar 64-bit memory operand for EVEX displacement compression
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src);
}

// addss dst, src -- scalar single-precision FP add (SSE).
void Assembler::addss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_int8((unsigned char)(0xC0 | encode));
}

// addss dst, dword ptr [src]
void Assembler::addss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src);
}
2012-10-24 14:33:22 -07:00
void Assembler : : aesdec ( XMMRegister dst , Address src ) {
assert ( VM_Version : : supports_aes ( ) , " " ) ;
InstructionMark im ( this ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
simd_prefix ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xDE ) ;
2012-10-24 14:33:22 -07:00
emit_operand ( dst , src ) ;
}
void Assembler : : aesdec ( XMMRegister dst , XMMRegister src ) {
assert ( VM_Version : : supports_aes ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xDE ) ;
emit_int8 ( 0xC0 | encode ) ;
2012-10-24 14:33:22 -07:00
}
void Assembler : : aesdeclast ( XMMRegister dst , Address src ) {
assert ( VM_Version : : supports_aes ( ) , " " ) ;
InstructionMark im ( this ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
simd_prefix ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xDF ) ;
2012-10-24 14:33:22 -07:00
emit_operand ( dst , src ) ;
}
void Assembler : : aesdeclast ( XMMRegister dst , XMMRegister src ) {
assert ( VM_Version : : supports_aes ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xDF ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-10-24 14:33:22 -07:00
}
void Assembler : : aesenc ( XMMRegister dst , Address src ) {
assert ( VM_Version : : supports_aes ( ) , " " ) ;
InstructionMark im ( this ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
simd_prefix ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xDC ) ;
2012-10-24 14:33:22 -07:00
emit_operand ( dst , src ) ;
}
void Assembler : : aesenc ( XMMRegister dst , XMMRegister src ) {
assert ( VM_Version : : supports_aes ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xDC ) ;
emit_int8 ( 0xC0 | encode ) ;
2012-10-24 14:33:22 -07:00
}
void Assembler : : aesenclast ( XMMRegister dst , Address src ) {
assert ( VM_Version : : supports_aes ( ) , " " ) ;
InstructionMark im ( this ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
simd_prefix ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xDD ) ;
2012-10-24 14:33:22 -07:00
emit_operand ( dst , src ) ;
}
void Assembler : : aesenclast ( XMMRegister dst , XMMRegister src ) {
assert ( VM_Version : : supports_aes ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xDD ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-10-24 14:33:22 -07:00
}
2011-12-14 14:54:38 -08:00
void Assembler : : andl ( Address dst , int32_t imm32 ) {
InstructionMark im ( this ) ;
prefix ( dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0x81 ) ;
2011-12-14 14:54:38 -08:00
emit_operand ( rsp , dst , 4 ) ;
2013-01-07 14:08:28 -08:00
emit_int32 ( imm32 ) ;
2011-12-14 14:54:38 -08:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : andl ( Register dst , int32_t imm32 ) {
2007-12-01 00:00:00 +00:00
prefix ( dst ) ;
2008-08-27 00:21:55 -07:00
emit_arith ( 0x81 , 0xE0 , dst , imm32 ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : andl ( Register dst , Address src ) {
2007-12-01 00:00:00 +00:00
InstructionMark im ( this ) ;
2008-08-27 00:21:55 -07:00
prefix ( src , dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x23 ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( dst , src ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : andl ( Register dst , Register src ) {
( void ) prefix_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
emit_arith ( 0x23 , 0xC0 , dst , src ) ;
2007-12-01 00:00:00 +00:00
}
2014-03-12 11:24:26 -07:00
// andn dst, src1, src2 (BMI1): dst = ~src1 & src2. VEX-only encoding.
void Assembler::andnl(Register dst, Register src1, Register src2) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF2);
  emit_int8((unsigned char)(0xC0 | encode));
}

// andn dst, src1, dword ptr [src2]
void Assembler::andnl(Register dst, Register src1, Address src2) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  // NOTE: memory form takes (addr, nds = src1, reg = dst) -- argument order
  // differs from the register form above
  vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF2);
  emit_operand(dst, src2);
}
2009-05-06 00:27:52 -07:00
void Assembler : : bsfl ( Register dst , Register src ) {
int encode = prefix_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xBC ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2009-05-06 00:27:52 -07:00
}
void Assembler : : bsrl ( Register dst , Register src ) {
int encode = prefix_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xBD ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2009-05-06 00:27:52 -07:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : bswapl ( Register reg ) { // bswap
int encode = prefix_and_encode ( reg - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) ( 0xC8 | encode ) ) ;
2008-08-27 00:21:55 -07:00
}
2014-03-12 11:24:26 -07:00
// BMI1 BLS* group (VEX.0F38 /F3): the sub-operation is selected by an opcode
// extension carried in the reg field, encoded here by passing a fixed
// register as the first argument (rbx = /3 blsi, rdx = /2 blsmsk, rcx = /1 blsr).

// blsi dst, src -- isolate lowest set bit
void Assembler::blsil(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(rbx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}

// blsi dst, dword ptr [src]
void Assembler::blsil(Register dst, Address src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  vex_prefix(src, dst->encoding(), rbx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_operand(rbx, src);
}

// blsmsk dst, src -- mask up to (and including) lowest set bit
void Assembler::blsmskl(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(rdx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}

// blsmsk dst, dword ptr [src]
void Assembler::blsmskl(Register dst, Address src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  vex_prefix(src, dst->encoding(), rdx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_operand(rdx, src);
}

// blsr dst, src -- reset lowest set bit
void Assembler::blsrl(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(rcx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}

// blsr dst, dword ptr [src]
void Assembler::blsrl(Register dst, Address src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  vex_prefix(src, dst->encoding(), rcx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_operand(rcx, src);
}
2008-08-27 00:21:55 -07:00
// call to a label: emits E8 with a 32-bit relative displacement. If the label
// is unbound the displacement is 0 and a patch record is queued.
void Assembler::call(Label& L, relocInfo::relocType rtype) {
  // suspect disp32 is always good
  int operand = LP64_ONLY(disp32_operand) NOT_LP64(imm_operand);

  if (L.is_bound()) {
    const int long_size = 5;  // E8 opcode byte + 4-byte displacement
    int offs = (int)(target(L) - pc());
    assert(offs <= 0, "assembler error");
    InstructionMark im(this);
    // 1110 1000 #32-bit disp
    emit_int8((unsigned char)0xE8);
    emit_data(offs - long_size, rtype, operand);
  } else {
    InstructionMark im(this);
    // 1110 1000 #32-bit disp
    L.add_patch_at(code(), locator());
    emit_int8((unsigned char)0xE8);
    emit_data(int(0), rtype, operand);
  }
}

// call dst -- indirect call through a register (FF /2)
void Assembler::call(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xD0 | encode));
}

// call [adr] -- indirect call through memory (FF /2; rdx carries the /2 extension)
void Assembler::call(Address adr) {
  InstructionMark im(this);
  prefix(adr);
  emit_int8((unsigned char)0xFF);
  emit_operand(rdx, adr);
}

// call to an absolute code address, with relocation info attached.
void Assembler::call_literal(address entry, RelocationHolder const& rspec) {
  assert(entry != NULL, "call most probably wrong");
  InstructionMark im(this);
  emit_int8((unsigned char)0xE8);
  // displacement is relative to the end of the 5-byte instruction
  intptr_t disp = entry - (pc() + sizeof(int32_t));
  assert(is_simm32(disp), "must be 32bit offset (call2)");
  // Technically, should use call32_operand, but this format is
  // implied by the fact that we're emitting a call instruction.
  int operand = LP64_ONLY(disp32_operand) NOT_LP64(call32_operand);
  emit_data((int)disp, rspec, operand);
}
2008-08-27 00:21:55 -07:00
// cdq -- sign-extend EAX into EDX:EAX
void Assembler::cdql() {
  emit_int8((unsigned char)0x99);
}

// cld -- clear direction flag (string operations go forward)
void Assembler::cld() {
  emit_int8((unsigned char)0xFC);
}
2008-08-27 00:21:55 -07:00
void Assembler : : cmovl ( Condition cc , Register dst , Register src ) {
NOT_LP64 ( guarantee ( VM_Version : : supports_cmov ( ) , " illegal instruction " ) ) ;
2007-12-01 00:00:00 +00:00
int encode = prefix_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( 0x40 | cc ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : cmovl ( Condition cc , Register dst , Address src ) {
NOT_LP64 ( guarantee ( VM_Version : : supports_cmov ( ) , " illegal instruction " ) ) ;
2007-12-01 00:00:00 +00:00
prefix ( src , dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( 0x40 | cc ) ;
2007-12-01 00:00:00 +00:00
emit_operand ( dst , src ) ;
}
2008-08-27 00:21:55 -07:00
// cmp byte ptr [dst], imm8
void Assembler::cmpb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x80);
  // rdi carries the /7 (CMP) opcode extension; 1 = size of the trailing imm8
  emit_operand(rdi, dst, 1);
  emit_int8(imm8);
}

// cmp dword ptr [dst], imm32
void Assembler::cmpl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rdi, dst, 4);  // /7 extension; 4 = size of trailing imm32
  emit_int32(imm32);
}

// cmp dst, imm32
void Assembler::cmpl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xF8, dst, imm32);
}

// cmp dst, src
void Assembler::cmpl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x3B, 0xC0, dst, src);
}

// cmp dst, dword ptr [src]
void Assembler::cmpl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x3B);
  emit_operand(dst, src);
}

// cmp word ptr [dst], imm16 -- uses the 0x66 operand-size prefix.
// No REX prefix is emitted, so extended registers are disallowed in the address.
void Assembler::cmpw(Address dst, int imm16) {
  InstructionMark im(this);
  assert(!dst.base_needs_rex() && !dst.index_needs_rex(), "no extended registers");
  emit_int8(0x66);
  emit_int8((unsigned char)0x81);
  emit_operand(rdi, dst, 2);  // /7 extension; 2 = size of trailing imm16
  emit_int16(imm16);
}
2008-08-27 00:21:55 -07:00
// The 32-bit cmpxchg compares the value at adr with the contents of rax,
// and stores reg into adr if so; otherwise, the value at adr is loaded into rax,.
// The ZF is set if the compared values were equal, and cleared otherwise.
void Assembler::cmpxchgl(Register reg, Address adr) { // cmpxchg
  InstructionMark im(this);
  prefix(adr, reg);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB1);
  emit_operand(reg, adr);
}

// The 8-bit cmpxchg compares the value at adr with the contents of rax,
// and stores reg into adr if so; otherwise, the value at adr is loaded into rax,.
// The ZF is set if the compared values were equal, and cleared otherwise.
void Assembler::cmpxchgb(Register reg, Address adr) { // cmpxchg
  InstructionMark im(this);
  // 'true' selects byte-operand prefixing -- presumably forces a REX prefix
  // for byte access to sil/dil etc.; see prefix(Address, Register, bool)
  prefix(adr, reg, true);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB0);
  emit_operand(reg, adr);
}
2008-08-27 00:21:55 -07:00
void Assembler : : comisd ( XMMRegister dst , Address src ) {
// NOTE: dbx seems to decode this as comiss even though the
// 0x66 is there. Strangly ucomisd comes out correct
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( AVX_128bit , /* rex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ true , /* uses_vl */ false ) ; ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_T1S , /* input_size_in_bits */ EVEX_64bit ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2015-11-09 11:26:41 -08:00
simd_prefix ( dst , xnoreg , src , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x2F ) ;
emit_operand ( dst , src ) ;
2011-12-14 14:54:38 -08:00
}
void Assembler : : comisd ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ true , /* uses_vl */ false ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( dst , xnoreg , src , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x2F ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : comiss ( XMMRegister dst , Address src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ true , /* uses_vl */ false ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_T1S , /* input_size_in_bits */ EVEX_32bit ) ;
simd_prefix ( dst , xnoreg , src , VEX_SIMD_NONE , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x2F ) ;
emit_operand ( dst , src ) ;
2007-12-01 00:00:00 +00:00
}
2011-12-14 14:54:38 -08:00
void Assembler : : comiss ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ true , /* uses_vl */ false ) ;
int encode = simd_prefix_and_encode ( dst , xnoreg , src , VEX_SIMD_NONE , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x2F ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2011-12-14 14:54:38 -08:00
}
2012-11-30 15:23:16 -08:00
void Assembler : : cpuid ( ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xA2 ) ;
2012-11-30 15:23:16 -08:00
}
2015-09-16 15:54:32 -07:00
// Opcode / Instruction Op / En 64 - Bit Mode Compat / Leg Mode Description Implemented
// F2 0F 38 F0 / r CRC32 r32, r / m8 RM Valid Valid Accumulate CRC32 on r / m8. v
// F2 REX 0F 38 F0 / r CRC32 r32, r / m8* RM Valid N.E. Accumulate CRC32 on r / m8. -
// F2 REX.W 0F 38 F0 / r CRC32 r64, r / m8 RM Valid N.E. Accumulate CRC32 on r / m8. -
//
// F2 0F 38 F1 / r CRC32 r32, r / m16 RM Valid Valid Accumulate CRC32 on r / m16. v
//
// F2 0F 38 F1 / r CRC32 r32, r / m32 RM Valid Valid Accumulate CRC32 on r / m32. v
//
// F2 REX.W 0F 38 F1 / r CRC32 r64, r / m64 RM Valid N.E. Accumulate CRC32 on r / m64. v
void Assembler : : crc32 ( Register crc , Register v , int8_t sizeInBytes ) {
assert ( VM_Version : : supports_sse4_2 ( ) , " " ) ;
int8_t w = 0x01 ;
Prefix p = Prefix_EMPTY ;
emit_int8 ( ( int8_t ) 0xF2 ) ;
switch ( sizeInBytes ) {
case 1 :
w = 0 ;
break ;
case 2 :
case 4 :
break ;
LP64_ONLY ( case 8 : )
// This instruction is not valid in 32 bits
// Note:
// http://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf
//
// Page B - 72 Vol. 2C says
// qwreg2 to qwreg 1111 0010 : 0100 1R0B : 0000 1111 : 0011 1000 : 1111 0000 : 11 qwreg1 qwreg2
// mem64 to qwreg 1111 0010 : 0100 1R0B : 0000 1111 : 0011 1000 : 1111 0000 : mod qwreg r / m
// F0!!!
// while 3 - 208 Vol. 2A
// F2 REX.W 0F 38 F1 / r CRC32 r64, r / m64 RM Valid N.E.Accumulate CRC32 on r / m64.
//
// the 0 on a last bit is reserved for a different flavor of this instruction :
// F2 REX.W 0F 38 F0 / r CRC32 r64, r / m8 RM Valid N.E.Accumulate CRC32 on r / m8.
p = REX_W ;
break ;
default :
assert ( 0 , " Unsupported value for a sizeInBytes argument " ) ;
break ;
}
LP64_ONLY ( prefix ( crc , v , p ) ; )
emit_int8 ( ( int8_t ) 0x0F ) ;
emit_int8 ( 0x38 ) ;
emit_int8 ( ( int8_t ) ( 0xF0 | w ) ) ;
emit_int8 ( 0xC0 | ( ( crc - > encoding ( ) & 0x7 ) < < 3 ) | ( v - > encoding ( ) & 7 ) ) ;
}
void Assembler : : crc32 ( Register crc , Address adr , int8_t sizeInBytes ) {
assert ( VM_Version : : supports_sse4_2 ( ) , " " ) ;
InstructionMark im ( this ) ;
int8_t w = 0x01 ;
Prefix p = Prefix_EMPTY ;
emit_int8 ( ( int8_t ) 0xF2 ) ;
switch ( sizeInBytes ) {
case 1 :
w = 0 ;
break ;
case 2 :
case 4 :
break ;
LP64_ONLY ( case 8 : )
// This instruction is not valid in 32 bits
p = REX_W ;
break ;
default :
assert ( 0 , " Unsupported value for a sizeInBytes argument " ) ;
break ;
}
LP64_ONLY ( prefix ( crc , adr , p ) ; )
emit_int8 ( ( int8_t ) 0x0F ) ;
emit_int8 ( 0x38 ) ;
emit_int8 ( ( int8_t ) ( 0xF0 | w ) ) ;
emit_operand ( crc , adr ) ;
}
2008-08-27 00:21:55 -07:00
void Assembler : : cvtdq2pd ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
int encode = simd_prefix_and_encode ( dst , xnoreg , src , VEX_SIMD_F3 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0xE6 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : cvtdq2ps ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
int encode = simd_prefix_and_encode ( dst , xnoreg , src , VEX_SIMD_NONE , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x5B ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : cvtsd2ss ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_F2 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x5A ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2007-12-01 00:00:00 +00:00
}
2011-12-14 14:54:38 -08:00
void Assembler : : cvtsd2ss ( XMMRegister dst , Address src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( AVX_128bit , /* rex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_T1S , /* input_size_in_bits */ EVEX_64bit ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2015-11-09 11:26:41 -08:00
simd_prefix ( dst , dst , src , VEX_SIMD_F2 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x5A ) ;
emit_operand ( dst , src ) ;
2011-12-14 14:54:38 -08:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : cvtsi2sdl ( XMMRegister dst , Register src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ true , /* uses_vl */ false ) ;
int encode = simd_prefix_and_encode ( dst , dst , as_XMMRegister ( src - > encoding ( ) ) , VEX_SIMD_F2 , VEX_OPCODE_0F , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x2A ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2007-12-01 00:00:00 +00:00
}
2011-12-14 14:54:38 -08:00
void Assembler : : cvtsi2sdl ( XMMRegister dst , Address src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ true , /* uses_vl */ false ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_T1S , /* input_size_in_bits */ EVEX_32bit ) ;
simd_prefix ( dst , dst , src , VEX_SIMD_F2 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x2A ) ;
emit_operand ( dst , src ) ;
2011-12-14 14:54:38 -08:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : cvtsi2ssl ( XMMRegister dst , Register src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ true , /* uses_vl */ false ) ;
int encode = simd_prefix_and_encode ( dst , dst , as_XMMRegister ( src - > encoding ( ) ) , VEX_SIMD_F3 , VEX_OPCODE_0F , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x2A ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2007-12-01 00:00:00 +00:00
}
2011-12-14 14:54:38 -08:00
void Assembler : : cvtsi2ssl ( XMMRegister dst , Address src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ true , /* uses_vl */ false ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_T1S , /* input_size_in_bits */ EVEX_32bit ) ;
simd_prefix ( dst , dst , src , VEX_SIMD_F3 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x2A ) ;
emit_operand ( dst , src ) ;
2011-12-14 14:54:38 -08:00
}
2015-08-19 08:55:18 +02:00
void Assembler : : cvtsi2ssq ( XMMRegister dst , Register src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ true , /* legacy_mode */ false , /* no_mask_reg */ true , /* uses_vl */ false ) ;
int encode = simd_prefix_and_encode ( dst , dst , as_XMMRegister ( src - > encoding ( ) ) , VEX_SIMD_F3 , VEX_OPCODE_0F , & attributes ) ;
2015-08-19 08:55:18 +02:00
emit_int8 ( 0x2A ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
2008-08-27 00:21:55 -07:00
void Assembler : : cvtss2sd ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_F3 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x5A ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2007-12-01 00:00:00 +00:00
}
2011-12-14 14:54:38 -08:00
void Assembler : : cvtss2sd ( XMMRegister dst , Address src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_T1S , /* input_size_in_bits */ EVEX_32bit ) ;
simd_prefix ( dst , dst , src , VEX_SIMD_F3 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x5A ) ;
emit_operand ( dst , src ) ;
2011-12-14 14:54:38 -08:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : cvttsd2sil ( Register dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ true , /* uses_vl */ false ) ;
int encode = simd_prefix_and_encode ( as_XMMRegister ( dst - > encoding ( ) ) , xnoreg , src , VEX_SIMD_F2 , VEX_OPCODE_0F , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x2C ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : cvttss2sil ( Register dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ true , /* uses_vl */ false ) ;
int encode = simd_prefix_and_encode ( as_XMMRegister ( dst - > encoding ( ) ) , xnoreg , src , VEX_SIMD_F3 , VEX_OPCODE_0F , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x2C ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2007-12-01 00:00:00 +00:00
}
2016-04-06 10:29:26 -07:00
void Assembler : : cvttpd2dq ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
int vector_len = VM_Version : : supports_avx512novl ( ) ? AVX_512bit : AVX_128bit ;
InstructionAttr attributes ( vector_len , /* rex_w */ true , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
int encode = simd_prefix_and_encode ( dst , xnoreg , src , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0xE6 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
2008-08-27 00:21:55 -07:00
void Assembler : : decl ( Address dst ) {
// Don't use it directly. Use MacroAssembler::decrement() instead.
InstructionMark im ( this ) ;
prefix ( dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xFF ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( rcx , dst ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : divsd ( XMMRegister dst , Address src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( AVX_128bit , /* rex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_T1S , /* input_size_in_bits */ EVEX_64bit ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2015-11-09 11:26:41 -08:00
simd_prefix ( dst , dst , src , VEX_SIMD_F2 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x5E ) ;
emit_operand ( dst , src ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : divsd ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_F2 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x5E ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : divss ( XMMRegister dst , Address src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_T1S , /* input_size_in_bits */ EVEX_32bit ) ;
simd_prefix ( dst , dst , src , VEX_SIMD_F3 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x5E ) ;
emit_operand ( dst , src ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : divss ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_F3 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x5E ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : emms ( ) {
NOT_LP64 ( assert ( VM_Version : : supports_mmx ( ) , " " ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( 0x77 ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : hlt ( ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xF4 ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : idivl ( Register src ) {
int encode = prefix_and_encode ( src - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xF7 ) ;
emit_int8 ( ( unsigned char ) ( 0xF8 | encode ) ) ;
2007-12-01 00:00:00 +00:00
}
2010-11-06 18:52:07 -07:00
void Assembler : : divl ( Register src ) { // Unsigned
int encode = prefix_and_encode ( src - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xF7 ) ;
emit_int8 ( ( unsigned char ) ( 0xF0 | encode ) ) ;
2010-11-06 18:52:07 -07:00
}
2016-01-08 21:06:50 -08:00
void Assembler : : imull ( Register src ) {
int encode = prefix_and_encode ( src - > encoding ( ) ) ;
emit_int8 ( ( unsigned char ) 0xF7 ) ;
emit_int8 ( ( unsigned char ) ( 0xE8 | encode ) ) ;
}
2008-08-27 00:21:55 -07:00
void Assembler : : imull ( Register dst , Register src ) {
2007-12-01 00:00:00 +00:00
int encode = prefix_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xAF ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : imull ( Register dst , Register src , int value ) {
2007-12-01 00:00:00 +00:00
int encode = prefix_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
2008-08-27 00:21:55 -07:00
if ( is8bit ( value ) ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x6B ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
emit_int8 ( value & 0xFF ) ;
2008-08-27 00:21:55 -07:00
} else {
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x69 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2013-01-07 14:08:28 -08:00
emit_int32 ( value ) ;
2008-08-27 00:21:55 -07:00
}
2007-12-01 00:00:00 +00:00
}
2013-10-18 10:41:56 +02:00
void Assembler : : imull ( Register dst , Address src ) {
InstructionMark im ( this ) ;
prefix ( src , dst ) ;
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xAF ) ;
emit_operand ( dst , src ) ;
}
2008-08-27 00:21:55 -07:00
void Assembler : : incl ( Address dst ) {
// Don't use it directly. Use MacroAssembler::increment() instead.
2007-12-01 00:00:00 +00:00
InstructionMark im ( this ) ;
2008-08-27 00:21:55 -07:00
prefix ( dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xFF ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( rax , dst ) ;
2007-12-01 00:00:00 +00:00
}
2011-08-11 12:08:11 -07:00
void Assembler : : jcc ( Condition cc , Label & L , bool maybe_short ) {
2007-12-01 00:00:00 +00:00
InstructionMark im ( this ) ;
2008-08-27 00:21:55 -07:00
assert ( ( 0 < = cc ) & & ( cc < 16 ) , " illegal cc " ) ;
if ( L . is_bound ( ) ) {
address dst = target ( L ) ;
assert ( dst ! = NULL , " jcc most probably wrong " ) ;
const int short_size = 2 ;
const int long_size = 6 ;
2012-11-30 11:44:05 -08:00
intptr_t offs = ( intptr_t ) dst - ( intptr_t ) pc ( ) ;
2011-08-11 12:08:11 -07:00
if ( maybe_short & & is8bit ( offs - short_size ) ) {
2008-08-27 00:21:55 -07:00
// 0111 tttn #8-bit disp
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x70 | cc ) ;
emit_int8 ( ( offs - short_size ) & 0xFF ) ;
2008-08-27 00:21:55 -07:00
} else {
// 0000 1111 1000 tttn #32-bit disp
assert ( is_simm32 ( offs - long_size ) ,
" must be 32bit offset (call4) " ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) ( 0x80 | cc ) ) ;
2013-01-07 14:08:28 -08:00
emit_int32 ( offs - long_size ) ;
2008-08-27 00:21:55 -07:00
}
} else {
// Note: could eliminate cond. jumps to this jump if condition
// is the same however, seems to be rather unlikely case.
// Note: use jccb() if label to be bound is very close to get
// an 8-bit displacement
L . add_patch_at ( code ( ) , locator ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) ( 0x80 | cc ) ) ;
2013-01-07 14:08:28 -08:00
emit_int32 ( 0 ) ;
2008-08-27 00:21:55 -07:00
}
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : jccb ( Condition cc , Label & L ) {
if ( L . is_bound ( ) ) {
const int short_size = 2 ;
address entry = target ( L ) ;
2011-12-23 15:24:36 -08:00
# ifdef ASSERT
2012-11-30 11:44:05 -08:00
intptr_t dist = ( intptr_t ) entry - ( ( intptr_t ) pc ( ) + short_size ) ;
2011-12-23 15:24:36 -08:00
intptr_t delta = short_branch_delta ( ) ;
if ( delta ! = 0 ) {
dist + = ( dist < 0 ? ( - delta ) : delta ) ;
}
assert ( is8bit ( dist ) , " Dispacement too large for a short jmp " ) ;
# endif
2012-11-30 11:44:05 -08:00
intptr_t offs = ( intptr_t ) entry - ( intptr_t ) pc ( ) ;
2008-08-27 00:21:55 -07:00
// 0111 tttn #8-bit disp
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x70 | cc ) ;
emit_int8 ( ( offs - short_size ) & 0xFF ) ;
2008-08-27 00:21:55 -07:00
} else {
InstructionMark im ( this ) ;
L . add_patch_at ( code ( ) , locator ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x70 | cc ) ;
emit_int8 ( 0 ) ;
2008-08-27 00:21:55 -07:00
}
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : jmp ( Address adr ) {
2007-12-01 00:00:00 +00:00
InstructionMark im ( this ) ;
2008-08-27 00:21:55 -07:00
prefix ( adr ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xFF ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( rsp , adr ) ;
2007-12-01 00:00:00 +00:00
}
2011-08-11 12:08:11 -07:00
void Assembler : : jmp ( Label & L , bool maybe_short ) {
2008-08-27 00:21:55 -07:00
if ( L . is_bound ( ) ) {
address entry = target ( L ) ;
assert ( entry ! = NULL , " jmp most probably wrong " ) ;
InstructionMark im ( this ) ;
const int short_size = 2 ;
const int long_size = 5 ;
2012-11-30 11:44:05 -08:00
intptr_t offs = entry - pc ( ) ;
2011-08-11 12:08:11 -07:00
if ( maybe_short & & is8bit ( offs - short_size ) ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xEB ) ;
emit_int8 ( ( offs - short_size ) & 0xFF ) ;
2008-08-27 00:21:55 -07:00
} else {
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xE9 ) ;
2013-01-07 14:08:28 -08:00
emit_int32 ( offs - long_size ) ;
2008-08-27 00:21:55 -07:00
}
} else {
// By default, forward jumps are always 32-bit displacements, since
// we can't yet know where the label will be bound. If you're sure that
// the forward jump will not run beyond 256 bytes, use jmpb to
// force an 8-bit displacement.
InstructionMark im ( this ) ;
L . add_patch_at ( code ( ) , locator ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xE9 ) ;
2013-01-07 14:08:28 -08:00
emit_int32 ( 0 ) ;
2008-08-27 00:21:55 -07:00
}
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : jmp ( Register entry ) {
int encode = prefix_and_encode ( entry - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xFF ) ;
emit_int8 ( ( unsigned char ) ( 0xE0 | encode ) ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : jmp_literal ( address dest , RelocationHolder const & rspec ) {
2007-12-01 00:00:00 +00:00
InstructionMark im ( this ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xE9 ) ;
2008-08-27 00:21:55 -07:00
assert ( dest ! = NULL , " must have a target " ) ;
2012-11-30 11:44:05 -08:00
intptr_t disp = dest - ( pc ( ) + sizeof ( int32_t ) ) ;
2008-08-27 00:21:55 -07:00
assert ( is_simm32 ( disp ) , " must be 32bit offset (jmp) " ) ;
emit_data ( disp , rspec . reloc ( ) , call32_operand ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : jmpb ( Label & L ) {
if ( L . is_bound ( ) ) {
const int short_size = 2 ;
address entry = target ( L ) ;
assert ( entry ! = NULL , " jmp most probably wrong " ) ;
2011-12-23 15:24:36 -08:00
# ifdef ASSERT
2012-11-30 11:44:05 -08:00
intptr_t dist = ( intptr_t ) entry - ( ( intptr_t ) pc ( ) + short_size ) ;
2011-12-23 15:24:36 -08:00
intptr_t delta = short_branch_delta ( ) ;
if ( delta ! = 0 ) {
dist + = ( dist < 0 ? ( - delta ) : delta ) ;
}
assert ( is8bit ( dist ) , " Dispacement too large for a short jmp " ) ;
# endif
2012-11-30 11:44:05 -08:00
intptr_t offs = entry - pc ( ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xEB ) ;
emit_int8 ( ( offs - short_size ) & 0xFF ) ;
2008-08-27 00:21:55 -07:00
} else {
InstructionMark im ( this ) ;
L . add_patch_at ( code ( ) , locator ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xEB ) ;
emit_int8 ( 0 ) ;
2008-08-27 00:21:55 -07:00
}
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : ldmxcsr ( Address src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse ( ) , " " ) ) ;
2007-12-01 00:00:00 +00:00
InstructionMark im ( this ) ;
2008-08-27 00:21:55 -07:00
prefix ( src ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xAE ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( as_Register ( 2 ) , src ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : leal ( Register dst , Address src ) {
2007-12-01 00:00:00 +00:00
InstructionMark im ( this ) ;
2008-08-27 00:21:55 -07:00
# ifdef _LP64
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x67 ) ; // addr32
2008-08-27 00:21:55 -07:00
prefix ( src , dst ) ;
# endif // LP64
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0x8D ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( dst , src ) ;
2007-12-01 00:00:00 +00:00
}
2012-11-30 15:23:16 -08:00
void Assembler : : lfence ( ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xAE ) ;
emit_int8 ( ( unsigned char ) 0xE8 ) ;
2012-11-30 15:23:16 -08:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : lock ( ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xF0 ) ;
2007-12-01 00:00:00 +00:00
}
2009-05-06 00:27:52 -07:00
void Assembler : : lzcntl ( Register dst , Register src ) {
assert ( VM_Version : : supports_lzcnt ( ) , " encoding is treated as BSR " ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xF3 ) ;
2009-05-06 00:27:52 -07:00
int encode = prefix_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xBD ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2009-05-06 00:27:52 -07:00
}
2009-03-26 14:31:45 -07:00
// Emit mfence instruction
2008-08-27 00:21:55 -07:00
void Assembler : : mfence ( ) {
2009-03-26 14:31:45 -07:00
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " unsupported " ) ; )
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xAE ) ;
emit_int8 ( ( unsigned char ) 0xF0 ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : mov ( Register dst , Register src ) {
LP64_ONLY ( movq ( dst , src ) ) NOT_LP64 ( movl ( dst , src ) ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : movapd ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
int vector_len = VM_Version : : supports_avx512novl ( ) ? AVX_512bit : AVX_128bit ;
InstructionAttr attributes ( vector_len , /* rex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( dst , xnoreg , src , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x28 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : movaps ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
int vector_len = VM_Version : : supports_avx512novl ( ) ? AVX_512bit : AVX_128bit ;
InstructionAttr attributes ( vector_len , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
int encode = simd_prefix_and_encode ( dst , xnoreg , src , VEX_SIMD_NONE , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x28 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2008-08-27 00:21:55 -07:00
}
2012-06-15 01:25:19 -07:00
void Assembler : : movlhps ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ true , /* uses_vl */ false ) ;
int encode = simd_prefix_and_encode ( dst , src , src , VEX_SIMD_NONE , VEX_OPCODE_0F , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x16 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-06-15 01:25:19 -07:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : movb ( Register dst , Address src ) {
NOT_LP64 ( assert ( dst - > has_byte_register ( ) , " must have byte register " ) ) ;
2007-12-01 00:00:00 +00:00
InstructionMark im ( this ) ;
2008-08-27 00:21:55 -07:00
prefix ( src , dst , true ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0x8A ) ;
2007-12-01 00:00:00 +00:00
emit_operand ( dst , src ) ;
}
2015-10-22 21:39:25 -07:00
void Assembler : : movddup ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse3 ( ) , " " ) ) ;
2016-03-29 09:53:50 -07:00
int vector_len = VM_Version : : supports_avx512novl ( ) ? AVX_512bit : AVX_128bit ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( vector_len , /* rex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( dst , xnoreg , src , VEX_SIMD_F2 , VEX_OPCODE_0F , & attributes ) ;
2015-10-22 21:39:25 -07:00
emit_int8 ( 0x12 ) ;
emit_int8 ( 0xC0 | encode ) ;
}
2015-12-14 14:48:30 -08:00
// KMOVB k, r32 (VEX.66.0F 0x92): copy the low byte of a GPR into an
// opmask register.  Requires AVX512DQ.
void Assembler : : kmovbl ( KRegister dst , Register src ) {
assert ( VM_Version : : supports_avx512dq ( ) , " " ) ;
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ true , /* uses_vl */ false ) ;
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , 0 , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0x92 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
// KMOVB r32, k (VEX.66.0F 0x93): copy an opmask register into a GPR.
// Requires AVX512DQ.
void Assembler : : kmovbl ( Register dst , KRegister src ) {
assert ( VM_Version : : supports_avx512dq ( ) , " " ) ;
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ true , /* uses_vl */ false ) ;
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , 0 , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0x93 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
2015-11-19 16:07:22 -08:00
// KMOVW k, r32 (VEX.0F 0x92): copy the low word of a GPR into an opmask
// register.  Requires base AVX-512 (EVEX).
void Assembler : : kmovwl ( KRegister dst , Register src ) {
2015-12-14 14:48:30 -08:00
assert ( VM_Version : : supports_evex ( ) , " " ) ;
2015-11-19 16:07:22 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ true , /* uses_vl */ false ) ;
2015-12-14 14:48:30 -08:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , 0 , src - > encoding ( ) , VEX_SIMD_NONE , VEX_OPCODE_0F , & attributes ) ;
2015-11-19 16:07:22 -08:00
emit_int8 ( ( unsigned char ) 0x92 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
2015-12-14 14:48:30 -08:00
// KMOVW r32, k (VEX.0F 0x93): copy an opmask register into a GPR.
void Assembler : : kmovwl ( Register dst , KRegister src ) {
assert ( VM_Version : : supports_evex ( ) , " " ) ;
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ true , /* uses_vl */ false ) ;
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , 0 , src - > encoding ( ) , VEX_SIMD_NONE , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0x93 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
2016-04-18 15:18:14 -07:00
// KMOVW k, m16 (VEX.0F 0x90): load an opmask register from memory.
void Assembler : : kmovwl ( KRegister dst , Address src ) {
assert ( VM_Version : : supports_evex ( ) , " " ) ;
InstructionMark im ( this ) ;
InstructionAttr attributes ( AVX_128bit , /* vex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ true , /* uses_vl */ false ) ;
vex_prefix ( src , 0 , dst - > encoding ( ) , VEX_SIMD_NONE , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0x90 ) ;
// KRegister is cast to Register only to reuse the GPR operand emitter.
emit_operand ( ( Register ) dst , src ) ;
}
2015-11-19 16:07:22 -08:00
// KMOVD k, r32 (VEX.F2.0F 0x92): copy a 32-bit GPR into an opmask register.
// Requires AVX512BW (32/64-bit mask operations).
void Assembler : : kmovdl ( KRegister dst , Register src ) {
2015-12-14 14:48:30 -08:00
assert ( VM_Version : : supports_avx512bw ( ) , " " ) ;
2015-11-19 16:07:22 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ true , /* uses_vl */ false ) ;
2015-12-14 14:48:30 -08:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , 0 , src - > encoding ( ) , VEX_SIMD_F2 , VEX_OPCODE_0F , & attributes ) ;
2015-11-19 16:07:22 -08:00
emit_int8 ( ( unsigned char ) 0x92 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
2015-12-14 14:48:30 -08:00
// KMOVD r32, k (VEX.F2.0F 0x93): copy an opmask register into a 32-bit GPR.
void Assembler : : kmovdl ( Register dst , KRegister src ) {
assert ( VM_Version : : supports_avx512bw ( ) , " " ) ;
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ true , /* uses_vl */ false ) ;
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , 0 , src - > encoding ( ) , VEX_SIMD_F2 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0x93 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
2015-09-11 17:02:44 -07:00
// KMOVQ k, k (VEX.W1.0F 0x90): copy one 64-bit opmask register to another.
// All KMOVQ forms require AVX512BW.
void Assembler : : kmovql ( KRegister dst , KRegister src ) {
2015-12-14 14:48:30 -08:00
assert ( VM_Version : : supports_avx512bw ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ true , /* legacy_mode */ true , /* no_mask_reg */ true , /* uses_vl */ false ) ;
2015-12-14 14:48:30 -08:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , 0 , src - > encoding ( ) , VEX_SIMD_NONE , VEX_OPCODE_0F , & attributes ) ;
2015-05-08 11:49:20 -07:00
emit_int8 ( ( unsigned char ) 0x90 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
2015-09-11 17:02:44 -07:00
// KMOVQ k, m64 (opcode 0x90, load form): load an opmask register from memory.
void Assembler : : kmovql ( KRegister dst , Address src ) {
2015-12-14 14:48:30 -08:00
assert ( VM_Version : : supports_avx512bw ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( AVX_128bit , /* vex_w */ true , /* legacy_mode */ true , /* no_mask_reg */ true , /* uses_vl */ false ) ;
vex_prefix ( src , 0 , dst - > encoding ( ) , VEX_SIMD_NONE , VEX_OPCODE_0F , & attributes ) ;
2015-05-08 11:49:20 -07:00
emit_int8 ( ( unsigned char ) 0x90 ) ;
// KRegister is cast to Register only to reuse the GPR operand emitter.
emit_operand ( ( Register ) dst , src ) ;
}
2015-09-11 17:02:44 -07:00
// KMOVQ m64, k (opcode 0x90, store form): store an opmask register to memory.
void Assembler : : kmovql ( Address dst , KRegister src ) {
2015-12-14 14:48:30 -08:00
assert ( VM_Version : : supports_avx512bw ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( AVX_128bit , /* vex_w */ true , /* legacy_mode */ true , /* no_mask_reg */ true , /* uses_vl */ false ) ;
vex_prefix ( dst , 0 , src - > encoding ( ) , VEX_SIMD_NONE , VEX_OPCODE_0F , & attributes ) ;
2015-05-08 11:49:20 -07:00
emit_int8 ( ( unsigned char ) 0x90 ) ;
emit_operand ( ( Register ) src , dst ) ;
}
// KMOVQ k, r64 (VEX.F2.W1.0F 0x92): copy a 64-bit GPR into an opmask register.
void Assembler : : kmovql ( KRegister dst , Register src ) {
2015-12-14 14:48:30 -08:00
assert ( VM_Version : : supports_avx512bw ( ) , " " ) ;
InstructionAttr attributes ( AVX_128bit , /* rex_w */ true , /* legacy_mode */ true , /* no_mask_reg */ true , /* uses_vl */ false ) ;
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , 0 , src - > encoding ( ) , VEX_SIMD_F2 , VEX_OPCODE_0F , & attributes ) ;
2015-05-08 11:49:20 -07:00
emit_int8 ( ( unsigned char ) 0x92 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
2015-12-14 14:48:30 -08:00
// KMOVQ r64, k (VEX.F2.W1.0F 0x93): copy an opmask register into a 64-bit GPR.
void Assembler : : kmovql ( Register dst , KRegister src ) {
assert ( VM_Version : : supports_avx512bw ( ) , " " ) ;
InstructionAttr attributes ( AVX_128bit , /* rex_w */ true , /* legacy_mode */ true , /* no_mask_reg */ true , /* uses_vl */ false ) ;
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , 0 , src - > encoding ( ) , VEX_SIMD_F2 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0x93 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
2016-04-18 15:18:14 -07:00
// KNOTW (VEX.0F 0x44): bitwise NOT of a 16-bit opmask register.
void Assembler : : knotwl ( KRegister dst , KRegister src ) {
assert ( VM_Version : : supports_evex ( ) , " " ) ;
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ true , /* uses_vl */ false ) ;
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , 0 , src - > encoding ( ) , VEX_SIMD_NONE , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0x44 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
2015-11-19 16:07:22 -08:00
// This instruction produces ZF or CF flags
// KORTESTB (VEX.66.0F 0x98): OR two 8-bit opmasks and set ZF/CF.  AVX512DQ.
void Assembler : : kortestbl ( KRegister src1 , KRegister src2 ) {
2015-12-14 14:48:30 -08:00
assert ( VM_Version : : supports_avx512dq ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ true , /* uses_vl */ false ) ;
2015-12-14 14:48:30 -08:00
int encode = vex_prefix_and_encode ( src1 - > encoding ( ) , 0 , src2 - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-19 16:07:22 -08:00
emit_int8 ( ( unsigned char ) 0x98 ) ;
2015-09-11 17:02:44 -07:00
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
2015-11-19 16:07:22 -08:00
// This instruction produces ZF or CF flags
// KORTESTW (VEX.0F 0x98): 16-bit variant.  Base AVX-512.
void Assembler : : kortestwl ( KRegister src1 , KRegister src2 ) {
2015-12-14 14:48:30 -08:00
assert ( VM_Version : : supports_evex ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ true , /* uses_vl */ false ) ;
2015-12-14 14:48:30 -08:00
int encode = vex_prefix_and_encode ( src1 - > encoding ( ) , 0 , src2 - > encoding ( ) , VEX_SIMD_NONE , VEX_OPCODE_0F , & attributes ) ;
2015-11-19 16:07:22 -08:00
emit_int8 ( ( unsigned char ) 0x98 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
// This instruction produces ZF or CF flags
// KORTESTD (VEX.66.W1.0F 0x98): 32-bit variant.  AVX512BW.
void Assembler : : kortestdl ( KRegister src1 , KRegister src2 ) {
2015-12-14 14:48:30 -08:00
assert ( VM_Version : : supports_avx512bw ( ) , " " ) ;
2015-11-19 16:07:22 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ true , /* legacy_mode */ true , /* no_mask_reg */ true , /* uses_vl */ false ) ;
2015-12-14 14:48:30 -08:00
int encode = vex_prefix_and_encode ( src1 - > encoding ( ) , 0 , src2 - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-19 16:07:22 -08:00
emit_int8 ( ( unsigned char ) 0x98 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
// This instruction produces ZF or CF flags
// KORTESTQ (VEX.W1.0F 0x98): 64-bit variant.  AVX512BW.
void Assembler : : kortestql ( KRegister src1 , KRegister src2 ) {
2015-12-14 14:48:30 -08:00
assert ( VM_Version : : supports_avx512bw ( ) , " " ) ;
2015-11-19 16:07:22 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ true , /* legacy_mode */ true , /* no_mask_reg */ true , /* uses_vl */ false ) ;
2015-12-14 14:48:30 -08:00
int encode = vex_prefix_and_encode ( src1 - > encoding ( ) , 0 , src2 - > encoding ( ) , VEX_SIMD_NONE , VEX_OPCODE_0F , & attributes ) ;
2015-11-19 16:07:22 -08:00
emit_int8 ( ( unsigned char ) 0x98 ) ;
2015-05-08 11:49:20 -07:00
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
2007-12-01 00:00:00 +00:00
2016-04-27 13:37:07 -07:00
// This instruction produces ZF or CF flags
// KTESTQ (VEX.W1.0F 0x99): AND-test two 64-bit opmasks, set ZF/CF.  AVX512BW.
void Assembler : : ktestql ( KRegister src1 , KRegister src2 ) {
assert ( VM_Version : : supports_avx512bw ( ) , " " ) ;
InstructionAttr attributes ( AVX_128bit , /* rex_w */ true , /* legacy_mode */ true , /* no_mask_reg */ true , /* uses_vl */ false ) ;
int encode = vex_prefix_and_encode ( src1 - > encoding ( ) , 0 , src2 - > encoding ( ) , VEX_SIMD_NONE , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0x99 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
2008-08-27 00:21:55 -07:00
// MOV m8, imm8 (opcode 0xC6 /0): store an immediate byte to memory.
void Assembler : : movb ( Address dst , int imm8 ) {
2007-12-01 00:00:00 +00:00
InstructionMark im ( this ) ;
2008-08-27 00:21:55 -07:00
prefix ( dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xC6 ) ;
2008-08-27 00:21:55 -07:00
// rax encodes the /0 ModRM extension; trailing '1' = immediate size in bytes.
emit_operand ( rax , dst , 1 ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( imm8 ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
// MOV m8, r8 (opcode 0x88): store the low byte of src to memory.
void Assembler : : movb ( Address dst , Register src ) {
assert ( src - > has_byte_register ( ) , " must have byte register " ) ;
2007-12-01 00:00:00 +00:00
InstructionMark im ( this ) ;
2008-08-27 00:21:55 -07:00
// Third argument 'true' selects the byte-operand prefix form.
prefix ( dst , src , true ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0x88 ) ;
2007-12-01 00:00:00 +00:00
emit_operand ( src , dst ) ;
}
2008-08-27 00:21:55 -07:00
// MOVD xmm, r32 (66 0F 6E): move a 32-bit GPR into the low dword of an XMM.
void Assembler : : movdl ( XMMRegister dst , Register src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ true , /* uses_vl */ false ) ;
int encode = simd_prefix_and_encode ( dst , xnoreg , as_XMMRegister ( src - > encoding ( ) ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x6E ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
// MOVD r32, xmm (66 0F 7E): move the low dword of an XMM into a 32-bit GPR.
void Assembler : : movdl ( Register dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ true , /* uses_vl */ false ) ;
2008-08-27 00:21:55 -07:00
// swap src/dst to get correct prefix
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( src , xnoreg , as_XMMRegister ( dst - > encoding ( ) ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x7E ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2008-08-27 00:21:55 -07:00
}
2011-02-26 12:10:54 -08:00
// MOVD xmm, m32 (66 0F 6E): load a dword from memory into the low lane.
void Assembler : : movdl ( XMMRegister dst , Address src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
InstructionMark im ( this ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ true , /* uses_vl */ false ) ;
// EVEX displacement compression: scalar tuple, 32-bit element.
attributes . set_address_attributes ( /* tuple_type */ EVEX_T1S , /* input_size_in_bits */ EVEX_32bit ) ;
simd_prefix ( dst , xnoreg , src , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x6E ) ;
2011-02-26 12:10:54 -08:00
emit_operand ( dst , src ) ;
}
2012-06-15 01:25:19 -07:00
// MOVD m32, xmm (66 0F 7E): store the low dword of an XMM to memory.
void Assembler : : movdl ( Address dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
InstructionMark im ( this ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ true , /* uses_vl */ false ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_T1S , /* input_size_in_bits */ EVEX_32bit ) ;
simd_prefix ( src , xnoreg , dst , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x7E ) ;
2012-06-15 01:25:19 -07:00
emit_operand ( src , dst ) ;
}
2008-08-27 00:21:55 -07:00
// MOVDQA xmm, xmm (66 0F 6F): aligned move between XMM registers.
// Uses 512-bit width when only AVX-512-without-VL is available.
void Assembler : : movdqa ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
int vector_len = VM_Version : : supports_avx512novl ( ) ? AVX_512bit : AVX_128bit ;
InstructionAttr attributes ( vector_len , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
int encode = simd_prefix_and_encode ( dst , xnoreg , src , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x6F ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2007-12-01 00:00:00 +00:00
}
2013-07-02 20:42:12 -04:00
// MOVDQA xmm, m128 (66 0F 6F): aligned load from memory.
void Assembler : : movdqa ( XMMRegister dst , Address src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_FVM , /* input_size_in_bits */ EVEX_NObit ) ;
simd_prefix ( dst , xnoreg , src , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x6F ) ;
emit_operand ( dst , src ) ;
2013-07-02 20:42:12 -04:00
}
2008-10-14 15:10:26 -07:00
// MOVDQU xmm, m128 (F3 0F 6F): unaligned 128-bit load from memory.
void Assembler : : movdqu ( XMMRegister dst , Address src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
2016-02-12 16:12:15 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2015-11-09 11:26:41 -08:00
attributes . set_address_attributes ( /* tuple_type */ EVEX_FVM , /* input_size_in_bits */ EVEX_NObit ) ;
simd_prefix ( dst , xnoreg , src , VEX_SIMD_F3 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x6F ) ;
emit_operand ( dst , src ) ;
2008-10-14 15:10:26 -07:00
}
// MOVDQU xmm, xmm: register-to-register form.
void Assembler : : movdqu ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
int encode = simd_prefix_and_encode ( dst , xnoreg , src , VEX_SIMD_F3 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x6F ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2008-10-14 15:10:26 -07:00
}
// MOVDQU m128, xmm (F3 0F 7F): unaligned 128-bit store to memory.
void Assembler : : movdqu ( Address dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
InstructionMark im ( this ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_FVM , /* input_size_in_bits */ EVEX_NObit ) ;
// src/dst swapped: the register operand always goes in the reg field.
simd_prefix ( src , xnoreg , dst , VEX_SIMD_F3 , VEX_OPCODE_0F , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x7F ) ;
2008-10-14 15:10:26 -07:00
emit_operand ( src , dst ) ;
}
2012-06-15 01:25:19 -07:00
// Move Unaligned 256bit Vector
// VMOVDQU ymm, ymm (VEX.256.F3.0F 6F): 256-bit register move.
void Assembler : : vmovdqu ( XMMRegister dst , XMMRegister src ) {
2014-04-29 12:20:53 -07:00
assert ( UseAVX > 0 , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_256bit , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , 0 , src - > encoding ( ) , VEX_SIMD_F3 , VEX_OPCODE_0F , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x6F ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-06-15 01:25:19 -07:00
}
// VMOVDQU ymm, m256: unaligned 256-bit load.
void Assembler : : vmovdqu ( XMMRegister dst , Address src ) {
2014-04-29 12:20:53 -07:00
assert ( UseAVX > 0 , " " ) ;
2012-06-15 01:25:19 -07:00
InstructionMark im ( this ) ;
2016-02-12 16:12:15 -08:00
InstructionAttr attributes ( AVX_256bit , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2015-11-09 11:26:41 -08:00
attributes . set_address_attributes ( /* tuple_type */ EVEX_FVM , /* input_size_in_bits */ EVEX_NObit ) ;
vex_prefix ( src , 0 , dst - > encoding ( ) , VEX_SIMD_F3 , VEX_OPCODE_0F , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x6F ) ;
2012-06-15 01:25:19 -07:00
emit_operand ( dst , src ) ;
}
// VMOVDQU m256, ymm (opcode 0x7F): unaligned 256-bit store.
void Assembler : : vmovdqu ( Address dst , XMMRegister src ) {
2014-04-29 12:20:53 -07:00
assert ( UseAVX > 0 , " " ) ;
2012-06-15 01:25:19 -07:00
InstructionMark im ( this ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_256bit , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_FVM , /* input_size_in_bits */ EVEX_NObit ) ;
2012-06-15 01:25:19 -07:00
// swap src<->dst for encoding
assert ( src ! = xnoreg , " sanity " ) ;
2015-11-09 11:26:41 -08:00
vex_prefix ( dst , 0 , src - > encoding ( ) , VEX_SIMD_F3 , VEX_OPCODE_0F , & attributes ) ;
2015-05-08 11:49:20 -07:00
emit_int8 ( 0x7F ) ;
emit_operand ( src , dst ) ;
}
// Move Unaligned EVEX enabled Vector (programmable : 8,16,32,64)
2015-11-19 16:07:22 -08:00
// EVEX byte-element unaligned move, register-to-register form.
// NOTE(review): SIMD prefix flips between F2 and F3 based on _legacy_mode_bw
// (AVX512BW availability) — confirm the fallback encoding against the encoder.
void Assembler : : evmovdqub ( XMMRegister dst , XMMRegister src , int vector_len ) {
assert ( VM_Version : : supports_evex ( ) , " " ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2016-04-18 15:18:14 -07:00
attributes . set_is_evex_instruction ( ) ;
2016-03-29 09:53:50 -07:00
int prefix = ( _legacy_mode_bw ) ? VEX_SIMD_F2 : VEX_SIMD_F3 ;
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , 0 , src - > encoding ( ) , ( Assembler : : VexSimdPrefix ) prefix , VEX_OPCODE_0F , & attributes ) ;
2015-11-19 16:07:22 -08:00
emit_int8 ( 0x6F ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
// EVEX byte-element unaligned load from memory (opcode 0x6F).
void Assembler : : evmovdqub ( XMMRegister dst , Address src , int vector_len ) {
assert ( VM_Version : : supports_evex ( ) , " " ) ;
InstructionMark im ( this ) ;
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2016-03-29 09:53:50 -07:00
int prefix = ( _legacy_mode_bw ) ? VEX_SIMD_F2 : VEX_SIMD_F3 ;
2015-11-19 16:07:22 -08:00
attributes . set_address_attributes ( /* tuple_type */ EVEX_FVM , /* input_size_in_bits */ EVEX_NObit ) ;
2016-04-18 15:18:14 -07:00
attributes . set_is_evex_instruction ( ) ;
2016-03-29 09:53:50 -07:00
vex_prefix ( src , 0 , dst - > encoding ( ) , ( Assembler : : VexSimdPrefix ) prefix , VEX_OPCODE_0F , & attributes ) ;
2015-11-19 16:07:22 -08:00
emit_int8 ( 0x6F ) ;
emit_operand ( dst , src ) ;
}
// EVEX byte-element unaligned store to memory (opcode 0x7F).
void Assembler : : evmovdqub ( Address dst , XMMRegister src , int vector_len ) {
assert ( VM_Version : : supports_evex ( ) , " " ) ;
assert ( src ! = xnoreg , " sanity " ) ;
InstructionMark im ( this ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2016-03-29 09:53:50 -07:00
int prefix = ( _legacy_mode_bw ) ? VEX_SIMD_F2 : VEX_SIMD_F3 ;
2015-11-19 16:07:22 -08:00
attributes . set_address_attributes ( /* tuple_type */ EVEX_FVM , /* input_size_in_bits */ EVEX_NObit ) ;
2016-04-18 15:18:14 -07:00
attributes . set_is_evex_instruction ( ) ;
2016-03-29 09:53:50 -07:00
vex_prefix ( dst , 0 , src - > encoding ( ) , ( Assembler : : VexSimdPrefix ) prefix , VEX_OPCODE_0F , & attributes ) ;
2015-11-19 16:07:22 -08:00
emit_int8 ( 0x7F ) ;
emit_operand ( src , dst ) ;
}
2016-04-27 13:37:07 -07:00
// Opmask-predicated EVEX byte-element load (VMOVDQU8 with embedded k-mask).
void Assembler : : evmovdqub ( KRegister mask , XMMRegister dst , Address src , int vector_len ) {
assert ( VM_Version : : supports_avx512vlbw ( ) , " " ) ;
assert ( is_vector_masking ( ) , " " ) ; // For stub code use only
InstructionMark im ( this ) ;
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ false , /* uses_vl */ true ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_FVM , /* input_size_in_bits */ EVEX_NObit ) ;
attributes . set_embedded_opmask_register_specifier ( mask ) ;
attributes . set_is_evex_instruction ( ) ;
vex_prefix ( src , 0 , dst - > encoding ( ) , VEX_SIMD_F2 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x6F ) ;
emit_operand ( dst , src ) ;
}
2015-11-19 16:07:22 -08:00
// EVEX word-element unaligned move (vex_w = 1), register-to-register form.
// NOTE(review): SIMD prefix choice mirrors evmovdqub — depends on AVX512BW.
void Assembler : : evmovdquw ( XMMRegister dst , XMMRegister src , int vector_len ) {
assert ( VM_Version : : supports_evex ( ) , " " ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ true , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2016-04-18 15:18:14 -07:00
attributes . set_is_evex_instruction ( ) ;
2016-03-29 09:53:50 -07:00
int prefix = ( _legacy_mode_bw ) ? VEX_SIMD_F2 : VEX_SIMD_F3 ;
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , 0 , src - > encoding ( ) , ( Assembler : : VexSimdPrefix ) prefix , VEX_OPCODE_0F , & attributes ) ;
2015-11-19 16:07:22 -08:00
emit_int8 ( 0x6F ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
// EVEX word-element unaligned load from memory (opcode 0x6F).
void Assembler : : evmovdquw ( XMMRegister dst , Address src , int vector_len ) {
assert ( VM_Version : : supports_evex ( ) , " " ) ;
InstructionMark im ( this ) ;
InstructionAttr attributes ( vector_len , /* vex_w */ true , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_FVM , /* input_size_in_bits */ EVEX_NObit ) ;
2016-04-18 15:18:14 -07:00
attributes . set_is_evex_instruction ( ) ;
2016-03-29 09:53:50 -07:00
int prefix = ( _legacy_mode_bw ) ? VEX_SIMD_F2 : VEX_SIMD_F3 ;
vex_prefix ( src , 0 , dst - > encoding ( ) , ( Assembler : : VexSimdPrefix ) prefix , VEX_OPCODE_0F , & attributes ) ;
2015-11-19 16:07:22 -08:00
emit_int8 ( 0x6F ) ;
emit_operand ( dst , src ) ;
}
// EVEX word-element unaligned store to memory (opcode 0x7F).
void Assembler : : evmovdquw ( Address dst , XMMRegister src , int vector_len ) {
assert ( VM_Version : : supports_evex ( ) , " " ) ;
assert ( src ! = xnoreg , " sanity " ) ;
InstructionMark im ( this ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ true , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2015-11-19 16:07:22 -08:00
attributes . set_address_attributes ( /* tuple_type */ EVEX_FVM , /* input_size_in_bits */ EVEX_NObit ) ;
2016-04-18 15:18:14 -07:00
attributes . set_is_evex_instruction ( ) ;
2016-03-29 09:53:50 -07:00
int prefix = ( _legacy_mode_bw ) ? VEX_SIMD_F2 : VEX_SIMD_F3 ;
vex_prefix ( dst , 0 , src - > encoding ( ) , ( Assembler : : VexSimdPrefix ) prefix , VEX_OPCODE_0F , & attributes ) ;
2015-11-19 16:07:22 -08:00
emit_int8 ( 0x7F ) ;
emit_operand ( src , dst ) ;
}
2016-03-29 09:53:50 -07:00
2015-09-11 17:02:44 -07:00
// VMOVDQU32 (EVEX.F3.0F 6F, vex_w = 0): dword-element unaligned move,
// register-to-register form.
void Assembler : : evmovdqul ( XMMRegister dst , XMMRegister src , int vector_len ) {
2015-11-09 11:26:41 -08:00
assert ( VM_Version : : supports_evex ( ) , " " ) ;
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-03-29 09:53:50 -07:00
attributes . set_is_evex_instruction ( ) ;
2015-11-09 11:26:41 -08:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , 0 , src - > encoding ( ) , VEX_SIMD_F3 , VEX_OPCODE_0F , & attributes ) ;
2015-05-08 11:49:20 -07:00
emit_int8 ( 0x6F ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
2015-09-11 17:02:44 -07:00
// VMOVDQU32 load from memory (opcode 0x6F).
void Assembler : : evmovdqul ( XMMRegister dst , Address src , int vector_len ) {
2015-11-09 11:26:41 -08:00
assert ( VM_Version : : supports_evex ( ) , " " ) ;
2015-05-08 11:49:20 -07:00
InstructionMark im ( this ) ;
2016-02-12 16:12:15 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2015-11-09 11:26:41 -08:00
attributes . set_address_attributes ( /* tuple_type */ EVEX_FVM , /* input_size_in_bits */ EVEX_NObit ) ;
2016-03-29 09:53:50 -07:00
attributes . set_is_evex_instruction ( ) ;
2015-11-09 11:26:41 -08:00
vex_prefix ( src , 0 , dst - > encoding ( ) , VEX_SIMD_F3 , VEX_OPCODE_0F , & attributes ) ;
2015-05-08 11:49:20 -07:00
emit_int8 ( 0x6F ) ;
emit_operand ( dst , src ) ;
}
2015-09-11 17:02:44 -07:00
// VMOVDQU32 store to memory (opcode 0x7F).
void Assembler : : evmovdqul ( Address dst , XMMRegister src , int vector_len ) {
2015-11-09 11:26:41 -08:00
assert ( VM_Version : : supports_evex ( ) , " " ) ;
2015-05-08 11:49:20 -07:00
assert ( src ! = xnoreg , " sanity " ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_FVM , /* input_size_in_bits */ EVEX_NObit ) ;
2016-04-18 15:18:14 -07:00
attributes . set_is_evex_instruction ( ) ;
2015-11-09 11:26:41 -08:00
vex_prefix ( dst , 0 , src - > encoding ( ) , VEX_SIMD_F3 , VEX_OPCODE_0F , & attributes ) ;
2015-09-11 17:02:44 -07:00
emit_int8 ( 0x7F ) ;
emit_operand ( src , dst ) ;
}
// VMOVDQU64 (EVEX.F3.W1.0F 6F): qword-element unaligned move,
// register-to-register form.
void Assembler : : evmovdquq ( XMMRegister dst , XMMRegister src , int vector_len ) {
2015-11-09 11:26:41 -08:00
assert ( VM_Version : : supports_evex ( ) , " " ) ;
InstructionAttr attributes ( vector_len , /* vex_w */ true , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-03-29 09:53:50 -07:00
attributes . set_is_evex_instruction ( ) ;
2015-11-09 11:26:41 -08:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , 0 , src - > encoding ( ) , VEX_SIMD_F3 , VEX_OPCODE_0F , & attributes ) ;
2015-09-11 17:02:44 -07:00
emit_int8 ( 0x6F ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
// VMOVDQU64 load from memory (opcode 0x6F).
void Assembler : : evmovdquq ( XMMRegister dst , Address src , int vector_len ) {
2015-11-09 11:26:41 -08:00
assert ( VM_Version : : supports_evex ( ) , " " ) ;
2015-09-11 17:02:44 -07:00
InstructionMark im ( this ) ;
2016-02-12 16:12:15 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ true , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2015-11-09 11:26:41 -08:00
attributes . set_address_attributes ( /* tuple_type */ EVEX_FVM , /* input_size_in_bits */ EVEX_NObit ) ;
2016-04-18 15:18:14 -07:00
attributes . set_is_evex_instruction ( ) ;
2015-11-09 11:26:41 -08:00
vex_prefix ( src , 0 , dst - > encoding ( ) , VEX_SIMD_F3 , VEX_OPCODE_0F , & attributes ) ;
2015-09-11 17:02:44 -07:00
emit_int8 ( 0x6F ) ;
emit_operand ( dst , src ) ;
}
// VMOVDQU64 store to memory (opcode 0x7F).
void Assembler : : evmovdquq ( Address dst , XMMRegister src , int vector_len ) {
2015-11-09 11:26:41 -08:00
assert ( VM_Version : : supports_evex ( ) , " " ) ;
2015-09-11 17:02:44 -07:00
assert ( src ! = xnoreg , " sanity " ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( vector_len , /* vex_w */ true , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_FVM , /* input_size_in_bits */ EVEX_NObit ) ;
2016-04-18 15:18:14 -07:00
attributes . set_is_evex_instruction ( ) ;
2015-11-09 11:26:41 -08:00
vex_prefix ( dst , 0 , src - > encoding ( ) , VEX_SIMD_F3 , VEX_OPCODE_0F , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x7F ) ;
2012-06-15 01:25:19 -07:00
emit_operand ( src , dst ) ;
}
2008-08-27 00:21:55 -07:00
// Uses zero extension on 64bit
// MOV r32, imm32 (opcode 0xB8+r): load an immediate into a 32-bit register.
void Assembler : : movl ( Register dst , int32_t imm32 ) {
int encode = prefix_and_encode ( dst - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) ( 0xB8 | encode ) ) ;
2013-01-07 14:08:28 -08:00
emit_int32 ( imm32 ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
// MOV r32, r32 (opcode 0x8B): register-to-register 32-bit move.
void Assembler : : movl ( Register dst , Register src ) {
int encode = prefix_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0x8B ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
// MOV r32, m32 (opcode 0x8B): load 32 bits from memory.
void Assembler : : movl ( Register dst , Address src ) {
2007-12-01 00:00:00 +00:00
InstructionMark im ( this ) ;
2008-08-27 00:21:55 -07:00
prefix ( src , dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0x8B ) ;
2007-12-01 00:00:00 +00:00
emit_operand ( dst , src ) ;
}
2008-08-27 00:21:55 -07:00
// MOV m32, imm32 (opcode 0xC7 /0): store an immediate dword to memory.
void Assembler : : movl ( Address dst , int32_t imm32 ) {
InstructionMark im ( this ) ;
prefix ( dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xC7 ) ;
2008-08-27 00:21:55 -07:00
// rax encodes the /0 ModRM extension; trailing '4' = immediate size in bytes.
emit_operand ( rax , dst , 4 ) ;
2013-01-07 14:08:28 -08:00
emit_int32 ( imm32 ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
// MOV m32, r32 (opcode 0x89): store a 32-bit register to memory.
void Assembler : : movl ( Address dst , Register src ) {
InstructionMark im ( this ) ;
prefix ( dst , src ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0x89 ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( src , dst ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
// New cpus require to use movsd and movss to avoid partial register stall
// when loading from memory. But for old Opteron use movlpd instead of movsd.
// The selection is done in MacroAssembler::movdbl() and movflt().
// MOVLPD xmm, m64 (66 0F 12): load 64 bits into the low half of dst.
void Assembler : : movlpd ( XMMRegister dst , Address src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( AVX_128bit , /* rex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ true , /* uses_vl */ false ) ;
2016-04-26 20:43:59 -07:00
// EVEX displacement compression: scalar tuple, 64-bit element.
attributes . set_address_attributes ( /* tuple_type */ EVEX_T1S , /* input_size_in_bits */ EVEX_64bit ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2015-11-09 11:26:41 -08:00
simd_prefix ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x12 ) ;
emit_operand ( dst , src ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
// MOVQ mm, m64 (0F 6F): load a quadword from memory into an MMX register.
void Assembler : : movq ( MMXRegister dst , Address src ) {
assert ( VM_Version : : supports_mmx ( ) , " " ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( 0x6F ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( dst , src ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
// MOVQ m64, mm (0F 7F): store an MMX register to memory.
void Assembler : : movq ( Address dst , MMXRegister src ) {
assert ( VM_Version : : supports_mmx ( ) , " " ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( 0x7F ) ;
2008-08-27 00:21:55 -07:00
// workaround gcc (3.2.1-7a) bug
// In that version of gcc with only an emit_operand(MMX, Address)
// gcc will tail jump and try and reverse the parameters completely
// obliterating dst in the process. By having a version available
// that doesn't need to swap the args at the tail jump the bug is
// avoided.
emit_operand ( dst , src ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : movq ( XMMRegister dst , Address src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2007-12-01 00:00:00 +00:00
InstructionMark im ( this ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ true , /* uses_vl */ false ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_T1S , /* input_size_in_bits */ EVEX_64bit ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2015-11-09 11:26:41 -08:00
simd_prefix ( dst , xnoreg , src , VEX_SIMD_F3 , VEX_OPCODE_0F , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x7E ) ;
2007-12-01 00:00:00 +00:00
emit_operand ( dst , src ) ;
}
2008-08-27 00:21:55 -07:00
void Assembler : : movq ( Address dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2007-12-01 00:00:00 +00:00
InstructionMark im ( this ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ true , /* uses_vl */ false ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_T1S , /* input_size_in_bits */ EVEX_64bit ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2015-11-09 11:26:41 -08:00
simd_prefix ( src , xnoreg , dst , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xD6 ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( src , dst ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : movsbl ( Register dst , Address src ) { // movsxb
2007-12-01 00:00:00 +00:00
InstructionMark im ( this ) ;
2008-08-27 00:21:55 -07:00
prefix ( src , dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xBE ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( dst , src ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : movsbl ( Register dst , Register src ) { // movsxb
NOT_LP64 ( assert ( src - > has_byte_register ( ) , " must have byte register " ) ) ;
2015-10-08 12:49:30 -10:00
int encode = prefix_and_encode ( dst - > encoding ( ) , false , src - > encoding ( ) , true ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xBE ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : movsd ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2016-02-12 16:12:15 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_F2 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x10 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : movsd ( XMMRegister dst , Address src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
2016-02-12 16:12:15 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
2015-11-09 11:26:41 -08:00
attributes . set_address_attributes ( /* tuple_type */ EVEX_T1S , /* input_size_in_bits */ EVEX_64bit ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2015-11-09 11:26:41 -08:00
simd_prefix ( dst , xnoreg , src , VEX_SIMD_F2 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x10 ) ;
emit_operand ( dst , src ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : movsd ( Address dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2007-12-01 00:00:00 +00:00
InstructionMark im ( this ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_T1S , /* input_size_in_bits */ EVEX_64bit ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2015-11-09 11:26:41 -08:00
simd_prefix ( src , xnoreg , dst , VEX_SIMD_F2 , VEX_OPCODE_0F , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x11 ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( src , dst ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : movss ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse ( ) , " " ) ) ;
2016-02-12 16:12:15 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_F3 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x10 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : movss ( XMMRegister dst , Address src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
2016-02-12 16:12:15 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
2015-11-09 11:26:41 -08:00
attributes . set_address_attributes ( /* tuple_type */ EVEX_T1S , /* input_size_in_bits */ EVEX_32bit ) ;
simd_prefix ( dst , xnoreg , src , VEX_SIMD_F3 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x10 ) ;
emit_operand ( dst , src ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : movss ( Address dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse ( ) , " " ) ) ;
InstructionMark im ( this ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_T1S , /* input_size_in_bits */ EVEX_32bit ) ;
simd_prefix ( src , xnoreg , dst , VEX_SIMD_F3 , VEX_OPCODE_0F , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x11 ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( src , dst ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : movswl ( Register dst , Address src ) { // movsxw
2007-12-01 00:00:00 +00:00
InstructionMark im ( this ) ;
2008-08-27 00:21:55 -07:00
prefix ( src , dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xBF ) ;
2007-12-01 00:00:00 +00:00
emit_operand ( dst , src ) ;
}
2008-08-27 00:21:55 -07:00
void Assembler : : movswl ( Register dst , Register src ) { // movsxw
2007-12-01 00:00:00 +00:00
int encode = prefix_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xBF ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : movw ( Address dst , int imm16 ) {
InstructionMark im ( this ) ;
2007-12-01 00:00:00 +00:00
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x66 ) ; // switch to 16-bit mode
2008-08-27 00:21:55 -07:00
prefix ( dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xC7 ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( rax , dst , 2 ) ;
2012-12-18 10:47:23 -08:00
emit_int16 ( imm16 ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : movw ( Register dst , Address src ) {
2007-12-01 00:00:00 +00:00
InstructionMark im ( this ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x66 ) ;
2008-08-27 00:21:55 -07:00
prefix ( src , dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0x8B ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( dst , src ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : movw ( Address dst , Register src ) {
InstructionMark im ( this ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x66 ) ;
2008-08-27 00:21:55 -07:00
prefix ( dst , src ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0x89 ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( src , dst ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : movzbl ( Register dst , Address src ) { // movzxb
2007-12-01 00:00:00 +00:00
InstructionMark im ( this ) ;
2008-08-27 00:21:55 -07:00
prefix ( src , dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xB6 ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( dst , src ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : movzbl ( Register dst , Register src ) { // movzxb
NOT_LP64 ( assert ( src - > has_byte_register ( ) , " must have byte register " ) ) ;
2015-10-08 12:49:30 -10:00
int encode = prefix_and_encode ( dst - > encoding ( ) , false , src - > encoding ( ) , true ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xB6 ) ;
emit_int8 ( 0xC0 | encode ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : movzwl ( Register dst , Address src ) { // movzxw
InstructionMark im ( this ) ;
prefix ( src , dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xB7 ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( dst , src ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : movzwl ( Register dst , Register src ) { // movzxw
2007-12-01 00:00:00 +00:00
int encode = prefix_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xB7 ) ;
emit_int8 ( 0xC0 | encode ) ;
2007-12-01 00:00:00 +00:00
}
void Assembler : : mull ( Address src ) {
InstructionMark im ( this ) ;
prefix ( src ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xF7 ) ;
2007-12-01 00:00:00 +00:00
emit_operand ( rsp , src ) ;
}
void Assembler : : mull ( Register src ) {
int encode = prefix_and_encode ( src - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xF7 ) ;
emit_int8 ( ( unsigned char ) ( 0xE0 | encode ) ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : mulsd ( XMMRegister dst , Address src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
2016-02-12 16:12:15 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
2015-11-09 11:26:41 -08:00
attributes . set_address_attributes ( /* tuple_type */ EVEX_T1S , /* input_size_in_bits */ EVEX_64bit ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2015-11-09 11:26:41 -08:00
simd_prefix ( dst , dst , src , VEX_SIMD_F2 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x59 ) ;
emit_operand ( dst , src ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : mulsd ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2016-02-12 16:12:15 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_F2 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x59 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : mulss ( XMMRegister dst , Address src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
2016-02-12 16:12:15 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
2015-11-09 11:26:41 -08:00
attributes . set_address_attributes ( /* tuple_type */ EVEX_T1S , /* input_size_in_bits */ EVEX_32bit ) ;
simd_prefix ( dst , dst , src , VEX_SIMD_F3 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x59 ) ;
emit_operand ( dst , src ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : mulss ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse ( ) , " " ) ) ;
2016-02-12 16:12:15 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_F3 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x59 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : negl ( Register dst ) {
2007-12-01 00:00:00 +00:00
int encode = prefix_and_encode ( dst - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xF7 ) ;
emit_int8 ( ( unsigned char ) ( 0xD8 | encode ) ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : nop ( int i ) {
# ifdef ASSERT
assert ( i > 0 , " " ) ;
// The fancy nops aren't currently recognized by debuggers making it a
// pain to disassemble code while debugging. If asserts are on clearly
// speed is not an issue so simply use the single byte traditional nop
// to do alignment.
2007-12-01 00:00:00 +00:00
2012-12-20 18:53:44 -08:00
for ( ; i > 0 ; i - - ) emit_int8 ( ( unsigned char ) 0x90 ) ;
2008-08-27 00:21:55 -07:00
return ;
2007-12-01 00:00:00 +00:00
2008-08-27 00:21:55 -07:00
# endif // ASSERT
2007-12-01 00:00:00 +00:00
2008-08-27 00:21:55 -07:00
if ( UseAddressNop & & VM_Version : : is_intel ( ) ) {
//
// Using multi-bytes nops "0x0F 0x1F [address]" for Intel
// 1: 0x90
// 2: 0x66 0x90
// 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
// 4: 0x0F 0x1F 0x40 0x00
// 5: 0x0F 0x1F 0x44 0x00 0x00
// 6: 0x66 0x0F 0x1F 0x44 0x00 0x00
// 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
// 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
// 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
// 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
// 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
2007-12-01 00:00:00 +00:00
2008-08-27 00:21:55 -07:00
// The rest coding is Intel specific - don't use consecutive address nops
2007-12-01 00:00:00 +00:00
2008-08-27 00:21:55 -07:00
// 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
// 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
// 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
// 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
2007-12-01 00:00:00 +00:00
2008-08-27 00:21:55 -07:00
while ( i > = 15 ) {
// For Intel don't generate consecutive addess nops (mix with regular nops)
i - = 15 ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x66 ) ; // size prefix
emit_int8 ( 0x66 ) ; // size prefix
emit_int8 ( 0x66 ) ; // size prefix
2008-08-27 00:21:55 -07:00
addr_nop_8 ( ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x66 ) ; // size prefix
emit_int8 ( 0x66 ) ; // size prefix
emit_int8 ( 0x66 ) ; // size prefix
emit_int8 ( ( unsigned char ) 0x90 ) ;
// nop
2008-08-27 00:21:55 -07:00
}
switch ( i ) {
case 14 :
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x66 ) ; // size prefix
2008-08-27 00:21:55 -07:00
case 13 :
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x66 ) ; // size prefix
2008-08-27 00:21:55 -07:00
case 12 :
addr_nop_8 ( ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x66 ) ; // size prefix
emit_int8 ( 0x66 ) ; // size prefix
emit_int8 ( 0x66 ) ; // size prefix
emit_int8 ( ( unsigned char ) 0x90 ) ;
// nop
2008-08-27 00:21:55 -07:00
break ;
case 11 :
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x66 ) ; // size prefix
2008-08-27 00:21:55 -07:00
case 10 :
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x66 ) ; // size prefix
2008-08-27 00:21:55 -07:00
case 9 :
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x66 ) ; // size prefix
2008-08-27 00:21:55 -07:00
case 8 :
addr_nop_8 ( ) ;
break ;
case 7 :
addr_nop_7 ( ) ;
break ;
case 6 :
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x66 ) ; // size prefix
2008-08-27 00:21:55 -07:00
case 5 :
addr_nop_5 ( ) ;
break ;
case 4 :
addr_nop_4 ( ) ;
break ;
case 3 :
// Don't use "0x0F 0x1F 0x00" - need patching safe padding
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x66 ) ; // size prefix
2008-08-27 00:21:55 -07:00
case 2 :
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x66 ) ; // size prefix
2008-08-27 00:21:55 -07:00
case 1 :
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0x90 ) ;
// nop
2008-08-27 00:21:55 -07:00
break ;
default :
assert ( i = = 0 , " " ) ;
}
return ;
}
if ( UseAddressNop & & VM_Version : : is_amd ( ) ) {
//
// Using multi-bytes nops "0x0F 0x1F [address]" for AMD.
// 1: 0x90
// 2: 0x66 0x90
// 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
// 4: 0x0F 0x1F 0x40 0x00
// 5: 0x0F 0x1F 0x44 0x00 0x00
// 6: 0x66 0x0F 0x1F 0x44 0x00 0x00
// 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
// 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
// 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
// 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
// 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
// The rest coding is AMD specific - use consecutive address nops
// 12: 0x66 0x0F 0x1F 0x44 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
// 13: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
// 14: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
// 15: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
// 16: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
// Size prefixes (0x66) are added for larger sizes
while ( i > = 22 ) {
i - = 11 ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x66 ) ; // size prefix
emit_int8 ( 0x66 ) ; // size prefix
emit_int8 ( 0x66 ) ; // size prefix
2008-08-27 00:21:55 -07:00
addr_nop_8 ( ) ;
}
// Generate first nop for size between 21-12
switch ( i ) {
case 21 :
i - = 1 ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x66 ) ; // size prefix
2008-08-27 00:21:55 -07:00
case 20 :
case 19 :
i - = 1 ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x66 ) ; // size prefix
2008-08-27 00:21:55 -07:00
case 18 :
case 17 :
i - = 1 ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x66 ) ; // size prefix
2008-08-27 00:21:55 -07:00
case 16 :
case 15 :
i - = 8 ;
addr_nop_8 ( ) ;
break ;
case 14 :
case 13 :
i - = 7 ;
addr_nop_7 ( ) ;
break ;
case 12 :
i - = 6 ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x66 ) ; // size prefix
2008-08-27 00:21:55 -07:00
addr_nop_5 ( ) ;
break ;
default :
assert ( i < 12 , " " ) ;
}
// Generate second nop for size between 11-1
switch ( i ) {
case 11 :
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x66 ) ; // size prefix
2008-08-27 00:21:55 -07:00
case 10 :
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x66 ) ; // size prefix
2008-08-27 00:21:55 -07:00
case 9 :
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x66 ) ; // size prefix
2008-08-27 00:21:55 -07:00
case 8 :
addr_nop_8 ( ) ;
break ;
case 7 :
addr_nop_7 ( ) ;
break ;
case 6 :
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x66 ) ; // size prefix
2008-08-27 00:21:55 -07:00
case 5 :
addr_nop_5 ( ) ;
break ;
case 4 :
addr_nop_4 ( ) ;
break ;
case 3 :
// Don't use "0x0F 0x1F 0x00" - need patching safe padding
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x66 ) ; // size prefix
2008-08-27 00:21:55 -07:00
case 2 :
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x66 ) ; // size prefix
2008-08-27 00:21:55 -07:00
case 1 :
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0x90 ) ;
// nop
2008-08-27 00:21:55 -07:00
break ;
default :
assert ( i = = 0 , " " ) ;
}
return ;
}
// Using nops with size prefixes "0x66 0x90".
// From AMD Optimization Guide:
// 1: 0x90
// 2: 0x66 0x90
// 3: 0x66 0x66 0x90
// 4: 0x66 0x66 0x66 0x90
// 5: 0x66 0x66 0x90 0x66 0x90
// 6: 0x66 0x66 0x90 0x66 0x66 0x90
// 7: 0x66 0x66 0x66 0x90 0x66 0x66 0x90
// 8: 0x66 0x66 0x66 0x90 0x66 0x66 0x66 0x90
// 9: 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
// 10: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
//
while ( i > 12 ) {
i - = 4 ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x66 ) ; // size prefix
emit_int8 ( 0x66 ) ;
emit_int8 ( 0x66 ) ;
emit_int8 ( ( unsigned char ) 0x90 ) ;
// nop
2008-08-27 00:21:55 -07:00
}
// 1 - 12 nops
if ( i > 8 ) {
if ( i > 9 ) {
i - = 1 ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x66 ) ;
2008-08-27 00:21:55 -07:00
}
i - = 3 ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x66 ) ;
emit_int8 ( 0x66 ) ;
emit_int8 ( ( unsigned char ) 0x90 ) ;
2008-08-27 00:21:55 -07:00
}
// 1 - 8 nops
if ( i > 4 ) {
if ( i > 6 ) {
i - = 1 ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x66 ) ;
2008-08-27 00:21:55 -07:00
}
i - = 3 ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x66 ) ;
emit_int8 ( 0x66 ) ;
emit_int8 ( ( unsigned char ) 0x90 ) ;
2008-08-27 00:21:55 -07:00
}
switch ( i ) {
case 4 :
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x66 ) ;
2008-08-27 00:21:55 -07:00
case 3 :
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x66 ) ;
2008-08-27 00:21:55 -07:00
case 2 :
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x66 ) ;
2008-08-27 00:21:55 -07:00
case 1 :
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0x90 ) ;
2008-08-27 00:21:55 -07:00
break ;
default :
assert ( i = = 0 , " " ) ;
}
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : notl ( Register dst ) {
int encode = prefix_and_encode ( dst - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xF7 ) ;
emit_int8 ( ( unsigned char ) ( 0xD0 | encode ) ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : orl ( Address dst , int32_t imm32 ) {
2007-12-01 00:00:00 +00:00
InstructionMark im ( this ) ;
2008-08-27 00:21:55 -07:00
prefix ( dst ) ;
2011-01-07 10:42:32 -05:00
emit_arith_operand ( 0x81 , rcx , dst , imm32 ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : orl ( Register dst , int32_t imm32 ) {
prefix ( dst ) ;
emit_arith ( 0x81 , 0xC8 , dst , imm32 ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : orl ( Register dst , Address src ) {
2007-12-01 00:00:00 +00:00
InstructionMark im ( this ) ;
2008-08-27 00:21:55 -07:00
prefix ( src , dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0B ) ;
2007-12-01 00:00:00 +00:00
emit_operand ( dst , src ) ;
}
2008-08-27 00:21:55 -07:00
void Assembler : : orl ( Register dst , Register src ) {
( void ) prefix_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
emit_arith ( 0x0B , 0xC0 , dst , src ) ;
2007-12-01 00:00:00 +00:00
}
2015-06-03 15:02:10 -07:00
void Assembler : : orl ( Address dst , Register src ) {
InstructionMark im ( this ) ;
prefix ( dst , src ) ;
emit_int8 ( 0x09 ) ;
emit_operand ( src , dst ) ;
}
2011-12-14 14:54:38 -08:00
void Assembler : : packuswb ( XMMRegister dst , Address src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
assert ( ( UseAVX > 0 ) , " SSE mode requires address alignment 16 bytes " ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2015-11-09 11:26:41 -08:00
attributes . set_address_attributes ( /* tuple_type */ EVEX_FV , /* input_size_in_bits */ EVEX_32bit ) ;
simd_prefix ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x67 ) ;
emit_operand ( dst , src ) ;
2011-12-14 14:54:38 -08:00
}
void Assembler : : packuswb ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x67 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2011-12-14 14:54:38 -08:00
}
2009-03-31 14:07:08 -07:00
2015-05-08 11:49:20 -07:00
void Assembler : : vpackuswb ( XMMRegister dst , XMMRegister nds , XMMRegister src , int vector_len ) {
assert ( UseAVX > 0 , " some form of AVX must be enabled " ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2016-04-05 11:37:41 -07:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , nds - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-09 11:26:41 -08:00
emit_int8 ( 0x67 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2013-01-22 15:34:16 -08:00
}
2015-05-08 11:49:20 -07:00
void Assembler : : vpermq ( XMMRegister dst , XMMRegister src , int imm8 , int vector_len ) {
2013-02-08 15:07:17 -08:00
assert ( VM_Version : : supports_avx2 ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( vector_len , /* rex_w */ true , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-04-05 11:37:41 -07:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , 0 , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
2013-02-08 15:07:17 -08:00
emit_int8 ( 0x00 ) ;
emit_int8 ( 0xC0 | encode ) ;
emit_int8 ( imm8 ) ;
2013-01-22 15:34:16 -08:00
}
2016-04-26 21:54:21 -07:00
void Assembler : : vperm2i128 ( XMMRegister dst , XMMRegister nds , XMMRegister src , int imm8 ) {
assert ( VM_Version : : supports_avx2 ( ) , " " ) ;
InstructionAttr attributes ( AVX_256bit , /* rex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , nds - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
emit_int8 ( 0x46 ) ;
emit_int8 ( 0xC0 | encode ) ;
emit_int8 ( imm8 ) ;
}
2014-03-20 17:49:27 -07:00
void Assembler : : pause ( ) {
emit_int8 ( ( unsigned char ) 0xF3 ) ;
emit_int8 ( ( unsigned char ) 0x90 ) ;
}
2009-03-31 14:07:08 -07:00
void Assembler : : pcmpestri ( XMMRegister dst , Address src , int imm8 ) {
assert ( VM_Version : : supports_sse4_2 ( ) , " " ) ;
InstructionMark im ( this ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
simd_prefix ( dst , xnoreg , src , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x61 ) ;
2009-03-31 14:07:08 -07:00
emit_operand ( dst , src ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( imm8 ) ;
2009-03-31 14:07:08 -07:00
}
void Assembler : : pcmpestri ( XMMRegister dst , XMMRegister src , int imm8 ) {
assert ( VM_Version : : supports_sse4_2 ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
int encode = simd_prefix_and_encode ( dst , xnoreg , src , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x61 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
emit_int8 ( imm8 ) ;
2009-03-31 14:07:08 -07:00
}
2015-11-19 16:07:22 -08:00
// In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst
void Assembler : : pcmpeqb ( XMMRegister dst , XMMRegister src ) {
2015-12-14 14:48:30 -08:00
assert ( VM_Version : : supports_sse2 ( ) , " " ) ;
2015-11-19 16:07:22 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x74 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
// In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst
void Assembler : : vpcmpeqb ( XMMRegister dst , XMMRegister nds , XMMRegister src , int vector_len ) {
assert ( VM_Version : : supports_avx ( ) , " " ) ;
InstructionAttr attributes ( vector_len , /* rex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
2016-04-05 11:37:41 -07:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , nds - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-19 16:07:22 -08:00
emit_int8 ( 0x74 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
// In this context, kdst is written the mask used to process the equal components
void Assembler : : evpcmpeqb ( KRegister kdst , XMMRegister nds , XMMRegister src , int vector_len ) {
assert ( VM_Version : : supports_avx512bw ( ) , " " ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( vector_len , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2016-03-29 09:53:50 -07:00
attributes . set_is_evex_instruction ( ) ;
2016-04-05 11:37:41 -07:00
int encode = vex_prefix_and_encode ( kdst - > encoding ( ) , nds - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-19 16:07:22 -08:00
emit_int8 ( 0x74 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
2015-12-14 14:48:30 -08:00
void Assembler : : evpcmpeqb ( KRegister kdst , XMMRegister nds , Address src , int vector_len ) {
assert ( VM_Version : : supports_avx512bw ( ) , " " ) ;
InstructionMark im ( this ) ;
InstructionAttr attributes ( vector_len , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2016-03-29 09:53:50 -07:00
attributes . set_is_evex_instruction ( ) ;
2015-12-14 14:48:30 -08:00
attributes . set_address_attributes ( /* tuple_type */ EVEX_FVM , /* input_size_in_bits */ EVEX_NObit ) ;
int dst_enc = kdst - > encoding ( ) ;
2016-04-05 11:37:41 -07:00
vex_prefix ( src , nds - > encoding ( ) , dst_enc , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-12-14 14:48:30 -08:00
emit_int8 ( 0x74 ) ;
emit_operand ( as_Register ( dst_enc ) , src ) ;
}
2016-04-27 13:37:07 -07:00
void Assembler : : evpcmpeqb ( KRegister mask , KRegister kdst , XMMRegister nds , Address src , int vector_len ) {
assert ( VM_Version : : supports_avx512vlbw ( ) , " " ) ;
assert ( is_vector_masking ( ) , " " ) ; // For stub code use only
InstructionMark im ( this ) ;
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_reg_mask */ false , /* uses_vl */ false ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_FVM , /* input_size_in_bits */ EVEX_NObit ) ;
attributes . set_embedded_opmask_register_specifier ( mask ) ;
attributes . set_is_evex_instruction ( ) ;
vex_prefix ( src , nds - > encoding ( ) , kdst - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x74 ) ;
emit_operand ( as_Register ( kdst - > encoding ( ) ) , src ) ;
}
2015-11-19 16:07:22 -08:00
// In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst
8141132: JEP 254: Compact Strings
Adopt a more space-efficient internal representation for strings.
Co-authored-by: Brent Christian <brent.christian@oracle.com>
Co-authored-by: Vivek Deshpande <vivek.r.deshpande@intel.com>
Co-authored-by: Charlie Hunt <charlie.hunt@oracle.com>
Co-authored-by: Vladimir Kozlov <vladimir.kozlov@oracle.com>
Co-authored-by: Roger Riggs <roger.riggs@oracle.com>
Co-authored-by: Xueming Shen <xueming.shen@oracle.com>
Co-authored-by: Aleksey Shipilev <aleksey.shipilev@oracle.com>
Co-authored-by: Sandhya Viswanathan <sandhya.viswanathan@intel.com>
Reviewed-by: alanb, bdelsart, coleenp, iklam, jiangli, jrose, kevinw, naoto, pliden, roland, smarks, twisti
2015-11-03 09:41:03 +01:00
void Assembler : : pcmpeqw ( XMMRegister dst , XMMRegister src ) {
2015-12-14 14:48:30 -08:00
assert ( VM_Version : : supports_sse2 ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x75 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
8141132: JEP 254: Compact Strings
Adopt a more space-efficient internal representation for strings.
Co-authored-by: Brent Christian <brent.christian@oracle.com>
Co-authored-by: Vivek Deshpande <vivek.r.deshpande@intel.com>
Co-authored-by: Charlie Hunt <charlie.hunt@oracle.com>
Co-authored-by: Vladimir Kozlov <vladimir.kozlov@oracle.com>
Co-authored-by: Roger Riggs <roger.riggs@oracle.com>
Co-authored-by: Xueming Shen <xueming.shen@oracle.com>
Co-authored-by: Aleksey Shipilev <aleksey.shipilev@oracle.com>
Co-authored-by: Sandhya Viswanathan <sandhya.viswanathan@intel.com>
Reviewed-by: alanb, bdelsart, coleenp, iklam, jiangli, jrose, kevinw, naoto, pliden, roland, smarks, twisti
2015-11-03 09:41:03 +01:00
}
2015-11-19 16:07:22 -08:00
// In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst
8141132: JEP 254: Compact Strings
Adopt a more space-efficient internal representation for strings.
Co-authored-by: Brent Christian <brent.christian@oracle.com>
Co-authored-by: Vivek Deshpande <vivek.r.deshpande@intel.com>
Co-authored-by: Charlie Hunt <charlie.hunt@oracle.com>
Co-authored-by: Vladimir Kozlov <vladimir.kozlov@oracle.com>
Co-authored-by: Roger Riggs <roger.riggs@oracle.com>
Co-authored-by: Xueming Shen <xueming.shen@oracle.com>
Co-authored-by: Aleksey Shipilev <aleksey.shipilev@oracle.com>
Co-authored-by: Sandhya Viswanathan <sandhya.viswanathan@intel.com>
Reviewed-by: alanb, bdelsart, coleenp, iklam, jiangli, jrose, kevinw, naoto, pliden, roland, smarks, twisti
2015-11-03 09:41:03 +01:00
void Assembler : : vpcmpeqw ( XMMRegister dst , XMMRegister nds , XMMRegister src , int vector_len ) {
2015-11-09 11:26:41 -08:00
assert ( VM_Version : : supports_avx ( ) , " " ) ;
InstructionAttr attributes ( vector_len , /* rex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
2016-04-05 11:37:41 -07:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , nds - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-09 11:26:41 -08:00
emit_int8 ( 0x75 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
8141132: JEP 254: Compact Strings
Adopt a more space-efficient internal representation for strings.
Co-authored-by: Brent Christian <brent.christian@oracle.com>
Co-authored-by: Vivek Deshpande <vivek.r.deshpande@intel.com>
Co-authored-by: Charlie Hunt <charlie.hunt@oracle.com>
Co-authored-by: Vladimir Kozlov <vladimir.kozlov@oracle.com>
Co-authored-by: Roger Riggs <roger.riggs@oracle.com>
Co-authored-by: Xueming Shen <xueming.shen@oracle.com>
Co-authored-by: Aleksey Shipilev <aleksey.shipilev@oracle.com>
Co-authored-by: Sandhya Viswanathan <sandhya.viswanathan@intel.com>
Reviewed-by: alanb, bdelsart, coleenp, iklam, jiangli, jrose, kevinw, naoto, pliden, roland, smarks, twisti
2015-11-03 09:41:03 +01:00
}
2015-11-19 16:07:22 -08:00
// In this context, kdst is written the mask used to process the equal components
void Assembler : : evpcmpeqw ( KRegister kdst , XMMRegister nds , XMMRegister src , int vector_len ) {
assert ( VM_Version : : supports_avx512bw ( ) , " " ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( vector_len , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2016-03-29 09:53:50 -07:00
attributes . set_is_evex_instruction ( ) ;
2016-04-05 11:37:41 -07:00
int encode = vex_prefix_and_encode ( kdst - > encoding ( ) , nds - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-19 16:07:22 -08:00
emit_int8 ( 0x75 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
2015-12-14 14:48:30 -08:00
void Assembler : : evpcmpeqw ( KRegister kdst , XMMRegister nds , Address src , int vector_len ) {
assert ( VM_Version : : supports_avx512bw ( ) , " " ) ;
InstructionMark im ( this ) ;
InstructionAttr attributes ( vector_len , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ true , /* uses_vl */ true ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_FVM , /* input_size_in_bits */ EVEX_NObit ) ;
2016-03-29 09:53:50 -07:00
attributes . set_is_evex_instruction ( ) ;
2015-12-14 14:48:30 -08:00
int dst_enc = kdst - > encoding ( ) ;
2016-04-05 11:37:41 -07:00
vex_prefix ( src , nds - > encoding ( ) , dst_enc , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-12-14 14:48:30 -08:00
emit_int8 ( 0x75 ) ;
emit_operand ( as_Register ( dst_enc ) , src ) ;
}
2015-11-19 16:07:22 -08:00
// In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst
void Assembler : : pcmpeqd ( XMMRegister dst , XMMRegister src ) {
2015-12-14 14:48:30 -08:00
assert ( VM_Version : : supports_sse2 ( ) , " " ) ;
2015-11-19 16:07:22 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x76 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
// In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst
void Assembler : : vpcmpeqd ( XMMRegister dst , XMMRegister nds , XMMRegister src , int vector_len ) {
assert ( VM_Version : : supports_avx ( ) , " " ) ;
InstructionAttr attributes ( vector_len , /* rex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
2016-04-05 11:37:41 -07:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , nds - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-19 16:07:22 -08:00
emit_int8 ( 0x76 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
// In this context, kdst is written the mask used to process the equal components
void Assembler : : evpcmpeqd ( KRegister kdst , XMMRegister nds , XMMRegister src , int vector_len ) {
assert ( VM_Version : : supports_evex ( ) , " " ) ;
InstructionAttr attributes ( vector_len , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-03-29 09:53:50 -07:00
attributes . set_is_evex_instruction ( ) ;
2016-04-05 11:37:41 -07:00
int encode = vex_prefix_and_encode ( kdst - > encoding ( ) , nds - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-19 16:07:22 -08:00
emit_int8 ( 0x76 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
2015-12-14 14:48:30 -08:00
void Assembler : : evpcmpeqd ( KRegister kdst , XMMRegister nds , Address src , int vector_len ) {
assert ( VM_Version : : supports_evex ( ) , " " ) ;
InstructionMark im ( this ) ;
InstructionAttr attributes ( vector_len , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_FV , /* input_size_in_bits */ EVEX_32bit ) ;
2016-03-29 09:53:50 -07:00
attributes . set_is_evex_instruction ( ) ;
2015-12-14 14:48:30 -08:00
int dst_enc = kdst - > encoding ( ) ;
2016-04-05 11:37:41 -07:00
vex_prefix ( src , nds - > encoding ( ) , dst_enc , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-12-14 14:48:30 -08:00
emit_int8 ( 0x76 ) ;
emit_operand ( as_Register ( dst_enc ) , src ) ;
}
2015-11-19 16:07:22 -08:00
// In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst
void Assembler : : pcmpeqq ( XMMRegister dst , XMMRegister src ) {
2015-12-14 14:48:30 -08:00
assert ( VM_Version : : supports_sse4_1 ( ) , " " ) ;
2015-11-19 16:07:22 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
emit_int8 ( 0x29 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
// In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst
void Assembler : : vpcmpeqq ( XMMRegister dst , XMMRegister nds , XMMRegister src , int vector_len ) {
assert ( VM_Version : : supports_avx ( ) , " " ) ;
InstructionAttr attributes ( vector_len , /* rex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
2016-04-05 11:37:41 -07:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , nds - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2015-11-19 16:07:22 -08:00
emit_int8 ( 0x29 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
// In this context, kdst is written the mask used to process the equal components
void Assembler : : evpcmpeqq ( KRegister kdst , XMMRegister nds , XMMRegister src , int vector_len ) {
assert ( VM_Version : : supports_evex ( ) , " " ) ;
InstructionAttr attributes ( vector_len , /* rex_w */ true , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-03-29 09:53:50 -07:00
attributes . set_is_evex_instruction ( ) ;
2016-04-05 11:37:41 -07:00
int encode = vex_prefix_and_encode ( kdst - > encoding ( ) , nds - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2015-11-19 16:07:22 -08:00
emit_int8 ( 0x29 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
// In this context, kdst is written the mask used to process the equal components
void Assembler : : evpcmpeqq ( KRegister kdst , XMMRegister nds , Address src , int vector_len ) {
assert ( VM_Version : : supports_evex ( ) , " " ) ;
InstructionMark im ( this ) ;
InstructionAttr attributes ( vector_len , /* rex_w */ true , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-03-29 09:53:50 -07:00
attributes . set_is_evex_instruction ( ) ;
2015-11-19 16:07:22 -08:00
attributes . set_address_attributes ( /* tuple_type */ EVEX_FV , /* input_size_in_bits */ EVEX_64bit ) ;
int dst_enc = kdst - > encoding ( ) ;
2016-04-05 11:37:41 -07:00
vex_prefix ( src , nds - > encoding ( ) , dst_enc , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2015-11-19 16:07:22 -08:00
emit_int8 ( 0x29 ) ;
emit_operand ( as_Register ( dst_enc ) , src ) ;
}
8141132: JEP 254: Compact Strings
Adopt a more space-efficient internal representation for strings.
Co-authored-by: Brent Christian <brent.christian@oracle.com>
Co-authored-by: Vivek Deshpande <vivek.r.deshpande@intel.com>
Co-authored-by: Charlie Hunt <charlie.hunt@oracle.com>
Co-authored-by: Vladimir Kozlov <vladimir.kozlov@oracle.com>
Co-authored-by: Roger Riggs <roger.riggs@oracle.com>
Co-authored-by: Xueming Shen <xueming.shen@oracle.com>
Co-authored-by: Aleksey Shipilev <aleksey.shipilev@oracle.com>
Co-authored-by: Sandhya Viswanathan <sandhya.viswanathan@intel.com>
Reviewed-by: alanb, bdelsart, coleenp, iklam, jiangli, jrose, kevinw, naoto, pliden, roland, smarks, twisti
2015-11-03 09:41:03 +01:00
void Assembler : : pmovmskb ( Register dst , XMMRegister src ) {
assert ( VM_Version : : supports_sse2 ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
int encode = simd_prefix_and_encode ( as_XMMRegister ( dst - > encoding ( ) ) , xnoreg , src , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
8141132: JEP 254: Compact Strings
Adopt a more space-efficient internal representation for strings.
Co-authored-by: Brent Christian <brent.christian@oracle.com>
Co-authored-by: Vivek Deshpande <vivek.r.deshpande@intel.com>
Co-authored-by: Charlie Hunt <charlie.hunt@oracle.com>
Co-authored-by: Vladimir Kozlov <vladimir.kozlov@oracle.com>
Co-authored-by: Roger Riggs <roger.riggs@oracle.com>
Co-authored-by: Xueming Shen <xueming.shen@oracle.com>
Co-authored-by: Aleksey Shipilev <aleksey.shipilev@oracle.com>
Co-authored-by: Sandhya Viswanathan <sandhya.viswanathan@intel.com>
Reviewed-by: alanb, bdelsart, coleenp, iklam, jiangli, jrose, kevinw, naoto, pliden, roland, smarks, twisti
2015-11-03 09:41:03 +01:00
emit_int8 ( ( unsigned char ) 0xD7 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
void Assembler : : vpmovmskb ( Register dst , XMMRegister src ) {
assert ( VM_Version : : supports_avx2 ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_256bit , /* rex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , 0 , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
8141132: JEP 254: Compact Strings
Adopt a more space-efficient internal representation for strings.
Co-authored-by: Brent Christian <brent.christian@oracle.com>
Co-authored-by: Vivek Deshpande <vivek.r.deshpande@intel.com>
Co-authored-by: Charlie Hunt <charlie.hunt@oracle.com>
Co-authored-by: Vladimir Kozlov <vladimir.kozlov@oracle.com>
Co-authored-by: Roger Riggs <roger.riggs@oracle.com>
Co-authored-by: Xueming Shen <xueming.shen@oracle.com>
Co-authored-by: Aleksey Shipilev <aleksey.shipilev@oracle.com>
Co-authored-by: Sandhya Viswanathan <sandhya.viswanathan@intel.com>
Reviewed-by: alanb, bdelsart, coleenp, iklam, jiangli, jrose, kevinw, naoto, pliden, roland, smarks, twisti
2015-11-03 09:41:03 +01:00
emit_int8 ( ( unsigned char ) 0xD7 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
2013-07-02 20:42:12 -04:00
void Assembler : : pextrd ( Register dst , XMMRegister src , int imm8 ) {
assert ( VM_Version : : supports_sse4_1 ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ _legacy_mode_dq , /* no_mask_reg */ true , /* uses_vl */ false ) ;
2015-12-28 23:11:01 -08:00
int encode = simd_prefix_and_encode ( src , xnoreg , as_XMMRegister ( dst - > encoding ( ) ) , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
2013-07-02 20:42:12 -04:00
emit_int8 ( 0x16 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
emit_int8 ( imm8 ) ;
}
2015-12-28 23:11:01 -08:00
void Assembler : : pextrd ( Address dst , XMMRegister src , int imm8 ) {
assert ( VM_Version : : supports_sse4_1 ( ) , " " ) ;
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ _legacy_mode_dq , /* no_mask_reg */ true , /* uses_vl */ false ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_T1S , /* input_size_in_bits */ EVEX_32bit ) ;
simd_prefix ( src , xnoreg , dst , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
emit_int8 ( 0x16 ) ;
emit_operand ( src , dst ) ;
emit_int8 ( imm8 ) ;
}
2013-07-02 20:42:12 -04:00
void Assembler : : pextrq ( Register dst , XMMRegister src , int imm8 ) {
assert ( VM_Version : : supports_sse4_1 ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ true , /* legacy_mode */ _legacy_mode_dq , /* no_mask_reg */ true , /* uses_vl */ false ) ;
2015-12-28 23:11:01 -08:00
int encode = simd_prefix_and_encode ( src , xnoreg , as_XMMRegister ( dst - > encoding ( ) ) , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
2013-07-02 20:42:12 -04:00
emit_int8 ( 0x16 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
emit_int8 ( imm8 ) ;
}
2015-12-28 23:11:01 -08:00
void Assembler : : pextrq ( Address dst , XMMRegister src , int imm8 ) {
assert ( VM_Version : : supports_sse4_1 ( ) , " " ) ;
InstructionAttr attributes ( AVX_128bit , /* rex_w */ true , /* legacy_mode */ _legacy_mode_dq , /* no_mask_reg */ true , /* uses_vl */ false ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_T1S , /* input_size_in_bits */ EVEX_64bit ) ;
simd_prefix ( src , xnoreg , dst , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
emit_int8 ( 0x16 ) ;
emit_operand ( src , dst ) ;
emit_int8 ( imm8 ) ;
}
2015-10-05 20:02:40 -07:00
void Assembler : : pextrw ( Register dst , XMMRegister src , int imm8 ) {
assert ( VM_Version : : supports_sse2 ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ false ) ;
int encode = simd_prefix_and_encode ( as_XMMRegister ( dst - > encoding ( ) ) , xnoreg , src , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-10-12 16:35:40 -07:00
emit_int8 ( ( unsigned char ) 0xC5 ) ;
2015-10-05 20:02:40 -07:00
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
emit_int8 ( imm8 ) ;
}
2015-12-28 23:11:01 -08:00
void Assembler : : pextrw ( Address dst , XMMRegister src , int imm8 ) {
assert ( VM_Version : : supports_sse4_1 ( ) , " " ) ;
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ false ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_T1S , /* input_size_in_bits */ EVEX_16bit ) ;
simd_prefix ( src , xnoreg , dst , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
emit_int8 ( ( unsigned char ) 0x15 ) ;
emit_operand ( src , dst ) ;
emit_int8 ( imm8 ) ;
}
void Assembler : : pextrb ( Address dst , XMMRegister src , int imm8 ) {
assert ( VM_Version : : supports_sse4_1 ( ) , " " ) ;
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ false ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_T1S , /* input_size_in_bits */ EVEX_8bit ) ;
simd_prefix ( src , xnoreg , dst , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
emit_int8 ( 0x14 ) ;
emit_operand ( src , dst ) ;
emit_int8 ( imm8 ) ;
}
2013-07-02 20:42:12 -04:00
void Assembler : : pinsrd ( XMMRegister dst , Register src , int imm8 ) {
assert ( VM_Version : : supports_sse4_1 ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ _legacy_mode_dq , /* no_mask_reg */ true , /* uses_vl */ false ) ;
int encode = simd_prefix_and_encode ( dst , dst , as_XMMRegister ( src - > encoding ( ) ) , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
2013-07-02 20:42:12 -04:00
emit_int8 ( 0x22 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
emit_int8 ( imm8 ) ;
}
2015-12-28 23:11:01 -08:00
void Assembler : : pinsrd ( XMMRegister dst , Address src , int imm8 ) {
assert ( VM_Version : : supports_sse4_1 ( ) , " " ) ;
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ _legacy_mode_dq , /* no_mask_reg */ true , /* uses_vl */ false ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_T1S , /* input_size_in_bits */ EVEX_32bit ) ;
simd_prefix ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
emit_int8 ( 0x22 ) ;
emit_operand ( dst , src ) ;
emit_int8 ( imm8 ) ;
}
2013-07-02 20:42:12 -04:00
void Assembler : : pinsrq ( XMMRegister dst , Register src , int imm8 ) {
assert ( VM_Version : : supports_sse4_1 ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ true , /* legacy_mode */ _legacy_mode_dq , /* no_mask_reg */ true , /* uses_vl */ false ) ;
int encode = simd_prefix_and_encode ( dst , dst , as_XMMRegister ( src - > encoding ( ) ) , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
2013-07-02 20:42:12 -04:00
emit_int8 ( 0x22 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
emit_int8 ( imm8 ) ;
}
2015-12-28 23:11:01 -08:00
void Assembler : : pinsrq ( XMMRegister dst , Address src , int imm8 ) {
assert ( VM_Version : : supports_sse4_1 ( ) , " " ) ;
InstructionAttr attributes ( AVX_128bit , /* rex_w */ true , /* legacy_mode */ _legacy_mode_dq , /* no_mask_reg */ true , /* uses_vl */ false ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_T1S , /* input_size_in_bits */ EVEX_64bit ) ;
simd_prefix ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
emit_int8 ( 0x22 ) ;
emit_operand ( dst , src ) ;
emit_int8 ( imm8 ) ;
}
2015-10-05 20:02:40 -07:00
void Assembler : : pinsrw ( XMMRegister dst , Register src , int imm8 ) {
assert ( VM_Version : : supports_sse2 ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ false ) ;
int encode = simd_prefix_and_encode ( dst , dst , as_XMMRegister ( src - > encoding ( ) ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-10-05 20:02:40 -07:00
emit_int8 ( ( unsigned char ) 0xC4 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
emit_int8 ( imm8 ) ;
}
2015-12-28 23:11:01 -08:00
void Assembler : : pinsrw ( XMMRegister dst , Address src , int imm8 ) {
assert ( VM_Version : : supports_sse2 ( ) , " " ) ;
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ false ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_T1S , /* input_size_in_bits */ EVEX_16bit ) ;
simd_prefix ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0xC4 ) ;
emit_operand ( dst , src ) ;
emit_int8 ( imm8 ) ;
}
void Assembler : : pinsrb ( XMMRegister dst , Address src , int imm8 ) {
assert ( VM_Version : : supports_sse4_1 ( ) , " " ) ;
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ false ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_T1S , /* input_size_in_bits */ EVEX_8bit ) ;
simd_prefix ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
emit_int8 ( 0x20 ) ;
emit_operand ( dst , src ) ;
emit_int8 ( imm8 ) ;
}
2011-12-14 14:54:38 -08:00
void Assembler : : pmovzxbw ( XMMRegister dst , Address src ) {
assert ( VM_Version : : supports_sse4_1 ( ) , " " ) ;
InstructionMark im ( this ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ false ) ;
2015-11-09 11:26:41 -08:00
attributes . set_address_attributes ( /* tuple_type */ EVEX_HVM , /* input_size_in_bits */ EVEX_NObit ) ;
simd_prefix ( dst , xnoreg , src , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x30 ) ;
2011-12-14 14:54:38 -08:00
emit_operand ( dst , src ) ;
}
void Assembler : : pmovzxbw ( XMMRegister dst , XMMRegister src ) {
assert ( VM_Version : : supports_sse4_1 ( ) , " " ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ false ) ;
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( dst , xnoreg , src , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x30 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2011-12-14 14:54:38 -08:00
}
2015-11-19 16:07:22 -08:00
void Assembler : : vpmovzxbw ( XMMRegister dst , Address src , int vector_len ) {
8141132: JEP 254: Compact Strings
Adopt a more space-efficient internal representation for strings.
Co-authored-by: Brent Christian <brent.christian@oracle.com>
Co-authored-by: Vivek Deshpande <vivek.r.deshpande@intel.com>
Co-authored-by: Charlie Hunt <charlie.hunt@oracle.com>
Co-authored-by: Vladimir Kozlov <vladimir.kozlov@oracle.com>
Co-authored-by: Roger Riggs <roger.riggs@oracle.com>
Co-authored-by: Xueming Shen <xueming.shen@oracle.com>
Co-authored-by: Aleksey Shipilev <aleksey.shipilev@oracle.com>
Co-authored-by: Sandhya Viswanathan <sandhya.viswanathan@intel.com>
Reviewed-by: alanb, bdelsart, coleenp, iklam, jiangli, jrose, kevinw, naoto, pliden, roland, smarks, twisti
2015-11-03 09:41:03 +01:00
assert ( VM_Version : : supports_avx ( ) , " " ) ;
InstructionMark im ( this ) ;
assert ( dst ! = xnoreg , " sanity " ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( vector_len , /* rex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ false ) ;
2015-11-09 11:26:41 -08:00
attributes . set_address_attributes ( /* tuple_type */ EVEX_HVM , /* input_size_in_bits */ EVEX_NObit ) ;
vex_prefix ( src , 0 , dst - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
8141132: JEP 254: Compact Strings
Adopt a more space-efficient internal representation for strings.
Co-authored-by: Brent Christian <brent.christian@oracle.com>
Co-authored-by: Vivek Deshpande <vivek.r.deshpande@intel.com>
Co-authored-by: Charlie Hunt <charlie.hunt@oracle.com>
Co-authored-by: Vladimir Kozlov <vladimir.kozlov@oracle.com>
Co-authored-by: Roger Riggs <roger.riggs@oracle.com>
Co-authored-by: Xueming Shen <xueming.shen@oracle.com>
Co-authored-by: Aleksey Shipilev <aleksey.shipilev@oracle.com>
Co-authored-by: Sandhya Viswanathan <sandhya.viswanathan@intel.com>
Reviewed-by: alanb, bdelsart, coleenp, iklam, jiangli, jrose, kevinw, naoto, pliden, roland, smarks, twisti
2015-11-03 09:41:03 +01:00
emit_int8 ( 0x30 ) ;
emit_operand ( dst , src ) ;
}
2008-08-27 00:21:55 -07:00
// generic
void Assembler : : pop ( Register dst ) {
2007-12-01 00:00:00 +00:00
int encode = prefix_and_encode ( dst - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x58 | encode ) ;
2007-12-01 00:00:00 +00:00
}
2009-03-13 11:35:17 -07:00
void Assembler : : popcntl ( Register dst , Address src ) {
assert ( VM_Version : : supports_popcnt ( ) , " must support " ) ;
InstructionMark im ( this ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xF3 ) ;
2009-03-13 11:35:17 -07:00
prefix ( src , dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xB8 ) ;
2009-03-13 11:35:17 -07:00
emit_operand ( dst , src ) ;
}
void Assembler : : popcntl ( Register dst , Register src ) {
assert ( VM_Version : : supports_popcnt ( ) , " must support " ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xF3 ) ;
2009-03-13 11:35:17 -07:00
int encode = prefix_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xB8 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2009-03-13 11:35:17 -07:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : popf ( ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0x9D ) ;
2007-12-01 00:00:00 +00:00
}
2009-11-02 11:17:55 +01:00
# ifndef _LP64 // no 32bit push/pop on amd64
2008-08-27 00:21:55 -07:00
void Assembler : : popl ( Address dst ) {
// NOTE: this will adjust stack by 8byte on 64bits
InstructionMark im ( this ) ;
prefix ( dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0x8F ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( rax , dst ) ;
2007-12-01 00:00:00 +00:00
}
2009-11-02 11:17:55 +01:00
# endif
2007-12-01 00:00:00 +00:00
2008-08-27 00:21:55 -07:00
void Assembler : : prefetch_prefix ( Address src ) {
prefix ( src ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : prefetchnta ( Address src ) {
2011-08-22 11:00:39 -07:00
NOT_LP64 ( assert ( VM_Version : : supports_sse ( ) , " must support " ) ) ;
2008-08-27 00:21:55 -07:00
InstructionMark im ( this ) ;
prefetch_prefix ( src ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x18 ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( rax , src ) ; // 0, src
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : prefetchr ( Address src ) {
2011-08-16 16:59:46 -07:00
assert ( VM_Version : : supports_3dnow_prefetch ( ) , " must support " ) ;
2008-08-27 00:21:55 -07:00
InstructionMark im ( this ) ;
prefetch_prefix ( src ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0D ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( rax , src ) ; // 0, src
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : prefetcht0 ( Address src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse ( ) , " must support " ) ) ;
InstructionMark im ( this ) ;
prefetch_prefix ( src ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x18 ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( rcx , src ) ; // 1, src
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : prefetcht1 ( Address src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse ( ) , " must support " ) ) ;
2007-12-01 00:00:00 +00:00
InstructionMark im ( this ) ;
2008-08-27 00:21:55 -07:00
prefetch_prefix ( src ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x18 ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( rdx , src ) ; // 2, src
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : prefetcht2 ( Address src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse ( ) , " must support " ) ) ;
InstructionMark im ( this ) ;
prefetch_prefix ( src ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x18 ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( rbx , src ) ; // 3, src
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : prefetchw ( Address src ) {
2011-08-16 16:59:46 -07:00
assert ( VM_Version : : supports_3dnow_prefetch ( ) , " must support " ) ;
2007-12-01 00:00:00 +00:00
InstructionMark im ( this ) ;
2008-08-27 00:21:55 -07:00
prefetch_prefix ( src ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0D ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( rcx , src ) ; // 1, src
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : prefix ( Prefix p ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( p ) ;
2007-12-01 00:00:00 +00:00
}
2012-10-24 14:33:22 -07:00
void Assembler : : pshufb ( XMMRegister dst , XMMRegister src ) {
assert ( VM_Version : : supports_ssse3 ( ) , " " ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ false ) ;
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x00 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-10-24 14:33:22 -07:00
}
2016-04-26 21:54:21 -07:00
void Assembler : : vpshufb ( XMMRegister dst , XMMRegister nds , XMMRegister src , int vector_len ) {
assert ( vector_len = = AVX_128bit ? VM_Version : : supports_avx ( ) :
vector_len = = AVX_256bit ? VM_Version : : supports_avx2 ( ) :
0 , " " ) ;
InstructionAttr attributes ( vector_len , /* rex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
int encode = simd_prefix_and_encode ( dst , nds , src , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
emit_int8 ( 0x00 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
2012-10-24 14:33:22 -07:00
void Assembler : : pshufb ( XMMRegister dst , Address src ) {
assert ( VM_Version : : supports_ssse3 ( ) , " " ) ;
InstructionMark im ( this ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ false ) ;
2015-11-09 11:26:41 -08:00
attributes . set_address_attributes ( /* tuple_type */ EVEX_FVM , /* input_size_in_bits */ EVEX_NObit ) ;
simd_prefix ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x00 ) ;
2012-10-24 14:33:22 -07:00
emit_operand ( dst , src ) ;
}
2008-08-27 00:21:55 -07:00
// Shuffle packed doublewords (66 0F 0x70 /r ib). 'mode' is the 8-bit shuffle
// control immediate.
void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // NOTE(review): widens to a 512-bit encoding on AVX512 parts without VL
  // support — presumably to keep the EVEX encoding legal there; confirm.
  int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x70);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(mode & 0xFF);   // shuffle control immediate
}

// AVX form of pshufd with an explicit vector length (128- or 256-bit).
void Assembler::vpshufd(XMMRegister dst, XMMRegister src, int mode, int vector_len) {
  assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
         vector_len == AVX_256bit? VM_Version::supports_avx2() :
         0, "");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x70);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(mode & 0xFF);   // shuffle control immediate
}
2008-08-27 00:21:55 -07:00
// pshufd with a memory source operand (66 0F 0x70 /r ib).
void Assembler::pshufd(XMMRegister dst, Address src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // Legacy SSE form would require a 16-byte aligned memory operand; only the
  // AVX encoding (unaligned-tolerant) is emitted here.
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x70);
  emit_operand(dst, src);
  emit_int8(mode & 0xFF);   // shuffle control immediate
}
2008-08-27 00:21:55 -07:00
// Shuffle packed low words (F2 0F 0x70 /r ib).
void Assembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x70);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(mode & 0xFF);   // shuffle control immediate
}

// pshuflw with a memory source operand.
void Assembler::pshuflw(XMMRegister dst, Address src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // Only the AVX encoding is emitted; the SSE form would need 16-byte alignment.
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x70);
  emit_operand(dst, src);
  emit_int8(mode & 0xFF);   // shuffle control immediate
}
2011-02-26 12:10:54 -08:00
void Assembler : : psrldq ( XMMRegister dst , int shift ) {
2015-09-11 17:02:44 -07:00
// Shift left 128 bit value in dst XMMRegister by shift number of bytes.
2011-02-26 12:10:54 -08:00
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ false ) ;
int encode = simd_prefix_and_encode ( xmm3 , dst , dst , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-06-17 17:48:25 -07:00
emit_int8 ( 0x73 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
emit_int8 ( shift ) ;
}
// Shift left 128 bit value in dst XMMRegister by shift number of bytes.
void Assembler::pslldq(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
  // XMM7 is for /7 encoding: 66 0F 73 /7 ib
  int encode = simd_prefix_and_encode(xmm7, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x73);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift);   // byte count to shift by
}
2009-03-31 14:07:08 -07:00
// Logical compare / set flags: ptest dst, [src] (66 0F38 0x17 /r). SSE4.1.
void Assembler::ptest(XMMRegister dst, Address src) {
  assert(VM_Version::supports_sse4_1(), "");
  // Only the AVX encoding is emitted; the SSE form would need 16-byte alignment.
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x17);
  emit_operand(dst, src);
}

// Register-register form of ptest.
void Assembler::ptest(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x17);
  emit_int8((unsigned char)(0xC0 | encode));
}
2013-01-08 11:30:51 -08:00
// 256-bit AVX vptest with a memory source (VEX.256.66.0F38 0x17 /r).
void Assembler::vptest(XMMRegister dst, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  assert(dst != xnoreg, "sanity");
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x17);
  emit_operand(dst, src);
}

// Register-register form of 256-bit vptest.
void Assembler::vptest(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x17);
  emit_int8((unsigned char)(0xC0 | encode));
}
2011-12-14 14:54:38 -08:00
// Interleave low bytes of dst and [src] (66 0F 0x60 /r).
void Assembler::punpcklbw(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // Only the AVX encoding is emitted; the SSE form would need 16-byte alignment.
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_vlbw, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x60);
  emit_operand(dst, src);
}

// Register-register form of punpcklbw.
void Assembler::punpcklbw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_vlbw, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x60);
  emit_int8((unsigned char)(0xC0 | encode));
}
2011-12-14 14:54:38 -08:00
// Interleave low doublewords of dst and [src] (66 0F 0x62 /r).
void Assembler::punpckldq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // Only the AVX encoding is emitted; the SSE form would need 16-byte alignment.
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x62);
  emit_operand(dst, src);
}

// Register-register form of punpckldq.
void Assembler::punpckldq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x62);
  emit_int8((unsigned char)(0xC0 | encode));
}
2012-07-16 17:10:22 -07:00
// Interleave low quadwords (66 0F 0x6C /r).
void Assembler::punpcklqdq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6C);
  emit_int8((unsigned char)(0xC0 | encode));
}
2008-08-27 00:21:55 -07:00
// Push a 32-bit immediate (0x68 id).
void Assembler::push(int32_t imm32) {
  // in 64bits we push 64bits onto the stack but only
  // take a 32bit immediate
  emit_int8(0x68);
  emit_int32(imm32);
}

// Push a general-purpose register (0x50 + rd).
void Assembler::push(Register src) {
  int encode = prefix_and_encode(src->encoding());
  emit_int8(0x50 | encode);
}

// Push the flags register (PUSHF/PUSHFQ, 0x9C).
void Assembler::pushf() {
  emit_int8((unsigned char)0x9C);
}
2009-11-02 11:17:55 +01:00
# ifndef _LP64 // no 32bit push/pop on amd64
2008-08-27 00:21:55 -07:00
void Assembler : : pushl ( Address src ) {
// Note this will push 64bit on 64bit
InstructionMark im ( this ) ;
prefix ( src ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xFF ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( rsi , src ) ;
2007-12-01 00:00:00 +00:00
}
2009-11-02 11:17:55 +01:00
# endif
2007-12-01 00:00:00 +00:00
2008-08-27 00:21:55 -07:00
void Assembler : : rcll ( Register dst , int imm8 ) {
assert ( isShiftCount ( imm8 ) , " illegal shift count " ) ;
int encode = prefix_and_encode ( dst - > encoding ( ) ) ;
if ( imm8 = = 1 ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xD1 ) ;
emit_int8 ( ( unsigned char ) ( 0xD0 | encode ) ) ;
2008-08-27 00:21:55 -07:00
} else {
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xC1 ) ;
emit_int8 ( ( unsigned char ) 0xD0 | encode ) ;
emit_int8 ( imm8 ) ;
2008-08-27 00:21:55 -07:00
}
2007-12-01 00:00:00 +00:00
}
2015-10-22 21:39:25 -07:00
// Reciprocal approximation of packed single floats (0F 0x53 /r).
void Assembler::rcpps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x53);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Reciprocal approximation of a scalar single float (F3 0F 0x53 /r).
void Assembler::rcpss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x53);
  emit_int8((unsigned char)(0xC0 | encode));
}
2014-03-20 17:49:27 -07:00
// Read time-stamp counter into EDX:EAX (0F 31).
void Assembler::rdtsc() {
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0x31);
}
2008-08-27 00:21:55 -07:00
// copies data from [esi] to [edi] using rcx pointer sized words
// generic
void Assembler::rep_mov() {
  emit_int8((unsigned char)0xF3);   // REP
  // MOVSQ
  LP64_ONLY(prefix(REX_W));
  emit_int8((unsigned char)0xA5);
}

// sets rcx bytes with rax, value at [edi]
void Assembler::rep_stosb() {
  emit_int8((unsigned char)0xF3);   // REP
  LP64_ONLY(prefix(REX_W));
  emit_int8((unsigned char)0xAA);   // STOSB
}

// sets rcx pointer sized words with rax, value at [edi]
// generic
void Assembler::rep_stos() {
  emit_int8((unsigned char)0xF3);   // REP
  LP64_ONLY(prefix(REX_W));         // LP64:STOSQ, LP32:STOSD
  emit_int8((unsigned char)0xAB);
}

// scans rcx pointer sized words at [edi] for occurance of rax,
// generic
void Assembler::repne_scan() { // repne_scan
  emit_int8((unsigned char)0xF2);   // REPNE
  // SCASQ
  LP64_ONLY(prefix(REX_W));
  emit_int8((unsigned char)0xAF);
}
#ifdef _LP64
// scans rcx 4 byte words at [edi] for occurance of rax,
// generic
void Assembler::repne_scanl() { // repne_scan
  emit_int8((unsigned char)0xF2);   // REPNE
  // SCASL
  emit_int8((unsigned char)0xAF);
}
#endif
// Near return: C3 for a plain return, C2 iw when imm16 bytes are popped
// from the stack on return.
void Assembler::ret(int imm16) {
  if (imm16 == 0) {
    emit_int8((unsigned char)0xC3);
  } else {
    emit_int8((unsigned char)0xC2);
    emit_int16(imm16);
  }
}

// Store AH into flags (0x9E); invalid in 64-bit mode.
void Assembler::sahf() {
#ifdef _LP64
  // Not supported in 64bit mode
  ShouldNotReachHere();
#endif
  emit_int8((unsigned char)0x9E);
}
// Arithmetic shift right by an immediate (D1 /7 for count 1, C1 /7 ib).
void Assembler::sarl(Register dst, int imm8) {
  int encode = prefix_and_encode(dst->encoding());
  assert(isShiftCount(imm8), "illegal shift count");
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xF8 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xF8 | encode));
    emit_int8(imm8);
  }
}

// Arithmetic shift right by CL (D3 /7).
void Assembler::sarl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xF8 | encode));
}
// Subtract with borrow: [dst] -= imm32 + CF (81 /3, rbx encodes /3).
void Assembler::sbbl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rbx, dst, imm32);
}

// Subtract with borrow: dst -= imm32 + CF.
void Assembler::sbbl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xD8, dst, imm32);
}

// Subtract with borrow: dst -= [src] + CF (1B /r).
void Assembler::sbbl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x1B);
  emit_operand(dst, src);
}

// Subtract with borrow: dst -= src + CF.
void Assembler::sbbl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x1B, 0xC0, dst, src);
}
2008-08-27 00:21:55 -07:00
void Assembler : : setb ( Condition cc , Register dst ) {
assert ( 0 < = cc & & cc < 16 , " illegal cc " ) ;
int encode = prefix_and_encode ( dst - > encoding ( ) , true ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0x90 | cc ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2007-12-01 00:00:00 +00:00
}
2016-03-03 22:02:13 -08:00
// Byte-align concatenated operands and extract (66 0F3A 0x0F /r ib). SSSE3.
void Assembler::palignr(XMMRegister dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_ssse3(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);   // byte-shift amount
}

// AVX form of palignr with an explicit vector length (128/256-bit).
void Assembler::vpalignr(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
  assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
         vector_len == AVX_256bit? VM_Version::supports_avx2() :
         0, "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);   // byte-shift amount
}
2016-03-03 22:02:13 -08:00
// Blend packed words under an immediate mask (66 0F3A 0x0E /r ib). SSE4.1.
void Assembler::pblendw(XMMRegister dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8((unsigned char)0x0E);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);   // per-word blend mask
}
// Intel SHA extensions. All forms require VM_Version::supports_sha() and use
// the legacy (non-VEX) 128-bit encoding.

// Four rounds of SHA-1 (0F3A 0xCC /r ib); imm8 selects the round function.
void Assembler::sha1rnds4(XMMRegister dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sha(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F_3A, &attributes);
  emit_int8((unsigned char)0xCC);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8((unsigned char)imm8);
}

// SHA-1 next E computation (0F38 0xC8 /r).
void Assembler::sha1nexte(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sha(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xC8);
  emit_int8((unsigned char)(0xC0 | encode));
}

// SHA-1 message schedule update 1 (0F38 0xC9 /r).
void Assembler::sha1msg1(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sha(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xC9);
  emit_int8((unsigned char)(0xC0 | encode));
}

// SHA-1 message schedule update 2 (0F38 0xCA /r).
void Assembler::sha1msg2(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sha(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xCA);
  emit_int8((unsigned char)(0xC0 | encode));
}

// xmm0 is implicit additional source to this instruction.
// Two rounds of SHA-256 (0F38 0xCB /r).
void Assembler::sha256rnds2(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sha(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xCB);
  emit_int8((unsigned char)(0xC0 | encode));
}

// SHA-256 message schedule update 1 (0F38 0xCC /r).
void Assembler::sha256msg1(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sha(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xCC);
  emit_int8((unsigned char)(0xC0 | encode));
}

// SHA-256 message schedule update 2 (0F38 0xCD /r).
void Assembler::sha256msg2(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sha(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xCD);
  emit_int8((unsigned char)(0xC0 | encode));
}
2008-08-27 00:21:55 -07:00
// Shift left by an immediate (D1 /4 for count 1, C1 /4 ib).
void Assembler::shll(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xE0 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xE0 | encode));
    emit_int8(imm8);
  }
}

// Shift left by CL (D3 /4).
void Assembler::shll(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xE0 | encode));
}

// Logical shift right by an immediate (C1 /5 ib).
void Assembler::shrl(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xC1);
  emit_int8((unsigned char)(0xE8 | encode));
  emit_int8(imm8);
}

// Logical shift right by CL (D3 /5).
void Assembler::shrl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xE8 | encode));
}
// copies a single word from [esi] to [edi]
void Assembler::smovl() {
  emit_int8((unsigned char)0xA5);   // MOVS
}
// Scalar double-precision square root (F2 0F 0x51 /r).
void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x51);
  emit_int8((unsigned char)(0xC0 | encode));
}

// sqrtsd with a memory source operand.
void Assembler::sqrtsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x51);
  emit_operand(dst, src);
}

// Scalar single-precision square root (F3 0F 0x51 /r).
void Assembler::sqrtss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x51);
  emit_int8((unsigned char)(0xC0 | encode));
}
2012-11-30 15:23:16 -08:00
// Set the direction flag (STD, 0xFD): string ops decrement pointers.
void Assembler::std() {
  emit_int8((unsigned char)0xFD);
}
2010-12-03 01:34:31 -08:00
// sqrtss with a memory source operand (F3 0F 0x51 /r).
void Assembler::sqrtss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x51);
  emit_operand(dst, src);
}
2008-08-27 00:21:55 -07:00
// Store the MXCSR register to memory (0F AE /3).
void Assembler::stmxcsr(Address dst) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  prefix(dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_operand(as_Register(3), dst);   // register 3 encodes the /3 extension
}
2008-08-27 00:21:55 -07:00
// Subtract an immediate from a memory operand (81 /5, rbp encodes /5).
void Assembler::subl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rbp, dst, imm32);
}

// Subtract a register from a memory operand (29 /r).
void Assembler::subl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x29);
  emit_operand(src, dst);
}

// Subtract an immediate from a register.
void Assembler::subl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xE8, dst, imm32);
}

// Force generation of a 4 byte immediate value even if it fits into 8bit
void Assembler::subl_imm32(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith_imm32(0x81, 0xE8, dst, imm32);
}

// Subtract a memory operand from a register (2B /r).
void Assembler::subl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x2B);
  emit_operand(dst, src);
}

// Register-register subtract.
void Assembler::subl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x2B, 0xC0, dst, src);
}
// Scalar double-precision subtract (F2 0F 0x5C /r).
void Assembler::subsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_int8((unsigned char)(0xC0 | encode));
}

// subsd with a memory source operand.
void Assembler::subsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_operand(dst, src);
}

// Scalar single-precision subtract (F3 0F 0x5C /r).
void Assembler::subss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_int8((unsigned char)(0xC0 | encode));
}

// subss with a memory source operand.
void Assembler::subss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_operand(dst, src);
}
2008-08-27 00:21:55 -07:00
// Byte test of a register against an immediate (F6 /0 ib).
void Assembler::testb(Register dst, int imm8) {
  NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
  (void) prefix_and_encode(dst->encoding(), true);
  emit_arith_b(0xF6, 0xC0, dst, imm8);
}

// Byte test of a memory operand against an immediate (F6 /0 ib).
void Assembler::testb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xF6);
  emit_operand(rax, dst, 1);   // rax encodes /0; 1 = trailing immediate size
  emit_int8(imm8);
}
2008-08-27 00:21:55 -07:00
// Test a register against a 32-bit immediate (A9 id for EAX, F7 /0 id else).
void Assembler::testl(Register dst, int32_t imm32) {
  // not using emit_arith because test
  // doesn't support sign-extension of
  // 8bit operands
  int encode = dst->encoding();
  if (encode == 0) {
    emit_int8((unsigned char)0xA9);   // short form for EAX/RAX
  } else {
    encode = prefix_and_encode(encode);
    emit_int8((unsigned char)0xF7);
    emit_int8((unsigned char)(0xC0 | encode));
  }
  emit_int32(imm32);
}

// Register-register test (85 /r).
void Assembler::testl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x85, 0xC0, dst, src);
}

// Test a register against a memory operand (85 /r).
void Assembler::testl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x85);
  emit_operand(dst, src);
}
2014-03-12 11:24:26 -07:00
void Assembler : : tzcntl ( Register dst , Register src ) {
assert ( VM_Version : : supports_bmi1 ( ) , " tzcnt instruction not supported " ) ;
emit_int8 ( ( unsigned char ) 0xF3 ) ;
int encode = prefix_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xBC ) ;
emit_int8 ( ( unsigned char ) 0xC0 | encode ) ;
}
void Assembler : : tzcntq ( Register dst , Register src ) {
assert ( VM_Version : : supports_bmi1 ( ) , " tzcnt instruction not supported " ) ;
emit_int8 ( ( unsigned char ) 0xF3 ) ;
int encode = prefixq_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xBC ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
2008-08-27 00:21:55 -07:00
// UCOMISD xmm, m64 (66 0F 2E /r): unordered scalar double compare, sets EFLAGS.
void Assembler::ucomisd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  // NOTE(review): presumably lets the encoder drop the W bit for the VEX form — confirm in InstructionAttr.
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2E);
  emit_operand(dst, src);
}

// UCOMISD xmm, xmm: register-register form.
void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2E);
  emit_int8((unsigned char)(0xC0 | encode));
}
2008-08-27 00:21:55 -07:00
// UCOMISS xmm, m32 (0F 2E /r): unordered scalar single compare, sets EFLAGS.
void Assembler::ucomiss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2E);
  emit_operand(dst, src);
}

// UCOMISS xmm, xmm: register-register form.
void Assembler::ucomiss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2E);
  emit_int8((unsigned char)(0xC0 | encode));
}
2014-03-20 17:49:27 -07:00
// XABORT imm8 (C6 F8 ib): abort an RTM transaction; imm8 is returned in EAX to the fallback path.
void Assembler::xabort(int8_t imm8) {
  emit_int8((unsigned char)0xC6);
  emit_int8((unsigned char)0xF8);
  emit_int8((unsigned char)(imm8 & 0xFF));
}
2008-08-27 00:21:55 -07:00
// XADDL mem, reg (0F C1 /r): exchange-and-add; typically combined with a LOCK prefix by callers.
void Assembler::xaddl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xC1);
  emit_operand(src, dst);
}
2014-03-20 17:49:27 -07:00
// XBEGIN rel32 (C7 F8 cd): start an RTM transaction; control transfers to 'abort' on abort.
void Assembler::xbegin(Label& abort, relocInfo::relocType rtype) {
  InstructionMark im(this);
  relocate(rtype);
  if (abort.is_bound()) {
    // Label already bound: compute the rel32 displacement directly.
    address entry = target(abort);
    assert(entry != NULL, "abort entry NULL");
    intptr_t offset = entry - pc();
    emit_int8((unsigned char)0xC7);
    emit_int8((unsigned char)0xF8);
    emit_int32(offset - 6); // 2 opcode + 4 address
  } else {
    // Forward reference: record a patch site and emit a zero placeholder displacement.
    abort.add_patch_at(code(), locator());
    emit_int8((unsigned char)0xC7);
    emit_int8((unsigned char)0xF8);
    emit_int32(0);
  }
}
2008-08-27 00:21:55 -07:00
// XCHGL reg, mem (87 /r): exchange; implicitly locked by the hardware for memory operands.
void Assembler::xchgl(Register dst, Address src) { // xchg
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x87);
  emit_operand(dst, src);
}

// XCHGL reg, reg (87 /r).
void Assembler::xchgl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x87);
  emit_int8((unsigned char)(0xC0 | encode));
}
2014-03-20 17:49:27 -07:00
// XEND (0F 01 D5): end (commit) an RTM transaction.
void Assembler::xend() {
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0x01);
  emit_int8((unsigned char)0xD5);
}

// XGETBV (0F 01 D0): read the extended control register selected by ECX into EDX:EAX.
void Assembler::xgetbv() {
  emit_int8(0x0F);
  emit_int8(0x01);
  emit_int8((unsigned char)0xD0);
}
2008-08-27 00:21:55 -07:00
// XORL reg, imm32 (81 /6 id).
void Assembler::xorl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xF0, dst, imm32);
}

// XORL reg, mem (33 /r).
void Assembler::xorl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x33);
  emit_operand(dst, src);
}

// XORL reg, reg (33 /r).
void Assembler::xorl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x33, 0xC0, dst, src);
}

// XORB reg, mem (32 /r): byte-sized xor.
void Assembler::xorb(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x32);
  emit_operand(dst, src);
}
2008-08-27 00:21:55 -07:00
2012-08-20 09:07:21 -07:00
// AVX 3-operand scalar floating-point arithmetic instructions
2011-12-20 00:55:02 -08:00
// VADDSD xmm, xmm, m64 (VEX/EVEX.F2 0F 58 /r): 3-operand scalar double add, dst = nds + src.
void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src);
}

// VADDSD xmm, xmm, xmm: register-register form.
void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VADDSS xmm, xmm, m32 (F3 0F 58 /r): 3-operand scalar single add.
void Assembler::vaddss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src);
}

// VADDSS xmm, xmm, xmm: register-register form.
void Assembler::vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_int8((unsigned char)(0xC0 | encode));
}
// VDIVSD xmm, xmm, m64 (F2 0F 5E /r): 3-operand scalar double divide, dst = nds / src.
void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_operand(dst, src);
}

// VDIVSD xmm, xmm, xmm: register-register form.
void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VDIVSS xmm, xmm, m32 (F3 0F 5E /r): 3-operand scalar single divide.
void Assembler::vdivss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_operand(dst, src);
}

// VDIVSS xmm, xmm, xmm: register-register form.
void Assembler::vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_int8((unsigned char)(0xC0 | encode));
}
// VMULSD xmm, xmm, m64 (F2 0F 59 /r): 3-operand scalar double multiply, dst = nds * src.
void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src);
}

// VMULSD xmm, xmm, xmm: register-register form.
void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VMULSS xmm, xmm, m32 (F3 0F 59 /r): 3-operand scalar single multiply.
void Assembler::vmulss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src);
}

// VMULSS xmm, xmm, xmm: register-register form.
void Assembler::vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_int8((unsigned char)(0xC0 | encode));
}
// VSUBSD xmm, xmm, m64 (F2 0F 5C /r): 3-operand scalar double subtract, dst = nds - src.
void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_operand(dst, src);
}

// VSUBSD xmm, xmm, xmm: register-register form.
void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VSUBSS xmm, xmm, m32 (F3 0F 5C /r): 3-operand scalar single subtract.
void Assembler::vsubss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_operand(dst, src);
}

// VSUBSS xmm, xmm, xmm: register-register form.
void Assembler::vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_int8((unsigned char)(0xC0 | encode));
}
2012-08-20 09:07:21 -07:00
//====================VECTOR ARITHMETIC=====================================
// Floating-point vector arithmetic
// ADDPD xmm, xmm (66 0F 58 /r): packed double add; uses_vl so EVEX can widen it.
void Assembler::addpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_int8((unsigned char)(0xC0 | encode));
}

// ADDPD xmm, m128: memory form; EVEX full-vector tuple with 64-bit elements.
void Assembler::addpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src);
}

// ADDPS xmm, xmm (0F 58 /r): packed single add.
// NOTE(review): gated on supports_sse2() although ADDPS is an SSE1 instruction —
// matches the neighboring packed ops in this file; confirm intent before tightening.
void Assembler::addps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_int8((unsigned char)(0xC0 | encode));
}
2015-05-08 11:49:20 -07:00
// VADDPD xmm/ymm/zmm, nds, src (66 0F 58 /r): 3-operand packed double add at 'vector_len'.
void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VADDPS (0F 58 /r): 3-operand packed single add at 'vector_len'.
void Assembler::vaddps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VADDPD with memory source; EVEX full-vector tuple, 64-bit elements.
void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src);
}

// VADDPS with memory source; EVEX full-vector tuple, 32-bit elements.
void Assembler::vaddps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src);
}
2012-08-20 09:07:21 -07:00
// SUBPD xmm, xmm (66 0F 5C /r): packed double subtract.
void Assembler::subpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_int8((unsigned char)(0xC0 | encode));
}

// SUBPS xmm, xmm (0F 5C /r): packed single subtract.
void Assembler::subps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_int8((unsigned char)(0xC0 | encode));
}
2015-05-08 11:49:20 -07:00
// VSUBPD (66 0F 5C /r): 3-operand packed double subtract at 'vector_len'.
void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VSUBPS (0F 5C /r): 3-operand packed single subtract at 'vector_len'.
void Assembler::vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VSUBPD with memory source; EVEX full-vector tuple, 64-bit elements.
void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_operand(dst, src);
}

// VSUBPS with memory source; EVEX full-vector tuple, 32-bit elements.
void Assembler::vsubps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_operand(dst, src);
}
// MULPD xmm, xmm (66 0F 59 /r): packed double multiply.
void Assembler::mulpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_int8((unsigned char)(0xC0 | encode));
}

// MULPD xmm, m128: memory form; EVEX full-vector tuple, 64-bit elements.
void Assembler::mulpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src);
}

// MULPS xmm, xmm (0F 59 /r): packed single multiply.
void Assembler::mulps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_int8((unsigned char)(0xC0 | encode));
}
2015-05-08 11:49:20 -07:00
// VMULPD (66 0F 59 /r): 3-operand packed double multiply at 'vector_len'.
void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VMULPS (0F 59 /r): 3-operand packed single multiply at 'vector_len'.
void Assembler::vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VMULPD with memory source; EVEX full-vector tuple, 64-bit elements.
void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src);
}

// VMULPS with memory source; EVEX full-vector tuple, 32-bit elements.
void Assembler::vmulps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src);
}
void Assembler : : divpd ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x5E ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
void Assembler : : divps ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_NONE , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x5E ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
void Assembler : : vdivpd ( XMMRegister dst , XMMRegister nds , XMMRegister src , int vector_len ) {
2012-08-20 09:07:21 -07:00
assert ( VM_Version : : supports_avx ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2016-04-05 11:37:41 -07:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , nds - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-09 11:26:41 -08:00
emit_int8 ( 0x5E ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
void Assembler : : vdivps ( XMMRegister dst , XMMRegister nds , XMMRegister src , int vector_len ) {
2012-08-20 09:07:21 -07:00
assert ( VM_Version : : supports_avx ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-04-05 11:37:41 -07:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , nds - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_NONE , VEX_OPCODE_0F , & attributes ) ;
2015-11-09 11:26:41 -08:00
emit_int8 ( 0x5E ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
void Assembler : : vdivpd ( XMMRegister dst , XMMRegister nds , Address src , int vector_len ) {
2012-08-20 09:07:21 -07:00
assert ( VM_Version : : supports_avx ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( vector_len , /* vex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_FV , /* input_size_in_bits */ EVEX_64bit ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2016-04-05 11:37:41 -07:00
vex_prefix ( src , nds - > encoding ( ) , dst - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-09 11:26:41 -08:00
emit_int8 ( 0x5E ) ;
emit_operand ( dst , src ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
void Assembler : : vdivps ( XMMRegister dst , XMMRegister nds , Address src , int vector_len ) {
2012-08-20 09:07:21 -07:00
assert ( VM_Version : : supports_avx ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_FV , /* input_size_in_bits */ EVEX_32bit ) ;
2016-04-05 11:37:41 -07:00
vex_prefix ( src , nds - > encoding ( ) , dst - > encoding ( ) , VEX_SIMD_NONE , VEX_OPCODE_0F , & attributes ) ;
2015-11-09 11:26:41 -08:00
emit_int8 ( 0x5E ) ;
emit_operand ( dst , src ) ;
2012-08-20 09:07:21 -07:00
}
2015-09-09 10:34:17 -07:00
void Assembler : : vsqrtpd ( XMMRegister dst , XMMRegister src , int vector_len ) {
assert ( VM_Version : : supports_avx ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2016-04-05 11:37:41 -07:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , 0 , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-09 11:26:41 -08:00
emit_int8 ( 0x51 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2015-09-09 10:34:17 -07:00
}
void Assembler : : vsqrtpd ( XMMRegister dst , Address src , int vector_len ) {
assert ( VM_Version : : supports_avx ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( vector_len , /* vex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_FV , /* input_size_in_bits */ EVEX_64bit ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2015-11-09 11:26:41 -08:00
vex_prefix ( src , 0 , dst - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x51 ) ;
emit_operand ( dst , src ) ;
2015-09-09 10:34:17 -07:00
}
2012-08-20 09:07:21 -07:00
void Assembler : : andpd ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ ! _legacy_mode_dq , /* legacy_mode */ _legacy_mode_dq , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x54 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
void Assembler : : andps ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ _legacy_mode_dq , /* no_mask_reg */ false , /* uses_vl */ true ) ;
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_NONE , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x54 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
void Assembler : : andps ( XMMRegister dst , Address src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ _legacy_mode_dq , /* no_mask_reg */ false , /* uses_vl */ true ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_FV , /* input_size_in_bits */ EVEX_32bit ) ;
simd_prefix ( dst , dst , src , VEX_SIMD_NONE , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x54 ) ;
emit_operand ( dst , src ) ;
2012-08-20 09:07:21 -07:00
}
void Assembler : : andpd ( XMMRegister dst , Address src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( AVX_128bit , /* rex_w */ ! _legacy_mode_dq , /* legacy_mode */ _legacy_mode_dq , /* no_mask_reg */ false , /* uses_vl */ true ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_FV , /* input_size_in_bits */ EVEX_64bit ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2015-11-09 11:26:41 -08:00
simd_prefix ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x54 ) ;
emit_operand ( dst , src ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
void Assembler : : vandpd ( XMMRegister dst , XMMRegister nds , XMMRegister src , int vector_len ) {
2012-08-20 09:07:21 -07:00
assert ( VM_Version : : supports_avx ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ ! _legacy_mode_dq , /* legacy_mode */ _legacy_mode_dq , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2016-04-05 11:37:41 -07:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , nds - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-09 11:26:41 -08:00
emit_int8 ( 0x54 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
void Assembler : : vandps ( XMMRegister dst , XMMRegister nds , XMMRegister src , int vector_len ) {
2012-08-20 09:07:21 -07:00
assert ( VM_Version : : supports_avx ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ _legacy_mode_dq , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-04-05 11:37:41 -07:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , nds - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_NONE , VEX_OPCODE_0F , & attributes ) ;
2015-11-09 11:26:41 -08:00
emit_int8 ( 0x54 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
void Assembler : : vandpd ( XMMRegister dst , XMMRegister nds , Address src , int vector_len ) {
2012-08-20 09:07:21 -07:00
assert ( VM_Version : : supports_avx ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( vector_len , /* vex_w */ ! _legacy_mode_dq , /* legacy_mode */ _legacy_mode_dq , /* no_mask_reg */ false , /* uses_vl */ true ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_FV , /* input_size_in_bits */ EVEX_64bit ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2016-04-05 11:37:41 -07:00
vex_prefix ( src , nds - > encoding ( ) , dst - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-09 11:26:41 -08:00
emit_int8 ( 0x54 ) ;
emit_operand ( dst , src ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
void Assembler : : vandps ( XMMRegister dst , XMMRegister nds , Address src , int vector_len ) {
2012-08-20 09:07:21 -07:00
assert ( VM_Version : : supports_avx ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ _legacy_mode_dq , /* no_mask_reg */ false , /* uses_vl */ true ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_FV , /* input_size_in_bits */ EVEX_32bit ) ;
2016-04-05 11:37:41 -07:00
vex_prefix ( src , nds - > encoding ( ) , dst - > encoding ( ) , VEX_SIMD_NONE , VEX_OPCODE_0F , & attributes ) ;
2015-11-09 11:26:41 -08:00
emit_int8 ( 0x54 ) ;
emit_operand ( dst , src ) ;
2012-08-20 09:07:21 -07:00
}
2015-10-05 20:02:40 -07:00
void Assembler : : unpckhpd ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x15 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2015-10-05 20:02:40 -07:00
}
void Assembler : : unpcklpd ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x14 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2015-10-05 20:02:40 -07:00
}
2012-08-20 09:07:21 -07:00
void Assembler : : xorpd ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ ! _legacy_mode_dq , /* legacy_mode */ _legacy_mode_dq , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x57 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
void Assembler : : xorps ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ _legacy_mode_dq , /* no_mask_reg */ false , /* uses_vl */ true ) ;
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_NONE , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x57 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
void Assembler : : xorpd ( XMMRegister dst , Address src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( AVX_128bit , /* rex_w */ ! _legacy_mode_dq , /* legacy_mode */ _legacy_mode_dq , /* no_mask_reg */ false , /* uses_vl */ true ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_FV , /* input_size_in_bits */ EVEX_64bit ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2015-11-09 11:26:41 -08:00
simd_prefix ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x57 ) ;
emit_operand ( dst , src ) ;
2012-08-20 09:07:21 -07:00
}
void Assembler : : xorps ( XMMRegister dst , Address src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ _legacy_mode_dq , /* no_mask_reg */ false , /* uses_vl */ true ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_FV , /* input_size_in_bits */ EVEX_32bit ) ;
simd_prefix ( dst , dst , src , VEX_SIMD_NONE , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x57 ) ;
emit_operand ( dst , src ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
void Assembler : : vxorpd ( XMMRegister dst , XMMRegister nds , XMMRegister src , int vector_len ) {
2012-08-20 09:07:21 -07:00
assert ( VM_Version : : supports_avx ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ ! _legacy_mode_dq , /* legacy_mode */ _legacy_mode_dq , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2016-04-05 11:37:41 -07:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , nds - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-09 11:26:41 -08:00
emit_int8 ( 0x57 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
void Assembler : : vxorps ( XMMRegister dst , XMMRegister nds , XMMRegister src , int vector_len ) {
2012-08-20 09:07:21 -07:00
assert ( VM_Version : : supports_avx ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ _legacy_mode_dq , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-04-05 11:37:41 -07:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , nds - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_NONE , VEX_OPCODE_0F , & attributes ) ;
2015-11-09 11:26:41 -08:00
emit_int8 ( 0x57 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
void Assembler : : vxorpd ( XMMRegister dst , XMMRegister nds , Address src , int vector_len ) {
2012-08-20 09:07:21 -07:00
assert ( VM_Version : : supports_avx ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( vector_len , /* vex_w */ ! _legacy_mode_dq , /* legacy_mode */ _legacy_mode_dq , /* no_mask_reg */ false , /* uses_vl */ true ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_FV , /* input_size_in_bits */ EVEX_64bit ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2016-04-05 11:37:41 -07:00
vex_prefix ( src , nds - > encoding ( ) , dst - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-09 11:26:41 -08:00
emit_int8 ( 0x57 ) ;
emit_operand ( dst , src ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
void Assembler : : vxorps ( XMMRegister dst , XMMRegister nds , Address src , int vector_len ) {
2012-08-20 09:07:21 -07:00
assert ( VM_Version : : supports_avx ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ _legacy_mode_dq , /* no_mask_reg */ false , /* uses_vl */ true ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_FV , /* input_size_in_bits */ EVEX_32bit ) ;
2016-04-05 11:37:41 -07:00
vex_prefix ( src , nds - > encoding ( ) , dst - > encoding ( ) , VEX_SIMD_NONE , VEX_OPCODE_0F , & attributes ) ;
2015-11-09 11:26:41 -08:00
emit_int8 ( 0x57 ) ;
emit_operand ( dst , src ) ;
2012-08-20 09:07:21 -07:00
}
// Integer vector arithmetic
2015-05-08 11:49:20 -07:00
void Assembler : : vphaddw ( XMMRegister dst , XMMRegister nds , XMMRegister src , int vector_len ) {
assert ( VM_Version : : supports_avx ( ) & & ( vector_len = = 0 ) | |
VM_Version : : supports_avx2 ( ) , " 256 bit integer vectors requires AVX2 " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
2016-04-05 11:37:41 -07:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , nds - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2015-04-01 18:07:50 -07:00
emit_int8 ( 0x01 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
2015-05-08 11:49:20 -07:00
void Assembler : : vphaddd ( XMMRegister dst , XMMRegister nds , XMMRegister src , int vector_len ) {
assert ( VM_Version : : supports_avx ( ) & & ( vector_len = = 0 ) | |
VM_Version : : supports_avx2 ( ) , " 256 bit integer vectors requires AVX2 " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
2016-04-05 11:37:41 -07:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , nds - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2015-04-01 18:07:50 -07:00
emit_int8 ( 0x02 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
2012-08-20 09:07:21 -07:00
void Assembler : : paddb ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0xFC ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
void Assembler : : paddw ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0xFD ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
void Assembler : : paddd ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0xFE ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
2016-03-03 22:02:13 -08:00
void Assembler : : paddd ( XMMRegister dst , Address src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
InstructionMark im ( this ) ;
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-04-05 11:37:41 -07:00
simd_prefix ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2016-03-03 22:02:13 -08:00
emit_int8 ( ( unsigned char ) 0xFE ) ;
emit_operand ( dst , src ) ;
}
2012-08-20 09:07:21 -07:00
void Assembler : : paddq ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0xD4 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
2015-04-01 18:07:50 -07:00
void Assembler : : phaddw ( XMMRegister dst , XMMRegister src ) {
2016-04-06 10:29:26 -07:00
assert ( VM_Version : : supports_sse3 ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2015-04-01 18:07:50 -07:00
emit_int8 ( 0x01 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
void Assembler : : phaddd ( XMMRegister dst , XMMRegister src ) {
2016-04-06 10:29:26 -07:00
assert ( VM_Version : : supports_sse3 ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2015-04-01 18:07:50 -07:00
emit_int8 ( 0x02 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
2015-05-08 11:49:20 -07:00
void Assembler : : vpaddb ( XMMRegister dst , XMMRegister nds , XMMRegister src , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2016-04-05 11:37:41 -07:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , nds - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-09 11:26:41 -08:00
emit_int8 ( ( unsigned char ) 0xFC ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
void Assembler : : vpaddw ( XMMRegister dst , XMMRegister nds , XMMRegister src , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2016-04-05 11:37:41 -07:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , nds - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-09 11:26:41 -08:00
emit_int8 ( ( unsigned char ) 0xFD ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
void Assembler : : vpaddd ( XMMRegister dst , XMMRegister nds , XMMRegister src , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-04-05 11:37:41 -07:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , nds - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-09 11:26:41 -08:00
emit_int8 ( ( unsigned char ) 0xFE ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
void Assembler : : vpaddq ( XMMRegister dst , XMMRegister nds , XMMRegister src , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2016-04-05 11:37:41 -07:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , nds - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-09 11:26:41 -08:00
emit_int8 ( ( unsigned char ) 0xD4 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
void Assembler : : vpaddb ( XMMRegister dst , XMMRegister nds , Address src , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2015-11-09 11:26:41 -08:00
attributes . set_address_attributes ( /* tuple_type */ EVEX_FVM , /* input_size_in_bits */ EVEX_NObit ) ;
2016-04-05 11:37:41 -07:00
vex_prefix ( src , nds - > encoding ( ) , dst - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-09 11:26:41 -08:00
emit_int8 ( ( unsigned char ) 0xFC ) ;
emit_operand ( dst , src ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
void Assembler : : vpaddw ( XMMRegister dst , XMMRegister nds , Address src , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2015-11-09 11:26:41 -08:00
attributes . set_address_attributes ( /* tuple_type */ EVEX_FVM , /* input_size_in_bits */ EVEX_NObit ) ;
2016-04-05 11:37:41 -07:00
vex_prefix ( src , nds - > encoding ( ) , dst - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-09 11:26:41 -08:00
emit_int8 ( ( unsigned char ) 0xFD ) ;
emit_operand ( dst , src ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
void Assembler : : vpaddd ( XMMRegister dst , XMMRegister nds , Address src , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_FV , /* input_size_in_bits */ EVEX_32bit ) ;
2016-04-05 11:37:41 -07:00
vex_prefix ( src , nds - > encoding ( ) , dst - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-09 11:26:41 -08:00
emit_int8 ( ( unsigned char ) 0xFE ) ;
emit_operand ( dst , src ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
void Assembler : : vpaddq ( XMMRegister dst , XMMRegister nds , Address src , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( vector_len , /* vex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_FV , /* input_size_in_bits */ EVEX_64bit ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2016-04-05 11:37:41 -07:00
vex_prefix ( src , nds - > encoding ( ) , dst - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-09 11:26:41 -08:00
emit_int8 ( ( unsigned char ) 0xD4 ) ;
emit_operand ( dst , src ) ;
2012-08-20 09:07:21 -07:00
}
// psubb -- packed subtract of bytes: dst = dst - src (opcode 66 0F F8).
void Assembler : : psubb ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0xF8 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
// psubw -- packed subtract of words: dst = dst - src (opcode 66 0F F9).
void Assembler : : psubw ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0xF9 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
// psubd -- packed subtract of dwords: dst = dst - src (opcode 66 0F FA).
// NOTE(review): unlike psubb/psubw/psubq there is no NOT_LP64 SSE2 assert
// here -- confirm whether the omission is intentional.
void Assembler : : psubd ( XMMRegister dst , XMMRegister src ) {
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0xFA ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
// psubq -- packed subtract of qwords: dst = dst - src (opcode 66 0F FB).
// W-bit is requested only when EVEX is supported and then marked "reverted";
// presumably so the non-EVEX encoding omits REX.W -- see simd_prefix handling.
void Assembler : : psubq ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0xFB ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
// vpsubb -- AVX/EVEX packed byte subtract, three-operand reg form:
// dst = nds - src (opcode 66 0F F8), width selected by vector_len.
void Assembler : : vpsubb ( XMMRegister dst , XMMRegister nds , XMMRegister src , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2016-04-05 11:37:41 -07:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , nds - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-09 11:26:41 -08:00
emit_int8 ( ( unsigned char ) 0xF8 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
// vpsubw -- AVX/EVEX packed word subtract: dst = nds - src (opcode 66 0F F9).
void Assembler : : vpsubw ( XMMRegister dst , XMMRegister nds , XMMRegister src , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2016-04-05 11:37:41 -07:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , nds - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-09 11:26:41 -08:00
emit_int8 ( ( unsigned char ) 0xF9 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
// vpsubd -- AVX/EVEX packed dword subtract: dst = nds - src (opcode 66 0F FA).
void Assembler : : vpsubd ( XMMRegister dst , XMMRegister nds , XMMRegister src , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-04-05 11:37:41 -07:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , nds - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-09 11:26:41 -08:00
emit_int8 ( ( unsigned char ) 0xFA ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
// vpsubq -- AVX/EVEX packed qword subtract: dst = nds - src (opcode 66 0F FB).
// W-bit only under EVEX, flagged reverted (same pattern as scalar psubq).
void Assembler : : vpsubq ( XMMRegister dst , XMMRegister nds , XMMRegister src , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( vector_len , /* rex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2016-04-05 11:37:41 -07:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , nds - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-09 11:26:41 -08:00
emit_int8 ( ( unsigned char ) 0xFB ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
// vpsubb -- memory-source form: dst = nds - [src] (opcode 66 0F F8).
// InstructionMark brackets the emission for relocation/oop-map bookkeeping;
// EVEX_FVM tuple governs the EVEX compressed displacement scaling.
void Assembler : : vpsubb ( XMMRegister dst , XMMRegister nds , Address src , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2015-11-09 11:26:41 -08:00
attributes . set_address_attributes ( /* tuple_type */ EVEX_FVM , /* input_size_in_bits */ EVEX_NObit ) ;
2016-04-05 11:37:41 -07:00
vex_prefix ( src , nds - > encoding ( ) , dst - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-09 11:26:41 -08:00
emit_int8 ( ( unsigned char ) 0xF8 ) ;
emit_operand ( dst , src ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
// vpsubw -- memory-source form: dst = nds - [src] (opcode 66 0F F9).
void Assembler : : vpsubw ( XMMRegister dst , XMMRegister nds , Address src , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2015-11-09 11:26:41 -08:00
attributes . set_address_attributes ( /* tuple_type */ EVEX_FVM , /* input_size_in_bits */ EVEX_NObit ) ;
2016-04-05 11:37:41 -07:00
vex_prefix ( src , nds - > encoding ( ) , dst - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-09 11:26:41 -08:00
emit_int8 ( ( unsigned char ) 0xF9 ) ;
emit_operand ( dst , src ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
// vpsubd -- memory-source form: dst = nds - [src] (opcode 66 0F FA).
// EVEX_FV tuple with 32-bit element size for disp8 compression.
void Assembler : : vpsubd ( XMMRegister dst , XMMRegister nds , Address src , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_FV , /* input_size_in_bits */ EVEX_32bit ) ;
2016-04-05 11:37:41 -07:00
vex_prefix ( src , nds - > encoding ( ) , dst - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-09 11:26:41 -08:00
emit_int8 ( ( unsigned char ) 0xFA ) ;
emit_operand ( dst , src ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
// vpsubq -- memory-source form: dst = nds - [src] (opcode 66 0F FB).
// EVEX_FV tuple with 64-bit element size; W-bit only under EVEX, reverted.
void Assembler : : vpsubq ( XMMRegister dst , XMMRegister nds , Address src , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( vector_len , /* vex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_FV , /* input_size_in_bits */ EVEX_64bit ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2016-04-05 11:37:41 -07:00
vex_prefix ( src , nds - > encoding ( ) , dst - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-09 11:26:41 -08:00
emit_int8 ( ( unsigned char ) 0xFB ) ;
emit_operand ( dst , src ) ;
2012-08-20 09:07:21 -07:00
}
// pmullw -- packed multiply words, low 16 bits of each product (opcode 66 0F D5).
void Assembler : : pmullw ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0xD5 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
// pmulld -- packed multiply dwords, low 32 bits of each product
// (SSE4.1, opcode 66 0F 38 40 -- note the 0F 38 escape map).
void Assembler : : pmulld ( XMMRegister dst , XMMRegister src ) {
assert ( VM_Version : : supports_sse4_1 ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x40 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
// vpmullw -- AVX/EVEX packed word multiply-low: dst = nds * src (66 0F D5).
void Assembler : : vpmullw ( XMMRegister dst , XMMRegister nds , XMMRegister src , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2016-04-05 11:37:41 -07:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , nds - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-09 11:26:41 -08:00
emit_int8 ( ( unsigned char ) 0xD5 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
// vpmulld -- AVX/EVEX packed dword multiply-low: dst = nds * src (66 0F 38 40).
void Assembler : : vpmulld ( XMMRegister dst , XMMRegister nds , XMMRegister src , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-04-05 11:37:41 -07:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , nds - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x40 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
// vpmullq -- packed qword multiply-low (EVEX-only: AVX512DQ, W1 66 0F 38 40).
// Requires UseAVX > 2 and is forced to the EVEX encoding path.
void Assembler : : vpmullq ( XMMRegister dst , XMMRegister nds , XMMRegister src , int vector_len ) {
2016-04-18 15:18:14 -07:00
assert ( UseAVX > 2 , " requires some form of EVEX " ) ;
InstructionAttr attributes ( vector_len , /* vex_w */ true , /* legacy_mode */ _legacy_mode_dq , /* no_mask_reg */ false , /* uses_vl */ true ) ;
attributes . set_is_evex_instruction ( ) ;
2016-04-05 11:37:41 -07:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , nds - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2015-05-08 11:49:20 -07:00
emit_int8 ( 0x40 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
// vpmullw -- memory-source form: dst = nds * [src] low words (66 0F D5).
void Assembler : : vpmullw ( XMMRegister dst , XMMRegister nds , Address src , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2015-11-09 11:26:41 -08:00
attributes . set_address_attributes ( /* tuple_type */ EVEX_FVM , /* input_size_in_bits */ EVEX_NObit ) ;
2016-04-05 11:37:41 -07:00
vex_prefix ( src , nds - > encoding ( ) , dst - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-09 11:26:41 -08:00
emit_int8 ( ( unsigned char ) 0xD5 ) ;
emit_operand ( dst , src ) ;
2015-05-08 11:49:20 -07:00
}
// vpmulld -- memory-source form: dst = nds * [src] low dwords (66 0F 38 40).
void Assembler : : vpmulld ( XMMRegister dst , XMMRegister nds , Address src , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
InstructionMark im ( this ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_FV , /* input_size_in_bits */ EVEX_32bit ) ;
2016-04-05 11:37:41 -07:00
vex_prefix ( src , nds - > encoding ( ) , dst - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2015-05-08 11:49:20 -07:00
emit_int8 ( 0x40 ) ;
emit_operand ( dst , src ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
// vpmullq -- memory-source form (EVEX-only: AVX512DQ, W1 66 0F 38 40),
// 64-bit element tuple for disp8 compression.
void Assembler : : vpmullq ( XMMRegister dst , XMMRegister nds , Address src , int vector_len ) {
2016-04-18 15:18:14 -07:00
assert ( UseAVX > 2 , " requires some form of EVEX " ) ;
2012-08-20 09:07:21 -07:00
InstructionMark im ( this ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ true , /* legacy_mode */ _legacy_mode_dq , /* no_mask_reg */ false , /* uses_vl */ true ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_FV , /* input_size_in_bits */ EVEX_64bit ) ;
2016-04-18 15:18:14 -07:00
attributes . set_is_evex_instruction ( ) ;
2016-04-05 11:37:41 -07:00
vex_prefix ( src , nds - > encoding ( ) , dst - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x40 ) ;
2012-08-20 09:07:21 -07:00
emit_operand ( dst , src ) ;
}
// Shift packed integers left by specified number of bits.
// psllw -- shift packed words left by immediate. The /6 opcode-extension
// group form: the "register" field of ModRM carries 6 (encoded via xmm6).
void Assembler : : psllw ( XMMRegister dst , int shift ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2012-08-20 09:07:21 -07:00
// XMM6 is for /6 encoding: 66 0F 71 /6 ib
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( xmm6 , dst , dst , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x71 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
emit_int8 ( shift & 0xFF ) ;
2012-08-20 09:07:21 -07:00
}
// pslld -- shift packed dwords left by immediate (66 0F 72 /6 ib).
void Assembler : : pslld ( XMMRegister dst , int shift ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2012-08-20 09:07:21 -07:00
// XMM6 is for /6 encoding: 66 0F 72 /6 ib
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( xmm6 , dst , dst , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x72 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
emit_int8 ( shift & 0xFF ) ;
2012-08-20 09:07:21 -07:00
}
// psllq -- shift packed qwords left by immediate (66 0F 73 /6 ib);
// rex_w true here (qword element size).
void Assembler : : psllq ( XMMRegister dst , int shift ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ true , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2012-08-20 09:07:21 -07:00
// XMM6 is for /6 encoding: 66 0F 73 /6 ib
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( xmm6 , dst , dst , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x73 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
emit_int8 ( shift & 0xFF ) ;
2012-08-20 09:07:21 -07:00
}
// psllw -- shift packed words left, count taken from an xmm register (66 0F F1).
void Assembler : : psllw ( XMMRegister dst , XMMRegister shift ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( dst , dst , shift , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0xF1 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
// pslld -- shift packed dwords left, xmm count (66 0F F2).
void Assembler : : pslld ( XMMRegister dst , XMMRegister shift ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
int encode = simd_prefix_and_encode ( dst , dst , shift , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0xF2 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
// psllq -- shift packed qwords left, xmm count (66 0F F3);
// W-bit only under EVEX, flagged reverted.
void Assembler : : psllq ( XMMRegister dst , XMMRegister shift ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( dst , dst , shift , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0xF3 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
// vpsllw -- AVX/EVEX shift packed words left by immediate: dst = src << shift
// (66 0F 71 /6 ib; the /6 group digit is passed via xmm6's encoding).
void Assembler : : vpsllw ( XMMRegister dst , XMMRegister src , int shift , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2012-08-20 09:07:21 -07:00
// XMM6 is for /6 encoding: 66 0F 71 /6 ib
2015-11-09 11:26:41 -08:00
int encode = vex_prefix_and_encode ( xmm6 - > encoding ( ) , dst - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x71 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( shift & 0xFF ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
// vpslld -- AVX/EVEX shift packed dwords left by immediate (66 0F 72 /6 ib).
void Assembler : : vpslld ( XMMRegister dst , XMMRegister src , int shift , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-11-09 11:26:41 -08:00
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2012-08-20 09:07:21 -07:00
// XMM6 is for /6 encoding: 66 0F 72 /6 ib
2015-11-09 11:26:41 -08:00
int encode = vex_prefix_and_encode ( xmm6 - > encoding ( ) , dst - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x72 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( shift & 0xFF ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
// vpsllq -- AVX/EVEX shift packed qwords left by immediate (66 0F 73 /6 ib);
// W-bit only under EVEX, flagged reverted.
void Assembler : : vpsllq ( XMMRegister dst , XMMRegister src , int shift , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2012-08-20 09:07:21 -07:00
// XMM6 is for /6 encoding: 66 0F 73 /6 ib
2015-11-09 11:26:41 -08:00
int encode = vex_prefix_and_encode ( xmm6 - > encoding ( ) , dst - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x73 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( shift & 0xFF ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
// vpsllw -- AVX/EVEX shift packed words left, count from xmm: dst = src << shift
// (66 0F F1).
void Assembler : : vpsllw ( XMMRegister dst , XMMRegister src , XMMRegister shift , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2015-11-09 11:26:41 -08:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , src - > encoding ( ) , shift - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0xF1 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
// vpslld -- AVX/EVEX shift packed dwords left, xmm count (66 0F F2).
void Assembler : : vpslld ( XMMRegister dst , XMMRegister src , XMMRegister shift , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , src - > encoding ( ) , shift - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0xF2 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
// vpsllq -- AVX/EVEX shift packed qwords left, xmm count (66 0F F3);
// W-bit only under EVEX, flagged reverted.
void Assembler : : vpsllq ( XMMRegister dst , XMMRegister src , XMMRegister shift , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2015-11-09 11:26:41 -08:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , src - > encoding ( ) , shift - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0xF3 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
// Shift packed integers logically right by specified number of bits.
// psrlw -- logical shift packed words right by immediate; /2 opcode-extension
// group (digit carried via xmm2's encoding): 66 0F 71 /2 ib.
void Assembler : : psrlw ( XMMRegister dst , int shift ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2012-08-20 09:07:21 -07:00
// XMM2 is for /2 encoding: 66 0F 71 /2 ib
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( xmm2 , dst , dst , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x71 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
emit_int8 ( shift & 0xFF ) ;
2012-08-20 09:07:21 -07:00
}
// psrld -- logical shift packed dwords right by immediate (66 0F 72 /2 ib).
void Assembler : : psrld ( XMMRegister dst , int shift ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2012-08-20 09:07:21 -07:00
// XMM2 is for /2 encoding: 66 0F 72 /2 ib
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( xmm2 , dst , dst , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x72 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
emit_int8 ( shift & 0xFF ) ;
2012-08-20 09:07:21 -07:00
}
// psrlq -- logical shift packed qwords right by immediate (66 0F 73 /2 ib);
// W-bit only under EVEX, flagged reverted.
void Assembler : : psrlq ( XMMRegister dst , int shift ) {
// Do not confuse it with psrldq SSE2 instruction which
// shifts 128 bit value in xmm register by number of bytes.
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2012-08-20 09:07:21 -07:00
// XMM2 is for /2 encoding: 66 0F 73 /2 ib
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( xmm2 , dst , dst , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x73 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
emit_int8 ( shift & 0xFF ) ;
2012-08-20 09:07:21 -07:00
}
// psrlw -- logical shift packed words right, count from xmm (66 0F D1).
void Assembler : : psrlw ( XMMRegister dst , XMMRegister shift ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( dst , dst , shift , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0xD1 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
// psrld -- logical shift packed dwords right, xmm count (66 0F D2).
void Assembler : : psrld ( XMMRegister dst , XMMRegister shift ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
int encode = simd_prefix_and_encode ( dst , dst , shift , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0xD2 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
// psrlq -- logical shift packed qwords right, xmm count (66 0F D3);
// W-bit only under EVEX, flagged reverted.
void Assembler : : psrlq ( XMMRegister dst , XMMRegister shift ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( dst , dst , shift , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0xD3 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
// vpsrlw -- AVX/EVEX logical shift packed words right by immediate:
// dst = src >> shift (66 0F 71 /2 ib; group digit via xmm2).
void Assembler : : vpsrlw ( XMMRegister dst , XMMRegister src , int shift , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2015-09-11 17:02:44 -07:00
// XMM2 is for /2 encoding: 66 0F 71 /2 ib
2015-11-09 11:26:41 -08:00
int encode = vex_prefix_and_encode ( xmm2 - > encoding ( ) , dst - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x71 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( shift & 0xFF ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
// vpsrld -- AVX/EVEX logical shift packed dwords right by immediate
// (66 0F 72 /2 ib).
void Assembler : : vpsrld ( XMMRegister dst , XMMRegister src , int shift , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2015-09-11 17:02:44 -07:00
// XMM2 is for /2 encoding: 66 0F 72 /2 ib
2015-11-09 11:26:41 -08:00
int encode = vex_prefix_and_encode ( xmm2 - > encoding ( ) , dst - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x72 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( shift & 0xFF ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
// vpsrlq -- AVX/EVEX logical shift packed qwords right by immediate
// (66 0F 73 /2 ib); W-bit only under EVEX, flagged reverted.
void Assembler : : vpsrlq ( XMMRegister dst , XMMRegister src , int shift , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2012-08-20 09:07:21 -07:00
// XMM2 is for /2 encoding: 66 0F 73 /2 ib
2015-11-09 11:26:41 -08:00
int encode = vex_prefix_and_encode ( xmm2 - > encoding ( ) , dst - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x73 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( shift & 0xFF ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
// vpsrlw -- AVX/EVEX logical shift packed words right, count from xmm
// (66 0F D1).
void Assembler : : vpsrlw ( XMMRegister dst , XMMRegister src , XMMRegister shift , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2015-11-09 11:26:41 -08:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , src - > encoding ( ) , shift - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0xD1 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
// vpsrld -- AVX/EVEX logical shift packed dwords right, xmm count (66 0F D2).
void Assembler : : vpsrld ( XMMRegister dst , XMMRegister src , XMMRegister shift , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , src - > encoding ( ) , shift - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0xD2 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
// vpsrlq -- AVX/EVEX logical shift packed qwords right, xmm count (66 0F D3);
// W-bit only under EVEX, flagged reverted.
void Assembler : : vpsrlq ( XMMRegister dst , XMMRegister src , XMMRegister shift , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2015-11-09 11:26:41 -08:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , src - > encoding ( ) , shift - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0xD3 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
// Shift packed integers arithmetically right by specified number of bits.
// psraw -- arithmetic shift packed words right by immediate; /4 group
// (digit carried via xmm4): 66 0F 71 /4 ib. No qword form exists pre-AVX512.
void Assembler : : psraw ( XMMRegister dst , int shift ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2012-08-20 09:07:21 -07:00
// XMM4 is for /4 encoding: 66 0F 71 /4 ib
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( xmm4 , dst , dst , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x71 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
emit_int8 ( shift & 0xFF ) ;
2012-08-20 09:07:21 -07:00
}
// psrad -- arithmetic shift packed dwords right by immediate (66 0F 72 /4 ib).
void Assembler : : psrad ( XMMRegister dst , int shift ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2012-08-20 09:07:21 -07:00
// XMM4 is for /4 encoding: 66 0F 72 /4 ib
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( xmm4 , dst , dst , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x72 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
emit_int8 ( shift & 0xFF ) ;
2012-08-20 09:07:21 -07:00
}
// psraw -- arithmetic shift packed words right, count from xmm (66 0F E1).
void Assembler : : psraw ( XMMRegister dst , XMMRegister shift ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( dst , dst , shift , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0xE1 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
// psrad -- arithmetic shift packed dwords right, xmm count (66 0F E2).
void Assembler : : psrad ( XMMRegister dst , XMMRegister shift ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
int encode = simd_prefix_and_encode ( dst , dst , shift , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0xE2 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
// vpsraw -- AVX/EVEX arithmetic shift packed words right by immediate:
// dst = src >> shift (66 0F 71 /4 ib; group digit via xmm4).
void Assembler : : vpsraw ( XMMRegister dst , XMMRegister src , int shift , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2012-08-20 09:07:21 -07:00
// XMM4 is for /4 encoding: 66 0F 71 /4 ib
2015-11-09 11:26:41 -08:00
int encode = vex_prefix_and_encode ( xmm4 - > encoding ( ) , dst - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x71 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( shift & 0xFF ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
// vpsrad -- AVX/EVEX arithmetic shift packed dwords right by immediate
// (66 0F 72 /4 ib).
void Assembler : : vpsrad ( XMMRegister dst , XMMRegister src , int shift , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2012-08-20 09:07:21 -07:00
// XMM4 is for /4 encoding: 66 0F 72 /4 ib
2015-11-09 11:26:41 -08:00
int encode = vex_prefix_and_encode ( xmm4 - > encoding ( ) , dst - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x72 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( shift & 0xFF ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
void Assembler : : vpsraw ( XMMRegister dst , XMMRegister src , XMMRegister shift , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2015-11-09 11:26:41 -08:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , src - > encoding ( ) , shift - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0xE1 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
void Assembler : : vpsrad ( XMMRegister dst , XMMRegister src , XMMRegister shift , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , src - > encoding ( ) , shift - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0xE2 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
2015-10-05 20:02:40 -07:00
// logical operations packed integers
2012-08-20 09:07:21 -07:00
void Assembler : : pand ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0xDB ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
void Assembler : : vpand ( XMMRegister dst , XMMRegister nds , XMMRegister src , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-04-05 11:37:41 -07:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , nds - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-09 11:26:41 -08:00
emit_int8 ( ( unsigned char ) 0xDB ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
void Assembler : : vpand ( XMMRegister dst , XMMRegister nds , Address src , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_FV , /* input_size_in_bits */ EVEX_32bit ) ;
2016-04-05 11:37:41 -07:00
vex_prefix ( src , nds - > encoding ( ) , dst - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-09 11:26:41 -08:00
emit_int8 ( ( unsigned char ) 0xDB ) ;
emit_operand ( dst , src ) ;
2012-08-20 09:07:21 -07:00
}
2015-10-05 20:02:40 -07:00
void Assembler : : pandn ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* vex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0xDF ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2015-10-05 20:02:40 -07:00
}
2012-08-20 09:07:21 -07:00
void Assembler : : por ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0xEB ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
void Assembler : : vpor ( XMMRegister dst , XMMRegister nds , XMMRegister src , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-04-05 11:37:41 -07:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , nds - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-09 11:26:41 -08:00
emit_int8 ( ( unsigned char ) 0xEB ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
void Assembler : : vpor ( XMMRegister dst , XMMRegister nds , Address src , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_FV , /* input_size_in_bits */ EVEX_32bit ) ;
2016-04-05 11:37:41 -07:00
vex_prefix ( src , nds - > encoding ( ) , dst - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-09 11:26:41 -08:00
emit_int8 ( ( unsigned char ) 0xEB ) ;
emit_operand ( dst , src ) ;
2012-08-20 09:07:21 -07:00
}
void Assembler : : pxor ( XMMRegister dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( ( unsigned char ) 0xEF ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
void Assembler : : vpxor ( XMMRegister dst , XMMRegister nds , XMMRegister src , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-04-05 11:37:41 -07:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , nds - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-09 11:26:41 -08:00
emit_int8 ( ( unsigned char ) 0xEF ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
}
2015-05-08 11:49:20 -07:00
void Assembler : : vpxor ( XMMRegister dst , XMMRegister nds , Address src , int vector_len ) {
assert ( UseAVX > 0 , " requires some form of AVX " ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_FV , /* input_size_in_bits */ EVEX_32bit ) ;
2016-04-05 11:37:41 -07:00
vex_prefix ( src , nds - > encoding ( ) , dst - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-11-09 11:26:41 -08:00
emit_int8 ( ( unsigned char ) 0xEF ) ;
emit_operand ( dst , src ) ;
2012-08-20 09:07:21 -07:00
}
2016-04-05 11:37:41 -07:00
// vinserti forms
void Assembler : : vinserti128 ( XMMRegister dst , XMMRegister nds , XMMRegister src , uint8_t imm8 ) {
assert ( VM_Version : : supports_avx2 ( ) , " " ) ;
2016-03-07 15:03:48 -08:00
assert ( imm8 < = 0x01 , " imm8: %u " , imm8 ) ;
2016-03-29 09:53:50 -07:00
int vector_len = VM_Version : : supports_avx512novl ( ) ? AVX_512bit : AVX_256bit ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
2016-04-05 11:37:41 -07:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , nds - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
emit_int8 ( 0x38 ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2012-08-20 09:07:21 -07:00
// 0x00 - insert into lower 128 bits
// 0x01 - insert into upper 128 bits
2016-03-07 15:03:48 -08:00
emit_int8 ( imm8 & 0x01 ) ;
2012-08-20 09:07:21 -07:00
}
2016-04-05 11:37:41 -07:00
void Assembler : : vinserti128 ( XMMRegister dst , XMMRegister nds , Address src , uint8_t imm8 ) {
assert ( VM_Version : : supports_avx2 ( ) , " " ) ;
2015-05-08 11:49:20 -07:00
assert ( dst ! = xnoreg , " sanity " ) ;
2016-03-07 15:03:48 -08:00
assert ( imm8 < = 0x01 , " imm8: %u " , imm8 ) ;
2016-04-05 11:37:41 -07:00
int vector_len = VM_Version : : supports_avx512novl ( ) ? AVX_512bit : AVX_256bit ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
2016-04-05 11:37:41 -07:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_T4 , /* input_size_in_bits */ EVEX_32bit ) ;
vex_prefix ( src , nds - > encoding ( ) , dst - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
emit_int8 ( 0x38 ) ;
2015-05-08 11:49:20 -07:00
emit_operand ( dst , src ) ;
2016-04-05 11:37:41 -07:00
// 0x00 - insert into lower 128 bits
// 0x01 - insert into upper 128 bits
2016-03-07 15:03:48 -08:00
emit_int8 ( imm8 & 0x01 ) ;
2015-05-08 11:49:20 -07:00
}
2016-04-05 11:37:41 -07:00
void Assembler : : vinserti32x4 ( XMMRegister dst , XMMRegister nds , XMMRegister src , uint8_t imm8 ) {
2015-09-11 17:02:44 -07:00
assert ( VM_Version : : supports_evex ( ) , " " ) ;
2016-03-07 15:03:48 -08:00
assert ( imm8 < = 0x03 , " imm8: %u " , imm8 ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_512bit , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
2016-04-05 11:37:41 -07:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , nds - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
emit_int8 ( 0x38 ) ;
2015-09-11 17:02:44 -07:00
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
// 0x00 - insert into q0 128 bits (0..127)
// 0x01 - insert into q1 128 bits (128..255)
// 0x02 - insert into q2 128 bits (256..383)
// 0x03 - insert into q3 128 bits (384..511)
2016-03-07 15:03:48 -08:00
emit_int8 ( imm8 & 0x03 ) ;
2015-09-11 17:02:44 -07:00
}
2016-04-05 11:37:41 -07:00
void Assembler : : vinserti32x4 ( XMMRegister dst , XMMRegister nds , Address src , uint8_t imm8 ) {
2015-11-09 11:26:41 -08:00
assert ( VM_Version : : supports_avx ( ) , " " ) ;
2015-09-11 17:02:44 -07:00
assert ( dst ! = xnoreg , " sanity " ) ;
2016-03-07 15:03:48 -08:00
assert ( imm8 < = 0x03 , " imm8: %u " , imm8 ) ;
2015-11-09 11:26:41 -08:00
int vector_len = VM_Version : : supports_evex ( ) ? AVX_512bit : AVX_256bit ;
InstructionMark im ( this ) ;
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_T4 , /* input_size_in_bits */ EVEX_32bit ) ;
2016-04-05 11:37:41 -07:00
vex_prefix ( src , nds - > encoding ( ) , dst - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
2015-09-11 17:02:44 -07:00
emit_int8 ( 0x18 ) ;
emit_operand ( dst , src ) ;
// 0x00 - insert into q0 128 bits (0..127)
// 0x01 - insert into q1 128 bits (128..255)
// 0x02 - insert into q2 128 bits (256..383)
// 0x03 - insert into q3 128 bits (384..511)
2016-03-07 15:03:48 -08:00
emit_int8 ( imm8 & 0x03 ) ;
2015-09-11 17:02:44 -07:00
}
2016-04-05 11:37:41 -07:00
void Assembler : : vinserti64x4 ( XMMRegister dst , XMMRegister nds , XMMRegister src , uint8_t imm8 ) {
assert ( VM_Version : : supports_evex ( ) , " " ) ;
2016-03-07 15:03:48 -08:00
assert ( imm8 < = 0x01 , " imm8: %u " , imm8 ) ;
2016-04-05 11:37:41 -07:00
InstructionAttr attributes ( AVX_512bit , /* vex_w */ true , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , nds - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
emit_int8 ( 0x38 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
// 0x00 - insert into lower 256 bits
// 0x01 - insert into upper 256 bits
2016-03-07 15:03:48 -08:00
emit_int8 ( imm8 & 0x01 ) ;
2012-09-17 19:39:07 -07:00
}
2016-04-05 11:37:41 -07:00
// vinsertf forms
void Assembler : : vinsertf128 ( XMMRegister dst , XMMRegister nds , XMMRegister src , uint8_t imm8 ) {
2015-04-01 18:07:50 -07:00
assert ( VM_Version : : supports_avx ( ) , " " ) ;
2016-03-07 15:03:48 -08:00
assert ( imm8 < = 0x01 , " imm8: %u " , imm8 ) ;
2016-03-29 09:53:50 -07:00
int vector_len = VM_Version : : supports_avx512novl ( ) ? AVX_512bit : AVX_256bit ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
2016-04-05 11:37:41 -07:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , nds - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
emit_int8 ( 0x18 ) ;
2015-04-01 18:07:50 -07:00
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2016-04-05 11:37:41 -07:00
// 0x00 - insert into lower 128 bits
// 0x01 - insert into upper 128 bits
2016-03-07 15:03:48 -08:00
emit_int8 ( imm8 & 0x01 ) ;
2015-04-01 18:07:50 -07:00
}
2016-04-05 11:37:41 -07:00
void Assembler : : vinsertf128 ( XMMRegister dst , XMMRegister nds , Address src , uint8_t imm8 ) {
2012-09-17 19:39:07 -07:00
assert ( VM_Version : : supports_avx ( ) , " " ) ;
2016-04-05 11:37:41 -07:00
assert ( dst ! = xnoreg , " sanity " ) ;
2016-03-07 15:03:48 -08:00
assert ( imm8 < = 0x01 , " imm8: %u " , imm8 ) ;
2016-03-29 09:53:50 -07:00
int vector_len = VM_Version : : supports_avx512novl ( ) ? AVX_512bit : AVX_256bit ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_T4 , /* input_size_in_bits */ EVEX_32bit ) ;
2016-04-05 11:37:41 -07:00
vex_prefix ( src , nds - > encoding ( ) , dst - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
emit_int8 ( 0x18 ) ;
emit_operand ( dst , src ) ;
// 0x00 - insert into lower 128 bits
// 0x01 - insert into upper 128 bits
2016-03-07 15:03:48 -08:00
emit_int8 ( imm8 & 0x01 ) ;
2012-09-17 19:39:07 -07:00
}
2016-04-05 11:37:41 -07:00
void Assembler : : vinsertf32x4 ( XMMRegister dst , XMMRegister nds , XMMRegister src , uint8_t imm8 ) {
assert ( VM_Version : : supports_evex ( ) , " " ) ;
assert ( imm8 < = 0x03 , " imm8: %u " , imm8 ) ;
InstructionAttr attributes ( AVX_512bit , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , nds - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
emit_int8 ( 0x18 ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2016-04-05 11:37:41 -07:00
// 0x00 - insert into q0 128 bits (0..127)
// 0x01 - insert into q1 128 bits (128..255)
// 0x02 - insert into q2 128 bits (256..383)
// 0x03 - insert into q3 128 bits (384..511)
emit_int8 ( imm8 & 0x03 ) ;
2012-07-16 17:10:22 -07:00
}
2016-04-05 11:37:41 -07:00
void Assembler : : vinsertf32x4 ( XMMRegister dst , XMMRegister nds , Address src , uint8_t imm8 ) {
assert ( VM_Version : : supports_avx ( ) , " " ) ;
assert ( dst ! = xnoreg , " sanity " ) ;
assert ( imm8 < = 0x03 , " imm8: %u " , imm8 ) ;
int vector_len = VM_Version : : supports_evex ( ) ? AVX_512bit : AVX_256bit ;
InstructionMark im ( this ) ;
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_T4 , /* input_size_in_bits */ EVEX_32bit ) ;
vex_prefix ( src , nds - > encoding ( ) , dst - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
emit_int8 ( 0x18 ) ;
emit_operand ( dst , src ) ;
// 0x00 - insert into q0 128 bits (0..127)
// 0x01 - insert into q1 128 bits (128..255)
// 0x02 - insert into q2 128 bits (256..383)
// 0x03 - insert into q3 128 bits (384..511)
emit_int8 ( imm8 & 0x03 ) ;
}
void Assembler : : vinsertf64x4 ( XMMRegister dst , XMMRegister nds , XMMRegister src , uint8_t imm8 ) {
2015-05-08 11:49:20 -07:00
assert ( VM_Version : : supports_evex ( ) , " " ) ;
2016-03-07 15:03:48 -08:00
assert ( imm8 < = 0x01 , " imm8: %u " , imm8 ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_512bit , /* vex_w */ true , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
2016-04-05 11:37:41 -07:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , nds - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
emit_int8 ( 0x1A ) ;
2015-05-08 11:49:20 -07:00
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
// 0x00 - insert into lower 256 bits
// 0x01 - insert into upper 256 bits
2016-03-07 15:03:48 -08:00
emit_int8 ( imm8 & 0x01 ) ;
2015-05-08 11:49:20 -07:00
}
2016-04-05 11:37:41 -07:00
void Assembler : : vinsertf64x4 ( XMMRegister dst , XMMRegister nds , Address src , uint8_t imm8 ) {
assert ( VM_Version : : supports_evex ( ) , " " ) ;
2012-09-17 19:39:07 -07:00
assert ( dst ! = xnoreg , " sanity " ) ;
2016-03-07 15:03:48 -08:00
assert ( imm8 < = 0x01 , " imm8: %u " , imm8 ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
2016-04-05 11:37:41 -07:00
InstructionAttr attributes ( AVX_512bit , /* vex_w */ true , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_T4 , /* input_size_in_bits */ EVEX_64bit ) ;
vex_prefix ( src , nds - > encoding ( ) , dst - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
emit_int8 ( 0x1A ) ;
2012-09-17 19:39:07 -07:00
emit_operand ( dst , src ) ;
2016-04-05 11:37:41 -07:00
// 0x00 - insert into lower 256 bits
// 0x01 - insert into upper 256 bits
2016-03-07 15:03:48 -08:00
emit_int8 ( imm8 & 0x01 ) ;
2012-09-17 19:39:07 -07:00
}
2016-04-05 11:37:41 -07:00
// vextracti forms
2016-03-07 15:03:48 -08:00
void Assembler : : vextracti128 ( XMMRegister dst , XMMRegister src , uint8_t imm8 ) {
2015-05-08 11:49:20 -07:00
assert ( VM_Version : : supports_avx ( ) , " " ) ;
2016-03-07 15:03:48 -08:00
assert ( imm8 < = 0x01 , " imm8: %u " , imm8 ) ;
2016-03-29 09:53:50 -07:00
int vector_len = VM_Version : : supports_avx512novl ( ) ? AVX_512bit : AVX_256bit ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
int encode = vex_prefix_and_encode ( src - > encoding ( ) , 0 , dst - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
2015-05-08 11:49:20 -07:00
emit_int8 ( 0x39 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2016-03-07 15:03:48 -08:00
// 0x00 - extract from lower 128 bits
// 0x01 - extract from upper 128 bits
emit_int8 ( imm8 & 0x01 ) ;
2015-05-08 11:49:20 -07:00
}
2016-03-07 15:03:48 -08:00
void Assembler : : vextracti128 ( Address dst , XMMRegister src , uint8_t imm8 ) {
2012-09-17 19:39:07 -07:00
assert ( VM_Version : : supports_avx2 ( ) , " " ) ;
assert ( src ! = xnoreg , " sanity " ) ;
2016-03-07 15:03:48 -08:00
assert ( imm8 < = 0x01 , " imm8: %u " , imm8 ) ;
2016-03-29 09:53:50 -07:00
int vector_len = VM_Version : : supports_avx512novl ( ) ? AVX_512bit : AVX_256bit ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_T4 , /* input_size_in_bits */ EVEX_32bit ) ;
vex_prefix ( dst , 0 , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
2015-05-08 11:49:20 -07:00
emit_int8 ( 0x39 ) ;
emit_operand ( src , dst ) ;
2016-03-07 15:03:48 -08:00
// 0x00 - extract from lower 128 bits
2015-05-08 11:49:20 -07:00
// 0x01 - extract from upper 128 bits
2016-03-07 15:03:48 -08:00
emit_int8 ( imm8 & 0x01 ) ;
2015-05-08 11:49:20 -07:00
}
2016-04-05 11:37:41 -07:00
void Assembler : : vextracti32x4 ( XMMRegister dst , XMMRegister src , uint8_t imm8 ) {
assert ( VM_Version : : supports_avx ( ) , " " ) ;
assert ( imm8 < = 0x03 , " imm8: %u " , imm8 ) ;
int vector_len = VM_Version : : supports_evex ( ) ? AVX_512bit : AVX_256bit ;
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
2015-11-09 11:26:41 -08:00
int encode = vex_prefix_and_encode ( src - > encoding ( ) , 0 , dst - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
2016-04-05 11:37:41 -07:00
emit_int8 ( 0x39 ) ;
2015-05-08 11:49:20 -07:00
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2016-04-05 11:37:41 -07:00
// 0x00 - extract from bits 127:0
// 0x01 - extract from bits 255:128
// 0x02 - extract from bits 383:256
// 0x03 - extract from bits 511:384
emit_int8 ( imm8 & 0x03 ) ;
}
void Assembler : : vextracti32x4 ( Address dst , XMMRegister src , uint8_t imm8 ) {
assert ( VM_Version : : supports_evex ( ) , " " ) ;
assert ( src ! = xnoreg , " sanity " ) ;
assert ( imm8 < = 0x03 , " imm8: %u " , imm8 ) ;
InstructionMark im ( this ) ;
InstructionAttr attributes ( AVX_512bit , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_T4 , /* input_size_in_bits */ EVEX_32bit ) ;
vex_prefix ( dst , 0 , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
emit_int8 ( 0x39 ) ;
emit_operand ( src , dst ) ;
// 0x00 - extract from bits 127:0
// 0x01 - extract from bits 255:128
// 0x02 - extract from bits 383:256
// 0x03 - extract from bits 511:384
emit_int8 ( imm8 & 0x03 ) ;
2015-05-08 11:49:20 -07:00
}
2016-03-07 15:03:48 -08:00
void Assembler : : vextracti64x2 ( XMMRegister dst , XMMRegister src , uint8_t imm8 ) {
2016-04-18 15:18:14 -07:00
assert ( VM_Version : : supports_avx512dq ( ) , " " ) ;
2016-03-07 15:03:48 -08:00
assert ( imm8 < = 0x03 , " imm8: %u " , imm8 ) ;
2016-04-18 15:18:14 -07:00
InstructionAttr attributes ( AVX_512bit , /* vex_w */ true , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
2015-11-09 11:26:41 -08:00
int encode = vex_prefix_and_encode ( src - > encoding ( ) , 0 , dst - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x39 ) ;
2015-05-08 11:49:20 -07:00
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2016-03-07 15:03:48 -08:00
// 0x00 - extract from bits 127:0
2015-05-08 11:49:20 -07:00
// 0x01 - extract from bits 255:128
// 0x02 - extract from bits 383:256
// 0x03 - extract from bits 511:384
2016-03-07 15:03:48 -08:00
emit_int8 ( imm8 & 0x03 ) ;
2015-05-08 11:49:20 -07:00
}
2016-04-05 11:37:41 -07:00
void Assembler : : vextracti64x4 ( XMMRegister dst , XMMRegister src , uint8_t imm8 ) {
2015-05-08 11:49:20 -07:00
assert ( VM_Version : : supports_evex ( ) , " " ) ;
2016-03-07 15:03:48 -08:00
assert ( imm8 < = 0x01 , " imm8: %u " , imm8 ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_512bit , /* vex_w */ true , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
int encode = vex_prefix_and_encode ( src - > encoding ( ) , 0 , dst - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
2016-04-05 11:37:41 -07:00
emit_int8 ( 0x3B ) ;
2015-05-08 11:49:20 -07:00
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2015-11-09 11:26:41 -08:00
// 0x00 - extract from lower 256 bits
2015-05-08 11:49:20 -07:00
// 0x01 - extract from upper 256 bits
2016-03-07 15:03:48 -08:00
emit_int8 ( imm8 & 0x01 ) ;
2015-05-08 11:49:20 -07:00
}
2016-04-05 11:37:41 -07:00
// vextractf forms
void Assembler : : vextractf128 ( XMMRegister dst , XMMRegister src , uint8_t imm8 ) {
assert ( VM_Version : : supports_avx ( ) , " " ) ;
assert ( imm8 < = 0x01 , " imm8: %u " , imm8 ) ;
int vector_len = VM_Version : : supports_avx512novl ( ) ? AVX_512bit : AVX_256bit ;
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
int encode = vex_prefix_and_encode ( src - > encoding ( ) , 0 , dst - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
emit_int8 ( 0x19 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
// 0x00 - extract from lower 128 bits
// 0x01 - extract from upper 128 bits
emit_int8 ( imm8 & 0x01 ) ;
}
void Assembler : : vextractf128 ( Address dst , XMMRegister src , uint8_t imm8 ) {
assert ( VM_Version : : supports_avx ( ) , " " ) ;
2015-05-08 11:49:20 -07:00
assert ( src ! = xnoreg , " sanity " ) ;
2016-03-07 15:03:48 -08:00
assert ( imm8 < = 0x01 , " imm8: %u " , imm8 ) ;
2016-04-05 11:37:41 -07:00
int vector_len = VM_Version : : supports_avx512novl ( ) ? AVX_512bit : AVX_256bit ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
2016-04-05 11:37:41 -07:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_T4 , /* input_size_in_bits */ EVEX_32bit ) ;
2015-11-09 11:26:41 -08:00
vex_prefix ( dst , 0 , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
2016-04-05 11:37:41 -07:00
emit_int8 ( 0x19 ) ;
2012-09-17 19:39:07 -07:00
emit_operand ( src , dst ) ;
2016-04-05 11:37:41 -07:00
// 0x00 - extract from lower 128 bits
// 0x01 - extract from upper 128 bits
2016-03-07 15:03:48 -08:00
emit_int8 ( imm8 & 0x01 ) ;
2012-09-17 19:39:07 -07:00
}
2016-03-07 15:03:48 -08:00
void Assembler : : vextractf32x4 ( XMMRegister dst , XMMRegister src , uint8_t imm8 ) {
2015-11-09 11:26:41 -08:00
assert ( VM_Version : : supports_avx ( ) , " " ) ;
2016-03-07 15:03:48 -08:00
assert ( imm8 < = 0x03 , " imm8: %u " , imm8 ) ;
2015-11-09 11:26:41 -08:00
int vector_len = VM_Version : : supports_evex ( ) ? AVX_512bit : AVX_256bit ;
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
int encode = vex_prefix_and_encode ( src - > encoding ( ) , 0 , dst - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
2015-05-08 11:49:20 -07:00
emit_int8 ( 0x19 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2015-09-11 17:02:44 -07:00
// 0x00 - extract from bits 127:0
// 0x01 - extract from bits 255:128
// 0x02 - extract from bits 383:256
// 0x03 - extract from bits 511:384
2016-03-07 15:03:48 -08:00
emit_int8 ( imm8 & 0x03 ) ;
2015-09-11 17:02:44 -07:00
}
2016-03-07 15:03:48 -08:00
void Assembler : : vextractf32x4 ( Address dst , XMMRegister src , uint8_t imm8 ) {
2015-09-11 17:02:44 -07:00
assert ( VM_Version : : supports_evex ( ) , " " ) ;
assert ( src ! = xnoreg , " sanity " ) ;
2016-03-07 15:03:48 -08:00
assert ( imm8 < = 0x03 , " imm8: %u " , imm8 ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( AVX_512bit , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_T4 , /* input_size_in_bits */ EVEX_32bit ) ;
vex_prefix ( dst , 0 , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
2015-09-11 17:02:44 -07:00
emit_int8 ( 0x19 ) ;
emit_operand ( src , dst ) ;
// 0x00 - extract from bits 127:0
2015-05-08 11:49:20 -07:00
// 0x01 - extract from bits 255:128
// 0x02 - extract from bits 383:256
// 0x03 - extract from bits 511:384
2016-03-07 15:03:48 -08:00
emit_int8 ( imm8 & 0x03 ) ;
2015-05-08 11:49:20 -07:00
}
2016-03-07 15:03:48 -08:00
void Assembler : : vextractf64x2 ( XMMRegister dst , XMMRegister src , uint8_t imm8 ) {
2016-04-18 15:18:14 -07:00
assert ( VM_Version : : supports_avx512dq ( ) , " " ) ;
2016-03-07 15:03:48 -08:00
assert ( imm8 < = 0x03 , " imm8: %u " , imm8 ) ;
2016-04-18 15:18:14 -07:00
InstructionAttr attributes ( AVX_512bit , /* vex_w */ true , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
2015-11-09 11:26:41 -08:00
int encode = vex_prefix_and_encode ( src - > encoding ( ) , 0 , dst - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
2015-05-08 11:49:20 -07:00
emit_int8 ( 0x19 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2016-03-07 15:03:48 -08:00
// 0x00 - extract from bits 127:0
2015-05-08 11:49:20 -07:00
// 0x01 - extract from bits 255:128
// 0x02 - extract from bits 383:256
// 0x03 - extract from bits 511:384
2016-03-07 15:03:48 -08:00
emit_int8 ( imm8 & 0x03 ) ;
2015-05-08 11:49:20 -07:00
}
2016-04-05 11:37:41 -07:00
void Assembler : : vextractf64x4 ( XMMRegister dst , XMMRegister src , uint8_t imm8 ) {
assert ( VM_Version : : supports_evex ( ) , " " ) ;
assert ( imm8 < = 0x01 , " imm8: %u " , imm8 ) ;
InstructionAttr attributes ( AVX_512bit , /* vex_w */ true , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
int encode = vex_prefix_and_encode ( src - > encoding ( ) , 0 , dst - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
emit_int8 ( 0x1B ) ;
2015-05-08 11:49:20 -07:00
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2016-04-05 11:37:41 -07:00
// 0x00 - extract from lower 256 bits
// 0x01 - extract from upper 256 bits
emit_int8 ( imm8 & 0x01 ) ;
2015-05-08 11:49:20 -07:00
}
2016-04-05 11:37:41 -07:00
void Assembler : : vextractf64x4 ( Address dst , XMMRegister src , uint8_t imm8 ) {
assert ( VM_Version : : supports_evex ( ) , " " ) ;
assert ( src ! = xnoreg , " sanity " ) ;
assert ( imm8 < = 0x01 , " imm8: %u " , imm8 ) ;
InstructionMark im ( this ) ;
InstructionAttr attributes ( AVX_512bit , /* vex_w */ true , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ false ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_T4 , /* input_size_in_bits */ EVEX_64bit ) ;
vex_prefix ( dst , 0 , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
emit_int8 ( 0x1B ) ;
emit_operand ( src , dst ) ;
// 0x00 - extract from lower 256 bits
// 0x01 - extract from upper 256 bits
emit_int8 ( imm8 & 0x01 ) ;
}
// legacy word/dword replicate
8141132: JEP 254: Compact Strings
Adopt a more space-efficient internal representation for strings.
Co-authored-by: Brent Christian <brent.christian@oracle.com>
Co-authored-by: Vivek Deshpande <vivek.r.deshpande@intel.com>
Co-authored-by: Charlie Hunt <charlie.hunt@oracle.com>
Co-authored-by: Vladimir Kozlov <vladimir.kozlov@oracle.com>
Co-authored-by: Roger Riggs <roger.riggs@oracle.com>
Co-authored-by: Xueming Shen <xueming.shen@oracle.com>
Co-authored-by: Aleksey Shipilev <aleksey.shipilev@oracle.com>
Co-authored-by: Sandhya Viswanathan <sandhya.viswanathan@intel.com>
Reviewed-by: alanb, bdelsart, coleenp, iklam, jiangli, jrose, kevinw, naoto, pliden, roland, smarks, twisti
2015-11-03 09:41:03 +01:00
void Assembler : : vpbroadcastw ( XMMRegister dst , XMMRegister src ) {
assert ( VM_Version : : supports_avx2 ( ) , " " ) ;
2015-12-14 14:48:30 -08:00
InstructionAttr attributes ( AVX_256bit , /* vex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2015-11-09 11:26:41 -08:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , 0 , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
8141132: JEP 254: Compact Strings
Adopt a more space-efficient internal representation for strings.
Co-authored-by: Brent Christian <brent.christian@oracle.com>
Co-authored-by: Vivek Deshpande <vivek.r.deshpande@intel.com>
Co-authored-by: Charlie Hunt <charlie.hunt@oracle.com>
Co-authored-by: Vladimir Kozlov <vladimir.kozlov@oracle.com>
Co-authored-by: Roger Riggs <roger.riggs@oracle.com>
Co-authored-by: Xueming Shen <xueming.shen@oracle.com>
Co-authored-by: Aleksey Shipilev <aleksey.shipilev@oracle.com>
Co-authored-by: Sandhya Viswanathan <sandhya.viswanathan@intel.com>
Reviewed-by: alanb, bdelsart, coleenp, iklam, jiangli, jrose, kevinw, naoto, pliden, roland, smarks, twisti
2015-11-03 09:41:03 +01:00
emit_int8 ( 0x79 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
2016-04-05 11:37:41 -07:00
void Assembler : : vpbroadcastd ( XMMRegister dst , XMMRegister src ) {
assert ( VM_Version : : supports_avx2 ( ) , " " ) ;
InstructionAttr attributes ( AVX_256bit , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , 0 , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
emit_int8 ( 0x58 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
// xmm/mem sourced byte/word/dword/qword replicate
// duplicate 1-byte integer data from src into programmed locations in dest : requires AVX512BW and AVX512VL
2015-06-23 12:45:08 -07:00
void Assembler : : evpbroadcastb ( XMMRegister dst , XMMRegister src , int vector_len ) {
2015-11-09 11:26:41 -08:00
assert ( VM_Version : : supports_evex ( ) , " " ) ;
2016-02-12 16:12:15 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2015-11-09 11:26:41 -08:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , 0 , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2015-06-23 12:45:08 -07:00
emit_int8 ( 0x78 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
void Assembler : : evpbroadcastb ( XMMRegister dst , Address src , int vector_len ) {
2015-11-09 11:26:41 -08:00
assert ( VM_Version : : supports_evex ( ) , " " ) ;
2015-06-23 12:45:08 -07:00
assert ( dst ! = xnoreg , " sanity " ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
2016-02-12 16:12:15 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2015-11-09 11:26:41 -08:00
attributes . set_address_attributes ( /* tuple_type */ EVEX_T1S , /* input_size_in_bits */ EVEX_8bit ) ;
2015-06-23 12:45:08 -07:00
// swap src<->dst for encoding
2016-04-05 11:37:41 -07:00
vex_prefix ( src , 0 , dst - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2015-06-23 12:45:08 -07:00
emit_int8 ( 0x78 ) ;
emit_operand ( dst , src ) ;
}
2016-04-05 11:37:41 -07:00
// duplicate 2-byte integer data from src into programmed locations in dest : requires AVX512BW and AVX512VL
2015-06-23 12:45:08 -07:00
void Assembler : : evpbroadcastw ( XMMRegister dst , XMMRegister src , int vector_len ) {
2015-11-09 11:26:41 -08:00
assert ( VM_Version : : supports_evex ( ) , " " ) ;
2016-02-12 16:12:15 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2015-11-09 11:26:41 -08:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , 0 , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2015-06-23 12:45:08 -07:00
emit_int8 ( 0x79 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
void Assembler : : evpbroadcastw ( XMMRegister dst , Address src , int vector_len ) {
2015-11-09 11:26:41 -08:00
assert ( VM_Version : : supports_evex ( ) , " " ) ;
2015-06-23 12:45:08 -07:00
assert ( dst ! = xnoreg , " sanity " ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
2016-02-12 16:12:15 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2015-11-09 11:26:41 -08:00
attributes . set_address_attributes ( /* tuple_type */ EVEX_T1S , /* input_size_in_bits */ EVEX_16bit ) ;
2015-06-23 12:45:08 -07:00
// swap src<->dst for encoding
2016-04-05 11:37:41 -07:00
vex_prefix ( src , 0 , dst - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2015-06-23 12:45:08 -07:00
emit_int8 ( 0x79 ) ;
emit_operand ( dst , src ) ;
}
2016-04-05 11:37:41 -07:00
// duplicate 4-byte integer data from src into programmed locations in dest : requires AVX512VL
2015-05-08 11:49:20 -07:00
void Assembler : : evpbroadcastd ( XMMRegister dst , XMMRegister src , int vector_len ) {
2015-11-09 11:26:41 -08:00
assert ( VM_Version : : supports_evex ( ) , " " ) ;
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , 0 , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2013-01-03 16:30:47 -08:00
emit_int8 ( 0x58 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
2015-06-23 12:45:08 -07:00
void Assembler : : evpbroadcastd ( XMMRegister dst , Address src , int vector_len ) {
2015-11-09 11:26:41 -08:00
assert ( VM_Version : : supports_evex ( ) , " " ) ;
2015-06-23 12:45:08 -07:00
assert ( dst ! = xnoreg , " sanity " ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_T1S , /* input_size_in_bits */ EVEX_32bit ) ;
2015-06-23 12:45:08 -07:00
// swap src<->dst for encoding
2016-04-05 11:37:41 -07:00
vex_prefix ( src , 0 , dst - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2015-06-23 12:45:08 -07:00
emit_int8 ( 0x58 ) ;
emit_operand ( dst , src ) ;
}
2016-04-05 11:37:41 -07:00
// duplicate 8-byte integer data from src into programmed locations in dest : requires AVX512VL
2015-06-23 12:45:08 -07:00
void Assembler : : evpbroadcastq ( XMMRegister dst , XMMRegister src , int vector_len ) {
2015-11-09 11:26:41 -08:00
assert ( VM_Version : : supports_evex ( ) , " " ) ;
2016-04-18 15:18:14 -07:00
InstructionAttr attributes ( vector_len , /* vex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
attributes . set_rex_vex_w_reverted ( ) ;
2015-11-09 11:26:41 -08:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , 0 , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2015-06-23 12:45:08 -07:00
emit_int8 ( 0x59 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
void Assembler : : evpbroadcastq ( XMMRegister dst , Address src , int vector_len ) {
2015-11-09 11:26:41 -08:00
assert ( VM_Version : : supports_evex ( ) , " " ) ;
2015-06-23 12:45:08 -07:00
assert ( dst ! = xnoreg , " sanity " ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
2016-04-18 15:18:14 -07:00
InstructionAttr attributes ( vector_len , /* vex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
attributes . set_rex_vex_w_reverted ( ) ;
2015-11-09 11:26:41 -08:00
attributes . set_address_attributes ( /* tuple_type */ EVEX_T1S , /* input_size_in_bits */ EVEX_64bit ) ;
2015-06-23 12:45:08 -07:00
// swap src<->dst for encoding
2016-04-05 11:37:41 -07:00
vex_prefix ( src , 0 , dst - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2015-06-23 12:45:08 -07:00
emit_int8 ( 0x59 ) ;
emit_operand ( dst , src ) ;
}
2016-04-05 11:37:41 -07:00
// scalar single/double precision replicate
// duplicate single precision data from src into programmed locations in dest : requires AVX512VL
2015-06-23 12:45:08 -07:00
void Assembler : : evpbroadcastss ( XMMRegister dst , XMMRegister src , int vector_len ) {
2015-11-09 11:26:41 -08:00
assert ( VM_Version : : supports_evex ( ) , " " ) ;
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , 0 , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2015-06-23 12:45:08 -07:00
emit_int8 ( 0x18 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
void Assembler : : evpbroadcastss ( XMMRegister dst , Address src , int vector_len ) {
2015-11-09 11:26:41 -08:00
assert ( VM_Version : : supports_evex ( ) , " " ) ;
2015-06-23 12:45:08 -07:00
assert ( dst ! = xnoreg , " sanity " ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_T1S , /* input_size_in_bits */ EVEX_32bit ) ;
2015-06-23 12:45:08 -07:00
// swap src<->dst for encoding
2015-11-09 11:26:41 -08:00
vex_prefix ( src , 0 , dst - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2015-06-23 12:45:08 -07:00
emit_int8 ( 0x18 ) ;
emit_operand ( dst , src ) ;
}
2016-04-05 11:37:41 -07:00
// duplicate double precision data from src into programmed locations in dest : requires AVX512VL
2015-06-23 12:45:08 -07:00
void Assembler : : evpbroadcastsd ( XMMRegister dst , XMMRegister src , int vector_len ) {
2015-11-09 11:26:41 -08:00
assert ( VM_Version : : supports_evex ( ) , " " ) ;
2016-04-18 15:18:14 -07:00
InstructionAttr attributes ( vector_len , /* vex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
attributes . set_rex_vex_w_reverted ( ) ;
2015-11-09 11:26:41 -08:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , 0 , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2015-06-23 12:45:08 -07:00
emit_int8 ( 0x19 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
void Assembler : : evpbroadcastsd ( XMMRegister dst , Address src , int vector_len ) {
2015-11-09 11:26:41 -08:00
assert ( VM_Version : : supports_evex ( ) , " " ) ;
2015-06-23 12:45:08 -07:00
assert ( dst ! = xnoreg , " sanity " ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
2016-04-18 15:18:14 -07:00
InstructionAttr attributes ( vector_len , /* vex_w */ VM_Version : : supports_evex ( ) , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2015-11-09 11:26:41 -08:00
attributes . set_address_attributes ( /* tuple_type */ EVEX_T1S , /* input_size_in_bits */ EVEX_64bit ) ;
2016-04-18 15:18:14 -07:00
attributes . set_rex_vex_w_reverted ( ) ;
2015-06-23 12:45:08 -07:00
// swap src<->dst for encoding
2015-11-09 11:26:41 -08:00
vex_prefix ( src , 0 , dst - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2015-06-23 12:45:08 -07:00
emit_int8 ( 0x19 ) ;
emit_operand ( dst , src ) ;
}
2016-04-05 11:37:41 -07:00
// gpr source broadcast forms
// duplicate 1-byte integer data from src into programmed locations in dest : requires AVX512BW and AVX512VL
2015-06-23 12:45:08 -07:00
void Assembler : : evpbroadcastb ( XMMRegister dst , Register src , int vector_len ) {
assert ( VM_Version : : supports_evex ( ) , " " ) ;
2016-02-12 16:12:15 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2016-04-18 15:18:14 -07:00
attributes . set_is_evex_instruction ( ) ;
2015-11-09 11:26:41 -08:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , 0 , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2016-04-18 15:18:14 -07:00
emit_int8 ( 0x7A ) ;
2015-06-23 12:45:08 -07:00
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
2016-04-05 11:37:41 -07:00
// duplicate 2-byte integer data from src into programmed locations in dest : requires AVX512BW and AVX512VL
2015-06-23 12:45:08 -07:00
void Assembler : : evpbroadcastw ( XMMRegister dst , Register src , int vector_len ) {
assert ( VM_Version : : supports_evex ( ) , " " ) ;
2016-02-12 16:12:15 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ _legacy_mode_bw , /* no_mask_reg */ true , /* uses_vl */ true ) ;
2016-04-18 15:18:14 -07:00
attributes . set_is_evex_instruction ( ) ;
2015-11-09 11:26:41 -08:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , 0 , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2016-04-18 15:18:14 -07:00
emit_int8 ( 0x7B ) ;
2015-06-23 12:45:08 -07:00
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
2016-04-05 11:37:41 -07:00
// duplicate 4-byte integer data from src into programmed locations in dest : requires AVX512VL
2015-06-23 12:45:08 -07:00
void Assembler : : evpbroadcastd ( XMMRegister dst , Register src , int vector_len ) {
assert ( VM_Version : : supports_evex ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-04-18 15:18:14 -07:00
attributes . set_is_evex_instruction ( ) ;
2015-11-09 11:26:41 -08:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , 0 , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2016-04-18 15:18:14 -07:00
emit_int8 ( 0x7C ) ;
2015-06-23 12:45:08 -07:00
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
2016-04-05 11:37:41 -07:00
// duplicate 8-byte integer data from src into programmed locations in dest : requires AVX512VL
2015-06-23 12:45:08 -07:00
void Assembler : : evpbroadcastq ( XMMRegister dst , Register src , int vector_len ) {
assert ( VM_Version : : supports_evex ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ true , /* legacy_mode */ false , /* no_mask_reg */ false , /* uses_vl */ true ) ;
2016-04-18 15:18:14 -07:00
attributes . set_is_evex_instruction ( ) ;
2015-11-09 11:26:41 -08:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , 0 , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
2016-04-18 15:18:14 -07:00
emit_int8 ( 0x7C ) ;
2015-06-23 12:45:08 -07:00
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
2016-04-05 11:37:41 -07:00
2014-08-05 15:02:10 -07:00
// Carry-Less Multiplication Quadword
void Assembler : : pclmulqdq ( XMMRegister dst , XMMRegister src , int mask ) {
assert ( VM_Version : : supports_clmul ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
int encode = simd_prefix_and_encode ( dst , dst , src , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
2014-08-05 15:02:10 -07:00
emit_int8 ( 0x44 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
emit_int8 ( ( unsigned char ) mask ) ;
}
2013-07-02 20:42:12 -04:00
// Carry-Less Multiplication Quadword
void Assembler : : vpclmulqdq ( XMMRegister dst , XMMRegister nds , XMMRegister src , int mask ) {
assert ( VM_Version : : supports_avx ( ) & & VM_Version : : supports_clmul ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* vex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
2016-04-05 11:37:41 -07:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , nds - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
2013-07-02 20:42:12 -04:00
emit_int8 ( 0x44 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
emit_int8 ( ( unsigned char ) mask ) ;
}
2012-06-15 01:25:19 -07:00
void Assembler : : vzeroupper ( ) {
assert ( VM_Version : : supports_avx ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* vex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
( void ) vex_prefix_and_encode ( 0 , 0 , 0 , VEX_SIMD_NONE , VEX_OPCODE_0F , & attributes ) ;
emit_int8 ( 0x77 ) ;
2012-06-15 01:25:19 -07:00
}
2011-12-20 00:55:02 -08:00
2008-08-27 00:21:55 -07:00
#ifndef _LP64
// 32bit only pieces of the assembler

// CMP r32, imm32 with relocation info for the immediate.
void Assembler::cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec) {
  // NO PREFIX AS NEVER 64BIT
  InstructionMark mark(this);
  emit_int8((unsigned char)0x81);                       // 81 /7 id
  emit_int8((unsigned char)(0xF8 | src1->encoding()));
  emit_data(imm32, rspec, 0);
}

// CMP m32, imm32 with relocation info for the immediate.
void Assembler::cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec) {
  // NO PREFIX AS NEVER 64BIT (not even 32bit versions of 64bit regs
  InstructionMark mark(this);
  emit_int8((unsigned char)0x81);
  emit_operand(rdi, src1);                              // rdi encodes /7
  emit_data(imm32, rspec, 0);
}

// The 64-bit (32bit platform) cmpxchg compares the value at adr with the contents of rdx:rax,
// and stores rcx:rbx into adr if so; otherwise, the value at adr is loaded
// into rdx:rax. The ZF is set if the compared values were equal, and cleared otherwise.
void Assembler::cmpxchg8(Address adr) {
  InstructionMark mark(this);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xC7);
  emit_operand(rcx, adr);                               // rcx encodes /1 (CMPXCHG8B)
}

void Assembler::decl(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementl() instead.
  emit_int8(0x48 | dst->encoding());                    // one-byte DEC r32
}

#endif // _LP64
// 64bit typically doesn't use the x87 but needs to for the trig funcs
void Assembler : : fabs ( ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xD9 ) ;
emit_int8 ( ( unsigned char ) 0xE1 ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : fadd ( int i ) {
emit_farith ( 0xD8 , 0xC0 , i ) ;
}
void Assembler : : fadd_d ( Address src ) {
InstructionMark im ( this ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xDC ) ;
2008-08-27 00:21:55 -07:00
emit_operand32 ( rax , src ) ;
}
void Assembler : : fadd_s ( Address src ) {
InstructionMark im ( this ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xD8 ) ;
2008-08-27 00:21:55 -07:00
emit_operand32 ( rax , src ) ;
}
void Assembler : : fadda ( int i ) {
emit_farith ( 0xDC , 0xC0 , i ) ;
}
void Assembler : : faddp ( int i ) {
emit_farith ( 0xDE , 0xC0 , i ) ;
}
void Assembler : : fchs ( ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xD9 ) ;
emit_int8 ( ( unsigned char ) 0xE0 ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : fcom ( int i ) {
emit_farith ( 0xD8 , 0xD0 , i ) ;
}
void Assembler : : fcomp ( int i ) {
emit_farith ( 0xD8 , 0xD8 , i ) ;
}
void Assembler : : fcomp_d ( Address src ) {
InstructionMark im ( this ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xDC ) ;
2008-08-27 00:21:55 -07:00
emit_operand32 ( rbx , src ) ;
}
void Assembler : : fcomp_s ( Address src ) {
InstructionMark im ( this ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xD8 ) ;
2008-08-27 00:21:55 -07:00
emit_operand32 ( rbx , src ) ;
}
void Assembler : : fcompp ( ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xDE ) ;
emit_int8 ( ( unsigned char ) 0xD9 ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : fcos ( ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xD9 ) ;
emit_int8 ( ( unsigned char ) 0xFF ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : fdecstp ( ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xD9 ) ;
emit_int8 ( ( unsigned char ) 0xF6 ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : fdiv ( int i ) {
emit_farith ( 0xD8 , 0xF0 , i ) ;
}
void Assembler : : fdiv_d ( Address src ) {
InstructionMark im ( this ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xDC ) ;
2008-08-27 00:21:55 -07:00
emit_operand32 ( rsi , src ) ;
}
void Assembler : : fdiv_s ( Address src ) {
InstructionMark im ( this ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xD8 ) ;
2008-08-27 00:21:55 -07:00
emit_operand32 ( rsi , src ) ;
}
void Assembler : : fdiva ( int i ) {
emit_farith ( 0xDC , 0xF8 , i ) ;
}
// Note: The Intel manual (Pentium Processor User's Manual, Vol.3, 1994)
// is erroneous for some of the floating-point instructions below.
void Assembler : : fdivp ( int i ) {
emit_farith ( 0xDE , 0xF8 , i ) ; // ST(0) <- ST(0) / ST(1) and pop (Intel manual wrong)
}
void Assembler : : fdivr ( int i ) {
emit_farith ( 0xD8 , 0xF8 , i ) ;
}
void Assembler : : fdivr_d ( Address src ) {
InstructionMark im ( this ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xDC ) ;
2008-08-27 00:21:55 -07:00
emit_operand32 ( rdi , src ) ;
}
void Assembler : : fdivr_s ( Address src ) {
InstructionMark im ( this ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xD8 ) ;
2008-08-27 00:21:55 -07:00
emit_operand32 ( rdi , src ) ;
}
void Assembler : : fdivra ( int i ) {
emit_farith ( 0xDC , 0xF0 , i ) ;
}
void Assembler : : fdivrp ( int i ) {
emit_farith ( 0xDE , 0xF0 , i ) ; // ST(0) <- ST(1) / ST(0) and pop (Intel manual wrong)
}
void Assembler : : ffree ( int i ) {
emit_farith ( 0xDD , 0xC0 , i ) ;
}
void Assembler : : fild_d ( Address adr ) {
InstructionMark im ( this ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xDF ) ;
2008-08-27 00:21:55 -07:00
emit_operand32 ( rbp , adr ) ;
}
void Assembler : : fild_s ( Address adr ) {
InstructionMark im ( this ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xDB ) ;
2008-08-27 00:21:55 -07:00
emit_operand32 ( rax , adr ) ;
}
void Assembler : : fincstp ( ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xD9 ) ;
emit_int8 ( ( unsigned char ) 0xF7 ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : finit ( ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0x9B ) ;
emit_int8 ( ( unsigned char ) 0xDB ) ;
emit_int8 ( ( unsigned char ) 0xE3 ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : fist_s ( Address adr ) {
InstructionMark im ( this ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xDB ) ;
2008-08-27 00:21:55 -07:00
emit_operand32 ( rdx , adr ) ;
}
void Assembler : : fistp_d ( Address adr ) {
InstructionMark im ( this ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xDF ) ;
2008-08-27 00:21:55 -07:00
emit_operand32 ( rdi , adr ) ;
}
void Assembler : : fistp_s ( Address adr ) {
InstructionMark im ( this ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xDB ) ;
2008-08-27 00:21:55 -07:00
emit_operand32 ( rbx , adr ) ;
}
void Assembler : : fld1 ( ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xD9 ) ;
emit_int8 ( ( unsigned char ) 0xE8 ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : fld_d ( Address adr ) {
InstructionMark im ( this ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xDD ) ;
2008-08-27 00:21:55 -07:00
emit_operand32 ( rax , adr ) ;
}
void Assembler : : fld_s ( Address adr ) {
InstructionMark im ( this ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xD9 ) ;
2008-08-27 00:21:55 -07:00
emit_operand32 ( rax , adr ) ;
}
void Assembler : : fld_s ( int index ) {
emit_farith ( 0xD9 , 0xC0 , index ) ;
}
void Assembler : : fld_x ( Address adr ) {
InstructionMark im ( this ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xDB ) ;
2008-08-27 00:21:55 -07:00
emit_operand32 ( rbp , adr ) ;
}
void Assembler : : fldcw ( Address src ) {
InstructionMark im ( this ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xD9 ) ;
2008-08-27 00:21:55 -07:00
emit_operand32 ( rbp , src ) ;
}
void Assembler : : fldenv ( Address src ) {
InstructionMark im ( this ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xD9 ) ;
2008-08-27 00:21:55 -07:00
emit_operand32 ( rsp , src ) ;
}
void Assembler : : fldlg2 ( ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xD9 ) ;
emit_int8 ( ( unsigned char ) 0xEC ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : fldln2 ( ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xD9 ) ;
emit_int8 ( ( unsigned char ) 0xED ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : fldz ( ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xD9 ) ;
emit_int8 ( ( unsigned char ) 0xEE ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : flog ( ) {
fldln2 ( ) ;
fxch ( ) ;
fyl2x ( ) ;
}
void Assembler : : flog10 ( ) {
fldlg2 ( ) ;
fxch ( ) ;
fyl2x ( ) ;
}
void Assembler : : fmul ( int i ) {
emit_farith ( 0xD8 , 0xC8 , i ) ;
}
void Assembler : : fmul_d ( Address src ) {
InstructionMark im ( this ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xDC ) ;
2008-08-27 00:21:55 -07:00
emit_operand32 ( rcx , src ) ;
}
void Assembler : : fmul_s ( Address src ) {
InstructionMark im ( this ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xD8 ) ;
2008-08-27 00:21:55 -07:00
emit_operand32 ( rcx , src ) ;
}
void Assembler : : fmula ( int i ) {
emit_farith ( 0xDC , 0xC8 , i ) ;
}
void Assembler : : fmulp ( int i ) {
emit_farith ( 0xDE , 0xC8 , i ) ;
}
void Assembler : : fnsave ( Address dst ) {
InstructionMark im ( this ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xDD ) ;
2008-08-27 00:21:55 -07:00
emit_operand32 ( rsi , dst ) ;
}
void Assembler : : fnstcw ( Address src ) {
InstructionMark im ( this ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0x9B ) ;
emit_int8 ( ( unsigned char ) 0xD9 ) ;
2008-08-27 00:21:55 -07:00
emit_operand32 ( rdi , src ) ;
}
void Assembler : : fnstsw_ax ( ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xDF ) ;
emit_int8 ( ( unsigned char ) 0xE0 ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : fprem ( ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xD9 ) ;
emit_int8 ( ( unsigned char ) 0xF8 ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : fprem1 ( ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xD9 ) ;
emit_int8 ( ( unsigned char ) 0xF5 ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : frstor ( Address src ) {
InstructionMark im ( this ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xDD ) ;
2008-08-27 00:21:55 -07:00
emit_operand32 ( rsp , src ) ;
}
void Assembler : : fsin ( ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xD9 ) ;
emit_int8 ( ( unsigned char ) 0xFE ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : fsqrt ( ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xD9 ) ;
emit_int8 ( ( unsigned char ) 0xFA ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : fst_d ( Address adr ) {
InstructionMark im ( this ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xDD ) ;
2008-08-27 00:21:55 -07:00
emit_operand32 ( rdx , adr ) ;
}
void Assembler : : fst_s ( Address adr ) {
InstructionMark im ( this ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xD9 ) ;
2008-08-27 00:21:55 -07:00
emit_operand32 ( rdx , adr ) ;
}
void Assembler : : fstp_d ( Address adr ) {
InstructionMark im ( this ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xDD ) ;
2008-08-27 00:21:55 -07:00
emit_operand32 ( rbx , adr ) ;
}
void Assembler : : fstp_d ( int index ) {
emit_farith ( 0xDD , 0xD8 , index ) ;
}
void Assembler : : fstp_s ( Address adr ) {
InstructionMark im ( this ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xD9 ) ;
2008-08-27 00:21:55 -07:00
emit_operand32 ( rbx , adr ) ;
}
void Assembler : : fstp_x ( Address adr ) {
InstructionMark im ( this ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xDB ) ;
2008-08-27 00:21:55 -07:00
emit_operand32 ( rdi , adr ) ;
}
void Assembler : : fsub ( int i ) {
emit_farith ( 0xD8 , 0xE0 , i ) ;
}
void Assembler : : fsub_d ( Address src ) {
InstructionMark im ( this ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xDC ) ;
2008-08-27 00:21:55 -07:00
emit_operand32 ( rsp , src ) ;
}
void Assembler : : fsub_s ( Address src ) {
InstructionMark im ( this ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xD8 ) ;
2008-08-27 00:21:55 -07:00
emit_operand32 ( rsp , src ) ;
}
void Assembler : : fsuba ( int i ) {
emit_farith ( 0xDC , 0xE8 , i ) ;
}
void Assembler : : fsubp ( int i ) {
emit_farith ( 0xDE , 0xE8 , i ) ; // ST(0) <- ST(0) - ST(1) and pop (Intel manual wrong)
}
void Assembler : : fsubr ( int i ) {
emit_farith ( 0xD8 , 0xE8 , i ) ;
}
void Assembler : : fsubr_d ( Address src ) {
InstructionMark im ( this ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xDC ) ;
2008-08-27 00:21:55 -07:00
emit_operand32 ( rbp , src ) ;
}
void Assembler : : fsubr_s ( Address src ) {
InstructionMark im ( this ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xD8 ) ;
2008-08-27 00:21:55 -07:00
emit_operand32 ( rbp , src ) ;
}
void Assembler : : fsubra ( int i ) {
emit_farith ( 0xDC , 0xE0 , i ) ;
}
void Assembler : : fsubrp ( int i ) {
emit_farith ( 0xDE , 0xE0 , i ) ; // ST(0) <- ST(1) - ST(0) and pop (Intel manual wrong)
}
void Assembler : : ftan ( ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xD9 ) ;
emit_int8 ( ( unsigned char ) 0xF2 ) ;
emit_int8 ( ( unsigned char ) 0xDD ) ;
emit_int8 ( ( unsigned char ) 0xD8 ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : ftst ( ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xD9 ) ;
emit_int8 ( ( unsigned char ) 0xE4 ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : fucomi ( int i ) {
// make sure the instruction is supported (introduced for P6, together with cmov)
guarantee ( VM_Version : : supports_cmov ( ) , " illegal instruction " ) ;
emit_farith ( 0xDB , 0xE8 , i ) ;
}
void Assembler : : fucomip ( int i ) {
// make sure the instruction is supported (introduced for P6, together with cmov)
guarantee ( VM_Version : : supports_cmov ( ) , " illegal instruction " ) ;
emit_farith ( 0xDF , 0xE8 , i ) ;
}
void Assembler : : fwait ( ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0x9B ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : fxch ( int i ) {
emit_farith ( 0xD9 , 0xC8 , i ) ;
}
void Assembler : : fyl2x ( ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xD9 ) ;
emit_int8 ( ( unsigned char ) 0xF1 ) ;
2008-08-27 00:21:55 -07:00
}
2012-05-15 10:10:23 +02:00
void Assembler : : frndint ( ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xD9 ) ;
emit_int8 ( ( unsigned char ) 0xFC ) ;
2012-05-15 10:10:23 +02:00
}
void Assembler : : f2xm1 ( ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xD9 ) ;
emit_int8 ( ( unsigned char ) 0xF0 ) ;
2012-05-15 10:10:23 +02:00
}
void Assembler : : fldl2e ( ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xD9 ) ;
emit_int8 ( ( unsigned char ) 0xEA ) ;
2012-05-15 10:10:23 +02:00
}
2011-12-14 14:54:38 -08:00
// SSE SIMD prefix byte values corresponding to VexSimdPrefix encoding.
static int simd_pre [ 4 ] = { 0 , 0x66 , 0xF3 , 0xF2 } ;
// SSE opcode second byte values (first is 0x0F) corresponding to VexOpcode encoding.
static int simd_opc [ 4 ] = { 0 , 0 , 0x38 , 0x3A } ;
// Generate SSE legacy REX prefix and SIMD opcode based on VEX encoding.
void Assembler : : rex_prefix ( Address adr , XMMRegister xreg , VexSimdPrefix pre , VexOpcode opc , bool rex_w ) {
if ( pre > 0 ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( simd_pre [ pre ] ) ;
2011-12-14 14:54:38 -08:00
}
if ( rex_w ) {
prefixq ( adr , xreg ) ;
} else {
prefix ( adr , xreg ) ;
}
if ( opc > 0 ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
2011-12-14 14:54:38 -08:00
int opc2 = simd_opc [ opc ] ;
if ( opc2 > 0 ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( opc2 ) ;
2011-12-14 14:54:38 -08:00
}
}
}
int Assembler : : rex_prefix_and_encode ( int dst_enc , int src_enc , VexSimdPrefix pre , VexOpcode opc , bool rex_w ) {
if ( pre > 0 ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( simd_pre [ pre ] ) ;
2011-12-14 14:54:38 -08:00
}
2015-11-09 11:26:41 -08:00
int encode = ( rex_w ) ? prefixq_and_encode ( dst_enc , src_enc ) : prefix_and_encode ( dst_enc , src_enc ) ;
2011-12-14 14:54:38 -08:00
if ( opc > 0 ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
2011-12-14 14:54:38 -08:00
int opc2 = simd_opc [ opc ] ;
if ( opc2 > 0 ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( opc2 ) ;
2011-12-14 14:54:38 -08:00
}
}
return encode ;
}
2015-11-09 11:26:41 -08:00
void Assembler : : vex_prefix ( bool vex_r , bool vex_b , bool vex_x , int nds_enc , VexSimdPrefix pre , VexOpcode opc ) {
int vector_len = _attributes - > get_vector_len ( ) ;
bool vex_w = _attributes - > is_rex_vex_w ( ) ;
2011-12-14 14:54:38 -08:00
if ( vex_b | | vex_x | | vex_w | | ( opc = = VEX_OPCODE_0F_38 ) | | ( opc = = VEX_OPCODE_0F_3A ) ) {
prefix ( VEX_3bytes ) ;
int byte1 = ( vex_r ? VEX_R : 0 ) | ( vex_x ? VEX_X : 0 ) | ( vex_b ? VEX_B : 0 ) ;
byte1 = ( ~ byte1 ) & 0xE0 ;
byte1 | = opc ;
2012-12-20 18:53:44 -08:00
emit_int8 ( byte1 ) ;
2011-12-14 14:54:38 -08:00
int byte2 = ( ( ~ nds_enc ) & 0xf ) < < 3 ;
2015-05-08 11:49:20 -07:00
byte2 | = ( vex_w ? VEX_W : 0 ) | ( ( vector_len > 0 ) ? 4 : 0 ) | pre ;
2012-12-20 18:53:44 -08:00
emit_int8 ( byte2 ) ;
2011-12-14 14:54:38 -08:00
} else {
prefix ( VEX_2bytes ) ;
int byte1 = vex_r ? VEX_R : 0 ;
byte1 = ( ~ byte1 ) & 0x80 ;
byte1 | = ( ( ~ nds_enc ) & 0xf ) < < 3 ;
2015-05-08 11:49:20 -07:00
byte1 | = ( ( vector_len > 0 ) ? 4 : 0 ) | pre ;
2012-12-20 18:53:44 -08:00
emit_int8 ( byte1 ) ;
2011-12-14 14:54:38 -08:00
}
}
2015-05-08 11:49:20 -07:00
// This is a 4 byte encoding
2015-11-09 11:26:41 -08:00
void Assembler : : evex_prefix ( bool vex_r , bool vex_b , bool vex_x , bool evex_r , bool evex_v , int nds_enc , VexSimdPrefix pre , VexOpcode opc ) {
2015-05-08 11:49:20 -07:00
// EVEX 0x62 prefix
prefix ( EVEX_4bytes ) ;
2015-11-09 11:26:41 -08:00
bool vex_w = _attributes - > is_rex_vex_w ( ) ;
int evex_encoding = ( vex_w ? VEX_W : 0 ) ;
// EVEX.b is not currently used for broadcast of single element or data rounding modes
_attributes - > set_evex_encoding ( evex_encoding ) ;
2015-05-08 11:49:20 -07:00
// P0: byte 2, initialized to RXBR`00mm
// instead of not'd
int byte2 = ( vex_r ? VEX_R : 0 ) | ( vex_x ? VEX_X : 0 ) | ( vex_b ? VEX_B : 0 ) | ( evex_r ? EVEX_Rb : 0 ) ;
byte2 = ( ~ byte2 ) & 0xF0 ;
// confine opc opcode extensions in mm bits to lower two bits
// of form {0F, 0F_38, 0F_3A}
byte2 | = opc ;
emit_int8 ( byte2 ) ;
// P1: byte 3 as Wvvvv1pp
int byte3 = ( ( ~ nds_enc ) & 0xf ) < < 3 ;
// p[10] is always 1
byte3 | = EVEX_F ;
byte3 | = ( vex_w & 1 ) < < 7 ;
// confine pre opcode extensions in pp bits to lower two bits
// of form {66, F3, F2}
byte3 | = pre ;
emit_int8 ( byte3 ) ;
// P2: byte 4 as zL'Lbv'aaa
2016-04-27 13:37:07 -07:00
int byte4 = ( _attributes - > is_no_reg_mask ( ) ) ? 0 : _attributes - > get_embedded_opmask_register_specifier ( ) ; // kregs are implemented in the low 3 bits as aaa (hard code k1, it will be initialized for now)
2015-05-08 11:49:20 -07:00
// EVEX.v` for extending EVEX.vvvv or VIDX
byte4 | = ( evex_v ? 0 : EVEX_V ) ;
// third EXEC.b for broadcast actions
2015-11-09 11:26:41 -08:00
byte4 | = ( _attributes - > is_extended_context ( ) ? EVEX_Rb : 0 ) ;
2015-05-08 11:49:20 -07:00
// fourth EVEX.L'L for vector length : 0 is 128, 1 is 256, 2 is 512, currently we do not support 1024
2015-11-09 11:26:41 -08:00
byte4 | = ( ( _attributes - > get_vector_len ( ) ) & 0x3 ) < < 5 ;
2015-05-08 11:49:20 -07:00
// last is EVEX.z for zero/merge actions
2015-11-09 11:26:41 -08:00
byte4 | = ( _attributes - > is_clear_context ( ) ? EVEX_Z : 0 ) ;
2015-05-08 11:49:20 -07:00
emit_int8 ( byte4 ) ;
}
2015-11-09 11:26:41 -08:00
void Assembler : : vex_prefix ( Address adr , int nds_enc , int xreg_enc , VexSimdPrefix pre , VexOpcode opc , InstructionAttr * attributes ) {
2015-06-23 12:45:08 -07:00
bool vex_r = ( ( xreg_enc & 8 ) = = 8 ) ? 1 : 0 ;
2011-12-14 14:54:38 -08:00
bool vex_b = adr . base_needs_rex ( ) ;
bool vex_x = adr . index_needs_rex ( ) ;
2015-11-09 11:26:41 -08:00
set_attributes ( attributes ) ;
attributes - > set_current_assembler ( this ) ;
2015-05-08 11:49:20 -07:00
2015-09-11 17:02:44 -07:00
// if vector length is turned off, revert to AVX for vectors smaller than 512-bit
2016-03-29 09:53:50 -07:00
if ( UseAVX > 2 & & _legacy_mode_vl & & attributes - > uses_vl ( ) ) {
2015-11-09 11:26:41 -08:00
switch ( attributes - > get_vector_len ( ) ) {
2015-05-08 11:49:20 -07:00
case AVX_128bit :
case AVX_256bit :
2015-11-09 11:26:41 -08:00
attributes - > set_is_legacy_mode ( ) ;
2015-05-08 11:49:20 -07:00
break ;
}
}
2016-03-29 09:53:50 -07:00
// For pure EVEX check and see if this instruction
// is allowed in legacy mode and has resources which will
// fit in it. Pure EVEX instructions will use set_is_evex_instruction in their definition,
// else that field is set when we encode to EVEX
if ( UseAVX > 2 & & ! attributes - > is_legacy_mode ( ) & &
! _is_managed & & ! attributes - > is_evex_instruction ( ) ) {
if ( ! _legacy_mode_vl & & attributes - > get_vector_len ( ) ! = AVX_512bit ) {
bool check_register_bank = NOT_IA32 ( true ) IA32_ONLY ( false ) ;
if ( check_register_bank ) {
// check nds_enc and xreg_enc for upper bank usage
if ( nds_enc < 16 & & xreg_enc < 16 ) {
attributes - > set_is_legacy_mode ( ) ;
}
} else {
attributes - > set_is_legacy_mode ( ) ;
}
}
}
_is_managed = false ;
if ( UseAVX > 2 & & ! attributes - > is_legacy_mode ( ) )
2015-05-08 11:49:20 -07:00
{
bool evex_r = ( xreg_enc > = 16 ) ;
bool evex_v = ( nds_enc > = 16 ) ;
2015-11-09 11:26:41 -08:00
attributes - > set_is_evex_instruction ( ) ;
evex_prefix ( vex_r , vex_b , vex_x , evex_r , evex_v , nds_enc , pre , opc ) ;
2015-05-08 11:49:20 -07:00
} else {
2016-04-18 15:18:14 -07:00
if ( UseAVX > 2 & & attributes - > is_rex_vex_w_reverted ( ) ) {
attributes - > set_rex_vex_w ( false ) ;
}
2015-11-09 11:26:41 -08:00
vex_prefix ( vex_r , vex_b , vex_x , nds_enc , pre , opc ) ;
2015-05-08 11:49:20 -07:00
}
2011-12-14 14:54:38 -08:00
}
2015-11-09 11:26:41 -08:00
int Assembler : : vex_prefix_and_encode ( int dst_enc , int nds_enc , int src_enc , VexSimdPrefix pre , VexOpcode opc , InstructionAttr * attributes ) {
2015-06-23 12:45:08 -07:00
bool vex_r = ( ( dst_enc & 8 ) = = 8 ) ? 1 : 0 ;
bool vex_b = ( ( src_enc & 8 ) = = 8 ) ? 1 : 0 ;
2011-12-14 14:54:38 -08:00
bool vex_x = false ;
2015-11-09 11:26:41 -08:00
set_attributes ( attributes ) ;
attributes - > set_current_assembler ( this ) ;
2016-03-29 09:53:50 -07:00
bool check_register_bank = NOT_IA32 ( true ) IA32_ONLY ( false ) ;
2015-05-08 11:49:20 -07:00
2015-09-11 17:02:44 -07:00
// if vector length is turned off, revert to AVX for vectors smaller than 512-bit
2016-03-29 09:53:50 -07:00
if ( UseAVX > 2 & & _legacy_mode_vl & & attributes - > uses_vl ( ) ) {
2015-11-09 11:26:41 -08:00
switch ( attributes - > get_vector_len ( ) ) {
2015-05-08 11:49:20 -07:00
case AVX_128bit :
case AVX_256bit :
2016-03-29 09:53:50 -07:00
if ( check_register_bank ) {
if ( dst_enc > = 16 | | nds_enc > = 16 | | src_enc > = 16 ) {
// up propagate arithmetic instructions to meet RA requirements
attributes - > set_vector_len ( AVX_512bit ) ;
} else {
attributes - > set_is_legacy_mode ( ) ;
}
2015-11-09 11:26:41 -08:00
} else {
attributes - > set_is_legacy_mode ( ) ;
}
2015-05-08 11:49:20 -07:00
break ;
}
}
2016-03-29 09:53:50 -07:00
// For pure EVEX check and see if this instruction
// is allowed in legacy mode and has resources which will
// fit in it. Pure EVEX instructions will use set_is_evex_instruction in their definition,
// else that field is set when we encode to EVEX
if ( UseAVX > 2 & & ! attributes - > is_legacy_mode ( ) & &
! _is_managed & & ! attributes - > is_evex_instruction ( ) ) {
if ( ! _legacy_mode_vl & & attributes - > get_vector_len ( ) ! = AVX_512bit ) {
if ( check_register_bank ) {
// check dst_enc, nds_enc and src_enc for upper bank usage
if ( dst_enc < 16 & & nds_enc < 16 & & src_enc < 16 ) {
attributes - > set_is_legacy_mode ( ) ;
}
} else {
attributes - > set_is_legacy_mode ( ) ;
}
}
}
_is_managed = false ;
if ( UseAVX > 2 & & ! attributes - > is_legacy_mode ( ) )
2015-05-08 11:49:20 -07:00
{
bool evex_r = ( dst_enc > = 16 ) ;
bool evex_v = ( nds_enc > = 16 ) ;
// can use vex_x as bank extender on rm encoding
vex_x = ( src_enc > = 16 ) ;
2015-11-09 11:26:41 -08:00
attributes - > set_is_evex_instruction ( ) ;
evex_prefix ( vex_r , vex_b , vex_x , evex_r , evex_v , nds_enc , pre , opc ) ;
2015-05-08 11:49:20 -07:00
} else {
2016-04-18 15:18:14 -07:00
if ( UseAVX > 2 & & attributes - > is_rex_vex_w_reverted ( ) ) {
attributes - > set_rex_vex_w ( false ) ;
}
2015-11-09 11:26:41 -08:00
vex_prefix ( vex_r , vex_b , vex_x , nds_enc , pre , opc ) ;
2015-05-08 11:49:20 -07:00
}
// return modrm byte components for operands
2011-12-14 14:54:38 -08:00
return ( ( ( dst_enc & 7 ) < < 3 ) | ( src_enc & 7 ) ) ;
}
2015-05-08 11:49:20 -07:00
void Assembler : : simd_prefix ( XMMRegister xreg , XMMRegister nds , Address adr , VexSimdPrefix pre ,
2015-11-09 11:26:41 -08:00
VexOpcode opc , InstructionAttr * attributes ) {
2011-12-14 14:54:38 -08:00
if ( UseAVX > 0 ) {
int xreg_enc = xreg - > encoding ( ) ;
2015-11-09 11:26:41 -08:00
int nds_enc = nds - > is_valid ( ) ? nds - > encoding ( ) : 0 ;
vex_prefix ( adr , nds_enc , xreg_enc , pre , opc , attributes ) ;
2011-12-14 14:54:38 -08:00
} else {
assert ( ( nds = = xreg ) | | ( nds = = xnoreg ) , " wrong sse encoding " ) ;
2015-11-09 11:26:41 -08:00
rex_prefix ( adr , xreg , pre , opc , attributes - > is_rex_vex_w ( ) ) ;
2011-12-14 14:54:38 -08:00
}
}
2015-05-08 11:49:20 -07:00
int Assembler : : simd_prefix_and_encode ( XMMRegister dst , XMMRegister nds , XMMRegister src , VexSimdPrefix pre ,
2015-11-09 11:26:41 -08:00
VexOpcode opc , InstructionAttr * attributes ) {
2011-12-14 14:54:38 -08:00
int dst_enc = dst - > encoding ( ) ;
int src_enc = src - > encoding ( ) ;
if ( UseAVX > 0 ) {
int nds_enc = nds - > is_valid ( ) ? nds - > encoding ( ) : 0 ;
2015-11-09 11:26:41 -08:00
return vex_prefix_and_encode ( dst_enc , nds_enc , src_enc , pre , opc , attributes ) ;
2011-12-14 14:54:38 -08:00
} else {
assert ( ( nds = = dst ) | | ( nds = = src ) | | ( nds = = xnoreg ) , " wrong sse encoding " ) ;
2015-11-09 11:26:41 -08:00
return rex_prefix_and_encode ( dst_enc , src_enc , pre , opc , attributes - > is_rex_vex_w ( ) ) ;
2011-12-14 14:54:38 -08:00
}
}
2008-08-27 00:21:55 -07:00
2015-10-26 19:33:31 -07:00
void Assembler : : cmppd ( XMMRegister dst , XMMRegister nds , XMMRegister src , int cop , int vector_len ) {
assert ( VM_Version : : supports_avx ( ) , " " ) ;
assert ( ! VM_Version : : supports_evex ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
int encode = simd_prefix_and_encode ( dst , nds , src , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2015-10-26 19:33:31 -07:00
emit_int8 ( ( unsigned char ) 0xC2 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
emit_int8 ( ( unsigned char ) ( 0xF & cop ) ) ;
}
void Assembler : : vpblendd ( XMMRegister dst , XMMRegister nds , XMMRegister src1 , XMMRegister src2 , int vector_len ) {
assert ( VM_Version : : supports_avx ( ) , " " ) ;
assert ( ! VM_Version : : supports_evex ( ) , " " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( vector_len , /* vex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
2016-04-05 11:37:41 -07:00
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , nds - > encoding ( ) , src1 - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_3A , & attributes ) ;
2015-10-26 19:33:31 -07:00
emit_int8 ( ( unsigned char ) 0x4B ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
int src2_enc = src2 - > encoding ( ) ;
emit_int8 ( ( unsigned char ) ( 0xF0 & src2_enc < < 4 ) ) ;
}
2016-04-18 15:18:14 -07:00
void Assembler : : shlxl ( Register dst , Register src1 , Register src2 ) {
assert ( VM_Version : : supports_bmi2 ( ) , " " ) ;
InstructionAttr attributes ( AVX_128bit , /* vex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , src2 - > encoding ( ) , src1 - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
emit_int8 ( ( unsigned char ) 0xF7 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
void Assembler : : shlxq ( Register dst , Register src1 , Register src2 ) {
assert ( VM_Version : : supports_bmi2 ( ) , " " ) ;
InstructionAttr attributes ( AVX_128bit , /* vex_w */ true , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , src2 - > encoding ( ) , src1 - > encoding ( ) , VEX_SIMD_66 , VEX_OPCODE_0F_38 , & attributes ) ;
emit_int8 ( ( unsigned char ) 0xF7 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
2015-10-26 19:33:31 -07:00
2008-08-27 00:21:55 -07:00
# ifndef _LP64
void Assembler : : incl ( Register dst ) {
// Don't use it directly. Use MacroAssembler::incrementl() instead.
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x40 | dst - > encoding ( ) ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : lea ( Register dst , Address src ) {
leal ( dst , src ) ;
}
2015-11-09 11:26:41 -08:00
void Assembler : : mov_literal32 ( Address dst , int32_t imm32 , RelocationHolder const & rspec ) {
2008-08-27 00:21:55 -07:00
InstructionMark im ( this ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xC7 ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( rax , dst ) ;
emit_data ( ( int ) imm32 , rspec , 0 ) ;
}
2009-03-12 10:37:46 -07:00
void Assembler : : mov_literal32 ( Register dst , int32_t imm32 , RelocationHolder const & rspec ) {
InstructionMark im ( this ) ;
int encode = prefix_and_encode ( dst - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) ( 0xB8 | encode ) ) ;
2009-03-12 10:37:46 -07:00
emit_data ( ( int ) imm32 , rspec , 0 ) ;
}
2008-08-27 00:21:55 -07:00
void Assembler : : popa ( ) { // 32bit
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x61 ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : push_literal32 ( int32_t imm32 , RelocationHolder const & rspec ) {
InstructionMark im ( this ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x68 ) ;
2008-08-27 00:21:55 -07:00
emit_data ( imm32 , rspec , 0 ) ;
}
void Assembler : : pusha ( ) { // 32bit
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x60 ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : set_byte_if_not_zero ( Register dst ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0x95 ) ;
emit_int8 ( ( unsigned char ) ( 0xE0 | dst - > encoding ( ) ) ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : shldl ( Register dst , Register src ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xA5 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | src - > encoding ( ) < < 3 | dst - > encoding ( ) ) ) ;
2008-08-27 00:21:55 -07:00
}
2015-09-16 15:54:32 -07:00
// 0F A4 / r ib
void Assembler : : shldl ( Register dst , Register src , int8_t imm8 ) {
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xA4 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | src - > encoding ( ) < < 3 | dst - > encoding ( ) ) ) ;
emit_int8 ( imm8 ) ;
}
2008-08-27 00:21:55 -07:00
void Assembler : : shrdl ( Register dst , Register src ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xAD ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | src - > encoding ( ) < < 3 | dst - > encoding ( ) ) ) ;
2008-08-27 00:21:55 -07:00
}
# else // LP64
2010-04-08 12:13:07 -07:00
void Assembler : : set_byte_if_not_zero ( Register dst ) {
int enc = prefix_and_encode ( dst - > encoding ( ) , true ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0x95 ) ;
emit_int8 ( ( unsigned char ) ( 0xE0 | enc ) ) ;
2010-04-08 12:13:07 -07:00
}
2008-08-27 00:21:55 -07:00
// 64bit only pieces of the assembler
// This should only be used by 64bit instructions that can use rip-relative
// it cannot be used by instructions that want an immediate value.
bool Assembler : : reachable ( AddressLiteral adr ) {
int64_t disp ;
// None will force a 64bit literal to the code stream. Likely a placeholder
// for something that will be patched later and we need to certain it will
// always be reachable.
if ( adr . reloc ( ) = = relocInfo : : none ) {
return false ;
}
if ( adr . reloc ( ) = = relocInfo : : internal_word_type ) {
// This should be rip relative and easily reachable.
return true ;
}
if ( adr . reloc ( ) = = relocInfo : : virtual_call_type | |
adr . reloc ( ) = = relocInfo : : opt_virtual_call_type | |
adr . reloc ( ) = = relocInfo : : static_call_type | |
adr . reloc ( ) = = relocInfo : : static_stub_type ) {
// This should be rip relative within the code cache and easily
// reachable until we get huge code caches. (At which point
// ic code is going to have issues).
return true ;
}
if ( adr . reloc ( ) ! = relocInfo : : external_word_type & &
adr . reloc ( ) ! = relocInfo : : poll_return_type & & // these are really external_word but need special
adr . reloc ( ) ! = relocInfo : : poll_type & & // relocs to identify them
adr . reloc ( ) ! = relocInfo : : runtime_call_type ) {
return false ;
}
// Stress the correction code
if ( ForceUnreachable ) {
// Must be runtimecall reloc, see if it is in the codecache
// Flipping stuff in the codecache to be unreachable causes issues
// with things like inline caches where the additional instructions
// are not handled.
if ( CodeCache : : find_blob ( adr . _target ) = = NULL ) {
return false ;
}
}
// For external_word_type/runtime_call_type if it is reachable from where we
// are now (possibly a temp buffer) and where we might end up
// anywhere in the codeCache then we are always reachable.
// This would have to change if we ever save/restore shared code
// to be more pessimistic.
disp = ( int64_t ) adr . _target - ( ( int64_t ) CodeCache : : low_bound ( ) + sizeof ( int ) ) ;
if ( ! is_simm32 ( disp ) ) return false ;
disp = ( int64_t ) adr . _target - ( ( int64_t ) CodeCache : : high_bound ( ) + sizeof ( int ) ) ;
if ( ! is_simm32 ( disp ) ) return false ;
2012-11-30 11:44:05 -08:00
disp = ( int64_t ) adr . _target - ( ( int64_t ) pc ( ) + sizeof ( int ) ) ;
2008-08-27 00:21:55 -07:00
// Because rip relative is a disp + address_of_next_instruction and we
// don't know the value of address_of_next_instruction we apply a fudge factor
// to make sure we will be ok no matter the size of the instruction we get placed into.
// We don't have to fudge the checks above here because they are already worst case.
// 12 == override/rex byte, opcode byte, rm byte, sib byte, a 4-byte disp , 4-byte literal
// + 4 because better safe than sorry.
const int fudge = 12 + 4 ;
if ( disp < 0 ) {
disp - = fudge ;
} else {
disp + = fudge ;
}
return is_simm32 ( disp ) ;
}
2011-03-27 13:17:37 -07:00
// Check if the polling page is not reachable from the code cache using rip-relative
// addressing.
bool Assembler : : is_polling_page_far ( ) {
intptr_t addr = ( intptr_t ) os : : get_polling_page ( ) ;
2011-11-18 10:29:27 -08:00
return ForceUnreachable | |
! is_simm32 ( addr - ( intptr_t ) CodeCache : : low_bound ( ) ) | |
2011-03-27 13:17:37 -07:00
! is_simm32 ( addr - ( intptr_t ) CodeCache : : high_bound ( ) ) ;
}
2008-08-27 00:21:55 -07:00
void Assembler : : emit_data64 ( jlong data ,
relocInfo : : relocType rtype ,
int format ) {
if ( rtype = = relocInfo : : none ) {
2012-11-30 11:44:05 -08:00
emit_int64 ( data ) ;
2008-08-27 00:21:55 -07:00
} else {
emit_data64 ( data , Relocation : : spec_simple ( rtype ) , format ) ;
}
}
void Assembler : : emit_data64 ( jlong data ,
RelocationHolder const & rspec ,
int format ) {
assert ( imm_operand = = 0 , " default format must be immediate in this file " ) ;
assert ( imm_operand = = format , " must be immediate " ) ;
assert ( inst_mark ( ) ! = NULL , " must be inside InstructionMark " ) ;
// Do not use AbstractAssembler::relocate, which is not intended for
// embedded words. Instead, relocate to the enclosing instruction.
code_section ( ) - > relocate ( inst_mark ( ) , rspec , format ) ;
# ifdef ASSERT
check_relocation ( rspec , format ) ;
# endif
2012-11-30 11:44:05 -08:00
emit_int64 ( data ) ;
2008-08-27 00:21:55 -07:00
}
int Assembler : : prefix_and_encode ( int reg_enc , bool byteinst ) {
if ( reg_enc > = 8 ) {
prefix ( REX_B ) ;
reg_enc - = 8 ;
} else if ( byteinst & & reg_enc > = 4 ) {
prefix ( REX ) ;
}
return reg_enc ;
}
int Assembler : : prefixq_and_encode ( int reg_enc ) {
if ( reg_enc < 8 ) {
prefix ( REX_W ) ;
} else {
prefix ( REX_WB ) ;
reg_enc - = 8 ;
}
return reg_enc ;
}
2015-10-08 12:49:30 -10:00
int Assembler : : prefix_and_encode ( int dst_enc , bool dst_is_byte , int src_enc , bool src_is_byte ) {
2008-08-27 00:21:55 -07:00
if ( dst_enc < 8 ) {
if ( src_enc > = 8 ) {
prefix ( REX_B ) ;
src_enc - = 8 ;
2015-10-08 12:49:30 -10:00
} else if ( ( src_is_byte & & src_enc > = 4 ) | | ( dst_is_byte & & dst_enc > = 4 ) ) {
2008-08-27 00:21:55 -07:00
prefix ( REX ) ;
}
} else {
if ( src_enc < 8 ) {
prefix ( REX_R ) ;
} else {
prefix ( REX_RB ) ;
src_enc - = 8 ;
}
dst_enc - = 8 ;
}
return dst_enc < < 3 | src_enc ;
}
int Assembler : : prefixq_and_encode ( int dst_enc , int src_enc ) {
if ( dst_enc < 8 ) {
if ( src_enc < 8 ) {
prefix ( REX_W ) ;
} else {
prefix ( REX_WB ) ;
src_enc - = 8 ;
}
} else {
if ( src_enc < 8 ) {
prefix ( REX_WR ) ;
} else {
prefix ( REX_WRB ) ;
src_enc - = 8 ;
}
dst_enc - = 8 ;
}
return dst_enc < < 3 | src_enc ;
}
void Assembler : : prefix ( Register reg ) {
if ( reg - > encoding ( ) > = 8 ) {
prefix ( REX_B ) ;
}
}
2015-09-16 15:54:32 -07:00
void Assembler : : prefix ( Register dst , Register src , Prefix p ) {
if ( src - > encoding ( ) > = 8 ) {
p = ( Prefix ) ( p | REX_B ) ;
}
if ( dst - > encoding ( ) > = 8 ) {
p = ( Prefix ) ( p | REX_R ) ;
}
if ( p ! = Prefix_EMPTY ) {
// do not generate an empty prefix
prefix ( p ) ;
}
}
void Assembler : : prefix ( Register dst , Address adr , Prefix p ) {
if ( adr . base_needs_rex ( ) ) {
if ( adr . index_needs_rex ( ) ) {
assert ( false , " prefix(Register dst, Address adr, Prefix p) does not support handling of an X " ) ;
} else {
prefix ( REX_B ) ;
}
} else {
if ( adr . index_needs_rex ( ) ) {
assert ( false , " prefix(Register dst, Address adr, Prefix p) does not support handling of an X " ) ;
}
}
if ( dst - > encoding ( ) > = 8 ) {
p = ( Prefix ) ( p | REX_R ) ;
}
if ( p ! = Prefix_EMPTY ) {
// do not generate an empty prefix
prefix ( p ) ;
}
}
2008-08-27 00:21:55 -07:00
void Assembler : : prefix ( Address adr ) {
if ( adr . base_needs_rex ( ) ) {
if ( adr . index_needs_rex ( ) ) {
prefix ( REX_XB ) ;
} else {
prefix ( REX_B ) ;
}
} else {
if ( adr . index_needs_rex ( ) ) {
prefix ( REX_X ) ;
}
}
}
void Assembler : : prefixq ( Address adr ) {
if ( adr . base_needs_rex ( ) ) {
if ( adr . index_needs_rex ( ) ) {
prefix ( REX_WXB ) ;
} else {
prefix ( REX_WB ) ;
}
} else {
if ( adr . index_needs_rex ( ) ) {
prefix ( REX_WX ) ;
} else {
prefix ( REX_W ) ;
}
}
}
void Assembler : : prefix ( Address adr , Register reg , bool byteinst ) {
if ( reg - > encoding ( ) < 8 ) {
if ( adr . base_needs_rex ( ) ) {
if ( adr . index_needs_rex ( ) ) {
prefix ( REX_XB ) ;
} else {
prefix ( REX_B ) ;
}
} else {
if ( adr . index_needs_rex ( ) ) {
prefix ( REX_X ) ;
2011-08-17 05:14:43 -07:00
} else if ( byteinst & & reg - > encoding ( ) > = 4 ) {
2008-08-27 00:21:55 -07:00
prefix ( REX ) ;
}
}
} else {
if ( adr . base_needs_rex ( ) ) {
if ( adr . index_needs_rex ( ) ) {
prefix ( REX_RXB ) ;
} else {
prefix ( REX_RB ) ;
}
} else {
if ( adr . index_needs_rex ( ) ) {
prefix ( REX_RX ) ;
} else {
prefix ( REX_R ) ;
}
}
}
}
void Assembler : : prefixq ( Address adr , Register src ) {
if ( src - > encoding ( ) < 8 ) {
if ( adr . base_needs_rex ( ) ) {
if ( adr . index_needs_rex ( ) ) {
prefix ( REX_WXB ) ;
} else {
prefix ( REX_WB ) ;
}
} else {
if ( adr . index_needs_rex ( ) ) {
prefix ( REX_WX ) ;
} else {
prefix ( REX_W ) ;
}
}
} else {
if ( adr . base_needs_rex ( ) ) {
if ( adr . index_needs_rex ( ) ) {
prefix ( REX_WRXB ) ;
} else {
prefix ( REX_WRB ) ;
}
} else {
if ( adr . index_needs_rex ( ) ) {
prefix ( REX_WRX ) ;
} else {
prefix ( REX_WR ) ;
}
}
}
}
void Assembler : : prefix ( Address adr , XMMRegister reg ) {
if ( reg - > encoding ( ) < 8 ) {
if ( adr . base_needs_rex ( ) ) {
if ( adr . index_needs_rex ( ) ) {
prefix ( REX_XB ) ;
} else {
prefix ( REX_B ) ;
}
} else {
if ( adr . index_needs_rex ( ) ) {
prefix ( REX_X ) ;
}
}
} else {
if ( adr . base_needs_rex ( ) ) {
if ( adr . index_needs_rex ( ) ) {
prefix ( REX_RXB ) ;
} else {
prefix ( REX_RB ) ;
}
} else {
if ( adr . index_needs_rex ( ) ) {
prefix ( REX_RX ) ;
} else {
prefix ( REX_R ) ;
}
}
}
}
2011-12-14 14:54:38 -08:00
void Assembler : : prefixq ( Address adr , XMMRegister src ) {
if ( src - > encoding ( ) < 8 ) {
2008-08-27 00:21:55 -07:00
if ( adr . base_needs_rex ( ) ) {
if ( adr . index_needs_rex ( ) ) {
2011-12-14 14:54:38 -08:00
prefix ( REX_WXB ) ;
2008-08-27 00:21:55 -07:00
} else {
2011-12-14 14:54:38 -08:00
prefix ( REX_WB ) ;
2008-08-27 00:21:55 -07:00
}
} else {
if ( adr . index_needs_rex ( ) ) {
2011-12-14 14:54:38 -08:00
prefix ( REX_WX ) ;
} else {
prefix ( REX_W ) ;
2008-08-27 00:21:55 -07:00
}
}
} else {
if ( adr . base_needs_rex ( ) ) {
if ( adr . index_needs_rex ( ) ) {
2011-12-14 14:54:38 -08:00
prefix ( REX_WRXB ) ;
2008-08-27 00:21:55 -07:00
} else {
2011-12-14 14:54:38 -08:00
prefix ( REX_WRB ) ;
2008-08-27 00:21:55 -07:00
}
} else {
if ( adr . index_needs_rex ( ) ) {
2011-12-14 14:54:38 -08:00
prefix ( REX_WRX ) ;
2008-08-27 00:21:55 -07:00
} else {
2011-12-14 14:54:38 -08:00
prefix ( REX_WR ) ;
2008-08-27 00:21:55 -07:00
}
}
}
}
void Assembler : : adcq ( Register dst , int32_t imm32 ) {
( void ) prefixq_and_encode ( dst - > encoding ( ) ) ;
emit_arith ( 0x81 , 0xD0 , dst , imm32 ) ;
}
void Assembler : : adcq ( Register dst , Address src ) {
InstructionMark im ( this ) ;
prefixq ( src , dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x13 ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( dst , src ) ;
}
void Assembler : : adcq ( Register dst , Register src ) {
2013-09-28 12:42:22 -07:00
( void ) prefixq_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
2008-08-27 00:21:55 -07:00
emit_arith ( 0x13 , 0xC0 , dst , src ) ;
}
void Assembler : : addq ( Address dst , int32_t imm32 ) {
InstructionMark im ( this ) ;
prefixq ( dst ) ;
emit_arith_operand ( 0x81 , rax , dst , imm32 ) ;
}
void Assembler : : addq ( Address dst , Register src ) {
InstructionMark im ( this ) ;
prefixq ( dst , src ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x01 ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( src , dst ) ;
}
void Assembler : : addq ( Register dst , int32_t imm32 ) {
( void ) prefixq_and_encode ( dst - > encoding ( ) ) ;
emit_arith ( 0x81 , 0xC0 , dst , imm32 ) ;
}
void Assembler : : addq ( Register dst , Address src ) {
InstructionMark im ( this ) ;
prefixq ( src , dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x03 ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( dst , src ) ;
}
void Assembler : : addq ( Register dst , Register src ) {
( void ) prefixq_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
emit_arith ( 0x03 , 0xC0 , dst , src ) ;
}
2014-09-02 12:48:45 -07:00
void Assembler : : adcxq ( Register dst , Register src ) {
//assert(VM_Version::supports_adx(), "adx instructions not supported");
emit_int8 ( ( unsigned char ) 0x66 ) ;
int encode = prefixq_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
emit_int8 ( 0x0F ) ;
emit_int8 ( 0x38 ) ;
emit_int8 ( ( unsigned char ) 0xF6 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
void Assembler : : adoxq ( Register dst , Register src ) {
//assert(VM_Version::supports_adx(), "adx instructions not supported");
emit_int8 ( ( unsigned char ) 0xF3 ) ;
int encode = prefixq_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
emit_int8 ( 0x0F ) ;
emit_int8 ( 0x38 ) ;
emit_int8 ( ( unsigned char ) 0xF6 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
2011-06-21 09:04:55 -07:00
void Assembler : : andq ( Address dst , int32_t imm32 ) {
InstructionMark im ( this ) ;
prefixq ( dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0x81 ) ;
2011-06-21 09:04:55 -07:00
emit_operand ( rsp , dst , 4 ) ;
2013-01-07 14:08:28 -08:00
emit_int32 ( imm32 ) ;
2011-06-21 09:04:55 -07:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : andq ( Register dst , int32_t imm32 ) {
( void ) prefixq_and_encode ( dst - > encoding ( ) ) ;
emit_arith ( 0x81 , 0xE0 , dst , imm32 ) ;
}
void Assembler : : andq ( Register dst , Address src ) {
InstructionMark im ( this ) ;
prefixq ( src , dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x23 ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( dst , src ) ;
}
void Assembler : : andq ( Register dst , Register src ) {
2013-09-28 12:42:22 -07:00
( void ) prefixq_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
2008-08-27 00:21:55 -07:00
emit_arith ( 0x23 , 0xC0 , dst , src ) ;
}
2014-03-12 11:24:26 -07:00
void Assembler : : andnq ( Register dst , Register src1 , Register src2 ) {
assert ( VM_Version : : supports_bmi1 ( ) , " bit manipulation instructions not supported " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* vex_w */ true , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , src1 - > encoding ( ) , src2 - > encoding ( ) , VEX_SIMD_NONE , VEX_OPCODE_0F_38 , & attributes ) ;
2014-03-12 11:24:26 -07:00
emit_int8 ( ( unsigned char ) 0xF2 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
void Assembler : : andnq ( Register dst , Register src1 , Address src2 ) {
assert ( VM_Version : : supports_bmi1 ( ) , " bit manipulation instructions not supported " ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( AVX_128bit , /* vex_w */ true , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
vex_prefix ( src2 , src1 - > encoding ( ) , dst - > encoding ( ) , VEX_SIMD_NONE , VEX_OPCODE_0F_38 , & attributes ) ;
2014-03-12 11:24:26 -07:00
emit_int8 ( ( unsigned char ) 0xF2 ) ;
emit_operand ( dst , src2 ) ;
}
2009-05-06 00:27:52 -07:00
void Assembler : : bsfq ( Register dst , Register src ) {
int encode = prefixq_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xBC ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2009-05-06 00:27:52 -07:00
}
void Assembler : : bsrq ( Register dst , Register src ) {
int encode = prefixq_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xBD ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2009-05-06 00:27:52 -07:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : bswapq ( Register reg ) {
int encode = prefixq_and_encode ( reg - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) ( 0xC8 | encode ) ) ;
2008-08-27 00:21:55 -07:00
}
2014-03-12 11:24:26 -07:00
void Assembler : : blsiq ( Register dst , Register src ) {
assert ( VM_Version : : supports_bmi1 ( ) , " bit manipulation instructions not supported " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* vex_w */ true , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
int encode = vex_prefix_and_encode ( rbx - > encoding ( ) , dst - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_NONE , VEX_OPCODE_0F_38 , & attributes ) ;
2014-03-12 11:24:26 -07:00
emit_int8 ( ( unsigned char ) 0xF3 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
void Assembler : : blsiq ( Register dst , Address src ) {
assert ( VM_Version : : supports_bmi1 ( ) , " bit manipulation instructions not supported " ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( AVX_128bit , /* vex_w */ true , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
vex_prefix ( src , dst - > encoding ( ) , rbx - > encoding ( ) , VEX_SIMD_NONE , VEX_OPCODE_0F_38 , & attributes ) ;
2014-03-12 11:24:26 -07:00
emit_int8 ( ( unsigned char ) 0xF3 ) ;
emit_operand ( rbx , src ) ;
}
void Assembler : : blsmskq ( Register dst , Register src ) {
assert ( VM_Version : : supports_bmi1 ( ) , " bit manipulation instructions not supported " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* vex_w */ true , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
int encode = vex_prefix_and_encode ( rdx - > encoding ( ) , dst - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_NONE , VEX_OPCODE_0F_38 , & attributes ) ;
2014-03-12 11:24:26 -07:00
emit_int8 ( ( unsigned char ) 0xF3 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
void Assembler : : blsmskq ( Register dst , Address src ) {
assert ( VM_Version : : supports_bmi1 ( ) , " bit manipulation instructions not supported " ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( AVX_128bit , /* vex_w */ true , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
vex_prefix ( src , dst - > encoding ( ) , rdx - > encoding ( ) , VEX_SIMD_NONE , VEX_OPCODE_0F_38 , & attributes ) ;
2014-03-12 11:24:26 -07:00
emit_int8 ( ( unsigned char ) 0xF3 ) ;
emit_operand ( rdx , src ) ;
}
void Assembler : : blsrq ( Register dst , Register src ) {
assert ( VM_Version : : supports_bmi1 ( ) , " bit manipulation instructions not supported " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* vex_w */ true , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
int encode = vex_prefix_and_encode ( rcx - > encoding ( ) , dst - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_NONE , VEX_OPCODE_0F_38 , & attributes ) ;
2014-03-12 11:24:26 -07:00
emit_int8 ( ( unsigned char ) 0xF3 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
void Assembler : : blsrq ( Register dst , Address src ) {
assert ( VM_Version : : supports_bmi1 ( ) , " bit manipulation instructions not supported " ) ;
2015-11-09 11:26:41 -08:00
InstructionMark im ( this ) ;
InstructionAttr attributes ( AVX_128bit , /* vex_w */ true , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
vex_prefix ( src , dst - > encoding ( ) , rcx - > encoding ( ) , VEX_SIMD_NONE , VEX_OPCODE_0F_38 , & attributes ) ;
2014-03-12 11:24:26 -07:00
emit_int8 ( ( unsigned char ) 0xF3 ) ;
emit_operand ( rcx , src ) ;
}
2008-08-27 00:21:55 -07:00
void Assembler : : cdqq ( ) {
prefix ( REX_W ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0x99 ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : clflush ( Address adr ) {
prefix ( adr ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xAE ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( rdi , adr ) ;
}
void Assembler : : cmovq ( Condition cc , Register dst , Register src ) {
int encode = prefixq_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( 0x40 | cc ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : cmovq ( Condition cc , Register dst , Address src ) {
InstructionMark im ( this ) ;
prefixq ( src , dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( 0x40 | cc ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( dst , src ) ;
}
void Assembler : : cmpq ( Address dst , int32_t imm32 ) {
InstructionMark im ( this ) ;
prefixq ( dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0x81 ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( rdi , dst , 4 ) ;
2013-01-07 14:08:28 -08:00
emit_int32 ( imm32 ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : cmpq ( Register dst , int32_t imm32 ) {
( void ) prefixq_and_encode ( dst - > encoding ( ) ) ;
emit_arith ( 0x81 , 0xF8 , dst , imm32 ) ;
}
void Assembler : : cmpq ( Address dst , Register src ) {
InstructionMark im ( this ) ;
prefixq ( dst , src ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x3B ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( src , dst ) ;
}
void Assembler : : cmpq ( Register dst , Register src ) {
( void ) prefixq_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
emit_arith ( 0x3B , 0xC0 , dst , src ) ;
}
void Assembler : : cmpq ( Register dst , Address src ) {
InstructionMark im ( this ) ;
prefixq ( src , dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x3B ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( dst , src ) ;
}
void Assembler : : cmpxchgq ( Register reg , Address adr ) {
InstructionMark im ( this ) ;
prefixq ( adr , reg ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xB1 ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( reg , adr ) ;
}
void Assembler : : cvtsi2sdq ( XMMRegister dst , Register src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ true , /* legacy_mode */ false , /* no_mask_reg */ true , /* uses_vl */ false ) ;
int encode = simd_prefix_and_encode ( dst , dst , as_XMMRegister ( src - > encoding ( ) ) , VEX_SIMD_F2 , VEX_OPCODE_0F , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x2A ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2008-08-27 00:21:55 -07:00
}
2011-12-14 14:54:38 -08:00
void Assembler : : cvtsi2sdq ( XMMRegister dst , Address src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
InstructionMark im ( this ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ true , /* legacy_mode */ false , /* no_mask_reg */ true , /* uses_vl */ false ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_T1S , /* input_size_in_bits */ EVEX_64bit ) ;
simd_prefix ( dst , dst , src , VEX_SIMD_F2 , VEX_OPCODE_0F , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x2A ) ;
2011-12-14 14:54:38 -08:00
emit_operand ( dst , src ) ;
}
void Assembler : : cvtsi2ssq ( XMMRegister dst , Address src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse ( ) , " " ) ) ;
InstructionMark im ( this ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ true , /* legacy_mode */ false , /* no_mask_reg */ true , /* uses_vl */ false ) ;
attributes . set_address_attributes ( /* tuple_type */ EVEX_T1S , /* input_size_in_bits */ EVEX_64bit ) ;
simd_prefix ( dst , dst , src , VEX_SIMD_F3 , VEX_OPCODE_0F , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x2A ) ;
2011-12-14 14:54:38 -08:00
emit_operand ( dst , src ) ;
}
2008-08-27 00:21:55 -07:00
void Assembler : : cvttsd2siq ( Register dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ true , /* legacy_mode */ false , /* no_mask_reg */ true , /* uses_vl */ false ) ;
int encode = simd_prefix_and_encode ( as_XMMRegister ( dst - > encoding ( ) ) , xnoreg , src , VEX_SIMD_F2 , VEX_OPCODE_0F , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x2C ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : cvttss2siq ( Register dst , XMMRegister src ) {
NOT_LP64 ( assert ( VM_Version : : supports_sse ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ true , /* legacy_mode */ false , /* no_mask_reg */ true , /* uses_vl */ false ) ;
int encode = simd_prefix_and_encode ( as_XMMRegister ( dst - > encoding ( ) ) , xnoreg , src , VEX_SIMD_F3 , VEX_OPCODE_0F , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x2C ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : decl ( Register dst ) {
// Don't use it directly. Use MacroAssembler::decrementl() instead.
// Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
int encode = prefix_and_encode ( dst - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xFF ) ;
emit_int8 ( ( unsigned char ) ( 0xC8 | encode ) ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : decq ( Register dst ) {
// Don't use it directly. Use MacroAssembler::decrementq() instead.
// Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
int encode = prefixq_and_encode ( dst - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xFF ) ;
emit_int8 ( 0xC8 | encode ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : decq ( Address dst ) {
// Don't use it directly. Use MacroAssembler::decrementq() instead.
InstructionMark im ( this ) ;
prefixq ( dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xFF ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( rcx , dst ) ;
}
void Assembler : : fxrstor ( Address src ) {
prefixq ( src ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xAE ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( as_Register ( 1 ) , src ) ;
}
2015-09-11 17:02:44 -07:00
void Assembler : : xrstor ( Address src ) {
prefixq ( src ) ;
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xAE ) ;
emit_operand ( as_Register ( 5 ) , src ) ;
}
2008-08-27 00:21:55 -07:00
void Assembler : : fxsave ( Address dst ) {
prefixq ( dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xAE ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( as_Register ( 0 ) , dst ) ;
}
2015-09-11 17:02:44 -07:00
void Assembler : : xsave ( Address dst ) {
prefixq ( dst ) ;
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xAE ) ;
emit_operand ( as_Register ( 4 ) , dst ) ;
}
2008-08-27 00:21:55 -07:00
void Assembler : : idivq ( Register src ) {
int encode = prefixq_and_encode ( src - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xF7 ) ;
emit_int8 ( ( unsigned char ) ( 0xF8 | encode ) ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : imulq ( Register dst , Register src ) {
int encode = prefixq_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xAF ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : imulq ( Register dst , Register src , int value ) {
int encode = prefixq_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
if ( is8bit ( value ) ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x6B ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
emit_int8 ( value & 0xFF ) ;
2008-08-27 00:21:55 -07:00
} else {
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x69 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2013-01-07 14:08:28 -08:00
emit_int32 ( value ) ;
2008-08-27 00:21:55 -07:00
}
}
2013-10-18 10:41:56 +02:00
void Assembler : : imulq ( Register dst , Address src ) {
InstructionMark im ( this ) ;
prefixq ( src , dst ) ;
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xAF ) ;
emit_operand ( dst , src ) ;
}
2008-08-27 00:21:55 -07:00
void Assembler : : incl ( Register dst ) {
// Don't use it directly. Use MacroAssembler::incrementl() instead.
// Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
int encode = prefix_and_encode ( dst - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xFF ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : incq ( Register dst ) {
// Don't use it directly. Use MacroAssembler::incrementq() instead.
// Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
int encode = prefixq_and_encode ( dst - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xFF ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : incq ( Address dst ) {
// Don't use it directly. Use MacroAssembler::incrementq() instead.
InstructionMark im ( this ) ;
prefixq ( dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xFF ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( rax , dst ) ;
}
void Assembler : : lea ( Register dst , Address src ) {
leaq ( dst , src ) ;
}
void Assembler : : leaq ( Register dst , Address src ) {
InstructionMark im ( this ) ;
prefixq ( src , dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0x8D ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( dst , src ) ;
}
void Assembler : : mov64 ( Register dst , int64_t imm64 ) {
InstructionMark im ( this ) ;
int encode = prefixq_and_encode ( dst - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) ( 0xB8 | encode ) ) ;
2012-11-30 11:44:05 -08:00
emit_int64 ( imm64 ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : mov_literal64 ( Register dst , intptr_t imm64 , RelocationHolder const & rspec ) {
InstructionMark im ( this ) ;
int encode = prefixq_and_encode ( dst - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0xB8 | encode ) ;
2008-08-27 00:21:55 -07:00
emit_data64 ( imm64 , rspec ) ;
}
2009-03-12 10:37:46 -07:00
void Assembler : : mov_narrow_oop ( Register dst , int32_t imm32 , RelocationHolder const & rspec ) {
InstructionMark im ( this ) ;
int encode = prefix_and_encode ( dst - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) ( 0xB8 | encode ) ) ;
2009-03-12 10:37:46 -07:00
emit_data ( ( int ) imm32 , rspec , narrow_oop_operand ) ;
}
void Assembler : : mov_narrow_oop ( Address dst , int32_t imm32 , RelocationHolder const & rspec ) {
InstructionMark im ( this ) ;
prefix ( dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xC7 ) ;
2009-03-12 10:37:46 -07:00
emit_operand ( rax , dst , 4 ) ;
emit_data ( ( int ) imm32 , rspec , narrow_oop_operand ) ;
}
void Assembler : : cmp_narrow_oop ( Register src1 , int32_t imm32 , RelocationHolder const & rspec ) {
InstructionMark im ( this ) ;
int encode = prefix_and_encode ( src1 - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0x81 ) ;
emit_int8 ( ( unsigned char ) ( 0xF8 | encode ) ) ;
2009-03-12 10:37:46 -07:00
emit_data ( ( int ) imm32 , rspec , narrow_oop_operand ) ;
}
void Assembler : : cmp_narrow_oop ( Address src1 , int32_t imm32 , RelocationHolder const & rspec ) {
InstructionMark im ( this ) ;
prefix ( src1 ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0x81 ) ;
2009-03-12 10:37:46 -07:00
emit_operand ( rax , src1 , 4 ) ;
emit_data ( ( int ) imm32 , rspec , narrow_oop_operand ) ;
}
2009-05-06 00:27:52 -07:00
void Assembler : : lzcntq ( Register dst , Register src ) {
assert ( VM_Version : : supports_lzcnt ( ) , " encoding is treated as BSR " ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xF3 ) ;
2009-05-06 00:27:52 -07:00
int encode = prefixq_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xBD ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2009-05-06 00:27:52 -07:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : movdq ( XMMRegister dst , Register src ) {
// table D-1 says MMX/SSE2
2011-12-14 14:54:38 -08:00
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ true , /* legacy_mode */ false , /* no_mask_reg */ true , /* uses_vl */ false ) ;
int encode = simd_prefix_and_encode ( dst , xnoreg , as_XMMRegister ( src - > encoding ( ) ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x6E ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : movdq ( Register dst , XMMRegister src ) {
// table D-1 says MMX/SSE2
2011-12-14 14:54:38 -08:00
NOT_LP64 ( assert ( VM_Version : : supports_sse2 ( ) , " " ) ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* rex_w */ true , /* legacy_mode */ false , /* no_mask_reg */ true , /* uses_vl */ false ) ;
2008-08-27 00:21:55 -07:00
// swap src/dst to get correct prefix
2015-11-09 11:26:41 -08:00
int encode = simd_prefix_and_encode ( src , xnoreg , as_XMMRegister ( dst - > encoding ( ) ) , VEX_SIMD_66 , VEX_OPCODE_0F , & attributes ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x7E ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : movq ( Register dst , Register src ) {
int encode = prefixq_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0x8B ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2008-08-27 00:21:55 -07:00
}
2007-12-01 00:00:00 +00:00
2008-08-27 00:21:55 -07:00
void Assembler : : movq ( Register dst , Address src ) {
InstructionMark im ( this ) ;
prefixq ( src , dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0x8B ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( dst , src ) ;
}
2007-12-01 00:00:00 +00:00
2008-08-27 00:21:55 -07:00
void Assembler : : movq ( Address dst , Register src ) {
InstructionMark im ( this ) ;
prefixq ( dst , src ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0x89 ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( src , dst ) ;
}
2007-12-01 00:00:00 +00:00
2009-03-09 03:17:11 -07:00
void Assembler : : movsbq ( Register dst , Address src ) {
InstructionMark im ( this ) ;
prefixq ( src , dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xBE ) ;
2009-03-09 03:17:11 -07:00
emit_operand ( dst , src ) ;
}
void Assembler : : movsbq ( Register dst , Register src ) {
int encode = prefixq_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xBE ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2009-03-09 03:17:11 -07:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : movslq ( Register dst , int32_t imm32 ) {
// dbx shows movslq(rcx, 3) as movq $0x0000000049000000,(%rbx)
// and movslq(r8, 3); as movl $0x0000000048000000,(%rbx)
// as a result we shouldn't use until tested at runtime...
ShouldNotReachHere ( ) ;
InstructionMark im ( this ) ;
int encode = prefixq_and_encode ( dst - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) ( 0xC7 | encode ) ) ;
2013-01-07 14:08:28 -08:00
emit_int32 ( imm32 ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : movslq ( Address dst , int32_t imm32 ) {
assert ( is_simm32 ( imm32 ) , " lost bits " ) ;
InstructionMark im ( this ) ;
prefixq ( dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xC7 ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( rax , dst , 4 ) ;
2013-01-07 14:08:28 -08:00
emit_int32 ( imm32 ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : movslq ( Register dst , Address src ) {
InstructionMark im ( this ) ;
prefixq ( src , dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x63 ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( dst , src ) ;
}
void Assembler : : movslq ( Register dst , Register src ) {
int encode = prefixq_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x63 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2008-08-27 00:21:55 -07:00
}
2009-03-09 03:17:11 -07:00
void Assembler : : movswq ( Register dst , Address src ) {
InstructionMark im ( this ) ;
prefixq ( src , dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xBF ) ;
2009-03-09 03:17:11 -07:00
emit_operand ( dst , src ) ;
}
void Assembler : : movswq ( Register dst , Register src ) {
int encode = prefixq_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xBF ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2009-03-09 03:17:11 -07:00
}
void Assembler : : movzbq ( Register dst , Address src ) {
InstructionMark im ( this ) ;
prefixq ( src , dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xB6 ) ;
2009-03-09 03:17:11 -07:00
emit_operand ( dst , src ) ;
}
void Assembler : : movzbq ( Register dst , Register src ) {
int encode = prefixq_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xB6 ) ;
emit_int8 ( 0xC0 | encode ) ;
2009-03-09 03:17:11 -07:00
}
void Assembler : : movzwq ( Register dst , Address src ) {
InstructionMark im ( this ) ;
prefixq ( src , dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xB7 ) ;
2009-03-09 03:17:11 -07:00
emit_operand ( dst , src ) ;
}
void Assembler : : movzwq ( Register dst , Register src ) {
int encode = prefixq_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xB7 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2009-03-09 03:17:11 -07:00
}
2014-09-02 12:48:45 -07:00
void Assembler : : mulq ( Address src ) {
InstructionMark im ( this ) ;
prefixq ( src ) ;
emit_int8 ( ( unsigned char ) 0xF7 ) ;
emit_operand ( rsp , src ) ;
}
void Assembler : : mulq ( Register src ) {
int encode = prefixq_and_encode ( src - > encoding ( ) ) ;
emit_int8 ( ( unsigned char ) 0xF7 ) ;
emit_int8 ( ( unsigned char ) ( 0xE0 | encode ) ) ;
}
void Assembler : : mulxq ( Register dst1 , Register dst2 , Register src ) {
assert ( VM_Version : : supports_bmi2 ( ) , " bit manipulation instructions not supported " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* vex_w */ true , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
int encode = vex_prefix_and_encode ( dst1 - > encoding ( ) , dst2 - > encoding ( ) , src - > encoding ( ) , VEX_SIMD_F2 , VEX_OPCODE_0F_38 , & attributes ) ;
2014-09-02 12:48:45 -07:00
emit_int8 ( ( unsigned char ) 0xF6 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
}
2008-08-27 00:21:55 -07:00
void Assembler : : negq ( Register dst ) {
int encode = prefixq_and_encode ( dst - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xF7 ) ;
emit_int8 ( ( unsigned char ) ( 0xD8 | encode ) ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : notq ( Register dst ) {
int encode = prefixq_and_encode ( dst - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xF7 ) ;
emit_int8 ( ( unsigned char ) ( 0xD0 | encode ) ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : orq ( Address dst , int32_t imm32 ) {
InstructionMark im ( this ) ;
prefixq ( dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0x81 ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( rcx , dst , 4 ) ;
2013-01-07 14:08:28 -08:00
emit_int32 ( imm32 ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : orq ( Register dst , int32_t imm32 ) {
( void ) prefixq_and_encode ( dst - > encoding ( ) ) ;
emit_arith ( 0x81 , 0xC8 , dst , imm32 ) ;
}
void Assembler : : orq ( Register dst , Address src ) {
InstructionMark im ( this ) ;
prefixq ( src , dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0B ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( dst , src ) ;
}
void Assembler : : orq ( Register dst , Register src ) {
( void ) prefixq_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
emit_arith ( 0x0B , 0xC0 , dst , src ) ;
}
void Assembler : : popa ( ) { // 64bit
movq ( r15 , Address ( rsp , 0 ) ) ;
movq ( r14 , Address ( rsp , wordSize ) ) ;
movq ( r13 , Address ( rsp , 2 * wordSize ) ) ;
movq ( r12 , Address ( rsp , 3 * wordSize ) ) ;
movq ( r11 , Address ( rsp , 4 * wordSize ) ) ;
movq ( r10 , Address ( rsp , 5 * wordSize ) ) ;
movq ( r9 , Address ( rsp , 6 * wordSize ) ) ;
movq ( r8 , Address ( rsp , 7 * wordSize ) ) ;
movq ( rdi , Address ( rsp , 8 * wordSize ) ) ;
movq ( rsi , Address ( rsp , 9 * wordSize ) ) ;
movq ( rbp , Address ( rsp , 10 * wordSize ) ) ;
// skip rsp
movq ( rbx , Address ( rsp , 12 * wordSize ) ) ;
movq ( rdx , Address ( rsp , 13 * wordSize ) ) ;
movq ( rcx , Address ( rsp , 14 * wordSize ) ) ;
movq ( rax , Address ( rsp , 15 * wordSize ) ) ;
addq ( rsp , 16 * wordSize ) ;
}
2009-03-13 11:35:17 -07:00
void Assembler : : popcntq ( Register dst , Address src ) {
assert ( VM_Version : : supports_popcnt ( ) , " must support " ) ;
InstructionMark im ( this ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xF3 ) ;
2009-03-13 11:35:17 -07:00
prefixq ( src , dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xB8 ) ;
2009-03-13 11:35:17 -07:00
emit_operand ( dst , src ) ;
}
void Assembler : : popcntq ( Register dst , Register src ) {
assert ( VM_Version : : supports_popcnt ( ) , " must support " ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xF3 ) ;
2009-03-13 11:35:17 -07:00
int encode = prefixq_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xB8 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2009-03-13 11:35:17 -07:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : popq ( Address dst ) {
InstructionMark im ( this ) ;
prefixq ( dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0x8F ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( rax , dst ) ;
}
void Assembler : : pusha ( ) { // 64bit
// we have to store original rsp. ABI says that 128 bytes
// below rsp are local scratch.
movq ( Address ( rsp , - 5 * wordSize ) , rsp ) ;
subq ( rsp , 16 * wordSize ) ;
movq ( Address ( rsp , 15 * wordSize ) , rax ) ;
movq ( Address ( rsp , 14 * wordSize ) , rcx ) ;
movq ( Address ( rsp , 13 * wordSize ) , rdx ) ;
movq ( Address ( rsp , 12 * wordSize ) , rbx ) ;
// skip rsp
movq ( Address ( rsp , 10 * wordSize ) , rbp ) ;
movq ( Address ( rsp , 9 * wordSize ) , rsi ) ;
movq ( Address ( rsp , 8 * wordSize ) , rdi ) ;
movq ( Address ( rsp , 7 * wordSize ) , r8 ) ;
movq ( Address ( rsp , 6 * wordSize ) , r9 ) ;
movq ( Address ( rsp , 5 * wordSize ) , r10 ) ;
movq ( Address ( rsp , 4 * wordSize ) , r11 ) ;
movq ( Address ( rsp , 3 * wordSize ) , r12 ) ;
movq ( Address ( rsp , 2 * wordSize ) , r13 ) ;
movq ( Address ( rsp , wordSize ) , r14 ) ;
movq ( Address ( rsp , 0 ) , r15 ) ;
}
void Assembler : : pushq ( Address src ) {
InstructionMark im ( this ) ;
prefixq ( src ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xFF ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( rsi , src ) ;
}
void Assembler : : rclq ( Register dst , int imm8 ) {
assert ( isShiftCount ( imm8 > > 1 ) , " illegal shift count " ) ;
int encode = prefixq_and_encode ( dst - > encoding ( ) ) ;
if ( imm8 = = 1 ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xD1 ) ;
emit_int8 ( ( unsigned char ) ( 0xD0 | encode ) ) ;
2008-08-27 00:21:55 -07:00
} else {
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xC1 ) ;
emit_int8 ( ( unsigned char ) ( 0xD0 | encode ) ) ;
emit_int8 ( imm8 ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
}
2014-09-02 12:48:45 -07:00
2015-06-03 15:02:10 -07:00
void Assembler : : rcrq ( Register dst , int imm8 ) {
assert ( isShiftCount ( imm8 > > 1 ) , " illegal shift count " ) ;
int encode = prefixq_and_encode ( dst - > encoding ( ) ) ;
if ( imm8 = = 1 ) {
emit_int8 ( ( unsigned char ) 0xD1 ) ;
emit_int8 ( ( unsigned char ) ( 0xD8 | encode ) ) ;
} else {
emit_int8 ( ( unsigned char ) 0xC1 ) ;
emit_int8 ( ( unsigned char ) ( 0xD8 | encode ) ) ;
emit_int8 ( imm8 ) ;
}
}
2014-09-02 12:48:45 -07:00
void Assembler : : rorq ( Register dst , int imm8 ) {
assert ( isShiftCount ( imm8 > > 1 ) , " illegal shift count " ) ;
int encode = prefixq_and_encode ( dst - > encoding ( ) ) ;
if ( imm8 = = 1 ) {
emit_int8 ( ( unsigned char ) 0xD1 ) ;
emit_int8 ( ( unsigned char ) ( 0xC8 | encode ) ) ;
} else {
emit_int8 ( ( unsigned char ) 0xC1 ) ;
emit_int8 ( ( unsigned char ) ( 0xc8 | encode ) ) ;
emit_int8 ( imm8 ) ;
}
}
void Assembler : : rorxq ( Register dst , Register src , int imm8 ) {
assert ( VM_Version : : supports_bmi2 ( ) , " bit manipulation instructions not supported " ) ;
2015-11-09 11:26:41 -08:00
InstructionAttr attributes ( AVX_128bit , /* vex_w */ true , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , 0 , src - > encoding ( ) , VEX_SIMD_F2 , VEX_OPCODE_0F_3A , & attributes ) ;
2014-09-02 12:48:45 -07:00
emit_int8 ( ( unsigned char ) 0xF0 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
emit_int8 ( imm8 ) ;
}
2016-04-26 21:54:21 -07:00
void Assembler : : rorxd ( Register dst , Register src , int imm8 ) {
assert ( VM_Version : : supports_bmi2 ( ) , " bit manipulation instructions not supported " ) ;
InstructionAttr attributes ( AVX_128bit , /* vex_w */ false , /* legacy_mode */ true , /* no_mask_reg */ false , /* uses_vl */ false ) ;
int encode = vex_prefix_and_encode ( dst - > encoding ( ) , 0 , src - > encoding ( ) , VEX_SIMD_F2 , VEX_OPCODE_0F_3A , & attributes ) ;
emit_int8 ( ( unsigned char ) 0xF0 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
emit_int8 ( imm8 ) ;
}
2008-08-27 00:21:55 -07:00
void Assembler : : sarq ( Register dst , int imm8 ) {
assert ( isShiftCount ( imm8 > > 1 ) , " illegal shift count " ) ;
int encode = prefixq_and_encode ( dst - > encoding ( ) ) ;
if ( imm8 = = 1 ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xD1 ) ;
emit_int8 ( ( unsigned char ) ( 0xF8 | encode ) ) ;
2008-08-27 00:21:55 -07:00
} else {
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xC1 ) ;
emit_int8 ( ( unsigned char ) ( 0xF8 | encode ) ) ;
emit_int8 ( imm8 ) ;
2008-08-27 00:21:55 -07:00
}
}
2007-12-01 00:00:00 +00:00
2008-08-27 00:21:55 -07:00
void Assembler : : sarq ( Register dst ) {
int encode = prefixq_and_encode ( dst - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xD3 ) ;
emit_int8 ( ( unsigned char ) ( 0xF8 | encode ) ) ;
2008-08-27 00:21:55 -07:00
}
2011-01-07 10:42:32 -05:00
2008-08-27 00:21:55 -07:00
void Assembler : : sbbq ( Address dst , int32_t imm32 ) {
InstructionMark im ( this ) ;
prefixq ( dst ) ;
emit_arith_operand ( 0x81 , rbx , dst , imm32 ) ;
}
2007-12-01 00:00:00 +00:00
2008-08-27 00:21:55 -07:00
void Assembler : : sbbq ( Register dst , int32_t imm32 ) {
( void ) prefixq_and_encode ( dst - > encoding ( ) ) ;
emit_arith ( 0x81 , 0xD8 , dst , imm32 ) ;
}
2007-12-01 00:00:00 +00:00
2008-08-27 00:21:55 -07:00
void Assembler : : sbbq ( Register dst , Address src ) {
InstructionMark im ( this ) ;
prefixq ( src , dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x1B ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( dst , src ) ;
}
2007-12-01 00:00:00 +00:00
2008-08-27 00:21:55 -07:00
void Assembler : : sbbq ( Register dst , Register src ) {
( void ) prefixq_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
emit_arith ( 0x1B , 0xC0 , dst , src ) ;
}
2007-12-01 00:00:00 +00:00
2008-08-27 00:21:55 -07:00
void Assembler : : shlq ( Register dst , int imm8 ) {
assert ( isShiftCount ( imm8 > > 1 ) , " illegal shift count " ) ;
int encode = prefixq_and_encode ( dst - > encoding ( ) ) ;
if ( imm8 = = 1 ) {
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xD1 ) ;
emit_int8 ( ( unsigned char ) ( 0xE0 | encode ) ) ;
2008-08-27 00:21:55 -07:00
} else {
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xC1 ) ;
emit_int8 ( ( unsigned char ) ( 0xE0 | encode ) ) ;
emit_int8 ( imm8 ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
}
void Assembler : : shlq ( Register dst ) {
int encode = prefixq_and_encode ( dst - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xD3 ) ;
emit_int8 ( ( unsigned char ) ( 0xE0 | encode ) ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : shrq ( Register dst , int imm8 ) {
assert ( isShiftCount ( imm8 > > 1 ) , " illegal shift count " ) ;
int encode = prefixq_and_encode ( dst - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xC1 ) ;
emit_int8 ( ( unsigned char ) ( 0xE8 | encode ) ) ;
emit_int8 ( imm8 ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : shrq ( Register dst ) {
int encode = prefixq_and_encode ( dst - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xD3 ) ;
emit_int8 ( 0xE8 | encode ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : subq ( Address dst , int32_t imm32 ) {
InstructionMark im ( this ) ;
prefixq ( dst ) ;
2011-01-07 10:42:32 -05:00
emit_arith_operand ( 0x81 , rbp , dst , imm32 ) ;
2008-08-27 00:21:55 -07:00
}
void Assembler : : subq ( Address dst , Register src ) {
InstructionMark im ( this ) ;
prefixq ( dst , src ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x29 ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( src , dst ) ;
}
2011-01-07 10:42:32 -05:00
void Assembler : : subq ( Register dst , int32_t imm32 ) {
( void ) prefixq_and_encode ( dst - > encoding ( ) ) ;
emit_arith ( 0x81 , 0xE8 , dst , imm32 ) ;
}
2012-02-15 21:37:49 -08:00
// Force generation of a 4 byte immediate value even if it fits into 8bit
void Assembler : : subq_imm32 ( Register dst , int32_t imm32 ) {
( void ) prefixq_and_encode ( dst - > encoding ( ) ) ;
emit_arith_imm32 ( 0x81 , 0xE8 , dst , imm32 ) ;
}
2008-08-27 00:21:55 -07:00
void Assembler : : subq ( Register dst , Address src ) {
InstructionMark im ( this ) ;
prefixq ( src , dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x2B ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( dst , src ) ;
}
void Assembler : : subq ( Register dst , Register src ) {
( void ) prefixq_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
emit_arith ( 0x2B , 0xC0 , dst , src ) ;
}
void Assembler : : testq ( Register dst , int32_t imm32 ) {
// not using emit_arith because test
// doesn't support sign-extension of
// 8bit operands
int encode = dst - > encoding ( ) ;
if ( encode = = 0 ) {
prefix ( REX_W ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xA9 ) ;
2007-12-01 00:00:00 +00:00
} else {
2008-08-27 00:21:55 -07:00
encode = prefixq_and_encode ( encode ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0xF7 ) ;
emit_int8 ( ( unsigned char ) ( 0xC0 | encode ) ) ;
2007-12-01 00:00:00 +00:00
}
2013-01-07 14:08:28 -08:00
emit_int32 ( imm32 ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : testq ( Register dst , Register src ) {
( void ) prefixq_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
emit_arith ( 0x85 , 0xC0 , dst , src ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : xaddq ( Address dst , Register src ) {
InstructionMark im ( this ) ;
prefixq ( dst , src ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x0F ) ;
emit_int8 ( ( unsigned char ) 0xC1 ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( src , dst ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : xchgq ( Register dst , Address src ) {
InstructionMark im ( this ) ;
prefixq ( src , dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0x87 ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( dst , src ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : xchgq ( Register dst , Register src ) {
int encode = prefixq_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( ( unsigned char ) 0x87 ) ;
emit_int8 ( ( unsigned char ) ( 0xc0 | encode ) ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : xorq ( Register dst , Register src ) {
( void ) prefixq_and_encode ( dst - > encoding ( ) , src - > encoding ( ) ) ;
emit_arith ( 0x33 , 0xC0 , dst , src ) ;
2007-12-01 00:00:00 +00:00
}
2008-08-27 00:21:55 -07:00
void Assembler : : xorq ( Register dst , Address src ) {
InstructionMark im ( this ) ;
prefixq ( src , dst ) ;
2012-12-20 18:53:44 -08:00
emit_int8 ( 0x33 ) ;
2008-08-27 00:21:55 -07:00
emit_operand ( dst , src ) ;
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
Compressed oops in instances, arrays, and headers. Code contributors are coleenp, phh, never, swamyv
Reviewed-by: jmasa, kamg, acorn, tbell, kvn, rasbold
2008-04-13 17:43:42 -04:00
}
2008-08-27 00:21:55 -07:00
# endif // !LP64